diff --git "a/4625.jsonl" "b/4625.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4625.jsonl"
@@ -0,0 +1,44 @@
+{"seq_id":"40293858597","text":"from gtts import gTTS\n#from subprocess import call\nfrom playsound import playsound\n\ndef create_audio(audio:str, filename:str):\n\n tts = gTTS(audio, lang='en')\n audio_path = f'audios/{filename}.mp3'\n tts.save(audio_path)\n # call(['afplay', 'audios/hello.mp3']) # MacOSX\n # call(['aplay', 'audios/hello.mp3']) # Linux\n playsound(audio_path) #Windows\n\nif __name__=='__main__':\n create_audio('just a second','feedback')","repo_name":"nataMamed/personal-assistent","sub_path":"assistent/audio_creator.py","file_name":"audio_creator.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11903590598","text":"from flask import (\n Blueprint,\n render_template,\n flash,\n g,\n session,\n redirect,\n url_for,\n request\n)\nfrom werkzeug.urls import url_parse\nfrom flask_login import (\n login_user,\n logout_user,\n current_user\n)\nfrom app import db\nfrom app.extensions import login\nfrom app.models import User, Posts\nfrom app.auth.forms import (\n LoginForm,\n RegistrationForm\n)\n\nauth = Blueprint('auth', __name__)\n\n@auth.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('post.index'))\n form = LoginForm(request.form)\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n remember_me = form.remember_me.data\n if user is None or not user.check_password(form.password.data):\n flash('Invalid username or password')\n return redirect(url_for('post.index'))\n login_user(user, remember=remember_me)\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('post.index')\n return redirect(next_page)\n \n return render_template('auth/login.html',\n title='Login',\n form=form\n )\n\n@auth.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('post.index'))\n\n@auth.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('post.index'))\n form = RegistrationForm(request.form)\n if form.validate_on_submit():\n user = User(username=form.username.data,\n fullnames=form.fullnames.data,\n phone_number=form.phone_number.data,\n password=form.password.data)\n db.session.add(user)\n db.session.commit()\n flash('Congratulations, you are now a registered user!')\n return redirect(url_for('auth.login'))\n \n return render_template('auth/register.html',\n title='Register',\n form=form)","repo_name":"richiemounti/vihiga","sub_path":"app/auth/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23608272861","text":"INPUT = {\r\n 'dirs': ('string', 'multiarray')\r\n \r\n}\r\n\r\nTEST = ('''\\\r\n3\r\n0 2\r\n/home/gcj/finals\r\n/home/gcj/quals\r\n2 1\r\n/chicken\r\n/chicken/egg\r\n/chicken\r\n1 3\r\n/a\r\n/a/b\r\n/a/c\r\n/b/b\r\n''','''\\\r\nCase #1: 4\r\nCase #2: 0\r\nCase #3: 4\r\n''')\r\n\r\n\r\ndef count_mkdirs(dir, existing, mkdir):\r\n if dir == '': return mkdir\r\n try:\r\n existing.index(dir)\r\n return mkdir\r\n except:\r\n # directory does not exist\r\n existing.append(dir)\r\n return count_mkdirs(dir.rpartition('/')[0], existing, mkdir+1)\r\n\r\ndef main(dirs):\r\n existing, new = dirs\r\n mkdir = 0\r\n for d in new:\r\n mkdir += count_mkdirs(d, existing, 0)\r\n return mkdir","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_59/299.py","file_name":"299.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23466466821","text":"#!/usr/bin/python\r\nfrom __future__ import print_function\r\nfrom sys import stdin\r\n#log = open(\"C:\\\\Users\\\\tx_2\\Documents\\\\Coding\\\\Google Code Jam 2015\\\\D-small-output.txt\", \"w\")\r\n\r\ndef readiline():\r\n return map( int, stdin.readline().strip().split() )\r\n\r\ndef readsline():\r\n return map( str, stdin.readline().strip().split() )\r\n\r\nT, = readiline()\r\n\r\nfor i in xrange(1,T+1):\r\n X,R,C = readiline()\r\n if X == 1:\r\n winguarantee = True\r\n else:\r\n winguarantee = True\r\n if (R <= X-2) or (C<= X-2):\r\n winguarantee = False\r\n if (R <= X-1) and (C<=X-1):\r\n winguarantee = False\r\n if R*C%X != 0:\r\n winguarantee = False\r\n\r\n if winguarantee:\r\n print ('Case #%d: GABRIEL' % (i))\r\n else:\r\n print ('Case #%d: RICHARD' % (i))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_158/922.py","file_name":"922.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5729470314","text":"def solution(citations):\n citations.sort()\n answer = citations[-1]\n while answer > 0:\n mnval, mxval = 0, 0\n for i in citations:\n if i >= answer:\n mxval += 1\n else:\n mnval += 1\n if mnval <= answer <= mxval:\n break\n else:\n answer -= 1\n return answer","repo_name":"lynever/prac_programmars","sub_path":"프로그래머스/lv2/42747. H-Index/H-Index.py","file_name":"H-Index.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5617824558","text":"#!/usr/bin/python3\ndef add_tuple(tuple_a=(), tuple_b=()):\n\n a = list(tuple_a) + [0, 0]\n b = list(tuple_b) + [0, 0]\n n_list = []\n\n for trav in range(2):\n n_list.append(a[trav] + b[trav])\n return(tuple(n_list))\n","repo_name":"dr4g0nB/holbertonschool-higher_level_programming","sub_path":"0x03-python-data_structures/7-add_tuple.py","file_name":"7-add_tuple.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34057370603","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 21 14:47:13 2020\r\n\r\n@author: hongi\r\n\"\"\"\r\nimport math\r\n\r\ndef compare_dictionaries(d1, d2):\r\n \"\"\" take two feature dictionaries d1 and d2 as inputs, and it should compute and return their log similarity score\r\n \"\"\"\r\n score = 0\r\n gef = 0\r\n for z in d1:\r\n gef += d1[z]\r\n total = gef\r\n \r\n for x in d2:\r\n if x in d1:\r\n score += math.log(d1[x] / total) * d2[x] \r\n else:\r\n score += math.log(0.5/total) * d2[x]\r\n return score\r\ndef clean_text(txt):\r\n \"\"\"takes a string of text txt as a parameter and returns a list \r\n containing the words in txt after it has been “cleaned”.\r\n \"\"\"\r\n s = txt\r\n s = s.replace('.', '')\r\n s = s.replace(',', '')\r\n s = s.replace('?', '')\r\n s = s.replace('!', '')\r\n s = s.replace(';', '')\r\n s = s.replace(':', '')\r\n s = s.replace('\"', '')\r\n jeff = s.lower()\r\n return jeff.split(' ')\r\n \r\ndef stem(s):\r\n \"\"\"accepts a string as a parameter. The function should then return the\r\n stem of s. The stem of a word is the root part of the word, which excludes\r\n any prefixes and suffixes.\r\n \"\"\"\r\n if len(s) < 5 :\r\n return s\r\n \r\n if s[-3:] == 'ing':\r\n if s[-4] == s[-5]:\r\n if s[-4] == 'l' :\r\n s = s[:-3]\r\n s = s[:-4]\r\n else: \r\n s = s[:-3]\r\n elif s[-2:] == 'er':\r\n s = s[:-2]\r\n elif s[-1] == 's' :\r\n s = s[:-1]\r\n stem_rest = stem(s)\r\n return stem_rest\r\n if len(s) >= 9:\r\n if s[-3:] == 'ion':\r\n s = s[:-3]\r\n \r\n \r\n elif s[0:3] == 'mis':\r\n if s == 'misses' or s == 'missus':\r\n return s[0:4]\r\n else:\r\n s = s[3:]\r\n elif s[:2] == 'un':\r\n s = s[2:]\r\n \r\n if len(s) >= 7:\r\n if s[:4] == 'over':\r\n s = s[4:]\r\n return s\r\n \r\n\r\nclass TextModel:\r\n \"\"\"a class that will compare two texts in a variety of ways\r\n \"\"\"\r\n def __init__(self, model_name):\r\n \"\"\"constructs a new textmodel object by accepting a string model_name \r\n as a parameter and initializing three attributes\r\n \"\"\"\r\n self.name = model_name\r\n self.words = ({})\r\n self.word_lengths = ({})\r\n self.stems = ({})\r\n self.sentence_lengths = ({})\r\n self.punctuation = ({})\r\n \r\n \r\n def __repr__(self):\r\n \"\"\"returns a string that includes the name of the model as well as the\r\n sizes of the dictionaries for each feature of the text\r\n \"\"\"\r\n printer = 'text model name: ' + str(self.name) + '\\n'\r\n printer += ' number of words: ' + str(len(self.words)) +'\\n'\r\n printer += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\r\n printer += ' number of stems: ' + str(len(self.stems)) + '\\n'\r\n printer += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\r\n printer += ' number of different punctuations: ' + str(len(self.punctuation)) \r\n return printer\r\n \r\n def add_string(self, s):\r\n \"\"\"adds a string of text s to the model by augmenting the feature \r\n dictionaries defined in the constructor\r\n \"\"\"\r\n space = 0\r\n count = 0\r\n word_list = clean_text(s)\r\n for w in word_list:\r\n # Update self.words to reflect w\r\n if w not in self.words:\r\n self.words[w] = 0\r\n self.words[w] += 1\r\n #self.word_lengths\r\n for w in word_list:\r\n if len(w) not in self.word_lengths:\r\n self.word_lengths[len(w)] = 0\r\n self.word_lengths[len(w)] += 1\r\n #self.stem\r\n for w in word_list:\r\n if stem(w) not in self.stems:\r\n self.stems[stem(w)] = 0 \r\n self.stems[stem(w)] += 1 \r\n #self.sentence_lengths\r\n for w in s:\r\n 
if w == ' ':\r\n space += 1\r\n if w in '.?!' and count == 0:\r\n if space not in self.sentence_lengths:\r\n self.sentence_lengths[space+1] = 1\r\n space = 0\r\n count += 1 \r\n elif w in '.?!' and count > 0:\r\n if space not in self.sentence_lengths:\r\n self.sentence_lengths[space] = 0\r\n self.sentence_lengths[space] += 1\r\n space = 0\r\n #self.punctuation\r\n for w in s:\r\n if w == '?':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == \"...\":\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == \".\":\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == \"!\":\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == \"-\":\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == '/':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == ';':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == '[':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == '\\\"':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == '(':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == '—':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == ':':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n if w == '\\'':\r\n if w not in self.punctuation:\r\n self.punctuation[w] = 0\r\n self.punctuation[w] += 1 \r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n def add_file(self, filename):\r\n \"\"\"adds all of the text in the file identified by filename to the model\r\n \"\"\"\r\n f = open(filename, 'r', encoding='utf8', errors='ignore')\r\n text = f.read()\r\n self.add_string(text)\r\n f.close()\r\n \r\n def save_model(self):\r\n \"\"\"saves the TextModel object self by writing its various feature \r\n dictionaries to files\r\n \"\"\"\r\n jeff = self.name + '_words'\r\n f = open(jeff, 'w')\r\n f.write(str(self.words))\r\n f.close()\r\n \r\n jeph = self.name + '_word_lengths'\r\n f = open(jeph, 'w')\r\n f.write(str(self.word_lengths))\r\n f.close()\r\n \r\n geoff = self.name + '_stems'\r\n f = open(geoff, 'w')\r\n f.write(str(self.stems))\r\n f.close()\r\n \r\n joeff= self.name + '_sentence_lengths'\r\n f = open(joeff, 'w')\r\n f.write(str(self.sentence_lengths))\r\n f.close()\r\n \r\n geoph = self.name + '_punctuation'\r\n f = open(geoph, 'w')\r\n f.write(str(self.punctuation))\r\n f.close()\r\n \r\n def read_model(self):\r\n f = open(self.name + '_words')\r\n d_str=f.read()\r\n f.close()\r\n \r\n d = dict(eval(d_str))\r\n self.words = d\r\n \r\n \r\n f = open(self.name + '_word_lengths')\r\n d_str=f.read()\r\n f.close()\r\n \r\n z = dict(eval(d_str))\r\n self.word_lengths = z\r\n \r\n \r\n f = open(self.name + '_stems')\r\n d_str=f.read()\r\n f.close()\r\n \r\n z = dict(eval(d_str))\r\n self.stems = z\r\n \r\n \r\n f = open(self.name + '_sentence_lengths')\r\n d_str=f.read()\r\n f.close()\r\n \r\n z = dict(eval(d_str))\r\n self.sentence_lengths = z\r\n\r\n \r\n f = open(self.name + '_punctuation')\r\n d_str=f.read()\r\n f.close()\r\n \r\n z = dict(eval(d_str))\r\n self.punctuation = z \r\n \r\n def similarity_scores(self, 
other):\r\n \"\"\"computes and returns a list of log similarity scores measuring the \r\n similarity of self and other – one score for each type of feature \r\n (words, word lengths, stems, sentence lengths, and your additional \r\n feature)\r\n \"\"\"\r\n word_score = []\r\n word_score += [compare_dictionaries(other.words, self.words)]\r\n word_score += [compare_dictionaries(other.word_lengths, self.word_lengths)]\r\n word_score += [compare_dictionaries(other.stems, self.stems)]\r\n word_score += [compare_dictionaries(other.sentence_lengths, self.sentence_lengths)] \r\n word_score += [compare_dictionaries(other.punctuation, self.punctuation)]\r\n return word_score\r\n \r\n def classify(self, source1, source2):\r\n \"\"\"compares the called TextModel object (self) to two other “source” \r\n TextModel objects (source1 and source2) and determines which of these \r\n other TextModels is the more likely source of the called TextModel\r\n \"\"\"\r\n scores1 = self.similarity_scores(source1)\r\n scores2 = self.similarity_scores(source2)\r\n print('scores for ' + source1.name +':' + str(scores1))\r\n print('scores for ' + source2.name +':' + str(scores2))\r\n weighted_sum1 = 2*scores1[0] + 2*scores1[1] + scores1[2] + 2*scores1[3] + scores1[4] \r\n weighted_sum2 = 2*scores2[0] + 2*scores2[1] + scores2[2] + 2*scores2[3] + scores2[4] \r\n if max(weighted_sum1, weighted_sum2) == weighted_sum1: \r\n print(str(self.name) + ' is more likely to have come from ' + str(source1.name))\r\n elif max(weighted_sum1, weighted_sum2) == weighted_sum2: \r\n print(str(self.name) + ' is more likely to have come from ' + str(source2.name))\r\n\r\ndef test():\r\n \"\"\" test text model with given strings \"\"\"\r\n source1 = TextModel('source1')\r\n source1.add_string('It is interesting that she is interested.')\r\n\r\n source2 = TextModel('source2')\r\n source2.add_string('I am very, very excited about this!')\r\n\r\n mystery = TextModel('mystery')\r\n mystery.add_string('Is he interested? No, but I am.')\r\n mystery.classify(source1, source2)\r\n\r\ndef run_tests():\r\n \"\"\" tests the text model with new text files \"\"\"\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)\r\n ","repo_name":"Ingi-Hong/txt-compare","sub_path":"TextCompare.py","file_name":"TextCompare.py","file_ext":"py","file_size_in_byte":11793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36053528144","text":"#!/usr/bin/env python\nfrom string import Template\npov = file(\"tmpl.pov\").read()\nsphere = file(\"sphere.pov\").read()\npovt = Template(pov)\nspheret = Template(sphere)\nlines = file(\"50.txt\").readlines()\nout = []\nfor line in lines:\n line = line.strip()\n strs = line.split(\" \")\n out.append([strs[0],strs[1],strs[2],strs[3]])\n\npd = dict(spheres=\"\")\nfor i in xrange(0,len(out)):\n fname = \"50povs/{:04}.pov\".format(i)\n nsphere = out[i]\n x,y,z = nsphere[1],nsphere[2],nsphere[3]\n pd[\"lx\"] = str(x)\n pd[\"ly\"] = str(y)\n pd[\"lz\"] = str(z)\n pd[\"spheres\"] += spheret.substitute({\"x\":x,\"y\":y,\"z\":z})\n if i%1 == 0:\n f = file(fname,\"w\")\n f.write(povt.substitute(pd))\n f.close()\n","repo_name":"abramhindle/vadims-fractals","sub_path":"pov/povit.py","file_name":"povit.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10182601093","text":"## input\n# 5\n# R R R U D D\n\nimport sys\ninput = lambda : sys.stdin.readline().rstrip()\n\nn = int(input())\nroot = list(map(str, input().split()))\n\nnow_x, now_y = 1, 1\nfor direct in root:\n if direct == 'U' and now_x > 1:\n now_x -= 1\n elif direct == 'D' and now_x < n:\n now_x += 1\n elif direct == 'R' and now_x < n:\n now_y += 1\n elif direct == 'L' and now_x > 1:\n now_y -= 1\n\nprint(now_x, now_y)","repo_name":"minsu4107/Algorithm","sub_path":"This_is_Code_Test/4/1-1. 상하좌우.py","file_name":"1-1. 상하좌우.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28908282896","text":"import dataclasses\nimport datetime\n\nimport regex as re\nfrom srt import Subtitle\n\nfrom libresvip.core.time_sync import TimeSynchronizer\nfrom libresvip.model.base import Note, Project, SingingTrack\n\nfrom .options import OutputOptions, SplitOption\n\nSYMBOL_PATTERN = re.compile(r\"(?!-)\\p{punct}+\")\n\n\n@dataclasses.dataclass\nclass SrtGenerator:\n options: OutputOptions\n synchronizer: TimeSynchronizer = dataclasses.field(init=False)\n\n def generate_project(self, project: Project) -> list[Subtitle]:\n self.synchronizer = TimeSynchronizer(project.song_tempo_list)\n if self.options.track_index == -1:\n singing_track = next(\n track for track in project.track_list if isinstance(track, SingingTrack)\n )\n else:\n singing_track = project.track_list[self.options.track_index]\n note_list = singing_track.note_list\n buffer = []\n lyric_lines = []\n for i, note in enumerate(note_list):\n buffer.append(note)\n commit_flag = False\n condition_symbol = SYMBOL_PATTERN.search(note.lyric) is not None\n condition_gap = (\n i + 1 < len(note_list)\n and note_list[i + 1].start_pos - note.end_pos >= 60\n )\n if self.options.split_by == SplitOption.SYMBOL:\n commit_flag = condition_symbol\n elif self.options.split_by == SplitOption.GAP:\n commit_flag = condition_gap\n elif self.options.split_by == SplitOption.BOTH:\n commit_flag = condition_symbol or condition_gap\n if i + 1 == len(note_list):\n commit_flag = True\n if commit_flag:\n self.commit_current_lyric_line(lyric_lines, buffer)\n buffer = []\n return lyric_lines\n\n def commit_current_lyric_line(\n self, lyric_lines: list[Subtitle], buffer: list[Note]\n ):\n start_time = self.get_time_from_ticks(buffer[0].start_pos)\n end_time = self.get_time_from_ticks(buffer[-1].end_pos)\n lyrics = \"\".join(SYMBOL_PATTERN.sub(\"\", note.lyric) for note in buffer)\n lyric_lines.append(\n Subtitle(\n index=len(lyric_lines) + 1,\n start=start_time,\n end=end_time,\n content=lyrics,\n )\n )\n\n def get_time_from_ticks(self, ticks: int) -> datetime.timedelta:\n seconds = self.synchronizer.get_actual_secs_from_ticks(ticks)\n seconds_int = int(seconds)\n milliseconds = (seconds % 1) * 1000\n return datetime.timedelta(seconds=seconds_int, milliseconds=milliseconds)\n","repo_name":"SoulMelody/LibreSVIP","sub_path":"libresvip/plugins/srt/srt_generator.py","file_name":"srt_generator.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
+{"seq_id":"11961724061","text":"import logging\n\ndef show_pair_patterns(sid, patterns):\n logger = logging.getLogger('MainLogger')\n if len(patterns) > 0:\n logger.info(\"processing: {}\".format(sid))\n for name, dates in patterns.items():\n for date in dates:\n logger.info(\"pattern {} range from {} to {}\".format(name, date[0], date[1]))\n\ndef show_single_patterns(sid, patterns):\n logger = logging.getLogger('MainLogger')\n if len(patterns) > 0:\n logger.info(\"processing: {}\".format(sid))\n for name, dates in patterns.items():\n logger.info(\"pattern {} at {}\".format(name, dates))\n\ndef compute_growth(cal_df, field):\n # return (cal_df[field] - cal_df[field].shift(1)) / cal_df[field].shift(1) * 100\n return cal_df[field].diff() / cal_df[field].abs().shift()\n\ndef value_to_float(x):\n if type(x) == float or type(x) == int:\n return x\n if 'k' in x or 'K' in x:\n if len(x) > 1:\n return float(x.replace('k', '').replace('K', '')) * 1000\n return 1000.0\n if 'm' in x or 'M' in x:\n if len(x) > 1:\n return float(x.replace('m', '').replace('M', '')) * 1_000_000\n return 1_000_000.0\n if 'b' in x or 'B' in x:\n return float(x.replace('b', '').replace('B', '')) * 1_000_000_000\n return x","repo_name":"fmchan/HK-Stock-Analysis","sub_path":"datautils/pattern_utils.py","file_name":"pattern_utils.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12885569648","text":"# -*- coding: utf-8 -*-\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\nimport shutil\nimport tempfile\nimport os\nimport copy\n\nimport numpy as np\n\nfrom corral import run\n\nfrom astropy.io import fits\n\nfrom PyAstronomy import pyasl\n\nfrom six.moves import zip, range\n\nfrom .. import bin\nfrom ..lib.context_managers import cd\nfrom ..models import PawprintStack\n\n\n# =============================================================================\n# CONSTANTS\n# =============================================================================\n\nPAWPRINT_DTYPE = {\n \"names\": [\n 'ra_h', 'ra_m', 'ra_s', 'dec_d', 'dec_m', 'dec_s', 'x', 'y',\n 'mag1', 'mag_err1', 'mag2', 'mag_err2',\n 'mag3', 'mag_err3', 'mag4', 'mag_err4',\n 'mag5', 'mag_err5', 'mag6', 'mag_err6', 'mag7', 'mag_err7',\n 'chip_nro', 'stel_cls', 'elip', 'pos_ang', 'confidence',\n ],\n \"formats\": [\n int, int, float, int, int, float, float, float,\n float, float, float, float,\n float, float, float, float,\n float, float, float, float, float, float,\n int, int, float, float, float\n ]\n}\n\n\n# =============================================================================\n# STEPS\n# =============================================================================\n\nclass ReadPawprintStack(run.Step):\n \"\"\"Convert the pawprint into a numpy array\n ans also set the mjd and band metadata. This makes the pawprint-stack ready\n to be matched again their tiles.\n\n \"\"\"\n\n model = PawprintStack\n conditions = [model.status == \"raw\"]\n groups = [\"preprocess\", \"read\"]\n\n # =========================================================================\n # STEP SETUP & TEARDOWN\n # =========================================================================\n\n def setup(self):\n self.vvv_flx2mag = bin.vvv_flx2mag.execute\n self.temp_directory = tempfile.mkdtemp(suffix=\"_carpyncho_ppstk\")\n\n def teardown(self, *args, **kwargs):\n if not os.path.exists(self.temp_directory):\n shutil.rmtree(self.temp_directory)\n\n # =========================================================================\n # EXTRACT HEADER\n # =========================================================================\n\n def extract_headers(self, hdulist):\n mjd = hdulist[0].header[\"MJD-OBS\"]\n band = hdulist[0].header[\"ESO INS FILT1 NAME\"].strip()\n return band, mjd\n\n # =========================================================================\n # TO ARRAY\n # =========================================================================\n\n def load_fit(self, pawprint):\n to_cd = os.path.dirname(pawprint)\n basename = os.path.basename(pawprint)\n asciiname = os.path.splitext(basename)[0] + \".txt\"\n asciipath = os.path.join(self.temp_directory, asciiname)\n\n # create the ascii table\n with cd(to_cd):\n self.vvv_flx2mag(basename, asciipath)\n\n # read ascii table\n odata = np.genfromtxt(asciipath, PAWPRINT_DTYPE)\n os.remove(asciipath)\n return odata, len(odata)\n\n def add_columns(self, odata, size, pwp_id, mjd, dtypes):\n \"\"\"Add id, hjds, ra_deg and dec_deg columns to existing recarray\n\n \"\"\"\n\n # calculate the ra and the dec columns\n radeg = 15 * (odata['ra_h'] +\n odata['ra_m'] / 60.0 +\n odata['ra_s'] / 3600.0)\n\n decdeg = np.sign(odata['dec_d']) * (np.abs(odata['dec_d']) +\n odata['dec_m'] / 60.0 +\n odata['dec_s'] / 3600.0)\n\n # calculate the hjds\n hjds = np.fromiter(\n 
(pyasl.helio_jd(mjd, ra, dec) for ra, dec in zip(radeg, decdeg)),\n dtype=float)\n\n # create ids\n ps_name = \"3\" + str(pwp_id).zfill(7)\n\n def get_id(order):\n order = str(order).rjust(8, \"0\")\n return (ps_name + order)\n\n ids = np.fromiter(\n (get_id(idx + 1) for idx in range(size)), dtype=np.int64)\n\n # create a new dtype to store the ra and dec as degrees\n dtype = copy.deepcopy(dtypes)\n dtype[\"names\"].insert(0, \"dec_deg\")\n dtype[\"names\"].insert(0, \"ra_deg\")\n dtype[\"names\"].insert(0, \"hjd\")\n dtype[\"names\"].insert(0, \"id\")\n\n dtype[\"formats\"].insert(0, float)\n dtype[\"formats\"].insert(0, float)\n dtype[\"formats\"].insert(0, float)\n dtype[\"formats\"].insert(0, np.int64)\n\n # create an empty array and copy the values\n data = np.empty(len(odata), dtype=dtype)\n for name in data.dtype.names:\n if name == \"id\":\n data[name] = ids\n elif name == \"ra_deg\":\n data[name] = radeg\n elif name == \"dec_deg\":\n data[name] = decdeg\n elif name == \"hjd\":\n data[name] = hjds\n else:\n data[name] = odata[name]\n return data\n\n def to_array(self, pwp_stk):\n original_array, size = self.load_fit(pwp_stk.raw_file_path)\n arr = self.add_columns(\n odata=original_array, size=size, pwp_id=pwp_stk.id,\n mjd=pwp_stk.mjd, dtypes=PAWPRINT_DTYPE)\n return arr, size\n\n # =========================================================================\n # STEP FUNCTIONS\n # =========================================================================\n\n def process(self, pwp):\n with fits.open(pwp.raw_file_path) as hdulist:\n pwp.band, pwp.mjd = self.extract_headers(hdulist)\n\n arr, size = self.to_array(pwp)\n\n pwp.size = size\n pwp.store_npy_file(arr)\n pwp.status = \"ready-to-match\"\n\n yield pwp\n self.session.commit()\n","repo_name":"carpyncho/carpyncho","sub_path":"carpyncho/steps/read_pawprint_stack.py","file_name":"read_pawprint_stack.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35231773493","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom collaborative_filtering import CollaborativeFiltering\nfrom content_based_filtering import ContentBasedFiltering\nfrom hybrid_filtering import HybridFiltering\n\n# Gather and preprocess user data\ndata = pd.read_csv('user_data.csv').dropna().drop_duplicates()\ndata['age_in_days'] = (pd.to_datetime('today') - pd.to_datetime(data['birth_date'])).dt.days\ndata['is_male'] = (data['gender'] == 'male').astype(int)\nX = data[['age_in_days', 'is_male', 'product_interest']]\ny = data['purchase_history']\n\n# Implement machine learning algorithms\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\ncf_model = CollaborativeFiltering()\ncf_model.fit(X_train, y_train)\ncb_model = ContentBasedFiltering()\ncb_model.fit(X_train, y_train)\nhybrid_model = HybridFiltering(cf_model, cb_model)\nhybrid_model.fit(X_train, y_train)\n\n# Develop a recommendation system\ndef recommend(user_data):\n recommendations = hybrid_model.predict(user_data)\n return recommendations.head(5)\n\n# Test and evaluate the system\nscores = cross_val_score(hybrid_model, X, y, cv=5)\nmean_score = np.mean(scores)\nprint(f\"Mean cross-validation score: {mean_score}\")\n","repo_name":"Shahupdates/pythonTrafficMonitoring","sub_path":"recommendation_system.py","file_name":"recommendation_system.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74754757635","text":"\"\"\" Get a non-comprehensive list of words that blocks can make\"\"\"\nimport copy\nimport json\n\nimport click\n\nMIN_WORD_LENGTH = 3\nALLOWED_SHORT_WORDS = ['a', 'i', 'am', 'an', 'be', 'do', 'go', 'he', 'if',\n 'in', 'is', 'it', 'my', 'no', 'oh', 'on', 'or']\n\n\ndef load_blocks(path):\n \"\"\" Load the JSON file located at 'path' and return the blocks key \"\"\"\n with open(path) as json_file:\n data = json.load(json_file)\n return data['blocks']\n\n\ndef letter_in_block(letter, block):\n \"\"\" Bool: is a letter in a block \"\"\"\n for block_letter in block:\n if block_letter.lower() == letter.lower():\n return True\n return False\n\n\ndef blocks_make_word(blocks, word):\n \"\"\" Return bool if blocks make word, and the remaining blocks \"\"\"\n word = word.strip()\n block_cp = copy.deepcopy(blocks)\n for letter in word:\n letter_found = False\n for block in block_cp:\n if letter_in_block(letter, block):\n letter_found = True\n block_cp.remove(block)\n break\n if not letter_found:\n # print(f'letter \"{letter}\" not in {block_cp} for {word}')\n return False, blocks\n return True, block_cp\n\n\ndef load_dictionary(path, blocks):\n \"\"\" Load txt file listing english words, filter to possible from block \"\"\"\n words = []\n print('LOADING DICTIONARY')\n with open(path) as words_file:\n total = 0\n for word in words_file:\n word = word.strip()\n total += 1\n if len(word) < MIN_WORD_LENGTH and word not in ALLOWED_SHORT_WORDS:\n # The dictionary, and English, has a ton of useless short words\n # like \"AA\" and \"MM\"\n continue\n result, rem_blocks = blocks_make_word(blocks, word)\n if result:\n words.append(word)\n print('{}/{} usable words found'.format(len(words), total))\n return words\n\n\ndef find_words(dictionary, blocks, line='', progress_interval=0):\n \"\"\" Return a dict tree of words made from the given blocks \"\"\"\n x = line.replace(\" \", \"\")\n print(f'{line} ({len(x)} blocks)')\n blocks_cp = copy.deepcopy(blocks)\n words = {}\n for entry in dictionary:\n if progress_interval != 0:\n index = dictionary.index(entry)\n if index % progress_interval == 0:\n dictionary_length = len(dictionary)\n print(f'{index} / {dictionary_length} ({entry})')\n made_word, remaining_blocks = blocks_make_word(blocks_cp, entry)\n if made_word:\n words[entry] = find_words(dictionary, remaining_blocks,\n line=f'{line} {entry}')\n return words\n\n\ndef print_words(words, line=''):\n \"\"\" Accepts a nested word tree dict, prints it \"\"\"\n for key in words:\n new_line = f'{line} {key}'\n if words[key] == {}:\n click.echo(new_line.lstrip())\n else:\n print_words(words[key], new_line)\n","repo_name":"kylep/word-blocks","sub_path":"words/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"42609730706","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.random import rand\nimport pandas as pd\nfrom math import pi\n\ndef awards(think, connect, innovate, design, motivate, control,\n label, imgfile):\n# with plt.xkcd():\n fig, ax = plt.subplots()\n\n\n\n df = pd.DataFrame({\n 'group': [label],\n 'think': [think],\n 'connect': [connect],\n 'innovate': [innovate],\n 'design': [design],\n 'movivate': [motivate],\n 'control': [control]\n })\n\n categories=list(df)[1:]\n N = len(categories)\n\n # We are going to plot the first line of the data frame.\n # But we need to repeat the first value to close the circular graph:\n values=df.loc[0].drop('group').values.flatten().tolist()\n values += values[:1]\n values\n\n # What will be the angle of each axis in the plot? (we divide the plot / number of variable)\n angles = [n / float(N) * 2 * pi for n in range(N)]\n angles += angles[:1]\n\n # Initialise the spider plot\n ax = plt.subplot(111, polar=True)\n\n # Draw one axe per variable + add labels labels yet\n plt.xticks(angles[:-1], categories, color='grey', size=8)\n\n # Draw ylabels\n # ax.set_rlabel_position(0)\n plt.yticks([10,20,30], [\"10\",\"20\",\"30\"], color=\"grey\", size=0)\n plt.ylim(0, 1)\n\n # Plot data\n ax.plot(angles, values, linewidth=1, linestyle='solid')\n\n # Fill area\n ax.fill(angles, values, 'b', alpha=0.1)\n # plt.show()\n\n plt.savefig(imgfile)\n\nawards(.4, .95, .1, .4, .0, .6,\n 'Single-dimensional team', 'img-generated/awards_4628.png')\nawards(.8, .2, .2, .8, .6, .7,\n 'Inspire candidate team', 'img-generated/awards_inspire.png')\nawards(.95, .95, .8, .8, .75, .9,\n 'Strong inspire candidate', 'img-generated/awards_strong_inspire.png')\nawards(.95, .95, 1.2, .95, 1.6, .8,\n 'Supers Inspire Winner', 'img-generated/awards_8496.png')\n","repo_name":"cporter/ftc-awards-rr","sub_path":"inspire_charts.py","file_name":"inspire_charts.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70030542916","text":"\"\"\" \nLicensed under GNU GPL-3.0-or-later\n\nThis file is part of RS Companion.\n\nRS Companion is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nRS Companion is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with RS Companion. If not, see .\n\nAuthor: Phillip Riskin\nDate: 2020\nProject: Companion App\nCompany: Red Scientific\nhttps://redscientific.com/index.html\n\"\"\"\n\nfrom logging import getLogger, StreamHandler\nfrom asyncio import sleep, Event\nfrom RSCompanionAsync.Devices.Camera.Model.cam_stream_reader import StreamReader\nfrom RSCompanionAsync.Devices.Camera.Model.cam_defs import common_resolutions\n\n\nclass SizeGetter:\n def __init__(self, stream: StreamReader, log_handlers: [StreamHandler] = None):\n self._logger = getLogger(__name__)\n if log_handlers:\n for h in log_handlers:\n self._logger.addHandler(h)\n self._stream = stream\n self._done_flag = Event()\n self._cancel_bool = False\n self._current_status = 0\n self.running = True\n\n async def get_sizes(self) -> list:\n \"\"\"\n Get supported resolutions and return as list.\n :return list: The list of supported frame resolutions for the given StreamReader.\n \"\"\"\n sizes = list()\n initial_size = self._stream.get_resolution()\n initial_size = (int(initial_size[0]), int(initial_size[1]))\n sizes.append(initial_size)\n if initial_size not in common_resolutions:\n list_index = 0\n else:\n list_index = common_resolutions.index(initial_size) + 1\n for i in range(list_index, len(common_resolutions)):\n if self._cancel_bool:\n return list()\n ret, res = self._stream.test_resolution(common_resolutions[i])\n if ret and res in common_resolutions:\n sizes.append((int(res[0]), int(res[1])))\n self._current_status = i / (len(common_resolutions) - list_index) * 100\n await sleep(.001)\n self._stream.set_resolution(initial_size)\n sizes.sort()\n return sizes\n\n @property\n def status(self) -> int:\n return self._current_status\n\n def stop(self) -> None:\n \"\"\"\n Stop get_sizes if running.\n :return None:\n \"\"\"\n self._logger.debug(\"running\")\n self._cancel_bool = True\n self._logger.debug(\"done\")\n","repo_name":"USnark772/RSCompanionV3","sub_path":"RSCompanionAsync/Devices/Camera/Model/cam_size_getter.py","file_name":"cam_size_getter.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22297554094","text":"import argparse\nimport os\nimport sys\nimport datetime\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\n\nimport src.models as models\nimport data\nimport src\nimport src.supervision\nimport src.supervision.metrics as metrics\n\nimport src.dataset.box_pose_dataset_factory as dataset_factory\nimport src.dataset.depthmap_val_dataset as depthmap_val_dataset\nimport src.dataset.real_dataloader as real_dataloader\nimport src.dataset as dataset\nimport src.io as io\nimport src.utils as utils\nimport src.dataset.samplers.pose.pose_sampler as pose_sampler\nimport numpy as np\nimport torch\nimport src.dataset.samplers.intrinsics_generator as intrinsics_generator\nimport src.other as other\nfrom src.dataset.rendering.box_renderer import BoxRenderFlags\nimport random\nimport time\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\ndef parse_arguments(args):\n usage_text = (\n \"StructureNet train/test executor.\"\n \"Usage: python main.py [options],\"\n \" with [options]:\"\n )\n parser = argparse.ArgumentParser(description=usage_text)\n # durations\n parser.add_argument('-e','--epochs', type = int, default=20, help = \"Train for a total number of epochs.\")\n parser.add_argument('-b','--batch_size', type = int, default=5, help = \"Train with a number of samples each train iteration.\")\n parser.add_argument('--test_batch_size', default=1, type = int, help = \"Test with a number of samples each test iteration.\") \n parser.add_argument('-c','--checkpoint_iters', type=int, default=1000, help='Save checkpoint (i.e. 
weights & optimizer) every iterations.')\n parser.add_argument('-t','--test_iters', type=int, default=1000, help='Test model every iterations.')\n parser.add_argument('--train_duration', type=int, default = sys.maxsize, help='Train duration counted in iteration.')\n # paths\n parser.add_argument('--train_path', type = str, help = \"Path to the training folder containing the files\")\n parser.add_argument('--test_path', type = str, help = \"Path to the testing folder containing the files\")\n weight_group = parser.add_mutually_exclusive_group()\n weight_group.add_argument('--weights', type = str, help = \"Path to weights file (for fine-tuning or continuing training)\")\n parser.add_argument('--opt_state', type = str, help = \"Path to stored optimizer state file (for continuing training)\")\n # data paths\n parser.add_argument('--corbs_path', type=str, help = \"Path to CORBS background dataset\")\n parser.add_argument('--vcl_path', type = str, help = \"Path to VCL background dataset\")\n parser.add_argument('--intnet_path', type = str, help = 'Path to interior net background dataset')\n parser.add_argument('--valset_path', type = str, help = 'Path to validation set')\n parser.add_argument('--real_data_path', type=str, help= 'Path to real-data set')\n parser.add_argument('--test_data_path', type=str, help= 'Path to rendered data for test')\n #parser.add_argument(\"--device_list\",nargs=\"*\", type=str, default = [\"M72e\",\"M72h\",\"M72i\",\"M72j\",\"M11\"], help = \"List of device names to be loaded\") \n parser.add_argument(\"--device_list\",nargs=\"*\", type=str, default = [\"M11\"], help = \"List of device names to be loaded\") \n #model\n parser.add_argument('-hl', '--heat_weight', type = float, default=0.0, help='Weight/contribution of heatmap loss ot the total loss')\n parser.add_argument('-sl', '--seg_weight', type = float, default=1.0, help='Weight/contribution of segmentation loss ot the total loss')\n parser.add_argument('-cl', '--cor_weight', type = float, default=0.0, help='Weight/contribution of soft correspondences loss')\n parser.add_argument('--soft_cor', type = str2bool, default=False, help='Flag soft correspondences loss using SVD')\n parser.add_argument('-snl', '--surface_weight', type = float, default=0.0, help='Weight/contribution of surface normals loss ot the total loss')\n parser.add_argument('-nc','--nclasses', type = int, default=25, help = \"Number of classes.\")\n parser.add_argument('--model_name', default=\"default\", type=str, help='Model selection argument.')\n parser.add_argument('--saved_params_path', type=str, help='Path where a trained model has been stored')\n parser.add_argument('--ndf', type=int, default=8, help='Constant values used to define input and output channels at nn layers')\n parser.add_argument('--upsample_type', default=\"nearest\", type=str, help='Model selection argument.')\n # optimization\n parser.add_argument('-o','--optimizer', type=str, default=\"adam\", help='The optimizer that will be used during training.')\n parser.add_argument('-l','--lr', type=float, default=0.0002, help='Optimization Learning Rate.')\n parser.add_argument('-m','--momentum', type=float, default=0.9, help='Optimization Momentum.')\n parser.add_argument('--momentum2', type=float, default=0.999, help='Optimization Second Momentum (optional, only used by some optimizers).')\n parser.add_argument('--epsilon', type=float, default=1e-8, help='Optimization Epsilon (optional, only used by some optimizers).')\n parser.add_argument('--weight_decay', type=float, 
default=0.0005, help='Optimization Weight Decay.')\n weight_group.add_argument('--weight_init', type=str, default=\"xavier\", help='Weight initialization method.')\n # hardware\n parser.add_argument('-g','--gpu', type=str, default='0', help='The ids of the GPU(s) that will be utilized. (e.g. 0 or 0,1, or 0,2). Use -1 for CPU.')\n parser.add_argument('--num_workers' ,type= int, default = 3, help='Number of workers to use for dataload')\n # other\n parser.add_argument('-n','--name', type=str, default='default_name', help='The name of this train/test. Used when storing information.')\n parser.add_argument(\"--depth_thres\", type=float, default=5.0, help = \"Depth threshold - depth clipping.\")\n parser.add_argument(\"--train_data_type\", type=str, help = \"Setting enabling type of training data (real, synthetic)\", choices=[\"real\", \"synthetic\", \"both\"] , default=\"synthetic\")\n \n #dataset parameters\n parser.add_argument(\"--rmin\", type=float, default=1.5, help = \"Synthetic dataset params, for more info check PoseSamplerParams.\")\n parser.add_argument(\"--rmax\", type=float, default=2.5, help = \"Synthetic dataset params, for more info check PoseSamplerParams.\")\n parser.add_argument(\"--zmin\", type=float, default=-0.35, help = \"Synthetic dataset params, for more info check PoseSamplerParams.\")\n parser.add_argument(\"--zmax\", type=float, default=1, help = \"Synthetic dataset params, for more info check PoseSamplerParams.\")\n parser.add_argument(\"--lookat\", type=float, default=0.5, help = \"Synthetic dataset params, for more info check PoseSamplerParams.\")\n parser.add_argument(\"--upvecvar\", type=float, default=10.0, help = \"Synthetic dataset params, for more info check PoseSamplerParams.\")\n parser.add_argument(\"--samples_per_dataset\", type=int, default=50000, help = \"Number of samples per dataset.\")\n parser.add_argument(\"--dr\", type=float, default=None, help = \"Synthetic dataset params, for more info check PoseSamplerParams.\")\n parser.add_argument(\"--dz\", type=float, default=None, help = \"Synthetic dataset params, for more info check PoseSamplerParams.\")\n parser.add_argument(\"--dphi\", type=float, default=None, help = \"Synthetic dataset params, for more info check PoseSamplerParams.\")\n #visualization\n parser.add_argument('-d','--disp_iters', type=int, default=10, help='Log training progress (i.e. loss etc.) on console every iterations.')\n parser.add_argument('--visdom', type=str, nargs='?', default=None, const=\"195.251.117.98\", help = \"Visdom server IP (port defaults to 8097)\")\n parser.add_argument('--visdom_iters', type=int, default=10, help = \"Iteration interval that results will be reported at the visdom server for visualization.\")\n #validation\n parser.add_argument('--confidence_threshold', type = float, default = 0.75, help ='confidence probability threshold to reject uncofident predictions')\n\n return parser.parse_known_args(args)\n\nif __name__ == \"__main__\":\n args, uknown = parse_arguments(sys.argv)\n #create and init device\n print(\"{} | Torch Version: {}\".format(datetime.datetime.now(), torch.__version__)) \n gpus = [int(id) for id in args.gpu.split(',') if int(id) >= 0]\n device = torch.device(\"cuda:{}\" .format(gpus[0]) if torch.cuda.is_available() and len(gpus) > 0 and gpus[0] >= 0 else \"cpu\")\n print(\"Training {0} for {1} epochs using a batch size of {2} on {3}\".format(args.name, args.epochs, args.batch_size, device)) \n\n visualizer =src.utils.visualization. 
NullVisualizer() if args.visdom is None\\\n else src.utils.visualization.VisdomVisualizer(args.name, args.visdom, count=4)\n if args.visdom is None:\n args.visdom_iters = 0\n\n #create model parameters\n model_params = {\n 'width': 320,\n 'height': 180,\n 'ndf': args.ndf,\n 'upsample_type': args.upsample_type,\n 'nclasses': args.nclasses\n }\n\n #create & init model || load pretrained model\n if args.saved_params_path is None:\n model = models.get_UNet_model(args.model_name, model_params).to(device)\n other.initialize_weights(model, args.weights if args.weights is not None else args.weight_init)\n model_name = args.model_name\n start_epoch = 0\n iterations = 0\n else:\n checkpoint = torch.load(args.saved_params_path)\n model = models.get_UNet_model(checkpoint['model_name'], model_params)\n print(\"Loading previously saved model from {}\".format(args.saved_params_path))\n model.load_state_dict(checkpoint['state_dict'])\n model = model.to(device)\n model_name = checkpoint['model_name']\n start_epoch = checkpoint['epoch']\n iterations = checkpoint['iterations']\n \n #create and init optimizer\n opt_params = other.OptimizerParameters(learning_rate=args.lr, momentum=args.momentum, \\\n momentum2=args.momentum2, epsilon=args.epsilon)\n optimizer = other.get_optimizer(args.optimizer, model.parameters(), opt_params)\n if args.saved_params_path is not None:\n print(\"Loading previously saved optimizer state from {}\".format(args.saved_params_path))\n optimizer.load_state_dict(checkpoint[\"optim_dict\"])\n \n #create data importer\n sample_count_per_subdataset = args.samples_per_dataset\n \n path_to_corbs_background_dataset = args.corbs_path\n path_to_vcl_background_dataset = args.vcl_path\n path_to_intnet_background_dataset = args.intnet_path\n\n # box flags\n box_flags_map = {\n 17 : BoxRenderFlags.LABEL_TOP_AND_BOTTOM_AS_BACKGROUND, \n 21 : BoxRenderFlags.LABEL_DOWN_AS_BACKGROUND, \n 25 : None\n }\n\n rnd_seed = 1234\n random.seed(rnd_seed) # this will generate fixed seeds of subcomponents that create the datasets (factory uses random.random() to initialize seeds)\n torch.random.manual_seed(rnd_seed)\n\n dataset_type = dataset_factory.BoxPoseDatasetType.VERTICAL_2\n\n\n # create dataset of 16:9 resolution based on RS2\n if args.train_data_type in [\"synthetic\",\"both\"]:\n if args.dr is None and args.dz is None and args.dphi is None:\n params = pose_sampler.PoseSamplerParams(\n num_positions = sample_count_per_subdataset,\n rmin = args.rmin,\n rmax = args.rmax,\n zmin = args.zmin,\n zmax = args.zmax,\n look_at_radius = args.lookat,\n up_vector_variance=args.upvecvar\n )\n dsiterators = dataset_factory.create_rs2_16_9_box_pose_dataset(path_to_corbs_background_dataset,path_to_vcl_background_dataset,\n path_to_intnet_background_dataset,\n pose_params = params,\n box_render_flags=box_flags_map[args.nclasses], \n dataset_type = dataset_type,\n out_resolution_width = model_params[\"width\"], out_resolution_height = model_params[\"height\"])\n elif args.dr is not None and args.dz is not None and args.dphi is not None:\n params = pose_sampler.PoseSamplerParamsGrid(\n rmin = args.rmin,\n rmax = args.rmax,\n dr = args.dr,\n zmin = args.zmin,\n zmax = args.zmax,\n dz = args.dz,\n look_at_radius = args.lookat,\n up_vector_variance=args.upvecvar,\n dphi = args.dphi\n )\n dsiterators = dataset_factory.create_rs2_16_9_grid_box_pose_dataset(path_to_corbs_background_dataset,path_to_vcl_background_dataset,\n path_to_intnet_background_dataset,\n pose_params = params,\n 
box_render_flags=box_flags_map[args.nclasses], \n dataset_type = dataset_type,\n out_resolution_width = model_params[\"width\"], out_resolution_height = model_params[\"height\"])\n else:\n raise Exception(\"Not valid\")\n \n else:\n dsiterators = list()\n\n ########\n\n # real data\n if args.train_data_type in [\"real\",\"both\"]:\n real_data_params = real_dataloader.DataLoaderParams(\\\n root_path=args.real_data_path, device_list=args.device_list,\\\n device_repository_path=args.real_data_path, depth_threshold=args.depth_thres, decimation_scale = 4) \n real_data_iterator = real_dataloader.DataLoad(real_data_params)\n dsiterators.append(real_data_iterator)\n\n dsiterator = torch.utils.data.ConcatDataset(dsiterators)\n #end of real data\n \n num_workers = args.num_workers\n dataset = torch.utils.data.DataLoader(dsiterator,\\\n batch_size = args.batch_size, shuffle=True,\\\n num_workers = num_workers, pin_memory=False)\n\n ##### TODO VALIDATION DATALOADER HERE #####\n if args.valset_path is not None:\n vdsiterator_params = depthmap_val_dataset.DepthmapDatasetParams(args.valset_path, 0.001, 4)\n vdsiterator = depthmap_val_dataset.DepthmapDataset(vdsiterator_params)\n\n vdataset = torch.utils.data.DataLoader(vdsiterator,\\\n batch_size = args.batch_size, shuffle=True,\\\n num_workers = 0, pin_memory=False)\n\n if args.test_data_path is not None:\n test_iterator_params = depthmap_val_dataset.DepthmapDatasetParams(\n args.test_data_path, 0.001,\n max_len = None,\n number_of_classes = args.nclasses)\n\n test_iterator = depthmap_val_dataset.DepthmapDataset(test_iterator_params) \n test_dataset = torch.utils.data.DataLoader(test_iterator,\\\n batch_size = args.batch_size, shuffle=False,\\\n num_workers = args.num_workers, pin_memory=True)\n\n\n ###########################################\n\n if args.nclasses not in (17,21,25):\n raise Exception(\"Wrong class number argument ({})\".format(args.nclasses))\n else:\n class_w = torch.ones((args.nclasses)).float()\n\n\n seg_criterion_1 = nn.NLLLoss2d(weight=class_w, reduction='mean').to(device)\n L2_criterion = nn.MSELoss().to(device)\n L2_norm_criterion = nn.MSELoss().to(device)\n\n #logging init\n batch_seg_loss = other.AverageMeter()\n batch_heat_loss = other.AverageMeter()\n batch_surface_loss = other.AverageMeter()\n batch_total_loss = other.AverageMeter()\n batch_soft_cor_loss = other.AverageMeter()\n batch_soft_cor_loss_unlabeled = other.AverageMeter()\n\n frame_index = 0\n for epoch in range(start_epoch, args.epochs):\n \n #init\n seg_loss = 0.0\n heat_loss = 0.0\n surface_loss = 0.0\n total_loss = 0.0\n model.train()\n\n for batch_id, batch in enumerate(dataset):\n \n if iterations > args.train_duration:\n epoch = args.epochs + 1\n break\n start = time.perf_counter()\n optimizer.zero_grad()\n \n batch_d = batch['depth']\n\n #forward pass\n if model_name == 'with_normals':\n activs, heat_pred, out, normals = model(batch_d.to(device))\n elif model_name == 'heatmap':\n activs, heat_pred, out = model(batch_d.to(device))\n else:\n activs, out = model(batch_d.to(device))\n\n\n real_batch, synth_batch, real_ids, synth_ids = utils.train_utils.split_batch(batch)\n\n if real_batch:\n out_real = out[real_ids]\n\n #prepare target\n if synth_batch:\n labels = synth_batch['labels'].float()\n target = labels\n out_synth = out[synth_ids]\n \n #prepare heatmap target\n seg_loss = seg_criterion_1(out_synth, target.squeeze(1).long().to(device))\n \n\n\n total_loss = seg_loss\n \n\n\n if synth_batch is not None:\n soft_cor_loss = 
src.supervision.losses.soft_correspondences_loss(\n torch.exp(out_synth),\n synth_batch,\n confidence = 0.0,\n criterion = L2_criterion,\n device = device,\n SVD=args.soft_cor\n )\n if soft_cor_loss is not None:\n total_loss += args.cor_weight * soft_cor_loss\n\n\n #backprop + grad update\n total_loss.backward()\n optimizer.step()\n\n\n if synth_batch:\n batch_seg_loss.update(seg_loss.cpu().detach())\n if args.cor_weight != 0.0 and synth_batch is not None and soft_cor_loss is not None:\n batch_soft_cor_loss.update(soft_cor_loss.cpu().detach())\n if model_name == 'heatmap':\n batch_heat_loss.update(heat_loss.cpu().detach())\n if model_name == 'with_normals':\n batch_heat_loss.update(heat_loss.cpu().detach())\n batch_surface_loss.update(surface_loss.cpu().detach())\n batch_total_loss.update(total_loss.cpu().detach())\n\n if real_batch is not None:\n batch_soft_cor_loss_unlabeled.update(soft_cor_loss_unlabeled.cpu().detach())\n\n iterations += args.batch_size\n print(\"Epoch: {}, iteration: {}, learning rate: {}, batch time: {}, Total Loss: {}, Seg Loss: {}, Heat Loss: {}, Surface Loss: {}\\n\"\\\n .format(epoch, iterations, optimizer.param_groups[0]['lr'], time.perf_counter() - start, batch_total_loss.avg.item(), batch_seg_loss.avg.item(), batch_heat_loss.avg.item(), batch_surface_loss.avg.item()))\n\n #visualization - Visdom\n if ((iterations) % args.disp_iters) == 0:\n visualizer.append_loss(epoch + 1, iterations, batch_total_loss.avg, \"total_loss\")\n if real_batch is not None:\n visualizer.append_loss(epoch + 1, iterations, batch_soft_cor_loss_unlabeled.avg, \"soft_cor_loss_unlabeled\")\n visualizer.append_loss(epoch + 1, iterations, batch_seg_loss.avg, \"segmentation_loss\")\n if args.cor_weight != 0.0:\n visualizer.append_loss(epoch + 1, iterations, batch_soft_cor_loss.avg, \"soft_correspondences_loss\")\n if model_name == 'heatmap':\n visualizer.append_loss(epoch + 1, iterations, batch_heat_loss.avg, \"heatmap_loss\")\n if model_name == 'with_normals':\n visualizer.append_loss(epoch + 1, iterations, batch_heat_loss.avg, \"heatmap_loss\")\n visualizer.append_loss(epoch + 1, iterations, batch_surface_loss.avg, \"surface_loss\")\n if (iterations % args.visdom_iters) == 0:\n for bidx in range(np.min([batch_d.size()[0], 5])):\n visualizer.show_seg_map(out[bidx].argmax(0), 'segmentation prediction' + str(bidx))\n visualizer.show_seg_map(batch_d[bidx], 'input depth' + str(bidx))\n if model_name == 'heatmap':\n visualizer.show_seg_map(heat_gt[bidx], 'heatmap gt' + str(bidx))\n visualizer.show_seg_map(heat_pred[bidx], 'heatmap prediction' + str(bidx))\n if model_name == 'with_normals':\n visualizer.show_seg_map(heat_pred[bidx], 'heatmap prediction' + str(bidx))\n visualizer.show_seg_map(heat_gt[bidx], 'heatmap gt' + str(bidx))\n visualizer.show_normals(normals[bidx], 'normals prediction' + str(bidx))\n visualizer.show_normals(normals_target[bidx].float(), 'normals gt' + str(bidx))\n\n #validation\n if args.valset_path is not None:\n with torch.no_grad():\n model.eval()\n confidence_threshold = args.confidence_threshold\n frame_index = 0\n total_iou = 0\n for vbatch_id, vbatch in enumerate(vdataset):\n #resize input\n vbatch_d = vbatch['depth']\n \n #prepare target\n vlabels = vbatch['labels'].float()\n\n activs, out = model(vbatch_d.to(device))\n\n batch_size = vbatch_d.shape[0]\n\n confidence_t, labels_pred_t = out.max(dim = 1, keepdim = True) \n confidence_t = torch.exp(confidence_t) # convert log probability to probability\n labels_pred_t [confidence_t < confidence_threshold] = 0 # 
uncertain classs\n\n frame_index += batch_size\n sample_iou, mask = metrics.jaccard(labels_pred_t.cpu().float(), vlabels, args.nclasses)\n total_iou = torch.sum(sample_iou*mask.float(), dim = -1) / mask.sum(dim = -1).float()\n\n print(\"Epoch: {}, Average IoU: {}\\n\"\\\n .format(epoch, total_iou.mean()))\n\n visualizer.append_loss(epoch + 1, iterations, total_iou.mean(), \"average IoU\", mode='val')\n\n if args.test_data_path is not None:\n total_iou = 0\n frame_index = 0\n bar = tqdm(total = test_iterator.__len__())\n with torch.no_grad():\n model.eval()\n confidence_threshold = args.confidence_threshold\n for batch_id, batch in enumerate(test_dataset):\n vbatch_d = batch['depth']\n \n #prepare target\n vlabels = batch['labels'].float()\n\n activs, out = model(vbatch_d.to(device))\n\n batch_size = vbatch_d.shape[0]\n\n confidence_t, labels_pred_t = out[index].max(dim = 1, keepdim = True) \n confidence_t = torch.exp(confidence_t) # convert log probability to probability\n labels_pred_t [confidence_t < confidence_threshold] = 0 # uncertain classs\n \n frame_index += batch_size\n sample_iou, mask = metrics.jaccard(labels_pred_t.cpu().float(), vlabels, args.nclasses)\n total_iou = torch.sum(sample_iou*mask.float(), dim = -1) / mask.sum(dim = -1).float()\n\n print(\"Epoch: {}, Test Average IoU: {}\\n\"\\\n .format(epoch, total_iou.mean()))\n\n visualizer.append_loss(epoch + 1, iterations, total_iou.mean(), \"average test IoU\", mode='val')\n\n\n #save model params\n src.utils.save_checkpoint({\n 'nclasses' : args.nclasses,\n 'ndf' : args.ndf,\n 'epoch': epoch, \n 'iterations': iterations,\n 'batch_size': args.batch_size,\n 'model_name': model_name,\n 'state_dict': model.state_dict(),\n 'optim_dict': optimizer.state_dict(), \n }, epoch, name = args.name)\n\n torch.cuda.empty_cache()\n if epoch > args.epochs:\n break","repo_name":"VCL3D/StructureNet","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":24402,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"61"}
+{"seq_id":"12210958002","text":"import pytest\nfrom pages.main_page import MainPage\n\n\nclass TestCaseSearchInYandex:\n def test_should_be_search_field(self, browser):\n \"\"\"Проверка наличия поля поиска\"\"\"\n page = MainPage(browser)\n page.open()\n page.should_be_search_field()\n\n def test_should_be_suggest_block(self, browser):\n \"\"\"Проверка наличия блока подсказок\"\"\"\n page = MainPage(browser)\n page.open()\n page.enter_text('Тензор')\n page.should_be_suggest_block()\n\n def test_should_be_search_result(self, browser):\n \"\"\"Проверка наличия страницы с результатом поиска\"\"\"\n page = MainPage(browser)\n page.open()\n page.enter_text('Тензор')\n page.should_be_search_result()\n\n @pytest.mark.xfail\n def test_should_be_result_links(self, browser):\n \"\"\"Проверка наличия ссылки в результате поиска\"\"\"\n page = MainPage(browser)\n page.open()\n page.enter_text('Тензор')\n page.should_be_link('tensor.ru')\n\n\n\n","repo_name":"bashkoigor/yandex_auto_tests","sub_path":"tests/test_main_page.py","file_name":"test_main_page.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23589319121","text":"\r\ndef run_test():\r\n N, P = map(int, input().split())\r\n G = [int(x) for x in input().split()]\r\n assert len(G) == N\r\n d = {0:0, 1:0, 2:0, 3:0}\r\n for g in G:\r\n d[g%P] += 1\r\n\r\n if P == 2:\r\n return d[0] + (d[1] + 1) // 2\r\n elif P == 3:\r\n ans = d[0]\r\n sm, lg = min(d[1], d[2]), max(d[1], d[2])\r\n ans += sm\r\n ans += (lg - sm + 2) // 3\r\n return ans\r\n elif P == 4:\r\n ans = d[0]\r\n sm, lg = min(d[1], d[3]), max(d[1], d[3])\r\n md = d[2]\r\n ans += md // 2\r\n md = md % 2\r\n ans += sm\r\n sm, lg = 0, lg - sm\r\n if lg % 4 == 0:\r\n ans += lg // 4 + md\r\n elif lg % 4 == 1:\r\n ans += lg // 4 + 1\r\n elif lg % 4 == 2:\r\n ans += lg // 4 + 1\r\n elif lg % 4 == 3:\r\n ans += lg // 4 + 1 + md\r\n return ans\r\n\r\nfor i in range(1, int(input()) + 1):\r\n print(\"Case #{}: {}\".format(i, run_test()))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_212/24.py","file_name":"24.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27326377346","text":"\"\"\"Напишите программу, которая будет преобразовывать десятичное число в\nдвоичное.\"\"\"\n\n\ndef function():\n while True:\n try:\n x = int(input(\"Введите число : \"))\n except ValueError:\n print(\"Error! Это не число, попробуйте снова.\")\n else:\n return x\n\n\nus_namber = function()\nbinary_number = []\nwhile us_namber > 1:\n temp = us_namber % 2\n binary_number.append(temp)\n us_namber = us_namber // 2\n if us_namber == 1:\n binary_number.append(1)\nbinary_number.reverse()\nprint(binary_number)\n","repo_name":"RuslanSemenchenko1974/Python_Seminar_3_HomeTask","sub_path":"3/Task_add_4.py","file_name":"Task_add_4.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35851336983","text":"#概率转化期望/确保次数.py\r\nimport math as ma\r\n\r\ndef is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False\r\n\r\ndef judgment(xx):\r\n if is_number(xx) == True:\r\n return xx\r\n else:\r\n while is_number(xx) == False:\r\n print(\"输入错误!\")\r\n cc = input(\"请输入数字 \")\r\n if is_number(cc) == True:\r\n return cc\r\n\r\ndef is_in(ss):\r\n if ss in ['类型一','类型二','一','二','1','2']:\r\n return True\r\n else:\r\n return False\r\n\r\ndef isin(sss):\r\n if is_in(sss) == True:\r\n return sss\r\n else:\r\n while is_in(sss) == False:\r\n print(\"输入错误\")\r\n ccc = input(\"请输入 类型一/类型二/一/二/1/2 \")\r\n if is_in(ccc) == True:\r\n return ccc\r\n\r\n\r\ndef main():\r\n K = input(\"开始? y/n \")\r\n while K == 'y':\r\n print(\"本工具可计算期望次数和确保次数,请选择您需要的模式\")\r\n ps = input(\"请输入模式代号: 1表示计算期望次数 2表示计算确保次数\")\r\n if ps == '1':\r\n print(\"类型一:在一个池子里做放回抽样(即打boss刷战甲)\")\r\n print(\"类型二:单独敌人放回抽样(即刷某些敌人掉落)\")\r\n b = isin(input(\"请选择类型: \"))\r\n if b in ['类型一','一','1']:\r\n c = int(judgment(input(\"有几个东西?\")))\r\n app = []\r\n for i in range(0,c):\r\n m = c/(i+1)\r\n app.append(m)\r\n Sum = sum(app)\r\n e1 = ma.floor(Sum)\r\n e2 = ma.ceil(Sum)\r\n print(\"转化后的期望次数为: {}~{}次\".format(e1,e2))\r\n K = input(\"继续? y/n \")\r\n else:\r\n p = float(judgment(input(\"请输入掉落概率 \")))\r\n n = 1/p\r\n n1 = ma.floor(n)\r\n n2 = ma.ceil(n)\r\n print(\"转化后的概率为: {}~{}次\".format(n1,n2))\r\n K = input(\"继续? y/n \")\r\n else:\r\n p1 = float(judgment(input(\"请输入获取概率: \")))\r\n many = float(judgment(input(\"请输入确保概率(0.99 0.999 0.9999等): \")))\r\n s1 = ma.log(1-many,10)\r\n s2 = ma.log(1-p1,10)\r\n equal1 = ma.floor(s1/s2)\r\n equal2 = ma.ceil(s1/s2)\r\n print(\"转化后的次数为: {} ~ {} 次\".format(equal1,equal2))\r\n K = input(\"继续? y/n \")\r\n\r\nmain()\r\n","repo_name":"six12six12/all","sub_path":"概率转化期望-确保次数.py","file_name":"概率转化期望-确保次数.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17172668381","text":"import itertools\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib.dates import AutoDateLocator, AutoDateFormatter, MonthLocator, num2date\nimport numpy as np\nfrom matplotlib.ticker import FuncFormatter\n\n\n\n# 显示中文\nmpl.rcParams['font.sans-serif'] = [u'SimHei']\nmpl.rcParams['axes.unicode_minus'] = False\n\n\ndef save_confusion_matrix(matrix, label, path):\n \"\"\"\n 保存混淆矩阵到指定路径\n :return:\n \"\"\"\n # 设置大小\n plt.figure(figsize=(5, 5))\n plt.imshow(matrix, interpolation=\"nearest\", cmap=plt.get_cmap('Blues'))\n plt.title(\"confusion_matrix\")\n\n # 添加颜色渐变条\n plt.colorbar()\n\n plt.xticks(range(len(matrix)), label, rotation=0)\n plt.yticks(range(len(matrix)), label, rotation=0)\n\n # 将矩阵中的值添加进入图片\n for i, j in itertools.product(range(len(matrix)), range(len(matrix))):\n plt.text(j, i, matrix[i][j], horizontalalignment=\"center\",\n color=\"black\")\n\n # 自动调整\n plt.tight_layout()\n plt.xlabel(\"True Label\")\n plt.ylabel(\"Predicted Label\")\n\n # 调节图片边距\n plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)\n plt.savefig(path)\n print(\"=======> 保存混淆矩阵图片到 \", path)\n plt.close()\n\n\ndef save_text_to_pic(text, path):\n \"\"\" 保存文本成图片 \"\"\"\n\n # 设置大小\n plt.figure(figsize=(5, 5))\n\n plt.axis([0, 2, 0, 2])\n plt.text(1, 1, text, ha=\"center\", va='center', fontsize=12, wrap=True)\n\n # 去掉坐标轴\n plt.axis('off')\n plt.xticks([])\n plt.yticks([])\n\n plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)\n plt.savefig(path)\n print(\"=======> 保存分类报告图片到 \", path)\n plt.close()\n\n\ndef save_roc_to_pic(fpr, tpr, roc_auc, path):\n \"\"\" 绘制 ROC曲线并保存成图片到指定路径 \"\"\"\n\n plt.figure(figsize=(5, 5))\n plt.plot(fpr, tpr, color='b',\n lw=2, label='ROC curve (area = %0.2f)' % roc_auc) # 假正率为横坐标,真正率为纵坐标做曲线\n\n # 画出对角虚线\n plt.plot([0, 1], [0, 1], color='r', lw=2, linestyle='--')\n\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n\n plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)\n plt.savefig(path)\n print(\"=======> 保存ROC曲线图片到 \", path)\n plt.close()\n\n\ndef plot(m, fcst, splitFlag, ax=None, uncertainty=True, plot_cap=True, xlabel='时间', ylabel='流量',\n figsize=(10, 6)):\n \"\"\" prophet 绘图 \"\"\"\n if ax is None:\n fig = plt.figure(facecolor='w', figsize=figsize)\n ax = fig.add_subplot(111)\n else:\n fig = ax.get_figure()\n fcst_t = fcst['ds'].dt.to_pydatetime()\n ax.plot(m.history['ds'].dt.to_pydatetime(), m.history['y'], 'k.')\n # ax.plot(fcst_t, fcst['yhat'], ls='-', c='#0072B2')\n ax.plot(fcst_t[0: -splitFlag], fcst['yhat'][0: -splitFlag], ls='-', c='#0072B2')\n ax.plot(fcst_t[-splitFlag:], fcst['yhat'][-splitFlag:], ls='-', c='r')\n if uncertainty:\n ax.fill_between(fcst_t, fcst['yhat_lower'], fcst['yhat_upper'],\n color='#0072B2', alpha=0.2)\n\n # Specify formatting to workaround matplotlib issue #12925\n locator = AutoDateLocator(interval_multiples=False)\n formatter = AutoDateFormatter(locator)\n ax.xaxis.set_major_locator(locator)\n ax.xaxis.set_major_formatter(formatter)\n ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n fig.tight_layout()\n return fig\n\n\ndef plot_components(m, fcst, uncertainty=True, plot_cap=True, weekly_start=0, yearly_start=0, figsize=None):\n \"\"\" prophet 绘制成分组件 \"\"\"\n components = ['trend']\n if m.train_holiday_names is not None and 
'holidays' in fcst:\n        components.append('holidays')\n    if 'weekly' in m.seasonalities and 'weekly' in fcst:\n        components.append('weekly')\n    if 'yearly' in m.seasonalities and 'yearly' in fcst:\n        components.append('yearly')\n\n    components.extend([\n        name for name in sorted(m.seasonalities)\n        if name in fcst and name not in ['weekly', 'yearly']\n    ])\n\n    regressors = {'additive': False, 'multiplicative': False}\n    for name, props in m.extra_regressors.items():\n        regressors[props['mode']] = True\n    for mode in ['additive', 'multiplicative']:\n        if regressors[mode] and 'extra_regressors_{}'.format(mode) in fcst:\n            components.append('extra_regressors_{}'.format(mode))\n    npanel = len(components)\n\n    figsize = figsize if figsize else (9, 3 * npanel)\n    fig, axes = plt.subplots(npanel, 1, facecolor='w', figsize=figsize)\n\n    if npanel == 1:\n        axes = [axes]\n\n    multiplicative_axes = []\n\n    for ax, plot_name in zip(axes, components):\n        if plot_name == 'trend':\n            plot_forecast_component(\n                m=m, fcst=fcst, name='trend', ax=ax, uncertainty=uncertainty,\n                plot_cap=plot_cap, ylabel=\"Trend component\"\n            )\n        elif plot_name in m.seasonalities:\n            if plot_name == 'weekly' or m.seasonalities[plot_name]['period'] == 7:\n                plot_weekly(\n                    m=m, name=plot_name, ax=ax, uncertainty=uncertainty, weekly_start=weekly_start\n                )\n            elif plot_name == 'yearly' or m.seasonalities[plot_name]['period'] == 365.25:\n                plot_yearly(\n                    m=m, name=plot_name, ax=ax, uncertainty=uncertainty, yearly_start=yearly_start\n                )\n            else:\n                plot_seasonality(\n                    m=m, name=plot_name, ax=ax, uncertainty=uncertainty,\n                )\n        elif plot_name in [\n            'holidays',\n            'extra_regressors_additive',\n            'extra_regressors_multiplicative',\n        ]:\n            name = plot_name\n            if plot_name == 'holidays':\n                name = \"Holiday component\"\n            plot_forecast_component(\n                m=m, fcst=fcst, name=plot_name, ax=ax, uncertainty=uncertainty,\n                plot_cap=False, ylabel=name\n            )\n        if plot_name in m.component_modes['multiplicative']:\n            multiplicative_axes.append(ax)\n\n    fig.tight_layout()\n    # Reset multiplicative axes labels after tight_layout adjustment\n    for ax in multiplicative_axes:\n        ax = set_y_as_percent(ax)\n    return fig\n\n\ndef plot_forecast_component(m, fcst, name, ax=None, uncertainty=True, plot_cap=False,\n                            figsize=(10, 6), ylabel=\"\"):\n    artists = []\n    if not ax:\n        fig = plt.figure(facecolor='w', figsize=figsize)\n        ax = fig.add_subplot(111)\n    fcst_t = fcst['ds'].dt.to_pydatetime()\n    artists += ax.plot(fcst_t, fcst[name], ls='-', c='#0072B2')\n    if 'cap' in fcst and plot_cap:\n        artists += ax.plot(fcst_t, fcst['cap'], ls='--', c='k')\n    if m.logistic_floor and 'floor' in fcst and plot_cap:\n        ax.plot(fcst_t, fcst['floor'], ls='--', c='k')\n    if uncertainty:\n        artists += [ax.fill_between(\n            fcst_t, fcst[name + '_lower'], fcst[name + '_upper'],\n            color='#0072B2', alpha=0.2)]\n    # Specify formatting to workaround matplotlib issue #12925\n    locator = AutoDateLocator(interval_multiples=False)\n    formatter = AutoDateFormatter(locator)\n    ax.xaxis.set_major_locator(locator)\n    ax.xaxis.set_major_formatter(formatter)\n    ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)\n    ax.set_xlabel('Time')\n\n    ax.set_ylabel(name if ylabel == \"\" else ylabel)\n    if name in m.component_modes['multiplicative']:\n        ax = set_y_as_percent(ax)\n    return artists\n\n\ndef plot_weekly(m, ax=None, uncertainty=True, weekly_start=0, figsize=(10, 6), name='weekly'):\n\n    artists = []\n    if not ax:\n        fig = plt.figure(facecolor='w', figsize=figsize)\n        ax = fig.add_subplot(111)\n    # Compute weekly seasonality for a Sun-Sat sequence of dates.\n    days = 
(pd.date_range(start='2017-01-01', periods=7) +\n            pd.Timedelta(days=weekly_start))\n    df_w = seasonality_plot_df(m, days)\n    seas = m.predict_seasonal_components(df_w)\n    days = days.day_name()\n    artists += ax.plot(range(len(days)), seas[name], ls='-',\n                       c='#0072B2')\n    if uncertainty:\n        artists += [ax.fill_between(range(len(days)),\n                                    seas[name + '_lower'], seas[name + '_upper'],\n                                    color='#0072B2', alpha=0.2)]\n    ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)\n    ax.set_xticks(range(len(days)))\n    ax.set_xticklabels(days)\n    ax.set_xlabel('Time')\n    ax.set_ylabel(\"Weekly component\")\n    if m.seasonalities[name]['mode'] == 'multiplicative':\n        ax = set_y_as_percent(ax)\n    return artists\n\n\ndef plot_yearly(m, ax=None, uncertainty=True, yearly_start=0, figsize=(10, 6), name='yearly'):\n    artists = []\n    if not ax:\n        fig = plt.figure(facecolor='w', figsize=figsize)\n        ax = fig.add_subplot(111)\n    # Compute yearly seasonality for a Jan 1 - Dec 31 sequence of dates.\n    days = (pd.date_range(start='2017-01-01', periods=365) +\n            pd.Timedelta(days=yearly_start))\n    df_y = seasonality_plot_df(m, days)\n    seas = m.predict_seasonal_components(df_y)\n    artists += ax.plot(\n        df_y['ds'].dt.to_pydatetime(), seas[name], ls='-', c='#0072B2')\n    if uncertainty:\n        artists += [ax.fill_between(\n            df_y['ds'].dt.to_pydatetime(), seas[name + '_lower'],\n            seas[name + '_upper'], color='#0072B2', alpha=0.2)]\n    ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)\n    months = MonthLocator(range(1, 13), bymonthday=1, interval=2)\n    ax.xaxis.set_major_formatter(FuncFormatter(\n        lambda x, pos=None: '{dt:%B} {dt.day}'.format(dt=num2date(x))))\n    ax.xaxis.set_major_locator(months)\n    ax.set_xlabel('Time')\n    ax.set_ylabel(\"Yearly component\")\n    if m.seasonalities[name]['mode'] == 'multiplicative':\n        ax = set_y_as_percent(ax)\n    return artists\n\n\ndef plot_seasonality(m, name, ax=None, uncertainty=True, figsize=(10, 6)):\n\n    artists = []\n    if not ax:\n        fig = plt.figure(facecolor='w', figsize=figsize)\n        ax = fig.add_subplot(111)\n    # Compute seasonality from Jan 1 through a single period.\n    start = pd.to_datetime('2017-01-01 0000')\n    period = m.seasonalities[name]['period']\n    end = start + pd.Timedelta(days=period)\n    plot_points = 200\n    days = pd.to_datetime(np.linspace(start.value, end.value, plot_points))\n    df_y = seasonality_plot_df(m, days)\n    seas = m.predict_seasonal_components(df_y)\n    artists += ax.plot(df_y['ds'].dt.to_pydatetime(), seas[name], ls='-',\n                       c='#0072B2')\n    if uncertainty:\n        artists += [ax.fill_between(\n            df_y['ds'].dt.to_pydatetime(), seas[name + '_lower'],\n            seas[name + '_upper'], color='#0072B2', alpha=0.2)]\n    ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)\n    xticks = pd.to_datetime(np.linspace(start.value, end.value, 7)\n                            ).to_pydatetime()\n    ax.set_xticks(xticks)\n    if period <= 2:\n        fmt_str = '{dt:%T}'\n    elif period < 14:\n        fmt_str = '{dt:%m}/{dt:%d} {dt:%R}'\n    else:\n        fmt_str = '{dt:%m}/{dt:%d}'\n    ax.xaxis.set_major_formatter(FuncFormatter(\n        lambda x, pos=None: fmt_str.format(dt=num2date(x))))\n    ax.set_xlabel('Time')\n    ax.set_ylabel(\"Seasonal component\")\n    if m.seasonalities[name]['mode'] == 'multiplicative':\n        ax = set_y_as_percent(ax)\n    return artists\n\n\ndef seasonality_plot_df(m, ds):\n\n    df_dict = {'ds': ds, 'cap': 1., 'floor': 0.}\n    for name in m.extra_regressors:\n        df_dict[name] = 0.\n    # Activate all conditional seasonality columns\n    for props in m.seasonalities.values():\n        if props['condition_name'] is not None:\n            df_dict[props['condition_name']] = True\n    df = pd.DataFrame(df_dict)\n    df = m.setup_dataframe(df)\n    
return df\n\n\n\ndef set_y_as_percent(ax):\n yticks = 100 * ax.get_yticks()\n yticklabels = ['{0:.4g}%'.format(y) for y in yticks]\n ax.set_yticklabels(yticklabels)\n return ax\n\n\n","repo_name":"haixiaoxuan/code-python","sub_path":"tensorflow-on-spark-demo/algorithm_utils/drawing_pic.py","file_name":"drawing_pic.py","file_ext":"py","file_size_in_byte":12237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
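A short usage sketch for the save_* helpers above (paths and values are illustrative):

import numpy as np

cm = np.array([[50, 2], [5, 43]])
save_confusion_matrix(cm, label=["neg", "pos"], path="confusion_matrix.png")
save_text_to_pic("precision: 0.96 / recall: 0.91", path="report.png")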
+{"seq_id":"18704235956","text":"import random\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torchvision import models\nfrom torchvision.utils import make_grid\nimport torchvision.transforms as transforms\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom torch.nn.functional import conv2d\nfrom models.resnetCA import ResDaulNet18_TP5\nfrom models.resnet import ResNet18\nfrom utils import plot_filter_ch\n\nrandom_seed = 12\ng = torch.Generator()\ng.manual_seed(random_seed)\ntorch.manual_seed(random_seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nrandom.seed(random_seed)\ntorch.cuda.manual_seed(random_seed)\ntorch.cuda.manual_seed_all(random_seed) # multi-GPU\nnp.random.seed(random_seed)\n\npath = \"outputs/resdual5_cifar-10_paper/ckpt.pth\"\nSAVEDAT = torch.load(path)\n\nmodel = ResDaulNet18_TP5()\nmodel = nn.DataParallel(model)\nmodel.load_state_dict(SAVEDAT[\"net\"])\nmodel.eval()\n\nresnet = ResNet18()\npath = \"outputs/resnet18/ckpt.pth\"\n\nnormalize = transforms.Normalize(\n mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]\n)\n\ntransform_train = transforms.Compose(\n [\n transforms.ToTensor(),\n # normalize,\n ]\n)\n\ntransform_test = transforms.Compose(\n [\n transforms.ToTensor(),\n # normalize,\n ]\n)\n\ntrainset = torchvision.datasets.CIFAR10(\n root=\"C:/cifar-10\", train=False, download=True, transform=transform_train\n)\ntrainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, shuffle=True, num_workers=0\n)\n\ntestset = torchvision.datasets.CIFAR10(\n root=\"C:/cifar-10\", train=False, download=True, transform=transform_test\n)\ntestloader = torch.utils.data.DataLoader(\n testset, batch_size=100, shuffle=False, num_workers=0\n)\n\nfor image, label in trainloader:\n break\n\ninput = (image[0, :, :, :][None, :, :, :]).float().cuda()\n\nout = model.module.conv1(input)\nout = model.module.bn1(out)\n\nbranch_1 = model.module.layer1[0].conv1_d1\nbranch_2 = model.module.layer1[1].conv1_d2\n\nout_1 = branch_1(out)\nout_2 = branch_2(out)\n\nplot_filter_ch(out_1, title=\"out_dw1\", fname=\"dw1\", save_opt=True)\nplot_filter_ch(out_2, title=\"out_dw2\", fname=\"dw2\", save_opt=True)\n\nplot_filter_ch(out_1 + out_2, title=\"out\", fname=\"summ\", save_opt=True)\n\nimg_ori = np.squeeze(input.detach().cpu().numpy()).transpose((1, 2, 0))\nfig_ori = plt.figure()\nplt.imshow(img_ori)\nplt.axis(\"off\")\nplt.show()\nfig_ori.savefig(\"orig.png\", dpi=150)\n","repo_name":"angseung/Res-DualNet_V2","sub_path":"vis_output.py","file_name":"vis_output.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27144944924","text":"from typing import Optional, Dict\nfrom microservice import Product\nimport microservice\nimport uvicorn\nfrom fastapi.encoders import jsonable_encoder\nfrom fastapi import APIRouter, Body, FastAPI\nfrom fastapi.responses import Response\n\napp = FastAPI()\nrouter = APIRouter()\n\n\n@app.get(\"/retrieve_productname_by_name\")\ndef retrieve_productname_by_name(name: Optional[str] = None):\n products_filtered = microservice.retrieve_productname_by_name(name)\n return Response(content=products_filtered, media_type=\"application/json\")\n\n\n@app.get(\"/retrieve_product_by_name\")\ndef retrieve_product_by_name(name: Optional[str] = None):\n products_filtered = microservice.retrieve_product_by_name(name)\n return Response(content=products_filtered, media_type=\"application/json\")\n\n\n@app.get(\"/retrieve_product_by_id\")\ndef retrieve_product_by_id(_id: str):\n products_filtered = microservice.retrieve_product_by_id(_id)\n return Response(content=products_filtered, media_type=\"application/json\")\n\n\n@app.post(\"/retrieve_productname_by_options\")\ndef retrieve_productname_by_options(options: Optional[Dict] = Body(None)):\n products_filtered = microservice.retrieve_productname_by_options(options)\n return Response(content=products_filtered, media_type=\"application/json\")\n\n\n@app.post(\"/create\")\ndef create_product(product: Product = Body(..., example={\n \"name\": \"Iphone11\",\n \"description\": \"Phone from Apple\",\n \"options\": {\"os\": \"Ios\",\n \"year\": \"2018\"},\n}, embed=True)):\n product = jsonable_encoder(product)\n new_product = microservice.create_product(product)\n return Response(content=new_product, media_type=\"application/json\")\n\n\nif __name__ == \"__main__\":\n uvicorn.run(\"main:app\", host=\"0.0.0.0\", port=8000, reload=True)\n","repo_name":"Razer725/microservice-API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"38781129032","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom tedega_share import (\n init_logger,\n get_logger,\n monitor_connectivity,\n monitor_system\n)\n\nfrom tedega_view import (\n create_application,\n config_view_endpoint\n)\n\nfrom tedega_storage.rdbms import (\n BaseItem,\n RDBMSStorageBase,\n init_storage,\n get_storage\n)\n\n########################################################################\n# Model #\n########################################################################\n\n\nclass Ping(BaseItem, RDBMSStorageBase):\n __tablename__ = \"pings\"\n\n\n########################################################################\n# Controller #\n########################################################################\n\n\n@config_view_endpoint(path=\"/pings\", method=\"GET\", auth=None)\ndef ping():\n data = {}\n log = get_logger()\n with get_storage() as storage:\n\n factory = Ping.get_factory(storage)\n item = factory.create()\n storage.create(item)\n\n items = storage.read(Ping)\n data[\"total\"] = len(items)\n data[\"data\"] = [item.get_values() for item in items]\n log.info(\"Let's log something\")\n return data\n\n\ndef build_app(servicename):\n # Define things we want to happen of application creation. We want:\n # 1. Initialise out fluent logger.\n # 2. Initialise the storage.\n # 3. Start the monitoring of out service to the \"outside\".\n # 4. Start the monitoring of the system every 10sec (CPU, RAM,DISK).\n run_on_init = [(init_logger, servicename),\n (init_storage, None),\n (monitor_connectivity, [(\"www.google.com\", 80)]),\n (monitor_system, 10)]\n application = create_application(servicename, run_on_init=run_on_init)\n return application\n\nif __name__ == \"__main__\":\n application = build_app(\"tedega_examples\")\n application.run()\n","repo_name":"tedega/tedega","sub_path":"docs/examples/minimal/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73040694915","text":"import turtle\nimport random\nimport time\nimport os\n\n\ndef add_long_space(a, string1, string2):\n return string1 + \" \" * a + string2\n\n\n# Game Screen\nscreen = turtle.Screen()\nscreen.title('Pong Game')\nscreen.bgcolor('black')\nscreen.setup(width=800, height=600)\nscreen.tracer(0)\n\n# Score Board\nscore_a = 0\nscore_b = 0\n\n# Paddle A\npaddle_a = turtle.Turtle()\npaddle_a.speed(0)\npaddle_a.shape('square')\npaddle_a.shapesize(stretch_wid=5, stretch_len=1)\npaddle_a.color('white')\npaddle_a.penup()\npaddle_a.goto(-350, 0)\n\n# Paddle B\npaddle_b = turtle.Turtle()\npaddle_b.speed(0)\npaddle_b.shape('square')\npaddle_b.shapesize(stretch_wid=5, stretch_len=1)\npaddle_b.color('white')\npaddle_b.penup()\npaddle_b.goto(350, 0)\n\n# Ball\nball = turtle.Turtle()\nball.speed(0)\nball.shape('circle')\nball.color('white')\nball.penup()\nball.goto(0, 0)\nball.dx = random.choice([-2, 2])\nball.dy = random.choice([-2, 2])\n\n# Scoreboard\nscore = turtle.Turtle()\nscore.speed(0)\nscore.color('white')\nscore.penup()\nscore.hideturtle()\nscore.goto(0, 250)\n\n# winner\nwin = turtle.Turtle()\nwin.speed(0)\nwin.color('white')\nwin.penup()\nwin.hideturtle()\nwin.goto(0, 0)\n\n\n# Paddle Movement\ndef paddle_a_up():\n y = paddle_a.ycor()\n y += 20\n paddle_a.sety(y)\n\n\ndef paddle_a_down():\n y = paddle_a.ycor()\n y -= 20\n paddle_a.sety(y)\n\n\n# Paddle B movement\ndef paddle_b_up():\n y = paddle_b.ycor()\n y += 20\n paddle_b.sety(y)\n\n\ndef paddle_b_down():\n y = paddle_b.ycor()\n y -= 20\n paddle_b.sety(y)\n\n\nscreen.listen()\nscreen.onkeypress(paddle_a_up, 'w')\nscreen.onkeypress(paddle_a_down, 's')\nscreen.onkeypress(paddle_b_up, 'Up')\nscreen.onkeypress(paddle_b_down, 'Down')\n\n# Main Game Loop\nos.system('afplay start.mp3&')\n\nwhile True:\n screen.update()\n\n # Ball Movement\n ball.setx(ball.xcor() + ball.dx)\n ball.sety(ball.ycor() + ball.dy)\n\n # If the ball touches after the top part or the bottom part of the screen\n # the ball will change the dy to negative and change the y direction\n if ball.ycor() > 285:\n ball.sety(285)\n ball.dy *= -1\n os.system('afplay pongsound.mp3&')\n\n if ball.ycor() < -285:\n ball.sety(-285)\n ball.dy *= -1\n os.system('afplay pongsound.mp3&')\n\n # if the ball touches the right or the left side of the screen\n # it will go to the center and get a random x and y direction \\ add to the scoreboard\n if ball.xcor() > 380:\n ball.goto(0, 0)\n ball.dx *= random.choice([-1, 1])\n score_a += 1\n\n if ball.xcor() < -370:\n ball.goto(0, 0)\n ball.dx *= random.choice([-1, 1])\n score_b += 1\n\n # Ball and Paddle Collisions\n if 330 < ball.xcor() < 340 and paddle_b.ycor() + 60 > ball.ycor() > paddle_b.ycor() - 60:\n ball.setx(330)\n ball.dx *= -1\n os.system('afplay pongsound.mp3&')\n\n if -330 > ball.xcor() > -340 and paddle_a.ycor() + 60 > ball.ycor() > paddle_a.ycor() - 60:\n ball.setx(-330)\n ball.dx *= -1\n os.system('afplay pongsound.mp3&')\n\n # Score\n score.clear()\n score.write(add_long_space(30, f'Player A: {score_a}', f'Player B: {score_b}'), align='center',\n font=('courier', 20, 'normal'))\n screen.update()\n\n # Winner = 3\n if score_a >= 3:\n win.write('Player A Won!', align='center', font=('courier', 55, 'normal'))\n screen.update()\n ball.hideturtle()\n time.sleep(5)\n break\n\n if score_b >= 3:\n win.write('Player B Won!', align='center', font=('courier', 55, 'normal'))\n screen.update()\n ball.hideturtle()\n time.sleep(5)\n break\n\n # Loop Paddle Movement\n # Paddle A\n if paddle_a.ycor() >= 380:\n 
paddle_a.sety(-380)\n elif paddle_a.ycor() <= -380:\n paddle_a.sety(380)\n\n # Paddle B\n if paddle_b.ycor() >= 380:\n paddle_b.sety(-380)\n elif paddle_b.ycor() <= -380:\n paddle_b.sety(380)\n\n # Ball sleep each time it gets to the center\n if ball.xcor() == 0:\n time.sleep(0.2)\n","repo_name":"shaharyair/pong_game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"38778565456","text":"from setuptools import setup, find_packages\nfrom extro import __version__\n\nwith open('requirements.txt') as fd:\n requires = fd.read().splitlines()\n\nsetup(name='extro.py',\n author='Lars Kellogg-Stedman',\n author_email='lars@oddbit.com',\n url='https://github.com/larsks/extro.py',\n version=__version__,\n py_modules=['extro'],\n install_requires=requires,\n entry_points={'console_scripts': ['extropy = extro:main']})\n","repo_name":"larsks/extro.py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70776450436","text":"import cantera as ct\nimport cantera.ctml_writer as w\nfrom fipy import *\nfrom numpy import *\nimport matplotlib.pylab as plt\nimport numpy as np\n\n\nsimple_atmosphere = '''ideal_gas(name = \"titan_atmosphere\",\n elements=\"H N\",\n species = [\"gri30: H2 N2\"],\n reactions=\"none\",\n transport='Mix',\n initial_state=state(temperature=91.8,\n pressure=(1.5, 'atm'),\n mole_fractions='H2:0.1, N2:99.9')\n )'''\n\nreaction_string_1 = \"C2H2 + 3 H2 => 2 CH4\"\nreaction_string_2 = \"C2H6 + H2 = 2 CH4\"\n\ncomplex_atmosphere = '''ideal_gas(name = \"titan_atmosphere\",\n elements=\"H N C\",\n species = [\"gri30: H2 N2 C2H2 C2H6 CH4\"],\n reactions=[\"C2H2 + 3 H2 => 2 CH4\"],\n transport='Mix',\n initial_state=state(temperature=91.8,\n pressure=(1.5, 'atm'),\n mole_fractions='H2:0.00099, CH4:0.0565, C2H6:0.00001, C2H2:0.000002, N2:0.942498')\n )'''\n\n\n\n\n# create a gas-phase object to represent the gas in the pores, with a\n# dusty gas transport manager\ng = ct.Solution(source=complex_atmosphere)\n\n# set the gas state\n#g.TPX = 500.0, ct.one_atm, \"OH:1, H:2, O2:3, O:1.0E-8, H2:1.0E-8, H2O:1.0E-8, H2O2:1.0E-8, HO2:1.0E-8, AR:1.0E-8\"\n\n# set its parameters\n#g.porosity = 0.2\n#g.tortuosity = 4.0\n#g.mean_pore_radius = 1.5e-7\n#g.mean_particle_diameter = 1.5e-6 # lengths in meters\n\n# print the multicomponent diffusion coefficients\nprint(g.mix_diff_coeffs)\n\n# compute molar species fluxes\n\n\n#g.TP = g.T, 1.2 * ct.one_atm\n#T2, rho2, Y2 = g.TDY\n#delta = 0.001\n\n#print(g.molar_fluxes(T1, T1, rho1, rho1, Y1, Y1, delta))\n#print(g.molar_fluxes(T1, T2, rho1, rho2, Y1, Y2, delta))\n\n######################################\n# Diffusion coefficient function\ndef get_diffusion_coeff( phi ):\n\n D = CellVariable(name=\"DiffusionCoeff\",mesh=mesh)\n\n for i in range(phi.shape[0]):\n print(\"dif loop\")\n var = phi.value[i]\n X0 = \"H2:%0.8f, N2:%0.8f\"%(var,1-var)\n a.X=X0\n\n # mixture-averaged diffusion coefficients\n Dcoeff = a.mix_diff_coeffs[h2i]\n\n D.put(i,Dcoeff)\n\n return D\n\n###################################### \n# Gas object \n#\n# This object will be used to evaluate all thermodynamic, kinetic,\n# and transport properties\n#\n#rxnmech = 'h2o2.cti' # reaction mechanism file\n#name = 'ohmech' # gas mixture model\n#comp = 'O2:1.0, AR:1.0' # gas composition\na = g\n\n#a.TP = 298.15, 101325\n#a.P = ct.OneAtm\n#a.X = comp\n\nh2lab = 'H2'\nn2lab = 'N2'\nh2i = a.species_index(h2lab)\nn2i = a.species_index(n2lab)\n\n# First, define the mesh\n# \nnx = 100\ndx = 0.01\nL = nx * dx\nxs = linspace(0,L,nx)\nmesh = Grid1D(nx=nx,dx=dx)\n\n# Define boundary values \nh2Left = 0.0001\nh2Right = 0.01\n\n# Define timestep-related parameters\n# Courant stability constraint for timestep\nD0 = 1.0e-5\nsafetyFactor = 0.9\ntimestepDuration = safetyFactor * dx**2 / (2 * D0)\n\n# Simulation duration\nsteps = 100\nprint(\"Nsteps =\" + str(steps))\nt = timestepDuration * steps\n\n\n# Now define your sweep variables\nh2var = [CellVariable(name=h2lab, mesh=mesh, value=h2Right, hasOld=1)]\n\n# Define the equation being solved\neq = TransientTerm() == DiffusionTerm(coeff=get_diffusion_coeff(h2var[0]))\n\n# Set the boundary conditions\nh2var[0].constrain(h2Left, mesh.facesLeft)\nh2var[0].constrain(h2Right, mesh.facesRight)\n\n\n# Setup the plot\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.set_xlabel('Distance m')\nax.set_ylabel('Hydrogen mols/m^2')\nax.set_title('Hydrogen')\n# Timestep through solutions\nh2var[0].setValue(h2Right)\nfor step in range(steps):\n # only move 
forward in time once per time step\n h2var[0].updateOld()\n \n # but \"sweep\" many times per time step\n for sweep in range(3):\n print(\"sweep loop\")\n res = eq.sweep(var=h2var[0],\n dt=timestepDuration)\n\n if step%20==0:\n \n ax.plot(xs,h2var[0].value)\n \nplt.show()\n","repo_name":"jweisbaum/titan_hydrogen","sub_path":"diffusion.py","file_name":"diffusion.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
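For a constant diffusion coefficient with fixed-value boundaries, the converged 1-D profile is a straight line between h2Left and h2Right, which gives a quick sanity check (a sketch reusing the names above; with the concentration-dependent D used here it holds only approximately):

# analytic steady state for constant D: linear interpolation between the BCs
x_cells = mesh.cellCenters.value[0]
expected = h2Left + (h2Right - h2Left) * x_cells / L
print(abs(h2var[0].value - expected).max())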
+{"seq_id":"34442826211","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport json\nfrom typing import List\nfrom src.lappy.models.bound import Bound\nfrom src.lappy.models.well import Well\n\n\nclass Field(object):\n \"\"\"\n Field class. Contains information about input geometry: bound and wells.\n\n Args:\n name (str): field name.\n bound (Bound): domain bound.\n wells (List[Well]): wells.\n \"\"\"\n def __init__(self, name, bound: Bound, wells: List[Well]):\n self.name = name\n self.bound = bound\n self.wells = wells\n\n @classmethod\n def create(cls, name: str, bound_file: str, wells_file: str):\n \"\"\"\n Creates class instance from input json file for bound and wells.\n\n Args:\n name (str): field name.\n bound_file (str): json file name which contains domain bound data.\n wells_file (str): json file name which contains wells data.\n \"\"\"\n bound = Bound.from_json(bound_file)\n with open(wells_file, \"r\", encoding=\"utf-8\") as f:\n fdata = f.read()\n data = json.loads(fdata)\n wells = list(map(Well.from_dict, data[\"wells\"]))\n return cls(name, bound=bound, wells=wells)\n","repo_name":"erythrocyte/qtlappy","sub_path":"old_var/src/lappy/models/field.py","file_name":"field.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74668554114","text":"from .models import Users, User_OpenAuth, User_LocalAuth\nfrom rest_framework import serializers\nfrom datetime import datetime\nfrom django.utils import timezone\n\n#class UsersSerializer(serializers.HyperlinkedModelSerializer):\nclass UsersSerializer(serializers.ModelSerializer):\n class Meta:\n model = Users\n #fields = ('UserID','NickName','LevelID','AccountPic_URL','SelfIntroduction','Gender','LocationProvince','LocationCity','CTime','MTime')\n fields = ('UserID', 'NickName', 'LevelID', 'AccountPic_URL', 'SelfIntroduction', 'Gender', 'LocationProvince',\n 'LocationCity')\n\n def create(self, validated_data):\n return Users.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n #instance.UserID = validated_data.get('UserID', instance.UserID)\n instance.NickName = validated_data.get('NickName', instance.NickName)\n instance.AccountPic_URL = validated_data.get('AccountPic_URL', instance.AccountPic_URL)\n instance.SelfIntroduction = validated_data.get('SelfIntroduction', instance.SelfIntroduction)\n instance.Gender = validated_data.get('Gender', instance.Gender)\n instance.LocationProvince = validated_data.get('LocationProvince', instance.LocationProvince)\n instance.LocationCity = validated_data.get('LocationCity', instance.LocationCity)\n instance.MTime = timezone.now()\n instance.save()\n return instance\n\n def __repr__(self):\n return u'' % (self.UserID, self.NickName)\n\nclass User_OpenAuthSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User_OpenAuth\n fields = ('ID','UserID','NickName','OAuthType','OAuthID','OAuth_Access_Token',\n 'OAuth_Refresh_Access_Token','OAuth_Expires','Scope','AccountPic_URL',\n 'Gender','Country','Province','City','CTime','MTime')\n\n def create(self, validated_data):\n return User_OpenAuth.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.NickName = validated_data.get('NickName', instance.NickName)\n instance.OAuthType = validated_data.get('OAuthType', instance.OAuthType)\n instance.OAuthID = validated_data.get('OAuthID', instance.OAuthID)\n instance.OAuth_Access_Token = validated_data.get('OAuth_Access_Token', instance.OAuth_Access_Token)\n instance.OAuth_Refresh_Access_Token = validated_data.get('OAuth_Refresh_Access_Token', instance.OAuth_Refresh_Access_Token)\n instance.OAuth_Expires = validated_data.get('OAuth_Expires', instance.OAuth_Expires)\n instance.Scope = validated_data.get('Scope', instance.Scope)\n instance.AccountPic_URL = validated_data.get('AccountPic_URL', instance.AccountPic_URL)\n instance.Gender = validated_data.get('Gender', instance.Gender)\n instance.Country = validated_data.get('Country', instance.Country)\n instance.Province = validated_data.get('Province', instance.Province)\n instance.City = validated_data.get('City', instance.City)\n instance.MTime = timezone.now()\n instance.save()\n return instance\n\nclass User_LocalAuthSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n module = User_LocalAuth\n fields = ('ID','UserID','LAuthType','UserName','Password','CTime','MTime')\n\n def create(self, validated_data):\n return User_LocalAuth.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.LAuthType = validated_data.get('LAuthType', instance.LAuthType)\n instance.UserName = validated_data.get('UserName', instance.UserName)\n instance.Password = validated_data.get('Password', instance.Password)\n instance.IsActive = validated_data.get('IsActive', 
instance.IsActive)\n instance.MTime = timezone.now()\n instance.save()\n return instance\n\n","repo_name":"dangwy/samp","sub_path":"sluds/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
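Typical DRF usage of the serializers above (a sketch; the field values are illustrative and validation may require more fields than shown):

serializer = UsersSerializer(data={"NickName": "alice", "Gender": "F"})
if serializer.is_valid():
    user = serializer.save()  # dispatches to create() or, with an instance, update()
else:
    print(serializer.errors)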
+{"seq_id":"3216009569","text":"import argparse\nimport logging\nfrom pyfiglet import Figlet\n\nfrom vizuka import dimension_reduction\nfrom vizuka import data_loader\nfrom vizuka import config\n\nfrom vizuka.config import (\n VERSION,\n INPUT_FILE_BASE_NAME,\n DATA_PATH,\n BASE_PATH,\n PROJECTION_DEFAULT_PARAMS,\n)\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.WARN)\n\ndef do_reduce(algorithm_name, parameters, version, data_path, reduced_path):\n \"\"\"\n Project the data in :param data_path: (version :param version:)\n with algorithm :param algorithm_name: using parameters in :param parameters\n and save it in :param reduced_path:\n \"\"\"\n algo_builder = dimension_reduction.make_projector(algorithm_name)\n algo = algo_builder(**parameters)\n\n (x, _ ,_ ,_, loaded, preprocessed_filename ) = data_loader.load_preprocessed(\n file_base_name = INPUT_FILE_BASE_NAME,\n path = data_path,\n version = version,\n )\n if not loaded:\n logging.warn(\"\\nNo data found\\nCorresponding file not found: {}\\nPlease check --show-required-files\".format(preprocessed_filename))\n return\n \n logging.warn(\"Projecting the preprocessed data in 2D.. this may take a while!\")\n algo.project(x) # do the dimension projection\n algo.save_projection(version=version, path=reduced_path) # save the result\n logging.warn(\"Projecting done\")\n\n\ndef main():\n \"\"\"\n Loads parameters and run do_reduce\n \"\"\"\n\n print(Figlet().renderText('Vizuka'))\n\n builtin_projectors, extra_projectors = dimension_reduction.list_projectors()\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-a', '--algorithm',\n help=(\n 'algorithm name to reduce dimensions, available are :\\n'\n + 'builtin: {}\\n'.format(list(builtin_projectors.keys()))\n + 'extra plugins: {}'.format(list(extra_projectors.keys()))\n )\n )\n parser.add_argument(\n '-p', '--parameters', action='append',\n help='specify parameters, e.g: \"-p perplexity:50 -p learning_rate:0.1\" '\n 'It will load default values in config.py if not specified'\n )\n parser.add_argument(\n '-v', '--version',\n help='specify a version of the files to load/generate (see vizuka --show_required_files), currently: '+VERSION)\n parser.add_argument(\n '--path',\n help='change the location of your data/ folder containing reduced/ (the projections)'\n ' and set/ (the raw and preprocessed data).'\n ' Default to {}'.format(BASE_PATH)\n )\n parser.add_argument(\n '--verbose', action=\"store_true\",\n help=\"verbose mode\")\n\n parser.set_defaults(\n algorithm = 'manual',\n parameters = {},\n path = BASE_PATH,\n version = VERSION,\n )\n\n args = parser.parse_args()\n\n (data_path, reduced_path, _, _, _, _) = config.path_builder(args.path)\n\n algorithm_name = args.algorithm\n parameters = args.parameters\n version = args.version\n verbose = args.verbose\n\n print(\"VERSION: Loading dataset labeled {} (cf --version)\".format(version))\n \n if verbose:\n logger.setLevel(logging.DEBUG)\n \n if algorithm_name == 'manual':\n choice_list, choice_dict = \"\", {}\n for i,method in enumerate([*builtin_projectors, *extra_projectors]):\n choice_list+=\"\\t\\t [{}]: {}\\n\".format(i, method)\n choice_dict[i]=method\n choice = input(\"No algorithm specified (-a or --algorithm)\\n\\tChoices available are:\\n\"+choice_list+\"\\t[?] 
> \")\n try:\n choice_int=int(choice)\n except:\n logger.warn(\"Please enter a valid integer !\")\n return\n algorithm_name = choice_dict[choice_int]\n \n\n for raw_param in parameters:\n if ':' not in raw_param:\n raise TypeError('parameter -p not used correctly! see --help')\n param_name, param_value = raw_param.split(':')\n parameters[param_name:param_value]\n\n default_params = PROJECTION_DEFAULT_PARAMS.get(algorithm_name, {})\n for default_param_name, default_param_value in default_params.items():\n if default_param_name not in parameters:\n parameters[default_param_name] = default_param_value\n logger.info('parameter {} not specified, using default value: {}'.format(\n default_param_name, default_param_value)\n )\n\n do_reduce(algorithm_name, parameters, version, data_path, reduced_path)\n \n cmd_to_launch = \"vizuka\"\n cmd_to_launch+= (' --version {}'.format(version) if version!=VERSION else '')\n cmd_to_launch+= ('--path {}'.format(args.path) if args.path != BASE_PATH else '')\n print('\"Projection done, you can now launch \"vizuka\"')\n\nif __name__=='__main__':\n main()\n","repo_name":"0011001011/vizuka","sub_path":"vizuka/launch_reduce.py","file_name":"launch_reduce.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"61"}
+{"seq_id":"3238465756","text":"class Solution:\n def calculate(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n # Pratt Parser: Top-Down Operator Precedence parsing\n # For more details see https://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing\n \n # TDOP\n def expression(rbp=0):\n t = self.token\n self.token = next(self.token_gen)\n left = t.nud()\n while rbp < self.token.lbp:\n t = self.token\n self.token = next(self.token_gen)\n left = t.led(left)\n return left\n \n def tokenize(program):\n for token in re.findall('\\d+|[-+*/]', program):\n if token.isdigit():\n yield number_token(token)\n elif token == \"+\":\n yield operator_add_token()\n elif token == \"-\":\n yield operator_neg_token()\n elif token == \"*\":\n yield operator_mul_token()\n elif token == \"/\":\n yield operator_div_token()\n else:\n raise SyntaxError('unknown operator: {}'.format(token))\n yield end_token()\n \n def parse(program):\n self.token_gen = tokenize(program)\n self.token = next(self.token_gen)\n return expression() \n \n # handle parentheses\n def match(tok=None):\n if tok and tok != type(self.token):\n raise SyntaxError('Expected {}'.format(tok))\n self.token = next(self.token_gen)\n \n class number_token(object):\n def __init__(self, value):\n self.value = int(value)\n def nud(self):\n return self.value\n \n class operator_add_token(object):\n lbp = 20\n def led(self, left):\n right = expression(20)\n return left + right\n class operator_neg_token(object):\n lbp = 20\n def led(self, left):\n return left - expression(20)\n \n class operator_mul_token(object):\n lbp = 30\n def led(self, left):\n return left * expression(30)\n \n class operator_div_token(object):\n lbp = 30\n def led(self, left):\n return left // expression(30)\n \n class end_token(object):\n lbp = 0\n \n return parse(s.strip())","repo_name":"chien-wei/LeetCode","sub_path":"0227_Basic_Calculator_II.py","file_name":"0227_Basic_Calculator_II.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7600324965","text":"from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline\nfrom curd.Classification import get_all_text, get_work_cla_dict, get_tag_dict, get_candidate\nfrom curd.workexp import get_context\n\nmodel_name = \"D:\\python2\\code7\\Fxxk-alogrithm\\\\api\\model\\class\\\\xlm-roberta-large-xnli-anli\"\ntokenizer = AutoTokenizer.from_pretrained(model_name)\nmodel = AutoModelForSequenceClassification.from_pretrained(model_name)\n\nclassifier = pipeline(\"zero-shot-classification\", model=model, tokenizer=tokenizer)\n\n\nasync def work_classification(id, ids):\n candidate_labels = await get_candidate(ids)\n text = await get_all_text(id)\n work_cla = classifier(text, candidate_labels)\n work_dict = await get_work_cla_dict(work_cla)\n return work_dict\n\n\nasync def tag_classification(id):\n candidate_labels = [\"工作变动稳定\", \"工作变动频繁\"]\n work = await get_context(id)\n tag_cla = classifier(work, candidate_labels)\n tag_dict = await get_tag_dict(tag_cla)\n return tag_dict\n","repo_name":"spumant/fxxk","sub_path":"api/model/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"15390812378","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*- \n# @Time : 12/28/2017 6:08 PM \n# @Author : sunyonghai \n# @File : ResNet50.py \n# @Software: BG_AI\n# =========================================================\nimport glob\nfrom keras.utils import Sequence\nimport math\nimport numpy as np\nimport cv2\nfrom keras.models import Sequential, Model\nfrom keras.layers import Conv2D, Input\nfrom keras import optimizers\nfrom keras.applications.resnet50 import ResNet50\n\n\ndef my_generator(path, batch_size, target_size=(256, 256)):\n a_filenames = glob.glob(path + 'a/*.jpg')\n a_filenames.sort(key=lambda x: int(x.split('/')[-1][:-4]))\n p_filenames = glob.glob(path + 'p/*.jpg')\n p_filenames.sort(key=lambda x: int(x.split('/')[-1][:-4]))\n n_filenames = glob.glob(path + 'n/*.jpg')\n n_filenames.sort(key=lambda x: int(x.split('/')[-1][:-4]))\n num_imgs = len(glob.glob(path + 'a/*.jpg'))\n num_batches = math.ceil(num_imgs / batch_size)\n\n while True:\n for idx in range(num_batches):\n batch_a = a_filenames[idx * batch_size: (idx + 1) * batch_size]\n batch_p = p_filenames[idx * batch_size: (idx + 1) * batch_size]\n batch_n = n_filenames[idx * batch_size: (idx + 1) * batch_size]\n\n a_arrays = np.array([cv2.resize(cv2.imread(filename), target_size) for filename in batch_a])\n p_arrays = np.array([cv2.resize(cv2.imread(filename), target_size) for filename in batch_p])\n n_arrays = np.array([cv2.resize(cv2.imread(filename), target_size) for filename in batch_n])\n batch_y = np.zeros((batch_size, batch_size))\n\n yield [a_arrays, p_arrays, n_arrays], batch_y\n\ndef dummy_gen():\n\n while True:\n for i in range(100):\n a = np.random.random((1, 224, 224, 3))\n b = np.random.random((1, 224, 224, 3))\n c = np.random.random((1, 224, 224, 3))\n y = np.zeros((10, 10))\n\n yield [a, b, c], y\n\n# inp = Input((256, 256, 3))\n# inp1 = Input((256, 256, 3))\n# inp2 = Input((256, 256, 3))\n# inp3 = Input((256, 256, 3))\n\n# x = Conv2D(128, 3, input_shape=(256, 256, 3))(inp)\n# x = Conv2D(256, 3)(x)\n# x = Conv2D(256, 3)(x)\n# base_model = Model(inp, x)\nbase_model = ResNet50(weights='imagenet')\nbase_model.compile(optimizer=optimizers.Adam(), loss='categorical_crossentropy')\n\n# out1 = base_model(inp1)\n# out2 = base_model(inp2)\n# out3 = base_model(inp3)\n\n# model = Model([inp1, inp2, inp3], [out1, out2, out3])\n\n\n# my_gen = my_generator(path, batch_size=1, target_size=(256, 256))\n# my_gen = dummy_gen()\n# model.predict_generator(my_gen, steps=10, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=1)\n\nx = np.random.random((1, 224, 224, 3))\nX = np.concatenate([x for i in range(1000)])\ny = np.random.random((1000, 1000))\nbase_model.fit(X, y, batch_size=10, epochs=10, verbose=1)","repo_name":"FMsunyh/dlfive","sub_path":"Scratch/ResNet50.py","file_name":"ResNet50.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"74566413313","text":"import pytest\n\nfrom pygridgain import Client, AioClient\nfrom tests.util import start_ignite_gen\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef server1():\n yield from start_ignite_gen(1)\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef server2():\n yield from start_ignite_gen(2)\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef server3():\n yield from start_ignite_gen(3)\n\n\n@pytest.fixture(scope='module')\ndef client():\n client = Client()\n try:\n client.connect('127.0.0.1', 10801)\n yield client\n finally:\n client.close()\n\n\n@pytest.fixture(scope='module')\nasync def async_client(event_loop):\n client = AioClient()\n try:\n await client.connect('127.0.0.1', 10801)\n yield client\n finally:\n await client.close()\n\n\n@pytest.fixture\nasync def async_cache(async_client: 'AioClient'):\n cache = await async_client.create_cache('my_bucket')\n try:\n yield cache\n finally:\n await cache.destroy()\n\n\n@pytest.fixture\ndef cache(client):\n cache = client.create_cache('my_bucket')\n try:\n yield cache\n finally:\n cache.destroy()\n\n\n@pytest.fixture(autouse=True)\ndef expiry_policy_supported(request, server1):\n client = Client()\n with client.connect('127.0.0.1', 10801):\n result = client.protocol_context.is_expiry_policy_supported()\n if not result and request.node.get_closest_marker('skip_if_no_expiry_policy'):\n pytest.skip(f'skipped {request.node.name}, ExpiryPolicy APIis not supported.')\n\n return result\n","repo_name":"gridgain/python-thin-client","sub_path":"tests/common/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71395821954","text":"import numpy as np\nimport traceback as tb\n\ninputList = []\n\nprint(\"Please enter your input one number per line, or 'q' to exit:\")\nwhile True:\n line = input()\n if line == 'q':\n print(\"Exit\")\n break\n else:\n try:\n inputList.append(float(line))\n print(\"Your output is:\")\n print(\"{:.3f} {:.3f} {}\".format(np.mean(inputList), np.nanstd(inputList), np.median(inputList)))\n\n except ValueError:\n tbError = tb.format_exc()\n print(\"Invalid value entered, please try again, or enter 'q' to exit.\")\n\n","repo_name":"el-grudge/RunningStats","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"86488315440","text":"#Model of the economy\r\nimport random as rand\r\n\r\n#State variables people\r\n\r\n#Types of componets of economy\r\n\r\nclass econ:\r\n def __init__(self,label,P,G):\r\n self.label = label\r\n self.P = P\r\n self.G = G\r\n self._time = 0\r\n self._gdp = 0\r\n def __str__(self):\r\n return '{}'.format(self.label)\r\n\r\n def get_owners(self):\r\n O = []\r\n for people in self.P:\r\n if isinstance(people, owner):\r\n O.append(people)\r\n return O\r\n def get_workers(self):\r\n W = []\r\n for people in self.P:\r\n if isinstance(people, worker):\r\n W.append(people)\r\n return W\r\n def total_spending(self):\r\n sum = 0\r\n for people in self.P:\r\n sum += people.spent\r\n return int(sum)\r\n @property\r\n def time(self):\r\n return self._time\r\n\r\n @time.setter\r\n def time(self,year):\r\n self._time = year\r\n @property\r\n def gdp(self):\r\n sum = 0\r\n for person in self.P:\r\n sum += person.money\r\n for comp in self.G:\r\n sum += comp.val\r\n return int(sum)\r\n def forecast(self,year) :\r\n a = 0.50/len(self.get_owners())\r\n b = 0.25/len(self.get_workers())\r\n c = 0.25\r\n for time in range(self._time,year):\r\n #shuffle list of companies\r\n rand.shuffle(self.G)\r\n for group in self.G:\r\n # Amount of G+S on offer by company\r\n GS = group.prod\r\n P_money = []\r\n for person in self.P:\r\n P_money.append(person._money)\r\n # prob person buy and % of G+S the person will buy\r\n r = rand.random()\r\n #People get paid by their company and buy G+S from other company\r\n if group.ID != person.ID:\r\n if person.money > GS*r:\r\n person.money -= GS*r\r\n GS -= GS*r\r\n #Profit made\r\n profit = group.prod - GS\r\n i = 0\r\n for person in self.P:\r\n person.spent = int(P_money[i] - person.money)\r\n i += 1\r\n if group.ID == person.ID:\r\n if isinstance(person,owner): \r\n person.money += profit*a\r\n elif isinstance(person,worker):\r\n person.money += profit*b\r\n group.val += profit*c \r\n self.time = year\r\n\r\n\r\nclass comp:\r\n def __init__(self,label,val):\r\n self.ID = id(self)\r\n self.label = label\r\n self.val = val\r\n self.prod = self.val/10\r\n @property\r\n def prod(self):\r\n return self.val/10\r\n \r\n @prod.setter\r\n def prod(self,appreciation):\r\n self.prod = appreciation\r\n \r\n @property\r\n def val(self):\r\n return self.val\r\n @val.setter\r\n def val(self,nw_val):\r\n self.val = nw_val\r\n\r\n def __str__(self):\r\n return '{}'.format(self)\r\nclass worker:\r\n def __init__(self,money,comp):\r\n self.ID = id(comp)\r\n self._money = money\r\n self._spent = 0\r\n self.assets = 0\r\n @property\r\n def money(self):\r\n return self._money\r\n\r\n @money.setter\r\n def money(self,change):\r\n self._money = change\r\n @property\r\n def spent(self):\r\n return self._spent\r\n @spent.setter\r\n def spent(self,nw_spent):\r\n self._spent = nw_spent\r\n def __str__(self):\r\n return '{}'.format(self)\r\n\r\nclass owner:\r\n def __init__(self,money,assets,comp):\r\n self.ID = id(comp)\r\n self.assets = assets\r\n self._money = money\r\n self._spent = 0\r\n @property\r\n def money(self):\r\n return self._money\r\n\r\n @money.setter\r\n def money(self,change):\r\n self._money = change\r\n @property\r\n def spent(self):\r\n return self._spent\r\n @spent.setter\r\n def spent(self,nw_spent):\r\n self._spent = nw_spent\r\n\r\n def __str__(self):\r\n return '{}'.format(self)\r\n\r\n#proportion of profits from G+S made by each party\r\na = 0.75\r\nb = 0.125 \r\n\r\ndef CrePop():\r\n f = open('c:\\\\Users\\\\Sena\\\\Documents\\\\Python 
Scripts\\\\vmpy\\\\text.txt','w')\r\n name = ['Woolworths','Napster','Myspace','AOL','Blockbuster','Smyths','IBM','Verizon','AT&T','Comcast','Apple','Sony','Disney','Netflix','Amazon','Google']\r\n print('Comp', file=f)\r\n for i in range(16):\r\n t = i+1\r\n val = 5000*t\r\n print('G{} {} {}'.format(t,name[i],val),file=f)\r\n\r\n print('\\n','Population', file=f)\r\n for i in range(50):\r\n t = i+1\r\n a = 500*t\r\n if i <= 20: \r\n b = 550*t\r\n print('p{} {} {}'.format(t,b,a),file=f)\r\n else:\r\n print('p{} {}'.format(t,a),file=f)\r\n print('Done!')\r\n f.close()\r\n\r\ndef main():\r\n G1 = comp('Amazon',50000)\r\n p1 = owner(50000,10000,G1)\r\n p2,p3 = worker(5000,G1), worker(3000,G1)\r\n\r\n G2 = comp('Apple',100000)\r\n p4 = owner(30000,5000,G2)\r\n p5, p6 = worker(2500,G2), worker(5750,G2)\r\n\r\n G3 = comp('Google',700000)\r\n p7,p8 = owner(50000,1000,G3), owner(15000,4000,G3)\r\n p9, p10, p11, p12 = worker(5000,G3), worker(7250,G3), worker(6000,G3), worker(3000,G3)\r\n\r\n P = []\r\n G = [G1,G2,G3]\r\n Pi = [p1,p2,p3,p4,p5,p6,p7,p8,p9,p10,p11,p12]\r\n UK = econ('UK',Pi,G)\r\n \r\n for i in range(10): \r\n UK.forecast(i)\r\n #print('the year is {}'.format(UK.time)) \r\n #print('{} value in year {} is '.format(G2.label,i),G2.val)\r\n print('Total spending is {}'.format(UK.total_spending()))\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"senoichi/Python-functions","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
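The comp class above keeps its state in a private _val field so the val property and its setter never call themselves; the same backing-field pattern in isolation (a minimal illustration):

class Account:
    def __init__(self, balance):
        self._balance = balance
    @property
    def balance(self):
        return self._balance
    @balance.setter
    def balance(self, value):
        # must write to the backing field; `self.balance = value` here would recurse forever
        self._balance = value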
+{"seq_id":"74950232193","text":"import boto3\nimport pprint\n\nrds = boto3.client('rds', region_name='us-east-1')\n\ndb_instance_id = \"dr-test-db\" # name of the DataBase Instance\nsnapshot_name = \"awsbackup:job-037811c5-606e-aa46-22d5-010815d758cb\" # fill with latest snapshot\ndb_instance_class = \"db.t3.small\" # size of the instance\ndb_subnet_group_name = \"rds_prod_subg\" # rds subnet groups\niops = 1000 # if the above is set to io1\noption_group = \"default:postgres-11\" # rds option group\ndb_parameter_group = \"default.postgres11\" # rds parameter group\ndelete_protection = False # only for testing\ntags = [{'Key': 'Name', 'Value': 'test-db'}] # add more with comma seperation\n\nresponse = rds.restore_db_instance_from_db_snapshot (\n DBInstanceIdentifier=db_instance_id,\n DBSnapshotIdentifier=snapshot_name,\n DBInstanceClass=db_instance_class,\n DBSubnetGroupName=db_subnet_group_name,\n MultiAZ=True,\n PubliclyAccessible=False,\n AutoMinorVersionUpgrade=True,\n Iops=iops,\n OptionGroupName=option_group,\n Tags=tags,\n StorageType='io1',\n EnableCloudwatchLogsExports=[\n 'postgresql',\n 'upgrade'\n ],\n DBParameterGroupName=db_parameter_group,\n DeletionProtection=delete_protection\n)\n\nprint(\"Creating DB instance\")\npprint.pprint(response)\n","repo_name":"JohnMops/Python","sub_path":"AWS/Scripts/create_rds_from_snapshot.py","file_name":"create_rds_from_snapshot.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"30111113767","text":"import gevent\nimport ssl\nfrom websocket import create_connection\nfrom locust import HttpLocust, TaskSet, task, events\nimport json\nimport time\n\n\nclass TaskSetAPI(TaskSet):\n \"\"\"\n events = (\n 'api/v1/namespaces/{}/pods'.format(project),\n 'api/v1/namespaces/{}/services'.format(project),\n 'api/v1/namespaces/{}/replicationcontrollers'.format(project),\n\n 'oapi/v1/namespaces/{}/builds'.format(project),\n 'oapi/v1/namespaces/{}/deploymentconfigs'.format(project),\n 'oapi/v1/namespaces/{}/imagestreams'.format(project),\n 'oapi/v1/namespaces/{}/routes'.format(project),\n 'oapi/v1/namespaces/{}/buildconfigs'.format(project)\n )\n \"\"\"\n\n @task(1)\n def on_start(self):\n\n urllist = []\n project = 'webapp1'\n resVersion = 0\n watch='True'\n token = 'pi31Z_9uzN2p0ZAzUt1Sfsmq1lakFrXDnxdJf87Ut_0'\n srv = 'ec2-x-x-x-x.us-west-2.compute.amazonaws.com:8443'\n\n '''url = 'wss://{}/api/v1/namespaces/{}/events?watch=true&resourceVersion={}&access_token={}'.format(srv,\n project,\n resVersion,\n token)\n '''\n\n api_ws_urls = (\n 'api/v1/namespaces/{}/pods'.format(project),\n 'api/v1/namespaces/{}/services'.format(project),\n 'api/v1/namespaces/{}/replicationcontrollers'.format(project),\n\n 'oapi/v1/namespaces/{}/builds'.format(project),\n 'oapi/v1/namespaces/{}/deploymentconfigs'.format(project),\n 'oapi/v1/namespaces/{}/imagestreams'.format(project),\n 'oapi/v1/namespaces/{}/routes'.format(project),\n 'oapi/v1/namespaces/{}/buildconfigs'.format(project)\n )\n\n for event in api_ws_urls:\n urllist.append('wss://{}/{}?watch={}&resourceVersion={}&access_token={}'.format(\n srv, event, watch, resVersion, token))\n\n for url in urllist:\n ws = create_connection(url, sslopt={\"cert_reqs\": ssl.CERT_NONE})\n self.ws = ws\n\n\n def _receive():\n while True:\n res = ws.recv()\n start_at = time.time()\n data = json.loads(res)\n print(res, data)\n\n end_at = time.time()\n response_time = int((end_at - start_at) * 1000000)\n events.request_success.fire(\n request_type='WebSocket Received',\n name='test/wss',\n response_time=response_time,\n response_length=len(res),\n )\n\n gevent.spawn(_receive)\n\n\n @task(2)\n def send(self):\n \"\"\"\n Payload here makes the connection break [broken pipe].\n Real use case should be with:\n oc create -f $payloadfile.{yml,json}\n\n \"\"\"\n payload = {\n \"apiVersion\": \"v1\",\n \"count\": 4,\n \"firstTimestamp\": \"2016-03-14T16:35:15.000Z\",\n \"involvedObject\": {\n \"apiVersion\": \"v1\",\n \"kind\": \"Build\",\n \"name\": \"ruby-hello-world-1-build\",\n \"namespace\": \"default\",\n \"resourceVersion\": \"3000\",\n \"uid\": \"bbfd8973-ea02-11e5-bab8-28d2447dc82b\"\n },\n \"kind\": \"Event\",\n \"lastTimestamp\": \"2016-03-14T16:35:22.000Z\",\n \"message\": \"no nodes available to schedule pods\",\n \"metadata\": {\n \"creationTimestamp\": \"2016-03-14T16:35:15.000Z\",\n \"deletionTimestamp\": \"2016-03-14T18:35:22.000Z\",\n \"name\": \"ruby-hello-world-1-build.143bc3016cfdd9c0\",\n \"namespace\": \"default\",\n \"resourceVersion\": \"3027\",\n \"selfLink\": \"/api/v1/namespaces/webapp1/events/ruby-hello-world-1-build.143bc3016cfdd9c0\",\n \"uid\": \"bbfed6ce-ea02-11e5-bab8-28d2447dc82b\"\n },\n \"reason\": \"FailedScheduling\",\n \"source\": {\n \"component\": \"default-scheduler\"\n },\n \"type\": \"Warning\"\n }\n\n start_at = time.time()\n body = json.dumps(payload)\n self.ws.send(body)\n\n events.request_success.fire(\n request_type='WebSocket Sent',\n name='API event',\n response_time=int((time.time() - start_at) * 
1000000),\n response_length=len(body),\n )\n\n def on_quit(self):\n self.ws.close()\n\n\nclass LocustDispatcher(HttpLocust):\n #desthost = 'https://ec2-x-x-x-x.us-west-2.compute.amazonaws.com:8443'\n\n task_set = TaskSetAPI\n\n '''\n These are the minimum and maximum time, in ms, that a simulated user will wait between executing each task.\n min_wait and max_wait default to 1000, and therefore a locust will always wait 1 second between each task if\n min_wait and max_wait are not declared.\n '''\n min_wait = 500\n max_wait = 1000\n","repo_name":"openshift/svt","sub_path":"applications_scalability/websockets_perf/utils/locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"61"}
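+The receiver above only prints raw frames; a hedged sketch of decoding one Kubernetes watch message (the helper name and sample frame are illustrative, not part of the original file):
+
+import json
+
+def summarize_watch_event(res):
+    # pull the change type (ADDED/MODIFIED/DELETED) and object name out of one frame
+    data = json.loads(res)
+    obj = data.get('object', {})
+    return '{} {}'.format(data.get('type', '?'), obj.get('metadata', {}).get('name', '?'))
+
+print(summarize_watch_event('{"type": "ADDED", "object": {"metadata": {"name": "pod-1"}}}'))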
+{"seq_id":"30488103820","text":"import boto3\r\nfrom botocore.exceptions import ClientError\r\nimport datetime\r\nfrom datetime import timedelta, date\r\nimport logging,re, os\r\nfrom dotenv import load_dotenv\r\n\r\nload_dotenv()\r\n\r\n#Setup logger\r\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level='INFO')\r\n\r\n#Setup profile to use in this session\r\nboto3.setup_default_session(profile_name=os.getenv(\"PROFILE_NAME\"))\r\n\r\n#Setup boto3 client\r\ns3_client = boto3.client('s3')\r\n\r\n#Setup region\r\nregion = os.getenv(\"REGION\")\r\n\r\n#the number of days an object will be considered expired\r\nexpire_threshold = int(os.getenv(\"EXPIRE_THRESHOLD\"))\r\n\r\n#the total number of s3 objects that will be removed\r\ntotal_objects = 0\r\n\r\nexpiring_folders = []\r\n\r\n#bucket Name\r\nbucket_name = os.getenv(\"BUCKET_NAME\")\r\ns3_lifecycle = os.getenv(\"S3_LIFECYCLE\")\r\n\r\n#Get 14 days ahead of today\r\ndate_in_two_weeks = date.today() + timedelta(days=14)\r\nlogging.info(f\"Getting the dates in 14 days: {date_in_two_weeks}\")\r\n\r\n#Get date 3 years before the date\r\ncut_date = date_in_two_weeks - timedelta(days=expire_threshold)\r\nlogging.info(f\"Files older than {cut_date} will be considered expired\")\r\n\r\ndef check_number_of_objects(folder_names):\r\n file_count=0\r\n folder_names = folder_names\r\n s3 = boto3.resource('s3')\r\n bucket = s3.Bucket(bucket_name)\r\n for file in bucket.objects.filter(Prefix=folder_names):\r\n file_count+=1\r\n return file_count\r\n\r\ndef check_subdirectories_older_than_date(s3client,logs_file, cut_date):\r\n global total_objects\r\n global expiring_folders\r\n prefix = logs_file+\"/\"\r\n result = s3client.list_objects(Bucket=bucket_name, Prefix=prefix, Delimiter='/')\r\n for o in result.get('CommonPrefixes'):\r\n subfolder = o.get('Prefix')\r\n \r\n # Use regex to find the dates in the folder structure\r\n regex = r\"(?<=dt=)\\d+\"\r\n match = re.search(regex, subfolder)\r\n if match:\r\n dates_in_log_folder = match.group(0)\r\n dates_in_log_folder_object = datetime.datetime.strptime(dates_in_log_folder,\"%Y%m%d\")\r\n \r\n #Check if folder than is going to be older than cut-off date\r\n if dates_in_log_folder_object.date() <= cut_date:\r\n num_of_files = check_number_of_objects(subfolder)\r\n logging.info(f\"This folder will be expired by {cut_date} : {subfolder}, which contains {num_of_files} objects\" )\r\n expiring_folders.append(subfolder)\r\n total_objects += num_of_files\r\n else:\r\n logging.info(f\"No folders with dates found in {logs_file}\")\r\n \r\ndef publish_message(topic_arn, message, subject, region):\r\n \"\"\"\r\n Publishes a message to a topic.\r\n \"\"\"\r\n AWS_REGION = region\r\n sns_client = boto3.client('sns', region_name=AWS_REGION)\r\n \r\n try:\r\n response = sns_client.publish(\r\n TopicArn=topic_arn,\r\n Message=message,\r\n Subject=subject,\r\n )['MessageId']\r\n except ClientError:\r\n logging.exception(f'Could not publish message to the topic.')\r\n raise\r\n else:\r\n return response\r\n\r\nif __name__ == \"__main__\":\r\n # Get the content of logs.directory and get the files\r\n with open(\"logs_directory.txt\") as f:\r\n logs_directory = [line.rstrip() for line in f]\r\n \r\n # Loop through all the logs directories in S3\r\n for log_type in logs_directory:\r\n s3client = s3_client\r\n logging.info(f\"Checking the folders inside {log_type}\")\r\n check_subdirectories_older_than_date(s3client,log_type,cut_date)\r\n\r\n #Total objects will be expired in the next 14 days\r\n 
logging.info(f\"A total of {total_objects:,} objects will be expired by {cut_date}\")\r\n \r\n #Send SNS message\r\n topic_arn = os.getenv(\"TOPIC_ARN\")\r\n message = f'This is a notification about logs files deletion.\\n' \\\r\n f'{total_objects:,} objects will be removed by {cut_date} (14 days from today).\\n' \\\r\n f'These folders and its content will be expiring by {cut_date} : \\n'\r\n \r\n for folders in expiring_folders:\r\n message += folders + \"\\n\"\r\n \r\n message += f'Checkout the bucket S3 lifecycle here {s3_lifecycle}.'\r\n \r\n subject = 'Logs will be removed in S3 logs [TESTING]'\r\n logging.info(f'Publishing message to topic - {topic_arn}...')\r\n message_id = publish_message(topic_arn, message, subject,region)\r\n logging.info(\r\n f'Message published to topic - {topic_arn} with message Id - {message_id}.'\r\n )\r\n","repo_name":"HapizTeoh/s3-objects-expiry-checker","sub_path":"check_s3_files.py","file_name":"check_s3_files.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24472265831","text":"\r\ndef proceso():\r\n \"\"\"\r\n Se realiza el proceso ingreso y de implrecion de los nodos con las aristas \r\n parametros: \r\n ------------------------\r\n retorna:\r\n -----------------------\r\n \"\"\"\r\n # Pedir número de nodos\r\n n = int(input(\"Ingrese el numero de nodos: \"))\r\n\r\n # Inicializar lista de adyacencia\r\n graph = {i: [] for i in range(n)}\r\n\r\n # Pedir número de aristas\r\n m = int(input(\"Ingrese el numero de aristas: \"))\r\n\r\n # Pedir datos de las aristas\r\n for i in range(m):\r\n #pedimos el formato en el que se va a ingresar y guardar\r\n u, v = map(int, input(\"Ingrese la arista {} (en formato 1 2 dependiendo el numero de nodo): \".format(i+1)).split())\r\n #guardamos los valores\r\n graph[u].append(v)\r\n\r\n # Imprimir grafo\r\n print(\"Grafo:\")\r\n #ciclo de repeticion para imprimir los datos\r\n for u in range(n):\r\n #imprime los datos\r\n print(\"{} -> {}\".format(u, graph[u]))\r\n\r\nif __name__ == '__main__':\r\n \"\"\"\r\n funcion general \r\n parametros:\r\n -------------\r\n retorna:\r\n -------------\r\n \"\"\"\r\n #llamamos al proceso \r\n proceso()","repo_name":"M4t3B4rriga/MODELOS_DISCRETOS","sub_path":"[NRC_8001]_Tarea1_dirigido_Unidad3_Barriga_Llumiquinga_Mateo_Sebastian_Unidad..py","file_name":"[NRC_8001]_Tarea1_dirigido_Unidad3_Barriga_Llumiquinga_Mateo_Sebastian_Unidad..py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9980579474","text":"#!/usr/bin/python3\n#\n# table.py\n#\n# Implements a two-dimension table where all cells must be of same type\n#\n \nfrom collections.abc import Sized\n \nclass Table:\n #\n # Initializes an instance of Table class\n #\n # celltype: type of each cell in table\n # xlabels: labels of x-axis\n # ylables: labels of y-axis\n # unit: unit of each cell (printed as suffix)\n #\n def __init__(self, celltype, xlabels, ylabels, unit=\"\"):\n if not isinstance(celltype, type):\n raise TypeError(\"celltype must be a type (e.g. str, float)\")\n self.celltype = celltype\n self.xlabels = tuple(xlabels)\n self.ylabels = tuple(ylabels)\n self.unit = unit\n \n self.table = [[None for j in range(24)] for i in range(35)] \n # TODO: finish me\n return\n \n #\n # \"private\" member function to validate key\n #\n def _validate_key(self, key):\n if not isinstance(key, Sized):\n raise TypeError(\"key must be a sized container\")\n if len(key) != 2:\n raise KeyError(\"key must have exactly two elements\")\n # unpack key to row and column\n row, col = key \n if row not in self.ylabels:\n raise KeyError(\"%s is not a valid y-label\"%str(row))\n if col not in self.xlabels:\n raise KeyError(\"%s is not a valid x-label\"%str(col))\n return row, col\n \n #\n # Overloads index operator for assigning to a cell\n #\n # key: key of the cell\n # value: value of the cell (must be of type 'celltype')\n # \n def __setitem__(self, key, value):\n if not isinstance(value, self.celltype):\n raise TypeError(\"value must be of type %s\"%(self.celltype.__name__))\n row, col = self._validate_key(key)\n \n self.table[self.ylabels.index(row)][self.xlabels.index(col)] = value\n \n return self.table[self.ylabels.index(row)][self.xlabels.index(col)]\n \n #\n # Overloads index operator for retrieving a value from a cell\n #\n # key: key of the cell\n # \n def __getitem__(self, key):\n row, col = self._validate_key(key)\n \n # TODO: implement me\n return self.table[self.ylabels.index(row)][self.xlabels.index(col)]\n \n #\n # Overloads index operator for deleting a cell's value. You should\n # set the cell's value back to None\n #\n # key: key of the cell\n # \n def __delitem__(self, key):\n row, col = self._validate_key(key)\n self.table[self.ylabels.index(row)][self.xlabels.index(col)] = None\n # TODO: implement me\n return\n\n def __str__(self):\n # column width\n colwidth = 6 if self.celltype is float else 2\n \n # y-label width (for first column)\n ylwidth = max([len(str(y)) for y in self.ylabels])\n \n # print title row (space delimited labels)\n print(\" \".join([ \" \"*ylwidth ] + \n [ str(x)[:colwidth].center(colwidth) for x in self.xlabels ]))\n \n # print each row from the table\n for y in self.ylabels:\n row = [ str(y).rjust(ylwidth) ]\n for x in self.xlabels:\n val = self[y,x]\n if val is None:\n text = '-' * colwidth\n elif self.unit == '%':\n # crash now if probability table has a value error\n assert(isinstance(val, float) and val >= 0.)\n text = \"%.3f%%\"%(val*100)\n elif isinstance(val, float):\n text = \"%.3f\"%val if val < 0 else \" %.3f\"%val\n else:\n text = str(val)[:colwidth].center(colwidth)\n row.append(text)\n print(\" \".join(row))\n return '\\nEnd Table\\n'\n \n \n\n\n","repo_name":"KevinUTAT/easyBlackJack","sub_path":"Part2/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40738894893","text":"import bpy\nimport bmesh\nimport re\nimport random\nfrom math import sqrt\n\ndef currentLayer():\n #digging scene\n context = bpy.context\n scene = context.scene\n #current layer number\n layerNum = 0\n for layer in scene.layers:\n if layer:\n break\n else:\n layerNum += 1\n return layerNum\n #current number of group\n\n#1. RENAME BRICKS \ndef rename():\n #current layer number\n layerNum = currentLayer()\n #remove selection from all objects on scene\n bpy.ops.object.select_all(action='DESELECT')\n #digging scene\n context = bpy.context\n scene = context.scene\n #looking for objects on current layer\n for object in scene.objects:\n if object.layers[layerNum]:\n if object.type == 'MESH':\n object.name = 'Brick.000'\n\n#2. REMOVING DOUBLES\ndef removeDoubles():\n\tbpy.ops.object.select_all(action='SELECT')\n\n\tcontext = bpy.context\n\n\tdistance = 0.0001\n\n\tif True:\n\t\tmeshes = [obj.data for obj in context.selected_objects if obj.type == 'MESH']\n\t\t\n\t\tbm = bmesh.new()\n\t\t\n\t\tfor mesh in meshes:\n\t\t\tbm.from_mesh(mesh)\n\t\t\tbmesh.ops.remove_doubles(bm, verts = bm.verts, dist = distance)\n\t\t\t#соединение рядом стоящих точек\n\t\t\t#bmesh.ops.automerge(bm, verts = bm.verts, dist = distance)\n\t\t\t#print(mesh.name)\n\t\t\tbm.to_mesh(mesh)\n\t\t\tmesh.update()\n\t\t\tbm.clear()\n\t\tbm.free()\n\n\tbpy.ops.object.select_all(action='DESELECT')\n\n#3. CORRECTING TEXTURES\ndef textureCorrection():\n #deselecting objects on scene \n bpy.ops.object.select_all(action='DESELECT')\n #knocking to necessery scene\n context = bpy.context\n scene = context.scene\n #current layer number\n layerNum = currentLayer()\n #list of faces in object\n face_list = [] \n for object in scene.objects:\n #Be sure to specify the desired layer\n if object.layers[layerNum] and object.type == 'MESH':\n #set active object for editing\n scene.objects.active = object\n bpy.context.tool_settings.mesh_select_mode = (False, False, True)\n #deselecting all faces\n for face in object.data.polygons:\n face.select = False\n #selecting daces only with material = 1\n for face in object.data.polygons:\n if face.material_index == 1:\n face.select = True\n bpy.ops.object.editmode_toggle() \n bpy.ops.uv.smart_project()\n bpy.ops.object.editmode_toggle()\n \n#4. CREATING GROUPS OF VERTICES\ndef groupsCreation(objLocGroup):\n #remove selection from all objects on scene\n bpy.ops.object.select_all(action='DESELECT')\n #digging scene\n context = bpy.context\n scene = context.scene\n #current layer number\n layerNum = currentLayer()\n #list of verticies\n vertexList = []\n #looking for objects on current layer\n for object in scene.objects:\n if object.layers[layerNum]:\n if object.type == 'MESH':\n #activating object to start editing it\n scene.objects.active = object\n #clear list of vertex groups in object if it somehow been there\n object.vertex_groups.clear()\n #create vertexGroup\n vertexGroup = object.vertex_groups.new(name = object.name)\n #create list of vertices in object\n for vertex in object.data.vertices:\n vertexList.append(vertex.index)\n #assign vertices in list to created group\n vertexGroup.add(vertexList, 1.0, 'ADD')\n #clear list of vertices for new group\n vertexList.clear()\n objLocGroup.append([object.location, vertexGroup.name])\n\n#5. 
JOINING PIECES IN ONE MESH \ndef joinPieces():\n bpy.ops.object.select_all(action='SELECT')\n #digging scene\n context = bpy.context\n scene = context.scene\n #current layer number\n layerNum = currentLayer()\n #looking for objects on current layer\n for object in scene.objects:\n if object.layers[layerNum]:\n bpy.context.scene.objects.active = bpy.data.objects[object.name]\n bpy.ops.object.join() \n \n#6. MAKE PAIR OF VERTEX AND GROUP FOR COLORIZE\ndef makePairVertexGroup(groupsAndVerticies, objLocGroup, vertexDistObjCoColorList):\n #remove selection from all objects on scene\n bpy.ops.object.select_all(action='DESELECT')\n #digging scene\n context = bpy.context\n scene = context.scene\n #current layer number\n layerNum = currentLayer()\n #looking for objects on current layer\n for object in scene.objects:\n if object.layers[layerNum] and object.type == 'MESH':\n #looking for group\n for vGroup in object.vertex_groups:\n if 'Brick' in vGroup.name:\n #group color\n color = (random.random(), random.random(), random.random())\n #list of verticies\n vertexList = []\n for vertex in object.data.vertices:\n for g in vertex.groups:\n if g.group == vGroup.index:\n for elem in objLocGroup:\n if vGroup.name == elem[1]:\n objLoc = elem[0]\n groupsAndVerticies.append([vertex.index, vGroup.index, color])\n #collecting distance of vertex to object centr\n vertexWorldCo = object.matrix_world * vertex.co\n deltaX = objLoc[0] - vertexWorldCo.x\n deltaY = objLoc[1] - vertexWorldCo.y\n deltaZ = objLoc[2] - vertexWorldCo.z\n distance = sqrt(deltaX*deltaX + deltaY*deltaY + deltaZ*deltaZ)\n vertexDistObjCoColorList.append([distance, objLoc, color])\n\n#7. VERTEX COLORIZE\ndef colorizeVerticies(groupsAndVerticies):\n bpy.ops.object.select_all(action='DESELECT')\n layerNum = currentLayer()\n #knocking to necessery scene\n context = bpy.context\n scene = context.scene\n #vertex color\n newColor = (0.0, 0.0, 0.0)\n #looking for objects on current layer\n for object in scene.objects:\n if object.layers[layerNum]:\n if object.type == 'MESH':\n #mesh from object\n mesh = bpy.context.object.data\n if mesh.vertex_colors.active is None:\n mesh.vertex_colors.new()\n for poly in mesh.polygons:\n #here we need to pair vertex with group\n for vertex in poly.vertices:\n for pair in groupsAndVerticies:\n if vertex == pair[0]:\n newColor = pair[2]\n groupsAndVerticies.remove(pair)\n break\n for loop in poly.loop_indices:\n mesh.vertex_colors.active.data[loop].color = newColor\n #print(mesh.vertex_colors.active.data[loop].color)\n mesh.update()\n \n \n#8. NORMALS CORRECTION\ndef normalsCorrection():\n bpy.ops.object.select_all(action='DESELECT')\n layerNum = currentLayer()\n #knocking to necessery scene\n context = bpy.context\n scene = context.scene\n #looking for objects on current layer\n for object in scene.objects:\n if object.layers[layerNum]:\n if object.type == 'MESH':\n object.select = True\n bpy.context.scene.objects.active = object\n # go edit mode\n bpy.ops.object.mode_set(mode='EDIT')\n # select al faces\n bpy.ops.mesh.select_all(action='SELECT')\n # recalculate outside normals \n bpy.ops.mesh.normals_make_consistent(inside=False)\n # go object mode again\n bpy.ops.object.editmode_toggle() \n\n#9. 
CLEAR GROUPS \ndef clearVertexGroups():\n #remove selection from all objects on scene\n bpy.ops.object.select_all(action='DESELECT')\n #digging scene\n context = bpy.context\n scene = context.scene\n #current layer number\n layerNum = currentLayer()\n #list of verticies\n vertexList = []\n #looking for objects on current layer\n for object in scene.objects:\n if object.layers[layerNum]:\n if object.type == 'MESH':\n #activating object to start editing it\n scene.objects.active = object\n #clear list of vertex groups in object if it somehow been there\n object.vertex_groups.clear() \n\n#debug \ndef emptyAdd(list):\n context = bpy.context\n i = 0\n for elem in list:\n i += 1\n print(i, elem)\n bpy.ops.object.select_all(action='DESELECT')\n obj_empty = bpy.data.objects.new('Empty' + str(i), None)\n obj_empty.location = elem \n context.scene.objects.link(obj_empty)\n \n\n#debug\ndef listPrint(list):\n print('___print list___')\n print('VVVVVVV')\n i = 0\n for elem in list:\n i += 1\n print(i, elem)\n print('WWWWWWW')\n\n#10. CREATE FILE WITH PIECES COORDS\ndef saveVertsFile(vertexDistObjCoColorList):\n #digging scene\n context = bpy.context\n scene = context.scene\n #current layer number\n layerNum = currentLayer()\n #create file if it doesnt exist\n filename = 'Wall.txt'\n file = open(filename, 'w+')\n #open file for writing\n file = open(filename, 'a')\n #list with distances to vertex from brick center\n distance = []\n #current brick color\n color = [0.0, 0.0, 0.0]\n #current brick coords\n coord = [0.0, 0.0, 0.0]\n i = 0\n for elem in vertexDistObjCoColorList:\n #middle distance\n distanceMiddle = 0\n if i == 0:\n color = elem[2]\n if color != elem[2]:\n for distElem in distance:\n distanceMiddle += distElem\n distanceMiddle /= len(distance)\n distance.clear()\n file.write(str(coord[0]) + ' ' + str(coord[1]) + ' ' + str(coord[2]) + ' ' + str(distanceMiddle) + ' ' + str(round(color[0], 2)) + ' ' + str(round(color[1], 2)) + ' ' + str(round(color[2], 2)) + '\\n')\n elif i == len(vertexDistObjCoColorList) - 1:\n for distElem in distance:\n distanceMiddle += distElem\n distanceMiddle /= len(distance)\n distance.clear()\n file.write(str(coord[0]) + ' ' + str(coord[1]) + ' ' + str(coord[2]) + ' ' + str(distanceMiddle) + ' ' + str(round(color[0], 2)) + ' ' + str(round(color[1], 2)) + ' ' + str(round(color[2], 2)) + '\\n')\n distance.append(elem[0])\n coord = elem[1]\n color = elem[2]\n i += 1\n file.close\n \ndef createPolyColorGroups(cont):\n bpy.ops.object.select_all(action='DESELECT')\n layerNum = currentLayer()\n #knocking to necessery scene\n context = bpy.context\n scene = context.scene\n #vertex color\n newColor = (0.0, 0.0, 0.0)\n #looking for objects on current layer\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.origin_set(type = 'ORIGIN_CURSOR')\n bpy.ops.object.select_all(action='DESELECT')\n for object in scene.objects:\n if object.layers[layerNum]:\n if object.type == 'MESH':\n #mesh from object\n mesh = bpy.context.object.data\n for poly in mesh.polygons:\n #here we need to pair vertex with group\n color = []\n for loop in poly.loop_indices:\n color = mesh.vertex_colors.active.data[loop].color\n #расчет a, b, c, d, color\n threeDots = []\n i = 0\n for vertexIndex in poly.vertices:\n vertex = object.data.vertices[vertexIndex]\n vertexWorldCo = object.matrix_world * vertex.co\n i += 1\n threeDots.append(vertexWorldCo)\n if i == 3:\n X1 = threeDots[0].x\n Y1 = threeDots[0].y\n Z1 = threeDots[0].z\n X2 = threeDots[1].x\n Y2 = threeDots[1].y\n Z2 = threeDots[1].z\n X3 = 
threeDots[2].x\n Y3 = threeDots[2].y\n Z3 = threeDots[2].z\n a = (Y2 - Y1)*(Z3 - Z1) - (Z2 - Z1)*(Y3 - Y1)\n b = (Z2 - Z1)*(X3 - X1) - (X2 - X1)*(Z3 - Z1)\n c = (X2 - X1)*(Y3 - Y1) - (Y2 - Y1)*(X3 - X1)\n d = - X1 * a - Y1 * b - Z1 * c \n polyColor.append([a, b, c, d, color])\n threeDots.clear()\n \n#11. CREATE FILE WITH POLY AND COLOR\ndef savePolyFile(polyColor):\n #create file if it doesnt exist\n filename = 'Wall.txt'\n file = open(filename, 'w+')\n #open file for writing\n file = open(filename, 'a')\n #current brick color\n color = [0.0, 0.0, 0.0]\n for elem in polyColor:\n color = elem[4]\n file.write(str(elem[0]) + ' ' + str(elem[1]) + ' ' + str(elem[2]) + ' ' + str(elem[3]) + ' ' + str(round(color[0], 2)) + ' ' + str(round(color[1], 2)) + ' ' + str(round(color[2], 2)) + '\\n')\n file.close\n\n \n\n\n#1. RENAME BRICKS \nrename() \n#2. REMOVING DOUBLES\nremoveDoubles()\n#3. CORRECTING TEXTURES\ntextureCorrection()\n#4. CREATING GROUPS OF VERTICES\nobjLocGroup = []\ngroupsCreation(objLocGroup)\n#listPrint(objLocGroup)\n#5. JOINING PIECES IN ONE MESH\njoinPieces()\n#6. MAKE PAIR OF VERTEX AND GROUP FOR COLORIZE\n#array where verticies assigned to groups\ngroupsAndVerticies = []\n#array where verticies coordinates assigned to color\nvertexDistObjCoColorList = []\nmakePairVertexGroup(groupsAndVerticies, objLocGroup, vertexDistObjCoColorList)\n#listPrint(vertexColorDistList)\n#emptyAdd(vertexColorDistList)\n#7. VERTEX COLORIfZE\npolyColor = []\ncolorizeVerticies(groupsAndVerticies)\n#8. NORMALS CORRECTION\nnormalsCorrection()\nbpy.ops.object.select_all(action='SELECT')\nbpy.ops.object.origin_set(type = 'ORIGIN_CURSOR')\nbpy.ops.object.select_all(action='DESELECT')\n#9. CLEAR GROUPS\n#clearVertexGroups()\n#10. CREATE POLY COLOR GROUPS\n#createPolyColorGroups(polyColor)\n#11. CREATE FILE WITH PIECES COORDS\n#saveVertsFile(vertexDistObjCoColorList)\n#savePolyFile(polyColor)","repo_name":"kircodeengineer/Turnbased-Game-Prototype","sub_path":"Walls/processingScript.py","file_name":"processingScript.py","file_ext":"py","file_size_in_byte":14813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
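+The a, b, c, d computed in createPolyColorGroups are the coefficients of the face's plane equation ax + by + cz + d = 0, with (a, b, c) the cross product of two edge vectors; the same arithmetic stands alone as:
+
+# Plane through three points, mirroring the computation above.
+def plane_from_points(p1, p2, p3):
+    (X1, Y1, Z1), (X2, Y2, Z2), (X3, Y3, Z3) = p1, p2, p3
+    # normal vector = (p2 - p1) x (p3 - p1)
+    a = (Y2 - Y1)*(Z3 - Z1) - (Z2 - Z1)*(Y3 - Y1)
+    b = (Z2 - Z1)*(X3 - X1) - (X2 - X1)*(Z3 - Z1)
+    c = (X2 - X1)*(Y3 - Y1) - (Y2 - Y1)*(X3 - X1)
+    d = -X1*a - Y1*b - Z1*c
+    return a, b, c, d
+
+# three corners of the z = 2 plane -> (0, 0, 1, -2)
+print(plane_from_points((0, 0, 2), (1, 0, 2), (0, 1, 2)))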
+{"seq_id":"23377341741","text":"#!/usr/bin/python3\n\nimport sys\nimport heapq\n\ndef docase():\n\tn = int(sys.stdin.readline())\n\tx = []\n\tl = []\n\tbestl = []\n\tfor i in range(n):\n\t\tr = sys.stdin.readline().split()\n\t\tx.append(int(r[0]))\n\t\tl.append(int(r[1]))\n\t\tbestl.append(0)\n\ttarget = int(sys.stdin.readline())\n\tq = []\n\theapq.heappush(q, (-x[0], 0))\n\tbestl[0] = x[0]\n\twhile len(q):\n\t\tpri, cur = heapq.heappop(q)\n\t\tif pri != -bestl[cur]:\n\t\t\tcontinue\n\t\tif x[cur]+bestl[cur] >= target:\n\t\t\treturn \"YES\"\n\t\ti = 1\n\t\twhile cur+i < n and x[cur+i] <= x[cur]+bestl[cur]:\n\t\t\tavl = min(bestl[cur], x[cur+i]-x[cur], l[cur+i])\n\t\t\tif avl > bestl[cur+i]:\n\t\t\t\tbestl[cur+i] = avl\n\t\t\t\theapq.heappush(q, (-avl, cur+i))\n\t\t\ti += 1\n\treturn \"NO\"\n\nncases = int(sys.stdin.readline())\ncase = 0\nfor case in range(ncases):\n sys.stdout.write(\"Case #%d: %s\\n\" % (case+1, docase()))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_108/49.py","file_name":"49.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}