diff --git "a/2712.jsonl" "b/2712.jsonl" new file mode 100644--- /dev/null +++ "b/2712.jsonl" @@ -0,0 +1,638 @@ +{"seq_id":"147844977","text":"# Bir muzik calar objesi yapmanizi istiyoruz. Class attribute olarak bos bir song listesi olusturun.\n# Class methods olarak song listesini sifirlama, +listeyi goruntuleme, song ekleme, song silme,\n# sonraki parcayi cal, onceki parcayi cal, +karisik cal ozelliklerini ekleyin.\nimport time as zaman\nimport random as rastgele\n\nclass Playplay():\n\n def __init__(self, songs = []):\n self.songs = songs\n self. att = 1\n self.ses = 75\n self.calansong =\"\"\n\n################################################################\n#song ekle\n def songEkle(self,song):\n self.songs.append(song)\n\n################################################################\n#song listesi\n def songListesi(self):\n print(self.songs)\n\n################################################################\n#song seç\n def songSec(self):\n sayac= 1\n for i in self.songs:\n print(\"{}.{}\".format(sayac,i))\n sayac +=1\n sel=int(input(\"Enter song number: \"))\n print(\"Song is changing\")\n zaman.sleep(0.5)\n self.calansong=self.songs[sel -1]\n\n################################################################\n#rastgele song\n def rastgelesong(self):\n rastgele_sayi = rastgele.randint(0,len(self.songs))\n self.calansong = self.songs[rastgele_sayi]\n\n################################################################\n#kapat\n def shutdown(self):\n print(\"Mini Music Player is shutting down...\")\n zaman.sleep(1)\n print(\"Goodbye!!\")\n self.att= 0\n\n################################################################\n#song sil\n def songSil(self):\n sel= int(input(\"Enter the number of song to delete: \"))\n self.songs.pop(sel-1)\n\n################################################################\n#liste sil\n def songListesil(self):\n print(\"Clearing list..\")\n zaman.sleep(1)\n self.songs.clear()\n if not self.songListesi:\n self.calansong.clear()\n zaman.sleep(1)\n################################################################\n# ses arttır\n def sesArttir(self):\n if (self.ses >= 100):\n pass\n else:\n print(\"Vol increasing!\")\n zaman.sleep(0.5)\n self.ses += 5\n print(\"Volume increased, vol is :{}\".format(self.ses))\n\n################################################################\n# ses azalt\n def sesAzalt(self):\n if (self.ses <= 0):\n pass\n else:\n print(\"Vol decreasing!\")\n zaman.sleep(2)\n self.ses -= 5\n print(\"Volume decreased, vol is :{}\".format(self.ses))\n\n################################################################\n #def sonrakisong(self):\n\n################################################################\n #def oncekisong(self):\n\n################################################################\n def menu(self):\n print(f\"\"\"___________ M e n ü ___________\nNow playing.{self.calansong}\n___________ ~ ~ ~ ~ ___________\n1»Select 2»Random Sel \n3»Previous 4»Next \n5»Add 6»Delete\n7»Del list 8»Vol + \n9»Vol - 0»Off\"\"\")\n\n\ncasa1 =Playplay(songs=[\"Bella Caio\"])\nprint(\" *Mini Music Player*\")\nwhile casa1.att:\n casa1.menu()\n sel=int(input(\"*Selection* \"))\n if (sel==1):\n casa1.songSec()\n elif (sel == 2):\n casa1.rastgelesong()\n elif (sel == 3):\n casa1.sonrakiparcayical()\n elif (sel == 4):\n casa1.oncekiparcayical()\n elif (sel == 5):\n song=input(\"Type your song: \")\n casa1.songEkle(song)\n elif (sel == 6):\n casa1.songSil()\n elif (sel == 7):\n casa1.songListesil()\n elif (sel == 8):\n casa1.sesArttir()\n elif (sel == 9):\n 
casa1.sesAzalt()\n else:\n casa1.shutdown()\n","sub_path":"musiccplayer.py","file_name":"musiccplayer.py","file_ext":"py","file_size_in_byte":3833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"533229996","text":"from collections import Counter\n\nimport json\nimport math\nimport numpy as np\nimport pandas as pd\n\n\ndef get_language_data():\n df = pd.read_csv('../data/profile/users_profile_data.csv')\n\n df['language'] = map(lambda lang: 'en' if 'en-' in lang else lang, df['language'])\n df['language'] = map(lambda lang: 'es' if 'es-' in lang else lang, df['language'])\n df['language'] = map(lambda lang: 'pt' if 'pt-' in lang else lang, df['language'])\n\n # df = df.loc[]\n top10 = ['en', 'es', 'pt', 'fr', 'it', 'de', 'ja', 'ar''en-gb', 'id']\n\n df['language'] = map(lambda lang: lang if lang in top10 else 'other', df['language'])\n\n return dict(zip(df['user_id'], df['language']))\n\n\ndef get_freqs(values):\n values = np.array(values).astype(float)\n return values / np.sum(values)\n\n\ndef ecdf(values):\n return Counter(values)\n\n\ndef calc_cdf(Fn, x):\n x = round(x, 4)\n keys = filter(lambda f: round(f, 4) <= x, Fn)\n if len(keys) == 0:\n return 0.\n return np.sum(map(lambda p: p * Fn[p], keys))\n\n\ndef calc_ccdf(Fn, x):\n return 1 - calc_cdf(Fn=Fn, x=x)\n\n\ndef calc_entropy(freqs):\n freqs = get_freqs(freqs)\n log_size = math.log(len(freqs), 2)\n return - np.sum(freqs * np.log2(freqs)) / log_size\n\n\ndef calc_top(freqs):\n freqs = get_freqs(freqs)\n std = np.round(np.std(freqs), 4)\n if std > 0.:\n return np.round(np.ceil((np.max(freqs) - np.mean(freqs)) / std), 5)\n else:\n return 0.\n\n\ndef calc_mean(values):\n return np.round(np.mean(values), 5)\n\n\ndef calc_ntop(freqs):\n mx = np.max(freqs)\n mn = np.mean(freqs)\n return np.round((mx - mn) / (mx + mn), 4)\n\n\ndef calc_ttop(freqs):\n mx = np.max(freqs)\n mn = np.mean(freqs)\n return (mx - mn) / (mx + mn)\n\n\ndef handle_summary(input_file, output_file, lang=True):\n with open(input_file, 'r') as infile:\n language_data = get_language_data()\n keys = []\n tops = []\n ntops = []\n entropies = []\n ccdfs = []\n language = []\n for line in infile.readlines():\n jd = json.loads(line)\n key = jd.keys()[0]\n values = get_freqs(jd[key])\n if len(values) >= 10:\n keys.append(key)\n tops.append(calc_top(freqs=values))\n nt = calc_ntop(freqs=values)\n ntops.append(nt if nt > 0. else 0.)\n ent = calc_entropy(freqs=values)\n entropies.append(ent if ent < 1. else 1.)\n _ccdf = calc_ccdf(Fn=ecdf(values=values), x=calc_mean(values))\n _ccdf = _ccdf if _ccdf > 0. 
else 0.\n ccdfs.append(_ccdf)\n if lang:\n language.append(language_data[int(key)])\n df = pd.DataFrame()\n df['id'] = keys\n df['top'] = tops\n df['ntop'] = ntops\n df['entropy'] = entropies\n df['ccdf'] = ccdfs\n if lang:\n df['language'] = language\n df.to_csv(output_file)\n\n # print calc_top(get_freqs([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\n","sub_path":"ccdf/ccdf.py","file_name":"ccdf.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"4805417","text":"\n\n#calss header\nclass _INITIATE():\n\tdef __init__(self,): \n\t\tself.name = \"INITIATE\"\n\t\tself.definitions = [u'to cause something to begin: ', u'to teach someone about an area of knowledge, or to allow someone into a group by a special ceremony: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_initiate.py","file_name":"_initiate.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"43505635","text":"\n\n# Quiz Class\n'''\nClass: A class is a blueprint for a particular object\nit stores the attributes of that object\n\n\nIn this class a quiz has 3 attributes - the question the valid choices and the answer\n'''\n\n\nclass stocks:\n\n def __init__(self, question, answer_keys, correct_answer, points):\n self.question = question # question to be asked\n self.answer_keys = answer_keys # options for the answer\n self.correct_answer = correct_answer # correct answer\n self.points = points # points for correct answer\n\n\n# create a list of questions, answers and answer keys\n# (\"\\n\" will put the text on a new line in the console)\nquestions = [\n # Question (instance) #answer keys #answer\n stocks(\"What color are apples? (type 'exit' to quit)\" + \"\\n\" + \"(a) Green/Red\" + \\\n \"\\n\" + \"(b) 5\" + \"\\n\" + \"(c) black \\n\\n\", [\"a\", \"b\", \"c\"], \"a\", 5),\n stocks(\"What color are bananas? 
(type 'exit' to quit)\" + \"\\n\" + \"(a) white\" + \\\n \"\\n\" + \"(b) yellow\" + \"\\n\" + \"(c) black \\n\\n\", [\"a\", \"b\", \"c\"], \"b\", 5),\n stocks(\"What color are oranges (type 'exit' to quit)\" + \"\\n\" + \"(a) houses\" + \"\\n\" + \"(b) the sun\" + \"\\n\" + \"(c) orange \\n\\n\", [\"a\", \"b\", \"c\"], \"c\", 5)]\n\n\n# function to present questions to user\ndef run_test(questions):\n score = 0 # set base score as zero\n\n ''' question_no is an aribitrary name given to represent the index of the \n questions list (this can be anything x,z,e,t etc'''\n\n for question_no in questions: # call each question and ask user for an answser\n\n # call question and take answer\n user_answer = input(question_no.question).lower().strip()\n\n # if the user types exit convert answer to upper and quit the program\n if user_answer.lower().strip() == 'exit':\n print('bye!')\n quit()\n\n ###validate user input###\n # repeat user prompt if answer is not in approved list for that question\n answer_in_scope = False\n while answer_in_scope is False:\n # if the user answe is not in the choice list prompt user for another answer\n if user_answer not in question_no.answer_keys:\n answer_in_scope = False\n print(\"Not A valid option, Select a valid option: \")\n user_answer = input(question_no.question).lower().strip()\n\n else:\n answer_in_scope = True # assign true if the answer is a valid choice to move on\n\n ###validate answer###\n if (user_answer == question_no.correct_answer):\n score += question_no.points # if correct add points for that question to the score\n print(\"Correct!\" + \"\\n\\n\") # identify the answer is correct\n\n else:\n # identify the answer is wrong\n print(\"Nope! the correct answer is: \" +\n str(question_no.correct_answer) + \"\\n\\n\")\n\n # quiz points is the iteration variable - we sum points for the all the questions to get the total points\n print(\"You're score is: \" + str(score) + \"/\" +\n str(sum(quiz_points.points for quiz_points in questions))) # calculate the score\n\n\n# run the quiz\nrun_test(questions)\n","sub_path":"001.2qnasamples.py","file_name":"001.2qnasamples.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"279588371","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2015 FingerApp Studio, Inc.\n\nimport sys\n\nfrom tornado import options\n\nfrom net.http.controller import PushHttpServer\nfrom logger.controller import LogManager\n\n\ndef print_usage(fname):\n print(\"Usage: %s [config] [task_id=optional]\" % fname)\n sys.exit()\n\n\ndef main(argv):\n\n print(\"SNMP API Server\")\n\n if len(argv) < 2:\n print_usage(argv[0])\n\n options.parse_command_line()\n task_id = 0\n config_file = argv[1]\n\n if len(argv) > 2:\n task_id = int(argv[2])\n\n # http server instance\n push_http_server = PushHttpServer(\n config_file=config_file,\n task_id=task_id\n )\n\n # enable log\n push_http_server.enable_log()\n\n general = LogManager.get_instance(\"general\")\n general.debug(\"Piccolo API Server is started in %s mode...\"\n % push_http_server.stage)\n general.debug(\"Task ID : %d\" % push_http_server.task_id)\n general.debug(\"HTTP listen port : %d\" % push_http_server.listen_port)\n\n # run\n push_http_server.run()\n\n return True\n\n\nif __name__ == \"__main__\":\n 
main(sys.argv)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"585165373","text":"#\nlog_level = 10\n\n# CRITICAL: 50\n# ERROR: 40\n# WARNING: 30\n# INFO: 20\n# DEBUG: 10\n\nbg_color = '#2b2b2b'\nheader_color = '#8c8c8c'\nheader_font_size = 11\ntext_color = '#d1d1d1'\nsong_title_color = '#5ec9ff'\nfont = 'Arial'\nitem_font = 'Arial'\nsong_font = 'Arial'\nplan_text_size = 14\nother_text_size = 12\nproducer_note_text_size = 9\nlive_color = '#a34444'\ndefault_font = 'Arial'\nitem_time_size = 8\nplan_item_frame_width = 1400\n\nseparator_color ='#3f3f3f'\n\naccent_color_1 = '#ffc638'\naccent_text_color = '#2b2b2b'\naccent_text_size = 18\naccent_text_font = 'Arial Bold'\n\nclock_text_color = text_color\nclock_text_size = 38\nclock_text_font = 'Arial Bold'\nclock_overrun_color = '#a34444'\nclock_section_live_item_text_size = 32\n\napp_cue_font_size = 6\n\ntext_entry_box_bg_color = '#444444'\n\ndisplay_kipros = True\n\nkipro_idle_color = '#317c42'\nkipro_recording_color = live_color\nkipro_error_color = '#ff0000'\nkipro_unable_to_commmunicate_color = '#7a7a7a'\n\ninterval_update_kipros = True\nkipro_update_interval = 1 # in seconds. Only applies if interval_update_kipros and display_kipros is True.\n\nglobal_cue_font_size = 11\n\n\ncurrent_cues_text_size = 11\n\noptions_button_text_size = 10\n\nui_debug_color = '#7aa825'\nitem_separator_color = '#dddddd'\n\nreminder_color = '#1c1c1c'\nreminder_font_size = 24\n\ndelay_kipro_start = True\n#adds .5s of delay between multiple kipros, so they import in the correct order in a nle\n\nkipro_timeout_threshold = 1 #in seconds\n\n# how often to refresh live adjacent plan\nadjacent_plan_refresh_interval = 5\n\nenable_webserver = False\n\nclose_global_cues_menu_after_cue = False\n\n#uuids\nadvance_on_next_uuid = 'a0fac1cd-3bff-4286-80e2-20b284361ba0'\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"609056295","text":"from zope.interface import Interface\nfrom plone.indexer import indexer\nfrom plone.directives import form\nfrom plone.namedfile.field import NamedBlobImage\nfrom plone.app.textfield import RichText\nfrom z3c.relationfield.schema import RelationList, RelationChoice\nfrom plone.formwidget.contenttree import ObjPathSourceBinder\n\nfrom vwc.dexgallery import _\n\nclass IPhoto(form.Schema):\n \"\"\"\n A photo appearing in the image gallery.\n \"\"\"\n image = NamedBlobImage(\n title = _(u'Photo'),\n )\n \n caption = RichText(\n title = _(u'Caption'),\n required = False,\n )\n \n tags = RelationList(\n title = _(u'Tags'),\n default = [],\n value_type=RelationChoice(\n title = _(u'Tag'),\n source = ObjPathSourceBinder(\n navigation_tree_query = {'path': {'query':'/'}},\n portal_type = 'Document',\n ),\n ),\n required = False,\n )\n\n@indexer(Interface)\ndef null_indexer(obj):\n raise AttributeError\n\n@indexer(IPhoto)\ndef photo_tags(obj):\n tags = set()\n for relation in obj.tags:\n if relation.isBroken():\n continue\n tags.add(relation.to_object.Title())\n return list(tags)\n\n@indexer(IPhoto)\ndef SearchableText(obj):\n return ' '.join([obj.Title(), obj.Description(), 
obj.caption.output])\n","sub_path":"src/vwc.dexgallery/vwc/dexgallery/content/photo.py","file_name":"photo.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"364710241","text":"# -*- coding:utf-8 -*-\nclass Solution:\n def rectCover(self, number):\n # write code here)\n if number <= 0:\n return 0\n list = [1,2]\n while number>=2:\n list[0],list[1] = list[1], list[0]+list[1]\n number -= 1\n return list[0]\n","sub_path":"10.矩形覆盖.py","file_name":"10.矩形覆盖.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"79348967","text":"from __future__ import print_function\nfrom __future__ import division\nimport numpy as np\nimport os\nfrom matplotlib import pyplot as plt\nimport cv2\nimport pandas as pd\nimport seaborn as sns\nimport scipy.stats as stats\nimport scipy.misc\nfrom scipy import ndimage\nimport matplotlib\nfrom numpy import array\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n#from scipy.stats import threshold\nfrom csv_processing import get_zone_dataframe\nimport sys\n\n\n# GLOBALS\nAPS_FILE_NAME = '00360f79fd6e02781457eda48f85da90'\nTHREAT_LABELS = 'stage1_labels.csv'\nCOLORMAP = 'gray'\nZONES = 17\n\n# Divide the available space on an image into 16 sectors. In the [0] image these\n# zones correspond to the TSA threat zones. But on rotated images, the slice\n# list uses the sector that best shows the threat zone\nsector01_pts = np.array([[0, 160], [200, 160], [200, 230], [0, 230]], np.int32)\nsector02_pts = np.array([[0, 0], [200, 0], [200, 160], [0, 160]], np.int32)\nsector03_pts = np.array([[330, 160], [512, 160], [512, 240], [330, 240]], np.int32)\nsector04_pts = np.array([[350, 0], [512, 0], [512, 160], [350, 160]], np.int32)\n\n# sector 5 is used for both threat zone 5 and 17\nsector05_pts = np.array([[0, 220], [512, 220], [512, 300], [0, 300]], np.int32)\n\nsector06_pts = np.array([[0, 300], [256, 300], [256, 360], [0, 360]], np.int32)\nsector07_pts = np.array([[256, 300], [512, 300], [512, 360], [256, 360]], np.int32)\nsector08_pts = np.array([[0, 370], [225, 370], [225, 450], [0, 450]], np.int32)\nsector09_pts = np.array([[225, 370], [275, 370], [275, 450], [225, 450]], np.int32)\nsector10_pts = np.array([[275, 370], [512, 370], [512, 450], [275, 450]], np.int32)\nsector11_pts = np.array([[0, 450], [256, 450], [256, 525], [0, 525]], np.int32)\nsector12_pts = np.array([[256, 450], [512, 450], [512, 525], [256, 525]], np.int32)\nsector13_pts = np.array([[0, 525], [256, 525], [256, 600], [0, 600]], np.int32)\nsector14_pts = np.array([[256, 525], [512, 525], [512, 600], [256, 600]], np.int32)\nsector15_pts = np.array([[0, 600], [256, 600], [256, 660], [0, 660]], np.int32)\nsector16_pts = np.array([[256, 600], [512, 600], [512, 660], [256, 660]], np.int32)\n\n# Each element in the zone_slice_list contains the sector to use in the call to roi()\nzone_slice_list = [[ # threat zone 1\n sector01_pts, sector01_pts, sector01_pts, None,\n None, None, sector03_pts, sector03_pts,\n sector03_pts, sector03_pts, sector03_pts,\n None, None, sector01_pts, sector01_pts, sector01_pts],\n\n [ # threat zone 2\n sector02_pts, sector02_pts, sector02_pts, None,\n None, None, sector04_pts, sector04_pts,\n sector04_pts, sector04_pts, sector04_pts, None,\n None, sector02_pts, sector02_pts, sector02_pts],\n\n [ # threat zone 3\n sector03_pts, sector03_pts, sector03_pts, 
sector03_pts,\n None, None, sector01_pts, sector01_pts,\n sector01_pts, sector01_pts, sector01_pts, sector01_pts,\n None, None, sector03_pts, sector03_pts],\n\n [ # threat zone 4\n sector04_pts, sector04_pts, sector04_pts, sector04_pts,\n None, None, sector02_pts, sector02_pts,\n sector02_pts, sector02_pts, sector02_pts, sector02_pts,\n None, None, sector04_pts, sector04_pts],\n\n [ # threat zone 5\n sector05_pts, sector05_pts, sector05_pts, sector05_pts,\n sector05_pts, sector05_pts, sector05_pts, sector05_pts,\n None, None, None, None,\n None, None, None, None],\n\n [ # threat zone 6\n sector06_pts, None, None, None,\n None, None, None, None,\n sector07_pts, sector07_pts, sector06_pts, sector06_pts,\n sector06_pts, sector06_pts, sector06_pts, sector06_pts],\n\n [ # threat zone 7\n sector07_pts, sector07_pts, sector07_pts, sector07_pts,\n sector07_pts, sector07_pts, sector07_pts, sector07_pts,\n sector06_pts, None, None, None,\n None, None, None, None],\n\n [ # threat zone 8\n sector08_pts, sector08_pts, None, None,\n None, None, None, sector10_pts,\n sector10_pts, sector10_pts, sector10_pts, sector10_pts,\n sector08_pts, sector08_pts, sector08_pts, sector08_pts],\n\n [ # threat zone 9\n sector09_pts, sector09_pts, sector08_pts, sector08_pts,\n sector08_pts, None, None, None,\n sector09_pts, sector09_pts, None, None,\n None, None, sector10_pts, sector09_pts],\n\n [ # threat zone 10\n sector10_pts, sector10_pts, sector10_pts, sector10_pts,\n sector10_pts, sector08_pts, sector10_pts, None,\n None, None, None, None,\n None, None, None, sector10_pts],\n\n [ # threat zone 11\n sector11_pts, sector11_pts, sector11_pts, sector11_pts,\n None, None, sector12_pts, sector12_pts,\n sector12_pts, sector12_pts, sector12_pts, None,\n sector11_pts, sector11_pts, sector11_pts, sector11_pts],\n\n [ # threat zone 12\n sector12_pts, sector12_pts, sector12_pts, sector12_pts,\n sector12_pts, sector11_pts, sector11_pts, sector11_pts,\n sector11_pts, sector11_pts, sector11_pts, None,\n None, sector12_pts, sector12_pts, sector12_pts],\n\n [ # threat zone 13\n sector13_pts, sector13_pts, sector13_pts, sector13_pts,\n None, None, sector14_pts, sector14_pts,\n sector14_pts, sector14_pts, sector14_pts, None,\n sector13_pts, sector13_pts, sector13_pts, sector13_pts],\n\n [ # sector 14\n sector14_pts, sector14_pts, sector14_pts, sector14_pts,\n sector14_pts, None, sector13_pts, sector13_pts,\n sector13_pts, sector13_pts, sector13_pts, None,\n None, None, None, None],\n\n [ # threat zone 15\n sector15_pts, sector15_pts, sector15_pts, sector15_pts,\n None, None, sector16_pts, sector16_pts,\n sector16_pts, sector16_pts, None, sector15_pts,\n sector15_pts, None, sector15_pts, sector15_pts],\n\n [ # threat zone 16\n sector16_pts, sector16_pts, sector16_pts, sector16_pts,\n sector16_pts, sector16_pts, sector15_pts, sector15_pts,\n sector15_pts, sector15_pts, sector15_pts, None,\n None, None, sector16_pts, sector16_pts],\n\n [ # threat zone 17\n None, None, None, None,\n None, None, None, None,\n sector05_pts, sector05_pts, sector05_pts, sector05_pts,\n sector05_pts, sector05_pts, sector05_pts, sector05_pts]]\n\n# ----------------------------------------------------------------------------------\n# read_header(infile): takes an aps file and creates a dict of the data\n#\n# infile: an aps file\n#\n# returns: all of the fields in the header\n# ----------------------------------------------------------------------------------\ndef read_header(APS_FILE_NAME):\n # declare dictionary\n h = dict()\n\n with open(APS_FILE_NAME, 'r+b') 
as fid:\n h['filename'] = b''.join(np.fromfile(fid, dtype='S1', count=20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype='S1', count=20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype='S1', count=80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype='S1', count=80))\n h['energy_type'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['config_type'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['file_type'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['trans_type'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['scan_type'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['data_type'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype='S1', count=16))\n h['frequency'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['mat_velocity'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['num_pts'] = np.fromfile(fid, dtype=np.int32, count=1)\n h['num_polarization_channels'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['spare00'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['band_width'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['spare01'] = np.fromfile(fid, dtype=np.int16, count=5)\n h['polarization_type'] = np.fromfile(fid, dtype=np.int16, count=4)\n h['record_header_size'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['word_type'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['word_precision'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['min_data_value'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['max_data_value'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['avg_data_value'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['data_scale_factor'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['data_units'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['surf_removal'] = np.fromfile(fid, dtype=np.uint16, count=1)\n h['edge_weighting'] = np.fromfile(fid, dtype=np.uint16, count=1)\n h['x_units'] = np.fromfile(fid, dtype=np.uint16, count=1)\n h['y_units'] = np.fromfile(fid, dtype=np.uint16, count=1)\n h['z_units'] = np.fromfile(fid, dtype=np.uint16, count=1)\n h['t_units'] = np.fromfile(fid, dtype=np.uint16, count=1)\n h['spare02'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['x_return_speed'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['y_return_speed'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['z_return_speed'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['scan_orientation'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['scan_direction'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['data_storage_order'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['scanner_type'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['x_inc'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['y_inc'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['z_inc'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['t_inc'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['num_x_pts'] = np.fromfile(fid, dtype=np.int32, count=1)\n h['num_y_pts'] = np.fromfile(fid, dtype=np.int32, count=1)\n h['num_z_pts'] = np.fromfile(fid, dtype=np.int32, count=1)\n h['num_t_pts'] = np.fromfile(fid, dtype=np.int32, count=1)\n h['x_speed'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['y_speed'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['z_speed'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['x_acc'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['y_acc'] 
= np.fromfile(fid, dtype=np.float32, count=1)\n h['z_acc'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['x_motor_res'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['y_motor_res'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['z_motor_res'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['x_encoder_res'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['y_encoder_res'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['z_encoder_res'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype='S1', count=8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype='S1', count=8))\n h['depth_recon'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['x_max_travel'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['y_max_travel'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['elevation_offset_angle'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['z_max_travel'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['adc_type'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['spare06'] = np.fromfile(fid, dtype=np.int16, count=1)\n h['scanner_radius'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['x_offset'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['y_offset'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['z_offset'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['t_delay'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['range_gate_start'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['range_gate_end'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['ahis_software_version'] = np.fromfile(fid, dtype=np.float32, count=1)\n h['spare_end'] = np.fromfile(fid, dtype=np.float32, count=10)\n\n return h\n\n\n# ----------------------------------------------------------------------------------\n# read_data(infile): reads and rescales any of the four image types\n#\n# infile: an .aps, .aps3d, .a3d, or ahi file\n#\n# returns: the stack of images\n#\n# note: word_type == 7 is an np.float32, word_type == 4 is np.uint16\n# ----------------------------------------------------------------------------------\ndef read_data(infile):\n # read in header and get dimensions\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n\n extension = os.path.splitext(infile)[1]\n\n with open(infile, 'rb') as fid:\n\n # skip the header\n fid.seek(512)\n\n # handle .aps and .a3aps files\n if extension == '.aps' or extension == '.a3daps':\n\n if (h['word_type'] == 7):\n data = np.fromfile(fid, dtype=np.float32, count=nx * ny * nt)\n\n elif (h['word_type'] == 4):\n data = np.fromfile(fid, dtype=np.uint16, count=nx * ny * nt)\n\n # scale and reshape the data\n data = data * h['data_scale_factor']\n data = data.reshape(nx, ny, nt, order='F').copy()\n\n # handle .a3d files\n elif extension == '.a3d':\n\n if (h['word_type'] == 7):\n data = np.fromfile(fid, dtype=np.float32, count=nx * ny * nt)\n\n elif (h['word_type'] == 4):\n data = np.fromfile(fid, dtype=np.uint16, count=nx * ny * nt)\n\n # scale and reshape the data\n data = data * h['data_scale_factor']\n data = data.reshape(nx, nt, ny, order='F').copy()\n\n # handle .ahi files\n elif extension == '.ahi':\n data = np.fromfile(fid, dtype=np.float32, count=2 * nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0, :, :, :].copy()\n imag = data[1, :, :, :].copy()\n\n if 
extension != '.ahi':\n return data\n else:\n return real, imag\n\n\n# ----------------------------------------------------------------------------------\n# get_single_image(infile, nth_image): returns the nth image from the image stack\n#\n# infile: an aps file\n#\n# returns: an image\n# ----------------------------------------------------------------------------------\ndef get_single_image(infile, nth_image):\n # read in the aps file, it comes in as shape(512, 620, 16)\n img = read_data(infile)\n\n # transpose so that the slice is the first dimension shape(16, 620, 512)\n img = img.transpose()\n\n return np.flipud(img[nth_image])\n\n\n#----------------------------------------------------------------------------------\n# convert_to_grayscale(img): converts a ATI scan to grayscale\n#\n# infile: an aps file\n#\n# returns: an image\n#----------------------------------------------------------------------------------\ndef convert_to_grayscale(img):\n # scale pixel values to grayscale\n base_range = np.amax(img) - np.amin(img)\n rescaled_range = 255 - 0\n img_rescaled = (((img - np.amin(img)) * rescaled_range) / base_range)\n return np.uint8(img_rescaled)\n\ndef print_header(header):\n print('{:16}{}'.format('Key', 'Value'))\n for data_item in sorted(header):\n print ('{:15}:{}'.format(data_item, header[data_item]))\n print('')\n\ndef get_crop_dimensions(angle, zone):\n threat_zone = zone_slice_list[zone - 1]\n #print('threat_zone_1',threat_zone)\n\n sector_pts = threat_zone[angle - 1]\n #print('sector_pts', sector_pts)\n\n if sector_pts is not None:\n sector_pts_00 = sector_pts[:, 0]\n sector_pts_01 = sector_pts[:, 1]\n min_0 = min(sector_pts_00)\n max_0 = max(sector_pts_00)\n min_1 = min(sector_pts_01)\n max_1 = max(sector_pts_01)\n crop_zone = [min_1, max_1, min_0, max_0]\n return crop_zone\n else:\n return None\n\n# Use this function for batch processing, see test() for example\n# No Thresholding done yet\ndef get_cropped_zones(data_dir, filelist, file_extension, angle):\n zones = []\n #zones.append((None, None))\n\n for zone in range(1, ZONES + 1):\n #tuple (zoned images, zoned lables)\n zonedImage = ([], [])\n zones.append(zonedImage)\n\n # [Id, Zone, Prob]\n df = get_zone_dataframe()\n\n #filter on filelist\n df = df[df['Id'].isin(filelist)]\n i=1;\n for filename in filelist:\n file_df = df[df['Id'] == filename]\n single_image = get_single_image(data_dir + filename + '.' 
+ file_extension, angle-1) #angle needs to subracted with to normalze with get_crop_dimenisons function\n single_image = convert_to_grayscale(single_image)\n for zone in range(1, ZONES + 1):\n #print('Runnning zone:' + str(zone) + ' angle:' + str(angle))\n\n crop_dim = get_crop_dimensions(angle, zone)\n if crop_dim is not None:\n cropped_zone = single_image[crop_dim[0]:crop_dim[1],crop_dim[2]:crop_dim[3]]\n zones[zone-1][0].append(cropped_zone)\n label = file_df[file_df['Zone'] == 'Zone' + str(zone)]['Prob']\n zones[zone-1][1].append([int(label.values[0])])\n else:\n a=1\n #print('Zone ' + str(zone) + ' not available for ' + filename + ' at angle ' + str(angle))\n print('Percentage Complete - ' + \"{0:.2f}\".format(i*100/len(filelist)) + \"%\\r\",end=\"\")\n #sys.stdout.flush()\n i=i+1\n print(\"\")\n return zones\n\n\ndef test():\n\n # setup\n angle = 8\n zone = 1\n COLORMAP = 'gray'\n filename = './' + APS_FILE_NAME + '.aps'\n\n try:\n for f in os.listdir('./test'):\n os.remove(os.path.join('./test/', f))\n os.rmdir('./test')\n except OSError:\n pass\n except Exception as e:\n print(e)\n\n try:\n os.mkdir('./test')\n except OSError:\n pass\n except Exception as e:\n print(e)\n\n # test\n print('Printing headers:')\n print_header(read_header(filename))\n\n # test\n print('Reading image data')\n read_data(filename)\n\n # test\n print('converting to grayscale')\n grayscale = convert_to_grayscale(get_single_image(filename, angle))\n plt.imshow(grayscale, cmap = plt.get_cmap(COLORMAP))\n plt.savefig('./test/full_image', dpi=100)\n\n # test\n print('Getting zones information')\n zones = get_cropped_zones('./', filelist = [APS_FILE_NAME], file_extension='aps', angle=angle)\n\n for zone in range(1, ZONES + 1):\n print ('Saving zone ' + str(zone) + ' image')\n images, labels = zones[zone]\n if len(images) > 0:\n plt.imshow(images[0], cmap = plt.get_cmap(COLORMAP))\n plt.savefig('./test/img_zone-' + str(zone) + '_label-' + str(labels[0]), dpi=100)\n else:\n print('No images present in this zone!')\n\n#--matrix--\n# np.set_printoptions(precision=3,)\n# print(*crop)\n# print(type(crop))\n# thresholded = threshold(crop, 50)\n# #np.savetxt('test1.txt', crop_zone_13, fmt='%d')\n# np.savetxt('test_with_object.txt', thresholded, fmt='%d', newline='\\n' )\n\n#if __name__ == '__main__':\n #print('Executing test')\n #test()\n","sub_path":"segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":19984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"403088569","text":"# -*- coding: utf-8 -*-\nimport pytest\nfrom mock import MagicMock\nfrom pyleecan.Classes.Arc1 import Arc1\nfrom pyleecan.Classes.Arc2 import Arc2\nfrom pyleecan.Classes.Segment import Segment\nfrom pyleecan.Classes.SurfLine import SurfLine\nfrom numpy import pi\n\n\nclass Test_SurfLine_meth(object):\n \"\"\"Unittest for SurfLine methods\"\"\"\n\n def test_comp_length(self):\n \"\"\"Check that you can compute the length of the Surface\"\"\"\n line1 = Arc1(begin=1, end=1j, radius=1)\n line1.comp_length = MagicMock(return_value=1)\n line2 = Arc2(begin=1, center=0, angle=pi / 2)\n line2.comp_length = MagicMock(return_value=1)\n line3 = Segment(begin=1j, end=0)\n line3.comp_length = MagicMock(return_value=1)\n\n surface = SurfLine(line_list=[line1, line2, line3], label=\"test\", point_ref=0)\n length = surface.comp_length()\n line1.comp_length.assert_called_once()\n line2.comp_length.assert_called_once()\n line3.comp_length.assert_called_once()\n assert round(abs(abs(length - 
3) - 0), 7) == 0\n\n def test_rotate(self):\n \"\"\"Check that you can rotate the surface\"\"\"\n line1 = Arc1(begin=1, end=1j, radius=1)\n line2 = Arc2(begin=1, center=0, angle=pi / 2)\n line3 = Segment(begin=1j, end=0)\n surface = SurfLine(line_list=[line1, line2, line3], label=\"test\", point_ref=0)\n surface.rotate(pi / 2)\n assert round(abs(abs(line1.begin - 1j) - 0), 7) == 0\n assert round(abs(abs(line1.end + 1) - 0), 7) == 0\n assert round(abs(abs(line2.begin - 1j) - 0), 7) == 0\n assert round(abs(line2.center - 0), 7) == 0\n assert round(abs(abs(line3.begin + 1) - 0), 7) == 0\n assert round(abs(line3.end - 0), 7) == 0\n\n def test_translate(self):\n \"\"\"Check that you can rotate the surface\"\"\"\n line1 = Arc1(begin=1, end=1j, radius=1)\n line2 = Arc2(begin=1, center=0, angle=pi / 2)\n line3 = Segment(begin=1j, end=0)\n surface = SurfLine(line_list=[line1, line2, line3], label=\"test\", point_ref=0)\n surface.translate(1j)\n assert round(abs(abs(line1.begin - 1j) - 1), 7) == 0\n assert round(abs(line1.end - 2j), 7) == 0\n assert round(abs(abs(line2.begin - 1j) - 1), 7) == 0\n assert round(abs(abs(line2.center - 1j) - 0), 7) == 0\n assert round(abs(abs(line3.begin - 2j) - 0), 7) == 0\n assert round(abs(line3.end - 1j), 7) == 0\n","sub_path":"Tests/Methods/Geometry/test_SurfLine_meth.py","file_name":"test_SurfLine_meth.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"224075393","text":"# https://www.interviewbit.com/problems/allocate-books/\n\nclass Solution:\n # @param A : list of integers\n # @param B : integer\n # @return an integer\n def books(self, A, B):\n if len(A) < B:\n return -1\n def find_min_num_student(A, max_num_pages):\n min_num_student = 1\n cur = 0\n \n for i in range(len(A)):\n if A[i] > max_num_pages:\n return float('inf')\n if cur + A[i] > max_num_pages:\n min_num_student += 1\n cur = A[i]\n else:\n cur += A[i]\n return min_num_student\n \n left = 0\n right = sum(A)\n min_num_pages = -1\n while left <= right: \n mid = (left + right) // 2\n if find_min_num_student(A, mid) > B:\n left = mid + 1\n else:\n min_num_pages = mid\n right = mid - 1\n \n return min_num_pages\n \n\n","sub_path":"interviewbit/allocate_books.py","file_name":"allocate_books.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"524271206","text":"from src.Config import Config\nfrom src.NEAT.Gene import ConnectionGene, NodeType\n\n\ndef initialize_pop(Node, Genome, initial_individuals, create_triangles=True, modules=None):\n in_node_params = (0, NodeType.INPUT)\n out_node_params = (1, NodeType.OUTPUT)\n tri_node_params = (2, NodeType.HIDDEN)\n\n pop = []\n individuals_to_create = initial_individuals if not create_triangles else initial_individuals // 2\n\n for _ in range(individuals_to_create):\n reps = []\n pop.append(Genome([ConnectionGene(0, 0, 1)],\n [Node(*in_node_params), Node(*out_node_params)]))\n\n if modules is not None and Config.blueprint_nodes_use_representatives:\n for node in pop[-1]._nodes.values():\n reps.append(node.choose_representative(modules, reps))\n\n if create_triangles:\n reps = []\n pop.append(Genome([ConnectionGene(0, 0, 1), ConnectionGene(1, 0, 2), ConnectionGene(2, 2, 1)],\n [Node(*in_node_params), Node(*tri_node_params), Node(*out_node_params)]))\n\n if modules is not None and Config.blueprint_nodes_use_representatives:\n for node in pop[-1]._nodes.values():\n 
reps.append(node.choose_representative(modules, reps))\n\n for indv in pop:\n indv.calculate_heights()\n\n return pop\n\n\ndef initialize_mutations(create_triangles=True):\n if not create_triangles:\n return {(0, 1): 0} # linear connection\n\n return {(0, 1): 0, # linear connection\n 0: 2, # node mutation on linear connection\n (0, 2): 1, # connection mutation for above node mutation\n (2, 1): 2} # connection mutation for above node mutation\n","sub_path":"src/CoDeepNEAT/PopulationInitialiser.py","file_name":"PopulationInitialiser.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"75063856","text":"\ndef busca(lista, elemento):\n for i in range(len(lista)):\n if lista[i] == elemento:\n letra = i \n return letra\n return False\n\n \n'''list = ['a', 'e', 'l']\nprint(busca(list,'e'))\nlista = [12,13,14]\nprint(busca(lista, 15))'''\n","sub_path":"Coursera 2/envio 4º exercicio/Busca.py","file_name":"Busca.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"473354279","text":"import math\nfrom random import randrange\n\nimport hw4\nfrom tests.test_framework import *\n\n\ndef main():\n builder = TestBuilder(\"hw 4\", 'hw4.py', linter_points=5, default_test_points=4)\n builder.add_to_blacklist({\n 'if (?!__name__).*:': 'if statements not allowed for this assignment. please remove it to continue',\n 'while.*:': 'while loops not allowed for this assignment. please remove it to continue',\n '\\[*\\]': 'lists are not allowed for this assignment. please remove it to continue',\n 'list': 'lists are not allowed for this assignment. please remove it to continue'\n })\n pi_tests_section = build_IO_section('pi2', [('1')], [[\"4.0\", \"0.8584073464102069\"]], build_pi_tests(9), hw4.pi2,\n test_all_output=True)\n\n def comp_func(actual, expected):\n return abs(float(actual[0]) - float(expected[0])) < 0.0000000000001 and abs(\n float(actual[1]) - float(expected[1])) < 0.0000000000001\n\n test: Test\n for test in pi_tests_section.outline:\n test.comp_func = comp_func\n builder.add_items(pi_tests_section)\n builder.run()\n\n\ndef build_pi_tests(n):\n res = []\n for i in range(n):\n positive = 1\n negative = -3\n sum = 0\n terms = randrange(1, 1000)\n for i in range(terms):\n if i % 2 == 0:\n sum += 4 / positive\n positive += 4\n else:\n sum += 4 / negative\n negative -= 4\n res.append({'test': [str(terms)], 'expected': [str(sum), str(abs(sum - math.pi))]})\n return res\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/hw4/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"454310895","text":"# -*- coding: utf-8 -*-\nfrom shutil import copytree, make_archive\nimport os\nimport grp\nimport logging\nimport itertools\nimport numpy as np\nimport time\n\n# progress bar\nfrom tqdm import tqdm\n\nclass GroupMembersFiles:\n \"\"\"Locates all files owned by the members of a group and moved them to an archive folder .\n \"\"\"\n def __init__(self):\n grp_names = input(\"enter group name/(s): \")\n self.grp_names = grp_names.split(\" \")\n logging.info(\"input group name {}\".format(str(self.grp_names)))\n print(\"generating group info\")\n\n\n def get_grp_infos(self):\n\n for grp_name in self.grp_names:\n\n try:\n self.group_info_ = grp.getgrnam(grp_name)\n 
print(\"{}\".format(str(self.group_info_)))\n logging.info(\"{}\".format(str(self.group_info_)))\n\n except KeyError:\n print(\"input group name {} does not exist!\".format(grp_name))\n logging.warning(\n \"input group name {} does not exist!\".format(grp_name))\n exit(1)\n logging.info(\n \"exit status 1 {} does not exist\".format(grp_name))\n\n\n def get_unique_users(self):\n \"\"\"\"Creates a unique list of all users, incase a user belongs to multiple groups.\n \"\"\"\n\n self.grp_mem_ = list()\n for grp_name in self.grp_names:\n self.grp_name = grp_name\n self.grp_mem_.append(grp.getgrnam(self.grp_name).gr_mem)\n\n self.grp_members = np.unique(\n list(itertools.chain.from_iterable(self.grp_mem_))) # gets unique list of group members to avoid duplicate backup\n logging.info(\"generating unique users list from group name/(s)\")\n\n return self.grp_members\n\n\n def get_confirmation(self):\n \"\"\"\"Gets backup confirmation from admin.\n \"\"\"\n self.get_con = input(\n \"Do you want to continue to back up files for all group users above? yes/no\\n\")\n\n if self.get_con == \"yes\":\n self.con_exit = 0\n logging.info(\"back up permission granted {}\".format(\n str(self.con_exit)))\n\n elif self.get_con == \"no\":\n self.con_exit = 1\n logging.info(\"back up permission not granted {}\".format(\n str(self.con_exit)))\n exit(1)\n\n else:\n logging.error(\"enter yes or no to confirm back up permission\")\n print(\"answer with yes or no\")\n self.get_confirmation()\n\n\n def check_backup_dir(self, os_dir):\n \"\"\"Checks if target backup directory exists,\n otherwise it creates target backup directory automatically\n \"\"\"\n self.os_dir = os_dir\n if not os.path.exists(self.os_dir):\n return 1\n else:\n return self.os_dir\n\n\n def create_new_target_dir(self, os_dir):\n self.new_dir = os.mkdir(os_dir)\n return self.new_dir\n\n\n def backup_group_user_files(self):\n \"\"\"\n Copys members all files to an archive directory\n \"\"\"\n grp_names_ = self.grp_names # get group names from as input from terminal\n\n # get group informations\n group_info_ = self.get_grp_infos()\n grp_users = self.get_unique_users()\n logging.info(\n \"names of users to be backuped: {}\".format(str(grp_users)))\n print(grp_users)\n\n self.get_confirmation() # assert back up permission\n\n # get back up directory as input from terminal\n backup_target = input(\"enter target backup directory: \")\n logging.info(\"get target directory name from terminal\")\n\n # check if directory exists, otherwise create target backup directory automatically\n target_dir = self.check_backup_dir(backup_target)\n if target_dir == 1:\n print(backup_target, \"does not exist.\")\n self.get_con_ = input(\n \"do you want this program to create {}? 
yes/no\\n\".format(backup_target))\n\n if self.get_con_ == \"yes\":\n print(\"creating new backup directory {}\".format(backup_target))\n target_dir = self.create_new_target_dir(backup_target)\n else:\n exit(1)\n\n # copy members files to an archive directory\n for user_name in tqdm(grp_users, desc='creating archive'):\n path_dir = \"/home/\"\n logging.info(\"starting backup of {} files\".format(user_name))\n # create directory for individual user files\n # res_dir = copytree(path_dir+user_name, str(backup_target)+\"/\"+user_name)\n\n # create gzip’ed tar-file archive\n archive_name = os.path.expanduser(os.path.join(str(backup_target), user_name))\n root_dir = os.path.expanduser(os.path.join(path_dir, user_name))\n res_tar = make_archive(archive_name, 'gztar', root_dir)\n\n logging.info(\n \"created gzip’ed tar-file backup of {}\".format(user_name))\n logging.info(\n \"backup of {} files successful\".format(user_name))\n logging.info(\"exit status = 0\")\n \n time.sleep(3)\n\n\ndef guf():\n #create a log file\n logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',\n filename=\"backup.log\", level=logging.DEBUG)\n group_names_input = GroupMembersFiles()\n group_names_input.backup_group_user_files()\n print(\"backup process completed\")\n\n\nif __name__ == \"__main__\":\n guf()\n","sub_path":"gufs.py","file_name":"gufs.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"63368463","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport pickle\nimport sqlite3\nimport argparse\nimport os\nimport time\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.model_selection import train_test_split\nimport json\nimport math\n\n\ndef generate_estimator(X_train, X_test, y_train, y_test):\n if args.search_for_new_model_parameters:\n # do a randomised search to find the best classifier\n print('setting up randomised search')\n parameters = {\n \"loss\":[\"deviance\"],\n \"learning_rate\": [0.01, 0.05, 0.1],\n \"max_depth\":[3, 5, 8, 20, 100],\n \"max_features\":[\"log2\",\"sqrt\"],\n \"criterion\": [\"friedman_mse\"],\n \"subsample\":[0.6, 0.8, 1.0],\n \"n_estimators\":[50, 100, 500]\n }\n # cross-validation splitting strategy uses 'cv' folds in a (Stratified)KFold\n rsearch = RandomizedSearchCV(GradientBoostingClassifier(), parameters, n_iter=20, n_jobs=-1, random_state=10, cv=2, scoring='accuracy', verbose=1)\n print('fitting to the training set')\n # find the best fit within the parameter search space\n rsearch.fit(X_train, y_train)\n best_estimator = rsearch.best_estimator_\n print('best score from the search: {}'.format(round(rsearch.best_score_, 4)))\n best_params = rsearch.best_params_\n print(best_params)\n else:\n print('fitting the estimator to the training data')\n # use the model parameters we found previously\n best_params = {'subsample': 0.6, 'n_estimators': 280, 'min_samples_split': 400, 'min_samples_leaf': 10, 'max_features': 'log2', 'max_depth': 11, 'loss': 'lad', 'learning_rate': 0.05}\n best_estimator = GradientBoostingClassifier(**best_params)\n best_estimator.fit(X_train, y_train) # find the best fit within the parameter search space\n\n # calculate the estimator's score on the train and test sets\n print('evaluating against the training and test set')\n train_score = best_estimator.score(X_train, y_train)\n test_score = 
best_estimator.score(X_test, y_test)\n print(\"R-squared for training set: {}, test set: {}\".format(round(train_score,2), round(test_score,2)))\n return best_estimator\n\n\n####################################################################\n\n# nohup python ./open-path/pda/build-target-decoy-classifier.py -en dwm-test > build-target-decoy-classifier.log 2>&1 &\n\nparser = argparse.ArgumentParser(description='With metrics from each of the library sequences, from each run, build a feature targets and decoys training set.')\nparser.add_argument('-eb','--experiment_base_dir', type=str, default='./experiments', help='Path to the experiments directory.', required=False)\nparser.add_argument('-en','--experiment_name', type=str, help='Name of the experiment.', required=True)\nparser.add_argument('-min','--minimum_number_files', type=int, default=10, help='For inclusion in the training set, the minimum number of files in which the sequence was identified.', required=False)\nparser.add_argument('-tsm','--training_set_multiplier', type=int, default=10, help='Make the target training set this many times bigger than the decoy set.', required=False)\nparser.add_argument('-snmp','--search_for_new_model_parameters', action='store_true', help='Search for new model parameters.')\nargs = parser.parse_args()\n\n# Print the arguments for the log\ninfo = []\nfor arg in vars(args):\n info.append((arg, getattr(args, arg)))\nprint(info)\n\nstart_run = time.time()\n\n# check the experiment directory exists\nEXPERIMENT_DIR = \"{}/{}\".format(args.experiment_base_dir, args.experiment_name)\nif not os.path.exists(EXPERIMENT_DIR):\n print(\"The experiment directory is required but doesn't exist: {}\".format(EXPERIMENT_DIR))\n sys.exit(1)\n\n# check the experiment metrics file exists\nTARGET_DECOY_MODEL_DIR = \"{}/target-decoy-models\".format(EXPERIMENT_DIR)\nMETRICS_DB_NAME = \"{}/experiment-metrics-for-library-sequences.sqlite\".format(TARGET_DECOY_MODEL_DIR)\nif not os.path.isfile(METRICS_DB_NAME):\n print(\"The experiment sequence metrics file doesn't exist: {}\".format(METRICS_DB_NAME))\n sys.exit(1)\n\n# create the index if it's not already there\ndb_conn = sqlite3.connect(METRICS_DB_NAME)\nprint(\"creating index in {}\".format(METRICS_DB_NAME))\nsrc_c = db_conn.cursor()\nsrc_c.execute(\"create index if not exists idx_extracted_metrics_1 on extracted_metrics (number_of_runs_identified)\")\n# load the sequences\nprint(\"loading metrics from {}\".format(METRICS_DB_NAME))\nmetrics_df = pd.read_sql_query('select target_metrics,decoy_metrics from extracted_metrics where number_of_runs_identified >= {}'.format(args.minimum_number_files), db_conn)\ndb_conn.close()\nprint(\"loaded {} metrics for library sequences that satisfy the criteria for inclusion in the training set from {}\".format(len(metrics_df), METRICS_DB_NAME))\n\n# now we can build the training set\nprint(\"building the training set\")\n\nif len(metrics_df) > 0:\n # unpack the metrics from each sequence\n metrics = []\n metrics_names = None\n for row in metrics_df.itertuples():\n # target metrics\n target_metrics = json.loads(row.target_metrics)\n if isinstance(target_metrics, dict):\n l = [target_metrics[key] for key in sorted(target_metrics)]\n l.append('target')\n metrics.append(tuple(l))\n if metrics_names == None:\n metrics_names = [key for key in sorted(target_metrics)]\n\n # decoy metrics\n decoy_metrics = json.loads(row.decoy_metrics)\n if isinstance(decoy_metrics, dict):\n l = [decoy_metrics[key] for key in sorted(decoy_metrics)]\n 
l.append('decoy')\n metrics.append(tuple(l))\n\n columns = metrics_names.copy()\n columns.append('class_name')\n\n metrics_df = pd.DataFrame(metrics, columns=columns)\n\n # tidy up any attributes that will upset the model training\n metrics_df.fillna(value=0.0, inplace=True)\n metrics_df.replace(to_replace=-math.inf, value=0, inplace=True)\n\n # down-sample the target class to balance the classes\n target_class_df = metrics_df[(metrics_df.class_name == 'target')]\n decoy_class_df = metrics_df[(metrics_df.class_name == 'decoy')]\n print('prior to down-sampling, targets {}, decoys {}'.format(len(target_class_df), len(decoy_class_df)))\n number_of_targets_for_training_set = args.training_set_multiplier * len(decoy_class_df)\n if len(target_class_df) > number_of_targets_for_training_set:\n target_class_df = target_class_df.sample(n=number_of_targets_for_training_set) # even them up somewhat, sort-of\n metrics_df = pd.concat([target_class_df, decoy_class_df], ignore_index=True)\n\n # set up the train and test sets\n X = metrics_df[metrics_names].values\n y = metrics_df[['class_name']].values[:,0]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\n print('training set targets: {}, decoys: {}'.format(np.count_nonzero(y_train == 'target'), np.count_nonzero(y_train == 'decoy')))\n print('test set targets: {}, decoys: {}'.format(np.count_nonzero(y_test == 'target'), np.count_nonzero(y_test == 'decoy')))\n\n # save the training set alongside the model\n np.save('{}/X_train.npy'.format(TARGET_DECOY_MODEL_DIR), X_train)\n np.save('{}/y_train.npy'.format(TARGET_DECOY_MODEL_DIR), y_train)\n np.save('{}/X_test.npy'.format(TARGET_DECOY_MODEL_DIR), X_test)\n np.save('{}/y_test.npy'.format(TARGET_DECOY_MODEL_DIR), y_test)\n np.save('{}/feature_names.npy'.format(TARGET_DECOY_MODEL_DIR), np.array(metrics_names))\n\n best_estimator = generate_estimator(X_train, X_test, y_train, y_test)\n\n # save the classifier\n CLASSIFIER_FILE_NAME = \"{}/target-decoy-classifier.pkl\".format(TARGET_DECOY_MODEL_DIR)\n print(\"saving the classifier to {}\".format(CLASSIFIER_FILE_NAME))\n with open(CLASSIFIER_FILE_NAME, 'wb') as file:\n pickle.dump(best_estimator, file)\n\n print(\"make predictions on the test set\")\n predictions = best_estimator.predict(X_test)\n class_probabilities = best_estimator.predict_proba(X_test)\n np.save('{}/class_probabilities.npy'.format(TARGET_DECOY_MODEL_DIR), class_probabilities, allow_pickle=False)\n\n # display some interesting model attributes\n cm = confusion_matrix(y_test, predictions, labels=[\"target\", \"decoy\"])\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Confusion Matrix:\")\n print(cm)\n print('false discovery rate (i.e. 
decoy was identified as a target): {}'.format(cm[1,0]))\n print()\n print(\"Classification Report\")\n print(classification_report(y_test, predictions))\nelse:\n print(\"there are no sequences that meet the criteria for use in the training set.\")\n\nstop_run = time.time()\nprint(\"total running time ({}): {} seconds\".format(parser.prog, round(stop_run-start_run,1)))\n","sub_path":"pipeline/build-target-decoy-classifier.py","file_name":"build-target-decoy-classifier.py","file_ext":"py","file_size_in_byte":8989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"150412254","text":"\"\"\" from https://github.com/keithito/tacotron \"\"\"\nimport re\nfrom text import cleaners\nfrom text.symbols import symbols, _letters, _punctuation as punctuation_symbols\n\ntry:\n from text.acronyms import normalize_acronyms\nexcept:\n pass\n\n# Mappings from symbol to numeric ID and vice versa:\n_symbol_to_id = {s: i for i, s in enumerate(symbols)}\n_id_to_symbol = {i: s for i, s in enumerate(symbols)}\n\n# Regular expression matching text enclosed in curly braces:\n_curly_re = re.compile(r'(.*?)\\{(.+?)\\}(.*)')\n\n# for arpabet with apostrophe\n_apostrophe = re.compile(r\"(?=\\S*['])([a-zA-Z'-]+)\")\n\n\ndef text_to_sequence(text, cleaner_names):\n \"\"\"Converts a string of text to a sequence of IDs corresponding to the symbols in the text.\n\n The text can optionally have ARPAbet sequences enclosed in curly braces embedded\n in it. For example, \"Turn left on {HH AW1 S S T AH0 N} Street.\"\n\n Args:\n text: string to convert to a sequence\n cleaner_names: names of the cleaner functions to run the text through\n\n Returns:\n List of integers corresponding to the symbols in the text\n \"\"\"\n sequence = []\n\n # Check for curly braces and treat their contents as ARPAbet:\n while len(text):\n m = _curly_re.match(text)\n if not m:\n sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))\n break\n sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))\n sequence += _arpabet_to_sequence(m.group(2))\n text = m.group(3)\n\n return [_symbol_to_id['']] + sequence + [_symbol_to_id['']]\n\n\ndef sequence_to_text(sequence):\n \"\"\"Converts a sequence of IDs back to a string\"\"\"\n result = \"\"\n for symbol_id in sequence:\n if symbol_id in _id_to_symbol:\n s = _id_to_symbol[symbol_id]\n # Enclose ARPAbet back in curly braces:\n if len(s) > 1 and s[0] == \"@\":\n s = \"{%s}\" % s[1:]\n result += s\n return result.replace(\"}{\", \" \")\n\n\ndef grapheme_to_phoneme(text, g2p, lexicon=None):\n \"\"\"Converts grapheme to phoneme\"\"\"\n phones = []\n words = filter(None, re.split(r\"(['(),:;.\\-\\?\\!\\s+])\", text))\n for w in words:\n if lexicon is not None and w.lower() in lexicon:\n phones += lexicon[w.lower()]\n else:\n phones += list(filter(lambda p: p != \" \", g2p(w)))\n return phones\n\n\ndef get_phones(g2p, cmudict, clean_text):\n word_pieces = re.findall(r'\\S*\\{.*?\\}\\S*|\\S+', clean_text)\n word_pieces = [normalize_acronyms(w) if not w.startswith(\"{\") else w for w in word_pieces]\n words = []\n for word_piece in word_pieces:\n if word_piece.startswith(\"{\"):\n start_symbols, word, arpabet_suffix, end_symbols = word_piece, \"thisisabbreviation\", \"\", \"\"\n else:\n start_symbols, word, arpabet_suffix, end_symbols = _clean_word_before_phones(word_piece)\n if not cmudict.lookup(word):\n start_symbols, word, arpabet_suffix, end_symbols = word_piece, \"thisisoov\", \"\", \"\"\n words.append([start_symbols, word, 
arpabet_suffix, end_symbols])\n phones = \" \".join(g2p(\" \".join([w for _, w, *_ in words]))).split(\" \")\n result_text = \"\"\n for i, ((start_symbols, word, arpabet_suffix, end_symbols), phone) in enumerate(zip(words, phones)):\n if word == \"thisisabbreviation\":\n word_rec = start_symbols\n elif word == \"thisisoov\":\n word_rec = start_symbols\n else:\n word_rec = start_symbols + '{%s}' % (phone.strip() + arpabet_suffix) + end_symbols\n\n if i < len(words) - 1:\n result_text += (word_rec + \" \")\n else:\n result_text += word_rec\n print(\"Raw Text Sequence: {}\".format(clean_text))\n print(\"Phoneme Sequence: {}\".format(result_text))\n return result_text\n\n\ndef get_arpabet(word, cmudict, index=0):\n start_symbols, word, arpabet_suffix, end_symbols = _clean_word_before_phones(word)\n arpabet = None if (word.lower() in HETERONYMS or all(c.isupper() for c in word)) else cmudict.lookup(word)\n\n if arpabet is not None:\n return start_symbols + '{%s}' % (arpabet[index] + arpabet_suffix) + end_symbols\n else:\n return start_symbols + word + end_symbols\n\n\ndef _clean_text(text, cleaner_names):\n for name in cleaner_names:\n cleaner = getattr(cleaners, name)\n if not cleaner:\n raise Exception(\"Unknown cleaner: %s\" % name)\n text = cleaner(text)\n return text\n\n\ndef _symbols_to_sequence(symbols):\n i = 0\n id_sequence = []\n while i < len(symbols):\n if symbols[i:i + 7] == \"\":\n id_sequence.append(_symbol_to_id[\"\"])\n i += 7\n elif symbols[i:i + 6] == \"\":\n id_sequence.append(_symbol_to_id[\"\"])\n i += 6\n elif symbols[i:i + 6] == \"\":\n id_sequence.append(_symbol_to_id[\"\"])\n i += 6\n else:\n if _should_keep_symbol(symbols[i]):\n id_sequence.append(_symbol_to_id[symbols[i]])\n i += 1\n return id_sequence\n\n\ndef _arpabet_to_sequence(text):\n return _symbols_to_sequence([\"@\" + s for s in text.split()])\n\n\ndef _should_keep_symbol(s):\n return s in _symbol_to_id and s is not '_' and s is not '~'\n\n\ndef _clean_word_before_phones(word):\n re_start_punc = r\"\\A\\W+\"\n re_end_punc = r\"\\W+\\Z\"\n\n start_symbols = re.findall(re_start_punc, word)\n if len(start_symbols):\n start_symbols = start_symbols[0]\n word = word[len(start_symbols):]\n else:\n start_symbols = ''\n\n end_symbols = re.findall(re_end_punc, word)\n if len(end_symbols):\n end_symbols = end_symbols[0]\n word = word[:-len(end_symbols)]\n if end_symbols[-1] == \">\" and word.endswith(\"\"\n elif end_symbols[-1] == \">\" and word.endswith(\"\"\n elif end_symbols[-1] == \">\" and word.endswith(\"\"\n # TODO: check it one more time\n # if word[-1] not in _letters:\n # end_symbols = word[-1] + end_symbols\n # word = word[:-1]\n else:\n end_symbols = ''\n\n arpabet_suffix = ''\n if _apostrophe.match(word) is not None and word.lower() != \"it's\" and word.lower()[-1] == 's':\n word = word[:-2]\n arpabet_suffix = ' Z'\n return start_symbols, word, arpabet_suffix, end_symbols\n\n\ndef files_to_list(filename):\n \"\"\"\n Takes a text file of filenames and makes a list of filenames\n \"\"\"\n with open(filename, encoding='utf-8') as f:\n files = f.readlines()\n\n files = [f.rstrip() for f in files]\n return files\n\n\ntry:\n HETERONYMS = set(files_to_list('data/heteronyms'))\nexcept:\n pass\n","sub_path":"text/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"526981","text":"import contextlib\n@contextlib.contextmanager\ndef looking_glass():\n import sys\n origin_write = 
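`_should_keep_symbol` above compares strings with `is not`, which tests object identity; it only appears to work because CPython interns short literals. Comparing by value is the correct spelling. A corrected sketch (the symbol table is passed in here for self-containment, whereas the module uses its global `_symbol_to_id`):

```python
def _should_keep_symbol(s, symbol_to_id):
    # '_' (padding) and '~' (EOS) are dropped; compare by value, not identity
    return s in symbol_to_id and s not in ('_', '~')

print(_should_keep_symbol('a', {'a': 0, '_': 1}))  # True
print(_should_keep_symbol('_', {'a': 0, '_': 1}))  # False
```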
sys.stdout.write\n\n def reverse_write(text):\n origin_write(text[::-1])\n sys.stdout.write = reverse_write\n\n msg = ''\n try:\n yield 'JABBERWOCKY'\n except ZeroDivisionError:\n msg = 'Please DO NOT divide by zero!'\n finally:\n sys.stdout.write = origin_write\n if msg:\n print(msg)\n\n\nwith looking_glass() as what:\n print('Alice, Kitty and Snowdrop')\n print(what)\n print(1/0)\n\nprint(what)\nprint('Back to normal')\n","sub_path":"src/context-manager/02/mirror_gen_exc.py","file_name":"mirror_gen_exc.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"602414639","text":"# -*- coding: utf-8 -*-\nfrom fluxi import Fluxi\n#%%\n\nfl=Fluxi(\"Demo\")\n#%%\nfl.P(\"A Float\").v=5.3\n#%%\nfl.P(\"An Integer\").v=5\n#%%\n\n#def doit(*args):\n# nonexistent(\"there will be an error here\")\n#def action(*args):\n# doit() \n# \n# \n#fl.A(\"DoSomething\").a=action\n\n\n#%%\n#win=fl.openPopup(\"Fehlerin %s\" % \"Button\",\"überall\",fl.A(\"Button!\"))\n\n\n\n","sub_path":"devtests/demo-general.py","file_name":"demo-general.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"186857843","text":"from matplotlib.pyplot import *\nfrom numpy import *\nfrom math import pi\n\n#tehdään ensin vaakasuora ellipsi negatiiviselle x-akselille\n\nu = -2; #x:n keskikohta\nv = 0; #y:n keskikohta\na = 2; #x:n säde\nb = 1; #y:n säde\n\nt = linspace(0, 2*pi, 200);\nx1 = u+a*cos(t);\ny1 = v+b*sin(t);\n\n#käännetään sitten ellipsiä 45°\nxrot = lambda x,y: cos(pi/4)*x - sin(pi/4)*y;\nyrot = lambda x,y: sin(pi/4)*x + cos(pi/4)*y;\n\nx = xrot(x1,y1);\ny = yrot(x1,y1);\n\nplot(x,y, color='k')\nplot([-3, 1], [0, 0], color='k');\nplot([0, 0], [-3,1], color='k');\ntext(-0.08, 1.1, '$\\sigma_1$', size=14);\ntext(1.1, -0.05, '$\\sigma_2$', size=14);\naxis('equal');\naxis('off');\ntight_layout();\nshow();\n#savefig('jännitysellipsi.png');\n","sub_path":"jännitysellipsi.py","file_name":"jännitysellipsi.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"326776160","text":"import random\n\nfrom flax.geometry import Point, Rectangle, Size\nfrom flax.map import Map\nfrom flax.entity import Entity, CaveWall, Wall, Floor, Tree, Grass, CutGrass, Dirt, Player, Salamango, Armor, StairsDown, StairsUp\n\n\nclass MapCanvas:\n def __init__(self, size):\n self.rect = size.to_rect(Point.origin())\n\n self.arch_grid = {point: CaveWall for point in self.rect.iter_points()}\n self.item_grid = {point: [] for point in self.rect.iter_points()}\n self.creature_grid = {point: None for point in self.rect.iter_points()}\n\n def draw_room(self, rect):\n # TODO i think this should probably return a Room or something, and\n # hold off on drawing the walls yet, so we can erode them a bit and\n # then add line-drawing walls later\n assert rect in self.rect\n\n for point in rect.iter_points():\n self.arch_grid[point] = random.choice([Floor, CutGrass, CutGrass, Grass])\n\n # Top and bottom\n for x in rect.range_width():\n self.arch_grid[Point(x, rect.top)] = Wall\n self.arch_grid[Point(x, rect.bottom)] = Wall\n\n # Left and right (will hit corners again, whatever)\n for y in rect.range_height():\n self.arch_grid[Point(rect.left, y)] = Wall\n self.arch_grid[Point(rect.right, y)] = Wall\n\n def find_floor_points(self):\n for point, arch in self.arch_grid.items():\n # TODO surely 
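The stress-ellipse script rotates its curve by writing the 2-D rotation matrix out inline as two lambdas. The same transform as one reusable helper (NumPy, angle in radians), applied to that script's ellipse:

```python
import numpy as np

def rotate(x, y, theta):
    """Rotate points (x, y) about the origin by theta radians."""
    c, s = np.cos(theta), np.sin(theta)
    return c * x - s * y, s * x + c * y

t = np.linspace(0, 2 * np.pi, 200)
x1 = -2 + 2 * np.cos(t)           # centre u=-2, radius a=2
y1 = 0 + 1 * np.sin(t)            # centre v=0,  radius b=1
x, y = rotate(x1, y1, np.pi / 4)  # the script's 45-degree turn
```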
other things are walkable\n # TODO maybe this should be a more general method\n # TODO also should exclude a point with existing creature\n if arch is Floor:\n yield point\n\n def maybe_create(self, type_or_thing):\n if isinstance(type_or_thing, Entity):\n return type_or_thing\n else:\n return type_or_thing()\n\n def to_map(self):\n map = Map(self.rect.size)\n maybe_create = self.maybe_create\n\n for point in self.rect.iter_points():\n map.place(maybe_create(self.arch_grid[point]), point)\n for item_type in self.item_grid[point]:\n map.place(maybe_create(item_type), point)\n if self.creature_grid[point]:\n map.place(maybe_create(self.creature_grid[point]), point)\n\n return map\n\n\ndef random_rect_in_rect(area, size):\n \"\"\"Return a rectangle created by randomly placing the given size within the\n given area.\n \"\"\"\n top = random.randint(area.top, area.bottom - size.height + 1)\n left = random.randint(area.left, area.right - size.width + 1)\n\n return Rectangle(Point(left, top), size)\n\n\nclass Fractor:\n \"\"\"The agent noun form of 'fractal'. An object that generates maps in a\n particular style.\n\n This is a base class, containing some generally-useful functionality; the\n interesting differentiation happens in subclasses.\n \"\"\"\n def __init__(self, map_size, region=None):\n self.map_canvas = MapCanvas(map_size)\n if region is None:\n self.region = self.map_canvas.rect\n else:\n self.region = region\n\n def generate_map(self, start=False, up=None, down=None):\n \"\"\"The method you probably want to call. Does some stuff, then spits\n out a map.\n \"\"\"\n self.generate()\n\n if start:\n self.place_player()\n\n if up:\n self.place_portal(StairsUp, up)\n if down:\n self.place_portal(StairsDown, down)\n\n return self.map_canvas.to_map()\n\n def generate(self):\n \"\"\"Implement in subclasses. Ought to do something to the canvas.\"\"\"\n raise NotImplementedError\n\n # Utility methods follow\n\n def generate_room(self, region, room_size=Size(8, 8)):\n room_rect = random_rect_in_rect(region, room_size)\n self.map_canvas.draw_room(room_rect)\n\n def place_player(self):\n floor_points = list(self.map_canvas.find_floor_points())\n assert floor_points, \"can't place player with no open spaces\"\n points = random.sample(floor_points, 3)\n self.map_canvas.creature_grid[points[0]] = Player\n self.map_canvas.creature_grid[points[1]] = Salamango\n self.map_canvas.item_grid[points[2]].append(Armor)\n\n def place_portal(self, portal_type, destination):\n from flax.component import IPortal\n\n # TODO should be able to maybe pass in attribute definitions directly?\n portal = portal_type()\n portal.component_data[IPortal['destination']] = destination\n\n # TODO would rather the map canvas just keep track of this directly\n floor_points = list(self.map_canvas.find_floor_points())\n assert floor_points, \"can't place portal with no open spaces\"\n point = random.choice(floor_points)\n self.map_canvas.arch_grid[point] = portal\n\n\nclass BinaryPartitionFractor(Fractor):\n def __init__(self, *args, minimum_size):\n super().__init__(*args)\n self.minimum_size = minimum_size\n\n # TODO i feel like this class doesn't quite... do... anything. 
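`random_rect_in_rect` above draws a uniformly placed rectangle of fixed size inside an area. The same arithmetic with plain tuples, assuming inclusive edge coordinates as the original's `Rectangle` appears to use:

```python
import random

def random_rect_in_rect(area, size):
    """area = (left, top, right, bottom), inclusive; size = (width, height)."""
    left, top, right, bottom = area
    width, height = size
    x = random.randint(left, right - width + 1)
    y = random.randint(top, bottom - height + 1)
    return (x, y, x + width - 1, y + height - 1)

print(random_rect_in_rect((0, 0, 63, 31), (8, 8)))
```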
all it\n # will ever do is spit out a list of other regions, and it has to construct\n # a bunch of other copies of itself to do that...\n\n def generate(self):\n regions = self.maximally_partition()\n for region in regions:\n self.generate_room(region)\n\n def maximally_partition(self):\n # TODO this should preserve the tree somehow, so a hallway can be drawn\n # along the edges\n regions = [self.region]\n final_regions = []\n\n while regions:\n nonfinal_regions = []\n for region in regions:\n new_regions = self.partition(region)\n if len(new_regions) > 1:\n nonfinal_regions.extend(new_regions)\n else:\n final_regions.extend(new_regions)\n\n regions = nonfinal_regions\n\n return final_regions\n\n def partition(self, region):\n possible_directions = []\n\n # TODO this needs a chance to stop before hitting the minimum size --\n # some sort of ramp-down where larger sizes are much less likely. a\n # bit awkward though since just not dividing means we end up with a\n # room 2x the minimum size. maybe the partitioning needs tweaking too,\n # like normal curve or something.\n\n if region.height >= self.minimum_size.height * 2:\n possible_directions.append(self.partition_horizontal)\n if region.width >= self.minimum_size.width * 2:\n possible_directions.append(self.partition_vertical)\n\n if possible_directions:\n method = random.choice(possible_directions)\n return method(region)\n else:\n return [region]\n\n def partition_horizontal(self, region):\n # We're looking for the far edge of the top partition, so subtract 1\n # to allow it on the border of the minimum size\n top = region.top + self.minimum_size.height - 1\n bottom = region.bottom - self.minimum_size.height\n\n if top > bottom:\n return [region]\n\n midpoint = random.randrange(top, bottom + 1)\n\n return [\n region.replace(bottom=midpoint),\n region.replace(top=midpoint + 1),\n ]\n\n def partition_vertical(self, region):\n # We're looking for the far edge of the left partition, so subtract 1\n # to allow it on the border of the minimum size\n left = region.left + self.minimum_size.width - 1\n right = region.right - self.minimum_size.width\n\n if left > right:\n return [region]\n\n midpoint = random.randrange(left, right + 1)\n\n return [\n region.replace(right=midpoint),\n region.replace(left=midpoint + 1),\n ]\n\n\nclass PerlinFractor(Fractor):\n def generate(self):\n from flax.noise import discrete_perlin_noise_factory\n noise = discrete_perlin_noise_factory(*self.region.size, resolution=4, octaves=2)\n for point in self.region.iter_points():\n n = noise(*point)\n if n < 0.2:\n arch = Floor\n elif n < 0.4:\n arch = Dirt\n elif n < 0.6:\n arch = CutGrass\n elif n < 0.8:\n arch = Grass\n else:\n arch = Tree\n self.map_canvas.arch_grid[point] = arch\n","sub_path":"flax/fractor.py","file_name":"fractor.py","file_ext":"py","file_size_in_byte":8361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"580858469","text":"import urllib.request\nimport json\nimport csv\n\nresponse=urllib.request.urlopen('https://api.github.com/search/repositories?q=is:public+language:python&forks:%3E=200')\ndata = response.read()\nif data:\n data = json.loads(data)\n items=data[\"items\"]\n print(len(items))\n \n headers=[\"name\" , \"description\" , \"html_url\", \"watchers_count\", \"stargazers_count\", \"forks_count\",\"language\",\"forks\",\"stargazers_count\"]\n require_data=[]\n for i in items:\n if i[\"stargazers_count\"]>2000:\n dic_data={\n headers[0]:i[headers[0]],\n headers[1]:i[headers[1]],\n 
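`BinaryPartitionFractor.partition` above splits a region along whichever axis still has room for two minimum-size halves, and `maximally_partition` repeats that to a fixed point. One split, condensed into a standalone function with the same bounds arithmetic (inclusive edges, assumed 8x8 minimum):

```python
import random

MIN_W, MIN_H = 8, 8  # assumed minimum room size

def partition(region):
    left, top, right, bottom = region
    width, height = right - left + 1, bottom - top + 1
    choices = []
    if height >= 2 * MIN_H:
        choices.append('h')
    if width >= 2 * MIN_W:
        choices.append('v')
    if not choices:
        return [region]  # too small to split further
    if random.choice(choices) == 'h':
        mid = random.randrange(top + MIN_H - 1, bottom - MIN_H + 1)
        return [(left, top, right, mid), (left, mid + 1, right, bottom)]
    mid = random.randrange(left + MIN_W - 1, right - MIN_W + 1)
    return [(left, top, mid, bottom), (mid + 1, top, right, bottom)]

print(partition((0, 0, 63, 31)))
```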
headers[2]:i[headers[2]],\n headers[3]:i[headers[3]],\n headers[4]:i[headers[4]],\n headers[5]:i[headers[5]],\n headers[6]:i[headers[6]],\n headers[7]:i[headers[7]],\n headers[8]:i[headers[8]],\n }\n require_data.append(dic_data)\n\n with open(\"clean_list.csv\",\"w\",newline='') as f:\n writer = csv.DictWriter(f, fieldnames=headers)\n writer.writeheader()\n writer.writerows(require_data)\nelse:\n print(\"Not found\")","sub_path":"assignment_1.py","file_name":"assignment_1.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"54174306","text":"from lxml import etree as ET\nfrom copy import deepcopy\nimport os, sys\n\n'''\nThe code is automated for all districts, but it is preferred to execute this code for one district at a time.\nThe large XML files take up the RAM memory. Execute the script one district at a time.\n'''\ndistricts = ['Bangalore','Chennai','Delhi','Gurgaon','Hyderabad','Kolkata','Mumbai']\n#districts = ['Delhi']\n\ntarget_directory = \"Processed_OSM_data\"\nos.makedirs( target_directory, exist_ok=True )\n\nfor district in districts:\n print(district)\n\n encoding = 'utf-8' \n clean_osm_data_file = open(target_directory+'/processed_'+district+'.osm', \"w+\")\n # clean_osm_data_file = open('processed_'+district+'.osm', \"w+\")\n clean_osm_data_file.write('\\n\\n')\n\n raw_osm_data_path = 'Raw_OSM_data/'+district+'.osm'\n number_of_highway_ways = 0\n number_of_highway_nodes = 0\n\n context = ET.iterparse(raw_osm_data_path, events=('end',), tag='way')\n final_node_ids = []\n\n way_string = \"\"\n for event, elem in context:\n for node_child in elem: # iterating over the sub-elements of a way element\n if node_child.tag == \"tag\":\n if node_child.attrib[\"k\"] == \"highway\":\n number_of_highway_ways += 1\n #clean_osm_data_file.write( (ET.tostring(elem, pretty_print=True)).decode(encoding))\n way_string += (ET.tostring(elem, pretty_print=True)).decode(encoding)\n # store the node id of all nodes referred by this way\n for referred_node in elem:\n if referred_node.tag == \"nd\":\n final_node_ids.append(referred_node.attrib[\"ref\"])\n elem.clear()\n \n del context \n print(\"At this point all ways have been processed\")\n print(\"Total ways with highway tag: \", number_of_highway_ways)\n\n final_node_ids = set(final_node_ids)\n context = ET.iterparse(raw_osm_data_path, events=('end',), tag='node') \n for event, elem in context:\n if elem.attrib[\"id\"] in final_node_ids:\n number_of_highway_nodes += 1\n clean_osm_data_file.write( (ET.tostring(elem, pretty_print=True)).decode(encoding))\n elem.clear()\n\n del context\n print(\"Total nodes with highway tag: \",number_of_highway_nodes)\n clean_osm_data_file.write(way_string)\n clean_osm_data_file.write(\"\")\n clean_osm_data_file.close()\n\n print(\"Done\")\n\n\n\n\n","sub_path":"COMPASS_FINAL_VERSION/Extract_Roads_From_OSM.py","file_name":"Extract_Roads_From_OSM.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"273394210","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 5 21:07:10 2018\n\n@author: amajidsinar\n\nLEFT = 0\nDOWN = 1\nRIGHT = 2\nUP = 3\n\"\"\"\n\nimport gym\nfrom collections import defaultdict\nimport numpy as np\nimport random\n\nenv = gym.make('FrozenLake-v0')\n\nepisodes = 100\ntimesteps = 100\n\ndiamond = 100\npit = -15\ncost = -1\n\nnA = env.action_space.n\n\nQ = 
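The OSM extractor above keeps memory flat on multi-gigabyte XML by streaming with `ET.iterparse` and clearing each element once it has been inspected. That pattern, reduced to its core (the path is a placeholder):

```python
from lxml import etree as ET

def highway_way_ids(osm_path):
    """Stream an .osm file, yielding ids of ways carrying a highway tag."""
    context = ET.iterparse(osm_path, events=('end',), tag='way')
    for _, elem in context:
        if any(c.tag == 'tag' and c.get('k') == 'highway' for c in elem):
            yield elem.get('id')
        elem.clear()  # free the subtree we just processed
    del context

# usage, with a hypothetical input file:
# for way_id in highway_way_ids('Raw_OSM_data/Delhi.osm'):
#     print(way_id)
```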
defaultdict(lambda:np.zeros(nA))\n\nalpha = 0.9\ngamma = 0.9\n\n#define the soft policy function\ndef soft_policy(Q,epsilon=0.3):\n #create the probability distribution\n q = np.ones(nA) * epsilon / nA\n q[np.argmax(Q)] += 1 - epsilon\n \n roll_the_dice = random.random()\n \n #self explanatory\n cumsum = []\n for i,val in enumerate(q):\n cumsum.append(sum([j for j in q[:i+1]]))\n \n for i,val in enumerate(cumsum):\n if roll_the_dice < val:\n return i\n\nfor e in range(episodes):\n state = env.reset()\n for t in range(timesteps):\n #take action a to land state s'\n action = soft_policy(Q[state],0.3)\n next_state,reward,done,_ = env.step(action)\n #set different reward based on the next state\n if next_state == 15:\n reward = diamond\n elif next_state==5 or next_state==7 or next_state==11 or next_state==12:\n reward = pit\n else:\n reward = cost\n #from state s' pick the a' but dont take the move yet\n next_action = Q[next_state].argmax()\n #estimate the current value by sampling the next step or how much we would get if we take action a'\n #in the next state\n sample = gamma * Q[next_state][next_action]\n Q[state][action] += alpha * (reward + sample - Q[state][action])\n if done:\n break;\n state = next_state\n ","sub_path":"Temporal-Difference/Q-Learning.py","file_name":"Q-Learning.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"31546568","text":"# -*- coding: utf-8 -*-\n\"\"\"The Simulation using only the Latent Model.\nFor the simulation using only the Basic Model see file 'simulations_basic.py'.\nFor a file containing simulations using both models, see file 'simulations.py'.\n\"\"\"\n\nimport torch\nimport torch.optim as optim\nimport models_REINFORCE as models\n#import plots_simulations as plots\n\n\"\"\"The Simulation using the Latent Model.\"\"\"\n#([[precise likelihood],[precise prior],[healthy interoception],[chronic pain]])\nvs_h = torch.tensor([[1.],[100.],[100.],[1000.]])\nvs_s = torch.tensor([[[100.,100.],[100.,100.]],[[1.,1.],[1.,1.]],\n [[80.,80.],[80.,80.]],[[500.,500.],[500.,500.]]])\nlamdas_h = torch.tensor([[0.5,0.5],[0.5,0.5],[0.8,0.2],[0.2,0.8]]) #[p(nopain),p(pain)]\n#[p(tickle|nopain),p(nociception|nopain)],[p(tickle|pain),p(nociception|pain)]\nlamdas_s = torch.tensor([[[0.8,0.2],[0.2,0.8]],[[0.8,0.2],[0.2,0.8]],\n [[0.8,0.2],[0.2,0.8]],[[0.5,0.5],[0.5,0.5]]])\nalphas_h = vs_h*lamdas_h\nalphas_s = vs_s * lamdas_s\n\n#Create lists to store updated probs. 
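`soft_policy` above builds the epsilon-greedy distribution and then samples it with a hand-rolled cumulative-sum roulette. `np.random.choice` accepts the probability vector directly, which removes the loop and its edge cases:

```python
import numpy as np

def soft_policy(q_values, epsilon=0.3):
    n = len(q_values)
    probs = np.ones(n) * epsilon / n           # epsilon spread over all actions
    probs[np.argmax(q_values)] += 1 - epsilon  # greedy action gets the rest
    return np.random.choice(n, p=probs)

print(soft_policy(np.array([0.1, 0.5, 0.2, 0.2])))  # usually 1
```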
and precisions after each observation.\n#lamdas_nopain = [[],[],[],[]]\n#lamdas_pain = [[],[],[],[]]\n#vs_nopain = [[],[],[],[]]\n#vs_pain = [[],[],[],[]]\n#lamdas_ticklenopain = [[],[],[],[]]\n#lamdas_nocinopain = [[],[],[],[]]\n#lamdas_ticklepain = [[],[],[],[]]\n#lamdas_nocipain = [[],[],[],[]]\n\n#Create artificial data.\ndata = []\nfor _ in range(30):\n data.append(torch.tensor([1.,0.])) #tickle\nfor _ in range(30):\n data.append(torch.tensor([0.,1.])) #nociception\n\nprint(\"--------------------------------\")\nprint(\"Simulation using the Latent Model.\")\nprint(\"Four different settings of prior and likelihood parameters v and Lamda are compared:\")\n#print(\"-Imprecise Prior & Precise Likelihood\")\n#print(\"-Precise Prior & Imprecise Likelihood\")\n#print(\"-Healthy Interoception\")\n#print(\"-Chronic Pain\")\n#print(\"H E {no pain, pain}; S E {tickle, nociception}.\")\n#print(\"Obervations: 30 times nociception followed by 30 times tickle.\")\nprint(\"--------------------------------\")\nfor v in range(len(vs_h)): #for each precision setting\n print(\"Simulating \", ['Precise Likelihood','Precise Prior',\n 'Healthy Interoception','Chronic Pain'][v])\n print(\"Prior parameters:\")\n print(\"No pain: \",\"{0:.3f}\".format(alphas_h[v][0].item()),\n \"; Pain: \",\"{0:.3f}\".format(alphas_h[v][1].item()))\n print(\"Updated parameters:\")\n\n for d in range(len(data)): #for each artificial observation\n model = models.Latent_Model(alphas_h[v],alphas_s[v])\n optimizer = optim.Adam(model.parameters(), lr=0.0006)\n for step in range(30000):\n optimizer.zero_grad()\n loss = model(data[d])\n loss.backward()\n optimizer.step()\n \n #Get updated parameters.\n alpha_h = model.q_log_alpha_h.exp().detach()\n alpha_s = model.q_log_alpha_s.exp().detach()\n #Update model parameters for the next instantiation.\n alphas_h[v] = alpha_h\n alphas_s[v] = alpha_s\n\n# #Add updated probs. and precisions to lists.\n# lamdas_nopain[v].append(alpha_h[0]/alpha_h.sum())\n# lamdas_pain[v].append(alpha_h[1]/alpha_h.sum())\n# \n# vs_nopain[v].append(alpha_s[0][0]/(alpha_s[0][0]/alpha_s[0].sum()))\n# vs_pain[v].append(alpha_s[1][0]/(alpha_s[1][0]/alpha_s[1].sum()))\n#\n# lamdas_ticklenopain[v].append(alpha_s[0][0]/alpha_s[0].sum())\n# lamdas_nocinopain[v].append(alpha_s[0][1]/alpha_s[0].sum())\n# lamdas_ticklepain[v].append(alpha_s[1][0]/alpha_s[1].sum())\n# lamdas_nocipain[v].append(alpha_s[1][1]/alpha_s[1].sum())\n#\n print(\"No pain: \",\"{0:.3f}\".format(alpha_h[0].item()),\n \"; Pain: \",\"{0:.3f}\".format(alpha_h[1].item()))\n#\n##Create plots like in Anna-Lena's figures 3.8 - 3.11\n#plots.plot_lm_precision(lamdas_nopain,lamdas_pain,vs_nopain,vs_pain)\n#plots.plot_lm_healthy(lamdas_nopain,lamdas_pain,vs_nopain,vs_pain,\n# lamdas_nocipain,lamdas_ticklepain,\n# lamdas_nocinopain,lamdas_ticklenopain)\n#plots.plot_lm_chronicpain(lamdas_nopain,lamdas_pain,vs_nopain,vs_pain,\n# lamdas_nocipain,lamdas_ticklepain,\n# lamdas_nocinopain,lamdas_ticklenopain)","sub_path":"Simulations/simulations_latent_model.py","file_name":"simulations_latent_model.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"67665395","text":"from urllib.request import urlretrieve\nfrom concurrent.futures import ThreadPoolExecutor\n\n\n'''Скачать страницы с некоторых сайтов и посмотреть, сколько символов в каждом html.\nРеализовать скачивание сразу в нескольких потоках. 
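The simulation parameterizes each Dirichlet as alpha = v * lamda, precision times mean, and the commented-out bookkeeping recovers lamda and v by normalizing. That inverse mapping in a few lines of torch, using the healthy-interoception prior from the script:

```python
import torch

alpha = torch.tensor([80., 20.])  # v=100 times lamda=[0.8, 0.2], as in vs_h/lamdas_h
v = alpha.sum()                   # precision
lamda = alpha / v                 # mean probabilities
print(v.item(), lamda.tolist())   # 100.0 [0.8, 0.2]
```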
Использовать ThreadPoolExecutor и urllib.request.urlretrieve.\n'''\n\n\ndef fetch(url):\n data = urlretrieve(url)\n html = open(data[0])\n l = html.read()\n html.close()\n return len(l)\n\n\nurls = ['https://docs.python.org/3/',\n 'http://python.org/',\n 'https://docs.python.org/3/tutorial/introduction.html',\n 'https://docs.python.org/3/library/concurrent.futures.html']\n\n\nwith ThreadPoolExecutor() as executor:\n for url in urls:\n r = executor.submit(fetch, url)\n print(f'Длина страницы {url} - {r.result()} символов')\n","sub_path":"DZ_1509_Mnogopotochnost.py","file_name":"DZ_1509_Mnogopotochnost.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"133939280","text":"from myhdl import block, instance, delay\n\n@block\ndef ClkDriver(clk, period=20):\n time_low = period//2\n time_high = period - time_low\n\n @instance\n def drive_clk():\n while True:\n yield delay(time_low)\n clk.next = 1\n yield delay(time_high)\n clk.next = 0\n \n return drive_clk","sub_path":"myhdl/clk.py","file_name":"clk.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"424554211","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os, re\nfrom Base import Base\nfrom sqlalchemy import Column, Integer, String, DateTime, ForeignKey\nfrom sqlalchemy.orm import relationship\nfrom datetime import datetime\nfrom decimal import *\n\nclass Location(Base):\n\n __tablename__ = 'locations'\n\n id = Column(Integer, primary_key=True)\n nmea = Column(String(255), nullable=False)\n device_id = Column(Integer, ForeignKey('devices.id'), nullable=False)\n created_at = Column(DateTime, nullable=False)\n updated_at = Column(DateTime, nullable=False)\n\n device = relationship(\"Device\",\n foreign_keys=\"Location.device_id\",\n backref=\"locations\")\n\n def __init__(self, nmea, device_id):\n timestamp = datetime.now()\n self.nmea = nmea\n self.device_id = device_id\n self.created_at = timestamp\n self.updated_at = timestamp\n\n @staticmethod\n def nmea_to_deg(nmea):\n \"\"\"\n Convert NMEA Spefication to Lat Long\n $GPRMC,194509.000,A,4042.6142,N,07400.4168,W,2.03,221.11,160412,,,A*77\n Lat is specified in DDMM.MMMM\n Lng is specified in DDDMM.MMMM\n \"\"\"\n search_string = \"\"\n negation = -1 if nmea[1] == \"S\" or nmea[1] == \"W\" else 1\n if nmea[1] == \"E\" or nmea[1] == \"W\":\n search_string = '^([\\d]{3})(.*)'\n elif nmea[1] == \"N\" or nmea[1] == \"S\":\n search_string = '^([\\d]{2})(.*)'\n else:\n raise\n conversion = re.compile(search_string)\n deg = int(conversion.match(nmea[0]).group(1))\n minutes = float(conversion.match(nmea[0]).group(2))/60\n return (deg + minutes)*negation\n\n def speed(self):\n \"\"\"\n Find the speed in the NMEA text\n $GPRMC,194509.000,A,4042.6142,N,07400.4168,W,2.03,221.11,160412,,,A*77\n \"\"\"\n speed_knots = self.nmea.split(',')[7]\n speed_mph = '{0:.2f}'.format(Decimal(speed_knots) * Decimal(1.15078))\n return speed_mph\n\n def is_active(self):\n \"\"\"\n Find the status in the NMEA text\n $GPRMC,194509.000,A,4042.6142,N,07400.4168,W,2.03,221.11,160412,,,A*77\n \"\"\"\n status = self.nmea.split(',')[2]\n if status == \"V\":\n return False\n elif status == \"A\":\n return True\n\n \n\n def lat_lng(self):\n try:\n raw = self.nmea.split(',')\n return ( \n self.nmea_to_deg((raw[3], raw[4])), \n self.nmea_to_deg((raw[5], raw[6])) \n )\n except:\n return 
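One thing to note in the download loop above: it calls `r.result()` immediately after each `submit`, which blocks until that download finishes, so the fetches end up running one at a time after all. Submitting everything first, or using `executor.map` as below, keeps them concurrent (the byte-counting stands in for the urlretrieve logic):

```python
from concurrent.futures import ThreadPoolExecutor
from urllib.request import urlopen

def fetch_len(url):
    with urlopen(url) as resp:
        return len(resp.read())

urls = ['https://docs.python.org/3/', 'https://www.python.org/']

with ThreadPoolExecutor() as executor:
    # map schedules all jobs up front and yields results in input order
    for url, size in zip(urls, executor.map(fetch_len, urls)):
        print(f'{url}: {size} bytes')
```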
None\n","sub_path":"gpsweb/models/Location.py","file_name":"Location.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"331420047","text":"from django.conf.urls import url\nimport views\n\nurlpatterns = [\n url(r'^$', views.cart),\n url(r'^add(\\d+)_(\\d+)/$', views.add),\n url(r'amount(\\d+)_(\\d+)/$', views.amount),\n url(r'delete(\\d+)/$',views.delete),\n url(r'^order/$',views.order),\n]","sub_path":"test9/df_cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"511445209","text":"# Core.py<--Chip8Emulator ;; Zedd Serjeant\n\"\"\"\nThis is the core of the Chip-8 Emulation project. It contains all of the\nprocessor stuff.\nWebsites with information on the chip-8 structure:\nhttp://www.multigesture.net/articles/how-to-write-an-emulator-chip-8-interpreter/\nhttp://www.multigesture.net/wp-content/uploads/mirror/goldroad/chip8.shtml\nhttp://en.wikipedia.org/wiki/CHIP-8#\nhttp://mattmik.com/chip8.html\nhttp://devernay.free.fr/hacks/chip8/\nhttp://s4.zetaboards.com/wow/topic/9663604/1/\nhttp://stackoverflow.com/questions/6619882/decoding-and-matching-chip-8-opcodes-in-c-c\n\"\"\"\n\nfrom random import randint\nimport pygame\nfrom pygame.locals import *\n\nDEBUGGING = 1\ndef DEBUG(string):\n \"Print Verbose data for debugging. Disable with DEBUGGING flag.\"\n if DEBUGGING:\n print(string)\n\nclass CPU():\n #There are 2 versions of opcode 8XY6 and 8XYE. Set this flag\n # for the legacy version\n legacy = 0\n instructions_executed = 0 #Timer counts down every 14 instructions\n #The variables that will be used\n opcode = 0x0000\n memory = bytearray(4095)\n #Graphics\n graphics = [[0]*64 for i in range(32)] #64x32 pixels, for graphics.\n draw_graphics = 0\n V = bytearray(16) #Registers V0..VF\n #Index and preogram counter. Both store 0x000..0xFFF\n I = 0x000\n PC = 0x200\n #Timers\n delay_timer = 0\n sound_timer = 0\n #Stack\n stack = []\n #Key States\n key_states = [0]*16\n key_pressed = 0 #Set to a key if a key has been pressed this cycle\n #The FontSet. 
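`nmea_to_deg` in the Location model above converts NMEA ddmm.mmmm / dddmm.mmmm into decimal degrees: whole degrees plus minutes over 60, negated for S/W. A worked standalone version, checked against the coordinates in the model's own docstring:

```python
def nmea_to_deg(value, hemisphere):
    """Convert an NMEA coordinate string to signed decimal degrees."""
    deg_digits = 3 if hemisphere in ('E', 'W') else 2  # longitude has 3 degree digits
    degrees = int(value[:deg_digits])
    minutes = float(value[deg_digits:]) / 60
    sign = -1 if hemisphere in ('S', 'W') else 1
    return sign * (degrees + minutes)

# from $GPRMC,...,4042.6142,N,07400.4168,W,...
print(nmea_to_deg('4042.6142', 'N'))   # ~40.7102
print(nmea_to_deg('07400.4168', 'W'))  # ~-74.0069
```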
Has a sprite for every HEX character\n font_set = [\n 0xF0, 0x90, 0x90, 0x90, 0xF0, #0\n 0x20, 0x60, 0x20, 0x20, 0x70, #1\n 0xF0, 0x10, 0xF0, 0x80, 0xF0, #2\n 0xF0, 0x10, 0xF0, 0x10, 0xF0, #3\n 0x90, 0x90, 0xF0, 0x10, 0x10, #4\n 0xF0, 0x80, 0xF0, 0x10, 0xF0, #5\n 0xF0, 0x80, 0xF0, 0x90, 0xF0, #6\n 0xF0, 0x10, 0x20, 0x40, 0x40, #7\n 0xF0, 0x90, 0xF0, 0x90, 0xF0, #8\n 0xF0, 0x90, 0xF0, 0x10, 0xF0, #9\n 0xF0, 0x90, 0xF0, 0x90, 0x90, #A\n 0xE0, 0x90, 0xE0, 0x90, 0xE0, #B\n 0xF0, 0x80, 0x80, 0x80, 0xF0, #C\n 0xE0, 0x90, 0x90, 0x90, 0xE0, #D\n 0xF0, 0x80, 0xF0, 0x80, 0xF0, #E\n 0xF0, 0x80, 0xF0, 0x80, 0x80] #F\n\n def initialize(self):\n #Load Fontset\n for i in range(0x00, 0x4F):\n self.memory[i] = self.font_set[i]\n\n def loadFile(self, name):\n file = open(name, \"rb\", buffering=0)\n data = file.readall()\n self.memory[0x200:len(data)+0x200] = data\n\n def emulateCycle(self):\n #Fetch opcode\n self.draw_graphics = 0\n opcode = self.memory[self.PC] << 8 | self.memory[self.PC+1]\n nibb1 = (opcode & 0xF000) >> 12 #First 4 bits of opcode\n nibb2 = X = (opcode & 0x0F00) >> 8 #For VX\n nibb3 = Y = (opcode & 0x00F0) >> 4 #For VY\n nibb4 = N = (opcode & 0x000F)\n NNN = (opcode & 0x0FFF)\n NN = (opcode & 0x00FF)\n VX = self.V[nibb2]\n VY = self.V[nibb3]\n #Notes:\n #NI = Next Instruction\n #LSB = Least Significant Bit\n #MSB = Most Significan Bit\n if nibb1 == 0x0:\n if opcode == 0x0000:\n DEBUG(\"NULL\")\n self.PC += 2\n elif nibb2 > 0x0: #0NNN: Execute System Code. Not used\n DEBUG(\"The confusing opcode was executed\")\n self.PC += 2\n elif nibb4 == 0x0: #00E0: Clear the screen\n DEBUG(\"ClearScreen\") ##Debug\n self.graphics = [[0]*64 for i in range(32)]\n self.draw_screen = 1\n self.PC += 2\n elif nibb4 == 0xE: #00EE: Return from subroutine\n DEBUG(\"Return from subroutine\") ##Debug\n try:\n self.PC = self.stack.pop()\n except IndexError:\n DEBUG(\"ERROR: Attempt to return from subroutine when no \\\n call was made.\")\n return \"Exit\"\n else: DEBUG(\"unknown opcode\")\n elif nibb1 == 0x1: #1NNN: Jumps to NNN\n DEBUG(\"Jump to {0}\".format(NNN)) ##Debug\n self.PC = NNN\n elif nibb1 == 0x2: #2NNN: call subroutine at NNN\n DEBUG(\"Call {0}\".format(hex(NNN))) ##Debug\n self.stack.append(self.PC + 2)\n self.PC = NNN\n elif nibb1 == 0x3: #3XNN: skip NI if VX == NN\n DEBUG(\"Skip NI if V{0} == {1}\".format(hex(X),hex(NN))) ##Debug\n if VX == NN: self.PC += 4 #skip\n else: self.PC += 2\n elif nibb1 == 0x4: #4XNN: skip NI if VX != NN\n DEBUG(\"Skip NI if V{0} != {1}\".format(hex(X),hex(NN))) ##Debug\n if VX != NN: self.PC += 4 #skip\n else: self.PC += 2\n elif nibb1 == 0x5: #5XY0: skip NI if VX == VY\n DEBUG(\"Skip NI if V{0}[{1}] == V{2}[{3}]\".format(hex(X),hex(VX),hex(Y),hex(VY)))\n if VX == VY: self.PC += 4 #skip\n else: self.PC += 2\n elif nibb1 == 0x6: #6XNN: set VX to NN\n DEBUG(\"Set V{0} to {1}\".format(hex(X), hex(NN))) ##Debug\n self.V[X] = NN\n self.PC += 2\n elif nibb1 == 0x7: #7XNN: Adds NN to VX\n DEBUG(\"Add {0} to V{1}\".format(hex(NN), hex(X))) ##Debug\n if VX + NN > 0xFF:\n self.V[X] = NN - (0xFF - VX) #Carry. 
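Two details of the arithmetic above deserve spelling out: the nibble masks that decode each 16-bit opcode, and 8-bit wraparound, which is modulo 256, so carry adjustments like `NN - (0xFF - VX)` and `total -= 255` land one short (255 + 2 should wrap to 1, not 2). A sketch of both done with masks:

```python
def decode(opcode):
    """Split a 16-bit CHIP-8 opcode into its four nibbles."""
    return ((opcode & 0xF000) >> 12,
            (opcode & 0x0F00) >> 8,
            (opcode & 0x00F0) >> 4,
            opcode & 0x000F)

def add8(vx, nn):
    """8-bit register add: returns (result, carry). Wraps modulo 256."""
    total = vx + nn
    return total & 0xFF, 1 if total > 0xFF else 0

print(decode(0x8124))    # (8, 1, 2, 4)
print(add8(0xFF, 0x02))  # (1, 1)
```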
No flag is set\n else:\n self.V[X] += NN\n self.PC += 2\n elif nibb1 == 0x8:\n if nibb4 == 0x0: #8XY0: sets VX to VY\n DEBUG(\"Set V{0} to V{1}[{2}]\".format(hex(X),hex(Y),hex(VY))) ##Debug\n self.V[X] = VY\n self.PC += 2\n elif nibb4 == 0x1: #8XY1: set VX to (VX or VY)\n DEBUG(\"Set V{0} to (V{0}[{1}] or V{2}[{3}])\".format(hex(X),hex(VX),hex(Y),hex(VY))) ##Debug\n self.V[X] = VX | VY\n self.PC += 2\n elif nibb4 == 0x2: #8XY2: set VX to (VX and VY)\n DEBUG(\"Set V{0} to (V{0}[{1}] and V{2}[{3}])\".format(hex(X),hex(VX),hex(Y),hex(VY))) ##Debug\n self.V[X] = VX & VY\n self.PC += 2\n elif nibb4 == 0x3: #8XY3: set VX to (VX xor VY)\n DEBUG(\"Set V{0} to (V{0}[{1}] xor V{2}[{3}])\".format(hex(X),hex(VX),hex(Y),hex(VY))) ##Debug\n self.V[X] = VX ^ VY\n self.PC += 2\n elif nibb4 == 0x4: #8XY4: add VY to VX. Sets VF for carry\n DEBUG(\"Add V{0}[{1}] to V{2}[{3}]\".format(hex(Y),hex(VY),hex(X),hex(VX))) ##Debug\n total = VX + VY\n if total > 255:\n self.V[0xF] = 1 # carry\n total -= 255\n else: self.V[0xF] = 0\n self.V[X] = total\n self.PC += 2\n elif nibb4 == 0x5: #8XY5: minus VY from VX. Unsets VF when borrow\n DEBUG(\"Minus V{0}[{1}] from V{2}[{3}]\".format(hex(Y),hex(VY),hex(X),hex(VX))) ##Debug\n #This code may be wrong, but most likely isn't\n total = VX - VY\n if total < 0:\n self.V[0xF] = 0 #borrow\n total += 255\n else: self.V[0xF] = 1\n self.V[X] = total\n self.PC += 2\n elif nibb4 == 0x6: #8XY6: ...\n #Legacy: VF = LSB of VY. VX = (VY >> 1)\n #Modern: VF = LSB of VX. VX = (VX >> 1)\n #Set the legacy flag for the legacy version\n if self.legacy:\n DEBUG(\"Shift V{0} to the right. Store in V{1}\".format(hex(Y),hex(X)))\n self.V[0xF] = int(bin(VY)[-1]) #getting LSB. Tricky in Python.\n self.V[X] = VY >> 1\n else: #Use the modern version\n DEBUG(\"Shift V{0} to the right. Store in V{0}\".format(hex(X)))\n self.V[0xF] = int(bin(VX)[-1])\n self.V[X] >>= 1\n self.PC += 2\n elif nibb4 == 0x7: #8XY7: VX = (VY-VX). VF = 0 when borrow\n #This code may be wrong, but likely isn't\n DEBUG(\"V{0} = V{1}[{2}] - V{0}[{3}]\".format(hex(X),hex(Y),hex(VY),hex(VX)))\n total = VY - VX\n if total < 0:\n self.V[0xF] = 0 #borrow\n total += 255\n else: self.V[0xF] = 1\n self.V[X] = total\n self.PC += 2\n elif nibb4 == 0xE: #8XYE: ...\n #Legacy: VF = MSB of VY. VX = (VY << 1)\n #Modern: VF = MSB of VX. VX = (VX << 1)\n #Set the legacy flag for the legacy version\n if self.legacy:\n DEBUG(\"Shift V{0} to the left. Store in V{1}\".format(hex(Y),hex(X)))\n self.V[0xF] = int(bin(VY)[0])\n self.V[X] = VY << 1\n else: #Modern version\n DEBUG(\"Shift V{0} to the left. 
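The shift opcodes pull bits out of `bin()` strings: `bin(v)[-1]` does give the low bit, but `bin(v)[0]` (used for VF in the 8XYE branches) is always the '0' of the '0b' prefix, so the shifted-out high bit is never recorded. Bit masks avoid both the string round-trip and the bug (registers are 8-bit):

```python
def lsb(v):
    return v & 1          # low bit, for 8XY6

def msb8(v):
    return (v >> 7) & 1   # high bit of an 8-bit register, for 8XYE

print(lsb(0b10110101), msb8(0b10110101))  # 1 1
print(msb8(0b00110101))                   # 0
```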
Store in V{0}\".format(hex(X)))\n self.V[0xF] = int(bin(VX)[0])\n self.V[X] <<= 1\n self.PC += 2\n else:\n DEBUG(\"unknown Opcode\")\n self.PC += 2\n elif nibb1 == 0x9: #9XY0: Skips NI if VX != VY\n DEBUG(\"Skip NI if V{0}[{1}] != V{2}[{3}]\".format(hex(X),hex(VX),hex(Y),hex(VY)))\n if VX != VY: self.PC += 4 #skip\n else: self.PC += 2\n elif nibb1 == 0xA: #ANNN: Sets I to address NNN\n DEBUG(\"Set I to {0}\".format(hex(NNN))) ##Debug\n self.I = NNN\n self.PC += 2\n elif nibb1 == 0xB: #BNNN: Jumps to NNN plus V0\n DEBUG(\"Jump to {0} plus V0[{1}]\".format(hex(NNN),hex(self.V[0x0])))\n address = NNN + self.V[0x0]\n if address > 0xFFF: address -= 0xFFF\n self.PC = address\n elif nibb1 == 0xC: #CXNN: VX = (RandomNumber & NN)\n DEBUG(\"Set V{0} to RandNumber masked by {1}\".format(hex(X),hex(NN)))\n r = randint(0x00, 0xFF) & NN\n self.V[X] = r\n self.PC += 2\n elif nibb1 == 0xD: #DXYN: Draw sprite data at (VX,VY) starting from I\n DEBUG(\"Draw sprite at V{0}[{3}], V{1}[{4}] :: {2} rows high\".format(hex(X),hex(Y),N,VX,VY)) ##Debug\n self.V[0xF] = 0\n for yline in range(N): #N is the height\n pixel = self.memory[self.I + yline]\n for xline in range(8):\n if pixel & (0x80 >> xline) != 0:\n try:\n if self.graphics[VY+yline][VX+xline] == 1:\n self.V[0xF] = 1\n except IndexError:\n continue\n self.graphics[VY+yline][VX+xline] ^= 1\n self.draw_graphics = 1\n self.PC += 2\n #Old Code:\n \"\"\"\n self.V[0xF] = 0\n for yline in range(N): #N is height\n pixel_string = bin(self.memory[self.I + yline])[2:] #string of bits\n xline = 0\n for bit in pixel_string:\n if int(bit):\n if self.graphics[VY+yline][VX+xline]:\n self.V[0xF] = 1 #collision\n self.graphics[VY+yline][VX+xline] ^= int(bit)\n xline += 1\n self.draw_graphics = 1\n self.PC += 2\n \"\"\"\n elif nibb1 == 0xE:\n if NN == 0x9E: #EX9E: Skip NI if key stored in VX is pressed\n DEBUG(\"Skip NI if key V{0}[{1}] is pressed - {2}\".format(hex(X),hex(VX),self.key_states[VX]))\n if self.key_states[VX]:\n self.PC += 4\n else:\n self.PC += 2\n elif NN == 0xA1: #EXA1: Skip NI if key stored in VX is not pressed\n DEBUG(\"Skip NI if key V{0}[{1}] is not pressed - {2}\".format(hex(X),hex(VX),self.key_states[VX]))\n if not self.key_states[VX]:\n self.PC += 4\n else:\n self.PC += 2\n else:\n DEBUG(\"Opcode Unknown\")\n self.PC += 2\n elif nibb1 == 0xF:\n if NN == 0x07: #FX07: Store DelayTimer in VX\n DEBUG(\"Store DelayTimer[{0}] in V{1}\".format(self.delay_timer,hex(X))) ##Debug\n self.V[X] = self.delay_timer\n self.PC += 2\n elif NN == 0x0A: #FX0A: Await keypress, then store result in VX\n #Possibly buggy. Reacts if key has been pressed the same\n # cycle as this instruction. Maybe it should react to keypresses\n # after the cycle this runs.\n DEBUG(\"Await Keypress\")\n if self.key_pressed:\n self.V[X] = self.key_pressed\n self.PC += 2\n #Execution is halted until keypress, so nothing else happens. 
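The DXYN handler above XORs sprite bits onto the screen and sets VF when a lit pixel is erased; pixels falling off the edge are skipped via the try/except. The same loop isolated, with an explicit bounds check instead of the exception:

```python
def draw_sprite(screen, sprite_rows, vx, vy):
    """XOR sprite_rows (one byte per row) onto screen; return the collision flag."""
    collision = 0
    for dy, row in enumerate(sprite_rows):
        for dx in range(8):
            if row & (0x80 >> dx):
                x, y = vx + dx, vy + dy
                if 0 <= y < len(screen) and 0 <= x < len(screen[0]):
                    if screen[y][x]:
                        collision = 1   # erasing a lit pixel sets VF
                    screen[y][x] ^= 1
    return collision

screen = [[0] * 64 for _ in range(32)]
print(draw_sprite(screen, [0xF0, 0x90, 0x90, 0x90, 0xF0], 10, 5))  # the '0' glyph; prints 0
```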
\n elif NN == 0x15: #FX15: Set DelayTimer to VX\n DEBUG(\"Set Delay Timer to V{0}[{1}]\".format(hex(X),hex(VX))) ##Debug\n self.delay_timer = VX\n self.PC += 2\n elif NN == 0x18: #FX18: Set SoundTimer to VX\n DEBUG(\"Set Sound Timer to V{0}[{1}]\".format(hex(X),hex(VX))) ##Debug\n self.sound_timer = VX\n self.PC += 2\n elif NN == 0x1E: #FX1E: I += VX\n DEBUG(\"I += V{0}[{1}]\".format(hex(X),hex(VX)))\n if self.I + VX > 0xFFF:\n self.I = (VX - (0xFFF - self.I)) #rollover\n self.V[0xF] = 1 #set the carry flag\n else:\n self.I += VX\n self.PC += 2\n elif NN == 0x29: #FX29: Set I to fontset data at VX\n DEBUG(\"Set I to sprite V{0}[{1}]\".format(hex(X),hex(VX))) ##Debug\n self.I = VX * 5\n self.PC += 2\n elif NN == 0x33: #FX33: Stores VX at [I,I+1,I+2] as BCD\n DEBUG(\"Store BCD of V{0}[{1}][{2}]\".format(hex(X),hex(VX),VX)) ##Debug\n data = str(VX)\n if len(data) == 1:\n data = \"00\"+data\n elif len(data) == 2:\n data = \"0\"+data\n self.memory[self.I] = int(data[0])\n self.memory[self.I + 1] = int(data[1])\n self.memory[self.I + 2] = int(data[2])\n DEBUG(str(self.memory[self.I]) +\" :: \"+ str(self.memory[self.I + 1]) +\" :: \"+ str(self.memory[self.I + 2]))\n self.PC += 2;\n elif NN == 0x55: #FX55: Stores V0-VX in memory starting with I. I += (X + 1)\n DEBUG(\"Write {0} to disk\".format(\"V{0}[{1}]\".format(hex(i),hex(self.V[i])) for i in range(X))) ##Debug\n for i in range(X+1):\n self.memory[self.I + i] = self.V[i]\n self.I = self.I+X+1\n self.PC += 2\n elif NN == 0x65: #FX65: Fills V0-VX from memory stating with I. I += (X + 1)\n DEBUG(\"Read {0} data from disk\".format(hex(X)))\n DEBUG(str(self.memory[self.I]) +\" :: \"+ str(self.memory[self.I + 1]) +\" :: \"+ str(self.memory[self.I + 2]))\n for i in range(X+1):\n self.V[i] = self.memory[self.I + i]\n self.I = self.I+X+1\n self.PC += 2\n else:\n DEBUG(\"Opcode Unknown\")\n self.PC += 2\n else:\n DEBUG(\"Unknown opcode :: {0}\".format(opcode))\n self.PC += 2\n\n #Timers\n if self.delay_timer > 0:\n if self.instructions_executed == 0:\n self.delay_timer -= 1\n if self.sound_timer > 0:\n print(\"BEEP!\")\n if self.instructions_executed == 0:\n self.sound_timer -= 1\n #key_pressed flag is reset\n self.key_pressed = 0\n #Count amount of instructions executed\n self.instructions_executed += 1\n if self.instructions_executed == 14:\n self.instructions_executed = 0\n\ndef main(name):\n #Initialize pygame\n pygame.init()\n #64x32 resolution. 
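FX33 above extracts hundreds/tens/ones by zero-padding a string; `divmod` gives the same three digits without the text round-trip:

```python
def bcd(value):
    """Split 0..255 into (hundreds, tens, ones) for CHIP-8's FX33."""
    hundreds, rest = divmod(value, 100)
    tens, ones = divmod(rest, 10)
    return hundreds, tens, ones

print(bcd(7))    # (0, 0, 7)
print(bcd(254))  # (2, 5, 4)
```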
Each CHIP-8 pixel is a 10x10 PC pixel\n screen = pygame.display.set_mode((640, 320))\n pygame.display.set_caption(name)\n clock = pygame.time.Clock()\n #Initalize CHIP-8\n chip8 = CPU()\n chip8.initialize()\n chip8.loadFile(name)\n while 1:\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_1: #Key 1\n chip8.key_states[0x1] = 1\n chip8.key_pressed = 0x1\n elif event.key == K_2: #Key 2\n chip8.key_states[0x2] = 1\n chip8.key_pressed = 0x2\n elif event.key == K_3: #Key 3\n chip8.key_states[0x3] = 1\n chip8.key_pressed = 0x3\n elif event.key == K_4: #Key C\n chip8.key_states[0xC] = 1\n chip8.key_pressed = 0xC\n elif event.key == K_q: #Key 4\n chip8.key_states[0x4] = 1\n chip8.key_pressed = 0x4\n elif event.key == K_w: #Key 5\n chip8.key_states[0x5] = 1\n chip8.key_pressed = 0x5\n elif event.key == K_e: #Key 6\n chip8.key_states[0x6] = 1\n chip8.key_pressed = 0x6\n elif event.key == K_r: #Key D\n chip8.key_states[0xD] = 1\n chip8.key_pressed = 0xD\n elif event.key == K_a: #Key 7\n chip8.key_states[0x7] = 1\n chip8.key_pressed = 0x7\n elif event.key == K_s: #Key 8\n chip8.key_states[0x8] = 1\n chip8.key_pressed = 0x8\n elif event.key == K_d: #Key 9\n chip8.key_states[0x9] = 1\n chip8.key_pressed = 0x9\n elif event.key == K_f: #Key E\n chip8.key_states[0xE] = 1\n chip8.key_pressed = 0xE\n elif event.key == K_z: #Key A\n chip8.key_states[0xA] = 1\n chip8.key_pressed = 0xA\n elif event.key == K_x: #Key 0\n chip8.key_states[0x0] = 1\n chip8.key_pressed = 0x0\n elif event.key == K_c: #Key B\n chip8.key_states[0xB] = 1\n chip8.key_pressed = 0xB\n elif event.key == K_v: #Key F\n chip8.key_states[0xF] = 1\n chip8.key_pressed = 0xF\n elif event.type == KEYUP:\n if event.key == K_1: #Key 1\n chip8.key_states[0x1] = 0\n elif event.key == K_2: #Key 2\n chip8.key_states[0x2] = 0\n elif event.key == K_3: #Key 3\n chip8.key_states[0x3] = 0\n elif event.key == K_4: #Key C\n chip8.key_states[0xC] = 0\n elif event.key == K_q: #Key 4\n chip8.key_states[0x4] = 0\n elif event.key == K_w: #Key 5\n chip8.key_states[0x5] = 0\n elif event.key == K_e: #Key 6\n chip8.key_states[0x6] = 0\n elif event.key == K_r: #Key D\n chip8.key_states[0xD] = 0\n elif event.key == K_a: #Key 7\n chip8.key_states[0x7] = 0\n elif event.key == K_s: #Key 8\n chip8.key_states[0x8] = 0\n elif event.key == K_d: #Key 9\n chip8.key_states[0x9] = 0\n elif event.key == K_f: #Key E\n chip8.key_states[0xE] = 0\n elif event.key == K_z: #Key A\n chip8.key_states[0xA] = 0\n elif event.key == K_x: #Key 0\n chip8.key_states[0x0] = 0\n elif event.key == K_c: #Key B\n chip8.key_states[0xB] = 0\n elif event.key == K_v: #Key F\n chip8.key_states[0xF] = 0\n elif event.type == pygame.QUIT:\n pygame.quit()\n return\n if chip8.draw_graphics:\n #Draw the graphics array\n #DEBUG(chip8.graphics)\n screen.fill((0,0,0))\n for y in range(32):\n for x in range(64):\n if chip8.graphics[y][x]:\n screen.fill((255, 255, 255), rect=(x*10, y*10, 10, 10))\n pygame.display.flip()\n state = chip8.emulateCycle()\n if state == \"Exit\":\n return\n clock.tick(840) #currently clocked at 100%\n \nmain(\"clock.ch8\")\n \n \n","sub_path":"Core.py","file_name":"Core.py","file_ext":"py","file_size_in_byte":20781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"74254246","text":"import tensorflow as tf\nimport numpy as np\nimport random\n\ndef genData():\n a = int(random.uniform(0, 2))\n b = int(random.uniform(0, 2))\n c = a & b;\n return [a,b],[ 1 if c==0 else 0, 1 if c==1 else 0]\n\ndef 
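The event loop above spells out sixteen KEYDOWN branches and sixteen KEYUP branches; a dictionary from pygame key constants to CHIP-8 key codes collapses both. A sketch with the same 1234/qwer/asdf/zxcv layout:

```python
from pygame.locals import (K_1, K_2, K_3, K_4, K_q, K_w, K_e, K_r,
                           K_a, K_s, K_d, K_f, K_z, K_x, K_c, K_v)

KEYMAP = {
    K_1: 0x1, K_2: 0x2, K_3: 0x3, K_4: 0xC,
    K_q: 0x4, K_w: 0x5, K_e: 0x6, K_r: 0xD,
    K_a: 0x7, K_s: 0x8, K_d: 0x9, K_f: 0xE,
    K_z: 0xA, K_x: 0x0, K_c: 0xB, K_v: 0xF,
}

def handle_key(chip8, event_key, pressed):
    key = KEYMAP.get(event_key)
    if key is not None:
        chip8.key_states[key] = 1 if pressed else 0
        if pressed:
            chip8.key_pressed = key
```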
getData(size):\n datas=[]\n results=[]\n for i in range(size):\n data,result = genData()\n datas.append(data)\n results.append(result)\n return np.array(datas).reshape([size,2]).astype(np.float32),np.array(results).reshape([size,2]).astype(np.float32)\n\ndef train(trainData,inferData):\n with tf.Graph().as_default() as g:\n y = tf.placeholder(\"float32\",shape=[None,2],name=\"output\")\n x,y_= infer()\n cross_entropy = -tf.reduce_sum(y * tf.log(y_))\n train_step = tf.train.AdamOptimizer(0.01).minimize(cross_entropy)\n init_op = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init_op)\n for i in range(20000):\n cross,_= sess.run([cross_entropy,train_step],feed_dict={x:trainData,y:inferData})\n if i % 10000 == 0 :\n print(\"%d : cross_entropy : %s\" % (i,cross))\n saver = tf.train.Saver()\n saver.save(sess, \"data/save-and.ckpt\")\n graph_def = g.as_graph_def()\n tf.train.write_graph(graph_def, 'data/', 'expert-graph-and.pb', as_text=False)\n\ndef infer():\n x = tf.placeholder(\"float32\",shape=[None,2],name=\"input\")\n\n W = tf.Variable(tf.zeros([2,2]))\n b = tf.Variable(tf.zeros([2]))\n\n logits = tf.matmul(x,W)+b;\n\n y_ = tf.nn.softmax(logits,name=\"output\")\n return x,y_\n\n\ndef inference(x):\n with tf.gfile.FastGFile(\"data/expert-graph-and.pb\", 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')\n init_op = tf.global_variables_initializer();\n #x,y_= infer()\n #saver = tf.train.Saver()\n with tf.Session() as sess:\n sess.run(init_op)\n #saver.restore(sess, tf.train.latest_checkpoint('data'))\n y_conv = sess.graph.get_tensor_by_name('output:0')\n result = sess.run(y_conv,feed_dict={'input:0':np.array([x])})\n #result = sess.run(y_conv,feed_dict={x:np.array([[1,1]])})\n return result\n\n#x,y = getData(100)\n#print(x)\n#print(y)\n#train(x,y)\n#print(inference([1,1]))\n\n#tf.one_hot()\n\na = np.array([[1,2],\n [2,3],\n [3,1]])\n\nb = np.array([[1,2],\n [2,3],\n [3,1]])\nprint(a/b)","sub_path":"HelloWord/hello/MyLearn-1.py","file_name":"MyLearn-1.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"237914388","text":"\r\nclass people:\r\n name = 'jack'\r\n\r\np = people() # 实例属性不需要在类中显示定义\r\np.age =12\r\nprint (p.name) #正确\r\nprint (p.age ) #正确\r\n\r\nprint (people.name ) #正确 类属性\r\n\r\ntry:\r\n print (people.age ) #错误 实例属性\r\nexcept AttributeError:\r\n print(\"no this attribution\")\r\n\r\nif hasattr(people, 'age'): # 检查实例是否有这个属性\r\n print(people.age)\r\nelse:\r\n print(\"No this attribution\")\r\n\r\n\r\nprint(\"*\"*50)\r\n\r\n\r\n# @classmethod装饰器来创建类方法:\r\n# 一个只在类中运行而不在实例中运行的方法.\r\n# classmethod 是和一个class相关的方法,可以通过类或类实例调用,并将该class对象(不是class的实例对象)隐式地 当作第一个参数传入。\r\n# 就这种方法可能会比较奇怪一点,\r\n# 不过只要你搞清楚了python里class也是个真实地存在于内存中的对象,\r\n# 而不是静态语言中只存在于编译期间的类型。\r\n\r\n# @staticmethod\r\n# 经常有一些跟类有关系的功能但在运行时又不需要实例和类参与的情况下需要用到静态方法.\r\n# 比如更改环境变量或者修改其他类的属性等能用到静态方法.\r\n# staticmethod 基本上和一个全局函数差不多,只不过可以通过类或类的实例对象来调用而已, 不会隐式地传入任何参数。这个和静态语言中的静态方法比较像。\r\n\r\n\r\nclass Kls(object):\r\n no_inst = 0\r\n def __init__(self):\r\n Kls.no_inst = Kls.no_inst + 1\r\n @classmethod\r\n def get_no_of_instance(cls_obj):\r\n return cls_obj.no_inst\r\nik1 = Kls()\r\nik2 = Kls()\r\nik3 = Kls()\r\nprint (ik1.get_no_of_instance())\r\nprint (Kls.get_no_of_instance())\r\n\r\nprint(\"*\"*50)\r\n\r\n\r\n# 静态方法和在普通的非class的method作用是一样的,只不过是命名空间是在类里面。一般使用场景就是和类相关的操作,但是又不会依赖和改变类、实例的状态。\r\n\r\nclass Utility(object):\r\n @staticmethod\r\n def 
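A subtlety in the graph above: `train` creates a placeholder named "output" and `infer` then names the softmax "output" too; TensorFlow 1.x uniquifies the second to "output_1", so the tensor later fetched as 'output:0' from the saved GraphDef is the label placeholder, not the prediction. Giving the two distinct names sidesteps this; a sketch assuming TF 1.x, as the script uses:

```python
import tensorflow as tf  # TF 1.x assumed, matching the script's APIs

x = tf.placeholder("float32", shape=[None, 2], name="input")
y = tf.placeholder("float32", shape=[None, 2], name="labels")  # not "output"

W = tf.Variable(tf.zeros([2, 2]))
b = tf.Variable(tf.zeros([2]))
y_ = tf.nn.softmax(tf.matmul(x, W) + b, name="output")         # the only "output"
```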
list_all_files_in_dir(dir_path):\r\n ...\r\n\r\n# 上面的代码就是将project里面的所有公共使用的工具函数都归为一类,建立一个Utility类,将所有的公用方法都堆进去。\r\n# (当然,这个不是通用做法,一般就是放到一个module里面,这里只是举栗子)\r\nclass Kls(object):\r\n def __init__(self, data):\r\n self.data = data\r\n\r\n def printd(self):\r\n print(self.data)\r\n\r\n @staticmethod\r\n def smethod(*arg):\r\n print('Static:', arg)\r\n\r\n @classmethod\r\n def cmethod(*arg):\r\n print('Class:', arg)\r\n\r\nik = Kls(23)\r\nik.printd()\r\nik.smethod()\r\nik.cmethod()\r\ntry:\r\n Kls.printd()\r\nexcept AttributeError:\r\n print(\"no this attribute\")\r\nKls.smethod()\r\nKls.cmethod()\r\n\r\nprint(\"*\"*50)\r\n\r\n\r\n\r\nclass Hello(object):\r\n def hello(self, name='world'):\r\n print('Hello, %s.' % name)\r\n\r\n# >>> from hello import Hello\r\n# >>> h = Hello()\r\n# >>> h.hello()\r\n# Hello, world.\r\n# >>> print(type(Hello))\r\n# \r\n# >>> print(type(h))\r\n# \r\n\r\n# type()函数可以查看一个类型或变量的类型,Hello是一个class,它的类型就是type,而h是一个实例,它的类型就是class Hello。\r\n# 我们说class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。\r\n\r\n# 要创建一个class对象,type()函数依次传入3个参数:\r\n# 1.class的名称;\r\n# 2.继承的父类集合,注意Python支持多重继承,如果只有一个父类,别忘了tuple的单元素写法;\r\n# 3.class的方法名称与函数绑定,这里我们把函数fn绑定到方法名hello上。\r\n#================================================================================================\r\n# **********************************************************************************************************************\r\n# **********************************************************************************************************************\r\n#\r\n# 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。\r\n#\r\n# **********************************************************************************************************************\r\n# **********************************************************************************************************************\r\n# 当我们定义了类以后,就可以根据这个类创建出实例,所以:先定义类,然后创建实例。\r\n# 但是如果我们想创建出类呢?那就必须根据metaclass创建出类,所以:先定义metaclass,然后创建类\r\n# 因此,要控制类的创建行为,就要使用metaclass。\r\n# 连接起来就是:先定义metaclass,就可以创建类,最后创建实例。\r\n# 定义ListMetaclass,按照默认习惯,metaclass的类名总是以Metaclass结尾,以便清楚地表示这是一个metaclass:\r\n\r\n# metaclass是创建类,所以必须从`type`类型派生:\r\nclass ListMetaclass(type):\r\n def __new__(cls, name, bases, attrs):\r\n attrs['add'] = lambda self, value: self.append(value)\r\n return type.__new__(cls, name, bases, attrs)\r\n\r\nclass MyList(list):\r\n __metaclass__ = ListMetaclass # 指示使用ListMetaclass来定制类\r\n\r\n# 当我们写下__metaclass__ = ListMetaclass语句时,魔术就生效了,\r\n# 它指示Python解释器在创建MyList时,要通过ListMetaclass.__new__()来创建,在此,我们可以修改类的定义,比如加上新的方法,然后,返回修改后的定义。\r\n\r\n\r\n# 动态修改有什么意义?直接在MyList定义中写上add()方法不是更简单吗?正常情况下,确实应该直接写,通过metaclass修改纯属变态。\r\n# 但是,总会遇到需要通过metaclass修改类定义的。ORM就是一个典型的例子。\r\n# 要编写一个ORM框架,所有的类都只能动态定义,因为只有使用者才能根据表的结构定义出对应的类来。\r\n# 让我们来尝试编写一个ORM框架。\r\n# 编写底层模块的第一步,就是先把调用接口写出来。比如,使用者如果使用这个ORM框架,想定义一个User类来操作对应的数据库表User,我们期待他写出这样的代码:\r\n\r\n # class User(Model):\r\n # # 定义类的属性到列的映射:\r\n # id = IntegerField('id')\r\n # name = StringField('username')\r\n # email = StringField('email')\r\n # password = StringField('password')\r\n #\r\n # # 创建一个实例:\r\n # u = User(id=12345, name='Michael', email='test@orm.org', password='my-pwd')\r\n # # 保存到数据库:\r\n # u.save()\r\n\r\n# 其中,父类Model和属性类型StringField、IntegerField是由ORM框架提供的,剩下的魔术方法比如save()全部由metaclass自动完成。\r\n# 虽然metaclass的编写会比较复杂,但ORM的使用者用起来却异常简单。\r\n#\r\n# 现在,我们就按上面的接口来实现该ORM。\r\n#\r\n# 首先来定义Field类,它负责保存数据库表的字段名和字段类型:\r\n\r\nclass Field(object):\r\n def __init__(self, name, column_type):\r\n self.name = name\r\n self.column_type = column_type\r\n def __str__(self):\r\n return '<%s:%s>' % 
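The prose above describes creating a class at runtime with the three-argument form of `type()`: the class name, a tuple of bases, and a dict binding attribute names to values. That is exactly the Hello example it walks through:

```python
def fn(self, name='world'):
    print('Hello, %s.' % name)

Hello = type('Hello', (object,), {'hello': fn})  # name, bases, attribute dict
h = Hello()
h.hello()                    # Hello, world.
print(type(Hello), type(h))  # <class 'type'> <class '__main__.Hello'>
```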
(self.__class__.__name__, self.name)\r\n\r\n# 在Field的基础上,进一步定义各种类型的Field,比如StringField,IntegerField等等:\r\n\r\nclass StringField(Field):\r\n def __init__(self, name):\r\n super(StringField, self).__init__(name, 'varchar(100)')\r\n\r\nclass IntegerField(Field):\r\n def __init__(self, name):\r\n super(IntegerField, self).__init__(name, 'bigint')\r\n\r\n# 下一步,就是编写最复杂的ModelMetaclass了:\r\nclass ModelMetaclass(type):\r\n def __new__(cls, name, bases, attrs):\r\n if name=='Model':\r\n return type.__new__(cls, name, bases, attrs)\r\n mappings = dict()\r\n for k, v in attrs.iteritems():\r\n if isinstance(v, Field):\r\n print('Found mapping: %s==>%s' % (k, v))\r\n mappings[k] = v\r\n for k in mappings.iterkeys():\r\n attrs.pop(k)\r\n attrs['__table__'] = name # 假设表名和类名一致\r\n attrs['__mappings__'] = mappings # 保存属性和列的映射关系\r\n return type.__new__(cls, name, bases, attrs)\r\n\r\nclass Model(dict):\r\n __metaclass__ = ModelMetaclass\r\n\r\n def __init__(self, **kw):\r\n super(Model, self).__init__(**kw)\r\n\r\n def __getattr__(self, key):\r\n try:\r\n return self[key]\r\n except KeyError:\r\n raise AttributeError(r\"'Model' object has no attribute '%s'\" % key)\r\n\r\n def __setattr__(self, key, value):\r\n self[key] = value\r\n\r\n def save(self):\r\n fields = []\r\n params = []\r\n args = []\r\n for k, v in self.__mappings__.iteritems():\r\n fields.append(v.name)\r\n params.append('?')\r\n args.append(getattr(self, k, None))\r\n sql = 'insert into %s (%s) values (%s)' % (self.__table__, ','.join(fields), ','.join(params))\r\n print('SQL: %s' % sql)\r\n print('ARGS: %s' % str(args))\r\n\r\n# 当用户定义一个class User(Model)时,\r\n# Python解释器首先在当前类User的定义中查找__metaclass__,如果没有找到,就继续在父类Model中查找__metaclass__,\r\n# 找到了,就使用Model中定义的__metaclass__的ModelMetaclass来创建User类,\r\n# 也就是说,metaclass可以隐式地继承到子类,但子类自己却感觉不到。\r\n\r\n\r\n# 对象通过提供__call__(slef, [,*args [,**kwargs]])方法可以模拟函数的行为\r\n\r\n\r\nclass foo:\r\n def __call__(self, a, b):\r\n print(a, b)\r\n\r\nx = foo()\r\nx(1, 2)\r\n","sub_path":"a-start/000_basic/000_basics_class.py","file_name":"000_basics_class.py","file_ext":"py","file_size_in_byte":9705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"89191536","text":"'''\n Base系列编码\n 2020 1 21 15:27 \n @焱灬火火火 \n'''\n\nclass Base():\n '''\n Decode:解码\n Encode:编码\n '''\n\n def __init__(self):\n pass\n\n def Output(self, strings):\n '''\n\t\t\t作用:输出数据\n\t\t\targs={\n\t\t\t\tstrings:将输出的数据\n\t\t\t}\n '''\n \n print(strings)\n \n def Mhex(self, a):\n '''\n 将字符转换为2位16进制数\n args{\n a: 要转换的字符\n }\n '''\n a = hex(ord(a))\n\n return a[-2:]\n \n def Encode(self, text = \"\", blank = ' ', mode = 1):\n '''\n 对输入的字符串编码\n args{\n text:要编码的字符串\n blank:分隔符\n mode:模式\n }\n '''\n\n self.code = ''\n\n for i in text:\n self.code += self.Mhex(i) + blank\n \n self.Output(\"BASE16编码:\" + self.code)\n\n def MBin(self, a):\n '''\n 将输入的数字转换为二进制\n args{\n a: 要转换的字符\n }\n '''\n a = ord(a)\n if (a >= ord('a')) and (a <= ord('f')): \n return 10 + a - ord('a')\n elif (a >= ord('0')) and (a <= ord('9')): \n return a - ord('0')\n else : \n return -1\n \n def Decode(self, text = \"\",blank = ' ', mode = 1):\n '''\n 对输入的字符串解码\n args{\n text:要解码的字符串\n mode:模式\n }\n '''\n section = []\n section = text.split(blank)\n self.mcode = ''\n\n for i in section:\n num = 0\n j = 0\n while j < len(i):\n num += pow(16,j) * self.MBin(i[len(i) - j - 1])\n j += 1\n self.mcode += chr(num)\n\n self.Output(\"BASE16解码:\" + self.mcode)\n\nif __name__ == '__main__':\n c = Base()\n c.Encode('abcdefghijkl;')\n c.Decode('61 62 
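`__metaclass__ = ...` as written in this record is Python 2 syntax and is silently ignored by Python 3, where the metaclass is passed as a keyword in the class header (and `attrs.iteritems()` becomes `attrs.items()`). The ListMetaclass example, ported:

```python
class ListMetaclass(type):
    def __new__(cls, name, bases, attrs):
        attrs['add'] = lambda self, value: self.append(value)
        return type.__new__(cls, name, bases, attrs)

class MyList(list, metaclass=ListMetaclass):  # Python 3 spelling
    pass

L = MyList()
L.add(1)
print(L)  # [1]
```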
63 64 65 66 67 68 69 6a 6b 6c 3b ')","sub_path":"python/encode/Base.py","file_name":"Base.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"390687661","text":"#!/usr/bin/env python\nfrom SPAdesPipeline.OLCspades.accessoryFunctions import *\n\n__author__ = 'adamkoziol'\n\n\nclass VtypeResults(object):\n def reporter(self):\n printtime('Creating reports', self.start)\n # Create the report path if necessary\n make_path(self.reportpath)\n # Initialise strings to hold results as well as file names\n detailedheader = 'Strain,MatchName,Vtype,PercentIdentity,AverageCoverage,SNPs,Gaps\\n'\n detailedresults = ''\n detailedresultsfile = '{}/{}_detailedResults.csv'.format(self.reportpath, self.analysistype)\n header = 'Strain,virulenceType\\n'\n results = ''\n resultsfile = '{}/{}_results.csv'.format(self.reportpath, self.analysistype)\n # Populate the strings for each sample\n for sample in self.metadata:\n # Initialise the set containing the virulence types\n virulenceset = set()\n # Ensure that there are results in the sample object\n if 'results' in sample[self.analysistype].datastore:\n if sample[self.analysistype].results != 'NA':\n # Iterate through the results dictionary\n for vtx in sample[self.analysistype].results:\n # Create easier to understand variable names\n percentidentity = sample[self.analysistype].results[vtx].items()[0][0]\n averagedepth = sample[self.analysistype].results[vtx].items()[0][1]\n snps = str(sample[self.analysistype].resultssnp[vtx])\n gaps = str(sample[self.analysistype].resultsgap[vtx])\n # print sample.name, vtx, percentidentity, averagedepth, snps, gaps\n # The alleles have names like stx2a_2_F_4, this splicing yields stx2a\n allelename = vtx[:5] + \"_F\" if \"F\" in vtx else vtx[:5] + \"_R\"\n # Add the alleles to a set\n virulenceset.add(allelename)\n # Populate the detailed results with the appropriate values\n detailedresults += ','.join([sample.name, vtx, allelename, percentidentity,\n averagedepth, snps, gaps])\n # Start a new line for the next set of results\n detailedresults += '\\n'\n # Add an extra line between samples\n detailedresults += '\\n'\n # Initialise the set to hold the vtypes\n vtxset = set()\n for vtx in virulenceset:\n vtype = vtx.split('_')[0]\n # If forward and reverse primers are present (eg stx2c_F and stx2c_R), then add the vtype to the set\n if vtype + '_F' in virulenceset and vtype + '_R' in virulenceset:\n vtxset.add(vtype)\n # Populate the results\n if vtxset:\n results += sample.name + ','\n results += ';'.join(sorted(vtxset))\n results += '\\n'\n else:\n results += sample.name + ','\n results += '-\\n'\n # Set attributes for metadata collection\n sample[self.analysistype].detailedresultsfile = detailedresultsfile\n sample[self.analysistype].resultsfile = resultsfile\n sample[self.analysistype].reportdir = self.reportpath\n sample.general.vtxset = sorted(vtxset)\n sample.general.vtype = ','.join(sorted(vtxset))\n # Open and write the detail and regular reports\n with open(detailedresultsfile, 'wb') as writedetails:\n writedetails.write(detailedheader)\n writedetails.write(detailedresults)\n with open(resultsfile, 'wb') as writeresults:\n writeresults.write(header)\n writeresults.write(results)\n\n def __init__(self, inputobject, analysistype):\n self.path = inputobject.path\n self.sequencepath = inputobject.sequencepath\n self.targetpath = inputobject.customtargetpath\n self.metadata = inputobject.runmetadata.samples\n self.start = 
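`Mhex` above returns `hex(ord(a))[-2:]`, which is fine for code points 16..255 but yields strings like 'xa' below 16 (hex(10) is '0xa'). Zero-padded formatting avoids that, and `int(h, 16)` inverts it; a corrected round-trip sketch:

```python
def encode16(text, sep=' '):
    # '02x' zero-pads: chr(10) becomes '0a' rather than 'xa'
    return sep.join(format(ord(ch), '02x') for ch in text)

def decode16(code, sep=' '):
    return ''.join(chr(int(h, 16)) for h in code.split(sep) if h)

s = 'abc\n'
print(encode16(s))                 # 61 62 63 0a
print(decode16(encode16(s)) == s)  # True
```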
inputobject.starttime\n self.analysistype = analysistype\n self.reportpath = os.path.join(self.path, 'reports')\n # Create the reports\n self.reporter()\n","sub_path":"sipprverse/vtyperesults.py","file_name":"vtyperesults.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"634432708","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport urllib\nimport requests\nimport json\nimport io\nimport imghdr\nimport uuid\nimport os\nimport wikipedia\n\nwikipedia.set_lang(\"ja\")\nny = wikipedia.page(\"New York\")\nprint(ny)\n\nONE_SEARCH_LIMIT = 50\nROOT_URL = 'https://api.datamarket.azure.com/Bing/Search/v1/Image?'\nAPI_KEY = ''\nTIMEOUT = 5\nSAVE_DIR = 'images'\n\ndef main():\n parser = argparse.ArgumentParser(description='Collect image via Bing Search API')\n parser.add_argument('search_word', type=str, help='Search word')\n parser.add_argument('count', type=int, help='Number of collected images counts')\n options = parser.parse_args()\n\n if options.count % ONE_SEARCH_LIMIT != 0:\n raise Exception('number must be divisible by {0}!!'.format(ONE_SEARCH_LIMIT))\n\n for i in range(options.count // ONE_SEARCH_LIMIT):\n offset = i * ONE_SEARCH_LIMIT\n print(offset)\n params= {\n 'Query': \"'{}'\".format(options.search_word),\n 'Market': \"'{}'\".format('ja-JP'),\n '$format': 'json',\n '$top': '{0:d}'.format(ONE_SEARCH_LIMIT),\n '$skip': '{0:d}'.format(offset),\n }\n url = ROOT_URL + urllib.parse.urlencode(params)\n\n response_json = requests.get(url, auth=('', API_KEY))\n response = json.loads(response_json.text)\n\n for result in response['d']['results']:\n image_url = result['MediaUrl']\n try:\n response_image = requests.get(image_url, timeout=TIMEOUT)\n image_binary = response_image.content\n except:\n continue\n\n with io.BytesIO(image_binary) as fh:\n image_type = imghdr.what(fh)\n\n if image_type == 'jpeg':\n extension = '.jpg'\n elif image_type == 'png':\n extension = '.png'\n else:\n continue\n\n filename = str(uuid.uuid4()) + extension\n \n if not os.path.isdir(SAVE_DIR):\n os.mkdir(SAVE_DIR)\n with open(os.path.join(SAVE_DIR, filename), 'wb') as f:\n f.write(image_binary)\n\n\nif __name__ == '__main__':\n main()","sub_path":"wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"351014515","text":"import pandas as pd\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as dates\nimport math\n\ndef sanitize_dataframe(df):\n for column in df.columns[1:]:\n last_known_element = None\n for index, element in enumerate(df[column]):\n if np.isnan(element):\n if last_known_element is not None:\n df.loc[index, column] = last_known_element\n else:\n cur_index = index\n while np.isnan(df.loc[cur_index, column]):\n cur_index += 1\n\n df.loc[index:cur_index, column] = df.loc[cur_index, column]\n else:\n last_known_element = element\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description = 'Summarize csv data to create charts.')\n parser.add_argument('--filepath', type = str, help = 'Filepath to csv.', required = True)\n parser.add_argument('--debug', action = 'store_true', help = 'Debug issues with visusalizer.')\n parser.add_argument('--baseline', help = 'One of [zero, wiggle, weighted_wiggle, sym]')\n\n args = parser.parse_args()\n df = None\n with open(args.filepath, 'r') as f:\n df = 
pd.read_csv(f, parse_dates = ['Date'])\n\n    assert df is not None, \"Data must be provided for visualization to occur.\"\n    sanitize_dataframe(df)\n\n    # plot data\n    fig, ax = plt.subplots(figsize=(15,7))\n    fig.autofmt_xdate()\n    plt.grid(True)\n    ax.yaxis.set_major_formatter(\n        mpl.ticker.FuncFormatter(lambda x, p: '$' + format(int(x / 100), ',')))\n    dates = [pd.Timestamp(date).to_pydatetime() for date in df['Date']]\n    columns = [[float(element) for element in df[column]] for column in df.columns if column != 'Date']\n    baseline = args.baseline  # one of 'zero', 'wiggle', 'weighted_wiggle', 'sym'\n    plot = plt.stackplot(dates, *columns, baseline=baseline, labels = [column for column in df.columns if column != 'Date'])\n    plt.legend(loc = 'upper left')\n    plt.title('Net worth')\n    plt.show()","sub_path":"visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"187176708","text":"import aiohttp\nimport asyncio\nfrom rich import print\n\n\nfrom gql import gql, Client\nfrom gql.transport.aiohttp import AIOHTTPTransport\n\n\nasync def main():\n\n    # async with aiohttp.ClientSession() as session:\n    #     async with session.get('http://51.15.17.205:9000/tick/mhu') as resp:\n    #         print(resp.status)\n    #         print(await resp.json())\n\n    transport = AIOHTTPTransport(\n        url=\"https://dbschool.alcyone.life/graphql\")\n\n    # Using `async with` on the client will start a connection on the transport\n    # and provide a `session` variable to execute queries on this connection\n    async with Client(\n        transport=transport, fetch_schema_from_transport=True,\n    ) as session:\n\n        # Execute a single mutation\n        query = gql(\n            \"\"\"\n            mutation {\n                createTicker(input: { data: { symbol: \"mhu\", price: 600000 } }) {\n                    ticker {\n                        symbol\n                        price\n                    }\n                }\n            }\n            \"\"\"\n        )\n\n        result = await session.execute(query)\n        print(result)\n\n\n# asyncio.run(main())\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(main())\n","sub_path":"tests/tick.py","file_name":"tick.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"567153580","text":"from app import app\nfrom unittest import TestCase\nfrom json import loads\nfrom petl import fromcsv\n\nclass TestIntegrations(TestCase):\n    def setUp(self):\n        self.app = app.test_client()\n\n    def test_csv_matches_database_by_id(self):\n    \tdatabase_heroes = loads(self.app.get('/heroes').data)\n    \tdatabase_villains = loads(self.app.get('/villains').data)\n    \tcsv_heroes = fromcsv('../marvel_heroes.csv').dicts()\n    \tcsv_villains = fromcsv('../marvel_villains.csv').dicts()\n\n    \tdef get_ids(character_list):\n    \t\t# list.sort() returns None, so use sorted() to actually return the ids\n    \t\treturn sorted(character['id'] for character in character_list)\n\n    \tdb_hero_ids = get_ids(database_heroes)\n    \tdb_villain_ids = get_ids(database_villains)\n    \tcsv_hero_ids = get_ids(csv_heroes)\n    \tcsv_villain_ids = get_ids(csv_villains)\n\n    \tself.assertEqual(db_hero_ids, csv_hero_ids)\n    \tself.assertEqual(db_villain_ids, csv_villain_ids)\n\n    def test_for_empty_alignments(self):\n        json_data = loads(self.app.get('/all').data)\n        unknown_alignments = [char for char in json_data if char[\"alignment\"] == '']\n        # assert the list is empty\n        self.assertFalse(unknown_alignments)\n\n    def test_for_empty_externalities(self):\n    \tjson_data = loads(self.app.get('/all').data)\n    \tchars_with_empty_externalities = [char for char in json_data if char['stats']['externalities'] == []]\n    
\tself.assertFalse(chars_with_empty_externalities)","sub_path":"app/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"292908928","text":"import unittest\n\nimport numpy\n\nimport chainer\nfrom chainer import caffe_export\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import testing\n\n\n# @testing.parameterize([\n# {'layer': 'LinearFunction'},\n# {'layer': 'Reshape'},\n# {'layer': 'Convolution2DFunction'},\n# {'layer': 'AveragePooling2D'},\n# {'layer': 'MaxPooling2D'},\n# {'layer': 'BatchNormalization'},\n# {'layer': 'ReLU'},\n# {'layer': 'Softmax'},\n# {'layer': 'Add'},\n# ])\nclass TestCaffeExport(unittest.TestCase):\n\n def setUp(self):\n\n class Model(chainer.Chain):\n\n def __init__(self):\n super(Model, self).__init__()\n with self.init_scope():\n self.l1 = L.Convolution2D(None, 1, 1, 1, 0)\n self.b2 = L.BatchNormalization(1)\n self.l3 = L.Linear(None, 1)\n\n def __call__(self, x):\n h = F.relu(self.l1(x))\n h = self.b2(h)\n return self.l3(h)\n\n self.model = Model()\n\n def test_caffe_export_no_save(self):\n x = numpy.ones((1, 3, 7, 7)).astype(numpy.float32)\n with chainer.using_config('train', False), \\\n chainer.force_backprop_mode():\n y = self.model(x)\n\n caffe_export([x], [y], None, True, 'test')\n\n\ntesting.run_module(__name__, __file__)\n","sub_path":"tests/chainer_tests/test_caffe_export.py","file_name":"test_caffe_export.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"1013523","text":"# Assigment: 454 Project 1 Spring 2020\n# Authors: Alexander Barajas-Ritchie, Welinton De Leon, Alondra Lona\n#\n#\nimport itertools\n\n#\n# PROBLEM 1 FUNCTIONS\n#\ndef isAccepted(p):\n # Checks if string p of size 6 is an accepted string or not\n # An accepted string is a string that contains at least one of each in {'a', 'b', 'c', 'd'}\n if(len(p) <= 5):\n return True\n foundA = foundB = foundC = foundD = False\n for i in p:\n if i == 'a':\n foundA = True\n elif i == 'b':\n foundB = True\n elif i == 'c':\n foundC = True\n elif i == 'd':\n foundD = True\n if foundA and foundB and foundC and foundD:\n return True\n return False\n\ndef calcPos(string):\n # Given a string of language {'a', 'b', 'c', 'd'} it calculates the an integer\n # which will be the State number.\n toCalculate = []\n position = 0\n for letter in string:\n toCalculate.append(letter)\n for i in range(0, len(toCalculate)):\n if toCalculate[i] == 'a':\n position = position + 1 * pow(4, (len(toCalculate) - 1) - i)\n if toCalculate[i] == 'b':\n position = position + 2 * pow(4, (len(toCalculate) - 1) - i)\n if toCalculate[i] == 'c':\n position = position + 3 * pow(4, (len(toCalculate) - 1) - i)\n if toCalculate[i] == 'd':\n position = position + 4 * pow(4, (len(toCalculate) - 1) - i)\n return position\n\ndef possiblestring():\n allstring = []\n allstring.append(\"\")\n for i in range(1, 6):\n for perm in itertools.product(\"abcd\", repeat=i):\n temp = \"\"\n allstring.append(temp.join(perm))\n return allstring\n\ndef nextstate(key, string, prev, curr):\n count = 0\n tempA = string + 'a'\n tempB = string + 'b'\n tempC = string + 'c'\n tempD = string + 'd'\n\n if isAccepted(tempA):\n if (len(tempA) > 5):\n tempA = tempA[1:]\n count += prev[calcPos(tempA)]\n\n if isAccepted(tempB):\n if (len(tempB) > 5):\n tempB = tempB[1:]\n count += prev[calcPos(tempB)]\n\n if 
isAccepted(tempC):\n if (len(tempC) > 5):\n tempC = tempC[1:]\n count += prev[calcPos(tempC)]\n\n if isAccepted(tempD):\n if (len(tempD) > 5):\n tempD = tempD[1:]\n count += prev[calcPos(tempD)]\n\n curr[key] = count\n\n#\n# PROBLEM 2 FUNCTIONS\n#\n\ndef bfs(visited, graph, node):\n q = []\n visited.append(node)\n q.append(node)\n while q:\n s = q.pop(0)\n print(s, end=\" \")\n\n for neighbour in graph[s]:\n if neighbour not in visited:\n visited.append(neighbour)\n q.append(neighbour)\n\n\ndef delta(curr, ele, k):\n return (10 * curr + ele) % k\n\ndef Findstring(k, d):\n\n queue = [] # initialize a queue\n parent = [0] * (k + 1)\n label = [0] * (k + 1)\n visited = [False] * (k + 1) # array of visited states, all false\n visited[k] = True\n queue.append(k) # added the the last element to the queue\n while queue: # queue is not empty could also say while True ?\n curr = queue[0]\n queue.pop(0)\n done = 0\n for elem in d:\n next = delta(curr, elem, k)\n if next == 0:\n done = 1\n visited[next] = True\n parent[next] = curr\n label[next] = elem\n queue.append(next)\n break\n\n elif not visited[next]: # state is false or not visted\n\n visited[next] = True\n parent[next] = curr\n label[next] = elem\n queue.append(next)\n if done == 1:\n break\n if next != 0:\n print(\"No solution\")\n else:\n temp = trace(parent, label, k)\n print(temp[len(temp)::-1])\n\ndef trace(parent, label, k):\n parentValue = parent[0]\n solution = str(label[0])\n while(parentValue != k):\n solution += str(label[parentValue])\n parentValue = parent[parentValue]\n return solution\n\n#\n# MAIN\n#\ndef main():\n # User Input for what problem they would like to work on\n print(\"Enter 1 for Problem 1\")\n print(\"Enter 2 for Problem 2\")\n print(\"Enter 0 to Quit\")\n prob = int(input(\"Problem 1 or Problem 2? (Enter 0 to quit): \"))\n while(prob != 0):\n #\n # Problem 1 Main\n #\n if (prob == 1):\n size = int(input(\"Size of string (Enter 0 to quit): \"))\n prev = [1] * 1365\n curr = [0] * 1365\n base4 = {}\n possible = possiblestring()\n for element in possible:\n temp = calcPos(element)\n base4[temp] = element\n for i in range(size):\n for k, v in base4.items(): # go through prev list\n nextstate(k, v, prev, curr)\n prev = curr\n curr = [0] * 1365\n print(prev[0])\n #\n # Problem 2 Main\n #\n elif (prob == 2):\n k = int(input(\"Insert a value K: \"))\n print(\"Enter integers in the language d ( same line ): \\n > \", end='')\n d = list(map(int, input().split()))\n Findstring(k, d)\n #question = input(\"Quit? 
y/n: \")\n\n prob = int(input(\"\\nEnter number for which problem you would like to do (Enter 0 to quit): \"))\n\n\nmain()\n","sub_path":"454_project.py","file_name":"454_project.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"628559351","text":"\"\"\"\nWrappers for the \"IOBluetoothUI\" framework on macOS.\n\nThese wrappers don't include documentation, please check Apple's documentation\nfor information on how to use this framework and PyObjC's documentation\nfor general tips and tricks regarding the translation between Python\nand (Objective-)C frameworks\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.dirname(__file__))\n\n\nfrom pyobjc_setup import setup # noqa: E402\n\nVERSION = \"9.2.1\"\n\nsetup(\n name=\"pyobjc-framework-IOBluetoothUI\",\n description=\"Wrappers for the framework IOBluetoothUI on macOS\",\n packages=[\"IOBluetoothUI\"],\n version=VERSION,\n install_requires=[\n \"pyobjc-core>=\" + VERSION,\n \"pyobjc-framework-IOBluetooth>=\" + VERSION,\n ],\n long_description=__doc__,\n options={\"bdist_wheel\": {\"py_limited_api\": \"cp36\"}},\n)\n","sub_path":"pyobjc-framework-IOBluetoothUI/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"252821223","text":"input=''\ninr=[1,100,1000]\nage=[20,15,16,18,34,56]\n\n\nmylist=[x for x in input]\nmy_GBP_list=[x*96.0 for x in inr]\nmy_age=['adult' if x>18 else 'teenage' for x in age]\n\nprint(mylist)\nprint(my_GBP_list)\nprint(my_age)","sub_path":"Basics/ListComphrension.py","file_name":"ListComphrension.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"186212976","text":"import datetime\nimport logging\nimport os\nimport shutil\nimport time\nfrom pathlib import Path\n\nimport pandas as pd\nimport pkg_resources\nimport requests\nfrom gensim.models import KeyedVectors\nfrom joblib import load\nfrom openpredict.config import settings\n\nMISSING_IDS = set()\n\n\ndef log(msg: str):\n \"\"\"Simple print with a timestamp\"\"\"\n log_msg = '[' + str(datetime.datetime.now().strftime(\"%Y-%m-%d@%H:%M:%S\")) + '] ' + msg \n # logging.info(log_msg)\n print(log_msg)\n\n\ndef get_openpredict_dir(subfolder=''):\n \"\"\"Return the full path to the provided files in the OpenPredict data folder\n Where models and features for runs are stored\n \"\"\"\n if not settings.OPENPREDICT_DATA_DIR.endswith('/'):\n settings.OPENPREDICT_DATA_DIR += '/'\n return settings.OPENPREDICT_DATA_DIR + subfolder\n\n\n\ndef init_openpredict_dir():\n \"\"\"Create OpenPredict folder and initiate files if necessary.\"\"\"\n\n print('Using directory: ' + settings.OPENPREDICT_DATA_DIR)\n print('Creating if does not exist: ' + get_openpredict_dir())\n Path(get_openpredict_dir()).mkdir(parents=True, exist_ok=True)\n print('Creating if does not exist: ' + get_openpredict_dir('features'))\n Path(get_openpredict_dir('features')).mkdir(parents=True, exist_ok=True)\n print('Creating if does not exist: ' + get_openpredict_dir('models'))\n Path(get_openpredict_dir('models')).mkdir(parents=True, exist_ok=True)\n print('Creating if does not exist: ' + get_openpredict_dir('kgpredict'))\n Path(get_openpredict_dir('kgpredict')).mkdir(parents=True, exist_ok=True)\n print('Creating if does not exist: ' + get_openpredict_dir('xpredict'))\n 
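# exist_ok=True makes repeated initialisation a no-op, so startup can rerun this safely
    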
Path(get_openpredict_dir('xpredict')).mkdir(parents=True, exist_ok=True)\n\n if not os.path.exists(get_openpredict_dir('features/openpredict-baseline-omim-drugbank.joblib')):\n print('Initiating ' + get_openpredict_dir('features/openpredict-baseline-omim-drugbank.joblib'))\n shutil.copy(pkg_resources.resource_filename('openpredict', 'data/features/openpredict-baseline-omim-drugbank.joblib'),\n get_openpredict_dir('features/openpredict-baseline-omim-drugbank.joblib'))\n if not os.path.exists(get_openpredict_dir('models/openpredict-baseline-omim-drugbank.joblib')):\n print('Initiating ' + get_openpredict_dir('models/openpredict-baseline-omim-drugbank.joblib'))\n shutil.copy(pkg_resources.resource_filename('openpredict', 'data/models/openpredict-baseline-omim-drugbank.joblib'), \n get_openpredict_dir('models/openpredict-baseline-omim-drugbank.joblib'))\n if not os.path.exists(get_openpredict_dir('openpredict-metadata.ttl')):\n print('Creating ' + get_openpredict_dir('openpredict-metadata.ttl'))\n # shutil.copy(get_openpredict_dir('initial-openpredict-metadata.ttl'), \n shutil.copy(pkg_resources.resource_filename('openpredict', 'data/openpredict-metadata.ttl'), \n get_openpredict_dir('openpredict-metadata.ttl'))\n\n\n if not os.path.exists(get_openpredict_dir('kgpredict/kgpredict_drug_diseasemappings.tsv')):\n print('Initiating ' + get_openpredict_dir('kgpredict/kgpredict_drug_diseasemappings.tsv'))\n shutil.copy(pkg_resources.resource_filename('openpredict', 'data/kgpredict/kgpredict_drug_diseasemappings.tsv'), \n get_openpredict_dir('kgpredict/kgpredict_drug_diseasemappings.tsv'))\n\n if not os.path.exists(get_openpredict_dir('xpredict/deepdrug_repurposingpredictiondataset.csv')):\n print('Initiating ' + get_openpredict_dir('xpredict/deepdrug_repurposingpredictiondataset.csv'))\n shutil.copy(pkg_resources.resource_filename('openpredict', 'data/xpredict/deepdrug_repurposingpredictiondataset.csv'),\n get_openpredict_dir('xpredict/deepdrug_repurposingpredictiondataset.csv'))\n\n if not os.path.exists(get_openpredict_dir('kgpredict/embed/entity_embeddings.npy')):\n print(f\"📥️ Downloading Drug Repurposing KG embeddings in {get_openpredict_dir('kgpredict/embed')}\")\n os.system('wget -q --show-progress purl.org/kgpredict -O /tmp/kgpredictfiles.tar.gz')\n os.system(f\"tar -xzvf /tmp/kgpredictfiles.tar.gz -C {get_openpredict_dir('kgpredict')}\")\n os.rename(get_openpredict_dir('kgpredict/embed/DRKG_TransE_l2_entity.npy'), get_openpredict_dir('kgpredict/embed/entity_embeddings.npy'))\n os.rename(get_openpredict_dir('kgpredict/embed/DRKG_TransE_l2_relation.npy'), get_openpredict_dir('kgpredict/embed/relation_embeddings.npy'))\n\n # shutil.copy(pkg_resources.resource_filename('openpredict', 'data/features/openpredict-baseline-omim-drugbank.joblib'),\n # get_openpredict_dir('features/openpredict-baseline-omim-drugbank.joblib'))\n\n print('✅ OpenPredict data initialized')\n\n# echo `pwd` > pwdfile.txt\n# #download kg predict drugrepurposing files\n# wget -q --show-progress purl.org/kgpredict -O kgpredictfiles.tar.gz\n# #extract kgpredict files\n\n# tar -xzvf kgpredictfiles.tar.gz -C ./openpredict/data/kgpredict/\n# rm kgpredictfiles.tar.gz\n\n# mv ./openpredict/data/kgpredict/embed/DRKG_TransE_l2_entity.npy ./openpredict/data/kgpredict/embed/entity_embeddings.npy\n# mv ./openpredict/data/kgpredict/embed/DRKG_TransE_l2_relation.npy ./openpredict/data/kgpredict/embed/relation_embeddings.npy\n\n\n\n \n # attempts = 0\n # while attempts < 30:\n # try:\n # init_triplestore()\n # break\n # except Exception as 
e:\n # print(e)\n # print('Failed to connect to the SPARQL endpoint, attempt ' + str(attempts))\n # time.sleep(5)\n # attempts += 1\n # Check if https://w3id.org/openpredict/run/openpredict-baseline-omim-drugbank exist before iniating the triplestore\n # add_feature_metadata(\"GO-SIM\", \"GO based drug-drug similarity\", \"Drugs\")\n # add_feature_metadata(\"TARGETSEQ-SIM\", \"Drug target sequence similarity: calculation of SmithWaterman sequence alignment scores\", \"Drugs\")\n # add_feature_metadata(\"PPI-SIM\", \"PPI based drug-drug similarity, calculate distance between drugs on protein-protein interaction network\", \"Drugs\")\n # add_feature_metadata(\"TC\", \"Drug fingerprint similarity, calculating MACS based fingerprint (substructure) similarity\", \"Drugs\")\n # add_feature_metadata(\"SE-SIM\", \"Drug side effect similarity, calculating Jaccard coefficient based on drug sideefects\", \"Drugs\")\n # add_feature_metadata(\"PHENO-SIM\", \"Disease Phenotype Similarity based on MESH terms similarity\", \"Diseases\")\n # add_feature_metadata(\"HPO-SIM\", \"HPO based disease-disease similarity\", \"Diseases\")\n\n\n\ndef get_entities_labels(entity_list):\n \"\"\"Send the list of node IDs to Translator Normalization API to get labels\n See API: https://nodenormalization-sri.renci.org/apidocs/#/Interfaces/get_get_normalized_nodes\n and example notebook: https://github.com/TranslatorIIPrototypes/NodeNormalization/blob/master/documentation/NodeNormalization.ipynb\n \"\"\"\n # TODO: add the preferred identifier CURIE to our answer also?\n try:\n get_label_result = requests.get('https://nodenormalization-sri.renci.org/get_normalized_nodes',\n params={'curie': entity_list})\n get_label_result = get_label_result.json()\n except:\n # Catch if the call to the API fails (API not available)\n logging.info(\"Translator API down: https://nodenormalization-sri.renci.org/apidocs\")\n get_label_result = {}\n # Response is a JSON:\n # { \"HP:0007354\": {\n # \"id\": { \"identifier\": \"MONDO:0004976\",\n # \"label\": \"amyotrophic lateral sclerosis\" },\n return get_label_result\n\ndef normalize_id_to_translator(ids_list):\n \"\"\"Use Translator SRI NodeNormalization API to get the preferred Translator ID\n for an ID https://nodenormalization-sri.renci.org/docs\n \"\"\"\n converted_ids_obj = {}\n resolve_curies = requests.get('https://nodenormalization-sri.renci.org/get_normalized_nodes',\n params={'curie': ids_list})\n # Get corresponding OMIM IDs for MONDO IDs if match\n resp = resolve_curies.json()\n # print(resp)\n for converted_id, translator_ids in resp.items():\n try:\n pref_id = translator_ids['id']['identifier']\n print(converted_id + ' > ' + pref_id)\n converted_ids_obj[converted_id] = pref_id\n except:\n print('❌️ ' + converted_id + ' > ' + str(translator_ids))\n\n return converted_ids_obj\n\ndef convert_baseline_features_ids():\n \"\"\"Convert IDs to use Translator preferred IDs when building the baseline model from scratch\"\"\"\n baseline_features_folder = \"data/baseline_features/\"\n drugfeatfiles = ['drugs-fingerprint-sim.csv','drugs-se-sim.csv', \n 'drugs-ppi-sim.csv', 'drugs-target-go-sim.csv','drugs-target-seq-sim.csv']\n diseasefeatfiles =['diseases-hpo-sim.csv', 'diseases-pheno-sim.csv' ]\n drugfeatfiles = [ pkg_resources.resource_filename('openpredict', os.path.join(baseline_features_folder, fn)) for fn in drugfeatfiles]\n diseasefeatfiles = [ pkg_resources.resource_filename('openpredict', os.path.join(baseline_features_folder, fn)) for fn in diseasefeatfiles]\n\n # Prepare 
drug-disease dictionary\n drugDiseaseKnown = pd.read_csv(pkg_resources.resource_filename('openpredict', 'data/resources/openpredict-omim-drug.csv'),delimiter=',') \n drugDiseaseKnown.rename(columns={'drugid':'Drug','omimid':'Disease'}, inplace=True)\n drugDiseaseKnown.Disease = drugDiseaseKnown.Disease.astype(str)\n\n drugs_set = set()\n diseases_set = set()\n drugs_set.update(drugDiseaseKnown['Drug'].tolist())\n diseases_set.update(drugDiseaseKnown['Disease'].tolist())\n\n for csv_file in drugfeatfiles:\n df = pd.read_csv(csv_file, delimiter=',')\n drugs_set.update(df['Drug1'].tolist())\n drugs_set.update(df['Drug2'].tolist())\n\n for csv_file in diseasefeatfiles:\n df = pd.read_csv(csv_file, delimiter=',')\n diseases_set.update(df['Disease1'].tolist())\n diseases_set.update(df['Disease2'].tolist())\n \n diseases_set = ['OMIM:{0}'.format(disease) for disease in diseases_set]\n drugs_set = ['DRUGBANK:{0}'.format(drug) for drug in drugs_set]\n\n diseases_mappings = normalize_id_to_translator(diseases_set)\n drugs_mappings = normalize_id_to_translator(drugs_set)\n\n print('Finished API queries')\n # Replace Ids with translator IDs in kown drug disease associations\n drugDiseaseKnown[\"Drug\"] = drugDiseaseKnown[\"Drug\"].apply (lambda row: map_id_to_translator(drugs_mappings, 'DRUGBANK:' + row) )\n drugDiseaseKnown[\"Disease\"] = drugDiseaseKnown[\"Disease\"].apply (lambda row: map_id_to_translator(diseases_mappings, 'OMIM:' + str(row)) )\n drugDiseaseKnown.to_csv('openpredict/data/resources/known-drug-diseases.csv', index=False)\n\n # Replace IDs in drugs baseline features files\n for csv_file in drugfeatfiles:\n df = pd.read_csv(csv_file, delimiter=',')\n df[\"Drug1\"] = df[\"Drug1\"].apply (lambda row: map_id_to_translator(drugs_mappings, 'DRUGBANK:' + row) )\n df[\"Drug2\"] = df[\"Drug2\"].apply (lambda row: map_id_to_translator(drugs_mappings, 'DRUGBANK:' + row) )\n df.to_csv(csv_file.replace('/baseline_features/', '/translator_features/'), index=False)\n\n # Replace IDs in diseases baseline features files\n for csv_file in diseasefeatfiles:\n df = pd.read_csv(csv_file, delimiter=',')\n df[\"Disease1\"] = df[\"Disease1\"].apply (lambda row: map_id_to_translator(diseases_mappings, 'OMIM:' + str(row)) )\n df[\"Disease2\"] = df[\"Disease2\"].apply (lambda row: map_id_to_translator(diseases_mappings, 'OMIM:' + str(row)) )\n df.to_csv(csv_file.replace('/baseline_features/', '/translator_features/'), index=False)\n\n print('❌️ Missing IDs: ')\n for missing_id in MISSING_IDS:\n print(missing_id)\n \n\n # drugs_set.add(2)\n # drugs_set.update([2, 3, 4])\n\n # Extract the dataframes col1 and 2 to a unique list\n # Add those list to the drugs and diseases sets\n # Convert the set/list it using normalize_id_to_translator(ids_list)\n # Update all dataframes using the created mappings\n # And store to baseline_translator\n\ndef map_id_to_translator(mapping_obj, source_id):\n try:\n return mapping_obj[source_id]\n except:\n MISSING_IDS.add(source_id)\n return source_id\n\n\n\ndef load_similarity_embeddings():\n \"\"\"Load embeddings model for similarity\"\"\"\n embedding_folder = 'data/embedding'\n # print(pkg_resources.resource_filename('openpredict', embedding_folder))\n similarity_embeddings = {}\n for model_id in os.listdir(pkg_resources.resource_filename('openpredict', embedding_folder)):\n if model_id.endswith('txt'):\n feature_path = pkg_resources.resource_filename('openpredict', os.path.join(embedding_folder, model_id))\n print(\"📥 Loading similarity features from \" + feature_path)\n 
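# gensim's KeyedVectors parses the plain-text word2vec format produced offline
            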
emb_vectors = KeyedVectors.load_word2vec_format(feature_path)\n similarity_embeddings[model_id]= emb_vectors\n return similarity_embeddings\n\n\ndef load_treatment_classifier(model_id):\n \"\"\"Load embeddings model for treats and treated_by\"\"\"\n print(\"📥 Loading treatment classifier from joblib for model \" + str(model_id))\n return load(f'{settings.OPENPREDICT_DATA_DIR}/models/{str(model_id)}.joblib')\n\n\ndef load_treatment_embeddings(model_id):\n \"\"\"Load embeddings model for treats and treated_by\"\"\"\n print(f\"📥 Loading treatment features for model {str(model_id)}\")\n (drug_df, disease_df) = load(f'{settings.OPENPREDICT_DATA_DIR}/features/{str(model_id)}.joblib')\n return (drug_df, disease_df)\n\n","sub_path":"openpredict/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"79808391","text":"#!/usr/bin/env python\n\n'''\nApplication 3 of the Udemy Python Course. It will block deemed \"distracting\" websites\nfor a given time. It will automatically add redirect lines to the system host file\nat a given time of day.\n'''\n\n#HOSTS LOCATION C:\\Windows\\System32\\drivers\\etc\n#MAC/LINUX /etc/hosts\n\nimport time\nfrom datetime import datetime as dt\n\npath = r\"C:\\Windows\\System32\\drivers\\etc\\hosts\"\nreDirect = \"127.0.0.1\"\nblockedSites = [\"www.facebook.com\", \"facebook.com\", \"www.youtube.com\", \"youtube.com\"]\n\ndef blockSites():\n '''\n The main function.\n '''\n while True:\n\n if dt(dt.now().year, dt.now().month, dt.now().day, 16) < dt.now()\\\n < dt(dt.now().year, dt.now().month, dt.now().day, 17):\n\n print(\"Access Denied: \" + str(blockedSites))\n with open(path, 'r+') as file:\n content = file.read()\n for website in blockedSites:\n if website in content:\n pass\n else:\n file.write(reDirect + \" \" + website + \"\\n\")\n\n else:\n\n print(\"Access Granted: \" + str(blockedSites))\n with open(path, 'r+') as file:\n content = file.readlines()\n file.seek(0)\n for line in content:\n if not any(website in line for website in blockedSites):\n file.write(line)\n file.truncate()\n\n time.sleep(5)\n\nblockSites()\n","sub_path":"Application 3/app3.py","file_name":"app3.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"449765610","text":"import urllib.request\nimport urllib.parse\nfrom urllib.error import HTTPError\n\nimport json\nimport requests\nfrom django.core.files import File\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom django.core import serializers\nfrom django.views.generic import View\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass LoginExperience(View):\n def post(self, request):\n fields = ['email', 'password']\n for field in fields:\n if field not in request.POST:\n return JsonResponse({'result': 'Login unsuccesfull', 'error': field + ' required'})\n\n post_data = {'email': request.POST['email'],\n 'password': request.POST['password']}\n link = 'http://models:8000/api/v1/users/user/login/'\n req = urllib.request.Request(\n link, data=urllib.parse.urlencode(post_data).encode(\"utf-8\"))\n resp_json = urllib.request.urlopen(req).read().decode('utf-8')\n response = json.loads(resp_json)\n if('token' in response):\n return JsonResponse({'result': 'Login succesfull', 
'token': response['token'], 'username': response['username']})\n elif('found' in response):\n return JsonResponse({'result': 'Login unsuccesfull', 'error': 'User not found'})\n else:\n return JsonResponse({'result': 'Login unsuccesfull', 'error': 'Incorrect Email/Password '})\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass LogoutExperience(View):\n def post(self, request):\n if 'cookie' not in request.POST:\n return JsonResponse({'error': 'cookie required'})\n link = 'http://models:8000/api/v1/users/user/logout/' + \\\n str(request.POST['cookie'])\n req = urllib.request.Request(link)\n resp_json = urllib.request.urlopen(req).read().decode('utf-8')\n response = json.loads(resp_json)\n return JsonResponse({'result': response['Status']}, safe=False)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass CreateBusinessAccountExperience(View):\n def post(self, request):\n fields = ['username', 'email', 'password']\n for field in fields:\n if field not in request.POST:\n return JsonResponse({'created': False, 'error': field + ' required'})\n\n post_data = {'username': request.POST['username'], 'email': request.POST['email'],\n 'password': request.POST['password'], 'rating': 0, 'phone_number': 0}\n link = 'http://models:8000/api/v1/users/user/new/'\n req = urllib.request.Request(\n link, data=urllib.parse.urlencode(post_data).encode(\"utf-8\"))\n resp_json = urllib.request.urlopen(req).read().decode('utf-8')\n response = json.loads(resp_json)\n if response['created'] == True:\n return JsonResponse({'created': True})\n return JsonResponse(response)\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass UploadAvatarExperience(View):\n def post(self, request):\n if 'cookie' not in request.POST:\n return JsonResponse({'authenticated': False, 'error': 'cookie required'})\n link = 'http://models:8000/api/v1/users/user/authenticate/' + \\\n str(request.POST['cookie'])\n req = urllib.request.Request(link)\n resp_json = urllib.request.urlopen(req).read().decode('utf-8')\n response = json.loads(resp_json)\n # Authenticated branch\n if response['found'] == True:\n if 'image_id' not in request.POST:\n return JsonResponse({'authenticated': True, 'error': 'image_id required'})\n requests.post('http://models:8000/api/v1/users/user/avatar/', data={'image_id' : request.POST['image_id'], 'user_id' : str(response['user_id'])})\n return JsonResponse({'authenticated': True, 'result': 'Success'})\n # Not authenticated branch\n else:\n return JsonResponse({'authenticated': False, 'error': 'User not authenticated'})\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass GetAvatarExperience(View):\n def get(self, request, username):\n #Get user info\n link='http://models:8000/api/v1/users/user/byname/' + str(username)\n req = urllib.request.Request(link)\n resp_json = urllib.request.urlopen(req).read().decode('utf-8')\n response = json.loads(resp_json)\n if(response['found'] == False):\n return JsonResponse({'found': False, 'result': 'Error, user not found.'})\n user_id = response['result']\n link='http://models:8000/api/v1/users/user/' + str(user_id) + '/'\n req = urllib.request.Request(link)\n resp_json = urllib.request.urlopen(req).read().decode('utf-8')\n user = json.loads(resp_json)\n if (user['found'] == False):\n return JsonResponse({'found': False, 'result': 'Error, user not found.'})\n link = 'http://models:8000/api/v1/services/image/get/'+ str(user['result']['image_id'])\n image_link = urllib.request.Request(link)\n try:\n resp_json = 
urllib.request.urlopen(image_link).read().decode('utf-8')\n            image = json.loads(resp_json)\n            if(image['found']==True):\n                image_link = image['result']\n            else:\n                image_link = \"404\"\n        except:\n            image_link = \"404\"\n        return JsonResponse({'image_link' : image_link})\n\n\n\n\n\n","sub_path":"src/experience/experience_app/views_account.py","file_name":"views_account.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"390413118","text":"import torch\r\nimport torch.nn as nn\r\n\r\nclass TimeDistributed(nn.Module):\r\n    def __init__(self, module):\r\n        super(TimeDistributed, self).__init__()\r\n        self.module = module\r\n\r\n    def __multi_time(self, size):\r\n        # merge the batch and time axes: (B, T, ...) -> (B*T, ...)\r\n        size_temp = list(size)\r\n        size_temp = [size_temp[0]*size_temp[1]]+size_temp[2:]\r\n        return tuple(size_temp)\r\n\r\n    def __dist_time(self, size, batch, time_dim):\r\n        # split the merged axis back out: (B*T, ...) -> (B, T, ...)\r\n        size_temp = list(size)\r\n        size_temp = [batch,time_dim]+size_temp[1:]\r\n        return tuple(size_temp)\r\n\r\n    def forward(self, *args):\r\n        # Squash samples and timesteps into a single axis\r\n        x_reshape = (x.contiguous().view(self.__multi_time(x.size())) for x in args)  # (samples * timesteps, input_size)\r\n\r\n        y = self.module(*x_reshape)\r\n\r\n        y = y.contiguous().view(self.__dist_time(y.size(), args[0].size(0), args[0].size(1)))  # (samples, timesteps, output_size)\r\n\r\n        return y\r\n\r\nclass PointDistributed(nn.Module):\r\n    def __init__(self, module):\r\n        super(PointDistributed, self).__init__()\r\n        self.module = module\r\n\r\n    def forward(self, x):\r\n        original = list(x.size())\r\n        x = x.view(-1, original[-1])\r\n        y = self.module(x)\r\n\r\n        original[-1] = -1\r\n        y = y.view(original)\r\n        return y\r\n\r\n\r\nif __name__ == '__main__':\r\n    a = torch.randn(1, 60, 10, 32, 32)\r\n    # smoke test: wrap a per-timestep module and check the output shape\r\n    model = TimeDistributed(nn.Linear(32, 16))\r\n\r\n    print(model(a).shape)\r\n\r\n","sub_path":"HAR/codes_v5_gesture/Modules/TimeDistributed.py","file_name":"TimeDistributed.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"544901013","text":"\"\"\"empty message\n\nRevision ID: 439d0419436e\nRevises: ca1ae2ed1fe0\nCreate Date: 2019-01-20 18:35:22.321379\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '439d0419436e'\ndown_revision = 'ca1ae2ed1fe0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('beneficiaries', sa.Column('date_verified', sa.DateTime(), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('beneficiaries', 'date_verified')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/439d0419436e_.py","file_name":"439d0419436e_.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"183340011","text":"def quick_sort(arr, left=0, right=None):\n if right == None:\n right = len(arr)-1\n if left < right:\n pos = partition(arr,left,right)\n quick_sort(arr, left, pos-1)\n quick_sort(arr, pos+1, right)\n return arr\n\ndef partition(arr, left, right):\n pivot = arr[right]\n low = left-1\n # iterate over left to the pivot-1\n for i in range(left,right):\n if arr[i]<= pivot:\n low = low + 1\n swap(arr, i, low)\n swap(arr, right, low+1)\n return low+1\n\ndef swap(arr, i, low):\n arr[i], arr[low] = arr[low], arr[i]\n return\n\nif __name__ == \"__main__\":\n arr = [5,12,7,5,5,7]\n print(quick_sort(arr))\n","sub_path":"Python/code_challenges/quick_sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"423245758","text":"import numpy as np\nimport csv, pickle, itertools, os\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\n\nN_ADV = 2\nM_KEY = 2\n\n########################################################\n## Sample data\n# 06/15/2002 00:00:00\t83\t691\t.85\t0\n########################################################\n\n########################################################\n## Convert correlation matrix to blocks\n########################################################\ndef plotBlockCorr():\n def dfs(i,vis,corr):\n vis[i]=1\n cur=set([i])\n for j in range(M_KEY):\n if corr[i][j]<2 or vis[j] == 1: continue\n cur=cur.union(dfs(j,vis,corr))\n return cur\n\n # Make blocks\n with open('data/corr_all_key', 'rb') as f:\n corr = pickle.load(f)\n vis = [0 for i in range(M_KEY)]\n\n connected_comp = []\n for i in range(M_KEY):\n if i%100 == 0: print(i,flush=True)\n if vis[i]==0:\n connected_comp.append(dfs(i,vis,corr))\n\n connected_comp=sorted(connected_comp,key=len,reverse=True)\n\n block_corr = np.zeros((M_KEY,M_KEY))\n mp = [0 for i in range(M_KEY)]\n new_index = 0\n\n ## Reindex values\n for component in connected_comp:\n for i in component:\n mp[i] = new_index\n new_index += 1\n\n ## Make new matrix\n for component in connected_comp:\n for r,c in itertools.product(component,component):\n block_corr[mp[r]][mp[c]] = corr[r][c]\n\n ## Threshold matrix\n block_corr[block_corr<2]=0\n block_corr[block_corr>=2]=1\n\n block_corr[block_corr==0]=-1\n block_corr[block_corr==1]=0\n block_corr[block_corr==-1]=1\n\n print(np.max(block_corr),flush=True)\n\n f, ax = plt.subplots()\n # 'nearest' interpolation - faithful but blocky\n ax.imshow(block_corr,cmap=cm.Greys_r)\n # ax.imshow(block_corr,cmap=\"hot\")\n plt.xlabel('Keywords',fontsize=19)\n plt.ylabel('Keywords',fontsize=19)\n plt.xticks([])\n plt.yticks([])\n # plt.title('Keywords sharing at least 2 advertisers')\n plt.savefig('../block_matrix_2.eps', format='eps', dpi=200)\n plt.show()\n\n## run after distribution fitting\ndef get_percentage_of_bids_removed():\n with open(\"data/lowVarAdv\", 'rb') as f:\n low_var_adv = pickle.load(f)\n\n initial_bids = 0\n final_bids = 0\n\n ## Remove advertisers with less than 1000 bids\n print(\"Finding bad advertisers\",flush=True)\n to_delete = [set() for i in range(M_KEY)]\n for i in range(N_ADV):\n with 
open('raw_data/advertiser-'+str(i), 'rb') as f:\n adv = pickle.load(f)\n initial_bids += len(adv[\"bid\"])\n ## Basic sanity check: remove advertisers bidding less than 2 keywords\n clean_adv={\"bid\":[],\"key\":[]}\n key_map = Counter(adv[\"key\"])\n uniq_keys = len(key_map.keys())\n if i % 100 == 0:\n print(i,flush=True)\n # for k in key_map.keys():\n # if key_map[k] < 1000:\n # to_delete[k].add(i)\n # uniq_keys -= 1\n # if uniq_keys < 2:\n # for k in key_map.keys():\n # to_delete[k].add(i)\n # continue\n for j in range(len(adv[\"bid\"])):\n if(key_map[adv[\"key\"][j]] > 0):\n clean_adv[\"key\"].append(adv[\"key\"][j])\n clean_adv[\"bid\"].append(adv[\"bid\"][j])\n if [i,adv[\"key\"][j]] not in low_var_adv:\n final_bids +=1\n with open('data/advertiser-'+str(i), 'wb') as f:\n pickle.dump(adv, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n print(100*final_bids/(1.0*initial_bids),\"% of bids retained.\",flush=True)\n\n\n\n## FUNCTION 1\n########################################################\n## Generate Numpy arrays\n## key-i :\n## bids: A list of bids placed by advertisers\n## advertiser: ID of advertiser placing the bid\n\n## advertiser-i :\n## bids: A list of bids placed by advertisers\n## key: ID of keyword on which bid was placed\n########################################################\ndef getRawData():\n print(\"Building arrays.\",flush=True)\n key = [{\"bid\":[],\"advertiser\":[]} for i in range(M_KEY)]\n advertiser = [{\"bid\":[],\"key\":[]} for i in range(N_ADV)]\n\n os.system(\"mkdir raw_data\")\n\n with open('Webscope_A1/ydata-ysm-advertiser-bids-v1_0.txt') as csv_f:\n csv_reader = csv.reader(csv_f, delimiter='\\t')\n print(\"Reading f.\",flush=True)\n for i,row in enumerate(csv_reader):\n cur_adv=int(row[2])\n cur_key=int(row[1])\n cur_bid=float(row[3])\n if i%100000==0: print(str(cur_adv)+\", \"+str(cur_key)+\", \"+str(cur_bid),flush=True)\n key[cur_key][\"bid\"].append(cur_bid)\n key[cur_key][\"advertiser\"].append(cur_adv)\n advertiser[cur_adv][\"bid\"].append(cur_bid)\n advertiser[cur_adv][\"key\"].append(cur_key)\n\n for i in range(len(advertiser)):\n if(len(advertiser[i][\"bid\"])==0):\n break;\n with open(\"raw_data/advertiser-\"+str(i), 'wb') as f:\n pickle.dump(advertiser[i], f, protocol=pickle.HIGHEST_PROTOCOL)\n\n for i in range(len(key)):\n with open(\"raw_data/key-\"+str(i), 'wb') as f:\n pickle.dump(key[i], f, protocol=pickle.HIGHEST_PROTOCOL)\n\n\n## FUNCTION 2\n########################################################\n## Clean bids and keys\n## Remove advertisers with fewer than 1000 bids\n## Remove advertisers with less than 2 keywords\n########################################################\ndef cleanData():\n os.system(\"mkdir data\")\n\n initial_bids = 0\n final_bids = 0\n ## Remove advertisers with less than 1000 bids\n print(\"Finding bad advertisers\",flush=True)\n to_delete = [set() for i in range(M_KEY)]\n for i in range(1):\n with open('raw_data/advertiser-'+str(i), 'rb') as f:\n adv = pickle.load(f)\n initial_bids += len(adv[\"bid\"])\n ## Basic sanity check: remove advertisers bidding less than 2 keywords\n clean_adv={\"bid\":[],\"key\":[]}\n key_map = Counter(adv[\"key\"])\n uniq_keys = len(key_map.keys())\n if i % 100 == 0:\n print(i,flush=True)\n # for k in key_map.keys():\n # if key_map[k] < 1000:\n # to_delete[k].add(i)\n # uniq_keys -= 1\n #if uniq_keys < 2:\n # for k in key_map.keys():\n # to_delete[k].add(i)\n # continue\n for j in range(len(adv[\"bid\"])):\n if(key_map[adv[\"key\"][j]] > 0):\n clean_adv[\"key\"].append(adv[\"key\"][j])\n 
clean_adv[\"bid\"].append(adv[\"bid\"][j])\n final_bids += len(clean_adv[\"bid\"])\n\n with open('data/advertiser-'+str(i), 'wb') as f:\n pickle.dump(adv, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n print(100*final_bids/(1.0*initial_bids),\"% of bids retained.\",flush=True)\n\n ## Modify bids correspondingly\n print(\"Modifing keys\",flush=True)\n for i in range(M_KEY):\n if i%100==0: print(str(i),flush=True)\n with open('raw_data/key-'+str(i), 'rb') as f:\n key = pickle.load(f)\n clean_key ={\"bid\":[],\"advertiser\":[]}\n for j in range(len(key[\"bid\"])):\n if(key[\"advertiser\"][j] not in to_delete[i]):\n clean_key[\"advertiser\"].append(key[\"advertiser\"][j])\n clean_key[\"bid\"].append(key[\"bid\"][j])\n with open(\"data/key-\"+str(i), 'wb') as f:\n if i%100==0: print(\"dump clean key\",flush=True)\n pickle.dump(clean_key, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n## FUNCTION 3\ndef get_correlation_and_bids():\n ## keys: Details of key\n keys = []\n for i in range(M_KEY):\n if i%100==0: print(i,flush=True)\n with open('data/key-'+str(i), 'rb') as f:\n tmp = pickle.load(f)\n keys.append(tmp)\n\n ## List of (Set of keywords for each Advertiser)\n ## good_adv[i] = Set of top keywords for advertiser i\n good_adv = []\n ## List of (Count of bids by some advertiser for some keyword)\n ## cnt_bid_adv[i][j] = Count of bids by advertiser j for keyword i\n cnt_bid_adv = []\n tot=0\n for kkk,k in enumerate(keys):\n if kkk%100==0: print(kkk,flush=True)\n ## cnt_adv[i][0]: (count of of bids by advertiser i on current key (k) )\n ## cnt_adv[i][1]: i\n cnt_adv=[[0,i] for i in range(12000)]\n ## calculate values of cnt_adv\n for a in k[\"advertiser\"]:\n cnt_adv[a][0]+=1\n ## cnt_adv[i]: touple(count of of bids by i-th largest advertiser on current key (k) , index of advertiser)\n cnt_adv.sort(reverse=True)\n\n ## cur_adv: Set of advertisers for current key\n cur_adv=set()\n ## tmp[i] number of bids by advertiser i on current keyword\n ## Stores data for 20 largest advertisers\n tmp={}\n for i in range(len(cnt_adv)):\n ## Only take advertisers who bid more that 1000 times.\n MIN_BIDS = 0\n if cnt_adv[i][0] > MIN_BIDS:\n tot+=cnt_adv[i][0]\n tmp[cnt_adv[i][1]]=cnt_adv[i][0]\n cur_adv.add(cnt_adv[i][1])\n\n cnt_bid_adv.append(tmp)\n good_adv.append(cur_adv)\n\n with open(\"data/cnt_bid_adv\", 'wb') as f:\n pickle.dump(cnt_bid_adv, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n ## Corr: correlation matrix of bids\n ## corr[i][j]: number of advertiser shared between key i and j, only advertisers among top 20 for each\n corr = np.zeros((M_KEY,M_KEY))\n for i,j in itertools.product(range(M_KEY),range(M_KEY)):\n corr[i][j]+=len(good_adv[i].intersection(good_adv[j]));\n\n with open(\"data/\"+\"corr_all_key\", 'wb') as f:\n pickle.dump(corr, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n to_delete = set()\n ## Remove bids which don't share any keyword\n for i in range(M_KEY):\n Del=True\n for j in range(M_KEY):\n if(j<=i):continue\n if corr[i][j]>1:\n Del=False\n if Del: to_delete.add(i);\n\n\n ## Tot: total bids placed on top 50 keywords\n ## cnt: total bids places on suitable pair of top 50 keywords\n tot=M_KEY*M_KEY;\n cnt=0;\n for i,j in itertools.product(range(M_KEY),range(M_KEY)):\n if j>i: # and corr[i][j]>=2:\n cnt+=1\n folder=\"data/keys-\"+str(i)+\"-\"+str(j)\n if not os.path.exists(folder):\n os.makedirs(folder)\n with open(folder+\"/advertiser\", 'wb') as f:\n pickle.dump(good_adv[i].intersection(good_adv[j]), f, protocol=pickle.HIGHEST_PROTOCOL)\n\n tot=M_KEY*M_KEY;\n print(\"Fraction of suitable pairs: 
\"+str(cnt/(tot*1.0)),flush=True)\n a=np.zeros((M_KEY,M_KEY))\n a[corr>=2]=1;\n print(\"Fraction of suitable pairs: \"+str(a.mean()/2),flush=True)\n\n\ndef main():\n print(\"getting raw data...\",flush=True)\n getRawData()\n print(\"got raw data. cleaning data...\",flush=True)\n cleanData()\n print(\"cleaned data. finding correlation and counting bids...\",flush=True)\n get_correlation_and_bids()\n print(\"done.\",flush=True)\n\n\nif __name__ == '__main__' :\n main()\n","sub_path":"celis-fair-online-advertising-modified/cleanData.py","file_name":"cleanData.py","file_ext":"py","file_size_in_byte":11355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"450478518","text":"from django.views.generic import TemplateView, ListView, DetailView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.urls import reverse, reverse_lazy\nfrom InstaApp.models import Post\n# 因为要views要控制model和template所以views中应该要包��model也要包含template\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom InstaApp.forms import CustomUserCreationForm\n\n# Create your views here.\nclass HelloDjango(TemplateView):\n template_name = 'test.html'\n# static的一个页面\n\nclass PostView(ListView):\n model = Post\n template_name = 'index.html'\n# PostView继承ListView\n# override (1)使用Post这个model(2)用template来显示\n\nclass PostDetailView(DetailView):\n model = Post\n template_name = 'post_detail.html'\n\nclass PostCreateView(LoginRequiredMixin, CreateView):\n model = Post\n template_name = 'post_create.html'\n fields = '__all__'\n #在create的时候需要用户提供这个post的什么信息,这里选择需要提供所有的fields,但都不包括id\n login_url = 'login'\n #这里加入了LoginRequireMixin,意思是当create一个post的时候必须处于login的状态,如果不是的话跳转到login_url\n\nclass PostUpdateView(UpdateView):\n model = Post\n template_name = 'post_update.html'\n fields = ['title']\n\nclass PostDeleteView(DeleteView):\n model = Post\n template_name = 'post_delete.html'\n success_url = reverse_lazy('posts')\n # 不能直接用reverse,用reverse相当于在删除的时候同时在跳转,reverse_lazy是删除后再跳转\n # 所以当用到类似delete的操作时候都用reverse_lazy\n\nclass SignUp(CreateView):\n #form_class = UserCreationForm\n form_class = CustomUserCreationForm\n # 这里替换成用户自定义的creation form, 使sign up的时候收集更多的用户信息\n template_name = 'signup.html'\n success_url = reverse_lazy('login')\n\n","sub_path":"InstaApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"568261886","text":"\"\"\"\r\nThe template of the main script of the machine learning process\r\n\"\"\"\r\n\r\nimport games.arkanoid.communication as comm\r\nfrom games.arkanoid.communication import ( \\\r\n SceneInfo, GameStatus, PlatformAction\r\n)\r\n\r\ndef ml_loop():\r\n \"\"\"\r\n The main loop of the machine learning process\r\n\r\n This loop is run in a separate process, and communicates with the game process.\r\n\r\n Note that the game process won't wait for the ml process to generate the\r\n GameInstruction. It is possible that the frame of the GameInstruction\r\n is behind of the current frame in the game process. Try to decrease the fps\r\n to avoid this situation.\r\n \"\"\"\r\n\r\n # === Here is the execution order of the loop === #\r\n # 1. Put the initialization code here.\r\n ball_served = False\r\n\r\n lowest = -1 #lowest block position\r\n lock = False #locker\r\n old_x = 0\r\n old_y = 0\r\n est = -1 # the estimate position of x\r\n\r\n # 2. 
Inform the game process that ml process is ready before start the loop.\r\n comm.ml_ready()\r\n\r\n # 3. Start an endless loop.\r\n while True:\r\n # 3.1. Receive the scene information sent from the game process.\r\n scene_info = comm.get_scene_info()\r\n\r\n # 3.2. If the game is over or passed, the game process will reset\r\n # the scene and wait for ml process doing resetting job.\r\n if scene_info.status == GameStatus.GAME_OVER or \\\r\n scene_info.status == GameStatus.GAME_PASS:\r\n # Do some stuff if needed\r\n ball_served = False\r\n\r\n # 3.2.1. Inform the game process that ml process is ready\r\n comm.ml_ready()\r\n continue\r\n\r\n # 3.3. Put the code here to handle the scene information\r\n ball_x = scene_info.ball[0]\r\n ball_y = scene_info.ball[1]\r\n plat_x = scene_info.platform[0]\r\n plat_y = scene_info.platform[1]\r\n\r\n for x in scene_info.bricks:\r\n if x[1] > lowest:\r\n lowest = x[1]\r\n for x in scene_info.hard_bricks:\r\n if x[1] > lowest:\r\n lowest = x[1]\r\n print(scene_info.ball)\r\n #print(\"con1\" + str(ball_y>(lowest+10)))\r\n #print(\"con2\" + str(not lock))\r\n #print(\"con3\" + str(ball_y-old_y))\r\n #print(est)\r\n\r\n # 3.4. Send the instruction for this frame to the game process\r\n if not ball_served:\r\n comm.send_instruction(scene_info.frame, PlatformAction.SERVE_TO_RIGHT)\r\n ball_served = True\r\n elif lock == True:\r\n if ball_y == plat_y-5:\r\n print(\"unlock\")\r\n lock = False\r\n if plat_x > est-20:\r\n comm.send_instruction(scene_info.frame, PlatformAction.MOVE_LEFT)\r\n elif plat_x < est-20:\r\n comm.send_instruction(scene_info.frame, PlatformAction.MOVE_RIGHT)\r\n else:\r\n comm.send_instruction(scene_info.frame, PlatformAction.NONE) \r\n elif ball_y > (lowest+110) and not lock and (ball_y - old_y) > 0:\r\n lock = True\r\n print(\"locked\")\r\n vy = (ball_y-old_y)\r\n vx = abs(ball_x-old_x) #velocity of x\r\n if vx < 7:\r\n vx = 7\r\n print(\"velocity of x =\" + str(vx))\r\n print(\"velocity of y =\" + str(vy))\r\n #plat_y = 400\r\n if ball_x - old_x > 0: #move right\r\n if (200-ball_x)/vx > (plat_y-ball_y)/vy:\r\n est = (plat_y-ball_y)/vy*vx+ball_x\r\n else: #hit x = 200\r\n hit_y = ball_y+(200-ball_x)/vx*vy\r\n est = 200 - (plat_y-hit_y)/vy*vx\r\n else: #move left\r\n if (ball_x-0)/vx > (plat_y-ball_y)/vy:\r\n est = ball_x-(plat_y-ball_y)/vy*vx\r\n else: #hit x = 0\r\n hit_y = ball_y+ball_x/vx*vy\r\n est = (plat_y-hit_y)/(ball_y-old_y)*vx\r\n print(\"begin with \" + str(ball_x) + \",\" + str(ball_y))\r\n print(\"est=\" + str(est))\r\n comm.send_instruction(scene_info.frame, PlatformAction.NONE)\r\n else:\r\n if plat_x < 100:\r\n comm.send_instruction(scene_info.frame, PlatformAction.MOVE_RIGHT)\r\n elif plat_x > 100:\r\n comm.send_instruction(scene_info.frame, PlatformAction.MOVE_LEFT)\r\n else:\r\n comm.send_instruction(scene_info.frame, PlatformAction.NONE)\r\n \r\n old_x = ball_x\r\n old_y = ball_y\r\n","sub_path":"ml_play.py","file_name":"ml_play.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"402927033","text":"#%% [markdown]\n# # Similar Value Clustering\n\n#%%\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.signal as ss\nimport time\n\nnp.random.seed(19)\n\n#%%\nCPU_DATA_EXISTS = False\nMEM_DATA_EXISTS = False\n\n#%%\nif not CPU_DATA_EXISTS:\n CPU_DATA = np.load('google-cpu-full.npy')\n np.random.shuffle(CPU_DATA)\nCPU_DATA_EXISTS = True\n\nif not MEM_DATA_EXISTS:\n MEM_DATA = np.load('google-mem-full.npy')\n 
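# shuffle the machine axis so spatial/temporal samples are drawn at random
    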
np.random.shuffle(MEM_DATA)\nMEM_DATA_EXISTS = True\n\nprint(CPU_DATA.shape)\nprint(MEM_DATA.shape)\n\nassert CPU_DATA.shape == MEM_DATA.shape\n\nNO_OF_MACHINES = CPU_DATA.shape[0]\nNO_OF_TIMESTAMPS = CPU_DATA.shape[1]\nDATA = {'CPU': CPU_DATA, 'MEM': MEM_DATA}\n\n#%%\nSPATIAL_SAMPLE_SIZE = NO_OF_MACHINES\nCPU_SPATIAL_SAMPLE = CPU_DATA[:SPATIAL_SAMPLE_SIZE]\nMEM_SPATIAL_SAMPLE = MEM_DATA[:SPATIAL_SAMPLE_SIZE]\nSPATIAL_SAMPLES = {'CPU': CPU_SPATIAL_SAMPLE, 'MEM': MEM_SPATIAL_SAMPLE}\n\nCPU_SPATIAL_CORRELATIONS = np.empty(\n    (SPATIAL_SAMPLE_SIZE, SPATIAL_SAMPLE_SIZE-1))\nMEM_SPATIAL_CORRELATIONS = np.empty(\n    (SPATIAL_SAMPLE_SIZE, SPATIAL_SAMPLE_SIZE-1))\nSPATIAL_CORRELATIONS = {\n    'CPU': CPU_SPATIAL_CORRELATIONS, 'MEM': MEM_SPATIAL_CORRELATIONS}\n\nTEMPORAL_SAMPLE_SIZE = 200\nCPU_TEMPORAL_SAMPLE = CPU_DATA[:TEMPORAL_SAMPLE_SIZE]\nMEM_TEMPORAL_SAMPLE = MEM_DATA[:TEMPORAL_SAMPLE_SIZE]\n\nCPU_TEMPORAL_CORRELATIONS = np.empty(\n    (TEMPORAL_SAMPLE_SIZE, 2*NO_OF_TIMESTAMPS-1))\nMEM_TEMPORAL_CORRELATIONS = np.empty(\n    (TEMPORAL_SAMPLE_SIZE, 2*NO_OF_TIMESTAMPS-1))\nTEMPORAL_CORRELATIONS = {\n    'CPU': CPU_TEMPORAL_CORRELATIONS, 'MEM': MEM_TEMPORAL_CORRELATIONS}\n\n#%%\nplt.subplot(211).plot(CPU_DATA[0][:2*24*60//5])\nplt.title('CPU data')\nplt.subplot(212).plot(MEM_DATA[0][:2*24*60//5])\nplt.title('MEM data')\nplt.tight_layout()\nplt.show()\n\n#%%\nNO_OF_BINS = 10\nBIN_INTERVAL = 1 / NO_OF_BINS\nUTILISATION_BINS = {(i / NO_OF_BINS,\n                     (i + 1) / NO_OF_BINS) for i in range(NO_OF_BINS)}\n\n# 5 minutes,\n# 10 minutes,\n# 15 minutes,\n# 1 hour,\n# 1 day,\n# 1 week.\n# Unit of time in timestamps is 5 minutes.\nTREND_LENGTHS = [1, 2, 3, 60 // 5, 1 * 24 * 60 // 5, 7 * 24 * 60 // 5]\nTREND_VALS = {'INCREASING', 'STABLE', 'DECREASING'}\n\nclass Cluster(object):\n    def __init__(self, t, bin, trends):\n        self.members = []\n        self.value = None\n        self.t = t\n        self.bin = bin\n        self.previous_values = dict()\n        self.percent_changes = dict()\n        self.trends = trends\n        for trend in self.trends:\n            self.percent_changes[trend] = None\n            self.previous_values[trend] = None\n\n    def add_member(self, new_member):\n        assert new_member.t == self.t\n        self.members.append(new_member)\n        cluster_size = len(self.members)\n        if self.value is None:\n            self.value = new_member.value\n        else:\n            self.value = (((cluster_size-1) * self.value + new_member.value)\n                          / cluster_size)\n        for trend in self.trends:\n            if self.previous_values[trend] is None:\n                self.previous_values[trend] = new_member.previous_values[trend]\n            elif new_member.previous_values[trend] is not None:\n                self.previous_values[trend] = (\n                    ((cluster_size-1) * self.previous_values[trend]\n                     + new_member.previous_values[trend]) / cluster_size)\n\n            if self.percent_changes[trend] is None:\n                self.percent_changes[trend] = new_member.percent_changes[trend]\n            elif new_member.percent_changes[trend] is not None:\n                self.percent_changes[trend] = (\n                    ((cluster_size-1) * self.percent_changes[trend]\n                     + new_member.percent_changes[trend]) / cluster_size)\n\nclass ClusterMember(object):\n    def __init__(self, data_type, machine_no, t):\n        samples = SPATIAL_SAMPLES[data_type]\n        self.value = samples[machine_no, t]\n        self.machine_no = machine_no\n        self.t = t\n\n        if self.value < 0:\n            self.bin = (0.0, 0.1)\n        elif self.value > 1:\n            self.bin = (0.9, 1.0)\n        else:\n            lo = self.value // BIN_INTERVAL / NO_OF_BINS\n            hi = round(lo + BIN_INTERVAL, 2)\n            self.bin = (lo, hi)\n        # two placeholders for two arguments, so a failing assert formats cleanly\n        assert self.bin in UTILISATION_BINS, '{} is not in {}.'.format(\n            self.bin, UTILISATION_BINS)\n        self.previous_values = dict()\n        self.percent_changes = dict()\n        
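# per-horizon trend labels ('INCREASING'/'STABLE'/'DECREASING'), assigned in the loop below
        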
self.trends = dict()\n\n for length in TREND_LENGTHS:\n if length > t:\n previous_value = None\n percent_change = None\n trend = 'STABLE'\n else:\n previous_value = samples[machine_no, t - length]\n\n if previous_value == 0.0:\n percent_change = -1.0\n else:\n percent_change = self.value / previous_value - 1\n\n if percent_change > 1/3:\n trend = 'INCREASING'\n elif percent_change < -1/3:\n trend = 'DECREASING'\n else:\n trend = 'STABLE'\n assert trend in TREND_VALS, '{} is not in {}'.format(trend,\n TREND_VALS)\n\n self.previous_values[length] = previous_value\n self.percent_changes[length] = percent_change\n self.trends[length] = trend\n\nCLUSTERS_AT_T = dict()\nfor t in range(NO_OF_TIMESTAMPS)[:1]:\n CLUSTERS_AT_T[t] = dict()\n\n#%%\nstart = time.process_time()\nfor data_type in SPATIAL_SAMPLES:\n samples = SPATIAL_SAMPLES[data_type]\n for machine_no in range(SPATIAL_SAMPLE_SIZE):\n for t in range(NO_OF_TIMESTAMPS)[:1]:\n clusters = CLUSTERS_AT_T[t]\n member = ClusterMember(data_type, machine_no, t)\n key = [member.bin]\n for trend_length in TREND_LENGTHS:\n key.append(member.trends[trend_length])\n key = tuple(key)\n if key not in clusters:\n clusters[key] = (\n Cluster(member.t, member.bin, member.trends))\n clusters[key].add_member(member)\nelapsed_time = time.process_time() - start\n\n#%%\nprint(elapsed_time)\nprint('\\n')\nsum = 0\nfor i in CLUSTERS_AT_T[0].keys():\n sum += len(CLUSTERS_AT_T[0][i].members)\n print(\n len(CLUSTERS_AT_T[0][i].members)\n )\nsum\n\n#%%\nsum/NO_OF_MACHINES\n\n#%%\n\n","sub_path":"old/similar_value_clustering.py","file_name":"similar_value_clustering.py","file_ext":"py","file_size_in_byte":6380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"229582906","text":"from pylab import show, text, imshow, plot\nimport matplotlib.pyplot as plt\nimport time\n\nfrom minimize import *\n\n# Average hourly wage\nHOURLY_WAGE = [2.98, 3.09, 3.23, 3.33, 3.46, 3.6, 3.73, 2.91, 4.25, 4.47, 5.04, 5.47, 5.76]\n# Number of homicides per 100,000 people\nMURDERS = [8.6, 8.9, 8.52, 8.89, 13.07, 14.57, 21.36, 28.03, 31.49, 37.39, 46.26, 47.24, 52.33]\n\ndef Cost(B, X=HOURLY_WAGE, Y=MURDERS):\n cost = 0.0\n for i in xrange(0, len(X)):\n cost += (B[0] + B[1] * X[i] - Y[i]) ** 2\n return cost\n\nb1 = np.arange(-60, -14, 1)\nb2 = np.arange(5, 26, 1)\nC = np.matrix([[0 for j in range(len(b2))] for i in range(len(b1))])\n\nfor i in range(len(b1)):\n for j in range(len(b2)):\n C[i, j] = Cost([b1[i], b2[j]])\n\ndef heatmap(X, Y, f, k, trace=None): # trace is a list of [b1, b2] pairs\n ax[k].imshow(C,\n origin='lower',\n extent=[min(b1), max(b1), min(b2), max(b2)],\n vmax=abs(C).max(), vmin=-abs(C).max()\n )\n\nLEARNING_RATE = [80, 40]\nh = 0.00001\nPRECISION = 0.00000000000001\nf, ax = plt.subplots(2, sharex=True)\n\nfor k in range(2): # We need to generate 2 plot\n\n B0 = [0, 0]\n random.seed(int(round(time.time() * 1000)))\n B0[0] = random.randrange(-50, -20)\n B0[1] = random.randrange(10, 20)\n\n begin = time.time()\n (m, steps, trace) = minimize(Cost, B0, LEARNING_RATE, h, PRECISION)\n end = time.time()\n\n heatmap(HOURLY_WAGE, MURDERS, Cost, k, trace)\n ax[k].set_title(\"Trace in heat map %i\" %(k+1))\n ax[k].text(-59.5, 21.5, \"Steps: %i\" %steps)\n ax[k].text(-59.5, 8, \"Start B0: %i Start B1: %i\" %(B0[0], B0[1]))\n for i in range(len(trace)):\n if i == 0:\n ax[k].plot(trace[i][0], trace[i][1], \"ko\", markersize=6, color='red')\n ax[k].text(trace[i][0] - 1.3, trace[i][1] + 0.7, \"Start\")\n elif i == len(trace) - 1:\n 
ax[k].plot(trace[i][0], trace[i][1], \"ko\", markersize=6, color='red')\n ax[k].text(trace[i][0] - 1, trace[i][1] + 0.7, \"Stop\")\n else:\n ax[k].plot(trace[i][0], trace[i][1], \"ko\", markersize=2)\nshow()","sub_path":"regression/plot_regression.py","file_name":"plot_regression.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"440745156","text":"# -*- coding:utf-8 -*-\r\nimport os\r\nfrom glob import glob\r\nr_img_dir='pose_data_with_imu/model-166532/10'#model-240002/09\r\nN = len(glob(r_img_dir + '/*.txt')) # glob(img_dir + '/*.png'):查找在img_dir文件夹下的.png文件\r\nw_img_dir = 'ours_10_imu.txt'\r\nfns = [os.path.join(root,fn) for root, dirs, files in os.walk(r_img_dir) for fn in files]\r\nfns.sort()\r\nwf=open(w_img_dir,'w')\r\nfor i in range(N):\r\n if i==0:\r\n rf = open(fns[i],'r')\r\n content=rf.readlines()\r\n first_lines=content[0]\r\n second_lines = content[1]\r\n wf.write(first_lines)\r\n wf.write(second_lines)\r\n # elif i==(N-1):\r\n # rf = open(fns[i], 'r')\r\n # content = rf.readlines()\r\n # thirs_lines = content[2]\r\n # wf.write(thirs_lines)\r\n else:\r\n rf = open(fns[i], 'r')\r\n content = rf.readlines()\r\n second_lines = content[1]\r\n wf.write(second_lines)\r\n\r\n","sub_path":"create_full_trajectory.py","file_name":"create_full_trajectory.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"305095769","text":"#!/usr/bin/env python3\n\nimport sys\nimport re\nimport pandas as pd\n\ncolumn_order = ['geoid', 'name', 'state_fips', 'county_fips', 'tract_code',\n 'block_group_code', 'land_area', 'water_area', 'wkt']\n\nstate = pd.read_csv(sys.argv[1])\n\nstate.columns = state.columns.str.lower()\nstate = state.drop(['region', 'division', 'statens', 'geoid', 'stusps', 'lsad', 'mtfcc',\n 'funcstat', 'intptlat', 'intptlon', 'shape_length', 'shape_area'], axis=1)\n\nstate.columns = ['wkt', 'state_fips', 'name', 'land_area', 'water_area', 'geoid']\n\nstate['county_fips'] = ''\nstate['tract_code'] = ''\nstate['block_group_code'] = ''\n\nstate = state[column_order]\n\nstate['state_fips'] = state['state_fips'].astype(str).replace(re.compile('\\.0$'), '')\nstate['state_fips'] = state['state_fips'].replace('nan', '')\nstate['state_fips'] = state['state_fips'].str.pad(2, 'left', '0')\n\nstate.to_csv(sys.argv[2], index=False)\n\n","sub_path":"acs/bin/process_state_geo.py","file_name":"process_state_geo.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"481878513","text":"import matplotlib.pyplot as plt\nimport math\nimport sys\nimport time\nfrom subprocess import Popen, PIPE\nclass GaussSolver():\n def __init__(self, func, a, b, n):\n self.func = func\n self.a = a\n self.b = b\n self.n = n\n self.result = 0\n def legendre(self, n, x):\n if n == 0:\n return 1\n elif n == 1:\n return x\n else:\n return ((2*n-1) * x * self.legendre(n-1,x) - (n-1) * self.legendre(n-2,x))/n\n \n def dLegendre(self, n, x):\n return (n * (x* self.legendre(n, x)- self.legendre(n-1,x)))/(x*x-1)\n\n def weight(self, n, x):\n return 2/((1-x*x)*(self.dLegendre(n,x)**2))\n\n def legendreZeros(self, n, i):\n xold = math.cos(math.pi*(i-.25)/(n+ .5))\n xnew = 0\n iteration = 1\n while ((1+abs(xnew - xold)) > 1):\n if iteration != 1:\n xold = xnew\n xnew = xold - self.legendre(n, xold)/ self.dLegendre(n, xold)\n iteration +=1\n \n 
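# --- Editor's note (hedged sketch; not part of the original gauss.py) --------
# The loop condition above, (1 + abs(xnew - xold)) > 1, keeps iterating until
# the Newton update drops below machine epsilon relative to 1.0 - terse, but
# easy to misread. The hand-rolled nodes and weights can be cross-checked
# against numpy's reference implementation; only numpy is assumed here.
import numpy as np

def reference_nodes_weights(n):
    '''Gauss-Legendre nodes and weights on [-1, 1], for validating GaussSolver.'''
    return np.polynomial.legendre.leggauss(n)

# Example cross-check (the solver enumerates zeros from the largest root):
#   nodes, _ = reference_nodes_weights(5)
#   solver = GaussSolver(func, 0, 1, 5)
#   assert abs(solver.legendreZeros(5, 1) - nodes[-1]) < 1e-9
# -----------------------------------------------------------------------------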
return xnew\n \n def execute(self):\n integral = 0\n iteration = 2\n for i in range(1,self.n+1):\n integral = integral + self.func(self.legendreZeros(self.n, i)) * self.weight(self.n, self.legendreZeros(self.n, i))\n self.result = ((self.b - self.a)/2) * integral\n\n def get_result(self):\n return self.result\n\n\ndef func(x):\n xn = x*.5 + .5\n return (xn**3/(xn+1))* math.cos(xn*xn)\ndef func2(x):\n xn = x*.5 + .5\n return (xn**3)\n\n\n\n# g = GaussSolver(func, 0, 1, n)\n# start = time.time()\n# g.execute()\n# end = time.time()\n# python_time = end-start\n# print(\"Result of python code (n = {}): {}\".format(n, g.get_result()))\n# start = time.time()\n# p = Popen(['./q1 {}'.format(n)], shell=True, stdout=PIPE, stdin=PIPE)\n# result = p.stdout.readline().strip()\n# end = time.time()\n# cpp_time = end - start\n# print(result.decode())\n# print('python time: '+ str(python_time) + ' , c++ time: '+ str(cpp_time))\n\n\npython_dict = {}\ncpp_dict = {}\nfor i in range(1,24):\n g = GaussSolver(func, 0, 1, i)\n start = time.time()\n g.execute()\n end = time.time()\n python_time = end-start\n python_dict[i] = python_time\n start = time.time()\n p = Popen(['./q1 {}'.format(i)], shell=True, stdout=PIPE, stdin=PIPE)\n result = p.stdout.readline().strip()\n end = time.time()\n cpp_time = end - start\n cpp_dict[i] = cpp_time\n\nplt.figure(1)\n# plt.subplot(211)\nplt.plot(list(python_dict.keys()), list(python_dict.values()), label='Python')\n\n# plt.subplot(212)\nplt.plot(list(cpp_dict.keys()), list(cpp_dict.values()), label = 'C++')\nplt.show()\n","sub_path":"file_handle/gauss.py","file_name":"gauss.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"513866368","text":"import RPi.GPIO as GPIO\nimport time\n\ndef button_event(channel):\n print('Button press detected')\n\nbutton_pin = 18\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(button_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.add_event_detect(button_pin, GPIO.FALLING,\n callback=button_event, bouncetime=100)\n\nwhile True:\n # Do whatever else you need to do here\n time.sleep(10)\n print('I am alive')\n\nGPIO.cleanup()\n","sub_path":"PiExamples/button_interrupt.py","file_name":"button_interrupt.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"343598739","text":"import dbconn as dbc\n\n\ndef insert_db(values):\n connection = dbc.get_connection()\n \n with connection.cursor() as cursor:\n sql = \"DROP TABLE IF EXISTS wetter\"\n cursor.execute(sql)\n sql = \"CREATE TABLE wetter (latitude float NOT NULL,longtitude float NOT NULL,timezone VARCHAR(60) NOT NULL, summary VARCHAR(120),icon VARCHAR(25),preciptype VARCHAR(25),temperature float NOT NULL ,time VARCHAR(30),apparentTemperature float, cuurently_humidity float, dewPoint float, windSpeed float, cloudCover float, visibility float, uvIndex float);\"\n \n cursor.execute(sql)\n \n sql = \"INSERT INTO wetter VALUES (\"\n \n for list in values:\n for value in list: \n if isinstance(value,basestring):\n sql = sql + \"'\" + value + \"',\"\n else:\n sql = sql + str(value) + \",\"\n\n sql = sql[:-1] + \"),(\"\n \n sql = sql[:-2] + \";\"\n\n cursor.execute(sql) \n connection.commit()\n","sub_path":"db_query.py","file_name":"db_query.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"208370609","text":"import pymysql\n\n\ndef 
get_id():\n # 打开数据库连接\n db = pymysql.connect(\"192.168.103.31\", \"root\", \"adminadmin\", \"fluent\", charset=\"utf8mb4\")\n\n # 使用 cursor() 方法创建一个游标对象 cursor\n cursor = db.cursor()\n\n # SQL 查询语句\n sql = \"\"\"SELECT ID FROM PROVIDER\"\"\"\n\n try:\n # 执行SQL语句\n cursor.execute(sql)\n # 获取所有记录列表\n results = cursor.fetchall()\n id_set = set()\n for row in results:\n id_set.add(\"\".join(row)) # 元祖转字符串\n print(id_set)\n print(len(id_set))\n return id_set\n except:\n print(\"Error: unable to fecth data\")\n\n # 关闭数据库连接\n db.close()\n # cursor.execute(sql)\n # # 提交到数据库执行\n # db.commit()\n #\n # # 关闭数据库连接\n # db.close()\n\n\nif __name__ == '__main__':\n get_id()\n","sub_path":"IDGdemo/fast_fluent/provider/mysql/mysql_get_id.py","file_name":"mysql_get_id.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"538111320","text":"import time\nimport ujson\n\nfrom unittest import TestCase\nfrom pyramid.config import Configurator\nfrom pyramid.response import Response\nfrom werkzeug.test import Client\n\nfrom mashapeanalytics.middleware import WsgiMiddleware\nfrom tests.helpers import mock_server\n\n##\n# Pyramid App\n##\ndef create_app():\n def root(request):\n time.sleep(0.01) # Sleep for 10 ms\n return Response('Hello World')\n\n config = Configurator()\n config.add_route('root', '/')\n config.add_view(root, route_name='root')\n\n app = config.make_wsgi_app()\n\n return app\n\n##\n# Test Pyramid\n##\nclass PyramidMiddewareTest(TestCase):\n def setUp(self):\n self.app = WsgiMiddleware(create_app(), 'SERVICE_TOKEN', 'ENVIRONMENT', 'localhost', 56000)\n\n def tearDown(self):\n pass\n\n def test_get(self):\n status = '200 OK' # HTTP Status\n headers = [('Content-type', 'application/json')] # HTTP Headers\n\n # Mock collector\n with mock_server(56000, status, headers, 'Yo!') as collector:\n client = Client(self.app)\n data, status, headers = client.open()\n data = (b'').join(data)\n\n self.assertIn('Hello', str(data))\n\n request = collector.get()\n self.assertEqual(request.get('url'), u'http://localhost:56000/1.0.0/single')\n\n alf = ujson.loads(request.get('body'))\n self.assertTrue(alf['har']['log']['entries'][0]['timings']['wait'] >= 10)\n","sub_path":"tests/test_pyramid.py","file_name":"test_pyramid.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"203630654","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import StepLR, ExponentialLR\nimport numpy as np\nimport os\nfrom collections import OrderedDict\nfrom tqdm import tqdm\nfrom tensorboardX import SummaryWriter\nimport argparse\nfrom common import config\nfrom model import network\nfrom utils import TrainClock, save_args, ensure_dir, WorklogLogger, cycle\nfrom dataset import get_dataloaders\n\ntorch.backends.cudnn.benchmark = True\n\n\nclass Session:\n\n def __init__(self, config, net=None):\n self.log_dir = config.log_dir\n ensure_dir(self.log_dir)\n self.model_dir = config.model_dir\n ensure_dir(self.model_dir)\n self.net = net\n self.best_val_acc = 0.0\n self.clock = TrainClock()\n\n\n def save_checkpoint(self, name):\n ckp_path = os.path.join(self.model_dir, name)\n tmp = {\n 'net': self.net,\n 'best_val_acc': self.best_val_acc,\n 'clock': self.clock.make_checkpoint(),\n }\n torch.save(tmp, ckp_path)\n\n\n def load_checkpoint(self, ckp_path):\n checkpoint = torch.load(ckp_path)\n self.net = 
checkpoint['net']\n self.clock.restore_checkpoint(checkpoint['clock'])\n self.best_val_acc = checkpoint['best_val_acc']\n\n\n def set_criterion_dict(self, criterion_dict):\n self.criterion_dict = criterion_dict\n\n\n def train_func(self, data):\n # get data\n inputs = data['inputs']\n targets = data['targets']\n inputs = inputs.to(config.device)\n targets = targets.to(config.device)\n\n # pass through the model\n outputs = self.net(inputs)\n\n # update loss metric\n losses = {}\n losses['MSE_loss'] = self.criterion_dict['MSE_loss'](outputs, targets)\n\n return outputs, losses\n\n\n def val_func(self, data):\n # get data\n inputs = data['inputs']\n targets = data['targets']\n inputs = inputs.to(config.device)\n targets = targets.to(config.device)\n\n # pass through the model\n with torch.no_grad():\n outputs = self.net(inputs)\n\n # update loss metric\n losses = {}\n losses['MSE_loss'] = self.criterion_dict['MSE_loss'](outputs, targets)\n\n return outputs, losses\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--continue', dest='continue_path', type=str, required=False)\n parser.add_argument('-g', '--gpu_ids', type=int, default=0, required=False)\n args = parser.parse_args()\n print(args)\n save_args(args, config.log_dir)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu_ids)\n config.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n if not os.path.exists('train_log'):\n os.symlink(config.exp_dir, 'train_log')\n\n net = network()\n net = net.cuda()\n\n # create session\n sess = Session(config, net=net)\n if args.continue_path and os.path.exists(args.continue_path):\n sess.load_checkpoint(args.continue_path)\n\n # create logger\n logger = WorklogLogger(os.path.join(config.log_dir, 'log.txt'))\n\n # create tensorboard writer\n train_tb = SummaryWriter(os.path.join(sess.log_dir, 'train.events'))\n val_tb = SummaryWriter(os.path.join(sess.log_dir, 'val.events'))\n\n # create dataloader\n train_loader = get_dataloaders('train', batch_size=config.batch_size,\n shuffle=True)\n val_loader = get_dataloaders('validation', batch_size=config.batch_size)\n val_loader = cycle(val_loader)\n\n # set criterion and AverageMeter to calc and monitor loss\n criterion_dict = {\n 'MSE_loss': nn.MSELoss()\n }\n sess.set_criterion_dict(criterion_dict)\n\n # set optimizer\n optimizer = optim.Adam(sess.net.parameters(), config.lr)\n\n # set learning rate scheduler\n #scheduler = ExponentialLR(optimizer, 0.9)\n scheduler = StepLR(optimizer, step_size=300)\n\n # start session\n clock = sess.clock\n net = sess.net\n sess.save_checkpoint('start.pth.tar')\n\n # start training\n net.train()\n for e in range(config.nr_epochs):\n pbar = tqdm(train_loader)\n for b, data in enumerate(pbar):\n # pass through net and get loss\n\n outputs, losses = sess.train_func(data)\n losses_values = {k:v.item() for k, v in losses.items()}\n\n # update loss metric\n loss = 0\n for k, v in losses.items():\n loss += v\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # write to log\n logger.put_line(\n \"Train: EPOCH[{}][{}/{}]: \".format(e, b, len(train_loader)) +\n \" \".join([\n \"{}={:.2g}\".format(k, v) for k, v in losses_values.items()\n ])\n )\n\n # update tensorboard\n for k, v in losses_values.items():\n train_tb.add_scalar(k, v, clock.step)\n\n # visualize\n if clock.step % config.display_frequency == 0:\n pass\n\n pbar.set_description(\"EPOCH[{}][{}/{}]\".format(e, b, len(train_loader)))\n 
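# --- Editor's note (hedged sketch; not part of the original train.py) --------
# val_func above disables gradients with torch.no_grad(), but the network is
# left in training mode, so dropout/batch-norm layers keep their training
# behaviour during validation. If that is unintended, a small context manager
# keeps the mode switch symmetric; evaluating_mode is a hypothetical helper.
import contextlib

@contextlib.contextmanager
def evaluating_mode(module):
    '''Temporarily switch a torch module to eval mode, restoring it afterwards.'''
    was_training = module.training
    module.eval()
    try:
        yield module
    finally:
        if was_training:
            module.train()

# Usage in the validation branch of the loop below:
#   with evaluating_mode(net):
#       outputs, losses = sess.val_func(data)
# -----------------------------------------------------------------------------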
pbar.set_postfix(OrderedDict(losses_values))\n\n # validation\n if clock.step % config.val_frequency == 0:\n data = next(val_loader)\n\n outputs, losses = sess.val_func(data)\n losses_values = {k:v.item() for k, v in losses.items()}\n\n logger.put_line(\n \"Val: EPOCH[{}][{}/{}]: \".format(e, b, len(train_loader)) +\n \" \".join([\n \"{}={:.2g}\".format(k, v) for k, v in losses_values.items()\n ])\n )\n\n for k, v in losses_values.items():\n val_tb.add_scalar(k, v, clock.step)\n\n if clock.step % config.display_frequency == 0:\n pass\n\n clock.tick()\n\n train_tb.add_scalar('learning_rate', optimizer.param_groups[-1]['lr'], clock.epoch)\n scheduler.step(clock.epoch)\n\n if clock.epoch % config.save_frequency == 0:\n sess.save_checkpoint('epoch{}.pth.tar'.format(clock.epoch))\n sess.save_checkpoint('latest.pth.tar')\n\n clock.tock()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"code-snippets/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"508508890","text":"from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient\nimport json\nimport time\nimport simplejson #for decimal support\n\nfrom faker import Faker #using faker to produce mock sensors data\n\nwith open('config.json') as f:\n config = json.load(f)\n\nclient = AWSIoTMQTTClient(config[\"clientId\"])\nclient.configureEndpoint(config[\"host\"], config[\"port\"])\nclient.configureCredentials(config[\"caPath\"], config[\"keyPath\"], config[\"certPath\"])\n\nclient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing\nclient.configureDrainingFrequency(2) # Draining: 2 Hz\nclient.configureConnectDisconnectTimeout(10) # 10 sec\nclient.configureMQTTOperationTimeout(5) # 5 sec\n\n\n# Custom MQTT message callback\ndef customCallback(client, userdata, message):\n print(\"Received a new message: \")\n print(message.payload)\n print(\"from topic: \")\n print(message.topic)\n print(\"--------------\\n\\n\")\n\ntopic = \"tcu\"\nclient.connect()\nclient.subscribe(topic, 1, customCallback)\n\nfake = Faker()\n\nloopCount = 0\nwhile True:\n message = {}\n\n message['vin'] = config[\"clientId\"]\n message['MotorRpm'] = loopCount #speed of the motor\n message['tempOilMotor'] = loopCount #temperature of the oil\n message['torqueMotor'] = loopCount #torque of the motor\n message['powerMotorTotal'] = loopCount #calculated power of the motor\n message['altitude'] = loopCount #altitude of the car \n message['latitude'] = fake.latlng()[0] #gps latitude\n message['longitude'] = fake.latlng()[1] #gps longitude\n message['carSpeed'] = loopCount #speed of the car (instant speed)\n message['timeStamp'] = time.time() #time stamp for the above results\n messageSJson = simplejson.dumps(message)\n client.publish(topic, messageSJson, 1)\n print('Published topic %s: %s\\n' % (topic, messageSJson))\n time.sleep(2)\n loopCount += 1\n","sub_path":"raspberry/data generator/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"49608086","text":"import os #with_python\nimport zipfile #with_python\nimport random #with_python\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport shutil\nfrom shutil import copyfile #with_python\n\nlocal_zip = '/tmp/cats-and-dogs.zip'\nzip_ref = zipfile.ZipFile(local_zip, 
'r')\nzip_ref.extractall('/tmp')\nzip_ref.close()\n\nprint(len(os.listdir('/tmp/PetImages/Cat/')))\nprint(len(os.listdir('/tmp/PetImages/Dog/')))\n\n#mkdir -> another function to create dir..\n#rm -rf '/tmp/PetImages/'\ntry:\n    os.makedirs('/tmp/cats-v-dogs/training/cats/')\n    os.makedirs('/tmp/cats-v-dogs/training/dogs/')\n    os.makedirs('/tmp/cats-v-dogs/testing/cats/')\n    os.makedirs('/tmp/cats-v-dogs/testing/dogs/')\nexcept OSError:\n    pass\n\ndef split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE):\n    name_list = os.listdir(SOURCE)\n    name_list = random.sample(name_list, len(name_list)) #shuffling list\n    sz = int(len(name_list)*SPLIT_SIZE)+1\n    list_train = name_list[0:sz]\n    list_test = name_list[sz:] #everything after the split point goes to testing\n\n    for item in list_train:\n        if(os.path.getsize(SOURCE+item)>0):\n            copyfile(SOURCE+item, TRAINING+item)\n\n    for item in list_test:\n        if(os.path.getsize(SOURCE+item)>0):\n            copyfile(SOURCE+item, TESTING+item)\n\nCAT_SOURCE_DIR = \"/tmp/PetImages/Cat/\"\nTRAINING_CATS_DIR = \"/tmp/cats-v-dogs/training/cats/\"\nTESTING_CATS_DIR = \"/tmp/cats-v-dogs/testing/cats/\"\nDOG_SOURCE_DIR = \"/tmp/PetImages/Dog/\"\nTRAINING_DOGS_DIR = \"/tmp/cats-v-dogs/training/dogs/\"\nTESTING_DOGS_DIR = \"/tmp/cats-v-dogs/testing/dogs/\"\n\nsplit_size = .9\nsplit_data(CAT_SOURCE_DIR, TRAINING_CATS_DIR, TESTING_CATS_DIR, split_size)\nsplit_data(DOG_SOURCE_DIR, TRAINING_DOGS_DIR, TESTING_DOGS_DIR, split_size)\n\nprint(len(os.listdir('/tmp/cats-v-dogs/training/cats/')))\nprint(len(os.listdir('/tmp/cats-v-dogs/training/dogs/')))\nprint(len(os.listdir('/tmp/cats-v-dogs/testing/cats/')))\nprint(len(os.listdir('/tmp/cats-v-dogs/testing/dogs/')))\n\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.Conv2D(16,(3,3),activation='relu',input_shape=(300,300,3)),\n    tf.keras.layers.MaxPooling2D(2,2),\n    tf.keras.layers.Conv2D(32,(3,3),activation='relu'),\n    tf.keras.layers.MaxPooling2D(2,2),\n    tf.keras.layers.Conv2D(64,(3,3),activation='relu'),\n    tf.keras.layers.MaxPooling2D(2,2),\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(512,activation='relu'),\n    tf.keras.layers.Dense(1,activation='sigmoid')\n])\n\nmodel.compile(optimizer=RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['accuracy'])\nmodel.summary()\n\nTRAINING_DIR = '/tmp/cats-v-dogs/training/'\ntrain_datagen = ImageDataGenerator(rescale=1/255.0)\ntrain_generator = train_datagen.flow_from_directory(\n    TRAINING_DIR,\n    target_size=(300,300),\n    batch_size=128,\n    class_mode='binary'\n)\n\nVALIDATION_DIR = '/tmp/cats-v-dogs/testing/'\nvalidation_datagen = ImageDataGenerator(rescale=1/255.0)\nvalidation_generator = validation_datagen.flow_from_directory(\n    VALIDATION_DIR,\n    target_size=(300,300),\n    batch_size=32,\n    class_mode='binary'\n)\n\nhistory = model.fit(train_generator,\n                    epochs=15,\n                    validation_data=validation_generator)\n\n\nshutil.rmtree('/tmp/cats-v-dogs/')\n","sub_path":"Course2-Ex1.py","file_name":"Course2-Ex1.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"525526797","text":"# -*- encoding: utf-8 -*-\r\n##############################################################################\r\n# Company: Tecvemar, c.a.\r\n# Author: Juan Márquez\r\n# Creation Date: 2018-10-16\r\n# Version: 0.0.0.1\r\n#\r\n# Description:\r\n#\r\n#\r\n##############################################################################\r\n\r\n# ~ from datetime import datetime\r\nfrom osv import fields, osv\r\nfrom tools.translate import _\r\n# ~ import pooler\r\nimport decimal_precision as dp\r\nimport time\r\nimport 
netsvc\n\n##------------------------------------------------------------- tcv_consignment\n\n\nclass tcv_consig_invoice(osv.osv):\n\n _name = 'tcv.consig.invoice'\n\ntcv_consig_invoice()\n\n\n\nclass tcv_consignment(osv.osv):\n\n _name = 'tcv.consignment'\n\n _description = ''\n\n ##-------------------------------------------------------------------------\n\n ##------------------------------------------------------- _internal methods\n\n def _get_type(self, cr, uid, context=None):\n context = context or {}\n return context.get('consignment_type', 'out_consignment')\n\n def _get_consig_partner_id(self, cr, uid, config_id, context=None):\n return self.pool.get('tcv.consignment.config').\\\n get_consig_partner_id(cr, uid, config_id)\n\n ##--------------------------------------------------------- function fields\n\n _columns = {\n 'name': fields.char(\n 'Reference', size=16, required=True, readonly=True),\n 'date': fields.date(\n 'Date', required=True, readonly=True,\n states={'draft': [('readonly', False)]}, select=True),\n 'config_id': fields.many2one(\n 'tcv.consignment.config', 'Configuration', readonly=True,\n states={'draft': [('readonly', False)]}, required=True,\n ondelete='restrict', help=\"Config settings for this document\"),\n 'partner_id': fields.many2one(\n 'res.partner', 'Partner', change_default=True,\n readonly=True, required=True, ondelete='restrict'),\n 'user_id': fields.many2one(\n 'res.users', 'User', readonly=True, select=True,\n ondelete='restrict'),\n 'narration': fields.text(\n 'Notes', readonly=False),\n 'line_ids': fields.one2many(\n 'tcv.consignment.lines', 'line_id', 'Detail',\n readonly=True, states={'draft': [('readonly', False)]}),\n 'type': fields.selection(\n [('in_consignment', 'In consignment'),\n ('out_consignment', 'Out consignment')],\n string='Type', required=True, readonly=True),\n 'state': fields.selection(\n [('draft', 'Draft'), ('done', 'Done'), ('cancel', 'Cancelled')],\n string='State', required=True, readonly=True),\n 'move_id': fields.many2one(\n 'account.move', 'Accounting entries', ondelete='set null',\n help=\"The move of this entry line.\", select=True, readonly=True),\n 'picking_id': fields.many2one(\n 'stock.picking', 'Picking', readonly=False, ondelete='set null',\n help=\"The picking for this entry line\"),\n 'company_id': fields.many2one(\n 'res.company', 'Company', required=True, readonly=True,\n ondelete='restrict'),\n }\n\n _defaults = {\n 'name': lambda *a: '/',\n 'type': _get_type,\n 'user_id': lambda s, c, u, ctx: u,\n 'date': lambda *a: time.strftime('%Y-%m-%d'),\n 'state': lambda *a: 'draft',\n 'company_id': lambda self, cr, uid, c: self.pool.get('res.company').\n _company_default_get(cr, uid, self._name, context=c),\n }\n\n _sql_constraints = [\n ('tcv_consig_invoiceuniq', 'UNIQUE(name)', 'The name must be unique!'),\n ]\n\n ##-------------------------------------------------------------------------\n\n ##---------------------------------------------------------- public methods\n\n def create_stock_move_lines(self, cr, uid, item, lines, context=None):\n res = []\n obj_lot = self.pool.get('stock.production.lot')\n for line in item.line_ids:\n date = time.strftime('%Y-%m-%d %H:%M:%S')\n location_id = obj_lot.get_actual_lot_location(\n cr, uid, line.prod_lot_id.id, context=None)\n data = {\n 'name': item.name,\n 'product_id': line.product_id.id,\n 'product_qty': line.product_uom_qty,\n 'product_uom': line.product_id.uom_id.id,\n 'product_uos_qty': line.product_uom_qty,\n 'product_uos': line.product_id.uom_id.id,\n 'pieces_qty': 
line.pieces,\n 'date': date,\n 'date_expected': date,\n 'prodlot_id': line.prod_lot_id.id,\n 'location_id': location_id and location_id[0] or 0,\n 'location_dest_id': item.config_id.stock_location_id.id,\n }\n res.append(data)\n return res\n\n def create_stock_picking(self, cr, uid, ids, vals, context=None):\n context = context or {}\n ids = isinstance(ids, (int, long)) and [ids] or ids\n obj_pck = self.pool.get('stock.picking')\n company_id = self.pool.get('res.company')._company_default_get(\n cr, uid, self._name, context=context)\n date = time.strftime('%Y-%m-%d %H:%M:%S')\n for item in self.browse(cr, uid, ids, context=context):\n address = [addr for addr in item.partner_id.address\n if addr.type == 'invoice']\n lines = self.create_stock_move_lines(\n cr, uid, item, None, context)\n picking = {\n 'name': '/',\n 'type': 'internal',\n 'origin': ' '.join((item.name, item.config_id.name)),\n 'date': date,\n 'invoice_state': 'none',\n 'stock_journal_id': item.config_id.stock_journal_id.id,\n 'company_id': company_id,\n 'auto_picking': False,\n 'move_type': 'one',\n 'partner_id': item.partner_id.id,\n 'address_id': address[0].id,\n 'state_rw': 0,\n 'note': item.narration,\n 'move_lines': lines and [(0, 0, l) for l in lines],\n }\n\n pick_id = obj_pck.create(cr, uid, picking, context)\n return pick_id\n\n def create_account_move_lines(self, cr, uid, item, lines, context=None):\n company_id = self.pool.get('res.company')._company_default_get(\n cr, uid, self._name, context=context)\n debit_ids = []\n credit_ids = []\n for line in item.line_ids:\n debit_acc_id = item.config_id.inventory_account_id.id\n crebit_acc_id = line.product_id.property_stock_account_input.id or\\\n line.product_id.categ_id.\\\n property_stock_account_input_categ.id\n cost_price = line.prod_lot_id.property_cost_price\n amount = cost_price * line.product_uom_qty\n name = ' '.join((\n item.config_id.name, item.name,\n line.product_id.code, line.prod_lot_id.name))\n debit_ids.append({\n 'auto': True,\n 'company_id': company_id,\n 'account_id': debit_acc_id,\n 'name': name[: 64],\n 'debit': float('%.2f' % (amount)),\n 'credit': 0.0,\n 'reconcile': False,\n })\n credit_ids.append({\n 'auto': True,\n 'company_id': company_id,\n 'account_id': crebit_acc_id,\n 'name': name[: 64],\n 'debit': 0.0,\n 'credit': float('%.2f' % (amount)),\n 'reconcile': False,\n })\n return credit_ids + debit_ids\n\n def create_account_move(self, cr, uid, ids, context=None):\n context = context or {}\n ids = isinstance(ids, (int, long)) and [ids] or ids\n obj_mov = self.pool.get('account.move')\n obj_per = self.pool.get('account.period')\n company_id = self.pool.get('res.company')._company_default_get(\n cr, uid, self._name, context=context)\n date = time.strftime('%Y-%m-%d %H:%M:%S')\n for item in self.browse(cr, uid, ids, context=context):\n period_id = obj_per.find(cr, uid, date)[0]\n lines = self.create_account_move_lines(\n cr, uid, item, None, context)\n move = {\n 'ref': ' '.join((item.name, item.config_id.name)),\n 'journal_id': item.config_id.sale_journal_id.id,\n 'date': date,\n 'min_date': date,\n 'company_id': company_id,\n 'state': 'draft',\n 'to_check': False,\n 'period_id': period_id,\n 'line_id': lines and [(0, 0, l) for l in lines],\n }\n\n move_id = obj_mov.create(cr, uid, move, context)\n if move_id:\n obj_mov.post(cr, uid, [move_id], context=context)\n return move_id\n\n ##-------------------------------------------------------- buttons (object)\n\n def button_lot_list(self, cr, uid, ids, context=None):\n context = context or {}\n 
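# --- Editor's note (hedged sketch; not part of the original module) ----------
# create_account_move_lines above rounds every line with float('%.2f' % amount).
# Debit and credit are rounded from the same amount, so the move balances here,
# but a cheap invariant check before posting makes that assumption explicit;
# the helper name is hypothetical and the 0.01 tolerance matches the 2-decimal
# rounding used above.
def assert_move_balanced(lines, tolerance=0.01):
    '''Raise if the journal entry lines' total debit and credit diverge.'''
    total_debit = sum(line.get('debit', 0.0) for line in lines)
    total_credit = sum(line.get('credit', 0.0) for line in lines)
    if abs(total_debit - total_credit) > tolerance:
        raise ValueError('Unbalanced move: debit %.2f vs credit %.2f'
                         % (total_debit, total_credit))
# -----------------------------------------------------------------------------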
ids = isinstance(ids, (int, long)) and [ids] or ids\n so_brw = self.browse(cr, uid, ids, context={})[0]\n context.update({'consignement_id': so_brw.id,\n 'default_consignement_id': so_brw.id,\n 'default_partner_id': so_brw.partner_id.id,\n })\n view_id = self.pool.get('ir.ui.view').search(\n cr, uid, [('name', '=', 'tcv.consignment.lot.list.form')])\n return {'name': _('Load lot list'),\n 'type': 'ir.actions.act_window',\n 'res_model': 'tcv.sale.lot.list',\n 'view_type': 'form',\n 'view_id': view_id,\n 'view_mode': 'form',\n 'nodestroy': True,\n 'target': 'new',\n 'domain': \"\",\n 'context': context}\n\n ##------------------------------------------------------------ on_change...\n\n def on_change_config_id(self, cr, uid, ids, config_id):\n res = {}\n if config_id:\n partner_id = self._get_consig_partner_id(cr, uid, config_id)\n res.update({'partner_id': partner_id})\n return {'value': res}\n\n ##----------------------------------------------------- create write unlink\n\n def create(self, cr, uid, vals, context=None):\n context = context or {}\n if not vals.get('name') or vals.get('name') == '/':\n if context.get('consignment_type') == 'out_consignment':\n seq_name = 'tcv.consignment.sale'\n\n elif context.get('consignment_type') == 'in_consignment':\n seq_name = 'tcv.consignment.purchase'\n else:\n raise osv.except_osv(\n _('Error!'),\n _('Must indicate consignment_type in context'))\n vals.update({\n 'name': self.pool.get('ir.sequence').get(cr, uid, seq_name),\n 'partner_id': self._get_consig_partner_id(\n cr, uid, vals.get('config_id')),\n })\n res = super(tcv_consignment, self).create(\n cr, uid, vals, context)\n return res\n\n def write(self, cr, uid, ids, vals, context=None):\n if 'config_id' in vals:\n vals.update({'partner_id': self._get_consig_partner_id(\n cr, uid, vals['config_id'])})\n res = super(tcv_consignment, self).write(cr, uid, ids, vals, context)\n return res\n\n ##---------------------------------------------------------------- Workflow\n\n def button_draft(self, cr, uid, ids, context=None):\n vals = {'state': 'draft'}\n return self.write(cr, uid, ids, vals, context)\n\n def button_done(self, cr, uid, ids, context=None):\n context = context or {}\n picking_id = self.create_stock_picking(cr, uid, ids, context)\n move_id = self.create_account_move(cr, uid, ids, context)\n vals = {\n 'state': 'done',\n 'picking_id': picking_id,\n 'move_id': move_id,\n }\n return self.write(cr, uid, ids, vals, context)\n\n def button_cancel(self, cr, uid, ids, context=None):\n vals = {'state': 'cancel'}\n return self.write(cr, uid, ids, vals, context)\n\n def test_draft(self, cr, uid, ids, *args):\n return True\n\n def test_done(self, cr, uid, ids, *args):\n for item in self.browse(cr, uid, ids, context={}):\n for line in item.line_ids:\n if not line.product_uom_qty:\n raise osv.except_osv(\n _('Error!'),\n _('No quantity for lot: %s') % line.prod_lot_id.name)\n return True\n\n def test_cancel(self, cr, uid, ids, *args):\n for item in self.browse(cr, uid, ids, context={}):\n if item.picking_id and \\\n item.picking_id.state not in ('draft', 'cancel'):\n raise osv.except_osv(\n _('Error!'),\n _('Can\\'t cancel while picking\\'s state '\n '<> \"Draft\" or \"Cancel\"'))\n elif item.move_id and \\\n item.move_id.state == ('posted'):\n raise osv.except_osv(\n _('Error!'),\n _('Can\\'t cancel while move\\'s state '\n '= \"Posted\"'))\n return True\n\n\ntcv_consignment()\n\n\n##------------------------------------------------------- tcv_consignment_lines\n\n\nclass 
tcv_consignment_lines(osv.osv):\n\n _name = 'tcv.consignment.lines'\n\n _description = ''\n\n ##-------------------------------------------------------------------------\n\n ##------------------------------------------------------- _internal methods\n\n ##--------------------------------------------------------- function fields\n\n _columns = {\n 'line_id': fields.many2one(\n 'tcv.consignment', 'Consignment note', required=True,\n ondelete='cascade'),\n 'config_id': fields.related(\n 'line_id', 'config_id', type='many2one',\n relation='tcv.consignment', string='Config', store=True,\n readonly=True),\n 'state': fields.related(\n 'line_id', 'state', type='string', size=32,\n string='State', store=False, readonly=True),\n 'partner_id': fields.related(\n 'line_id', 'partner_id', type='many2one', relation='res.partner',\n string='Partner', store=True, readonly=True),\n 'name': fields.char(\n 'Name', size=64, required=False, readonly=False),\n 'prod_lot_id': fields.many2one(\n 'stock.production.lot', 'Production lot', required=True),\n 'product_id': fields.related(\n 'prod_lot_id', 'product_id', type='many2one',\n relation='product.product', string='Product', store=False,\n readonly=True),\n 'product_uom_qty': fields.float(\n 'Quantity', digits_compute=dp.get_precision('Product UoM')),\n 'pieces': fields.integer(\n 'Pieces'),\n 'sale_line_id': fields.many2one(\n 'sale.order.line', 'Sale order line', readonly=True,\n ondelete='set null'),\n }\n\n _defaults = {\n }\n\n _sql_constraints = [\n ]\n\n ##-------------------------------------------------------------------------\n\n ##---------------------------------------------------------- public methods\n\n ##-------------------------------------------------------- buttons (object)\n\n ##------------------------------------------------------------ on_change...\n\n def on_change_prod_lot_id(self, cr, uid, ids, prod_lot_id):\n res = {}\n if not prod_lot_id:\n return {'value': res}\n obj_lot = self.pool.get('stock.production.lot')\n lot = obj_lot.browse(cr, uid, prod_lot_id, context=None)\n res.update({\n 'product_id': lot.product_id.id,\n 'product_uom_qty': lot.stock_available,\n 'pieces': round(lot.stock_available / lot.lot_factor, 0),\n })\n return {'value': res}\n\n ##----------------------------------------------------- create write unlink\n\n ##---------------------------------------------------------------- Workflow\n\n\ntcv_consignment_lines()\n\n\n##---------------------------------------------------------- tcv_consig_invoice\n\n\nclass tcv_consig_invoice(osv.osv):\n\n _inherit = 'tcv.consig.invoice'\n\n _description = ''\n\n ##-------------------------------------------------------------------------\n\n ##------------------------------------------------------- _internal methods\n\n def _get_consig_partner_id(self, cr, uid, config_id, context=None):\n return self.pool.get('tcv.consignment.config').\\\n get_consig_partner_id(cr, uid, config_id)\n\n def _create_sale_order(self, cr, uid, ids, context=None):\n context = context or {}\n obj_so = self.pool.get('sale.order')\n obj_sol = self.pool.get('sale.order.line')\n obj_cl = self.pool.get('tcv.consignment.lines')\n for item in self.browse(cr, uid, ids, context={}):\n address = [addr for addr in item.partner_id.address if\n addr.type == 'invoice']\n data = {\n 'date_order': time.strftime('%Y-%m-%d'),\n 'origin': item.name,\n 'partner_id': item.partner_id.id,\n 'partner_invoice_id': address[0].id,\n 'partner_order_id': address[0].id,\n 'partner_shipping_id': address[0].id,\n 'user_id': uid,\n 
'order_policy': item.config_id.order_policy,\n 'payment_term': item.config_id.payment_term.id,\n }\n order_id = obj_so.create(cr, uid, data, context)\n for line in item.lines:\n taxes = []\n for tax in line.product_id.taxes_id:\n taxes.append((4, tax.id))\n ord_lin = {\n 'order_id': order_id,\n 'product_id': line.product_id.id,\n 'concept_id': line.product_id.concept_id.id,\n 'prod_lot_id': line.prod_lot_id.id,\n 'pieces': line.pieces,\n 'product_uom_qty': line.product_uom_qty,\n 'product_uos_qty': line.product_uom_qty,\n 'product_uom': line.product_id.uom_id.id,\n 'name': line.product_id.name,\n 'price_unit': 1,\n 'type': 'make_to_stock',\n 'delay': line.product_id.sale_delay,\n 'tax_id': taxes,\n }\n line_id = obj_sol.create(cr, uid, ord_lin, context)\n obj_cl.write(\n cr, uid, line.id, {'sale_line_id': line_id},\n context=context)\n obj_so.button_update_lots_prices(cr, uid, [order_id], context)\n wf_service = netsvc.LocalService(\"workflow\")\n wf_service.trg_validate(\n uid, 'sale.order', order_id, 'order_confirm', cr)\n wf_service.trg_validate(\n uid, 'sale.order', order_id, 'manual_invoice', cr)\n so = obj_so.browse(cr, uid, order_id, context=context)\n self.write(\n cr, uid, [item.id],\n {'invoice_id': so.invoice_ids[0].id,\n 'sale_order_id': order_id},\n context=context)\n\n ##--------------------------------------------------------- function fields\n\n _columns = {\n 'name': fields.char(\n 'Reference', size=16, required=True, readonly=True),\n 'date': fields.date(\n 'Date', required=True, readonly=True,\n states={'draft': [('readonly', False)]}, select=True),\n 'config_id': fields.many2one(\n 'tcv.consignment.config', 'Configuration', readonly=True,\n states={'draft': [('readonly', False)]}, required=True,\n ondelete='restrict', help=\"Config settings for this document\"),\n 'partner_id': fields.many2one(\n 'res.partner', 'Partner', change_default=True,\n readonly=True, required=True, ondelete='restrict'),\n 'user_id': fields.many2one(\n 'res.users', 'User', readonly=True, select=True,\n ondelete='restrict'),\n 'narration': fields.text(\n 'Notes', readonly=False),\n 'lines': fields.many2many(\n 'tcv.consignment.lines', 'consig_note_rel_', 'consig_note_id',\n 'consig_inv_id', 'Consig', readonly=True,\n states={'draft': [('readonly', False)]},\n domain=\"[('config_id', '=', config_id), ('state', '=', 'done'), \"\n \"('sale_line_id', '=', 0)]\"),\n 'state': fields.selection(\n [('draft', 'Draft'), ('done', 'Done'), ('cancel', 'Cancelled')],\n string='State', required=True, readonly=True),\n 'invoice_id': fields.many2one(\n 'account.invoice', 'Invoice Reference', ondelete='set null',\n select=True, readonly=True),\n 'sale_order_id': fields.many2one(\n 'sale.order', 'Sale order', ondelete='set null',\n select=True, readonly=True),\n }\n\n _defaults = {\n 'name': lambda *a: '/',\n 'user_id': lambda s, c, u, ctx: u,\n 'date': lambda *a: time.strftime('%Y-%m-%d'),\n 'state': lambda *a: 'draft',\n }\n\n _sql_constraints = [\n ]\n\n ##-------------------------------------------------------------------------\n\n ##---------------------------------------------------------- public methods\n\n ##-------------------------------------------------------- buttons (object)\n\n ##------------------------------------------------------------ on_change...\n\n def on_change_config_id(self, cr, uid, ids, config_id):\n res = {}\n if config_id:\n partner_id = self._get_consig_partner_id(cr, uid, config_id)\n res.update({'partner_id': partner_id, 'lines': []})\n return {'value': res}\n\n 
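# --- Editor's note (hedged sketch; not part of the original module) ----------
# _create_sale_order above mixes two OpenERP relational 'command' tuples:
# (0, 0, vals) creates and links a new record, while (4, id) links an existing
# one (used for the taxes). For reference, the commonly used commands, written
# here as plain data for illustration only:
ORM_COMMANDS = {
    'create': lambda vals: (0, 0, vals),  # create a new record and link it
    'link':   lambda rid: (4, rid),       # link an existing record by id
    'unlink': lambda rid: (3, rid),       # drop the link, keep the record
    'delete': lambda rid: (2, rid),       # drop the link and delete the record
    'set':    lambda ids: (6, 0, ids),    # replace all links with this id list
}
# e.g. taxes = [ORM_COMMANDS['link'](tax.id) for tax in line.product_id.taxes_id]
# -----------------------------------------------------------------------------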
##----------------------------------------------------- create write unlink\n\n def create(self, cr, uid, vals, context=None):\n context = context or {}\n if not vals.get('name') or vals.get('name') == '/':\n seq_name = 'tcv.consig.invoice.sale'\n vals.update({\n 'name': self.pool.get('ir.sequence').get(cr, uid, seq_name),\n 'partner_id': self._get_consig_partner_id(\n cr, uid, vals.get('config_id')),\n })\n res = super(tcv_consig_invoice, self).create(\n cr, uid, vals, context)\n return res\n\n def write(self, cr, uid, ids, vals, context=None):\n if 'config_id' in vals:\n vals.update({'partner_id': self._get_consig_partner_id(\n cr, uid, vals['config_id'])})\n res = super(tcv_consig_invoice, self).write(\n cr, uid, ids, vals, context)\n return res\n\n ##---------------------------------------------------------------- Workflow\n\n def button_draft(self, cr, uid, ids, context=None):\n vals = {'state': 'draft'}\n return self.write(cr, uid, ids, vals, context)\n\n def button_done(self, cr, uid, ids, context=None):\n self._create_sale_order(cr, uid, ids, context)\n vals = {'state': 'done'}\n return self.write(cr, uid, ids, vals, context)\n\n def button_cancel(self, cr, uid, ids, context=None):\n vals = {'state': 'cancel'}\n return self.write(cr, uid, ids, vals, context)\n\n def test_draft(self, cr, uid, ids, *args):\n return True\n\n def test_done(self, cr, uid, ids, *args):\n return True\n\n def test_cancel(self, cr, uid, ids, *args):\n return True\n\n\ntcv_consig_invoice()\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"tcv_consignment/model/tcv_consignment.py","file_name":"tcv_consignment.py","file_ext":"py","file_size_in_byte":24049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"446268614","text":"import psutil\nimport keyboard\n\ndef config_filter(line):\n if '#' in line or line=='':\n return False\n else:\n return True\nconfig = open('config.txt', 'r', encoding='utf-8')\ntarget_list = config.read().replace(' ','').split('\\n')\ntarget_list = list(filter(config_filter, target_list))\n \nkeyboard.read_event()\n\nfor proc in psutil.process_iter():\n if proc.name() in target_list:\n proc.kill()","sub_path":"taskkiller.py","file_name":"taskkiller.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"53310808","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport sys\nimport datetime\nimport time\nimport cv2\nimport os\n\nimport config\nfrom widgets import *\nfrom camera import Camera\nfrom driver import Driver, SLOW_DOWN_RATE\nfrom detectors import SignDetector\nfrom detectors import TaskDetector\nfrom detectors import in_centered_in_image,is_target,is_trophies,is_soldier\nfrom fixed_queue import FixedQueue\nfrom obstacle import raiseflag, shot_target, capture_target, Lightwork,banyun\n# 是否进行标志和目标物检测\ncamera_pwm = Servo(2)\nenable_detection = False\n# 前置摄像头\nfront_camera = Camera(config.front_cam, [640, 480])\n# 侧边摄像头\nside_camera = Camera(config.side_cam, [640, 480])\n# 程序开启运行开关\nstart_button = Button(1, \"UP\")\nstop_button = Button(1, \"DOWN\")\nultr_sensor = UltrasonicSensor(4)\n\nservo1 = Servo(1)\n# 车道巡航\ndriver = Driver()\n# 地面标志检测\nsign_detector = SignDetector()\n# 侧边目标物检测\ntask_detector = TaskDetector()\nSTATE_IDLE = \"idle\"\nSTATE_CRUISE = \"cruise\"\nSTATE_SIGN_DETECTED = \"sign_detected\"\nSTATE_TASK = \"task\"\nSTATE_TASK_8 = \"go_task8\"\nMISS_DURATION = 600\nSLOW_DOWN_TIME = 
3\nTEMP_STOP_TIME = 5\nhandled_taskes = set()\n\n#\nOBSTACLE = 0\n# 存储离目标较近的目标序列\ntask_queue_ = FixedQueue()\n\n# candidate队列用类存储远一点的目标\ncandicate_queue_ = FixedQueue()\n\n# for mession\nmession_queue = FixedQueue(class_num=5)\n\n# 任务次序记录\ntaskorder = 0\n\n# 存储视野中较近的检测到的目标\ndef task_queue():\n return task_queue_\n\n# 相较task_queue队列,candidate队列用类存储远一点的目标\ndef candicate_queue():\n return candicate_queue_\n\n# 筛选距离车体最近的标志和目标物\ndef select_queue(detect_res, blow_index, status):\n global task_queue_, candicate_queue_\n\n # no object detected\n if len(detect_res) == 0:\n task_queue_.append(0)\n candicate_queue_.append(0)\n count = 0\n for item in detect_res:\n label = item.index\n # 距离较远的框不插入?\n y_bar = 0.5\n if item.index == 2: # 封狼居胥\n y_bar = 0.6\n elif item.index == 1:#宿营\n y_bar = 0.3\n elif item.index == 7: # obstacle;\n y_bar = 0.7\n elif item.index == 5:#打标\n y_bar = 0.5\n elif item.index == 4:#soldier\n y_bar = 0.3\n if item.relative_box[3] > y_bar:\n if count == blow_index and status == 'cruise':\n task_queue_.append(1, label)\n else:\n candicate_queue_.append(1, label)\n count += 1\n\n# 交换集合序列\n\n\ndef switch_queue():\n global task_queue_, candicate_queue_\n task_queue_, candicate_queue_ = candicate_queue_, task_queue_\n# 打印已识别任务序列\n\n\ndef debug_queues():\n print(\"task queue is \")\n print(task_queue_.deque)\n print(\"candidate queue is\")\n print(candicate_queue_.deque)\n# 确认\"DOWN\"按键是否按下,程序是否处于等待直行状态\n\n\ndef check_stop(current_state):\n if current_state != STATE_IDLE and stop_button.clicked():\n return True\n return False\n# 任务程序入口函数\n\n\ndef idle_handler(arg):\n while True:\n if start_button.clicked():\n time.sleep(0.3)\n return STATE_CRUISE, None\n # time.sleep(0.1);\n print(\"IDLE\")\n driver.stop()\n return STATE_IDLE, None\n\n# 规则中应该限制相邻两个标签的距离,中心距离大于3 / 4图像长度\n# 摄像头视野frame图像中最多只包含两个目标物。\n\n\ndef cruise_handler(arg):\n # 任务完成标志(全局变量)\n # 任务标记量主要是为了误识别导致重复做任务\n global taskorder\n # counter =0\n flagnum = 0\n oldtime = time.time()\n # 设置小车巡航速度\n # driver.set_speed(driver.full_speed)\n if arg != None:\n start_time = time.time()\n cur_speed = driver.full_speed\n driver.set_speed(cur_speed * SLOW_DOWN_RATE)\n driver.set_speed(50)\n driver.set_Kx(0.95)\n while True:\n if arg != None:\n cur_time = time.time()\n if cur_time - start_time > SLOW_DOWN_TIME:\n driver.set_speed(driver.full_speed)\n else:\n driver.set_speed(cur_speed * SLOW_DOWN_RATE)\n if check_stop(STATE_CRUISE):\n driver.stop()\n front_camera.stop()\n side_camera.stop()\n os.system('sudo pkill python')\n return STATE_IDLE, None\n front_image = front_camera.read()\n driver.go(front_image)\n if not enable_detection:\n continue\n if taskorder == 8:\n continue\n # 侦测车道上有无标志图标\n res, blow_index = sign_detector.detect(front_image, \"cruise\")\n # sign valid maybe task maybe just signal (bluesign, triangle, light)\n # 获取标志识别结果,获得所在列表的索引值\n flag, index = task_queue().roadsign_valid()\n if flag:\n flagnum += 1\n if flagnum < 2:\n continue\n else:\n if res and len(res) > 0:\n select_queue(res, blow_index, \"cruise\")\n flagnum = 0\n sign_name = config.sign_list[index]\n print(sign_name)\n print(handled_taskes)\n if sign_name not in handled_taskes:\n if sign_name in [\"barracks\", \"fenglangjuxu\", \"fortress\", \"soldier\", \"target\"]:\n return STATE_SIGN_DETECTED, sign_name\n else:\n driver.set_speed(driver.full_speed)\n print(\"cruise else mode {}\".format(sign_name))\n task_queue().clear()\n continue\n if res and len(res) > 0:\n select_queue(res, blow_index, \"cruise\")\n\n# 地面图标识别\n\n\ndef sign_detected_handler(arg):\n 
global taskorder\n if arg == \"barracks\" or arg == \"fenglangjuxu\" or arg == \"soldier\":\n handled_taskes.add(arg);\n cur_speed = driver.full_speed\n driver.set_speed(cur_speed * SLOW_DOWN_RATE)\n if taskorder ==2 or taskorder == 4:\n driver.set_speed(25)\n miss_mission = 0\n print(arg)\n # imgnum = 0\n barracksflag = True\n barracksnum = 0\n frontimagenum = 0\n disappearnum = 0\n print('============-------------===============------------============----------=========')\n while True:\n if check_stop(STATE_SIGN_DETECTED):\n driver.stop()\n front_camera.stop()\n side_camera.stop()\n os.system('sudo pkill python')\n return STATE_IDLE, None\n front_image = front_camera.read()\n driver.go(front_image)\n print(\"sign_detected\")\n res_front, blow_index = sign_detector.detect(front_image, \"cruise\")\n print(res_front)\n if res_front and len(res_front) > 0:\n select_queue(res_front, blow_index, \"cruise\")\n frontimagenum += 1\n if res_front[0].name == \"barracks\":\n barracksnum += 1\n print(\"barracksnum =\", barracksnum)\n if barracksnum > 3:\n barracksflag = False\n barracksnum = 0\n frontimagenum = 0\n return STATE_TASK, \"barracks\"\n\n # roadsign disappear\n else:\n if barracksnum > 0:\n disappearnum += 1\n if disappearnum + barracksnum > 3:\n barracksflag = False\n barracksnum = 0\n frontimagenum = 0\n return STATE_TASK, \"barracks\"\n else:\n pass\n if taskorder == 2 or taskorder == 3:\n driver.set_speed(25)\n side_image = side_camera.read()\n res = task_detector.detect(side_image)\n\n elif frontimagenum > 1:\n if arg == \"fenglangjuxu\":\n driver.set_speed(cur_speed * 0.5)\n else:\n driver.set_speed(cur_speed * SLOW_DOWN_RATE)\n side_image = side_camera.read()\n res = task_detector.detect(side_image)\n else:\n driver.set_speed(cur_speed*0.8)\n continue\n\n if len(res) > 0:\n print(res)\n miss_mission = 0\n if res[0].name == \"target\":\n if taskorder == 1 or taskorder == 2 or taskorder == 3:\n num_target = is_target(res)\n if num_target == 1:\n print(\"stepping into target\")\n task_queue().clear()\n driver.stop()\n time.sleep(0.3)\n frontimagenum = 0\n print(\"+++++++++++++++++++start task!res=\", res)\n return STATE_TASK, res\n elif num_target == -1:\n # driver.set_speed(-8\n driver.driver_run(-8,-8)\n time.sleep(0.6)\n else:\n driver.set_speed(8)\n\n elif res[0].name ==\"trophies\" and taskorder == 5:\n num_trophies = is_trophies(res)\n if num_trophies == 1:\n print(\"stepping into target\")\n task_queue().clear()\n driver.stop()\n time.sleep(0.1)\n frontimagenum = 0\n print(\"+++++++++++++++++++start task!res=\", res)\n return STATE_TASK, res\n elif num_trophies == -1:\n driver.set_speed(-8)\n time.sleep(0.2)\n else:\n driver.set_speed(8)\n\n elif res[0].name == \"daijun\" or res[0].name == \"dunhuang\" or res[0].name == \"dingxiangjun\":\n if taskorder == 0 or taskorder == 1 or taskorder == 7:\n if in_centered_in_image(res):\n print(\"stepping into task\")\n task_queue().clear()\n driver.stop()\n time.sleep(0.3)\n # 后续右侧任务\n # return STATE_CRUISE, None\n frontimagenum = 0\n print(\"+++++++++++++++++++start task!res=\", res)\n return STATE_TASK, res\n elif res[0].name == \"soldier\" and taskorder == 6:\n num_soldier = is_soldier(res)\n if num_soldier == 1:\n print(\"stepping into target\")\n task_queue().clear()\n driver.stop()\n time.sleep(0.3)\n frontimagenum = 0\n print(\"+++++++++++++++++++start task!res=\", res)\n return STATE_TASK, res\n elif num_soldier == -1:\n driver.set_speed(-8)\n time.sleep(0.2)\n else:\n driver.set_speed(8)\n\n else:\n miss_mission += 1\n if 
miss_mission > 30:\n print(\"detected miss, stepping into cruise\")\n task_queue().clear()\n switch_queue()\n driver.set_speed(cur_speed)\n return STATE_CRUISE, None\n\n# 做任务\n\n\ndef task_handler(res):\n global taskorder\n print(\"task\")\n print(\"res=\", res)\n cur_speed = driver.full_speed\n driver.set_speed(cur_speed)\n if res == \"barracks\":\n driver.stop()\n time.sleep(0.2)\n driver.driver_run(30, 30)\n time.sleep(1.65)\n driver.driver_run(-8, -30)\n time.sleep(1.2)\n driver.driver_run(-30, -30)\n time.sleep(0.7)\n driver.driver_run(-30, -8)\n time.sleep(1.1)\n driver.stop()\n for i in range(0, 4):\n Lightwork(2, \"red\")\n time.sleep(0.02)\n Lightwork(2, \"off\")\n driver.driver_run(30, 8)\n time.sleep(1)\n driver.driver_run(30, 30)\n time.sleep(0.75)\n driver.driver_run(8, 30)\n time.sleep(1)\n driver.stop()\n taskorder = 5\n else:\n name = res[0].name\n if name == \"soldier\":\n motor = 3\n driver.stop()\n time.sleep(0.2)\n banyun(motor)\n taskorder = 7\n elif name == \"daijun\":\n if taskorder == 0:\n driver.driver_run(-20, -20)\n time.sleep(0.7)\n driver.stop()\n taskorder = 1\n raiseflag(4, 3)\n elif taskorder == 1:\n driver.driver_run(20, 20)\n time.sleep(0.6)\n driver.stop()\n taskorder = 1\n raiseflag(4, 3)\n else:\n driver.driver_run(-20, -20)\n time.sleep(0.7)\n driver.stop()\n taskorder = 8\n raise_finally(4, 3)\n elif name == \"dingxiangjun\":\n if taskorder == 0:\n driver.driver_run(-20, -20)\n time.sleep(0.7)\n driver.stop()\n taskorder = 1\n raiseflag(4, 3)\n elif taskorder == 1:\n driver.driver_run(20, 20)\n time.sleep(0.6)\n driver.stop()\n taskorder = 1\n raiseflag(4, 3)\n else:\n driver.driver_run(-20, -20)\n time.sleep(0.7)\n driver.stop()\n taskorder = 8\n raise_finally(4, 3)\n\n elif name == \"dunhuang\":\n if taskorder == 0:\n driver.driver_run(-20, -20)\n time.sleep(0.7)\n driver.stop()\n taskorder = 1\n raiseflag(4, 3)\n elif taskorder == 1:\n driver.driver_run(20, 20)\n time.sleep(0.6)\n driver.stop()\n taskorder = 1\n raiseflag(4, 3)\n else:\n driver.driver_run(-20, -20)\n time.sleep(0.7)\n driver.stop()\n taskorder = 8\n raise_finally(4, 3)\n\n elif name == \"target\":\n\n driver.stop()\n time.sleep(0.3)\n shot_target(2)\n taskorder += 1\n\n elif name == \"trophies\":\n driver.stop()\n time.sleep(0.3)\n capture_target(1,3)\n time.sleep(0.5)\n taskorder = 6\n else:\n print(\"Error!#####################\")\n\n # 右侧任务\n if taskorder == 1 or taskorder == 2 or taskorder == 6 or taskorder == 3:\n camera_pwm.servocontrol(-113,70)\n time.sleep(0.1)\n # 后续左侧任务\n else:\n camera_pwm.servocontrol(50,70)\n time.sleep(0.1)\n switch_queue()\n\n print(\"taskorder:\",taskorder)\n return STATE_CRUISE, None\n\n\ndef go_task_8(arg):\n global taskorder\n print(\"go_task_8\")\n cur_speed = driver.full_speed\n driver.set_speed(cur_speed)\n task8_start = time.time()\n while True:\n front_image = front_camera.read()\n driver.go(front_image)\n if time.time()-task8_start < 4:\n driver.set_Kx(0.7)\n continue\n elif time.time()-task8_start < 5:\n driver.set_Kx(0.8)\n else:\n driver.set_Kx(0.9)\n um = ultr_sensor.read()\n # print(\"*8888888\")\n if um != None and um < 20:\n driver.stop()\n return STATE_TASK, \"dingxiangjun\"\n\n\nstate_map = {\n STATE_IDLE: idle_handler,\n STATE_CRUISE: cruise_handler,\n STATE_SIGN_DETECTED: sign_detected_handler,\n STATE_TASK: task_handler,\n STATE_TASK_8: go_task_8,\n}\n\n\ndef main():\n camera_pwm.servocontrol(50,70)\n servo1.servocontrol(0, 50)\n time.sleep(0.5)\n current_state = STATE_IDLE\n arg = None\n front_camera.start()\n 
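# --- Editor's note (hedged sketch; not part of the original xunhang.py) ------
# state_map above dispatches on bare strings and every handler returns
# (next_state, arg). The same contract expressed with an Enum fails loudly on
# a typo'd state name instead of with a KeyError mid-run; this is an
# illustrative pattern, not the original code.
from enum import Enum

class State(Enum):
    IDLE = 'idle'
    CRUISE = 'cruise'
    SIGN_DETECTED = 'sign_detected'
    TASK = 'task'
    TASK_8 = 'go_task8'

def run_state_machine(handlers, start=State.IDLE, arg=None):
    '''Dispatch loop: each handler returns (next_state, arg); None stops it.'''
    state = start
    while state is not None:
        state, arg = handlers[state](arg)
# -----------------------------------------------------------------------------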
side_camera.start()\n Lightwork(2, \"red\")\n time.sleep(0.5)\n Lightwork(2, \"green\")\n time.sleep(0.5)\n Lightwork(2, \"off\")\n try:\n while (True):\n new_state, arg = state_map[current_state](arg)\n current_state = new_state\n driver.stop()\n front_camera.stop()\n side_camera.stop()\n except ZeroDivisionError as e:\n print('except:', e)\n finally:\n print('finally...')\n driver.stop()\n Lightwork(2, \"off\")\n Lightwork(4, \"off\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"mastercar/autostart/src/xunhang.py","file_name":"xunhang.py","file_ext":"py","file_size_in_byte":16452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"475339099","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.alert import Alert\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\n\n# Seleniumをあらゆる環境で起動させるChromeオプション\noptions = Options()\noptions.add_argument('--disable-gpu');\noptions.add_argument('--disable-extensions');\noptions.add_argument('--proxy-server=\"direct://\"');\noptions.add_argument('--proxy-bypass-list=*');\noptions.add_argument('--start-maximized');\n# options.add_argument('--headless'); # ※ヘッドレスモードを使用する場合、コメントアウトを外す\n\nDRIVER_PATH = 'C:/Users/mk/Documents/Selenium/chromedriver_win32/chromedriver.exe'\n# DRIVER_PATH = '/Users/Kenta/Desktop/Selenium/chromedriver' # ローカル\n# DRIVER_PATH = '/app/.chromedriver/bin/chromedriver' # heroku\n\n# ブラウザの起動\ndriver = webdriver.Chrome(executable_path=DRIVER_PATH, chrome_options=options)\n\n# Webページにアクセスする\nurl = 'https://www.sbifxt.co.jp/login.html'\ndriver.get(url)\n\n# IDを入力する\nselector = '#ClientLogin > div.loginbox > p > input'\nelement = driver.find_element_by_css_selector(selector)\nelement.send_keys('3772352841')\n\n# パスワードを打つまで、待つ\ninput()\n\n# ログインする。\nselector = '#ClientLogin > div.loginbtn > ul > li > a:nth-child(1)'\nelement = driver.find_element_by_css_selector(selector)\nelement.click()\n\n# タイトルを取得する\ntitle = driver.title\nprint(title)\n# URLを取得する\nurl = driver.current_url\nprint(url)\n\n#ページロードを待つ。\ndriver.implicitly_wait(10) # seconds\n\n# 成行をクリックする。\nselector = '#tradePanel > nav > ul > li:nth-child(2) > span'\nelement = driver.find_element_by_css_selector(selector)\nelement.click()\n\n# 購入数を入力する\nselector = '#tradeMarket > section > form > div.trade-inner > div:nth-child(3) > div.input-space > input.form-control'\nelement = driver.find_element_by_css_selector(selector)\nelement.clear()\nelement.send_keys('1')\n\ntime.sleep(0.5)\n\n# 新規買をクリックする\nselector = '#tradeMarket > section > form > div.trade-inner > div:nth-child(7) > div.newBuyBtn-group > label'\nelement = driver.find_element_by_css_selector(selector)\nelement.click()\n\ntime.sleep(0.5)\n\n# 新規売をクリックする\nselector = '#tradeMarket > section > form > div.trade-inner > div:nth-child(7) > div.newSellBtn-group > label'\nelement = driver.find_element_by_css_selector(selector)\nelement.click()\n\ntime.sleep(0.5)\n\n# 決済買をクリックする\nselector = '#tradeMarket > section > form > div.trade-inner > div:nth-child(8) > div:nth-child(2) > label'\nelement = driver.find_element_by_css_selector(selector)\nelement.click()\n\ntime.sleep(0.5)\n\n# 決済売をクリックする\nselector = 
'#tradeMarket > section > form > div.trade-inner > div:nth-child(8) > div:nth-child(1) > label'\nelement = driver.find_element_by_css_selector(selector)\nelement.click()\n\ntime.sleep(0.5)\n\n# Click the new-buy button\nselector = '#tradeMarket > section > form > div.trade-inner > div:nth-child(7) > div.newBuyBtn-group > label'\nelement = driver.find_element_by_css_selector(selector)\nelement.click()\n\ntime.sleep(0.5)\n\n# Click the skip-confirmation checkbox\nselector = '#tradeMarket > section > form > div.trade-inner > div.btngroupSet.checkbox-group.checkboxradio-check > div.omit-conf-success > label'\nelement = driver.find_element_by_css_selector(selector)\nelement.click()\n\ntime.sleep(0.5)\n\n# Click the skip-confirmation checkbox\nselector = '#tradeMarket > section > form > div.trade-inner > div.btngroupSet.checkbox-group.checkboxradio-check > div.omit-conf-success > label'\nelement = driver.find_element_by_css_selector(selector)\nelement.click()\n\ntime.sleep(0.5)\n\n# Click the skip-confirmation checkbox\nselector = '#tradeMarket > section > form > div.trade-inner > div.btngroupSet.checkbox-group.checkboxradio-check > div.omit-conf-success > label'\nelement = driver.find_element_by_css_selector(selector)\nelement.click()\n\ntime.sleep(0.5)\n\n# Wait\ninput()\n\n# Click the order-confirmation button\nselector = '#tradeMarket > section > form > div.trade-inner > div.btngroupSet.checkbox-group.checkboxradio-check > div.conf-success > input'\nelement = driver.find_element_by_css_selector(selector)\nelement.click()\n\ntime.sleep(0.5)\n\n# Click the close button\nselector = '#ui-id-3 > div.btngroupSet > input'\nelement = driver.find_element_by_css_selector(selector)\nelement.click()\n\n# Wait\ninput()\n\n\n\n# # Type Selenium into the search box\n# selector = '#tsf > div:nth-child(2) > div.A8SBwf> div.RNNXgb > div > div.a4bIc > input'\n# element = driver.find_element_by_css_selector(selector)\n# element.send_keys('Selenium')\n\n# # Press the Enter key\n# element.send_keys(Keys.ENTER)\n\n\n\n# Quit the browser (closes every window)\n# Same behaviour as the Chrome shortcut key (Command+Q)\ndriver.quit()\n\n# Understand Selenium in 10 minutes - Qiita https://qiita.com/Chanmoro/items/9a3c86bb465c1cce738a\n","sub_path":"FX_miurak/cov/scr11.py","file_name":"scr11.py","file_ext":"py","file_size_in_byte":5325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"81860342","text":"# https://leetcode.com/problems/word-break/\n\nimport collections\n\n\nclass Solution:\n    # O(N^2) time, O(N) space\n    def wordBreak(self, s: 'str', wordDict: 'List[str]') -> 'bool':\n        if not s or not wordDict:\n            return False\n\n        entity_words, visited = set(wordDict), set()\n        q = collections.deque([0])\n\n        while q:\n            start = q.popleft()\n\n            if start == len(s):\n                return True\n\n            if start not in visited:\n                visited.add(start)\n\n                for end in range(start, len(s)):\n                    word = s[start:end + 1]\n                    if word in entity_words:\n                        q.append(end + 1)\n\n        return False\n\n    # O(N * M) time, O(N) space; More concise DP approach\n    def wordBreak2(self, s: 'str', wordDict: 'List[str]') -> 'bool':\n        dp = [False] * len(s)\n\n        for i in range(len(s)):\n            for w in wordDict:\n                if w == s[i - len(w) + 1:i + 1] and (dp[i - len(w)] or i - len(w) == -1):\n                    dp[i] = True\n                    break\n        return dp[-1]\n","sub_path":"Problems/leetcode/Word_Break_139.py","file_name":"Word_Break_139.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"574393452","text":"from . 
import Buffer, SAP, Message\r\n\r\n\r\nclass Service:\r\n    def __init__(self, name=\"UNNAMED\", logger=None):\r\n        self.in_buffers = []\r\n        self.buffers_in_to_out = {}\r\n        self.buffers_in_to_name = {}\r\n        self.name_to_out_buf = {}\r\n        self.running = False\r\n        self.logger = logger\r\n        self.name = name\r\n\r\n    # binds self to another service over an SAP\r\n    def bindSAP(self, sap, reverse_flow=False):\r\n        in_buf = sap.InBuffer\r\n        out_buf = sap.OutBuffer\r\n        if reverse_flow:\r\n            temp = in_buf\r\n            in_buf = out_buf\r\n            out_buf = temp\r\n\r\n        self.buffers_in_to_out[in_buf] = out_buf\r\n        self.buffers_in_to_name[in_buf] = sap.name\r\n        self.name_to_out_buf[sap.name] = out_buf\r\n\r\n\r\n    def stop_listening(self):\r\n        self.running = False\r\n\r\n\r\n    def listen(self, handler_func):\r\n        self.LogInfo(\"Listening...\")\r\n        self.running = True\r\n        while self.running:\r\n            for buf in self.buffers_in_to_out.keys():\r\n                if not buf.empty():\r\n                    sap_name = self.buffers_in_to_name[buf]\r\n                    msg = buf.read()\r\n                    self.LogInfo(\"received message via SAP {}\".format(sap_name))\r\n                    handler_func(msg, self.buffers_in_to_out[buf])  # calls handler function with message and response channel\r\n\r\n\r\n    def send(self, sap_name, msg):\r\n        self.LogInfo(\"sending {} via SAP {}\".format(msg, sap_name))\r\n        # get the out buffer for this SAP name\r\n        out = self.name_to_out_buf[sap_name]\r\n        out.write(msg)\r\n\r\n\r\n    def LogInfo(self, msg):\r\n        msg = self.name + \":\" + msg\r\n        if self.logger:\r\n            self.logger.LogInfo(msg)\r\n\r\n    def respond(self, resp_buffer, name, content):\r\n        resp_buffer.write(Message.Message(self.name, \"test.confirm\", \"confirm\"))\r\n\r\n    def getMessage(self, name, content):\r\n        return Message.Message(self.name, name, content)\r\n\r\n# test functionality\r\ndef _test1(s):\r\n    def resp(x, out):\r\n        print(x)\r\n        out.write(\"bye\")\r\n        s.stop_listening()\r\n\r\n    s.listen(resp)\r\n\r\n\r\ndef _test2(s):\r\n    def resp(x, out):\r\n        print(x)\r\n        s.stop_listening()\r\n\r\n    s.send(\"test\", \"hi\")\r\n    s.listen(resp)\r\n\r\n\r\nif __name__ == '__main__':\r\n    import multiprocessing\r\n\r\n    sap = SAP.SAP(\"test\")\r\n    ser1 = Service()\r\n    ser2 = Service()\r\n\r\n    sap.bind(ser1, ser2)\r\n\r\n\r\n    # spawn tests\r\n    p1 = multiprocessing.Process(target=_test1, args=(ser1,))\r\n    p1.start()\r\n    p2 = multiprocessing.Process(target=_test2, args=(ser2,))\r\n    p2.start()\r\n\r\n    p1.join()\r\n    p2.join()\r\n    ","sub_path":"baseWAVE/service/Service.py","file_name":"Service.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"469817631","text":"import os\nimport sys\nimport shutil\n\nfrom classes import MyWorkbook\nfrom functions.month_funcs.get_month_parameters import get_month_parameters\nfrom functions.month_funcs.create_all_plots import create_all_plots\nfrom functions.month_funcs.create_pptx_presentation \\\n    import create_pptx_presentation\n\ntry:\n    month_num = sys.argv[1]\n    year_num = sys.argv[2]\nexcept IndexError:\n    folder_path_file = open(\"path.txt\", \"r\")\n    folder_path = folder_path_file.read()\n    folder_path_file.close()\n    folder_path += \"/monthly data/\"\n    # sort the listing: os.listdir returns entries in arbitrary order,\n    # so \"second newest\" is only well defined after sorting\n    second_newest = sorted(os.listdir(folder_path))[-2]\n\n    year_num = second_newest[:2]\n    month_num = second_newest[5:7]\n\n\n# Preparing the analysis\nfolder_path, file_path, month_label, results_dir \\\n    = get_month_parameters(month_num, year_num)\n\nmyWorkbook = MyWorkbook(file_path)\nmyWorksheet = myWorkbook.sheets_list[0]\n\n# Actual analysis\nif not os.path.exists(results_dir + \"/plots/\"):\n
os.mkdir(results_dir + \"/plots/\")\n\nplot_numbers_list = create_all_plots(myWorksheet, month_label, results_dir)\n\ncreate_pptx_presentation(month_num, year_num, results_dir, plot_numbers_list)\n\nshutil.rmtree(results_dir + \"/plots/\")\n","sub_path":"monthAnalysis.py","file_name":"monthAnalysis.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"339688806","text":"# File: categorize.py\r\n# Author: Simon Chu\r\n# Date modified: 7/5/2017\r\n# Purpose: find different kinds of images\r\n# click Stop box to end the program\r\n\r\n# Designed by Simon Chu\r\n# for Wilkes University Research Project of Mathematics\r\n# and Computer Science Department in Summer, 2017\r\n\r\n\r\nimport os\r\n\r\n\r\ndef main():\r\n    path = input(\"Please enter the path of the directory with the images to categorize: \")\r\n    directList = os.listdir(path)\r\n\r\n    m = 0  # dogs\r\n    n = 0  # cats\r\n    o = 0  # unknown\r\n    p = 0  # total amount\r\n    for i in directList:\r\n        if i[-3:] == \"jpg\":\r\n            p = p + 1\r\n            # classify by the marker character in the file name\r\n            if i[4] == \"d\":\r\n                m = m + 1\r\n            elif i[4] == \"c\":\r\n                n = n + 1\r\n            else:\r\n                o = o + 1\r\n    print(\"dogs =\", m)\r\n    print(\"cats =\", n)\r\n    print(\"unknowns =\", o)\r\n    print(\"total =\", p)\r\n\r\nmain()\r\n","sub_path":"dataCategorization/categorize.py","file_name":"categorize.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"373975060","text":"#!/usr/local/bin/python3.6\n# -*-encoding:Utf-8 -*\nimport sqlite3\n\nfrom fonctions import transform_logfile\n\n\nclass LogDB():\n    def __init__(self):\n        self.db = 'OsirixLogAnalyse.db'\n        self.creation_table()\n\n    def creation_table(self):\n        conn = sqlite3.connect(self.db)\n        print(\"- DB: {} opened\".format(self.db))\n        conn.execute(\"DROP TABLE IF EXISTS LOG_DATABASE\")\n        conn.execute('''CREATE TABLE LOG_DATABASE\n            (ID INT PRIMARY KEY NOT NULL,\n            DATE_LOG TEXT NOT NULL,\n            STUDY_NAME TEXT,\n            STUDY_ACCESNUM TEXT,\n            PAT_ID TEXT,\n            PAT_NAME TEXT,\n            STUDY_UID TEXT,\n            IP TEXT NOT NULL,\n            IP_LAND TEXT,\n            MESSAGE TEXT NOT NULL,\n            REQ_TYPE TEXT);''')\n        print(\" - New table created\")\n        conn.close()\n\n    def remplir(self, fichier_log):\n        table, message = transform_logfile(fichier_log)\n        conn = sqlite3.connect(self.db)\n        for valeurs in table:\n            conn.execute(\"INSERT INTO LOG_DATABASE (ID, DATE_LOG, STUDY_NAME, STUDY_ACCESNUM, PAT_ID, PAT_NAME, STUDY_UID, IP, IP_LAND, MESSAGE,REQ_TYPE) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);\", (valeurs))\n        conn.commit()\n        conn.close()\n        print(\"- Added {} rows to the db\".format(len(table)))\n        message.append(\"Retrieval completed successfully\")\n        return message\n\n    def lecture(self, query, dates):\n        self.query = query\n        self.dates = dates\n        conn = sqlite3.connect(self.db)\n        curseur = conn.execute(self.query, self.dates)\n        nb_result = 0\n        tableau = []\n        for row in curseur:\n            tableau.append(row)\n            nb_result += 1\n        conn.close()\n\n        return [nb_result, tableau]\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"267015903","text":"from models.senet import senet50\nimport utils\nfrom torch.autograd import Variable\nimport torch\nimport cv2\nimport torchvision.transforms\nimport numpy as np\nfrom PIL import Image\n\n\nclass VGGFace:\n\n    def 
__init__(self, trained_model=\"/media/haoxue/WD/VGGFace2-pytorch/senet50_ft_weight.pkl\", transform=True):\n\n self.net = senet50(num_classes=8631, include_top=False)\n utils.load_state_dict(self.net, trained_model)\n self.net.eval()\n self.transform = transform\n\n def process(self, img_path):\n\n out = self.net(self.load_image(img_path))\n output = out.view(out.size(0), -1)\n output = output.data.cpu().numpy()\n print(np.shape(output))\n return output\n\n def load_image(self, img_path):\n # img = Image.open(img_path)\n # img = torchvision.transforms.CenterCrop(224)(img)\n # img = np.array(img, dtype=np.uint8)\n img = cv2.imread(img_path)\n img = cv2.resize(img, (224, 224))\n if self.transform:\n img = self.transform_img(img)\n\n return Variable(img)\n\n @staticmethod\n def transform_img(img):\n img = img[:, :, ::-1] # RGB -> BGR\n img = img.astype(np.float32)\n img -= np.array([91.4953, 103.8827, 131.0912])\n img = img.transpose(2, 0, 1) # C x H x W\n img = np.expand_dims(img, axis=0) # 1 x C x H x W\n img = torch.from_numpy(img).float()\n return img\n\n\nif __name__ == \"__main__\":\n\n v = VGGFace()\n o = v.process(\"/home/haoxue/Downloads/download.jpeg\")\n\n","sub_path":"infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"39464728","text":"from utils.policies import *\nfrom utils.suf import *\n\npartial_state_update_blocks = [\n {\n 'policies': {\n 'IRC': p_inflation,\n 'provision': p_provision,\n 'delta_F': p_claim,\n 'delta_L': p_vest,\n 'delta_U': p_unvest,\n 'delta_MRa': p_mr_a,\n 'delta_MRv': p_mr_v,\n 'delta_CL': p_cl,\n 'delta_a': p_a,\n 'delta_v': p_v,\n 'delta_m_v_t': p_m_v_t,\n 'delta_agents_amount': p_agents_amount\n\n },\n 'variables': {\n 'I_r': s_I_r,\n 'F': s_F,\n 'L': s_L,\n 'T': s_T,\n 'd_u': s_d_u,\n 'd_v': s_d_l,\n 'A': s_a,\n 'V': s_v,\n 'MRa': s_mr_a,\n 'MRv': s_mr_v,\n 'CL': s_cl,\n 'maxVestingTime': s_m_v_t,\n 'agents_amount': s_agents_amount,\n 's_capitalization': s_capitalization\n }\n }\n]","sub_path":"bostrom_simulation/utils/psub.py","file_name":"psub.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"293895918","text":"#\n# This is the Robotics Language compiler\n#\n# Manifesto.py: Definition of the parameters for this package\n#\n# Created on: 22 October, 2019\n# Author: Gabriel Lopes\n# Licence: Apache 2.0\n# Copyright: Gabriel Lopes\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a copy of the\n# License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by\n# applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n#\nmanifesto = {\n    'packageName': 'generic',\n    'packageShortName': 'Generic',\n    'version': '0.0.0',\n    'requiresCode': False,\n    'order': 200,\n    'information': {\n        'author':\n        {\n            'name': 'Gabriel Lopes',\n            'email': 'gabriel.lopes@caspar.ai',\n            'web': 'web',\n            'telephone': 'user telephone'\n        },\n        'company':\n        {\n            'name': 'Caspar.ai',\n            'address': 'company address',\n            'zipcode': 'company zipcode',\n            'city': 'Rotterdam',\n            'country': 'The Netherlands',\n            'email': 'company email',\n            'web': 'https://caspar.ai',\n            'telephone': 'company telephone',\n        }\n    }\n}\n","sub_path":"RoboticsLanguage/Outputs/Generic/Manifesto.py","file_name":"Manifesto.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"49711856","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\nhtml = \"\"\"\n<html>\n    <body>\n%s\n    </body>\n</html>\n\"\"\"\n\ndef app(environ, start_response):\n\td = environ['QUERY_STRING']\n\tlst = d.split('&')\n\tlst = map(lambda x: x + '<br>', lst)\n\tbody = html % ''.join(lst)\n\tprint(body)\n\t# WSGI requires a byte iterable and a byte-accurate Content-Length\n\tdata = body.encode('utf-8')\n\tstatus = '200 OK'\n\theaders = [ ('Content-Type', 'text/html'), ('Content-Length', str(len(data))) ]\n\tstart_response(status, headers)\n\treturn [data]\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"11159694","text":"'''\nPython practice book: https://github.com/Yixiaohan/show-me-the-code\nProblem 0002: save the 200 activation codes (or coupons) generated in problem 0001 into a MySQL relational database.\n'''\n\nimport string\nimport random\nimport mysql.connector\n\n# Generate activation codes\n\n\ndef key_gen(num, length):\n    result = []\n\n    for j in range(num):\n        keylist = [\n            random.choice(\n                string.ascii_letters +\n                string.digits) for i in range(length)]\n        key = \"\".join(keylist)\n        result.append(key)\n\n    return result\n\n\n# The database 'randomkey' was created beforehand in the terminal\nconn = mysql.connector.connect(\n    user='root',\n    password='password',\n    database='randomkey')\ncursor = conn.cursor()\ncursor.execute(''' CREATE TABLE `user_key` (`key` varchar(50) NOT NULL)''')\nrand_key = key_gen(200, 10)\nfor v in rand_key:\n    cursor.execute('insert into user_key values (%s)', [v])\nconn.commit()\ncursor.close()\ncursor = conn.cursor()\ncursor.execute('select * from user_key')\nvalues = cursor.fetchall()\nfor j in values:\n    print(j)\ncursor.close()\nconn.close()\n","sub_path":"saverandomkey.py","file_name":"saverandomkey.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"217544284","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pickle\nimport sqlite3\nimport numpy as np\nimport pandas as pd\nfrom PyQt5.QtWidgets import (QFrame, QGroupBox, QLineEdit, QComboBox, QSpinBox, QTableView, QPushButton, QFileDialog,\n                             QFormLayout, QHBoxLayout, QGridLayout, QSizePolicy)\nfrom openpyxl import load_workbook\nfrom openpyxl.workbook.workbook import Workbook\nfrom openpyxl.worksheet.worksheet import Worksheet\nfrom program.import_model import ImportModel\nfrom program.import_delegate import TypeDelegate\n\n\nclass ImportFrame(QFrame):\n    conn: sqlite3.Connection = None\n    path: str = './'\n    setting_file_path: str = None\n    import_wb: Workbook = None\n    import_ws: Worksheet = None\n    model = ImportModel([])\n    type_delegate = TypeDelegate()\n\n    # noinspection PyArgumentList\n    def __init__(self, *args):\n        super(ImportFrame, self).__init__(*args)\n        file_group = QGroupBox('Excel File')\n        self.file_line = QLineEdit()\n        self.file_line.setReadOnly(True)\n        file_button = QPushButton('Load')\n        file_button.setFixedWidth(100)\n        file_layout = QGridLayout()\n        file_layout.addWidget(self.file_line, 0, 0)\n        file_layout.addWidget(file_button, 0, 1)\n        file_group.setLayout(file_layout)\n        file_group.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n\n        import_group = QGroupBox('Import Settings')\n        self.sheet_combo = QComboBox()\n        self.header_spin = QSpinBox()\n        self.header_spin.setRange(0, 0)\n        self.name_line = QLineEdit()\n        self.exists_combo = QComboBox()\n        self.exists_combo.addItems(['append', 'replace', 'fail'])\n        self.exists_combo.setStatusTip(\n            '''if table already in database\\nappend: insert new records\\nreplace: replace data\\nfail: abort import''')\n        form_layout = QFormLayout()\n        form_layout.addRow('Select Sheet', self.sheet_combo)\n        form_layout.addRow('Select Title Row', self.header_spin)\n        form_layout.addRow('Table Name', self.name_line)\n        form_layout.addRow('If Table Exists', self.exists_combo)\n        self.column_view = 
QTableView()\n save_button = QPushButton('&Save')\n save_button.setFixedWidth(100)\n load_button = QPushButton('&Load')\n load_button.setFixedWidth(100)\n fast_button = QPushButton('&FastLoad')\n fast_button.setFixedWidth(100)\n import_button = QPushButton('&Import')\n import_button.setFixedWidth(100)\n button_layout = QHBoxLayout()\n button_layout.addStretch()\n button_layout.addWidget(save_button)\n button_layout.addWidget(load_button)\n button_layout.addWidget(fast_button)\n button_layout.addWidget(import_button)\n\n setting_layout = QGridLayout()\n setting_layout.addLayout(form_layout, 0, 0)\n setting_layout.addWidget(self.column_view, 1, 0)\n setting_layout.addLayout(button_layout, 2, 0)\n import_group.setLayout(setting_layout)\n\n main_layout = QGridLayout()\n main_layout.addWidget(file_group, 0, 0)\n main_layout.addWidget(import_group, 1, 0)\n self.setLayout(main_layout)\n\n file_button.clicked.connect(self.load_file)\n self.header_spin.valueChanged.connect(self.load_column)\n save_button.clicked.connect(self.save_setting)\n load_button.clicked.connect(self.load_setting)\n fast_button.clicked.connect(self.auto_load)\n import_button.clicked.connect(self.import_data)\n\n def load_file(self):\n file_path: str = QFileDialog.getOpenFileName(self.parent(), 'Load File', self.path, 'Excel File(*.xlsx)',\n options=QFileDialog.DontConfirmOverwrite)[0]\n if file_path:\n self.file_line.setText(file_path)\n self.path = '/'.join(file_path.split('/')[:-1])\n self.import_wb = load_workbook(file_path, data_only=True)\n self.sheet_combo.disconnect()\n self.sheet_combo.clear()\n self.sheet_combo.addItems(self.import_wb.sheetnames)\n self.load_sheet()\n self.sheet_combo.currentIndexChanged.connect(self.load_sheet)\n\n def load_sheet(self):\n self.import_ws = self.import_wb[self.sheet_combo.currentText()]\n self.name_line.setText(self.sheet_combo.currentText().replace(' ', '_', 10))\n self.header_spin.setRange(1, self.import_ws.max_row)\n self.load_column()\n\n def load_column(self):\n self.model = ImportModel([cell.value for cell in list(self.import_ws.rows)[self.header_spin.value() - 1]])\n self.column_view.setModel(self.model)\n self.column_view.setItemDelegateForColumn(1, self.type_delegate)\n self.column_view.setColumnWidth(0, 500)\n self.column_view.setColumnWidth(1, 150)\n\n def import_data(self):\n if not self.file_line.text():\n return\n self.sender().setEnabled(False)\n df = pd.read_excel(self.file_line.text(), self.sheet_combo.currentText(), header=self.header_spin.value() - 1,\n usecols=self.model.usecols, dtype=self.model.dtype, parse_dates=self.model.parse_dates,\n date_parser=lambda x: pd.to_datetime(x).strftime('%Y-%m-%d'))\n for k, v in self.model.dtype.items():\n if v == str:\n df[k] = df[k].replace('nan', np.nan)\n df.to_sql(self.name_line.text(), self.conn, if_exists=self.exists_combo.currentText())\n self.sender().setEnabled(True)\n\n def save_setting(self):\n file_path: str = QFileDialog.getSaveFileName(self.parent(), 'Save Setting', './setting/', 'Setting File(*.stg)',\n options=QFileDialog.DontConfirmOverwrite)[0]\n if file_path:\n save_settings = [self.name_line.text(), self.header_spin.value(), self.model.table_settings]\n file = open(file_path, 'wb')\n pickle.dump(save_settings, file)\n file.close()\n\n def load_setting(self):\n file_path: str = QFileDialog.getOpenFileName(self.parent(), 'Load Setting', './setting/', 'Setting File(*.stg)',\n options=QFileDialog.DontConfirmOverwrite)[0]\n if file_path:\n self.setting_file_path = file_path\n file = open(file_path, 'rb')\n 
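# Restore the pickled [table name, header row, column settings] list produced by save_setting.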
load_settings = pickle.load(file)\n self.name_line.setText(load_settings[0])\n self.header_spin.setValue(load_settings[1])\n self.model.table_settings = load_settings[2]\n self.column_view.reset()\n\n def auto_load(self):\n if self.setting_file_path:\n file = open(self.setting_file_path, 'rb')\n load_settings = pickle.load(file)\n self.name_line.setText(load_settings[0])\n self.header_spin.setValue(load_settings[1])\n self.model.table_settings = load_settings[2]\n self.column_view.reset()\n","sub_path":"program/import_frame.py","file_name":"import_frame.py","file_ext":"py","file_size_in_byte":6980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"210699694","text":"import sys\nimport random\n\n###########################################################\n#http://www.practicepython.org/exercise/2014/03/19/07-list-comprehensions.html\n'''Let’s say I give you a list saved in a variable: a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100].\n Write one line of Python that takes this list a and makes a new list that has only the even \n elements of this list in it.'''\na = [i for i in range(1,101)]\nb = [number for number in a if number%2==0]\nprint(b)\n\n\n###########################################################\nprint('This program has finished. \\n Press Enter to exit')\ninput()\nsys.exit()","sub_path":"exercise7listcomprehensions.py","file_name":"exercise7listcomprehensions.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"355308551","text":"import unittest\nimport re\nimport json\nfrom uuid import UUID\n\nfrom azure.kusto.ingest._ingestion_blob_info import _IngestionBlobInfo\nfrom azure.kusto.ingest.exceptions import KustoDuplicateMappingError, KustoDuplicateMappingReferenceError, KustoMappingAndMappingReferenceError\nfrom azure.kusto.ingest import (\n BlobDescriptor,\n IngestionProperties,\n DataFormat,\n ColumnMapping,\n ReportLevel,\n ReportMethod,\n ValidationPolicy,\n ValidationOptions,\n ValidationImplications,\n)\n\nTIMESTAMP_REGEX = \"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{6}\"\n\n\nclass IngestionBlobInfoTest(unittest.TestCase):\n \"\"\"Tests serialization of ingestion blob info. 
This serialization will be queued to the DM.\"\"\"\n\n def test_blob_info_csv_mapping(self):\n \"\"\"Tests serialization of csv ingestion blob info.\"\"\"\n validation_policy = ValidationPolicy(ValidationOptions.ValidateCsvInputConstantColumns, ValidationImplications.BestEffort)\n columnMapping = ColumnMapping(\"ColumnName\", \"cslDataType\", ordinal=1)\n\n properties = IngestionProperties(\n database=\"database\",\n table=\"table\",\n dataFormat=DataFormat.CSV,\n ingestionMapping=[columnMapping],\n additionalTags=[\"tag\"],\n ingestIfNotExists=[\"ingestIfNotExistTags\"],\n ingestByTags=[\"ingestByTags\"],\n dropByTags=[\"dropByTags\"],\n flushImmediately=True,\n reportLevel=ReportLevel.DoNotReport,\n reportMethod=ReportMethod.Queue,\n validationPolicy=validation_policy,\n )\n blob = BlobDescriptor(\"somepath\", 10)\n blob_info = _IngestionBlobInfo(blob, properties, auth_context=\"authorizationContextText\")\n self._verify_ingestion_blob_info_result(blob_info.to_json())\n\n def test_blob_csv_mapping_reference(self):\n \"\"\"Tests serialization of ingestion blob info with csv mapping reference.\"\"\"\n validation_policy = ValidationPolicy(ValidationOptions.ValidateCsvInputConstantColumns, ValidationImplications.BestEffort)\n properties = IngestionProperties(\n database=\"database\",\n table=\"table\",\n dataFormat=DataFormat.CSV,\n ingestionMappingReference=\"csvMappingReference\",\n additionalTags=[\"tag\"],\n ingestIfNotExists=[\"ingestIfNotExistTags\"],\n ingestByTags=[\"ingestByTags\"],\n dropByTags=[\"dropByTags\"],\n flushImmediately=True,\n reportLevel=ReportLevel.DoNotReport,\n reportMethod=ReportMethod.Queue,\n validationPolicy=validation_policy,\n )\n blob = BlobDescriptor(\"somepath\", 10)\n blob_info = _IngestionBlobInfo(blob, properties, auth_context=\"authorizationContextText\")\n self._verify_ingestion_blob_info_result(blob_info.to_json())\n\n def test_blob_info_json_mapping(self):\n \"\"\"Tests serialization of json ingestion blob info.\"\"\"\n validation_policy = ValidationPolicy(ValidationOptions.ValidateCsvInputConstantColumns, ValidationImplications.BestEffort)\n properties = IngestionProperties(\n database=\"database\",\n table=\"table\",\n dataFormat=DataFormat.JSON,\n ingestionMapping=[ColumnMapping(\"ColumnName\", \"datatype\", path=\"jsonpath\")],\n additionalTags=[\"tag\"],\n ingestIfNotExists=[\"ingestIfNotExistTags\"],\n ingestByTags=[\"ingestByTags\"],\n dropByTags=[\"dropByTags\"],\n flushImmediately=True,\n reportLevel=ReportLevel.DoNotReport,\n reportMethod=ReportMethod.Queue,\n validationPolicy=validation_policy,\n )\n blob = BlobDescriptor(\"somepath\", 10)\n blob_info = _IngestionBlobInfo(blob, properties, auth_context=\"authorizationContextText\")\n self._verify_ingestion_blob_info_result(blob_info.to_json())\n\n def test_blob_json_mapping_reference(self):\n \"\"\"Tests serialization of ingestion blob info with json mapping reference.\"\"\"\n validation_policy = ValidationPolicy(ValidationOptions.ValidateCsvInputConstantColumns, ValidationImplications.BestEffort)\n properties = IngestionProperties(\n database=\"database\",\n table=\"table\",\n dataFormat=DataFormat.JSON,\n mappingReference=\"jsonMappingReference\",\n additionalTags=[\"tag\"],\n ingestIfNotExists=[\"ingestIfNotExistTags\"],\n ingestByTags=[\"ingestByTags\"],\n dropByTags=[\"dropByTags\"],\n flushImmediately=True,\n reportLevel=ReportLevel.DoNotReport,\n reportMethod=ReportMethod.Queue,\n validationPolicy=validation_policy,\n )\n blob = BlobDescriptor(\"somepath\", 10)\n blob_info = 
_IngestionBlobInfo(blob, properties, auth_context=\"authorizationContextText\")\n self._verify_ingestion_blob_info_result(blob_info.to_json())\n\n def test_blob_info_csv_exceptions(self):\n \"\"\"Tests invalid ingestion properties.\"\"\"\n with self.assertRaises(KustoDuplicateMappingError):\n IngestionProperties(database=\"database\", table=\"table\", mapping=\"mapping\", ingestionMapping=\"ingestionMapping\")\n with self.assertRaises(KustoMappingAndMappingReferenceError):\n IngestionProperties(database=\"database\", table=\"table\", mapping=\"mapping\", ingestionMappingReference=\"ingestionMappingReference\")\n\n with self.assertRaises(KustoMappingAndMappingReferenceError):\n IngestionProperties(database=\"database\", table=\"table\", ingestionMapping=\"ingestionMapping\", ingestionMappingReference=\"ingestionMappingReference\")\n with self.assertRaises(KustoMappingAndMappingReferenceError):\n IngestionProperties(database=\"database\", table=\"table\", mapping=\"mapping\", mappingReference=\"mappingReference\")\n with self.assertRaises(KustoMappingAndMappingReferenceError):\n IngestionProperties(database=\"database\", table=\"table\", ingestionMapping=\"ingestionMapping\", mappingReference=\"mappingReference\")\n with self.assertRaises(KustoDuplicateMappingReferenceError):\n IngestionProperties(database=\"database\", table=\"table\", mappingReference=\"mappingReference\", ingestionMappingReference=\"ingestionMappingReference\")\n\n def _verify_ingestion_blob_info_result(self, ingestion_blob_info):\n result = json.loads(ingestion_blob_info)\n assert result is not None\n assert isinstance(result, dict)\n assert result[\"BlobPath\"] == \"somepath\"\n assert result[\"DatabaseName\"] == \"database\"\n assert result[\"TableName\"] == \"table\"\n assert isinstance(result[\"RawDataSize\"], int)\n assert isinstance(result[\"IgnoreSizeLimit\"], bool)\n assert isinstance(result[\"FlushImmediately\"], bool)\n assert isinstance(result[\"RetainBlobOnSuccess\"], bool)\n assert isinstance(result[\"ReportMethod\"], int)\n assert isinstance(result[\"ReportLevel\"], int)\n assert isinstance(UUID(result[\"Id\"]), UUID)\n assert re.match(TIMESTAMP_REGEX, result[\"SourceMessageCreationTime\"])\n assert result[\"AdditionalProperties\"][\"authorizationContext\"] == \"authorizationContextText\"\n assert result[\"AdditionalProperties\"][\"ingestIfNotExists\"] == '[\"ingestIfNotExistTags\"]'\n assert result[\"AdditionalProperties\"][\"ValidationPolicy\"] in (\n '{\"ValidationImplications\":1,\"ValidationOptions\":1}',\n '{\"ValidationImplications\":ValidationImplications.BestEffort,\"ValidationOptions\":ValidationOptions.ValidateCsvInputConstantColumns}',\n )\n\n assert result[\"AdditionalProperties\"][\"tags\"] == '[\"tag\",\"drop-by:dropByTags\",\"ingest-by:ingestByTags\"]'\n","sub_path":"azure-kusto-ingest/tests/test_ingestion_blob_info.py","file_name":"test_ingestion_blob_info.py","file_ext":"py","file_size_in_byte":7704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"306638040","text":"#!/usr/bin/python3\n\"\"\"This script starts a Flask web application\"\"\"\nfrom flask import Flask, render_template\nfrom models import storage\nfrom models.state import State\nfrom models.amenity import Amenity\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef closeStorage(exception):\n \"\"\"Closes a database/file connection\"\"\"\n storage.close()\n\n\n@app.route('/hbnb_filters', strict_slashes=False)\ndef display():\n \"\"\"displays HTML template\n\n 
Returns:\n string: HTML template\n \"\"\"\n states = storage.all(State)\n amenities = storage.all(Amenity)\n return render_template(\n '10-hbnb_filters.html', states=states, amenities=amenities)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port='5000')\n","sub_path":"web_flask/10-hbnb_filters.py","file_name":"10-hbnb_filters.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"603868088","text":"from newsapi import NewsApiClient\nimport numpy as np\nimport json\nfrom textblob import TextBlob\n\ntry:\n config = json.load(open('../config.json'))\n newsapi = NewsApiClient(api_key=config['news_api'])\nexcept:\n print(\"connection to newsserver failed\")\n\n#sources = newsapi.get_sources()\npolarr = []\nsentarr = []\n\n\ndef get_sentiment():\n try:\n all_articles = newsapi.get_everything(q='ethereum',\n sources='bbc-news, the-verge, vice')\n except:\n print(\"Problem with loading twitterdata\")\n del polarr[:]\n del sentarr[:]\n for article in all_articles['articles']:\n analysis = TextBlob(article['content'])\n sentiment = analysis.sentiment\n polarr.append(sentiment[0])\n sentarr.append(sentiment[1])\n\n return np.average(polarr), np.average(sentarr)\n","sub_path":"src/news_api.py","file_name":"news_api.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"654391018","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/matias/Documentos/MIS_MODULOS_PYTHON/matialvarezs_python_modules_creator/backend/apps/matialvarezs_charge_controller/migrations/0002_localdatabackup.py\n# Compiled at: 2019-08-16 12:04:16\n# Size of source mod 2**32: 1021 bytes\nimport django.contrib.postgres.fields.jsonb\nfrom django.db import migrations, models\nimport django.utils.timezone\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('matialvarezs_charge_controller', '0001_initial')]\n operations = [\n migrations.CreateModel(name='LocalDataBackup', fields=[\n (\n 'id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n (\n 'identity', models.CharField(max_length=2048, unique=True)),\n (\n 'created', models.DateTimeField(default=django.utils.timezone.now)),\n (\n 'last_update', models.DateTimeField(default=django.utils.timezone.now)),\n (\n 'data', django.contrib.postgres.fields.jsonb.JSONField()),\n (\n 'send_to_server', models.BooleanField(default=True))], options={'abstract': False})]","sub_path":"pycfiles/matialvarezs_charge_controller-0.0.154.tar/0002_localdatabackup.cpython-35.py","file_name":"0002_localdatabackup.cpython-35.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"408174313","text":"import update_python_paths\nimport time\nimport re\nimport copy\nimport datetime\nimport json\nimport logging\nimport os\nimport socket\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport traceback\nimport zipfile\nfrom functools import wraps, update_wrapper\n\nimport astropy.units as u\nfrom astropy.coordinates import ICRS, TETE, AltAz\nfrom skyconv_hadec import HADec\nfrom astropy.utils import iers\nfrom flask import Flask, redirect, jsonify, request, make_response, url_for, send_from_directory\nfrom flask_compress import 
Compress\nfrom werkzeug.serving import make_ssl_devcert\n\nimport control\nimport db\nimport handpad_menu\nimport handpad_server\nimport lx200proto_server\nimport network\nimport settings\nimport skyconv\n\niers.conf.auto_download = False\niers.auto_max_age = None\n\navahi_process = None\n\npower_thread_quit = False\n\nst_queue = None\napp = Flask(__name__, static_folder='../client_refactor/dist/')\nlogging.getLogger('werkzeug').setLevel(logging.ERROR)\n# socketio = SocketIO(app, async_mode='threading', logger=False, engineio_logger=False)\nsettings_json_lock = threading.RLock()\n\npointing_logger = settings.get_logger('pointing')\n\nRELAY_PIN = 6\nSWITCH_PIN = 5\n\ncompress = Compress()\nupdate_python_paths.keep_import()\n\n\n# Sets up power switch\n# TODO: disable power switch for simulation\ndef run_power_switch():\n pass\n # try:\n # import RPi.GPIO as GPIO\n # GPIO.setmode(GPIO.BCM)\n # GPIO.setup(RELAY_PIN, GPIO.OUT)\n # GPIO.setup(SWITCH_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n # GPIO.output(RELAY_PIN, True)\n #\n # while not power_thread_quit:\n # time.sleep(0.1)\n # if GPIO.input(SWITCH_PIN) == 0:\n # continue\n # time.sleep(0.1)\n # if GPIO.input(SWITCH_PIN) == 1:\n # subprocess.run(['sudo', 'shutdown', '-h', 'now'])\n #\n # except ImportError:\n # # We are probably in simulation\n # print(\"Warning: Can't use power switch.\")\n\n\n@app.context_processor\ndef override_url_for():\n \"\"\"\n Generate a new token on every request to prevent the browser from\n caching static files.\n \"\"\"\n return dict(url_for=dated_url_for)\n\n\ndef dated_url_for(endpoint, **values):\n if endpoint == 'static':\n filename = values.get('filename', None)\n if filename:\n file_path = os.path.join(app.root_path,\n endpoint, filename)\n values['q'] = int(os.stat(file_path).st_mtime)\n return url_for(endpoint, **values)\n\n\ndef nocache(view):\n @wraps(view)\n def no_cache(*args, **kwargs):\n response = make_response(view(*args, **kwargs))\n response.headers['Last-Modified'] = datetime.datetime.now()\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response\n\n return update_wrapper(no_cache, view)\n\n\n@app.route('/api/version')\n@nocache\ndef version():\n return jsonify({\"version\": control.version, \"version_date\": control.version_date_str})\n\n\n@app.route('/api/settings')\n@nocache\ndef settings_get():\n s = copy.deepcopy(settings.settings)\n s['encoder_logging'] = control.encoder_logging_enabled\n s['calibration_logging'] = settings.runtime_settings['calibration_logging']\n if not s['location']:\n s['location'] = {\n 'lat': control.DEFAULT_LAT_DEG,\n 'long': control.DEFAULT_LON_DEG,\n 'name': 'Unset Location'\n }\n # print(s)\n return jsonify(s)\n\n\n@app.route('/api/settings_dl')\n@nocache\ndef settings_dl_get():\n return send_from_directory(directory='./', filename='settings.json', as_attachment=True,\n attachment_filename='ssteq25_settings.json')\n\n\n@app.route('/api/settings_dl', methods=['POST'])\n@nocache\ndef settings_dl_post():\n file = request.files['file']\n with tempfile.TemporaryFile(suffix='.json') as tfile:\n file.save(tfile)\n tfile.seek(0)\n json.load(tfile) # just to check it is at least a json\n tfile.seek(0)\n settings.copy_settings(tfile)\n if not settings.is_simulation():\n t = threading.Timer(5, reboot)\n t.start()\n return 'Updated Settings'\n\n\n@app.route('/api/hostname')\n@nocache\ndef hostname_get():\n return 
jsonify({'hostname': socket.gethostname()})\n\n\n@app.route('/api/shutdown', methods=['PUT'])\n@nocache\ndef shutdown_put():\n control.set_shutdown()\n return 'Shutdown', 200\n\n\n@app.route('/api/logger', methods=['GET'])\n@nocache\ndef logger_get():\n logger_name = request.args.get('name')\n if logger_name == 'pointing':\n pointing_logger.handlers[0].flush()\n return send_from_directory(os.path.join(os.path.expanduser('~'), 'logs'), 'pointing.log', as_attachment=True,\n attachment_filename='pointing_log.txt')\n elif logger_name == 'calibration':\n ret = []\n for row in control.calibration_log:\n # TODO: just support ra/dec right now add others\n if 'sync' in row and 'slewto' in row and 'slewfrom' in row:\n ret.append({\n 'slewfrom': {'ra': row['slewfrom'].ra.deg, 'dec': row['slewfrom'].dec.deg},\n 'slewto': {'ra': row['slewto'].ra.deg, 'dec': row['slewto'].dec.deg},\n 'sync': {'ra': row['sync'].ra.deg, 'dec': row['sync'].dec.deg}\n })\n return jsonify(ret)\n elif logger_name == 'encoder':\n if control.encoder_logging_enabled:\n control.encoder_logging_file.flush()\n return send_from_directory(os.path.join(os.path.expanduser('~'), 'logs'), 'stepper_encoder.csv',\n as_attachment=True,\n attachment_filename='stepper_encoder.csv')\n\n\n@app.route('/api/logger', methods=['DELETE'])\n@nocache\ndef logger_clear():\n args = request.json\n logger_name = args.get('name')\n if logger_name == 'encoder':\n control.encoder_logging_clear = True\n return 'Clearing encoder log', 200\n elif logger_name == 'calibration':\n control.calibration_log = []\n return 'Calibration log cleared', 200\n return 'Invalid logger', 400\n\n\n@app.route('/api/logger', methods=['PUT'])\n@nocache\ndef logger_put():\n args = request.json\n logger_name = args.get('name')\n enabled = args.get('enabled')\n if logger_name == 'encoder':\n control.start_stop_encoder_logger(enabled)\n if enabled:\n return 'Starting logger', 200\n else:\n return 'Stopping logging', 200\n elif logger_name == 'calibration':\n settings.runtime_settings['calibration_logging'] = enabled\n return 'Calibration setting set', 200\n else:\n return 'Invalid logger', 400\n\n\n@app.route('/api/settings', methods=['PUT'])\n@nocache\ndef settings_put():\n print('settings_put')\n settings_buffer = {}\n args = request.json\n keys = [\"ra_track_rate\", \"ra_ticks_per_degree\", \"dec_ticks_per_degree\",\n \"ra_encoder_pulse_per_degree\", \"dec_encoder_pulse_per_degree\",\n \"ra_slew_fastest\", \"ra_slew_faster\", \"ra_slew_medium\",\n \"ra_slew_slower\", \"ra_slew_slowest\",\n \"dec_slew_fastest\", \"dec_slew_faster\", \"dec_slew_medium\", \"dec_slew_slower\", \"dec_slew_slowest\",\n \"time_autosync\", \"polar_align_camera_rotation_x\", \"polar_align_camera_rotation_y\"\n ]\n for key in keys:\n if key in args:\n settings_buffer[key] = float(args[key])\n if 'micro' in args:\n keys = [\"ra_guide_rate\", \"ra_direction\", \"dec_guide_rate\", \"dec_direction\", \"dec_disable\", \"ra_disable\",\n \"ra_accel_tpss\", \"dec_accel_tpss\"]\n for key in keys:\n if key in args['micro']:\n if 'micro' not in settings_buffer:\n settings_buffer['micro'] = {}\n print('=== settingsb micro ' + key, args['micro'][key])\n settings_buffer['micro'][key] = float(args['micro'][key])\n\n keys = [\"atmos_refract\", \"ra_use_encoder\", \"dec_use_encoder\", \"limit_encoder_step_fillin\"]\n for key in keys:\n if key in args:\n settings_buffer[key] = bool(args[key])\n\n keys = [\"color_scheme\"]\n for key in keys:\n if key in args:\n settings_buffer[key] = str(args[key])\n\n keys = 
[\"color_scheme\", \"atmos_refract\", \"ra_track_rate\", \"ra_slew_fastest\", \"ra_slew_faster\", \"ra_slew_medium\",\n \"ra_encoder_pulse_per_degree\", \"dec_encoder_pulse_per_degree\",\n \"ra_use_encoder\", \"dec_use_encoder\", \"limit_encoder_step_fillin\",\n \"ra_slew_slower\", \"ra_slew_slowest\", \"dec_slew_fastest\", \"dec_slew_faster\", \"dec_slew_medium\",\n \"dec_slew_slower\", \"dec_slew_slowest\", \"ra_ticks_per_degree\", \"dec_ticks_per_degree\", \"time_autosync\",\n \"polar_align_camera_rotation_x\", \"polar_align_camera_rotation_y\"]\n for key in keys:\n if key in args:\n settings.settings[key] = settings_buffer[key]\n if 'micro' in settings_buffer:\n keys = [\"ra_guide_rate\", \"ra_direction\", \"dec_guide_rate\", \"dec_direction\", \"dec_disable\", \"ra_disable\",\n \"ra_accel_tpss\", \"dec_accel_tpss\"]\n for key in keys:\n if key in settings_buffer['micro']:\n print('=== settings micro ' + key, float(settings_buffer['micro'][key]))\n settings.settings['micro'][key] = float(settings_buffer['micro'][key])\n settings.write_settings(settings.settings)\n control.micro_update_settings()\n return 'Settings Saved', 200\n\n\n@app.route('/api/settings_horizon_limit', methods=['PUT'])\n@nocache\ndef settings_horizon_limit():\n reqj = request.json\n enabled = reqj.get('horizon_limit_enabled', None)\n points = reqj.get('points', None)\n dec_greater_than = reqj.get('dec_greater_than', None)\n dec_less_than = reqj.get('dec_less_than', None)\n if points is None and enabled is None and dec_greater_than is None:\n return 'Missing points/enable', 400\n if enabled is not None:\n settings.settings['horizon_limit_enabled'] = enabled\n settings.write_settings(settings.settings)\n if dec_greater_than is not None and dec_less_than is not None:\n settings.settings['horizon_limit_dec']['greater_than'] = dec_greater_than\n settings.settings['horizon_limit_dec']['less_than'] = dec_less_than\n settings.write_settings(settings.settings)\n if points is not None:\n settings.settings['horizon_limit_points'] = points\n settings.write_settings(settings.settings)\n return 'Saved Slew Setting', 200\n\n\n@app.route('/api/settings_network_ethernet', methods=['PUT'])\n@nocache\ndef settings_network_ethernet():\n dhcp_server = request.form.get('dhcp_server', None)\n if dhcp_server:\n dhcp_server = dhcp_server.lower() == 'true'\n else:\n dhcp_server = False\n\n ip = request.form.get('ip', None)\n netmask = request.form.get('netmask', None)\n\n if None not in (dhcp_server, ip, netmask):\n network.set_ethernet_dhcp_server(dhcp_server)\n network.set_ethernet_static(ip, netmask)\n settings.settings['network']['ip'] = ip\n settings.settings['network']['netmask'] = netmask\n settings.settings['network']['dhcp_server'] = dhcp_server\n return 'Saved Network', 200\n\n\n@app.route('/api/settings_network_wifi', methods=['PUT'])\n@nocache\ndef settings_network_wifi():\n reqj = request.json\n ssid = reqj.get('ssid', None)\n wpa2key = reqj.get('wpa2key', None)\n channel = reqj.get('channel', None)\n\n if None in [ssid, channel]:\n return 'Invalid parameters', 400\n\n if wpa2key and len(wpa2key) < 8 or len(wpa2key) > 63:\n return 'Invalid WPA2Key, must be between eight and 63 characters', 400\n try:\n channel = int(channel)\n except ValueError:\n return 'Invalid channel', 400\n if channel < 1 or channel > 14:\n return 'Invalid channel', 400\n if len(ssid) > 31:\n return 'SSID must be less than 32 characters', 400\n network.hostapd_write(ssid, channel, wpa2key)\n settings.settings['network']['ssid'] = ssid\n 
settings.settings['network']['wpa2key'] = wpa2key\n settings.settings['network']['channel'] = channel\n settings.write_settings(settings.settings)\n if not settings.is_simulation():\n def reconnect():\n # Stop hostapd and dnsmasq let autohotspot go\n subprocess.run(['sudo', '/root/ctrl_dnsmasq.py', 'wlan0', 'disable'])\n subprocess.run(['sudo', '/usr/bin/killall', 'hostapd'])\n subprocess.run(['sudo', '/usr/bin/autohotspot'])\n\n t1 = threading.Thread(target=reconnect)\n t1.start()\n return \"Updated Wifi Settings\", 200\n\n\n@app.route('/api/wifi_connect', methods=['DELETE'])\n@nocache\ndef wifi_connect_delete():\n reqj = request.json\n ssid = reqj.get('ssid', None)\n mac = reqj.get('mac', None)\n if None in [ssid, mac]:\n return 'Missing ssid or mac', 400\n stemp = network.root_file_open('/ssteq/etc/wpa_supplicant.conf')\n wpasup = network.wpa_supplicant_read(stemp[0])\n network.wpa_supplicant_remove(wpasup['networks'], ssid, mac)\n network.wpa_supplicant_write(stemp[0], wpasup['other'], wpasup['networks'])\n network.root_file_close(stemp)\n # If we are currently connected\n wificon = network.current_wifi_connect()\n if wificon['ssid'] == ssid or wificon['mac'] == mac:\n def reconnect():\n if not settings.is_simulation():\n subprocess.run(['sudo', '/usr/bin/autohotspot'])\n\n t1 = threading.Thread(target=reconnect)\n t1.start()\n return 'Removed', 200\n\n\n@app.route('/api/wifi_known', methods=['GET'])\n@nocache\ndef wifi_known():\n stemp = network.root_file_open('/ssteq/etc/wpa_supplicant.conf')\n wpasup = network.wpa_supplicant_read(stemp[0])\n network.root_file_close(stemp)\n return jsonify(wpasup['networks'])\n\n\n@app.route('/api/wifi_connect', methods=['POST'])\n@nocache\ndef wifi_connect():\n reqj = request.json\n ssid = reqj.get('ssid', None)\n mac = reqj.get('mac', None)\n psk = reqj.get('psk', None)\n open_wifi = reqj.get('open', None)\n known = reqj.get('known', None)\n\n if None in [ssid, mac]:\n return 'Missing ssid or mac', 400\n if not known and not open_wifi and psk is None:\n return 'You must give a passphrase', 400\n\n stemp = network.root_file_open('/ssteq/etc/wpa_supplicant.conf')\n wpasup = network.wpa_supplicant_read(stemp[0])\n\n found = False\n for n in wpasup['networks']:\n if n['ssid'] == ssid and n['bssid'] == mac:\n if psk:\n n['psk'] = psk\n n['priority'] = 5\n found = True\n elif 'priority' in n:\n del n['priority']\n\n if not found:\n n = {'bssid': mac, 'ssid': \"\\\"%s\\\"\" % ssid}\n if psk:\n n['psk'] = '\"' + psk + '\"'\n else:\n n['key_mgmt'] = 'None'\n wpasup['networks'].append(n)\n\n network.wpa_supplicant_write(stemp[0], wpasup['other'], wpasup['networks'])\n network.root_file_close(stemp)\n\n def reconnect():\n if not settings.is_simulation():\n subprocess.run(['sudo', '/sbin/wpa_cli', '-i', 'wlan0', 'reconfigure'])\n subprocess.run(['sudo', '/usr/bin/autohotspot'])\n\n t1 = threading.Thread(target=reconnect)\n t1.start()\n return 'Connecting...', 200\n\n\n@app.route('/api/set_location', methods=['DELETE'])\n@nocache\ndef unset_location():\n settings.settings['location'] = None\n control.update_location()\n settings.write_settings(settings.settings)\n return 'Unset Location', 200\n\n\n@app.route('/api/set_location', methods=['PUT'])\n@nocache\ndef set_location():\n location = request.json\n print(location)\n if 'lat' not in location or 'long' not in location or 'elevation' not in location or (\n 'name' not in location and location['name'].strip() != ''):\n return 'Missing arguments', 400\n location = {'lat': float(location['lat']), 'long': 
float(location['long']),\n 'elevation': float(location['elevation']), 'name': str(location['name'])}\n try:\n control.set_location(location['lat'], location['long'], location['elevation'], location['name'])\n except Exception as e:\n print(e)\n return 'Invalid location', 400\n return 'Set Location', 200\n\n\n@app.route('/api/sync', methods=['GET'])\n@nocache\ndef get_sync_points():\n points = []\n frame = skyconv.model_real_stepper.frame()\n for point in skyconv.model_real_stepper.get_from_points():\n if frame == 'altaz':\n points.append({'alt': point.alt.deg, 'az': point.az.deg})\n else:\n points.append({'ha': point.ha.deg, 'dec': point.dec.deg})\n return jsonify({'frame': frame, 'points': points})\n\n\n@app.route('/api/sync', methods=['DELETE'])\n@nocache\ndef clear_sync():\n control.clear_sync()\n return 'Cleared Model', 200\n\n\n@app.route('/api/sync', methods=['PUT'])\n@nocache\ndef do_sync():\n reqj = request.json\n try:\n size = control.set_sync(**reqj)\n except Exception as e:\n traceback.print_tb(e)\n return str(e), 400\n return jsonify({'text': 'Sync Points: ' + str(size)})\n\n\n@app.route('/api/sync', methods=['POST'])\n@nocache\ndef post_sync():\n reqj = request.json\n model = reqj.get('model')\n if model not in ['single', 'buie', 'affine_all']:\n return 'Invalid model', 400\n # Set models\n control.clear_sync()\n settings.settings['pointing_model'] = model\n settings.write_settings(settings.settings)\n return 'Pointing Model Set', 200\n\n\n@app.route('/api/slewto', methods=['PUT'])\n@nocache\ndef do_slewto():\n reqj = request.json\n try:\n control.set_slew(**reqj)\n except Exception as e:\n traceback.print_exc()\n return str(e), 400\n return 'Slewing', 200\n\n\n@app.route('/api/slewto_check', methods=['PUT'])\n@nocache\ndef do_slewtocheck():\n reqj = request.json\n frame = reqj.get('frame')\n ra = float(reqj.get('ra'))\n dec = float(reqj.get('dec', None))\n if frame == 'tete':\n frame_args = skyconv.get_frame_init_args('tete')\n coord = TETE(ra=ra * u.deg, dec=dec * u.deg, **frame_args)\n else:\n coord = ICRS(ra=ra * u.deg, dec=dec * u.deg)\n return jsonify({'slewcheck': control.slewtocheck(coord)})\n\n\n@app.route('/api/slewto', methods=['DELETE'])\n@nocache\ndef stop_slewto():\n control.cancel_slews()\n return 'Stopping Slew', 200\n\n\n@app.route('/api/set_time', methods=['PUT'])\n@nocache\ndef set_time():\n reqj = request.json\n time_str = reqj.get('time')\n # overwrite = request.form.get('overwrite', None)\n status = control.set_time(time_str)\n return status[1], 200 if status[0] else 500\n\n\n@app.route('/api/set_park_position', methods=['PUT'])\n@nocache\ndef set_park_position():\n control.set_park_position_here()\n return 'Park Position Set', 200\n\n\n@app.route('/api/set_park_position', methods=['DELETE'])\n@nocache\ndef unset_park_position():\n # Will just be default if none\n settings.settings['park_position'] = control.DEFAULT_PARK\n settings.write_settings(settings.settings)\n return 'Park Position Unset', 200\n\n\n@app.route('/api/park', methods=['PUT'])\n@nocache\ndef do_park():\n control.park_scope()\n return 'Parking.', 200\n\n\n@app.route('/api/start_tracking', methods=['PUT'])\n@nocache\ndef start_tracking():\n control.start_tracking()\n return 'Tracking', 200\n\n\n@app.route('/api/stop_tracking', methods=['PUT'])\n@nocache\ndef stop_tracking():\n control.stop_tracking()\n return 'Stopped Tracking', 200\n\n\n@app.route('/api/altitude_data', methods=['POST'])\n@nocache\ndef altitude_data():\n reqj = request.json\n frame = reqj.get('frame')\n obstime = 
reqj['times']\n if frame in ['tete', 'icrs']:\n ra = reqj['ra']\n dec = reqj['dec']\n if frame == 'tete':\n frame_args = skyconv.get_frame_init_args('tete', obstime=obstime[0])\n coord = TETE(ra=ra * u.deg, dec=dec * u.deg, **frame_args)\n coord = skyconv.to_icrs(coord)\n else: # ICRS\n coord = ICRS(ra=ra * u.deg, dec=dec * u.deg)\n elif frame == 'hadec':\n frame_args = skyconv.get_frame_init_args('hadec', obstime=obstime[0])\n coord = HADec(ha=reqj['ha'] * u.deg, dec=reqj['dec']*u.deg, **frame_args)\n coord = skyconv.to_icrs(coord)\n else: # AltAz\n frame_args = skyconv.get_frame_init_args('altaz', obstime=obstime[0])\n coord = AltAz(alt=reqj['alt'] * u.deg, az=reqj['az'] * u.deg, **frame_args)\n coord = skyconv.to_icrs(coord)\n altazes = skyconv.to_altaz(coord, obstime=obstime)\n return jsonify(altazes.alt.deg.tolist())\n\n\n@app.route('/api/convert_coord', methods=['POST'])\n@nocache\ndef conver_coord():\n reqj = request.json\n frame = reqj.get('frame')\n if frame in ['icrs', 'tete']:\n ra = reqj['ra']\n dec = reqj['dec']\n if frame == 'tete':\n frame_args = skyconv.get_frame_init_args('tete')\n coord = TETE(ra=ra * u.deg, dec=dec * u.deg, **frame_args)\n else:\n coord = ICRS(ra=ra * u.deg, dec=dec * u.deg)\n elif frame == 'hadec':\n frame_args = skyconv.get_frame_init_args('hadec')\n coord = HADec(ha=reqj['ha'] * u.deg, dec=reqj['dec'] * u.deg, **frame_args)\n else:\n frame_args = skyconv.get_frame_init_args('altaz')\n if reqj['alt'] < 5:\n frame_args['pressure'] = 0\n coord = AltAz(alt=reqj['alt'] * u.deg, az=reqj['az'] * u.deg, **frame_args)\n icrs = skyconv.to_icrs(coord)\n hadec = skyconv.to_hadec(icrs)\n altaz = skyconv.to_altaz(icrs)\n tete = skyconv.to_tete(icrs)\n return jsonify({'icrs': {'ra': icrs.ra.deg, 'dec': icrs.dec.deg},\n 'altaz': {'alt': altaz.alt.deg, 'az': altaz.az.deg},\n 'tete': {'ra': tete.ra.deg, 'dec': tete.dec.deg},\n 'hadec': {'ha': hadec.ha.deg, 'dec': hadec.dec.deg}})\n\n\n@app.route('/api/search_object', methods=['GET'])\n@nocache\ndef search_object():\n search = request.args.get('search', None)\n m = re.match(r'^([a-zA-Z]+)(\\d+)$', search)\n if m:\n search = m.group(1) + ' ' + m.group(2)\n if not search:\n return\n planets = db.search_planets(search)\n stars = db.search_stars(search)\n dso = db.search_dso(search)\n return jsonify({'dso': dso, 'stars': stars, 'planets': planets})\n\n\ndef reboot():\n if not settings.is_simulation():\n return subprocess.run(['/usr/bin/sudo', '/sbin/reboot'])\n\n\n@app.route('/api/firmware_update', methods=['POST'])\n@nocache\ndef firmware_update():\n file = request.files['file']\n with tempfile.TemporaryFile(suffix='.zip') as tfile:\n file.save(tfile)\n tfile.seek(0)\n zip_ref = zipfile.ZipFile(tfile)\n if settings.is_simulation():\n zip_ref.extractall('/home/russ/projects/starsynctrackers/SSTForkMountFirmware/piside/upload_test')\n else:\n subprocess.run(['sudo', 'mount', '-o', 'remount,rw', '/ssteq'])\n zip_ref.extractall('/ssteq/piside')\n subprocess.run(['sudo', 'mount', '-o', 'remount,ro', '/ssteq'])\n try:\n subprocess.run(['/usr/bin/python3', 'post_update.py'])\n except Exception as e:\n print(e)\n if not settings.is_simulation():\n t = threading.Timer(5, reboot)\n t.start()\n return 'Updated Firmware'\n\n\n@app.route('/api/wifi_scan', methods=['GET'])\n@nocache\ndef wifi_scan():\n aps = network.wifi_client_scan_iw()\n connected = network.current_wifi_connect()\n return jsonify({'aps': aps, 'connected': connected})\n\n\n@app.route('/api/location_preset', methods=['POST'])\n@nocache\ndef location_preset_add():\n 
location = request.json\n    name = location['name']\n    lat = location['lat']\n    long = location['long']\n    elevation = location['elevation']\n    settings.settings['location_presets'].append({'name': name, 'lat': lat, 'long': long, 'elevation': elevation})\n    settings.write_settings(settings.settings)\n    return \"Saved Location Preset\", 200\n\n\n@app.route('/api/location_gps', methods=['POST'])\n@nocache\ndef location_use_gps():\n    if handpad_server.serial:\n        lines = handpad_server.handpad_server.gps()\n        if lines[0] == 'ERROR':\n            return 'Error reading GPS', 500\n        info = handpad_menu.parse_gps(lines)\n        if info is None:\n            return 'No satellites yet, try again later', 500\n        control.set_location(info['location']['lat'], info['location']['long'], info['location']['elevation'],\n                             'GPS')\n        return 'Location set with GPS', 200\n    else:\n        return 'Handpad not connected', 500\n\n\n@app.route('/api/location_preset', methods=['DELETE'])\n@nocache\ndef location_preset_del():\n    location = request.json\n    idx = location['index']\n    del settings.settings['location_presets'][idx]\n    settings.write_settings(settings.settings)\n    return \"Deleted Location Preset\", 200\n\n\n@app.route('/api/search_location', methods=['GET'])\n@nocache\ndef search_location():\n    search = request.args.get('search', None)\n    if not search:\n        return\n    search = search.strip()\n    cities = db.search_cities(search)\n    return jsonify(cities)\n\n\n@app.route('/api/manual_control', methods=['POST'])\n@nocache\ndef manual_control():\n    message = request.json\n    # print(\"Got %s\" + json.dumps(message))\n    control.set_alive(message['client_id'])\n    control.manual_control(message['direction'], message['speed'], message['client_id'])\n    return 'Moving', 200\n\n\n@app.route('/api/status', methods=['GET'])\n@nocache\ndef status_get():\n    client_id = request.args.get('client_id')\n    control.set_alive(client_id)\n    return jsonify(control.last_status)\n\n\n@app.route('/api/extra_logging', methods=['GET'])\n@nocache\ndef extra_logging_get():\n    lx200proto_server.extra_logging = True\n    return 'Okay', 200\n\n\n@app.route('/')\n@nocache\ndef root():\n    return redirect('/index.html')\n\n\n@app.route('/advanced_slew_limits/<path:path>')\n@nocache\ndef send_static_advanced_slew(path):\n    return send_from_directory('../client_advanced_slew_limits/dist', path)\n\n\n@app.route('/<path:path>')\n@nocache\ndef send_static(path):\n    return send_from_directory('../client_main/dist', path)\n\n\ndef main():\n    global st_queue, power_thread_quit, avahi_process\n    power_thread_quit = False\n    if not settings.settings['power_switch']:\n        power_thread_quit = True\n    power_thread = threading.Thread(target=run_power_switch)\n    power_thread.start()\n    wifiinfo = network.hostapd_read()\n    for key in wifiinfo.keys():\n        settings.settings['network'][key] = wifiinfo[key]\n    ethernetinfo = network.read_ethernet_settings()\n    for key in ethernetinfo.keys():\n        settings.settings['network'][key] = ethernetinfo[key]\n    st_queue = control.init()\n\n    lx200proto_thread = threading.Thread(target=lx200proto_server.main)\n    lx200proto_thread.start()\n\n    handpad_thread = threading.Thread(target=handpad_server.run)\n    handpad_thread.start()\n\n    hostname = socket.gethostname()\n    # TODO: What about when they change hostname? 
Or can move this to systemd?\n avahi_process = subprocess.Popen(\n ['/usr/bin/avahi-publish-service', hostname, '_sstmount._tcp', '5000', '/'])\n\n def reconnect():\n time.sleep(30)\n # Stop hostapd and dnsmasq let autohotspot go\n subprocess.run(['sudo', '/root/ctrl_dnsmasq.py', 'wlan0', 'disable'])\n subprocess.run(['sudo', '/usr/bin/killall', 'hostapd'])\n subprocess.run(['sudo', '/usr/bin/autohotspot'])\n\n if not settings.is_simulation():\n t1 = threading.Thread(target=reconnect)\n t1.start()\n\n print('Running...')\n try:\n ssl_context = None\n if len(sys.argv) == 2 and sys.argv[1] == '--https':\n make_ssl_devcert('../key')\n ssl_context = ('../key.crt', '../key.key')\n compress.init_app(app)\n app.run(host=\"0.0.0.0\", debug=False, use_reloader=False, ssl_context=ssl_context)\n power_thread_quit = True\n handpad_server.terminate()\n lx200proto_server.terminate()\n lx200proto_thread.join()\n handpad_thread.join()\n power_thread.join()\n avahi_process.kill()\n finally:\n pass\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"piside/server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":28301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"646871991","text":"import os\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef train(rank, args, model, device, dataset, dataloader_kwargs):\n torch.manual_seed(args.seed + rank)\n\n train_loader = torch.utils.data.DataLoader(dataset, **dataloader_kwargs)\n\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n for epoch in range(1, args.epochs + 1):\n train_epoch(epoch, args, model, device, train_loader, optimizer)\n\n\n\ndef test(args, model, device, dataset, dataloader_kwargs):\n torch.manual_seed(args.seed)\n\n test_loader = torch.utils.data.DataLoader(dataset, **dataloader_kwargs)\n\n test_epoch(model, device, test_loader)\n\n\ndef train_epoch(epoch, args, model, device, data_loader, optimizer):\n model.train()\n pid = os.getpid()\n criterion = nn.CrossEntropyLoss()\n for batch_idx, (data, target) in enumerate(data_loader):\n optimizer.zero_grad()\n output = model(data.to(device))\n loss = criterion(output, target.to(device))\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('{}\\tTrain Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n pid, epoch, batch_idx * len(data), len(data_loader.dataset),\n 100. 
* batch_idx / len(data_loader), loss.item()))\n if args.dry_run:\n break\n\n\ndef test_epoch(model, device, data_loader):\n model.eval()\n correct = 0\n total = 0\n criterion = nn.CrossEntropyLoss()\n with torch.no_grad():\n # for data, target in data_loader:\n # output = model(data.to(device))\n # test_loss += criterion(output, target.to(device), reduction='sum').item() # sum up batch loss\n # pred = output.max(1)[1] # get the index of the max log-probability\n # correct += pred.eq(target.to(device)).sum().item()\n for data in data_loader:\n images, labels = data\n # outputs = net(images)\n outputs = model(images)\n # the class with the highest energy is what we choose as prediction\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"621408148","text":"import binascii\nfrom iroha import IrohaCrypto as ic\nfrom iroha import Iroha, IrohaGrpc\nimport sys\n\niroha = Iroha('admin@test')\n#populate from login form. iroha = Iroha('user@domain')\nnet = IrohaGrpc()\nadmin_private_key = open('./configs/admin@test.priv').read()\n#user_private_key = open('./configs/user@test.priv').read() or from form.\n\ndef send_transaction_and_print_status(transaction):\n global net\n hex_hash = binascii.hexlify(ic.hash(transaction))\n print('Transaction hash = {}, creator = {}'.format(\n hex_hash, transaction.payload.reduced_payload.creator_account_id))\n net.send_tx(transaction)\n for status in net.tx_status_stream(transaction):\n print(status)\n\ndef create_domain():\n return\n\ndef add_peer_node():\n return\n\ndef generate_kp():\n global iroha\n pk = ic.private_key()\n user_private_key = pk\n user_public_key = ic.derive_public_key(user_private_key)\n return user_private_key, user_public_key\n\ndef create_users(user_name,domain):\n global iroha\n user_private_key, user_public_key = generate_kp()\n init_cmds = [\n iroha.command('CreateAccount', account_name=user_name, domain_id=domain,\n public_key=user_public_key)\n ]\n init_tx = iroha.transaction(init_cmds)\n ic.sign_transaction(init_tx, admin_private_key)\n send_transaction_and_print_status(init_tx)\n return user_private_key, user_public_key\n\ndef add_asset_to_admin(asset_id, qty):\n global iroha\n \"\"\"\n Add asset supply and assign to 'admin@test'\n \"\"\"\n tx = iroha.transaction([\n iroha.command('AddAssetQuantity',\n asset_id=asset_id, amount=qty)\n ])\n ic.sign_transaction(tx, admin_private_key)\n send_transaction_and_print_status(tx)\n\ndef create_new_asset(asset,domain,precision,qty):\n global iroha\n user_tx = iroha.transaction(\n [iroha.command('CreateAsset', asset_name=asset,\n domain_id=domain, precision=precision)] )\n ic.sign_transaction(user_tx, admin_private_key)\n send_transaction_and_print_status(user_tx)\n asset_id = asset + '#' + domain\n add_asset_to_admin(asset_id=asset_id,qty=qty)\n\ndef add_asset_to_user():\n \"\"\"\n Add 1000.00 units of 'coin#domain' to 'admin@test'\n \"\"\"\n tx = iroha.transaction([\n iroha.command('AddAssetQuantity',\n asset_id='coin#domain', amount='1000.00')\n ])\n ic.sign_transaction(tx, admin_private_key)\n send_transaction_and_print_status(tx)\n\ndef create_ple_doman_and_asset():\n \"\"\"\n Creates domain 'domain' and asset 'coin#domain' with precision 2\n 
\"\"\"\n commands = [\n iroha.command('CreateDomain', domain_id='domain', default_role='user'),\n iroha.command('CreateAsset', asset_name='coin',\n domain_id='domain', precision=2)\n ]\n tx = ic.sign_transaction(\n iroha.transaction(commands), admin_private_key)\n send_transaction_and_print_status(tx)\n\ndef transfer_asset(owner,recepient,asset_id,description,domain,qty):\n global iroha\n user_tx = iroha.transaction(\n iroha.command('TransferAsset', src_account_id=owner, dest_account_id=recepient,\n asset_id=asset_id, description=description, amount=qty))\n ic.sign_transaction(user_tx, admin_private_key)\n send_transaction_and_print_status(user_tx)\n\ndef get_blocks():\n \"\"\"\n Subscribe to blocks stream from the network\n :return:\n \"\"\"\n query = iroha.blocks_query()\n ic.sign_query(query, admin_private_key)\n for block in net.send_blocks_stream_query(query):\n print('The next block arrived:', block)\n\ndef set_account_detail(account_id,key,value):\n \"\"\"\n Set age to user@domain by admin@test\n \"\"\"\n tx = iroha.transaction([\n iroha.command('SetAccountDetail',\n account_id=account_id, key=key, value=value)\n ])\n ic.sign_transaction(tx, admin_private_key)\n send_transaction_and_print_status(tx)\n\ndef get_asset_info(asset_id):\n \"\"\"\n Get asset info for coin#domain\n :return:\n \"\"\"\n query = iroha.query('GetAssetInfo', asset_id=asset_id)\n ic.sign_query(query, admin_private_key)\n response = net.send_query(query)\n data = response.asset_response.asset\n print('Asset id = {}, precision = {}'.format(data.asset_id, data.precision))\n\ndef get_account_assets():\n \"\"\"\n List all the assets of user@domain\n \"\"\"\n query = iroha.query('GetAccountAssets', account_id='admin@test')\n ic.sign_query(query, admin_private_key)\n\n response = net.send_query(query)\n data = response.account_assets_response.account_assets\n for asset in data:\n print('Asset id = {}, balance = {}'.format(\n asset.asset_id, asset.balance))\n return data\n\ndef get_user_details(account_id):\n \"\"\"\n Get all the kv-storage entries for user@domain\n \"\"\"\n query = iroha.query('GetAccountDetail', account_id=account_id)\n ic.sign_query(query, admin_private_key)\n\n response = net.send_query(query)\n data = response.account_detail_response\n print('Account id = {}, details = {}'.format(account_id, data.detail))\n return data","sub_path":"flask/iroha_client.py","file_name":"iroha_client.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"419514705","text":"# SQUARE TURTLE WITH VARIABLE\nfrom turtle import *\n\nshape('turtle')\nspeed(1)\n\n\ndef square(sidelength):\n for i in range(60):\n right(5) # geser 5 derajat\n forward(sidelength)\n right(90)\n\n\nsquare(50)\n","sub_path":"03_turtle_variable.py","file_name":"03_turtle_variable.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"116427574","text":"from gym.core import Wrapper\nfrom glob import glob\nimport csv\nimport uuid\nimport pandas\nimport os.path as osp\nimport json\nimport os\nimport time\nimport functools\nimport gym\nfrom gym import spaces\nimport numpy as np\nimport tensorflow as tf\nfrom baselines import logger\nfrom multiprocessing import Process, Pipe\nfrom collections import deque\nfrom abc import ABC, abstractmethod\nfrom pommerman import make\nfrom pommerman.agents import BaseAgent\nfrom pommerman import characters\nfrom pommerman import 
agents\nfrom baselines.common import explained_variance, set_global_seeds\nfrom baselines.common.policies import build_policy\nfrom baselines.common.tf_util import get_session, save_variables, load_variables\nfrom baselines.common.vec_env import VecEnv, VecEnvWrapper, CloudpickleWrapper\nfrom baselines.common.vec_env.util import copy_obs_dict, dict_to_obs, obs_space_info\n\ntry:\n from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer\n from mpi4py import MPI\n from baselines.common.mpi_util import sync_from_root\nexcept ImportError:\n MPI = None\n\nfrom baselines.common.tf_util import initialize\n\n\nclass Monitor(Wrapper):\n EXT = \"monitor.csv\"\n f = None\n\n def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):\n Wrapper.__init__(self, env=env)\n self.tstart = time.time()\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": time.time(), 'env_id': env.spec and env.spec.id},\n extra_keys=reset_keywords + info_keywords\n )\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards = None\n self.needs_reset = True\n self.episode_rewards = []\n self.episode_lengths = []\n self.episode_times = []\n self.total_steps = 0\n self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()\n self.id = env.id\n\n def reset(self, **kwargs):\n self.reset_state()\n for k in self.reset_keywords:\n v = kwargs.get(k)\n if v is None:\n raise ValueError('Expected you to pass kwarg %s into reset'%k)\n self.current_reset_info[k] = v\n return self.env.reset(**kwargs)\n\n def reset_state(self):\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\"Tried to reset an environment before done. 
If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)\")\n self.rewards = []\n self.needs_reset = False\n\n def step(self, action):\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n ob, rew, done, info, alive_list = self.env.step(action)\n self.update(ob, rew, done, info)\n return (ob, rew, done, info, alive_list)\n\n def update(self, ob, rew, done, info):\n self.rewards.append(rew)\n if done:\n # self.needs_reset = True\n a, b, c, d = 0, 0, 0, 0\n for rl in self.rewards:\n a += rl[0]\n b += rl[1]\n c += rl[2]\n d += rl[3]\n eprew = [a, b, c, d]\n eplen = len(self.rewards)\n epinfo = {\"r\": [round(epr, 6) for epr in eprew], \"l\": eplen, \"t\": round(time.time() - self.tstart, 6)}\n for k in self.info_keywords:\n epinfo[k] = info[k]\n self.episode_rewards.append(eprew)\n self.episode_lengths.append(eplen)\n self.episode_times.append(time.time() - self.tstart)\n epinfo.update(self.current_reset_info)\n self.results_writer.write_row(epinfo)\n\n if isinstance(info, dict):\n info['episode'] = epinfo\n\n self.total_steps += 1\n\n def close(self):\n if self.f is not None:\n self.f.close()\n\n def get_total_steps(self):\n return self.total_steps\n\n def get_episode_rewards(self):\n return self.episode_rewards\n\n def get_episode_lengths(self):\n return self.episode_lengths\n\n def get_episode_times(self):\n return self.episode_times\n\n\nclass LoadMonitorResultsError(Exception):\n pass\n\n\nclass ResultsWriter(object):\n def __init__(self, filename=None, header='', extra_keys=()):\n self.extra_keys = extra_keys\n if filename is None:\n self.f = None\n self.logger = None\n else:\n if not filename.endswith(Monitor.EXT):\n if osp.isdir(filename):\n filename = osp.join(filename, Monitor.EXT)\n else:\n filename = filename + \".\" + Monitor.EXT\n self.f = open(filename, \"wt\")\n if isinstance(header, dict):\n header = '# {} \\n'.format(json.dumps(header))\n self.f.write(header)\n self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+tuple(extra_keys))\n self.logger.writeheader()\n self.f.flush()\n\n def write_row(self, epinfo):\n if self.logger:\n self.logger.writerow(epinfo)\n self.f.flush()\n\n\ndef get_monitor_files(dir):\n return glob(osp.join(dir, \"*\" + Monitor.EXT))\n\n\ndef load_results(dir):\n import pandas\n monitor_files = (\n glob(osp.join(dir, \"*monitor.json\")) +\n glob(osp.join(dir, \"*monitor.csv\"))) # get both csv and (old) json files\n if not monitor_files:\n raise LoadMonitorResultsError(\"no monitor files of the form *%s found in %s\" % (Monitor.EXT, dir))\n dfs = []\n headers = []\n for fname in monitor_files:\n with open(fname, 'rt') as fh:\n if fname.endswith('csv'):\n firstline = fh.readline()\n if not firstline:\n continue\n assert firstline[0] == '#'\n header = json.loads(firstline[1:])\n df = pandas.read_csv(fh, index_col=None)\n headers.append(header)\n elif fname.endswith('json'): # Deprecated json format\n episodes = []\n lines = fh.readlines()\n header = json.loads(lines[0])\n headers.append(header)\n for line in lines[1:]:\n episode = json.loads(line)\n episodes.append(episode)\n df = pandas.DataFrame(episodes)\n else:\n assert 0, 'unreachable'\n df['t'] += header['t_start']\n dfs.append(df)\n df = pandas.concat(dfs)\n df.sort_values('t', inplace=True)\n df.reset_index(inplace=True)\n df['t'] -= min(header['t_start'] for header in headers)\n df.headers = headers # HACK to preserve backwards compatibility\n return df\n\n\ndef test_monitor():\n env = 
gym.make(\"CartPole-v1\")\n env.seed(0)\n mon_file = \"/tmp/baselines-test-%s.monitor.csv\" % uuid.uuid4()\n menv = Monitor(env, mon_file)\n menv.reset()\n for _ in range(1000):\n _, _, done, _ = menv.step(0)\n if done:\n menv.reset()\n\n f = open(mon_file, 'rt')\n\n firstline = f.readline()\n assert firstline.startswith('#')\n metadata = json.loads(firstline[1:])\n assert metadata['env_id'] == \"CartPole-v1\"\n assert set(metadata.keys()) == {'env_id', 'gym_version', 't_start'}, \"Incorrect keys in monitor metadata\"\n\n last_logline = pandas.read_csv(f, index_col=None)\n assert set(last_logline.keys()) == {'l', 't', 'r'}, \"Incorrect keys in monitor logline\"\n f.close()\n os.remove(mon_file)\n\n\ndef worker(remote, parent_remote, env_fn_wrapper):\n parent_remote.close()\n env = env_fn_wrapper.x()\n try:\n while True:\n cmd, data = remote.recv()\n if cmd == 'step':\n ob, reward, done, info, alive_list = env.step(data)\n if (env.id + 10) not in alive_list or done:\n ob = env.reset()\n done = True\n remote.send((ob, reward, done, info))\n elif cmd == 'reset':\n ob = env.reset()\n remote.send(ob)\n elif cmd == 'render':\n remote.send(env.render(mode='rgb_array'))\n elif cmd == 'close':\n remote.close()\n break\n elif cmd == 'get_spaces':\n remote.send((env.observation_space, env.action_space))\n else:\n raise NotImplementedError\n except KeyboardInterrupt:\n print('SubprocVecEnv worker: got KeyboardInterrupt')\n finally:\n env.close()\n\n\nclass SubprocVecEnv(VecEnv):\n \"\"\"\n VecEnv that runs multiple environments in parallel in subproceses and communicates with them via pipes.\n Recommended to use when num_envs > 1 and step() can be a bottleneck.\n \"\"\"\n def __init__(self, env_fns, spaces=None):\n \"\"\"\n Arguments:\n\n env_fns: iterable of callables - functions that create environments to run in subprocesses. 
Need to be cloud-pickleable\n \"\"\"\n self.waiting = False\n self.closed = False\n nenvs = len(env_fns)\n self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n for p in self.ps:\n p.daemon = True # if the main process crashes, we should not cause things to hang\n p.start()\n for remote in self.work_remotes:\n remote.close()\n\n self.remotes[0].send(('get_spaces', None))\n observation_space, action_space = self.remotes[0].recv()\n self.viewer = None\n VecEnv.__init__(self, len(env_fns), observation_space, action_space)\n\n def step_async(self, actions):\n self._assert_not_closed()\n for remote, action in zip(self.remotes, actions):\n remote.send(('step', action))\n self.waiting = True\n\n def step_wait(self):\n self._assert_not_closed()\n results = [remote.recv() for remote in self.remotes]\n self.waiting = False\n obs, rews, dones, infos = zip(*results)\n return np.stack(obs), np.stack(rews), np.stack(dones), infos\n\n def reset(self):\n self._assert_not_closed()\n for remote in self.remotes:\n remote.send(('reset', None))\n return np.stack([remote.recv() for remote in self.remotes])\n\n def close_extras(self):\n self.closed = True\n if self.waiting:\n for remote in self.remotes:\n remote.recv()\n for remote in self.remotes:\n remote.send(('close', None))\n for p in self.ps:\n p.join()\n\n def get_images(self):\n self._assert_not_closed()\n for pipe in self.remotes:\n pipe.send(('render', None))\n imgs = [pipe.recv() for pipe in self.remotes]\n return imgs\n\n def _assert_not_closed(self):\n assert not self.closed, \"Trying to operate on a SubprocVecEnv after calling close()\"\n\n\nclass VecFrameStack(VecEnvWrapper):\n def __init__(self, venv, nstack):\n self.venv = venv\n self.nstack = nstack\n wos = venv.observation_space # wrapped ob space\n low = np.repeat(wos.low, self.nstack, axis=-1)\n high = np.repeat(wos.high, self.nstack, axis=-1)\n self.stackedobs = np.zeros((venv.num_envs, 4,) + low.shape, low.dtype)\n observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype)\n VecEnvWrapper.__init__(self, venv, observation_space=observation_space)\n\n def step_wait(self):\n obs, rews, news, infos = self.venv.step_wait()\n for i in range(4):\n self.stackedobs[i] = np.roll(self.stackedobs[i], shift=-1, axis=-1)\n if news[i]:\n self.stackedobs[i] = 0\n self.stackedobs[i][..., -obs.shape[-1]:] = obs[i]\n return self.stackedobs, rews, news, infos\n\n def reset(self):\n obs = self.venv.reset()\n for i in range(4):\n self.stackedobs[i][...] 
= 0\n self.stackedobs[i][..., -obs.shape[-1]:] = obs[i]\n return self.stackedobs\n\n\ndef make_vec_env(agent_list, num_env, seed=None, wrapper_kwargs=None, start_index=0, gamestate=None):\n \"\"\"\n Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.\n \"\"\"\n if wrapper_kwargs is None:\n wrapper_kwargs = {}\n mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0\n seed = seed + 10000 * mpi_rank if seed is not None else None\n\n def make_thunk(rank):\n return lambda: make_env(\n agent_list=agent_list,\n subrank=rank,\n seed=seed,\n gamestate=gamestate,\n wrapper_kwargs=wrapper_kwargs\n )\n\n set_global_seeds(seed)\n env = SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)])\n return VecFrameStack(env, 4)\n\n\ndef docker_featurize(obs):\n board = np.asarray(obs['board'])\n\n # convert board items into bitmaps\n maps = [board == i for i in range(10)]\n maps.append(obs['bomb_blast_strength'])\n maps.append(obs['bomb_life'])\n\n # duplicate ammo, blast_strength and can_kick over entire map\n maps.append(np.full(board.shape, obs['ammo']))\n maps.append(np.full(board.shape, obs['blast_strength']))\n maps.append(np.full(board.shape, obs['can_kick']))\n\n # add my position as bitmap\n position = np.zeros(board.shape)\n position[obs['position']] = 1\n maps.append(position)\n\n # add teammate\n if obs['teammate'] is not None:\n maps.append(board == obs['teammate'])\n else:\n maps.append(np.zeros(board.shape))\n\n # add enemies\n enemies = [board == e for e in obs['enemies']]\n maps.append(np.any(enemies, axis=0))\n\n return np.stack(maps, axis=2)\n\n\ndef featurize(obs):\n board = obs['board']\n\n # convert board items into bitmaps\n maps = [board == i for i in range(10)]\n maps.append(obs['bomb_blast_strength'])\n maps.append(obs['bomb_life'])\n\n # duplicate ammo, blast_strength and can_kick over entire map\n maps.append(np.full(board.shape, obs['ammo']))\n maps.append(np.full(board.shape, obs['blast_strength']))\n maps.append(np.full(board.shape, obs['can_kick']))\n\n # add my position as bitmap\n position = np.zeros(board.shape)\n position[obs['position']] = 1\n maps.append(position)\n\n # add teammate\n if obs['teammate'] is not None:\n maps.append(board == obs['teammate'].value)\n else:\n maps.append(np.zeros(board.shape))\n\n # add enemies\n enemies = [board == e.value for e in obs['enemies']]\n maps.append(np.any(enemies, axis=0))\n\n return np.stack(maps, axis=2)\n\n\nclass WrappedEnv(gym.Wrapper):\n '''An Env Wrapper used to make it easier to work\n with multiple agents'''\n def __init__(self, env, subrank, visualize=False):\n super(WrappedEnv, self).__init__(env)\n self.visualize = visualize\n self.can_kick_dict = {10: 0, 11: 0, 12: 0, 13: 0}\n self.blast_strength_dict = {10: 2, 11: 2, 12: 2, 13: 2}\n self.ammo_record = {10: [], 11: [], 12: [], 13: []}\n self.ammo_num_list = [1, 1, 1, 1]\n self.alive_list = [10, 11, 12, 13]\n self.num_steps = 0\n self.observation_space = spaces.Box(\n low=0, high=25,\n shape=(11, 11, 18), dtype=env.observation_space.dtype.name)\n self.id = subrank\n\n def step(self, actions):\n if self.visualize:\n self.env.render()\n\n self.num_steps += 1\n\n obs, reward, done, info = self.env.step(actions)\n neo_obs = []\n neo_reward = [0, 0, 0, 0]\n for i, ob in enumerate(obs):\n if (i + 10) not in ob['alive']:\n neo_obs.append(np.zeros(self.observation_space.shape))\n if (i + 10) not in self.alive_list:\n pass\n else:\n # neo_reward[i] += ((self.num_steps - 1) / 800 - 1) * 10\n neo_reward[i] += -5\n self.alive_list.remove(i + 10)\n else:\n 
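# Agent (i + 10) is still alive: featurize its observation and hand out the shaping bonuses below.\n 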
neo_obs.append(featurize(ob))\n neo_reward[i] += 0.1\n\n if ob['can_kick'] and not self.can_kick_dict[i + 10]:\n neo_reward[i] += 1\n self.can_kick_dict[i + 10] = 1\n\n if ob['blast_strength'] > self.blast_strength_dict[i + 10]:\n neo_reward[i] += 1\n self.blast_strength_dict[i + 10] = ob['blast_strength']\n\n if ob['ammo'] < self.ammo_num_list[i]:\n self.ammo_record[i + 10].append(self.num_steps - 1)\n neo_reward[i] += 1\n elif ob['ammo'] > self.ammo_num_list[i]:\n if (self.num_steps - 10) in self.ammo_record[i + 10]:\n self.ammo_record[i + 10].remove(self.num_steps - 10)\n else:\n neo_reward[i] += 1\n self.ammo_num_list[i] = ob['ammo']\n\n if done:\n if reward[i] == 0:\n neo_reward[i] += -5\n\n if reward[i] == 1 and (i + 10) in ob['alive']:\n neo_reward[i] += (1 - (self.num_steps - 1) / 800) * 10\n\n # obs = [self.env.featurize(ob) for ob in obs]\n # return obs, reward, done, info\n return neo_obs, neo_reward, done, info, self.alive_list\n\n def reset(self):\n self.can_kick_dict = {10: 0, 11: 0, 12: 0, 13: 0}\n self.blast_strength_dict = {10: 2, 11: 2, 12: 2, 13: 2}\n self.ammo_record = {10: [], 11: [], 12: [], 13: []}\n self.ammo_num_list = [1, 1, 1, 1]\n self.alive_list = [10, 11, 12, 13]\n self.num_steps = 0\n\n obs = self.env.reset()\n obs = [featurize(ob) for ob in obs]\n return obs\n\n\ndef make_env(agent_list, subrank=0, seed=None, gamestate=None, wrapper_kwargs={}):\n mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0\n env = make('PommeTeamCompetitionFast-v0', agent_list)\n env = WrappedEnv(env, subrank)\n\n # env.seed(seed + subrank if seed is not None else None)\n env = Monitor(env,\n logger.get_dir() and os.path.join(logger.get_dir(), str(mpi_rank) + '.' + str(subrank)),\n allow_early_resets=True)\n\n return env\n\n\nclass PPOAgent(BaseAgent):\n \"\"\"The PPOAgent. 
Acts through the algorithm, not here.\"\"\"\n\n def __init__(self, character=characters.Bomber):\n super(PPOAgent, self).__init__(character)\n\n def act(self, obs, action_space):\n \"\"\"This agent has its own way of inducing actions\"\"\"\n self.stackedobs = np.roll(self.stackedobs, shift=-1, axis=-1)\n obs = featurize(obs)\n self.stackedobs[..., -obs.shape[-1]:] = obs\n action, _, _, _ = self.model.step(self.stackedobs)\n return int(action)\n\n def initialize(self, model):\n self.model = model\n self.stackedobs = np.zeros((1, 11, 11, 72))\n\n\nclass Model(object):\n \"\"\"\n We use this object to :\n __init__:\n - Creates the step_model\n - Creates the train_model\n\n train():\n - Make the training part (feedforward and backpropagation of gradients)\n\n save/load():\n - Save load the model\n \"\"\"\n def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,\n nsteps, ent_coef, vf_coef, max_grad_norm):\n sess = get_session()\n\n with tf.variable_scope('ppo2_model', reuse=tf.AUTO_REUSE):\n # CREATE OUR TWO MODELS\n # act_model that is used for sampling\n act_model = policy(nbatch_act, 1, sess)\n\n # Train model for training\n train_model = policy(nbatch_train, nsteps, sess)\n\n # CREATE THE PLACEHOLDERS\n A = train_model.pdtype.sample_placeholder([None])\n ADV = tf.placeholder(tf.float32, [None])\n R = tf.placeholder(tf.float32, [None])\n # Keep track of old actor\n OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])\n # Keep track of old critic\n OLDVPRED = tf.placeholder(tf.float32, [None])\n LR = tf.placeholder(tf.float32, [])\n # Cliprange\n CLIPRANGE = tf.placeholder(tf.float32, [])\n\n neglogpac = train_model.pd.neglogp(A)\n\n # Calculate the entropy\n # Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.\n entropy = tf.reduce_mean(train_model.pd.entropy())\n\n # CALCULATE THE LOSS\n # Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss\n\n # Clip the value to reduce variability during Critic training\n # Get the predicted value\n vpred = train_model.vf\n vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)\n # Unclipped value\n vf_losses1 = tf.square(vpred - R)\n # Clipped value\n vf_losses2 = tf.square(vpredclipped - R)\n\n vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))\n\n # Calculate ratio (pi current policy / pi old policy)\n ratio = tf.exp(OLDNEGLOGPAC - neglogpac)\n\n # Defining Loss = - J is equivalent to max J\n pg_losses = -ADV * ratio\n\n pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)\n\n # Final PG loss\n pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))\n approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))\n clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))\n\n # Total loss\n loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef\n\n # UPDATE THE PARAMETERS USING LOSS\n # 1. Get the model parameters\n params = tf.trainable_variables('ppo2_model')\n # 2. Build our trainer\n if MPI is not None:\n trainer = MpiAdamOptimizer(MPI.COMM_WORLD, learning_rate=LR, epsilon=1e-5)\n else:\n trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)\n # 3. 
Calculate the gradients\n grads_and_var = trainer.compute_gradients(loss, params)\n grads, var = zip(*grads_and_var)\n\n if max_grad_norm is not None:\n # Clip the gradients (normalize)\n grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)\n grads_and_var = list(zip(grads, var))\n # zip aggregate each gradient with parameters associated\n # For instance zip(ABCD, xyza) => Ax, By, Cz, Da\n\n _train = trainer.apply_gradients(grads_and_var)\n print(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))\n\n def train(lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):\n # Here we calculate advantage A(s,a) = R + yV(s') - V(s)\n # Returns = R + yV(s')\n advs = returns - values\n\n # Normalize the advantages\n advs = (advs - advs.mean()) / (advs.std() + 1e-8)\n td_map = {train_model.X: obs, A: actions, ADV: advs, R: returns, LR: lr,\n CLIPRANGE: cliprange, OLDNEGLOGPAC: neglogpacs, OLDVPRED: values}\n if states is not None:\n td_map[train_model.S] = states\n td_map[train_model.M] = masks\n return sess.run(\n [pg_loss, vf_loss, entropy, approxkl, clipfrac, _train],\n td_map\n )[:-1]\n self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']\n\n self.train = train\n self.train_model = train_model\n self.act_model = act_model\n self.step = act_model.step\n self.value = act_model.value\n self.initial_state = act_model.initial_state\n\n self.save = functools.partial(save_variables, sess=sess)\n self.load = functools.partial(load_variables, sess=sess)\n\n if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:\n initialize()\n global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"\")\n\n if MPI is not None:\n sync_from_root(sess, global_variables) # pylint: disable=E1101\n\n\nclass AbstractEnvRunner(ABC):\n def __init__(self, *, env, model, nsteps):\n self.env = env\n self.model = model\n self.nenv = nenv = env.num_envs if hasattr(env, 'num_envs') else 1\n self.batch_ob_shape = (nenv*nsteps, 4,) + env.observation_space.shape\n self.obs = np.zeros( # pylint: disable=E0632\n (nenv, 4,) + env.observation_space.shape,\n dtype=env.observation_space.dtype.name\n )\n self.obs[:] = env.reset() # pylint: disable=E0632\n self.nsteps = nsteps\n self.states = model.initial_state\n self.dones = [False for _ in range(nenv)]\n\n @abstractmethod\n def run(self):\n raise NotImplementedError\n\n\nclass Runner(AbstractEnvRunner):\n \"\"\"\n We use this object to make a mini batch of experiences\n __init__:\n - Initialize the runner\n\n run():\n - Make a mini batch\n \"\"\"\n def __init__(self, *, env, model, nsteps, gamma, lam):\n super().__init__(env=env, model=model, nsteps=nsteps)\n # Lambda used in GAE (General Advantage Estimation)\n self.lam = lam\n # Discount rate\n self.gamma = gamma\n\n def run(self):\n # Here, we init the lists that will contain the mb of experiences\n mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]\n mb_states = self.states\n epinfos = []\n # For n in range number of steps\n for _ in range(self.nsteps):\n # Given observations, get action value and neglopacs\n # We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init\n tmp_obs = np.zeros((4, 11, 11, 72))\n tmp_actions = []\n tmp_values = []\n tmp_neglogpacs = []\n tmp_dones = []\n all_actions = []\n for i in range(4):\n actions, values, self.states, neglogpacs = self.model.step(self.obs[i], S=self.states, M=self.dones[i])\n tmp_obs[i] = self.obs[i][i].copy()\n 
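# Keep only agent i's own action, value estimate and neglogp from the vectorized step output.\n 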
tmp_actions.append(actions[i])\n tmp_values.append(values[i])\n tmp_neglogpacs.append(neglogpacs[i])\n tmp_dones.append(self.dones[i])\n all_actions.append(actions)\n mb_obs.append(tmp_obs)\n mb_actions.append(tmp_actions)\n mb_values.append(tmp_values)\n mb_neglogpacs.append(tmp_neglogpacs)\n mb_dones.append(tmp_dones)\n\n # Take actions in env and look the results\n # Infos contains a ton of useful informations\n self.obs[:], rewards, self.dones, infos = self.env.step(all_actions)\n epinfos.append([infos])\n mb_rewards.append([reward[i] for i, reward in enumerate(rewards)])\n # batch of steps to batch of rollouts\n mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32)\n mb_actions = np.asarray(mb_actions)\n mb_values = np.asarray(mb_values, dtype=np.float32)\n mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)\n mb_dones = np.asarray(mb_dones, dtype=np.bool)\n last_values = []\n for i in range(4):\n values = self.model.value(self.obs[i], S=self.states, M=self.dones[i])\n last_values.append(values[i])\n last_values = np.asarray(last_values, dtype=np.float32)\n\n # discount/bootstrap off value fn\n mb_returns = np.zeros_like(mb_rewards)\n mb_advs = np.zeros_like(mb_rewards)\n lastgaelam = 0\n for t in reversed(range(self.nsteps)):\n if t == self.nsteps - 1:\n nextnonterminal = 1.0 - self.dones\n nextvalues = last_values\n else:\n nextnonterminal = 1.0 - mb_dones[t+1]\n nextvalues = mb_values[t+1]\n delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]\n mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam\n mb_returns = mb_advs + mb_values\n return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),\n mb_states, epinfos)\n# obs, returns, masks, actions, values, neglogpacs, states = runner.run()\n\n\ndef sf01(arr):\n \"\"\"\n swap and then flatten axes 0 and 1\n \"\"\"\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])\n\n\ndef constfn(val):\n def f(_):\n return val\n return f\n\n\ndef learn(*, network, env, total_timesteps, eval_env=None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2, save_interval=0, load_path=None, **network_kwargs):\n '''\n Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)\n\n Parameters:\n ----------\n\n network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)\n specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns\n tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward\n neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.\n See common/models.py/lstm for more details on using recurrent nets in policies\n\n env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.\n The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.\n\n\n nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where\n nenv is number of environment copies simulated in parallel)\n\n total_timesteps: int number of timesteps (i.e. 
number of actions taken in the environment)\n\n ent_coef: float policy entropy coefficient in the optimization objective\n\n lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the\n training and 0 is the end of the training.\n\n vf_coef: float value function loss coefficient in the optimization objective\n\n max_grad_norm: float or None gradient norm clipping coefficient\n\n gamma: float discounting factor\n\n lam: float advantage estimation discounting factor (lambda in the paper)\n\n log_interval: int number of timesteps between logging events\n\n nminibatches: int number of training minibatches per update. For recurrent policies,\n should be smaller or equal than number of environments run in parallel.\n\n noptepochs: int number of training epochs per update\n\n cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training\n and 0 is the end of the training\n\n save_interval: int number of timesteps between saving events\n\n load_path: str path to load the model from\n\n **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network\n For instance, 'mlp' network architecture has arguments num_hidden and num_layers.\n\n\n\n '''\n\n set_global_seeds(seed)\n\n if isinstance(lr, float):\n lr = constfn(lr)\n else:\n assert callable(lr)\n if isinstance(cliprange, float):\n cliprange = constfn(cliprange)\n else:\n assert callable(cliprange)\n total_timesteps = int(total_timesteps)\n\n policy = build_policy(env, network, **network_kwargs)\n\n # Get the nb of env\n nenvs = env.num_envs\n\n # Get state_space and action_space\n ob_space = env.observation_space\n ac_space = env.action_space\n\n # Calculate the batch_size\n nbatch = nenvs * nsteps\n nbatch_train = nbatch // nminibatches\n\n # Instantiate the model object (that creates act_model and train_model)\n make_model = lambda : Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=1, nbatch_train=nbatch_train, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm)\n model = make_model()\n if load_path is not None:\n model.load(load_path)\n # Instantiate the runner object\n runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)\n if eval_env is not None:\n eval_runner = Runner(env=eval_env, model=model, nsteps=nsteps, gamma=gamma, lam= lam)\n\n epinfobuf = deque(maxlen=100)\n if eval_env is not None:\n eval_epinfobuf = deque(maxlen=100)\n\n # Start total timer\n tfirststart = time.time()\n\n nupdates = total_timesteps//nbatch\n for update in range(1, nupdates+1):\n assert nbatch % nminibatches == 0\n # Start timer\n tstart = time.time()\n frac = 1.0 - (update - 1.0) / nupdates\n # Calculate the learning rate\n lrnow = lr(frac)\n # Calculate the cliprange\n cliprangenow = cliprange(frac)\n # Get minibatch\n obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() # pylint: disable=E0632\n if eval_env is not None:\n eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() # pylint: disable=E0632\n\n epinfobuf.extend(epinfos)\n if eval_env is not None:\n eval_epinfobuf.extend(eval_epinfos)\n\n # Here what we're going to do is for each minibatch calculate the loss and append it.\n mblossvals = []\n if states is None: # nonrecurrent version\n # Index of each element of batch_size\n # 
Create the indices array\n inds = np.arange(nbatch)\n for _ in range(noptepochs):\n # Randomize the indexes\n np.random.shuffle(inds)\n # 0 to batch_size with batch_train_size step\n for start in range(0, nbatch, nbatch_train):\n end = start + nbatch_train\n mbinds = inds[start:end]\n slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))\n mblossvals.append(model.train(lrnow, cliprangenow, *slices))\n else: # recurrent version\n assert nenvs % nminibatches == 0\n envsperbatch = nenvs // nminibatches\n envinds = np.arange(nenvs)\n flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)\n envsperbatch = nbatch_train // nsteps\n for _ in range(noptepochs):\n np.random.shuffle(envinds)\n for start in range(0, nenvs, envsperbatch):\n end = start + envsperbatch\n mbenvinds = envinds[start:end]\n mbflatinds = flatinds[mbenvinds].ravel()\n slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))\n mbstates = states[mbenvinds]\n mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))\n\n # Feedforward --> get losses --> update\n lossvals = np.mean(mblossvals, axis=0)\n # End timer\n tnow = time.time()\n # Calculate the fps (frame per second)\n fps = int(nbatch / (tnow - tstart))\n if update % log_interval == 0 or update == 1:\n # Calculates if value function is a good predicator of the returns (ev > 1)\n # or if it's just worse than predicting nothing (ev =< 0)\n ev = explained_variance(values, returns)\n logger.logkv(\"serial_timesteps\", update*nsteps)\n logger.logkv(\"nupdates\", update)\n logger.logkv(\"total_timesteps\", update*nbatch)\n logger.logkv(\"fps\", fps)\n logger.logkv(\"explained_variance\", float(ev))\n # logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))\n # logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))\n if eval_env is not None:\n logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )\n logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )\n logger.logkv('time_elapsed', tnow - tfirststart)\n for (lossval, lossname) in zip(lossvals, model.loss_names):\n logger.logkv(lossname, lossval)\n if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:\n logger.dumpkvs()\n if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and (MPI is None or MPI.COMM_WORLD.Get_rank() == 0):\n checkdir = osp.join(logger.get_dir(), 'checkpoints')\n os.makedirs(checkdir, exist_ok=True)\n savepath = osp.join(checkdir, '%.5i'%update)\n print('Saving to', savepath)\n model.save(savepath)\n return model\n# Avoid division error when calculate the mean (in our case if epinfo is empty returns np.nan, not return an error)\ndef safemean(xs):\n return np.nan if len(xs) == 0 else np.mean(xs)\n\n\ndef play(model):\n # Print all possible environments in the Pommerman registry\n # print(pommerman.registry)\n # Create a set of agents (exactly four)\n agent1 = PPOAgent()\n agent1.initialize(model)\n agent2 = PPOAgent()\n agent2.initialize(model)\n agent3 = PPOAgent()\n agent3.initialize(model)\n agent4 = PPOAgent()\n agent4.initialize(model)\n agent_list = [\n agent1,\n agent2,\n # agents.SimpleAgent(),\n agent3,\n agent4,\n # agents.SimpleAgent(),\n ]\n # Make the \"Free-For-All\" environment using the agent list\n env = make('PommeTeamCompetition-v0', agent_list)\n\n # Run the episodes just like OpenAI Gym\n for i_episode in range(1):\n state = env.reset()\n done = False\n while not done:\n actions = 
env.act(state)\n env.render()\n state, reward, done, info = env.step(actions)\n print('Episode {} finished'.format(i_episode))\n print(' Result {}'.format(info))\n # Result {'result': , 'winners': [0, 2]}\n # Result {'result': }\n env.close()\n\n\n\"\"\"\nif __name__ == '__main__':\n logger.configure('pom_log', ['stdout', 'tensorboard'])\n agent_list = [\n PPOAgent(),\n PPOAgent(),\n PPOAgent(),\n PPOAgent(),\n ]\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config):\n model = learn(network='cnn',\n env=make_vec_env(agent_list, 4),\n nsteps=8192,\n nminibatches=1024,\n log_interval=1,\n ent_coef=0.01,\n lr=lambda _: 2e-4,\n cliprange=lambda _: 0.1,\n # total_timesteps=int(1e7),\n total_timesteps=int(0),\n save_interval=10,\n load_path='00025',\n )\n play(model)\n\"\"\"\n","sub_path":"examples/ppo-agent/ppo_agent.py","file_name":"ppo_agent.py","file_ext":"py","file_size_in_byte":40140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"78345051","text":"import numpy as np\n\ndef getXMinuteTradeReturns(data, seconds):\n delta = 1000 * seconds\n \n nRecs = data.getN()\n lastTs = data.getTimestamp(0)\n lastPrice = data.getPrice(0)\n TradeReturns = []\n \n for startI in range( 1, nRecs ):\n timestamp = data.getTimestamp( startI )\n \n # check this\n if timestamp > (lastTs + delta):\n lastTs = lastTs + delta\n newPrice = data.getPrice( startI )\n \n TradeReturns.append( np.log(newPrice/lastPrice) )\n lastPrice = newPrice\n lastTs = timestamp\n \n return TradeReturns\n\ndef getXMinuteMidQuoteReturns(data, minutes):\n delta = 1000 * 60 * minutes\n \n nRecs = data.getN()\n lastTs = data.getTimestamp(0)\n lastMidQuote = (data.getAskPrice( 0 ) + data.getBidPrice( 0 )) / 2 \n midQuoteReturns = []\n for startI in range( 1, nRecs ):\n timestamp = data.getTimestamp( startI )\n \n # check this\n if timestamp > (lastTs + delta):\n lastTs = lastTs + delta\n #askPrice = data.getAskPrice( startI )\n #bidPrice = data.getBidPrice( startI ) \n midQuote = (data.getAskPrice( startI ) + data.getBidPrice( startI )) / 2 \n \n midQuoteReturns.append( np.log(midQuote/lastMidQuote) )\n lastMidQuote = midQuote\n lastTs = timestamp\n \n return midQuoteReturns\n","sub_path":"partB/xMinuteReturn.py","file_name":"xMinuteReturn.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"586867948","text":"from keras.models import Sequential\nfrom keras.layers import Dense\nimport keras.metrics\nimport pandas as pd\nimport numpy as np\nimport math\nimport statistics\nfrom keras.callbacks import CSVLogger\nfrom sklearn.model_selection import train_test_split\nimport os\nfrom keras.models import load_model\nfrom keras.utils import np_utils\n\n\nclass Prediction_Model:\n\n def __init__(self, project, release, all_projects=False, restart=False, cc_loc=True):\n\n self.nn_name = \"c2v_\"+self.target_column+\"_\"+project+\"_\"+str(release)\n if all_projects:\n self.nn_name = self.nn_name + \"_all\"\n self.project = project\n self.all_projects = all_projects\n self.cc_loc = cc_loc\n self.release = release\n\n if restart:\n self.restart()\n\n self.model = self.load_nn()\n\n def restart(self):\n try:\n os.remove(\"../files/nn_training/models/\"+self.nn_name+\".h5\")\n except:\n pass\n\n try:\n os.remove(\"../files/nn_training/pickle_barrel/X_train_\"+self.nn_name+\".pkl\")\n 
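# Clear the remaining cached train/test pickles so the next run rebuilds them from scratch.\n 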
os.remove(\"../files/nn_training/pickle_barrel/X_test_\"+self.nn_name+\".pkl\")\n os.remove(\"../files/nn_training/pickle_barrel/y_train_\"+self.nn_name+\".pkl\")\n os.remove(\"../files/nn_training/pickle_barrel/y_test_\"+self.nn_name+\".pkl\")\n except:\n pass\n\n def build_dataset(self):\n\n print(\"building dataset...\")\n\n #Assemble data\n projects = [self.project]\n if self.all_projects:\n projects = ['accumulo', 'bookkeeper', 'camel', 'cassandra', 'cxf', 'derby', 'hive', 'openjpa']\n dfs = []\n for p in projects:\n p_df = pd.read_csv(\"../files/\"+p+\"/train_data5.csv\")\n p_df['project'] = p\n\n buggy = p_df.loc[p_df['buggy']==1]\n clean = p_df.loc[p_df['buggy']==0]\n #Make sure not to load too many vectors or youll run out of memory\n #if p_df.loc[p_df['buggy']==0].shape[0] > 100000:\n # clean = p_df.loc[p_df['buggy']==0].sample(n=100000)\n\n dfs.append(buggy)\n if self.target_column == \"buggy\":\n dfs.append(clean)\n\n\n df = pd.concat(dfs)\n\n #Convert vector data to lists\n def make_vector_list(v):\n try:\n v = v.replace('\\n','').split(' ')\n return [float(i) for i in v]\n except:\n return [0.0]*384\n\n df['vector'] = df['vector'].apply(lambda v : make_vector_list(v))\n\n\n #Make sure exp has no na sort_values\n df['experience'] = df['experience'].fillna(0.0)\n df['priority'] = df['priority'].apply(lambda p : int(p-1))\n\n # Split data into train and test sets\n\n # If using all projects, only look at one release from each\n # Test on one project, train on the others\n if self.all_projects:\n df = df.loc[df['release_id']==self.release]\n test_set = df.loc[df['project']==self.project]\n train_set = df.loc[df['project']!=self.project]\n\n # If using one project, train on one release, test on the next\n else:\n df = df.loc[df['project']==self.project]\n test_set = df.loc[df['release_id']==self.release+1]\n train_set = df.loc[df['release_id']==self.release]\n\n # Specify inputs and outputs\n X_train = pd.DataFrame(train_set['vector'].to_list())\n y_train = train_set[self.target_column]\n X_test = pd.DataFrame(test_set['vector'].to_list())\n y_test = test_set[self.target_column]\n\n # If wanted, include cc and loc in the input data\n if self.cc_loc:\n\n X_train[len(X_train.columns)] = train_set['cc'].values\n X_train[len(X_train.columns)] = train_set['loc'].values\n X_test[len(X_test.columns)] = test_set['cc'].values\n X_test[len(X_test.columns)] = test_set['loc'].values\n\n # If the target is categorical, remake the y data so that the vector is spread throughout the columns\n if self.target_column == \"priority\":\n y_train = pd.DataFrame(np_utils.to_categorical(y_train, 5))\n y_test = pd.DataFrame(np_utils.to_categorical(y_test, 5))\n\n\n print(X_train.shape[1])\n\n\n\n #X_train = X_train.fillna(0.0)\n #X_test = X_test.fillna(0.0)\n\n #Only need to save the intra project sets, because inter project divisions are always the same\n #if not self.all_projects:\n #Save train and test sets\n #X_train.to_pickle(\"../files/nn_training/pickle_barrel/X_train_\"+self.nn_name+\".pkl\")\n #X_test.to_pickle(\"../files/nn_training/pickle_barrel/X_test_\"+self.nn_name+\".pkl\")\n #y_train.to_pickle(\"../files/nn_training/pickle_barrel/y_train_\"+self.nn_name+\".pkl\")\n #y_test.to_pickle(\"../files/nn_training/pickle_barrel/y_test_\"+self.nn_name+\".pkl\")\n\n\n with open(\"../method_counts.txt\", 'a') as f:\n print(self.nn_name, X_train.shape[0], X_test.shape[0])\n f.write(self.nn_name +\",\"+str(X_train.shape[0])+\",\"+str(X_test.shape[0])+\"\\n\")\n\n return X_train,X_test,y_train,y_test\n\n 
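# Loads the cached train/test split if the pickles exist; otherwise rebuilds it from the CSVs via build_dataset().\n 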
def get_dataset(self):\n\n #If the dataset has never been made, make it\n if not os.path.exists(\"../files/nn_training/pickle_barrel/X_train_\"+self.nn_name+\".pkl\"):\n return self.build_dataset()\n\n #If it has been made, load the saved dataset\n X_train = pd.read_pickle(\"../files/nn_training/pickle_barrel/X_train_\"+self.nn_name+\".pkl\")\n X_test = pd.read_pickle(\"../files/nn_training/pickle_barrel/X_test_\"+self.nn_name+\".pkl\")\n y_train = pd.read_pickle(\"../files/nn_training/pickle_barrel/y_train_\"+self.nn_name+\".pkl\")\n y_test = pd.read_pickle(\"../files/nn_training/pickle_barrel/y_test_\"+self.nn_name+\".pkl\")\n\n return X_train,X_test,y_train,y_test\n\n def train(self, nepochs):\n\n\n X_train,X_test,y_train,y_test = self.get_dataset()\n\n #Train model\n print(\"training...\")\n csv_logger = CSVLogger(\"../files/nn_training/training/nn_training_\"+self.nn_name+\".csv\", append=True)\n self.model.fit(X_train,y_train,validation_data = (X_test,y_test), epochs=nepochs, verbose=1, callbacks=[csv_logger])\n self.model.save(\"../files/nn_training/models/\"+self.nn_name+\".h5\")\n\n\nclass Buggy_Model(Prediction_Model):\n def __init__(self, project, release, all_projects=False , restart=False):\n self.target_column = \"buggy\"\n Prediction_Model.__init__(self, project, release, all_projects , restart)\n\n def test(self, print_result=False):\n X_train,X_test,y_train,y_test = self.get_dataset()\n\n y_pred = self.model.predict(X_test)\n\n pred = list()\n test = list()\n\n false_pos = 0.0\n false_neg = 0.0\n true_pos = 0.0\n true_neg = 0.0\n\n for i in range(len(y_pred)):\n\n prediction = int((y_pred[i][0]).round())\n actual = y_test.iloc[i]\n\n pred.append(prediction)\n test.append(actual)\n\n if prediction == 1:\n if actual == 1:\n true_pos += 1\n else:\n false_pos += 1\n else:\n if actual == 0:\n true_neg += 1\n else:\n false_neg += 1\n\n from sklearn.metrics import accuracy_score\n a = accuracy_score(pred,test)\n\n if (true_pos+false_pos) == 0:\n precision = 0\n else:\n precision = true_pos/(true_pos+false_pos)\n\n if (false_neg+true_pos) == 0:\n recall = 0\n else:\n recall = true_pos/(false_neg+true_pos)\n\n if print_result:\n print(\"False positives:\",false_pos)\n print(\"False negatives:\",false_neg)\n print(\"True positives:\",true_pos)\n print(\"True negatives:\",true_neg)\n print(\"precision:\",precision)\n print(\"recall:\",recall)\n print('Accuracy is:', a*100)\n\n return [precision,recall]\n\n def load_nn(self):\n if os.path.exists(\"../files/nn_training/models/\"+self.nn_name+\".h5\"):\n return load_model(\"../files/nn_training/models/\"+self.nn_name+\".h5\")\n\n input_dimensions = 384\n if self.cc_loc:\n input_dimensions = 386\n\n output_dimensions = 1\n\n model = Sequential()\n model.add(Dense(output_dimensions, input_dim=input_dimensions,activation='sigmoid'))\n model.compile(loss=\"binary_crossentropy\", optimizer='adam')\n return model\n\n\n\nclass Priority_Model(Prediction_Model):\n def __init__(self, project, release, all_projects=False , restart=False):\n self.target_column = \"priority\"\n Prediction_Model.__init__(self,project, release,all_projects ,restart)\n\n def test(self, print_result=False):\n X_train,X_test,y_train,y_test = self.get_dataset()\n\n #Convert priority data for softmax output\n priority_conversion_dict = {\n 1: [1,0,0,0,0],\n 2: [0,1,0,0,0],\n 3: [0,0,1,0,0],\n 4: [0,0,0,1,0],\n 5: [0,0,0,0,1]\n }\n y_pred = self.model.predict(X_test)\n #Converting predictions to label\n pred = list()\n test = list()\n for i in range(len(y_pred)):\n #if 
y_test.iloc[i].to_list() != [0,0,1,0,0]:\n pred.append(priority_conversion_dict[np.argmax(y_pred[i])+1])\n test.append(y_test.iloc[i].to_list())\n\n\n from sklearn.metrics import accuracy_score\n a = accuracy_score(pred,test)\n\n if print_result:\n print('Accuracy is:', a*100)\n\n return a\n\n def load_nn(self):\n if os.path.exists(\"../files/nn_training/models/\"+self.nn_name+\".h5\"):\n return load_model(\"../files/nn_training/models/\"+self.nn_name+\".h5\")\n\n input_dimensions = 384\n if self.cc_loc:\n input_dimensions = 386\n\n output_dimensions = 5\n\n model = Sequential()\n model.add(Dense(128, input_dim=input_dimensions,activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(output_dimensions,activation='softmax'))\n model.compile(loss=\"categorical_crossentropy\", optimizer='adam')\n\n return model\n\n\nclass Fix_Size_Model(Prediction_Model):\n def __init__(self, project, release, all_projects=False , restart=False):\n self.target_column = \"fix_size\"\n Prediction_Model.__init__(self,project,release,all_projects ,restart)\n\n def test(self, print_result=False):\n\n X_train,X_test,y_train,y_test = self.get_dataset()\n\n model = load_model(\"../files/nn_training/models/\"+self.nn_name+\".h5\")\n y_pred = model.predict(X_test)\n #Converting predictions to label\n pred = list()\n test = list()\n MAE = []\n for i in range(len(y_pred)):\n MAE.append(abs((y_pred[i][0]) - (y_test.iloc[i])))\n MAE = statistics.median(MAE)\n\n if print_result:\n print(\"Median Absolute Error:\",MAE)\n\n return MAE\n\n def load_nn(self):\n if os.path.exists(\"../files/nn_training/models/\"+self.nn_name+\".h5\"):\n return load_model(\"../files/nn_training/models/\"+self.nn_name+\".h5\")\n\n\n input_dimensions = 384\n if self.cc_loc:\n input_dimensions = 386\n\n output_dimensions = 1\n\n model = Sequential()\n model.add(Dense(128, input_dim=input_dimensions,activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(output_dimensions,activation='linear'))\n model.compile(loss=\"mean_squared_error\", optimizer='adam')\n\n return model\n\n\nclass Experience_Model(Prediction_Model):\n\n def __init__(self, project, release, all_projects=False , restart=False):\n self.target_column = \"experience\"\n Prediction_Model.__init__(self,project, release, all_projects ,restart)\n\n\n def test(self, print_result=False):\n\n X_train,X_test,y_train,y_test = self.get_dataset()\n\n\n model = load_model(\"../files/nn_training/models/\"+self.nn_name+\".h5\")\n y_pred = model.predict(X_test)\n #Converting predictions to label\n pred = list()\n test = list()\n MAE = []\n MSE = []\n for i in range(len(y_pred)):\n MAE.append(abs((y_pred[i][0]) - (y_test.iloc[i])))\n MSE.append(((y_pred[i][0]) - (y_test.iloc[i]))**2)\n MAE = statistics.median(MAE)\n MSE = statistics.mean(MSE)\n\n if print_result:\n print(\"Median Absolute Error:\",MAE)\n print(\"Mean Squared Error:\",MSE)\n\n return MAE\n\n\n def load_nn(self):\n if os.path.exists(\"../files/nn_training/models/\"+self.nn_name+\".h5\"):\n return load_model(\"../files/nn_training/models/\"+self.nn_name+\".h5\")\n\n input_dimensions = 384\n if self.cc_loc:\n input_dimensions = 386\n\n output_dimensions = 1\n model = Sequential()\n model.add(Dense(128, input_dim=input_dimensions,activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(output_dimensions,activation='linear'))\n model.compile(loss=\"mean_squared_error\", optimizer='adam')\n return 
model\n\n\n\n\n#\n","sub_path":"code2vec_models/python_models/8_c2v_models.py","file_name":"8_c2v_models.py","file_ext":"py","file_size_in_byte":13258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"224650149","text":"#!/usr/bin/env python3\r\n#\r\n\r\n\r\nimport time\r\nimport greenlet\r\nimport gevent\r\n\r\n\r\nclass YieldDemo():\r\n @staticmethod\r\n def funcA():\r\n while True:\r\n print('------func A-----')\r\n time.sleep(0.5)\r\n yield\r\n\r\n @staticmethod\r\n def funcB():\r\n while True:\r\n print('-----func B------')\r\n time.sleep(0.3)\r\n next(YieldDemo.funcA())\r\n\r\n @staticmethod\r\n def run():\r\n YieldDemo.funcB()\r\n\r\n\r\nclass GreentletDemo():\r\n @staticmethod\r\n def funcA():\r\n while True:\r\n print('------func A-----')\r\n time.sleep(0.5)\r\n # g2.switch()\r\n greenlet.greenlet(GreentletDemo.funcB).switch()\r\n\r\n @staticmethod\r\n def funcB():\r\n while True:\r\n print('-----func B------')\r\n time.sleep(0.3)\r\n # g1.switch()\r\n greenlet.greenlet(GreentletDemo.funcA).switch()\r\n\r\n @staticmethod\r\n def run():\r\n # g1 = greenlet.greenlet(GreentletDemo.funcA)\r\n # g2 = greenlet.greenlet(GreentletDemo.funcB)\r\n # g1.switch()\r\n greenlet.greenlet(GreentletDemo.funcA).switch()\r\n\r\n\r\nclass GeventDemo():\r\n @staticmethod\r\n def funcA():\r\n while True:\r\n print('-----func A------')\r\n gevent.sleep(0.5)\r\n\r\n @staticmethod\r\n def funcB():\r\n while True:\r\n print('-----func B------')\r\n gevent.sleep(0.3)\r\n\r\n @staticmethod\r\n def run():\r\n g1 = gevent.spawn(GeventDemo.funcA)\r\n g2 = gevent.spawn(GeventDemo.funcB)\r\n g1.join()\r\n g2.join()\r\n\r\n\r\nif __name__ == '__main__':\r\n # YieldDemo.run()\r\n\r\n # GreentletDemo.run()\r\n\r\n GeventDemo.run()\r\n","sub_path":"basic_/concurrent_/from_yield_to_greenlet_to_gevent.py","file_name":"from_yield_to_greenlet_to_gevent.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"334316031","text":"import cv2\r\n\r\ndetectorFace = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\r\nreconhecer = cv2.face.FisherFaceRecognizer_create()\r\nreconhecer.read(\"classificadorFisher.yml\")\r\nlargura,altura = 220,220\r\nfont = cv2.FONT_HERSHEY_COMPLEX_SMALL\r\ncamera = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n conectado, imagem = camera.read()\r\n imagemCinza = cv2.cvtColor(imagem,cv2.COLOR_BGR2GRAY)\r\n facesDetectadas = detectorFace.detectMultiScale(imagemCinza,scaleFactor=1.5,minSize=(30,30))\r\n\r\n for(x,y,l,a) in facesDetectadas:\r\n imagemFace = cv2.resize(imagemCinza[y:y+a,x:x+l],(largura,altura))\r\n cv2.rectangle(imagem,(x,y),(x+l,y+a),(0,0,255),2)\r\n id,confianca = reconhecer.predict(imagemFace)\r\n pessoa = \"\"\r\n if id == 1 :pessoa = \"Nego\"\r\n elif id == 2: pessoa = \"Nega\"\r\n\r\n cv2.putText(imagem,pessoa,(x,y+(a+30)),font,2,(0,0,255))\r\n cv2.putText(imagem,str(confianca),(x,y+(a+50)),font,1,(0,0,255))\r\n cv2.imshow(\"Face\",imagem)\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n\r\ncamera.release()\r\ncv2.destroyAllWindows()","sub_path":"Reconhecedor/ReconhecedorFisherFaces.py","file_name":"ReconhecedorFisherFaces.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"514413045","text":"import const_config\nimport dbms\n\nconstDBMS = const_config.get_dbms()\nconstSQLSelectByDay = \"SELECT SUBSTR(a.pubdate,11,9) pubdate, 
a.seq, a.title, a.postuser FROM tb_article a WHERE a.pubdate LIKE ? || '%' ORDER BY a.seq DESC\"\n\ndef selectArticleByDay(day):\n    print(\"Start selectArticleByDay\")\n    conn = dbms.connect.sqlite(constDBMS)\n    cur = conn.cursor()\n    cur.execute(constSQLSelectByDay, (day, ))\n    return cur.fetchall()\n\nif __name__ == '__main__':\n    # '2020-01-01' is only a placeholder day prefix for the demo query;\n    # the original called an undefined selectArticle() with no argument\n    for item in selectArticleByDay('2020-01-01'):\n        print(item)\n","sub_path":"db_article_list.py","file_name":"db_article_list.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"506707744","text":"# -*- encoding: utf-8 -*-\n__author__ = \"Chmouel Boudjnah \"\nimport datetime\n\n\ndef convert_iso_datetime(dt):\n    \"\"\"\n    Convert iso8601 to datetime\n    \"\"\"\n    isoFormat = \"%Y-%m-%dT%H:%M:%S+0000\"\n    if type(dt) is datetime.datetime:\n        return dt\n\n    if dt.endswith(\"Z\"):\n        dt = dt.split('Z')[0]\n        isoFormat = \"%Y-%m-%dT%H:%M:%S\"\n\n    return datetime.datetime.strptime(dt, isoFormat)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"601124542","text":"import urllib\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.forms import widgets\nfrom django.template.loader import render_to_string\n\n\nclass ModelChoiceWidget(widgets.TextInput):\n    \"\"\"\n    Model choice widget that uses an iframe popup to enable users\n    to select their foreign key value.\n    \"\"\"\n    #: The template used to render the widget.\n    template_name = 'django_cradmin/widgets/modelchoice.django.html'\n\n    #: The HTML input element type. Defaults to ``hidden``, but\n    #: you can change it to something like ``text`` (mostly useful for debugging).\n    input_type = 'hidden'\n\n    #: The default select-button text. 
You can override this in a subclass,\n #: or use the ``selectbutton_text``-argument for the constructor to\n #: change the button text.\n default_selectbutton_text = _('Select ...')\n\n def __init__(self, queryset, selectview_url, preview='',\n selectbutton_text=None):\n self.queryset = queryset\n self.preview = preview\n self.selectview_url = selectview_url\n self.selectbutton_text = selectbutton_text or self.default_selectbutton_text\n super(ModelChoiceWidget, self).__init__()\n\n # def get_object(self, pk):\n # return self.queryset.get(pk=pk)\n\n def _make_selectview_url(self, fieldid, current_value):\n return '{}?{}'.format(\n self.selectview_url, urllib.urlencode({\n 'foreignkey_select_current_value': current_value,\n 'foreignkey_select_fieldid': fieldid,\n }))\n\n def render(self, name, value, attrs=None):\n if value is None:\n value = ''\n fieldid = attrs['id']\n return render_to_string(self.template_name, {\n 'preview': self.preview,\n 'fieldname': name,\n 'fieldid': fieldid,\n 'fieldvalue': value,\n 'selectview_url': self._make_selectview_url(fieldid, value),\n 'selectbutton_text': self.selectbutton_text,\n 'input_type': self.input_type,\n })\n","sub_path":"django_cradmin/widgets/modelchoice.py","file_name":"modelchoice.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"282684848","text":"import pandas as pd\nimport numpy as np \nfrom surprise import Reader, Dataset, SVD \nimport gensim \nimport pickle \nfrom main import mongo \nimport warnings\nfrom pandas.core.common import SettingWithCopyWarning\n\nwarnings.simplefilter(action=\"ignore\", category=SettingWithCopyWarning)\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning) \n\n\n\n\n\n\nclass GETOS: \n #it takes 20s , but it will work fine later \n def __init__(self):\n self.books_data=pd.DataFrame.from_dict(mongo.db.books.find())\n self.users=pd.DataFrame.from_dict(mongo.db.users.find())\n self.books_rating=pd.DataFrame.from_dict(mongo.db.ratings.find())\n self.user_category=mongo.db.categories.find()\n self.docsim_index = pickle.load(open('SoftCosineSimilarity_final_WES2.pickle','rb'))\n reader = Reader(rating_scale=(1, 10))\n data = Dataset.load_from_df(self.books_rating[['User-ID', 'ISBN', 'Book-Rating']], reader)\n trainset = data.build_full_trainset()\n algo = SVD()\n algo.fit(trainset)\n self.algo=algo\n def refresh_value(self):\n #self.books_data=pd.DataFrame.from_dict(mongo.db.books.find())\n self.users=pd.DataFrame.from_dict(mongo.db.users.find())\n self.books_rating=pd.DataFrame.from_dict(mongo.db.ratings.find())\n\n \"\"\"reader = Reader(rating_scale=(1, 10))\n data = Dataset.load_from_df(self.books_rating[['User-ID', 'ISBN', 'Book-Rating']], reader)\n trainset = data.build_full_trainset()\n algo = SVD()\n algo.fit(trainset)\n self.algo=algo\"\"\"\n\n def refresh_value_logout(self):\n #to get new value in algo matrice (svd matrice)\n self.books_data=pd.DataFrame.from_dict(mongo.db.books.find())\n\n reader = Reader(rating_scale=(1, 10))\n data = Dataset.load_from_df(self.books_rating[['User-ID', 'ISBN', 'Book-Rating']], reader)\n trainset = data.build_full_trainset()\n algo = SVD()\n algo.fit(trainset)\n self.algo=algo\n\n\n def get_books(self):\n return self.books_data\n\n def get_users(self):\n return self.users\n\n def get_user_books_rated(self,user_id):\n \n usr_rat=self.books_rating[self.books_rating[\"User-ID\"]==user_id[\"User-ID\"]]\n \n #from the highest to the lowest\n 
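# --- illustrative sketch ---
# The ModelChoiceWidget record above calls urllib.urlencode, which only exists
# on Python 2. A minimal sketch of the same query-string construction on
# Python 3 (parameter names come from the widget; the helper name is ours):
from urllib.parse import urlencode

def make_selectview_url(selectview_url, fieldid, current_value):
    # Build "?foreignkey_select_current_value=...&foreignkey_select_fieldid=..."
    return '{}?{}'.format(selectview_url, urlencode({
        'foreignkey_select_current_value': current_value,
        'foreignkey_select_fieldid': fieldid,
    }))

# e.g. make_selectview_url('/select', 'id_node', 42)
# -> '/select?foreignkey_select_current_value=42&foreignkey_select_fieldid=id_node'
# --- end sketch ---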
usr_rat=usr_rat.sort_values(by=\"Book-Rating\",ascending=False)\n\n z=pd.merge(usr_rat[\"ISBN\"],self.books_data,on=\"ISBN\",how=\"inner\")\n \n return z\n\n def get_user_rating(self,user_id):\n return self.books_rating[self.books_rating[\"User-ID\"]==user_id]\n\n def demarage_froid(self,category,user):\n print(\" LA METHODE ACTUELLE : LES LIVRES LES PLUS POPULAIRE DE LA CATEGORY \\n\\t*********************************\\n\")\n books_data_df=self.books_data[self.books_data[\"Category\"].isin(category)]\n average_all=books_data_df[\"average_rating\"].mean()\n nb_vote_min=books_data_df[\"SommeRating\"].quantile(0.99)\n #on calcule les poids de chaque livre (qui depend de nombre des ratting et de la somme des rating)\n def weighted_rating(data,min=nb_vote_min,aver=average_all):\n ar=data[\"average_rating\"]\n vc=data[\"SommeRating\"]\n return (vc/(vc+min)*ar) + (min/(min+vc)*aver)\n books_data_df[\"weighted_score\"]=books_data_df.apply(weighted_rating,axis=1)\n books_data_df=books_data_df.sort_values(\"weighted_score\",ascending=False)\n \n \n #remove already rated books\n user_rat_bfr=(self.get_user_books_rated(user))[\"ISBN\"]\n user_rat_bfr=user_rat_bfr.tolist()\n \n books_data_df = books_data_df[~books_data_df['ISBN'].isin(user_rat_bfr)]\n \n return books_data_df[0:300]\n \n def wrdvec(self,user):\n print(\"** LA METHODE ACTUELLE : Word2Vec \\n\\t*********************************\\n\")\n bo=self.get_user_books_rated(user)\n user_rat_bfr=bo\n bo=bo[0:5]\n \n col=(\"ISBN\",\"rat\",\"sim\")\n array = np.empty((0,3)) \n for i,ro in bo.iterrows():\n test=(self.books_data[self.books_data[\"ISBN\"]==ro[\"ISBN\"]]).squeeze()\n sim=self.docsim_index[test[\"Text\"]]\n \n for i in range(1,11): # car le 0 cest notre livre donc \n row=self.books_data.iloc[sim[i][0]]\n array = np.append(array, np.array([[row[\"ISBN\"],row[\"average_rating\"],sim[i][1]]]), axis=0)\n \n df = pd.DataFrame(data=array, columns=col)\n df=df.sort_values(\"sim\",ascending=False)\n \n bb=pd.merge(df[\"ISBN\"],self.books_data,on=\"ISBN\",how=\"inner\")\n\n user_rat_bfr=(user_rat_bfr[\"ISBN\"]).tolist()\n bb = bb[~bb['ISBN'].isin(user_rat_bfr)]\n\n return bb\n \n def getsvd(self,category,user):\n\n print(\"** LA METHODE ACTUELLE : Pondération de SVD & Word2Vec \\n\\t*********************************\\n\")\n col=(\"ISBN\",\"note\")\n books_data_cb=self.books_data[self.books_data[\"Category\"].isin(category)]\n array = np.empty((0,2))\n for i,row in books_data_cb.iterrows():\n pred = (self.algo.predict(user[\"User-ID\"],row[\"ISBN\"]))\n array = np.append(array, np.array([[row[\"ISBN\"],pred[3]]]), axis=0) \n df = pd.DataFrame(data=array, columns=col)\n df = df.sort_values(\"note\",ascending=False)\n bb=pd.merge(df[\"ISBN\"],self.books_data,on=\"ISBN\",how=\"inner\")\n \n user_rat_bfr=(self.get_user_books_rated(user))[\"ISBN\"]\n user_rat_bfr=user_rat_bfr.tolist()\n #to remove already rated books from rec\n bb = bb[~bb['ISBN'].isin(user_rat_bfr)]\n \n return bb[0:300]\n\n def get_five_sim(self,book):\n\n print(\"\\n\\t****************************\\n calcule de 5 livre similaire a \\\"\",book[\"book_title\"],\"\\\" .\\n\\t****************************\\n\")\n\n col=(\"ISBN\",\"rat\",\"sim\")\n array = np.empty((0,3)) \n\n test=(self.books_data[self.books_data[\"ISBN\"]==book[\"ISBN\"]]).squeeze()\n sim=self.docsim_index[test[\"Text\"]]\n \n for i in range(1,11): # car le 0 cest notre livre donc \n row=self.books_data.iloc[sim[i][0]]\n array = np.append(array, np.array([[row[\"ISBN\"],row[\"average_rating\"],sim[i][1]]]), axis=0)\n \n 
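# --- illustrative sketch ---
# demarage_froid above scores items with the standard Bayesian ("IMDB")
# weighted average. A tiny self-contained worked example; all numbers here
# are illustrative only, not taken from the dataset:
def weighted_rating(avg_rating, votes, min_votes, global_mean):
    # Blend an item's own average with the global mean, in proportion to votes.
    return (votes / (votes + min_votes)) * avg_rating + \
           (min_votes / (min_votes + votes)) * global_mean

# weighted_rating(9.0, 50, min_votes=100, global_mean=6.5)
# -> (50/150)*9.0 + (100/150)*6.5 = 3.0 + 4.33... ~ 7.33
# --- end sketch ---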
df = pd.DataFrame(data=array, columns=col)\n df=df.sort_values(\"sim\",ascending=False)\n \n bb=pd.merge(df[\"ISBN\"],self.books_data,on=\"ISBN\",how=\"inner\")\n \n return bb[:5]\n\n def combine_svd_w2vc(self,category,user):\n res1=self.getsvd(category,user)\n res2=self.wrdvec(user)\n resultat=pd.merge(res1,res2,on=\"ISBN\")\n #resultat=resultat.drop_duplicates(subset=\"ISBN\")\n\n return resultat\n \n \n\n\n \n","sub_path":"recom/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":6725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"9197316","text":"import math\nimport pygame as game\nfrom pygame.math import Vector2\nimport time\nimport playerClass\nimport zombieClass\nimport opstacleClass\nimport Classes\nimport settings\n\nclass Bullet:\n def __init__(self, vel):\n self.pos = settings.player.bulletSpawn\n self.win = game.display.get_surface()\n\n #object properties\n size = (game.image.load(\"Images/PeterPanV3.png\").get_rect()[2], game.image.load(\"Images/PeterPanV3.png\").get_rect()[3])\n self.OriginalImage = game.transform.scale(game.image.load(\"Images/PeterPanV3.png\"), (int(size[0] * 0.6), int(size[1] * 0.6)))\n self.image = game.transform.rotate(self.OriginalImage, -settings.player.dirAngle).convert_alpha()\n self.imagerect = self.image.get_rect(center = self.pos)\n self.mask = game.mask.from_surface(self.image)\n self.velVector = [vel * math.cos(math.radians(settings.player.dirAngle)),\n vel * math.sin(math.radians(settings.player.dirAngle))]\n #hitbox\n self.HitBoxSize = Vector2(self.imagerect[2] * 0.9, self.imagerect[3] * 0.9)\n self.HitTargets = [opstacleClass.Opstacle]\n self.DamageTargets = [zombieClass.Zombie]\n\n def update(self):\n self.pos[0] += self.velVector[0]\n self.pos[1] += self.velVector[1]\n self.imagerect.center = self.pos\n\n if(self.pos[0] > 5000 + settings.player.pos[0] or self.pos[0] < -5000 + settings.player.pos[0]\n or self.pos[1] > 5000 + settings.player.pos[1] or self.pos[1] < -5000 + settings.player.pos[1]):\n settings.deleteMe.append(self)\n #settings.instances.remove(self)\n\n col = self.CheckCol()\n if(type(col) in self.HitTargets or type(col) in self.DamageTargets):\n settings.deleteMe.append(self)\n if(type(col) in self.DamageTargets):\n col.HP -= settings.bulletDamage\n if(col.HP <= 0):\n col.HP = 0\n\n def CheckCol(self):\n col = None\n for i in settings.instances:\n try:\n res = self.mask.overlap(i.mask, [int((i.pos[0] - i.imagerect[2] / 2) - (self.pos[0] - self.imagerect[2] / 2)),\n int((i.pos[1] - i.imagerect[3] / 2) - (self.pos[1] - self.imagerect[3] / 2))])\n if(res != None):\n col = i\n except:\n pass\n return col\n\n def draw(self):\n imagerect = self.imagerect\n imagerect[0] += settings.CameraPos[0]\n imagerect[1] += settings.CameraPos[1]\n self.win.blit(self.image, imagerect)\n def DrawMe(self):\n game.draw.rect(self.win, (255, 0, 0), (self.imagerect[0] + settings.CameraPos[0], self.imagerect[1] + settings.CameraPos[1], self.imagerect[2], self.imagerect[3]))\n game.draw.rect(self.win, (0, 255, 255), (self.pos[0] - self.HitBoxSize[0] / 2 + settings.CameraPos[0],\n self.pos[1] - self.HitBoxSize[1] / 2 + settings.CameraPos[1],\n self.HitBoxSize[0], self.HitBoxSize[1]))\n game.draw.circle(self.win, (0, 255, 0), [int(self.pos[0] + settings.CameraPos[0]), int(self.pos[1] + settings.CameraPos[1])], 10)\n\nclass Flash:\n def __init__(self, win, surviveTime):\n self.win = win\n self.pos = settings.player.bulletSpawn\n\n #time\n self.surviveTime = surviveTime\n self.t = 
time.time()\n\n #image\n size = (game.image.load(\"Images/muzzleFlash.png\").get_rect()[2], game.image.load(\"Images/muzzleFlash.png\").get_rect()[3])\n self.OriginalImage = game.transform.scale(game.image.load(\"Images/muzzleFlash.png\"), (int(size[0] * 0.4), int(size[1] * 0.4)))\n self.image = game.transform.rotate(self.OriginalImage, -settings.player.dirAngle)\n self.imagerect = self.image.get_rect(center = self.pos)\n\n def update(self):\n if(time.time() - self.t > self.surviveTime):\n settings.deleteMe.append(self)\n #settings.instances.remove(self)\n\n def DrawMe(self):\n pass\n\n def draw(self):\n self.win.blit(self.image, (self.imagerect[0] + settings.CameraPos[0], self.imagerect[1] + settings.CameraPos[1], self.imagerect[2], self.imagerect[3]))\n","sub_path":"ResizeableZombie/bulletClass.py","file_name":"bulletClass.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"54102970","text":"from datetime import datetime\nfrom tzlocal import get_localzone\n\nfrom vnpy.trader.object import HistoryRequest\nfrom vnpy.trader.database import database_manager\nfrom vnpy.trader.constant import Interval, Exchange\n\nfrom vnpy_pro.data.source.tdxdata import tdxdata_client\nfrom vnpy_pro.data.tdx.tdx_common import get_future_contracts\n\n# 下载合约\nfutures = [\"rb2105.SHFE\"]\n# futures = [\"AG\", \"AP\", \"AU\", \"BU\", \"CF\", \"CU\", \"JD\",\n# \"MA\", \"NI\", \"OI\", \"P\", \"RB\", \"RU\", \"SR\", \"TA\"]\n\nif tdxdata_client.init():\n print(\"数据服务器登录成功\")\nelse:\n print(\"数据服务器登录失败\")\n import sys\n sys.exit(0)\n\nfor future in futures:\n _future = future.split(\".\")\n symbol = _future[0]\n exchange = Exchange.__dict__[_future[1]]\n interval = Interval.MINUTE\n\n # 查询数据库中的最新数据\n # start = datetime(2010, 6, 1)\n # 增量更新数据\n bar = database_manager.get_newest_bar_data(symbol, exchange, interval)\n if bar:\n start = bar.datetime\n else:\n start = datetime(2020, 11, 1)\n\n # 下载数据\n req = HistoryRequest(\n symbol,\n exchange,\n start,\n datetime.now(),\n interval=interval\n )\n data = tdxdata_client.query_history(req)\n\n # 写入数据库\n if data:\n database_manager.save_bar_data(data)\n print(f\"{symbol}更新完成:{data[0].datetime} -- {data[-1].datetime},总计 {len(data)} 条...\")\n\nprint(\"数据更新完毕\")\n# 更新contracts字典\n# tdxdata_client.tdx_api.update_mi_contracts()\n","sub_path":"worktable2/jobs/download_data.py","file_name":"download_data.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"489691465","text":"# if you want to use ec2 instance, please use following commands in cli to load chrome driver;\n# bash:\n\n# curl https://intoli.com/install-google-chrome.sh | bash\n# google-chrome --version\n# wget https://chromedriver.storage.googleapis.com/88.0.4324.96/chromedriver_linux64.zip\n# sudo yum install unzip\n# unzip chromedriver_linux64\n\nfrom selenium import webdriver\ndriver = webdriver.Chrome(executable_path=\"/Users/m.altun/clarus/devops/04feb_selenium/second_test/chromedriver\")\nbase_url = \"https://clarusway.com/\"\nexpected_title = \"Online Career IT Training School - Clarusway\"\nactual_title = \"\"\ndriver.get(base_url)\nactual_title = driver.title\n\npage_source = driver.page_source\nprint(\"page source : \\n\", page_source)\n\n# if actual_title == expected_title:\n# print(\"Test Passed\")\n# else:\n# print(\"Test Failed\")\n# 
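# --- illustrative sketch ---
# The vnpy download script above resumes from the newest bar already stored in
# the database. A minimal dependency-free sketch of that incremental-start
# rule (the bar object and default date mirror the script; the helper is ours):
from datetime import datetime

def resume_start(newest_bar, default=datetime(2020, 11, 1)):
    # Continue from the last saved bar when one exists, else backfill from the default.
    return newest_bar.datetime if newest_bar is not None else default
# --- end sketch ---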
print(actual_title)\n\ndriver.quit()\n","sub_path":"selenium/second_test/fourth_test.py","file_name":"fourth_test.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"159121787","text":"import numpy as np\nfrom cv2 import cv2\n\ndef image_detect():\n\n    img = cv2.imread('media/images/sachin.jpg') \n    detect_faces(img)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n\ndef video_detect():\n\n    cap = cv2.VideoCapture('media/videos/1.MP4')\n    while(cap.isOpened()):\n        ret,frame = cap.read()\n        detect_faces(frame)\n        if cv2.waitKey(10) & 0xFF == ord('q'):\n            break\n\n    cap.release()\n    cv2.destroyAllWindows()\n\ndef cam_detect():\n    cap = cv2.VideoCapture(0)\n    while(True):\n        ret, frame = cap.read()\n        detect_faces(frame)\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            break\n\n    cap.release()\n    cv2.destroyAllWindows()\n\ndef detect_faces(frame):\n    face_cascade = cv2.CascadeClassifier('src/hf.xml')\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n    for (x,y,w,h) in faces:\n        gray = cv2.rectangle(gray,(x,y),(x+w,y+h),(255,0,0),2)\n    cv2.imshow('frame',gray)","sub_path":"src/face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"223861771","text":"import numpy as np\r\nimport random\r\n\r\n# load the data\r\ndef loadDataSet():\r\n    trainMat = []; testMat = []\r\n    trainlabelMat = []; testlabelMat = []\r\n    fr = open('题目4个人收入预测.csv')\r\n    t=0  # t tracks which sample is currently being read\r\n    for line in fr.readlines():\r\n        currline = line.strip().split(',')\r\n        lineArr = []\r\n        lineArr.append(1.0)  # append a constant 1.0 feature to every sample; its coefficient acts as the intercept\r\n        if t < 3000:  # the first 3000 samples form the training set\r\n            for i in range(1,58):  # skip the first column: it is an ID and should be irrelevant to the label\r\n                lineArr.append(float(currline[i]))\r\n            trainMat.append(lineArr) \r\n            trainlabelMat.append(float(currline[58]))\r\n        else:  # the remaining 1000 samples form the test set\r\n            for i in range(1,58):\r\n                lineArr.append(float(currline[i]))\r\n            testMat.append(lineArr)\r\n            testlabelMat.append(float(currline[58]))\r\n        t = t+1\r\n    return trainMat,trainlabelMat,testMat,testlabelMat\r\n\r\n# normalization\r\ndef dataprocess(trainMat):\r\n    trainMat = np.mat(trainMat)\r\n    m,n = trainMat.shape\r\n    for i in range(n):\r\n        meanval = np.mean(trainMat[:,i])  # mean of each feature\r\n        stdval = np.std(trainMat[:,i])  # standard deviation of each feature\r\n        if stdval != 0.0:  # guard against zero std: dividing by zero during normalization is meaningless\r\n            trainMat[:,i] = (trainMat[:,i] - meanval)/stdval  # standardize when std is non-zero\r\n        else:\r\n            trainMat[:,i] = 1  # if std is zero, set the whole feature to 1\r\n    return trainMat\r\n\r\n# sigmoid function\r\ndef sigmoid1(inx):\r\n    return 1.0/(1+np.exp(-inx))\r\n\r\n# gradient-descent optimizer\r\ndef graddec(trainMat,trainlabelMat):\r\n    dataMat = np.mat(trainMat)\r\n    labelMat = np.mat(trainlabelMat).transpose()\r\n    m,n = np.shape(dataMat)\r\n    alpha = 0.1  # learning rate\r\n    maxcycles = 200  # number of iterations\r\n    weights = np.zeros((n,1))  # initialize all coefficients to zero\r\n    loss = []  # loss values\r\n    for k in range(maxcycles):\r\n        #alpha = 8/(k+1)+0.1  # dynamically adjust the learning rate\r\n        h = sigmoid1(dataMat * weights)\r\n        error = h - labelMat\r\n        weights = weights - alpha /m * np.transpose(dataMat) * error  # without regularization\r\n        # weights = (1 - alpha * u/m) * weights - alpha /m * np.transpose(dataMat) * error  # with regularization\r\n        if (k+1) % 10 == 0: \r\n            loss.append(- float((labelMat.T * np.log(h + 0.0001) + (1 - labelMat.T) * np.log(1.0001 - h))) /m)  # value of the loss function\r\n    return weights,loss\r\n\r\n# stochastic gradient-descent variant\r\ndef stocgraddec(trainMat,trainlabelMat,numIter):\r\n    dataMat = np.mat(trainMat)\r\n    m,n = np.shape(dataMat)\r\n    weights = np.zeros((n, 1))\r\n    for i in range(numIter):\r\n        dataIndex = list(range(m))\r\n        loss = 0\r\n        for j in range(m):\r\n            alpha = 0.01\r\n            randIndex = int(random.uniform(0, len(dataIndex)))\r\n            h = sigmoid1(sum(dataMat[randIndex] * weights))\r\n            error = sum(h - trainlabelMat[randIndex])\r\n            weights = weights - alpha /m * float(error) * np.transpose(dataMat[randIndex])\r\n            del(dataIndex[randIndex])\r\n            if (i + 1) % 100 ==0:\r\n                loss += -float((trainlabelMat[randIndex] * np.log(h + 0.0001) + (1 - trainlabelMat[randIndex]) * np.log(1.0001 - h)))\r\n    \"\"\"\r\n    if (i + 1) % 100 ==0:\r\n        print(loss/m)\r\n    \"\"\"\r\n    return weights \r\n\r\n\r\n# decision function\r\ndef classify(x,weights):\r\n    prob = sigmoid1(sum(x * weights))\r\n    if prob > 0.5 : return 1.0  # label the sample as class 1 when its sigmoid output exceeds 0.5\r\n    else : return 0.0\r\n\r\n# evaluation\r\ndef incometest(testMat,testlabelMat,weights):\r\n    errorcount = 0; numcount = 1000.0\r\n    for i in range(1000):\r\n        if classify(np.array(testMat[i]),weights) != testlabelMat[i]:\r\n            errorcount += 1  # count an error when the prediction differs from the true label\r\n    truerate = (numcount - errorcount)/numcount  # accuracy\r\n    print(truerate)\r\n    return truerate\r\n\r\n","sub_path":"logistic回归/logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"439242729","text":"import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\n\nimport keras\nfrom keras.callbacks import Callback\nfrom model import *\nfrom preprocessor import get_shi, get_vocab, load_nlpcc\nimport numpy as np\n\nM_WORD = 60 #max number of words\n\n# callback that prints a generated sample during training\n\nclass Evaluate(Callback):\n    def __init__(self, generator, latent_dim, id2char, n):\n        self.log = []\n        self.gen = generator\n        self.latent_dim = latent_dim\n        self.id2char = id2char\n        self.n = n\n    def on_epoch_end(self, epoch, logs=None):\n        self.log.append(gen(self.gen, self.latent_dim, self.id2char, self.n))\n        print( (' %s'%(self.log[-1])) )\n\n\nif __name__==\"__main__\":\n#    n = 5  # only use five-character verses\n    n = int(M_WORD / 2)\n    latent_dim = 384  # latent-variable dimension\n    hidden_dim = 384  # hidden-layer size\n    \n#    shi = get_shi()\n    shi, test = load_nlpcc(M_WORD)\n    \n\n    id2char, char2id = get_vocab(shi+test)\n    embedding_len = len(id2char)\n    \n    # convert the poems to id sequences\n    shi2id = [[char2id[j] for j in i] for i in shi]\n    shi2id = np.array(shi2id)\n    \n    vae,generator = VAEModel(embedding_len, hidden_dim, latent_dim, n)\n    \n    #vae.load_weights('shi.model')\n    \n    evaluator = Evaluate(generator, latent_dim, id2char, n)\n    \n    #tensorboard\n    tbCallBack = keras.callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)\n\n    vae.fit(shi2id,\n            shuffle=True,\n            epochs=500,\n            batch_size=512,\n            callbacks=[evaluator, tbCallBack])\n\n    vae.save_weights('shi.model')\n\n    for i in range(20):\n        print(gen(generator, latent_dim, id2char,n))","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"481350516","text":"#create a loop that lets the user enter 10 numbers. Show how many are even and\n#how many are odd\npar=0\nimpar=0\nfor i in range(10):\n    nume=int(input(\"Digite un numero: \"))\n    if(nume%2==0):\n        par=par+1\n    else:\n        impar=impar+1\nprint(\"La cantidad de numeros pares es: \" + str(par))\nprint(\"La cantidad de numeros impares es: \"+ str(impar))\n\n","sub_path":"ciclofor1.py","file_name":"ciclofor1.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"211330086","text":"from flask import Flask, render_template, request\n \nimport pickle\n\napp = Flask(__name__)\n\n\n# open the file where the trained model is stored\nfile = open('model.pkl', 'rb')\nclf=pickle.load(file)\nfile.close()\n\n@app.route('/',methods=[\"GET\",\"POST\"])\ndef hello_world():\n    if request.method==\"POST\":\n        #print(request.form)\n        myDict=request.form\n        bodyTemp=int(myDict['bodyTemp'])\n        bodyPain=int(myDict['bodyPain'])\n        runnyNose=int(myDict['runnyNose'])\n        diffBreath=int(myDict['diffBreath'])\n        o2Saturation=int(myDict['o2Saturation'])\n        travelHistory=int(myDict['travelHistory'])\n        age=int(myDict['age'])\n        LossofTasteSmell=int(myDict['LossofTasteSmell'])\n        vomiting=int(myDict['vomiting'])\n        Diarrhea=int(myDict['Diarrhea'])\n        # Code for inference\n        inputFeatures = [[bodyTemp,bodyPain,runnyNose,diffBreath,o2Saturation,travelHistory,age,LossofTasteSmell,vomiting,Diarrhea]]\n        infProb=clf.predict_proba(inputFeatures)[0][1]\n        #print(infProb)\n        return render_template('show.html',inf=round(infProb*100))\n\n\n        #return 'Hello, world Hari Om!'+ str(infProb)\n    return render_template('index.html')\n\nif __name__=='__main__':\n    app.run(debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"116493503","text":"\n\n#class header\nclass _HOBNOB():\n\tdef __init__(self,): \n\t\tself.name = \"HOBNOB\"\n\t\tself.definitions = [u'to spend time being friendly with someone who is important or famous: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_hobnob.py","file_name":"_hobnob.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"332498635","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: tianxiaomo\n@license: (C) Apache.\n@contact: huguanghao520@gmail.com\n@software: PyCharm\n@file: cfg.py\n@time: 2019/4/9 17:32 \n@desc:\n'''\ncfg = {\n    'maxlen':None,\n    'vocab':10000,\n    'word_dim':100,\n    'num_pg':22,\n    'num_bound':10,\n    'num_pinyin':56,\n    'num_radical':100,\n    'num_tags':28*3+2,\n    'unit1':120,\n    'unit2':120,\n\n    'train_batch_size':128,\n}","sub_path":"kg_step2/cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"303540866","text":"import numpy as np\nimport ubelt as ub\nimport kwarray\nfrom kwarray import distributions as dmod\n\n\ndef test_rng_case1():\n    \"\"\"\n    Reproduce a bug from kwarray.__version__ < 0.6.13\n    \"\"\"\n    rng = 0\n    rng = kwarray.ensure_rng(rng)\n    a = dmod.Distribution.random(rng=rng)\n    
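# --- illustrative sketch ---
# The Flask app above feeds a 10-feature row into clf.predict_proba. A minimal
# offline sketch of the same inference step; the model path comes from the app,
# and treating the pickle as an sklearn-style classifier is an assumption:
import pickle

def infection_probability(features):
    # features: list of 10 ints, in the same order the form fields are read
    with open('model.pkl', 'rb') as f:
        clf = pickle.load(f)
    # predict_proba returns [[P(class 0), P(class 1)]]; return P(class 1)
    return clf.predict_proba([features])[0][1]
# --- end sketch ---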
print(a)\n values2 = a.sample(10)\n assert np.allclose(values1, values2)\n\n\ndef test_normal_distribution_with_random_seed():\n rng = kwarray.ensure_rng(0)\n distri = dmod.Normal(rng=rng)\n values1 = distri.sample(10)\n print('values1 = {}'.format(ub.urepr(values1, nl=1)))\n\n rng = kwarray.ensure_rng(0)\n distri = dmod.Normal(rng=rng)\n values2 = distri.sample(10)\n print('values2 = {}'.format(ub.urepr(values2, nl=1)))\n assert np.allclose(values1, values2)\n","sub_path":"tests/test_distributions.py","file_name":"test_distributions.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"170046409","text":"def expected_dominant_offspring(pop, p_d):\n \"\"\" Compute the number of expected number of offspring with dominant allele. \"\"\"\n return sum([pop[k] * p_d[k] * 2 for k in pop.keys()])\n\n# Population\npop = {\"AA-AA\": 17489, \"AA-Aa\": 17277, \"AA-aa\": 19870, \"Aa-Aa\": 18298, \"Aa-aa\": 16788, \"aa-aa\": 18533}\n\n# Probability of yielding a child with a dominant allele by genotype of parents\np_d = {\"AA-AA\": 1, \"AA-Aa\": 1, \"AA-aa\": 1, \"Aa-Aa\": 3/4, \"Aa-aa\": 1/2, \"aa-aa\": 0}\n\nsolution = expected_dominant_offspring(pop, p_d)\nprint(solution)\n","sub_path":"bioinformatics_stronghold/iev/iev.py","file_name":"iev.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"614864897","text":"from celery import shared_task\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom requests.exceptions import ConnectionError\n\nfrom .product import product_task\nfrom ..models import Page, Product\nfrom ..parsers import PageParser\n\n\n@shared_task(bind=True, max_retries=3)\ndef page_task(self, page_id):\n \"\"\"Page task\"\"\"\n page = None\n\n try:\n page = Page.objects.get(pk=page_id)\n\n # set status\n page.status = Page.STATUS_CHOICE_PROGRESS\n page.save()\n\n page_parser = PageParser(page_url=page.page_url)\n\n # set product.is_active False\n Product.objects.filter(category=page.category, page=page).update(is_active=False)\n\n # save new links and update old ones\n links = page_parser.get_links()\n for link in links:\n Product.objects.update_or_create(category=page.category, link=link,\n defaults={\n 'page': page,\n 'status': Product.STATUS_CHOICE_NEW,\n 'is_active': True,\n })\n\n # get all products from page for parsing\n products = list(Product.objects.filter(category=page.category, page=page))\n\n recycle = True\n slice_count = 10\n product_ids = []\n\n while recycle:\n # collect product ids\n for i in range(slice_count):\n try:\n product_ids.append(products.pop().pk)\n except IndexError:\n recycle = False\n break\n\n if not product_ids:\n break\n\n # run task\n product_task.delay(product_ids=product_ids)\n product_ids = []\n\n # set status\n page.status = Page.STATUS_CHOICE_DONE\n page.save()\n except ConnectionError:\n if page and hasattr(page, 'save'):\n # set status\n page.status = Page.STATUS_CHOICE_ERROR\n page.save()\n\n # retry task\n self.retry(countdown=10)\n except ObjectDoesNotExist:\n pass\n","sub_path":"apps/fcmoto/tasks/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"364970996","text":"\r\nfrom __future__ import division\r\nimport string\r\nimport re\r\nfrom collections import Counter\r\nimport numpy\r\nimport matplotlib.pyplot as plt\r\nfrom 
sqlalchemy.engine import create_engine\r\nimport os\r\n\r\n### Constants: ###\r\nlambda_value = 0.1\r\nprior_class_probability_good = 0.6\r\nprior_class_probability_bad = 0.4\r\nalpha = 100\r\nbeta = 5\r\niters = 1000\r\nmax_epochs = 100\r\n\r\n'''\r\nString -> ListofWords\r\nGIVEN: A filename\r\nRETURNS: The list of words from that file, removing the punctions\r\n and in lower case\r\n'''\r\ndef analyzer(filename):\r\n tokens=[]\r\n i=0\r\n review=open(filename,'r')\r\n with open(filename, encoding=\"utf8\", errors='ignore') as f:\r\n for line in f:\r\n if i < 10000:\r\n words = [x.strip(string.punctuation) for x in line.lower().split()]\r\n tokens=tokens+words\r\n i=i+1\r\n else:\r\n break\r\n return tokens\r\n\r\n'''\r\nListOfWords ListofWords -> Integer\r\nGIVEN: Two list of words\r\nRETURNS: The Counter and total vocabulary count from the two list of words\r\n'''\r\ndef get_vocab_size(bad_tokens,good_tokens,test_tokens):\r\n overall_vocab=bad_tokens+good_tokens+test_tokens\r\n count = {}\r\n count = Counter(overall_vocab)\r\n return count,len(count)\r\n\r\n'''\r\nGIVEN: No arguments\r\nRETURNS: The dictionary of test tweets\r\n'''\r\ndef get_test_data():\r\n with open(\"tweets.txt\") as f:\r\n content = f.readlines()\r\n content = [x.strip() for x in content]\r\n import string\r\n dictionary_of_test_data={}\r\n i=0\r\n for line in content:\r\n\r\n words=[x.strip(string.punctuation) for x in line.lower().split()]\r\n dictionary_of_test_data[\"Tweet\"+str(i)]=words\r\n i=i+1\r\n return dictionary_of_test_data\r\n\r\n'''\r\nDictionary -> Dictionary\r\nGIVEN: The test dictionary\r\nRETURNS: Performs Naive_Bayes to the test data and returns the classification\r\n of test data as belonging to Abusive or Non Abusive Tweets in\r\n a dictionary of probabilities corresponding to that tweet\r\n good - corresponds to non abusive tweets\r\n bad - corresponds to abusive tweets\r\nFORMULA: P(C|X) = P(X|C) * P(C) / P(X)\r\n'''\r\ndef naive_bayes(dictionary_of_test_data):\r\n classification={}\r\n for test in dictionary_of_test_data.keys():\r\n tweet=dictionary_of_test_data[test]\r\n good_prob = get_probability(tweet,0)\r\n bad_prob= get_probability(tweet,1)\r\n classification[test]=[good_prob,bad_prob]\r\n return classification\r\n\r\n'''\r\nDictionary -> File\r\nGIVEN: The dictionary containing probability values\r\n of tweets after Naive Bayes and the threshold for classification\r\nRETURNS: The results of tweets (as good or bad) onto\r\n Naive_Bayes_Model_Results.txt and the same as\r\n a dictionary\r\n'''\r\ndef get_NB_results(NB_Classification, threshold):\r\n f = open(\"Naive_Bayes_Model_Results.txt\",'w')\r\n answer_dict={}\r\n for key in NB_classification.keys():\r\n value = NB_classification[key]\r\n ratio = value[0]/value[1]\r\n if ratio < threshold:\r\n answer_dict[key]=\"Good\"\r\n f.write(key+\" \"+\"Good\"+\"\\n\")\r\n else:\r\n answer_dict[key]=\"Bad\"\r\n f.write(key+\" \"+\"Bad\"+\"\\n\")\r\n f.close()\r\n return answer_dict\r\n\r\n'''\r\nDictionary ListofWords -> Float Float Float Float\r\nGIVEN: The model output and expected output\r\nRETURNS: The number of true positives, true negatives,\r\n false positives, false negatives\r\n'''\r\ndef get_PR(NB_results,expected_output):\r\n tp=1\r\n tn=1\r\n fp=1\r\n fn=1\r\n for key in NB_results.keys():\r\n model_outcome = NB_results[key]\r\n expected_output_index = int(key.split('et')[1])\r\n expected_outcome = expected_output[expected_output_index]\r\n if model_outcome == 'Bad' and expected_outcome == 'Bad':\r\n tp = tp + 1\r\n elif 
model_outcome == 'Good' and expected_outcome == 'Bad':\r\n fn = fn + 1\r\n elif model_outcome == 'Bad' and expected_outcome == 'Good':\r\n fp = fp + 1\r\n else:\r\n tn = tn + 1\r\n return tp,tn,fp,fn\r\n\r\n'''\r\nGIVEN: No arguments\r\nRESULTS: reads the expected output from Test_Answers.txt\r\n with line number corresponding to tweet number\r\n and the string corresponding to its classified\r\n class\r\n'''\r\ndef read_actual_answers():\r\n f=open(\"Test_Answers.txt\",'r')\r\n lines = f.readlines()\r\n answers=[]\r\n for i in range(len(lines)):\r\n answers=answers+[lines[i].split('\\n')[0]]\r\n return answers\r\n\r\n'''\r\nListofWords Integer -> Float\r\nGIVEN: A tweet as a list of words and a number to denote the probability\r\n class (abusive or not) that we are trying to calculate\r\n num = 0 means non abusive tweets\r\n num = 1 means abusive tweets\r\nRETURNS: The probability of tweet as belonging to that particular class\r\n'''\r\ndef get_probability(tweet,num):\r\n prob=1\r\n if num==0:\r\n for word in tweet:\r\n if word in prob_good_words.keys():\r\n term1=prob_good_words[word]\r\n term2=prior_class_probability_good\r\n term3=vocab_counter[word]/vocab_size\r\n prob = prob * (term1 * term2) / term3\r\n else:\r\n term1=1/len(prob_good_words)\r\n term2=prior_class_probability_good\r\n term3=vocab_counter[word]/vocab_size\r\n prob = prob * (term1 * term2) / term3\r\n return prob\r\n else:\r\n for word in tweet:\r\n if word in prob_bad_words.keys():\r\n term1=prob_bad_words[word]\r\n term2=prior_class_probability_bad\r\n term3=vocab_counter[word]/vocab_size\r\n prob = prob * (term1 * term2) / term3\r\n else:\r\n term1=1/len(prob_bad_words)\r\n term2=prior_class_probability_bad\r\n term3=vocab_counter[word]/vocab_size\r\n prob = prob * (term1 * term2) / term3\r\n return prob\r\n\r\n'''\r\nDictionary Integer -> Dictionary\r\nGIVEN: a dictionary with (words & their counts) in a particular class\r\n and total number of words in that particular class\r\nRETURNS: A probabilistic dictionary of those words\r\n'''\r\ndef get_prob_dict(tokens_count, length):\r\n count={}\r\n for key in tokens_count.keys():\r\n count[key]=(tokens_count[key]+lambda_value)/(length + lambda_value * vocab_size)\r\n return count\r\n\r\n\r\ndef save_summaryobject (table, row):\r\n\r\n keys = row.keys();\r\n sql = \"INSERT INTO \" + table + \" (\"\r\n sql = sql + \", \".join(keys)\r\n sql = sql + \") VALUES (\"\r\n sql = sql + \", \".join([ (\"'\" + str(row[key]) + \"'\") for key in keys])\r\n sql = sql + \")\"\r\n\r\n id = connection.execute(sql);\r\n\r\n return id\r\n\r\n\r\n### main() portion of the program ###\r\nprint (\"starting main\")\r\n##\r\n##Analyse the bad_corpus.txt, getting all the word counts\r\n##and write them on to bad_counts.txt\r\n##\r\nbad_tokens=analyzer('bad_corpus.txt')\r\ntotal_bad_words = len(bad_tokens)\r\nbad_tokens_count = Counter(bad_tokens)\r\nwith open('bad_counts.txt','w')as g:\r\n for key in bad_tokens_count.keys():\r\n g.write(key+\" \"+str(bad_tokens_count[key]))\r\n g.write(\"\\n\")\r\n\r\n##\r\n##Analyse the good_corpus.txt, getting all the word counts\r\n##and write them on to good_counts.txt\r\n##\r\ngood_tokens=analyzer('good_corpus.txt')\r\ntotal_good_words = len(good_tokens)\r\ngood_tokens_count = Counter(good_tokens)\r\nwith open('good_counts.txt','w', encoding=\"utf8\", errors='ignore')as g:\r\n for key in good_tokens_count.keys():\r\n g.write(key+\" \"+str(good_tokens_count[key]))\r\n g.write(\"\\n\")\r\n\r\n## get test data in the form of a dictionary of tweets 
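# --- illustrative sketch ---
# get_prob_dict above is Laplace/Lidstone smoothing. A tiny worked example
# using the script's lambda = 0.1 and an assumed vocabulary of 5 words:
def smoothed_prob(count, total_words, vocab_size, lam=0.1):
    # P(word | class) = (count + lambda) / (total + lambda * |V|)
    return (count + lam) / (total_words + lam * vocab_size)

# smoothed_prob(3, 20, 5) -> (3 + 0.1) / (20 + 0.5) = 3.1 / 20.5 ~ 0.1512
# --- end sketch ---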
##\r\ndictionary_of_test_data=get_test_data()\r\n\r\n##\r\n##Analyse the test_data.txt, getting all the word counts\r\n##and write them on to test_counts.txt\r\n##\r\ntest_tokens=analyzer(\"tweets.txt\")\r\ntotal_test_words = len(test_tokens)\r\ntest_tokens_count = Counter(test_tokens)\r\nwith open('test_counts.txt','w')as g:\r\n for key in test_tokens_count.keys():\r\n g.write(key+\" \"+str(test_tokens_count[key]))\r\n g.write(\"\\n\")\r\n\r\n## get the total vocabulary size here ##\r\nvocab_counter,vocab_size = get_vocab_size(bad_tokens,good_tokens,test_tokens)\r\n\r\n## Get the probability counts ##\r\nprob_bad_words=get_prob_dict(bad_tokens_count,total_bad_words)\r\nprob_good_words=get_prob_dict(good_tokens_count,total_good_words)\r\n\r\n## Write bad_prob_counts onto bad_counts_prob.txt ##\r\nwith open('bad_counts_prob.txt','w')as g:\r\n for key in prob_bad_words.keys():\r\n g.write(key+\" \"+str(prob_bad_words[key]))\r\n g.write(\"\\n\")\r\n\r\n## Write good_prob_counts onto good_counts_prob.txt ##\r\nwith open('good_counts_prob.txt','w',encoding=\"utf8\", errors='ignore')as g:\r\n for key in prob_good_words.keys():\r\n g.write(key+\" \"+str(prob_good_words[key]))\r\n g.write(\"\\n\")\r\n\r\n## get test data in the form of a dictionary of tweets ##\r\ndictionary_of_test_data=get_test_data()\r\n\r\n## apply naive bayes here ##\r\nprint (\"Naive Bayes Classifier\")\r\nNB_classification = naive_bayes(dictionary_of_test_data)\r\n\r\npr_dict={}\r\nthreshold=0\r\nwhile threshold mv:\n mv = m[l][c]\nprint(\"-=\"*30)\nfor l in range(0, 3):\n for c in range(0, 3):\n print(f\"[{m[l][c]:^5}]\", end=' ')\n print()\nprint(\"-=\"*30)\nprint(f\"A soma dos elementos pares é {s};\\nA somatória de todos os elementos da 3ª coluna é {st};\")\nprint(f\"E o maior valor da 2ª linha é {mv}.\")\n","sub_path":"ExCursoEmVídeo(Python)/ex87.py","file_name":"ex87.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"525470444","text":"import json\nimport threading\nimport time\nimport re\nimport requests\nimport os\nimport logging\nfrom collections import OrderedDict\nfrom ws4py.client.threadedclient import WebSocketClient\nfrom modules.helper.modules import ChatModule\nfrom modules.helper.parser import self_heal\nfrom modules.helper.system import system_message\n\nlogging.getLogger('requests').setLevel(logging.ERROR)\nlog = logging.getLogger('sc2tv')\nSOURCE = 'fs'\nSOURCE_ICON = 'http://funstream.tv/build/images/icon_home.png'\nSYSTEM_USER = 'Funstream'\n\nPING_DELAY = 30\n\nCONF_DICT = OrderedDict()\nCONF_DICT['gui_information'] = {'category': 'chat'}\nCONF_DICT['config'] = OrderedDict()\nCONF_DICT['config']['channel_name'] = 'CHANGE_ME'\nCONF_DICT['config']['socket'] = 'ws://funstream.tv/socket.io/'\n\nCONF_GUI = {\n 'config': {\n 'hidden': ['socket']},\n 'non_dynamic': ['config.*']}\n\n\nclass FsChat(WebSocketClient):\n def __init__(self, ws, queue, channel_name, **kwargs):\n super(self.__class__, self).__init__(ws, protocols=kwargs.get('protocols', None))\n # Received value setting.\n self.source = SOURCE\n self.queue = queue\n self.channel_name = channel_name\n self.main_thread = kwargs.get('main_thread') # type: FsThread\n self.crit_error = False\n\n self.channel_id = self.fs_get_id()\n\n self.smiles = kwargs.get('smiles')\n self.smile_regex = ':(\\w+|\\d+):'\n\n # Because funstream API is fun, we have to iterate the\n # requests in \"proper\" format:\n #\n # 42Iterator[\"command\",{\"params\":\"param\"}]\n # ex: 
\t420[\"/chat/join\",{'channel':\"stream/30000\"}\n # ex: \t421[\"/chat/join\",{'channel':\"stream/30000\"}\n # ex: \t429[\"/chat/join\",{'channel':\"stream/30000\"}\n # ex: \t4210[\"/chat/join\",{'channel':\"stream/30000\"}\n #\n # Also, funstream API send duplicates of the messages\n # so we have to ignore the duplicates.\n # We are doing so by creating special array which has\n # last N buffer of unique ID's\n self.iter = 0\n self.duplicates = []\n self.users = []\n self.bufferForDup = 20\n\n def opened(self):\n log.info(\"Websocket Connection Succesfull\")\n self.fs_system_message(\"Connected\")\n\n def closed(self, code, reason=None):\n if reason == 'INV_CH_ID':\n self.crit_error = True\n else:\n log.info(\"Websocket Connection Closed Down\")\n self.fs_system_message(\"Connection died, trying to reconnect\")\n timer = threading.Timer(5.0, self.main_thread.connect)\n timer.start()\n\n def fs_system_message(self, message):\n system_message(message, self.queue, source=SOURCE, icon=SOURCE_ICON, from_user=SYSTEM_USER)\n\n @staticmethod\n def allow_smile(smile, subscriptions):\n allow = False\n\n if smile['user']:\n channel_id = smile['user']['id']\n for sub in subscriptions:\n if sub == channel_id:\n allow = True\n else:\n allow = True\n\n return allow\n\n def received_message(self, mes):\n # Funstream send all kind of different messages\n # Some of them are strange asnwers like \"40\".\n # For that we are trying to find real messages\n # which are more than 5 char length.\n #\n # Websocket has it's own type, so we serialise it to string.\n message = str(mes)\n if len(message) > 5:\n # \"Fun\" messages consists of strange format:\n # \t 43Iter{json}\n # ex: 430{'somedata': 'somedata'}\n # We need to just get the json, so we \"regexp\" it.\n if re.findall('{.*}', message)[0]:\n # If message does have JSON (some of them dont, dont know why)\n # we analyze the real \"json\" message.\n message = json.loads(re.findall('{.*}', message)[0])\n for dict_item in message:\n # SID type is \"start\" packet, after that we can join channels,\n # at least I think so.\n if dict_item == 'sid':\n # \"Funstream\" has some interesting infrastructure, so\n # we first need to find the channel ID from\n # nickname of streamer we need to connect to.\n self.fs_join()\n self.fs_ping()\n elif dict_item == 'status':\n self.fs_system_message('Joined channel {0}'.format(self.channel_name))\n elif dict_item == 'id':\n try:\n self.duplicates.index(message[dict_item])\n except ValueError:\n comp = {'source': self.source,\n 'source_icon': SOURCE_ICON,\n 'user': message['from']['name'],\n 'text': message['text'],\n 'emotes': []}\n if message['to'] is not None:\n comp['to'] = message['to']['name']\n if comp['to'] == self.channel_name:\n comp['pm'] = True\n else:\n comp['to'] = None\n\n smiles_array = re.findall(self.smile_regex, comp['text'])\n for smile in smiles_array:\n for smile_find in self.smiles:\n if smile_find['code'] == smile:\n if self.allow_smile(smile_find, message['store']['subscriptions']):\n comp['emotes'].append({'emote_id': smile, 'emote_url': smile_find['url']})\n\n self.queue.put(comp)\n self.duplicates.append(message[dict_item])\n if len(self.duplicates) > self.bufferForDup:\n self.duplicates.pop(0)\n\n def fs_get_id(self):\n # We get ID from POST request to funstream API, and it hopefuly\n # answers us the correct ID of the channel we need to connect to\n payload = \"{'id': null, 'name': \\\"\" + self.channel_name + \"\\\"}\"\n try:\n request = requests.post(\"http://funstream.tv/api/user\", 
data=payload, timeout=5)\n if request.status_code == 200:\n channel_id = json.loads(re.findall('{.*}', request.text)[0])['id']\n return channel_id\n else:\n error_message = request.json()\n if 'message' in error_message:\n log.error(\"Unable to get channel ID. {0}\".format(error_message['message']))\n self.closed(0, 'INV_CH_ID')\n else:\n log.error(\"Unable to get channel ID. No message available\")\n self.closed(0, 'INV_CH_ID')\n except requests.ConnectionError:\n log.info(\"Unable to get information from api\")\n return None\n\n def fs_join(self):\n # Because we need to iterate each message we iterate it!\n iter_sio = \"42\"+str(self.iter)\n self.iter += 1\n\n # Then we send the message acording to needed format and\n # hope it joins us\n if self.channel_id:\n join = str(iter_sio) + \"[\\\"/chat/join\\\", \" + json.dumps({'channel': \"stream/\" + str(self.channel_id)},\n sort_keys=False) + \"]\"\n self.send(join)\n self.fs_system_message(\"Joining channel {0}\".format(self.channel_name))\n log.info(\"Joined channel {0}\".format(self.channel_id))\n\n def fs_ping(self):\n # Because funstream is not your normal websocket they\n # have own \"ping/pong\" algorithm, and WE have to send ping.\n # Yes, I don't know why.\n # We have to send ping message every 30 seconds, or funstream will\n # disconnect us. So we have to create separate thread for it.\n # Dont understand why server is not sending his own pings, it\n # would be sooooo easier.\n ping_thread = FsPingThread(self)\n ping_thread.start()\n\n\nclass FsPingThread(threading.Thread):\n def __init__(self, ws):\n threading.Thread.__init__(self)\n self.daemon = \"True\"\n # Using main websocket\n self.ws = ws # type: FsChat\n\n def run(self):\n while not self.ws.terminated:\n self.ws.send(\"2\")\n time.sleep(PING_DELAY)\n\n\nclass FsThread(threading.Thread):\n def __init__(self, queue, socket, channel_name):\n threading.Thread.__init__(self)\n # Basic value setting.\n # Daemon is needed so when main programm exits\n # all threads will exit too.\n self.daemon = \"True\"\n self.queue = queue\n self.socket = socket\n self.channel_name = channel_name\n self.smiles = []\n\n def run(self):\n self.connect()\n\n def connect(self):\n # Connecting to funstream websocket\n try_count = 0\n while True:\n try_count += 1\n log.info(\"Connecting, try {0}\".format(try_count))\n if not self.smiles:\n try:\n smiles = requests.post('http://funstream.tv/api/smile', timeout=5)\n if smiles.status_code == 200:\n smiles_answer = smiles.json()\n for smile in smiles_answer:\n self.smiles.append(smile)\n except requests.ConnectionError:\n log.error(\"Unable to get smiles\")\n ws = FsChat(self.socket, self.queue, self.channel_name, protocols=['websocket'], smiles=self.smiles,\n main_thread=self)\n if ws.crit_error:\n log.critical(\"Got critical error, halting\")\n break\n elif ws.channel_id and self.smiles:\n ws.connect()\n ws.run_forever()\n break\n\n\nclass sc2tv(ChatModule):\n def __init__(self, queue, python_folder, **kwargs):\n ChatModule.__init__(self)\n log.info(\"Initializing funstream chat\")\n\n # Reading config from main directory.\n conf_folder = os.path.join(python_folder, \"conf\")\n conf_file = os.path.join(conf_folder, \"sc2tv.cfg\")\n config = self_heal(conf_file, CONF_DICT)\n self._conf_params = {'folder': conf_folder, 'file': conf_file,\n 'filename': ''.join(os.path.basename(conf_file).split('.')[:-1]),\n 'parser': config,\n 'config': CONF_DICT,\n 'gui': CONF_GUI}\n self.queue = queue\n self.socket = CONF_DICT['config']['socket']\n self.channel_name = 
CONF_DICT['config']['channel_name']\n\n def load_module(self, *args, **kwargs):\n # Creating new thread with queue in place for messaging transfers\n fs = FsThread(self.queue, self.socket, self.channel_name)\n fs.start()\n","sub_path":"modules/chat/sc2tv.py","file_name":"sc2tv.py","file_ext":"py","file_size_in_byte":11189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"1065378","text":"#from funcoes import *\nfrom copy import deepcopy\nfrom viagens import Viagem\nfrom random import randint, random\nimport math\n\n\nn_cidades = 10\n\nposicoes = {'0':(28, 124),'1':(69, 92),'2':(76, 176),'3':(94, 134),\n\t\t\t'4':(158, 147),'5':(37, 193),'6':(39, 42),'7':(134, 180),\n\t\t\t'8':(171, 79),'9':(19, 29)}\n\ndef vizinho(viagem):\n\tmelhor = Viagem(None, None, 10000000)\n\n\tfor x in range(len(viagem.base)):\n\t\tteste = deepcopy(viagem.base)\n\t\tfor y in range(1,4):\n\t\t\tif y != viagem.base[x]:\n\t\t\t\tteste[x] = y\n\t\t\t\tcusto = custo_caminho(caminho_viajantes(teste))\n\n\t\t\t\tif custo < melhor.custo:\n\t\t\t\t\tmelhor.base = teste\n\t\t\t\t\tmelhor.custo = custo\n\treturn melhor\n\n\ndef gerar_posicoes(n_cidades):\n\tglobal posicoes\n\tfor x in range(n_cidades):\n\t\tposicoes[str(x)] = (randint(10, 200), randint(10, 200))\n\n\n#gerar_posicoes(n_cidades)\n\ndef gerarInicio():\n\tcidades_visitadas = []\n\n\twhile (1 not in cidades_visitadas) or (2 not in cidades_visitadas) or (3 not in cidades_visitadas):\n\t\tcidades_visitadas = []\n\t\tfor x in range(n_cidades):\n\t\t\tcidades_visitadas.append(randint(1,3))\n\n\n\n\tcaminho = caminho_viajantes(cidades_visitadas)\n\n\treturn Viagem(cidades_visitadas, caminho, custo_caminho(caminho))\n\n\ndef caminho_viajantes(cidades_visitadas):\n\tvisitas = []\n\n\tfor x in range(1, 4):\n\t\tfor y, c in enumerate(cidades_visitadas):\n\t\t\tif c==(x):\n\t\t\t\tvisitas.append(y)\n\n\treturn visitas\n\n\ndef custo_caminho(caminho):\n\tcusto = 0\n\n\tcidades = [0, 0]\n\n\tcidades[0] = caminho[0]\n\n\tfor x in range(1, len(caminho)):\n\n\t\tcidades[1] = caminho[x]\n\n\t\t## modulo(cidade1[x] - cidade2[x])\n\t\txDistance = math.fabs(posicoes[str(cidades[0])][0] - posicoes[str(cidades[1])][0])\n\n\t\t## modulo(cidade1[y] - cidade2[y])\n\t\tyDistance = math.fabs(posicoes[str(cidades[0])][1] - posicoes[str(cidades[1])][1])\n\n\t\tcusto += math.sqrt((xDistance**2) + (yDistance**2))\n\n\t\tcidades[0] = cidades[1]\n\n\treturn int(custo)\n\n\n#solucao = vizinho(Viagem([1,3,1,1,2,3]))\n\n#print (solucao.custo)\n\n#solucao2 = vizinho(solucao)\n\n#print (solucao2.custo)\n\ndef novaRota(vetor_base):\n\n\tviagens = Viagem(vetor_base)\n\n\t\"\"\"\n\tpos1 = randint(0, n_cidades-1)\n\tpos2 = randint(0, n_cidades-1)\n\n\tcidade1 = viagens.base[pos1]\n\tcidade2 = viagens.base[pos2]\n\n\tviagens.base[pos2] = cidade1\n\tviagens.base[pos1] = cidade2\n\n\t\"\"\"\n\n\tviagens.base[randint(0, n_cidades-1)] = randint(1,3)\n\n\tviagens.caminho = caminho_viajantes(viagens.base)\n\tviagens.custo = custo_caminho(viagens.caminho)\n\n\n\treturn viagens\n\n\ndef probabilidade_aceitar(deltaCusto, temperatura):\n\tif deltaCusto < 0:\n\t\treturn 1.0\n\n\telse:\n\t\treturn math.exp(-deltaCusto/temperatura)\n\n\ndef simulatedAnnealing(solucao):\n\tTi = 10000\n\tTf = 0.0001\n\ttaxa = 0.99\n\n\n\tS0 = solucao\n\n\tS = S0 #(Solucao Atual)\n\n\tmelhor_solucao = S\n\n\tT = Ti\n\twhile (T > Tf):\n\n\t\ti = 1\n\t\twhile i <= 100:\n\t\t\tnova_solucao = novaRota(S.base)\n\n\t\t\tdeltaCusto = nova_solucao.custo - S.custo\n\n\t\t\tif 
probabilidade_aceitar(deltaCusto, T) > random():\n\t\t\t\tS = nova_solucao\n\n\t\t\tif (S.custo < melhor_solucao.custo):\n\t\t\t\tmelhor_solucao = S\n\n\n\t\t\ti+=1\n\n\n\t\tT *= taxa\n\n\treturn melhor_solucao\n\ndef hillClimb(sol):\n\tviagem = sol\n\n\twhile (1):\n\n\t\tproximo = vizinho(viagem)\n\n\t\tif (proximo.custo >= viagem.custo):\n\t\t\treturn viagem\n\t\tviagem = proximo\n\n\n\n\nsolucao = simulatedAnnealing(gerarInicio())\n\n\nprint (solucao.caminho)\nprint (solucao.custo)","sub_path":"ProgramasPython/hillClimbQueens/Trabalho Final/Trabalho Final/hillAnnealing.py","file_name":"hillAnnealing.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"639133832","text":"import os\nfrom collections import OrderedDict\nfrom itertools import product\nimport numpy as np\nfrom run_rsa import run_experiment\n\ninput_path = '/home/ms44/nips2017_data/haxby2001'\noutfile_path = '/home/ms44/nips2017_data/haxby2001/results'\n\nif __name__ == \"__main__\":\n\n try:\n myID = int(os.environ[\"SLURM_ARRAY_TASK_ID\"])\n totalIDs = int(os.environ[\"SLURM_ARRAY_TASK_MAX\"])\n except KeyError:\n myID = 1\n totalIDs = 1\n\n print(\"Job %s of %s reporting in!\" % (myID, totalIDs))\n\n runPars = OrderedDict([\n ('method', ['mnrsa']),\n ('subj_num', np.arange(6)),\n ('n_nureg', [30, 50])])\n\n # cartesian over param settings\n allpar = [dict(parset) for parset in (zip(runPars.keys(), p)\n for p in product(*runPars.values()))]\n\n pointsPerId = len(allpar) / totalIDs\n start = int((myID-1)*pointsPerId)\n end = int(len(allpar) if myID == totalIDs else (myID)*pointsPerId)\n print(\"Doing Params %s to %s (inclusive)\" % (start, end-1))\n\n for parnum in range(start, end):\n run_experiment(allpar[parnum], input_path, outfile_path)\n\n print(\"Done!\")\n","sub_path":"rsa_experiments/haxby2001.py","file_name":"haxby2001.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"585311977","text":"import threading\nfrom .pdu import *\nfrom polls import models\nimport os\n\n\ndef send_at(ser, cmd):\n ser.write(bytes(cmd+'\\r', 'UTF-8'))\n result = []\n while True:\n line = ser.readline().decode().replace('\\r\\n', '')\n if 'OK' in line or '>' in line:\n break\n if line != '':\n result.append(line)\n return result\n\ndef handle(r, message):\n temp = decode_pdu(message)\n key = '%s%d' % (temp[0], temp[1])\n val = (temp[3], temp[4], temp[5])\n if r.get(key) is None:\n r[key] = [val]\n else:\n if temp[1] != 0:\n r[key].append(val)\n if temp[2] == 0 or temp[2] == len(r[key]):\n sms = r[key]\n sms = sorted(sms, key=lambda num: num[0])\n res = ''\n for i in range(len(sms)):\n res = '%s%s' % (res, sms[i][1])\n r.pop(key)\n return temp[0], res, sms[len(sms) - 1][2]\n #models.Message.objects.all()\n #fruit = models.Message.objects.create(name='Apple')\n","sub_path":"polls/Message.py","file_name":"Message.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"164242544","text":"import os\nimport re\nimport random\nimport json\nfrom dotenv import load_dotenv\nfrom rake_nltk import Rake\n\nfrom flask import Flask\nfrom flask import request, jsonify\nfrom flask_cors import CORS\n\nimport spacy\nimport torch\nfrom transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\n\n\nfrom 
song_story import SpacyModel\nfrom song_story import TextRank\nfrom song_story import GPT2, T5\nfrom song_story import GeniusClient\n\nload_dotenv()\n\n\nclass App(Flask):\n def __init__(self, name):\n super(App, self).__init__(name)\n\n \n print(\"[INFO] Loading models\")\n\n cur_dir = os.path.dirname(os.path.realpath(__file__))\n\n \n gpt2_tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n # add the EOS token as PAD token to avoid warnings\n gpt2_model = GPT2LMHeadModel.from_pretrained(\n \"gpt2\", pad_token_id=gpt2_tokenizer.eos_token_id\n )\n\n \n t5_model = T5ForConditionalGeneration.from_pretrained(\"t5-base\")\n t5_tokenizer = T5Tokenizer.from_pretrained(\"t5-base\")\n t5_state_path = os.path.join(cur_dir, \"./models/final.pt\")\n t5_model.load_state_dict(torch.load(t5_state_path))\n\n \n print(\"[INFO] Initializing classes\")\n\n self.spacy_model = SpacyModel(size=\"md\")\n self.gpt2 = GPT2(gpt2_tokenizer, gpt2_model)\n self.t5 = T5(t5_tokenizer, t5_model, 100)\n self.r = Rake()\n\n token = os.getenv(\"ACCESS_TOKEN\")\n\n \n dataset = os.path.join(cur_dir, './data/artists.txt')\n self.artists = open(dataset).readlines()\n\n \n\n self.client = GeniusClient(token)\n\n\napp = App(__name__)\nCORS(app)\n\n@app.route(\"/status\", methods=[\"GET\"])\ndef status():\n return {\"status\": \"success\", \"message\": \"Server up and running\"}\n\n@app.route(\"/artist\", methods=[\"GET\"])\ndef get_artists():\n name = request.args.get(\"name\")\n\n r = re.compile(\"{}*\".format(name), re.IGNORECASE)\n search_list = list(filter(r.match, app.artists)) \n artists = ','.join(search_list)\n return jsonify({\n 'artists': artists,\n })\n\n@app.route(\"/prompt\", methods=[\"GET\"])\ndef get_prompt():\n\n keys = request.args.get(\"keywords\")\n prompt = app.t5.get_prompt(keys, 300)\n\n@app.route(\"/context\", methods=[\"GET\"])\ndef get_context():\n\n name = request.args.get(\"name\")\n print(name)\n artist = app.client.search_artist(name)\n\n # Choose a random song\n seed = random.randint(0, len(artist.songs) - 1)\n\n _id = artist.songs[seed][0]\n title = artist.songs[seed][1]\n image = artist.image_url\n\n context = app.client.get_referents(_id)\n\n return jsonify(\n {\n \"status\": \"success\",\n \"message\": \"Some story\",\n \"story\": context,\n \"title\": title,\n \"image\": image\n }\n )\n\n\n@app.route(\"/keywords\", methods=[\"POST\"])\ndef get_keywords():\n data = json.loads(request.data)\n text = data[\"text\"]\n app.r.extract_keywords_from_text(text)\n keys = app.r.get_ranked_phrases()\n keys = [key.lower().strip() for key in keys]\n\n return {\"phrase\": \" \".join(keys)}\n\n\n@app.route(\"/story\", methods=[\"POST\"])\ndef get_story():\n data = json.loads(request.data)\n prompt = data[\"prompt\"]\n story = app.gpt2.story_from_prompt(prompt, 500)\n return {\"story\": story}\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=3000)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"123580522","text":"from hdt import HDTDocument, IdentifierPosition\nimport pandas as pd\nimport numpy as np\nimport rocksdb\nimport codecs\nimport datetime\nimport pickle\ndef strict_handler(exception):\n return u\"\", exception.end\ncodecs.register_error(\"strict\", strict_handler)\n\nPATH_LOD = \"/scratch/wbeek/data/LOD-a-lot/data.hdt\"\nPATH_SAMEAS_NETWORK = \"/home/jraad/ssd/data/identity-data/\"\nPATH_ID2TERMS_099 = 
\"/home/jraad/ssd/data/identity-data-0_99/id2terms_0-99.csv\"\nPATH_TERM2ID_099 = \"/home/jraad/ssd/data/identity-data-0_99/term2id_0-99.csv\"\n\n# load the LOD-a-lot HDT file\nhdt_lod = HDTDocument(PATH_LOD)\n\n# these identifiers will be used later to query the HDT file using their IDs\nid_type = hdt_lod.convert_term(\"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\", IdentifierPosition.Predicate)\nid_sameAs = hdt_lod.convert_term(\"http://www.w3.org/2002/07/owl#sameAs\", IdentifierPosition.Predicate)\nid_subClassOf = hdt_lod.convert_term(\"http://www.w3.org/2000/01/rdf-schema#subClassOf\", IdentifierPosition.Predicate)\nid_equivalentClass = hdt_lod.convert_term(\"http://www.w3.org/2002/07/owl#equivalentClass\", IdentifierPosition.Predicate)\n\n# output some stats of LOD-a-lot\n# we can query the HDT file using the term IDs (e.g. rdf:type and equivalentClass) or the URIs (e.g. subClassOf and sameAs)\nprint(\"# subjects:\", \"{:,}\".format(hdt_lod.nb_subjects))\nprint(\"# predicates:\", \"{:,}\".format(hdt_lod.nb_predicates))\nprint(\"# objects:\", \"{:,}\".format(hdt_lod.nb_objects))\n(triples, cardinality) = hdt_lod.search_triples(\"\",\"\",\"\")\nprint(\"# triples:\", \"{:,}\".format(cardinality))\n(triples, cardinality) = hdt_lod.search_triples_ids(0, id_type, 0)\nprint(\"# rdf:type statements:\", \"{:,}\".format(cardinality))\n(triples, cardinality) = hdt_lod.search_triples(\"\", \"http://www.w3.org/2000/01/rdf-schema#subClassOf\", \"\")\nprint(\"# rdfs:subClassOf statements:\", \"{:,}\".format(cardinality))\n(triples, cardinality) = hdt_lod.search_triples_ids(0, id_equivalentClass, 0)\nprint(\"# owl:equivalentClass statements:\", \"{:,}\".format(cardinality))\n(triples, cardinality) = hdt_lod.search_triples(\"\", \"http://www.w3.org/2002/07/owl#sameAs\", \"\")\nprint(\"# owl:sameAs statements:\", \"{:,}\".format(cardinality))\n\ndef serializeObject(obj):\n ser_obj = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)\n return ser_obj\n\ndef deSerializeObject(obj):\n deser_obj = pickle.loads(obj)\n return deser_obj\n\ndef insertToDB(key, value, DB):\n try:\n DB.put(serializeObject(key), serializeObject(value))\n except:\n print(\"Exception Occured in inserting to RocksDB\", key)\n\ndef getValueFromDB(key, DB):\n result = DB.get(serializeObject(key))\n if result != None:\n return deSerializeObject(result)\n\ndef splitTermAndID(line):\n parts = line.split(\" \")\n if len(parts) < 2:\n return parts\n else:\n term = \"\"\n for i in range(len(parts) - 1):\n term = term + parts[i]\n return [term, parts[-1]]\n\nDB_TERM_2_ID = rocksdb.DB(\"/home/jraad/ssd/data/DB_TERM_2_ID.db\", rocksdb.Options(create_if_missing=True))\n\nwith open(PATH_TERM2ID_099) as f:\n line = f.readline()\n cnt = 0\n while line:\n line = f.readline()\n cnt += 1\n if cnt%1000000 == 0:\n print(cnt)\n splitted_line = splitTermAndID(line)\n insertToDB(splitted_line[0], splitted_line[1], DB_TERM_2_ID)\nprint(\"Finished creating database (TERM to ID). 
There is a total of \", \"{:,}\".format(cnt), \"terms\")\n","sub_path":"scripts/create-key-value-stores/create-rocksdb-term2id_0-99.py","file_name":"create-rocksdb-term2id_0-99.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"47075723","text":"#coding=utf-8\n\nimport mxnet as mx\nfrom mxnet import gluon\nfrom mxnet import nd, autograd\nfrom mxnet.gluon import data as gdata\nfrom mxnet.gluon.nn import Sequential\n\n\ndef get_dataset(num_samples=1000):\n w = nd.array([5.5]).reshape(-1, 1)\n b = nd.array([3.5]).reshape(-1, 1)\n x = nd.random.normal(scale=1.0, shape=(num_samples, 1))\n y = nd.dot(x, w) + b\n y += nd.random.normal(scale=0.1, shape=y.shape)\n return x, y\n\n\ndef create_model():\n model = Sequential()\n model.add(gluon.nn.Dense(1))\n return model\n\n\nif __name__ == '__main__':\n lr = 0.01\n epochs = 10\n batch_size = 32\n\n features, labels = get_dataset()\n\n dataset = gdata.ArrayDataset(features, labels)\n dataset_iter = gdata.DataLoader(dataset, batch_size=batch_size, shuffle=True)\n\n net = create_model()\n net.initialize()\n\n losser = gluon.loss.L2Loss()\n trainer = gluon.Trainer(net.collect_params(), optimizer=mx.optimizer.SGD(learning_rate=lr, momentum=0.9))\n\n for epoch in range(epochs):\n for x, y in dataset_iter:\n with autograd.record():\n l = losser(net(x), y)\n l.backward()\n trainer.step(batch_size)\n step_loss = losser(net(features), labels).mean().squeeze().asscalar()\n print(f'epoch {epoch}: loss = {step_loss}.')\n print(net[0].weight.data()[0].asscalar(), net[0].bias.data().asscalar())","sub_path":"LinearRegressionGluon.py","file_name":"LinearRegressionGluon.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"172718544","text":"# -*- coding: utf-8 -*-\r\n# ----------------------------------------------------\r\n# Copyright (c) 2017, Wray Zheng. 
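# A hedged cross-check for the Gluon linear-regression script above: the same kind of
# synthetic data (y = 5.5x + 3.5 plus 0.1-scale noise) solved in closed form with plain
# NumPy should land near the weights the trained Dense layer prints at the end.
import numpy as np

x = np.random.normal(scale=1.0, size=(1000, 1))
y = 5.5 * x + 3.5 + np.random.normal(scale=0.1, size=(1000, 1))
A = np.hstack([x, np.ones_like(x)])            # design matrix [x, 1]
coef, *_ = np.linalg.lstsq(A, y, rcond=None)   # least-squares fit
w_hat, b_hat = coef.ravel()                    # expect roughly 5.5 and 3.5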
All Rights Reserved.\r\n# Distributed under the BSD License.\r\n# ----------------------------------------------------\r\n\r\nfrom gmm import *\r\nimport pandas as pd\r\n\r\n# 设置调试模式\r\nDEBUG = True\r\n\r\n# 载入数据\r\ndata = pd.read_csv('log/dataset.csv', header=0, index_col=0)\r\nY = np.array(data)\r\nmatY = np.matrix(Y, copy=True)\r\n\r\n\r\n# 模型个数,即聚类的类别个数\r\nK = 10\r\n\r\n# 计算 GMM 模型参数\r\nmu, cov, alpha = GMM_EM(matY, K, 100)\r\n\r\n# 根据 GMM 模型,对样本数据进行聚类,一个模型对应一个类别\r\nN = Y.shape[0]\r\n# 求当前模型参数下,各模型对样本的响应度矩阵\r\ngamma = getExpectation(matY, mu, cov, alpha)\r\n# 对每个样本,求响应度最大的模型下标,作为其类别标识\r\ncategory = gamma.argmax(axis=1).flatten().tolist()[0]\r\n# 将每个样本放入对应类别的列表中\r\nclasses = []\r\nfor k in range(K):\r\n classes.append(np.array([Y[i] for i in range(N) if category[i] == k]))\r\n\r\n\r\n# 绘制聚类结果\r\nfor i in range(K):\r\n plt.plot(classes[i][:, 0], classes[i][:, 1], 'o', label='class{}'.format(i))\r\n# plt.plot(class1[:, 0], class1[:, 1], 'rs', label=\"class1\")\r\n# plt.plot(class2[:, 0], class2[:, 1], 'bo', label=\"class2\")\r\n# plt.plot(class3[:, 0], class3[:, 1], 'o', label=\"class3\")\r\n# plt.plot(class4[:, 0], class4[:, 1], 'o', label=\"class4\")\r\n# plt.plot(class5[:, 0], class5[:, 1], 'o', label=\"class5\")\r\n\r\nplt.legend(loc=\"best\")\r\nplt.title(\"GMM Clustering By EM Algorithm\")\r\nplt.show()\r\n\r\n# label = pd.read_csv('label.csv', index_col=0)\r\n# classes1 = []\r\n# for k in range(K):\r\n# classes1.append(np.array([Y[i] for i in range(N) if label[i] == k]))\r\n# for i in range(K):\r\n# plt.plot(classes1[i][:, 0], classes1[i][:, 1], 'o', label='class{}'.format(i))\r\n# plt.title(\"ground truth\")\r\n# plt.show()\r\n\r\n\r\ncategory = pd.DataFrame(category)\r\ncategory.to_csv('log/prediction.csv')\r\nprint('finish')\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"602079183","text":"# program0611.py\n'''绘制图案为紫色正方形螺旋'''\nimport turtle\n\nturtle.setup(400, 360)\nturtle.pensize(2) # 设置画笔宽度为1像素\nturtle.pencolor(\"purple\") # 设置画笔颜色为紫色\nturtle.shape(\"turtle\") # 设置画笔形状为“海龟”\nturtle.speed(10) # 设置绘图速度为10\na = 5 # 起始移动长度a为5单位\nfor i in range(40): # 循环40次\n a = a + 6 # 移动长度a每次增加6单位\n turtle.left(90) # 画笔每次移动旋转90度\n turtle.fd(a) # 画笔向前移动a单位\nturtle.hideturtle() # 隐藏画笔\nturtle.done() # 结束绘制\n","sub_path":"ch08a/ex0811.py","file_name":"ex0811.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"295949641","text":"from random import randint\n\ndef input_type():\n \"\"\"\n get number from user\n :rtype: int\n :return: given number converted to int\n \"\"\"\n while True:\n guess = input(\"Guess the number from 1 to 100: \")\n try:\n guess = int(guess)\n if guess >= 1 and guess <= 100:\n break\n else:\n print(\"Your guess should be a number from 1 to 100!\")\n except ValueError:\n print(\"It is not a number!\")\n return guess\n\ndef user_value():\n \"\"\" main function of the game. 
\"\"\"\n winning_num = randint(1, 100)\n guess = input_type()\n while guess != winning_num:\n if guess > winning_num:\n print(\"Too big!\")\n else:\n print(\"Too small!\")\n guess = input_type()\n print(\"You win!\")\n\n\n\nif __name__ == '__main__':\n user_value()\n\n\n\n","sub_path":"exercise_1/guess_the_number_1.py","file_name":"guess_the_number_1.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"572023079","text":"import torch, numpy, gym, random\nimport torch.nn as nn\nimport torch.optim as opt\nfrom torch.distributions import Categorical\nimport matplotlib.pyplot as plt\nfrom gym.envs.registration import register\n\nregister(\n id='FrozenLakeDeterministic4x4-v0',\n entry_point='gym.envs.toy_text:FrozenLakeEnv',\n kwargs={'map_name' : '4x4', 'is_slippery': False}\n)\n\nenv = gym.make(\"FrozenLakeDeterministic4x4-v0\")\naction_space = env.action_space.n\nobservation_space = env.observation_space.n\nnetwork = nn.Sequential(nn.Linear(observation_space,4),nn.ReLU(),nn.Linear(4,action_space),nn.Softmax(dim=-1))\noptimizer = opt.SGD(network.parameters(),lr=0.1)\n\nepochs = 100000\ndiscount_factor = 0.99\n\nlog_actions = torch.tensor([])\nrewards = []\n\n#Samples From Distribution(Softmax Output)\ndef choose_action(state):\n global log_actions\n state = torch.tensor(numpy.eye(observation_space)[state]).float()\n out = network(state)\n #print(out)\n c = Categorical(out)\n action = c.sample()\n log = torch.tensor([0.0],requires_grad=True).add(c.log_prob(action))#torch.tensor([0.0],requires_grad=True)+torch.log(out[action])#torch.tensor([c.log_prob(action)],requires_grad=True)\n #print(log)\n if len(log_actions.data)!=0:\n log_actions = torch.cat([log_actions,log])\n else:\n log_actions = log\n return action.item()\n\ndef maxprob(state):\n state = torch.tensor(numpy.eye(observation_space)[state]).float()\n prob = network(state)\n return prob.max(0)\n\ndef learn(loss):\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # print(list(network.parameters())[0].grad)\n return loss\n\ndef discount_rewards():\n sum = 0\n discounted_rewards = []\n for r in rewards[::-1]:\n sum = sum * discount_factor + r\n discounted_rewards.insert(0, sum)\n #discounted_rewards = (discounted_rewards - discounted_rewards.mean()) / (discounted_rewards.std() + numpy.finfo(float).eps)\n # print(discounted_rewards)\n return discounted_rewards\n\ndef update_policy():\n global rewards, log_actions\n rewards = torch.tensor(discount_rewards()).float()\n #rewards = (rewards - rewards.mean()) / (rewards.std() + numpy.finfo(float).eps)\n loss = torch.sum(torch.mul(rewards,log_actions).mul(-1),-1)\n learn(loss)\n rewards=[]\n log_actions = torch.tensor([])\n\nprint(\"---------TRAINING---------\")\nallrewards = []\nrewardmeans = []\nfor e in range(epochs):\n frames = 0\n runreward = 0\n state = env.reset()\n\n while True:\n frames += 1\n action = choose_action(state)\n new_state, reward, done, i = env.step(action)\n\n runreward+=reward\n rewards.append(reward)\n state = new_state\n if done or frames%20 == 0:\n update_policy()\n #print(\"E: \"+str(e)+\" F: \"+str(frames)+\" R: \"+str(runreward))\n allrewards.append(runreward)\n break\n\n if (e+1)%100==0:\n mean = numpy.mean(allrewards[-100:])\n print(str(e + 1) + \" M: \" + str(mean))\n rewardmeans.append(mean)\n if mean == 1.0:\n print(\"SOLVED!!!\")\n break\n\nprint(\"---------TESTING---------\")\nfor e in range(1):\n state = env.reset()\n\n while True:\n prob, action = 
maxprob(state)\n new_state, reward, done, i = env.step(action.item())\n env.render()\n state = new_state\n if done or frames%20 == 0:\n print(\"E: \"+str(e)+\" F: \"+str(frames)+\" R: \"+str(runreward))\n break\n\nprint(\"---------PLOTTING---------\")\nplt.figure(0)\nplt.plot(allrewards)\nplt.title(\"Reward\")\n#plt.savefig('./Plots/PolicyGradient/4x4det/reward.png',bbox_inches='tight')\nplt.figure(1)\nplt.plot(rewardmeans,color=\"orange\")\nplt.title(\"Mean Reward\")\n#plt.savefig('./Plots/PolicyGradient/4x4det/meanreward.png',bbox_inches='tight')\nplt.show()","sub_path":"PolicyGradient_Deterministic_4x4.py","file_name":"PolicyGradient_Deterministic_4x4.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"650719583","text":"import numpy as np\nimport math\nimport scipy.integrate as integrate\nimport pylab\n\ndef exp(x):\n return np.exp(x)\ndef coscos(x):\n return np.cos(np.cos(x))\n\ndef plot_functions():\n x = np.linspace(-2*np.pi, 4*np.pi, 500)\n\n exp_x = exp(x)\n pylab.semilogy(x, exp_x, 'b')\n pylab.grid(True)\n pylab.ylabel(r'$e^{x}\\rightarrow$', fontsize=12)\n pylab.xlabel(r'x$\\rightarrow$', fontsize=12)\n pylab.title('Semilog plot of $e^{x}$', fontsize=12)\n pylab.savefig('fig{}.png'.format(fignum[0]))\n fignum[0] += 1\n pylab.close()\n\n coscos_x = coscos(x)\n pylab.plot(x,coscos_x,'b')\n pylab.grid(True)\n pylab.xlabel(r'x$\\rightarrow$', fontsize=12)\n pylab.ylabel(r'$\\cos(\\cos(x))\\rightarrow$', fontsize=12)\n pylab.title('Plot of $\\cos(\\cos(x))$',fontsize=12)\n pylab.savefig('fig{}.png'.format(fignum[0]))\n fignum[0] += 1\n pylab.close()\n\ndef fourier_transform(n, function):\n coeff = np.zeros(n)\n def u(x, k, f):\n return f(x)*np.cos(k*x)/np.pi\n def v(x,k,f):\n return f(x)*np.sin(k*x)/np.pi\n\n coeff[0] = integrate.quad(function,0,2*np.pi)[0]/(2*np.pi)\n for i in range(1,n):\n if i%2:\n coeff[i] = integrate.quad(u,0,2*np.pi,args=((i//2) +1,function))[0]\n else:\n coeff[i] = integrate.quad(v,0,2*np.pi,args=(i//2,function))[0]\n return coeff\n\ndef plot_coefficients(coeffs, func):\n pylab.semilogy(range(51),np.abs(coeffs),'ro')\n pylab.grid(True)\n pylab.xlabel(r'n$\\rightarrow$',fontsize=12)\n pylab.ylabel(r'Coefficient Magnitude$\\rightarrow$',fontsize=12)\n pylab.title('Semilog Plot of coefficients for '+func,fontsize=12)\n pylab.savefig('fig{}.png'.format(fignum[0]))\n fignum[0] += 1\n pylab.close()\n\n pylab.loglog(range(51),np.abs(coeffs),'ro')\n pylab.grid(True)\n pylab.xlabel(r'n$\\rightarrow$',fontsize=12)\n pylab.ylabel(r'Coefficient Magnitude$\\rightarrow$',fontsize=12)\n pylab.title('Loglog Plot of coefficients of '+func,fontsize=12)\n pylab.savefig('fig{}.png'.format(fignum[0]))\n fignum[0] += 1\n pylab.close()\n\ndef solve(function):\n x = np.linspace(0,2*np.pi,401)\n x = x[:-1]\n y = np.linspace(0,2*np.pi,400)\n A = np.zeros((400,51))\n A[:,0] = 1\n for i in range(1,26):\n A[:,2*i-1] = np.cos(i*x)\n A[:,2*i] = np.sin(i*x) \n B = function(x) \n c = np.linalg.lstsq(A,B,rcond = None)[0]\n return A, c\n\ndef plot_comparison(c, coeff, func):\n pylab.semilogy(range(51),np.abs(c),'go',label='Least Squares Approach')\n pylab.semilogy(range(51),np.abs(coeff),'ro',label='True Value')\n pylab.grid(True)\n pylab.xlabel(r'n$\\rightarrow$',fontsize=12)\n pylab.ylabel(r'$Coefficient\\rightarrow$',fontsize=12)\n pylab.title('Semilog Plot of coefficients for '+func,fontsize=12)\n pylab.legend(loc='upper right')\n pylab.savefig('fig{}.png'.format(fignum[0]))\n fignum[0] += 1\n 
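# A hedged numerical cross-check for the np.linalg.lstsq() call used in solve():
# for this well-conditioned Fourier design matrix, solving the normal equations
# gives the same coefficients up to floating-point error.
import numpy as np

def lstsq_via_normal_equations(A, B):
    # c = (A^T A)^{-1} A^T B, solved without forming the explicit inverse
    return np.linalg.solve(A.T @ A, A.T @ B)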
pylab.close()\n\n pylab.loglog(range(51),np.abs(c),'go',label='Least Squares Approach')\n pylab.loglog(range(51),np.abs(coeff),'ro',label = 'True Value')\n pylab.grid(True)\n pylab.xlabel(r'n$\\rightarrow$',fontsize=12)\n pylab.ylabel(r'$Coefficient\\rightarrow$',fontsize=12)\n pylab.title('Loglog Plot of coefficients of '+func,fontsize=15)\n pylab.legend(loc='lower left')\n pylab.savefig('fig{}.png'.format(fignum[0]))\n fignum[0] += 1\n pylab.close()\n\ndef compute_deviation(c, coeff, function, func):\n dev = abs(coeff - c)\n approximation = np.matmul(A,c)\n x = np.linspace(0,2*np.pi,401)\n x = x[:-1]\n pylab.semilogy(x,approximation,'go',label=\"Function Approximation\")\n pylab.semilogy(x,function(x),'-r',label='True value')\n pylab.grid(True)\n pylab.xlabel(r'n$\\rightarrow$',fontsize=12)\n pylab.ylabel(r'$f(x)\\rightarrow$',fontsize=12)\n pylab.title('Plot of ' + func + ' and its Fourier series approximation',fontsize=12)\n pylab.legend(loc='upper left')\n pylab.savefig('fig{}.png'.format(fignum[0]))\n fignum[0] += 1\n pylab.close()\n\nfignum = [1]\nplot_functions()\nexp_coeff = fourier_transform(51, exp)\ncoscos_coeff = fourier_transform(51, coscos)\nplot_coefficients(exp_coeff, \"$e^{x}$\")\nplot_coefficients(coscos_coeff, \"$cos(cos(x))$\")\n\nA, c_exp = solve(exp)\nA, c_coscos = solve(coscos)\n\nplot_comparison(c_exp, exp_coeff, \"$e^{x}$\")\nplot_comparison(c_coscos, coscos_coeff, \"$cos(cos(x))$\")\n\ncompute_deviation(c_exp, exp_coeff, exp, \"$e^{x}$\")\ncompute_deviation(c_coscos, coscos_coeff, coscos, \"$cos(cos(x))$\")\n\n\n","sub_path":"Assignment4/ee19b106_assignment4.py","file_name":"ee19b106_assignment4.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"597710898","text":"# -*- coding: utf-8 -*-\nfrom futuquant import *\nimport pandas as pd\n#import TA-Lib\nimport talib\nimport sys\nfrom pymongo import MongoClient\nimport datetime\nimport math\nimport time\nfrom pandas.core.frame import DataFrame\nimport numpy as np\n\ncodeid_chengbeng_latestprice_rate_dic = {}\ncodeid_chengbeng_ma250price_rate_dic = {}\ncodeid_chengbeng_ma1000price_rate_dic = {}\ndef cacl_chengbeng_of_latest_ma250_ma1000_pricerate(price_list,code_id_all,stock_name,chengbeng_price):\n ma250=talib.SMA(price_list[\"close\"],timeperiod=250)\n ma1000=talib.SMA(price_list[\"close\"],timeperiod=1000)\n len_price = len(price_list[\"close\"].index)\n latest_newPrice = price_list[\"close\"][len_price-1]\n latest_Ma250Price = ma250[len_price-1]\n latest_Ma1000Price = ma1000[len_price-1]\n #print(\"stock_name:\",stock_name,\" chengbeng_price:\",chengbeng_price,\" latest_newPrice:\",latest_newPrice,\" latest_Ma250Price:\",latest_Ma250Price,\" latest_Ma1000Price:\",latest_Ma1000Price)\n #成本价格为基准线,最新价格低于成本就是亏损\n chengbeng_latestprice_rate=0\n if latest_newPrice == latest_newPrice:\n chengbeng_latestprice_rate = (latest_newPrice-chengbeng_price)*100/abs(chengbeng_price)\n\n chengbeng_latestprice_rate = round(chengbeng_latestprice_rate,3)\n codeid_chengbeng_latestprice_rate_dic[code_id_all] = chengbeng_latestprice_rate\n\n #ma250是基准线\n chengbeng_ma250price_rate =0\n if latest_Ma250Price == latest_Ma250Price:\n chengbeng_ma250price_rate = (chengbeng_price-latest_Ma250Price)*100/abs(latest_Ma250Price)\n chengbeng_ma250price_rate = round(chengbeng_ma250price_rate,3)\n codeid_chengbeng_ma250price_rate_dic[code_id_all] = chengbeng_ma250price_rate\n\n #ma1000是基准线\n chengbeng_ma1000price_rate = 0\n if latest_Ma1000Price == 
latest_Ma1000Price:\n chengbeng_ma1000price_rate = (chengbeng_price-latest_Ma1000Price)*100/abs(latest_Ma1000Price)\n chengbeng_ma1000price_rate = round(chengbeng_ma1000price_rate,3)\n codeid_chengbeng_ma1000price_rate_dic[code_id_all] = chengbeng_ma1000price_rate\n\n return latest_newPrice,latest_Ma250Price,latest_Ma1000Price\n\n\ndef getZhishuName(zhishu_name):\n if zhishu_name == \"code_50_SH000016\" :\n return \"上证50\"\n if zhishu_name == \"code_100_SH000903\" :\n return \"中证100\"\n if zhishu_name == \"code_300_SZ399300\" :\n return \"沪深300\"\n if zhishu_name == \"code_500_SH000905\" :\n return \"中证500\"\n if zhishu_name == \"code_1000SH000852\" :\n return \"中证1000\"\n if zhishu_name == \"code_chuangye_SZ399006\" :\n return \"创业板\"\n if zhishu_name == \"code_hk_zhishuHK800000\" :\n return \"恒生指数\"\n if zhishu_name == \"code_usa_biaopu500USINX\" :\n return \"标普500\"\n if zhishu_name == \"code_self_zhishu\" :\n return \"自定义指数\"\n\n\ncodeid_chengbengprice = {}\ncodeid_stock_num = {}\ncodeid_stock_name = {}\nif __name__ == '__main__':\n start_time= '1998-06-05'\n #end_time='2018-06-12'\n if len(sys.argv) != 2:\n print(\"exit argv:\",sys.argv)\n sys.exit()\n if len(sys.argv) == 2:\n year = datetime.datetime.now().year\n month = datetime.datetime.now().month\n day = datetime.datetime.now().day\n end_time= str(year)+'-'+str(month)+'-'+str(day)\n #quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n #get stock mongdb\n conn = MongoClient('127.0.0.1', 27017)\n stock_list_coll = conn.stock_list\n file_name = sys.argv[1]\n print(\"######file_name:\",file_name)\n file_object = open(file_name)\n with file_object as f:\n for line in f.readlines():\n stock_list = line.split(\"|\")\n if len(stock_list) >= 2:\n all_info = \"\"\n code_id = stock_list[0]#.replace(\".\",\"\")\n stock_name = stock_list[1].replace(\"\\n\",\"\")\n codeid_stock_name[code_id] = stock_name\n\n chengbeng_price = stock_list[2].replace(\"\\n\",\"\")\n chengbeng_price_list = chengbeng_price.split(\":\")\n chengbeng_price = float(chengbeng_price_list[1])\n #print(\"chengbeng_price:\",chengbeng_price)\n\n stock_num = stock_list[3].replace(\"\\n\",\"\")\n stock_num_list = stock_num.split(\":\")\n stock_num = float(stock_num_list[1])\n #print(\"stock_num:\",stock_num)\n\n stock_buy_time = stock_list[4].replace(\"\\n\",\"\")\n stock_buy_time_list =stock_buy_time.split(\":\")\n stock_buy_time=stock_buy_time[1]\n\n stock_buy_reason = stock_list[5].replace(\"\\n\",\"\")\n\n if code_id in codeid_chengbengprice:\n old_num = float(codeid_stock_num[code_id])\n old_chengbeng = float(codeid_chengbengprice[code_id])\n\n new_chengbeng_all = float(old_num)*float(old_chengbeng) + float(chengbeng_price)*float(stock_num)\n new_chengbeng = new_chengbeng_all/(old_num+stock_num)\n new_chengbeng = round(new_chengbeng,3)\n codeid_chengbengprice[code_id] = new_chengbeng\n else:\n codeid_chengbengprice[code_id] = float(chengbeng_price)\n\n if code_id in codeid_stock_num:\n value = codeid_stock_num[code_id]\n codeid_stock_num[code_id] = float(value) + float(stock_num)\n else:\n codeid_stock_num[code_id] = float(stock_num)\n\n #计算亏损额度\n code_stock_name = {}\n zhishu_item_list = [\"code_50_SH000016\",\"code_100_SH000903\",\"code_300_SZ399300\",\"code_500_SH000905\",\"code_1000SH000852\",\"code_chuangye_SZ399006\",\"code_hk_zhishuHK800000\",\"code_usa_biaopu500USINX\",\"code_self_zhishu\"]\n print(\"------------------------end_time:\",end_time)\n for zhishu_item in zhishu_item_list:\n zhishu_name = getZhishuName(zhishu_item)\n print(\" 
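# The running average-cost update from the parsing loop above, extracted into a
# hypothetical helper for clarity; it mirrors the codeid_chengbengprice bookkeeping
# (total cost of all lots divided by total quantity, rounded like the original).
def update_avg_cost(old_qty, old_avg, buy_qty, buy_price):
    return round((old_qty * old_avg + buy_qty * buy_price) / (old_qty + buy_qty), 3)

assert update_avg_cost(100, 10.0, 100, 12.0) == 11.0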
=====zhishu_name:\",zhishu_name,\" ========start=========== =================== \")\n market = conn.market\n stock_info_list = market[zhishu_item].find()\n tmp_codeid_rate = {}\n tmp_codeid_latest_price = {}\n tmp_codeid_ma250_price= {}\n tmp_codeid_ma1000_price = {}\n tmp_codeid_stockname = {}\n for stock_info in stock_info_list:\n if stock_info[\"code\"] in codeid_chengbengprice:\n code_id = stock_info[\"code\"]\n stock_name = stock_info[\"stock_name\"]\n tmp_codeid_stockname[code_id] = stock_name\n chengbengprice = codeid_chengbengprice[code_id]\n stock_list_coll = conn.stock_list\n code_id_tmp = code_id.replace(\".\",\"\")\n each_stock_price_info = stock_list_coll[code_id_tmp].find().sort([(\"time_key\",1)])\n each_stock_price_info_pd = pd.DataFrame(list(each_stock_price_info))\n latest_newPrice,latest_Ma250Price,latest_Ma1000Price = cacl_chengbeng_of_latest_ma250_ma1000_pricerate(each_stock_price_info_pd,code_id,stock_name,chengbengprice)\n rate_latest = 0\n if code_id in codeid_chengbeng_latestprice_rate_dic:\n rate_latest = codeid_chengbeng_latestprice_rate_dic[code_id]\n tmp_codeid_rate[code_id] =round(rate_latest,3)\n tmp_codeid_latest_price[code_id]=round(latest_newPrice,3)\n tmp_codeid_ma250_price[code_id] =round(latest_Ma250Price,3)\n tmp_codeid_ma1000_price[code_id]=round(latest_Ma1000Price,3)\n #print(\"tmp_codeid_rate:\",tmp_codeid_rate)\n tmp_codeid_rate = sorted(tmp_codeid_rate.items(), key=lambda d: d[1])\n for codeid_rate in tmp_codeid_rate:\n code_id = codeid_rate[0]\n rate_latest = codeid_rate[1]\n code_id_tmp = code_id.replace(\".\",\"\")\n chengbengprice = codeid_chengbengprice[code_id]\n latest_newPrice = tmp_codeid_latest_price[code_id]\n latest_Ma250Price = tmp_codeid_ma250_price[code_id]\n latest_Ma1000Price = tmp_codeid_ma1000_price[code_id]\n stock_name = tmp_codeid_stockname[code_id]\n\n rate_ma250 = 0\n if code_id in codeid_chengbeng_ma250price_rate_dic:\n rate_ma250 = codeid_chengbeng_ma250price_rate_dic[code_id]\n\n rate_ma1000 =0\n if code_id in codeid_chengbeng_ma1000price_rate_dic:\n rate_ma1000 = codeid_chengbeng_ma1000price_rate_dic[code_id]\n\n print(\"【rate_latest】:\",rate_latest,\" chengbengprice:\",chengbengprice,\" latest_newPrice:\",latest_newPrice,\" latest_Ma250Price:\",latest_Ma250Price,\" latest_Ma1000Price:\",latest_Ma1000Price,\" code_id_tmp:\",code_id_tmp)\n #正数表示,买入成本远远高于实际价值;负数,表示买入成本低于成本\n print(\"stock_name:\",stock_name,\" 成本与ma250位置:\",rate_ma250,\" 成本与ma1000位置:\",rate_ma1000)\n num = codeid_stock_num[code_id]\n\n kuisong = num*(latest_newPrice-chengbengprice)\n kuisong = round(kuisong,3)\n all_money = num*chengbengprice\n print(\"盈亏金额:\",kuisong,\" 购买数量:\",num,\" 购买总价值:\",all_money)\n print(\" ------------------------ \")\n print(\"\\n\\n\")\n print(\" =====zhishu_name:\",zhishu_name,\" ========end=========== =================== \")\n \n print(\"cacl--------end\")\n sys.exit()\n\n\n\n","sub_path":"src/sell_stock_list/stock_best_worst_sell.py","file_name":"stock_best_worst_sell.py","file_ext":"py","file_size_in_byte":10406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"512288255","text":"from selenium import webdriver\nimport time\nimport sys\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.chrome.options import Options\nfrom 
selenium.webdriver.common.keys import Keys\noption = Options()\n\noption.add_argument(\"--disable-infobars\")\noption.add_argument(\"start-maximized\")\noption.add_argument(\"--disable-extensions\")\noption.add_experimental_option(\"prefs\", {\n \"profile.default_content_setting_values.notifications\": 2\n})\n\n\ndriver = webdriver.Chrome(chrome_options=option)\ndriver.maximize_window()\ndriver.get('https://www.facebook.com')\n\nwait = WebDriverWait(driver, 10)\n\n# login_elem = browser.find_element_by_xpath(\n# '//*[@id=\"react-root\"]/section/main/article/div[2]/div[2]/p/a')\n\nusername = driver.find_element_by_id(\"email\")\npassword = driver.find_element_by_id(\"pass\")\nsubmit = driver.find_elements_by_class_name(\"_6ltg\")[0]\nusername.send_keys(sys.argv[1])\npassword.send_keys(sys.argv[2])\n# Step 4) Click Login\nsubmit.click()\n\ntime.sleep(5)\ndriver.get(sys.argv[3])\n\ntime.sleep(6)\n\npub = driver.find_elements_by_class_name('oh7imozk')[0]\n\n\nlikeIcon = pub.find_elements_by_class_name('pcp91wgn')[0]\n\n#time.sleep(5)\ndriver.execute_script(\"arguments[0].click();\", likeIcon)\ntime.sleep(2)\n# pic = likeIcon.find_elements_by_tag_name('img')[0]\n# pic.click()\n#likeIcon.click()\n\n#model = driver.find_element_by_css_selector('#mount_0_0 > div > div:nth-child(1) > div.rq0escxv.l9j0dhe7.du4w35lb > div:nth-child(7) > div > div > div.rq0escxv.l9j0dhe7.du4w35lb > div > div.iqfcb0g7.tojvnm2t.a6sixzi8.k5wvi7nf.q3lfd5jv.pk4s997a.bipmatt0.cebpdrjk.qowsmv63.owwhemhu.dp1hu0rb.dhp61c6y.l9j0dhe7.iyyx5f41.a8s20v7p > div > div > div')\nmodel = driver.find_element_by_css_selector('#mount_0_0 > div > div:nth-child(1) > div.rq0escxv.l9j0dhe7.du4w35lb > div:nth-child(7) > div > div > div.rq0escxv.l9j0dhe7.du4w35lb > div > div.iqfcb0g7.tojvnm2t.a6sixzi8.k5wvi7nf.q3lfd5jv.pk4s997a.bipmatt0.cebpdrjk.qowsmv63.owwhemhu.dp1hu0rb.dhp61c6y.l9j0dhe7.iyyx5f41.a8s20v7p > div > div > div > div.q5bimw55.rpm2j7zs.k7i0oixp.gvuykj2m.j83agx80.cbu4d94t.ni8dbmo4.eg9m0zos.l9j0dhe7.du4w35lb.ofs802cu.pohlnb88.dkue75c7.mb9wzai9.l56l04vs.r57mb794.kh7kg01d.c3g1iek1.otl40fxz.cxgpxx05.rz4wbd8a.sj5x9vvc.a8nywdso')\n#model.send_keys(Keys.END)\n#driver.execute_script(\"arguments[0].send_keys(arguments[1]);\", model, Keys.END)\n#model.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n\n#driver.execute_script(\"arguments[0].scrollTop = arguments[0].scrollHeight\", model)\n\ntime.sleep(2)\n\nlast_height = driver.execute_script(\"return arguments[0].scrollHeight\", model)\n\n\nwhile True:\n driver.execute_script(\"arguments[0].scrollTop = arguments[0].scrollHeight\", model)\n\n time.sleep(4)\n\n new_height = driver.execute_script(\"return arguments[0].scrollHeight\", model)\n if new_height == last_height:\n # If heights are the same it will exit the function\n break\n last_height = new_height\n\n\n\nbotons = model.find_elements_by_css_selector('.a8c37x1j.ni8dbmo4.stjgntxs.l9j0dhe7.ltmttdrg.g0qnabr5')\n\nif len(botons) > 0:\n for b in botons:\n\n if b.text == \"Invitar\":\n time.sleep(3)\n driver.execute_script(\"arguments[0].click();\", b)\n\n\ndriver.close()","sub_path":"facebookMark.py","file_name":"facebookMark.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"133510572","text":"#!/usr/bin/env python2\n#\n# Copyright (c) 2016,2018 Cisco and/or its affiliates.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may 
obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom string import Template\n\nfrom jvpp_model import is_control_ping, is_control_ping_reply, is_dump, is_request\n\n\ndef generate_notifications(work_dir, model, logger):\n \"\"\" Generates notification registry interface and implementation \"\"\"\n logger.debug(\"Generating Notification interfaces and implementation for %s\" % model.json_api_files)\n messages = filter(_notification_filter, model.messages)\n _generate_global_event_callback(work_dir, model, messages)\n _generate_event_registry(work_dir, model, messages)\n _generate_event_registry_impl(work_dir, model, messages)\n _generate_event_registry_provider(work_dir, model)\n\n\ndef _notification_filter(msg):\n # Generate callbacks for all messages except for dumps and requests (handled by vpp, not client).\n # Also skip control ping managed by jvpp registry.\n return (not is_control_ping(msg)) and \\\n (not is_control_ping_reply(msg)) and \\\n (not is_dump(msg)) and \\\n (not is_request(msg))\n\n\ndef _generate_event_registry(work_dir, model, messages):\n plugin_name = model.plugin_java_name\n plugin_package = model.plugin_package\n\n register_callback_methods = []\n for msg in messages:\n name = _callback_name(msg)\n fqn_name = _fqn_callback_name(plugin_package, name)\n # TODO create NotificationListenerRegistration and return that instead of AutoCloseable to better indicate\n # that the registration should be closed\n register_callback_methods.append(\" java.lang.AutoCloseable register%s(%s callback);\" % (name, fqn_name))\n\n with open(\"%s/%sEventRegistry.java\" % (work_dir, plugin_name), \"w\") as f:\n f.write(_EVENT_REGISTRY_TEMPLATE.substitute(\n plugin_package=plugin_package,\n plugin_name=plugin_name,\n json_filename=model.json_api_files,\n register_callback_methods=\"\\n\".join(register_callback_methods)\n ))\n\n_EVENT_REGISTRY_TEMPLATE = Template(\"\"\"\npackage $plugin_package.notification;\n\n/**\n *
Registry for notification callbacks defined in ${plugin_name}.\n *
It was generated by notification_gen.py based on $json_filename.\n */\npublic interface ${plugin_name}EventRegistry extends io.fd.vpp.jvpp.notification.EventRegistry {\n\n$register_callback_methods\n\n @Override\n void close();\n}\n\"\"\")\n\n\ndef _generate_event_registry_impl(work_dir, model, messages):\n plugin_name = model.plugin_java_name\n plugin_package = model.plugin_package\n\n register_callback_methods = []\n handler_methods = []\n for msg in messages:\n notification = msg.java_name_upper\n callback = \"%sCallback\" % notification\n register_callback_methods.append(_REGISTER_CALLBACK_IMPL_TEMPLATE.substitute(\n plugin_package=plugin_package,\n notification=notification,\n callback=callback\n ))\n handler_methods.append(_HANDLER_IMPL_TEMPLATE.substitute(\n plugin_package=plugin_package,\n notification=notification,\n callback=callback\n ))\n\n with open(\"%s/%sEventRegistryImpl.java\" % (work_dir, plugin_name), \"w\") as f:\n f.write(_EVENT_REGISTRY_IMPL_TEMPLATE.substitute(\n plugin_package=plugin_package,\n plugin_name=plugin_name,\n json_filename=model.json_api_files,\n register_callback_methods=\"\".join(register_callback_methods),\n handler_methods=\"\".join(handler_methods)\n ))\n\n_REGISTER_CALLBACK_IMPL_TEMPLATE = Template(\"\"\"\n public java.lang.AutoCloseable register$callback(final $plugin_package.callback.$callback callback){\n if(null != registeredCallbacks.putIfAbsent($plugin_package.dto.$notification.class, callback)){\n throw new IllegalArgumentException(\"Callback for \" + $plugin_package.dto.$notification.class +\n \"notification already registered\");\n }\n return () -> registeredCallbacks.remove($plugin_package.dto.$notification.class);\n }\n\"\"\")\n\n_HANDLER_IMPL_TEMPLATE = Template(\"\"\"\n @Override\n public void on$notification(\n final $plugin_package.dto.$notification notification) {\n if (LOG.isLoggable(java.util.logging.Level.FINE)) {\n LOG.fine(java.lang.String.format(\"Received $notification event message: %s\", notification));\n }\n final io.fd.vpp.jvpp.callback.JVppCallback jVppCallback = registeredCallbacks.get($plugin_package.dto.$notification.class);\n if (null != jVppCallback) {\n (($plugin_package.callback.$callback) registeredCallbacks\n .get($plugin_package.dto.$notification.class))\n .on$notification(notification);\n }\n }\n\"\"\")\n\n_EVENT_REGISTRY_IMPL_TEMPLATE = Template(\"\"\"\npackage $plugin_package.notification;\n\n/**\n *
Notification registry delegating notification processing to registered callbacks.\n *
It was generated by notification_gen.py based on $json_filename.\n */\npublic final class ${plugin_name}EventRegistryImpl implements ${plugin_name}EventRegistry, Global${plugin_name}EventCallback {\n\n // TODO add a special NotificationCallback interface and only allow those to be registered\n private final java.util.concurrent.ConcurrentMap, io.fd.vpp.jvpp.callback.JVppCallback> registeredCallbacks =\n new java.util.concurrent.ConcurrentHashMap<>();\n private static java.util.logging.Logger LOG = java.util.logging.Logger.getLogger(${plugin_name}EventRegistryImpl.class.getName());\n\n $register_callback_methods\n $handler_methods\n\n @Override\n public void close() {\n registeredCallbacks.clear();\n }\n\n @Override\n public void onError(io.fd.vpp.jvpp.VppCallbackException ex) {\n java.util.logging.Logger LOG = java.util.logging.Logger.getLogger(${plugin_name}EventRegistryImpl.class.getName());\n LOG.log(java.util.logging.Level.WARNING, java.lang.String.format(\"Received onError exception: call=%s, context=%d, retval=%d%n\", ex.getMethodName(),\n ex.getCtxId(), ex.getErrorCode()), ex);\n }\n}\n\"\"\")\n\n\ndef _generate_global_event_callback(work_dir, model, messages):\n plugin_name = model.plugin_java_name\n plugin_package = model.plugin_package\n\n callbacks = \"\"\n callback_list = []\n for msg in messages:\n fqn_name = _fqn_callback_name(plugin_package, _callback_name(msg))\n callback_list.append(fqn_name)\n\n if callback_list:\n callbacks = \" extends %s\" % \", \".join(callback_list)\n\n with open(\"%s/Global%sEventCallback.java\" % (work_dir, plugin_name), \"w\") as f:\n f.write(_GLOBAL_EVENT_CALLBACK_TEMPLATE.substitute(\n plugin_package=plugin_package,\n plugin_name=plugin_name,\n json_filename=model.json_api_files,\n callbacks=callbacks\n ))\n\n_GLOBAL_EVENT_CALLBACK_TEMPLATE = Template(\"\"\"\npackage $plugin_package.notification;\n\n/**\n *
Aggregated callback interface for notifications only.\n *
It was generated by notification_gen.py based on $json_filename.\n */\npublic interface Global${plugin_name}EventCallback$callbacks {\n\n}\n\"\"\")\n\n\ndef _generate_event_registry_provider(work_dir, model):\n plugin_name = model.plugin_java_name\n with open(\"%s/%sEventRegistryProvider.java\" % (work_dir, plugin_name), \"w\") as f:\n f.write(_EVENT_REGISTRY_PROVIDER_TEMPLATE.substitute(\n plugin_package=model.plugin_package,\n plugin_name=plugin_name,\n json_filename=model.json_api_files\n ))\n\n_EVENT_REGISTRY_PROVIDER_TEMPLATE = Template(\"\"\"\npackage $plugin_package.notification;\n\n /**\n * Provides ${plugin_name}EventRegistry.\n *
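# A self-contained illustration of the string.Template substitution pattern these
# generator functions rely on; the template text below is made up for the example.
from string import Template

demo = Template("public interface ${plugin_name}EventRegistryProvider {}")
print(demo.substitute(plugin_name="Acl"))  # -> public interface AclEventRegistryProvider {}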
The file was generated by notification_gen.py based on $json_filename.\n */\npublic interface ${plugin_name}EventRegistryProvider extends io.fd.vpp.jvpp.notification.EventRegistryProvider {\n\n @Override\n public ${plugin_name}EventRegistry getEventRegistry();\n}\n\"\"\")\n\n\ndef _callback_name(msg):\n return \"%sCallback\" % msg.java_name_upper\n\n\ndef _fqn_callback_name(plugin_package, callback_name):\n return \"%s.callback.%s\" % (plugin_package, callback_name)\n","sub_path":"extras/japi/java/jvpp/gen/jvppgen/notification_gen.py","file_name":"notification_gen.py","file_ext":"py","file_size_in_byte":8663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"606179114","text":"#! /usr/bin/env python\n# coding=utf-8\nimport cv2\nimport numpy as np\n\nfrom countOre import Info\nfrom mkdir import *\n# import cv2.cv as cv\n\n\n# 定义保存图片函数\n# image:要保存的图片名字\n# addr;图片地址与相片名字的前部分\n# num: 相片,名字的后缀。int 类型\ndef save_image(image, addr, num):\n\taddress = addr + str(num) + '_.jpg' # 加_防止冲突\n\tcv2.imwrite(address, image)\n\n\n# 截取一个视频所有帧\ndef clipAllFps(videoPath, outputPath):\n\t# videoPath = \"./data/ore1.mp4\"\n\tvideoCapture = cv2.VideoCapture(videoPath) # 从文件读取视频\n\t# 判断视频是否打开\n\tif (videoCapture.isOpened()):\n\t\tprint(\"Open\")\n\telse:\n\t\tprint(\"Fail to open!\")\n\n\tmkdir(outputPath)\n\tsuccess, frame = videoCapture.read() # 读取第一帧\n\n\ti = 0\n\twhile success:\n\t\ti = i + 1\n\t\tsave_image(frame, outputPath, i)\n\t\tif success:\n\t\t\tprint('save image:', i)\n\t\tsuccess, frame = videoCapture.read()\n\tvideoCapture.release()\n\n\n# 每隔n帧提取一次\n# timeF : 时间间隔\ndef clipSomeFps(videoPath, n, outputPath):\n\t# 读取视频文件\n\tvideoCapture = cv2.VideoCapture(videoPath)\n\t# 通过摄像头的方式\n\t# videoCapture=cv2.VideoCapture(1)\n\n\tmkdir(outputPath)\n\t# 读帧\n\tsuccess, frame = videoCapture.read()\n\ti = 0\n\tj = 0\n\twhile success:\n\t\ti = i + 1\n\t\tif (i % n == 0):\n\t\t\tj = j + 1\n\t\t\tsave_image(frame, outputPath, j)\n\t\t\tprint('save image:', i)\n\t\tsuccess, frame = videoCapture.read()\n\tvideoCapture.release()\n\n\nif __name__ == \"__main__\":\n\tvideoPath = \"./data/ore1.mp4\"\n\toutputPath = './data/output/'\n\t# clipAllFps(videoPath,outputPath)\n\tinfo = Info()\n\tprint(\"hello\")\n\t# 每隔29帧读取一次\n\tclipSomeFps(videoPath,200,outputPath)\n\n","sub_path":"Code/Minner/loadVedio.py","file_name":"loadVedio.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"283748685","text":"# coding: utf-8\n# Copyright (c) 2018-2019, Taku MURAKAMI. All rights reserved.\n# Distributed under the terms of the BSD 3-clause License.\n\nimport logging\nimport random\nimport math\nimport pymatgen\nfrom pymatgen.io.vasp.inputs import Poscar\n\n\"\"\"\nModel generator.\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\n\nclass ModelGenerator(object):\n \"\"\"\n Model generator. 
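# Hedged sketch of the symmetric modification described below (modify_symmetrical):
# a single random factor drawn from [1+min, 1+max] scales every component of the
# lattice matrix, so cell shape is preserved while cell length changes.
import random

def scale_lattice(matrix, min_change=-0.01, max_change=0.01):
    factor = random.uniform(1.0 + min_change, 1.0 + max_change)
    return [[component * factor for component in row] for row in matrix]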
It can support symmetrical and unsymmetrical modification of models.\n In unsymmetrical modification, modifing cell length, cell shape (three angles),\n atom coordinates with varidating fractional coordinates and swapping species are containd.\n \n By treating models, we use the pymatgen.Structure class with dict-type representation.\n You can input any format of structure date it can be used in pymatgen package\n and treating it as pymatgen.Strucuture object in this class.\n Modified models can be output as pymatgen.Strucuture object.\n \n Arguments\n ---------\n struct: (fmt)\n The original structure.\n fmt: str\n The format of \"struct\".\n \n Parameters\n ----------\n struct: pymatgen.Structure\n The original structure.\n struct_dict: dict\n The (modified) structure with dict-type representation.\n \"\"\"\n \n def __init__(self, struct, fmt=\"Structure\"):\n \"\"\"\n Arguments\n ---------\n struct: (fmt)\n The original structure.\n fmt: str\n The format of \"struct\".\n \"\"\"\n if fmt is \"Structure\":\n self.struct = struct\n else:\n self.struct = pymatgen.Structure.from_str(open(struct).read(), fmt=fmt)\n self.struct_dict = self.struct.as_dict()\n \n def get_struct(self):\n \"\"\"\n Gets modified structure.\n \n Returns\n -------\n pymatgen.Structure\n The modified structure.\n \"\"\"\n return pymatgen.Structure.from_dict(self.struct_dict)\n \n def modify_symmetrical(self, min=-0.01, max=0.01):\n \"\"\"\n Modifies given strucuture symmetrical,\n namely, only cell length are isometrically changed.\n \n Arguments\n ---------\n min: float\n Minimum change of cell length (%/100).\n max: float\n Maximum change of cell length (%/100).\n \n Parameters\n ----------\n var: float\n The change of cell length given by random.\n \n Returns\n -------\n self: ModelGenerator\n \"\"\"\n var = random.uniform(1.00+min, 1.00+max)\n for i, row in enumerate(self.struct_dict[\"lattice\"][\"matrix\"]):\n for j, column in enumerate(row):\n column *= var\n self.struct_dict[\"lattice\"][\"matrix\"][i][j] = column\n return self\n \n def modify_unsymmetrical(self, modify_cell=True, modify_shape=False, modify_atom=False,\n swap_atom=False, cell_min=-0.01, cell_max=0.01,\n atom_modify_prob=10.0, atom_min=-0.01, atom_max=0.01,\n atom_swap_num=10, atom_swap_restrict=True):\n \"\"\"\n Modifies given structure unsymmetrical.\n \n Arguments\n ---------\n modify_cell: bool\n If modify cell length.\n modify_shape: bool\n If modify cell shape.\n modify_atom: bool\n If modify atom fractional coordinates.\n swap_atom: bool\n If swap two atoms.\n cell_min: float\n Minimum of changing cell (%/100).\n cell_max: float\n Maxmum of changing cell (%/100).\n atom_modify_prob: float\n Probability of changing atom coordinates (%).\n atom_min: float\n Minimum of changing atom fractional coordinates.\n atom_max: float\n Maxmum of changing atom fractional coordinates (%).\n atom_swap_num: int\n Number of swapping two atoms.\n atom_swap_resrtict: bool\n If swapping restrict to metal(non-metal)-metal(non-metal).\n \n Returns\n -------\n self\n \"\"\"\n if modify_cell is True:\n self.modify_cell(modify_shape, cell_min, cell_max)\n if modify_atom is True:\n self.modify_atom(atom_modify_prob, atom_min, atom_max)\n if swap_atom is True:\n self.swap_atom(atom_swap_num, atom_swap_restrict)\n return self\n \n def modify_cell(self, modify_shape, min, max):\n \"\"\"\n Modifies cell length and shape.\n \n Arguments\n ---------\n modify_shape: bool\n If modify cell shape.\n min: float\n Minimum of changing cell (%/100).\n max: float\n Maximum of 
changing cell (%/100).\n \"\"\"\n for i, row in enumerate(self.struct_dict[\"lattice\"][\"matrix\"]):\n if modify_shape is True:\n for j, column in enumerate(row):\n # var_shape is to change cell shape if the elements of axis is 0.0.\n # The validity of this type of changing shape is under testing.\n var_shape = random.uniform(min, max)\n var_cell = random.uniform(1.00+min, 1.00+max)\n column += var_shape\n column *= var_cell\n self.struct_dict[\"lattice\"][\"matrix\"][i][j] = column\n else:\n var = random.uniform(1.00+min, 1.00+max)\n for j, column in enumerate(row):\n column *= var\n self.struct_dict[\"lattice\"][\"matrix\"][i][j] = column\n \n def modify_atom(self, prob, min, max):\n \"\"\"\n Modify atomic coordinates.\n \n Arguments\n ---------\n prob: float\n Probability of changing atomic fractional coordinates (%).\n min: float\n Minimum of changing atomic fractional coordinates (%).\n max: float\n Maximum of changing atomic fractional coordinates (%).\n \"\"\"\n for i, site in enumerate(self.struct_dict[\"sites\"]):\n for j, coord in enumerate(site[\"abc\"]):\n if random.uniform(0.0, 100.0) < prob:\n coord += random.uniform(min, max)\n self.struct_dict[\"sites\"][i][\"abc\"][j] = coord\n \n def swap_atom(self, num, restrict):\n \"\"\"\n Swaps to atoms.\n \n Arguments\n ---------\n num: int\n Number of swapping two atoms.\n restrict: bool\n If swapping restrict to metal(non-metal)-metal(non-metal).\n \n Parameters\n ----------\n atom1, atom2: int\n The index of two swapping atoms.\n dummy: pymatgen.Structure.element\n The intermidiate of swapping atoms.\n \"\"\"\n for i in range(num):\n atom1, atom2 = self.select_atoms(restrict)\n dummy = self.struct_dict[\"sites\"][atom1][\"species\"][0][\"element\"]\n self.struct_dict[\"sites\"][atom1][\"species\"][0][\"element\"] = \\\n self.struct_dict[\"sites\"][atom2][\"species\"][0][\"element\"]\n self.struct_dict[\"sites\"][atom2][\"species\"][0][\"element\"] = dummy\n self.sort_atom()\n \n def select_atoms(self, restrict):\n \"\"\"\n Selects two swapping atoms.\n \n Arguments\n ---------\n restrict: bool\n If swapping restrict to metal(non-metal)-metal(non-metal).\n \n Parameters\n ----------\n atom1, atom2: int\n The index of two swapping atoms.\n \n Returns\n -------\n atom1, atom2: int\n The index of two swapping atoms.\n \"\"\"\n while True:\n atom1 = random.randint(0, len(self.struct_dict[\"sites\"])-1)\n atom2 = random.randint(0, len(self.struct_dict[\"sites\"])-1)\n if not restrict or (self.is_metal(atom1) is self.is_metal(atom2)):\n break\n return atom1, atom2\n \n def is_metal(self, atom):\n \"\"\"\n If given atom is metal.\n \n Arguments\n ---------\n atom: int\n The index of a swapping atoms.\n \n Returns\n -------\n bool\n If given atom is metal.\n \"\"\"\n return self.struct_dict[\"sites\"][atom][\"species\"][0][\"element\"] not in [\n \"H\", \"He\", \"B\", \"C\", \"N\", \"O\", \"F\", \"Ne\", \"Si\", \"P\", \"S\", \"Cl\",\n \"Ar\", \"As\", \"Se\", \"Br\", \"Kr\", \"Te\", \"I\", \"Xe\", \"At\", \"Rn\"]\n \n def sort_atom(self):\n \"\"\"\n Sorts atoms in struct_dict with key of atomic species.\n \"\"\"\n self.struct_dict[\"sites\"] = \\\n sorted(self.struct_dict[\"sites\"], key=lambda x:x[\"species\"][0][\"element\"])\n \n def make_enough_large_supercell(self, num=10):\n \"\"\"\n Makes enough large supercell for effective swapping.\n \n Arguments\n ---------\n num: int\n The lower limit of the number of atoms in the cell.\n If the number is lower than it, make shortest lattice vector\n twice larger by using 
pymatgen.Structure.make_supercell() method.\n \n Parameters\n ----------\n supercell: list\n The magnification rate of supercell,\n [2, 1, 1], [1, 2, 1] or [1, 1, 2].\n lattice: list\n The list of lattice length.\n min_lattice: int\n The index of lattice with minimum length.\n \n Returns\n -------\n bool\n If structure is largened.\n \"\"\"\n if not self.struct.num_sites < num:\n return False\n while True:\n supercell = [1, 1, 1]\n lattice = [self.struct.lattice.a, self.struct.lattice.b, self.struct.lattice.c]\n min_lattice = lattice.index(min(lattice))\n supercell[min_lattice] = 2\n self.struct.make_supercell(supercell)\n if self.struct.num_sites < num:\n self.struct = self.get_struct()\n return True\n \n def reset_struct(self):\n \"\"\"\n Resets modified structure to the initial one.\n \n Returns\n -------\n pymatgen.Structure\n Initial structure.\n \"\"\"\n self.struct_dict = self.struct.as_dict()\n return self.struct\n \n def check_constrains(self, a_min=-1.0, a_max=100.0, b_min=-1.0, b_max=100.0,\n c_min=-1.0, c_max=100.0, alpha_min=0.0, alpha_max=360.0,\n beta_min=0.0, beta_max=360.0, gamma_min=0.0, gamma_max=360.0):\n \"\"\"\n Checks constrains for modified structure.\n \n Arguments\n ---------\n a_min, a_max, b_min, b_max, c_min, c_max: float\n The constrains for cell length, a, b and c.\n alpha_min, alpha_max, beta_min, beta_max, gamma_min, gamma_max: float\n The constrains for cell angle alpha, beta and gamma.\n \n Returns\n -------\n bool\n If constrains holds.\n \"\"\"\n return (a_min < self.struct_dict[\"lattice\"][\"a\"] < a_max and\n b_min < self.struct_dict[\"lattice\"][\"b\"] < b_max and\n c_min < self.struct_dict[\"lattice\"][\"c\"] < c_max and\n alpha_min < self.struct_dict[\"lattice\"][\"alpha\"] < alpha_max and\n beta_min < self.struct_dict[\"lattice\"][\"beta\"] < beta_max and\n gamma_min < self.struct_dict[\"lattice\"][\"gamma\"] < gamma_max )\n","sub_path":"pythroughput/model/modelgenerator.py","file_name":"modelgenerator.py","file_ext":"py","file_size_in_byte":11494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"454664983","text":"\nimport requests\nfrom bs4 import BeautifulSoup\nimport sys\n\nclass Crawler_PTT:\n\n\tdef __init__(self, board_name):\n\t\tself.board_name = board_name\n\t\tself.base_url = 'https://www.ptt.cc'\n\n\tdef get_soup_object(self, url):\n\t\treq = requests.get(url, cookies={\"over18\":\"1\"})\n\t\treturn BeautifulSoup(req.text, 'html.parser') # get BeautifulSoup object\n\n\tdef get_to_work(self):\n\t\tself.board_url = \"https://www.ptt.cc/bbs/{board_name}/index.html\".format(board_name=self.board_name)\n\t\tsoup = self.get_soup_object(self.board_url)\n\n\t\tfor thread in soup.find_all(class_='r-ent'):\n\t\t\tdate = thread.find('div', attrs={'class': 'date'}).text\n\t\t\tauthor = thread.find('div', attrs={'class': 'author'}).text\n\t\t\tif thread.find('a'):\n\t\t\t\ttitle = thread.find('div', attrs={'class': 'title'}).text\n\t\t\t\tcontent_path = self.base_url + thread.find('a')['href']\n\t\t\t\t# get content of the thread\n\t\t\t\tsoup = self.get_soup_object(content_path)\n\t\t\t\tcontent = soup.find(\"div\", {\"id\":\"main-container\"}).text\n\t\t\tprint(\"===========================\")\n\t\t\tprint(\"日期\", date)\n\t\t\tprint(\"作者\", author)\n\t\t\tprint(\"標題\", title)\n\t\t\tprint(\"內文\", content)\n\t\t\tprint(\"看板名稱\", self.board_name)\n\t\t\tprint(\"===========================\")\n\nif __name__ == '__main__':\n\tboard_name = 'NBA'\n\tcrawler = 
Crawler_PTT(board_name)\n\tcrawler.get_to_work()","sub_path":"part2/crawler_ptt.py","file_name":"crawler_ptt.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"14953328","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 5 21:43:18 2017\n\n@author: dawit\n\"\"\"\n\nimport warnings\nwarnings.simplefilter(action=\"ignore\", category=FutureWarning)\nfrom keras import backend as K\nimport os\nos.environ['THEANO_FLAGS'] = \"device=gpu\" \n# keras imports\nfrom keras.applications.vgg16 import VGG16, preprocess_input\nfrom keras.applications.vgg19 import VGG19, preprocess_input\nfrom keras.applications.xception import Xception, preprocess_input\nfrom keras.applications.resnet50 import ResNet50, preprocess_input\nfrom keras.applications.inception_v3 import InceptionV3, preprocess_input\nfrom keras.preprocessing import image\nfrom keras.models import Model\nfrom keras.models import model_from_json\nfrom keras.optimizers import SGD\n#from convnetskeras.convnets import preprocess_image_batch, convnet\n# other imports\nfrom sklearn.preprocessing import LabelEncoder\nimport numpy as np\nimport glob\nimport cv2\nimport h5py\nimport os\nimport json\nimport _pickle as cPickle\nimport datetime\nimport time\n\n# load the user configs\nwith open('config.json') as f: \n config = json.load(f)\n\n# config variables\nmodel_name = str(config[\"model\"])\nweights = str(config[\"weights\"])\ninclude_top = str(config[\"include_top\"])\ntrain_path = str(config[\"train_path\"])\nfeatures_path = str(config[\"features_path\"])\nlabels_path = str(config[\"labels_path\"])\ntest_size = str(config[\"test_size\"])\nresults = str(config[\"results\"])\nmodel_path = str(config[\"classifier_path\"])\n#weights_path = config[\"weights_path\"]\nstart = time.time()\n# start time\nprint (\"[STATUS] start time - {}\".format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")))\n\n\n# create the pretrained models\n# check for pretrained weight usage or not\n# check for top layers to be included or not\nif model_name == \"Vgg16\":\n base_model = VGG16(weights=weights)\n model = Model(input=base_model.input, output=base_model.get_layer('block3_pool').output) \n image_size = (224, 224)\nelif model_name == \"Vgg19\":\n base_model = VGG19(weights=weights)\n model = Model(input=base_model.input, output=base_model.get_layer('block3_conv3').output)\n image_size = (224, 224)\nelif model_name == \"resnet50\":\n base_model = ResNet50(weights=weights)\n model = Model(input=base_model.input, output=base_model.get_layer('avg_pool').output)\n image_size = (224, 224)\nelif model_name == \"inceptionv3\":\n base_model = InceptionV3(weights='imagenet')\n model = Model(input=base_model.input, output=base_model.get_layer('mixed8').output)\n image_size = (299, 299)\nelif model_name == \"xception\":\n base_model = Xception(weights=weights)\n model = Model(input=base_model.input, output=base_model.get_layer('block3_sepconv2').output)\n image_size = (299, 299)\nelif model_name == 'Alex-net':\n sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)\n model = convnet('alexnet',weights_path=\"weights/alexnet_weights.h5\", heatmap=False)\n model.compile(optimizer=sgd, loss='mse')\n image_size = (227, 227)\nelse:\n base_model = None\n\nprint (\"[INFO] successfully loaded base model and model...\")\n\n# path to training dataset\ntrain_labels = os.listdir(train_path)\n\n# encode the labels\nprint(\"[INFO] encoding labels...\")\nle = 
LabelEncoder()\nle.fit([tl for tl in train_labels])\n\n# variables to hold features and labels\nfeatures = []\nlabels = []\n\n# loop over all the labels in the folder\nfor i, label in enumerate(train_labels):\n cur_path = train_path + \"/\" + label\n for image_path in glob.glob(cur_path + \"/*.jpg\"):\n img = image.load_img(image_path)\n x = image.img_to_array(img)\n x = x[55:333,155:578]\n x = cv2.resize(x,image_size)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n feature = model.predict(x)\n flat = feature.flatten()\n features.append(flat)\n labels.append(label)\n print (\"[INFO] processed - {}\".format(i))\n print (\"[INFO] completed label - {}\".format(label))\n\n# encode the labels using LabelEncoder\ntargetNames = np.unique(labels)\nle = LabelEncoder()\nle_labels = le.fit_transform(labels)\n\n# get the shape of training labels\nprint (\"[STATUS] training labels: {}\".format(le_labels))\nprint (\"[STATUS] training labels shape: {}\".format(le_labels.shape))\n\n# save features and labels\nh5f_data = h5py.File(features_path, 'w')\nh5f_data.create_dataset('dataset_1', data=np.array(features))\n\nh5f_label = h5py.File(labels_path, 'w')\nh5f_label.create_dataset('dataset_1', data=np.array(le_labels))\n\nh5f_data.close()\nh5f_label.close()\n\n# save model and weights\nmodel_json = model.to_json()\nwith open(model_path + str(test_size) + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n\n# save weights\nmodel.save_weights(model_path + str(test_size) + \".h5\")\nprint(\"[STATUS] saved model and weights to disk..\")\n\nprint (\"[STATUS] features and labels saved..\")\n\n# end time\nend = time.time()\nprint (\"[STATUS] end time - {}\".format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")))\n","sub_path":"CNN/Off-the-shelf-training/FeatureExtraction.py","file_name":"FeatureExtraction.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"524752485","text":"from flask import Flask, request\nfrom datetime import datetime\nfrom html_style import HtmlStyle\n\nclass Visitors(object):\n html_main = '''\n
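# A minimal round-trip sketch for the HDF5 feature store written above; the file
# path is a placeholder, while 'dataset_1' matches the dataset name the script uses.
import h5py
import numpy as np

with h5py.File("features.h5", "r") as h5f:
    features = np.array(h5f["dataset_1"])
print(features.shape)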
<html>\n<body>\n<h1>Visitors book:</h1>\n<form action=\"/new_visitor\" method=\"post\">\n    <input type=\"text\" name=\"name\" placeholder=\"Name\">\n    <input type=\"text\" name=\"surname\" placeholder=\"Surname\">\n    <input type=\"submit\" value=\"Sign\">\n</form>\n<br>\n<a href=\"/visitors\">Show recent visitors..</a>\n<br>\n<a href=\"/delete_visitors\">Delete visitors book content..</a>\n</body>\n</html>\n'''\n\n    html_table = '''\n<h1>Recent visitors:</h1>\n        <table>\n            <tr>\n                <th>Name</th>\n                <th>Last name</th>\n                <th>Last visit</th>\n            </tr>\n'''\n\n    html_return = '<a href=\"/\">Return to main page..</a>'\n    html_delete = '''\n<html>\n<body>\n<h2>list of visitors deleted!!</h2>\n<br>\n<a href=\"/\">Return to main page..</a>\n</body>\n</html>\n'''\n    def __init__(self):\n        self.visitors_list = []\n        self.add('Jan', 'Novak')\n        self.add('Jan', 'Starak')\n        self.add('Pepa', 'Zdepa')\n        self.add('Karel', 'Zeman')\n\n\n    def add(self, name: str, surname: str) -> str or None:\n        # a returning visitor gets a refreshed timestamp; the previous visit time is returned\n        entry = (name, surname, datetime.now().strftime('%d-%m-%Y %H:%M:%S'))\n        for i, v in enumerate(self.visitors_list):\n            if v[0] == name and v[1] == surname:\n                last_visit = self.visitors_list[i][2]\n                self.visitors_list[i] = entry\n                return last_visit\n\n        self.visitors_list.append(entry)\n        return None\n\n\n    def clear_visitors(self):\n        self.visitors_list.clear()\n\n\n    def print_visitors_table(self):\n        html = ''\n        for v in self.visitors_list:\n            html += f'            <tr><td>{v[0]}</td><td>{v[1]}</td><td>{v[2]}</td></tr>\\n'\n\n        html += '        </table>'\n        return self.html_table + html\n\n\n    def on_main_show(self):\n        return f'{HtmlStyle.html_main_style} {self.html_main}'\n\n\n    def on_visitors_show(self):\n        html = f'''\n{HtmlStyle.html_main_style}\n<html>\n<body>\n<h1>Hello!</h1>\n{self.print_visitors_table()}\n<br>\n{self.html_return}\n</body>\n</html>\n'''\n        return html\n\n\n    def on_new_visitor(self, name, surname):\n        if len(name) or len(surname):\n            last_visit = self.add(name, surname)\n        else:\n            return f'{HtmlStyle.html_main_style}<h2>Error: you must enter at least one entry!</h2><br>{self.html_return}<br>'\n\n        if last_visit:\n            html_last_visit = f'Your last visit was at {last_visit}<br>'\n        else:\n            html_last_visit = '<br>'\n\n        html = f'''\n{HtmlStyle.html_main_style}\n<html>\n<body>\n<h1>Hello!</h1>\n<h2>Welcome {name} {surname}!</h2>\n{html_last_visit}\n{self.print_visitors_table()}\n<br>\n{self.html_return}\n</body>\n</html>
\n'''\n return html\n\n\n def on_delete_visitors(self):\n self.visitors_list = []\n return f'{HtmlStyle.html_main_style} {self.html_delete}'\n","sub_path":"visitors.py","file_name":"visitors.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"202907063","text":"from pyVim.connect import SmartConnect, Disconnect\nfrom pyVmomi import vim, vmodl\n\nfrom web.settings import Settings as settings\n\nimport base64\nimport sys\nimport ssl\nimport atexit\nimport requests\nimport os\nimport time\nimport logging\nimport re\nimport random\nimport tempfile\nimport uuid\nimport urllib.request\n\n\nclass VCenter:\n\n def __init__(self):\n self._connected = False\n self._connection_cookie = None\n self.content = None\n self.__logger = logging.getLogger(__name__)\n self.vm_folders = None\n\n def __check_connection(self):\n result = self.__get_objects_list_from_container(self.content.rootFolder, vim.Datastore)\n if not result:\n self.connect()\n\n def connect(self):\n context = ssl._create_unverified_context()\n\n si = SmartConnect(\n host=settings.app['vsphere']['host'],\n user=settings.app['vsphere']['username'],\n pwd=settings.app['vsphere']['password'],\n port=settings.app['vsphere']['port'],\n connectionPoolTimeout=settings.app['vsphere']['timeout'],\n sslContext=context\n )\n\n if not si:\n self.__logger.error(\n 'Cannot connect to specified host using specified username and password'\n )\n\n self.content = si.content\n self._connection_cookie = si._stub.cookie\n self._connected = True\n self.vm_folders = VCenter.VmFolders(self)\n self.destination_datastore = None\n self.refresh_destination_datastore()\n self.refresh_destination_resource_pool()\n\n def idle(self):\n self.__check_connection()\n self.__logger.debug('keeping connection alive: {}'.format(self.content.about.vendor))\n\n def refresh_destination_datastore(self):\n self.destination_datastore = self.__get_destination_datastore()\n\n def refresh_destination_resource_pool(self):\n self.destination_resource_pool = self.__get_destination_resource_pool()\n\n def __find_datastore_cluster_by_name(self, datastore_cluster_name):\n \"\"\"\n Returns datastore cluster, if datastore cluster with this name exists. Otherwise None.\n :param datastore_cluster_name:\n :return: datastore cluster or None\n \"\"\"\n data_clusters = self.__get_objects_list_from_container(self.content.rootFolder, vim.StoragePod)\n for dc in data_clusters:\n if dc.name == datastore_cluster_name:\n return dc\n return None\n\n def __find_datastore_by_name(self, datastore_name):\n \"\"\"\n Returns datastore, if datastore with this name exists. 
Otherwise None.\n :param datastore_name:\n :return: datastore or None\n \"\"\"\n datastores = self.__get_objects_list_from_container(self.content.rootFolder, vim.Datastore)\n for ds in datastores:\n if ds.name == datastore_name:\n return ds\n return None\n\n def __get_free_datastore(self, datastore_cluster):\n \"\"\"\n Returns datastore from 'datastore_cluster' with most free space\n :param datastore_cluster:\n :return: datastore\n \"\"\"\n freespace = 0\n output_ds = None\n for ds in datastore_cluster.childEntity:\n ds_freespace = round(ds.summary.freeSpace/1024/1024/1024, 2)\n self.__logger.debug(\n 'inspected datastore: {}, {:.2f} GiB left'.format(ds.name, ds_freespace)\n )\n if ds.summary.accessible and ds_freespace > freespace:\n freespace = ds_freespace\n output_ds = ds\n\n if output_ds is not None:\n self.__logger.debug(f'selected datastore: {output_ds.name}, {freespace} GiB left')\n\n return output_ds\n\n def __get_destination_datastore(self):\n self.__logger.debug('Getting destination datastores...')\n\n unit_datastore_cluster_name = settings.app['vsphere']['storage']\n ds_cluster = self.__find_datastore_cluster_by_name(unit_datastore_cluster_name)\n\n if ds_cluster is not None:\n self.__logger.debug(f'found datastore cluster: {ds_cluster.name} ({ds_cluster})')\n datastore = self.__get_free_datastore(ds_cluster)\n return datastore\n\n # 'vsphere.storage' may contain directly datastore name\n datastore_name = unit_datastore_cluster_name\n datastore = self.__find_datastore_by_name(datastore_name)\n\n if datastore is not None and datastore.summary.accessible:\n ds_free_space = round(datastore.summary.freeSpace/1024/1024/1024, 2)\n self.__logger.debug(f'selected datastore: {datastore.name}, {ds_free_space} GiB left')\n\n return datastore\n\n def __get_destination_resource_pool(self):\n self.__logger.debug('Getting destination resource pool...')\n resource_pool_name = settings.app['vsphere']['resource_pool']\n resource_pools = self.__get_objects_list_from_container(self.content.rootFolder, vim.ResourcePool)\n for rp in resource_pools:\n if rp.name == resource_pool_name:\n return rp\n return None\n\n def __sleep_between_tries(self):\n time.sleep(random.uniform(\n settings.app['vsphere']['retries']['delay_period_min'],\n settings.app['vsphere']['retries']['delay_period_max']\n )\n )\n\n def __find_snapshot_by_name(self, snapshot_list, snapshot_name):\n for item in snapshot_list:\n if item.name == snapshot_name:\n self.__logger.debug('snapshot found: {}'.format(item))\n return item.snapshot\n\n if item.childSnapshotList != []:\n res_snap = self.__find_snapshot_by_name(item.childSnapshotList, snapshot_name)\n if res_snap:\n return res_snap\n\n return None\n\n def search_for_snapshot(self, vm, snapshot_name):\n res_snap = self.__find_snapshot_by_name(vm.snapshot.rootSnapshotList, snapshot_name)\n if res_snap is None:\n raise ValueError('snapshot {} cannot be found'.format(snapshot_name))\n\n return res_snap\n\n def __determine_root_system_folder(self, dc_folder):\n \"\"\"\n if root_system_folder is specified this tries to search for it and speeds up the deploy\n \"\"\"\n root_system_folder_name = settings.app['vsphere']['root_system_folder']\n if root_system_folder_name is not None:\n root_system_folder = next(\n (item for item in dc_folder.vmFolder.childEntity if item.name == root_system_folder_name),\n dc_folder\n )\n if dc_folder == root_system_folder:\n self.__logger.warn(\"root system folder: {} cannot be found; cfg: {}\".format(\n root_system_folder_name,\n 
\"config->vsphere->root_system_folder\"\n ))\n return root_system_folder\n return dc_folder\n\n def __determine_dc_folder(self, root_folder):\n \"\"\"\n Determines whether specific datacenter may be used to search for machines\n if search is not successful it returns the origin\n if search is successful it tries to locate the root folder of the system\n \"\"\"\n datacenter_name = settings.app['vsphere']['datacenter']\n if datacenter_name is not None:\n dc_folder = next(\n (item for item in root_folder.childEntity if item.name == datacenter_name),\n root_folder\n )\n return self.__determine_root_system_folder(dc_folder)\n return root_folder\n\n def __search_machine_by_name(self, vm_name):\n for cnt in range(settings.app['vsphere']['retries']['default']):\n try:\n objView = self.content.viewManager.CreateContainerView(\n self.__determine_dc_folder(\n self.content.rootFolder,\n ),\n [vim.VirtualMachine],\n True\n )\n\n vm = next((item for item in objView.view if item.name == vm_name), None)\n objView.Destroy()\n return vm\n except vmodl.fault.ManagedObjectNotFound:\n self.__logger.warn(\n 'vmodl.fault.ManagedObjectNotFound nas occured, try: {}'.format(\n cnt\n )\n )\n self.__sleep_between_tries()\n except Exception:\n settings.raven.captureException(exc_info=True)\n raise ValueError('machine {} cannot be found'.format(vm_name))\n\n def __clone_template(self, template, machine_name, destination_folder, snapshot_name):\n\n snap = self.search_for_snapshot(\n template,\n snapshot_name\n )\n\n sys_dest_ds = self.destination_datastore\n dest_datastore = template.datastore[0] if sys_dest_ds is None else sys_dest_ds\n\n # for full clone, use 'moveAllDiskBackingsAndDisallowSharing'\n if self.destination_resource_pool:\n relocate_spec = vim.vm.RelocateSpec(\n datastore=dest_datastore,\n diskMoveType='createNewChildDiskBacking',\n pool=self.destination_resource_pool,\n transform=vim.vm.RelocateSpec.Transformation.sparse\n )\n else:\n relocate_spec = vim.vm.RelocateSpec(\n datastore=dest_datastore,\n diskMoveType='createNewChildDiskBacking',\n host=template.runtime.host,\n transform=vim.vm.RelocateSpec.Transformation.sparse\n )\n spec = vim.vm.CloneSpec(\n location=relocate_spec,\n powerOn=False,\n snapshot=snap,\n template=False,\n )\n\n task = template.CloneVM_Task(\n destination_folder,\n machine_name,\n spec\n )\n return task\n\n def get_machine_by_uuid(self, uuid):\n self.__logger.debug(f'-> get_machine_by_uuid({uuid})')\n self.__check_connection()\n vm = self.content.searchIndex.FindByUuid(None, uuid, True)\n if vm is None:\n raise Exception(f'machine {uuid} not found')\n\n self.__logger.debug(f'<- get_machine_by_uuid: {vm}')\n return vm\n\n def deploy(self, template_name, machine_name, **kwargs):\n self.__check_connection()\n destination_folder_name = settings.app['vsphere']['folder']\n if 'inventory_folder' in kwargs and kwargs['inventory_folder'] is not None:\n inventory_folder = kwargs['inventory_folder']\n destination_folder_name = '{}/{}'.format(\n settings.app['vsphere']['folder'],\n inventory_folder\n )\n retry_deploy_count = settings.app['vsphere']['retries']['deploy']\n retry_delete_count = settings.app['vsphere']['retries']['delete']\n vm = None\n vm_uuid = None\n for i in range(retry_deploy_count):\n try:\n template = self.__search_machine_by_name(template_name)\n if not template:\n raise RuntimeError(\"template {} hasn't been found\".format(template_name))\n\n self.__logger.debug('template moid: {}\\t name: {}'.format(template._GetMoId(),\n template.name))\n 
self.__logger.debug('parent: {}'.format(template.parent._GetMoId()))\n self.__logger.debug('datastore: {}'.format(template.datastore[0].name))\n if template.snapshot:\n self.__logger.debug('snapshot: {}'.format(template.snapshot.currentSnapshot))\n task = self.__clone_template(\n template,\n machine_name,\n self.vm_folders.create_folder(settings.app['vsphere']['folder']),\n settings.app['vsphere']['default_snapshot_name']\n )\n\n vm = self.wait_for_task(task)\n self.__logger.debug('Task finished with value: {}'.format(vm))\n if not vm:\n # machine must be checked whether it has been created or not,\n # in no-case machine creation must be re-executed\n # in yes-case created machine must be deleted and no-case repeated\n for f in range(retry_delete_count):\n failed_vm = self.__search_machine_by_name(machine_name)\n if failed_vm:\n self.__logger.warn(\n 'junk machine {} has been created: {}'.format(\n machine_name,\n failed_vm\n )\n )\n destroy_task = failed_vm.Destroy_Task()\n self.wait_for_task(destroy_task)\n failed_vm_recheck = self.__search_machine_by_name(machine_name)\n if not failed_vm_recheck:\n self.__logger.warn(\n 'junk machine {} has been deleted successfully'.format(\n machine_name\n )\n )\n break\n else:\n self.__logger.warn(\n 'junk machine {} has not been deleted'.format(\n machine_name\n )\n )\n self.__sleep_between_tries()\n else:\n self.__logger.debug('vms parent: {}'.format(vm.parent))\n vm_uuid = vm.config.uuid\n except Exception as e:\n settings.raven.captureException(exc_info=True)\n self.__logger.warn('pyvmomi related exception: ', exc_info=True)\n self.__sleep_between_tries()\n if vm:\n for i in range(retry_deploy_count):\n try:\n self.vm_folders.move_vm_to_folder(vm_uuid, destination_folder_name)\n except vim.fault.DuplicateName as e:\n settings.raven.captureException(exc_info=True)\n self.__logger.warn(\n 'destination folder {} not created because; trying again'.format(\n destination_folder_name\n )\n )\n except Exception as e:\n settings.raven.captureException(exc_info=True)\n self.__logger.warn(\n 'destination folder {} was not created because {}'.format(\n destination_folder_name,\n e\n )\n )\n self.__sleep_between_tries()\n raise e\n\n return vm_uuid\n raise RuntimeError(\"virtual machine hasn't been deployed\")\n\n def __has_sibling_objects(self, parent_folder, vm_uuid):\n self.__logger.debug('are there sibling machines in: {}({})?'.format(\n parent_folder,\n parent_folder.name\n ))\n for rep in range(5):\n try:\n objView = self.content.viewManager.CreateContainerView(\n self.content.rootFolder,\n [vim.VirtualMachine, vim.Folder],\n True\n )\n\n for item in objView.view:\n if item.parent == parent_folder:\n if isinstance(item, vim.Folder):\n self.__logger.debug('>>found folder: {}'.format(item.name))\n return True\n if item.config.uuid != vm_uuid:\n self.__logger.debug('>>found vm: {}'.format(item.name))\n return True\n return False\n except vmodl.fault.ManagedObjectNotFound:\n self.__sleep_between_tries()\n pass\n except Exception:\n settings.raven.captureException(exc_info=True)\n self.__sleep_between_tries()\n finally:\n self.__logger.debug('searching done')\n objView.Destroy()\n return True\n\n def undeploy(self, uuid):\n self.__check_connection()\n for attempt in range(6):\n try:\n vm = self.content.searchIndex.FindByUuid(None, uuid, True)\n if vm:\n self.__logger.debug('found vm: {}'.format(vm.config.uuid))\n\n parent_folder = vm.parent\n parent_folder_name = vm.parent.name\n has_sibling_machines = True\n # has_sibling_machines = 
self.__has_sibling_objects(\n # has_sibling_machines = True\n # parent_folder,\n # vm.config.uuid\n # )\n\n for i in range(5):\n try:\n task = vm.Destroy_Task()\n self.wait_for_task(task)\n self.__logger.debug('vm killed {}'.format(i))\n break\n except vmodl.fault.ManagedObjectNotFound:\n self.__sleep_between_tries()\n\n if not has_sibling_machines:\n self.__logger.debug(\n 'folder: {} is going to be removed'.format(\n parent_folder_name\n )\n )\n self.vm_folders.delete_folder(parent_folder)\n self.__logger.debug('folder: {} has been deleted'.format(\n parent_folder_name\n )\n )\n return\n else:\n self.__logger.warn(\n 'machine {} not found or has been already deleted'.format(uuid)\n )\n return\n except vmodl.fault.ManagedObjectNotFound:\n self.__logger.warn('problem while undeploying machine {}'.format(uuid))\n self.__sleep_between_tries()\n except Exception:\n settings.raven.captureException(exc_info=True)\n self.__sleep_between_tries()\n\n raise RuntimeError(\"virtual machine hasn't been undeployed\")\n\n def start(self, uuid):\n self.__check_connection()\n\n vm = self.content.searchIndex.FindByUuid(None, uuid, True)\n if vm:\n self.__logger.debug('found vm: {}'.format(vm.config.uuid))\n task = vm.PowerOnVM_Task()\n self.wait_for_task(task)\n self.__logger.debug('vm powered on')\n else:\n raise Exception('machine {} not found'.format(uuid))\n\n def stop(self, uuid):\n self.__check_connection()\n for i in range(settings.app['vsphere']['retries']['config_network']):\n try:\n vm = self.content.searchIndex.FindByUuid(None, uuid, True)\n if vm:\n self.__logger.debug('found vm: {}'.format(uuid))\n task = vm.PowerOffVM_Task()\n self.wait_for_task(task)\n self.__logger.debug('vm powered off')\n return\n else:\n raise Exception('machine {} not found'.format(uuid))\n except Exception:\n settings.raven.captureException(exc_info=True)\n self.__sleep_between_tries()\n\n def reset(self, uuid):\n self.__check_connection()\n vm = self.content.searchIndex.FindByUuid(None, uuid, True)\n if not vm:\n raise Exception('machine {} not found'.format(uuid))\n self.__logger.debug('found vm: {}'.format(vm.config.uuid))\n # invoke reset - it does not fail even in case VM is powered off!\n task = vm.ResetVM_Task()\n self.wait_for_task(task)\n self.__logger.debug('vm reset done')\n\n\n def _take_screenshot_to_datastore(self, uuid):\n \"\"\"\n Takes screenshot of VM and saves it in datastore\n :param uuid: machine uuid\n :return: tuple; name of datastore (where screenshot is saved)\n and path to screenshot in datastore\n \"\"\"\n self.__logger.debug('-> take_screenshot()')\n self.__check_connection()\n vm = self.content.searchIndex.FindByUuid(None, uuid, True)\n if vm is None:\n raise Exception(f'machine {uuid} not found')\n\n self.__logger.debug(f'found vm: {vm.config.uuid}')\n screenshot_task = vm.CreateScreenshot_Task()\n self.wait_for_task(screenshot_task)\n result_path = screenshot_task.info.result\n if not result_path:\n return None, None\n # can't we just use self.destination_datastore.info.name ?\n datastore_name, screenshot_path = result_path.split(' ')\n datastore_name = datastore_name.lstrip('[').rstrip(']')\n self.__logger.debug(f'<- take_screenshot: {datastore_name}, {screenshot_path}')\n return datastore_name, screenshot_path\n\n def _store_screenshot_to_hcp(self, machine_uuid: str, screenshot_data) -> str:\n hcp_server = settings.app['hcp']['url']\n hcp_auth = settings.app['hcp']['auth']\n hcp_base_dir = settings.app['hcp']['base_dir']\n\n hcp_filename = f'{machine_uuid}_{uuid.uuid4()}.png'\n 
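# suffix the object name with a fresh uuid4 so repeated screenshots of one VM never overwrite each other\n        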
upload_url = f'{hcp_server}/rest/{hcp_base_dir}/{hcp_filename}'\n put_request = urllib.request.Request(\n upload_url,\n method='PUT',\n data=screenshot_data,\n )\n put_request.add_header('Content-Length', str(len(screenshot_data)))\n put_request.add_header('Content-Type', 'multipart/form-data')\n put_request.add_header('Authorization', hcp_auth)\n ssl_context = ssl._create_unverified_context()\n response = urllib.request.urlopen(\n put_request,\n context=ssl_context,\n timeout=settings.app['hcp'].get('timeout', 120)\n )\n if response.code != 201:\n settings.raven.captureMessage(\n f'problem uploading data to hcp: {hcp_server}, {upload_url} -> {response.code}'\n )\n return upload_url.replace('/rest/', '/hs3/')\n\n def take_screenshot(self, uuid: str, store_to: str = 'db') -> str:\n \"\"\"\n Takes screenshot of VM and returns it as base64 encoded string or hcp url\n :param uuid: machine uuid\n :param store_to: screenshot destination, db or hcp for now\n :return: base64 encoded string, or hcp url or None in case of failure\n \"\"\"\n datastore, path = self._take_screenshot_to_datastore(uuid=uuid)\n self.__logger.debug(f'datastore: {datastore}, path: {path}')\n if datastore is not None or path is not None:\n screenshot_data = self.get_file_bytes_from_datastore(datastore_name=datastore, remote_path_to_file=path)\n if screenshot_data:\n if store_to == \"hcp\":\n return self._store_screenshot_to_hcp(uuid, screenshot_data)\n elif store_to == \"db\":\n return base64.b64encode(screenshot_data)\n else:\n settings.raven.captureMessage(f'invalid store_to specification ({store_to})')\n else:\n settings.raven.captureMessage('Error obtaining screenshot data')\n\n def take_snapshot(self, uuid, snapshot_name) -> bool:\n self.__logger.debug(f'-> take_snapshot({uuid}, {snapshot_name})')\n vm = self.get_machine_by_uuid(uuid=uuid)\n snapshot_task = vm.CreateSnapshot_Task(\n name=snapshot_name,\n description='',\n memory=True,\n quiesce=False\n )\n snap_obj = self.wait_for_task(snapshot_task)\n result = snapshot_task.info.state == 'success' and snapshot_task.info.error is None\n self.__logger.debug(f'<- take_snapshot(): {result}')\n return result\n\n def remove_snapshot(self, uuid, snapshot_name) -> bool:\n self.__logger.debug(f'-> remove_snapshot({uuid}, {snapshot_name})')\n vm = self.get_machine_by_uuid(uuid)\n snap = self.search_for_snapshot(vm=vm, snapshot_name=snapshot_name)\n remove_task = snap.RemoveSnapshot_Task(removeChildren=False)\n self.wait_for_task(remove_task)\n self.__logger.debug(f'<- remove_snapshot()')\n\n def revert_snapshot(self, uuid, snapshot_name):\n self.__logger.debug(f'-> revert_snapshot({uuid}, {snapshot_name})')\n vm = self.get_machine_by_uuid(uuid)\n snap = self.search_for_snapshot(vm=vm, snapshot_name=snapshot_name)\n revert_task = snap.RevertToSnapshot_Task()\n self.wait_for_task(revert_task)\n # revert task does not give any result explicitly! 
So we check for 'success' and no error\n result = revert_task.info.state == 'success' and revert_task.info.error is None\n self.__logger.debug(f'<- revert_snapshot(): {result}')\n return result\n\n # TODO rewrite others to use this one\n def __get_objects_list_from_container(self, container, object_type):\n\n result = []\n object_view = None\n try:\n object_view = self.content.viewManager.CreateContainerView(\n container,\n [object_type],\n True)\n result = list(object_view.view)\n except vmodl.fault.ManagedObjectNotFound:\n self.__logger.warn('vmodl.fault.ManagedObjectNotFound has occured')\n except Exception:\n settings.raven.captureException(exc_info=True)\n finally:\n if object_view is not None:\n object_view.Destroy()\n\n return result\n\n def __get_datacenter_for_datastore(self, datastore_name):\n dcs = self.__get_objects_list_from_container(self.content.rootFolder, vim.Datacenter)\n for dc in dcs:\n datastores = self.__get_objects_list_from_container(dc, vim.Datastore)\n for ds in datastores:\n if ds.info.name == datastore_name:\n return dc\n return None\n\n def get_file_bytes_from_datastore(self, remote_path_to_file, datastore_name):\n \"\"\"\n Downloads file from datastore (with retries) and returns its data.\n Note: keep in mind requested file size, since data are in memory!\n :param remote_path_to_file: path to file in datastore (e.g. my_vm/my_vm.png)\n :param datastore_name: name of datastore\n :return: data\n \"\"\"\n self.__check_connection()\n server_name = settings.app['vsphere']['host']\n datacenter = self.__get_datacenter_for_datastore(datastore_name)\n if datacenter is None:\n raise RuntimeError(f'Cannot find datacenter for datastore {datastore_name}')\n\n url = f'https://{server_name}/folder/{remote_path_to_file}?dcPath={datacenter.name}&dsName={datastore_name}'\n\n for i in range(3):\n try:\n # resp = requests.get(url=url, verify=False, headers={'Cookie': self._connection_cookie})\n # the cookie usage was dropped because the new solution improved stability\n resp = requests.get(\n url=url,\n verify=False,\n auth=(settings.app['vsphere']['username'], settings.app['vsphere']['password'])\n )\n if resp.status_code == 200:\n # download ok, save return path\n return resp.content\n else:\n # try again\n msg = f'Download of {remote_path_to_file} (retry {i}) failed with status code: {resp.status_code}'\n self.__logger.warning(msg)\n self.__sleep_between_tries()\n continue\n except Exception as e:\n self.__logger.warning(f'Downloading of {remote_path_to_file} (retry {i}) failed: {e}')\n settings.raven.captureException(exc_info=True)\n\n # failed, nothing to return\n return None\n\n def config_network(self, uuid, **kwargs):\n self.__logger.debug('config_network')\n self.__check_connection()\n for i in range(settings.app['vsphere']['retries']['config_network']):\n try:\n\n vm = self.content.searchIndex.FindByUuid(None, uuid, True)\n\n for device in vm.config.hardware.device:\n if type(device) == vim.vm.device.VirtualE1000 or \\\n type(device) == vim.vm.device.VirtualE1000e or \\\n type(device) == vim.vm.device.VirtualPCNet32 or \\\n type(device) == vim.vm.device.VirtualVmxnet or \\\n type(device) == vim.vm.device.VirtualVmxnet2 or \\\n type(device) == vim.vm.device.VirtualVmxnet3:\n device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo(\n deviceName=kwargs['interface_name']\n )\n\n device_config_spec = vim.VirtualDeviceConfigSpec(\n operation=vim.VirtualDeviceConfigSpecOperation('edit'),\n device=device\n )\n\n machine_config_spec = vim.vm.ConfigSpec(\n 
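# only the edited NIC is sent in the reconfigure spec\n                        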
deviceChange=[device_config_spec]\n )\n task = vm.ReconfigVM_Task(spec=machine_config_spec)\n self.wait_for_task(task)\n break\n except Exception:\n self.__sleep_between_tries()\n\n def _get_machine_nos_id(self, vm, uuid):\n result = ''\n try:\n for hw in vm.config.hardware.device:\n try:\n mac = hw.macAddress\n if settings.app['nosid_prefix'] is None:\n result = \"v{}\".format(re.sub(':', '', str(mac).upper()))\n else:\n result = \"{}{}\".format(\n settings.app['nosid_prefix'],\n re.sub(':', '', str(mac).upper())\n )\n except AttributeError:\n pass\n except Exception:\n self.__logger.debug(\"obtaining nos_id on machine {} failed\".format(uuid), exc_info=True)\n finally:\n return result\n\n def _get_machine_ips(self, vm, uuid):\n result = []\n try:\n for adapter in vm.guest.net:\n for ip in adapter.ipConfig.ipAddress:\n result.append(ip.ipAddress)\n except Exception:\n self.__logger.debug(\"obtaining ips on machine {} failed\".format(uuid), exc_info=True)\n finally:\n return result\n\n def _get_machine_name(self, vm, uuid):\n result = \"unknown\"\n for i in range(settings.app['vsphere']['retries']['default']):\n try:\n result = vm.config.name\n except Exception:\n self.__logger.debug(\"obtaining machine name {} failed\".format(uuid), exc_info=True)\n return result\n\n def get_machine_info(self, uuid):\n self.__check_connection()\n result = {'ip_addresses': [], 'nos_id': '', 'machine_search_link': ''}\n\n vm = self.content.searchIndex.FindByUuid(None, uuid, True)\n try:\n if vm:\n self.__logger.debug('found vm: {}'.format(uuid))\n result['ip_addresses'] = self._get_machine_ips(vm, uuid)\n result['nos_id'] = self._get_machine_nos_id(vm, uuid)\n\n machine_name = self._get_machine_name(vm, uuid)\n result['machine_name'] = machine_name\n\n host_name = settings.app['vsphere']['host']\n vsphere_address = 'https://{}/'.format(host_name)\n\n result['machine_search_link'] = '{}{}{}{}'.format(\n vsphere_address,\n 'ui/#?extensionId=vsphere.core.search.domainView&query=',\n machine_name,\n '&searchType=simple'\n )\n\n self.__logger.debug('get machine info end')\n except Exception:\n self.__logger.debug('get machine info on {} failed'.format(uuid), exc_info=True)\n finally:\n return result\n\n def wait_for_task(self, task):\n # this function is as ugly as possible but written in this way for stability purposes.\n # the number of callings to pyvmomi library is restricted as much as possible.\n state = None\n while True:\n state = task.info.state\n if state == 'success' or state == 'error':\n break\n message = \"no-message\"\n progress = \"n/a\"\n try:\n progress = task.info.progress\n message = task.info.description.message\n except Exception:\n self.__logger.warn('Problem obtaining progress or description on a vsphere task')\n\n self.__logger.debug('Progress {}% | Task: {}\\r'.format(\n progress,\n message\n ))\n time.sleep(0.5)\n\n result = task.info.result\n self.__logger.debug('Task finished with status: {}, return value: {}'.format(\n state,\n result,\n ))\n\n return result\n\n class VmFolders:\n\n def __init__(self, parent):\n # this stores all folders in vsphere at the time the class was instantiated\n self.vm_folders = {}\n # this stores all subfolders where this lm unit operates\n self.system_folders = {}\n\n self.__logger = logging.getLogger(__name__)\n self.parent = parent\n self.__collect_all_folders()\n\n def __sleep_between_tries(self):\n time.sleep(random.uniform(\n settings.app['vsphere']['retries']['delay_period_min'],\n settings.app['vsphere']['retries']['delay_period_max']\n )\n )\n\n 
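# resolve the configured system folder name to its vim.Folder managed object reference\n        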
def __get_system_root_folder(self):\n if not settings.app['vsphere']['folder'] in self.vm_folders:\n self.__logger.warn('{} not in vm_folders'.format(settings.app['vsphere']['folder']))\n # self.__logger.warn('{}'.format(self.vm_folders.keys()))\n folder = self.vm_folders[settings.app['vsphere']['folder']]\n # self.__logger.debug('folder: {}'.format(folder))\n\n objView = self.parent.content.viewManager.CreateContainerView(\n self.parent.content.rootFolder,\n [vim.Folder],\n True\n )\n\n try:\n for item in objView.view:\n if str(item) == folder:\n return item\n raise Exception(\"root folder not obtained\")\n finally:\n objView.DestroyView()\n\n def create_subfolder(self, path, subpath):\n self.__logger.debug(\"A request to create {} in {}\".format(subpath, path))\n objView = self.parent.content.viewManager.CreateContainerView(\n # we have to start from parent to fing the current root folder\n self.__get_system_root_folder().parent,\n [vim.Folder],\n True\n )\n\n new_folder = None\n for item in objView.view:\n if str(item) == self.system_folders[path]:\n self.__logger.debug('parent folder {} found'.format(path))\n try:\n new_folder = item.CreateFolder(name=subpath)\n break\n except vim.fault.DuplicateName:\n self.__sleep_between_tries()\n break\n\n objView.DestroyView()\n self.__logger.debug(\"creation done.\")\n self.__collect_system_folders()\n return new_folder\n\n def __obtain_folder(self, path):\n objView = self.parent.content.viewManager.CreateContainerView(\n self.__get_system_root_folder().parent,\n [vim.Folder],\n True\n )\n try:\n for item in objView.view:\n if path in self.system_folders and str(item) == self.system_folders[path]:\n return item\n finally:\n objView.DestroyView()\n\n self.__logger.warn(\"folder: {} not found\".format(path))\n\n def create_folder(self, folder_path):\n self.__collect_system_folders()\n path = self.__correct_folder_format(folder_path)\n if path in self.system_folders:\n return self.__obtain_folder(path)\n\n created_folder = None\n items = path.split('/')\n for splitindex in range(2, len(items)):\n temp_path = '/'.join(items[:splitindex])\n next_folder = items[splitindex:][0]\n if temp_path+'/'+next_folder not in self.system_folders:\n created_folder = self.create_subfolder(temp_path, next_folder)\n else:\n self.__logger.debug('{} exists'.format(temp_path+'/'+next_folder))\n\n if path not in self.system_folders:\n self.__logger.warn(\"Directory {} not created\".format(path))\n return self.__obtain_folder(path)\n\n def delete_folder(self, folder):\n task = folder.Destroy_Task()\n self.parent.wait_for_task(task)\n\n def move_vm_to_folder(self, vm_uuid, folder_path):\n path = self.__correct_folder_format(folder_path)\n if path not in self.system_folders:\n self.create_folder(path)\n\n vm = self.parent.content.searchIndex.FindByUuid(None, vm_uuid, True)\n self.__move_vm_to_existing_folder(vm, path)\n\n def __collect_system_folders(self):\n for repetition in range(5):\n try:\n root_folder_moref = self.__get_system_root_folder()\n objView = self.parent.content.viewManager.CreateContainerView(\n root_folder_moref,\n [vim.Folder],\n True\n )\n\n self.__logger.debug(\"collecting system vm folders....\")\n self.__logger.debug(\"\\troot_folder_moref: {}\".format(str(root_folder_moref)))\n self.system_folders = {\n settings.app['vsphere']['folder']: str(root_folder_moref)\n }\n # all parent folders must be initially added as well\n for i in self.vm_folders.keys():\n if settings.app['vsphere']['folder'].startswith(i):\n self.system_folders[i] = 
self.vm_folders[i]\n\n for item in objView.view:\n full_name = self.__retrieve_full_folder_path(item)\n self.system_folders[full_name] = str(item)\n\n return\n except vmodl.fault.ManagedObjectNotFound as e:\n self.__logger.warn(\n \"collect_system_folders errored atempt: {}\".format(repetition)\n )\n self.__sleep_between_tries()\n except Exception as e:\n settings.raven.captureException(exc_info=True)\n self.__logger.error(\n \"collect_system_folders errored atempt: {}\".format(repetition)\n )\n raise e\n finally:\n objView.DestroyView()\n\n def __collect_all_folders(self):\n for repetition in range(5):\n try:\n\n objView = self.parent.content.viewManager.CreateContainerView(\n self.parent.content.rootFolder,\n [vim.Folder],\n True\n )\n\n self.__logger.debug(\"collecting all vm folders....\")\n self.vm_folders = {}\n for item in objView.view:\n full_name = self.__retrieve_full_folder_path(item)\n self.vm_folders[full_name] = str(item)\n self.__logger.debug(\"collecting done.\")\n return\n except vmodl.fault.ManagedObjectNotFound as e:\n self.__logger.warn(\"collect_all_folders errored atempt: {}\".format(repetition))\n self.__sleep_between_tries()\n except Exception as e:\n settings.raven.captureException(exc_info=True)\n self.__logger.error(\"collect_all_folders errored atempt: {}\".format(repetition))\n raise e\n finally:\n objView.DestroyView()\n\n def __move_vm_to_existing_folder(self, vm, existing_path):\n objView2 = self.parent.content.viewManager.CreateContainerView(\n self.__get_system_root_folder(),\n [vim.Folder],\n True\n )\n for item in objView2.view:\n if str(item) == self.system_folders[existing_path]:\n task = item.MoveIntoFolder_Task(list=[vm])\n while (task.info.state == 'running' or task.info.state == 'queued'):\n time.sleep(0.2)\n objView2.DestroyView()\n\n def __retrieve_full_folder_path(self, folder):\n if isinstance(folder.parent, vim.Folder):\n return \"{}/{}\".format(\n self.__retrieve_full_folder_path(folder.parent),\n folder.name\n )\n else:\n return \"/{}\".format(folder.name)\n\n def __correct_folder_format(self, folder):\n f_folder = re.sub(r'[/\\s]*$', '', folder)\n if not f_folder.startswith('/vm/'):\n raise Exception(\"correct folder definition must look like\\\n \\\"/vm/root_folder/subfolder..... 
not {}\\\"\".format(f_folder))\n\n return f_folder\n","sub_path":"vcenter/vcenter.py","file_name":"vcenter.py","file_ext":"py","file_size_in_byte":43811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"62759757","text":"from rdkit import DataStructs\nfrom torch.nn import functional as F\nfrom utils.fingerprints import morgan_bit_fingerprint as mfp\n\ndef tanimoto_similarity(fp1, fp2):\n return DataStructs.TanimotoSimilarity(fp1, fp2)\n\ndef cosine_similarity(encoding_a, encoding_b):\n return F.cosine_similarity(encoding_a, encoding_b).item()\n\ndef rescaled_cosine_similarity(molecule_a, molecule_b, S, scale=\"mean\"):\n value = cosine_similarity(molecule_a, molecule_b)\n\n max_ = 1\n min_ = min(S) if scale == \"min\" else sum(S) / len(S)\n\n return (value - min_) / (max_ - min_)\n\ndef get_similarity(name, model, original_molecule, fp_len=None, fp_rad=None):\n\n if name == \"tanimoto\":\n similarity = lambda x, y: tanimoto_similarity(x, y)\n\n make_encoding = lambda x: mfp(x.smiles, fp_len, fp_rad).fp\n original_encoding = make_encoding(original_molecule)\n\n elif name == \"rescaled_neural_encoding\":\n similarity = lambda x, y: rescaled_cosine_similarity(x, y, similarity_set)\n\n make_encoding = lambda x: model(x.x, x.edge_index)[1]\n original_encoding = make_encoding(original_molecule)\n\n elif name == \"neural_encoding\":\n similarity = lambda x, y: cosine_similarity(x, y)\n\n make_encoding = lambda x: model(x.x, x.edge_index)[1][1]\n original_encoding = make_encoding(original_molecule)\n\n elif name == \"combined\":\n similarity = lambda x, y: 0.5 * cosine_similarity(x[0], y[0]) + 0.5 * tanimoto_similarity(x[1], y[1])\n\n make_encoding = lambda x: (model(x.x, x.edge_index)[1][1], mfp(x.smiles, fp_len, fp_rad).fp)\n original_encoding = make_encoding(original_molecule)\n\n return similarity, make_encoding, original_encoding\n","sub_path":"utils/similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"571080583","text":"# future imports\nfrom __future__ import unicode_literals\n\n# stdlib imports\nimport logging\n\n# local imports\nfrom webhooks import Webhooks\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass WebhookSession(object):\n\n def __init__(self, config):\n logger.info('Webhook session started')\n self.device = None\n self.token = config['webhook']['token']\n self.webhook = Webhooks(self.token)\n self.base_url = config['webhook']['webhook']\n\n def fetch_head(self):\n logger.info('Fetching head track')\n webhook_url = '{0}queues/{1}/head/'.format(\n self.base_url, self.device['queue']['id'])\n\n track = self.webhook.get(self.__class__.__name__, webhook_url)\n return track\n\n def pop_head(self):\n logger.info('Removing current head track')\n webhook_url = '{0}queues/{1}/head/'.format(\n self.base_url, self.device['queue']['id'])\n\n self.webhook.delete(self.__class__.__name__, webhook_url)\n\n def start(self):\n logger.info('Webhook session started')\n webhook_url = '{0}players/'.format(self.base_url)\n\n response = self.webhook.get(self.__class__.__name__, webhook_url)\n self.device = response['results'][0]\n\n def stop(self):\n logger.info('Session ended.')\n webhook_url = '{0}queues/{1}/head/'.format(\n self.base_url, self.device['queue']['id'])\n kwargs = {'state': 'stopped'}\n\n self.webhook.patch(self.__class__.__name__, webhook_url, **kwargs)\n\n def update_head(self, kwargs):\n 
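# kwargs here is a plain dict of fields to PATCH onto the queue head\n        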
logger.info('Updating current head track.')\n webhook_url = '{0}queues/{1}/head/'.format(\n self.base_url, self.device['queue']['id'])\n\n self.webhook.patch(self.__class__.__name__, webhook_url, **kwargs)\n\n def report_status(self, **kwargs):\n pass\n\n def report_event(self, **kwargs):\n pass\n","sub_path":"mopidy_webhook/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"164073935","text":"text_file = open(\"neki.txt\", \"w\")\n\nwith open('piracy-topics_genre.txt') as f:\n lines = f.read().splitlines()\n\n for line in lines:\n for i in range(0,30):\n text_file.write(line + \"\\n\")\n\n\n\n\n\ntext_file.close()","sub_path":"FRI/Programing/Python/2Letnik/AI/VrsticaDuplicator.py","file_name":"VrsticaDuplicator.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"74591719","text":"import tensorflow as tf\n\n\ndef flatten_unflatten(variable):\n \"\"\"\n >>> apply_snd_to_fst = lambda xf: xf[1](xf[0])\n >>> fu_identity = lambda x: apply_snd_to_fst(flatten_unflatten(x)) == x\n >>> flatten_unflatten('foo')[0]\n ['foo']\n >>> x = {'foo': ([1,(5.,)],)}\n >>> flatten_unflatten(x)[0]\n [1, 5.0]\n >>> fu_identity(x)\n True\n \"\"\"\n def flatten_unflatten_raw(variable):\n if type(variable) is tuple:\n flattened, unflatten_list = flatten_unflatten_raw(list(variable))\n def unflatten_raw(l):\n result, rest = unflatten_list(l)\n return tuple(result), rest\n elif type(variable) is list:\n flattened = sum([flatten_unflatten_raw(v)[0] for v in variable], [])\n def unflatten_raw(l):\n rest = l\n result = []\n for unflattener in [flatten_unflatten_raw(v)[1] for v in variable]:\n v, rest = unflattener(rest)\n result += [v]\n return result, rest\n elif type(variable) is dict:\n flattened = sum([flatten_unflatten_raw(v)[0] for v in variable.values()], [])\n def unflatten_raw(l):\n rest = l\n result = {}\n for k, unflattener in [(k, flatten_unflatten_raw(v)[1]) for (k, v) in variable.items()]:\n v, rest = unflattener(rest)\n result[k] = v\n return result, rest\n else:\n def unflatten_raw(l):\n return l[0], l[1:]\n return [variable], unflatten_raw\n return flattened, unflatten_raw\n flattened, unflatten_raw = flatten_unflatten_raw(variable)\n def unflatten(l):\n unflattened, [] = unflatten_raw(l)\n return unflattened\n return flattened, unflatten\n\n\ndef complex_tuple(variable, *args, **kwargs):\n flattened, unflatten = flatten_unflatten(variable)\n return unflatten(tf.tuple(flattened, *args, **kwargs))\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","sub_path":"tflib.py","file_name":"tflib.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"629220829","text":"# -*- coding: utf-8 -*-\r\nimport os,time,re\r\nimport requests\r\nimport urllib.parse\r\nfrom decimal import Decimal\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nfrom pydrive.auth import GoogleAuth\r\nfrom pydrive.drive import GoogleDrive\r\n\r\ngauth = GoogleAuth()\r\ngauth.CommandLineAuth() #透過授權碼認證\r\ndrive = GoogleDrive(gauth)\r\n\r\ncomic_folder = \"folderID\"\r\n\r\nurl_home = \"http://comic.ck101.com\"\r\nurl_fin = \"http://comic.ck101.com/comicstaglist/%E5%AE%8C%E7%B5%90/\"\r\n\r\nheaders = {\r\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 
(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36\",\r\n }\r\n\r\ndef getpage1(url):\r\n driver = webdriver.PhantomJS()\r\n # time.sleep(5)\r\n driver.get(url)\r\n # 取得資料\r\n html_source = driver.page_source\r\n # 關閉瀏覽器\r\n driver.quit();\r\n return BeautifulSoup(html_source,'html.parser')\r\n\r\ndef getpage2(url):\r\n res = requests.request(\"GET\", url, headers=headers)\r\n return BeautifulSoup(res.text,'html.parser')\r\n\r\ndef creatfolder(name,path,folder):\r\n if not os.path.exists(path):\r\n os.makedirs(path,mode=0o777)\r\n folder = drive.CreateFile({'title': name, \r\n \"parents\": [{\"id\": folder}], \r\n \"mimeType\": \"application/vnd.google-apps.folder\"})\r\n folder.Upload()\r\n return folder.get('id')\r\n\r\ndef download2disk(fileName,r):\r\n print (\"downloading.....\")\r\n with open(fileName,'wb') as f:\r\n f.write(r.content)\r\n print (\"done\")\r\n\r\ndef upload2gdrive(fileName,folderID,path):\r\n print (\"uploading.....\")\r\n file11 = drive.CreateFile({\"title\":fileName,\r\n \"parents\": [{\"kind\": \"drive#fileLink\", \"id\": folderID}]})\r\n file11.SetContentFile(path)\r\n file11.Upload()\r\n print (\"done\")\r\n\r\ntest = 0\r\npage_count = 1 #192\r\nurl = url_fin + str(page_count)\r\nitems_page = getpage2(url)\r\nfinal_page = 198\r\n# final_page = int(items_page.find(\"div\",attrs={\"class\": \"pagination\"}).find(\"span\").find_next_sibling(\"span\").find_next_sibling(\"span\").find_next_sibling(\"a\").string.split(' ')[1])\r\nprint (\"fin page >>>>\" ,final_page)\r\nwhile page_count <= final_page:\r\n item = items_page.find(\"li\",attrs={\"class\": \"list userList \"})\r\n i = 1\r\n n = 3\r\n while i>>\" ,page_count)\r\n item_link = url_home + item.find(\"a\").get('href')\r\n print (\"file :\" ,count)\r\n file_page = getpage2(item_link)\r\n item_name = file_page.find(\"div\",attrs={\"class\": \"titleNav\"}).find(\"h1\").string\r\n item_name = re.sub(r\"[^\\w_0-9]\",\"\", item_name)\r\n save_path=\"comic\\\\\"+item_name\r\n i_folderID = creatfolder(item_name,save_path,comic_folder)\r\n name_count = 1\r\n print (item_name)\r\n count += 1\r\n file = file_page.find(\"div\",attrs={\"class\": \"comicBox\"}).find(\"div\",attrs={\"class\": \"relativeRec\"}).find(\"li\")\r\n now = file_page.find(\"span\",attrs={\"class\": \"current\"})\r\n if now:\r\n have_next_page = now.find_next_sibling(\"a\")\r\n else :\r\n have_next_page = None\r\n while file or have_next_page:\r\n file_name = file.find(\"a\").get('title')\r\n file_name = file_name.replace('?',' ')\r\n file_name = file_name.replace(':',' ')\r\n file_name = file_name.replace('/',' ')\r\n file_name = file_name.replace('<',' ')\r\n file_name = file_name.replace('>',' ')\r\n fn_fp = file.find(\"a\").get('href')\r\n # print (fn_fp)\r\n fn = fn_fp.split('/')[2]\r\n # print (fn)\r\n fp = Decimal(fn_fp.split('/')[3])\r\n # print (fp)\r\n save_path=\"comic\\\\\"+item_name+\"\\\\\"+file_name\r\n f_folderID = creatfolder(file_name,save_path,i_folderID)\r\n print (file_name)\r\n file_link = url_home+\"/\"+\"vols\"+\"/\"+fn+\"/\"+str(fp) + \"/1\"\r\n print (file_link)\r\n\r\n while file_link:\r\n pic_page = getpage2(file_link)\r\n time.sleep(2)\r\n # if not pic_page.find(\"img\",attrs={\"id\": \"defualtPagePic\"}):\r\n # print (\">>>>>>>>\",pic_page,\">>>>>>>>\")\r\n if pic_page.find(\"head\").find(\"meta\",attrs={'content':'Yes'}).find_next_sibling(\"meta\").get('content')==\"book\":\r\n print (\">>>>>>>>\",pic_page.find(\"head\"),\">>>>>>>>\")\r\n name_count = 1\r\n break\r\n # print (pic_page.find(\"head\"))\r\n pic_link = 
pic_page.find(\"head\").find(\"meta\",attrs={'content':'Yes'}).find_next_sibling(\"meta\").get('content')\r\n r = requests.request(\"POST\", pic_link, headers=headers)\r\n # print (pic_page)\r\n # n_pic = pic_page.find(\"div\",attrs={\"class\": \"btnWrap\"}).find(\"select\").find_next_sibling(\"a\").get('href')\r\n # n_pic_link = url_home+n_pic\r\n\r\n fileName = file_name+\" \"+ str(name_count) +\".jpg\"\r\n\r\n if test == 0 :\r\n test += 1\r\n else :\r\n os.remove(path)\r\n print (\"remove done\")\r\n\r\n path = save_path +\"\\\\\"+ fileName\r\n print (name_count)\r\n name_count += 1\r\n download2disk(path,r)\r\n upload2gdrive(fileName,f_folderID,path)\r\n\r\n fp += 1\r\n file_link = url_home+\"/\"+\"vols\"+\"/\"+fn+\"/\"+str(fp) + \"/1\"\r\n print (file_link)\r\n\r\n # print (pic_link, \">>>>>>>>>>>\")\r\n # print (n_pic_link)\r\n # if n_pic == '#':\r\n # name_count = 1\r\n # break\r\n # else :\r\n # file_link = n_pic_link\r\n\r\n\r\n file = file.find_next_sibling(\"li\")\r\n time.sleep(10)\r\n if not file and have_next_page:\r\n file_page = getpage2(url_home + have_next_page.get('href'))\r\n file = file_page.find(\"div\",attrs={\"class\": \"comicBox\"}).find(\"div\",attrs={\"class\": \"relativeRec\"}).find(\"li\")\r\n now = file_page.find(\"span\",attrs={\"class\": \"current\"})\r\n have_next_page = now.find_next_sibling(\"a\")\r\n \r\n\r\n\r\n \r\n item = item.find_next_sibling(\"li\",attrs={\"class\": \"list userList \"})\r\n time.sleep(60)\r\n page_count += 1\r\n url = url_fin + str(page_count)\r\n items_page = getpage2(url)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"comic_download_new.py","file_name":"comic_download_new.py","file_ext":"py","file_size_in_byte":6526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"142634269","text":"#! 
/usr/bin/python\n\nimport numpy as np\nfrom statistics import mean\n\nimport sys\nimport sacrebleu\nfrom sacrebleu import sentence_bleu,corpus_bleu\nSMOOTH_VALUE_DEFAULT=1e-8\n\nimport os\nimport sys\nfrom os.path import isdir\n\nhyp_file = open(sys.argv[1],'r').readlines()\nref_file = open(sys.argv[2],'r').readlines()\nhyp_file_dict={i.strip().split(' ')[0].replace(\"-\",\"_\"):\" \".join(i.strip().split(' ')[1:]) for i in hyp_file}\nref_file_dict={i.strip().split(' ')[0].replace(\"-\",\"_\"):\" \".join(i.strip().split(' ')[1:]) for i in ref_file}\n\nlog_folder = sys.argv[3]\n#ref_talk_list=str(sys.argv[4])\n\n\n\n\n\nlog_folder = os.path.join(log_folder,'hyp_talks_moses')\nif not os.path.isdir(log_folder):\n os.makedirs(log_folder)\n\n#ref_talk_list=['/mnt/matylda3/vydana/kaldi/egs/MUSTC_V2/dev_uttlist',\n#'/mnt/matylda3/vydana/kaldi/egs/MUSTC_V2/tst-COMMON_uttlist', \n#'/mnt/matylda3/vydana/kaldi/egs/MUSTC_V2/tst-HE_uttlist']\n\n#ref_talk_list=['/mnt/matylda3/vydana/espnet_latest/espnet_JAN2020/espnet/egs/must_c/mt1_mustc_V2/dev_uttlist','/mnt/matylda3/vydana/espnet_latest/espnet_JAN2020/espnet/egs/must_c/mt1_mustc_V2/tst-COMMON_uttlist','/mnt/matylda3/vydana/espnet_latest/espnet_JAN2020/espnet/egs/must_c/mt1_mustc_V2/tst-HE_uttlist']\n\n#ref_talk_list=['/mnt/matylda3/vydana/kaldi/egs/MUSTC_V2/dev_uttlist','/mnt/matylda3/vydana/kaldi/egs/MUSTC_V2/tst-COMMON_uttlist','/mnt/matylda3/vydana/kaldi/egs/MUSTC_V2/tst-HE_uttlist']\n\nref_talk_list=['/mnt/matylda3/vydana/kaldi/egs/MUSTC_V2/tst-COMMON_uttlist']\n\n#breakpoint()\n\n# Dataset_saving_file_hyp_perspeaker = open(os.path.join(log_folder,Dataset_name+'_Dataset_hyp_for_moses_perspeaker'),'w+')\n# Dataset_saving_file_ref_perspeaker = open(os.path.join(log_folder,Dataset_name+'_Dataset_ref_for_moses_perspeaker'),'w+')\n\nfor ted_talks in ref_talk_list:\n teds = open(ted_talks,'r')\n teds = teds.readlines()\n\n teds = [i.strip() for i in teds]\n speakers=set([\"_\".join(i.strip().split('_')[:-2]) for i in teds])\n\n\n Dataset_name=ted_talks.split('/')[-1]\n Dataset_saving_file_hyp = open(os.path.join(log_folder,Dataset_name+'_Dataset_hyp_for_moses_'),'w+')\n Dataset_saving_file_ref = open(os.path.join(log_folder,Dataset_name+'_Dataset_ref_for_moses_'),'w+')\n\n\n Dataset_saving_file_hyp_perspeaker = open(os.path.join(log_folder,Dataset_name+'_Dataset_hyp_for_moses_perspeaker'),'w+')\n Dataset_saving_file_ref_perspeaker = open(os.path.join(log_folder,Dataset_name+'_Dataset_ref_for_moses_perspeaker'),'w+')\n\n\n\n\n Dataset_saving_file_hyp_perspeaker_sc = open(os.path.join(log_folder,Dataset_name+'_Dataset_hyp_for_moses_perspeaker_sc'),'w+')\n Dataset_saving_file_ref_perspeaker_sc = open(os.path.join(log_folder,Dataset_name+'_Dataset_ref_for_moses_perspeaker_sc'),'w+')\n\n\n\n for speaker in speakers:\n speaker_uts=[ i for i in teds if speaker in i ]\n\n speaker_text_hyp=''\n speaker_text_ref=''\n\n \n #speaker_uts=sorted(speaker_uts,reverse=False)\n speaker_uts_dict={i:int(i.split('_')[-1]) for i in speaker_uts}\n speaker_uts_dict_sorted=dict(sorted(speaker_uts_dict.items(), key=lambda item: item[1]))\n\n\n for utt_idx in speaker_uts_dict_sorted.keys():\n\n utt_idx = utt_idx.strip()\n utt_idx = utt_idx.replace('-','_')\n\n hyp_text = hyp_file_dict.get(utt_idx,None)\n ref_text = ref_file_dict.get(utt_idx,None)\n #breakpoint()\n\n if hyp_text==None:\n print(\"Hyp text is none--------------------------------------------------------------------------------------------------------************-----------------\")\n #exit(0)\n else:\n 
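# accumulate this utterance into the speaker-level hypothesis string\n                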
speaker_text_hyp+=' ' + hyp_text\n\n\n if ref_text==None:\n print(\"ref text is none\", 'careful-----------------')\n else:\n speaker_text_ref+=' '+ref_text\n\n\n print(utt_idx,hyp_text, file=Dataset_saving_file_hyp)\n print(utt_idx,ref_text, file=Dataset_saving_file_ref)\n # \n \n\n\n\n\n speaker_text_hyp=\" \".join(speaker_text_hyp.split())\n speaker_text_ref=\" \".join(speaker_text_ref.split())\n \n print(speaker,speaker_text_hyp, file=Dataset_saving_file_hyp_perspeaker)\n print(speaker,speaker_text_ref, file=Dataset_saving_file_ref_perspeaker)\n\n\n print(speaker_text_hyp,'('+ speaker+')' ,file=Dataset_saving_file_hyp_perspeaker_sc)\n print(speaker_text_ref,'('+ speaker+')' ,file=Dataset_saving_file_ref_perspeaker_sc)\n ##-------------------------------------------------------------------------------\n print(speaker,speaker_text_hyp,speaker_text_ref)\n\n\nexit(0)\n\n#=========================================================================================\ndef get_input_tuple_list(inp_file):\n inp_touple_list=[]\n\n for line in inp_file:\n line=line.strip()\n if line:\n utt_id=line.split(' ')[0].replace('-','_').split('_')\n if len(line.strip().split(' ')) >1:\n text=\" \".join(line.strip().split(' ')[1:])\n else:\n text=line.strip().split(' ')[1:]\n utt_id.append(text)\n inp_touple_list.append(tuple(utt_id))\n return inp_touple_list\n\n#=========================================================================================\n\ndef pop_from_list(present_talk_list):\n ref_text=[]\n ref_ids=[]\n while present_talk_list:\n sent_touple = present_talk_list.pop(0)\n ref_ids.append(\"--\".join(sent_touple[:len(sent_touple)-1]))\n\n #ref_text = ref_text + sent_touple[-1]\n #sent_list = sent_touple[-1].split(\" \") if ' ' in sent_touple[-1] else sent_touple[-1] \n ref_text.append(sent_touple[-1])\n return ref_text, ref_ids\n\n#=========================================================================================\n#print(get_input_tuple_list(hyp_file),get_input_tuple_list(ref_file))\n#print(inp_touple_list)\n#=============================================\nhyp_tuple_list = get_input_tuple_list(hyp_file)\nref_tuple_list = get_input_tuple_list(ref_file)\n#=============================================\n\nfinal_scores_pertalk={}\n#no_of_files=inp_file\nref_talk_list=['/mnt/matylda3/vydana/kaldi/egs/MUSTC_V2/dev_tedtalk_list',\n'/mnt/matylda3/vydana/kaldi/egs/MUSTC_V2/tst-Common_ted_talk_list', \n'/mnt/matylda3/vydana/kaldi/egs/MUSTC_V2/tst-HE_ted_talk_list']\nfor ted_talks in ref_talk_list:\n teds = open(ted_talks,'r')\n teds = teds.readlines()\n\n final_scores_pertalk={}\n All_talks_hyp_sent=[]\n All_talks_ref_sent=[]\n Dataset_name=ted_talks.split('/')[-1]\n\n Dataset_saving_file_hyp = open(os.path.join(log_folder,Dataset_name+'_Dataset_hyp_for_moses_'),'w+')\n Dataset_saving_file_ref = open(os.path.join(log_folder,Dataset_name+'_Dataset_ref_for_moses_'),'w+')\n\n\n\n for ted in teds:\n ted = ted.strip()\n ted = ted.replace('-','_')\n ted_tup = tuple(ted.split('_'))\n\n present_talk_list=[ i for i in hyp_tuple_list if \"--\".join(ted_tup)==\"--\".join(i[:len(ted_tup)])]\n Ref_present_talk_list=[ i for i in ref_tuple_list if \"--\".join(ted_tup)==\"--\".join(i[:len(ted_tup)])]\n\n hyp_text, hyp_ids = pop_from_list(present_talk_list)\n ref_text, ref_ids = pop_from_list(Ref_present_talk_list) \n \n breakpoint()\n All_talks_hyp_sent.extend(hyp_text)\n All_talks_ref_sent.extend(ref_text)\n\n #####WRTE in to textfile for moses bleu\n print(hyp_ids,hyp_text, file=Dataset_saving_file_hyp)\n 
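# write the matching reference line so the hyp/ref files stay parallel for Moses scoring\n    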
print(ref_ids,ref_text, file=Dataset_saving_file_ref)\n\n\n\n #print(ref_text, ref_ids,ref_text, ref_ids)\n\n Bleu_score = corpus_bleu(hyp_text,[ref_text],smooth_value=SMOOTH_VALUE_DEFAULT,smooth_method='exp',use_effective_order='True')\n print('BLUE:===>',Bleu_score.score)\n breakpoint()\n ted_name=\"-\".join(ted_tup)\n\n #final_text=hyp_text+'\\t'+ref_text+'\\t'+str(Bleu_score.score) \n final_text=\"---\".join(hyp_text)+'\\t'+\"---\".join(ref_text)+'\\t'+str(Bleu_score.score)\n \n with open(os.path.join(log_folder,ted_name+'_hyp_text_'),'a+') as tedtalk_saving_file:\n print(final_text, file=tedtalk_saving_file)\n\n\n\n\n Dataset_saving_file_hyp.close()\n Dataset_saving_file_ref.close()\n\n breakpoint()\n final_scores_pertalk[ted_name]=Bleu_score.score\n print(ted_talks,mean(final_scores_pertalk.values()))\n with open(os.path.join(log_folder,ted_name+'_hyp_Bleu_'),'a+') as tedtalk_saving_file:\n print(ted_talks, mean(final_scores_pertalk.values()), file=tedtalk_saving_file)\n\n breakpoint()\n Dataset_name=ted_talks.split('/')[-1]\n Dataset_Bleu_score = corpus_bleu(All_talks_hyp_sent,[All_talks_ref_sent],smooth_value=SMOOTH_VALUE_DEFAULT,smooth_method='exp',use_effective_order='True')\n with open(os.path.join(log_folder,Dataset_name+'_Dataset_Bleu_'),'a+') as Dataset_saving_file:\n print(Dataset_name,Dataset_Bleu_score.score, file=Dataset_saving_file)\n print(Dataset_name,Dataset_Bleu_score.score)\n\n","sub_path":"scoring_utils/Compute_WER_on_badly_aligned_tedtalks.py","file_name":"Compute_WER_on_badly_aligned_tedtalks.py","file_ext":"py","file_size_in_byte":9753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"605849345","text":"# Problem 6\n\nfrom sys import argv\n\ndef main():\n\tkey = argv[1]\n\tfile_name = argv[2]\t\n\n\tif key == \"lines\":\n\t\twith open(file_name,'r') as content_file:\n\t\t\tcontent = content_file.readlines()\t\t\t\n\t\t\tcounter = len(content)\n\t\t\tprint(counter)\n\n\tif key == \"words\":\n\t\twith open(file_name,'r') as content_file:\n\t\t\tcontent = content_file.read()\t\t\t\n\t\t\twords_item = content.split()\n\t\t\tcounter = len(words_item)\n\t\t\tprint(counter)\n\n\tif key == \"chars\":\n\t\twith open(file_name,'r') as content_file:\n\t\t\tcontent = content_file.read()\n\t\t\tbox = content.strip(\"\\n\")\n\t\t\tcounter = 0\n\t\t\tfor item in box:\n\t\t\t\tcounter += 1\n\n\t\t\tprint(counter)\n\n# Call the main function\n\nif __name__ == '__main__':\n\tmain()\t\t\t\t\n","sub_path":"week_0/saturday/Problem_6/wc.py","file_name":"wc.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"59870025","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.autograd import grad, Variable\n\nimport numpy as np\nfrom pyscf import scf, gto, mcscf\n\nfrom tqdm import tqdm\nfrom time import time\n\nclass BatchDeterminant(torch.autograd.Function):\n\n @staticmethod\n def forward(ctx,input):\n\n # LUP decompose the matrices\n inp_lu, pivots = input.lu()\n perm, inpl, inpu = torch.lu_unpack(inp_lu,pivots)\n \n # get the number of permuations\n s = (pivots != torch.tensor(range(1,input.shape[1]+1)).int()).sum(1).float()\n\n # get the prod of the diag of U\n d = torch.diagonal(inpu,dim1=-2,dim2=-1).prod(1)\n\n # assemble\n det = ((-1)**s * d)\n ctx.save_for_backward(input,det)\n\n return det\n\n @staticmethod\n def backward(ctx, grad_output):\n '''using jaobi's formula \n d det(A) / d A_{ij} = 
adj^T(A)_{ij}\n        using the adjugate formula\n        d det(A) / d A_{ij} = ( (det(A) A^{-1})^T )_{ij}\n        '''\n        input, det = ctx.saved_tensors\n        return (grad_output * det).view(-1,1,1) * torch.inverse(input).transpose(1,2)\n\nclass SlaterPooling(nn.Module):\n\n    \"\"\"Applies a Slater determinant pooling in the active space.\"\"\"\n\n    def __init__(self,configs,nup,ndown):\n        super(SlaterPooling, self).__init__()\n\n        self.configs = configs\n        self.nconfs = len(configs[0])\n\n        self.index_up = torch.arange(nup)\n        self.index_down = torch.arange(nup,nup+ndown)\n\n    def forward(self,input):\n\n        ''' Compute the product of spin up/down determinants\n        Args:\n            input : MO values (Nbatch, Nelec, Nmo)\n        Returns:\n            determinant (Nbatch, Ndet)\n        '''\n        nbatch = input.shape[0]\n        out = torch.zeros(nbatch,self.nconfs)\n\n        for ic,(cup,cdown) in enumerate(zip(self.configs[0],self.configs[1])):\n\n            mo_up = input.index_select(1,self.index_up).index_select(2,cup)\n            mo_down = input.index_select(1,self.index_down).index_select(2,cdown)\n\n            # a batch version of det is on its way (end July 2019)\n            # https://github.com/pytorch/pytorch/issues/7500\n            # we'll move to that asap but in the mean time\n            # using my own BatchDeterminant\n            out[:,ic] = BatchDeterminant.apply(mo_up) * BatchDeterminant.apply(mo_down)\n\n        return out\n\n\nif __name__ == \"__main__\":\n\n    x = Variable(torch.rand(10,5,5))\n    x.requires_grad = True\n    det = BatchDeterminant.apply(x)\n    det.backward(torch.ones(10))\n\n    det_true = torch.tensor([torch.det(xi).item() for xi in x])\n    print(det-det_true)\n","sub_path":"deepqmc/wavefunction/slater_pooling.py","file_name":"slater_pooling.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"228613174","text":"\"\"\"\nRemote file for single-shot KMeans\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport logging\nimport configparser\nimport numpy as np\nimport remote_computations as remote\nimport local_computations as local\n\n\nCONFIG_FILE = 'config.cfg'\nDEFAULT_k = 5\nDEFAULT_epsilon = 0.00001\nDEFAULT_shuffle = True\nDEFAULT_learning_rate = 0.001\nDEFAULT_verbose = True\nDEFAULT_optimization = 'lloyd'\n\n\ndef remote_init_env(config_file=CONFIG_FILE, k=DEFAULT_k,\n                    optimization=DEFAULT_optimization, epsilon=DEFAULT_epsilon, learning_rate=DEFAULT_learning_rate,\n                    shuffle=DEFAULT_shuffle, verbose=DEFAULT_verbose):\n    \"\"\"\n    # Description:\n    Initialize the remote environment, creating the config file.\n\n    # PREVIOUS PHASE:\n    None\n\n    # INPUT:\n\n    | name | type | default |\n    | --- | --- | --- |\n    | config_file | str | config.cfg |\n    | k | int | 5 |\n    | optimization | str | lloyd |\n    | epsilon | float | 0.00001 |\n    | shuffle | bool | True |\n    | data_file | str | data.txt |\n    | learning_rate | float | 0.001 |\n    | verbose | bool | True |\n\n    # OUTPUT:\n    - config file written to disk\n    - k\n    - learning_rate\n    - optimization\n    - shuffle\n\n    # NEXT PHASE:\n    local_init_env\n    \"\"\"\n\n    logging.info('REMOTE: Initializing remote environment')\n    if not os.path.exists(config_file):\n        config = configparser.ConfigParser()\n        config['REMOTE'] = dict(k=k, optimization=optimization, epsilon=epsilon,\n                                learning_rate=learning_rate, verbose=verbose)\n        with open(config_file, 'w') as file:\n            config.write(file)\n    # output\n    computation_output = dict(output=\n                              dict(\n                                  config_file=config_file,\n                                  k=k,\n                                  learning_rate=learning_rate,\n                                  optimization=optimization,\n                                  shuffle=shuffle,\n                                  computation_phase=\"remote_init_env\"\n                              )\n                              )\n    return 
+{"seq_id":"228613174","text":"\"\"\"\nRemote file for single-shot KMeans\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport logging\nimport configparser\nimport numpy as np\nimport remote_computations as remote\nimport local_computations as local\n\n\nCONFIG_FILE = 'config.cfg'\nDEFAULT_k = 5\nDEFAULT_epsilon = 0.00001\nDEFAULT_shuffle = True\nDEFAULT_learning_rate = 0.001\nDEFAULT_verbose = True\nDEFAULT_optimization = 'lloyd'\n\n\ndef remote_init_env(config_file=CONFIG_FILE, k=DEFAULT_k,\n                    optimization=DEFAULT_optimization, epsilon=DEFAULT_epsilon, shuffle=DEFAULT_shuffle,\n                    learning_rate=DEFAULT_learning_rate, verbose=DEFAULT_verbose):\n    \"\"\"\n        # Description:\n            Initialize the remote environment, creating the config file.\n\n        # PREVIOUS PHASE:\n            None\n\n        # INPUT:\n\n            | name | type | default |\n            | --- | --- | --- |\n            | config_file | str | config.cfg |\n            | k | int | 5 |\n            | optimization | str | lloyd |\n            | epsilon | float | 0.00001 |\n            | shuffle | bool | True |\n            | data_file | str | data.txt |\n            | learning_rate | float | 0.001 |\n            | verbose | bool | True |\n\n        # OUTPUT:\n            - config file written to disk\n            - k\n            - learning_rate\n            - optimization\n            - shuffle\n\n        # NEXT PHASE:\n            local_init_env\n    \"\"\"\n\n    logging.info('REMOTE: Initializing remote environment')\n    if not os.path.exists(config_file):\n        config = configparser.ConfigParser()\n        config['REMOTE'] = dict(k=k, optimization=optimization, epsilon=epsilon,\n                                shuffle=shuffle, learning_rate=learning_rate, verbose=verbose)\n        with open(config_file, 'w') as file:\n            config.write(file)\n    # output\n    computation_output = dict(output=\n                              dict(\n                                  config_file=config_file,\n                                  k=k,\n                                  learning_rate=learning_rate,\n                                  optimization=optimization,\n                                  shuffle=shuffle,\n                                  computation_phase=\"remote_init_env\"\n                                  )\n                              )\n    return json.dumps(computation_output)\n\n\ndef remote_init_centroids(args, config_file=CONFIG_FILE):\n    \"\"\"\n        # Description:\n            Initialize K centroids from locally selected centroids.\n\n        # PREVIOUS PHASE:\n            local_init_centroids\n\n        # INPUT:\n\n            | name | type | default |\n            | --- | --- | --- |\n            | config_file | str | config.cfg |\n\n        # OUTPUT:\n            - centroids: list of numpy arrays\n\n        # NEXT PHASE:\n            local_compute_optimizer\n    \"\"\"\n    logging.info('REMOTE: Initializing centroids')\n    config = configparser.ConfigParser()\n    config.read(config_file)\n    k = int(config['REMOTE']['k'])\n    # Have each site compute k initial clusters locally\n    local_centroids = [cent for site in args for cent in\n                       args[site]]\n    # and select k random clusters from the s*k pool\n    np.random.shuffle(local_centroids)\n    remote_centroids = local_centroids[:k]\n    computation_output = dict(\n        output=dict(\n            config_file=config_file,\n            centroids=remote_centroids,\n            computation_phase=\"remote_init_centroids\"\n            ),\n        success=True\n        )\n    return json.dumps(computation_output)\n\n\ndef remote_check_convergence(args, config_file=CONFIG_FILE):\n    \"\"\"\n        # Description:\n            Check convergence.\n\n        # PREVIOUS PHASE:\n            local_check_convergence\n\n        # INPUT:\n\n            | name | type | default |\n            | --- | --- | --- |\n            | config_file | str | config.cfg |\n            | remote_centroids | list | config.cfg |\n            | previous_centroids | list | config.cfg |\n\n        # OUTPUT:\n            - boolean encoded in name of phase\n\n        # NEXT PHASE:\n            local_compute_clustering?\n    \"\"\"\n    logging.info('REMOTE: Check convergence')\n    config = configparser.ConfigParser()\n    config.read(config_file)\n    local_check = [args[site]['local_check'] for site in args]\n    remote_check = any(local_check)\n    new_phase = \"remote_converged_true\" if remote_check else \"remote_converged_false\"\n    computation_output = dict(\n        output=dict(\n            computation_phase=new_phase\n            ),\n        success=True\n        )\n    return json.dumps(computation_output)\n
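# NOTE: `listRecursive`, used by the __main__ dispatch at the bottom of this
# file, is neither defined nor imported here; the original project presumably
# shipped its own helper. A minimal stand-in is sketched below.
def listRecursive(d, key):
    """Recursively yield every value stored under `key` in a nested dict."""
    for k, v in d.items():
        if isinstance(v, dict):
            for found in listRecursive(v, key):
                yield found
        if k == key:
            yield v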
\ndef remote_aggregate_output(args):\n    \"\"\"\n    Aggregate output. TODO: What needs to be aggregated\n    \"\"\"\n    logging.info('REMOTE: Aggregating input')\n    computation_output = dict(\n        output=dict(\n            computation_phase=\"remote_aggregate_output\"\n            ),\n        success=True\n        )\n    return json.dumps(computation_output)\n\n\nif __name__ == '__main__':\n\n    parsed_args = json.loads(sys.stdin.read())\n    phase_key = list(listRecursive(parsed_args, 'computation_phase'))\n\n    if not phase_key:\n        computation_output = remote_init_env(**parsed_args['input'])\n        sys.stdout.write(computation_output)\n    elif 'local_init_centroids' in phase_key:\n        computation_output = remote_init_centroids(parsed_args['input'])\n        sys.stdout.write(computation_output)\n    elif 'local_check_convergence' in phase_key:\n        computation_output = remote_check_convergence(parsed_args['input'])\n        sys.stdout.write(computation_output)\n    elif 'remote_converged_true' in phase_key:\n        computation_output = remote_aggregate_output(parsed_args['input'])\n        sys.stdout.write(computation_output)\n    else:\n        raise ValueError('Oops')\n\n","sub_path":"remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":6108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
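A hedged sketch of how the remote phases in the record above chain together when driven in a single process; in deployment the runner feeds JSON through stdin/stdout, as the __main__ block suggests. The two toy site payloads below are invented for illustration, and the functions are assumed importable from remote.py.

import json

out = json.loads(remote_init_env())                      # writes config.cfg, names the phase
assert out['output']['computation_phase'] == 'remote_init_env'

site_centroids = {'site0': [[0.0, 0.0], [1.0, 1.0]],     # pretend each site proposed centroids
                  'site1': [[0.5, 0.5], [2.0, 2.0]]}
out = json.loads(remote_init_centroids(site_centroids))  # pools and samples k of them
print(out['output']['centroids'])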
+{"seq_id":"563019697","text":"# 10610.py\n# 2018.06.26\n\nn = sorted(input(), reverse=True)\nif n[-1] != '0':\n\tprint(-1)\nelse:\n\tsum_n = sum(map(int, n))\n\tif sum_n % 3:\n\t\tprint(-1)\n\telse:\n\t\tprint(''.join(n))\n\t\t\n# Read the input digits into a list sorted in descending order, check divisibility (ends in 0, digit sum divisible by 3), and print the result.\n","sub_path":"10000/10610.py","file_name":"10610.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"243246763","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# factor for 90% coverage with 90% confidence using Normal distribution\n# with 10 samples from table XII in [1]\n# [1] Montgomery, D. C., & Runger, G. C. (2014). Applied statistics and\n# probability for engineers. Sixth edition. John Wiley & Sons.\nk = 2.535\n\n\n\nrun_times = np.load('../bench_run_times/amd_fx_8350_titanXP/6_break_times.npy')\nn = np.load('../bench_run_times/amd_fx_8350_titanXP/n.npy')\nrun_times1 = np.load('../bench_run_times/amd_fx_8350_titanXP_sp/6_break_times.npy')\n\n\n\nrun_times_means = run_times.mean(axis=2)\nrun_times_stds = run_times.std(axis=2, ddof=1)\nrun_times_means1 = run_times1.mean(axis=2)\nrun_times_stds1 = run_times1.std(axis=2, ddof=1)\n\n\nplt.figure()\nplt.title('AMD FX-8350: 6 line segments')\nplt.grid()\nplt.errorbar(n, run_times_means[0], yerr=k*run_times_stds[0], capsize=2.0, label='Standard')\nplt.errorbar(n, run_times_means[1], yerr=k*run_times_stds[1], capsize=2.0, label='TF CPU float64')\nplt.errorbar(n, run_times_means1[1], yerr=k*run_times_stds1[1], capsize=2.0, label='TF CPU float32')\n\nplt.xlabel('Number of data points')\nplt.ylabel('Run time (seconds, Lower is better)')\nplt.semilogx()\nplt.semilogy()\nplt.legend()\nplt.savefig('../figs/fx_six_breaks.png', bbox_inches='tight')\n\nprint('TF float64 1e7 time faster', run_times_means[0][-1]/run_times_means[1][-1])\nprint('TF float32 1e7 time faster', run_times_means[0][-1]/run_times_means1[1][-1])\n\nplt.show()\n","sub_path":"plot_results/compare_sp_fx.py","file_name":"compare_sp_fx.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"323242841","text":"import os\nimport collections.abc\n\ndef make_dir(directory):\n    \"\"\"sensible way to create directory\"\"\"\n    try:\n        os.makedirs(directory)\n    except OSError:\n        if os.path.isdir(directory):\n            pass\n        else:\n            err_msg = \"failed to create directory {}\".format(directory)\n            raise RuntimeError(err_msg)\n\n\ndef flatten(list_like):\n    \"\"\"recursively flatten a nested list\"\"\"\n    for i in list_like:\n        if isinstance(i, collections.abc.Iterable) and not isinstance(i, str):\n            for sub in flatten(i):\n                yield sub\n        else:\n            yield i\n\n\ndef prefix_filepaths(dataframe, name, location):\n    \"\"\"\n    prefix the filepaths in a loaddata dataframe so that the paths point to the\n    image location after the images have been staged\n    \"\"\"\n    path_cols = [col for col in dataframe.columns if col.startswith(\"PathName\")]\n    dataframe[path_cols] = dataframe[path_cols].applymap(\n        lambda x: os.path.join(location, \"img_data\", name, x)\n    )\n    return dataframe\n\ndef any_nan_values(dataframe):\n    \"\"\"Check if 'dataframe' contains any missing values\"\"\"\n    return dataframe.isnull().any().any()\n","sub_path":"cptools2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"98372190","text":"import time\n\nimport spidev as SPI\n\nimport SSD1306\n\nfrom PIL import Image \nfrom PIL import ImageDraw\nfrom PIL import ImageFont \n\n\n\ndef get_time_info():\n    \n    now = time.localtime()\n    year = now.tm_year\n    mon = now.tm_mon\n    day = now.tm_mday\n    hour = now.tm_hour\n    min = now.tm_min\n    sec = now.tm_sec\n    \n    time_info = ['YYMMDD','HHMMSS','CNTDOWNDH', 'CNTDOWNMS']\n    time_info[0] = '%2.2i:%2.2i:%2.2i'%(year,mon,day)\n    time_info[1] = '%2.2i:%2.2i:%2.2i'%(hour,min,sec)\n    \n    remain_days = 30 + 3 - day\n    remain_hours= 23 - hour\n    remain_mins = 59 - min\n    remain_secs = 60 - sec\n    \n    time_info[2] = '%2.2iD:%2.2iH'%(remain_days, remain_hours)\n    time_info[3] = '%2.2iM:%2.2iS'%(remain_mins, remain_secs)\n    return time_info\n\n\n\nRST = 19\nDC = 16\nbus = 0\ndevice = 
0\ndisp=SSD1306.SSD1306(rst=RST,dc=DC,spi=SPI.SpiDev(bus,device))\n\n\nimage= Image.new('1',(128,64))\ndraw = ImageDraw.Draw(image)\nfont = ImageFont.load_default()\n\nlogo=Image.open('pku_logo.png').convert('1')\nlogo=logo.resize((64,64))\n\ndisp.begin()\ndisp.clear()\n\n\ntry:\n while True:\n disp.clear()\n \n draw.rectangle((0,0,127,63),outline=1,fill=0)\n\n draw.bitmap((0,0),logo,fill=1)\n\n time_info = get_time_info() \n\n draw.text((65,2), time_info[0],font=font,fill=255)\n draw.text((75,16),time_info[1],font=font,fill=255)\n \n draw.text((65,26),'Count Down',font=font,fill=255)\n draw.text((75,38),time_info[2],font=font,fill=255)\n draw.text((75,50),time_info[3],font=font,fill=255)\n\n disp.image(image)\n disp.display()\n time.sleep(0.1)\n \nexcept KeyboardInterrupt:\n pass\n\n\n\n","sub_path":"lab_4/pku_logo.py","file_name":"pku_logo.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"267470706","text":"\n\nimport logging\nimport colorama as clr\n\nimport os.path\nimport sys\nimport time\nimport traceback\n# Pylint can't figure out what's in the record library for some reason\n#pylint: disable-msg=E1101\n\ncolours = [clr.Fore.BLUE, clr.Fore.RED, clr.Fore.GREEN, clr.Fore.YELLOW, clr.Fore.MAGENTA, clr.Fore.CYAN, clr.Back.YELLOW + clr.Fore.BLACK, clr.Back.YELLOW + clr.Fore.BLUE, clr.Fore.WHITE]\n\ndef getColor(idx):\n\treturn colours[idx%len(colours)]\n\n\nclass DatabaseHandler(logging.Handler):\n\n\n\tdef __init__(self, level=logging.DEBUG):\n\t\tlogging.Handler.__init__(self, level)\n\n\t\timport settings\n\t\timport psycopg2\n\n\t\ttry:\n\t\t\tself.conn = psycopg2.connect(dbname=settings.DATABASE_DB_NAME, user=settings.DATABASE_USER,password=settings.DATABASE_PASS)\n\t\texcept psycopg2.OperationalError:\n\t\t\tself.conn = psycopg2.connect(host=settings.DATABASE_IP, dbname=settings.DATABASE_DB_NAME, user=settings.DATABASE_USER,password=settings.DATABASE_PASS)\n\n\t\tself.checkInitDb()\n\n\tdef checkInitDb(self):\n\t\twith self.conn.cursor() as cur:\n\n\t\t\tcur.execute('''CREATE TABLE IF NOT EXISTS logTable (\n\t\t\t\t\t\t\t\t\t\t\t\tdbid SERIAL PRIMARY KEY,\n\t\t\t\t\t\t\t\t\t\t\t\ttime DOUBLE PRECISION NOT NULL,\n\t\t\t\t\t\t\t\t\t\t\t\tsource TEXT NOT NULL,\n\t\t\t\t\t\t\t\t\t\t\t\tlevel INTEGER,\n\t\t\t\t\t\t\t\t\t\t\t\tcontent TEXT);''')\n\n\n\t\t\tcur.execute(\"SELECT relname FROM pg_class;\")\n\t\t\thaveIndexes = cur.fetchall()\n\t\t\thaveIndexes = [index[0] for index in haveIndexes]\n\n\n\n\t\t\tindexes = [\n\t\t\t\t# (\"logTable_dbid_index\", '''CREATE INDEX logTable ON logTable (dbid );''' ), # Primary key gets an index automatically\n\t\t\t\t(\"logTable_time_index\", '''CREATE INDEX logTable_time_index ON logTable (time );''' ),\n\t\t\t\t(\"logTable_source_index\", '''CREATE INDEX logTable_source_index ON logTable (source );''' ),\n\t\t\t\t(\"logTable_istext_index\", '''CREATE INDEX logTable_istext_index ON logTable (level );''' ),\n\t\t\t\t(\"logTable_title_coll_index\", '''CREATE INDEX logTable_title_coll_index ON logTable USING BTREE (source COLLATE \"en_US\" text_pattern_ops);''' )\n\t\t\t]\n\n\t\t\tfor name, createCall in indexes:\n\t\t\t\tif not name.lower() in haveIndexes:\n\t\t\t\t\tcur.execute(createCall)\n\n\n\n\n\tdef emit(self, record):\n\n\t\tname = record.name\n\t\tlogTime = record.created\n\t\tlevel = record.levelno\n\t\tmsg = record.getMessage()\n\t\tvalues = (name, logTime, level, msg)\n\n\t\twith self.conn.cursor() as 
cur:\n\t\t\tcur.execute(\"BEGIN;\")\n\t\t\tcur.execute(\"INSERT INTO logTable (source, time, level, content) VALUES (%s, %s, %s, %s);\", values)\n\t\t\tcur.execute(\"COMMIT;\")\n\n\n\n\n\nclass ColourHandler(logging.Handler):\n\n\tdef __init__(self, level=logging.DEBUG):\n\t\tlogging.Handler.__init__(self, level)\n\t\tself.formatter = logging.Formatter('\\r%(name)s%(padding)s - %(style)s%(levelname)s - %(message)s'+clr.Style.RESET_ALL)\n\t\tclr.init()\n\n\t\tself.logPaths = {}\n\n\tdef emit(self, record):\n\n\t\t# print record.levelname\n\t\t# print record.name\n\n\t\tsegments = record.name.split(\".\")\n\t\tif segments[0] == \"Main\" and len(segments) > 1:\n\t\t\tsegments.pop(0)\n\t\t\tsegments[0] = \"Main.\"+segments[0]\n\n\t\tnameList = []\n\n\t\tfor indice, pathSegment in enumerate(segments):\n\t\t\tif not indice in self.logPaths:\n\t\t\t\tself.logPaths[indice] = [pathSegment]\n\t\t\telif not pathSegment in self.logPaths[indice]:\n\t\t\t\tself.logPaths[indice].append(pathSegment)\n\n\t\t\tname = clr.Style.RESET_ALL\n\t\t\tname += getColor(self.logPaths[indice].index(pathSegment))\n\t\t\tname += pathSegment\n\t\t\tname += clr.Style.RESET_ALL\n\t\t\tnameList.append(name)\n\n\n\t\trecord.name = \".\".join(nameList)\n\n\t\tif record.levelname == \"DEBUG\":\n\t\t\trecord.style = clr.Style.DIM\n\t\telif record.levelname == \"WARNING\":\n\t\t\trecord.style = clr.Style.BRIGHT\n\t\telif record.levelname == \"ERROR\":\n\t\t\trecord.style = clr.Style.BRIGHT+clr.Fore.RED\n\t\telif record.levelname == \"CRITICAL\":\n\t\t\trecord.style = clr.Style.BRIGHT+clr.Back.BLUE+clr.Fore.RED\n\t\telse:\n\t\t\trecord.style = clr.Style.NORMAL\n\n\t\trecord.padding = \"\"\n\t\tprint((self.format(record)))\n\nclass RobustFileHandler(logging.FileHandler):\n\t\"\"\"\n\tA handler class which writes formatted logging records to disk files.\n\t\"\"\"\n\n\tdef emit(self, record):\n\t\t\"\"\"\n\t\tEmit a record.\n\n\t\tIf the stream was not opened because 'delay' was specified in the\n\t\tconstructor, open it before calling the superclass's emit.\n\t\t\"\"\"\n\t\tfailures = 0\n\t\twhile self.stream is None:\n\t\t\ttry:\n\t\t\t\tself.stream = self._open()\n\t\t\texcept:\n\n\t\t\t\ttime.sleep(1)\n\t\t\t\tif failures > 3:\n\t\t\t\t\ttraceback.print_exc()\n\t\t\t\t\tprint(\"Cannot open log file?\")\n\t\t\t\t\treturn\n\t\t\t\tfailures += 1\n\t\tfailures = 0\n\t\twhile failures < 3:\n\t\t\ttry:\n\t\t\t\tlogging.StreamHandler.emit(self, record)\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tfailures += 1\n\t\telse:\n\t\t\ttraceback.print_stack()\n\t\t\tprint(\"Error writing to file?\")\n\n\n\t\tself.close()\n\n\ndef exceptHook(exc_type, exc_value, exc_traceback):\n\tif issubclass(exc_type, KeyboardInterrupt):\n\t\tsys.__excepthook__(exc_type, exc_value, exc_traceback)\n\t\treturn\n\tmainLogger = logging.getLogger(\"Main\")\t\t\t# Main logger\n\tmainLogger.critical('Uncaught exception!')\n\tmainLogger.critical(\"Uncaught exception\", exc_info=(exc_type, exc_value, exc_traceback))\n\n# Global hackyness to detect and warn on double-initialization of the logging systems.\nLOGGING_INITIALIZED = False\n\ndef initLogging(logLevel=logging.INFO, logToDb=True):\n\n\tglobal LOGGING_INITIALIZED\n\tif LOGGING_INITIALIZED:\n\t\tcurrent_stack = traceback.format_stack()\n\t\tprint(\"ERROR - Logging initialized twice!\")\n\t\tfor line in current_stack:\n\t\t\tprint(line.rstrip())\n\t\treturn\n\n\tLOGGING_INITIALIZED = True\n\n\tprint(\"Setting up loggers....\")\n\n\tif not 
os.path.exists(os.path.join(\"./logs\")):\n\t\tos.mkdir(os.path.join(\"./logs\"))\n\n\tmainLogger = logging.getLogger(\"Main\")\t\t\t# Main logger\n\tmainLogger.setLevel(logLevel)\n\n\t# Do not propigate up to any parent loggers other things install\n\tmainLogger.propagate = False\n\n\t# You have to add the dbLogger first, because the colorHandler logger\n\t# modifies the internal values of the record.name attribute,\n\t# and if the dbLogger is added after it, the modified values\n\t# are also sent to the db logger.\n\tif logToDb:\n\t\ttry:\n\t\t\tdbLog = DatabaseHandler()\n\t\t\tmainLogger.addHandler(dbLog)\n\t\texcept:\n\t\t\tprint(\"Warning! Failed to instantiate database logging interface!\")\n\t\t\ttraceback.print_exc()\n\n\n\tch = ColourHandler()\n\tmainLogger.addHandler(ch)\n\n\tlogName\t= \"Error - %s.txt\" % (time.strftime(\"%Y-%m-%d %H;%M;%S\", time.gmtime()))\n\n\terrLogHandler = RobustFileHandler(os.path.join(\"./logs\", logName))\n\terrLogHandler.setLevel(logging.WARNING)\n\tformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\terrLogHandler.setFormatter(formatter)\n\n\tmainLogger.addHandler(errLogHandler)\n\n\t# Install override for excepthook, to catch all errors\n\tsys.excepthook = exceptHook\n\n\tprint(\"done\")\n\n\nif __name__ == \"__main__\":\n\tinitLogging(logToDb=True)\n\tlog = logging.getLogger(\"Main.Test\")\n\tlog.debug(\"Testing logging - level: debug\")\n\tlog.info(\"Testing logging - level: info\")\n\tlog.warn(\"Testing logging - level: warn\")\n\tlog.error(\"Testing logging - level: error\")\n\tlog.critical(\"Testing logging - level: critical\")\n","sub_path":"MangaCMSOld/lib/logSetup.py","file_name":"logSetup.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"118906701","text":"\nfrom maya import cmds\n\nclass TypedSerializableDict(dict):\n '''\n serializes the dictionary to the given node/attribute name pair whenever keys are changed.\n\n You may optionally provide a type dictionary so that key values have a particular type.\n\n NOTE: this is a simple extension of python's built in dict.\n Ie: issubclass(TypedSerializableDict, dict) is True\n '''\n\n def __init__(self, node, attrname, typeDict=None):\n serializedValue = cmds.getAttr('%s.%s' % (node, attrname))\n initDict = {}\n if serializedValue:\n # see the notes in the serialize method for the rationalization of the use of eval here\n initDict = eval(serializedValue)\n\n super(TypedSerializableDict, self).__init__(initDict)\n\n if typeDict is None:\n typeDict = {}\n\n self._node = node\n self._attrname = attrname\n self._typeDict = typeDict\n\n def _coerceValue(self, attr, value):\n\n if attr not in self._typeDict:\n return value\n\n valueType = self._typeDict[attr]\n\n # make sure the value is of the right type\n if type(value) is not valueType:\n value = valueType(value)\n\n return value\n\n def __getitem__(self, attr):\n value = super(TypedSerializableDict, self).__getitem__(attr)\n\n return self._coerceValue(attr, value)\n\n def __setitem__(self, attr, value):\n initValue = self.get(attr)\n if value is None:\n self.pop(attr)\n else:\n value = self._coerceValue(attr, value)\n super(TypedSerializableDict, self).__setitem__(attr, value)\n\n # if the value hasn't changed, don't serialize\n if initValue != value:\n self.serialize()\n\n def __delitem__(self, attr):\n super(TypedSerializableDict, self).__delitem__(attr)\n self.serialize()\n\n def update(self, *a, **kw):\n 
super(TypedSerializableDict, self).update(*a, **kw)\n self.serialize()\n\n def setdefault(self, key, value):\n if key not in self:\n self[key] = value\n\n def setdefaults(self, otherDict):\n serialize = False\n\n # there is a touch of code repetition here, but it is simply so serialize only gets called\n # at most once for the call - otherwise it would get called up to len(otherDict.keys) times\n for key, value in otherDict.iteritems():\n if key not in self:\n super(TypedSerializableDict, self).__setitem__(key, value)\n serialize = True\n\n if serialize:\n self.serialize()\n\n def serialize(self):\n # we're using super simple serialization here so that complex data types don't get serialized\n # and also so that debugging via the attribute editor is possible (ie human readable\n # serialization values)\n cmds.setAttr('%s.%s' % (self._node, self._attrname), repr(self), type='string')\n\n#end","sub_path":"CONFIG_v2.5/mayaConfig/modules_local/UTSMOD/2016/mac/quarantined_scripts/zoo/zmaya/serialization.py","file_name":"serialization.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"530424923","text":"import torch\nfrom all.nn import ListNetwork\nfrom .stochastic import StochasticPolicy\n\n\nclass GaussianPolicy(StochasticPolicy):\n def __init__(\n self,\n model,\n optimizer,\n action_dim,\n **kwargs\n ):\n model = ListNetwork(model)\n optimizer = optimizer\n\n def distribution(outputs):\n means = outputs[:, 0:action_dim]\n logvars = outputs[:, action_dim:]\n std = logvars.mul(0.5).exp_()\n return torch.distributions.normal.Normal(means, std)\n\n super().__init__(\n model,\n optimizer,\n distribution,\n **kwargs\n )\n","sub_path":"all/policies/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"201365616","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n############################################################################\n# Import statements\n############################################################################\n\nfrom Gauss.Configuration import *\n\nfrom Gaudi.Configuration import *\n\nfrom Configurables import Generation\n\nfrom Configurables import Special\n\nfrom Configurables import FixedNInteractions\n\nfrom Configurables import ReadLHEfileProduction\n\nfrom Configurables import Gauss, GiGa, GiGaPhysListModular, GiGaPhysConstructorMonopole\n\nfrom Configurables import LHCb__ParticlePropertySvc\n\nfrom Configurables import GaudiSequencer, MonopoleTupleAlg, GenTupleAlg\n\nfrom Configurables import GiGaPhysConstructorOp, GiGaPhysConstructorHpd\n\nfrom Configurables import LHCbApp\n\nfrom Configurables import CondDB, CondDBAccessSvc, GiGaInputStream\n\nfrom Configurables import GetMMTHitsAlg, GetNTDHitsAlg\n\nfrom GaudiKernel import SystemOfUnits\n\nfrom Configurables import LHCb__ParticlePropertySvc\n\nfrom Configurables import MonopoleTupleAlg, GenTupleAlg\n\n\n##############################################################################\n\n#-----------------------------------------------------------------------------\n# Generator phase\n#-----------------------------------------------------------------------------\nGaussGen = GenInit(\"GaussGen\")\n#\n# Set the random numbers - these fix the random seed.\n#\nGaussGen.FirstEventNumber = FIRST_EVENT_NUMBER\nGaussGen.RunNumber = RUN_NUMBER\n\n# The output is managed below, so we disable the 
standard Gauss output.\nGauss().OutputType = 'NONE'\nGauss().Histograms = 'NONE'\n\n# Switch off pileup (recommended!).\nGeneration().addTool(FixedNInteractions, name=\"FixedNInteractions\")\nGeneration().FixedNInteractions.NInteractions = 0\nGeneration().PileUpTool = \"FixedNInteractions\"\n\n# Define \"special\" production.\nGeneration().addTool(Special,\"Special\")\nGeneration().SampleGenerationTool = \"Special\"\n\n# No cuts.\nGeneration().Special.CutTool = \"\"\n\n# Define the production tool\nGeneration().Special.addTool(ReadLHEfileProduction, name=\"ReadLHEfileProduction\")\nGeneration().Special.ProductionTool = \"ReadLHEfileProduction\"\n\n## Define the input file in LHE xml format.\nGeneration().Special.ReadLHEfileProduction.InputFile = \"LHE_FILE_BASENAME\"\n\n##############################################################################\n# Monopole physics\n##############################################################################\n\n# Set add monopole physics constructor from GaussMonopoles to the GiGa\n# physics list.\ngiga = GiGa()\ngiga.addTool( GiGaPhysListModular(\"ModularPL\") , name=\"ModularPL\" )\ngiga.ModularPL.addTool( GiGaPhysConstructorMonopole, name = \"GiGaPhysConstructorMonopole\" )\n\n\n############################################################################\n## Add the Ntuple writer to the Simulation Monitor\n\n## Kind of a hack, but works\nmonopoleTupleAlg = MonopoleTupleAlg()\nGaudiSequencer(\"SimMonitor\").Members+= [ monopoleTupleAlg ]\ngenTupleAlg = GenTupleAlg()\nGaudiSequencer(\"GenMonitor\").Members+= [ genTupleAlg ]\n\n############################################################################\n## Switch off RICH physics (leave geometry)\ngiga.ModularPL.addTool( GiGaPhysConstructorOp, name = \"GiGaPhysConstructorOp\" )\ngiga.ModularPL.addTool( GiGaPhysConstructorHpd, name = \"GiGaPhysConstructorHpd\" )\ngiga.ModularPL.GiGaPhysConstructorOp.RichOpticalPhysicsProcessActivate = False\ngiga.ModularPL.GiGaPhysConstructorHpd.RichHpdPhysicsProcessActivate = False\n\n## Note that the options and the tags will be used directly from ${GAUSSOPTS}\n# Pick beam conditions as set in AppConfig. \n#importOptions(\"$APPCONFIGOPTS/Gauss/Beam6500GeV-md100-nu1.6.py\")\n#importOptions(\"$APPCONFIGOPTS/Gauss/DataType-2015.py\")\n# Set the database tags using those for Sim08.\n#LHCbApp().DDDBtag = \"dddb-20140729\"\n#LHCbApp().CondDBtag = \"sim-20140730-vc-md100\"\n\n############################################################################\n# Database options\n############################################################################\n\n# Add sqlite database to CondDB and turn on MoEDAL geometry\n\n############################################################################\n## Add geometry data\n## Two options:\n## Overwrite entire DDDB with file contents\n#cdb = CondDB()\n#cbd.PartitionConnectionString[\"DDDB\"] = \"sqlite_file:$HOME/LHCb_software/mkingtest.db/DDDB\"\n#cdb.Tags[DDDB] = \"DC06\"\n\n## Add file contents as layer (should overwrite existing entries in same location)\nCondDB(). 
addLayer(\n CondDBAccessSvc(\"MoEDAL_DDDB\",\n ConnectionString = \"sqlite_file:GEOMETRY_DB_FILE_LOCATION/GEOMETRY_DB_FILENAME/DDDB\",\n DefaultTAG = \"HEAD\"))\n\n############################################################################\n## Switch on geometry for MoEDAL detectors\ngeo = GiGaInputStream('Geo')\n\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/VacTankCoverPipes\" ]\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/VacTankCoverHead\" ]\n\n## When using tags above or equal to 3.0.0, please activate the VacTankTopFlanges\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/VacTankTopFlanges\" ]\n\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/DetectorVacuum\" ]\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/VacuumPump\" ]\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/VeloDustCover\" ]\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/ExtraMaterial\" ]\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/VacuumManifolds\" ]\n\n## When using tags above or equal 3.0.0, please activate : DetectorVacuumHood, SideElectronicCrates, RepeaterBoards, and VeloCables\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/DetectorVacuumHood\" ]\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/SideElectronicCrates\" ]\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/RepeaterBoards\" ]\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/VeloCables\" ]\n\n## !! Attention !! : When using tag 3.0.0 (Run 2), please activate MMT1, MMT2, MMT3, and NTD2015 and deactivate : MMT2014, and NTDRun1 !!\n#geo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/MMT1\" ]\n#geo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/MMT2\" ]\n#geo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/MMT3\" ]\n#geo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/NTD2015\" ]\n\n## !! Attention !! : When using tag 3.1.0 (Run 1), please activate MMT2014, and NTDRun1 and deactivate : MMT1, MMT2, MMT3, and NTD2015 !!\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/MMT2014\" ]\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/NTDRun1\" ]\n\ngeo.StreamItems += [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/HCC2015\" ]\n\n############################################################################\n## Activate MMT, HCC, and NTD sensitive detectors\nfrom Configurables import GetMMTHitsAlg, GetNTDHitsAlg\n\n## !! Attention !! 
: When using tag 3.0.0 (Run 2), please activate MMT1, MMT2, MMT3 and deactivate : MMT2014 !!\n#getMMTHits = GetMMTHitsAlg(\"GetMMTHits\")\n#getMMTHits.CollectionName = \"MMT/Hits\"\n#getMMTHits.MCHitsLocation = \"/Event/MC/MMT/Hits\"\n#getMMTHits.Detectors = [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/MMT1\" ]\n#GaudiSequencer(\"DetectorsHits\").Members += [ getMMTHits ]\n\n#getMMTHits = GetMMTHitsAlg(\"GetMMTHits\")\n#getMMTHits.CollectionName = \"MMT/Hits\"\n#getMMTHits.MCHitsLocation = \"/Event/MC/MMT/Hits\"\n#getMMTHits.Detectors = [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/MMT2\" ]\n#GaudiSequencer(\"DetectorsHits\").Members += [ getMMTHits ]\n\n#getMMTHits = GetMMTHitsAlg(\"GetMMTHits\")\n#getMMTHits.CollectionName = \"MMT/Hits\"\n#getMMTHits.MCHitsLocation = \"/Event/MC/MMT/Hits\"\n#getMMTHits.Detectors = [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/MMT3\" ]\n#GaudiSequencer(\"DetectorsHits\").Members += [ getMMTHits ]\n\n## !! Attention !! : When using tag 3.1.0 (Run 1), please activate MMT2014 and deactivate : MMT1, MMT2, MMT3 !!\ngetMMTHits = GetMMTHitsAlg(\"GetMMTHits\")\ngetMMTHits.CollectionName = \"MMT/Hits\"\ngetMMTHits.MCHitsLocation = \"/Event/MC/MMT/Hits\"\ngetMMTHits.Detectors = [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/MMT2014\" ]\nGaudiSequencer(\"DetectorsHits\").Members += [ getMMTHits ]\n\n## !! Attention !! : If using tag 3.0.0 (Run 2), please activate NTD2015 and deactivate : NTDRun1 !!\n## !! Attention !! : If using tag 3.1.0 (Run 1), please activate NTDRun1 and deactivate : NTD2015 !!\ngetNTDHits = GetNTDHitsAlg(\"GetNTDHits\")\ngetNTDHits.CollectionName = \"NTD/Hits\"\ngetNTDHits.MCHitsLocation = \"/Event/MC/NTD/Hits\"\n#getNTDHits.Detectors = [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/NTD2015\" ]\ngetNTDHits.Detectors = [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/NTDRun1\" ]\nGaudiSequencer(\"DetectorsHits\").Members += [ getNTDHits ]\n\n## Disable NTD hit retrieval when NTDs are disabled in geometry\ngetNTDHits = GetNTDHitsAlg(\"GetNTDHits\")\ngetNTDHits.CollectionName = \"NTD/Hits\"\ngetNTDHits.MCHitsLocation = \"/Event/MC/NTD/Hits\"\ngetNTDHits.Detectors = [ \"/dd/Structure/LHCb/BeforeMagnetRegion/MoEDAL/HCC2015\" ]\nGaudiSequencer(\"DetectorsHits\").Members += [ getNTDHits ]\n\n##############################################################################\n# Define the MoEDAL option variables\n##############################################################################\n\n# Define the monopole properties.\n\n## The magnetic monopole PDG ID.\nmonopole_pdg = 4110000 #shouldn't coincide with other particles\n\n## The magnetic monopole mass [GeV].\nmonopole_mass = MONOPOLE_MASS_GEV # [GeV]\n\n## The magnetic monopole electric charge [e].\nmonopole_elcharge = MONOPOLE_ELECTRIC_CHARGE # [e]\n\n## The magnetic monopole magnetic charge [g_D].\nmonopole_magcharge = MONOPOLE_MAGNETIC_CHARGE # [g_D]\n\n# Define the monopole behavior.\n\n## Use fields?\nmonopole_usefields = True\n\n## The minimum beta for the magnetic monopoles.\nmonopole_minbeta = 1.0e-3\n\n## nint/step: q = 0, 1, 2, 3, 4, 5, 6\n\nnintpersteparray = [ 1, 10, 20, 40, 80, 160, 320]\n\nnintperstep = nintpersteparray[ int(monopole_magcharge) ]\n\n\n##############################################################################\n# Gauss controls\n##############################################################################\n\n## The number of events to process.\nLHCbApp().EvtMax = NUMBER_OF_EVENTS\n\n# Define input file in LHE xml format\n# This should have been 
specified in cfg_lhe.py file.\n#Generation().Special.ReadLHEfileProduction.InputFile = \"events.lhe\"\n\n##############################################################################\n# Output files\n##############################################################################\n\n## The output filename.\nMonopoleTupleAlg().OutputNtupleFilename = \"MONOPOLE_DATA_ROOT\"\n\n## Reading the NTD hits\nMonopoleTupleAlg().ReadNTDHits = True\n\n## The generator information output filename.\nGenTupleAlg().OutputNtupleFilename = \"GEN_DATA_ROOT\"\n\n\n##############################################################################\n# Apply variables to MoEDAL options\n##############################################################################\n\nGiGa().ModularPL.GiGaPhysConstructorMonopole.PdgId = monopole_pdg\nGiGa().ModularPL.GiGaPhysConstructorMonopole.Mass = monopole_mass\nGiGa().ModularPL.GiGaPhysConstructorMonopole.ElCharge = monopole_elcharge\nGiGa().ModularPL.GiGaPhysConstructorMonopole.MagCharge = monopole_magcharge\n\nGiGa().ModularPL.GiGaPhysConstructorMonopole.UseFields = monopole_usefields\nGiGa().ModularPL.GiGaPhysConstructorMonopole.MinBeta = monopole_minbeta\nGiGa().ModularPL.GiGaPhysConstructorMonopole.NumberOfInteractionsPerStep = nintperstep\n\n\n############################################################################\n## Add monopole information to Ntuple writer\n## (should retrieve particle information from ParticlePropertySvc, but no magnetic charge field available)\nMonopoleTupleAlg().MonopolePdgs = [ monopole_pdg, -monopole_pdg ]\nMonopoleTupleAlg().MonopoleQmags = [ monopole_magcharge, -monopole_magcharge ]\nGenTupleAlg().MonopolePdgs = [ monopole_pdg, -monopole_pdg ]\nGenTupleAlg().MonopoleQmags = [ monopole_magcharge, -monopole_magcharge ]\n\n\n##############################################################################\n# Patch ParticlePropertySvc to include the monopole details.\n# It's not yet entirely clear what uses this and what uses the (i.e. 
mass)\n# definitions in GiGaPhysConstructorMonopole.\n# - Either the simulation or generation phases in Gauss definitely use these\n# values.\nParticlePropertyFile = open(\"ParticlePropertySvc_Monopole.txt\", 'w')\nParticlePropertyFile.write(\"\\# ParticlePropertySvc file automatically generated by MoEDAL_options.py\\n\")\nParticlePropertyFile.write(\"PARTICLE\")\n\nParticlePropertyFile.write('\\n')\nParticlePropertyFile.write('magnetic_monopole') # PARTICLE NAME\nParticlePropertyFile.write('\\t' + str(monopole_pdg)) # GEANTID\nParticlePropertyFile.write('\\t' + str(monopole_pdg)) # PDGID\nParticlePropertyFile.write('\\t' + str(monopole_elcharge)) # CHARGE\nParticlePropertyFile.write('\\t' + str(monopole_mass)) # MASS(GeV)\nParticlePropertyFile.write('\\t-1') # TLIFE(s)\nParticlePropertyFile.write('\\tmagnetic_monopole') # EVTGENNAME\nParticlePropertyFile.write('\\t' + str(monopole_pdg)) # PYTHIAID\nParticlePropertyFile.write('\\t0.00000000') # MAXWIDTH\n\nParticlePropertyFile.write('\\n')\nParticlePropertyFile.write('antimagnetic_monopole') # PARTICLE NAME\nParticlePropertyFile.write('\\t' + str(-monopole_pdg)) # GEANTID\nParticlePropertyFile.write('\\t' + str(-monopole_pdg)) # PDGID\nParticlePropertyFile.write('\\t' + str(-monopole_elcharge)) # CHARGE\nParticlePropertyFile.write('\\t' + str(monopole_mass)) # MASS(GeV)\nParticlePropertyFile.write('\\t-1') # TLIFE(s)\nParticlePropertyFile.write('\\tantimagnetic_monopole') # EVTGENNAME\nParticlePropertyFile.write('\\t' + str(-monopole_pdg)) # PYTHIAID\nParticlePropertyFile.write('\\t0.00000000') # MAXWIDTH\n\nLHCb__ParticlePropertySvc().OtherFiles = [ \"ParticlePropertySvc_Monopole.txt\" ]\n","sub_path":"configuration_LHE_TEMPLATE.py","file_name":"configuration_LHE_TEMPLATE.py","file_ext":"py","file_size_in_byte":14325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"151151505","text":"import numpy as np\nfrom numpy.linalg import norm\nimport matplotlib.pyplot as plt\nimport os\n\ndef plotRoom2( obblist, labellist, scheme, output_folder, scenename, roomname ):\n\n # scheme == 0: show objects with different color\n # scheme == 1: show areas with different color\n linewidth = 2\n\n corlist = np.array([ 'r', 'g', 'c', 'b', 'm'])\n\n floor_ind = [idx for idx, label in enumerate(labellist) if label == 'floor']\n ceiling_ind = [idx for idx, label in enumerate(labellist) if label == 'ceiling']\n wall_ind = [idx for idx, label in enumerate(labellist) if label == 'wall']\n virtual_wall_ind = [idx for idx, label in enumerate(labellist) if label == 'virtual_wall']\n # if len(wall_ind) <= 6:\n # return\n com_ind = np.union1d(floor_ind, ceiling_ind)\n com_ind = np.union1d(com_ind, wall_ind)\n com_ind = np.union1d(com_ind, virtual_wall_ind)\n # idx = 1:len(p_data.labellist)\n # idx = setdiff(idx, com_ind)\n # labellist = labellist\n # obblist = obblist\n\n front = [0,0,1]\n up = [0,1,0]\n axes = [1,0,0]\n\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n for i in range(len(labellist)):\n # if labellist[i] == 'virtual_wall' or labellist[i] == 'floor':\n # continue\n if labellist[i] == 'floor':\n continue\n p = obblist[:,i]\n\n center = p[0:3]\n dir_1 = p[3:6]\n dir_2 = p[6:9]\n lens = p[9:12]\n\n dir_1 = dir_1 / norm(dir_1)\n dir_2 = dir_2 / norm(dir_2)\n dir_3 = np.cross(dir_1,dir_2)\n dir_3 = dir_3 / norm(dir_3)\n # dir = lens(1) * dir_1 + lens(2) * dir_2 + lens(3) * dir_3\n\n d1 = 0.5 * lens[0] * dir_1\n d2 = 0.5 * lens[1] * dir_2\n d3 = 0.5 * lens[2] * dir_3\n # d1 = d1 * front(:)\n # d2 = d2 
* up(:)\n        # d3 = d3 * axes(:)\n        cornerpoints = np.full((4, 3), np.nan)\n        cornerpoints[0] = center - d1 - d2 - d3\n        cornerpoints[1] = center + d1 - d2 - d3\n        cornerpoints[2] = center - d1 - d2 + d3\n        cornerpoints[3] = center + d1 - d2 + d3\n        #cornerpoints(:,1) = 1 - cornerpoints(:,1)\n\n        idx = np.where(com_ind == i)[0]\n        if len(idx) == 0:\n            ### unclear where p_data is supposed to come from\n            # if (scheme == 1) and ('area_id' in p_data):\n            #     aid = p_data.area_id[i]\n            #     if aid == 0:\n            #         cor = [0,0,0,0.3]\n            #     else:\n            #         cor = corlist[ (aid - 1) % len(corlist) ]\n            # else\n            cor = corlist[ i % len(corlist) ]\n\n            ax.plot([cornerpoints[0,0], cornerpoints[1,0]], [cornerpoints[0,2], cornerpoints[1,2]], color = cor)\n            ax.plot([cornerpoints[1,0], cornerpoints[3,0]], [cornerpoints[1,2], cornerpoints[3,2]], color = cor)\n            ax.plot([cornerpoints[3,0], cornerpoints[2,0]], [cornerpoints[3,2], cornerpoints[2,2]], color = cor)\n            ax.plot([cornerpoints[2,0], cornerpoints[0,0]], [cornerpoints[2,2], cornerpoints[0,2]], color = cor)\n            label = labellist[i]\n            label = label.replace('_', ' ')\n\n            min_x = min(cornerpoints[:,0])\n            max_z = max(cornerpoints[:,2])\n            ax.text(min_x + 0.02, max_z - 0.1, label + str(i+1), fontsize = 8)  ### 0.01 0.02\n            ax.set_aspect('equal', 'box')\n            # t.BackgroundColor = cor\n        else:\n            ax.plot([cornerpoints[0,0], cornerpoints[1,0]], [cornerpoints[0,2], cornerpoints[1,2]], color = 'k')\n            ax.plot([cornerpoints[1,0], cornerpoints[3,0]], [cornerpoints[1,2], cornerpoints[3,2]], color = 'k')\n            ax.plot([cornerpoints[3,0], cornerpoints[2,0]], [cornerpoints[3,2], cornerpoints[2,2]], color = 'k')\n            ax.plot([cornerpoints[2,0], cornerpoints[0,0]], [cornerpoints[2,2], cornerpoints[0,2]], color = 'k')\n            label = labellist[i]\n            label = label.replace('_', ' ')\n\n            x = np.average(cornerpoints[:,0])\n            z = np.average(cornerpoints[:,2])\n            ax.text(x, z + 0.3 , label + str(i+1), fontsize = 8)\n    ax.set_aspect('equal', 'box')\n    filename = roomname + '.jpg'\n    plt.savefig(f\"{output_folder}/{scenename}/{filename}\", dpi = 400, bbox_inches = 'tight')\n    # plt.show()\n    # plt.close()\n    # axis equal\n","sub_path":"vistools/plotRoom2.py","file_name":"plotRoom2.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
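A hypothetical usage sketch for plotRoom2 above: each column of `obblist` packs one oriented bounding box as [center(3), dir_1(3), dir_2(3), lengths(3)], matching the p[0:3]/p[3:6]/p[6:9]/p[9:12] slicing in the function. All names and values below are made up for illustration; the import path is an assumption.

import numpy as np
from plotRoom2 import plotRoom2   # assumed import path

obblist = np.array([
    [0.0, 1.25, 2.0,  1.0, 0.0, 0.0,  0.0, 1.0, 0.0,  4.0, 2.5, 0.1],  # a wall-like box
    [1.0, 0.25, 1.0,  1.0, 0.0, 0.0,  0.0, 1.0, 0.0,  2.0, 0.5, 1.6],  # a bed-like box
]).T   # shape (12, n_objects); the code reads p = obblist[:, i]
labellist = ['wall', 'bed']
# needs an existing out/scene0 directory before saving the figure:
# plotRoom2(obblist, labellist, 0, 'out', 'scene0', 'room0')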
+{"seq_id":"419347053","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('registration', '0013_auto_20160609_0905'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='student',\n            name='aums_id',\n            field=models.CharField(max_length=32, unique=True, serialize=False, verbose_name='Aums ID', primary_key=True),\n        ),\n        migrations.AlterField(\n            model_name='student',\n            name='branch',\n            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='Branch', validators=[django.core.validators.RegexValidator(regex=b'^[A-Za-z]*$')]),\n        ),\n        migrations.AlterField(\n            model_name='student',\n            name='curr_course',\n            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='Current Course', validators=[django.core.validators.RegexValidator(regex=b'^[A-Za-z]*$')]),\n        ),\n        migrations.AlterField(\n            model_name='student',\n            name='name',\n            field=models.CharField(max_length=32, null=True, verbose_name='First Name', blank=True),\n        ),\n    ]\n","sub_path":"registration/migrations/0014_auto_20160609_0922.py","file_name":"0014_auto_20160609_0922.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"356291871","text":"#coding=utf8\n\nif __name__ == '__main__':\n\timport os\n\timport numpy as np\n\timport matplotlib.pyplot as plt\n\n\t# Plot length of dialogues\n\tdial_lens = []\n\tlabel_rate = 0.9\n\tfor filename in os.listdir(\"./\"):\n\t\tif filename.find(\"generated_dial_examples\") == -1:\n\t\t\tcontinue\n\t\tfor line in open(filename):\n\t\t\tdial_lens.append((len(line.strip().split(\"\\t\"))-1)/2)\n\tn, bins, patches = plt.hist(dial_lens, 10, density=False, facecolor='g', alpha=0.75)\n\n\tlabel_x = 0\n\tfor i in range(0, len(n)):\n\t\tif sum(n[:i]) > len(dial_lens) * label_rate:\n\t\t\tlabel_x = bins[i]\n\t\t\tbreak\n\tplt.plot([label_x, label_x], [0, 1200], 'r--', linewidth=1)\n\tplt.xlabel('Number of dialogue turns')\n\tplt.ylabel('Count')\n\tplt.title('Histogram of dialogue turns (AMT)')\n\tplt.text(label_x+1, 900, r'90% dials are less than ' + str(label_x) + r' turns')\n\tplt.grid(True)\n\tplt.savefig(\"hist_dial_turns.png\")\n","sub_path":"GAN_monster/discriminator/testset/statistic_results.py","file_name":"statistic_results.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"455201277","text":"import pygame\nimport sys\nimport time\nimport random\nimport pickle\nimport numpy as np\nimport copy\nimport os\nGREY=(111,111,111)\nWHITE=(255,255,255)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nscreen_size = (800,800)\ngame_size=(8,8)\ncube_height=screen_size[0]/game_size[0]\ncube_width=screen_size[1]/game_size[1]\nchess_vec=[[],[]]\ncandidate_list=[]\npos_dic={}\nchess_count=[0,0]\npos_pos=[[],[]]\n'''\nq_dic={}\ndef load_q_data(file_name):\n\twith open(file_name,'r') as f:\n\t\tq_dic=pickle.load(f)\ndef save_q_data(file_name):\n\twith open(file_name,'w') as f:\n\t\tpickle.dump(q_dic,f)\ndef get_status_string():\n\tst=''\n\tfor i in range(game_size[0]):\n\t\tfor j in range(game_size[1]):\n\t\t\tst+=str(pos_dic[(i,j)].occupied)\n\treturn st\ndef q_learning(a,y,st1,st2,choice):\n\tif st2 not in q_dic.keys():\n\t\tq_dic[st2]={}\n\tif st1 not in q_dic.keys():\n\t\tq_dic[st1]={}\n\tif choice not in q_dic[st1].keys():\n\t\tq_dic[st1][choice]=0\n\tmaxi=0\n\tfor each in q_dic[st2].keys():\n\t\tvv=q_dic[st2][each]\n\t\tif vv>maxi:\n\t\t\tmaxi=vv\n\tq_dic[st1][choice]=(1-a)*q_dic[st1][choice]+a*maxi*y\n'''\nclass one_pos():\n\tdef __init__(self,pos):\n\t\tself.pos=pos\n\t\tself.occupied=0\n\t\tself.neighbors=[0]*8\n\tdef occupy(self,occupied):\n\t\t##st1=get_status_string()\n\t\tself.occupied=occupied\n\t\tchess_vec[occupied-1].append(self.pos)\n\t\tene_occupied=occupied%2+1\n\t\tchess_count[occupied-1]+=1\n\t\tif self in candidate_list:\n\t\t\tcandidate_list.remove(self)\n\t\tfor i in range(8):\n\t\t\tif self.neighbors[i]==0:\n\t\t\t\tcontinue\n\t\t\tif self.neighbors[i].occupied==0:\n\t\t\t\tif self.neighbors[i] not in candidate_list:\n\t\t\t\t\tcandidate_list.append(self.neighbors[i])\n\t\t\tif self.neighbors[i].occupied==ene_occupied:\n\t\t\t\tcurrent_node=self.neighbors[i]\n\t\t\t\twhile 
1:\n\t\t\t\t\t\t\tcurrent_node=current_node.neighbors[(i+4)%8]\n\t\t\t\t\t\t\tif current_node.occupied==occupied:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tcurrent_node.occupied=occupied\n\t\t\t\t\t\t\tchess_vec[occupied-1].append(current_node.pos)\n\t\t\t\t\t\t\tchess_vec[ene_occupied-1].remove(current_node.pos)\n\t\t\t\t\t\t\tchess_count[occupied-1]+=1\n\t\t\t\t\t\t\tchess_count[ene_occupied-1]-=1\n\t\t\t\t\t\tbreak\n\t\t##st2=get_status_string()\n\t\t##a=0.9\n\t\t##y=0.9\n\t\t##q_learning(a,y,st1,st2,self.pos)\nAI_player_list=[]\nclass AI_player():\n\tdef __init__(self,para=[[],[]]):\n\t\tself.para=para\n\t\tif len(para[0])==0:\n\t\t\tfor i in range(4):\n\t\t\t\trand1=random.random()\n\t\t\t\trand2=random.random()\n\t\t\t\tself.para[0].append(rand1*(i+1)*2)\n\t\t\t\tself.para[1].append(rand2*(i+1)*2)\n\tdef merry_and_birth(self,another):\n\t\tchild_para=[[[],[]],[[],[]],[[],[]],[[],[]]]\n\t\ttup=(self.para,another.para)\n\t\tfor i in range(4):\n\t\t\tfor j in range(2):\n\t\t\t\tcomb=np.random.randint(0,2,4)\n\t\t\t\tfor k in range(len(comb)):\n\t\t\t\t\tchild_para[i][j].append(tup[comb[k]][j][k])\n\t\tdecide=random.random()\n\t\tif decide<0.02:\n\t\t\tc1=np.random.randint(0,4)\n\t\t\tc2=np.random.randint(0,2)\n\t\t\tc3=np.random.randint(0,4)\n\t\t\trand3=random.random()\n\t\t\tchild_para[c1][c2][c3]=rand3*(c3+1)\n\t\telif decide<0.12:\n\t\t\tc1=np.random.randint(0,4)\n\t\t\tc2=np.random.randint(0,2)\n\t\t\tc3=np.random.randint(0,4)\n\t\t\trandp=random.random()\n\t\t\tc4=np.random.randint(0,2)\n\t\t\tif c4==0:\n\t\t\t\tc4=-1\n\t\t\tchild_para[c1][c2][c3]=(1+c4*0.1*randp)*child_para[c1][c2][c3]\n\t\t\tif child_para[c1][c2][c3]>2*(c3+1):\n\t\t\t\tchild_para[c1][c2][c3]=2*(c3+1)\n\t\tfor each in child_para:\n\t\t\tnew_child=AI_player(each)\n\t\t\tAI_player_list.append(new_child)\n\t\tself.die()\n\t\tanother.die()\n\tdef fight(self,another,depth,execute):\n\t\tparas=(self.para,another.para)\n\t\twin_sta=[]\n\t\tfor t in range(2):\n\t\t\tinit()\n\t\t\tturn=t\n\t\t\twhile 1:\n\t\t\t\tchoice=AI_choice(turn,depth,paras[turn])[1]\n\t\t\t\tpos_dic[choice].occupy(turn+1)\n\t\t\t\twini=detect_win()\n\t\t\t\tif detect_win()!=-2:\n\t\t\t\t\tif wini==0:\n\t\t\t\t\t\twin_sta.append(0)\n\t\t\t\t\telif wini==1:\n\t\t\t\t\t\twin_sta.append(1)\n\t\t\t\t\telse:\n\t\t\t\t\t\twin_sta.append(-1)\n\t\t\t\t\tbreak\n\t\t\t\tturn=(turn+1)%2\n\t\t\t\tpos_pos[turn]=get_av_pos(turn)\n\t\t\t\tif len(pos_pos[turn])==0:\n\t\t\t\t\tturn=(turn+1)%2\n\t\t\t\t\tpos_pos[turn]=get_av_pos(turn)\n\t\tif execute==1:\n\t\t\tif win_sta[0]+win_sta[1]>0:\n\t\t\t\tanother.die()\n\t\t\telse:\n\t\t\t\tself.die()\n\t\telse:\n\t\t\treturn win_sta\n\tdef die(self):\n\t\tif self in AI_player_list:\n\t\t\tAI_player_list.remove(self)\ndef draw_lines(screen):\n\tfor i in range(1,game_size[0]):\n\t\tpygame.draw.aaline(screen, WHITE,(i*cube_height,0),(i*cube_height,screen_size[1]),5)\n\tfor i in range(1,game_size[1]):\n\t\tpygame.draw.aaline(screen, WHITE,(0,i*cube_width),(screen_size[0],i*cube_width),5)\ndef draw_cubes(screen):\n\tfor each in chess_vec[0]:\n\t\tpygame.draw.rect(screen,[0,0,0],[(each[1])*cube_width,(each[0])*cube_height,cube_width,cube_height],0)\n\tfor each in chess_vec[1]:\n\t\tpygame.draw.rect(screen,[255,255,255],[(each[1])*cube_width,(each[0])*cube_height,cube_width,cube_height],0)\nsaved_list=[]\ndef save():\n\tdata=pickle.dumps((candidate_list,pos_dic,chess_vec,chess_count))\n\tsaved_list.append(data)\ndef load():\n\tglobal candidate_list\n\tglobal pos_dic\n\tglobal chess_vec\n\tglobal 
chess_count\n\t(candidate_list,pos_dic,chess_vec,chess_count)=pickle.loads(saved_list[-1])\ndef init():\n\tglobal pos_dic\n\tglobal chess_vec\n\tglobal candidate_list\n\tglobal pos_pos\n\tglobal chess_count\n\tpos_dic.clear()\n\tfor i in range(game_size[0]):\n\t\tfor j in range(game_size[1]):\n\t\t\ta_pos=one_pos((i,j))\n\t\t\tpos_dic[(i,j)]=a_pos\n\tfor i in range(game_size[0]):\n\t\tfor j in range(game_size[1]):\n\t\t\tlis=[(i,j+1),(i+1,j+1),(i+1,j),(i+1,j-1),(i,j-1),(i-1,j-1),(i-1,j),(i-1,j+1)]\n\t\t\tfor k in range(len(lis)):\n\t\t\t\tif lis[k][0]>=0 and lis[k][0]<game_size[0] and lis[k][1]>=0 and lis[k][1]<game_size[1]:\n\t\t\t\t\tpos_dic[(i,j)].neighbors[k]=pos_dic[lis[k]]\n\tchess_vec[0].clear()\n\tchess_vec[1].clear()\n\tcandidate_list.clear()\n\tchess_count[0]=0\n\tchess_count[1]=0\n\tmid1=int(game_size[0]/2)\n\tmid2=int(game_size[1]/2)\n\tpos_dic[(mid1-1,mid2-1)].occupy(1)\n\tpos_dic[(mid1,mid2)].occupy(1)\n\tpos_dic[(mid1-1,mid2)].occupy(2)\n\tpos_dic[(mid1,mid2-1)].occupy(2)\n\tpos_pos[0]=get_av_pos(0)\n\tpos_pos[1]=get_av_pos(1)\ndef get_av_pos(turn):\n\tav_pos=[]\n\tene_occupied=(turn+1)%2+1\n\tfor each in candidate_list:\n\t\tfor i in range(8):\n\t\t\tif each.neighbors[i]==0:\n\t\t\t\tcontinue\n\t\t\tif each.neighbors[i].occupied!=ene_occupied:\n\t\t\t\tcontinue\n\t\t\tcurrent_node=each.neighbors[i]\n\t\t\twhile 1:\n\t\t\t\tcurrent_node=current_node.neighbors[i]\n\t\t\t\tif current_node==0:\n\t\t\t\t\tbreak\n\t\t\t\tif current_node.occupied==0:\n\t\t\t\t\tbreak\n\t\t\t\tif current_node.occupied==turn+1:\n\t\t\t\t\tif each.pos not in av_pos:\n\t\t\t\t\t\tav_pos.append(each.pos)\n\t\t\t\t\tbreak\n\treturn av_pos\ndef detect_win():\n\tif len(get_av_pos(0))>0 or len(get_av_pos(1))>0:\n\t\treturn -2\n\t##str3=get_status_string()\n\tif chess_count[0]==chess_count[1]:\n\t\treturn 0\n\tif chess_count[0]>chess_count[1]:\n\t\t##q_dic[str3][(-1,-1)]=100\n\t\treturn 1\n\telse:\n\t\t##q_dic[str3][(-1,-1)]=-100\n\t\treturn -1\ndef evaluate(turn,emp_para,ene_para):\n\tene_turn=(turn+1)%2\n\tval=chess_count[turn]-chess_count[ene_turn]\n\tfor each in candidate_list:\n\t\tfor i in range(8):\n\t\t\tif each.neighbors[i]==0:\n\t\t\t\tcontinue\n\t\t\tif each.neighbors[i].occupied==0:\n\t\t\t\tcontinue\n\t\t\tnext_to=each.neighbors[i].occupied\n\t\t\tcurrent_node=each.neighbors[i]\n\t\t\tl_count=0\n\t\t\ttempi=emp_para[0]\n\t\t\twhile 1:\n\t\t\t\tcurrent_node=current_node.neighbors[i]\n\t\t\t\tif current_node==0:\n\t\t\t\t\tbreak\n\t\t\t\tif current_node.occupied==next_to:\n\t\t\t\t\tl_count+=1\n\t\t\t\t\tcontinue\n\t\t\t\tif l_count>3:\n\t\t\t\t\ttempi1=(l_count+1)*emp_para[3]/4\n\t\t\t\t\ttempi2=(l_count+1)*ene_para[3]/4\n\t\t\t\telse:\n\t\t\t\t\ttempi1=emp_para[l_count]\n\t\t\t\t\ttempi2=ene_para[l_count]\n\t\t\t\tif current_node.occupied==0:\n\t\t\t\t\tif next_to==turn+1:\n\t\t\t\t\t\tval-=tempi1\n\t\t\t\t\telse:\n\t\t\t\t\t\tval+=tempi1\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tif next_to==turn+1:\n\t\t\t\t\t\tval-=tempi2\n\t\t\t\t\telse:\n\t\t\t\t\t\tval+=tempi2\n\t\t\t\t\tbreak\n\treturn val\ndef min_max(turn,depth,para,min_lim=100000):\n\tif depth==0:\n\t\tvalu=evaluate(turn,para[0],para[1])\n\t\treturn (valu,(-1,-1))\n\tif depth==1:\n\t\tmaxi=-1000000\n\t\tchoici_keep=(-1,-1)\n\t\tsave()\n\t\tav_pos=get_av_pos(turn)\n\t\tfor each in av_pos:\n\t\t\tpos_dic[each].occupy(turn+1)\n\t\t\tvv=evaluate(turn,para[0],para[1])\n\t\t\tif vv>maxi:\n\t\t\t\tmaxi=vv\n\t\t\t\tchoici_keep=each\n\t\t\tload()\n\t\tsaved_list.pop()\n\t\treturn (maxi,choici_keep)\n\tmax_value=-100000\n\tchoice_keep=(-1,-1)\n\tene_turn=(turn+1)%2\n\tsave()\n\tav_pos1=get_av_pos(turn)\t\n\tval2=0\n\tfor each in av_pos1:\n\t\tpos_dic[each].occupy(turn+1)\n\t\tsave()\n\t\tmin_value=100000\n\t\tjian2=0\n\t\tav_pos3=get_av_pos(ene_turn)\n\t\tval2=evaluate(turn,para[0],para[1])\n\t\tmin_value=val2\n\t\tfor each3 in av_pos3:\n\t\t\tpos_dic[each3].occupy(ene_turn+1)\n\t\t\tval2=min_max(turn,depth-2,para,min_value)[0]\n\t\t\tload()\n\t\t\tif val2==-1000000:\n\t\t\t\tcontinue\n\t\t\tif val2<min_value:\n\t\t\t\tmin_value=val2\n\t\tsaved_list.pop()\n\t\tload()\n\t\tif min_value>min_lim:\n\t\t\tsaved_list.pop()\n\t\t\treturn (-1000000,(-1,-1))\n\t\tif jian2==1:\n\t\t\tcontinue\n\t\tif min_value!=100000 and min_value>max_value:\n\t\t\tmax_value=min_value\n\t\t\tchoice_keep=each\n\tsaved_list.pop()\n\treturn (max_value,choice_keep)\t\ndef AI_choice(turn,depth,para):\n\tchoice=min_max(turn,depth,para)\n\treturn choice\n
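# A hypothetical headless smoke test for the search code above: one AI-vs-AI
# game at depth 1 with made-up evaluation weights (placeholders, not evolved values).
def demo_selfplay():
	init()
	para=[[1.0,2.0,3.0,4.0],[1.0,2.0,3.0,4.0]]	# placeholder weights for own/enemy terms
	turn=0
	while detect_win()==-2:
		if len(get_av_pos(turn))==0:	# current player has no legal move: pass
			turn=(turn+1)%2
			continue
		move=AI_choice(turn,1,para)[1]
		pos_dic[move].occupy(turn+1)
		turn=(turn+1)%2
	print('result:',detect_win(),chess_count)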
def championship(size,depth):\n\tglobal AI_player_list\n\tprint('welcome to the championship')\n\twhile 1:\n\t\tAI_player_list2=AI_player_list.copy()\n\t\tfor k in range(int(size/2)):\n\t\t\tprint(k)\n\t\t\tAI_player_list2[k].fight(AI_player_list2[k+int(size/2)],depth,1)\n\t\tsize=size/2\n\t\tif size<20:\n\t\t\tbreak\n\tcounts=[]\n\tfor i in range(len(AI_player_list)):\n\t\tcounts.append(0)\n\tfor i in range(len(AI_player_list)-1):\n\t\tfor j in range(i+1,len(AI_player_list)):\n\t\t\tval_tup=AI_player_list[i].fight(AI_player_list[j],depth,0)\n\t\t\tval=val_tup[0]+val_tup[1]\n\t\t\tcounts[i]+=val\n\t\t\tcounts[j]+=(-1)*val\n\tmaxi=-10000\n\tpos_keep=-1\n\tfor i in range(len(counts)):\n\t\tif counts[i]>maxi:\n\t\t\tmaxi=counts[i]\n\t\t\tpos_keep=i\n\tprint('The champion para is: ',AI_player_list[pos_keep].para)\n\treturn AI_player_list[pos_keep].para\ndef half_it(size,depth):\n\tAI_player_list2=AI_player_list.copy()\n\tfor k in range(int(size/2)):\n\t\tprint(k)\n\t\tAI_player_list2[k].fight(AI_player_list2[k+int(size/2)],depth,1)\t\t\ndef evolution(size,generation,depth,evolute_from=[]):\n\tglobal AI_player_list\n\tAI_player_list.clear()\n\tif len(evolute_from)==0:\n\t\tfor i in range(size):\n\t\t\tn_player= AI_player(para=[[],[]])\n\t\t\tAI_player_list.append(n_player)\n\telse:\n\t\tAI_player_list=evolute_from\n\tfor i in range(generation):\n\t\tprint('generation',i+1)\n\t\tAI_player_list2=AI_player_list.copy()\n\t\tprint(i+1,': fighting.....')\n\t\tfor k in range(int(size/2)):\n\t\t\tprint(k)\n\t\t\tAI_player_list2[k].fight(AI_player_list2[k+int(size/2)],depth,1)\n\t\tprint(i+1,': birthing.....')\n\t\tAI_player_list3=AI_player_list.copy()\n\t\tfor k in range(int(size/4)):\n\t\t\tAI_player_list3[k].merry_and_birth(AI_player_list3[k+int(size/4)])\n\t##with open('group_'+str(size)+'_'+str(generation)+'_'+str(depth)+'.txt','w') as f:\n\t##\tpickle.dump(AI_player_list,f)\n\treturn AI_player_list\ndef PVP():\n\tinit()\n\tpygame.init()\n\tscreen = pygame.display.set_mode(screen_size, 0, 32)\n\tpygame.display.set_caption(\"Othello\")\n\tFPS=30\n\tclock = pygame.time.Clock()\n\tturn=0\n\twin_lab=0\n\twhile win_lab==0:\n\t\tclock.tick(FPS)\n\t\tl=0\n\t\tif pygame.mouse.get_pressed()[0]:\n\t\t\tpos=pygame.mouse.get_pos()\n\t\t\tpos_=(int(pos[1]/cube_height),int(pos[0]/cube_width))\t\t\t\t\n\t\t\tif pos_ in pos_pos[turn]:\n\t\t\t\tl=1\n\t\tif l==1:\n\t\t\tif pos_dic[pos_].occupied==0:\n\t\t\t\tpos_dic[pos_].occupy(turn+1)\n\t\t\twini=detect_win()\n\t\t\tif wini!=-2:\n\t\t\t\tif wini==0:\n\t\t\t\t\tprint('tie')\n\t\t\t\telse:\n\t\t\t\t\tprint('player ',(chess_count[0]<chess_count[1])+1,' wins')\n\t\t\t\twin_lab=1\n None:\n        \"\"\"\n        Assert that the boolean constraint flag is True.\n\n        :param policy_params: Tuple of parameters that specify the solver to be used by the policy.\n        :return: None.\n        \"\"\"\n\n        assert policy_params.boolean_action_flag, \\\n            f\"MIP requires boolean_action_flag = True, so activity rates are binary, \" \\\n            f\"but provided: {policy_params.boolean_action_flag}.\"\n","sub_path":"src/snc/agents/hedgehog/hh_agents/pure_feedback_mip_hedgehog_agent.py","file_name":"pure_feedback_mip_hedgehog_agent.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"131768403","text":"import re\n\nfrom django.contrib.auth.models import User\nfrom django import forms\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\n\nfrom sideloader.db import models\n\n\nclass BaseModelForm(forms.ModelForm):\n    helper = FormHelper()\n    helper.form_class = 'form-horizontal'\n    helper.label_class = 'col-lg-2'\n    helper.field_class = 'col-lg-8'\n    helper.add_input(Submit('submit', 'Submit'))\n\nclass BaseForm(forms.Form):\n    helper = FormHelper()\n    helper.form_class = 'form-horizontal'\n    helper.label_class = 'col-lg-2'\n    helper.field_class = 'col-lg-8'\n    helper.add_input(Submit('submit', 'Submit'))\n\nclass ProjectForm(BaseModelForm):\n    allowed_users = forms.ModelMultipleChoiceField(\n 
queryset=User.objects.all().order_by('username'),\n required=False\n )\n\n allowed_users.help_text = ''\n\n class Meta:\n model = models.Project\n exclude = ()\n\nclass PackageRepoForm(BaseModelForm):\n class Meta:\n model = models.PackageRepo\n exclude = ()\n\nclass RepoForm(BaseModelForm):\n github_url = forms.CharField(label=\"Git checkout URL\")\n\n build_type = forms.ChoiceField(\n label='Deploy type',\n widget=forms.Select,\n choices=(\n ('virtualenv', 'Virtualenv'),\n ('python', 'Python package'), ('flat', 'Flat')))\n\n version_getter = forms.ChoiceField(\n label='Package version',\n widget=forms.Select,\n choices=(\n ('setup.py', 'Python setup.py'),\n ('autonum', 'Auto increment'),\n ('script', 'Custom script'),\n )\n )\n version_cmd = forms.CharField(\n widget=forms.Textarea,\n label=\"Version script\",\n required=False\n )\n\n class Meta:\n model = models.Repo\n exclude = ('idhash', 'created_by_user', 'build_counter', 'project')\n\n def clean(self):\n cleaned_data = super(RepoForm, self).clean()\n\n uri = cleaned_data['github_url'].strip()\n if not (uri[-4:] == '.git'):\n raise forms.ValidationError(\"Not a valid Git URI\")\n\n cleaned_data['github_url'] = uri\n\n return cleaned_data\n\nclass ServerRequestForm(BaseModelForm):\n inftype = forms.ChoiceField(\n label='Infrastructure type',\n widget=forms.Select,\n choices=(\n ('prd', 'Production'),\n ('qa', 'QA'), ('stg', 'Staging')))\n \n cpus = forms.IntegerField(label=\"CPU Cores\", required=True,\n initial=1,\n max_value=8,\n min_value=1,\n help_text=\"Must be between 1 and 8\")\n\n memory = forms.IntegerField(label=\"Memory (GB)\", required=True, \n initial=2,\n max_value=24,\n min_value=1,\n help_text=\"Must be between 1 and 24\")\n\n disk = forms.IntegerField(label=\"Disk space (GB)\", required=True,\n initial=50,\n max_value=250,\n min_value=25,\n help_text=\"Must be between 25 and 250\")\n\n class Meta:\n model = models.ServerRequest\n exclude = (\n 'requested_by', 'project', 'approved_by', 'approval',\n 'provisioned', 'request_date'\n )\n\n def clean(self):\n cleaned_data = super(ServerRequestForm, self).clean()\n\n name = cleaned_data['name'].strip()\n\n if ' ' in name:\n raise forms.ValidationError(\"Server name may not contain spaces\")\n\n if not re.match(r'^[\\w-]+$', name):\n raise forms.ValidationError(\"Server name may only contain letters and numbers\")\n\n cleaned_data['name'] = name.lower()\n\n return cleaned_data\n\nclass TargetForm(BaseModelForm):\n stream_mode = forms.ChoiceField(\n label='Deploy mode',\n widget=forms.Select,\n choices=(\n ('repo', 'Package repository'),\n ('server', 'Server'),\n )\n )\n\n\n class Meta:\n model = models.Target\n exclude = ('current_build', 'log', 'state', 'project')\n\nclass StreamForm(BaseModelForm):\n\n targets = forms.ModelMultipleChoiceField(\n queryset=models.Target.objects.all(),\n required=False\n )\n\n targets.help_text = ''\n\n package_type = forms.ChoiceField(\n label='Package type',\n widget=forms.Select,\n choices=(\n ('deb', 'Debian/Ubuntu'),\n ('rpm', 'RedHat'),\n ('docker', 'Docker image'),\n ('dockerhub', 'Docker Hub'),\n ('tar', 'TAR file'),\n ('pypi', 'PyPi Upload')\n )\n )\n\n architecture = forms.ChoiceField(\n label='CPU architecture',\n widget=forms.Select,\n choices=(\n ('amd64', 'amd64'),\n ('i386', 'i386'),\n )\n )\n\n auto_release = forms.BooleanField(\n help_text=\"Automatically deploy new builds to this release workflow\",\n required=False)\n\n require_signoff = forms.BooleanField(\n label=\"Require sign-off\",\n required=False)\n\n signoff_list = 
forms.CharField(\n widget=forms.Textarea,\n label=\"Sign-off list\",\n required=False,\n help_text=\"List email addresses on a new line\")\n\n quorum = forms.IntegerField(\n required=False,\n initial=0,\n help_text=\"Required number of sign-offs before release. 0 means all are required\")\n \n notify = forms.BooleanField(\n label=\"Notify\",\n help_text=\"Send notifications of releases by email\",\n required=False)\n\n class Meta:\n model = models.Stream\n exclude = ('project',)\n fields = (\n 'name', 'repo', 'branch', 'package_type', 'architecture',\n 'targets',\n 'post_build', 'auto_release', 'require_signoff',\n 'signoff_list', 'notify', 'notify_list',\n )\n\nclass ReleasePushForm(BaseModelForm):\n tz = forms.CharField(widget=forms.HiddenInput())\n\n class Meta:\n model = models.Release\n exclude = ('release_date', 'flow', 'build', 'waiting')\n\n#class ModuleForm(BaseModelForm):\n# class Meta:\n# model = models.ModuleManifest\n# fields = ('name', 'key', 'structure',)\n\n#class ManifestForm(BaseModelForm):\n# class Meta:\n# model = models.ServerManifest\n# exclude = ('release',)\n\nclass UserForm(BaseModelForm):\n password = forms.CharField(widget=forms.PasswordInput(), initial='')\n class Meta:\n model = User\n exclude = (\n 'email', 'username', 'is_staff', 'is_active', 'is_superuser',\n 'last_login', 'date_joined', 'groups', 'user_permissions'\n )\n","sub_path":"sideloader/web/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"634371567","text":"from seller import Seller\nfrom settings import CONTEXT, ADEX\nfrom gsp import GSP\nfrom vcg import VCG\nimport networkx as nx\nfrom common import get_logger\n\nclass AdExchange():\n def __init__(self):\n '''\n Add connection object here if exchange is physically located in a separate server\n '''\n self.__logger = get_logger(\"AdExchange\")\n self.__logger.info(\"### Starting Ad Exchange...\")\n self.__auction = ADEX['auction']\n self.__reserve = ADEX['reserve']\n\n def availableAttributes(self, path, G):\n return [(u, v, G[u][v]['numberOfStrands'], G[u][v]['capacityPerStrand'], \\\n G[u][v]['costPerStrand'], G[u][v]['ISP'], G[u][v]['prefixA'], G[u][v]['prefixB']\\\n ) for (u,v) in zip(path[0:],path[1:])]\n\n def updateSellerGraph(self, G, path, reqValues):\n for (u,v) in zip(path[0:], path[1:]):\n for item in reqValues:\n G[u][v]['numberOfStrands'] -= item.numberOfStrands\n\n def resourceAvailable(self, G, path, reqValues):\n pathHasCapacity = True\n pathHasStrands = True\n for (u,v) in zip(path[0:], path[1:]):\n for item in reqValues:\n if G[u][v]['numberOfStrands'] < item.numberOfStrands:\n pathHasStrands = False\n if G[u][v]['capacityPerStrand'] < item.capacityPerStrand:\n pathHasCapacity = False\n if pathHasStrands and pathHasCapacity:\n return True\n else:\n return False\n\n def getCostOfPath(self, path, G):\n cost = 0\n for (u,v) in zip(path[0:], path[1:]):\n cost += G[u][v]['costPerStrand']\n return cost\n\n def linksInPath(self, path):\n return zip(path[0:], path[1:])\n\n def updateRequestList(self, reqList, allocationDict):\n newReqList = []\n for k, v in reqList.items():\n for item in v:\n cName = item.clientName\n if (cName in allocationDict.keys()):\n # namedtuple is immutable, so a hacky way to create the mutable effect\n t1 = item._replace(winnerFlag = 1)\n t2 = t1._replace(toPay = allocationDict[cName])\n newReqList.append(t2)\n else:\n newReqList.append(item)\n\n return newReqList\n\n def 
runVickreyAuction(self, reqList, sellerGraph):\n        '''\n        Function to call VCG mechanism\n        '''\n        allocation = []\n        self.__logger.debug(\"[AdExchange][runVickreyAuction]\")\n        self.__logger.debug(\"Allocation decisions.\")\n\n        allocationDict = {}\n        for key, v in reqList.items():\n            k1,k2=key.split(\"#\")\n            # n denotes the number of customers bidding for that conduit\n            n = len(v)\n            slot_click = [1] * n\n\n            shortestPath = nx.shortest_path(sellerGraph, source=k1, target=k2)\n            gCoP = self.getCostOfPath(shortestPath, sellerGraph)\n            # number of links in a path is one less than the number of nodes;\n            # avoids calling len() on the zip object returned by linksInPath (fails on Python 3)\n            k = len(shortestPath) - 1\n            reserve = max(self.__reserve, gCoP/k)\n\n            bids = []\n            for item in v:\n                bids.append((item.clientName, item.bidPerStrand))\n\n            if nx.has_path(sellerGraph, k1, k2) and self.resourceAvailable(sellerGraph, shortestPath, v):\n                (alloc, payments) = VCG.compute(slot_click, reserve, bids)\n                allocation.extend(zip(alloc, [i * k for i in payments]))\n                for (kTest, vTest) in allocation:\n                    allocationDict[kTest] = vTest\n\n                # Updates sellerGraph with the allocation\n                self.__logger.debug(\"Before > {}\".format(self.availableAttributes(shortestPath, sellerGraph)))\n                self.updateSellerGraph(sellerGraph, shortestPath, v)\n                self.__logger.debug(\"After > {}\".format(self.availableAttributes(shortestPath, sellerGraph)))\n            else:\n                self.__logger.info(\"Link does not exist\")\n        return self.updateRequestList(reqList, allocationDict)\n\n    def runSecondPriceAuction(self, reqList, sellerGraph):\n        '''\n        Function to call GSP mechanism\n        '''\n        allocation = []\n        self.__logger.debug(\"[AdExchange][runSecondPriceAuction]\")\n        self.__logger.debug(\"Fiber allocation decisions.\")\n\n        allocationDict = {}\n        for key, v in reqList.items():\n            k1,k2=key.split(\"#\")\n            # n denotes the number of customers bidding for that conduit\n            n = len(v)\n            slot_click = [1] * n\n\n            shortestPath = nx.shortest_path(sellerGraph, source=k1, target=k2)\n            gCoP = self.getCostOfPath(shortestPath, sellerGraph)\n            # number of links in a path is one less than the number of nodes\n            k = len(shortestPath) - 1\n            reserve = max(self.__reserve, gCoP/k)\n\n            bids = []\n            for item in v:\n                bids.append((item.clientName, item.bidPerStrand))\n\n            if nx.has_path(sellerGraph, k1, k2) and self.resourceAvailable(sellerGraph, shortestPath, v):\n                (alloc, payments) = GSP.compute(slot_click, reserve, bids)\n                allocation.extend(zip(alloc, [i * k for i in payments]))\n                for (kTest, vTest) in allocation:\n                    allocationDict[kTest] = vTest\n\n                # Updates sellerGraph with the allocation\n                self.__logger.debug(\"Before > {}\".format(self.availableAttributes(shortestPath, sellerGraph)))\n                self.updateSellerGraph(sellerGraph, shortestPath, v)\n                self.__logger.debug(\"After > {}\".format(self.availableAttributes(shortestPath, sellerGraph)))\n            else:\n                self.__logger.info(\"Link does not exist or no resources available for the request\")\n        return self.updateRequestList(reqList, allocationDict)\n\n    def processClientRequests(self, reqList, sellerObj):\n        '''\n        Function to dispatch client request to the corresponding auction mechanism\n        '''\n        self.__logger.debug(\"[AdExchange][processClientRequest]Request List: {}\".format(\"|\".join(reqList)))\n        self.__logger.debug(\"[AdExchange][processClientRequest]Seller List (locations): {}\".format(\"|\".join(sellerObj.getSellerGraph())))\n        self.__logger.debug(\"[AdExchange][processClientRequest]Auction Type: {}\".format(self.__auction))\n        self.__logger.debug(\"[AdExchange][processClientRequest]Reserve Price: {}\".format(self.__reserve))\n\n        '''\n        # Commented -- vague code\n        for k, v in reqList.items():\n            for item in v:\n                
self.__logger.info(item)\n        '''\n\n        # check for auction type and call the corresponding functions\n        if self.__auction == \"vcg\":\n            return self.runVickreyAuction(reqList, sellerObj.getSellerGraph())\n        elif self.__auction == \"gsp\":\n            return self.runSecondPriceAuction(reqList, sellerObj.getSellerGraph())\n        else:\n            raise ValueError(\"Unknown auction type. Either use 'vcg' or 'gsp' in settings.\")\n","sub_path":"src/adexchange/adexchange.py","file_name":"adexchange.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"336243440","text":"from django.conf.urls import url\nfrom news import views\n\n\nurlpatterns = [\n    url(r'^(?P<page>[\\d]+)/$', views.news, name='news'),\n    url(r'^(?P<slug>[\\w]+)/$', views.one_news, name='one_news'),\n    url(r'^rubric/(?P<rubric>[\\w]+)/(?P<page>[\\d]+)/$', views.rubric, name='rubric'),\n    url(r'^country/(?P<country>[\\w]+)/(?P<page>[\\d]+)/$', views.country, name='country'),\n]\n","sub_path":"news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"474114441","text":"from kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.clock import Clock\n\nroot = Builder.load_string('''\n\nFloatLayout:\n    Scatter:\n        id: clockscatter\n        size_hint: None,None\n        size: 100,100\n        pos: root.width / 2 - self.width / 2, root.height / 2 - self.height / 2\n        Button:\n            text:'Clock'\n\n\n''')\n\nclass TestApp(App):\n    def build(self):\n        Clock.schedule_interval(self.rotate, 1) #1 second to rotate the clock, 2 -> 2 seconds, 0-> full speed ahead!\n        return root\n    def rotate(self, dt):\n        root.ids.clockscatter.rotation +=3 #move by 3 steps -> high # = increase speed of rotation, low # is opposite\n\nif __name__ == '__main__':\n    TestApp().run()","sub_path":"Kivy_tutorial_files/Kivy_App_Tutorial_00/Desabled_Tool,Carousel,Clock/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"432144902","text":"from django.http import HttpResponse, JsonResponse\n\ndef index(request):\n    return HttpResponse(\"Hello, World!\")\n\ndef account(request, user_id):\n    if user_id == 0:\n        ret = {\"status\": 1, \"id\": \"123100\", \"name\": \"Вася\", \"surname\": \"Пупкин\", \"role\":\"manager\"}\n    else:\n        ret = {\"status\": 0, \"error\": \"#weqirerh\"}\n    return JsonResponse(ret)\n\ndef project(request, user_id):\n    if user_id == 0:\n        ret = {\"status\": 1, \"projects\":[{ \"projectid\":\"123100\", \"name\": \"Spring Hack\", \"role\": \"manager\", \"task_inwork\":4},{ \"projectid\":\"120990\", \"name\": \"Physics\", \"role\": \"manager\", \"task_inwork\":0}]}\n    else:\n        ret = {\"status\": 0, \"error\": \"#weqirerh\"}\n    return JsonResponse(ret)\n","sub_path":"game/demo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"347905370","text":"import json\nimport re\nfrom urllib.parse import urlencode\nfrom bs4 import BeautifulSoup\nfrom requests.exceptions import RequestException\nimport requests\n\n\ndef get_page_index(offset, keyword):\n\n    data = {\n        'offset': offset,\n        'format':'json',\n        'keyword': keyword,\n        'autoload': 'true',\n        'count': '20',\n        'en_qc': 1,\n        'cur_tab': 1\n    }\n    url = 'https://www.toutiao.com/api/search/content/?' 
+ urlencode(data)\n    try:\n        response = requests.get(url)\n        if response.status_code == 200:\n            return response.text\n        return None\n    except RequestException:\n        print('Error requesting the search page')\n        return None\n\n\ndef main():\n    html = get_page_index(0, '街拍')\n    print(html)\n\n\nif __name__ == '__main__':\n    main()","sub_path":"jiepai/spider2.py","file_name":"spider2.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"51119974","text":"\"\"\"\nViews definition for game app\n\"\"\"\n# from django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django_comments.models import Comment\n\nfrom katarius.utils import get_list_or_None\nfrom .models import Author, Question, Answer, Profile, Truth, Level\n\n\n@login_required\ndef index(request):\n    \"\"\"\n    Index view for the game\n    \"\"\"\n    profiles_list = get_list_or_None(Profile)\n\n    if profiles_list is not None:\n        profiles_list = profiles_list.order_by('-score')[:10]\n\n    context = {\n        'profiles': profiles_list\n    }\n    return render(request, 'game/index.html', context)\n\n@login_required\ndef profile(request, user_id):\n    \"\"\"\n    Profile view for the game\n    \"\"\"\n\n    user_object = get_object_or_404(User, pk=user_id)\n    comments_list = get_list_or_None(Comment, user=user_id)\n    level_list = get_list_or_None(Level)\n\n    context = {\n        'user': user_object,\n        'comments': comments_list,\n        'levels': level_list\n    }\n    return render(request, 'game/profile.html', context)\n\n@login_required\ndef start(request):\n    \"\"\"\n    Start page for the game\n    \"\"\"\n    questions_list = get_list_or_None(Question)\n    if questions_list is not None:\n        questions_list = questions_list.order_by('?')[:3]\n    context = {\n        'questions': questions_list\n    }\n    return render(request, 'game/start.html', context)\n\n@login_required\ndef authors(request):\n    \"\"\"\n    Authors view for the game\n    \"\"\"\n    authors_list = get_list_or_None(Author)\n\n    if authors_list is not None:\n        authors_list = authors_list.order_by('level__score')\n\n    context = {\n        'authors': authors_list\n    }\n    return render(request, 'game/authors.html', context)\n\n@login_required\ndef author(request, author_id):\n    \"\"\"\n    Show author entry with 3 random questions and comments\n    \"\"\"\n    author_object = get_object_or_404(Author, id=author_id)\n    questions_list = get_list_or_None(Question, author_id=author_id)\n\n    if questions_list is not None:\n        questions_list = questions_list.order_by('?')[:3]\n\n    context = {\n        'author': author_object,\n        'questions': questions_list,\n    }\n\n    if request.user.profile.score < author_object.level.score:\n        context['locked'] = True\n\n    return render(request, 'game/author.html', context)\n\n@login_required\ndef questions(request):\n    \"\"\"\n    Questions view for the game\n    \"\"\"\n    questions_list = Question.objects.all()\n\n    if questions_list is not None:\n        questions_list = questions_list.order_by('author__level__score')\n\n    context = {\n        'questions': questions_list\n    }\n    return render(request, 'game/questions.html', context)\n\n@login_required\ndef question(request, question_id):\n    \"\"\"\n    Get question preview\n    \"\"\"\n    question_object = get_object_or_404(Question, id=question_id)\n\n    context = {\n        'question': question_object,\n    }\n\n    if request.user.profile.score < question_object.author.level.score:\n        context['locked'] = True\n\n    return render(request, 'game/question.html', context)\n\n@login_required\ndef answers(request, question_id):\n    \"\"\"\n    Get all answers\n    \"\"\"\n    question_object = 
Question.objects.filter(id=question_id).first()\n answers_list = get_list_or_None(Answer, question=question_id)\n\n if answers_list is not None:\n answers_list = answers_list.order_by('?')\n\n context = {\n 'question': question_object,\n 'answers': answers_list\n }\n\n if request.user.profile.score < question_object.author.level.score:\n context['locked'] = True\n\n return render(request, 'game/answers.html', context)\n\n@login_required\ndef answer(request, question_id, answer_id):\n \"\"\"\n Get answer\n \"\"\"\n answer_object = get_object_or_404(Answer, id=answer_id)\n question_object = get_object_or_404(Question, id=question_id)\n\n context = {\n 'answer': answer_object,\n 'question': question_object,\n }\n\n return render(request, 'game/answer.html', context)\n\n@login_required\ndef truths(request):\n \"\"\"\n Get all truths\n \"\"\"\n truth_list = get_list_or_None(Truth)\n\n context = {\n 'truths': truth_list\n }\n\n return render(request, 'game/truths.html', context)\n\n@login_required\ndef truth(request, truth_id):\n \"\"\"\n Get truth\n \"\"\"\n truth_obj = get_object_or_404(Truth, id=truth_id)\n\n context = {\n 'truth': truth_obj\n }\n\n return render(request, 'game/truth.html', context)\n\n@login_required\ndef levels(request):\n \"\"\"\n Get all levels\n \"\"\"\n level_list = get_list_or_None(Level)\n\n context = {\n 'levels': level_list\n }\n\n return render(request, 'game/levels.html', context)\n\n\n","sub_path":"game/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"168260498","text":"import cs50\nimport csv\nimport string\nimport sys\n\nfrom sys import argv\n\n\ndef main():\n # Check for correct number of args\n if (len(argv) != 2):\n print(\"Usage: python roster.py family\")\n return 1\n\n # Csv file name\n family_name = argv[1]\n\n # Create database\n open(\"students.db\", \"r\").close()\n db = cs50.SQL(\"sqlite:///students.db\")\n\n # Select names\n rows = db.execute(\"SELECT first, middle, last, house, birth FROM students WHERE house = ? ORDER BY last asc, first asc\", family_name)\n\n # case when middle is not null then middle end as middle\n for row in rows:\n print(row[\"first\"], end=' ')\n\n if row[\"middle\"] is not None:\n print(row[\"middle\"], end=' ')\n\n print(row[\"last\"], end=', born ')\n\n print(row[\"birth\"])\n\n\nmain()\n","sub_path":"pset7/houses/roster.py","file_name":"roster.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"63388748","text":"#!/usr/bin/python\nfrom google_http import GoogleHttp\nfrom pprint import pprint\nimport format\nimport time\nimport data\nimport os\nfrom subprocess import Popen, PIPE\nimport httplib2\nfrom functools import wraps\nimport time\n\nservice = GoogleHttp(api = \"sheet\").service()\ndrive = GoogleHttp(api = \"drive\").service()\nspeed_ctl_queue = []\n\ndef speed_control(speed, window):\n \"\"\"\n if speed / window is 100 / 100 , then allow 100 request per 100 seconds.\n speed is 100 requests\n window is 100 seconds\n \"\"\"\n def inner(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n current = time.time()\n speed_ctl_queue.append(current)\n if len(speed_ctl_queue) >= speed:\n while True:\n current = time.time()\n start = speed_ctl_queue[-1 * speed]\n if current - start <= window:\n sleep_time = window - (current - start)\n if sleep_time < 1:\n sleep_time = 1\n print(\"So fast! 
Need to wait %s seconds, current request queue length %d.\" % (sleep_time, len(speed_ctl_queue)))\n time.sleep(sleep_time)\n continue\n else:\n speed_ctl_queue[-1] = current\n break\n return func(*args, **kwargs)\n return wrapper\n return inner\n\ndef run_shell(cmd):\n return Popen([cmd], shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\n\ndef httplib_get_demo():\n \"\"\"\n use httplib library to get data from sheet\n \"\"\"\n spreadsheetId = '137YH4gxqCGkoc7CwafMAOF3tnzsABJCq5bFnJaju8mU'\n rangeName = 'Sheet1!A1:B2'\n handler = GoogleHttp().handler()\n url = 'https://sheets.googleapis.com/v4/spreadsheets/' + spreadsheetId + '/values/' + 'Sheet1%21A2%3AE3?majorDimension=COLUMNS'\n response, content = handler.request(url)\n print(response)\n print(content)\n\n@speed_control(50, 100)\ndef updateVar(spreadsheetId, data, rangeName = 'sheet1!A1:Z'):\n \"\"\"\n update a array of data to sheet, values like this:\n 'values' : [\n ['test2', 'test2', 'test2'],\n ['test3', 'test3', 'test3']\n ]\n \"\"\"\n value_range_body = {\n 'values' : data\n }\n value_input_option = 'USER_ENTERED'\n return service.spreadsheets().values().update(\n spreadsheetId=spreadsheetId,\n range=rangeName,\n valueInputOption=value_input_option,\n body=value_range_body).execute()\n\ndef create_doc(title):\n \"\"\"\n create one new table, use spreadsheet_body to set the configuration\n \"\"\"\n\n spreadsheet_body = {\n 'properties': {\n 'locale': 'zh_CN',\n 'timeZone': 'Etc/GMT',\n 'autoRecalc': 'ON_CHANGE',\n 'title': title\n },\n }\n return service.spreadsheets().create(body = spreadsheet_body).execute()\n\ndef batchget():\n \"\"\"\n batch get type to get data\n \"\"\"\n spreadsheetId = '137YH4gxqCGkoc7CwafMAOF3tnzsABJCq5bFnJaju8mU'\n rangeName = 'Sheet1!A2:E'\n result = service.spreadsheets().values().batchGet(\n spreadsheetId=spreadsheetId, ranges=rangeName).execute()\n values = result.get('values', [])\n print(result)\n\ndef setHorizontalAlignment(spreadsheetId):\n '''\n set Horizontal Alignment of cells\n '''\n\n updatecells = format.setHorizontalAlignment(sheetId = sheetid(spreadsheetId)[0])\n value_range_body = {\n \"requests\" : [ updatecells ]\n }\n result = service.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheetId,\n body=value_range_body).execute()\n values = result.get('values', [])\n print(values)\n\ndef updateDimensionProperties(spreadsheetId):\n '''\n update dimension properties\n '''\n updatecells = format.updateDimensionProperties(\n sheetId = sheetid(spreadsheetId)[0],\n width = 100,\n startIndex = 0,\n endIndex = 20)\n value_range_body = {\n \"requests\" : [ updatecells ]\n }\n result = service.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheetId,\n body=value_range_body).execute()\n values = result.get('values', [])\n print(values)\n\ndef dir_demo():\n pass\n\ndef format_demo():\n '''\n update a array of data to sheet\n '''\n spreadsheetId = '137YH4gxqCGkoc7CwafMAOF3tnzsABJCq5bFnJaju8mU'\n rangeName = 'Sheet1!A1:E6'\n\n spreadsheet_body = {\n 'properties' : {\n 'title' : 'table-4',\n 'defaultFormat' : {\n 'horizontalAlignment' : 'CENTER',\n 'verticalAlignment' : 'MIDDLE',\n 'backgroundColor' : {\n 'red' : 1,\n 'green' : 1,\n 'blue' : 1\n }\n }\n },\n 'sheets' : [\n {\n 'properties' : {\n 'title' : 'sheet1'\n }\n }\n ]\n }\n\n value_input_option = 'USER_ENTERED'\n result = service.spreadsheets().values().batchUpdate(\n spreadsheetId=spreadsheetId,\n range = rangeName,\n valueInputOption=value_input_option,\n body=spreadsheet_body).execute()\n #values = result.get('values', [])\n 
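# Note: in the google-api-python-client Sheets v4 API, spreadsheets().values().batchUpdate()\n    # accepts only spreadsheetId and body; range and valueInputOption are parameters of\n    # values().update(). A batch body would look roughly like (illustrative values):\n    #   {'valueInputOption': 'USER_ENTERED',\n    #    'data': [{'range': rangeName, 'values': [['a', 'b', 'c']]}]}\n    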
print(result)\n\ndef updateSheetTitle(spreadsheetId, sheetlist, titlelist):\n '''\n update a array of data to sheet\n '''\n for i in range(len(sheetlist)):\n updatesheet = format.updateSheet(index = i, sheetId = sheetlist[i], title = titlelist[i])\n sheet_body = {\n \"requests\" : [ updatesheet ]\n }\n result = service.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheetId,\n body=sheet_body).execute()\n\ndef get(spreadsheetId):\n return service.spreadsheets().get(spreadsheetId=spreadsheetId).execute()\n\ndef sheetid(spreadsheetid):\n sheetId = []\n for i in get(spreadsheetid).get(\"sheets\", None):\n sheetId.append(i[\"properties\"][\"sheetId\"])\n return sheetId\n\ndef setFont(spreadsheetId, rowIndex = 0, columnIndex = 0, value = ''):\n '''\n update dimension properties\n '''\n updatecells = format.setFont(sheetId = sheetid(spreadsheetId)[0],\n rowIndex = rowIndex,\n columnIndex = columnIndex,\n value = value)\n value_range_body = {\n \"requests\" : [ updatecells ]\n }\n result = service.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheetId,\n body=value_range_body).execute()\n return result\n\ndef copyTo(spreadsheetid, dest_spreadsheet_id):\n \"\"\"\n copy one spreadsheet to destination spreadsheet\n \"\"\"\n spreadsheet_body = {\n 'destination_spreadsheet_id' : dest_spreadsheet_id,\n }\n for sheetId in sheetid(spreadsheetid):\n service.spreadsheets().sheets().copyTo(spreadsheetId=spreadsheetid,\n body=spreadsheet_body,\n sheetId=sheetId).execute()\n\ndef deleteSheet(spreadsheetid, sheetId):\n '''\n delete sheet \n '''\n updatecells = {\n \"deleteSheet\" : {\n \"sheetId\" : str(sheetId)\n }\n }\n\n value_range_body = {\n \"requests\" : [ updatecells ]\n }\n result = service.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheetid,\n body=value_range_body).execute()\n return result\n\ndef mkdir(name, parent = None):\n if parent is None:\n p = []\n else:\n p = [parent]\n file_metadata = {\n 'name' : name,\n 'mimeType' : 'application/vnd.google-apps.folder',\n 'parents' : p\n\n }\n return drive.files().create(body=file_metadata, fields='id').execute()\n\ndef mvFileToDir(fileId, folderId):\n # Retrieve the existing parents to remove\n file = drive.files().get(fileId=fileId,\n fields='parents').execute();\n previous_parents = \",\".join(file.get('parents'))\n # Move the file to the new folder\n file = drive.files().update(fileId=fileId,\n addParents=folderId,\n removeParents=previous_parents,\n fields='id, parents').execute()\n\ndef permissionCreate(fileId):\n request_body = format.domainPermission(domain = 'redhat.com', role = 'reader')\n return drive.permissions().create(fileId=fileId,\n body = request_body).execute()\n\ndef searchFile(name, parent = None):\n page_token = None\n query_cmd = ''\n if parent is None:\n query_cmd = \"mimeType='application/vnd.google-apps.folder' and name='%s' \\\n and trashed=false\" % name\n else:\n query_cmd = \"mimeType='application/vnd.google-apps.folder' and name='%s' \\\n and trashed=false \\\n and '%s' in parents\" % (name, parent)\n while True:\n response = drive.files().list(q=query_cmd,\n spaces='drive',\n fields='nextPageToken, files(id, name)',\n pageToken=page_token).execute()\n files = response.get('files', [])\n #for f in files: \n # print 'Found file: %s (%s)' % (f.get('name'), f.get('id'))\n if files:\n return files\n page_token = response.get('nextPageToken', None)\n if page_token is None:\n break;\n return None\n\ndef mkUniqueDir(dir, parent=None, create=False, fd_list=[]):\n \"\"\"\n dir : the name of folder list that need to be 
created, from parent to child\n parent : the parent folder class object\n create : just created parent folder, so create child foler directly and no need to search \n return : the last child file class\n \"\"\"\n if len(dir) == 0:\n return\n\n if create:\n f = mkdir(name = dir[0], parent = parent)\n fd_id = f.get('id', None)\n fd_list.append(fd_id)\n mkUniqueDir(dir[1:], fd_id , create = True, fd_list = fd_list)\n else:\n f = searchFile(name = dir[0], parent = parent)\n if f is None:\n f = mkdir(name = dir[0], parent = parent)\n fd_id = f.get('id', None)\n fd_list.append(fd_id)\n mkUniqueDir(dir[1:], fd_id, create = True, fd_list = fd_list)\n else:\n if len(f) > 1:\n print(\"ERROR: More than one same name files %s\" % dir[0])\n fd_id = f[0].get('id', None)\n fd_list.append(fd_id)\n mkUniqueDir(dir[1:], fd_id, create = False, fd_list = fd_list)\n return fd_list\n\ndef get_ovs_version():\n kernel_version=os.popen('uname -r').read().strip('\\n')\n ovs_version_big=os.popen(\"rpm -qa|grep openvswitch | awk -F '-' '{printf $2}'\").read().strip('\\n')\n ovs_version_small=os.popen(\"rpm -qa|grep openvswitch | awk -F '-' '{printf $3}'| awk -F '.' '{printf $1}'\").read().strip('\\n')\n #ovs_version= 'ovs' + ovs_version_big + '-' + ovs_version_small\n ovs_version=os.popen(\"rpm -qa|grep openvswitch|awk -F '.x86' '{printf $1}'\").read().strip('\\n')\n return ovs_version\n\ndef get_dpdk_version():\n dpdk_version_big=os.popen(\"rpm -qa|grep dpdk| head -n 1 | awk -F '-' '{printf $2}'\").read().strip('\\n')\n dpdk_version_small=os.popen(\"rpm -qa|grep dpdk| head -n 1 | awk -F '-' '{printf $3}' | awk -F '.' '{printf $1}'\").read().strip('\\n')\n dpdk_version= 'dpdk' + dpdk_version_big + '-' + dpdk_version_small\n return dpdk_version\n","sub_path":"report/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":11536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"256970802","text":"import sqlite3\r\n\r\nc = sqlite3.connect('test.db')\r\ncc = c.cursor()\r\ncc.execute(\"CREATE TABLE test (date text, name text, money real)\")\r\ncc.execute(\"INSERT INTO test VALUES ('2012-06-02','pez',35.14)\")\r\nc.commit()\r\nn = (\"pez\",)\r\ncc.execute(\"SELECT * FROM test WHERE name=?\", n)\r\nprint(cc.fetchone())\r\n\r\nc.close()\r\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"555242113","text":"N = int(input())\nmusic = [list(input().split()) for _ in range(N)]\nX = input()\nX_after = False\nans = 0\nfor i in range(N):\n if X_after:\n ans += int(music[i][1])\n elif X == music[i][0]:\n X_after = True\nprint(ans)","sub_path":"etc_contest/dwango6/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"538893283","text":"import json\nfrom random import Random\n\nimport names\n\nrand = Random()\n\n# scale locations from 1 tot 10\nlocations = {\n \"Eindhoven\": 5,\n \"Amsterdam\": 2,\n \"Helmond\": 1\n}\n\n\nclass Graph:\n nodes = []\n edges = []\n\n def __init__(self):\n self.generate_nodes(500)\n self.generate_edges(2000)\n\n \"\"\"\n Adds a new node to the graph\n \"\"\"\n\n def generate_node(self, n):\n self.nodes.append(Node(n))\n\n \"\"\"\n Generate N nodes\n \"\"\"\n\n def generate_nodes(self, n):\n for x in range(0, n):\n self.generate_node(x)\n\n def generate_edge(self, is_bidirectional=0.25):\n 
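# Draw two random endpoints; self-loops and already-connected pairs are\n        # rejected below. The is_bidirectional argument is treated here as the\n        # probability (0.25 by default) that the edge also gets a reverse twin.\n        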
node1 = rand.choice(self.nodes)\n        node2 = rand.choice(self.nodes)\n\n        if node1.pk == node2.pk:\n            return\n\n        if node1.pk in [x.node_to.pk for x in node2.edges]:\n            return\n\n        bidirectional = False\n\n        if rand.randint(0, 100) <= is_bidirectional * 100:\n            bidirectional = True\n\n        # Create edge\n        edge = Edge(node1, node2)\n\n        node2.add_edge(edge)\n\n        if bidirectional:\n            node1.add_edge(Edge(node2, node1))\n\n        self.edges.append(edge)\n\n    def generate_edges(self, n):\n        for x in range(0, n):\n            self.generate_edge()\n\n    def describe(self):\n        print(\"This graph contains {0} nodes and {1} edges\".format(len(self.nodes), len(self.edges)))\n\n        print(\"Fetching random sample\")\n\n        loc_dist = {}\n        total = 0\n        min_edges = 1000000\n        max_edges = 0\n\n        for node in self.nodes:\n            # Calculate distribution of locations\n            if node.location in loc_dist.keys():\n                loc_dist[node.location] += 1\n            else:\n                loc_dist[node.location] = 1\n\n            total += len(node.edges)\n\n            if len(node.edges) > max_edges:\n                max_edges = len(node.edges)\n            elif len(node.edges) < min_edges:\n                min_edges = len(node.edges)\n\n        print(\"Average node has {0} edges\".format(total / len(self.nodes)))\n        print(\"Min edges is {0} \\tMax edges is {1}\".format(min_edges, max_edges))\n\n        print(self.nodes[0].json())\n\n        for k in loc_dist.keys():\n            print(\"{0} - {1}\".format(loc_dist[k], k))\n\n    def save(self, path=\"data.json\"):\n        f = open(path, \"w\")\n        f.write(json.dumps([x.json() for x in self.nodes]))\n        f.close()\n\n\nclass Node:\n    def __init__(self, pk):\n        self.name = names.get_full_name()\n        # random.choices already returns a one-element list when k=1\n        self.location = rand.choices(list(locations.keys()), weights=list(locations.values()))[0]\n        self.edges = []\n        self.pk = pk\n\n    def add_edge(self, edge):\n        self.edges.append(edge)\n\n    def __str__(self):\n        return \"{0} ({1})\".format(self.name, self.location)\n\n    def json(self):\n        return {\n            \"pk\": self.pk,\n            \"name\": self.name,\n            \"location\": self.location,\n            \"edges\": [x.node_to.pk for x in self.edges]\n        }\n\n\nclass Edge:\n    node_to = None\n    node_from = None\n\n    def __init__(self, node_to, node_from):\n        self.node_to = node_to\n        self.node_from = node_from\n\n    def __str__(self):\n        return \"Edge from {0} to {1}\".format(self.node_from.pk, self.node_to.pk)\n\n    def json(self):\n        return {\n            \"node1\": self.node_to.pk,\n            \"node2\": self.node_from.pk\n        }\n\n\nif __name__ == \"__main__\":\n    graph = Graph()\n\n    graph.describe()\n    graph.save()\n","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"551455129","text":"from __future__ import absolute_import, unicode_literals\n\nimport json\nimport unittest\n\nfrom bs4 import BeautifulSoup\n\nfrom wagtaildraftail.widgets import DraftailTextArea\n\n\nclass DraftailTextAreaWidgetTestCase(unittest.TestCase):\n    def test_rendering(self):\n        widget = DraftailTextArea()\n\n        html = widget.render('default_editor', json.dumps({'key': 'val'}), {'id': 'id'})\n\n        # <input type=\"hidden\" name=\"default_editor\" value='{\"key\": \"val\"}' id=\"id\">\n        # <script>window.wagtailDraftail.initEditor('default_editor', {})</script>\n\n        soup = BeautifulSoup(html, 'html.parser')\n\n        self.assertIsNotNone(soup.input)\n        self.assertIsNotNone(soup.script)\n\n        self.assertDictContainsSubset({\n            'type': 'hidden',\n            'name': 'default_editor',\n            'value': '{\"key\": \"val\"}',\n            'id': 'id'\n        }, soup.input.attrs)\n\n        self.assertEqual(soup.script.text, r\"window.wagtailDraftail.initEditor('default_editor', 
{})\")\n","sub_path":"tests/test_widgets.py","file_name":"test_widgets.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"506374231","text":"#coding:utf-8\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nimage_path = \"bird.png\"\n\n\ndef check_pixel(value, count):\n if value < 10:\n count = count + 1\n return count\n\ndef preProcess():\n image = cv2.imread(image_path)\n cvtColor = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n medianBlur = cv2.medianBlur(cvtColor, 9) \n ret,image = cv2.threshold(medianBlur,127,255,cv2.THRESH_BINARY)\n return image\n\ndef translate2Image(img):\n cv2.imwrite('cv2write.png',img)\n im = Image.open(\"cv2write.png\")\n return im\n\ndef translate2Cv2(img):\n img.save('translate2Cv2.png')\n im = cv2.imread('translate2Cv2.png')\n return im\n\n\n\n#input: the orginal gray image\ndef segmente(img, img1, img2, bird, cage, background): \n \n pixdata = img.load()\n pixdata1 = img1.load()\n pixdata2 = img2.load()\n birdPixdata = bird.load()\n cagePixdata = cage.load()\n backgroundPixdata = background.load()\n w,h = img.size\n for y in range(1,h-8):\n for x in range(1,w-8):\n # right cornor\n if y < 280 or x>534 or y>558 or x<300:\n pixdata1[x,y] = 0\n pixdata2[x,y] = 0\n # detecte birds by using superpixel\n else:\n count = 0\n count1 = 0\n # Detected along a 45 degree angle\n count1 = check_pixel(pixdata[x+1,y+1], count1)\n count1 = check_pixel(pixdata[x+2,y+2], count1)\n count1 = check_pixel(pixdata[x+3,y+3], count1)\n count1 = check_pixel(pixdata[x+4,y+4], count1)\n count1 = check_pixel(pixdata[x+5,y+5], count1)\n count1 = check_pixel(pixdata[x+6,y+6], count1)\n count1 = check_pixel(pixdata[x+7,y+7], count1)\n count1 = check_pixel(pixdata[x+8,y+8], count1)\n\n count = check_pixel(pixdata[x-1,y-1], count)\n count = check_pixel(pixdata[x-2,y-2], count)\n count = check_pixel(pixdata[x-3,y-3], count)\n count = check_pixel(pixdata[x-4,y-4], count)\n count = check_pixel(pixdata[x-5,y-5], count)\n count = check_pixel(pixdata[x-6,y-6], count)\n count = check_pixel(pixdata[x-7,y-7], count)\n count = check_pixel(pixdata[x-8,y-8], count)\n\n if count > 7 or count1 > 7:\n pixdata1[x,y] = 255\n pixdata2[x,y] = 255\n else:\n pixdata1[x,y] = 0\n pixdata2[x,y] = 0\n\n for y in range(1,h-10):\n for x in range(1,w-10):\n count = 0\n count1 = 0\n # Detect along the vertical direction\n count = check_pixel(pixdata1[x-1,y], count)\n count = check_pixel(pixdata1[x-2,y], count)\n count = check_pixel(pixdata1[x-3,y], count)\n count = check_pixel(pixdata1[x-4,y], count)\n count = check_pixel(pixdata1[x-5,y], count)\n count = check_pixel(pixdata1[x-6,y], count)\n count = check_pixel(pixdata1[x-7,y], count)\n count = check_pixel(pixdata1[x-8,y], count)\n count = check_pixel(pixdata1[x-9,y], count)\n count = check_pixel(pixdata1[x-10,y], count)\n\n count1 = check_pixel(pixdata1[x+1,y], count1)\n count1 = check_pixel(pixdata1[x+2,y], count1)\n count1 = check_pixel(pixdata1[x+3,y], count1)\n count1 = check_pixel(pixdata1[x+4,y], count1)\n count1 = check_pixel(pixdata1[x+5,y], count1)\n count1 = check_pixel(pixdata1[x+6,y], count1)\n count1 = check_pixel(pixdata1[x+7,y], count1)\n count1 = check_pixel(pixdata1[x+8,y], count1)\n count1 = check_pixel(pixdata1[x+9,y], count1)\n count1 = check_pixel(pixdata1[x+10,y], count1)\n \n if count > 9 or count1 > 9:\n pixdata2[x,y] = 255\n else:\n pixdata2[x,y] = 0\n\n # Dig the cage and backgroun in the original image\n 
for y in range(1,h):\n        for x in range(1,w): \n            if (x > 523 and y < 126) or x > 610:\n                pixdata[x,y] = 255\n                cagePixdata[x,y] = 0\n            elif pixdata2[x,y] == 0:\n                pixdata[x,y] = 255\n                backgroundPixdata[x,y] = 255\n            else:\n                cagePixdata[x,y] = 0\n    \n    # Dig the bird in the original image\n    for y in range(1,h):\n        for x in range(1,w): \n            # the cage window was already masked in the previous loop\n            if pixdata2[x,y] == 0 or pixdata[x,y] == 0: \n                birdPixdata[x,y] = 255\n            else:\n                backgroundPixdata[x,y] = 255\n    \n    return bird,cage,background\n\ndef main():\n    # pre-processing the image in 3 steps\n    image = preProcess()\n    image1 = preProcess()\n    image2 = preProcess()\n\n    # set 3 original images to save the result\n    bird = Image.open(image_path)\n    cage = Image.open(image_path)\n    background = Image.open(image_path)\n\n    # set 3 pre-processing image to do the segmentation\n    im = translate2Image(image)\n    im1 = translate2Image(image1)\n    im2 = translate2Image(image2)\n    \n    # segmente\n    bird, cage, background = segmente(im, im1, im2, bird, cage, background)\n\n    plt.imshow(bird, \"gray\")\n    # plt.imshow(cage, \"gray\")\n    # plt.imshow(background, \"gray\")\n    plt.title(\"target image\")\n    plt.show()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"bird_jingkunchen.py","file_name":"bird_jingkunchen.py","file_ext":"py","file_size_in_byte":5530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"168260498","text":"\ndef factors (num):\n    s = \"\"\n    for i in range (num):\n        if (num%(i+1) == 0):\n            s = s + (\" \" + str(i+1))\n    #print (s)\n    if (prime (s,num)):\n        return True\n    \ndef prime (s,n):\n    if (s.count(\" 1 \" + str(n)) == 1):\n        print (s)\n        return True\n    else:\n        return False\n    \n\n\ndef factors2 (num):\n    s = \"\"\n    for i in range (num):\n        if (num%(i+1) == 0):\n            s = s + (\" \" + str(i+1))\n\n    if (prime2 (s,num)):\n        return True\n    \ndef prime2 (s,n):\n    if (s.count(\" 1 \" + str(n)) == 1):\n        return True\n    else:\n        return False\n\n\n    \ndef primenumbers ():\n    userin = int(input(\"Enter a positive integer: \"))\n    for i in range (userin):\n        if (factors2 (i)):\n            print (i,end= \" \")\n","sub_path":"Labs/Lab 4 Work/pex5.py","file_name":"pex5.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} {"seq_id":"461066626","text":"import logging\nimport sys\n\nclass Logger(object):\n\n    LOGGER_NAME = \"Zurbrigg\"\n\n    LEVEL_DEFAULT = logging.DEBUG\n    PROPAGATE_DEFAULT = True\n\n    _logger_obj = None\n\n\n    @classmethod\n    def logger_obj(cls):\n\n        if not cls._logger_obj:\n            if cls.logger_exists():\n                cls._logger_obj = logging.getLogger(cls.LOGGER_NAME)\n            else:\n                cls._logger_obj = logging.getLogger(cls.LOGGER_NAME)\n\n                cls._logger_obj.setLevel(cls.LEVEL_DEFAULT)\n                cls._logger_obj.propagate = cls.PROPAGATE_DEFAULT\n\n                fmt = logging.Formatter(\"[%(name)s][%(levelname)s] %(message)s\")\n\n                stream_handler = logging.StreamHandler(sys.stderr)\n                stream_handler.setFormatter(fmt)\n                cls._logger_obj.addHandler(stream_handler)\n\n        return cls._logger_obj\n\n    @classmethod\n    def logger_exists(cls):\n        return cls.LOGGER_NAME in logging.Logger.manager.loggerDict.keys()\n\n    @classmethod\n    def set_level(cls, level):\n        lg = cls.logger_obj()\n        lg.setLevel(level)\n    \n    @classmethod\n    def set_propagate(cls, propagate):\n        lg = cls.logger_obj()\n        lg.propagate = propagate\n\n    @classmethod\n    def debug(cls, msg, *args, **kwargs):\n        lg = cls.logger_obj()\n        lg.debug(msg, *args, **kwargs)\n\n    @classmethod\n    def info(cls, msg, *args, **kwargs):\n        lg = cls.logger_obj()\n        
lg.info(msg, *args, **kwargs)\n\n @classmethod\n def warning(cls, msg, *args, **kwargs):\n lg = cls.logger_obj()\n lg.warning(msg, *args, **kwargs)\n\n @classmethod\n def error(cls, msg, *args, **kwargs):\n lg = cls.logger_obj()\n lg.error(msg, *args, **kwargs)\n\n @classmethod\n def critical(cls, msg, *args, **kwargs):\n lg = cls.logger_obj()\n lg.critical(msg, *args, **kwargs)\n\n @classmethod\n def log(cls, level, msg, *args, **kwargs):\n lg = cls.logger_obj()\n lg.log(level, msg, *args, **kwargs)\n\n @classmethod\n def exception(cls, msg, *args, **kwargs):\n lg = cls.logger_obj()\n lg.exception(msg, *args, **kwargs)\n\n @classmethod\n def write_to_file(cls, path, level=logging.WARNING):\n file_handler = logging.FileHandler(path)\n file_handler.setLevel(level)\n\n fmt = logging.Formatter(\"[%(asctime)s][%(levelname)s] %(message)s\")\n file_handler.setFormatter(fmt)\n\n lg = cls.logger_obj()\n lg.addHandler(file_handler)\n\n\nif __name__ == \"__main__\":\n \n Logger.set_propagate(False)\n\n Logger.debug(\"debug message\")\n Logger.info(\"info message\")\n Logger.warning(\"warning message\")\n Logger.error(\"error message\")\n Logger.critical(\"critical message\")\n\n\n\n\n\n\n\n","sub_path":"090_logging/examples/nuke/32-python_in_production-logging_in_maya/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"283042860","text":"# Copyright (c) Nanjing University, Vision Lab.\n# Last update: 2019.09.17\n\nimport numpy as np\nimport os\n\n\ndef load_ply_data(filename):\n '''\n load data from ply file.\n '''\n\n f = open(filename)\n #1.read all points\n points = []\n for line in f:\n #only x,y,z\n wordslist = line.split(' ')\n try:\n x, y, z = float(wordslist[0]),float(wordslist[1]),float(wordslist[2])\n except ValueError:\n continue\n points.append([x,y,z])\n points = np.array(points)\n points = points.astype(np.int32)#np.uint8\n # print(filename,'\\n','length:',points.shape)\n f.close()\n\n return points\n\ndef write_ply_data(filename, points):\n '''\n write data to ply file.\n '''\n if os.path.exists(filename):\n os.system('rm '+filename)\n f = open(filename,'a+')\n #print('data.shape:',data.shape)\n f.writelines(['ply\\n','format ascii 1.0\\n'])\n f.write('element vertex '+str(points.shape[0])+'\\n')\n f.writelines(['property float x\\n','property float y\\n','property float z\\n'])\n f.write('end_header\\n')\n for _, point in enumerate(points):\n f.writelines([str(point[0]), ' ', str(point[1]), ' ',str(point[2]), '\\n'])\n f.close() \n\n return\n\ndef load_points(filename, cube_size=64, min_num=20):\n \"\"\"Load point cloud & split to cubes.\n \n Args: point cloud file; voxel size; minimun number of points in a cube.\n\n Return: cube positions & points in each cube.\n \"\"\"\n\n # load point clouds\n point_cloud = load_ply_data(filename)\n # partition point cloud to cubes.\n cubes = {}# {block start position, points in block}\n for _, point in enumerate(point_cloud):\n cube_index = tuple((point//cube_size).astype(\"int\"))\n local_point = point % cube_size\n if not cube_index in cubes.keys():\n cubes[cube_index] = local_point\n else:\n cubes[cube_index] = np.vstack((cubes[cube_index] ,local_point))\n # filter by minimum number.\n k_del = []\n for _, k in enumerate(cubes.keys()):\n if cubes[k].shape[0] < min_num:\n k_del.append(k)\n for _, k in enumerate(k_del):\n del cubes[k]\n # get points and cube positions.\n cube_positions = np.array(list(cubes.keys()))\n 
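# The cube ordering below flattens each (x, y, z) index into the scalar\n    # x + y*step + z*step*step, sorts that key, and unflattens it again: a\n    # row-major walk so the cubes are always visited in the same sequence.\n    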
set_points = []\n # orderd\n step = cube_positions.max() + 1\n cube_positions_n = cube_positions[:,0:1] + cube_positions[:,1:2]*step + cube_positions[:,2:3]*step*step\n cube_positions_n = np.sort(cube_positions_n, axis=0)\n x = cube_positions_n % step\n y = (cube_positions_n // step) % step\n z = cube_positions_n // step // step\n cube_positions_orderd = np.concatenate((x,y,z), -1)\n for _, k in enumerate(cube_positions_orderd):\n set_points.append(cubes[tuple(k)].astype(\"int16\"))\n\n return set_points, cube_positions\n\ndef save_points(set_points, cube_positions, filename, cube_size=64):\n \"\"\"Combine & save points.\"\"\"\n\n # order cube positions.\n step = cube_positions.max() + 1\n cube_positions_n = cube_positions[:,0:1] + cube_positions[:,1:2]*step + cube_positions[:,2:3]*step*step\n cube_positions_n = np.sort(cube_positions_n, axis=0)\n x = cube_positions_n % step\n y = (cube_positions_n // step) % step\n z = cube_positions_n // step // step\n cube_positions_orderd = np.concatenate((x,y,z), -1)\n # combine points.\n point_cloud = []\n for k, v in zip(cube_positions_orderd, set_points):\n points = v + np.array(k) * cube_size\n point_cloud.append(points)\n point_cloud = np.concatenate(point_cloud).astype(\"int\")\n \n write_ply_data(filename, point_cloud)\n\n return\n\ndef points2voxels(set_points, cube_size):\n \"\"\"Transform points to voxels (binary occupancy map).\n Args: points list; cube size;\n\n Return: A tensor with shape [batch_size, cube_size, cube_size, cube_size, 1]\n \"\"\"\n\n voxels = []\n for _, points in enumerate(set_points):\n points = points.astype(\"int\")\n vol = np.zeros((cube_size,cube_size,cube_size))\n vol[points[:,0],points[:,1],points[:,2]] = 1.0\n vol = np.expand_dims(vol,-1) \n voxels.append(vol)\n voxels = np.array(voxels)\n\n return voxels\n\ndef voxels2points(voxels):\n \"\"\"extract points from each voxel.\"\"\"\n\n voxels = np.squeeze(np.uint8(voxels)) # 0 or 1\n set_points = []\n for _, vol in enumerate(voxels):\n points = np.array(np.where(vol>0)).transpose((1,0))\n set_points.append(points)\n \n return set_points\n\n\nif __name__=='__main__':\n name = '../testdata/8iVFB/loot_vox10_1200.ply'\n name_rec = 'rec.ply'\n set_points, cube_positions = load_points(name, cube_size=64, min_num=20)\n voxels = points2voxels(set_points, cube_size=64)\n print('voxels:',voxels.shape)\n points_rec = voxels2points(voxels)\n save_points(points_rec, cube_positions, name_rec, cube_size=64)\n os.system(\"../myutils/pc_error_d\" \\\n + ' -a ' + name + ' -b ' + name_rec + \" -r 1023\")\n","sub_path":"dataprocess/inout_points.py","file_name":"inout_points.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"603468411","text":"import bson\nimport datetime\n\nfrom .. 
import config\nfrom ..auth import containerauth, always_ok, require_privilege, Privilege\nfrom ..dao import containerstorage, containerutil, noop\nfrom ..validators import verify_payload_exists\n\nfrom .containerhandler import ContainerHandler\n\nlog = config.log\n\n\nclass CollectionsHandler(ContainerHandler):\n # pylint: disable=arguments-differ\n\n container_handler_configurations = ContainerHandler.container_handler_configurations\n\n container_handler_configurations['collections'] = {\n 'permchecker': containerauth.collection_permissions,\n 'storage': containerstorage.CollectionStorage(),\n 'storage_schema_file': 'collection.json',\n 'payload_schema_file': 'collection.json'\n }\n\n def __init__(self, request=None, response=None):\n super(CollectionsHandler, self).__init__(request, response)\n self.config = self.container_handler_configurations['collections']\n self.storage = self.container_handler_configurations['collections']['storage']\n\n def get(self, **kwargs):\n return super(CollectionsHandler, self).get('collections', **kwargs)\n\n @require_privilege(Privilege.is_user)\n def post(self):\n mongo_validator, payload_validator = self._get_validators()\n\n payload = self.request.json_body\n payload_validator(payload, 'POST')\n payload['permissions'] = [{\n '_id': self.uid,\n 'access': 'admin'\n }]\n payload['curator'] = self.uid\n payload['created'] = payload['modified'] = datetime.datetime.utcnow()\n result = mongo_validator(self.storage.exec_op)('POST', payload=payload)\n\n if result.acknowledged:\n return {'_id': result.inserted_id}\n else:\n self.abort(404, 'Element not added in collection {}'.format(self.uid))\n\n @verify_payload_exists\n def put(self, **kwargs):\n _id = kwargs.pop('cid')\n container = self._get_container(_id)\n mongo_validator, payload_validator = self._get_validators()\n\n payload = self.request.json_body or {}\n if not payload:\n self.abort(400, 'PUT request body cannot be empty')\n contents = payload.pop('contents', None)\n payload_validator(payload, 'PUT')\n permchecker = self._get_permchecker(container=container)\n payload['modified'] = datetime.datetime.utcnow()\n result = mongo_validator(permchecker(self.storage.exec_op))('PUT', _id=_id, payload=payload)\n\n if result.modified_count == 1:\n self._add_contents(contents, _id)\n return {'modified': result.modified_count}\n else:\n self.abort(404, 'Element not updated in collection {} {}'.format(self.storage.cont_name, _id))\n\n def _add_contents(self, contents, _id):\n if not contents:\n return\n acq_ids = []\n for item in contents['nodes']:\n if not bson.ObjectId.is_valid(item.get('_id')):\n self.abort(400, 'not a valid object id')\n item_id = bson.ObjectId(item['_id'])\n if item['level'] == 'project':\n sess_ids = [s['_id'] for s in config.db.sessions.find({'project': item_id, 'deleted': {'$exists': False}}, [])]\n acq_ids += [a['_id'] for a in config.db.acquisitions.find({'session': {'$in': sess_ids}, 'deleted': {'$exists': False}}, [])]\n elif item['level'] == 'session':\n acq_ids += [a['_id'] for a in config.db.acquisitions.find({'session': item_id, 'deleted': {'$exists': False}}, [])]\n elif item['level'] == 'acquisition':\n acq_ids += [item_id]\n operator = '$addToSet' if contents['operation'] == 'add' else '$pull'\n if not bson.ObjectId.is_valid(_id):\n self.abort(400, 'not a valid object id')\n config.db.acquisitions.update_many({'_id': {'$in': acq_ids}}, {operator: {'collections': bson.ObjectId(_id)}})\n\n\n def delete(self, **kwargs):\n _id = bson.ObjectId(kwargs.pop('cid'))\n self.config = 
self.container_handler_configurations['collections']\n self.storage = self.config['storage']\n container = self._get_container(_id)\n container['has_children'] = container.get('files') or container.get('analyses')\n permchecker = self._get_permchecker(container, None)\n # This line exec the actual delete checking permissions using the decorator permchecker\n result = permchecker(self.storage.exec_op)('DELETE', _id)\n config.db.acquisitions.update_many({'collections': _id}, {'$pull': {'collections': _id}})\n\n if result.modified_count == 1:\n return {'deleted': 1}\n else:\n self.abort(404, 'Element not removed from container {} {}'.format(self.storage.cont_name, _id))\n\n def get_all(self):\n projection = self.get_list_projection('collections')\n if self.complete_list:\n permchecker = always_ok\n elif self.public_request:\n permchecker = containerauth.list_public_request\n else:\n permchecker = containerauth.list_permission_checker(self)\n query = {}\n page = permchecker(self.storage.exec_op)('GET', query=query, public=self.public_request, projection=projection, pagination=self.pagination)\n results = page['results']\n if not self.user_is_admin and not self.is_true('join_avatars'):\n self._filter_all_permissions(results, self.uid)\n if self.is_true('join_avatars'):\n self.storage.join_avatars(results)\n if self.is_true('stats'):\n for result in results:\n containerutil.get_collection_stats(result)\n return self.format_page(page)\n\n def curators(self):\n curator_ids = []\n for collection in self.get_all():\n if collection['curator'] not in curator_ids:\n curator_ids.append(collection['curator'])\n curators = config.db.users.find(\n {'_id': {'$in': curator_ids}},\n ['firstname', 'lastname']\n )\n return list(curators)\n\n def get_sessions(self, cid):\n \"\"\"Return the list of sessions in a collection.\"\"\"\n\n # Confirm user has access to collection\n container = self._get_container(cid)\n permchecker = self._get_permchecker(container=container)\n permchecker(noop)('GET', _id=cid)\n\n # Find list of relevant sessions\n agg_res = config.db.acquisitions.aggregate([\n {'$match': {'collections': bson.ObjectId(cid)}},\n {'$group': {'_id': '$session'}},\n ])\n query = {'_id': {'$in': [ar['_id'] for ar in agg_res]}}\n\n if not self.user_is_admin:\n query['permissions._id'] = self.uid\n\n projection = self.get_list_projection('sessions')\n\n page = containerstorage.SessionStorage().get_all_el(query=query, user=None, projection=projection, pagination=self.pagination)\n sessions = page['results']\n\n self._filter_all_permissions(sessions, self.uid)\n\n self.handle_origin(sessions)\n return self.format_page(page)\n\n def get_acquisitions(self, cid):\n \"\"\"Return the list of acquisitions in a collection.\"\"\"\n\n # Confirm user has access to collection\n container = self._get_container(cid)\n permchecker = self._get_permchecker(container=container)\n permchecker(noop)('GET', _id=cid)\n\n\n query = {'collections': bson.ObjectId(cid)}\n sid = self.get_param('session', '')\n if bson.ObjectId.is_valid(sid):\n query['session'] = bson.ObjectId(sid)\n elif sid != '':\n self.abort(400, sid + ' is not a valid ObjectId')\n\n if not self.user_is_admin:\n query['permissions._id'] = self.uid\n\n projection = self.get_list_projection('acquisitions')\n\n acquisitions = list(containerstorage.AcquisitionStorage().get_all_el(query, None, projection))\n\n self._filter_all_permissions(acquisitions, self.uid)\n\n self.handle_origin(acquisitions)\n return acquisitions\n\n def get_list_projection(self, container):\n 
\"\"\"Return the list_projection for container.\"\"\"\n cfg = self.container_handler_configurations[container]\n return cfg['storage'].get_list_projection()\n","sub_path":"api/handlers/collectionshandler.py","file_name":"collectionshandler.py","file_ext":"py","file_size_in_byte":8286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"363076541","text":"\n\nfrom selang import compile_to_se, compile_to_gen, ref, planet, star, orbit, ring\n\n\norbits = {\n 1: (\n (ring([ref('earth', earth_mass=0.6)] * 7 + [4]), orbit(0.5, eccentricity=0.9)),\n (star('M5V', 0.6), orbit(2, eccentricity=0.7, retrograde=True)),\n (2, orbit(semimajoraxis=1)),\n ),\n 2: (3, orbit(0.1)),\n 4: (5, orbit(0.005)),\n}\nobjects = {\n 1: ref('sun'),\n 2: ref('earth'),\n 3: ref('moon'),\n 4: ref('earth', earth_mass=1.4),\n 5: ref('earth', earth_mass=1.2),\n}\n\none, two = compile_to_se('custom system', orbits, objects,\n '~/games/space_engine/SpaceEngine/', overwrite=True)\n\n\nif isinstance(one, str):\n print(one, two)\nelse:\n print('\\n'.join(one))\n print('##################################################')\n print('\\n'.join(two))\n","sub_path":"data/complex_example.py","file_name":"complex_example.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"651309765","text":"from .base import *\n\n\nrequires = ['base']\n\n\ndef add_css(all):\n cssv = all.variables\n css = all.css\n #\n cssv.dialog.background = cssv.skins.base.default.background\n cssv.dialog.radius = cssv.body.radius\n cssv.dialog.line_height = px(24)\n cssv.dialog.padding = px(10)\n #\n cssv.modal.zindex = 1050\n cssv.modal.background = '#000'\n cssv.modal.opacity = 0.5\n\n # collapse\n css('.collapse',\n Transition('height', '0.35s', 'ease'),\n css('.in', height='auto'),\n position='relative',\n overflow='hidden',\n height=0)\n\n css('.header',\n css(' h3',\n line_height=cssv.dialog.line_height,\n margin=0),\n overflow='hidden')\n\n css('.body-wrap',\n position='relative',\n overflow='hidden')\n\n css('.body',\n position='relative',\n overflow_y='auto')\n\n dialog = css('.dialog',\n Skin(applyto=['border'], border_width=px(1)),\n Skin(' > .header', border_width=spacing(0, 0, px(1))),\n Radius(cssv.dialog.radius),\n Transition('opacity', '0.2s', 'ease'),\n css('.ready',\n css('.collapsed',\n css(' .header', Radius(cssv.dialog.radius)),\n # Remove border from header\n Skin(' .header',\n applyto=['border'],\n border_style='none'))),\n background=cssv.body.background)\n\n # control variant\n dialog.css('.control',\n Radius(0),\n Border(style='none'),\n Skin(gradient=False, prefix='control'),\n css(' > .header',\n Radius(0),\n Border(style='none'),\n padding=0),\n css(' > .body-wrap > .body',\n padding=0))\n\n # modal variant\n css('.modal-open',\n overflow='hidden')\n\n css('.dialog-modal',\n margin=spacing(px(30), 'auto'),\n position='relative')\n\n css('.modal',\n z_index=cssv.modal.zindex,\n overflow_x='auto',\n overflow_y='scroll',\n outline='none')\n\n css('.modal-backdrop',\n Opacity(cssv.modal.opacity),\n background=cssv.modal.background,\n z_index=cssv.modal.zindex-10)\n\n ## DIALOG\n dialog.css(' .header',\n Radius(spacing(cssv.dialog.radius, cssv.dialog.radius, 0, 0)),\n padding=spacing(0.6*cssv.dialog.padding, cssv.dialog.padding))\n\n dialog.css(' .body',\n Radius(spacing(0, 0, cssv.dialog.radius, cssv.dialog.radius)),\n background=cssv.dialog.background,\n 
padding=cssv.dialog.padding)\n","sub_path":"lux/extensions/ui/style/dialog.py","file_name":"dialog.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"589757995","text":"import random\r\n\r\nchances = 0\r\n\r\nnumber = random.randint(1, 10)\r\nprint(number)\r\n\r\nprint(\"Guess a number between 1 and 10:\")\r\nwhile chances < 5:\r\n guess = int(input(\"Enter your guess: \"))\r\n if guess == number:\r\n print(\"YOU'VE WON!\")\r\n chances = 7\r\n elif guess > 10 or guess < 0:\r\n print(\"You're number is not in the proper range.\")\r\n elif guess < number:\r\n print(\"Guess a number higher than \" + str(guess))\r\n chances = chances + 1\r\n elif guess > number:\r\n print(\"Guess a number lower than \" + str(guess))\r\n chances = chances + 1\r\n\r\nif chances == 5:\r\n print(\"YOU RAN OUT OF ATTEMPTS, YOU'VE LOST!\")\r\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"542515527","text":"def num_common_letters(goal_word, guess):\n total = 0\n goal_lst = get_list(goal_word)\n guess_lst = get_list(guess)\n singles = []\n if len(goal_lst) == 0:\n return 0\n else:\n for elem in guess_lst:\n if elem not in singles:\n singles += [elem]\n for elem in singles:\n if elem is goal_lst[0]:\n total += 1\n return total + num_common_letters(make_word_from_list(goal_lst[1:]), guess)\n","sub_path":"all_data/cs61a/untarred/89.py","file_name":"89.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"379791214","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nimport auxiliary_function as af\n\nmnist_X, mnist_label = af.load_dataset()\nW = af.calculate_eigenvalue(np.cov(mnist_X.T))\nPCA = np.dot(mnist_X, W)\nplt.title('PCA')\ncolor_k = ['g', 'b', 'r', 'y', 'c']\nfor i in range(5):\n indices = (mnist_label == (i+1))\n plt.plot(PCA[indices, 0], PCA[indices, 1], color_k[i]+'o', label=str(i+1))\nplt.legend()\nplt.show()","sub_path":"home task 7/home task 7 fixed/homework_7_1_pca.py","file_name":"homework_7_1_pca.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"28162202","text":"import socket\nimport json\nimport view\n\n\ndef client(host='localhost', port=8082, data=None):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Criando um socket TCP\n server_address = (host, port) # Conectar ao servidor\n # print(\"Conectando ao servidor {} pela porta {}\".format(server_address[0], server_address[1]))\n sock.connect(server_address)\n try: # Enviar dados ao servidor\n if data: # Verificar se Existe uma mensagem\n # converter dicionário para string\n message = json.dumps(data)\n # print(\"Enviando mensagem ao servidor: {}\".format(message))\n sock.sendall(message.encode('utf-8')) # Enviar mensagem para o servidor\n response = sock.recv(8000) # Receber a resposta do servidor\n response = json.loads(response) # Converter sring para dicionário\n # print(\"Mensagem recebida: {}\".format(response))\n return response\n else:\n print(\"Não tem dados para enviar ao servidor\")\n except socket.error as e:\n print(\"Socket error: %s\" % str(e))\n except Exception as e:\n print(\"Other exception: %s\" % str(e))\n finally:\n # print(\"Fechando a conexão com o servidor\")\n 
sock.close() # Close the socket connection\n","sub_path":"Client/controller/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"182829829","text":"'''\nCreated on Mar 29, 2016\n\n@author: leegc\n'''\n\nfrom io import StringIO\nfrom engine.script_executor import ScriptExecutor\nfrom engine.objects import Result\nimport sys\nimport logging\nimport traceback\nimport time\nimport base64\n\n'''\nObject to handle running the user's Python script and return a boolean value.\n'''\n\nclass InsecureScriptRunner(ScriptExecutor):\n\n    def __init__(self):\n        #self.script = self._format_script(script)\n        pass\n\n    def _format_script(self, script):\n        # script is in base64 in Mongo \n        # decode base64, and then decode to UTF-8 to become a string (rather than bytes-like)\n        decoded_script = self._decode_base_64_(script)\n        formatted_script = \"\"\n        imports = []\n        lines = decoded_script.splitlines(True)\n        # find all the import lines\n        for line in lines:\n            if line.startswith(\"from\") or line.startswith(\"import\"):\n                logging.warning(\"Import detected: \" + line)\n                imports.append(line)\n            else:\n                formatted_script = formatted_script + line\n        \n        # put the import lines at the top of the script\n        for i in imports:\n            formatted_script = i + formatted_script\n        return formatted_script\n    \n    def execute(self, script, metrics_dictionary):\n        script = self._format_script(script)\n        field_profile = metrics_dictionary\n        # pass in a local copy of the metrics_dictionary (seen as 'field_profile' to the user)\n        local_script_vars = locals()\n        execution_time = '0'\n        # run it\n        result = False\n        error = False\n        message = \"\"\n        try:\n            exec(script, local_script_vars)\n            if (bool(local_script_vars['is_valid_interpretation'])):\n                result = True\n            else: \n                result = False\n        except AssertionError as err:\n            logging.debug(\"User script failed an assertion. Returning false.\")\n            result = False\n            error = False\n            message += \"\\nUser assertion failed in script.\"\n        except SyntaxError as err:\n            result = False\n            error = True\n            message += \"\\n\" + str(err.__class__.__name__) + \": \" + str(err.args[0]) + \" on line \" + str(err.lineno)\n        except Exception as err:\n            logging.debug(\"Invalid Python in user script. 
Returning false.\")\n logging.debug(\"Exception: \" + str(sys.exc_info()[0]))\n result = str(False)\n error = str(True)\n message += \"\\n\" + str(err.__class__.__name__) + \": \" + str(err.args[0]) + \" on line \" +\\\n str(traceback.extract_tb(sys.exc_info()[2])[-1][1])\n \n overall_result = {'result': bool(result), 'message': message, 'error': bool(error)}\n return Result(overall_result)\n \n def _decode_base_64_(self, base_64_encoded):\n return base64.b64decode(base_64_encoded).decode('UTF-8')","sub_path":"interpretation-engine/src/main/python/dev/development_trusted_runner.py","file_name":"development_trusted_runner.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"159772932","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n#\n# MIT License\n#\n# Copyright (c) 2017 Miha Purg \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n#\n#\n\n\nimport copy\nimport logging\nfrom collections import OrderedDict as ODict\n\nfrom Qpyl.common import __version__, raise_or_log\n\nlogger = logging.getLogger(__name__)\n\ndef _str_in(x, list_of_values):\n if str(x).lower() not in list_of_values:\n raise ValueError\n\n_logical = lambda x: _str_in(x, [\"on\", \"off\", \"1\", \"0\"])\n_integrator = lambda x: _str_in(x, [\"leap-frog\", \"velocity-verlet\"])\n_thermostat = lambda x: _str_in(x, [\"berendsen\", \"langevin\", \"nose-hoover\"])\n\nQ_PARAMETERS = ODict( [ (\"md\", ODict([(\"steps\", int), \n (\"random_seed\", int),\n (\"temperature\", float),\n (\"stepsize\", float),\n (\"bath_coupling\", float),\n (\"initial_temperature\", float),\n (\"separate_scaling\", _logical),\n (\"lrf\", _logical),\n (\"shake_solvent\", _logical),\n (\"shake_solute\", _logical),\n (\"shake_hydrogens\", _logical),\n (\"shake_heavy\", _logical),\n (\"shake_all_solvent\", _logical),\n (\"shake_all_solute\", _logical),\n (\"shake_all_hydrogens\", _logical),\n (\"shake_all_heavy\", _logical),\n (\"force_rms\", _logical),\n (\"integrator\", _integrator),\n (\"thermostat\", _thermostat),\n (\"langevin_random\", _logical),\n (\"langevin_friction\", float),\n (\"nhchains\", int),\n (\"nose-hoover_mass\", int),\n ])),\n (\"cut-offs\", ODict([(\"solute_solute\", float),\n (\"solvent_solvent\", float),\n (\"solute_solvent\", float),\n (\"q_atom\", float),\n (\"lrf\", float)\n ])),\n (\"sphere\", ODict([(\"centre\", str),\n (\"radius\", float),\n (\"shell_radius\", float),\n (\"shell_force\", float),\n 
(\"shell_force\", float),\n (\"excluded_force\", float),\n (\"excluded_freeze\", _logical),\n (\"exclude_bonded\", _logical)\n ])),\n (\"pbc\", ODict([(\"pressure_seed\", int),\n (\"rigid_box_centre\", _logical),\n (\"constant_pressure\", _logical),\n (\"max_volume_displ\", float),\n (\"pressure\", float),\n (\"atom_based_scaling\", _logical),\n (\"control_box\", str),\n (\"put_solvent_back_in_box\", _logical),\n (\"put_solute_back_in_box\", _logical)\n ])),\n (\"solvent\", ODict([(\"radius\", float),\n (\"centre\", str),\n (\"pack\", float),\n (\"radial_force\", float),\n (\"polarisation\", _logical),\n (\"charge_correction\", _logical),\n (\"polarisation_force\", float),\n (\"morse_depth\", float),\n (\"morse_width\", float),\n (\"model\", str)\n ])),\n (\"qcp\", ODict([(\"selection\", str),\n (\"qcp_size\", str),\n (\"qcp_kie\", _logical),\n (\"qcp_pdb\", str),\n (\"qcp_write\", _logical),\n (\"qcp_seed\", int)\n ])),\n (\"intervals\", ODict([(\"non_bond\", int),\n (\"output\", int),\n (\"temperature\", int),\n (\"energy\", int),\n (\"trajectory\", int),\n (\"volume_change\", int)\n ])),\n (\"files\", ODict([(\"topology\", str),\n (\"restart\", str),\n (\"final\", str),\n (\"trajectory\", str),\n (\"traj_input\", str),\n (\"energy\", str),\n (\"fep\", str),\n (\"restraint\", str),\n (\"water\", str)\n ])),\n (\"lambdas\", str),\n (\"group_contribution\", list),\n (\"atom_restraints\", list),\n (\"sequence_restraints\", list),\n (\"distance_restraints\", list),\n (\"angle_restraints\", list),\n (\"wall_restraints\", list)\n ])\n\nclass QDynInputError(Exception):\n pass\n\nclass QDynInput(object):\n \"\"\"Used for parsing, modifying and generating QDyn inputs.\n\n Args:\n input_string (str): string of a qdyn input file\n parameters (dict): { \"MD\": { \"steps\":10000,\n \"stepsize\":1.00, ... },\n ... }\n ignore_errors (boolean, optional): if set, write error messages to\n logger.warning instead of raising\n QDynInputError\n\n Usage:\n\n try:\n # load and parse\n inp = QDynInput( input_file_string )\n # or inp=QDynInput( parameters={ \"md\": ... 
} )\n\n # update with another input and save the overridden paramaters\n overridden_parms = inp.update( input_file_2_string )\n\n # update with a dictionary\n new_parameters = { \"md\": { \"steps\" : 100000 },\n { \"temperature\" : 50 } } )\n inp.update( new_parameters )\n\n # check the input\n inp.check()\n\n # get the input string\n new_inp_str = inp.get_string()\n except QDynInputError as e:\n print \"Problem with input file: \" + str(e)\n\n\n \"\"\"\n\n def __init__(self, input_string=\"\", parameters={}, ignore_errors=False):\n self._ignore_errors = ignore_errors\n self.parameters = {}\n self.update(input_string=input_string, parameters=parameters)\n\n\n def _parse_inp(self, input_string):\n # just extracts the keyword:value pairs into self.parameters,\n # no checking is done at this point\n\n if not input_string:\n return {}\n parms = {}\n qsection = \"\"\n for line in input_string.split(\"\\n\"):\n # remove comments and strip whitespaces.\n line = line.split(\"#\")[0].strip()\n line = line.split(\"!\")[0].strip()\n # empty lines are useless\n if line == \"\":\n continue\n # found a qsection\n if line[0] == \"[\":\n qsection = line.strip(\"[\").strip(\"]\").lower()\n if qsection in parms:\n raise QDynInputError(\"Section '{}' appears more than once\"\n \"\".format(qsection))\n if \"group_contribution\" in qsection or \"restraints\" in qsection:\n # make sure the restraints are cleared if\n # an empty section is defined\n parms[qsection] = []\n continue\n\n if not qsection:\n raise QDynInputError(\"Line '%s' not in any qsection\" % line)\n\n if \"group_contribution\" in qsection:\n parms[qsection].append(line)\n elif \"restraints\" in qsection:\n # prettify it\n rest = \" \".join([\"%-6s\" % x for x in line.split()])\n parms[qsection].append(rest)\n elif \"lambdas\" in qsection:\n parms[qsection] = line\n else:\n c = line.strip().split()\n key = c[0]\n try:\n value = \" \".join(c[1:])\n except IndexError:\n value = None # checking is done later in _check_parms\n if qsection not in parms.keys():\n parms[qsection] = {}\n parms[qsection][key] = value\n\n return parms\n\n\n def _check_parms(self, parms):\n # Checks if parameters are supported (typos and such)\n # and if they are of correct type.\n\n for qsection, qsec_parms in parms.iteritems():\n if qsection not in Q_PARAMETERS:\n raise_or_log(\"Unsupported section: '{}'\".format(qsection),\n QDynInputError, logger, self._ignore_errors)\n try:\n if isinstance(qsec_parms, dict):\n for key, value in qsec_parms.iteritems():\n exp_type = Q_PARAMETERS[qsection][key]\n exp_type(value)\n except KeyError:\n raise_or_log(\"Unknown keyword '{}' in section '{}'\"\n \"\".format(key, qsection),\n QDynInputError, logger, self._ignore_errors)\n except ValueError:\n raise_or_log(\"Bad value '{}' for parameter '{}' in section \"\n \"'{}'\".format(value, key, qsection),\n QDynInputError, logger, self._ignore_errors)\n\n\n def _update_dict(self, d1, d2):\n # Updates values in dictionary d1 with values in dictionary d2\n\n # contains parameters that were overwritten as tuples (old,new)\n overridden = {}\n\n for section, prms in d2.iteritems():\n if \"group_contribution\" in section \\\n or \"restraints\" in section \\\n or \"lambdas\" in section:\n if section in d1:\n overridden[section] = (d1[section], prms)\n d1[section] = prms\n else:\n if section not in d1:\n d1[section] = {}\n for keyword, prm in prms.iteritems():\n if keyword in d1[section]:\n if d1[section][keyword] != prm:\n tmpk = section + \"/\" + keyword\n overridden[tmpk] = 
(d1[section][keyword], prm)\n                    d1[section][keyword] = prm\n        return overridden\n\n\n\n    def update(self, input_string=None, parameters=None):\n        \"\"\"Update/modify the parameters.\n\n        Updates the parameters with either (or both) an input string or\n        a parameter dictionary. Either argument works; parameters take\n        precedence when both are given, and passing neither raises ValueError.\n        \"\"\"\n\n        parms = self._parse_inp(input_string)\n        if parameters:\n            self._update_dict(parms, copy.deepcopy(parameters))\n        elif not input_string:\n            raise ValueError(\"Function requires at least one argument\")\n\n        overwritten = self._update_dict(self.parameters, parms)\n        self._check_parms(self.parameters)\n        return overwritten\n\n\n\n    def check(self):\n        \"\"\"Check for missing parameters.\n\n        Raises QDynInputError if required parameters are missing.\n        (it is not all-knowing, so please don't rely too much on it)\n        If 'ignore_errors' is set, it logs warnings instead.\n        \"\"\"\n        # check for nonsense or missing mandatory parameters\n        mdp = self.parameters.get(\"md\", [])\n        fp = self.parameters.get(\"files\", [])\n        ip = self.parameters.get(\"intervals\", [])\n\n        for keyword in (\"temperature\", \"steps\", \"stepsize\"):\n            if keyword not in mdp:\n                raise_or_log(\"Missing parameter '{}'\".format(keyword),\n                             QDynInputError, logger, self._ignore_errors)\n\n        # fep file and lambdas require each other\n        if (\"fep\" in fp and \"lambdas\" not in self.parameters) or \\\n           (\"fep\" not in fp and \"lambdas\" in self.parameters):\n            raise_or_log(\"Parameter 'fep' requires the 'lambdas' section \"\n                         \"and vice versa\", QDynInputError,\n                         logger, self._ignore_errors)\n\n        # when generating new velocities, both parms need to be present\n        if (\"initial_temperature\" in mdp and \"random_seed\" not in mdp) or \\\n           (\"initial_temperature\" not in mdp and \"random_seed\" in mdp):\n            raise_or_log(\"Parameter 'initial_temperature' requires \"\n                         \"'random_seed' and vice versa\",\n                         QDynInputError, logger, self._ignore_errors)\n\n        # if a restart file is not defined, we have to generate new velocities\n        if \"restart\" not in fp and \"initial_temperature\" not in mdp:\n            raise_or_log(\"No restart file, please set 'initial_temperature' \"\n                         \"and 'random_seed' to generate velocities\",\n                         QDynInputError, logger, self._ignore_errors)\n\n        # since energies are important let's not rely on default values in Q...\n        # if an energy file is defined, energy interval must be defined\n        if (\"energy\" not in fp and \"energy\" in ip) or \\\n           (\"energy\" in fp and \"energy\" not in ip):\n            raise_or_log(\"'energy' must be defined in both 'intervals' \"\n                         \"and 'files' sections\",\n                         QDynInputError, logger, self._ignore_errors)\n\n\n\n    def get_string(self, check=True, sort=True):\n        \"\"\"Returns the input as a string.\n\n        Arguments:\n            check (boolean, optional): if True (default), call self.check()\n            sort (boolean, optional): if True (default), sort the sections and\n                                      keywords according to the order in which\n                                      they appear in Q_PARAMETERS\n        \"\"\"\n\n        if check:\n            self.check()\n\n        qsections = self.parameters.keys()\n        if sort:\n            qsections = sorted(qsections,\n                               key=lambda x: (Q_PARAMETERS.keys() + [x]).index(x))\n\n        # generate the string such that all the sections and keywords appear in order\n        s = []\n        for qsection in qsections:\n            s.append(\"[%s]\" % qsection)\n            if \"group_contribution\" in qsection or \"restraints\" in qsection:\n                s.extend(self.parameters[qsection])\n            elif \"lambda\" in qsection:\n                s.append(self.parameters[qsection])\n            else:\n                keywords = self.parameters[qsection].keys()\n\n                if sort:\n                    qkeys = Q_PARAMETERS[qsection].keys() \n                    
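# Sort trick: a key present in Q_PARAMETERS sorts by its canonical\n                    # position, while an unknown key gets index len(qkeys) from the\n                    # appended [x] and therefore lands after all known keys.\n                    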
keywords = sorted(keywords,\n key=lambda x: (qkeys + [x]).index(x))\n\n for key in keywords:\n if key in self.parameters[qsection]:\n val = self.parameters[qsection][key]\n s.append(\"{:<20} {:>30}\".format(key, val))\n\n s.append(\"\")\n\n return \"\\n\".join(s)\n","sub_path":"packages/Qpyl/core/qdyninp.py","file_name":"qdyninp.py","file_ext":"py","file_size_in_byte":17453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"552799546","text":"\"\"\"\n\n All user related methods will be here\n Author: Haoyu Xu\n\n For every api access that needs user authentication, add fllowing\n\n UUID SPEC: \"username:;password:\"\n\n PARASER.add_argument('X-UUID', type=str, location='headers', help='User UUID')\n PARASER.add_argument('X-OTP', type=str, location='headers', help='User OTP')\n\n LIBUSER.check_otp(uuid, otp): check if a user logged in\n\n LIBUSER.is_admin(uuid): check if a user has admin permission\n\n OTP should expire in 30 mins\n\n\"\"\"\nimport json\nfrom flask_restful import Resource, reqparse, request\nfrom lib.message import response\nfrom lib.libuser import LibUser\nLIBUSER = LibUser()\nfrom lib.libconfig import LibConfig\nCONFIG = LibConfig().fetch()\n\nPARASER = reqparse.RequestParser()\n\nclass User(Resource):\n\n def __init__(self):\n self.uuid = None\n self.otp = None\n self.perm = False\n \n @response\n def get(self, uuid=None):\n \"\"\"\n\n This method provides user details\n\n Args:\n self: access global variables\n uuid: user uuid\n\n Returns:\n json: user details\n int: status code\n\n \"\"\"\n\n # check url\n urls = [\n \"/user\",\n \"/users\",\n \"/user/{uuid}\".format(uuid=uuid)\n ]\n path = request.path\n if path not in urls:\n return \"Incorrect HTTP Method\", 400\n \n if path.split(\"/\")[1] == \"user\" and uuid is None:\n PARASER.add_argument('X-UUID', type=str, location='headers', help='User UUID')\n PARASER.add_argument('X-OTP', type=str, location='headers', help='User OTP')\n args = PARASER.parse_args()\n self.uuid = args[\"X-UUID\"]\n self.otp = args[\"X-OTP\"]\n\n if self.__is_empty_or_none(self.uuid, self.otp) is False:\n if LIBUSER.check_otp(uuid=self.uuid, otp=self.otp) is False:\n user_details = LIBUSER.details(self.uuid)\n user_details.pop(\"password\")\n user_details.pop(\"otp\")\n user_details.pop(\"otp_time\")\n return user_details, 200\n else:\n return \"You are unauthorized\", 401\n else:\n return \"The request has unfulfilled fields\", 400\n elif path.split(\"/\")[1] == \"users\":\n PARASER.add_argument('X-UUID', type=str, location='headers', help='User UUID')\n PARASER.add_argument('X-OTP', type=str, location='headers', help='User OTP')\n args = PARASER.parse_args()\n self.uuid = args[\"X-UUID\"]\n self.otp = args[\"X-OTP\"]\n if self.__is_empty_or_none(self.uuid, self.otp) is False:\n if LIBUSER.check_otp(uuid=self.uuid, otp=self.otp) is False:\n if LIBUSER.is_admin(self.uuid):\n users = LIBUSER.get_all_users()\n if users is not None:\n for item in users:\n item.pop(\"password\")\n item.pop(\"otp\")\n item.pop(\"otp_time\")\n return users, 200\n else:\n return \"You don't have this permission\", 403\n else:\n return \"You are unauthorized\", 401\n else:\n return \"The request has unfulfilled fields\", 400\n elif path.split(\"/\")[2] == uuid:\n PARASER.add_argument('X-UUID', type=str, location='headers', help='User UUID')\n PARASER.add_argument('X-OTP', type=str, location='headers', help='User OTP')\n args = PARASER.parse_args()\n self.uuid = args[\"X-UUID\"]\n self.otp = args[\"X-OTP\"]\n if 
self.__is_empty_or_none(self.uuid, self.otp) is False:\n if LIBUSER.check_otp(uuid=self.uuid, otp=self.otp) is False:\n if LIBUSER.is_admin(self.uuid) is True:\n user_details = LIBUSER.details(uuid)\n if user_details is not None:\n user_details.pop(\"password\")\n user_details.pop(\"otp\")\n user_details.pop(\"otp_time\")\n return user_details, 200\n else:\n return \"User not found\", 404\n else:\n return \"You don't have this permission\", 403\n else:\n return \"You are unauthorized\", 401\n else:\n return \"The request has unfulfilled fields\", 400\n else:\n return \"\", 404\n\n @response\n def delete(self):\n \"\"\"\n\n This method provides user logout\n\n Args:\n self: access global variables\n\n Returns:\n string: logout status\n int: status code\n\n \"\"\"\n # check url\n urls = [\n \"/user/logout\",\n \"/user/delete\"\n ]\n path = request.path\n if path not in urls:\n return \"Incorrect HTTP Method\", 400\n \n if path.split(\"/\")[2] == \"logout\":\n PARASER.add_argument('X-UUID', type=str, location='headers', help='User UUID')\n PARASER.add_argument('X-OTP', type=str, location='headers', help='User OTP')\n args = PARASER.parse_args()\n self.uuid = args[\"X-UUID\"]\n self.otp = args[\"X-OTP\"]\n\n if self.__is_empty_or_none(self.uuid, self.otp) is False:\n if LIBUSER.check_otp(uuid=self.uuid, otp=self.otp) is False:\n is_logged_out = LIBUSER.otp_to_expire(self.uuid)\n if is_logged_out is True:\n return \"You are logged out\", 200\n else:\n return \"Unexpected behaviour\", 500 # should never reach this line\n else:\n return \"You are unauthorized\", 401\n else:\n return \"The request has unfulfilled fields\", 400\n elif path.split(\"/\")[2] == \"delete\":\n PARASER.add_argument('X-UUID', type=str, location='headers', help='User UUID')\n PARASER.add_argument('X-OTP', type=str, location='headers', help='User OTP')\n PARASER.add_argument('uuid', type=str, help='User UUID')\n args = PARASER.parse_args()\n self.uuid = args[\"X-UUID\"]\n self.otp = args[\"X-OTP\"]\n\n if self.__is_empty_or_none(self.uuid, self.otp) is False:\n if LIBUSER.check_otp(uuid=self.uuid, otp=self.otp) is False:\n if LIBUSER.is_admin(self.uuid):\n if self.__is_empty_or_none(args[\"uuid\"]) is False:\n self.uuid = args[\"uuid\"]\n else:\n return \"You don't have this permission\", 403\n \n is_deleted = LIBUSER.delete_user(self.uuid)\n if is_deleted is True:\n return \"User is deleted\", 200\n else:\n return \"User not found\", 404\n else:\n return \"You are unauthorized\", 401\n else:\n return \"The request has unfulfilled fields\", 400\n else:\n return \"\", 404\n\n @response\n def post(self):\n \"\"\"\n\n This method provides user login\n\n Args:\n self: access global variables\n\n Returns:\n string: user otp\n int: status code\n\n \"\"\"\n\n # check url\n urls = [\n \"/user/login\",\n \"/user/add\"\n ]\n path = request.path\n if path not in urls:\n return \"Incorrect HTTP Method\", 400\n\n if path.split(\"/\")[2] == \"login\":\n paraser = reqparse.RequestParser()\n paraser.add_argument('X-UUID', type=str, location='headers', help='User UUID')\n paraser.add_argument('X-PERM', type=bool, location='headers', help='Remember Me', default=False)\n args = paraser.parse_args()\n self.uuid = args[\"X-UUID\"]\n self.perm = args[\"X-PERM\"]\n\n if self.__is_empty_or_none(self.uuid, self.perm) is False:\n user_otp = LIBUSER.get_otp(uuid=self.uuid, permanent=self.perm)\n if user_otp is None:\n return \"Either username or password is incorrect\", 401\n else:\n return user_otp, 200\n else:\n return \"The request has unfulfilled 
fields\", 400\n elif path.split(\"/\")[2] == \"add\":\n PARASER.add_argument('X-UUID', type=str, location='headers', help='User UUID')\n PARASER.add_argument('X-OTP', type=str, location='headers', help='User OTP')\n PARASER.add_argument('fields', type=str, help='Fields to be updated')\n args = PARASER.parse_args()\n self.uuid = args[\"X-UUID\"]\n self.otp = args[\"X-OTP\"]\n fields = args[\"fields\"]\n if self.__is_empty_or_none(self.uuid, self.otp, fields) is False:\n if LIBUSER.check_otp(uuid=self.uuid, otp=self.otp) is False:\n if LIBUSER.is_admin(self.uuid):\n fields = json.loads(args[\"fields\"].replace(\"'\", '\"'))\n # \"username:;password:\"\n required_fields = [\n \"username\",\n \"password\",\n \"type\"\n ]\n if self.__verify_fields(expected=required_fields, actual=fields) is True:\n \n if self.__is_empty_or_none(fields[\"username\"], fields[\"password\"], fields[\"type\"]) is False:\n \n user = {\n \"username\": fields[\"username\"],\n \"password\": fields[\"password\"]\n }\n is_added = LIBUSER.add_user(user, fields[\"type\"])\n if is_added is True:\n return \"User is added\", 200\n else:\n return \"User either exists or unexpected error happened\", 403\n\n return \"The request has unfulfilled fields\", 400\n else:\n return \"You don't have this permission\", 403\n else:\n return \"You are unauthorized\", 401\n else:\n return \"The request has unfulfilled fields\", 400\n else:\n return \"\", 404\n\n @response\n def put(self):\n \"\"\"\n\n This method updates user\n\n Args:\n self: access global variables\n\n Returns:\n string: update status\n int: status code\n\n \"\"\"\n\n # check url\n urls = [\n \"/user/update\"\n ]\n path = request.path\n if path not in urls:\n return \"Incorrect HTTP Method\", 400\n\n if path.split(\"/\")[2] == \"update\":\n PARASER.add_argument('X-UUID', type=str, location='headers', help='User UUID')\n PARASER.add_argument('X-OTP', type=str, location='headers', help='User OTP')\n PARASER.add_argument('fields', type=str, help='Fields to be updated')\n args = PARASER.parse_args()\n self.uuid = args[\"X-UUID\"]\n self.otp = args[\"X-OTP\"]\n fields = args[\"fields\"]\n\n if self.__is_empty_or_none(self.uuid, self.otp, fields) is False:\n if LIBUSER.check_otp(uuid=self.uuid, otp=self.otp) is False:\n fields = json.loads(args[\"fields\"].replace(\"'\", '\"'))\n updated_fields = []\n failed_to_update_fields = []\n\n if \"uuid\" in fields or \"type\" in fields:\n if LIBUSER.is_admin(self.uuid):\n if \"uuid\" in fields:\n if self.__is_empty_or_none(fields[\"uuid\"]) is False and LIBUSER.is_exists(fields[\"uuid\"]) is True:\n self.uuid = fields[\"uuid\"]\n else:\n return \"Invalid UUID\", 400\n \n if \"type\" in fields:\n if self.__is_empty_or_none(fields[\"type\"]) is False:\n set = {\n \"name\": \"type\",\n \"value\": fields[\"type\"],\n \"skip\": False\n }\n is_updated = LIBUSER.update_user(self.uuid, set)\n\n if is_updated is True:\n updated_fields.append(\"Type\")\n else:\n failed_to_update_fields.append(\"Type\")\n \n else:\n return \"The request has unfulfilled fields\", 400\n else:\n return \"You don't have this permission\", 403\n\n if \"username\" in fields:\n if self.__is_empty_or_none(fields[\"username\"]) is False:\n set = {\n \"name\": \"username\",\n \"value\": fields[\"username\"],\n \"skip\": False\n }\n is_updated = LIBUSER.update_user(self.uuid, set)\n\n if is_updated is True:\n updated_fields.append(\"Username\")\n else:\n failed_to_update_fields.append(\"Username\")\n else:\n return \"The request has unfulfilled fields\", 400\n\n if 
\"password\" in fields:\n if self.__is_empty_or_none(fields[\"password\"]) is False:\n set = {\n \"name\": \"password\",\n \"value\": fields[\"password\"],\n \"skip\": False\n }\n is_updated = LIBUSER.update_user(self.uuid, set)\n\n if is_updated is True:\n updated_fields.append(\"Password\")\n else:\n failed_to_update_fields.append(\"Password\")\n else:\n return \"The request has unfulfilled fields\", 400\n \n if \"email\" in fields:\n if self.__is_empty_or_none(fields[\"email\"]) is False:\n set = {\n \"name\": \"email\",\n \"value\": fields[\"email\"],\n \"skip\": False\n }\n is_updated = LIBUSER.update_user(self.uuid, set)\n\n if is_updated is True:\n updated_fields.append(\"Email\")\n else:\n failed_to_update_fields.appen(\"Email\")\n else:\n return \"The request has unfulfilled fields\", 400\n\n # update user uuid\n user_details = LIBUSER.details(self.uuid)\n user = {\n \"username\": user_details[\"username\"],\n \"password\": user_details[\"password\"]\n }\n uuid_template = \"username:{username};password:{password}\".format(username=user[\"username\"], password=user[\"password\"])\n old_uuid = self.uuid\n self.uuid = LIBUSER.uuid(uuid_template)\n set = {\n \"name\": \"uuid\",\n \"value\": self.uuid,\n \"skip\": True\n }\n LIBUSER.update_user(old_uuid, set)\n\n if len(updated_fields) == 0:\n if len(failed_to_update_fields) == 1:\n return ((', '.join('{}'.format(key) for key in failed_to_update_fields)) + \" is not being updated\"), 400\n else:\n return ((', '.join('{}'.format(key) for key in failed_to_update_fields)) + \" are not being updated\"), 400\n else:\n if len(updated_fields) == 1:\n return ((', '.join('{}'.format(key) for key in updated_fields)) + \" has been updated\"), 200\n else:\n return ((', '.join('{}'.format(key) for key in updated_fields)) + \" have been updated\"), 200\n else:\n return \"You are unauthorized\", 401\n else:\n return \"The request has unfulfilled fields\", 400\n else:\n return \"\", 404\n\n def __is_empty_or_none(self, *argv):\n \"\"\"\n\n Check if there is a empty or None in the args\n\n Args:\n self: access global variables\n *argv: argument(s) to check if is None or \"\" or \" \" with spaces\n \n Returns:\n bool: True if exists, False otherwise\n\n \"\"\"\n is_exists = True\n\n for arg in argv:\n if arg is None:\n is_exists = True\n break\n elif str(arg).replace(\" \", \"\") == \"\":\n is_exists = True\n break\n else:\n is_exists = False\n \n return is_exists\n\n def __verify_fields(self, expected, actual):\n \"\"\"\n\n This method verify if input dict has required fields\n \n Args:\n self: accessing global parameters\n expected: expected fields\n actual: actual fields\n \n Returns:\n bool: same => True\n not the same => False\n\n \"\"\"\n flags = []\n flag = False\n for key, value in actual.items():\n if key in expected:\n flags.append(True)\n else:\n flags.append(False)\n\n if all(flags) and len(flags) == len(expected):\n flag = True\n\n return flag","sub_path":"route/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":19064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"336748566","text":"import os\nfrom PIL import Image, ImageFilter\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.misc import logsumexp\nimport matplotlib.gridspec as gridspec\n\n# force all grayscale images to use consistent 0 -> 1 as min/max\nVMIN = -1.0\nVMAX = 1.0\n\ns = 2\ndigits = [\"check\", \"bar\"]\nclass_map = {0:\"check\", 1:\"bar\"}\nIMG_PATH = \"2by2/\"\nBASES = [1, 2, 3, 
4]\nPROJ_IMG_PATHS = [ IMG_PATH + \"proj_test/p\" + str(i) + \".png\" for i in BASES ]\n\n# s = 3\n# digits = [0, 1, 4, 7]\n# class_map = {0:0, 1:1, 2:4, 3:7}\n# IMG_PATH = \"nums/\"\n# BASES = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n# PROJ_IMG_PATHS = [ IMG_PATH + \"proj_test/p\" + str(i) + \".png\" for i in BASES ]\n\nk = len(digits) # number of classes, 4\nCOUNT = 100\nNOISE_SCALE = 255\nDENOM = 255\n\nEXT = \".png\"\n\ndef generate_data(img_path):\n\tdata = np.empty((COUNT, k, s*s))\n\n\tj = 0\n\tfor digit in digits:\n\t\tprint(\"j is {}, digit is {}\".format(j,digit))\n\t\timg = np.array(Image.open(img_path + str(digit) + EXT).convert('L'))\n\t\tif not os.path.exists(img_path + str(digit)):\n\t\t\tos.makedirs(img_path + str(digit))\n\n\t\tw = img.shape[0]\n\t\th = img.shape[1]\n\n\t\tfor i in range(COUNT):\n\t\t\tintensity_noise = (np.random.rand(1) * 1.5) + 0.5 # scale from 0.5 to 2x what it was before\n\t\t\tbackground_noise = (np.random.rand(w,h) - 0.5) * NOISE_SCALE \n\t\t\t# noise = 0\n\t\t\tnoisy_img = (img * intensity_noise) + background_noise\n\t\t\trescaled = noisy_img / DENOM # force range 0 to 1\n\t\t\tdata[i,j] = rescaled.reshape((s*s))\n\t\t\tsave_noisy = Image.fromarray(noisy_img).convert('L')\n\t\t\tsave_noisy.save(img_path + str(digit) + \"/\" + str(digit) + \"_\" + str(i) + EXT)\n\n\t\tj += 1\n\n\treturn data\n\ndef train_test_split(train_ratio=0.7):\n\tdata = generate_data(IMG_PATH)\n\n\t# train test split\n\tr_ratio = train_ratio\n\tt_ratio = 1 - r_ratio\n\n\tr_index = int(r_ratio*COUNT)\n\n\ttrain_data = data[0:r_index,:,:]\n\ttest_data = data[r_index:COUNT,:,:]\n\n\treturn train_data, test_data\n\ndef open_img_flat(img_path):\n\treturn np.array(Image.open(img_path).convert('L')).reshape((s*s)) / DENOM\n\ndef open_img(img_path):\n\treturn np.array(Image.open(img_path).convert('L')) / DENOM\n\ndef remove_ticks(s_ax):\n\t# convenience fxn: remove tickmarks from subplot axes\n\ts_ax.tick_params( \n\t\twhich='both', # remov ticks and labels\n\t\tlabelbottom=False,\n\t\tlabelleft=False,\n\t\tlength=0.0)\n\ndef get_means(data):\n\tmeans = np.zeros((k, s*s))\n\n\tfor c in range(k):\n\t\tclass_subarray = data[:,c,:]\n\t\tmeans[c] = np.average(class_subarray, axis=0)\n\n\treturn means\n\ndef display_means(means):\n\t# Neat visual! 
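This shows one mean image per class.\n\t# One could also pass vmin=VMIN, vmax=VMAX to imshow for a consistent\n\t# grey scale (those module-level constants are currently unused).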
\t\n\tfig, ax = plt.subplots(nrows=1, ncols=k)\n\tfig.suptitle(\"Means for each class\")\n\tfor c in range(k):\n\t\treshaped = np.asarray(np.reshape(means[c], (s, s)))\n\t\ts_ax = plt.subplot(1, k, c+1)\n\t\tremove_ticks(s_ax)\n\t\tplt.title(\"class \" + str(class_map[c]))\n\t\tplt.imshow(reshaped, cmap='gray_r')\n\tplt.show()\n\ndef compute_sigmas(data, means):\n\n\tcovariances = np.zeros((k, s*s, s*s))\n\n\t# for stability\n\tdiag = np.identity(s*s) * 0.01\n\t\n\t# # compute average of (point - mean for class)^2 for each class\n\tfor c in range(k):\n\t\tclass_subarray = data[:,c,:]\n\t\tresiduals = (class_subarray - means[c])\n\t\tsum_sq_residuals = np.dot(residuals.T, residuals)\n\t\tcovariances[c] = sum_sq_residuals / class_subarray.shape[0] + diag\n\t\t# covariances[c] = np.cov(class_subarray, rowvar=False) + diag\n\n\treturn covariances\n\ndef plot_eigenvectors(covariances):\n\tfig, ax = plt.subplots(nrows=1, ncols=k)\n\tfig.suptitle(\"Top Eigenvector for each class\")\n\tfor c in range(k):\n\n\t\teigenvalues, eigenvectors = np.linalg.eig(covariances[c])\n\t\tprint(eigenvalues)\n\t\tprint(\"covariances[c] shape: {}\".format(covariances[c].shape))\n\n\t\tbiggest_index = eigenvalues.argmax() # should be the first one, but just in case\n\t\tprint(\"biggest_index: {}\".format(biggest_index))\n\n\t\treshaped = np.asarray(np.reshape(eigenvectors[:,biggest_index], (s, s)))\n\t\ts_ax = plt.subplot(1, k, c+1)\n\t\tremove_ticks(s_ax)\n\n\t\tplt.title(\"class \" + str(class_map[c]) + \"\\n\" + r\"$\\lambda$\" + \" = \" + str(round(eigenvalues[biggest_index], 3)))\n\t\tplt.imshow(reshaped, cmap='gray_r')\n\tplt.show()\n\ndef generative_likelihood(digits, means, covariances):\n\n\tD = digits.shape[1]\n\tN = digits.shape[0]\n\n\tgen_loglik = np.zeros((k, N))\n\n\tfor c in range(k):\n\t\tresiduals = np.subtract(digits, means[c])\n\n\t\tsigma_i = np.linalg.inv(covariances[c])\n\n\t\tsq_mahalanobis = np.einsum('ij,ij->i', np.matmul(residuals, sigma_i), residuals)\n\t\t# Note that this einsum is element-wise dot product: more efficient way of doing matmul and taking the diagonal\n\n\t\tpi_bit = -0.5 * D *np.log(2 * np.pi)\n\n\t\tdet_bit = -0.5 * np.log(np.linalg.det(covariances[c]))\n\n\t\tmaha_bit = -0.5 * sq_mahalanobis\n\n\t\tgen_loglik[c] = pi_bit + det_bit + maha_bit\n\n\treturn gen_loglik\n\ndef conditional_likelihood(digits, means, covariances):\n\n\tgen_loglik = generative_likelihood(digits, means, covariances)\n\n\tp_y = 1.0/k # uniform prior\n\t# normalise per sample over the classes; the uniform prior cancels out\n\tp_xs = logsumexp(gen_loglik, axis=0)[:, np.newaxis]\n\n\tcond_loglik = gen_loglik.T - p_xs\n\t\n\treturn cond_loglik\n\ndef classify_data(digits, means, covariances):\n\t'''\n\tClassify new points by taking the most likely posterior class\n\t'''\n\tcond_likelihood = conditional_likelihood(digits, means, covariances)\n\t\n\tN = digits.shape[0]\n\n\tpred = cond_likelihood.argmax(axis=1)\n\tfloat_preds = np.array(pred).astype(float)\n\n\treturn float_preds\n\ndef do_basis_projections(input_img, covariances, mtx):\n\n\tcmap_setting = 'gray_r'\n\n\tn = k # number of classes\n\n\tfig, ax = plt.subplots(nrows=1, ncols=n+1) # +1 to display the original image as well\n\n\tfig.suptitle(\"Unit bases projected against covariance matrices\")\n\tfig.set_figheight(10)\n\tfig.set_figwidth(10)\n\n\ts_ax = plt.subplot(1, n+1, 1)\n\tremove_ticks(s_ax)\n\n\tplt.title(\"Original Unit Basis\")\n\t# plt.imshow(input_img.reshape((s,s)), cmap=cmap_setting)\n\tsq_proj = input_img.reshape((s,s))\n\ts_ax.matshow(sq_proj, cmap=cmap_setting)\n\tfor (i,j),z in 
np.ndenumerate(sq_proj):\n\t\ts_ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center', fontsize=8, color='red')\n\n\tfor c in range(k):\n\t# Apply cov matrix for each class, show result\n\t\tprojection = np.dot(covariances[c], input_img.reshape(s*s))\n\t\ts_ax = plt.subplot(1, n+1, c+2)\n\t\tremove_ticks(s_ax)\n\n\t\tplt.title(\"class \" + str(class_map[c]))\n\t\t# plt.imshow(projection.reshape((s,s)), cmap=cmap_setting)\n\t\tsq_proj = projection.reshape((s,s))\n\t\ts_ax.matshow(sq_proj, cmap=cmap_setting)\n\t\tfor (i,j),z in np.ndenumerate(sq_proj):\n\t\t\ts_ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center', fontsize=8, color='red')\n\n\tplt.show()\n\ndef plot_pca_grid(train_data, covariances):\n\tclasses = range(k)\n\tk_max = len(classes)\n\tdims = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\td_max = len(dims)\n\n\tfig, ax = plt.subplots(nrows=k_max, ncols=d_max)\n\tfig.suptitle(\"Projection and reconstruction onto N basis vectors\")\n\tfig.set_figheight(10)\n\tfig.set_figwidth(10)\n\n\tcount = 1\n\tfor c in classes:\n\t\tfor d in dims:\n\t\t\tmean_proj = project_onto_basis(count, train_data, \n\t\t\t\tcovariances, c, k_max, d, d_max)\n\t\t\treshaped = np.asarray(np.reshape(mean_proj, (s, s)))\n\t\t\ts_ax = plt.subplot(k_max, d_max, count)\n\t\t\tremove_ticks(s_ax)\n\t\t\tif c == 0:\n\t\t\t\t# put labels on first row\n\t\t\t\tplt.title(\"{}\".format(d))\n\t\t\tplt.imshow(reshaped, cmap='gray_r')\n\t\t\tcount += 1\n\n\tfig.tight_layout()\n\tplt.show()\n\ndef project_onto_basis(grid_count, train_data, covariances, c, k_max, d, d_max):\n\tmean_proj = np.zeros((1, s*s))\n\n\teigenvalues, _eigenvectors = np.linalg.eig(covariances[c])\n\teigenvectors = _eigenvectors.T\n\n\tin_order = eigenvalues.argsort()\n\tsorted_eigenvalues = np.flip(eigenvalues[in_order], axis=0)\n\tsorted_eigenvectors = np.flip(eigenvectors[in_order], axis=0)\n\n\t# get top d eigenvectors\n\tnew_basis = sorted_eigenvectors[:d] # each row is an eigenvector\n\t\n\t# note that this new_basis is orthonormal (orthogonal, all vectors normalized to unit length)\n\t# so new_basis inverse = new_basis transpose\n\n\tclass_subarray = train_data[:,c,:]\n\tprojection = np.dot(new_basis, class_subarray.T)\n\n\t# project back\n\treverse = np.dot(new_basis.T, projection)\n\n\t# average all projections\n\tmean_proj = np.average(reverse.T, axis=0)\n\n\treturn mean_proj\n\ndef evaluate_GDA(train_data, test_data, means, covariances):\n\t# Evaluate our Gaussian Discriminant Analysis classifier\n\n\ttrain_accuracy = 0.0\n\tfor i in range(k):\n\t\ttrain_preds = classify_data(train_data[:,i,:], means, covariances)\n\t\ttrain_accuracy += np.sum(np.equal(train_preds, i)) / train_data[:,i,:].shape[0]\n\tprint(\"train accuracy: {}\".format(train_accuracy / k))\n\n\ttest_accuracy = 0.0\n\tfor i in range(k):\n\t\ttest_preds = classify_data(test_data[:,i,:], means, covariances)\n\t\ttest_accuracy += np.sum(np.equal(test_preds, i)) / test_data[:,i,:].shape[0]\n\tprint(\"test accuracy: {}\".format(test_accuracy / k))\n\ndef main():\n\n\ttrain_data, test_data = train_test_split(train_ratio=0.7)\n\n\tmeans = get_means(train_data)\n\tdisplay_means(means)\n\n\tcovariances = compute_sigmas(train_data, means)\n\tplot_eigenvectors(covariances)\n\n\tfor path in PROJ_IMG_PATHS:\n\t \tinput_img = open_img_flat(path)\n\t \tmtx = open_img(path)\n\t \tprint(mtx)\n\t \tdo_basis_projections(input_img, covariances, mtx)\n\n\t# plot_pca_grid(train_data, covariances)\n\n\t# evaluate_GDA(train_data, test_data, means, covariances)\n\nif __name__ == 
'__main__':\n\tmain()\n\n\n\n\n","sub_path":"training_data.py","file_name":"training_data.py","file_ext":"py","file_size_in_byte":9088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"397507263","text":"\"\"\"\nChallenge #25 Intermediate\n    Write a program to do the following:\n    input: a base ten (non-fractional) number at the command line\n    output: the binary representation of that number.\n\"\"\"\n\n\ndef binary_of_num(num):\n    return bin(num)\n\n\ndef recurse_bin(num):\n    if num == 0:\n        return '0'\n    elif num == 1:\n        return '1'\n    elif num % 2 == 0:\n        return recurse_bin(num // 2) + '0'\n    else:\n        return recurse_bin(num // 2) + '1'\n\nif __name__ == '__main__':\n    print(recurse_bin(int(input('Num input: '))))\n\n","sub_path":"challenge_0025.py","file_name":"challenge_0025.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"125355555","text":"import functools\nimport logging\nimport re\n\nfrom sgsession import Session\n\n\ndef get_related_publishes(to_check, fields=()):\n    \"\"\"Find all publishes which derive from the given ones.\n\n    Looks in ``sg_source_publishes`` field of ``PublishEvent`` for any of\n    the given publishes.\n\n    :param list to_check: List of publish entities.\n    :param list fields: Extra fields to fetch on derived publishes.\n    :return: ``set`` of publish entities.\n\n    \"\"\"\n\n    if not isinstance(to_check, (list, tuple)):\n        to_check = [to_check]\n\n    assert all(x['type'] == 'PublishEvent' for x in to_check)\n\n    sg = to_check[0].session\n    fields = tuple(fields) + ('source_publishes', )\n\n    seen = set()\n    related = set()\n\n    while True:\n\n        # Filter out those we have already looked at.\n        to_check = [x for x in to_check if x not in seen]\n        seen.update(to_check)\n\n        if not to_check:\n            break\n\n        checking = to_check\n        to_check = []\n\n        # Find all sources of these publishes.\n        sg.fetch(checking, ['source_publishes'])\n        for x in checking:\n            to_check.extend(x['source_publishes'] or ())\n\n        # Find any that these are the source of.\n        to_check.extend(sg.find('PublishEvent', [('source_publishes', 'in', checking)], fields))\n\n        related.update(to_check)\n\n    return related\n\n\ndef _split_to_set(x):\n    if isinstance(x, set):\n        return x\n    elif isinstance(x, basestring):\n        return set(re.split(r'\\W+', x))\n    elif x is None:\n        return set()\n    else:\n        return set(x)\n\n\nclass RepublishEventPlugin(object):\n\n    def __init__(self, **kwargs):\n\n        self._funcs = []\n        self._dispatcher_kwargs = kwargs\n        kwargs.setdefault('callback_in_subprocess', False)\n\n        if 'name' in kwargs:\n            self.log = logging.getLogger(kwargs['name'])\n        else:\n            name = kwargs.setdefault('name', self.__class__.__name__)\n            self.log = logging.getLogger('%s:%s' % (__name__, name))\n\n    def register(self, src_types, dst_types, src_steps=None, func=None, args=None, kwargs=None):\n\n        if func is None:\n            return functools.partial(self.register, src_types, dst_types, src_steps,\n                args=args, kwargs=kwargs,\n            )\n\n        src_types = _split_to_set(src_types)\n        dst_types = _split_to_set(dst_types)\n        src_steps = set(x.title() for x in _split_to_set(src_steps))\n        \n        if not dst_types:\n            raise ValueError('must provide destination types for idempotence checks')\n\n        self._funcs.append((src_types, dst_types, src_steps, func, args, kwargs))\n\n    def __call__(self, dispatcher):\n        dispatcher.register_callback(\n            callback=self.handle_event,\n            filter={\n                'event_type': 'Shotgun_PublishEvent_Change',\n                'attribute_name': 
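# fire only when sg_version changes; handle_event() re-checks\n                # event.meta['new_value'] so publishes that are still being created\n                # are skipped\n                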
'sg_version',\n },\n **self._dispatcher_kwargs\n )\n\n def handle_event(self, event):\n\n # Must be setting it to a non-zero version.\n # NOTE: We MUST check the meta for this, otherwise we are liable to\n # schedule this job multiple times as the `entity` field is always\n # up to date.\n version = event.meta.get('new_value')\n if not version:\n self.log.debug('Publish is still being created; skipping')\n return\n\n # Make a clean one every time so that we don't slowly fill up memory.\n sg = Session()\n \n publish = sg.merge(event)['entity']\n if not publish:\n self.log.warning('Publish appears to have been deleted; skipping')\n return\n \n _, login, step_code, step_name, publish_type = publish.fetch((\n 'code',\n 'created_by.HumanUser.login',\n 'sg_link.Task.step.Step.code',\n 'sg_link.Task.step.Step.short_name',\n 'sg_type',\n ))\n steps = set((step_code.title(), step_name.title()))\n\n related = None\n\n for src_types, dst_types, src_steps, func, args, kwargs in self._funcs:\n\n # Make sure it is the right type.\n if publish_type not in src_types:\n self.log.debug('sg_type %r is not %s; skipping' % (publish_type, '/'.join(sorted(src_types))))\n continue\n\n # Make sure it is from the correct step.\n # We've title-cased all step names at this point, and are comparing\n # against both the step code and name, so this should be forgiving.\n if src_steps and not src_steps.intersection(steps):\n self.log.debug('step %s is not %s; skipping' % ('/'.join(sorted(steps)), '/'.join(sorted(src_steps))))\n continue\n\n # Make sure we haven't already derived it, or are in progress of\n # deriving it.\n if related is None:\n related = get_related_publishes(publish, fields=['code', 'sg_type'])\n skip = False\n for x in related:\n if x['sg_type'] in dst_types:\n self.log.warning('Derived %s publish %d \"%s\", already exists; skipping' % (\n x['sg_type'], x['id'], x['code'],\n ))\n skip = True\n if skip:\n continue\n\n # If it is a string, dispatch it to Qube.\n if isinstance(func, basestring):\n\n # Run it as the correct user; assume their Shotgun login matches.\n login = publish.get('created_by.HumanUser.login')\n user = login.split('@')[0] if login else None\n\n qube_args = [publish.minimal]\n qube_args.extend(args or ())\n\n qube_name = 'Republish %s %s \"%s\" as %s' % (\n publish['sg_type'], publish['id'], publish['code'],\n '/'.join(sorted(dst_types))\n )\n\n import qbfutures\n future = qbfutures.submit_ext(func,\n args=qube_args,\n kwargs=kwargs or {},\n name=qube_name,\n user=user,\n priority=8000,\n )\n\n self.log.info('Qube job %d: %s' % (future.job_id, qube_name))\n\n else:\n func(publish, *(args or ()), **(kwargs or {}))\n\n # Only run the first one!\n return\n\n\n","sub_path":"sgpublish/republishes.py","file_name":"republishes.py","file_ext":"py","file_size_in_byte":6424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"578011702","text":"\"\"\"\nPart2Project -- utils.py\n\nCopyright Apr 2018 [Tudor Mihai Avram]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the 
License.\n\n\"\"\"\njobsHandler = None\ncacheHandler = None\nfeatureExtractor = None\nmodel = None\n\nCLASSIFIABLE_NODES = [\n 'Process',\n 'File',\n 'Pipe'\n]\n\nUNCLASSIFIABLE_NODES = [\n 'Machine',\n 'Pipe'\n]\n\n","sub_path":"server/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"197465056","text":"import asyncio\nimport sys\nimport traceback\nimport inspect\n\nfrom imjoyUtils import Promise\nasync def task_worker(self, async_q, logger, abort=None):\n while True:\n if abort is not None and abort.is_set():\n break\n d = await async_q.get()\n try:\n if d is None:\n continue\n if d['type'] == 'getInterface':\n self._sendInterface()\n elif d['type'] == 'setInterface':\n self._setRemote(d['api'])\n self.emit({'type':'interfaceSetAsRemote'})\n if not self._init:\n self.emit({'type':'getInterface'})\n self._init = True\n elif d['type'] == 'interfaceSetAsRemote':\n #self.emit({'type':'getInterface'})\n self._remote_set = True\n elif d['type'] == 'execute':\n if not self._executed:\n try:\n type = d['code']['type']\n content = d['code']['content']\n exec(content, self._local)\n self._executed = True\n self.emit({'type':'executeSuccess'})\n except Exception as e:\n logger.info('error during execution: %s', traceback.format_exc())\n self.emit({'type':'executeFailure', 'error': repr(e)})\n elif d['type'] == 'method':\n if d['name'] in self._interface:\n if 'promise' in d:\n try:\n resolve, reject = self._unwrap(d['promise'], False)\n method = self._interface[d['name']]\n args = self._unwrap(d['args'], True)\n # args.append({'id': self.id})\n result = method(*args)\n if result is not None and inspect.isawaitable(result):\n result = await result\n resolve(result)\n except Exception as e:\n logger.error('error in method %s: %s', d['name'], traceback.format_exc())\n reject(e)\n else:\n try:\n method = self._interface[d['name']]\n args = self._unwrap(d['args'], True)\n # args.append({'id': self.id})\n result = method(*args)\n if result is not None and inspect.isawaitable(result):\n await result\n except Exception as e:\n logger.error('error in method %s: %s', d['name'], traceback.format_exc())\n else:\n raise Exception('method '+d['name'] +' is not found.')\n elif d['type'] == 'callback':\n if 'promise' in d:\n try:\n resolve, reject = self._unwrap(d['promise'], False)\n method = self._store.fetch(d['id'])[d['num']]\n args = self._unwrap(d['args'], True)\n # args.append({'id': self.id})\n result = method(*args)\n if result is not None and inspect.isawaitable(result):\n result = await result\n resolve(result)\n except Exception as e:\n logger.error('error in method %s: %s', d['id'], traceback.format_exc())\n reject(e)\n else:\n try:\n method = self._store.fetch(d['id'])[d['num']]\n args = self._unwrap(d['args'], True)\n # args.append({'id': self.id})\n result = method(*args)\n if result is not None and inspect.isawaitable(result):\n await reresultt\n except Exception as e:\n logger.error('error in method %s: %s', d['id'], traceback.format_exc())\n except Exception as e:\n print('error occured in the loop.', e)\n finally:\n sys.stdout.flush()\n async_q.task_done()\n\n\nclass FuturePromise(Promise, asyncio.Future):\n def __init__(self, pfunc, loop):\n self.loop = loop\n Promise.__init__(self, pfunc)\n asyncio.Future.__init__(self)\n\n def resolve(self, result):\n if self._resolve_handler or self._finally_handler:\n Promise.resolve(self, result)\n else:\n 
self.loop.call_soon(self.set_result, result)\n\n\n    def reject(self, error):\n        if self._catch_handler or self._finally_handler:\n            Promise.reject(self, error)\n        else:\n            if error:\n                self.loop.call_soon(self.set_exception, Exception(str(error)))\n            else:\n                self.loop.call_soon(self.set_exception, Exception())\n","sub_path":"imjoy/imjoyUtils3.py","file_name":"imjoyUtils3.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"111455254","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('web', '0004_auto_20150630_1308'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='events',\n            name='registered_players',\n            field=models.ManyToManyField(null=True, related_name='regitered_players', blank=True, verbose_name='Regitered users', to=settings.AUTH_USER_MODEL),\n        ),\n        migrations.AlterField(\n            model_name='events',\n            name='registered_teams',\n            field=models.ManyToManyField(null=True, to='web.Teams', blank=True, verbose_name='Registered teams'),\n        ),\n    ]\n","sub_path":"web/migrations/0005_auto_20150630_1346.py","file_name":"0005_auto_20150630_1346.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"539910741","text":"# -*- coding: utf-8 -*-\n\"\"\"Create several entities by extrusion, check that the expected\nsub-entities are returned and the resulting mesh is correct.\n\"\"\"\nimport pygmsh\nimport numpy as np\n\n\ndef test():\n    kernels = [pygmsh.built_in, pygmsh.opencascade]\n    for kernel in kernels:\n        geom = kernel.Geometry()\n        p = geom.add_point([0, 0, 0], 1)\n        p_top, _, _ = geom.extrude(p, translation_axis=[1, 0, 0])\n\n        # The mesh should now contain exactly two points,\n        # the second one should be where the translation pointed.\n        points, _, _, _, _ = pygmsh.generate_mesh(geom)\n        assert len(points) == 2\n        assert np.array_equal(points[-1], [1, 0, 0])\n\n        # Check that the top entity (a PointBase) can be extruded correctly\n        # again.\n        _, _, _ = geom.extrude(p_top, translation_axis=[1, 0, 0])\n        points, _, _, _, _ = pygmsh.generate_mesh(geom)\n        assert len(points) == 3\n        assert np.array_equal(points[-1], [2, 0, 0])\n\n        # Set up new geometry with one line.\n        geom = kernel.Geometry()\n        p1 = geom.add_point([0, 0, 0], 1)\n        p2 = geom.add_point([1, 0, 0], 1)\n        line = geom.add_line(p1, p2)\n\n        l_top, _, _ = geom.extrude(line, [0, 1, 0])\n        points, _, _, _, _ = pygmsh.generate_mesh(geom)\n        assert len(points) == 5\n        assert np.array_equal(points[-2], [1, 1, 0])\n\n        # Check again for top entity (a LineBase).\n        _, _, _ = geom.extrude(l_top, [0, 1, 0])\n        points, _, _, _, _ = pygmsh.generate_mesh(geom)\n        assert len(points) == 8\n        assert np.array_equal(points[-3], [1, 2, 0])\n","sub_path":"test/test_extrusion_entities.py","file_name":"test_extrusion_entities.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"531545357","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/mdipierro/make_web2py/web2py/gluon/globals.py\n# Compiled at: 2013-10-14 11:16:25\n\"\"\"\nThis file is part of the web2py Web Framework\nCopyrighted by Massimo Di Pierro 
\nLicense: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\nContains the classes for the global used variables:\n\n- Request\n- Response\n- Session\n\n\"\"\"\nfrom gluon.storage import Storage, List\nfrom gluon.streamer import streamer, stream_file_or_304_or_206, DEFAULT_CHUNK_SIZE\nfrom gluon.xmlrpc import handler\nfrom gluon.contenttype import contenttype\nfrom gluon.html import xmlescape, TABLE, TR, PRE, URL\nfrom gluon.http import HTTP, redirect\nfrom gluon.fileutils import up\nfrom gluon.serializers import json, custom_json\nimport gluon.settings as settings\nfrom gluon.utils import web2py_uuid, secure_dumps, secure_loads\nfrom gluon.settings import global_settings\nimport hashlib, portalocker, cPickle\nfrom pickle import Pickler, MARK, DICT, EMPTY_DICT\nfrom types import DictionaryType\nimport cStringIO, datetime, re, Cookie, os, sys, traceback, threading, cgi, copy, tempfile\nfrom gluon.cache import CacheInRam\nfrom gluon.fileutils import copystream\nFMT = '%a, %d-%b-%Y %H:%M:%S PST'\nPAST = 'Sat, 1-Jan-1971 00:00:00'\nFUTURE = 'Tue, 1-Dec-2999 23:59:59'\ntry:\n from gluon.contrib.minify import minify\n have_minify = True\nexcept ImportError:\n have_minify = False\n\ntry:\n import simplejson as sj\nexcept:\n try:\n import json as sj\n except:\n import gluon.contrib.simplejson as sj\n\nregex_session_id = re.compile('^([\\\\w\\\\-]+/)?[\\\\w\\\\-\\\\.]+$')\n__all__ = [\n 'Request', 'Response', 'Session']\ncurrent = threading.local()\ncss_template = ''\njs_template = ''\ncoffee_template = ''\ntypescript_template = ''\nless_template = ''\ncss_inline = ''\njs_inline = ''\n\nclass SortingPickler(Pickler):\n\n def save_dict(self, obj):\n self.write(EMPTY_DICT if self.bin else MARK + DICT)\n self.memoize(obj)\n self._batch_setitems([ (key, obj[key]) for key in sorted(obj) ])\n\n\nSortingPickler.dispatch = copy.copy(Pickler.dispatch)\nSortingPickler.dispatch[DictionaryType] = SortingPickler.save_dict\n\ndef sorting_dumps(obj, protocol=None):\n file = cStringIO.StringIO()\n SortingPickler(file, protocol).dump(obj)\n return file.getvalue()\n\n\ndef copystream_progress(request, chunk_size=10 ** 5):\n \"\"\"\n copies request.env.wsgi_input into request.body\n and stores progress upload status in cache_ram\n X-Progress-ID:length and X-Progress-ID:uploaded\n \"\"\"\n env = request.env\n if not env.get('CONTENT_LENGTH', None):\n return cStringIO.StringIO()\n else:\n source = env['wsgi.input']\n try:\n size = int(env['CONTENT_LENGTH'])\n except ValueError:\n raise HTTP(400, 'Invalid Content-Length header')\n\n try:\n dest = tempfile.NamedTemporaryFile()\n except NotImplementedError:\n dest = tempfile.TemporaryFile()\n\n if 'X-Progress-ID' not in request.get_vars:\n copystream(source, dest, size, chunk_size)\n return dest\n cache_key = 'X-Progress-ID:' + request.get_vars['X-Progress-ID']\n cache_ram = CacheInRam(request)\n cache_ram(cache_key + ':length', lambda : size, 0)\n cache_ram(cache_key + ':uploaded', lambda : 0, 0)\n while size > 0:\n if size < chunk_size:\n data = source.read(size)\n cache_ram.increment(cache_key + ':uploaded', size)\n else:\n data = source.read(chunk_size)\n cache_ram.increment(cache_key + ':uploaded', chunk_size)\n length = len(data)\n if length > size:\n data, length = data[:size], size\n size -= length\n if length == 0:\n break\n dest.write(data)\n if length < chunk_size:\n break\n\n dest.seek(0)\n cache_ram(cache_key + ':length', None)\n cache_ram(cache_key + ':uploaded', None)\n return dest\n\n\nclass Request(Storage):\n \"\"\"\n defines the request object and the 
default values of its members\n\n - env: environment variables, by gluon.main.wsgibase()\n - cookies\n - get_vars\n - post_vars\n - vars\n - folder\n - application\n - function\n - args\n - extension\n - now: datetime.datetime.today()\n - restful()\n \"\"\"\n\n def __init__(self, env):\n Storage.__init__(self)\n self.env = Storage(env)\n self.env.web2py_path = global_settings.applications_parent\n self.env.update(global_settings)\n self.cookies = Cookie.SimpleCookie()\n self._get_vars = None\n self._post_vars = None\n self._vars = None\n self._body = None\n self.folder = None\n self.application = None\n self.function = None\n self.args = List()\n self.extension = 'html'\n self.now = datetime.datetime.now()\n self.utcnow = datetime.datetime.utcnow()\n self.is_restful = False\n self.is_https = False\n self.is_local = False\n self.global_settings = settings.global_settings\n return\n\n def parse_get_vars(self):\n query_string = self.env.get('QUERY_STRING', '')\n dget = cgi.parse_qs(query_string, keep_blank_values=1)\n get_vars = self._get_vars = Storage(dget)\n for key, value in get_vars.iteritems():\n if isinstance(value, list) and len(value) == 1:\n get_vars[key] = value[0]\n\n def parse_post_vars(self):\n env = self.env\n post_vars = self._post_vars = Storage()\n body = self.body\n is_json = env.get('content_type', '')[:16] == 'application/json'\n if is_json:\n try:\n json_vars = sj.load(body)\n except:\n json_vars = {}\n\n if isinstance(json_vars, dict):\n post_vars.update(json_vars)\n body.seek(0)\n if body and not is_json and env.request_method in ('POST', 'PUT', 'DELETE',\n 'BOTH'):\n query_string = env.pop('QUERY_STRING', None)\n dpost = cgi.FieldStorage(fp=body, environ=env, keep_blank_values=1)\n try:\n post_vars.update(dpost)\n except:\n pass\n\n if query_string is not None:\n env['QUERY_STRING'] = query_string\n body.seek(0)\n\n def listify(a):\n return not isinstance(a, list) and [a] or a\n\n try:\n keys = sorted(dpost)\n except TypeError:\n keys = []\n\n for key in keys:\n if key is None:\n continue\n dpk = dpost[key]\n pvalue = listify([ _dpk if _dpk.filename else _dpk.value for _dpk in dpk ] if isinstance(dpk, list) else dpk if dpk.filename else dpk.value)\n if len(pvalue):\n post_vars[key] = len(pvalue) > 1 and pvalue or pvalue[0]\n\n return\n\n @property\n def body(self):\n if self._body is None:\n try:\n self._body = copystream_progress(self)\n except IOError:\n raise HTTP(400, 'Bad Request - HTTP body is incomplete')\n\n return self._body\n\n def parse_all_vars(self):\n self._vars = copy.copy(self.get_vars)\n for key, value in self.post_vars.iteritems():\n if key not in self._vars:\n self._vars[key] = value\n else:\n if not isinstance(self._vars[key], list):\n self._vars[key] = [\n self._vars[key]]\n self._vars[key] += value if isinstance(value, list) else [value]\n\n @property\n def get_vars(self):\n \"\"\"lazily parse the query string into get_vars\"\"\"\n if self._get_vars is None:\n self.parse_get_vars()\n return self._get_vars\n\n @property\n def post_vars(self):\n \"\"\"lazily parse the body into post_vars\"\"\"\n if self._post_vars is None:\n self.parse_post_vars()\n return self._post_vars\n\n @property\n def vars(self):\n \"\"\"lazily parse all get_vars and post_vars to fill vars\"\"\"\n if self._vars is None:\n self.parse_all_vars()\n return self._vars\n\n def compute_uuid(self):\n self.uuid = '%s/%s.%s.%s' % (\n self.application,\n self.client.replace(':', '_'),\n self.now.strftime('%Y-%m-%d.%H-%M-%S'),\n web2py_uuid())\n return self.uuid\n\n def 
user_agent(self):\n from gluon.contrib import user_agent_parser\n session = current.session\n user_agent = session._user_agent or user_agent_parser.detect(self.env.http_user_agent)\n if session:\n session._user_agent = user_agent\n user_agent = Storage(user_agent)\n for key, value in user_agent.items():\n if isinstance(value, dict):\n user_agent[key] = Storage(value)\n\n return user_agent\n\n def requires_https(self):\n \"\"\"\n If request comes in over HTTP, redirect it to HTTPS\n and secure the session.\n \"\"\"\n cmd_opts = global_settings.cmd_options\n if cmd_opts and (cmd_opts.shell or cmd_opts.scheduler) or global_settings.cronjob or self.is_https:\n current.session.secure()\n else:\n current.session.forget()\n redirect(URL(scheme='https', args=self.args, vars=self.vars))\n\n def restful(self):\n\n def wrapper(action, self=self):\n\n def f(_action=action, _self=self, *a, **b):\n self.is_restful = True\n method = _self.env.request_method\n if len(_self.args) and '.' in _self.args[(-1)]:\n _self.args[-1], _, self.extension = self.args[(-1)].rpartition('.')\n current.response.headers['Content-Type'] = contenttype('.' + _self.extension.lower())\n rest_action = _action().get(method, None)\n if not (rest_action and method == method.upper() and callable(rest_action)):\n raise HTTP(400, 'method not supported')\n try:\n return rest_action(*_self.args, **getattr(_self, 'vars', {}))\n except TypeError as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n if len(traceback.extract_tb(exc_traceback)) == 1:\n raise HTTP(400, 'invalid arguments')\n else:\n raise e\n\n return\n\n f.__doc__ = action.__doc__\n f.__name__ = action.__name__\n return f\n\n return wrapper\n\n\nclass Response(Storage):\n \"\"\"\n defines the response object and the default values of its members\n response.write( ) can be used to write in the output html\n \"\"\"\n\n def __init__(self):\n Storage.__init__(self)\n self.status = 200\n self.headers = dict()\n self.headers['X-Powered-By'] = 'web2py'\n self.body = cStringIO.StringIO()\n self.session_id = None\n self.cookies = Cookie.SimpleCookie()\n self.postprocessing = []\n self.flash = ''\n self.meta = Storage()\n self.menu = []\n self.files = []\n self.generic_patterns = []\n self.delimiters = ('{{', '}}')\n self._vars = None\n self._caller = lambda f: f()\n self._view_environment = None\n self._custom_commit = None\n self._custom_rollback = None\n return\n\n def write(self, data, escape=True):\n if not escape:\n self.body.write(str(data))\n else:\n self.body.write(xmlescape(data))\n\n def render(self, *a, **b):\n from compileapp import run_view_in\n if len(a) > 2:\n raise SyntaxError('Response.render can be called with two arguments, at most')\n elif len(a) == 2:\n view, self._vars = a[0], a[1]\n elif len(a) == 1 and isinstance(a[0], str):\n view, self._vars = a[0], {}\n elif len(a) == 1 and hasattr(a[0], 'read') and callable(a[0].read):\n view, self._vars = a[0], {}\n elif len(a) == 1 and isinstance(a[0], dict):\n view, self._vars = None, a[0]\n else:\n view, self._vars = None, {}\n self._vars.update(b)\n self._view_environment.update(self._vars)\n if view:\n import cStringIO\n obody, oview = self.body, self.view\n self.body, self.view = cStringIO.StringIO(), view\n run_view_in(self._view_environment)\n page = self.body.getvalue()\n self.body.close()\n self.body, self.view = obody, oview\n else:\n run_view_in(self._view_environment)\n page = self.body.getvalue()\n return page\n\n def include_meta(self):\n s = ('\\n').join('<meta name=\"%s\" content=\"%s\" />\\n' % (k, xmlescape(v)) for k, v in 
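For context, a sketch of how the restful() wrapper above is meant to be used in a controller: the decorated action returns a dict of per-method handlers (here via locals()) and the wrapper dispatches on request_method. This follows the standard web2py pattern; the action body itself is invented:

@request.restful()
def api():
    def GET(*args, **vars):
        return dict(args=args, vars=vars)
    def POST(*args, **vars):
        return dict(created=True)
    return locals()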
(self.meta or {}).iteritems())\n self.write(s, escape=False)\n\n def include_files(self, extensions=None):\n \"\"\"\n Caching method for writing out files.\n By default, caches in ram for 5 minutes. To change,\n response.cache_includes = (cache_method, time_expire).\n Example: (cache.disk, 60) # caches to disk for 1 minute.\n \"\"\"\n from gluon import URL\n files = []\n has_js = has_css = False\n for item in self.files:\n if extensions and item.split('.')[(-1)] not in extensions:\n continue\n if item in files:\n continue\n if item.endswith('.js'):\n has_js = True\n if item.endswith('.css'):\n has_css = True\n files.append(item)\n\n if have_minify and (self.optimize_css and has_css or self.optimize_js and has_js):\n key = hashlib.md5(repr(files)).hexdigest()\n cache = self.cache_includes or (current.cache.ram, 300)\n\n def call_minify(files=files):\n return minify.minify(files, URL('static', 'temp'), current.request.folder, self.optimize_css, self.optimize_js)\n\n if cache:\n cache_model, time_expire = cache\n files = cache_model('response.files.minified/' + key, call_minify, time_expire)\n else:\n files = call_minify()\n s = ''\n for item in files:\n if isinstance(item, str):\n f = item.lower().split('?')[0]\n if self.static_version:\n item = item.replace('/static/', '/static/_%s/' % self.static_version, 1)\n if f.endswith('.css'):\n s += css_template % item\n elif f.endswith('.js'):\n s += js_template % item\n elif f.endswith('.coffee'):\n s += coffee_template % item\n elif f.endswith('.ts'):\n s += typescript_template % item\n elif f.endswith('.less'):\n s += less_template % item\n elif isinstance(item, (list, tuple)):\n f = item[0]\n if f == 'css:inline':\n s += css_inline % item[1]\n elif f == 'js:inline':\n s += js_inline % item[1]\n\n self.write(s, escape=False)\n\n def stream(self, stream, chunk_size=DEFAULT_CHUNK_SIZE, request=None, attachment=False, filename=None):\n \"\"\"\n if a controller function::\n\n return response.stream(file, 100)\n\n the file content will be streamed at 100 bytes at the time\n\n Optional kwargs:\n (for custom stream calls)\n attachment=True # Send as attachment. 
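The streamer() helper imported at the top of this module yields a file in fixed-size chunks; a stand-alone sketch of that idea (not gluon's implementation, chunk size invented):

def iter_chunks(fileobj, chunk_size=65536):
    # yield successive chunks until EOF, then close the file
    try:
        while True:
            chunk = fileobj.read(chunk_size)
            if not chunk:
                break
            yield chunk
    finally:
        fileobj.close()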
Usually creates a\n # pop-up download window on browsers\n filename=None # The name for the attachment\n\n Note: for using the stream name (filename) with attachments\n the option must be explicitly set as function parameter(will\n default to the last request argument otherwise)\n \"\"\"\n headers = self.headers\n keys = [ item.lower() for item in headers ]\n if attachment:\n if filename is None:\n attname = ''\n else:\n attname = filename\n headers['Content-Disposition'] = 'attachment;filename=%s' % attname\n if not request:\n request = current.request\n if isinstance(stream, (str, unicode)):\n stream_file_or_304_or_206(stream, chunk_size=chunk_size, request=request, headers=headers, status=self.status)\n if hasattr(stream, 'name'):\n filename = stream.name\n if filename and 'content-type' not in keys:\n headers['Content-Type'] = contenttype(filename)\n if filename and 'content-length' not in keys:\n try:\n headers['Content-Length'] = os.path.getsize(filename)\n except OSError:\n pass\n\n env = request.env\n if request.is_https and isinstance(env.http_user_agent, str) and not re.search('Opera', env.http_user_agent) and re.search('MSIE [5-8][^0-9]', env.http_user_agent):\n headers['Pragma'] = 'cache'\n headers['Cache-Control'] = 'private'\n if request and env.web2py_use_wsgi_file_wrapper:\n wrapped = env.wsgi_file_wrapper(stream, chunk_size)\n else:\n wrapped = streamer(stream, chunk_size=chunk_size)\n return wrapped\n\n def download(self, request, db, chunk_size=DEFAULT_CHUNK_SIZE, attachment=True, download_filename=None):\n \"\"\"\n example of usage in controller::\n\n def download():\n return response.download(request, db)\n\n downloads from http://..../download/filename\n \"\"\"\n current.session.forget(current.response)\n if not request.args:\n raise HTTP(404)\n name = request.args[(-1)]\n items = re.compile('(?P<table>.*?)\\\\.(?P<field>.*?)\\\\..*').match(name)\n if not items:\n raise HTTP(404)\n t, f = items.group('table'), items.group('field')\n try:\n field = db[t][f]\n except AttributeError:\n raise HTTP(404)\n\n try:\n filename, stream = field.retrieve(name, nameonly=True)\n except IOError:\n raise HTTP(404)\n\n headers = self.headers\n headers['Content-Type'] = contenttype(name)\n if download_filename == None:\n download_filename = filename\n if attachment:\n headers['Content-Disposition'] = 'attachment; filename=\"%s\"' % download_filename.replace('\"', '\"')\n return self.stream(stream, chunk_size=chunk_size, request=request)\n\n def json(self, data, default=None):\n return json(data, default=default or custom_json)\n\n def xmlrpc(self, request, methods):\n \"\"\"\n assuming::\n\n def add(a, b):\n return a+b\n\n if a controller function \"func\"::\n\n return response.xmlrpc(request, [add])\n\n the controller will be able to handle xmlrpc requests for\n the add function. 
Example::\n\n import xmlrpclib\n connection = xmlrpclib.ServerProxy(\n 'http://hostname/app/contr/func')\n print connection.add(3, 4)\n\n \"\"\"\n return handler(request, self, methods)\n\n def toolbar(self):\n from html import DIV, SCRIPT, BEAUTIFY, TAG, URL, A\n BUTTON = TAG.button\n admin = URL('admin', 'default', 'design', extension='html', args=current.request.application)\n from gluon.dal import DAL\n dbstats = []\n dbtables = {}\n infos = DAL.get_instances()\n for k, v in infos.iteritems():\n dbstats.append(TABLE(*[ TR(PRE(row[0]), '%.2fms' % (row[1] * 1000)) for row in v['dbstats']\n ]))\n dbtables[k] = dict(defined=v['dbtables']['defined'] or '[no defined tables]', lazy=v['dbtables']['lazy'] or '[no lazy tables]')\n\n u = web2py_uuid()\n backtotop = A('Back to top', _href='#totop-%s' % u)\n request = copy.copy(current.request)\n request.update(vars=current.request.vars, get_vars=current.request.get_vars, post_vars=current.request.post_vars)\n return DIV(BUTTON('design', _onclick=\"document.location='%s'\" % admin), BUTTON('request', _onclick=\"jQuery('#request-%s').slideToggle()\" % u), BUTTON('response', _onclick=\"jQuery('#response-%s').slideToggle()\" % u), BUTTON('session', _onclick=\"jQuery('#session-%s').slideToggle()\" % u), BUTTON('db tables', _onclick=\"jQuery('#db-tables-%s').slideToggle()\" % u), BUTTON('db stats', _onclick=\"jQuery('#db-stats-%s').slideToggle()\" % u), DIV(BEAUTIFY(request), backtotop, _class='hidden', _id='request-%s' % u), DIV(BEAUTIFY(current.session), backtotop, _class='hidden', _id='session-%s' % u), DIV(BEAUTIFY(current.response), backtotop, _class='hidden', _id='response-%s' % u), DIV(BEAUTIFY(dbtables), backtotop, _class='hidden', _id='db-tables-%s' % u), DIV(BEAUTIFY(dbstats), backtotop, _class='hidden', _id='db-stats-%s' % u), SCRIPT(\"jQuery('.hidden').hide()\"), _id='totop-%s' % u)\n\n\nclass Session(Storage):\n \"\"\"\n defines the session object and the default values of its members (None)\n\n response.session_storage_type : 'file', 'db', or 'cookie'\n response.session_cookie_compression_level :\n response.session_cookie_expires : cookie expiration\n response.session_cookie_key : for encrypted sessions in cookies\n response.session_id : a number or None if no session\n response.session_id_name :\n response.session_locked :\n response.session_masterapp :\n response.session_new : a new session obj is being created\n response.session_hash : hash of the pickled loaded session\n response.session_pickled : picked session\n\n if session in cookie:\n\n response.session_data_name : name of the cookie for session data\n\n if session in db:\n\n response.session_db_record_id :\n response.session_db_table :\n response.session_db_unique_key :\n\n if session in file:\n\n response.session_file :\n response.session_filename :\n \"\"\"\n\n def connect(self, request=None, response=None, db=None, tablename='web2py_session', masterapp=None, migrate=True, separate=None, check_client=False, cookie_key=None, cookie_expires=None, compression_level=None):\n \"\"\"\n separate can be separate=lambda(session_name): session_name[-2:]\n and it is used to determine a session prefix.\n separate can be True and it is set to session_name[-2:]\n \"\"\"\n request = request or current.request\n response = response or current.response\n masterapp = masterapp or request.application\n cookies = request.cookies\n self._unlock(response)\n response.session_masterapp = masterapp\n response.session_id_name = 'session_id_%s' % masterapp.lower()\n response.session_data_name = 
'session_data_%s' % masterapp.lower()\n response.session_cookie_expires = cookie_expires\n response.session_client = str(request.client).replace(':', '.')\n response.session_cookie_key = cookie_key\n response.session_cookie_compression_level = compression_level\n try:\n old_session_id = cookies[response.session_id_name].value\n except KeyError:\n old_session_id = None\n\n response.session_id = old_session_id\n if cookie_key:\n response.session_storage_type = 'cookie'\n elif db:\n response.session_storage_type = 'db'\n else:\n response.session_storage_type = 'file'\n if global_settings.db_sessions is True or masterapp in global_settings.db_sessions:\n return\n if response.session_storage_type == 'cookie':\n if response.session_data_name in cookies:\n session_cookie_data = cookies[response.session_data_name].value\n else:\n session_cookie_data = None\n if session_cookie_data:\n data = secure_loads(session_cookie_data, cookie_key, compression_level=compression_level)\n if data:\n self.update(data)\n response.session_id = True\n elif response.session_storage_type == 'file':\n response.session_new = False\n response.session_file = None\n if response.session_id:\n if not regex_session_id.match(response.session_id):\n response.session_id = None\n else:\n response.session_filename = os.path.join(up(request.folder), masterapp, 'sessions', response.session_id)\n try:\n response.session_file = open(response.session_filename, 'rb+')\n portalocker.lock(response.session_file, portalocker.LOCK_EX)\n response.session_locked = True\n self.update(cPickle.load(response.session_file))\n response.session_file.seek(0)\n oc = response.session_filename.split('/')[(-1)].split('-')[0]\n if check_client and response.session_client != oc:\n raise Exception('cookie attack')\n except:\n response.session_id = None\n\n if not response.session_id:\n uuid = web2py_uuid()\n response.session_id = '%s-%s' % (response.session_client, uuid)\n separate = separate and (lambda session_name: session_name[-2:])\n if separate:\n prefix = separate(response.session_id)\n response.session_id = '%s/%s' % (prefix, response.session_id)\n response.session_filename = os.path.join(up(request.folder), masterapp, 'sessions', response.session_id)\n response.session_new = True\n elif response.session_storage_type == 'db':\n if global_settings.db_sessions is not True:\n global_settings.db_sessions.add(masterapp)\n if response.session_file:\n self._close(response)\n if settings.global_settings.web2py_runtime_gae:\n request.tickets_db = db\n table_migrate = masterapp == request.application\n tname = tablename + '_' + masterapp\n table = db.get(tname, None)\n Field = db.Field\n if table is None:\n db.define_table(tname, Field('locked', 'boolean', default=False), Field('client_ip', length=64), Field('created_datetime', 'datetime', default=request.now), Field('modified_datetime', 'datetime'), Field('unique_key', length=64), Field('session_data', 'blob'), migrate=table_migrate)\n table = db[tname]\n response.session_db_table = table\n if response.session_id:\n try:\n record_id, unique_key = response.session_id.split(':')\n record_id = long(record_id)\n except (TypeError, ValueError):\n record_id = None\n\n if record_id:\n row = table(record_id)\n if row:\n session_data = cPickle.loads(row.session_data)\n self.update(session_data)\n else:\n record_id = None\n if record_id:\n response.session_id = '%s:%s' % (record_id, unique_key)\n response.session_db_unique_key = unique_key\n response.session_db_record_id = record_id\n else:\n response.session_id = 
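How connect() selects a backend, as a usage sketch (standard web2py patterns; the secret is a placeholder): passing cookie_key selects 'cookie' storage, passing a DAL instance selects 'db', and with neither it falls back to 'file':

session.connect(request, response, cookie_key='replace-with-a-long-random-secret')
# session.connect(request, response, db=db)   # db-backed sessions
# session.connect(request, response)          # default: file-based sessions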
None\n response.session_new = True\n else:\n response.session_new = True\n if isinstance(response.session_id, str):\n response.cookies[response.session_id_name] = response.session_id\n response.cookies[response.session_id_name]['path'] = '/'\n if cookie_expires:\n response.cookies[response.session_id_name]['expires'] = cookie_expires.strftime(FMT)\n session_pickled = cPickle.dumps(self)\n response.session_hash = hashlib.md5(session_pickled).hexdigest()\n if self.flash:\n response.flash, self.flash = self.flash, None\n return\n\n def renew(self, clear_session=False):\n if clear_session:\n self.clear()\n request = current.request\n response = current.response\n session = response.session\n masterapp = response.session_masterapp\n cookies = request.cookies\n if response.session_storage_type == 'cookie':\n return\n else:\n if response.session_storage_type == 'file':\n self._close(response)\n uuid = web2py_uuid()\n response.session_id = '%s-%s' % (response.session_client, uuid)\n separate = (lambda s: s[-2:]) if session and response.session_id[2:3] == '/' else None\n if separate:\n prefix = separate(response.session_id)\n response.session_id = '%s/%s' % (\n prefix, response.session_id)\n response.session_filename = os.path.join(up(request.folder), masterapp, 'sessions', response.session_id)\n response.session_new = True\n elif response.session_storage_type == 'db':\n table = response.session_db_table\n if response.session_file:\n self._close(response)\n if response.session_new:\n return\n if response.session_id is None:\n return\n record_id, sep, unique_key = response.session_id.partition(':')\n if record_id.isdigit() and long(record_id) > 0:\n new_unique_key = web2py_uuid()\n row = table(record_id)\n if row and row.unique_key == unique_key:\n table._db(table.id == record_id).update(unique_key=new_unique_key)\n else:\n record_id = None\n if record_id:\n response.session_id = '%s:%s' % (record_id, unique_key)\n response.session_db_record_id = record_id\n response.session_db_unique_key = new_unique_key\n else:\n response.session_new = True\n return\n\n def _fixup_before_save(self):\n response = current.response\n rcookies = response.cookies\n if self._forget and response.session_id_name in rcookies:\n del rcookies[response.session_id_name]\n elif self._secure and response.session_id_name in rcookies:\n rcookies[response.session_id_name]['secure'] = True\n\n def clear_session_cookies(sefl):\n request = current.request\n response = current.response\n session = response.session\n masterapp = response.session_masterapp\n cookies = request.cookies\n rcookies = response.cookies\n if response.session_data_name in cookies:\n rcookies[response.session_data_name] = 'expired'\n rcookies[response.session_data_name]['path'] = '/'\n rcookies[response.session_data_name]['expires'] = PAST\n if response.session_id_name in rcookies:\n del rcookies[response.session_id_name]\n\n def save_session_id_cookie(self):\n request = current.request\n response = current.response\n session = response.session\n masterapp = response.session_masterapp\n cookies = request.cookies\n rcookies = response.cookies\n if response.session_data_name in cookies:\n rcookies[response.session_data_name] = 'expired'\n rcookies[response.session_data_name]['path'] = '/'\n rcookies[response.session_data_name]['expires'] = PAST\n if response.session_id:\n rcookies[response.session_id_name] = response.session_id\n rcookies[response.session_id_name]['path'] = '/'\n expires = response.session_cookie_expires\n if isinstance(expires, 
datetime.datetime):\n expires = expires.strftime(FMT)\n if expires:\n rcookies[response.session_id_name]['expires'] = expires\n\n def clear(self):\n Storage.clear(self)\n\n def is_new(self):\n if self._start_timestamp:\n return False\n else:\n self._start_timestamp = datetime.datetime.today()\n return True\n\n def is_expired(self, seconds=3600):\n now = datetime.datetime.today()\n if not self._last_timestamp or self._last_timestamp + datetime.timedelta(seconds=seconds) > now:\n self._last_timestamp = now\n return False\n else:\n return True\n\n def secure(self):\n self._secure = True\n\n def forget(self, response=None):\n self._close(response)\n self._forget = True\n\n def _try_store_in_cookie(self, request, response):\n if self._forget or self._unchanged(response):\n self.save_session_id_cookie()\n return False\n else:\n name = response.session_data_name\n compression_level = response.session_cookie_compression_level\n value = secure_dumps(dict(self), response.session_cookie_key, compression_level=compression_level)\n rcookies = response.cookies\n rcookies.pop(name, None)\n rcookies[name] = value\n rcookies[name]['path'] = '/'\n expires = response.session_cookie_expires\n if isinstance(expires, datetime.datetime):\n expires = expires.strftime(FMT)\n if expires:\n rcookies[name]['expires'] = expires\n return True\n\n def _unchanged(self, response):\n session_pickled = cPickle.dumps(self)\n response.session_pickled = session_pickled\n session_hash = hashlib.md5(session_pickled).hexdigest()\n return response.session_hash == session_hash\n\n def _try_store_in_db(self, request, response):\n if not response.session_db_table or self._forget or self._unchanged(response) and not response.session_new:\n if not response.session_db_table and global_settings.db_sessions is not True and response.session_masterapp in global_settings.db_sessions:\n global_settings.db_sessions.remove(response.session_masterapp)\n self.save_session_id_cookie()\n return False\n else:\n table = response.session_db_table\n record_id = response.session_db_record_id\n if response.session_new:\n unique_key = web2py_uuid()\n else:\n unique_key = response.session_db_unique_key\n session_pickled = response.session_pickled or cPickle.dumps(self)\n dd = dict(locked=False, client_ip=response.session_client, modified_datetime=request.now, session_data=session_pickled, unique_key=unique_key)\n if record_id:\n if not table._db(table.id == record_id).update(**dd):\n record_id = None\n if not record_id:\n record_id = table.insert(**dd)\n response.session_id = '%s:%s' % (record_id, unique_key)\n response.session_db_unique_key = unique_key\n response.session_db_record_id = record_id\n self.save_session_id_cookie()\n return True\n\n def _try_store_in_cookie_or_file(self, request, response):\n if response.session_storage_type == 'file':\n return self._try_store_in_file(request, response)\n if response.session_storage_type == 'cookie':\n return self._try_store_in_cookie(request, response)\n\n def _try_store_in_file(self, request, response):\n try:\n if not response.session_id or self._forget or self._unchanged(response):\n self.save_session_id_cookie()\n return False\n if response.session_new or not response.session_file:\n session_folder = os.path.dirname(response.session_filename)\n if not os.path.exists(session_folder):\n os.mkdir(session_folder)\n response.session_file = open(response.session_filename, 'wb')\n portalocker.lock(response.session_file, portalocker.LOCK_EX)\n response.session_locked = True\n if response.session_file:\n 
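The _unchanged() check above boils down to fingerprinting a pickle of the session and comparing digests. A self-contained illustration (standard-library pickle here, where this module uses cPickle):

import hashlib
import pickle

def fingerprint(obj):
    # same idea as response.session_hash: hash the pickled state
    return hashlib.md5(pickle.dumps(obj)).hexdigest()

state = {'counter': 1}
before = fingerprint(state)
state['counter'] += 1
print(fingerprint(state) == before)  # False: the state changed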
session_pickled = response.session_pickled or cPickle.dumps(self)\n response.session_file.write(session_pickled)\n response.session_file.truncate()\n finally:\n self._close(response)\n\n self.save_session_id_cookie()\n return True\n\n def _unlock(self, response):\n if response and response.session_file and response.session_locked:\n try:\n portalocker.unlock(response.session_file)\n response.session_locked = False\n except:\n pass\n\n def _close(self, response):\n if response and response.session_file:\n self._unlock(response)\n try:\n response.session_file.close()\n del response.session_file\n except:\n pass","sub_path":"pycfiles/lback-0.8.1/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":38122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"628773863","text":"print(\"色の三原色はRed,Green,Blueです。\\n\")\n\n\n#\"3\" = \"yellow\"\n#\"5\" = \"magenta\"\n#\"6\" = \"cyan\"\n#\"7\" = \"white\"\nred = 1\ngreen = 2\nblue = 4\nnone = 0\n\ndef colorcomb(str1, str2, str3):\n\n ans = str(str1 + str2 + str3)\n if ans == \"3\":\n return \"yellow\"\n\n elif ans == \"5\":\n return \"magenta\"\n\n elif ans == \"6\":\n return \"cyan\"\n\n else:\n return \"white\"\n\n\nprint(\"blueとredを合わせると\" + colorcomb(blue,red,none) + \"です。\")\nprint(\"blueとgreenを合わせると\" + colorcomb(blue,green,none) + \"です。\")\nprint(\"redとgreenを合わせると\" + colorcomb(red,green,none) + \"です。\")\nprint(\"redとgreenとblueを合わせると\" + colorcomb(red,green,blue) + \"です。\")\n","sub_path":"color_comb.py","file_name":"color_comb.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"158140252","text":"from __future__ import print_function\nfrom builtins import range\n\nfrom binpacking.utilities import print_binsizes\n\nimport numpy as np\n\n\ndef fixed_count(d, N_bin, weight_pos=None, lower_bound=None, upper_bound=None):\n '''\n Distributes a list (or a dictionary or a tuple-list) of weights to a fixed number of\n bins, trying to keep a constant distribution of weight.\n INPUT:\n --- d: list containing weights,\n OR dictionary where each (key,value)-pair carries the weight as value,\n OR list of tuples where one entry in the tuple is the weight. 
The position of\n this weight has to be given in optional variable weight_pos\n \n optional:\n ~~~ weight_pos: int -- if d is a list of tuples, this integer number gives the position of the weight in a tuple\n ~~~ lower_bound: weights under this bound are not considered\n ~~~ upper_bound: weights exceeding this bound are not considered\n '''\n \n #define functions for the applying the bounds\n if lower_bound is not None and upper_bound is not None and lower_boundlower_bound,alower_bound)[0]\n elif upper_bound is not None:\n get_valid_weight_ndcs = lambda a: np.nonzero(a=upper_bound:\n raise Exception(\"lower_bound is greater or equal to upper_bound\")\n \n isdict = isinstance(d,dict)\n is_tuple_list = not isdict and hasattr(d[0],'__len__')\n \n if is_tuple_list:\n if weight_pos is not None:\n \n new_dict = { i: tup for i,tup in enumerate(d) }\n d = { i: tup[weight_pos] for i,tup in enumerate(d) }\n isdict = True\n else:\n raise Exception(\"no weight axis provided for tuple list\")\n \n \n if isdict:\n \n #get keys and values (weights)\n keys_vals = d.items()\n keys = np.array([ k for k,v in keys_vals ])\n vals = np.array([ v for k,v in keys_vals ])\n \n #sort weights decreasingly\n ndcs = np.argsort(vals)[::-1]\n \n weights = vals[ndcs]\n keys = keys[ndcs]\n \n bins = [ {} for i in range(N_bin) ]\n else:\n weights = np.sort(np.array(d))[::-1]\n bins = [ [] for i in range(N_bin) ]\n \n #find the valid indices\n valid_ndcs = get_valid_weight_ndcs(weights)\n weights = weights[valid_ndcs]\n \n if isdict:\n keys = keys[valid_ndcs]\n \n #the total volume is the sum of all weights\n V_total = weights.sum()\n \n #the first estimate of the maximum bin volume is\n #the total volume divided to all bins\n V_bin_max = V_total / float(N_bin)\n \n #prepare array containing the current weight of the bins\n weight_sum = np.zeros(N_bin)\n \n #iterate through the weight list, starting with heaviest\n for item,weight in enumerate(weights):\n \n if isdict:\n key = keys[item]\n \n #put next value in bin with lowest weight sum\n b = np.argmin(weight_sum)\n \n #calculate new weight of this bin\n new_weight_sum = weight_sum[b] + weight\n \n found_bin = False\n while not found_bin:\n \n #if this weight fits in the bin\n if new_weight_sum <= V_bin_max:\n \n #...put it in\n if isdict:\n bins[b][key] = weight\n else:\n bins[b].append(weight)\n \n #increase weight sum of the bin and continue with\n #next item\n weight_sum[b] = new_weight_sum\n found_bin = True\n \n else:\n #if not, increase the max volume by the sum of\n #the rest of the bins per bin\n V_bin_max += np.sum(weights[item:]) / float(N_bin)\n \n if not is_tuple_list:\n return bins\n else:\n new_bins = []\n for b in range(N_bin):\n new_bins.append([])\n for key in bins[b]:\n new_bins[b].append(new_dict[key])\n return new_bins\n","sub_path":"binpacking/fixed_count.py","file_name":"fixed_count.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"216281461","text":"# -*- coding: utf-8 -*-\n# @Author: Cheng JiangDong\n\n\nimport logging\nimport os.path\nimport time\n\n\nclass Logger:\n\n def __init__(self, logger):\n self.logger = logging.getLogger(logger)\n self.logger.setLevel(logging.DEBUG)\n\n # create a handler to write log file\n logTime = time.strftime(\"%Y%m%d%H%M\", time.localtime(time.time()))\n logPath = os.path.join(os.path.dirname(os.path.abspath('.')), r'logs')\n logName = logPath + logTime + r\".log\"\n fileHandler = logging.FileHandler(logName)\n 
fileHandler.setLevel(logging.INFO)\n\n # create another handler to send output to console\n consoleHandler = logging.StreamHandler()\n consoleHandler.setLevel(logging.INFO)\n\n # def handler output format\n fomatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fileHandler.setFormatter(fomatter)\n consoleHandler.setFormatter(fomatter)\n\n # add handler to logger\n self.logger.addHandler(fileHandler)\n self.logger.addHandler(consoleHandler)\n\n def getLog(self):\n return self.logger\n","sub_path":"Public/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"527811216","text":"##########################################\n### Rotational Equivariant Neural Nets ###\n##########################################\n\n\n\n################Rot_ResNet######################\nimport os\nimport torch\nimport dill\nfrom e2cnn import gspaces\nfrom e2cnn import nn\nimport numpy as np\nfrom torch.utils import data\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass Rot_Resblock(torch.nn.Module):\n def __init__(self, input_channels, hidden_dim, kernel_size, N):\n super(Rot_Resblock, self).__init__()\n \n r2_act = gspaces.Rot2dOnR2(N = N)\n feat_type_in = nn.FieldType(r2_act, input_channels*[r2_act.regular_repr])\n feat_type_hid = nn.FieldType(r2_act, hidden_dim*[r2_act.regular_repr])\n \n self.layer1 = nn.SequentialModule(\n nn.R2Conv(feat_type_in, feat_type_hid, kernel_size = kernel_size, padding = (kernel_size - 1)//2),\n nn.InnerBatchNorm(feat_type_hid),\n nn.ReLU(feat_type_hid)\n ) \n \n self.layer2 = nn.SequentialModule(\n nn.R2Conv(feat_type_hid, feat_type_hid, kernel_size = kernel_size, padding = (kernel_size - 1)//2),\n nn.InnerBatchNorm(feat_type_hid),\n \n ) \n self.relu = nn.ReLU(feat_type_hid)\n \n self.upscale = None\n if input_channels != hidden_dim:\n self.upscale = nn.SequentialModule(\n nn.R2Conv(feat_type_in, feat_type_hid, kernel_size = kernel_size, padding = (kernel_size - 1)//2),\n nn.InnerBatchNorm(feat_type_hid),\n nn.ReLU(feat_type_hid)\n ) \n \n def forward(self, xx):\n residual = xx\n out = self.layer1(xx)\n out = self.layer2(out)\n if self.upscale:\n out += self.upscale(residual)\n else:\n out += residual\n out = self.relu(out)\n \n return out\n\nclass ResNet(torch.nn.Module):\n def __init__(self, input_frames, kernel_size, N):\n super(ResNet, self).__init__()\n r2_act = gspaces.Rot2dOnR2(N = N)\n self.feat_type_in = nn.FieldType(r2_act, input_frames*[r2_act.trivial_repr])#irrep(1)\n self.feat_type_in_hid = nn.FieldType(r2_act, 16*[r2_act.regular_repr])\n self.feat_type_hid_out = nn.FieldType(r2_act, 256*[r2_act.regular_repr])\n self.feat_type_out = nn.FieldType(r2_act, [r2_act.trivial_repr])#irrep(1)\n \n self.input_layer = nn.SequentialModule(\n nn.R2Conv(self.feat_type_in, self.feat_type_in_hid, kernel_size = kernel_size, padding = (kernel_size - 1)//2),\n nn.InnerBatchNorm(self.feat_type_in_hid),\n nn.ReLU(self.feat_type_in_hid)\n )\n \n layers = [self.input_layer]\n layers += [Rot_Resblock(16, 32, kernel_size, N)]\n layers += [Rot_Resblock(32, 64, kernel_size, N)]\n layers += [Rot_Resblock(64, 128, kernel_size, N)]\n layers += [Rot_Resblock(128, 256, kernel_size, N)]\n layers += [nn.R2Conv(self.feat_type_hid_out, self.feat_type_out, kernel_size = kernel_size, padding = (kernel_size - 1)//2)]\n self.model = torch.nn.Sequential(*layers)\n \n \n def forward(self, xx):\n xx = 
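Usage sketch for the Logger wrapper above. One caveat of this design: every Logger(name) call attaches a fresh pair of handlers to the same underlying logger, so constructing it twice with one name duplicates every output line:

log = Logger('smoke-test').getLog()
log.info('written to the console and to the timestamped log file')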
nn.GeometricTensor(xx, self.feat_type_in)\n out = self.model(xx)\n return out.tensor\n \n\n \n##################Unet######################\n\nclass conv2d(torch.nn.Module):\n def __init__(self, input_channels, output_channels, kernel_size, stride, N, activation = True, deconv = False, last_deconv = False):\n super(conv2d, self).__init__() \n r2_act = gspaces.Rot2dOnR2(N = N)\n \n feat_type_in = nn.FieldType(r2_act, input_channels*[r2_act.regular_repr])\n feat_type_hid = nn.FieldType(r2_act, output_channels*[r2_act.regular_repr])\n if not deconv:\n if activation:\n self.layer = nn.SequentialModule(\n nn.R2Conv(feat_type_in, feat_type_hid, kernel_size = kernel_size, stride = stride, padding = (kernel_size - 1)//2),\n nn.InnerBatchNorm(feat_type_hid),\n nn.ReLU(feat_type_hid)\n ) \n else:\n self.layer = nn.R2Conv(feat_type_in, feat_type_hid, kernel_size = kernel_size, stride = stride,padding = (kernel_size - 1)//2)\n else:\n if last_deconv:\n feat_type_in = nn.FieldType(r2_act, input_channels*[r2_act.regular_repr])\n feat_type_hid = nn.FieldType(r2_act, output_channels*[r2_act.trivial_repr])#irrep(1)\n self.layer = nn.R2Conv(feat_type_in, feat_type_hid, kernel_size = kernel_size, stride = stride, padding = 0)\n else:\n self.layer = nn.SequentialModule(\n nn.R2Conv(feat_type_in, feat_type_hid, kernel_size = kernel_size, stride = stride, padding = 0),\n nn.InnerBatchNorm(feat_type_hid),\n nn.ReLU(feat_type_hid)\n ) \n \n def forward(self, xx):\n return self.layer(xx)\n \nclass deconv2d(torch.nn.Module):\n def __init__(self, input_channels, output_channels, N, last_deconv = False):\n super(deconv2d, self).__init__()\n self.conv2d = conv2d(input_channels = input_channels, output_channels = output_channels, kernel_size = 4, \n activation = True, stride = 1, N = N, deconv = True, last_deconv = last_deconv)\n r2_act = gspaces.Rot2dOnR2(N = N)\n self.feat_type = nn.FieldType(r2_act, input_channels*[r2_act.regular_repr])\n \n def pad(self, xx):\n new_xx = torch.zeros(xx.shape[0], xx.shape[1], xx.shape[2]*2 + 3, xx.shape[3]*2 + 3)\n new_xx[:,:,:-3,:-3][:,:,1::2,1::2] = xx\n new_xx = nn.GeometricTensor(new_xx, self.feat_type)\n return new_xx\n \n def forward(self, xx):\n out = self.pad(xx).to(device)\n return self.conv2d(out)\n \nclass U_net(torch.nn.Module):\n def __init__(self, input_frames, kernel_size, N):\n super(U_net, self).__init__()\n r2_act = gspaces.Rot2dOnR2(N = N)\n self.feat_type_in = nn.FieldType(r2_act, input_frames*[r2_act.trivial_repr])#irrep(1)\n self.feat_type_in_hid = nn.FieldType(r2_act, 32*[r2_act.regular_repr])\n self.feat_type_hid_out = nn.FieldType(r2_act, (16 + input_frames)*[r2_act.trivial_repr])#irrep(1)\n self.feat_type_out = nn.FieldType(r2_act, [r2_act.trivial_repr])#irrep(1)\n \n self.conv1 = nn.SequentialModule(\n nn.R2Conv(self.feat_type_in, self.feat_type_in_hid, kernel_size = kernel_size, stride = 2, padding = (kernel_size - 1)//2),\n nn.InnerBatchNorm(self.feat_type_in_hid),\n nn.ReLU(self.feat_type_in_hid)\n )\n self.conv1_1 = conv2d(32, 32, kernel_size = kernel_size, stride = 1, N = N)\n self.conv2 = conv2d(32, 64, kernel_size = kernel_size, stride = 1, N = N)\n #self.conv2_1 = conv2d(64, 64, kernel_size = kernel_size, stride = 1, N = N)\n self.conv3 = conv2d(64, 128, kernel_size = kernel_size, stride = 2, N = N)\n #self.conv3_1 = conv2d(128, 128, kernel_size = kernel_size, stride = 1, N = N)\n self.conv4 = conv2d(128, 256, kernel_size = kernel_size, stride = 2, N = N)\n #self.conv4_1 = conv2d(256, 256, kernel_size = kernel_size, stride = 1, N = N)\n\n 
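A stand-alone illustration (invented sizes) of the zero-stuffing used by deconv2d.pad above: the input is written onto a stride-2 grid with an extra border, so that a stride-1 convolution over the result behaves like a transposed convolution:

import torch

x = torch.arange(1., 5.).reshape(1, 1, 2, 2)
up = torch.zeros(1, 1, 2 * 2 + 3, 2 * 2 + 3)
up[:, :, :-3, :-3][:, :, 1::2, 1::2] = x
print(up[0, 0])  # x scattered onto every second cell of a 7x7 grid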
self.deconv3 = deconv2d(256, 64, N)\n self.deconv2 = deconv2d(192, 32, N)\n self.deconv1 = deconv2d(96, 16, N, last_deconv = True)\n \n self.output_layer = nn.R2Conv(self.feat_type_hid_out, self.feat_type_out, kernel_size = kernel_size, padding = (kernel_size - 1)//2)\n \n\n def forward(self, x):\n \n x = nn.GeometricTensor(x, self.feat_type_in)\n out_conv1 = self.conv1_1(self.conv1(x))\n out_conv2 = self.conv2(out_conv1)#)self.conv2_1(\n out_conv3 = self.conv3(out_conv2)#)self.conv3_1(\n out_conv4 = self.conv4(out_conv3)#)self.conv4_1(\n\n out_deconv3 = self.deconv3(out_conv4.tensor)\n concat3 = torch.cat((out_conv3.tensor, out_deconv3.tensor), 1)\n out_deconv2 = self.deconv2(concat3)\n concat2 = torch.cat((out_conv2.tensor, out_deconv2.tensor), 1)\n out_deconv1 = self.deconv1(concat2)\n \n concat0 = torch.cat((x.tensor, out_deconv1.tensor), 1)\n concat0 = nn.GeometricTensor(concat0, self.feat_type_hid_out)\n out = self.output_layer(concat0)\n return out.tensor\n ","sub_path":"Models/Rotation-Equ/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"526364525","text":"#encoding: UTF-8\n#autor: Allan Sánchez Iparrazar\n#Dibuja un vector dado el ángulo\nfrom Graphics import *\n \n\ndef dibujarPlano(t): \n t.penDown()\n t.rotate(270)\n t.forward(800)\n t.penUp()\n t.moveTo(0,400)\n t.penDown()\n t.rotate(90)\n t.forward(800)\n t.penUp()\n \ndef dibujarVector(vector,magnitud,grados):\n \n vector.penDown()\n vector.rotate(grados)\n vector.forward(magnitud)\n \n \n\ndef main():\n v = Window(\"Plano\",800,800)\n t = Arrow((400,0),0)\n t.draw(v)\n vector = Arrow((400,400),0)\n vector.draw(v)\n dibujarPlano(t)\n magnitud = int(input(\"Ingresa la magnitud de tu vector\"))\n grados = int(input(\"Ingresa el ángulo que tendrá tu vector\"))\n dibujarVector(vector,magnitud,grados)\n \n \nmain()","sub_path":"graficandoVectores.py","file_name":"graficandoVectores.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"16683838","text":"# -*- coding:utf-8 -*-\n\n\n# Given an array of integers, return indices of the two numbers such that they add up to a specific target.\r\n#\n# You may assume that each input would have exactly one solution, and you may not use the same element twice.\r\n#\n# Example:\r\n#\n#\n# Given nums = [2, 7, 11, 15], target = 9,\r\n#\n# Because nums[0] + nums[1] = 2 + 7 = 9,\r\n# return [0, 1].\r\n#\n#\n#  \r\n#\n\n\nclass Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n \"\"\"\n for i in range(len(nums)):\n for j in range(i + 1, len(nums)):\n if nums[i] + nums[j] == target:\n return [i, j]\n else:\n j += 1\n i += 1\n \"\"\"\n \"\"\"\n return [(i, j) for i in range(len(nums)) for j in range(len(nums)) if i != j and nums[i] + nums[j] == target][0]\n \"\"\"\n result_map = {}\n for i in range(len(nums)):\n complement = target - nums[i]\n if complement in result_map.keys():\n return [result_map[complement], i]\n result_map[nums[i]] = i\n","sub_path":"001-two-sum/two-sum.py","file_name":"two-sum.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"289549445","text":"import euler\n\n\nANSWER = 249\n\n\ndef step(n):\n return n + int(str(n)[::-1])\n\n\ndef main():\n count = 0\n for i in range(1, 
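A minimal stand-alone example of the e2cnn building blocks used throughout model.py above: one rotation-equivariant convolution from a scalar (trivial) field to regular fields. The sizes are arbitrary:

import torch
from e2cnn import gspaces, nn

r2_act = gspaces.Rot2dOnR2(N=4)
feat_in = nn.FieldType(r2_act, 1 * [r2_act.trivial_repr])
feat_out = nn.FieldType(r2_act, 8 * [r2_act.regular_repr])
conv = nn.R2Conv(feat_in, feat_out, kernel_size=3, padding=1)

x = nn.GeometricTensor(torch.randn(1, 1, 16, 16), feat_in)
y = conv(x)
print(y.tensor.shape)  # torch.Size([1, 32, 16, 16]): 8 regular fields x 4 rotations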
10000):\n for _ in range(50):\n i = step(i)\n if euler.is_palindrome(i):\n break\n else:\n count += 1\n return count\n\n\nif __name__ == '__main__':\n print(main())\n","sub_path":"python/problem55.py","file_name":"problem55.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"517120074","text":"import codecs\nimport csv\nimport gzip\nimport json\nimport re\n\nimport pytz\nfrom dateutil import parser\n\nimport boto3\nimport botocore\n\nBUCKET_NAME = 'openaq-data'\nFETCHES_BUCKET = 'openaq-fetches'\n\n\ndef get_object_list(bucket_name=BUCKET_NAME, prefix='/', client=None):\n if client is None:\n client = boto3.client('s3', config=botocore.client.Config(signature_version=botocore.UNSIGNED))\n result = client.list_objects_v2(Bucket=bucket_name, Prefix=prefix, Delimiter='/')\n ret = []\n prefixes = []\n if 'Contents' in result:\n ret += result['Contents']\n\n if 'CommonPrefixes' in result:\n prefixes += list(i['Prefix'] for i in result['CommonPrefixes'])\n\n while result['IsTruncated']:\n token = result['NextContinuationToken']\n result = client.list_objects_v2(Bucket=bucket_name, Prefix=prefix, Delimiter='/', ContinuationToken=token)\n if 'Contents' in result:\n ret += result['Contents']\n\n if 'CommonPrefixes' in result:\n prefixes += list(i['Prefix'] for i in result['CommonPrefixes'])\n\n return ret, prefixes\n\n\n# Usages:\n\ndef get_all_prefixes():\n # get all prefixes from fetches/realtime\n objects, prefixes = get_object_list(bucket_name=FETCHES_BUCKET, prefix='realtime/')\n\n\n# {'Key': 'realtime/2013-11-27/2013-11-27.ndjson',\n# 'LastModified': datetime.datetime(2017, 12, 28, 18, 9, 14, tzinfo=tzutc()),\n# 'ETag': '\"0214c4d59b099b041aa651c8221aa4f3\"',\n# 'Size': 10723,\n# 'StorageClass': 'STANDARD'}\ndef get_all_objects_list(prefixes):\n client = boto3.client('s3', config=botocore.client.Config(signature_version=botocore.UNSIGNED))\n # _, prefixes = get_object_list(bucket_name=FETCHES_BUCKET, prefix='realtime/', client=client)\n\n all_objects = []\n\n for prefix in prefixes:\n obj, pref2 = get_object_list(bucket_name=FETCHES_BUCKET, prefix=prefix['Prefix'], client=client)\n all_objects += obj\n\n return all_objects\n\n\ndef filter_objects(all_objects, start_date, end_date):\n utc = pytz.utc\n if start_date.tzinfo is None:\n start_date = utc.localize(start_date)\n\n if end_date.tzinfo is None:\n end_date = utc.localize(end_date)\n\n return (x for x in all_objects if start_date <= x['LastModified'] <= end_date)\n\n\ndef filter_prefixes(prefixes, start_date, end_date):\n patt = re.compile(r'.*/(\\d{4}-\\d{2}-\\d{2})/')\n return (x for x in prefixes if patt.search(x) and start_date <= parser.parse(patt.search(x).group(1)) <= end_date)\n\n\ndef serialize_object(l):\n return ','.join([l['Key'], str(l['Size']), l['ETag'], l['LastModified'].isoformat()]) + '\\n'\n\n\ndef get_jsons_from_stream(stream, object_name=''):\n reader = codecs.getreader('utf-8')\n\n if object_name.endswith('.gz'):\n generator = reader.decode(gzip.decompress(stream.read()))[0].split('\\n')\n else:\n generator = reader(stream)\n\n for line in generator:\n try:\n yield json.loads(line)\n except:\n print('Unable to deserialize [%s]' % line)\n\n\ndef get_jsons_from_object(bucket, object_name, client=None):\n if client is None:\n client = boto3.client('s3', config=botocore.client.Config(signature_version=botocore.UNSIGNED))\n\n obj = client.get_object(Bucket=bucket, Key=object_name)\n body = obj['Body']\n\n reader = 
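boto3 can also drive the continuation-token loop in get_object_list() itself; a hedged equivalent using the built-in paginator with the same unsigned access:

import boto3
import botocore

client = boto3.client('s3', config=botocore.client.Config(signature_version=botocore.UNSIGNED))
paginator = client.get_paginator('list_objects_v2')
for page in paginator.paginate(Bucket='openaq-fetches', Prefix='realtime/', Delimiter='/'):
    for obj in page.get('Contents', []):
        print(obj['Key'])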
codecs.getreader('utf-8')\n\n if object_name.endswith('.gz'):\n generator = reader.decode(gzip.decompress(body.read()))[0].split('\\n')\n else:\n generator = reader(body)\n\n for line in generator:\n try:\n yield json.loads(line)\n except:\n print(f'Unable to deserialize [{line}]')\n\n \n body.close()\n\n\ndef read_object_list(input_file):\n objects = []\n\n object_reader = csv.DictReader(input_file, fieldnames=['Name', 'Size', 'ETag', 'LastModified'])\n for obj in object_reader:\n obj['LastModified'] = parser.parse(obj['LastModified'])\n obj['Size'] = int(obj['Size'])\n objects.append(obj)\n\n return objects\n\n\ndef split_record(record):\n \"\"\"{\"date\":{\"utc\":\"2018-06-06T23:00:00.000Z\",\"local\":\"2018-06-07T05:00:00+06:00\"},\n \"parameter\":\"pm25\", \n \"value\":27,\n \"unit\":\"µg/m³\",\n \"averagingPeriod\":{\"value\":1,\"unit\":\"hours\"},\n \"city\":\"Dhaka\", --> stationMeta\n \"location\":\"US Diplomatic Post: Dhaka\", --> stationName\n \"coordinates\":{\"latitude\":23.796373,\"longitude\":90.424614}, --> stationMeta\n \"country\":\"BD\", -->stationMeta\n \"sourceName\":\"StateAir_Dhaka\", --> StationId\n \"attribution\":[{\"name\":\"EPA AirNow DOS\",\"url\":\"http://airnow.gov/index.cfm?action=airnow.global_summary\"}], --> Provider \n \"sourceType\":\"government\", --> Provider\n \"mobile\":false} --> StationMetaProvider (catchAll for all that does not fit the station core)\"\"\"\n\n if ('coordinates' not in record):\n record['coordinates'] = {'latitude': 0.0, 'longitude': 0.0}\n\n if record['coordinates'] is None:\n record['coordinates'] = {'latitude': 0.0, 'longitude': 0.0}\n\n if 'averagingPeriod' not in record:\n record['averagingPeriod'] = \"\"\n\n mes_keys = ['parameter', 'value', 'unit', 'averagingPeriod', 'date', 'sourceName']\n stat_keys = ['location', 'city', 'coordinates', 'country', 'sourceName']\n\n measurement = {k: v for k, v in record.items() if k in mes_keys}\n\n station = {k: v for k, v in record.items() if k in stat_keys}\n ext = {k: v for k, v in record.items() if k not in (mes_keys + stat_keys)}\n\n return station, measurement, ext\n\n\ndef get_objects(prefix):\n objects, _ = get_object_list(bucket_name=FETCHES_BUCKET, prefix=prefix)\n return objects\n\n\ndef process_file(object_name, station_dao, mes_dao):\n records = 0\n for record in get_jsons_from_object(bucket=FETCHES_BUCKET, object_name=object_name):\n station, measurement, ext = split_record(record)\n stat_id = station_dao.store_from_json(station)\n mes_dao.store(station_id=stat_id, parameter=measurement['parameter'],\n value=measurement['value'], unit=measurement['unit'],\n averagingPeriod=measurement['averagingPeriod'],\n date=measurement['date']['utc'])\n records += 1\n\n return records\n","sub_path":"mys3utils/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":6303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"168810902","text":"from selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pytest\nfrom webdriver_manager.firefox import GeckoDriverManager\n\n\n@pytest.fixture()\ndef setup(browser):\n if browser==\"chrome\":\n driver= webdriver.Chrome(ChromeDriverManager().install())\n elif browser==\"firefox\":\n driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())\n else:\n driver = webdriver.Chrome(ChromeDriverManager().install())\n driver.maximize_window()\n driver.implicitly_wait(10)\n return driver\n\ndef pytest_addoption(parser): #This will get the 
value from CLI/hooks\n parser.addoption(\"--browser\")\n\n@pytest.fixture()\ndef browser(request): #This will return the browser value to the setup\n return request.config.getoption(\"--browser\")\n\n############Pytest html report##############\n#it is a hook to add environment info to the html report\ndef pytest_configure(config):\n config._metadata['Project Name']='NopCommerceApp'\n config._metadata['Module Name'] = 'Commerce'\n config._metadata['Tester'] = 'Kalyan'\n\n#This is hook to delete/modify the env info to html report\n@pytest.mark.optionalhook\ndef pytest_metadata(metadata):\n metadata.pop(\"JAVA_HOME\", None)\n metadata.pop(\"Plugins\", None)","sub_path":"testCases/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"16601629","text":"#!/usr/bin/env python\n\"\"\"Prepare_data.py\n\nProcess the Phoenix-aces synthetic spectra and create .dat files with\nwavelength and flux.\nAdds them to the data directory of eniric for convolutions etc.\nThis wastes a lot of memory duplicating wavelength vector.\n\nJason Neal January 2017\n\"\"\"\nimport argparse\nimport os\nimport re\nimport sys\n\nimport numpy as np\nfrom astropy.io import fits\n\nimport eniric\nimport eniric.io_module as io\n\n\ndef _parser():\n \"\"\"Take care of all the argparse stuff.\n\n :returns: the args\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Transform spectra to prepare for convolution.\"\n )\n parser.add_argument(\n \"-s\",\n \"--startype\",\n help='Spectral Type e.g \"MO\"',\n default=[\"M0\"],\n type=str,\n nargs=\"+\",\n )\n parser.add_argument(\n \"-t\",\n \"--temp\",\n help=\"Temperature of stars to prepare\",\n type=float,\n nargs=\"+\",\n default=[3900.0],\n choices=list(np.arange(2300, 7000, 100.0))\n + list(np.arange(7000, 12001, 200.0)),\n )\n parser.add_argument(\n \"-l\",\n \"--logg\",\n help=\"Logg for stellar models.\",\n default=[4.50],\n type=float,\n nargs=\"+\",\n choices=np.arange(0, 6.01, 0.5),\n )\n parser.add_argument(\n \"-m\",\n \"--metallicity\",\n type=float,\n default=[0.0],\n help=\"Metallicity values.\",\n nargs=\"+\",\n )\n # choices=[list(np.arange(-4.0, -2.0, 1))+list(np.arange(-2.0, 1.01, 0.5))]\n parser.add_argument(\n \"-a\",\n \"--alpha\",\n type=float,\n default=[0.0],\n choices=np.arange(-0.2, 1.201, 0.2),\n help=\"Alpha values. Default = [0.0]\",\n nargs=\"+\",\n )\n parser.add_argument(\n \"-f\",\n \"--flux_type\",\n type=str,\n default=\"photon\",\n choices=[\"photon\", \"energy\"],\n help=\"Type of flux to use. 
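One sharp edge in the conftest above: parser.addoption("--browser") registers the option without a default, so a plain pytest run passes browser=None into setup() and relies on the final else branch. A sketch of a more explicit registration (same pytest API; the default shown is a choice, not the author's):

def pytest_addoption(parser):
    parser.addoption("--browser", action="store", default="chrome",
                     help="browser to run the tests against: chrome or firefox")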
Default converts it to photons.\",\n )\n parser.add_argument(\n \"-d\",\n \"--data_dir\",\n help=\"Data directory to save results.\",\n type=str,\n default=None,\n )\n parser.add_argument(\n \"-p\",\n \"--phoenix_dir\",\n default=None,\n type=str,\n help=\"Phoenix directory to find fits files\",\n )\n parser.add_argument(\n \"-r\",\n \"--replace\",\n action=\"store_true\",\n help=\"Replace data files if already created.\",\n )\n\n return parser.parse_args()\n\n\ndef main(\n startype,\n temp,\n logg,\n metallicity,\n alpha,\n flux_type=\"photon\",\n data_dir=None,\n phoenix_dir=None,\n replace=False,\n):\n \"\"\"Prepare datafiles for phoenix models that match the input parameters.\n\n This add the wavelength information to each spectra and converts\n to microns/photons if the flux_type=\"photons\" is given.\n We do realise that this is a waste of space and it would be more\n storage efficient to just read in the phoenix raw fits files and\n wavelength file when needed.\n\n \"\"\"\n if data_dir is None:\n data_dir = eniric.paths[\"phoenix_dat\"]\n os.makedirs(data_dir, exist_ok=True)\n\n if phoenix_dir is None:\n phoenix_dir = eniric.paths[\"phoenix_raw\"]\n\n # Get Phoenix wavelength data\n wavelength_file = \"WAVE_PHOENIX-ACES-AGSS-COND-2011.fits\"\n wavelength = fits.getdata(os.path.join(phoenix_dir, wavelength_file))\n\n if flux_type == \"photon\":\n file_suffix = \"_wave_photon.dat\" # For saving output\n else:\n file_suffix = \"_wave.dat\"\n\n # Convert Stellar_types into\n stellar_dict = {\"M0\": 3900.0, \"M3\": 3500.0, \"M6\": 2800.0, \"M9\": 2600.0}\n # Add temperature of stellar_type to temp list\n for star in startype:\n try:\n temp.append(stellar_dict[star])\n except KeyError:\n print(\n \"Stellar type {0} is not implemented here (yet), submit and issue.\".format(\n star\n )\n )\n\n # Get all phoenix fits files we want to convert\n for (path, dirs, files) in os.walk(phoenix_dir):\n\n phoenix_files = []\n for f in files:\n # Test if filename meets conditions\n end_cond = f.endswith(\"PHOENIX-ACES-AGSS-COND-2011-HiRes.fits\")\n\n try:\n if \"Alpha=\" in f:\n (match_temp, match_logg, match_feh, match_alpha) = re.search(\n r\"(\\d{5})-(\\d\\.\\d\\d)([+\\-]\\d\\.\\d)\\.Alpha=([+\\-]\\d\\.\\d\\d)\\.\", f\n ).groups()\n alpha_cond = float(match_alpha) in alpha\n else:\n (match_temp, match_logg, match_feh) = re.search(\n r\"(\\d{5})-(\\d\\.\\d\\d)([+\\-]\\d\\.\\d)\", f\n ).groups()\n alpha_cond = True # To make work\n except AttributeError:\n # This file doesn't match what we want so continue with next loop\n continue\n\n temp_cond = float(match_temp) in temp\n feh_cond = float(match_feh) in metallicity\n logg_cond = float(match_logg) in logg\n\n if np.all(\n [end_cond, temp_cond, feh_cond, logg_cond, alpha_cond]\n ): # All conditions met\n # Matching file found\n phoenix_files.append(f)\n else:\n pass\n\n for phoenix_file in phoenix_files:\n z_folder = path.split(os.sep)[-1]\n os.makedirs(os.path.join(data_dir, z_folder), exist_ok=True)\n output_filename = os.path.join(\n data_dir, z_folder, phoenix_file[:-5] + file_suffix\n ) # Name of .dat file\n if os.path.exists(output_filename) and not replace:\n print(\n \"Skipping as {0} already exists (use -r to replace)\".format(\n output_filename\n )\n )\n continue\n spectra = fits.getdata(os.path.join(path, phoenix_file))\n\n # Need to add conversions pedro preformed to flux!\n \"\"\"The energy units of Phoenix fits files is erg/s/cm**2/cm\n We transform the flux into photons in the read_spectrum()\n function by multiplying the flux result by 
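A quick check of the filename patterns used above, against a name invented to follow the PHOENIX-ACES convention the code expects:

import re

name = "lte03900-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
m = re.search(r"(\d{5})-(\d\.\d\d)([+\-]\d\.\d)", name)
print(m.groups())  # ('03900', '4.50', '-0.0')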
the wavelength (lambda)\n\n Flux_photon = Flux_energy/Energy_photon\n with\n Energy_photon = h*c/lambda\n Flux_photon = Flux_energy * lambda / (h * c)\n\n Here we convert the flux into erg/s/cm**2/\\mum by multiplying by 10**-4 cm/\\mum\n Flux_e(erg/s/cm**2/\\mum) = Flux_e(erg/s/cm**2/cm) * (1 cm) / (10000 \\mum)\n \"\"\"\n\n spectra_micron = spectra * 10 ** -4 # Convert /cm to /micron\n\n if flux_type == \"photon\":\n wavelength_micron = (\n wavelength * 10 ** -4\n ) # Convert Angstrom to micron\n\n spectra_photon = (\n spectra_micron * wavelength_micron\n ) # Ignoring constants h*c in photon energy equation\n\n result = io.pdwrite_cols(\n output_filename,\n wavelength_micron,\n spectra_photon,\n header=[\"# Wavelength (micron)\", r\"Flux (photon/s/cm^2)\"],\n float_format=\"%.7f\",\n )\n\n else:\n result = io.pdwrite_cols(\n output_filename,\n wavelength,\n spectra_micron,\n header=[\"# Wavelength (Angstrom)\", r\"Flux (erg/s/cm^2/micron)\"],\n float_format=None,\n )\n\n if not result:\n print(\"Successfully wrote to \", output_filename)\n else:\n print(\"Failed to write to \", output_filename)\n\n print(\"Done\")\n return 0\n\n\nif __name__ == \"__main__\":\n args = vars(_parser())\n opts = {k: args[k] for k in args}\n sys.exit(main(**opts))\n","sub_path":"eniric/obsolete/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":7973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"179129736","text":"#Check that force on moon is always inward\n#Adjust dem to vary\n\nfrom visual import *\nfrom math import *\n\n##palette: miaka\nmiaka1 = (.988,.207,.298)\nlalunanera = (.161,.133,.122)\nmiaka2 = (.074,.455,.490)\ngameboyteal = (.039,.749,.737)\nsaumur = (.988,.968,.772)\nwatermelon = (.075,.678,.216)\ngoldfish = (.980,.51,.121)\n\nscene.background=saumur\nscene.fullscreen = True\nsun=sphere(radius=1, pos=(0,0,0), color=goldfish)\nreal=False #we've set up some predetermined values for the real system and a\n #convenient fake one\n\nif not real:\n #print(\"made it here\")\n scene.autoscale=False\n scene.range=20\n des=10 #distance earth sun\n dem=.7 #(.7 is good)\n\n G=1\n \n sun=sphere(radius=1, pos=(0,0,0), color=goldfish)\n sun.m=1e5\n sun.p=vector(0,0,0)\n \n earth=sphere(radius=.5, pos=(des,0,0), color=gameboyteal)\n earth.m=99.5\n earth.period=sqrt(4*math.pi**2*des**3/(sun.m+earth.m))\n earth.p=vector(0,earth.m*sqrt(sun.m/des),0)\n \n moon=sphere(radius=.3, pos=(des+dem,0,0), color=lalunanera)\n moon.m=.0032\n moon.p=vector(0,.78*moon.m*sqrt(sun.m/(dem+des))+moon.m*sqrt(earth.m/dem),0)\n \n sun.p=-moon.p-earth.p\n forcearrow=arrow(color=miaka1,shaftwidth=1e-9)\n varrow=arrow(color=moon.color,shaftwidth=1e-1)\n points(pos=(0,0,1.01),color=lalunanera)\n\nelse:\n scene.range=300000000000\n des=149597870700\n dem=384399000\n\n G=6.67384e-11\n \n sun=sphere(radius=1, pos=(0,0,0), color=goldfish)\n sun.m=1.9891e30\n sun.p=vector(0,0,0)\n \n earth=sphere(radius=6371000, pos=(des,0,0), color=gameboyteal)\n earth.m=5.9736e24\n earth.period=365.25636004*24*60*60\n earth.p=vector(0,29780*earth.m,0)\n \n moon=sphere(radius=1737100, pos=(des+dem,0,0), color=lalunanera)\n moon.m=7.3477e22\n moon.p=vector(0,(earth.p[1]/earth.m+1022)*moon.m,0)\n \n sun.p=-moon.p-earth.p\n ep = points(pos=earth.pos,color=earth.color)\n mp = points(pos=moon.pos,color=moon.color)\n sp = points(pos=sun.pos,color=sun.color)\n\npArray=array([sun,earth,moon])\nfor a in pArray:\n a.orbit=curve(color=a.color)\n\ndt = 
earth.period/50000\nt=0\npause=True\n\nwhile t<100*earth.period:\n rate(10000)\n t=t+dt\n\n if scene.mouse.events:\n m1 = scene.mouse.getevent()\n if m1.press:\n if pause==False: \n pause=True\n else:\n pause=False\n\n if pause==False:\n for a in pArray:\n forceOnA=vector(0,0,0)\n for b in pArray:\n if a!=b:\n distAtoB=b.pos-a.pos\n forceOnA+=G*a.m*b.m*distAtoB/mag(distAtoB)**3\n a.p+=forceOnA*dt\n a.pos+=a.p/a.m*dt\n a.orbit.append(pos=a.pos)\n if a == moon and not real:\n forcearrow.axis=4*forceOnA\n forcearrow.pos=a.pos\n varrow.axis=a.p/a.m/40\n varrow.pos=a.pos\n if real:\n ep.pos=earth.pos\n mp.pos=moon.pos\n sp.pos=sun.pos\n\n","sub_path":"Jerry/earthmoonsun2.py","file_name":"earthmoonsun2.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"244448810","text":"# Merge two sorted linked lists and return it as a new sorted list. The new list\n# should be made by splicing together the nodes of the first two lists. \n# \n# Example: \n# \n# \n# Input: 1->2->4, 1->3->4\n# Output: 1->1->2->3->4->4\n# \n# Related Topics Linked List \n# 👍 4590 👎 610\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution(object):\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n if l1 is None:\n return l2\n elif l2 is None:\n return l1\n elif l1.val <= l2.val:\n l1.next = self.mergeTwoLists(l1.next, l2)\n return l1\n else:\n l2.next = self.mergeTwoLists(l1, l2.next)\n return l2\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"[21]Merge Two Sorted Lists.py","file_name":"[21]Merge Two Sorted Lists.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"635358850","text":"import socket\nimport time\n\nfrom utility_functions import socket_functions\nfrom utility_functions import string_functions\nfrom utility_functions import irc_functions\nimport config\nimport logger\nimport cmdhandler\nimport pubcmdhandler\n\n\nclass Connection:\n msabot = None\n connected = False\n running = False\n socket = None\n\n def __init__(self, bot):\n logger.log('Initializing IRC connection...')\n self.msabot = bot\n self.running = True\n self.socket = socket.socket()\n self.socket.connect((config.get('host'), config.get('port')))\n irc_functions.send(self, 'NICK %s' % config.get('nickname'))\n irc_functions.send(self, 'USER %(nick)s %(nick)s %(nick)s :%(nick)s' % {'nick': config.get('nickname')})\n\n def interrupt(self):\n logger.log('Terminating IRC connection...')\n self.running = False\n irc_functions.say(self, 'Logged out on ' + socket.gethostname(), 'msalihov')\n irc_functions.send(self, 'QUIT :%s\\r\\n' % config.get('quitmsg'))\n time.sleep(3)\n self.socket.close()\n\n def loop(self):\n while self.running:\n databuffer = self.socket.recv(4096)\n datalns = socket_functions.getdatalines(databuffer)\n for line in datalns:\n line = string_functions.datatostring(line)\n if line == '':\n continue\n logger.log(line)\n irc_functions.ping(self, line)\n args = line.split(None, 3)\n if len(args) != 4:\n continue\n arr = string_functions.getdataarray(args)\n if arr['type'] == 'PRIVMSG':\n if arr['target'] == config.get('nickname'):\n cmdhandler.handle(self, arr)\n 
pubcmdhandler.handle(self, arr)\n logger.logmsg(args)\n","sub_path":"IRC-Bot/Connection.py","file_name":"Connection.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"133004835","text":"import random\r\ndef guessGame():\r\n secret_word = \"K VECTOR\"\r\n guess_count = 0\r\n guess_limit = 3\r\n while guess_count < guess_limit:\r\n guess = input(\"Enter your word: \")\r\n guess_count += 1\r\n if guess == secret_word:\r\n print (\"congrats you made it to the next step\")\r\n break\r\n elif guess_count == guess_limit:\r\n print(\"oh you lost\")\r\n quit()\r\n lower = int(input(\"Enter a lower bound to guess: \"))\r\n upper = int(input(\"Enter upper bound to guess: \"))\r\n x = random.randint(lower, upper)\r\n guess_count = 0\r\n List = []\r\n while True:\r\n user_guess = int(input(f\"Enter a number between {lower} and {upper}: \"))\r\n guess_count += 1\r\n List.append(user_guess)\r\n if user_guess < x:\r\n print(\"You entered a too small number \")\r\n elif user_guess > x:\r\n print(\"You entered too large number\")\r\n else:\r\n print(f\"Congrats! You guessed the right number after {guess_count} times\")\r\n print('Here is the list of your attempts')\r\n print(List)\r\n return\r\n\r\nguessGame()\r\n\r\nplay_again = input('Do you want to play again y/n')\r\nif play_again == 'y'.lower():\r\n print (\"let's play again\")\r\n guessGame()\r\n\r\nelse:\r\n print(\"thanks for playing\")\r\n exit()","sub_path":"Participants' Tasks/amin.py","file_name":"amin.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"625390320","text":"import torch.nn as nn\nfrom SubLayers import MultiHeadAttention, PositionwiseFeedForward\n\nclass EncoderLayer(nn.Module):\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):\n super(EncoderLayer,self).__init__()\n self.slf_attn = MultiHeadAttention(\n n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)\n\n\n def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None):\n enc_ooutput, enc_slf_attn = self.slf_attn(enc_input , enc_input, enc_input, mask=slf_attn_mask)\n enc_ooutput *= non_pad_mask\n\n enc_ooutput = self.pos_ffn(enc_ooutput)\n enc_ooutput *= non_pad_mask\n\n return enc_ooutput, enc_slf_attn\n\n\n\nclass DecoderLayer(nn.Module):\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):\n super(DecoderLayer,self).__init__()\n self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)\n self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)\n\n def forward(self, dec_input, enc_output, non_pad_mask=None, slf_attn_mask=None, dec_enc_attn_mask=None):\n dec_output, dec_slf_attn = self.slf_attn(\n dec_input, dec_input, dec_input, mask=slf_attn_mask)\n dec_output *= non_pad_mask\n\n dec_output, dec_enc_attn = self.enc_attn(dec_output, enc_output, enc_output, mask=dec_enc_attn_mask)\n # ??? dec_ouput is q? 
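# (Answering the question in this comment: yes on both counts — by the usual
# MultiHeadAttention call convention, the first argument is the query, so
# dec_output supplies Q while enc_output supplies both K and V in
# encoder-decoder attention; the self-attention call above passes dec_input
# for all three roles.)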
enc_output is k/value?\n dec_output *= non_pad_mask\n\n dec_output = self.pos_ffn(dec_output)\n dec_output *= non_pad_mask\n\n return dec_output, dec_slf_attn, dec_enc_attn\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"model/transformer/my_transformer/Layers.py","file_name":"Layers.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"90854720","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef main(data):\n\ttresh = np.max(data['AGE']) + 1\n\tplt.hist(data['AGE'].values, bins=tresh)\n\tplt.title(\"# AGE\")\n\tplt.show();\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n","sub_path":"myscript.py","file_name":"myscript.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"388248316","text":"# [[1, '英語', 25], [1, '国語', 63], [1, '数学', 42], [2, '地歴', 90], [2, '英語', 44], [3, '数学', 94]]\n# のような番号、教科、点数からなるリストが与えられます。\n# 以下の二つの方法で教科ごとの平均点と最高点、最低点、番号ごとの平均点を求めるプログラムを作成してください\n#\n# 2. pandasモジュールを利用してプログラムを作成してください\n\n# *****\n# 参考サイト:\n# http://www.python.ambitious-engineer.com/archives/category/application/data_analysis/pandas]\n# http://sinhrks.hatenablog.com/entry/2014/11/27/232150\n# *****\nimport pandas as pd\n\nScores = [[1, '英語', 25], [1, '国語', 63], [1, '数学', 42], [2, '地歴', 90], [2, '英語', 44], [3, '数学', 94]]\n\ndef ave_subject(Scores):\n English = []\n Japanese = []\n Math = []\n History = []\n # 各教科ごとのリストを作る\n for n in range(len(Scores)):\n n = n-1\n if Scores[n][1] == '英語':\n English.append(Scores[n])\n if Scores[n][1] == '数学':\n Math.append(Scores[n])\n if Scores[n][1] == '国語':\n Japanese.append(Scores[n])\n if Scores[n][1] == '地歴':\n History.append(Scores[n])\n\n # 各教科ごとのリストをDataFrameにする\n df_Eng = pd.DataFrame(English, columns =['num', 'sub', 'score'])\n df_Math = pd.DataFrame(Math,columns =['num', 'sub', 'score'])\n df_Jpn = pd.DataFrame(Japanese, columns =['num', 'sub', 'score'])\n df_Hist = pd.DataFrame(History, columns =['num', 'sub', 'score'])\n\n # DataFrameで各教科の平均を求める\n Eng_ave = round(df_Eng.mean()['score'])\n Math_ave =round(df_Math.mean()['score'])\n Jpn_ave = round(df_Jpn.mean()['score'])\n Hist_ave = round(df_Hist.mean()['score'])\n\n # 教科ごとの最高点を求める\n Eng_high = df_Eng.max()['score']\n Math_high = df_Math.max()['score']\n Jpn_high = df_Jpn.max()['score']\n Hist_high = df_Hist.max()['score']\n # 教科ごとの最低点を求める\n Eng_low = df_Eng.min()['score']\n Math_low = df_Math.min()['score']\n Jpn_low = df_Jpn.min()['score']\n Hist_low = df_Hist.min()['score']\n\n average_score = \"英語の平均点は\", Eng_ave, \"点。数学の平均点は\", Math_ave, \"点。国語の平均点は\", Jpn_ave, \"点。地歴の平均点は\", Hist_ave, \"点。\"\n\n highest_score = \"英語の最高点は\", Eng_high, \"点、数学の最高点は\", Math_high, \"点、国語の最高点は\", Jpn_high, \"点、地歴の最高点は\", Hist_high, \"点。\"\n\n lowest_score = \"英語の最低点は\", Eng_low, \"点、数学の最低点は\", Math_low, \"点、国語の最低点は\", Jpn_low, \"点、地歴の最低点は\", Hist_low, \"点。\"\n\n # 番号ごとの平均点を求める\n score_1 = []\n score_2 = []\n score_3 = []\n for n in range(len(Scores)):\n n = n-1\n if Scores[n][0] == 1:\n score_1.append(Scores[n])\n if Scores[n][0] == 2:\n score_2.append(Scores[n])\n if Scores[n][0] == 3:\n score_3.append(Scores[n])\n\n # 各リストをDataFrameにする\n df_score_1 = pd.DataFrame(score_1, columns =['num', 'sub', 'score'])\n df_score_2 = pd.DataFrame(score_2, columns =['num', 'sub', 'score'])\n df_score_3 = pd.DataFrame(score_3, columns =['num', 'sub', 
'score'])\n\n ave_score_1 = round(df_score_1.mean()['score'])\n ave_score_2 = round(df_score_2.mean()['score'])\n ave_score_3 = round(df_score_3.mean()['score'])\n\n stu_ave = \"番号1の平均点は\", ave_score_1, \"点、番号2の平均点は\", ave_score_2, \"番号3の平均点は\", ave_score_3, \"。\"\n\n return average_score, highest_score, lowest_score, stu_ave\n","sub_path":"24Jun2017_HW/score_calc_pandas.py","file_name":"score_calc_pandas.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"176482410","text":"from ptmworker.helpers import upload_helpers\nfrom ptmscout.config import settings, strings\nfrom ptmscout.database import experiment, modifications, user, modifications, protein\nimport celery\nfrom ptmworker import notify_tasks, protein_tasks\nfrom ptmscout.utils import export_proteins, downloadutils\nimport csv, os, random\n\nNOTIFY_INTERVAL = 5\n\ndef annotate_experiment(user, exp, header, rows, job_id):\n notify_tasks.set_job_stage.apply_async((job_id, 'annotating', len(rows)))\n\n header += [ 'scansite_bind', 'scansite_kinase', 'nearby_modifications',\\\n 'nearby_mutations', 'nearby_mutation_annotations', \\\n 'site_pfam_domains', 'site_uniprot_domains',\\\n 'site_kinase_loop', 'site_macro_molecular',\\\n 'site_topological', 'site_structure',\\\n 'protein_pfam_domains', 'protein_uniprot_domains',\\\n 'protein_GO_BP', 'protein_GO_CC', 'protein_GO_MF' ]\n protein_mods = {}\n \n for ms in exp.measurements:\n if ms.protein_id not in protein_mods:\n protein_mods[ms.protein_id] = modifications.getMeasuredPeptidesByProtein(ms.protein_id, user)\n \n ms_map = {}\n for ms in exp.measurements:\n ms_map[ms.id] = ms\n \n i = 0\n mx_val = len(rows)\n for row in rows:\n ms = ms_map[row[0]]\n prot = ms.protein\n \n min_range = ms.peptides[0].peptide.site_pos - 7\n max_range = ms.peptides[-1].peptide.site_pos + 7\n \n nearby_modifications = set()\n for ms2 in protein_mods[ms.protein_id]:\n for modpep in ms2.peptides:\n site_type = modpep.peptide.site_type\n site_pos = modpep.peptide.site_pos\n mod_name = modpep.modification.name\n \n if min_range <= site_pos and site_pos <= max_range:\n nearby_modifications.add((site_pos, site_type, mod_name))\n \n nearby_modifications = [ \"%s%d: %s\" % (site_type, site_pos, mod_name) for site_pos, site_type, mod_name in sorted(list(nearby_modifications)) ]\n nearby_mutations = [ mutation for mutation in sorted(prot.mutations, key=lambda item: item.location) if min_range < mutation.location and mutation.location < max_range ]\n \n sep = settings.mod_separator_character + ' '\n \n scansite_kinase = []\n scansite_bind = []\n for modpep in ms.peptides:\n for pp in modpep.peptide.predictions:\n if pp.source=='scansite_kinase':\n scansite_kinase.append( \"%s (%.2f)\" % ( pp.value, pp.percentile ))\n if pp.source=='scansite_bind':\n scansite_bind.append( \"%s (%.2f)\" % ( pp.value, pp.percentile ))\n\n pfam_sites = export_proteins.filter_sites(ms, prot.domains)\n domain_sites = export_proteins.filter_site_regions(ms, prot.regions, set(['domain']))\n kinase_sites = export_proteins.filter_site_regions(ms, prot.regions, set(['Activation Loop']))\n macromolecular_sites = export_proteins.filter_site_regions(ms, prot.regions, set([ 'zinc finger region', 'intramembrane region', 'coiled-coil region', 'transmembrane region' ]))\n topological_sites = export_proteins.filter_site_regions(ms, prot.regions, set(['topological domain']))\n site_structure = export_proteins.filter_site_regions(ms, prot.regions, 
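# Hedged aside on the score-averaging record above: the hand-rolled
# per-subject and per-student loops can be collapsed with pandas groupby.
# Variable names here are illustrative, not from the original file:
import pandas as pd

scores = [[1, 'English', 25], [1, 'Japanese', 63], [1, 'Math', 42],
          [2, 'History', 90], [2, 'English', 44], [3, 'Math', 94]]
df = pd.DataFrame(scores, columns=['num', 'sub', 'score'])
per_subject = df.groupby('sub')['score'].agg(['mean', 'max', 'min'])
per_student = df.groupby('num')['score'].mean().round()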
set(['helix', 'turn', 'strand']))\n\n protein_uniprot_domains = export_proteins.filter_regions(prot.regions, set(['domain']))\n\n protein_GO = { 'P':set(), 'F':set(), 'C':set() }\n\n for ge in prot.GO_terms:\n term = ge.GO_term\n protein_GO[term.aspect].add(term.GO)\n\n row.append( sep.join(scansite_bind) )\n row.append( sep.join(scansite_kinase) )\n\n row.append( sep.join(nearby_modifications) )\n row.append( export_proteins.format_mutations( nearby_mutations ) )\n row.append( export_proteins.format_mutation_annotations( nearby_mutations ) )\n\n row.append( export_proteins.format_domains( pfam_sites ) )\n row.append( export_proteins.format_domains( domain_sites ) )\n row.append( export_proteins.format_domains( kinase_sites ) )\n row.append( export_proteins.format_regions( macromolecular_sites ) )\n row.append( export_proteins.format_domains( topological_sites ) )\n row.append( export_proteins.format_regions( site_structure ) )\n\n row.append( export_proteins.format_domains(prot.domains) )\n row.append( export_proteins.format_domains(protein_uniprot_domains) )\n \n row.append( sep.join(list(protein_GO['P'])) )\n row.append( sep.join(list(protein_GO['C'])) )\n row.append( sep.join(list(protein_GO['F'])) )\n\n i+=1\n if i % NOTIFY_INTERVAL == 0:\n notify_tasks.set_job_progress.apply_async((job_id, i, mx_val))\n\n return header, rows\n \ndef get_experiment_header(exp):\n header = ['MS_id', 'query_accession', 'gene', 'locus', 'protein_name', 'species', 'peptide', 'mod_sites', 'gene_site', 'aligned_peptides', 'modification_types']\n \n data_labels = set()\n for ms in exp.measurements:\n for d in ms.data:\n data_labels.add((d.run,d.type,d.units,d.label))\n \n def float_last_term(r,dt,u,l):\n try:\n l = float(l)\n except:\n pass\n \n return (r,dt,u,l)\n \n data_labels = [ \"%s:%s:%s:%s\" % (r,dt,u,str(l)) for r,dt,u,l in sorted(list(data_labels), key=lambda item: float_last_term(*item)) ]\n header += data_labels\n \n return header, data_labels\n\ndef get_experiment_data(exp, data_labels):\n rows = []\n for ms in exp.measurements:\n mod_sites = '; '.join([modpep.peptide.getName() for modpep in ms.peptides])\n aligned_peptides = '; '.join([modpep.peptide.pep_aligned for modpep in ms.peptides])\n modification_types = '; '.join([modpep.modification.name for modpep in ms.peptides])\n \n gene_sites = [ms.protein.getGeneName()] + [modpep.peptide.getName() for modpep in ms.peptides]\n row = [ms.id, ms.query_accession, ms.protein.acc_gene, ms.protein.locus, ms.protein.name, ms.protein.species.name, ms.peptide, mod_sites, '_'.join(gene_sites), aligned_peptides, modification_types]\n \n ms_data = {}\n for d in ms.data:\n formatted_label = \"%s:%s:%s:%s\" % (d.run, d.type, d.units, d.label)\n ms_data[formatted_label] = d.value\n \n for dl in data_labels:\n row.append( ms_data[dl] )\n \n rows.append(row)\n \n return rows\n\n\n@celery.task\n@upload_helpers.notify_job_failed\n@upload_helpers.dynamic_transaction_task\ndef run_experiment_export_job(annotate, export_id, exp_id, user_id, job_id):\n notify_tasks.set_job_status.apply_async((job_id, 'started'))\n notify_tasks.set_job_stage.apply_async((job_id, 'exporting', 0))\n\n exp_filename = 'experiment.%d.%d.%d.tsv' % (exp_id, user_id, export_id)\n exp_path = os.path.join(settings.ptmscout_path, settings.annotation_export_file_path, exp_filename)\n\n usr = user.getUserById(user_id)\n exp = experiment.getExperimentById(exp_id, usr)\n\n header, data_labels = get_experiment_header(exp)\n rows = get_experiment_data(exp, data_labels)\n\n if annotate:\n header, rows = 
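# Hedged aside: the annotation loop above buckets each protein's GO terms by
# aspect ('P'/'C'/'F') into a dict of sets. The same grouping in isolation,
# assuming an input of (aspect, GO id) pairs (helper name is mine):
from collections import defaultdict

def group_go_terms(term_pairs):
    buckets = defaultdict(set)          # aspect -> set of GO ids
    for aspect, go_id in term_pairs:
        buckets[aspect].add(go_id)
    return buckets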
annotate_experiment(usr, exp, header, rows, job_id)\n\n with open(exp_path, 'w') as export_file:\n cw = csv.writer(export_file, dialect='excel-tab')\n\n cw.writerow(header)\n for row in rows:\n cw.writerow(row)\n\n finalize_task = notify_tasks.finalize_experiment_export_job.s()\n return finalize_task, (job_id,), None\n\n@celery.task\n@upload_helpers.notify_job_failed\ndef annotate_proteins(protein_result, accessions, batch_id, exp_id, user_id, job_id):\n protein_map, protein_id_map = protein_result\n\n usr = user.getUserById(user_id)\n notify_tasks.set_job_stage.apply_async((job_id, 'annotate', len(protein_map)))\n data_filename = \"batch.data.%s.%d.tsv\" % (batch_id, user_id)\n metadata_filename = \"batch.metadata.%s.%d.tsv\" % (batch_id, user_id)\n zip_filename = \"batch.%s.%d.zip\" % (batch_id, user_id)\n\n data_filepath = os.path.join(settings.ptmscout_path, settings.annotation_export_file_path, data_filename)\n metadata_filepath = os.path.join(settings.ptmscout_path, settings.annotation_export_file_path, metadata_filename)\n zip_filepath = os.path.join(settings.ptmscout_path, settings.annotation_export_file_path, zip_filename)\n\n header = ['protein_id', 'query_accession', 'other_accessions', 'acc_gene', 'locus', 'protein_name',\\\n 'species', 'sequence', 'modifications', 'evidence',\\\n 'pfam_domains', 'uniprot_domains',\\\n 'kinase_loops', 'macro_molecular',\\\n 'topological', 'structure',\\\n 'mutations', 'mutation_annotations', 'scansite_predictions', 'GO_terms']\n rows = []\n success = 0\n errors = 0\n\n experiment_list = set()\n\n i = 0\n for acc in accessions:\n if acc in protein_map:\n pr = protein_map[acc]\n p = protein.getProteinBySequence( pr.sequence, pr.species )\n mods = modifications.getMeasuredPeptidesByProtein(p.id, usr)\n\n qaccs = export_proteins.get_query_accessions(mods)\n n, fmods, fexps, exp_list = export_proteins.format_modifications(mods, None)\n experiment_list |= exp_list\n\n row = []\n row.append( p.id )\n row.append( acc )\n row.append( export_proteins.format_protein_accessions(p.accessions, qaccs) )\n row.append( p.acc_gene )\n row.append( p.locus )\n row.append( p.name )\n row.append( p.species.name )\n row.append( p.sequence )\n row.append( fmods )\n row.append( fexps )\n\n uniprot_domains = export_proteins.filter_regions(p.regions, set([ 'domain' ]))\n kinase_loops = export_proteins.filter_regions(p.regions, set([ 'Activation Loop' ]))\n macromolecular = export_proteins.filter_regions(p.regions, set([ 'zinc finger region', 'intramembrane region', 'coiled-coil region', 'transmembrane region' ]))\n topological = export_proteins.filter_regions(p.regions, set([ 'topological domain' ]))\n structure = export_proteins.filter_regions(p.regions, set([ 'helix', 'turn', 'strand' ]))\n\n row.append( export_proteins.format_domains(p.domains) )\n row.append( export_proteins.format_domains(uniprot_domains) )\n row.append( export_proteins.format_domains(kinase_loops) )\n row.append( export_proteins.format_regions(macromolecular) )\n row.append( export_proteins.format_domains(topological) )\n row.append( export_proteins.format_regions(structure) )\n\n row.append( export_proteins.format_mutations(p.mutations) )\n row.append( export_proteins.format_mutation_annotations(p.mutations) )\n row.append( export_proteins.format_scansite(mods) )\n row.append( export_proteins.format_GO_terms(p) )\n rows.append( row )\n success += 1\n else:\n errors_for_acc = [ e.message for e in experiment.errorsForAccession(exp_id, acc) ]\n rows.append(['%d ERRORS: %s' % ( len(errors_for_acc), '; 
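# Hedged aside: the export step above is the stdlib csv tab-dialect pattern.
# Minimal standalone form (function name is illustrative):
import csv

def write_tsv(path, header, rows):
    with open(path, 'w') as fh:
        cw = csv.writer(fh, dialect='excel-tab')
        cw.writerow(header)
        cw.writerows(rows)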
'.join(errors_for_acc) ), acc])\n errors+=1\n\n i+=1\n if i % NOTIFY_INTERVAL == 0:\n notify_tasks.set_job_progress.apply_async((job_id, i, len(protein_map)))\n\n with open(data_filepath, 'w') as bfile:\n cw = csv.writer(bfile, dialect='excel-tab')\n cw.writerow(header)\n for row in rows:\n cw.writerow(row)\n\n experiments = [ experiment.getExperimentById(exp_id) for exp_id in experiment_list ]\n downloadutils.experiment_metadata_to_tsv(experiments, metadata_filepath)\n\n downloadutils.zip_package([data_filepath, metadata_filepath], zip_filepath)\n\n exp = experiment.getExperimentById(exp_id, secure=False, check_ready=False)\n exp.delete()\n\n return success, errors\n\ndef create_temp_experiment(user_id, job_id):\n exp = experiment.Experiment()\n exp.name = 'temp experiment %d' % (random.randint(0,100000))\n exp.author = ''\n exp.description = ''\n exp.contact = ''\n exp.PMID=0\n exp.URL=''\n exp.published=0\n exp.ambiguity=0\n exp.experiment_id=None\n exp.dataset=''\n exp.volume=0\n exp.page_start=''\n exp.page_end=''\n exp.journal=''\n exp.publication_year=0\n exp.publication_month=''\n exp.public = 0\n exp.job_id = job_id\n exp.submitted_id = user_id\n exp.type='dataset'\n\n exp.saveExperiment()\n return exp.id\n\n@celery.task\n@upload_helpers.notify_job_failed\n@upload_helpers.dynamic_transaction_task\ndef batch_annotate_proteins(accessions, batch_id, user_id, job_id):\n notify_tasks.set_job_status.apply_async((job_id, 'started'))\n notify_tasks.set_job_stage.apply_async((job_id, 'initializing', 0))\n\n accession_dict = {}\n line_mapping = {}\n for i, acc in enumerate(accessions):\n accession_dict[acc] = set([i+1])\n line_mapping[i+1] = (acc, '')\n\n exp_id = create_temp_experiment(user_id, job_id)\n\n get_proteins_task = protein_tasks.get_proteins_from_external_databases.s(accession_dict, line_mapping, exp_id, job_id)\n get_protein_metadata_task = protein_tasks.query_protein_metadata.s(accession_dict, line_mapping, exp_id, job_id)\n annotate_proteins_task = annotate_proteins.s(accessions, batch_id, exp_id, user_id, job_id)\n notify_task = notify_tasks.finalize_batch_annotate_job.s(job_id)\n\n load_task = ( get_proteins_task | get_protein_metadata_task | annotate_proteins_task | notify_task )\n\n return load_task, (None,), None\n","sub_path":"ptmscout_web/ptmworker/export_tasks.py","file_name":"export_tasks.py","file_ext":"py","file_size_in_byte":13814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"240033740","text":"from m2det.datasets.toplogo10s.children.base_toplogo10_child_dataset import BaseTL10ChildDataSet\nfrom m2det.errors import Errors\n\n\nclass TL10Train(BaseTL10ChildDataSet):\n @property\n def name_list(self):\n name_list_path = self.data_name_root/\"60_images_per_class_train.txt\"\n if not name_list_path.exists():\n raise Errors().FileNotFound(name_list_path)\n\n with name_list_path.open() as f:\n train_name_list = f.read()\n train_name_list = train_name_list.split(\"\\n\")\n return train_name_list","sub_path":"m2det/datasets/toplogo10s/children/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"418113936","text":"import asyncio\nimport aiohttp\nimport aiojobs\nimport uvloop\nimport ujson\n\nfrom typing import (\n List,\n Dict,\n Any,\n TYPE_CHECKING\n)\n\nif TYPE_CHECKING:\n from aiohttp import ClientSession\n\nuvloop.install()\n\nTEST_URL_API_URI = 
'https://finangel.com/api/categorization/v1/merchant/partners?category_id='\n\nSCHEDULER_AT_TIME_LIMIT = 20\n\nMATCH_HTTP_CODE = [200]\n\nFAULTY_HTTP_CODE = [403]\n\nmatch_uri = []\nfaulty_uri = []\n\n\nasync def fetch_merchant_dict(client: 'ClientSession') -> List[Dict[str, Any]]:\n    async with client.get(TEST_URL_API_URI) as resp:\n        assert resp.status == 200\n        return await resp.json()\n\n\nasync def test_uri(client: 'ClientSession', uri: str):\n    async with client.get(uri) as resp:\n        # print(f'{uri} - {resp.status}')\n        if resp.status in MATCH_HTTP_CODE:\n            match_uri.append(uri)\n        elif resp.status in FAULTY_HTTP_CODE:\n            faulty_uri.append(uri)\n\n\nasync def main():\n    async with aiohttp.ClientSession(\n            trust_env=True,\n            json_serialize=ujson.dumps,\n            connector=aiohttp.TCPConnector(limit=100, ssl=False)\n    ) as client:\n        merchant_dict = await fetch_merchant_dict(client)\n        uri_list_to_test = [merchant.get('merchant_url') for merchant in merchant_dict]\n\n        tasks = []\n\n        for ind, uri in enumerate(uri_list_to_test, start=1):\n            tasks.append(test_uri(client=client, uri=uri))\n\n            if len(tasks) == SCHEDULER_AT_TIME_LIMIT or ind == len(uri_list_to_test):\n                await asyncio.gather(*tasks)\n                tasks.clear()\n\n        # scheduler = await aiojobs.create_scheduler(limit=SCHEDULER_AT_TIME_LIMIT)\n        #\n        # for uri in uri_list_to_test:\n        #     await scheduler.spawn(test_uri(client=client, uri=uri))\n\n        print(f'match = {len(match_uri)}')\n        print(f'fault = {len(faulty_uri)}')\n\n\nif __name__ == '__main__':\n    asyncio.get_event_loop().run_until_complete(main())\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"350825858","text":"#!/usr/bin/env python\nfrom Map import Map\nimport numpy as np\nimport rospy\nfrom std_msgs.msg import String\nimport random\nfrom test_server.msg import Pos\n\ndef callback(data,args):\n    print(\"hallo\")\n    rospy.loginfo(rospy.get_caller_id() + \"I heard %s\",data.pos)\n    cur_pos=list(data.pos)\n    print(cur_pos)\n    map = args[0]\n    res = map.get_free_spaces(cur_pos)\n\n    if 0 in res:\n        i = random.randint(0,3)\n        p = res[i]\n        if p != 0:\n            while p != 0:\n                i = random.randint(0, 3)\n                p = res[i]\n    elif 1 in res:\n        i = random.randint(0, 3)\n        p = res[i]\n        if p != 1:\n            while p != 1:\n                i = random.randint(0, 3)\n                p = res[i]\n\n    old_pos = cur_pos\n    if i == 0:\n        cur_pos = [cur_pos[0],cur_pos[1]+1]\n    elif i == 1:\n        cur_pos = [cur_pos[0]+1,cur_pos[1]]\n    elif i == 2:\n        cur_pos = [cur_pos[0],cur_pos[1]-1]\n    elif i == 3:\n        cur_pos = [cur_pos[0]-1,cur_pos[1]]\n    map.set_pos(cur_pos[0],cur_pos[1],9)\n    map.set_pos(old_pos[0], old_pos[1], 1)\n    pub = args[1]\n    msg = Pos()\n    msg.pos = cur_pos\n    pub.publish(msg)\n    print(\"pubbed\")\n\n    print(map.map)\n\n\ndef listener():\n\n    map = Map((10,10))\n    map.set_pos(0,0,1)\n    map.set_pos(3,4,3)\n    map.set_pos(0,7,3)\n    map.set_pos(8,5,3)\n    map.set_pos(9,2,3)\n    # In ROS, nodes are uniquely named. If two nodes with the same\n    # name are launched, the previous one is kicked off. 
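# Hedged aside on the asyncio script above: awaiting gather() every
# SCHEDULER_AT_TIME_LIMIT tasks stalls each batch on its slowest request. A
# common alternative (sketch, names are mine) caps concurrency with a
# semaphore while keeping every task scheduled:
import asyncio

async def gather_limited(coros, limit=20):
    sem = asyncio.Semaphore(limit)

    async def guarded(coro):
        async with sem:
            return await coro

    return await asyncio.gather(*(guarded(c) for c in coros))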
The\n    # anonymous=True flag means that rospy will choose a unique\n    # name for our 'listener' node so that multiple listeners can\n    # run simultaneously.\n    print(map.map)\n    rospy.init_node('server_node', anonymous=True)\n    pub = rospy.Publisher(\"position_result_data_node\", Pos,queue_size=10)\n    pub_route = rospy.Publisher(\"shortest_route_res_node\", Pos,queue_size=10)\n    rospy.Subscriber(\"position_data_node\", Pos, callback,(map,pub))\n    rospy.Subscriber(\"shortest_route_node\", Pos, callback,(map,pub_route))\n    # spin() simply keeps python from exiting until this node is stopped\n    rospy.spin()\n\nif __name__ == '__main__':\n\n    listener()\n\n","sub_path":"robot/src/test_server/src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"369039730","text":"import os\nimport json\nimport ahocorasick\nfrom sets import Set\n\n__all__ = [\n    \"ServerStatus\",\n\n    \"DEBUG\", \"DB_MAX_SIZE\",\n    \"EXCLUDE_MIME_TYPES\",\n    \"SETTINGS_PATH\", \"MANAGEMENT_URL_KEYWORD_RATIO_THRESHOLD\",\n    \"JOB_KEYWORDS\", \"LOWERCASE_JOB_KEYWORDS_WILDCARDS_REMOVED\",\n    \"MANAGEMENT_KEYWORDS\",\n    \"EXCLUDE_NAMES\", \"LOWERCASE_JOB_KEYWORDS\",\n    \"EXCLUDE_NAMES_PREFIXES\", \"EXCLUDE_NAMES_SUFFIXES\",\n    \"LOWERCASE_COMMON_NAMES\",\n    \"LOWERCASE_JOB_KEYWORDS_WITHOUT_WILDCARDS\",\n\n    \"FETCH_URL_PYTHON_PATH\", \"TEMP_FILE_PATH\"\n]\n\nclass ServerStatus(object):\n    Free = \"Free\"\n    StartedExtraction = \"StartedExtraction\"\n    PausedExtraction = \"PausedExtraction\"\n    CompletedExtraction = \"CompletedExtraction\"\n\nDEBUG = (\"TE_DEBUG\" in os.environ)\nDB_MAX_SIZE = 20 * 1024 * 1024 * 1024\n\nEXCLUDE_MIME_TYPES = [\n    'image/x-cmu-raster', 'application/x-shar', 'image/x-ms-bmp',\n    'image/jpeg', 'application/postscript', 'video/quicktime',\n    'application/x-dvi', 'application/x-sh', 'application/x-troff-me',\n    'image/x-xpixmap', 'application/x-texinfo', 'text/xml',\n    'application/x-latex', 'image/ief', 'image/x-portable-anymap',\n    'application/x-mif', 'application/pdf', 'image/png',\n    'image/x-xbitmap', 'image/x-portable-bitmap', 'application/x-ustar',\n    'message/rfc822', 'text/x-setext', 'application/x-hdf',\n    'application/x-troff', 'image/x-rgb', 'application/x-troff-ms',\n    'image/gif', 'application/javascript', 'application/zip',\n    'application/xml', 'application/vnd.ms-excel',\n    'application/pkcs7-mime', 'image/tiff', 'application/x-sv4crc',\n    'application/x-tar', 'video/x-sgi-movie', 'image/x-portable-graymap',\n    'application/x-pn-realaudio', 'audio/x-pn-realaudio', 'audio/mpeg',\n    'audio/x-wav', 'application/x-netcdf', 'application/oda',\n    'image/vnd.microsoft.icon', 'application/x-gtar', 'video/mp4',\n    'application/octet-stream', 'text/x-vcard', 'image/x-portable-pixmap',\n    'application/x-sv4cpio', 'text/plain', 'application/x-wais-source',\n    'application/x-pkcs12', 'application/vnd.ms-powerpoint',\n    'text/tab-separated-values', 'application/x-tex',\n    'text/x-python', 'image/x-xwindowdump', 'application/x-python-code',\n    'application/x-cpio', 'application/x-bcpio',\n    'video/mpeg', 'text/richtext', 'text/x-sgml',\n    'application/x-tcl', 'video/x-msvideo', 'application/x-shockwave-flash',\n    'audio/basic', 'audio/x-aiff', 'application/x-csh',\n    'application/x-troff-man', 'text/css', 'application/msword'\n]\n\ncurrent_path = os.path.split(__file__)[0]\nSETTINGS_PATH = os.path.join(current_path, \"settings.json\")\nFETCH_URL_PYTHON_PATH = os.path.join(current_path, \"..\", \"fetch_url.py\")\nTEMP_FILE_PATH = 
os.path.join(current_path, \"..\", \"tempfilepath\")\nMANAGEMENT_URL_KEYWORD_RATIO_THRESHOLD = 5\n\nerror = \"settings file not found %s\" % (SETTINGS_PATH,)\nassert os.path.exists(SETTINGS_PATH), error\n\nerror = \"fetch url python file not found %s\" % (FETCH_URL_PYTHON_PATH,)\nassert os.path.exists(FETCH_URL_PYTHON_PATH), error\n\nJOB_KEYWORDS = None\nMANAGEMENT_KEYWORDS = None\nEXCLUDE_NAMES = None\nLOWERCASE_JOB_KEYWORDS = None\nEXCLUDE_NAMES_PREFIXES = None\nEXCLUDE_NAMES_SUFFIXES = None\nLOWERCASE_COMMON_NAMES = None\nLOWERCASE_JOB_KEYWORDS_WILDCARDS_REMOVED = ahocorasick.KeywordTree()\nLOWERCASE_JOB_KEYWORDS_WITHOUT_WILDCARDS = ahocorasick.KeywordTree()\n\n\ndef get_settings():\n global JOB_KEYWORDS, MANAGEMENT_KEYWORDS,\\\n EXCLUDE_NAMES, LOWERCASE_JOB_KEYWORDS,\\\n EXCLUDE_NAMES_PREFIXES, EXCLUDE_NAMES_SUFFIXES,\\\n LOWERCASE_COMMON_NAMES, LOWERCASE_JOB_KEYWORDS_WILDCARDS_REMOVED,\\\n LOWERCASE_JOB_KEYWORDS_WITHOUT_WILDCARDS\n\n f = open(SETTINGS_PATH)\n keywords = f.read()\n f.close()\n json_data = json.loads(keywords)\n assert type(json_data) is dict\n\n JOB_KEYWORDS = json_data[\"jobs\"]\n for x in JOB_KEYWORDS:\n lx = x.lower().strip()\n if \"@\" not in lx:\n LOWERCASE_JOB_KEYWORDS_WITHOUT_WILDCARDS.add(lx)\n word_to_add = None\n words = [x.strip() for x in lx.split(\"@\") if x.strip() != \"\"]\n if len(words) == 1:\n word_to_add = words[0]\n elif len(words) == 0:\n continue\n else:\n max_length = 0\n for word in words:\n if len(word) > max_length:\n max_length = len(word)\n word_to_add = word\n LOWERCASE_JOB_KEYWORDS_WILDCARDS_REMOVED.add(word_to_add)\n LOWERCASE_JOB_KEYWORDS_WILDCARDS_REMOVED.make()\n\n JOB_KEYWORDS = Set(\n [x.replace(\"@\", \"\\w*\").strip() for x in JOB_KEYWORDS]\n )\n LOWERCASE_JOB_KEYWORDS = ' '.join(JOB_KEYWORDS)\n LOWERCASE_JOB_KEYWORDS = Set(\n [x.lower().strip() for x in LOWERCASE_JOB_KEYWORDS.split()]\n )\n\n EXCLUDE_NAMES = json_data[\"exclude_names\"]\n EXCLUDE_NAMES = Set([x.lower().strip() for x in EXCLUDE_NAMES])\n\n EXCLUDE_NAMES_PREFIXES = json_data[\"exclude_names_prefixes\"]\n EXCLUDE_NAMES_PREFIXES = Set(\n [\n x.lower().strip() for x in EXCLUDE_NAMES_PREFIXES\n ]\n )\n\n EXCLUDE_NAMES_SUFFIXES = json_data[\"exclude_names_suffixes\"]\n EXCLUDE_NAMES_SUFFIXES = Set(\n [x.lower().strip() for x in EXCLUDE_NAMES_SUFFIXES]\n )\n\n LOWERCASE_COMMON_NAMES = ' '.join(json_data[\"common_names\"])\n LOWERCASE_COMMON_NAMES = Set(\n [x.lower().strip() for x in LOWERCASE_COMMON_NAMES.split()]\n )\n\n MANAGEMENT_KEYWORDS = json_data[\"management\"]\n\n LOWERCASE_JOB_KEYWORDS_WITHOUT_WILDCARDS.make()\n\nget_settings()\n","sub_path":"Src-server/teamextractionserver/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"592456264","text":"nome = continuar = barato = ''\npreço = .1\ntotal = mais1000 = cont = 0\nmenor = -1\n\nwhile True:\n print('-=' * 35)\n nome = str(input('Digite o nome do produto: ')).strip().lower().title()\n preço = float(input('Agora o preço: R$'))\n cont += 1\n\n total += preço\n if cont == 1:\n menor = preço\n barato = nome\n elif preço < menor:\n menor = preço\n barato = nome\n if preço > 1000:\n mais1000 += 1\n\n continuar = str(input('Deseja continuar digitando mais produtos? 
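# Hedged aside on get_settings() above: '@' wildcards become the regex \w*,
# and for the Aho-Corasick tree only the longest literal fragment of a
# wildcard keyword is kept. Both transformations in isolation (helper names
# are mine; tie-breaking on equal lengths may differ from the original):
def keyword_to_regex(keyword):
    return keyword.replace('@', '\\w*').strip()

def longest_literal(keyword):
    parts = [p.strip() for p in keyword.lower().split('@') if p.strip()]
    return max(parts, key=len) if parts else None

# keyword_to_regex('data@ scientist') -> 'data\\w* scientist'
# longest_literal('data@ scientist') -> 'scientist'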
(S/N): ')).strip().lower()[0]\n if continuar == 'n':\n break\nprint('-=' * 35)\nprint(f'O valor total dos produtos será R${total:.2f}\\n'\n f'você pagou mais de R$1000,00 em {mais1000} produtos\\n'\n f'e o produto mais barato foi {barato}')\n","sub_path":"Exercícios - Mundo 2/Ex070.py","file_name":"Ex070.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"26024153","text":"from flask_restful import Resource, reqparse, abort, fields, marshal_with, marshal \nfrom flask import request\nfrom bson.objectid import ObjectId\nfrom pymongo import ReturnDocument \nfrom datetime import datetime\n\nfrom src.db.main import get_db\nfrom src.auth.auths import Auth \n\n\n_stars_fields = {\n \"id\": fields.Integer,\n \"title\": fields.String,\n \"author\": fields.String,\n \"url\": fields.String,\n \"date\": fields.DateTime\n }\n\n\n_stars_parser = reqparse.RequestParser()\n_stars_parser.add_argument(\"title\", required=True, help='{error_msg}')\n_stars_parser.add_argument(\"author\", required=True, help='{error_msg}')\n_stars_parser.add_argument(\"url\", required=True, help='{error_msg}')\n_stars_parser.add_argument(\"date\", required=True, help='{error_msg}')\n\n\n\nclass Stars(Resource):\n \"\"\"star list\"\"\"\n\n method_decorators = [Auth.authenticate]\n\n @marshal_with(_stars_fields,envelope='resource')\n def get(self):\n \"\"\"get all star list\"\"\"\n return list(get_db().stars.find())\n\n def post(self):\n \"\"\"添加一个收藏\"\"\"\n args = _stars_parser.parse_args()\n db = get_db()\n max_id = max([item.get(\"id\",0) for item in list(db.stars.find())] or (0,))\n args.update({\n \"id\": max_id + 1,\n \"date\": datetime.strptime(args[\"date\"], '%Y/%m/%d')\n })\n query_id = db.stars.insert_one(args).inserted_id \n return marshal(db.stars.find_one({\"_id\":query_id}),_stars_fields), 201\n\n def put(self):\n \"\"\"更新收藏列表\"\"\"\n return\n\n def delete(self):\n \"\"\"delete all starts : forbid\"\"\"\n return \n\n\nclass Star(Resource): \n \"\"\"handle one star\"\"\"\n\n method_decorators = [Auth.authenticate]\n\n @marshal_with(_stars_fields,envelope='resource')\n def get(self,star_id):\n \"\"\"get one star\"\"\"\n pass\n\n def post(self):\n \"\"\"forbid\"\"\"\n return \n\n def put(self,star_id):\n \"\"\"update one star\"\"\"\n pass\n\n def delete(self,star_id):\n \"\"\"delete one star\"\"\"\n get_db().stars.find_one_and_delete({'id':star_id})\n return '', 204\n\n","sub_path":"src/resources/stars.py","file_name":"stars.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"182257790","text":"from __future__ import print_function\n\nimport mxnet as mx\nimport mxnext as X\nfrom mxnext.backbone.resnet_v2 import Builder\n\n\nbn_count = [10000]\n\nclass TridentResNetV2Builder(Builder):\n def __init__(self):\n super().__init__()\n\n @staticmethod\n def bn_shared(data, name, normalizer, branch_ids=None, share_weight=True):\n if branch_ids is None:\n branch_ids = range(len(data))\n\n gamma = X.var(name + \"_gamma\")\n beta = X.var(name + \"_beta\")\n moving_mean = X.var(name + \"_moving_mean\")\n moving_var = X.var(name + \"_moving_var\")\n\n bn_layers = []\n for i, data_i in zip(branch_ids, data):\n if share_weight:\n bn_i = normalizer(data=data_i, name=name + \"_shared%d\" % i,\n gamma=gamma, beta=beta, moving_mean=moving_mean, moving_var=moving_var)\n else:\n bn_i = normalizer(data=data_i, name=name + \"_branch%d\" % i)\n 
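# (Note on the two branches above: both build a BatchNorm over data_i; the
# shared path passes the same gamma/beta/moving-stat Variables to every
# trident branch, so MXNet binds a single parameter set across branches,
# while the unshared path lets each branch learn its own "_branch%d" set.)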
bn_layers.append(bn_i)\n\n return bn_layers\n\n @staticmethod\n def conv_shared(data, name, kernel, num_filter, branch_ids=None, no_bias=True, share_weight=True,\n pad=(0, 0), stride=(1, 1), dilate=(1, 1)):\n if branch_ids is None:\n branch_ids = range(len(data))\n\n weight = X.var(name + '_weight')\n if no_bias:\n bias = None\n else:\n bias = X.var(name + '_bias')\n\n conv_layers = []\n for i in range(len(data)):\n data_i = data[i]\n stride_i = stride[i] if type(stride) is list else stride\n dilate_i = dilate[i] if type(dilate) is list else dilate\n pad_i = pad[i] if type(pad) is list else pad\n branch_i = branch_ids[i]\n if share_weight:\n conv_i = X.conv(data=data_i, kernel=kernel, filter=num_filter, stride=stride_i, dilate=dilate_i, pad=pad_i,\n name=name + '_shared%d' % branch_i, no_bias=no_bias, weight=weight, bias=bias)\n else:\n conv_i = X.conv(data=data_i, kernel=kernel, filter=num_filter, stride=stride_i, dilate=dilate_i, pad=pad_i,\n name=name + '_branch%d' % branch_i, no_bias=no_bias)\n conv_layers.append(conv_i)\n\n return conv_layers\n\n @staticmethod\n def deform_conv_shared(data, name, conv_offset, kernel, num_filter, branch_ids=None, no_bias=True, share_weight=True,\n num_deformable_group=4, pad=(0, 0), stride=(1, 1), dilate=(1, 1)):\n if branch_ids is None:\n branch_ids = range(len(data))\n\n weight = X.var(name + '_weight')\n if no_bias:\n bias = None\n else:\n bias = X.var(name + '_bias')\n\n conv_layers = []\n for i in range(len(data)):\n data_i = data[i]\n stride_i = stride[i] if type(stride) is list else stride\n dilate_i = dilate[i] if type(dilate) is list else dilate\n pad_i = pad[i] if type(pad) is list else pad\n conv_offset_i = conv_offset[i] if type(conv_offset) is list else conv_offset\n branch_i = branch_ids[i]\n if share_weight:\n conv_i = mx.contrib.symbol.DeformableConvolution(\n data=data_i, offset=conv_offset_i, kernel=kernel, num_filter=num_filter, stride=stride_i, num_deformable_group=4,\n dilate=dilate_i, pad=pad_i, no_bias=no_bias, weight=weight, bias=bias, name=name + '_shared%d' % branch_i)\n else:\n conv_i = mx.contrib.symbol.DeformableConvolution(\n data=data_i, offset=conv_offset_i, kernel=kernel, num_filter=num_filter, stride=stride_i, num_deformable_group=4,\n dilate=dilate_i, pad=pad_i, no_bias=no_bias, name=name + '_branch%d' % branch_i)\n conv_layers.append(conv_i)\n\n return conv_layers\n\n @staticmethod\n def stack_branch_symbols(data_list):\n data = mx.symbol.stack(*data_list, axis=1)\n data = mx.symbol.Reshape(data, (-3, -2))\n\n return data\n\n @classmethod\n def resnet_trident_unit(cls, data, name, filter, stride, dilate, proj, norm_type, norm_mom, ndev,\n branch_ids, branch_bn_shared, branch_conv_shared, branch_deform=False):\n \"\"\"\n One resnet unit is comprised of 2 or 3 convolutions and a shortcut.\n :param data:\n :param name:\n :param filter:\n :param stride:\n :param dilate:\n :param proj:\n :param norm_type:\n :param norm_mom:\n :param ndev:\n :param branch_ids:\n :param branch_bn_shared:\n :param branch_conv_shared:\n :param branch_deform:\n :return:\n \"\"\"\n if branch_ids is None:\n branch_ids = range(len(data))\n\n norm = X.normalizer_factory(type=norm_type, ndev=ndev, mom=norm_mom)\n\n bn1 = cls.bn_shared(\n data, name=name + \"_bn1\", normalizer=norm, branch_ids=branch_ids, share_weight=branch_bn_shared)\n relu1 = [X.relu(bn) for bn in bn1]\n conv1 = cls.conv_shared(\n relu1, name=name + \"_conv1\", num_filter=filter // 4, kernel=(1, 1),\n branch_ids=branch_ids, share_weight=branch_conv_shared)\n\n bn2 = 
cls.bn_shared(\n conv1, name=name + \"_bn2\", normalizer=norm, branch_ids=branch_ids, share_weight=branch_bn_shared)\n relu2 = [X.relu(bn) for bn in bn2]\n if not branch_deform:\n conv2 = cls.conv_shared(\n relu2, name=name + \"_conv2\", num_filter=filter // 4, kernel=(3, 3),\n pad=dilate, stride=stride, dilate=dilate,\n branch_ids=branch_ids, share_weight=branch_conv_shared)\n else:\n conv2_offset = cls.conv_shared(\n relu2, name=name + \"_conv2_offset\", num_filter=72, kernel=(3, 3),\n pad=(1, 1), stride=(1, 1), dilate=(1, 1), no_bias=False,\n branch_ids=branch_ids, share_weight=branch_conv_shared)\n conv2 = cls.deform_conv_shared(\n relu2, name=name + \"_conv2\", conv_offset=conv2_offset, num_filter=filter // 4, kernel=(3, 3),\n pad=dilate, stride=stride, dilate=dilate, num_deformable_group=4,\n branch_ids=branch_ids, share_weight=branch_conv_shared)\n\n bn3 = cls.bn_shared(\n conv2, name=name + \"_bn3\", normalizer=norm, branch_ids=branch_ids, share_weight=branch_bn_shared)\n relu3 = [X.relu(bn) for bn in bn3]\n conv3 = cls.conv_shared(\n relu3, name=name + \"_conv3\", num_filter=filter, kernel=(1, 1),\n branch_ids=branch_ids, share_weight=branch_conv_shared)\n\n if proj:\n shortcut = cls.conv_shared(\n relu1, name=name + \"_sc\", num_filter=filter, kernel=(1, 1),\n branch_ids=branch_ids, share_weight=branch_conv_shared)\n else:\n shortcut = data\n\n return [X.add(conv3_i, shortcut_i, name=name + \"_plus_branch{}\".format(i)) \\\n for i, conv3_i, shortcut_i in zip(branch_ids, conv3, shortcut)]\n\n @classmethod\n def resnet_trident_stage(cls, data, name, num_block, filter, stride, dilate, norm_type, norm_mom, ndev,\n num_trident_block, num_branch, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform):\n \"\"\"\n One resnet stage is comprised of multiple resnet units. Refer to depth config for more information.\n :param data:\n :param name:\n :param num_block:\n :param filter:\n :param stride:\n :param dilate:\n :param norm_type:\n :param norm_mom:\n :param ndev:\n :param num_branch:\n :param branch_ids:\n :param branch_bn_shared:\n :param branch_conv_shared:\n :return:\n \"\"\"\n assert isinstance(dilate, list) and len(dilate) == num_branch, 'dilate should be a list with num_branch items.'\n num_trident_block = num_trident_block or (num_block - 1) # transform all blocks by default\n\n d = [(d, d) for d in dilate]\n\n data = cls.resnet_unit(data, \"{}_unit1\".format(name), filter, stride, 1, True, norm_type, norm_mom, ndev)\n for i in range(2, num_block + 1):\n # [i ... num_block] == [1 ... 
num_trident_block]\n if i == (num_block - num_trident_block + 1):\n data = [data] * num_branch\n if i >= (num_block - num_trident_block + 1):\n if branch_deform and i >= num_block - 2:\n unit_deform = True\n else:\n unit_deform = False\n # cast back to fp32 as deformable conv is not optimized for fp16\n if unit_deform and i == num_block - 2:\n for j in range(num_branch):\n data[j] = X.to_fp32(data[j], name=\"deform_to32\")\n data = cls.resnet_trident_unit(\n data, \"{}_unit{}\".format(name, i), filter, (1, 1), d, False, norm_type, norm_mom, ndev,\n branch_ids, branch_bn_shared, branch_conv_shared, branch_deform=unit_deform)\n else:\n data = cls.resnet_unit(data, \"{}_unit{}\".format(name, i), filter, 1, 1, False, norm_type, norm_mom, ndev)\n\n return data\n\n @classmethod\n def resnet_trident_c4(cls, data, num_block, stride, dilate, norm_type, norm_mom, ndev, num_trident_block,\n num_branch, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform):\n return cls.resnet_trident_stage(\n data, \"stage3\", num_block, 1024, stride, dilate, norm_type, norm_mom, ndev, num_trident_block,\n num_branch, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform)\n\n @classmethod\n def resnet_factory(cls, depth, use_3x3_conv0, use_bn_preprocess, num_trident_block,\n num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,\n norm_type=\"local\", norm_mom=0.9, ndev=None, fp16=False):\n num_c2_unit, num_c3_unit, num_c4_unit, num_c5_unit = TridentResNetV2Builder.depth_config[depth]\n\n data = X.var(\"data\")\n if fp16:\n data = X.to_fp16(data, \"data_fp16\")\n c1 = cls.resnet_c1(data, use_3x3_conv0, use_bn_preprocess, norm_type, norm_mom, ndev)\n c2 = cls.resnet_c2(c1, num_c2_unit, 1, 1, norm_type, norm_mom, ndev)\n c3 = cls.resnet_c3(c2, num_c3_unit, 2, 1, norm_type, norm_mom, ndev)\n c4 = cls.resnet_trident_c4(c3, num_c4_unit, 2, branch_dilates, norm_type, norm_mom, ndev, num_trident_block,\n num_branch, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform)\n # stack branch features and merge into batch dim\n c4 = cls.stack_branch_symbols(c4)\n c5 = cls.resnet_c5(c4, num_c5_unit, 1, 2, norm_type, norm_mom, ndev)\n\n return c1, c2, c3, c4, c5\n\n @classmethod\n def resnet_c4_factory(cls, depth, use_3x3_conv0, use_bn_preprocess, num_trident_block,\n num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,\n norm_type=\"local\", norm_mom=0.9, ndev=None, fp16=False):\n c1, c2, c3, c4, c5 = cls.resnet_factory(depth, use_3x3_conv0, use_bn_preprocess, num_trident_block,\n num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,\n norm_type, norm_mom, ndev, fp16)\n\n return c4\n\n @classmethod\n def resnet_c4c5_factory(cls, depth, use_3x3_conv0, use_bn_preprocess, num_trident_block,\n num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,\n norm_type=\"local\", norm_mom=0.9, ndev=None, fp16=False):\n c1, c2, c3, c4, c5 = cls.resnet_factory(depth, use_3x3_conv0, use_bn_preprocess, num_trident_block,\n num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,\n norm_type, norm_mom, ndev, fp16)\n c5 = X.fixbn(c5, \"bn1\")\n c5 = X.relu(c5)\n\n return c4, c5\n\n def get_backbone(self, variant, depth, endpoint, normalizer, fp16, num_trident_block,\n num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform):\n # parse variant\n if variant == \"mxnet\":\n use_bn_preprocess = True\n 
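# Hedged aside: conv_shared above realises TridentNet's weight sharing by
# handing one variable (via the mxnext X.var/X.conv wrappers) to several
# convolutions. In the raw MXNet symbol API the same mechanism looks roughly
# like this sketch (symbol names are mine):
import mxnet as mx

data = mx.sym.Variable('data')
w = mx.sym.Variable('shared_weight')
branch_small = mx.sym.Convolution(data=data, weight=w, num_filter=64,
                                  kernel=(3, 3), pad=(1, 1), dilate=(1, 1),
                                  no_bias=True, name='conv_d1')
branch_large = mx.sym.Convolution(data=data, weight=w, num_filter=64,
                                  kernel=(3, 3), pad=(2, 2), dilate=(2, 2),
                                  no_bias=True, name='conv_d2')
# Identical filters, different receptive fields via dilation.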
use_3x3_conv0 = False\n elif variant == \"tusimple\":\n use_bn_preprocess = False\n use_3x3_conv0 = True\n else:\n raise KeyError(\"Unknown backbone variant {}\".format(variant))\n\n # parse endpoint\n if endpoint == \"c4\":\n factory = self.resnet_c4_factory\n elif endpoint == \"c4c5\":\n factory = self.resnet_c4c5_factory\n else:\n raise KeyError(\"Unknown backbone endpoint {}\".format(endpoint))\n\n return factory(depth, use_3x3_conv0, use_bn_preprocess, num_trident_block,\n num_branch, branch_dilates, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform,\n norm_type=normalizer, fp16=fp16)\n","sub_path":"models/tridentnet/resnet_v2.py","file_name":"resnet_v2.py","file_ext":"py","file_size_in_byte":13384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"631906383","text":"import os\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\n\n# IMPORTANT SCRIPT\n# Script gets all the VCFs from samples and compares mutations from VCF and from Walker dictionaries\n# Works good only with SNP\n\nname,sequence = '',''\nPATH_input = '/export/data/kchukreev/1_input/'\nfasta_sequences = SeqIO.parse(open(PATH_input+'AL123456_rev.fa'),'fasta')\nfor fasta in fasta_sequences:\n name, sequence = fasta.id, fasta.seq.tostring()\n\ndef make_walker_database():\n lines = [line[:-1].split('\\t') for line in open('walker_database.txt').readlines()]\n mutations = []\n for mut in lines:\n if 'del' in mut[0]:\n gene = mut[0].split('_')[0]\n pos = mut[0].split('_')[1]\n letters = mut[0].split('_')[2][3:]\n mutations.append(['del', gene, pos, letters, mut[1], mut[2]])\n elif 'ins' in mut[0]:\n gene = mut[0].split('_')[0]\n pos = mut[0].split('_')[1]\n letters = mut[0].split('_')[2][3:]\n mutations.append(['ins', gene, pos, letters, mut[1], mut[2]])\n else:\n try:\n gene = mut[0].split('_')[0]\n change = mut[0].split('_')[1]\n from_let = change[0]\n to_let = change[-1]\n pos = change[1:-1]\n mutations.append(['snp', gene, pos, from_let, to_let, mut[1], mut[2]])\n except Exception:\n print(mut)\n\n return mutations\ndatabase_mutations = make_walker_database()\n\ndef get_genes_info(sequence):\n\n import re\n lines = open(PATH_input+'AL123456_rev.gff').readlines()\n gene_reg = re.compile('\\tgene \\w*')\n genes = []\n for line in lines:\n m = gene_reg.search(line)\n if m:\n genes.append(line)\n genes = [[line[:-1].split('\\t')[3],line[:-1].split('\\t')[4], line[:-1].split('\\t')[8].split(' ')[1], line[:-1].split('\\t')[6]] for line in genes]\n\n list_genes = ['ahpC', 'eis', 'embA', 'embB', 'embC', 'embR', 'fabG1', 'gid', 'gyrA', 'gyrB', 'inhA', 'iniA', 'iniC',\n 'katG', 'manB', 'ndh', 'pncA', 'rmlD', 'rpoB', 'rpsA', 'rpsL', 'rrs', 'tlyA']\n\n def get_sequence(genes, gene, sequence):\n for element in genes:\n if element[2] == gene:\n return [int(element[0]), int(element[1]), element[3], sequence[int(element[0])-1:int(element[1])]]\n\n genes_seqs = dict()\n for gene in list_genes:\n genes_seqs[gene] = get_sequence(genes,gene,sequence)\n\n return genes_seqs\ngenes = get_genes_info(sequence) #start end strand seq\ngenes['gidB'] = genes['gid']\ndel(genes['gid'])\n\ndef getting_walker_samns(names, kul_list):\n names = [line[:-1] for line in open(names).readlines()]\n kul_list = [line[:-1].split('\\t') for line in open(kul_list).readlines()]\n\n for i in range(len(names)):\n if names[i][:3] != 'SAM':\n name = names[i].split(',')[0]\n for el in kul_list:\n if name == el[4] or name == el[5]:\n names[i] = el[1]\n\n return names\nall_names = [line[:-1] for line in 
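# Hedged aside: make_walker_database above parses tokens such as 'rpoB_S450L'
# into gene, reference residue, position and alternate residue. The snp branch
# in isolation (helper name is mine):
def parse_snp_token(token):
    gene, change = token.split('_', 1)
    return gene, change[0], int(change[1:-1]), change[-1]

# parse_snp_token('rpoB_S450L') -> ('rpoB', 'S', 450, 'L')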
open(PATH_input+'all_ids.txt').readlines()]\nwalker_names = [line[:-1] for line in open(PATH_input+'walker_ids.txt').readlines()]\nnonwalker_names = [name for name in all_names if name not in walker_names]\n\ndef make_upstream_snps(database_mutations, genes):\n upstream_snps = []\n for mutation in database_mutations:\n if '-' in mutation[2] and mutation[0] == 'snp':\n \n #nucleotide shift, nucl < 0\n nucl = int(mutation[2])\n pos = 0\n alt = ''\n\n if genes[mutation[1]][2] == '+':\n pos = genes[mutation[1]][0] + nucl\n alt = mutation[4] \n else:\n pos = genes[mutation[1]][1] - nucl\n alt = Seq(mutation[4]).reverse_complement().tostring()\n\n upstream_snps.append([pos, alt, mutation])\n\n return upstream_snps\nupstream_snps = make_upstream_snps(database_mutations, genes)\n\ndef BuildAminoAcids(sequence, strand, start_pos, isFirstModified, isSecondModified, isThirdModified, alt1, alt2, alt3):\n old_triplet = ''\n new_triplet = ''\n\n if strand == '+':\n\n old_triplet = sequence[start_pos:start_pos+3]\n if isFirstModified:\n new_triplet += alt1\n else:\n new_triplet += sequence[start_pos]\n\n if isSecondModified:\n new_triplet += alt2\n else:\n new_triplet += sequence[start_pos+1]\n\n\n if isThirdModified:\n new_triplet += alt3\n else:\n new_triplet += sequence[start_pos+2]\n\n else:\n\n old_triplet = str(Seq(sequence[start_pos-2:start_pos+1]).reverse_complement())\n\n if isFirstModified:\n new_triplet += alt1\n else:\n new_triplet += sequence[start_pos-2]\n\n\n if isSecondModified:\n new_triplet += alt2\n else:\n new_triplet += sequence[start_pos-1]\n \n if isThirdModified:\n new_triplet += alt3\n else:\n new_triplet += sequence[start_pos]\n\n new_triplet = str(Seq(new_triplet).reverse_complement())\n \n old_aminoacid = str(Seq(old_triplet).translate())\n new_aminoacid = str(Seq(new_triplet).translate())\n\n return [old_aminoacid, new_aminoacid]\n\ndef look_vcf_file(filename, name, upstream_snp, file):\n #for SNP\n print('Looking at ' + name)\n info = []\n file.write(name+'\\n')\n # open .vcf file\n try:\n raw_data_2 = [[int(line.split('\\t')[1]), line.split('\\t')[3], line.split('\\t')[4]] for line in open(filename[:-4]).readlines() if line[0] != '#' and float(line.split('\\t')[5]) >= 600]\n except Exception:\n return []\n\n data = []\n for el in raw_data_2:\n if el not in data:\n data.append(el)\n\n for i in range(len(data)):\n\n pos = data[i][0]\n ref = data[i][1]\n alt = data[i][2]\n\n #UPSTREAM SNP ANALYSIS\n for snp in upstream_snp:\n if snp[0] == pos and snp[1] == alt:\n # print(snp[2])\n file.write(' '.join(snp[2]) + '\\n')\n info.append(snp[2]) \n\n\n cur_gene = ''\n if len(ref) == len(alt):\n #SNP ANALYSIS\n\n for key in genes:\n if int(genes[key][0]) <= int(pos) and int(pos) <= int(genes[key][1]):\n cur_gene = key\n break\n\n if cur_gene != '':\n if genes[cur_gene][2] == '+':\n protein_pos = (pos - genes[cur_gene][0])/3 + 1\n nucleotide_pos = (pos - genes[cur_gene][0])%3\n\n #NEIGHBOURS ANALYSIS\n if nucleotide_pos == 0:\n if i!=len(data)-1 and data[i+1][0] == pos+1:\n if i!=len(data)-2 and data[i+2][0] == pos+2:\n aminoacids = BuildAminoAcids(sequence, \"+\", pos-1, True, True, True, alt, data[i+1][2], data[i+2][2])\n else:\n aminoacids = BuildAminoAcids(sequence, \"+\", pos-1, True, True, False, alt, data[i+1][2], '-')\n elif i!=len(data)-1 and data[i+1][0] == pos+2:\n aminoacids = BuildAminoAcids(sequence, \"+\", pos-1, True, False, True, alt, '-', data[i+1][2])\n else:\n aminoacids = BuildAminoAcids(sequence, \"+\", pos-1, True, False, False, alt, '-', '-')\n\n elif 
nucleotide_pos == 1:\n if i!=0 and data[i-1][0] == pos-1:\n if i!=len(data)-1 and data[i+1][0] == pos+1:\n aminoacids = BuildAminoAcids(sequence, \"+\", pos-2, True, True, True, data[i-1][2], alt, data[i+1][2])\n else:\n aminoacids = BuildAminoAcids(sequence, \"+\", pos-2, True, True, False, data[i-1][2], alt, '-')\n elif i!=len(data)-1 and data[i+1] == pos+1:\n aminoacids = BuildAminoAcids(sequence, \"+\", pos-2, False, True, True, '-', alt, data[i+1][2])\n else:\n aminoacids = BuildAminoAcids(sequence, \"+\", pos-2, False, True, False, '-', alt, '-')\n else:\n if i != 0 and data[i-1][0] == pos-1:\n if i != 1 and data[i-2][0] == pos-2:\n aminoacids = BuildAminoAcids(sequence, \"+\", pos-3, True, True, True, data[i-2][2], data[i-1][2], alt)\n else:\n aminoacids = BuildAminoAcids(sequence, \"+\", pos-3, False, True, True, '-', data[i-1][2], alt)\n elif i != 0 and data[i-1][0] == pos-2:\n aminoacids = BuildAminoAcids(sequence, \"+\", pos-3, True, False, True, data[i-2][2], '-', alt)\n else:\n aminoacids = BuildAminoAcids(sequence, \"+\", pos-3, False, False, True, '-', '-', alt)\n\n old_aminoacid = aminoacids[0]\n new_aminoacid = aminoacids[1]\n\n for walker_mut in database_mutations:\n if walker_mut[0] == 'snp':\n if walker_mut[1] == cur_gene and walker_mut[2][0] != '-' and int(walker_mut[2]) == protein_pos:\n \n if old_aminoacid != walker_mut[3]:\n print(walker_mut)\n print(data[i])\n\n if (old_aminoacid == walker_mut[3]) and (new_aminoacid == walker_mut[4]):\n file.write(' '.join(walker_mut) + '\\n')\n info.append(walker_mut)\n\n else: # if strand is '-'\n if cur_gene == 'rrs':\n walker_coord_start = genes[cur_gene][1] - (pos-1)\n reference_nucl = Seq(sequence[walker_coord_start-1]).reverse_complement()\n\n for walker_mut in database_mutations:\n if walker_mut[0] == 'snp':\n if walker_mut[0] == 'snp' and walker_mut[1] == 'rrs':\n if int(walker_mut[2]) == int(walker_coord_start) and str(Seq(alt).reverse_complement()) == walker_mut[4]:\n file.write(' '.join(walker_mut) + '\\n')\n info.append(walker_mut)\n else:\n\n protein_pos = (genes[cur_gene][1] - pos)/3 + 1\n nucleotide_pos = (genes[cur_gene][1] - pos)%3\n\n #NEIGHBOURS ANALYSIS\n if nucleotide_pos == 2:\n if i!=len(data)-1 and data[i+1][0] == pos+1:\n if i!=len(data)-2 and data[i+2][0] == pos+2:\n aminoacids = BuildAminoAcids(sequence, \"-\", pos+1, True, True, True, alt, data[i+1][2], data[i+2][2])\n else:\n aminoacids = BuildAminoAcids(sequence, \"-\", pos+1, True, True, False, alt, data[i+1][2], '-')\n elif i!=len(data)-1 and data[i+1][0] == pos+2:\n aminoacids = BuildAminoAcids(sequence, \"-\", pos+1, True, False, True, alt, '-', data[i+1][2])\n else:\n aminoacids = BuildAminoAcids(sequence, \"-\", pos+1, True, False, False, alt, '-', '-')\n\n elif nucleotide_pos == 1:\n if i!=0 and data[i-1][0] == pos-1:\n if i!=len(data)-1 and data[i+1][0] == pos+1:\n aminoacids = BuildAminoAcids(sequence, \"-\", pos, True, True, True, data[i-1][2], alt, data[i+1][2])\n else:\n aminoacids = BuildAminoAcids(sequence, \"-\", pos, True, True, False, data[i-1][2], alt, '-')\n elif i!=len(data)-1 and data[i+1] == pos+1:\n aminoacids = BuildAminoAcids(sequence, \"-\", pos, False, True, True, '-', alt, data[i+1][2])\n else:\n aminoacids = BuildAminoAcids(sequence, \"-\", pos, False, True, False, '-', alt, '-')\n else:\n if i != 0 and data[i-1][0] == pos-1:\n if i != 1 and data[i-2][0] == pos-2:\n aminoacids = BuildAminoAcids(sequence, \"-\", pos-1, True, True, True, data[i-2][2], data[i-1][2], alt)\n else:\n aminoacids = BuildAminoAcids(sequence, \"-\", 
pos-1, False, True, True, '-', data[i-1][2], alt)\n elif i != 0 and data[i-1][0] == pos-2:\n aminoacids = BuildAminoAcids(sequence, \"-\", pos-1, True, False, True, data[i-2][2], '-', alt)\n else:\n aminoacids = BuildAminoAcids(sequence, \"-\", pos-1, False, False, True, '-', '-', alt)\n\n old_aminoacid = aminoacids[0]\n new_aminoacid = aminoacids[1]\n\n\n for walker_mut in database_mutations:\n if walker_mut[0] == 'snp':\n if walker_mut[1] == cur_gene and walker_mut[2][0] != '-' and int(walker_mut[2]) == protein_pos:\n if old_aminoacid != walker_mut[3]:\n print(old_aminoacid)\n print(walker_mut)\n print(data[i])\n\n if old_aminoacid == walker_mut[3] and new_aminoacid == walker_mut[4]:\n file.write(' '.join(walker_mut) + '\\n')\n info.append(walker_mut)\n elif len(ref) > len(alt):\n #DELETION ANALYSIS\n delta = len(ref) - len(alt)\n special = ref[1:delta+1]\n pos = pos + 1\n\n cur_gene = ''\n\n for key in genes:\n if int(genes[key][0]) <= int(pos) and int(pos) <= int(genes[key][1]):\n cur_gene = key\n break\n\n if cur_gene != '':\n\n if genes[cur_gene][2] == '+':\n coord_otn = pos - genes[cur_gene][0] + 1\n else:\n coord_otn = genes[cur_gene][1] - pos + 1\n\n for walker_mut in database_mutations:\n if walker_mut[0] == 'del' and cur_gene == walker_mut[1] and coord_otn == int(walker_mut[2]):\n # print('WOW')\n # print(walker_mut)\n # print(str(coord_otn) + ' ' + str(pos) + ' ' + str(special))\n file.write(' '.join(walker_mut) + '\\n')\n info.append(walker_mut)\n # if walker_mut[0] == 'del' and cur_gene == walker_mut[1] and abs(int(coord_otn) - int(walker_mut[2])) < 10:\n # print(walker_mut)\n # print(str(coord_otn) + ' ' + str(pos) + ' ' + str(special))\n\n elif len(alt) > len(ref):\n #INSERTION ANALYSIS\n delta = len(alt) - len(ref)\n special = alt[1:delta+1]\n pos = pos + 1\n\n cur_gene = ''\n\n for key in genes:\n if int(genes[key][0]) <= int(pos) and int(pos) <= int(genes[key][1]):\n cur_gene = key\n break\n\n if cur_gene != '':\n\n if genes[cur_gene][2] == '+':\n coord_otn = pos - genes[cur_gene][0] + 1\n else:\n coord_otn = genes[cur_gene][1] - pos + 1\n\n for walker_mut in database_mutations:\n if walker_mut[0] == 'ins' and cur_gene == walker_mut[1] and coord_otn == int(walker_mut[2]):\n # print('WOW')\n # print(walker_mut)\n # print(str(coord_otn) + ' ' + str(pos) + ' ' + str(special))\n file.write(' '.join(walker_mut) + '\\n')\n info.append(walker_mut)\n # if walker_mut[0] == 'ins' and cur_gene == walker_mut[1] and abs(int(coord_otn) - int(walker_mut[2])) < 10:\n # print(walker_mut)\n # print(str(coord_otn) + ' ' + str(pos) + ' ' + str(special))\n\n return info\n\nfile = open('results_nonwalker.txt', 'w')\nmutation_info = [] # [name, [list of mutations]]\nfor name in nonwalker_names[:-1]:\n mutation_info.append([name, look_vcf_file('/export/data/kkuleshov/myc/sra/' + name + '/' + name + '_h37rv.vcf.unf',name, upstream_snps, file)])\n\nfile.close()\n\n\nprint('DONE')\n\n#STATISTICS ANALYSIS\nstatistics_genes_snp = []\n\nfor snp in database_mutations:\n statistics_genes_snp.append([0, snp])\n for sample in mutation_info:\n for el in sample[1:]:\n if el == snp:\n statistics_genes_snp[-1][0] += 1\n\nfile = open('statistics_snp_nonwalker.txt', 'w')\n\nstatistics_genes_snp = sorted(statistics_genes_snp, key = lambda snp: snp[0])[::-1]\n\nfor snp in statistics_genes_snp:\n file.write(str(snp[0]) + ' ' + str(snp[1]) + 
'\\n')\n\nfile.close()","sub_path":"3_prediction/pipeline/old_pipeline/cluster_vcf_analysis_new.py","file_name":"cluster_vcf_analysis_new.py","file_ext":"py","file_size_in_byte":17569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"55405762","text":"# for abstract class\nfrom abc import ABC\n\n\nclass DialogManager(ABC):\n\n def find_module(self, utterance, session):\n pass\n # raise Exception(\"Subclass should implement find_ module\")\n\n\nclass DialogTreeManager(ABC):\n\n def find_node_module(self, utterance, rasa_interpreter, session):\n pass\n\n# This dialogmanager iterates through all its modules and returns the module with the highest activation.\n\n\nclass MaxActivationDialogManager(DialogManager):\n\n modules: []\n\n def __init__(self, modules):\n self.modules = modules\n\n def find_module(self, utterance, session):\n\n intent_highest_activation = -1\n entity_highest_activation = -1\n highest_module = None\n intent_name = \"\"\n entity_name = \"\"\n\n for module in self.modules:\n\n intent_activation, intent, entity_activation, entity = module.activation_function(utterance, session)\n if intent_activation > intent_highest_activation:\n intent_highest_activation = intent_activation\n highest_module = module\n intent_name = intent\n if entity_activation > entity_highest_activation:\n entity_highest_activation = entity_activation\n entity_name = entity\n\n return highest_module, intent_highest_activation, intent_name, entity_highest_activation, entity_name\n\n\nclass MaxActivationDialogTreeManager(DialogTreeManager):\n dialog_tree: None\n\n def __init__(self, dialog_tree):\n self.dialog_tree = dialog_tree\n\n def find_node_module(self, utterance, rasa_interpreter, session):\n\n intent_highest_activation = -1\n entity_highest_activation = -1\n highest_module = None\n intent_name = \"\"\n # entity_name = \"\"\n detected_entities = []\n active_node = None\n active_node_id = None\n api_output = dict()\n api_modules_output = []\n user_utterance = dict()\n active_module = dict()\n active_module_output = dict()\n\n for each_tree in self.dialog_tree.trees:\n # print(each_tree)\n for each_node in self.dialog_tree.trees[each_tree]:\n # print(each_node.intent_name)\n # print(node.node_id)\n intent_activation, intent, entity_activation, recognized_entities, api_module_output = each_node.module.activation_function(utterance, rasa_interpreter, each_node.node_id, session)\n # intent_activation, intent, entity_activation, entity, recognized_entities, api_module_output = each_node.module.activation_function(utterance, each_node.node_id, session)\n if intent_activation > intent_highest_activation:\n # print(each_node.intent_name)\n intent_highest_activation = intent_activation\n highest_module = each_node.module\n intent_name = intent\n active_node_id = each_node.node_id\n # print(each_node.intent_name, intent)\n if each_tree == \"topic_tree\" and each_node.intent_name == intent:\n # print(each_node.intent_name, intent)\n active_node = each_node\n # if active_node.intent_name != intent:\n # active_node = None\n # print(active_node.intent_name)\n # print(active_node.intent_name)\n if entity_activation > entity_highest_activation:\n entity_highest_activation = entity_activation\n # entity_name = entity\n if recognized_entities:\n detected_entities = recognized_entities\n if api_module_output:\n api_modules_output.append(api_module_output)\n\n\n user_utterance[\"text\"] = utterance\n active_module[\"id\"] = active_node_id\n active_module[\"type\"] 
= \"dialog_tree_module\"\n active_module[\"activation_value\"] = intent_highest_activation\n active_module_output[\"recognized_intent\"] = intent_name\n active_module_output[\"recognized_entities\"] = detected_entities\n active_module[\"module_output\"] = active_module_output\n api_output[\"user_utterance\"] = user_utterance\n api_output[\"active_module\"] = active_module\n api_output[\"modules_output\"] = api_modules_output\n # print(api_output)\n\n return highest_module, intent_highest_activation, intent_name, entity_highest_activation, detected_entities, active_node, self.dialog_tree, api_output\n # return highest_module, intent_highest_activation, intent_name, entity_highest_activation, entity_name, active_node, self.dialog_tree, api_output\n","sub_path":"Modular Chatbot/util/framework/frankenbot/dialogmanager/dialogmanager.py","file_name":"dialogmanager.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"636642051","text":"#import all the required libraries\nimport numpy as np\nimport pandas as pd\nfrom keras.models import Model\nfrom keras.layers import Dense, LSTM, Input\nfrom keras.optimizers import Adam\nfrom keras.layers.embeddings import Embedding\nfrom keras.losses import categorical_crossentropy\nfrom string import punctuation\nimport re\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\n\n#reading the files\ndef read_data():\n with open('data/small_vocab_en', 'r', encoding = 'utf-8') as f:\n file_english = f.readlines()\n with open('data/small_vocab_fr', 'r', encoding = 'utf-8') as g:\n file_french = g.readlines()\n \n english_sentences = [sentences[:-2].strip() for sentences in file_english]\n french_sentences = [sentences[:-2].strip() for sentences in file_french]\n\n return english_sentences, french_sentences\n\n\nenglish_sentences, french_sentences = read_data()\n\n#creating a dataframe, we do this for easy and fast processing of data\ndf = pd.DataFrame()\ndf['English'] = english_sentences\ndf['French'] = french_sentences\ndf = shuffle(df)\n\ndef data_preprocess(df):\n df['English']=df['English'].apply(lambda x: x.lower())\n df['French'] = df['French'].apply(lambda x: x.lower())\n \n # Remove quotes\n df['English']=df['English'].apply(lambda x: re.sub(\"'\", '', x))\n df['French']=df['French'].apply(lambda x: re.sub(\"'\", '', x))\n \n # Remove all the special characters\n exclude = set(punctuation) # Set of all special characters\n df['English']=df['English'].apply(lambda x: ''.join(ch for ch in x if ch not in exclude))\n df['French']=df['French'].apply(lambda x: ''.join(ch for ch in x if ch not in exclude))\n \n #removing extra white spaces\n df['English']=df['English'].apply(lambda x: x.strip())\n df['French']=df['French'].apply(lambda x: x.strip())\n \n #using start and end tokens to ensure the start and end of sentence\n df['French'] = df['French'].apply(lambda x : ' '+ x + ' ')\n \n return df\n\ndf = data_preprocess(df)\n\n#creating the vocabulary for english and french sentences\ndef create_vocab(df):\n #taking empty sets for english and french words\n eng_words = set()\n fr_words = set()\n \n for eng in df['English']:\n for word in eng.split():\n if word not in eng_words:\n eng_words.add(word)\n for fr in df['French']:\n for word in fr.split():\n if word not in fr_words:\n fr_words.add(word)\n \n return sorted(list(eng_words)), sorted(list(fr_words))\n\neng_vocab, fr_vocab = create_vocab(df)\n\nmax_eng_sent_length = 
np.max([len(sent.split()) for sent in df['English']])\nmax_fr_sent_length = np.max([len(sent.split()) for sent in df['French']])\n\nnum_encoder_tokens = len(eng_vocab)\nnum_decoder_tokens = len(fr_vocab)\n#Add +1 for 0 padding(padding used to make all sentences equal in length)\nnum_decoder_tokens += 1\n\n#create word -> token and token -> word dictionary\neng_word_to_token = {word:i+1 for i, word in enumerate(eng_vocab)} \neng_token_to_word = {i:word for word, i in eng_word_to_token.items()}\nfr_word_to_token = {word:i+1 for i, word in enumerate(fr_vocab)}\nfr_token_to_word = {i:word for word, i in fr_word_to_token.items()}\n\n\n\n#creating a generator\ndef generate_batch(x, y, batch_size=128):\n    \n    while True:\n        for j in range(0, len(x), batch_size):\n            #this is a bit explicit code, we can also use keras tokenizer and padding\n            encoder_input_data = np.zeros((batch_size, max_eng_sent_length))\n            decoder_input_data = np.zeros((batch_size, max_fr_sent_length))\n            \n            decoder_output_data = np.zeros((batch_size, max_fr_sent_length, num_decoder_tokens))\n            \n            for i, (input_text, output_text) in enumerate(zip(x[j:j+batch_size], y[j:j+batch_size])):\n                for t, word in enumerate(input_text.split()):\n                    encoder_input_data[i, t] = eng_word_to_token[word]\n                for t, word in enumerate(output_text.split()):\n                    if t < len(output_text.split()) - 1:\n                        decoder_input_data[i, t] = fr_word_to_token[word]  # decoder input: all tokens but the last\n                    if t > 0:\n                        decoder_output_data[i, t-1, fr_word_to_token[word]] = 1  # decoder target: shifted one step left (teacher forcing)\n            yield ([encoder_input_data, decoder_input_data], decoder_output_data)\n\n\nX = df['English']\ny = df['French']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)\n\n\n#defining the encoder training model\nhidden_dims = 64\nencoder_input = Input(shape = (None,))\nenc_embed_layer = Embedding(num_encoder_tokens+1, hidden_dims, mask_zero = True)\nenc_embeds = enc_embed_layer(encoder_input)\nenc_lstm = LSTM(hidden_dims, return_state = True)\nencoder_outputs, state_h, state_c = enc_lstm(enc_embeds)\nencoder_states = [state_h, state_c]\n\n#defining decoder training model\nhidden_dims = 64\ndecoder_input = Input(shape = (None,))\ndec_embed_layer = Embedding(num_decoder_tokens, hidden_dims, mask_zero = True)\ndec_embeds = dec_embed_layer(decoder_input)\ndec_lstm = LSTM(hidden_dims, return_sequences = True, return_state = True)\ndecoder_outputs, _, _ = dec_lstm(dec_embeds, initial_state=encoder_states)\ndecoder_dense = Dense(num_decoder_tokens, activation = 'softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\n\nmodel = Model([encoder_input, decoder_input], decoder_outputs)\n\nmodel.compile(optimizer = Adam(learning_rate = 0.01), loss = 'categorical_crossentropy', metrics = ['acc'])\n\nbatch_size = 128\nmodel.fit_generator(generator = generate_batch(X_train, y_train, batch_size = batch_size),\n                    steps_per_epoch = len(X_train)//batch_size,\n                    epochs = 50,\n                    validation_data = generate_batch(X_test, y_test,\n                                                     batch_size = batch_size),\n                    validation_steps = len(X_test)//batch_size)\n\n\nmodel.save_weights('nmt_weights.h5')\n\n#load the weights in case you have not trained the model\nmodel.load_weights('nmt_weights.h5')\n\n\n#defining decoder inference model\nencoder_model = Model(encoder_input, encoder_states)\n\ndecoder_state_h = Input(shape = (hidden_dims,))\ndecoder_state_c = Input(shape = (hidden_dims,))\ndecoder_state_inputs = [decoder_state_h, decoder_state_c]\ndecoder_embed = dec_embed_layer(decoder_input)\ndecoder_outputs2, state_h2, state_c2 = dec_lstm(decoder_embed, initial_state = decoder_state_inputs)\ndecoder_states2 = [state_h2, state_c2]\ndecoder_outputs2 = decoder_dense(decoder_outputs2)\n\ndecoder_model = 
Model([decoder_input] + decoder_state_inputs, [decoder_outputs2] + decoder_states2)\n\nprint(type(decoder_input))\nprint(type(decoder_state_inputs))\nprint(type(decoder_outputs2))\nprint(type(decoder_states2))\n\ndef decode_seq(input_seq):\n    state_vector = encoder_model.predict(input_seq)\n    target_seq = np.zeros((1,1))\n    target_seq[0, 0] = fr_word_to_token['<start>']\n    print(type(target_seq))\n    print(type(state_vector))\n    stop_condition = False\n    decoded_sentence = ''\n    while not stop_condition:\n        output_tokens, h, c = decoder_model.predict([target_seq] + state_vector)\n        #It's for excluding the END clause\n        decoded_output = np.argmax(output_tokens[0, -1, :])\n        decoded_word = fr_token_to_word[decoded_output]\n        if decoded_word != '<end>':\n            decoded_sentence += ' ' + decoded_word\n        \n        if decoded_word == '<end>':\n            stop_condition = True\n        \n        target_seq = np.zeros((1, 1))\n        target_seq[0, 0] = decoded_output\n        \n        state_vector = [h, c]\n    return decoded_sentence\n    \ndata_gen = generate_batch(X_train, y_train, batch_size = 1)\n(input_data, actual_output), _ = next(data_gen)\n\nresult = decode_seq(input_data)\n\nprint('Input Sentence: ', X_train.iloc[0])\nprint('Output Sentence: ', y_train.iloc[0])\nprint('Predicted Sentence', result)\n\n    \n\n\n    \n    \n    \n    \n    ","sub_path":"Language Translation/LT_enc_dec.py","file_name":"LT_enc_dec.py","file_ext":"py","file_size_in_byte":8082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"1495960","text":"import numpy as np\nfrom numpy.random import randn\nfrom utils import wrap, MotionModel, MeasurementModel\n\ndef rad(degree):\n    return degree * np.pi/180\n\nclass TurtleBot:\n    def __init__(self,alphas, sensor_covariance, x0=np.zeros((3,1)),\n                 ts=0.1, landmarks=np.empty(0), fov=360):\n        self.g = MotionModel(alphas, noise=True)\n        self.h = MeasurementModel()\n        self.Q_sqrt = np.sqrt(sensor_covariance)\n        self.x = wrap(x0, dim=2)\n        self.dt = ts\n        self.landmarks = landmarks\n        self.bearing_lim = rad(fov/2)\n    \n    def propagateDynamics(self, u):\n        self.x = self.g(u, self.x, self.dt)\n        return self.x\n\n    def getSensorMeasurement(self):\n        if not self.landmarks.size > 1:\n            return -1\n        z = np.zeros((2, len(self.landmarks))) \n        for i, (mx,my) in enumerate(self.landmarks):\n            z[:,i:i+1] = self.h(self.x, mx, my) + self.Q_sqrt @ randn(2,1)\n        z[1] = wrap(z[1])\n\n        unseen = np.abs(z[1]) > self.bearing_lim\n        z[:,unseen] = np.nan\n\n        return z \n\n","sub_path":"ekf_slam/turtlebot.py","file_name":"turtlebot.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"77692715","text":"#!/usr/bin/env python3\n\nclass Solution:\n    def searchSuggestionsSystem(self, products, searchWord):\n        products = sorted(products)\n        \n\n        searchWord_ch = list(searchWord)\n        #print(searchWord_ch)\n        \n        table = []\n        for product in products:\n            product_ch = list(product)\n            #print(product_ch)\n            \n            i = 0\n            diff = []\n\n            if len(product_ch) <= len(searchWord_ch):\n                while i < len(product_ch):\n                    if not i == 0:\n                        diff.append(diff[i-1] + abs(ord(searchWord_ch[i]) - ord(product_ch[i])))\n                    else:\n                        diff.append(abs(ord(searchWord_ch[i]) - ord(product_ch[i])))\n                    i += 1\n                \n                while i < len(searchWord_ch):\n                    if not i == 0:\n                        diff.append(diff[i-1] + ord(searchWord_ch[i]))\n                    else:\n                        diff.append(ord(searchWord_ch[i]))\n\n                    i += 1\n\n            else:\n                while i < len(searchWord_ch):\n                    if not i == 0:\n                        diff.append(diff[i-1] + abs(ord(searchWord_ch[i]) - ord(product_ch[i])))\n                    else:\n                        
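# i == 0: no running total yet, so seed the prefix-distance accumulation with this position's difference\n                        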
diff.append(abs(ord(searchWord_ch[i]) - ord(product_ch[i])))\n\n i += 1\n \n while i < len(product_ch):\n if not i == 0:\n diff.append(diff[i-1] + ord(product_ch[i]))\n else:\n diff.append(ord(product_ch[i]))\n \n i += 1\n \n table.append(diff)\n\n \n answer = []\n for i in range(len(searchWord)):\n ans = []\n\n count = 0\n for index, element in enumerate(table):\n if element[i] == 0:\n ans.append(products[index])\n count += 1\n\n if count == 3:\n break\n \n answer.append(ans)\n\n return answer\n\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.searchSuggestionsSystem([\"mobile\",\"mouse\",\"moneypot\",\"monitor\",\"mousepad\"], \"mouse\"))\n print(s.searchSuggestionsSystem([\"havana\"], \"havana\"))\n print(s.searchSuggestionsSystem([\"bags\",\"baggage\",\"banner\",\"box\",\"cloths\"], \"bags\"))\n print(s.searchSuggestionsSystem([\"havana\"], \"tatiana\"))\n","sub_path":"LeetCode/search_suggestions_system/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"451528136","text":"import paramiko,time\nfrom scp import SCPClient\n\ngSSHRef = None\ngchannel = None\nscp = None\n\ndef InitSSH(server,username,password):\n global gSSHRef, gchannel,scp\n\n reply = 'already there'\n if gSSHRef == None:\n gSSHRef = paramiko.SSHClient()\n gSSHRef.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n result = gSSHRef.connect(server, username=username, password=password, timeout = 5)\n reply = str(result)\n gchannel = gSSHRef.invoke_shell()\n gchannel.settimeout(0)\n scp = SCPClient(gSSHRef.get_transport())\n return reply\n\ndef CloseSSH():\n if gchannel != None:\n gchannel.close()\n if gSSHRef != None:\n gSSHRef.close()\n if scp != None:\n scp.close()\n return('done')\n\ndef WriteSSH(string):\n global gSSHRef, gchannel,scp\n reply = 'no reference'\n if gSSHRef != None:\n reply = 'no file'\n if gchannel != None:\n size = gchannel.send(string.encode())\n reply = str(size)\n return reply\n\ndef ReadSSH():\n global gSSHRef, gchannel,scp\n reply = 'no reference'\n if gSSHRef != None:\n reply = 'no file'\n if gchannel != None:\n reply = ''\n if gchannel.recv_ready():\n reply = gchannel.recv(9999).decode()\n\n return(reply)\n\ndef WriteWaitReadSSH(string,char,timeout=10000):\n global gSSHRef, gchannel,scp\n reply = ReadSSH()\n WriteSSH(string)\n n = int(timeout/10)\n for i in range(n):\n ans = ReadSSH()\n reply = reply + ans\n if ans.find(char) >= 0:\n #print(ans)\n return reply\n time.sleep(0.01)\n return reply\n\ndef scp_get(name):\n try:\n scp.get(name)\n return 'get succeeded\\n'\n except Exception as e:\n return 'scp.get('+name+')\\n' +str(e)\n\ndef scp_put(source, dest):\n try:\n scp.put(source, dest)\n return 'scp.put('+source+','+dest+')\\n' +'put succeeded\\n'\n except Exception as e:\n return str(e)\n\ndef scp_put_all(source, dest):\n try:\n scp.put(source, recursive = True, remote_path = dest)\n return 'put succeeded\\n'\n except Exception as e:\n return str(e)\n","sub_path":"LabVIEWCode/Subs/SSH/RogersSSH.py","file_name":"RogersSSH.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"54158021","text":"import os\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom Antena import Antena\nfrom Filtracja import Filtracja\nfrom Sygnal import Sygnal\nfrom SygnalCiagly import SygnalCiagly\nfrom SygnalDyskretny import SygnalDyskretny\n\n\nclass 
Main:\n\n def wybor_sygnalu(self):\n print(\"---SYGNALY---\")\n print(\"1. szum_o_rozkladzie_jednostajnym\")\n print(\"2. szum_gaussowski\")\n print(\"3. sygnal_sinusoidalny\")\n print(\"4. sygnal_sinusoidalny_wyprostowany_jednopolowkowo\")\n print(\"5. sygnal_sinusoidalny_wyprostowany_dwupolowkowo\")\n print(\"6. sygnal_prostokatny\")\n print(\"7. sygnal_prostokatny_symetryczny\")\n print(\"8. sygnal_trojkatny\")\n print(\"9. skok_jednostkowy\")\n print(\"10. impuls_jednostkowy\")\n print(\"11. szum_impulsowy\")\n print(\"------------------\")\n\n def menu_glowne(self, argument):\n sc = SygnalCiagly()\n sd = SygnalDyskretny()\n\n amp = int(input('Podaj amplitude : '))\n t1 = int(input('Podaj czas poczatkowy : '))\n d = int(input('Podaj czas trwania sygnalu : '))\n\n if argument == 1:\n # ??? czy tak chce zrobic? czy zostawic ze rysowanie w SygnalCiagly????????????????\n sc.szum_o_rozkladzie_jednostajnym(amp, t1, d).rysuj_sygnal()\n # sc.szum_o_rozkladzie_jednostajnym(30, 0, 50),\n elif argument == 2:\n sc.szum_gaussowski(amp, t1, d).rysuj_sygnal()\n # sc.szum_gaussowski(30, 0, 50),\n elif argument == 3:\n okres_T = int(input('Podaj okres podstawowy : '))\n sc.sygnal_sinusoidalny(amp, okres_T, t1, d).rysuj_sygnal()\n # sc.sygnal_sinusoidalny(10, 7, 0, 10),\n elif argument == 4:\n okres_T = int(input('Podaj okres podstawowy : '))\n sc.sygnal_sinusoidalny_wyprostowany_jednopolowkowo(amp, okres_T, t1, d).rysuj_sygnal()\n # sc.sygnal_sinusoidalny_wyprostowany_jednopolowkowo(10, 6, 0, 10),\n elif argument == 5:\n okres_T = int(input('Podaj okres podstawowy : '))\n sc.sygnal_sinusoidalny_wyprostowany_dwupolowkowo(amp, okres_T, t1, d).rysuj_sygnal()\n # sc.sygnal_sinusoidalny_wyprostowany_dwupolowkowo(10, 6, 0, 10),\n elif argument == 6:\n okres_T = int(input('Podaj okres podstawowy : '))\n sc.sygnal_prostokatny(amp, okres_T, t1, d).rysuj_sygnal()\n # sc.sygnal_prostokatny(10, 2, 0, 10),\n elif argument == 7:\n okres_T = int(input('Podaj okres podstawowy : '))\n sc.sygnal_prostokatny_symetryczny(amp, okres_T, t1, d).rysuj_sygnal()\n # sc.sygnal_prostokatny_symetryczny(10, 2, 0, 10),\n elif argument == 8:\n okres_T = int(input('Podaj okres podstawowy : '))\n sc.sygnal_trojkatny(amp, okres_T, t1, d).rysuj_sygnal()\n # sc.sygnal_trojkatny(10, 2, 0, 10),\n elif argument == 9:\n ts = int(input('Podaj współczynnik wypełnienia : '))\n sc.skok_jednostkowy(amp, t1, d, ts).rysuj_sygnal()\n # sc.skok_jednostkowy(10, -10, 20),\n elif argument == 10:\n sd.impuls_jednostkowy(amp, t1, d).rysuj_sygnal()\n # sd.impuls_jednostkowy(1, -25, 50),\n elif argument == 11:\n p = int(input('Podaj prawdopodobienstwo : '))\n sd.szum_impulsowy(amp, t1, d, p).rysuj_sygnal()\n # sd.szum_impulsowy(1, 0, 50, 80)\n else:\n print(\"NACISNIETO ZLY PRZYCISK!!!\")\n\n def wczytaj_z_pliku(self, nazwa_pliku):\n # w pliku poczatek i koniec przedzialu x'ow oraz wspolrzedne y\n print(\"Wczytywanie z pliku...\")\n plik = open(nazwa_pliku)\n caly_tekst = plik.read()\n plik.close()\n podzial_na_linie = caly_tekst.split('\\n')\n wartosci_y = podzial_na_linie[1].split(', ') # wspolrzedne y\n przedzial_wartosci_x = podzial_na_linie[0].split(', ') # tablica z dwoma wartosciami tj.(pocz. 
i koniec)\n ilosc_x = len(wartosci_y)\n\n # konwersja tablicy str na float\n for i in range(0, len(wartosci_y)):\n wartosci_y[i] = float(wartosci_y[i])\n\n wartosci_x = np.linspace(int(przedzial_wartosci_x[0]), int(przedzial_wartosci_x[1]), ilosc_x) # wspolrzedne x\n # plt.plot(wartosci_x, wartosci_y)\n # plt.xlim(int(przedzial_wartosci_x[0]), int(przedzial_wartosci_x[1])) # od do X\n # plt.xlabel('t[s]')\n # plt.ylabel('Amplituda')\n # plt.show()\n\n return Sygnal(wartosci_x, wartosci_y)\n\n def zapisz_do_pliku(self, sygnal, nazwa_pliku):\n poczatek_przedzialu = int(sygnal.wartosci_x[0])\n koniec_przedzialu = int(sygnal.wartosci_x[len(sygnal.wartosci_x) - 1])\n wartosci_y = sygnal.wartosci_y\n\n print(\"Zapisywanie do pliku...\")\n plik = open(nazwa_pliku, \"w\")\n przecinek = \", \"\n tekst = str(poczatek_przedzialu) + przecinek + str(koniec_przedzialu)\n plik.write(tekst)\n plik.write(\"\\n\")\n plik.write(self.zamien_liste_w_str(wartosci_y))\n plik.close()\n\n def zamien_liste_w_str(self, tablica1):\n stringg = \"\"\n for i in range(len(tablica1)):\n if i != len(tablica1) - 1:\n stringg = stringg + str(tablica1[i]) + \", \"\n else:\n stringg = stringg + str(tablica1[i])\n return stringg\n\n\nif __name__ == '__main__':\n main = Main()\n # main.wybor_sygnalu()\n # inp = input('Podaj jaki sygnal chcesz rozpatrzec : ')\n # # os.system('cls') # powinno czyscic ale moze zrobi tylko jak zrobbimy z tego skrypt\n # main.menu_glowne(int(inp))\n\n sc = SygnalCiagly()\n # dzialania na wykresach\n # sc.szum_o_rozkladzie_jednostajnym(30, 0, 50).dzielenie(sc.sygnal_prostokatny(10, 2, 0, 10)).rysuj_sygnal()\n # print(sc.szum_o_rozkladzie_jednostajnym(30, 0, 50).wariancja())\n # print(sc.szum_o_rozkladzie_jednostajnym(30, 0, 50).wariancja())\n # main.wczytaj_z_pliku()\n # tablica = [0, 1, 2, 3, 4]\n # main.zapisz_do_pliku(0, 10, tablica)\n # sc.sygnal_trojkatny(10, 2, 0, 10).rysuj_histogram(5)\n # main.wczytaj_z_pliku()\n # x = np.linspace(t1, t1 + d, 1000)\n # plt.plot(syg.wartosci_x, syg.wartosci_y)\n # plt.xlim(self.czas_poczatkowy, self.czas_poczatkowy + self.czas_trwania_sygnalu) # od do X\n # # plt.xlim(self.wartosci_x[0], self.wartosci_x[len(self.wartosci_x)-1]) # od do X\n # plt.xlabel('t[s]')\n # plt.ylabel('Amplituda')\n # plt.show()\n\n # sc.sygnal_prostokatny(15, 5, 0, 20).rysuj_sygnal()\n # sc.sygnal_prostokatny(15, 5, 0, 20).rysuj_histogram(15)\n # sc.sygnal_prostokatny(15, 5, 0, 20).pokazWynikiParametrow()\n\n # sc.sygnal_sinusoidalny(15, 5, 0, 20).rysuj_histogram(15)\n # sc.sygnal_sinusoidalny(15, 5, 0, 20).pokazWynikiParametrow()\n # sc.sygnal_sinusoidalny(15, 5, 0, 20).rysuj_sygnal()\n\n # sc.sygnal_trojkatny(15, 5, 0, 20).rysuj_sygnal()\n # sc.sygnal_trojkatny(15, 5, 0, 20).rysuj_histogram(15)\n # sc.sygnal_trojkatny(15, 5, 0, 20).pokazWynikiParametrow()\n\n sd = SygnalDyskretny()\n # sd.szum_impulsowy(15, 0, 20, 70).rysuj_sygnal()\n # sd.szum_impulsowy(15, 0, 20, 70).rysuj_histogram(15)\n # sd.szum_impulsowy(15, 0, 20, 70).pokazWynikiParametrow()\n # sc.skok_jednostkowy(10, 0, 20, 3).rysuj_sygnal()\n # syg = sc.sygnal_sinusoidalny(15, 5, 0, 20).dodawanie(sc.sygnal_prostokatny(15, 5,0,20))\n # syg.rysuj_sygnal()\n # syg.rysuj_histogram(15)\n # syg.pokazWynikiParametrow()\n\n # syg = sc.sygnal_prostokatny(15, 5, 0, 20).odejmowanie(sc.sygnal_trojkatny(15, 5, 0, 20))\n # syg.rysuj_sygnal()\n # syg.rysuj_histogram(15)\n # syg.pokazWynikiParametrow()\n\n # syg = sc.sygnal_sinusoidalny(15, 5, 0, 20).mnozenie(sc.sygnal_trojkatny(15, 5, 0, 20))\n # syg.rysuj_sygnal()\n # 
syg.rysuj_histogram(15)\n # syg.pokazWynikiParametrow()\n #\n # syg = sc.sygnal_sinusoidalny(15, 5, 0, 20).dzielenie(sc.sygnal_prostokatny(15, 5, 0, 20))\n # syg.rysuj_sygnal()\n # syg.rysuj_histogram(15)\n # syg.pokazWynikiParametrow()\n\n # sc.sygnal_sinusoidalny_wyprostowany_jednopolowkowo(5, 10, -5, 15).rysuj_sygnal()\n # sc.sygnal_sinusoidalny_wyprostowany_jednopolowkowo(5, 10, -5, 15).pokazWynikiParametrow()\n # 1szum_o_rozk_jednost\n # 2szum_gauss\n # 3syg_sinus\n\n # syg = sc.sygnal_sinusoidalny(15, 5, 0, 20).pokazWynikiParametrow()\n # print(syg)\n # sc.sygnal_trojkatny(15, 5, 0, 20).kwantyzacja(10, 5).rysuj_sygnal()\n # sc.sygnal_trojkatny(16, 5, 6, 30).rysuj_sygnal()\n\n # sc.sygnal_trojkatny(10, 10, 1, 30).kwantyzacja(100, 15).rysuj_sygnal()\n # syg_prob = sc.sygnal_trojkatny(15, 5, 0, 20).ekstrapolacja_zerowego_rzeduNaj(88).rysuj_sygnal()\n print(\"XD\")\n # print(len(sc.sygnal_trojkatny(15, 5, 0, 20).kwantyzacja(70, 5).wartosci_y))\n # print(len(sc.sygnal_trojkatny(15, 5, 0, 20).probkowanie(70).wartosci_y))\n ##ZADANIE 2\n\n # syg = sc.sygnal_sinusoidalny(15, 5, 0, 20)\n # y = syg.wartosci_y\n # x = syg.wartosci_x\n #\n # y2 = sc.sygnal_sinusoidalny(15, 5, 0, 20).kwantyzacja(200, 5).wartosci_y\n # x2 = sc.sygnal_sinusoidalny(15, 5, 0, 20).kwantyzacja(200, 5).wartosci_x\n #\n # plt.plot(x, y, color='red')\n # plt.plot(x2, y2)\n # # plt.xlim(self.wartosci_x[0], self.wartosci_x[len(self.wartosci_x) - 1]) # od do X\n # plt.xlabel('t[s]')\n # plt.ylabel('Amplituda')\n # plt.show()\n #\n # Sygnal.pokaz_wyniki_miar(syg.probkowanie(200),sc.sygnal_sinusoidalny(15, 5, 0, 20).kwantyzacja(200, 5))\n\n # syg = sc.sygnal_prostokatny(15, 5, 0, 20)\n # y = syg.wartosci_y\n # x = syg.wartosci_x\n\n # y2 = sc.sygnal_prostokatny(15, 5, 0, 20).rekonstrukcja_w_oparciu_o_fun_sinc(200).wartosci_y\n # x2 = sc.sygnal_prostokatny(15, 5, 0, 20).rekonstrukcja_w_oparciu_o_fun_sinc(200).wartosci_x\n #\n # plt.plot(x, y, color='red')\n # plt.plot(x2, y2)\n # # plt.xlim(self.wartosci_x[0], self.wartosci_x[len(self.wartosci_x) - 1]) # od do X\n # plt.xlabel('t[s]')\n # plt.ylabel('Amplituda')\n # plt.show()\n print(\"oryginal\")\n # print(syg.probkowanie(200).wartosci_y)\n #\n # print(sc.sygnal_prostokatny(15, 5, 0, 20).kwantyzacja(200, 5).wartosci_y)\n # Sygnal.pokaz_wyniki_miar(syg.probkowanie(200), sc.sygnal_prostokatny(15, 5, 0, 20).kwantyzacja(200, 5)) #wychodzi 0?\n #\n # # syg = sc.sygnal_trojkatny(15, 5, 0, 20)\n # # y = syg.wartosci_y\n # # x = syg.wartosci_x\n # #\n # syg_prob = sc.sygnal_prostokatny(15, 5, 0, 20).kwantyzacja(200,5)\n # syg_prob.sygDyskretny = True\n # y2 = syg_prob.wartosci_y\n # x2 = syg_prob.wartosci_x\n # #\n # # y2 = syg.interpolacja_pierwszego_rzeduNaj(200).wartosci_y\n # # x2 = syg.interpolacja_pierwszego_rzeduNaj(200).wartosci_x\n # #\n # plt.plot(x, y, color='red')\n # plt.plot(x2, y2)\n # # plt.xlim(self.wartosci_x[0], self.wartosci_x[len(self.wartosci_x) - 1]) # od do X\n # plt.xlabel('t[s]')\n # plt.ylabel('Amplituda')\n # plt.show()\n #\n # Sygnal.pokaz_wyniki_miar(syg.probkowanie(200), sc.sygnal_trojkatny(15, 5, 0, 20).kwantyzacja(200, 5))\n # syg2 = sc.sygnal_sinusoidalny(2, 0.01, 0, 1) #100hz\n # syg = sc.sygnal_sinusoidalny(2, 0.01, 0, 1).rekonstrukcja_w_oparciu_o_fun_sinc(101)\n # # syg_kwant = sc.sygnal_sinusoidalny(15, 5, 0, 20).kwantyzacja(400, 8)\n # # Sygnal.pokaz_wyniki_miar(syg_probkowany.probkowanie(100), syg_kwant) # wychodzi 0?\n #\n # y = syg.wartosci_y\n # x = syg.wartosci_x\n # #\n # # y2 = syg_kwant.wartosci_y\n # # x2 = syg_kwant.wartosci_x\n # #\n 
# y2 = syg2.wartosci_y\n    # x2 = syg2.wartosci_x\n    # #\n    # plt.plot(x, y, color='red')\n    # plt.plot(x2, y2)\n    # # plt.xlim(self.wartosci_x[0], self.wartosci_x[len(self.wartosci_x) - 1])  # x range: from, to\n    # plt.xlabel('t[s]')\n    # plt.ylabel('Amplituda')\n    # plt.show()\n    #\n    # print(y)\n    # print(x)\n    # print(len(x2))\n    # print(len(y2))\n    # syg = sc.sygnal_sinusoidalny(15, 5, 0, 20)\n    # syg2 = sc.sygnal_trojkatny(15, 5, 0, 20)\n\n    # syg = sc.sygnal_trojkatny(5, 1, 0, 10)\n    # syg2 = sc.sygnal_prostokatny(5, 1, 0, 10)\n    # syg3 = Sygnal.korelacja_z_uzyciem_splotu(syg, syg2)\n    # syg3.rysuj_sygnal()\n\n    # syg.pokazWynikiParametrow()\n\n    # filtr = Filtracja.filtr_dolnoprzepustowy(7, 250, 400,8)\n    # filtr = Filtracja.filtr_dolny(50, 10, 200) # works\n######################################################ANTENNA\n    antena = Antena(100, 500, 10, 1, 10)\n    antena.create_signal(1,30).rysuj_sygnal()\n    print(antena.antene_diffrence(1000))\n####################################################\n    # ilosc = antena.antene_diffrence(1000)\n    # print(ilosc)\n\n    # filtr = Filtracja.filtr_srodkowoprzepustowy(100, 10, 200)\n    # okno = Filtracja.okno_hanninga(filtr,100)\n\n    # works: filter + window ########################\n    # filtr = Filtracja.filtr_srodkowoprzepustowy(57, 1, 250) # works\n    # okno = Filtracja.okno_hanninga(filtr, 57)\n    # syg = sc.sygnal_prostokatny(5, 3, 0, 10)\n    # syg2 = sc.szum_o_rozkladzie_jednostajnym(5, 0, 10)\n    # syg3 = syg.dodawanie(syg2)\n    # syg3.sygDyskretny = True\n    # syg3.rysuj_sygnal() # noise\n    # syg4 = Sygnal.operacja_splotu2(syg3, okno)\n    # syg4.rysuj_sygnal()\n####################################\n# syg5 = Sygnal.operacja_splotu2(syg3, okno)\n# syg5.rysuj_sygnal()\n","sub_path":"Zadanie3/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":13245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"208160134","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    path('', views.home, name='home'),\n    path('about/', views.about, name='about'),\n    path('created/', views.created, name='created'),\n    path('<int:pk>/', views.note, name='note'),\n]\n","sub_path":"notes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"380138821","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n# file name: test1.py\n\n'''\nname = 111\nprint name\n\n#age = raw_input(\"\\n\\nPress the enter key to exit.\")\n#print '' + age + '============='\n'''\n\n# test the string type\nstr1 = '你好!'\nstr2 = str1 * 2\nprint str2\n","sub_path":"py/test/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"445655507","text":"from room import Room\r\nfrom player import Player\r\nfrom item import Item\r\nimport csv\r\nimport textwrap\r\n\r\n# Declare all the rooms\r\n\r\nroom = {\r\n    'outside':  Room(\"Outside Cave Entrance\",\r\n                     \"North of you, the cave mount beckons\"),\r\n\r\n    'foyer':    Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\r\npassages run north and east.\"\"\"),\r\n\r\n    'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\r\ninto the darkness. Ahead to the north, a light flickers in\r\nthe distance, but there is no way across the chasm.\"\"\"),\r\n\r\n    'narrow':   Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\r\nto north. 
The smell of gold permeates the air.\"\"\"),\r\n\r\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\r\nchamber! Sadly, it has already been completely emptied by\r\nearlier adventurers. The only exit is to the south.\"\"\"),\r\n}\r\n\r\nwrapper = textwrap.TextWrapper(width=70)\r\n\r\n# Link rooms together\r\n\r\nroom['outside'].n_to = room['foyer']\r\nroom['foyer'].s_to = room['outside']\r\nroom['foyer'].n_to = room['overlook']\r\nroom['foyer'].e_to = room['narrow']\r\nroom['overlook'].s_to = room['foyer']\r\nroom['narrow'].w_to = room['foyer']\r\nroom['narrow'].n_to = room['treasure']\r\nroom['treasure'].s_to = room['narrow']\r\n\r\n# Define items from the item_src list\r\nitem_list = {}\r\n\r\nwith open('item_src.txt', 'r') as f:\r\n for line in f:\r\n data = eval(line)\r\n item_list[data['id']] = Item(data['name'], data['description'])\r\n\r\n# Establish items in rooms\r\nfoyer_items = [item_list['bronzesword'], item_list['giraffestatuette']]\r\nroom['foyer'].add_items(foyer_items)\r\n\r\n#\r\n# Main\r\n#\r\n\r\n# Takes in a list of item names and returns a list of the associated ids\r\ndef name_to_id(names):\r\n split = names.split(',')\r\n base = ''\r\n for item in split:\r\n base = base + \"'\"+item.replace(' ','').lower()+\"',\"\r\n return eval(\"[\"+base+\"]\")\r\n\r\n# Helper function to check current room's items and return a string \r\n# which can be printed based on what it finds\r\n\r\n \r\n# Make a new player object that is currently in the 'outside' room.\r\nplayer_name = input('Enter a name for your character: ')\r\np1 = Player(player_name, room['outside'])\r\n\r\n# Write a loop that:\r\n#\r\n# * Prints the current room name\r\n# * Prints the current description (the textwrap module might be useful here).\r\n# * Waits for user input and decides what to do.\r\n#\r\n# If the user enters a cardinal direction, attempt to move to the room there.\r\n# Print an error message if the movement isn't allowed.\r\n#\r\n# If the user enters \"q\", quit the game.\r\nwhile(True):\r\n # Determine current room\r\n c_room = p1.current_room\r\n\r\n # Print Buffer space to make it easier to read\r\n print('')\r\n print('-'*70)\r\n\r\n # Print current Location and location description/items\r\n print('Current Room: {}'.format(c_room.name))\r\n [print(line) for line in wrapper.wrap(text=c_room.description)]\r\n item_str = 'Items in room:\\t{}'.format(c_room.check_items())\r\n [print(line) for line in wrapper.wrap(text=item_str)]\r\n\r\n # Input from user\r\n inp = input('What would you like to do? 
')\r\n\r\n    # Print buffer to make it easier to read\r\n    print('-'*70)\r\n    \r\n    # Preprocess input\r\n    inputs = inp.split(maxsplit=1)\r\n\r\n    # Process Input\r\n    if(len(inputs) == 1):\r\n        inp = inputs[0]\r\n        if(inp in ['q', 'quit', 'exit']):\r\n            break\r\n        elif(inp in ['n','e','s','w']):\r\n            p1.move_player(inp)\r\n        if(inp in ['i', 'inventory']):\r\n            print('Inventory:')\r\n            [print(line) for line in p1.get_items()]\r\n        if(inp == 'DebugMode'):\r\n            if(input('password: ') == 'Giraffe'):\r\n                while(True):\r\n                    try:\r\n                        code = input('>>>')\r\n                        if(code == 'exit'):\r\n                            break\r\n                        else:\r\n                            exec(code)\r\n                    except Exception as e:\r\n                        print(e)\r\n\r\n    elif(len(inputs) == 2):\r\n        verb, obj = inputs[0], inputs[1]\r\n        if(verb in ['take', 'grab', 'get']):\r\n            # Generate list of target item's id's from input\r\n            targ_items = name_to_id(obj)\r\n            # Attempt to remove target items from room\r\n            removed_items_ids, removed_items = c_room.remove_items(targ_items)\r\n            # Add items to player inventory\r\n            p1.add_items(removed_items)\r\n\r\n        if(verb in ['remove', 'drop', 'toss']):\r\n            # Generate list of target item's id's from input\r\n            targ_items = name_to_id(obj)\r\n            # Attempt to remove target items from room\r\n            removed_item_ids, removed_items = p1.remove_items(targ_items)\r\n            # Add items to room \r\n            items = {id:item for id,item in zip(removed_item_ids,removed_items)}\r\n            c_room.add_items(removed_items)\r\n\r\n    else:\r\n        print('Invalid Input')\r\n","sub_path":"src/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"207680060","text":"# -*- encoding: utf-8 -*-\nt = open(\"OVER_ALL_QA_DATA.txt\",\"r\").readlines()\np = open(\"Result_Q_10Ans_Tel_Combine_all.txt\",\"r\").readlines()\n\n#from indic_transliteration import sanscript\n#from indic_transliteration.sanscript import SchemeMap, SCHEMES, transliterate\n\nfrom difflib import SequenceMatcher\n\ndef similar(a, b):\n    return SequenceMatcher(None, a, b).ratio()\n\n\nTarget_Questions = []\nTarget_Answers \t = []\n\n\nfor line in t:\n\tline = line.strip()\n\tline = line.split(\":::\")\n\tTarget_Answers.append(line[0].strip())\n\tTarget_Questions.append(line[1].strip())\n\n\nprint(len(Target_Answers),len(Target_Questions))\nprint(\"\\n#########################################\\n\")\n\nm = open(\"Result_Q_10Ans_Tel_Combined_all_with_Unique_queries.txt\",\"w\")\n#m = open(\"percentage_40.txt\",\"w\")\n\nPredicted_Questions = []\nPredicted_Answers = []\nprint(\"# of lines in a File: \",len(p))\n\nfor line in p:\n\tinput_sent = line.strip()\n\tline = line.strip()\n\tline = line.split(\"@@\")\n\t#print(line[1])\n\tans = line[1].split(\"$$$\")\n\t#print(ans,len(ans))\n\t#break\n\tif not(line[0] in Predicted_Questions):\n\t\tPredicted_Questions.append(line[0])\n\t\tPredicted_Answers.append(ans)\n\t\tm.write(input_sent+\"\\n\")\n\nprint(len(Predicted_Answers),len(Predicted_Questions))\nprint(\"\\n#########################################\\n\")\n\nm.close()\n","sub_path":"scripts/Removing_duplications.py","file_name":"Removing_duplications.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"288139123","text":"# DFS\n# DFS (depth-first search) is an algorithm that explores the deeper parts of a graph first.\n# DFS uses a stack data structure (or a recursive function); concretely it works as follows.\n# 1. Push the start node onto the stack and mark it as visited.\n# 2. If the node on top of the stack has any unvisited adjacent node, push that node onto the stack and mark it as visited.\n# If there is no unvisited adjacent node, pop the top node from the stack.\n# 3. Repeat step 2 until it can no longer be performed. \n\n# DFS example code\ndef dfs(graph,v,visited):\n    # mark the current node as visited\n    visited[v] = True\n    print(v, end=' ')\n    # recursively visit every unvisited node adjacent to the current one\n    for i in graph[v]:\n        if not visited[i]:\n            dfs(graph,i,visited)\n    \n# adjacency list for each node\ngraph = [\n    [],\n    [2,3,8],\n    [1,7],\n    [1,4,5],\n    [3,5],\n    [3,4],\n    [7],\n    [2,6,8],\n    [1,7]\n]\n\n# visited flags for each node (1-D list)\nvisited =[False] * 9\n\n# call the DFS function defined above\ndfs(graph,1,visited)","sub_path":"Coding-Test-Python/Python-for-coding-test/05-DFS-BFS/Ch5-DFS.py","file_name":"Ch5-DFS.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"623769308","text":"import http.client\nimport urllib as ul\nimport urllib.request\nimport urllib3\nimport json\nimport time\nimport datetime\nimport os\nimport psutil\nimport csv\nimport mac_input\n\n\n\nwriteAPIkeyentry = \"VRLVOUVOQ4Q5TL1I\" # Replace YOUR-CHANNEL-WRITEAPIKEY with your channel write API key\nwriteAPIkeyexit = \"XCADPV5H310NBZC0\"\nchannelIDentry = \"1247696\" # Replace YOUR-CHANNELID with your channel ID\nchannelIDexit = \"1245307\"\n\n \ndef new_entry(entry_user):\n    \n    string=entry_user.split(\"=\")\n    params = ul.parse.urlencode({'field1': string[0],'field2': string[1],'field3':\"1\",'key':writeAPIkeyentry }) \n    headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\"Accept\": \"text/plain\"}\n    conn = http.client.HTTPConnection(\"api.thingspeak.com:80\")\n    try:\n        conn.request(\"POST\", \"/update\", params, headers)\n        response = conn.getresponse() \n        data = response.read()\n        conn.close()\n        return 1\n    except:\n        print(\"connection failed\")\n    \ndef new_exit(exit_user):\n    string=exit_user.split(\"=\")\n    params = ul.parse.urlencode({'field1': string[0],'field2': string[1],'field3':\"1\",'key':writeAPIkeyexit }) \n    headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\"Accept\": \"text/plain\"}\n    conn = http.client.HTTPConnection(\"api.thingspeak.com:80\")\n    try:\n        conn.request(\"POST\", \"/update\", params, headers)\n        response = conn.getresponse() \n        data = response.read()\n        conn.close()\n        return 1\n    except:\n        print(\"connection failed\")\n\nif __name__ == \"__main__\": # To ensure that this is run directly and does not run when imported\n    #mac_array=[]\n    while(1):\n        while(os.stat(\"new_mac_id.txt\").st_size==0):\n            pass\n        f=open(\"new_mac_id.txt\",\"r\")\n        writedata=f.read()\n        f.close()\n        choice=writedata[-1]\n        writedata=writedata[:-1]\n        if(choice==\"1\"): \n            new_entry(writedata)\n        if(choice==\"2\"):\n            new_exit(writedata)\n        f=open(\"new_mac_id.txt\",\"w\")\n        f.truncate(0)\n        f.close()\n        #print(mac_array)\n    \n    \n    #mac_input.input_mac_update\n    #new_entry(mac_address)\n    \n    '''\n    while 1:\n        # If update interval time has crossed 15 seconds update the message buffer with data\n        if time.time() - lastUpdateTime >= updateInterval:\n            \n            updatesJson()\n    '''","sub_path":"Room1/bulk_update.py","file_name":"bulk_update.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"362450836","text":"import os\nimport glob\nimport logging\nimport torch\nimport time\nfrom .base_checkpointer import BaseCheckpointer\n\nfrom typing import Union, Dict, Any, List, Tuple\n\nlogger = logging.getLogger(__name__)\n\n\nclass AdavancedCheckpointer(BaseCheckpointer):\n    \"\"\"\n    Advanced checkpointer that keeps a bounded number of checkpoints\n    \"\"\"\n    def __init__(self,\n                 model,\n                 num_checkpoints_to_keep: int = 1000,\n                 
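# retention policy (annotation): cap on how many checkpoints are kept; checkpoints older than the time threshold below are exempt from pruning\n                 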
keep_checkpoint_every_num_seconds: float = 3600,\n                 storage_dir: str = \"Checkpoints\"):\n        self.model = model\n        self.storage_dir = storage_dir\n        self.current_checkpoint = {}\n        self.num_checkpoints_to_keep = num_checkpoints_to_keep\n        self.keep_checkpoint_every_num_seconds = keep_checkpoint_every_num_seconds\n        self._saved_checkpoint_paths = []\n        self._last_checkpoint_time = time.time()\n        \n        # initialization\n        os.makedirs(self.storage_dir, exist_ok=True)\n\n    def save_checkpoint(self, stamp:str, state: Dict[str, Any]) -> None:\n        \"\"\"\n        Args:\n            stamp: A string to identify the checkpoint. It can just be the epoch number\n            state: A dictionary to store all necessary information for later restoring\n\n        \"\"\"\n        checkpoint_path = os.path.join(self.storage_dir,\n                                       f\"{stamp}_state.pth\")\n        self.current_checkpoint = state\n\n        if self.num_checkpoints_to_keep > 0:\n            self._saved_checkpoint_paths.append((time.time(), checkpoint_path))\n            # only prune once more checkpoints exist than we are allowed to keep\n            if len(self._saved_checkpoint_paths) > self.num_checkpoints_to_keep:\n                path_to_remove = self._saved_checkpoint_paths.pop(0)\n\n                # check time requirement\n                remove_path = True\n                if self.keep_checkpoint_every_num_seconds is not None:\n                    save_time = path_to_remove[0]\n                    time_since_checkpoint_kept = (save_time -\n                                                  self._last_checkpoint_time)\n                    if time_since_checkpoint_kept > self.keep_checkpoint_every_num_seconds:\n                        # We want to keep this checkpoint.\n                        remove_path = False\n                        self._last_checkpoint_time = save_time\n\n                if remove_path:\n                    for fname in path_to_remove[1:]:\n                        if os.path.isfile(fname):\n                            os.remove(fname)\n\n        torch.save(self.current_checkpoint, checkpoint_path)\n\n    def restore_checkpoint(self, search_method=None):\n        \"\"\"\n        Args:\n            search_method: a Callable to find the wanted checkpoint path\n        \"\"\"\n        # if not specified\n        if not search_method:\n            search_method = self.find_latest_checkpoint\n\n        checkpoint_path = search_method()\n        # map to the cpu first instead of error\n        checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n\n        return checkpoint\n\n    def find_latest_checkpoint(self) -> str:\n        \"\"\"\n        Return the path of the latest checkpoint file.\n        \"\"\"\n        files = glob.glob(os.path.join(self.storage_dir, \"*_state.pth\"))\n        latest_file_path = max(files, key=os.path.getctime)\n        latest_file_path = os.path.join(self.storage_dir, latest_file_path)\n        return latest_file_path\n","sub_path":"torchfly_dev/training/checkpointer/advanced_checkpointer.py","file_name":"advanced_checkpointer.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"254238697","text":"class SinglyLinkedListNode:\n    def __init__(self, node_data):\n        self.data = node_data\n        self.next = None\n\n\nclass SinglyLinkedList:\n    def __init__(self):\n        self.head = None\n        self.tail = None\n\n    def insert_node(self, node_data):\n        node = SinglyLinkedListNode(node_data)\n\n        if not self.head:\n            self.head = node\n        else:\n            self.tail.next = node\n\n        self.tail = node\n\n\ndef items(node):\n    res = []\n    while node:\n        res.append(node.data)\n        node = node.next\n\n    return res\n\n\ndef findMergeNode(head1, head2):\n    keys = {}\n    node1 = head1\n\n    while node1 is not None:\n        if node1.data not in keys:\n            keys[node1.data] = set()\n\n        keys[node1.data].add(node1)\n        node1 = node1.next\n\n    node2 = head2\n    while node2 is not None:\n        if node2.data in keys and node2 in keys[node2.data]:\n            return node2.data\n\n        node2 = node2.next\n","sub_path":"hackerrank/single_linked_list.py","file_name":"single_linked_list.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"113298669","text":"from datetime import datetime\nimport functools\n\n\nclass Solution:\n def nearestPalindromic(self, n: str) -> str:\n x = int(n)\n if x < 10:\n return str(x - 1) if x > 0 else '0'\n l = len(n)\n s = [c for c in n]\n candidate = set()\n if s[0] == '1':\n candidate.add(int('9' * (l - 1)))\n if s[0] == '9':\n candidate.add(int('1' + '0' * l) + 1)\n i, j = 0, l - 1\n palindrome = True\n while i < j:\n if s[j] != s[i]:\n palindrome = False\n s[j] = s[i]\n i += 1\n j -= 1\n if not palindrome:\n candidate.add(int(''.join(s)))\n midIdx = l // 2\n s1 = s[::]\n s2 = s[::]\n mid = int(s[midIdx])\n if mid < 9:\n s1[midIdx] = str(mid + 1)\n if mid > 0:\n s2[midIdx] = str(mid - 1)\n if l % 2 == 0:\n s1[midIdx - 1] = s1[midIdx]\n s2[midIdx - 1] = s2[midIdx]\n if s1 != s:\n candidate.add(int(''.join(s1)))\n if s2 != s:\n candidate.add(int(''.join(s2)))\n _min = float('inf')\n result = ''\n for c in candidate:\n if abs(c - x) < _min or abs(c - x) == _min and c < int(result):\n _min = abs(c - x)\n result = str(c)\n return result\n\n\ns = Solution()\nstartTime = datetime.now()\nprint(s.nearestPalindromic(\"11911\"))\nprint(s.nearestPalindromic(\"1213\"))\nprint(datetime.now() - startTime)\n","sub_path":"leetcode/2020/find-the-closest-palindrome.py","file_name":"find-the-closest-palindrome.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"88080702","text":"\"\"\"empty message\n\nRevision ID: e9bddc85352d\nRevises: 3ec1a5593d88\nCreate Date: 2019-07-24 17:23:44.563703\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e9bddc85352d'\ndown_revision = '3ec1a5593d88'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('data_trust_data_trust_name_key', 'data_trust', type_='unique')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_constraint('data_trust_data_trust_name_key', 'data_trust', type_='unique')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_unique_constraint('data_trust_data_trust_name_key', 'data_trust', ['data_trust_name'])\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/e9bddc85352d_.py","file_name":"e9bddc85352d_.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"303279338","text":"import os\nimport subprocess\nimport sys\nimport re\nimport csv\nimport copy\nimport pdb\nimport shutil\nimport glob\nimport time\nclass UCCLanguageTestLibrary(object):\n\n    ROBOT_LIBRARY_SCOPE = \"TEST SUITE\"\n\n    def __init__(self):\n\n        self.cpp_result = {}\n        self.cpp_result1={}\n        self.cpp_result2={}\n        self.cpp_summary1={}\n        self.cpp_summary2={}\n        self.cpp_result['Total Lines']=[]\n        self.cpp_result[\"Blank Lines\"]=[]\n        self.cpp_result[\"Whole Comments\"]=[]\n        self.cpp_result[\"Embedded Comments\"]=[]\n        self.cpp_result['Compiler Directive']=[]\n        self.cpp_result['Data Decl']=[]\n        self.cpp_result['Exec Instr']=[]\n        self.cpp_result[\"Logical Sloc\"]=[]\n        self.cpp_result[\"Physical Sloc\"]=[]\n        self.cpp_result[\"File Type\"]=[]\n        self.cpp_result[\"Module Name\"]=[]\n        self.cpp_result['psloc_to_lsloc_ratio'] = 0\n\n        # cpp_result1 and cpp_result2 receive independent per-version snapshots in ucc_parse_cpp\n\n        self.cpp_result['total_lines'] = None\n        self.cpp_result['blank_lines'] = None\n        self.cpp_result['whole_comments'] = None\n        self.cpp_result['embedded_comments'] = None\n        self.cpp_result['compiler_directive'] = None\n        self.cpp_result['data_decl'] = None\n        self.cpp_result['exec_instr'] = None\n        self.cpp_result['logical_sloc'] = None\n        self.cpp_result['physical_sloc'] = None\n        self.cpp_result['counted_files'] = None\n        self.cpp_result['accessed_files'] = None\n        self.cpp_result['compiler_keywords']=0\n        self.cpp_result['data_keywords']=0\n        self.cpp_result['executable_keywords']=0\n        self._status = ''\n\n        self.f = open('cpp_counting_flag.csv', 'wt')\n        self.writer = csv.writer(self.f)\n        self.writer.writerow( ('Requirement Name', 'Status', 'Modules Tested') )\n\n    def __del__(self):\n        self.f.close()\n    \n    def ucc_parse_cpp(self,version,path):\n        # reset the per-run accumulators so the first version's data does not leak into the second\n        for key in ('Total Lines', 'Blank Lines', 'Whole Comments', 'Embedded Comments', 'Compiler Directive', 'Data Decl', 'Exec Instr', 'Logical Sloc', 'Physical Sloc', 'File Type', 'Module Name'):\n            self.cpp_result[key] = []\n        self.cpp_result['compiler_keywords'] = 0\n        self.cpp_result['data_keywords'] = 0\n        self.cpp_result['executable_keywords'] = 0\n        with open( path+ '/C_CPP_outfile.csv', 'r') as fh:\n            csvr = list(csv.reader(fh))\n        i = 11\n        row = csvr[i]\n        while( row ):\n            self.cpp_result['Total Lines'].append(row[0])\n            self.cpp_result[\"Blank Lines\"].append(row[1])\n            self.cpp_result[\"Whole Comments\"].append(row[2])\n            self.cpp_result[\"Embedded Comments\"].append(row[3])\n            self.cpp_result['Compiler Directive'].append(row[4])\n            self.cpp_result['Data Decl'].append(row[5])\n            self.cpp_result['Exec Instr'].append(row[6])\n            self.cpp_result[\"Logical Sloc\"].append(row[7])\n            self.cpp_result[\"Physical Sloc\"].append(row[8])\n            self.cpp_result[\"File Type\"].append(row[9])\n            self.cpp_result[\"Module Name\"].append(row[10])\n            i=i+1\n            row = csvr[i]\n            #self.writer.writerow((i,csvr[i]))\n\n        i=i+5\n        row = csvr[i]\n        self.cpp_result['physical_sloc'] = row[7]\n        i=i+1\n        row = csvr[i]\n        self.cpp_result['total_lines'] = row[0]\n        self.cpp_result['blank_lines'] = row[1]\n        self.cpp_result['whole_comments'] = row[2]\n        self.cpp_result['embedded_comments'] = row[3]\n        self.cpp_result['compiler_directive'] = row[4]\n        self.cpp_result['data_decl'] = row[5]\n        self.cpp_result['exec_instr'] = row[6]\n        self.cpp_result['logical_sloc'] = row[7]\n\n        i=i+2\n        row = csvr[i]\n        self.cpp_result['counted_files'] = row[3]\n        self.cpp_result['accessed_files'] = row[1]\n        \n        \n        i=i+2\n        row = csvr[i]\n        self.cpp_result['psloc_to_lsloc_ratio'] = row[1]\n        \n        i=i+4\n        row=csvr[i]\n        while row:\n            
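# accumulate the per-category keyword totals until the section's terminating blank row\n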
            self.cpp_result['compiler_keywords']=self.cpp_result['compiler_keywords']+int(row[1])\n            self.cpp_result['data_keywords']=self.cpp_result['data_keywords']+int(row[3])\n            self.cpp_result['executable_keywords']=self.cpp_result['executable_keywords']+int(row[5])\n            i=i+1\n            row=csvr[i]\n        \n\n        # store a deep copy so the next parse run cannot mutate this version's snapshot\n        if version==\"version1\":\n            self.cpp_result1 = copy.deepcopy(self.cpp_result)\n        else:\n            self.cpp_result2 = copy.deepcopy(self.cpp_result)\n\n    \n    def ucc_total_lines_individual(self):\n        if( self.cpp_result1['Total Lines']==self.cpp_result2['Total Lines']):\n            self.writer.writerow( (\"Total Lines Individual\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Total Lines Individual\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_blank_lines_individual(self):\n        if( self.cpp_result1['Blank Lines']==self.cpp_result2['Blank Lines']):\n            self.writer.writerow( (\"Blank Lines Individual\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Blank Lines Individual\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_whole_comments_individual(self):\n        if( self.cpp_result1['Whole Comments']==self.cpp_result2['Whole Comments']):\n            self.writer.writerow( (\"Whole Commments Individual\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Whole Comments Individual\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_embedded_comments_individual(self):\n        if( self.cpp_result1['Embedded Comments']==self.cpp_result2['Embedded Comments']):\n            self.writer.writerow( (\"Embedded Comments Individual\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Embedded Comments Individual\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_compiler_directive_individual(self):\n        if( self.cpp_result1['Compiler Directive']==self.cpp_result2['Compiler Directive']):\n            self.writer.writerow( (\"Compiler Directive Individual\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Compiler Directive Individual\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_data_decl_individual(self):\n        if( self.cpp_result1['Data Decl']==self.cpp_result2['Data Decl']):\n            self.writer.writerow( (\"Data Decl Individual\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Data Decl Individual\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_exec_instr_individual(self):\n        if( self.cpp_result1['Exec Instr']==self.cpp_result2['Exec Instr']):\n            self.writer.writerow( (\"Exec Instr Individual\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Exec Instr Individual\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_logical_sloc_individual(self):\n        if( self.cpp_result1['Logical Sloc']==self.cpp_result2['Logical Sloc']):\n            self.writer.writerow( (\"Logical Sloc Individual\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Logical Sloc Individual\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_physical_sloc_individual(self):\n        if( self.cpp_result1['Physical Sloc']==self.cpp_result2['Physical Sloc']):\n            self.writer.writerow( (\"Physical Sloc Individual\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Physical Sloc Individual\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_file_type_individual(self):\n        if( self.cpp_result1['File Type']==self.cpp_result2['File Type']):\n            self.writer.writerow( (\"File Type\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"File Type\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n\n    def ucc_total_lines(self):\n        if( self.cpp_result1['total_lines']==self.cpp_result2['total_lines']):\n            self.writer.writerow( (\"Total Lines\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Total Lines\", 
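# on a mismatch this logs the full module-name list, not just the differing file\n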
'No',self.cpp_result1[\"Module Name\"]) )\n\n\n    def ucc_blank_lines(self):\n        if( self.cpp_result1['blank_lines']==self.cpp_result2['blank_lines']):\n            self.writer.writerow( (\"Blank Lines\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Blank Lines\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_whole_comments(self):\n        if( self.cpp_result1['whole_comments']==self.cpp_result2['whole_comments']):\n            self.writer.writerow( (\"Whole Comments\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Whole Comments\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_embedded_comments(self):\n        if( self.cpp_result1['embedded_comments']==self.cpp_result2['embedded_comments']):\n            self.writer.writerow( (\"Embedded Comments\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Embedded Comments\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_compiler_directive(self):\n        if( self.cpp_result1['compiler_directive']==self.cpp_result2['compiler_directive']):\n            self.writer.writerow( (\"Compiler Directive\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Compiler Directive\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_data_decl(self):\n        if( self.cpp_result1['data_decl']==self.cpp_result2['data_decl']):\n            self.writer.writerow( (\"Data Decl\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Data Decl\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_exec_instr(self):\n        if( self.cpp_result1['exec_instr']==self.cpp_result2['exec_instr']):\n            self.writer.writerow( (\"Exec Instr\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Exec Instr\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_logical_sloc(self):\n        if( self.cpp_result1['logical_sloc']==self.cpp_result2['logical_sloc']):\n            self.writer.writerow( (\"Logical Sloc\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Logical Sloc\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_physical_sloc(self):\n        if( self.cpp_result1['physical_sloc']==self.cpp_result2['physical_sloc']):\n            self.writer.writerow( (\"Physical Sloc\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Physical Sloc\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_counted_files(self):\n        if( self.cpp_result1['counted_files']==self.cpp_result2['counted_files']):\n            self.writer.writerow( (\"Counted Files\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Counted Files\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_accessed_files(self):\n        if( self.cpp_result1['accessed_files']==self.cpp_result2['accessed_files']):\n            self.writer.writerow( (\"Accessed Files\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Accessed Files\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_psloc_to_lsloc_ratio(self):\n        if( self.cpp_result1['psloc_to_lsloc_ratio']==self.cpp_result2['psloc_to_lsloc_ratio']):\n            self.writer.writerow( (\"PSLOC To LSLOC Ratio\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"PSLOC To LSLOC Ratio\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_compiler_keywords(self):\n        if( self.cpp_result1['compiler_keywords']==self.cpp_result2['compiler_keywords']):\n            self.writer.writerow( (\"Compiler Keywords\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Compiler Keywords\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_data_keywords(self):\n        if( self.cpp_result1['data_keywords']==self.cpp_result2['data_keywords']):\n            self.writer.writerow( (\"Data Keywords\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Data Keywords\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def ucc_executable_keywords(self):\n        if( self.cpp_result1['executable_keywords']==self.cpp_result2['executable_keywords']):\n            self.writer.writerow( (\"Executable Keywords\", 'Yes','None') )\n        else:\n            self.writer.writerow( (\"Executable Keywords\", 'No',self.cpp_result1[\"Module Name\"]) )\n\n    def display_version(self):\n        self._run_command('-v')\n\n    def run_ucc(self, *args):\n        self._run_command(*args)\n\n    def _run_command(self, *args):\n        t_start = time.time()\n        command = list(args)\n        process = subprocess.Popen(command, stdout=subprocess.PIPE,\n                                   stderr=subprocess.STDOUT)\n        self._status = process.communicate()[0].strip()\n        t_end = time.time()\n        with open(\"time\", \"w\") as f:\n            f.write(str(t_end - t_start))\n\n    def time_limit(self, limit):\n        with open(\"time\", \"r\") as f:\n            real_time = float(f.read())\n        limit = float(limit)\n        if real_time > limit:\n            raise Exception(\"Test case took \" + str(real_time) + \" seconds. But limit is: \" + str(limit))\n\n    def create_ucc_file_list(self, input_fn, *folders):\n        with open(input_fn, 'w') as fh:\n            for path in folders:\n                for fn in os.listdir(path):\n                    fh.write(path+\"/\"+fn+\"\\n\")\n\n    def copy_files_to_directory(self, source_dir, destination_dir):\n        if os.path.exists(destination_dir):\n            shutil.rmtree(destination_dir, ignore_errors=True)\n        os.mkdir(destination_dir)\n        for filename in glob.glob(os.path.join(source_dir, '*.*')):\n            shutil.copy(filename, destination_dir)\n","sub_path":"Framework/lang/ucc_test_suites/lib/UCCLanguageTestLibrary.py","file_name":"UCCLanguageTestLibrary.py","file_ext":"py","file_size_in_byte":13450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"417319841","text":"from django.contrib import admin\nfrom api.models import *\n# Register your models here.\nclass thar(admin.ModelAdmin):\n    list_display=('name','module1','module2','host','status','date','log')\n    search_fields=('name','module1','status','host')\nadmin.site.register(Thar,thar)\nclass sgstatus(admin.ModelAdmin):\n    list_display= ('startTime', 'endTime', 'monitorType', 'succNum', 'failNum', 'errNum', 'failRate','succRate','failData')\n    search_fields= ('startTime', 'endTime', 'monitorType','failRate')\nadmin.site.register(SGStatus,sgstatus)\n","sub_path":"api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"386029009","text":"import numpy as np\r\n\r\nimport torch\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\n\r\nclass GaborPyramid(nn.Module):\r\n    \"\"\"\r\n    Create a module that maps stacks of images to a Gabor pyramid.\r\n    Only works in grayscale\r\n    \"\"\"\r\n    def __init__(self, \r\n                 nlevels=5):\r\n        super(GaborPyramid, self).__init__()\r\n        self.nlevels = nlevels\r\n        self.setup()\r\n\r\n    def setup(self):\r\n        # The filters will be 8x1x9x9\r\n        xi, yi = torch.meshgrid(torch.arange(-4, 5), torch.arange(-4, 5))\r\n        filters = []\r\n        for ii in range(4):\r\n            coso = np.cos(ii * np.pi / 4)\r\n            sino = np.sin(ii * np.pi / 4)\r\n            G = torch.exp(-(xi**2+yi**2)/2/2**2)\r\n            thefilt1 = torch.cos((coso*xi+sino*yi)*.8) * G\r\n            thefilt2 = torch.sin((coso*xi+sino*yi)*.8) * G\r\n            thefilt1 = thefilt1 - G / G.mean() * thefilt1.mean()\r\n            thefilt2 = thefilt2 - G / G.mean() * thefilt2.mean()\r\n            scale = 1 / torch.sqrt((thefilt1 ** 2).sum())\r\n\r\n            filters += [thefilt1 * scale, thefilt2 * scale]\r\n\r\n        downsample_filt = torch.tensor([[.25, .5, 
.25], [.5, 1.0, .5], [.25, .5, .25]]).view(1, 1, 3, 3)\r\n downsample_filt /= 4.0\r\n\r\n filters = torch.stack(filters, dim=0).view(8, 1, 9, 9)\r\n self.register_buffer('filters', filters, False)\r\n self.register_buffer('downsample_filt', downsample_filt, False)\r\n\r\n def forward(self, X):\r\n X_ = X.sum(axis=1, keepdims=True)\r\n maps = []\r\n for i in range(self.nlevels):\r\n outputs = F.conv2d(X_, self.filters, padding=4)\r\n magnitude = torch.sqrt((outputs ** 2)[:, ::2, :, :] + \r\n (outputs ** 2)[:, 1::2, :, :])\r\n if i == 0:\r\n maps.append(magnitude)\r\n else:\r\n maps.append(F.interpolate(magnitude, scale_factor=2**i, mode='bilinear', align_corners=False)[:, :, :X.shape[2], :X.shape[3]])\r\n\r\n X_ = F.conv2d(X_, self.downsample_filt, padding=1, stride=2)\r\n \r\n return torch.cat(maps, axis=1)\r\n \r\n\r\nclass GaborPyramid3d(nn.Module):\r\n \"\"\"\r\n Create a module that maps stacks of images to a 3d Gabor pyramid.\r\n Only works in grayscale\r\n \"\"\"\r\n def __init__(self, \r\n nlevels=5,\r\n nt=7,\r\n stride=1,\r\n motionless=False):\r\n super(GaborPyramid3d, self).__init__()\r\n self.nt = nt\r\n self.nlevels = nlevels\r\n self.stride = stride\r\n self.motionless = motionless\r\n self.setup()\r\n\r\n def setup(self):\r\n # The filters will be 8x1xntx9x9\r\n nx, no = 9, 4\r\n zi, yi, xi = torch.meshgrid(torch.arange(-(self.nt // 2), \r\n (self.nt + 1) // 2), \r\n torch.arange(-4, 5), \r\n torch.arange(-4, 5))\r\n\r\n assert zi.shape[0] == self.nt\r\n filters = []\r\n for ii in range(no):\r\n for dt in [-1, 0, 1]:\r\n coso = np.cos(ii * np.pi / no)\r\n sino = np.sin(ii * np.pi / no)\r\n G = torch.exp(-(xi**2 + yi**2 + (zi / self.nt * nx)**2)/2/2**2)\r\n thefilt1 = torch.cos((coso * xi + sino * yi)*.8 + dt * zi / self.nt * np.pi * 2) * G\r\n thefilt2 = torch.sin((coso * xi + sino * yi)*.8 + dt * zi / self.nt * np.pi * 2) * G\r\n thefilt1 = thefilt1 - G / G.mean() * thefilt1.mean()\r\n thefilt2 = thefilt2 - G / G.mean() * thefilt2.mean()\r\n scale = 1 / torch.sqrt((thefilt1 ** 2).sum())\r\n\r\n filters += [thefilt1 * scale, thefilt2 * scale]\r\n\r\n downsample_filt = torch.tensor([[.25, .5, .25], [.5, 1.0, .5], [.25, .5, .25]]).view(1, 1, 3, 3)\r\n downsample_filt /= 4.0\r\n\r\n filters = torch.stack(filters, dim=0).view(no * 3 * 2, 1, self.nt, nx, nx)\r\n self.register_buffer('filters', filters, False)\r\n self.register_buffer('downsample_filt', downsample_filt, False)\r\n\r\n def forward(self, X):\r\n # Transform to grayscale.\r\n X_ = X.sum(axis=1, keepdims=True)\r\n maps = []\r\n for i in range(self.nlevels):\r\n outputs = F.conv3d(X_, \r\n self.filters, \r\n padding=(self.nt//2, 4, 4),\r\n stride=self.stride)\r\n magnitude = torch.sqrt((outputs ** 2)[:, ::2, :, :, :] + \r\n (outputs ** 2)[:, 1::2, :, :, :])\r\n\r\n if self.motionless:\r\n # Add the two directions together\r\n magnitude = torch.cat([(magnitude[:, 0::3, :, :, :] + magnitude[:, 2::3, :, :, :]) / 2.0,\r\n magnitude[:, 1::3, :, :, :]], axis=1)\r\n\r\n if i == 0:\r\n maps.append(magnitude)\r\n else:\r\n # Only the spatial dimension is resized.\r\n the_map = F.interpolate(magnitude.reshape((magnitude.shape[0], -1, magnitude.shape[-2], magnitude.shape[-1])), \r\n scale_factor=2**i, \r\n mode='bilinear', \r\n align_corners=False)\r\n the_map = the_map.reshape(magnitude.shape[0], \r\n magnitude.shape[1], -1, the_map.shape[-2], the_map.shape[-1])[:, :, :, :X.shape[-2], :X.shape[-1]]\r\n maps.append(the_map)\r\n\r\n X_ = F.conv2d(X_.reshape((X_.shape[0]*X_.shape[2], 1, X_.shape[-2], X_.shape[-1])), \r\n 
self.downsample_filt, \r\n padding=1, \r\n stride=2)\r\n X_ = X_.reshape(X.shape[0], 1, -1, X_.shape[-2], X_.shape[-1])\r\n return torch.cat(maps, axis=1) # [:, :, 2:-2, :, :]","sub_path":"modelzoo/gabor_pyramid.py","file_name":"gabor_pyramid.py","file_ext":"py","file_size_in_byte":5908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"135546155","text":"import numpy as np \nfrom collections import defaultdict \n\nclass actMeasureTabularQLearning:\n\tdef __init__(self, actionSpaceSize, stateSpaceSize, measureBias, qInit=0.0, discount_factor = 0.9, alpha = 0.1, epsilon = 0.1):\n\t\tself.actionSpaceSize = actionSpaceSize\n\t\tself.stateSpaceSize = stateSpaceSize\n\t\tself.epsilon = epsilon\n\t\tself.discountFactor = discount_factor\n\t\tself.alpha = alpha\n\t\tself.Q = np.repeat(qInit, actionSpaceSize*stateSpaceSize).reshape(stateSpaceSize,actionSpaceSize)\t\n\t\tmeasureQ = np.repeat(qInit+measureBias, actionSpaceSize*stateSpaceSize).reshape(stateSpaceSize,actionSpaceSize)\n\t\tself.Q = np.concatenate([self.Q, measureQ], axis=1)\n\tdef epGreedyAction(self, state): \n\t\tep = np.random.uniform(0,1,1)\n\t\tif ep <= self.epsilon:\n\t\t\taction = np.random.choice(np.arange(self.actionSpaceSize)) # pick a random action\n\t\telse: \n\t\t\taction = self.greedyAction(state)\n\t\treturn action\n\tdef greedyAction(self, state):\n\t\treturn np.argmax(self.Q[int(state)])\n\tdef updateQTable(self, state, action, reward, nextState):\n\t\tbestNextAction = np.argmax(self.Q[int(nextState)])\n\t\ttd_target = reward + self.discountFactor * self.Q[int(nextState)][bestNextAction]\n\t\ttd_delta = td_target - self.Q[int(state)][action] \n\t\tself.Q[int(state)][action] += self.alpha * td_delta \n\n","sub_path":"source/agents/actMeasureTabularQLearningWrp.py","file_name":"actMeasureTabularQLearningWrp.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"299352673","text":"# -*- coding:utf-8 -*-\nfrom django.conf import settings\nfrom django.contrib.sites.shortcuts import get_current_site\n\n\ndef common_values(request):\n\n ga_prop_id = getattr(settings, 'GOOGLE_ANALYTICS_PROPERTY_ID', False)\n google_verification_id = getattr(settings, 'GOOGLE_SITE_VERIFICATION_ID', False)\n\n values = {}\n\n if not settings.DEBUG and ga_prop_id:\n values['GOOGLE_ANALYTICS_PROPERTY_ID'] = ga_prop_id\n\n if not settings.DEBUG and google_verification_id:\n values['GOOGLE_SITE_VERIFICATION_ID'] = google_verification_id\n\n values['settings'] = settings.DCF\n\n return values\n","sub_path":"dcf/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"238689008","text":"\"\"\"\r\nQuestion 86 :\r\n By using list comprehension, write a program to\r\n print the list after removing the values 24 in\r\n 12, 24, 35, 70, 88, 120, 155].\r\n\r\n Hints : Use list's remove method to delete a values.\r\n\"\"\"\r\n\r\n# Solution :\r\n\r\nli = [12, 24, 35, 70, 88, 120, 155]\r\nli = [x for x in li if x != 24]\r\nprint(li)\r\n\r\n\"\"\"\r\nOutput :\r\n \r\n\"\"\"","sub_path":"Question-86.py","file_name":"Question-86.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"345995806","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: 
Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/postlund/pyatv_dev/pyatv/pyatv/mrp/messages.py\n# Compiled at: 2019-10-01 04:57:58\n# Size of source mod 2**32: 5519 bytes\n\"\"\"Helper code for dealing with protobuf messages.\"\"\"\nimport binascii\nfrom pyatv.mrp import protobuf\nfrom pyatv.mrp import tlv8\n\ndef create(message_type, priority=0):\n    \"\"\"Create a ProtocolMessage.\"\"\"\n    message = protobuf.ProtocolMessage()\n    message.type = message_type\n    message.priority = priority\n    return message\n\n\ndef device_information(name, identifier):\n    \"\"\"Create a new DEVICE_INFO_MESSAGE.\"\"\"\n    message = create(protobuf.DEVICE_INFO_MESSAGE)\n    info = message.inner()\n    info.uniqueIdentifier = identifier\n    info.name = name\n    info.localizedModelName = 'iPhone'\n    info.systemBuildVersion = '14G60'\n    info.applicationBundleIdentifier = 'com.apple.TVRemote'\n    info.applicationBundleVersion = '273.12'\n    info.protocolVersion = 1\n    info.lastSupportedMessageType = 58\n    info.supportsExtendedMotion = True\n    return message\n\n\ndef set_ready_state():\n    \"\"\"Create a new SET_READY_STATE_MESSAGE.\"\"\"\n    return create(protobuf.ProtocolMessage.SET_READY_STATE_MESSAGE)\n\n\ndef set_connection_state():\n    \"\"\"Create a new SET_CONNECTION_STATE.\"\"\"\n    message = create(protobuf.ProtocolMessage.SET_CONNECTION_STATE_MESSAGE)\n    message.inner().state = protobuf.SetConnectionStateMessage.Connected\n    return message\n\n\ndef crypto_pairing(pairing_data, is_pairing=False):\n    \"\"\"Create a new CRYPTO_PAIRING_MESSAGE.\"\"\"\n    message = create(protobuf.CRYPTO_PAIRING_MESSAGE)\n    crypto = message.inner()\n    crypto.status = 0\n    crypto.pairingData = tlv8.write_tlv(pairing_data)\n    crypto.isRetrying = False\n    crypto.isUsingSystemPairing = False\n    crypto.state = 2 if is_pairing else 0\n    return message\n\n\ndef client_updates_config(artwork=True, now_playing=True, volume=True, keyboard=True):\n    \"\"\"Create a new CLIENT_UPDATES_CONFIG_MESSAGE.\"\"\"\n    message = create(protobuf.CLIENT_UPDATES_CONFIG_MESSAGE)\n    config = message.inner()\n    config.artworkUpdates = artwork\n    config.nowPlayingUpdates = now_playing\n    config.volumeUpdates = volume\n    config.keyboardUpdates = keyboard\n    return message\n\n\ndef wake_device():\n    \"\"\"Create a new WAKE_DEVICE_MESSAGE.\"\"\"\n    return create(protobuf.WAKE_DEVICE_MESSAGE)\n\n\ndef register_hid_device(screen_width, screen_height, absolute=False, integrated_display=False):\n    \"\"\"Create a new REGISTER_HID_DEVICE_MESSAGE.\"\"\"\n    message = create(protobuf.REGISTER_HID_DEVICE_MESSAGE)\n    descriptor = message.inner().deviceDescriptor\n    descriptor.absolute = 1 if absolute else 0\n    descriptor.integratedDisplay = 1 if integrated_display else 0\n    descriptor.screenSizeWidth = screen_width\n    descriptor.screenSizeHeight = screen_height\n    return message\n\n\ndef send_packed_virtual_touch_event(xpos, ypos, phase, device_id, finger):\n    \"\"\"Create a new SEND_PACKED_VIRTUAL_TOUCH_EVENT_MESSAGE.\"\"\"\n    message = create(protobuf.SEND_PACKED_VIRTUAL_TOUCH_EVENT_MESSAGE)\n    event = message.inner()\n    event.data = xpos.to_bytes(2, byteorder='little')\n    event.data += ypos.to_bytes(2, byteorder='little')\n    event.data += phase.to_bytes(2, byteorder='little')\n    event.data += device_id.to_bytes(2, byteorder='little')\n    event.data += finger.to_bytes(2, byteorder='little')\n    return message\n\n\ndef send_hid_event(use_page, usage, down):\n    \"\"\"Create a new SEND_HID_EVENT_MESSAGE.\"\"\"\n    message = create(protobuf.SEND_HID_EVENT_MESSAGE)\n    event = message.inner()\n    abstime = 
binascii.unhexlify(b'438922cf08020000')\n    data = use_page.to_bytes(2, byteorder='big')\n    data += usage.to_bytes(2, byteorder='big')\n    data += (1 if down else 0).to_bytes(2, byteorder='big')\n    event.hidEventData = abstime + binascii.unhexlify(b'00000000000000000100000000000000020' + b'00000200000000300000001000000000000') + data + binascii.unhexlify(b'0000000000000001000000')\n    return message\n\n\ndef command(cmd):\n    \"\"\"Playback command request.\"\"\"\n    message = create(protobuf.SEND_COMMAND_MESSAGE)\n    send_command = message.inner()\n    send_command.command = cmd\n    return message\n\n\ndef repeat(mode):\n    \"\"\"Change repeat mode of current player.\"\"\"\n    message = command(protobuf.CommandInfo_pb2.ChangeRepeatMode)\n    send_command = message.inner()\n    send_command.options.externalPlayerCommand = True\n    send_command.options.repeatMode = mode\n    return message\n\n\ndef shuffle(enable):\n    \"\"\"Change shuffle mode of current player.\"\"\"\n    message = command(protobuf.CommandInfo_pb2.ChangeShuffleMode)\n    send_command = message.inner()\n    send_command.options.shuffleMode = 3 if enable else 1\n    return message\n\n\ndef seek_to_position(position):\n    \"\"\"Seek to an absolute position in stream.\"\"\"\n    message = command(protobuf.CommandInfo_pb2.SeekToPlaybackPosition)\n    send_command = message.inner()\n    send_command.options.playbackPosition = position\n    return message","sub_path":"pycfiles/pyatv-0.6.1-py3-none-any/messages.cpython-36.py","file_name":"messages.cpython-36.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"142618122","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 8 10:31:34 2021\n\n@author: Andres Salazar\n\"\"\"\n\nData_base = []\nOnly_product = []\ncp = 's'\n\nwhile(cp == 's'):\n    \n    New_products = int(input(\"Numero de articulos a agregar: \"))\n    \n    for j in range(New_products):\n        \n        P_name = input(\"Nombre del producto: \")\n        P_cost = input(\"Costo del producto: \")\n        P_sell_price = input(\"Costo de venta al publico: \")\n        P_stock = input(\"Unidades disponibles: \")\n        \n        for i in (P_name, P_cost, P_sell_price, P_stock):\n            Only_product.append(i)\n        \n        Data_base.append(Only_product)\n        \n        Ganancia = int(P_sell_price) - int(P_cost)\n        \n        print(\"------------------------------------------------\")\n        print(\"Productos: \")\n        print(\"Producto: \", Data_base[j][0])\n        print(\"CU: $\", Data_base[j][1])\n        print(\"PVP: $\", Data_base[j][2])\n        print(\"Unidades disponibles: \", Data_base[j][3])\n        print(\"Ganancia: \", Ganancia)\n        print(\"------------------------------------------------\")\n        \n        Only_product = []\n    \n    cp = input(\"Desea agregar un nuevo producto? s/n: \")\n    \n#%%\n\non = 's'\nData_base = []\ncakes = []\nwhile(on == 's'):\n    \n    sugar = int(input(\"Numero de tasas azucar: \"))\n    flour = 2*sugar + 4\n    butter = (flour + sugar)//5\n\n    for i in (sugar, flour, butter):\n        cakes.append(i)\n\n    Data_base.append(cakes)\n\n    if (butter <= 20):\n        pack = 'uno'\n    elif(butter > 20 and butter <= 30):\n        pack = 'dos'\n    elif(butter > 30 and butter <= 50):\n        pack = 'tres'\n    else:\n        pack = 'cuatro'\n    \n    print(sugar, flour, butter)\n    print(pack)\n    \n    print(\"Con \" + str(sugar) + \" tasas de azucar se necesitan \" + str(flour) + \" de harina y \" + str(butter) \n          + \" de mantequilla y es de empaquetado \" + pack + \".\")\n    \n    cakes = []\n    \n    on = input(\"Desea agregar una nueva torta? 
s/n \")\n \n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n ","sub_path":"Semana2/Reto_Semana2_Pasteleria_y_Tienda.py","file_name":"Reto_Semana2_Pasteleria_y_Tienda.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"358354486","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\ndef create_cubes(n):\n \n for x in range(n):\n yield x**3\n\n\n# In[6]:\n\n\nfor x in create_cubes(10):\n print(x)\n\n\n# In[7]:\n\n\nlist(create_cubes(10))\n\n\n# In[10]:\n\n\ndef gen_fibon(n):\n \n a = 1 \n b = 1\n output = []\n \n for i in range(n):\n output.append(a)\n a,b = b,a+b\n return output\n\n\n# In[11]:\n\n\nfor number in gen_fibon(10):\n print(number)\n\n\n# In[12]:\n\n\ndef simple_gen():\n for x in range(3):\n yield x\n\n\n# In[14]:\n\n\nfor number in simple_gen():\n print(number)\n\n\n# In[15]:\n\n\ng = simple_gen()\n\n\n# In[16]:\n\n\ng\n\n\n# In[17]:\n\n\nprint(next(g))\n\n\n# In[18]:\n\n\nprint(next(g))\n\n\n# In[19]:\n\n\nprint(next(g))\n\n\n# In[20]:\n\n\nprint(next(g))\n\n\n# In[21]:\n\n\ns = 'hello'\n\n\n# In[ ]:\n\n\nfor letter in s:\n print(letter)\n\n\n# In[22]:\n\n\nnext(s)\n\n\n# In[23]:\n\n\ns_iter = iter(s)\n\n\n# In[24]:\n\n\nnext(s_iter)\n\n\n# In[25]:\n\n\nnext(s_iter)\n\n\n# In[26]:\n\n\nnext(s_iter)\n\n\n# In[27]:\n\n\nnext(s_iter)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Generators and Iterations.py","file_name":"Generators and Iterations.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"125505820","text":"# -*- coding: utf-8 -*-\n# This file is part of Tryton. 
The COPYRIGHT file at the top level of\n# this repository contains the full copyright notices and license terms.\n\"\"\"\nMiscelleanous tools used by tryton\n\"\"\"\nimport os\nimport sys\nfrom array import array\nfrom itertools import islice\nimport types\nimport io\nimport warnings\nimport importlib\n\nfrom sql import Literal\nfrom sql.operators import Or\n\nfrom trytond.const import OPERATORS\n\n\ndef file_open(name, mode=\"r\", subdir='modules', encoding=None):\n \"\"\"Open a file from the root dir, using a subdir folder.\"\"\"\n from trytond.modules import EGG_MODULES\n root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n def secure_join(root, *paths):\n \"Join paths and ensure it still below root\"\n path = os.path.join(root, *paths)\n path = os.path.normpath(path)\n if not path.startswith(os.path.join(root, '')):\n raise IOError(\"Permission denied: %s\" % name)\n return path\n\n egg_name = False\n if subdir == 'modules':\n module_name = name.split(os.sep)[0]\n if module_name in EGG_MODULES:\n epoint = EGG_MODULES[module_name]\n mod_path = os.path.join(epoint.dist.location,\n *epoint.module_name.split('.')[:-1])\n mod_path = os.path.abspath(mod_path)\n egg_name = secure_join(mod_path, name)\n if not os.path.isfile(egg_name):\n # Find module in path\n for path in sys.path:\n mod_path = os.path.join(path,\n *epoint.module_name.split('.')[:-1])\n mod_path = os.path.abspath(mod_path)\n egg_name = secure_join(mod_path, name)\n if os.path.isfile(egg_name):\n break\n if not os.path.isfile(egg_name):\n # When testing modules from setuptools location is the\n # module directory\n egg_name = secure_join(\n os.path.dirname(epoint.dist.location), name)\n\n if subdir:\n if (subdir == 'modules'\n and (name.startswith('ir' + os.sep)\n or name.startswith('res' + os.sep)\n or name.startswith('tests' + os.sep))):\n name = secure_join(root_path, name)\n else:\n name = secure_join(root_path, subdir, name)\n else:\n name = secure_join(root_path, name)\n\n for i in (name, egg_name):\n if i and os.path.isfile(i):\n return io.open(i, mode, encoding=encoding)\n\n raise IOError('File not found : %s ' % name)\n\n\ndef get_smtp_server():\n \"\"\"\n Instanciate, configure and return a SMTP or SMTP_SSL instance from\n smtplib.\n :return: A SMTP instance. The quit() method must be call when all\n the calls to sendmail() have been made.\n \"\"\"\n from ..sendmail import get_smtp_server\n warnings.warn(\n 'get_smtp_server is deprecated use trytond.sendmail',\n DeprecationWarning)\n return get_smtp_server()\n\n\ndef memoize(maxsize):\n \"\"\"\n Decorator to 'memoize' a function - caching its results with a\n near LRU implementation.\n\n The cache keeps a list of keys logicaly separated in 4 segment :\n\n segment 1 | ... | segment4\n [k,k,k,k,k,k,k, .. ,k,k,k,k,k,k,k]\n\n For each segment there is a pointer that loops on it. When a key\n is accessed from the cache it is promoted to the first segment (at\n the pointer place of segment one), the key under the pointer is\n moved to the next segment, the pointer is then incremented and so\n on. 
A key that is removed from the last segment is removed from\n the cache.\n\n :param: maxsize the size of the cache (must be greater than or\n equal to 4)\n \"\"\"\n assert maxsize >= 4, \"Memoize cannot work if maxsize is less than 4\"\n\n def wrap(fct):\n cache = {}\n keys = [None for i in range(maxsize)]\n seg_size = maxsize // 4\n\n pointers = [i * seg_size for i in range(4)]\n max_pointers = [(i + 1) * seg_size for i in range(3)] + [maxsize]\n\n def wrapper(*args):\n key = repr(args)\n res = cache.get(key)\n if res:\n pos, res = res\n keys[pos] = None\n else:\n res = fct(*args)\n\n value = res\n for segment, pointer in enumerate(pointers):\n newkey = keys[pointer]\n keys[pointer] = key\n cache[key] = (pointer, value)\n\n pointers[segment] = pointer + 1\n if pointers[segment] == max_pointers[segment]:\n pointers[segment] = segment * seg_size\n\n if newkey is None:\n break\n segment, value = cache.pop(newkey)\n key = newkey\n\n return res\n\n wrapper.__doc__ = fct.__doc__\n wrapper.__name__ = fct.__name__\n\n return wrapper\n return wrap\n\n\ndef reduce_ids(field, ids):\n '''\n Return a small SQL expression for the list of ids and the sql column\n '''\n ids = list(ids)\n if not ids:\n return Literal(False)\n assert all(x.is_integer() for x in ids if isinstance(x, float)), \\\n 'ids must be integer'\n ids = list(map(int, ids))\n ids.sort()\n prev = ids.pop(0)\n continue_list = [prev, prev]\n discontinue_list = array('l')\n sql = Or()\n for i in ids:\n if i == prev:\n continue\n if i != prev + 1:\n if continue_list[-1] - continue_list[0] < 5:\n discontinue_list.extend([continue_list[0] + x for x in\n range(continue_list[-1] - continue_list[0] + 1)])\n else:\n sql.append((field >= continue_list[0])\n & (field <= continue_list[-1]))\n continue_list = []\n continue_list.append(i)\n prev = i\n if continue_list[-1] - continue_list[0] < 5:\n discontinue_list.extend([continue_list[0] + x for x in\n range(continue_list[-1] - continue_list[0] + 1)])\n else:\n sql.append((field >= continue_list[0]) & (field <= continue_list[-1]))\n if discontinue_list:\n sql.append(field.in_(discontinue_list))\n return sql\n\n\ndef reduce_domain(domain):\n '''\n Reduce domain\n '''\n if not domain:\n return []\n operator = 'AND'\n if isinstance(domain[0], str):\n operator = domain[0]\n domain = domain[1:]\n result = [operator]\n for arg in domain:\n if (isinstance(arg, tuple) or\n (isinstance(arg, list) and\n len(arg) > 2 and\n arg[1] in OPERATORS)):\n # clause\n result.append(arg)\n elif isinstance(arg, list) and arg:\n # sub-domain\n sub_domain = reduce_domain(arg)\n sub_operator = sub_domain[0]\n if sub_operator == operator:\n result.extend(sub_domain[1:])\n else:\n result.append(sub_domain)\n else:\n result.append(arg)\n return result\n\n\ndef grouped_slice(records, count=None):\n 'Grouped slice'\n from trytond.transaction import Transaction\n if count is None:\n count = Transaction().database.IN_MAX\n count = max(1, count)\n for i in range(0, len(records), count):\n yield islice(records, i, i + count)\n\n\ndef is_instance_method(cls, method):\n for klass in cls.__mro__:\n type_ = klass.__dict__.get(method)\n if type_ is not None:\n return isinstance(type_, types.FunctionType)\n\n\ndef resolve(name):\n \"Resolve a dotted name to a global object.\"\n name = name.split('.')\n used = name.pop(0)\n found = importlib.import_module(used)\n for n in name:\n used = used + '.' 
+ n\n try:\n found = getattr(found, n)\n except AttributeError:\n found = importlib.import_module(used)\n return found\n","sub_path":"lib/python3.8/site-packages/trytond/tools/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":7961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"639670358","text":"import argparse\nimport openreview\nimport csv\n\n## Argument handling\nparser = argparse.ArgumentParser()\nparser.add_argument('--baseurl', help=\"base url\")\nparser.add_argument('--username')\nparser.add_argument('--password')\nparser.add_argument('--ifile', help=\"input file name - default to status.csv\")\nargs = parser.parse_args()\n\n## Initialize the client library with username and password\nif args.username!=None and args.password!=None:\n client = openreview.Client(baseurl=args.baseurl, username=args.username, password=args.password)\nelse:\n client = openreview.Client(baseurl=args.baseurl)\n## Initialize output file name\nfile_name = \"acceptances.csv\"\nif args.ifile!=None:\n file_name = args.ifile\n\nsubmissions = client.get_notes(invitation='ICLR.cc/2017/workshop/-/submission')\nacceptances = client.get_notes(invitation='ICLR.cc/2017/workshop/-/paper.*/acceptance')\n# valid acceptance values\nvalid_values = [\n \"OK\",\n \"Reject\"\n]\n\n# these\nACCEPT_INDX = 6\n\nany_errors = False\n# accept_new[paper_num] dictionary w/ 'forum', 'acceptance'\naccept_new = {}\n# initialize accept_new from file\ntry:\n with open(file_name, \"rb\") as in_file:\n file_reader = csv.reader(in_file, delimiter=',')\n for row in file_reader:\n # first column is the paper number, last column is the needed acceptance status\n paper_num = int(row[0])\n # print(\"add %s\" %paper_num)\n if row[ACCEPT_INDX] in valid_values:\n accept_new[paper_num] = {}\n accept_new[paper_num]['acceptance'] = row[ACCEPT_INDX]\n else:\n any_errors = True\n print(\"Paper%s invalid acceptance value '%s'\" %(paper_num,row[ACCEPT_INDX]))\n\nexcept (OSError, IOError) as e:\n print(e)\n file_data =[]\n exit()\n\n# if any of the acceptance values were set to unrecognized values, print the accepted values\nif any_errors:\n print(\"Valid acceptance values are %s\" %valid_values)\n\n# Since csv files use paper numbers and acceptance notes use forum,\n# need to translate between them. 
Paper_numbers a dict w/ forum as key, and num as value\npaper_numbers = {}\nfor paper in submissions:\n paper_numbers[paper.forum] = paper.number\n if paper.number in accept_new.keys():\n accept_new[paper.number]['forum'] = paper.forum\n\n# Remove existing acceptance notes from the accept_new list.\nfor note in acceptances:\n paper_num = paper_numbers[note.forum]\n if paper_num in accept_new.keys():\n # Check if acceptance notes agree w/ spreadsheet values, throw error if problem\n if note.content['decision'] != accept_new[paper_num]['acceptance']:\n print(\"Cannot change previously accepted paper %s\" % paper_num)\n # remove from the new acceptance list\n del accept_new[paper_num]\n\n\n# fill in generic acceptance note info\nnote = openreview.Note()\nnote.signatures = ['ICLR.cc/2017/pcs']\nnote.writers = ['ICLR.cc/2017/pcs']\nnote.readers = ['everyone']\n# for all new acceptances, set paper specific info and post acceptance note\nfor paper_num in accept_new:\n invitation ='ICLR.cc/2017/workshop/-/paper' +str(paper_num)+'/acceptance'\n note.invitation = invitation\n note.forum = accept_new[paper_num]['forum']\n note.replyto = accept_new[paper_num]['forum']\n note.content = {'decision': 'Accept' if accept_new[paper_num]['acceptance'] == 'OK' else 'Reject',\n 'title':'ICLR committee final decision'}\n client.post_note(note)\n print (\"Paper %s: new acceptance\" % paper_num)\n","sub_path":"venues/ICLR.cc/2017/workshop/python/accept-workshop-submissions.py","file_name":"accept-workshop-submissions.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"157555619","text":"from tkinter import *\r\nfrom PIL import Image,ImageTk,ImageOps\r\nfrom tkinter import filedialog,messagebox\r\nfrom scipy import misc\r\nfrom skimage import feature\r\nfrom skimage.io import imread\r\nimport numpy\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.widgets\r\nimport os\r\nimport imageModules\r\nfrom imageModules import *\r\nfrom scipy.ndimage import zoom\r\nimport time\r\nimport os.path\r\nimport threading\r\n\r\ndef zooming(image,event=NONE):\r\n def clipped_zoom(img, zoom_factor, **kwargs):\r\n h, w = img.shape[:2]\r\n zoom_tuple = (zoom_factor,) * 2 + (1,) * (img.ndim - 2)\r\n if zoom_factor < 1:\r\n zh = int(numpy.round(h * zoom_factor))\r\n zw = int(numpy.round(w * zoom_factor))\r\n top = (h - zh) // 2\r\n left = (w - zw) // 2\r\n out = numpy.zeros_like(img)\r\n out[top:top+zh, left:left+zw] = zoom(img, zoom_tuple, **kwargs)\r\n elif zoom_factor > 1:\r\n zh = int(numpy.round(h / zoom_factor))\r\n zw = int(numpy.round(w / zoom_factor))\r\n top = (h - zh) // 2\r\n left = (w - zw) // 2\r\n out = zoom(img[top:top+zh, left:left+zw], zoom_tuple, **kwargs)\r\n trim_top = ((out.shape[0] - h) // 2)\r\n trim_left = ((out.shape[1] - w) // 2)\r\n out = out[trim_top:trim_top+h, trim_left:trim_left+w]\r\n else:\r\n out = img\r\n return out\r\n img1=misc.imread(\"1.jpg\")\r\n zm1=clipped_zoom(img1,2)\r\n misc.imsave(\"zoomed.jpg\",zm1)\r\n img2=Image.open(\"zoomed.jpg\")\r\n root.img2=ImageTk.PhotoImage(img2)\r\n canvas.create_image(root.canvas_width/2,(root.canvas_height/2)+10,anchor=\"center\",image=root.img2)\r\n\r\n\r\n\r\ndef crop(image,event=NONE):\r\n def onselect(eclick,erelease):\r\n if eclick.ydata>erelease.ydata:\r\n eclick.ydata,erelease.ydata=erelease.ydata,eclick.ydata\r\n if eclick.xdata>erelease.xdata:\r\n eclick.xdata,erelease.xdata=erelease.xdata,eclick.xdata\r\n 
ax.set_ylim(erelease.ydata,eclick.ydata)\r\n ax.set_xlim(eclick.xdata,erelease.xdata)\r\n fig.canvas.draw()\r\n fig=plt.figure()\r\n ax=fig.add_subplot(111)\r\n photo=Image.open(image)\r\n photo.save(\"fhaltu.png\")\r\n filename=\"fhaltu.png\"\r\n im=Image.open(filename)\r\n arr=numpy.asarray(im)\r\n plt_image=plt.imshow(arr)\r\n plt_image.axes.get_xaxis().set_visible(False)\r\n plt_image.axes.get_yaxis().set_visible(False)\r\n rs=matplotlib.widgets.RectangleSelector(ax,onselect,drawtype='box',rectprops=dict(facecolor='red',edgecolor='black',alpha=0.5,fill=True))\r\n plt.show()\r\n plt.savefig(\"crop.jpg\")\r\n\r\ndef ConvertToGreyscale(event=NONE):\r\n try:\r\n matA=misc.imread('1.jpg')\r\n h,w,b=matA.shape\r\n matB=numpy.zeros((h,w))\r\n r=0\r\n while r1300:\r\n factor=width/height\r\n width=int(factor*new_height)\r\n height=new_height\r\n \r\n if height>600:\r\n height=600\r\n return width,height\r\n\r\n\r\ndef Open(event=NONE):\r\n try:\r\n t=threading.Thread()\r\n t.__init__(target=progressBar.start,args=())\r\n t.start()\r\n a=filedialog.askopenfilename(initialdir=\"E:\\\\pythoneg\\\\tkTestCases\\\\IrfanModules\\\\images\",title=\"select file\",filetypes=((\"jpeg files\",\"*.jpg\"),(\"alll files\",\"*.*\")))\r\n if(len(a)==0):\r\n raise FileNotFoundError\r\n filename=Image.open(a)\r\n width,height=checkDimension(filename)\r\n img=filename.resize((width,height),Image.ANTIALIAS)\r\n root.photo=ImageTk.PhotoImage(img)\r\n canvas.delete(\"all\")\r\n canvas.create_image(root.canvas_width/2,(root.canvas_height/2)+10,anchor=\"center\",image=root.photo)\r\n AccessTime=time.ctime(os.path.getatime(a))\r\n Size=os.path.getsize(a)\r\n Size=int(Size/(1024))\r\n b=\"Size : \"+str(height)+\"x\"+str(width)+\" Image Creation Date And Time : \"+str(AccessTime)+\" File Size : \"+str(Size)+\"KB\"\r\n groundLabel.config(text=b)\r\n misc.imsave('1.jpg',img)\r\n misc.imsave(\"original.jpg\",img)\r\n progressBar.stop()\r\n t.join()\r\n except FileNotFoundError:\r\n progressBar.stop()\r\n except Exception as e:\r\n messagebox.showinfo(\"Error\",e)\r\n progressBar.stop()\r\n\r\ndef saveInOriginalFolder(event=NONE):\r\n misc.imsave(\"1.jpg\",img)\r\n\r\ndef saveAs(event=NONE):\r\n try:\r\n name=filedialog.asksaveasfilename(title=\"Select Folder\",defaultextension=\".jpg\",filetypes=[(\"jpeg image\",\"*.jpg\"),(\"png image\",\"*.png\")])\r\n print(name)\r\n img=Image.open(\"1.jpg\")\r\n misc.imsave(name,img)\r\n except Exception as e:\r\n messagebox.showinfo(\"Error\",e)\r\n\r\n\r\ndef reOpen(event=NONE):\r\n img=Image.open(\"original.jpg\")\r\n root.img=ImageTk.PhotoImage(img)\r\n canvas.delete(\"all\")\r\n canvas.create_image(root.canvas_width/2,(root.canvas_height/2)+10,anchor=\"center\",image=root.img)\r\n misc.imsave(\"1.jpg\",img)\r\n\r\nroot=Tk()\r\nroot.title(\"Photoshop\")\r\nroot.canvas_height=640\r\nroot.canvas_width=1350\r\ncanvas=Canvas(root,width=root.canvas_width,height=root.canvas_height,bg=\"#E6DECC\")\r\ncanvas.pack()\r\nroot.geometry('1366x670+0+0')\r\nmenubar=Menu(root)\r\nfileMenu=Menu(menubar,tearoff=0)\r\nfileMenu.add_command(label='Open...',command=Open,accelerator=\"Ctrl+o\")\r\nmenubar.bind_all(\"\",Open)\r\nfileMenu.add_command(label='Reopen',command=reOpen)\r\n# fileMenu.add_command(label='Open Recent Files',command=doNothing)\r\n# fileMenu.add_command(label='Open with external editor',command=doNothing)\r\n# OpenAsMenu=Menu(root)\r\n# OpenAsMenu=Menu(fileMenu,tearoff=0)\r\n# OpenAsMenu.add_command(label=\"HEX File\",command=doNothing)\r\n# OpenAsMenu.add_command(label=\"ASCII 
File\",command=doNothing)\r\n# OpenAsMenu.add_command(label=\"RAW File\",command=doNothing)\r\n# fileMenu.add_cascade(label=\"Open as\",menu=OpenAsMenu)\r\n# fileMenu.add_separator()\r\n# fileMenu.add_command(label='Thumbnails',command=doNothing)\r\n# fileMenu.add_separator()\r\n# fileMenu.add_command(label='Slideshow...',command=doNothing)\r\n# fileMenu.add_command(label='Start slidedshow with current file list',command=doNothing)\r\n# fileMenu.add_command(label='Batch Conversion/Rename..',command=doNothing)\r\n# fileMenu.add_separator()\r\n# fileMenu.add_command(label='Search files..',command=doNothing)\r\n# fileMenu.add_command(label='Rename File...',command=doNothing)\r\n# fileMenu.add_command(label='Move File...',command=doNothing)\r\n# fileMenu.add_command(label='Copy File...',command=doNothing)\r\n# fileMenu.add_command(label='Delete File..',command=doNothing)\r\n# fileMenu.add_separator()\r\n# fileMenu.add_command(label='Save(original folder)',command=saveInOriginalFolder)\r\n# fileMenu.add_command(label='Save as...',command=saveAs)\r\n# fileMenu.add_command(label='Save for Web...(Plugin)',command=doNothing)\r\n# fileMenu.add_command(label='Save Selection as...',command=doNothing)\r\n# fileMenu.add_separator()\r\n# fileMenu.add_command(label='Print',command=doNothing)\r\n# fileMenu.add_separator()\r\n# fileMenu.add_command(label='Select Scan/TWAIN Source...',command=doNothing)\r\n# fileMenu.add_command(label='Acquire/Batch scanning...',command=doNothing)\r\n# fileMenu.add_command(label='Copy Shop..',command=doNothing)\r\n# fileMenu.add_separator()\r\nfileMenu.add_command(label='Exit',command=root.quit)\r\nmenubar.add_cascade(label=\"File\",menu=fileMenu)\r\n\r\neditMenu=Menu(menubar,tearoff=0)\r\neditMenu.add_command(label='Undo',command=doNothing)\r\neditMenu.add_command(label='Redo',command=doNothing)\r\n'''\r\neditMenu.add_separator()\r\neditMenu.add_command(label='Show Paint dialog',command=doNothing)\r\neditMenu.add_separator()\r\neditMenu.add_command(label='Create custom crop selection',command=doNothing)\r\noption1=Menu(root)\r\noption1=Menu(editMenu,tearoff=0)\r\noption1.add_command(label='1:1',command=doNothing)\r\noption1.add_command(label='3:2',command=doNothing)\r\noption1.add_command(label='4:3',command=doNothing)\r\noption1.add_command(label='16:9',command=doNothing)\r\noption1.add_command(label='16:10',command=doNothing)\r\noption1.add_command(label='21:9 (2.370:1)',command=doNothing)\r\noption1.add_separator()\r\noption1.add_command(label='2:3',command=doNothing)\r\noption1.add_command(label='3:4',command=doNothing)\r\noption1.add_command(label='9:16',command=doNothing)\r\noption1.add_command(label='10:16',command=doNothing)\r\noption1.add_separator()\r\noption1.add_command(label='Current custom selection',command=doNothing)\r\neditMenu.add_cascade(label=\"Create maximized selection (ratio:)\",menu=option1)\r\n\r\neditMenu.add_command(label='Maximize and center selection',command=doNothing)\r\noption2=Menu(root)\r\noption2=Menu(editMenu,tearoff=0)\r\noption2.add_command(label='None',command=doNothing)\r\noption2.add_command(label='Golden ratio',command=doNothing)\r\noption2.add_command(label='Thirds',command=doNothing)\r\noption2.add_command(label='Fourths',command=doNothing)\r\neditMenu.add_cascade(label='Show selection grid',menu=option2)\r\n\r\neditMenu.add_command(label='Show fixed grid',command=doNothing)\r\neditMenu.add_separator()\r\neditMenu.add_command(label='Insert text...',command=doNothing)\r\neditMenu.add_command(label='Insert overlay/watermark 
image...',command=doNothing)\r\neditMenu.add_separator()\r\neditMenu.add_command(label='Cut-selection',command=doNothing)\r\neditMenu.add_command(label='Cut-area outside of the selection',command=doNothing)\r\noption3=Menu(root)\r\noption3=Menu(editMenu,tearoff=0)\r\noption3.add_command(label='Remove Horizontal strip (sel. height)',command=doNothing)\r\noption3.add_command(label='Remove Vertical strip (sel. width)',command=doNothing)\r\noption3.add_separator()\r\noption3.add_command(label='Insert Horizontal strip (sel. height)',command=doNothing)\r\noption3.add_command(label='Insert Vertical strip (sel. width)',command=doNothing)\r\neditMenu.add_cascade(label='Remove/Insert strip (uses selection)',menu=option3)\r\n\r\neditMenu.add_separator()\r\neditMenu.add_command(label='Crop selection (Cut out)',command=doNothing)\r\neditMenu.add_command(label='Auto-crop borders',command=doNothing)\r\neditMenu.add_command(label='Capture visible window area',command=doNothing)\r\neditMenu.add_separator()\r\neditMenu.add_command(label='Copy',command=doNothing)\r\neditMenu.add_command(label='Paste',command=doNothing)\r\noption4=Menu(root)\r\noption4=Menu(editMenu,tearoff=0)\r\noption4.add_command(label='To right',command=doNothing)\r\noption4.add_command(label='To left',command=doNothing)\r\noption4.add_separator()\r\noption4.add_command(label='To Bottom',command=doNothing)\r\noption4.add_command(label='To Top',command=doNothing)\r\neditMenu.add_cascade(label=\"Paste Special (add on side)\",menu=option4)\r\n\r\neditMenu.add_command(label='Delete (Clear dispay)',command=doNothing)\r\neditMenu.add_separator()\r\neditMenu.add_command(label='Clear Clipboard',command=doNothing)\r\neditMenu.add_separator()\r\n'''\r\nmenubar.add_cascade(label=\"Edit\",menu=editMenu)\r\n\r\nimageMenu=Menu(menubar,tearoff=0)\r\n# imageMenu.add_command(label='Information...',command=doNothing)\r\n# imageMenu.add_separator()\r\nimageMenu.add_command(label='Change canvas color',command=lambda :ChangeCanvasColor(canvas))\r\n# imageMenu.add_command(label='Create New (empty) image...',command=lambda :CreateNewImage(canvas,root),accelerator=\"shift+N\")\r\nmenubar.bind_all(\"\",lambda :CreateNewImage(canvas,root))\r\n#imageMenu.add_command(label='Create Panorama image...',command=CreatePanoramaImage)\r\nimageMenu.add_separator()\r\nimageMenu.add_command(label='Rotate Left(counter clockwise)',command=RotateLeft,accelerator=\"l\")\r\nmenubar.bind_all(\"\",RotateLeft)\r\nimageMenu.add_command(label='Rotate Right(clockwise)',command=RotateRight,accelerator=\"r\")\r\nmenubar.bind_all(\"\",RotateRight)\r\nimage=\"1.jpg\"\r\nimageMenu.add_command(label='Vertical Flip',command=VerticalFlip,accelerator=\"v\")\r\nmenubar.bind_all(\"\",VerticalFlip)\r\nimageMenu.add_command(label='Horizontal Flip',command=HorizontalFlip,accelerator=\"h\")\r\nmenubar.bind_all(\"\",HorizontalFlip)\r\nimageMenu.add_command(label='Custom rotation...',command=lambda :RotateByAngle(root,image,canvas),accelerator=\"Ctrl+u\")\r\nmenubar.bind_all(\"\",lambda :RotateByAngle(root,image,canvas))\r\nimageMenu.add_separator()\r\nimageMenu.add_command(label='Resize/Resample...',command=lambda :ResizeImage(root,image,canvas),accelerator=\"Ctrl+r\")\r\nmenubar.bind_all(\"\",lambda :ResizeImage(root,image,canvas))\r\n#imageMenu.add_command(label='Change canvas size...',command=ChangeCanvasSize,accelerator=\"Shift+v\")\r\n#menubar.bind_all(\"\",ChangeCanvasSize)\r\nimageMenu.add_command(label='Add border/frame...',command=lambda 
:CreateFrame(root,image,canvas),accelerator=\"Ctrl+d\")\r\nmenubar.bind_all(\"\",lambda :CreateFrame(root,image,canvas))\r\nimageMenu.add_separator()\r\n#imageMenu.add_command(label='Increase Color Depth...',command=doNothing)\r\n#imageMenu.add_command(label='Decrease Color Depth...',command=DecreaseColorDepth)\r\n#imageMenu.add_separator()\r\nimageMenu.add_command(label='Convert to Greyscale',command=ConvertToGreyscale,accelerator=\"Ctrl+g\")\r\nmenubar.bind_all(\"\",ConvertToGreyscale)\r\n# imageMenu.add_command(label=\"crop\",command=lambda :crop(image))\r\n# imageMenu.add_command(label=\"zoom\",command=lambda :zooming(image))\r\nImageOption1=Menu(root)\r\nImageOption1=Menu(imageMenu,tearoff=0)\r\nImageOption1.add_command(label='Red',command=redChannel)\r\nImageOption1.add_command(label='Green',command=greenChannel)\r\nImageOption1.add_command(label='Blue',command=blueChannel)\r\nImageOption1.add_command(label='Alpha',command=doNothing)\r\nimageMenu.add_cascade(label='Show channel',menu=ImageOption1)\r\n\r\nImageOption2=Menu(root)\r\nImageOption2=Menu(imageMenu,tearoff=0)\r\nImageOption2.add_command(label='All channel',command=negative)\r\nImageOption2.add_separator()\r\nImageOption2.add_command(label='Red channel',command=negativeRed)\r\nImageOption2.add_command(label='Green channel',command=negativeGreen)\r\nImageOption2.add_command(label='Blue channel',command=negativeBlue)\r\nimageMenu.add_cascade(label='Negative (invert image)',menu=ImageOption2)\r\n\r\n# imageMenu.add_command(label='Color correction...',command=lambda :ColorCorrection(canvas,image,root),accelerator=\"Shift+G\")\r\n# menubar.bind_all(\"\",lambda :ColorCorrection(canvas,image,root))\r\n# imageMenu.add_command(label='Histogram...',command=doNothing)\r\n# ImageOption7=Menu(root)\r\n# ImageOption7=Menu(imageMenu,tearoff=0)\r\n# ImageOption7.add_command(label=\"Brightness\",command=lambda :AdjustBrightness(canvas,image,root))\r\n# ImageOption7.add_command(label=\"Contrast\",command=lambda :AdjustContrast(canvas,image,root))\r\n# ImageOption7.add_command(label=\"Sharpness\",command=lambda :AdjustSharpness(canvas,image,root))\r\n# ImageOption7.add_command(label=\"Red Balance\",command=lambda :ColorBalancer(canvas,image,root,\"RED\"))\r\n# ImageOption7.add_command(label=\"Green Balance\",command=lambda :ColorBalancer(canvas,image,root,\"GREEN\"))\r\n# ImageOption7.add_command(label=\"Blue Balance\",command=lambda :ColorBalancer(canvas,image,root,\"BLUE\"))\r\n# ImageOption7.add_command(label=\"Gamma Balance\",command=lambda :AdjustGamma(canvas,image,root))\r\n# ImageOption7.add_command(label=\"Saturation\",command=lambda :AdjustSaturation(canvas,image,root))\r\n# imageMenu.add_cascade(label='Color Corrections',menu=ImageOption7)\r\n\r\nimageMenu.add_command(label='Replace Color...',command=lambda :ReplaceColor(root,image,canvas))\r\nimageMenu.add_command(label='Create Titled image...',command=lambda :CreateTitleImage(image,canvas,root))\r\nimageMenu.add_separator()\r\nimageMenu.add_command(label='Auto-adjust colors',command=autoAdjustColor)\r\n# imageMenu.add_command(label='Sharpen',command=lambda :sharpen(image,canvas,root),accelerator=\"Shift+S\")\r\nmenubar.bind(\"\",lambda :sharpen(image,canvas,root))\r\n#imageMenu.add_command(label='Red eye reduction (selection)',command=doNothing)\r\nImageOption3=Menu(root)\r\nImageOption3=Menu(imageMenu,tearoff=0)\r\n# ImageOption3.add_command(label='Effects browser...',command=doNothing)\r\n# ImageOption3.add_separator()\r\n# ImageOption3.add_command(label='3D 
Button',command=doNothing)\r\n# ImageOption3.add_command(label='Blur',command=doNothing)\r\n# ImageOption3.add_command(label='Emboss',command=doNothing)\r\n# ImageOption3.add_command(label='Oil Paint',command=doNothing)\r\nImageOption3.add_command(label='Edge Detection',command=edgeDetection)\r\n# ImageOption3.add_command(label='Median Filter',command=doNothing)\r\n# ImageOption3.add_command(label='Explosion',command=doNothing)\r\n# ImageOption3.add_command(label='Pixelize',command=doNothing)\r\nImageOption3.add_command(label='Sepia',command=sepia)\r\n# ImageOption3.add_command(label='Rain Drops',command=doNothing)\r\n# ImageOption3.add_separator()\r\n# ImageOption3.add_command(label='AltaLux effects...(Plugin)',command=doNothing)\r\n# ImageOption3.add_separator()\r\n# ImageOption3.add_command(label='Filter Sandbox... (Plugin)',command=doNothing)\r\n# ImageOption3.add_separator()\r\n# ImageOption3.add_command(label='Filter Simulation effect...(Plugin)',command=doNothing)\r\n# ImageOption3.add_separator()\r\n# ImageOption3.add_command(label='Filter Factory... (Plugin)',command=doNothing)\r\n# ImageOption3.add_command(label='Filters Unlimited... (Plugin)',command=doNothing)\r\nimageMenu.add_cascade(label='Effects',menu=ImageOption3)\r\n\r\nImageOption4=Menu(root)\r\nImageOption4=Menu(imageMenu,tearoff=0)\r\nImageOption4.add_command(label='Filter dialog...',command=doNothing)\r\nImageOption4.add_separator()\r\nImageOption4.add_command(label='Perspective Transformations',command=doNothing)\r\nImageOption4.add_command(label='SmartCurve',command=doNothing)\r\nImageOption4.add_command(label='Wire Worm',command=doNothing)\r\nImageOption4.add_command(label=\"Harry's Filters\",command=doNothing)\r\nImageOption4.add_command(label='PapArt',command=doNothing)\r\n#imageMenu.add_cascade(label='Adobe 8BF Plugins',menu=ImageOption4)\r\n\r\nimageMenu.add_separator()\r\nImageOption5=Menu(root)\r\nImageOption5=Menu(imageMenu,tearoff=0)\r\nImageOption5.add_command(label='RGB->RBG',command=RGB2RBG)\r\nImageOption5.add_command(label='RGB->BGR',command=RGB2BGR)\r\nImageOption5.add_command(label='RGB->BRG',command=RGB2BRG)\r\nImageOption5.add_command(label='RGB->GRB',command=RGB2GRB)\r\nImageOption5.add_command(label='RGB->GBR',command=RGB2GBR)\r\nimageMenu.add_cascade(label='Swap Colors',menu=ImageOption5)\r\n\r\nImageOption6=Menu(root)\r\nImageOption6=Menu(imageMenu,tearoff=0)\r\nImageOption6.add_command(label='Edit palette...',command=doNothing)\r\nImageOption6.add_command(label='Export palette...',command=doNothing)\r\nImageOption6.add_command(label='Import palette...',command=doNothing)\r\n#imageMenu.add_cascade(label='Palette',menu=ImageOption6)\r\n\r\nImageOption7=Menu(root)\r\nImageOption7=Menu(imageMenu,tearoff=0)\r\nImageOption7.add_command(label=\"Brightness\",command=lambda :AdjustBrightness(canvas,image,root))\r\nImageOption7.add_command(label=\"Contrast\",command=lambda :AdjustContrast(canvas,image,root))\r\nImageOption7.add_command(label=\"Sharpness\",command=lambda :AdjustSharpness(canvas,image,root))\r\nImageOption7.add_command(label=\"Red Balance\",command=lambda :ColorBalancer(canvas,image,root,\"RED\"))\r\nImageOption7.add_command(label=\"Green Balance\",command=lambda :ColorBalancer(canvas,image,root,\"GREEN\"))\r\nImageOption7.add_command(label=\"Blue Balance\",command=lambda :ColorBalancer(canvas,image,root,\"BLUE\"))\r\nImageOption7.add_command(label=\"Gamma Balance\",command=lambda :AdjustGamma(canvas,image,root))\r\nImageOption7.add_command(label=\"Saturation\",command=lambda 
:AdjustSaturation(canvas,image,root))\r\nimageMenu.add_cascade(label='Color Corrections',menu=ImageOption7)\r\n\r\nmenubar.add_cascade(label=\"Image\",menu=imageMenu)\r\n# optionMenu=Menu(menubar,tearoff=0)\r\n# optionMenu.add_command(label='Properties/Settings...',command=doNothing)\r\n# optionMenu.add_command(label='Change language...',command=doNothing)\r\n# menubar.add_cascade(label='Option',menu=optionMenu)\r\n\r\n# viewMenu=Menu(menubar,tearoff=0)\r\n# viewMenu.add_command(label='Show/hide status bar',command=doNothing)\r\n# viewMenu.add_command(label='Show/hide toolbar',command=doNothing)\r\n# viewMenu.add_command(label='Show/hide menu bar',command=doNothing)\r\n# viewMenu.add_command(label='Show/hide caption',command=doNothing)\r\n# menubar.add_cascade(label='View',menu=viewMenu)\r\n\r\n# helpMenu=Menu(menubar,tearoff=0)\r\n# helpMenu.add_command(label='Photoshop Help',command=doNothing)\r\n# menubar.add_cascade(label='Help',menu=helpMenu)\r\nroot.config(menu=menubar)\r\n\r\ngroundLabel=Label(root,width=100,height=20,text=\"\",relief=\"groove\",anchor='w')\r\ngroundLabel.config(text=\"No File loaded ( Use -> Open)\")\r\ngroundLabel.pack(side=LEFT)\r\nroot.progress=StringVar()\r\nprogressBar=ttk.Progressbar(root,length=100,maximum=100,mode='determinate',orient=HORIZONTAL)\r\nprogressBar.pack()\r\nroot.mainloop()","sub_path":"vinayEditor-1.0.py","file_name":"vinayEditor-1.0.py","file_ext":"py","file_size_in_byte":28513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"406847821","text":"#Importing the python libraries\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#Pattern for detecting job start and end\npatJobStart = '^INFO:root:([0-9.]*):.*:(\\d*)$'\n\n#Pattern for detecting task start\npatTaskStart = '^INFO:root:([0-9.]*):.*:(\\d*):.*:(.*)$'\n\n#Pattern for detecting task end\npatTaskEnd = '^INFO:root:([0-9.]*):.*:(\\S*) (\\d)$'\n\n#Reading the log file\nlogs = open('logs.log','r')\nlog_data = logs.readline()\n\n#Initializing data structures\njobs = dict()\ntasks = dict()\nworker1 = []\ncount1 = 0\nworker2 = []\ncount2 = 0\nworker3 = []\ncount3 = 0\n\n#Processing the log file line by line\nwhile log_data:\n job_match = re.search(patJobStart,log_data)\n if job_match:\n t = float(job_match.group(1))\n jID = int(job_match.group(2))\n if jID not in jobs:\n jobs[jID] = t\n else:\n jobs[jID] = t - jobs[jID]\n task_match_start = re.search(patTaskStart,log_data)\n if task_match_start:\n #print(task_match_start.group(1),task_match_start.group(2),task_match_start.group(3))\n t = float(task_match_start.group(1))\n port = int(task_match_start.group(2))\n tID = task_match_start.group(3)\n \n if tID not in tasks:\n tasks[tID] = t\n \n #To detect count of worker 1\n if port%3999 == 1:\n count1 += 1\n worker1.append((count1,t))\n \n #To detect count of worker 2\n if port%3999 == 2:\n count2 += 1\n worker2.append((count2,t))\n \n #To detect count of worker 3\n if port%3999 == 3:\n count3 += 1\n worker3.append((count3,t))\n\n \n task_match_end = re.search(patTaskEnd,log_data)\n if task_match_end:\n #print(task_match_end.group(1),task_match_end.group(2),task_match_end.group(3))\n t = float(task_match_end.group(1))\n tID = task_match_end.group(2)\n wID = int(task_match_end.group(3))\n if tID in tasks:\n tasks[tID] = t - tasks[tID]\n \n #To decrement count of worker 1 after task completion \n if wID == 1:\n count1 -= 1\n worker1.append((count1,t))\n \n #To decrement count of worker 2 after task completion \n if wID == 2:\n 
count2 -= 1\n worker2.append((count2,t))\n \n #To decrement count of worker 3 after task completion \n if wID == 3:\n count3 -= 1\n worker3.append((count3,t))\n log_data = logs.readline()\n\n#Printing and generating the required statistical value and graphs\njobs = np.array(list(jobs.values()))\nprint(\"Median of job completion time = \", np.median(jobs))\nprint(\"Mean of job completion time = \", jobs.mean())\ntasks = np.array(list(tasks.values()))\nprint(\"Median of task completion time = \", np.median(tasks))\nprint(\"Mean of task completion time = \", tasks.mean())\n#print(worker1)\n#print(worker2)\n#print(worker3)\n\n#Function to plot the graph for number of task scheduled on worker vs time\ndef plotFig(worker,s):\n x,y=[],[]\n for i in worker:\n x.append(i[0])\n y.append(i[1])\n plt.plot(y,x,'--bo')\n plt.xlabel(\"Time\")\n plt.ylabel(\"Number of running tasks\")\n plt.title(s)\n plt.show()\n\nplotFig(worker1,\"Worker 1\")\nplotFig(worker2,\"Worker 2\")\nplotFig(worker3,\"Worker 3\")\n\n\n\n\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"190453836","text":"from keras.datasets import imdb\n\n# 读取网络电影数据库,25000个训练样本,25000个测试样本\n# num_words=10000指的是保留在训练集中最频繁出现的前10000个词\n# 标签中0代表消极、1代表积极\n(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)\n\n# 查看训练样本\nprint(train_data[0])\nprint(train_labels[0])\n\n# 每个单词的下标都低于10000\nprint(max([max(sequence) for sequence in train_data]))\nprint(min([min(sequence) for sequence in train_data]))\n\n# 解码整数序列为句子\n# word_index是单词->下标的字典\nword_index = imdb.get_word_index()\n# 反转word_index字典\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n# 我们解码评论,下标从3开始,因为0->'padding',1->'start of sequence',2->'unknown'(其实是从4开始,因为训练集中的下标是从1开始的,1、2、3是无效的)\ndecode_review = ' '.join(reverse_word_index.get(i - 3, '?') for i in train_data[0])\nprint(decode_review)\n\n# 把整数序列编码成二元矩阵\nimport numpy as np\n\n\ndef vectorize_sequences(sequences, dimension=10000):\n results = np.zeros((len(sequences), dimension))\n for i, sequence in enumerate(sequences):\n results[i, sequence] = 1.\n return results\n\n\n# 向量化的训练数据\nx_train = vectorize_sequences(train_data)\n# 向量化的测试数据\nx_test = vectorize_sequences(test_data)\n\n# 查看一个向量化后的样本\nprint(x_train[0])\n\n'''\nl = []\nfor i in train_data[0]:\n if i not in l:\n l.append(i)\nprint(len(l))\n'''\n\n# 向量化标签\ny_train = np.asarray(train_labels).astype('float32')\ny_test = np.asarray(test_labels).astype('float32')\n\nprint(train_labels.shape)\nprint(y_train.shape)\n\n# 模型定义\nfrom keras import models\nfrom keras import layers\nfrom keras import optimizers\nfrom keras import losses\nfrom keras import metrics\n\nmodel = models.Sequential()\nmodel.add(layers.Dense(16, activation='relu', input_shape=(10000,)))\nmodel.add(layers.Dense(16, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nsmall_model = models.Sequential()\nsmall_model.add(layers.Dense(4, activation='relu', input_shape=(10000,)))\nsmall_model.add(layers.Dense(4, activation='relu'))\nsmall_model.add(layers.Dense(1, activation='sigmoid'))\n\nbig_model = models.Sequential()\nbig_model.add(layers.Dense(512, activation='relu', input_shape=(10000,)))\nbig_model.add(layers.Dense(512, activation='relu'))\nbig_model.add(layers.Dense(1, activation='sigmoid'))\n\n# 模型编译\n# model.compile(optimizer='rmsprop',\n# loss='binary_crossentropy',\n# 
metrics=['accuracy'])\n# 另一种方式\nmodel.compile(optimizer=optimizers.RMSprop(lr=0.001),\n              loss=losses.binary_crossentropy,\n              metrics=[metrics.binary_accuracy])\nsmall_model.compile(optimizer=optimizers.RMSprop(lr=0.001),\n                    loss=losses.binary_crossentropy,\n                    metrics=[metrics.binary_accuracy])\nbig_model.compile(optimizer=optimizers.RMSprop(lr=0.001),\n                  loss=losses.binary_crossentropy,\n                  metrics=[metrics.binary_accuracy])\n\n# 设置验证集\nx_val = x_train[:10000]\npartial_x_train = x_train[10000:]\n\ny_val = y_train[:10000]\npartial_y_train = y_train[10000:]\n\n# 训练模型\nhistory = model.fit(partial_x_train, partial_y_train, epochs=20, batch_size=512, validation_data=(x_val, y_val))\nsmall_history = small_model.fit(partial_x_train, partial_y_train, epochs=20, batch_size=512,\n                                validation_data=(x_val, y_val))\nbig_history = big_model.fit(partial_x_train, partial_y_train, epochs=20, batch_size=512, validation_data=(x_val, y_val))\n\n# 画出训练集和验证集的目标函数值\nimport matplotlib.pyplot as plt\n\nloss = history.history['val_loss']\nsmall_loss = small_history.history['val_loss']\nbig_loss = big_history.history['val_loss']\n\nepochs = range(1, len(loss) + 1)\n\nplt.plot(epochs, loss, 'ro', label='Original model')\nplt.plot(epochs, small_loss, 'bo', label='Smaller model')\n# plt.plot(epochs, big_loss, 'bo', label='Bigger model')\nplt.title('Validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Validation loss')\nplt.legend()\n\nplt.show()\n","sub_path":"chapter4/movie_reviews_compare.py","file_name":"movie_reviews_compare.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"237366086","text":"# -*- coding: utf-8 -*-\nimport requests\nfrom bs4 import BeautifulSoup\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nclass BaikePipeline(object):\n    def process_item(self, item, spider):\n        for i in range(0,len(item['ciurl'])):\n            ciurl = item['ciurl'][i]\n            for j in range(0,len(item['id'])):\n                id=item['id'][j]\n                res = requests.get(ciurl)\n                soup = BeautifulSoup(res.text, 'html.parser')\n                file = open(r'E:/测试/百度百科/'+str(id)+'.txt','a')\n                for news in soup.select('.cm_fb'):\n                    a = news.select('a')[0].text\n                    a_href = news.select('a')[0]['href']\n                    file.writelines(a)\n                    file.writelines('\\n')  # 写入并换行\n                    file.writelines(a_href)\n                    file.writelines('\\n')  # 写入并换行\n                file.close()  # 关闭链接\n\n\n        return item\n","sub_path":"爬虫项目/baike/baike/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"149587708","text":"#!/usr/bin/env python3\n\nimport io\nimport pytest\nimport html_render as hr\n\n\ndef render_result(element, cur_ind=''):\n    file_out = io.StringIO()\n    element.render(file_out, cur_ind)\n    return file_out.getvalue()\n\n\ndef test_init():\n    e = hr.Element()\n    e = hr.Element(\"original text\")\n\n\ndef test_append():\n    e = hr.Element(\"element to append to\")\n    e.append(\"text to append\")\n    file_contents = render_result(e).strip()\n    print(file_contents)\n\n    assert \"element to append to\" in file_contents\n    assert \"text to append\" in file_contents\n    assert file_contents.index(\"element to\") < file_contents.index(\"text to\")\n    assert file_contents.startswith(\"<html>\")\n    assert file_contents.endswith(\"</html>\")\n\n\ndef test_render_element2():\n    e = hr.Element()\n    e.append(\"append to empty e4 
Element\")\n e.append(\"append again to e4\")\n\n file_contents = render_result(e).strip()\n\n\ndef test_html():\n e = hr.Html(\"text for html tag\")\n\n\ndef test_body():\n e = hr.Body(\"text for body tag\")\n\n\ndef test_p():\n e = hr.P(\"text for paragraph tag\")\n\n\ndef test_html():\n e = hr.Html(\"this is some text\")\n e.append(\"and this is some more text\")\n file_contents = render_result(e).strip()\n assert \"this is some text\" in file_contents\n assert \"and this is some more text\" in file_contents\n print(file_contents)\n assert file_contents.endswith(\"\")\n\n\ndef test_body():\n e = hr.Body(\"this is some text\")\n e.append(\"and this is some more text\")\n file_contents = render_result(e).strip()\n\n assert \"this is some text\" in file_contents\n assert \"and this is some more text\" in file_contents\n\n assert file_contents.startswith(\"\")\n assert file_contents.endswith(\"\")\n\n\ndef test_p():\n e = hr.P(\"this is some text\")\n e.append(\"and this is some more text\")\n\n file_contents = render_result(e).strip()\n\n assert \"this is some text\" in file_contents\n assert \"and this is some more text\" in file_contents\n\n assert file_contents.startswith(\"

\")\n assert file_contents.endswith(\"

\")\n\n\ndef test_sub_element_paragraph():\n page = hr.Html()\n page.append(\"some plain text.\")\n page.append(hr.P(\"A simple paragraph of text\"))\n page.append(\"Some more plain text.\")\n\n file_contents = render_result(page)\n print(file_contents) # so we can see it if the test fails\n\n # note: The previous tests should make sure that the tags are getting\n # properly rendered, so we don't need to test that here.\n assert \"some plain text\" in file_contents\n assert \"A simple paragraph of text\" in file_contents\n assert \"Some more plain text.\" in file_contents\n assert \"some plain text\" in file_contents\n # but make sure the embedded element's tags get rendered!\n assert \"

\" in file_contents\n assert \"

\" in file_contents\n\n\ndef test_sub_element_head():\n page = hr.Html()\n page.append(\"some plain text.\")\n page.append(hr.Head(\"A HEAD paragraph of text\"))\n page.append(hr.Body(\" Body text.\"))\n\n file_contents = render_result(page)\n print(file_contents) # so we can see it if the test fails\n\n # note: The previous tests should make sure that the tags are getting\n # properly rendered, so we don't need to test that here.\n assert \"some plain text\" in file_contents\n assert \"A HEAD paragraph of text\" in file_contents\n assert \"Body text.\" in file_contents\n assert file_contents.index(\"HEAD paragraph\") < file_contents.index(\"Body text.\")\n\n\ndef test_sub_element_title():\n page = hr.Html()\n page.append(\"some plain text.\")\n header = hr.Head(\"A HEAD paragraph of text\")\n page.append(header)\n header.append(hr.Title(\"A Title paragraph of text\"))\n page.append(hr.Body(\" Body text.\"))\n\n file_contents = render_result(page)\n print(file_contents) # so we can see it if the test fails\n\n # note: The previous tests should make sure that the tags are getting\n # properly rendered, so we don't need to test that here.\n assert \"some plain text\" in file_contents\n assert \"A HEAD paragraph of text\" in file_contents\n assert \"A Title paragraph\" in file_contents\n assert file_contents.index(\"HEAD paragraph\") < file_contents.index(\"A Title paragraph\")\n\n\ndef test_title():\n e = hr.Title(\"This is a Title\")\n\n file_contents = render_result(e).strip()\n\n assert \"This is a Title\" in file_contents\n print(file_contents)\n assert file_contents.startswith(\"\")\n assert file_contents.endswith(\"\")\n assert \"\\n\" not in file_contents\n\n\ndef test_one_line_tag_append():\n e = hr.OneLineTag(\"the initial content\")\n with pytest.raises(NotImplementedError):\n e.append(\"some more content\")\n\n file_contents = render_result(e).strip()\n print(file_contents)\n\n\ndef test_attributes():\n e = hr.P(\"A paragraph of text\", style=\"text-align: center\", id=\"intro\")\n\n file_contents = render_result(e).strip()\n print(file_contents) # so we can see it if the test fails\n\n # note: The previous tests should make sure that the tags are getting\n # properly rendered, so we don't need to test that here.\n # so using only a \"P\" tag is fine\n assert \"A paragraph of text\" in file_contents\n # but make sure the embedded element's tags get rendered!\n # first test the end tag is there -- same as always:\n assert file_contents.endswith(\"

\")\n\n # but now the opening tag is far more complex\n # but it starts the same:\n assert file_contents.startswith(\"\")\n\n # but now the opening tag is far more complex\n # but it starts the same:\n assert file_contents.startswith(\"

\") > file_contents.index('id=\"intro\"')\n assert file_contents[:file_contents.index(\">\")].count(\" \") == 3\n\n\ndef test_hr():\n \"\"\"a simple horizontal rule with no attributes\"\"\"\n hr1 = hr.Hr()\n file_contents = render_result(hr1).strip()\n print(file_contents)\n assert file_contents == '


'\n\n\ndef test_hr_attr():\n \"\"\"a horizontal rule with an attribute\"\"\"\n hr1 = hr.Hr(width=400)\n file_contents = render_result(hr1).strip()\n print(file_contents)\n assert file_contents == '
'\n\n\ndef test_content_in_br():\n with pytest.raises(TypeError):\n br = hr.Br(\"some content\")\n\n\ndef test_br():\n br = hr.Br()\n file_contents = render_result(br).strip()\n print(file_contents)\n assert file_contents == \"
\"\n\n\ndef test_append_content_in_br():\n with pytest.raises(TypeError):\n br = hr.Br()\n br.append(\"some content\")\n\n\ndef test_indent():\n \"\"\"\n Tests that the indentation gets passed through to the renderer\n \"\"\"\n html = hr.Html(\"some content\")\n file_contents = render_result(html, cur_ind=' ').rstrip() # remove the end newline\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n assert lines[1].startswith(\" <\")\n print(repr(lines[-1]))\n assert lines[-1].startswith(\" <\")\n\n\ndef test_indent_contents():\n \"\"\"\n The contents in a element should be indented more than the tag\n by the amount in the indent class attribute\n \"\"\"\n html = hr.Element(\"some content\")\n file_contents = render_result(html, cur_ind=' ')\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n assert lines[1].startswith(hr.Element.indent)\n\n\ndef test_multiple_indent():\n \"\"\"\n make sure multiple levels get indented fully\n \"\"\"\n body = hr.Body()\n body.append(hr.P(\"some text\"))\n html = hr.Html(body)\n\n file_contents = render_result(html)\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n for i in range(3): # this needed to be adapted to the tag\n assert lines[i + 1].startswith(i * hr.Element.indent + \"<\")\n\n assert lines[4].startswith(3 * hr.Element.indent + \"some\")\n\n\ndef test_element_indent1():\n \"\"\"\n Tests whether the Element indents at least simple content\n\n we are expecting to to look like this:\n\n \n this is some text\n <\\html>\n\n More complex indentation should be tested later.\n \"\"\"\n e = hr.Element(\"this is some text\")\n\n # This uses the render_results utility above\n file_contents = render_result(e).strip()\n\n # making sure the content got in there.\n assert \"this is some text\" in file_contents\n\n # break into lines to check indentation\n lines = file_contents.split('\\n')\n print(lines)\n # making sure the opening and closing tags are right.\n assert lines[0] == \"\"\n # this line should be indented by the amount specified\n # by the class attribute: \"indent\"\n assert lines[1].startswith(2*hr.Element.indent + \"thi\")\n# assert lines[2] == \"\"\n assert file_contents.endswith(\"\")\n","sub_path":"students/tlugosan/lesson07/test_html_render.py","file_name":"test_html_render.py","file_ext":"py","file_size_in_byte":9765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"221029299","text":"import unittest_first_tutorial, json, time, logging, psycopg2, random, requests\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.proxy import Proxy, ProxyType\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument(\"--start-maximized\")\n# create the logging instance for logging to file only\nlogger = logging.getLogger('crawlProxyLog')\n\n# create the handler for the main logger\nfile_logger = logging.FileHandler('crawlProxyLog.log', mode='w')\nNEW_FORMAT = '[%(asctime)s] - [%(levelname)s] - %(message)s'\n\nfile_logger_format = logging.Formatter(NEW_FORMAT)\n\n# tell the handler to use the above format\nfile_logger.setFormatter(file_logger_format)\n\n# finally, add the handler to the base logger\nlogger.addHandler(file_logger)\n\n# remember that by default, logging will start at 'warning' unless\n# we set it manually\nlogger.setLevel(logging.DEBUG)\n\nconsole = 
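# For reference, the file-plus-console dual-handler setup built above in minimal,
# self-contained form (standard library only; names here are illustrative):
#   log = logging.getLogger("example")
#   log.addHandler(logging.FileHandler("example.log", mode="w"))
#   log.addHandler(logging.StreamHandler())
#   log.setLevel(logging.DEBUG)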
console.setLevel(logging.DEBUG)\nlogging.getLogger('crawlProxyLog').addHandler(console)\n\n\ndef getConnection():\n    connection = psycopg2.connect(\"host=localhost dbname=pyadmin user=postgres password=12345678\")\n    return connection\n\nfor i in range(6):\n    driver = webdriver.Chrome(chrome_options=chrome_options)\n    driver.get(f\"http://list.proxylistplus.com/Fresh-HTTP-Proxy-List-{i+1}\")\n\n    #rows = driver.find_elements_by_xpath(\"//table[@id='proxylisttable']/tbody/tr\")\n    rows = driver.find_elements_by_css_selector(\"#page table.bg tbody tr\")\n    length_rows = len(rows)\n    for j in range(length_rows):  # renamed from i to avoid shadowing the page counter above\n        row = driver.find_element_by_css_selector(f\"#page table.bg tbody tr:nth-child({j+1})\")\n        ipadd = row.find_element_by_css_selector(\"td:nth-child(2)\").text  # re-enabled; ipadd was used below but the line was commented out\n        port = row.find_element_by_css_selector(\"td:nth-child(3)\").text  # the original selector was missing the closing parenthesis\n        # `anonymity` and `code` were referenced but never assigned; the column index\n        # for anonymity is an assumption about this table's layout, and no country-code\n        # column exists here, so code is stored empty\n        anonymity = row.find_element_by_css_selector(\"td:nth-child(4)\").text\n        code = \"\"\n        country = row.find_element_by_css_selector(\"td:nth-child(5)\").text\n        google = row.find_element_by_css_selector(\"td:nth-child(6)\").text\n        https_bo = row.find_element_by_css_selector(\"td:nth-child(7)\").text\n        last_check = row.find_element_by_css_selector(\"td:nth-child(8)\").text\n        ip_port = f\"{ipadd}:{port}\"\n\n        logger.info(ipadd)\n\n        conn = getConnection()\n        cur = conn.cursor()\n        # parameterized query instead of f-string interpolation, to avoid SQL injection\n        cur.execute(\"insert into proxy_list(ip_port,ipadd,port,code,country,anonymity,google,https,last_check) values(%s,%s,%s,%s,%s,%s,%s,%s,%s) on conflict (ip_port) do nothing\",\n                    (ip_port, ipadd, port, code, country, anonymity, google, https_bo, last_check))\n        conn.commit()\n        cur.close()\n        conn.close()\n\n    driver.close()\n    time.sleep(3)\n\n# table = driver.find_element_by_css_selector(\"tbody\")\n# driver.find_element_by_css_selector(\".proxylisttable_length\").click()\n\n# logger.info(table.text)\n# for i in range(10):\n#     table = driver.find_element_by_css_selector(\"tbody\")\n#     logger.info(table.text)\n#     logger.info(\"#####\")\n#     logger.info(f\"i : {i}\")\n#     time.sleep(5)\n#     driver.find_element_by_css_selector(\"#proxylisttable_next a\").click()\n\n# rows = driver.find_elements_by_tag_name(\"tbody tr\")\n\n# rows = driver.find_elements_by_xpath(\"//table[@id='proxylisttable']/tbody/tr\")\n# row = driver.find_elements_by_css_selector(\"#proxylisttable tbody tr:nth-child(20)\")\n# logger.info(len(rows))\n# logger.info(row.text)\n\n# rows = driver.find_elements_by_css_selector(\"#proxylisttable tbody tr\")\n# logger.info(len(rows))\n","sub_path":"crawl_proxy2.py","file_name":"crawl_proxy2.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"640037547","text":"from django.shortcuts import render\nfrom .models import LoginUser, Goods,GoodsType,ValidCode\nfrom Buyer.models import OrderInfo,PayOrder,PayorderAddress,UserAddress\nimport hashlib\nfrom django.http import HttpResponseRedirect, HttpResponse, JsonResponse\nfrom django.core.paginator import Paginator\nfrom django.core.cache import cache\nimport logging\n# create a log collector\ncollect = logging.getLogger(\"django\")\n\n\n# Create your views here.\n## password hashing\ndef setPassword(password):\n    md5 = hashlib.md5()\n    md5.update(password.encode())\n    result = md5.hexdigest()\n\n    return result\n\n\n## login-required decorator\ndef loginValid(func):\n    def inner(request, *args, **kwargs):\n        ## verify the login cookie against the session\n        cookie_email = request.COOKIES.get(\"email\")\n        session_email = request.session.get(\"email\")\n        if cookie_email and session_email and cookie_email == session_email:\n            flag = LoginUser.objects.filter(email=cookie_email, id=request.COOKIES.get(\"userid\"), 
user_type=0).exists()\n if flag:\n return func(request, *args, **kwargs)\n else:\n return HttpResponseRedirect(\"/seller/login/\")\n else:\n return HttpResponseRedirect(\"/seller/login/\")\n\n return inner\n\nimport datetime\n## 注册\ndef register(request):\n ## 接收参数\n if request.method == \"POST\":\n password = request.POST.get(\"password\")\n repassword = request.POST.get(\"repassword\")\n email = request.POST.get(\"email\")\n code = request.POST.get(\"code\")\n ## 校验数据\n if email and password and password == repassword and code:\n ## 有数据\n ## 校验验证码\n validCode = ValidCode.objects.filter(code=code,email=email).order_by(\"-create_time\").first()\n if validCode:\n ## 存在\n ## 校验过期时间 2min 内有效\n now_time = datetime.datetime.now()\n create_time =validCode.create_time\n t = (now_time - create_time).total_seconds() ### 获取时间间隔 秒\n validCode.delete()\n if t < 120:\n LoginUser.objects.create(email=email, password=setPassword(password), user_type=0)\n return HttpResponseRedirect(\"/seller/login/\")\n else:\n message = \"验证码失效\"\n else:\n message = \"验证码不存在\"\n else:\n ## 参数为空\n message = \"参数为空\"\n\n return render(request, \"seller/register.html\", locals())\n\n\n## 登录\ndef login(request):\n\n if request.method == \"POST\":\n email = request.POST.get(\"email\")\n password = request.POST.get(\"password\")\n if email and password:\n user = LoginUser.objects.filter(email=email, password=setPassword(password), user_type=0).first()\n if user:\n ## 成功登录\n response = HttpResponseRedirect(\"/seller/index/\")\n response.set_cookie(\"email\", user.email)\n response.set_cookie(\"userid\", user.id)\n request.session[\"email\"] = user.email\n ## 收集日志\n ## xxxxx 成功登录\n collect.critical(\"%s 成功登录\" % user.email)\n\n return response\n else:\n message = \"账号密码不正确\"\n else:\n message = \"参数为空\"\n\n return render(request, \"seller/login.html\", locals())\nfrom django.views.decorators.cache import cache_page\nfrom CeleryTask.tasks import Test,Myprint\nfrom django.db.models import Sum,Count\nimport datetime\n## 首页\n# @cache_page(60*15) ## 15 分钟\n@loginValid\ndef index(request):\n\n ## 用户\n user_id = request.COOKIES.get(\"userid\")\n ## 获取当月\n month = datetime.datetime.now().month\n print(month)\n\n ## 当月成交总金额\n # 用户: 卖家\n # 当月\n # 订单详情表中的订单金额的和\n # 状态: 2 3 4 6\n month_sum_mount = OrderInfo.objects.filter(\n store_id=user_id,\n order__order_status__in= [2,3,4,6],\n order__order_date__month=month\n ).aggregate(Sum(\"goods_total_price\")).get(\"goods_total_price__sum\")\n\n print(month_sum_mount)\n\n # (二)当月成交订单笔数\n month_count = PayOrder.objects.filter(\n order_date__month= month,\n order_status__in = [2,3,4,6],\n orderinfo__store_id=user_id\n ).count()\n print(month_count)\n\n ## (三)销量最高的商品的名字\n # max_goods = OrderInfo.objects.filter(store_id=user_id).aggregate(Sum(\"goods_count\"))\n # print(max_goods)\n ## annotate 分组 group by\n ## annotate 前面的values 代表 分组的条件\n ## 得到的结果 queryset 值: key->分组字段 聚合字段 value\n # data = OrderInfo.objects.values(\"goods\").annotate(Sum(\"goods_count\")).order_by(\"-goods_count__sum\")\n # print(data)\n # max_goods = Goods.objects.get(id=data[0].get(\"goods\")).goods_name\n # print(max_goods)\n ## select ziduan from xxxx where xxxx=xxx group by ziduan having tiaojian\n data = OrderInfo.objects.filter(store_id=user_id,\n order__order_status__in= [2,3,4,6],\n order__order_date__month=month).values(\"goods\").annotate(Sum(\"goods_count\")).order_by(\"-goods_count__sum\").values(\"goods\")\n print(data)\n max_goods = Goods.objects.get(id=data[0].get(\"goods\")).goods_name\n print(max_goods)\n\n\n\n # 
(四)当月成交商品的总量\n month_total = OrderInfo.objects.filter(\n order__order_date__month=month,\n order__order_status__in=[2, 3, 4, 6],\n store_id=user_id\n ).aggregate(Sum(\"goods_count\")).get(\"goods_count__sum\")\n print(month_total)\n\n return render(request, \"seller/index.html\",locals())\n\n\n## 退出\ndef logout(request):\n ## 删除 cookie session\n response = HttpResponseRedirect(\"/seller/login/\")\n response.delete_cookie(\"email\")\n del request.session[\"email\"]\n return response\n\n\n## 商品列表页面\n@loginValid\ndef goods_list(request, status, page=1):\n ## 根据状态 查询商品\n ## status 状态的标识\n # 0 获取下架商品的数据\n # 1 获取在售商品的数据\n\n # goods = Goods.objects.all()\n # goods = Goods.objects.filter(goods_status=status).order_by(\"id\")\n goods = Goods.objects.filter(goods_status=status,goods_store_id=request.COOKIES.get(\"userid\")).order_by(\"id\")\n goods_obj = Paginator(goods, 8)\n goods_list = goods_obj.page(page)\n\n # return render(request,\"goods_list.html\",locals())\n return render(request, \"seller/goods_list.html\",locals())\n\n\n## 修改商品的状态\ndef goods_status(request, id, status):\n \"\"\"\n 修改商品的状态\n :param request:\n :param id: 商品的id\n :param status:\n up 上架\n down 下架\n :return:\n \"\"\"\n goods = Goods.objects.get(id=id)\n if status == \"up\":\n ## 商品要上架\n goods.goods_status = 1\n goods.save()\n else:\n ## 代表商品要下架\n goods.goods_status = 0\n goods.save()\n url = request.META.get(\"HTTP_REFERER\") ## 得到请求的地址\n print(url)\n # return HttpResponseRedirect(\"/loginuser/goods_list/1/1/\")\n return HttpResponseRedirect(url)\n\n\n## 个人中心\n@loginValid\ndef user_profile(request):\n ## 返回用户的信息\n ## 从session 或者 cokkie 这种获取登录的用户\n userid = request.COOKIES.get(\"userid\")\n user = LoginUser.objects.get(id=userid)\n ## 处理post请求\n if request.method == \"POST\":\n data = request.POST\n print(data)\n user.email = data.get(\"email\")\n user.phone_number = data.get(\"phone_number\")\n user.username = data.get(\"username\")\n user.age = data.get(\"age\")\n user.gender = int(data.get(\"gender\"))\n user.address = data.get(\"address\")\n if request.FILES.get(\"img\"):\n user.photo = request.FILES.get(\"img\")\n user.save()\n return render(request, \"seller/user_profile.html\", locals())\n\n##录入商品\n@loginValid\ndef goods_add(request):\n goods_type = GoodsType.objects.all()\n if request.method == \"POST\":\n user_id = request.COOKIES.get(\"userid\")\n data = request.POST\n goods = Goods()\n goods.goods_number = data.get(\"goods_number\")\n goods.goods_name = data.get(\"goods_name\")\n goods.goods_price = data.get(\"goods_price\")\n goods.goods_count = data.get(\"goods_count\")\n goods.goods_location = data.get(\"goods_location\")\n goods.goods_safe_date = data.get(\"goods_safe_date\")\n # goods.goods_picture = data.get(\"goods_number\")\n goods.goods_type_id = int(data.get(\"goods_type\"))\n goods.goods_store = LoginUser.objects.get(id=user_id)\n goods.goods_picture = request.FILES.get(\"img\")\n goods.save()\n\n return render(request,'seller/goods_add.html',locals())\n\nfrom sdk.sendDD import senddingding\nimport random\ndef get_code(request):\n result = {\"code\":10000,\"msg\":\"验证码已发送\"}\n ## 发送验证码\n ## 保存到数据库中\n email =request.GET.get(\"email\")\n code = random.randint(1000,9999)\n params = {\n \"content\": \"您的验证码为:{},打死不要告诉别人!!!\".format(code),\n \"atMobiles\": [],\n \"isAtAll\": True\n }\n try:\n ## 发送 使用钉钉\n # senddingding(params)\n # 发布一个异步任务\n from CeleryTask.tasks import senddingd\n senddingd.delay(params)\n\n # 保存\n ValidCode.objects.create(email=email,code=code)\n result = {\"code\": 10000, \"msg\": 
\"验证码已发送\"}\n except:\n result = {\"code\": 10001, \"msg\": \"验证码发送失败\"}\n\n\n return JsonResponse(result)\n\n\n\ndef middlewaretest(request,version):\n\n print(\"view\")\n # return HttpResponse(\"middlewaretest1\")\n # return render(request,\"seller/base.html\")\n def test01():\n return HttpResponse(\"test01\")\n resp = HttpResponse(\"middlewaretest1\")\n resp.render = test01\n\n return resp\n\n\n@loginValid\ndef order(request):\n ### 获取卖家信息\n user_id = request.COOKIES.get(\"userid\")\n status = int(request.GET.get(\"status\"))\n ## 获取卖家的订单详情 指定的状态\n # order_info = OrderInfo.objects.filter(store_id=user_id).all()\n # order_info_list = []\n # for one in order_info:\n # if one.order.order_status == status:\n # order_info_list.append(one)\n\n # print(order_info) #### 该用户的所有订单详情\n order_info = OrderInfo.objects.filter(store_id=user_id,order__order_status=status).all()\n\n return render(request,\"seller/order.html\",locals())\n\nfrom sdk.send_email import send_email\ndef txzf(request):\n result = {\"code\":10000,\"msg\":\"提醒成功\"}\n ## 获取到订单\n payorder_id = request.GET.get(\"payorder_id\")\n ## 发送邮件\n params = {\n \"subject\":\"天天生鲜提醒支付\",\n \"content\":\"天天生鲜提醒您去支付\",\n \"recver\":\"str_wjp@126.com\"\n }\n send_email(params)\n\n\n\n return JsonResponse(result)\n","sub_path":"Qshop/Seller/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"205656924","text":"# -*- coding: utf-8 -*-\nimport json\nimport os\nimport re\nfrom zope.interface import implements\nfrom twisted.plugin import IPlugin\nfrom twisted.python import log\nfrom desertbot.moduleinterface import IModule, Module, ModuleType, AccessLevel\nfrom desertbot.message import IRCMessage\nfrom desertbot.response import IRCResponse, ResponseType\n\n\nclass Ignore(Module):\n implements(IPlugin, IModule)\n\n name = u\"ignore\"\n triggers = [u\"ignore\", u\"unignore\"]\n moduleType = ModuleType.PASSIVE\n accessLevel = AccessLevel.ADMINS\n modulePriority = -40 # Very high priority, should be lower than admin and logging\n\n def getHelp(self, message):\n \"\"\"\n @type message: IRCMessage\n \"\"\"\n helpDict = {\n u\"ignore\": u\"ignore [module] - prevents the specified user \"\n u\"from using the specified module(s).\",\n u\"unignore\": u\"unignore - stops ignoring the specified user.\"\n }\n return helpDict[message.parameterList[0]]\n\n def onTrigger(self, message):\n \"\"\"\n @type message: IRCMessage\n \"\"\"\n if message.command == u\"ignore\":\n if len(message.parameterList) == 0:\n return IRCResponse(ResponseType.PRIVMSG, u\"Ignore who?\", message.user, message.replyTo)\n else:\n message.bot.dataStore[\"ignores\"][message.parameterList[0]] = message.parameterList[1:]\n return IRCResponse(ResponseType.PRIVMSG,\n u\"Now ignoring: \\\"{}\\\".\".format(message.parameterList[0]),\n message.user, message.replyTo)\n elif message.command == u\"unignore\":\n if len(message.parameterList) == 0:\n return IRCResponse(ResponseType.PRIVMSG, u\"Unignore who?\", message.user, message.replyTo)\n elif message.parameterList[0] not in message.bot.dataStore[\"ignores\"]:\n return IRCResponse(ResponseType.PRIVMSG, u\"I am not ignoring \\\"{}\\\"!\".format(message.parameterList[0]),\n message.user, message.replyTo)\n else:\n del message.bot.dataStore[\"ignores\"][(message.parameterList[0])]\n return IRCResponse(ResponseType.PRIVMSG, u\"No longer ignoring \\\"{}\\\".\".format(message.parameterList[0]),\n message.user, message.replyTo)\n 
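        # The else branch below is the passive path: for every non-command message, each
        # stored regex is matched against the user's hostmask, and a hit either clears the
        # message outright ("all") or only when one of the listed modules would trigger.
        # Illustrative rule shape (hypothetical values, not from the source):
        #   bot.dataStore["ignores"] = {r"spammer!.*@.*": u"all",
        #                               r".*@bad\.example": ["ignore", "admin"]}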
else:\n for userRegex in message.bot.dataStore[\"ignores\"].keys():\n if re.match(userRegex, message.user.getUserString()):\n if message.bot.dataStore[\"ignores\"][userRegex] == u\"all\":\n message.clear()\n else:\n for moduleName in message.bot.dataStore[\"ignores\"][userRegex]:\n module = message.bot.moduleHandler.getModule(moduleName)\n if module is not None:\n if message.bot.moduleHandler._shouldTrigger(module, message):\n message.clear()\n\n def onModuleLoaded(self, bot):\n configFileName = bot.factory.config.configFileName[:-5]\n if os.path.exists(os.path.join(\"data\", configFileName, \"ignores.json\")):\n with open(os.path.join(\"data\", configFileName, \"ignores.json\")) as jsonFile:\n ignores = json.load(jsonFile)\n if len(ignores) != 0:\n bot.dataStore[\"ignores\"] = ignores\n log.msg(\"Loaded {} ignores from ignores file for config \\\"{}\\\".\".format(len(ignores),\n configFileName))\n else:\n log.msg(\"Ignores file for config \\\"{}\\\" is empty.\".format(configFileName))\n bot.dataStore[\"ignores\"] = {}\n else:\n log.err(\"Ignores file not found for config \\\"{}\\\"!\".format(configFileName))\n bot.dataStore[\"ignores\"] = {}\n\n def onModuleUnloaded(self, bot):\n configFileName = bot.factory.config.configFileName[:-5]\n if not os.path.exists(os.path.join(\"data\", configFileName)):\n os.makedirs(os.path.join(\"data\", configFileName))\n with open(os.path.join(\"data\", configFileName, \"admins.json\"), \"w\") as jsonFile:\n json.dump(bot.dataStore[\"ignores\"], jsonFile)\n\n\nignore = Ignore()","sub_path":"desertbot/modules/passive/ignore.py","file_name":"ignore.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"527777514","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom time import sleep\nimport math\nimport pyperclip\n\n\ndef calc(x):\n return str(math.log(abs(12*math.sin(int(x)))))\n\n\nbrowser = webdriver.Chrome()\nbrowser.get('http://suninjuly.github.io/alert_accept.html')\n\nbrowser.find_element(By.TAG_NAME, 'button').click()\n\nbrowser.switch_to.alert.accept()\n\nsleep(1)\n\nnum = calc(browser.find_element(By.ID, 'input_value').text)\n\nanswer = browser.find_element(By.ID, 'answer')\nanswer.send_keys(num)\n\nbrowser.find_element(By.CLASS_NAME, 'btn-primary').click()\n\nalert = browser.switch_to.alert\nresult = alert.text.split(': ')[1]\npyperclip.copy(result)\n\nbrowser.quit()\n","sub_path":"week_2/alerts_1.py","file_name":"alerts_1.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"615746070","text":"\"\"\"basically finish babyneo.de babymilk product crawl, however euro singal has not re matched\"\"\"\n# -*- coding: utf-8 -*-\n__author__ = 'ncp'\nimport scrapy\nfrom Proj_Haitao.items import ProjHaitaoItem\nfrom scrapy.http import Request\nfrom scrapy.spider import Spider # deprecate using BaseSpider\nimport time\n\n\nclass Babyneo(Spider):\n name = 'babyneo'\n allow_domains = ['babyneo.de']\n start_urls = ['http://www.babyneo.de/Infant-nutrition--Aptamil-Nestle-39']\n\n def parse(self, response):\n for url in response.xpath(\"//a[@class='link_name']/@href\").extract():\n yield Request(url, self.parse_url)\n #yield scrapy.Request(response.xpath(\"//a[@class='link_name']/@href\").extract(), callback=self.parse_url)\n\n def parse_url(self, response):\n item = ProjHaitaoItem()\n for goods in 
response.xpath(\"//div[@class='list_entry_width' and @id]\"):\n alter_namepath = \"descendant::h2[@class='list_entry_name']//span[@class='truncated_full_string']/text()\"\n alter_pricepath = \"descendant::div[@class='list_entry_price reduced'][1]/text()\"\n if goods.xpath(alter_namepath).extract():\n item['name'] = goods.xpath(alter_namepath).extract()\n item['weight'] = goods.xpath(alter_namepath).re(r'\\b\\d+\\w+')\n else:\n item['name'] = goods.xpath(\"descendant::h2[@class='list_entry_name']/a/text()\").extract()\n item['weight'] = goods.xpath(\"descendant::h2[@class='list_entry_name']/a/text()\")[0].re(r'\\b\\d+\\w+')\n if goods.xpath(alter_pricepath):\n item['price'] = goods.xpath(alter_pricepath).re(r'\\b\\d+.\\d+.')[0]\n else:\n item['price'] = goods.xpath(\"descendant::div[@class='list_entry_price']/text()\")[0].extract()\n item['link'] = goods.xpath(\"descendant::h2[@class='list_entry_name']/a/@href\")[0].extract()\n item['instock'] = 'yes'\n item['data'] = time.asctime()\n yield item","sub_path":"Proj_Haitao/spiders/babymilk.py","file_name":"babymilk.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"589697232","text":"def _init_():\r\n print('Please select one of the following operations:')\r\n print('1) Add')\r\n print('2) Subtract')\r\n print('3) Multiply')\r\n print('4) Divide')\r\n print('--------------------------------------------------')\r\n choice = int(input('Input the number of your chosen operation: '))\r\n a = int(input('Now please type in your first number: '))\r\n b = int(input('Now type in the second number: '))\r\n\r\n if choice == 1:\r\n print(add(a, b))\r\n elif choice == 2:\r\n print(subtract(a, b))\r\n elif choice == 3:\r\n print(multiply(a, b))\r\n elif choice == 4:\r\n print(divide(a, b))\r\n else:\r\n print('--------------------------------------------------')\r\n print('You typed in the wrong number.\\nTry again')\r\n print('--------------------------------------------------')\r\n return _init_()\r\n\r\n\r\ndef add(a, b):\r\n c = a + b\r\n return c\r\n\r\ndef subtract(a, b):\r\n c = a - b\r\n return c\r\n\r\ndef multiply(a, b):\r\n c = a * b\r\n return c\r\n\r\ndef divide(a, b):\r\n c = a / b\r\n return c\r\n\r\n\r\n_init_()","sub_path":"Basic-Math/Four-basic-math-operations.py","file_name":"Four-basic-math-operations.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"236081651","text":"import pandas as pd\nimport os\nfrom datetime import date\nimport math\nfrom pathlib import Path\nimport tensorflow as tf\n\nDATA_PATH = Path(os.getcwd()).parent\nTEST_FILE = r'data/raw/ccf_offline_stage1_test_revised.csv'\nTRAIN_OFF_FILE = r'data/raw/ccf_offline_stage1_train.csv'\nTRAIN_ON_FILE = r'data/raw/ccf_online_stage1_train.csv'\nSAMPLE_FILE = r\"data/raw/sample_submission.csv\"\n\n# print(os.path.exists(DATA_PATH))\n# print(os.path.exists(DATA_PATH/TRAIN_OFF_FILE))\n# print(os.path.exists(DATA_PATH/TRAIN_ON_FILE))\n# print(os.path.exists(DATA_PATH/SAMPLE_FILE))\n\n\nclass Dataset:\n \"\"\"\n read in the raw data; convert each col to valid data type; clean the useless samples\n is able to split pos/neg samples and train/test sets\n \"\"\"\n def __init__(self, file_path, ratio=0.9):\n self.df = pd.read_csv(file_path)\n self._convert_datetime()\n self._clean_nan_value( ['Distance','Discount_rate'], [-1, '1'])\n self._clean_non_numeric_value(['Discount_rate'])\n 
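        # Labeling rule applied just below: a sample is positive only when the coupon was
        # redeemed within 15 days of receipt; coupons never used, or used late, are negative.
        # Quick check of the window arithmetic (hypothetical dates):
        #   (date(2016, 1, 20) - date(2016, 1, 10)).days <= 15   # True  -> positive
        #   (date(2016, 2, 1) - date(2016, 1, 10)).days <= 15    # False -> negative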
self.pos_samples = self._get_positive_sample()\n self.pos_nums = self.pos_samples.shape[0]\n self.neg_samples = self._get_negative_sample()\n self.neg_nums = self.neg_samples.shape[0]\n self.df = pd.concat([self.neg_samples, self.pos_samples])\n self.df = self.df.sample(frac=1).reset_index(drop=True)\n self.df = self.df.astype({\"Coupon_id\": int})\n self._transform_time_feature()\n len = int(self.df.shape[0] * ratio)\n self.train_df = self.df[:len]\n self.test_df = self.df[len:]\n # print(self.df.head())\n # print(self.df.info())\n\n def _int2datetime(self, x):\n if math.isnan(x):\n return None\n y = int(x / 10000)\n m = int(x % 10000 / 100)\n d = int(x % 100)\n return date(y, m, d)\n\n def _convert_datetime(self):\n if 'Date_received' in self.df.columns:\n self.df['Date_received'] = pd.to_datetime(self.df['Date_received'].apply(self._int2datetime))\n if 'Date' in self.df.columns:\n self.df['Date'] = pd.to_datetime(self.df['Date'].apply(self._int2datetime))\n\n def _convert_discount(self, str):\n if ':' in str:\n discount = str.split(sep=':')\n return (1 - float(discount[1]) / float(discount[0]))\n else:\n return float(str)\n\n def _clean_non_numeric_value(self, columns):\n for col in columns:\n self.df[col] = self.df[col].apply(self._convert_discount)\n\n def _clean_nan_value(self, columns, new_vals):\n # print(columns)\n for col,val in zip(columns, new_vals):\n self.df.loc[self.df[col].isnull(), col] = val\n\n def _get_negative_sample(self):\n neg_df = self.df.loc[~self.df['Coupon_id'].isnull() & self.df['Date'].isnull()]\n pos_df = self.df.loc[~self.df['Coupon_id'].isnull() & ~self.df['Date'].isnull()]\n timeout_df = pos_df.loc[(pos_df['Date'] - pos_df['Date_received']).apply(lambda x: x.days > 15)]\n neg_df = pd.concat([neg_df, timeout_df])\n neg_df['label'] = [0]*neg_df.shape[0]\n return neg_df\n\n # only 7% are positive samples\n def _get_positive_sample(self):\n pos_df = self.df.loc[~self.df['Coupon_id'].isnull() & ~self.df['Date'].isnull()]\n pos_df = pos_df.loc[(pos_df['Date'] - pos_df['Date_received']).apply(lambda x: x.days <= 15)]\n pos_df['label'] = [1]*pos_df.shape[0]\n return pos_df\n\n def _transform_time_feature(self):\n self.df['DoW'] = self.df['Date_received'].dt.dayofweek\n self.df['DoM'] = self.df['Date_received'].dt.month\n self.df.drop(['Date', 'Date_received'], axis=1, inplace=True)\n\n def generate_feature_cols(self):\n cat_cols = ['User_id', 'Merchant_id', 'Coupon_id', 'DoW', 'DoM']\n num_cols = ['Discount_rate', 'Distance']\n one_hot_cols = [tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_hash_bucket\n ('Merchant_id', hash_bucket_size=8900,\n dtype=tf.dtypes.int64)) ]\n one_hot_cols += [tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_hash_bucket\n ('Coupon_id', hash_bucket_size=15000,\n dtype=tf.dtypes.int64))]\n one_hot_cols += [tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_hash_bucket\n ('User_id', hash_bucket_size=7400000,\n dtype=tf.dtypes.int64))]\n # one_hot_cols += [tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_hash_bucket\n # (col, hash_bucket_size=7400000,\n # dtype=tf.dtypes.int64)) for col in ['DoW', 'DoM']]\n feature_cols = [tf.feature_column.numeric_column(k) for k in num_cols]\n return feature_cols + one_hot_cols\n\n\nif __name__ == '__main__':\n data = Dataset(DATA_PATH/TRAIN_OFF_FILE)\n # print(data.neg_nums)\n # 
print(data.pos_nums)\n","sub_path":"src/pre_process.py","file_name":"pre_process.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"14972700","text":"from django import forms\nfrom .models import Employee, Designation,Unitname\n\nclass EmployeeForm(forms.ModelForm):\n class Meta:\n model = Employee\n fields = ('name', 'birthdate','office', 'unitname','department', 'designation')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['designation'].queryset = Designation.objects.none()\n self.fields['unitname'].queryset = Unitname.objects.none()\n\n\n if 'department' and 'office' in self.data:\n try:\n department_id = int(self.data.get('department'))\n office_id = int(self.data.get('office'))\n self.fields['designation'].queryset = Designation.objects.filter(department_id=department_id).order_by('name')\n self.fields['unitname'].queryset = Unitname.objects.filter(office_id=office_id).order_by('name')\n except (ValueError, TypeError):\n pass # invalid input from the client; ignore and fallback to empty Designation queryset\n elif self.instance.pk:\n self.fields['designation'].queryset = self.instance.department.designation_set.order_by('name')\n self.fields['unitname'].queryset = self.instance.office.unitname_set.order_by('name')\n","sub_path":"src/employee/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"34223785","text":"# -*- coding: utf-8 -*-\n\nimport subprocess\nimport os\nimport sys\nimport json\n\nHOME_DIR = os.environ.get('HOME')\n\nAWS_DEFAULT_REGION = os.environ.get('AWS_DEFAULT_REGION')\nAWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')\n\nAWS_ECS_COMMAND = 'AWS_DEFAULT_REGION=%s AWS_ACCESS_KEY_ID=%s AWS_SECRET_ACCESS_KEY=%s %s/.local/lib/aws/bin/aws ecs '\nAWS_ECS_COMMAND = AWS_ECS_COMMAND % (AWS_DEFAULT_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, HOME_DIR)\n\nclass AwsCliException(Exception): pass\n\n#aws ecs list-container-instances --cluster CLUSTER\ndef list_container_instances(cluster):\n data = _ecs_command('list-container-instances --cluster %s' % (cluster))\n return [_parse_arn(arn).get('uuid') for arn in data.get('containerInstanceArns')]\n\n#aws ecs describe-container-instances --cluster CLUSTER --container-instances CONTAINER_INSTANCE_UUID\ndef describe_container_instances(cluster, container_instance_uuid):\n return _ecs_command('describe-container-instances --cluster %s --container-instances %s' % (cluster, container_instance_uuid))\n\n#aws ecs list-task-definitions\ndef list_task_definitions():\n data = _ecs_command('list-task-definitions')\n return [_parse_arn(arn).get('uuid') for arn in data.get('taskDefinitionArns')]\n\n#aws ecs list-tasks --cluster CLUSTER\ndef list_tasks(cluster):\n data = _ecs_command('list-tasks --cluster %s' % (cluster))\n return [_parse_arn(arn).get('uuid') for arn in data.get('taskArns')]\n\n#aws ecs describe-tasks --cluster CLUSTER --task TASK_UUID\ndef describe_task(cluster, task_uuid):\n return _ecs_command('describe-tasks --cluster %s --task %s' % (cluster, task_uuid))\n\ndef _ecs_command(command):\n try:\n response = subprocess.check_output(\n AWS_ECS_COMMAND + command, \n shell=True, \n stderr=subprocess.STDOUT\n )\n return json.loads(response)\n except subprocess.CalledProcessError as e:\n raise 
AwsCliException(e.output)\n    except OSError as e:\n        raise AwsCliException(e)\n\ndef _parse_arn(arn):\n    # arn:aws:SERVICE:REGION:ACCOUNT:TYPE/UUID\n    sections = arn.split(':')\n    t, uuid = sections[5].split('/')\n\n    # only fetch meaningful parts of ARN\n    data = {}\n    data['service'] = sections[2]\n    data['region'] = sections[3]\n    data['account'] = sections[4]\n    data['type'] = t\n    data['uuid'] = uuid\n\n    if len(sections) == 7:\n        data['extra_id'] = sections[6]\n    \n    return data\n","sub_path":"ecswww/ecs.py","file_name":"ecs.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"179815899","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom get_data import get_depth\n\ndataset = get_depth()\n\nclass EncoderDecoder(nn.Module):\n\n    def __init__(self):\n        super(EncoderDecoder, self).__init__()\n        # kernel_size is a required argument of Conv2d/ConvTranspose2d; the original\n        # omitted it and would raise a TypeError. 3 is an assumed value.\n        self.cnn1 = nn.Conv2d(3, 10, kernel_size=3)\n        self.cnn2 = nn.Conv2d(10, 25, kernel_size=3)\n        self.cnn3 = nn.Conv2d(25, 50, kernel_size=3)\n\n        self.decnn1 = nn.ConvTranspose2d(50, 25, kernel_size=3)\n        self.decnn2 = nn.ConvTranspose2d(25, 10, kernel_size=3)\n        self.decnn3 = nn.ConvTranspose2d(10, 1, kernel_size=3)\n\n    def forward(self, x):\n        x = F.relu(self.cnn1(x))\n        x = F.relu(self.cnn2(x))\n        x = F.relu(self.cnn3(x))\n\n        x = F.relu(self.decnn1(x))\n        x = F.relu(self.decnn2(x))\n        x = F.relu(self.decnn3(x))\n        return x  # the original forward() never returned the output\n\nmodel = EncoderDecoder()\nlearning_rate = 1e-3\nloss_fn = nn.MSELoss()\noptimizer = optim.Adam(model.parameters(), lr=learning_rate)\nmax_epochs = 1\n\nfor epoch in range(max_epochs):  # was range(max_epoch), a NameError\n    for data in dataset:\n        x,y = data\n        x = Variable(torch.Tensor(x), requires_grad=True)\n        y = Variable(torch.Tensor(y), requires_grad=False)\n        pred = model(x)\n        loss = loss_fn(pred,y)\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n    break","sub_path":"legacy/encoder-decoder-graph.py","file_name":"encoder-decoder-graph.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"156476805","text":"\"\"\"Problem statement: we have a bag of capacity 'c' and a set of objects, each of which has a certain weight\r\n    and a profit attached to it. We have to fill the bag with objects so that the bag's capacity\r\n    is not exceeded while the total profit is maximized.
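    For a concrete (illustrative) instance: with capacity 5 and items (weight, profit) = (2, 3), (3, 4), (4, 5), taking the first two items uses exactly weight 5 for profit 7, which no other feasible subset beats.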
\"\"\"\r\n\r\n\r\n# Function to perform 0/1 knapsack problem using recursion\r\ndef recursive_knapsack(capacity, weights, profit, idx=0):\r\n    if idx == len(weights):\r\n        return 0\r\n    if weights[idx] <= capacity:\r\n        # Case in which we include the current object in the bag\r\n        option1 = profit[idx] + recursive_knapsack(capacity-weights[idx], weights, profit, idx+1)\r\n\r\n        # Case in which we skip the current object\r\n        option2 = recursive_knapsack(capacity, weights, profit, idx+1)\r\n        return max(option1, option2)\r\n    else:\r\n        return recursive_knapsack(capacity, weights, profit, idx+1)\r\n\r\n\r\n# Function to perform 0/1 knapsack problem using memoization\r\ndef memo_knapsack(capacity, weights, profit):\r\n    memo = {}\r\n\r\n    def knapsack(capacity, idx=0):\r\n        if idx == len(weights):\r\n            return 0\r\n        # memoize on (remaining capacity, index); the original keyed on\r\n        # (capacity, weights[idx]), which collides when weights repeat\r\n        key = (capacity, idx)\r\n        if key in memo:\r\n            return memo[key]\r\n        if weights[idx] <= capacity:\r\n            option1 = profit[idx] + knapsack(capacity-weights[idx], idx+1)\r\n            option2 = knapsack(capacity, idx+1)\r\n            memo[key] = max(option1, option2)\r\n        else:\r\n            memo[key] = knapsack(capacity, idx+1)\r\n\r\n        return memo[key]\r\n    return knapsack(capacity, 0)\r\n\r\n\r\n# Function to apply the same idea more efficiently using dynamic programming\r\ndef dynamic_knapsack(capacity, weights, profit):\r\n    result = [[0 for _ in range(capacity+1)] for _ in range(len(weights)+1)]\r\n\r\n    for idx in range(len(weights)):\r\n        for c in range(capacity+1):\r\n            if weights[idx] <= c:\r\n                option1 = profit[idx] + result[idx][c-weights[idx]]\r\n                option2 = result[idx][c]\r\n                result[idx+1][c] = max(option1, option2)\r\n            else:\r\n                result[idx+1][c] = result[idx][c]\r\n\r\n    return result[-1][-1]\r\n\r\ncapacity = 15\r\nweights = [4, 5, 1, 3, 2, 5]\r\nprofits = [2, 3, 1, 5, 4, 7]\r\n\r\nprint('Total profit you get for this set of objects (using memo): ', memo_knapsack(capacity, weights, profits))\r\nprint('Total profit you get for this set of objects (using recursive): ', recursive_knapsack(capacity, weights, profits, 0))\r\nprint('Total profit you get for this set of objects (using dynamic): ', dynamic_knapsack(capacity, weights, profits))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Algorithms/0-1 knapsack.py","file_name":"0-1 knapsack.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"48284830","text":"import random\nimport plotly.express as px \nimport csv\n\ndice_result = []\ncount = []\n\nfor i in range(0,101):\n    dice1 = random.randint(1,6)\n    dice2 = random.randint(1,6)\n    dice_result.append(dice1+dice2)\n    count.append(i)\n    \nfig = px.bar(x = dice_result, y = count)\nfig.show()\n\nprint(count)\nprint(dice1,dice2)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"154066288","text":"def check(map, arr, idx, val, n):\r\n\r\n    if map[idx][idx] == 1:\r\n        return False\r\n\r\n    for i in range(n):\r\n        if map[idx][i] == 1:\r\n            if arr[i] == val:\r\n                return False\r\n\r\n    return True\r\n\r\ndef func(map, arr, idx, m, n):\r\n\r\n    if idx == n:\r\n        return True\r\n\r\n    else:\r\n        for val in range(1, m+1):\r\n            if check(map, arr, idx, val, n):\r\n                arr[idx] = val\r\n                if func(map, arr, idx+1, m, n):\r\n                    return True\r\n                else:\r\n                    arr[idx] = 0  # reset on backtrack; the original assigned the list [0]\r\n\r\n        return False
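# Quick sanity check for the coloring search above (illustrative input): with the
# adjacency matrix [[0, 1], [1, 0]] two connected nodes need different colors, so
# m = 1 admits no assignment, while m = 2 yields an arr such as [1, 2].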
\r\n\r\ndef main():\r\n    m = int(input())\r\n    n = int(input())\r\n    Map = []\r\n    for i in range(n):\r\n        Map.append(list(map(int, input().split())))\r\n\r\n    arr = [0 for i in range(n)]\r\n\r\n    if not func(Map, arr, 0, m, n):\r\n        print(\"ohh no\")\r\n\r\n    else:\r\n        print(arr)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"back_tracking/graph_color.py","file_name":"graph_color.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"633174029","text":"import httplib2\nimport sys\nfrom xml.dom import minidom\n\n\"\"\"\nThe Redmine API is documented at:\nhttp://www.redmine.org/wiki/1/Rest_api\n\nThe API usage examples make use of ActiveResource libraries and the API\nis clearly geared towards making use of an underlying knowledge base of\nActiveResource, so the description is a bit light on details. As such,\nonly the most vital portions of this library have tests associated with\nthem, specifically issue creation. There is an active bug around the\nuse of custom fields in issue management. Adding issue custom fields\nright now will do nothing.\n\nA Redmine URL, user, password, and user API key are needed to initialize\nthe RedmineClient. To run this set of doctests, you will have to supply\nthem below as well.\n\"\"\"\n\nclass RedmineResource:\n    \"\"\"\n    Base class, do not use directly.\n    \"\"\"\n    def __init__(self, document = None, node = None, root = None):\n        self.document = document\n        if self.document is None and node is not None:\n            impl = minidom.getDOMImplementation()\n            self.resource = impl.createDocument(None, None, None)\n            self.resource.appendChild(node)\n        elif self.document is None:\n            impl = minidom.getDOMImplementation()\n            self.resource = impl.createDocument(None, root, None)\n\n    def add_element(self, element, id = None, name = None, value = None, is_custom = False):\n        element = self.resource.createElement(element)\n        if id is not None:\n            if type(id).__name__ == \"int\":\n                element.setAttribute('id', '%d' % id)\n            else:\n                element.setAttribute('id', id)\n        if name is not None:\n            element.setAttribute('name', name)\n        if value is not None:\n            if type(value).__name__ == \"int\":\n                val = self.resource.createTextNode('%d' % value)\n                element.appendChild(val)\n            else:\n                val = self.resource.createTextNode(value)\n                element.appendChild(val)\n        if is_custom:\n            customEl = self.resource.getElementsByTagName('custom_fields')\n            if len(customEl) > 0:\n                customEl[0].appendChild(element)\n            else:\n                customEl = self.resource.createElement('custom_fields')\n                customEl.appendChild(element)\n                self.resource.documentElement.appendChild(customEl)\n        else:\n            self.resource.documentElement.appendChild(element)\n\n    def get_element(self, element_name):\n        els = self.resource.getElementsByTagName(element_name)\n        if len(els) > 0:\n            return els[0].firstChild.data\n        else:\n            return None\n\n    def parse(self, xml):\n        self.resource = minidom.parseString(xml)\n        return self.resource\n\n    def to_xml(self):\n        return self.resource.toxml()\n\nclass RedmineProject(RedmineResource):\n    def __init__(self, project = None):\n        \"\"\"\n        Redmine project representation, tied to the API XML form.\n\n        >>> r = RedmineProject(None)\n        >>> r.to_xml()\n        '<?xml version=\"1.0\" ?><project/>'\n        \"\"\"\n        RedmineResource.__init__(self, None, project, 'project')\n\n\nclass RedmineTracker(RedmineResource):\n    def __init__(self, tracker = None):\n        \"\"\"\n        Redmine tracker representation, tied to the API XML form.\n\n        >>> r = RedmineTracker(None)\n        >>> r.to_xml()\n        '<?xml version=\"1.0\" ?><tracker/>'\n        \"\"\"\n        
RedmineResource.__init__(self, None, tracker, 'tracker')\n\n\nclass RedmineIssue(RedmineResource):\n    def __init__(self, issue = None):\n        \"\"\"\n        Redmine issue representation, tied to the API XML form.\n\n        >>> r = RedmineIssue(None)\n        >>> r.to_xml()\n        '<?xml version=\"1.0\" ?><issue/>'\n        >>> r.add_element('project', '1')\n        >>> r.to_xml()\n        '<?xml version=\"1.0\" ?><issue><project id=\"1\"/></issue>'\n        \"\"\"\n        RedmineResource.__init__(self, None, issue, 'issue')\n        self.project_id = None\n\n    def set_project(self, project_id):\n        self.project_id = project_id\n        self.add_element('project_id', value = project_id)\n\nclass RedmineClient:\n    def __init__(self, base, user, password, key):\n        \"\"\"\n        Talks to the API.\n        \"\"\"\n        if base[-1] == '/':\n            base = base[0:-1]\n        self.base = base\n        self.http = httplib2.Http()\n        self.http.add_credentials(user, password)\n        self.key = key\n\n    # ---- Trackers ----\n\n    def get_trackers(self):\n        \"\"\"\n        Get a [list] of all trackers\n        GET $base/trackers.xml\n        http://www.redmine.org/projects/redmine/wiki/Rest_Trackers\n        Warning: trackers are considered alpha and have been available since Redmine 1.3\n\n        >>> r = RedmineClient('http://redmine.example.com', 'test_username', 'test_password', 'test_key')\n        >>> p = r.get_trackers()\n        >>> len(p) > 0\n        True\n        \"\"\"\n        url = \"%s/trackers.xml?key=%s\" % (self.base, self.key)\n        response, content = self.http.request(url, \"GET\")\n        trackers = []\n        trackersRoot = minidom.parseString(content)\n        trackersList = trackersRoot.documentElement.getElementsByTagName('tracker')\n        for tracker in trackersList:\n            trackers.append(RedmineTracker(tracker))\n        return trackers\n\n    # ---- Projects ----\n\n    def get_projects(self):\n        \"\"\"\n        Get a [list] of all projects\n        GET $base/projects.xml\n\n        >>> r = RedmineClient('http://redmine.example.com', 'test_username', 'test_password', 'test_key')\n        >>> p = r.get_projects()\n        >>> len(p) > 0\n        True\n        \"\"\"\n        url = \"%s/projects.xml?key=%s\" % (self.base, self.key)\n        response, content = self.http.request(url, \"GET\")\n        projects = []\n        projectsRoot = minidom.parseString(content)\n        projectsList = projectsRoot.documentElement.getElementsByTagName('project')\n        for project in projectsList:\n            projects.append(RedmineProject(project))\n        return projects\n\n    def get_project(self, id):\n        \"\"\"\n        Get one project based on the numerical ID or short identifier\n        GET $base/projects/$id.xml\n        \"\"\"\n        if type(id).__name__ == \"int\":\n            url = \"%s/projects/%d.xml?key=%s\" % (self.base, id, self.key)\n        else:\n            url = \"%s/projects/%s.xml?key=%s\" % (self.base, id, self.key)\n        response, content = self.http.request(url, \"GET\")\n        project = RedmineProject(None)\n        project.parse(content)\n        return project\n\n    def create_project(self, project):\n        \"\"\"\n        Create a project\n        POST $base/projects.xml\n        \"\"\"\n        url = \"%s/projects.xml?key=%s\" % (self.base, self.key)  # the original format string dropped the %s for the key, a TypeError\n        response, content = self.http.request(url, \"POST\", project.to_xml())\n        if response.status == 201:\n            new_project = RedmineProject(None)\n            new_project.parse(content)\n            return new_project.get_element('id')\n        else:\n            # It would be better to have details about failure modes here instead of a global None\n            return None \n\n    def update_project(self, id, project):\n        \"\"\"\n        Update a project\n        PUT $base/projects/$id.xml\n        \"\"\"\n        if type(id).__name__ == \"int\":\n            url = \"%s/projects/%d.xml?key=%s\" % (self.base, id, self.key)\n        else:\n            url = \"%s/projects/%s.xml?key=%s\" % (self.base, id, self.key)\n        response, content = self.http.request(url, \"PUT\", project.to_xml())\n        if response.status == 200:\n            return True\n        else:\n            return False\n\n    def delete_project(self, id):
\"\"\"\n        Delete a project\n        DELETE $base/projects/$id.xml\n        \"\"\"\n        if type(id).__name__ == \"int\":\n            url = \"%s/projects/%d.xml?key=%s\" % (self.base, id, self.key)\n        else:\n            url = \"%s/projects/%s.xml?key=%s\" % (self.base, id, self.key)\n        response, content = self.http.request(url, \"DELETE\")\n        if response.status == 200:\n            return True\n        else:\n            return False\n\n    # ---- Issues ----\n\n    def get_issues(self, project_id = None, tracker = None, status = None, page = 0):\n        \"\"\"\n        Get paginated list of all issues\n        GET $base/issues.xml?page=$page&project_id=$project&tracker_id=$tracker&status_id=$status\n\n        Creating an issue should probably be part of this test...\n        >>> r = RedmineClient('http://redmine.example.com', 'test_username', 'test_password', 'test_key')\n        >>> l = r.get_issues(1)\n        >>> len(l) > 0\n        True\n        \"\"\"\n        url = \"%s/issues.xml?key=%s&page=%d\" % (self.base, self.key, page)\n        if type(project_id).__name__ == \"int\":\n            url = \"%s&project_id=%d\" % (url, project_id)\n        else:\n            url = \"%s&project_id=%s\" % (url, project_id)\n        if tracker is not None:\n            url = \"%s&tracker_id=%d\" % (url, tracker)\n        if status is not None:\n            if type(status).__name__ == \"int\":\n                url = \"%s&status_id=%d\" % (url, status)\n            else:\n                url = \"%s&status_id=%s\" % (url, status)\n        response, content = self.http.request(url, \"GET\")\n        issues = []\n        issuesRoot = minidom.parseString(content)\n        issuesList = issuesRoot.documentElement.getElementsByTagName('issue')\n        for issue in issuesList:\n            issues.append(RedmineIssue(issue))\n        return issues\n\n    def get_issue(self, id):\n        \"\"\"\n        Get one issue\n        GET $base/issues/$id.xml\n\n        >>> r = RedmineClient('http://redmine.example.com', 'test_username', 'test_password', 'test_key')\n        >>> i = r.get_issue(12)\n        >>> i.get_element('id').firstChild.data\n        u'12'\n        \"\"\"\n        url = \"%s/issues/%d.xml?key=%s\" % (self.base, id, self.key)\n        response, content = self.http.request(url, \"GET\")\n        issue = RedmineIssue(None)\n        issue.parse(content)\n        return issue\n\n    def create_issue(self, issue):\n        \"\"\"\n        Create an issue\n        POST $base/issues.xml?project_id=$project_id\n\n        >>> r = RedmineClient('http://redmine.example.com', 'test_username', 'test_password', 'test_key')\n        >>> i = RedmineIssue(None)\n        >>> i.set_project('1')\n        >>> i.add_element('tracker', '3')\n        >>> i.add_element('status', '1')\n        >>> i.add_element('priority', '4')\n        >>> i.add_element('author', '3')\n        >>> i.add_element('subject', value = 'Test')\n        >>> i.add_element('description', value = 'Test')\n        >>> i.add_element('custom_field', id = '1', value = 'http://somewhere.com/file.xls', is_custom = True)\n        >>> i.to_xml()\n        '<?xml version=\"1.0\" ?><issue><project_id>1</project_id><tracker id=\"3\"/><status id=\"1\"/><priority id=\"4\"/><author id=\"3\"/><subject>Test</subject><description>Test</description><custom_fields><custom_field id=\"1\">http://somewhere.com/file.xls</custom_field></custom_fields></issue>'\n        >>> r.create_issue(i) is not None\n        True\n        \"\"\"\n        url = \"%s/issues.xml?key=%s&project_id=%s\" % (self.base, self.key, issue.project_id)\n        response, content = self.http.request(url, \"POST\", issue.to_xml(), headers={'Content-type': 'text/xml'})\n        if response.status == 201:\n            new_issue = RedmineIssue(None)\n            new_issue.parse(content)\n            return new_issue.get_element('id')\n        else:\n            # It would be better to have details about failure modes here instead of a global None\n            return None \n\n    def update_issue(self, id, issue):\n        \"\"\"\n        Update an issue\n        PUT $base/issues/$id.xml\n        \"\"\"\n        url = \"%s/issues/%d.xml?key=%s\" % (self.base, id, self.key)\n        response, content = self.http.request(url, \"PUT\", issue.to_xml())\n        if response.status == 200:\n            return True\n        else:\n            return False\n\n    def delete_issue(self, id):\n        \"\"\"\n        Delete an issue\n        
DELETE $base/issues/$id.xml\n \"\"\"\n url = \"%s/issues/%d.xml?key=%s\" % (self.base, id, self.key)\n response, content = self.http.request(url, \"DELETE\")\n if response.status == 200:\n return True\n else:\n return False\n","sub_path":"django_redmine/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"528281755","text":"import pandas as pd\nimport numpy as np\nimport pickle\n\nfrom sklearn.linear_model import RidgeCV\n\nfrom models.modelNN import*\nfrom models.modelBaseline import*\nfrom models.modelMatrixFact import*\nfrom models.modelSurprise import*\n\nfrom sklearn.model_selection import KFold\nfrom helpers import create_csv, load_data\n\n\n# Load the train set and the submission set\nsamples = load_data('data/sampleSubmission.csv')\ndata = load_data('data/data_train.csv')\n\n# Train and make predictions on the surprise, neural network, baseline and matrix factorization method\nsurprise_ratings = computeSurprise(data, samples)\nNN_ratings = computeNN(data, samples)\nbaseline_ratings = computeBaseline(data, samples)\nMF_ratings = computeMF(data, samples)\n\n# Merge the predictions\nsubmission_pred = surprise_ratings \\\n .merge(NN_ratings, on=['user_id', 'movie_id'])\\\n .merge(baseline_ratings, on = ['user_id', 'movie_id'])\\\n .merge(MF_ratings, on = ['user_id', 'movie_id'])\n\n# Load a pickle data set that contain the train set for the ridge regression\ntest_pred = pd.read_pickle('data/test_pred.pickle')\ntest_pred = test_pred.rename(columns={\"mf_rmse_rating\": \"MF_RMSE_rating\"})\n\n\n# Model used for the blending\nmodels_names = [ 'MF_ALS_rating',\n 'cocluster_rating',\n 'knnmeans_item_rating',\n 'knnmeans_user_rating',\n 'knnzscore_user_rating',\n 'knnzscore_item_rating',\n 'knnbasic_user_rating',\n 'knnbasic_item_rating',\n 'slopeone_rating',\n 'mf_rating',\n 'svd_rating',\n 'svdpp_rating',\n 'item_mean_rating',\n 'user_mean_rating',\n 'global_mean_rating',\n 'MF_RMSE_rating',\n 'NN_deep_rating',\n 'NN_shallow_rating']\n\n\n# Cross validation on a ridge regression with ten fold\ncv_ridge = KFold(n_splits=10)\ngs_ridge = RidgeCV(alphas = [10**-i for i in range (-5, 10)], fit_intercept = False, scoring = 'neg_mean_squared_error', cv = cv_ridge)\n\n# Fit the ridge regression\ngs_ridge.fit(test_pred[models_names],test_pred['rating'] )\n\nprint (\"Best lambda :\", gs_ridge.alpha_, \"\\n\")\nprint (\"Optimal weight:\", gs_ridge.coef_, \"\\n\")\n\n# Make predictions on the submission set\nsubmission_pred['ridge_rating'] = gs_ridge.predict(submission_pred[models_names])\n\n# Create a csv file\nsubmission_path = 'results.csv'\ncreate_csv(submission_path, submission_pred)\n\nprint(\"The submission file was completed successfully as 'result.csv'\")","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"313146390","text":"from pythonforandroid.recipe import Recipe\nfrom pythonforandroid.util import current_directory\nfrom pythonforandroid.logger import shprint\nfrom multiprocessing import cpu_count\nfrom os.path import realpath\nimport sh\n\n\nclass LibShineRecipe(Recipe):\n version = 'c72aba9031bde18a0995e7c01c9b53f2e08a0e46'\n url = 'https://github.com/toots/shine/archive/{version}.zip'\n\n built_libraries = {'libshine.a': 'lib'}\n\n def build_arch(self, arch):\n with current_directory(self.get_build_dir(arch.arch)):\n env = 
self.get_recipe_env(arch)\n shprint(sh.Command('./bootstrap'))\n configure = sh.Command('./configure')\n shprint(configure,\n '--host=arm-linux',\n '--enable-pic',\n '--disable-shared',\n '--enable-static',\n '--prefix={}'.format(realpath('.')),\n _env=env)\n shprint(sh.make, '-j', str(cpu_count()), _env=env)\n shprint(sh.make, 'install', _env=env)\n\n\nrecipe = LibShineRecipe()\n","sub_path":"android_binding/.buildozer/android/platform/python-for-android/pythonforandroid/recipes/libshine/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"134090237","text":"# adminPswd = a\n# newPassword = b\n# MaxTries = 3\n# for i in range(MaxTries):\n# allPasswords = adminPswd + '\\n' + newPassword\n# cmdInput = user\n# print (cmdInput)\n# return\n# sum = adminPswd + newPassword\n# print (sum)\n\n\n\n# number1 = 10\n# number2 = 20\n#\n# def sum():\n# sum = number1 + number2\n# print (sum)\n# return\n# sum2 = number1 * number2\n# print (sum2)\n#\n# sum()\n\n\n# def getExternalConnectionUser(self, target=core):\n# entities = self.getTargetEntitiesFromEcu(target=target)\n# if len(entities) > 0:\n# accessData = entities[0].access\n# privUser = accessData[\"privileged_user\"]\n# if privUser:\n# return privUser\n# # If no user was defined in discovery, return root\n# logger().debug(\"WARNING: No users found for %s\" % self.getComponentName())\n# else:\n# logger().debug(\"WARNING: No entities found for %s\" % self.getComponentName())\n# return constants.ROOT_USERNAME\n\n\n\n\n\n\n\n# entities = {\"em_user\": \"oemuser\",\n# \"privileged_user\": \"nimbulaadmin\"\n# },\n#\n# accessData = entities[0]\n# print (accessData)\n#\n# privUser = accessData[\"privileged_user\"]\n#\n# print (privUser)\n\nTARGET_CN_OCC = occ\nTARGET_CN_OCMC = ocmcn\nTARGET_CN_CSI = custcn\n\nif component.get_cli_target_str() == TARGET_CN_OCC or TARGET_CN_OCMC or TARGET_CN_CSI:\n print (\"checking after if condition\")\n print (component.get_cli_target_str())","sub_path":"Oc/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"471252034","text":"from django.shortcuts import render\r\nfrom profiles.models import *\r\nfrom .forms import UpdateAccountForm\r\nfrom django.contrib.auth.decorators import login_required\r\n# Create your views here.\r\ndef settings_menu(request):\r\n return render(request, template_name='usersettings/usersettings.html', context={})\r\n\r\n@login_required\r\ndef edit_account(request):\r\n form = UpdateAccountForm(data=request.POST, instance=request.user)\r\n if request.method == 'POST' and form.is_valid():\r\n update = form.save(commit=False)\r\n update.user = request.user\r\n update.save()\r\n else:\r\n form = UpdateAccountForm(instance=request.user)\r\n\r\n return render(request, 'usersettings/edit_account.html', {'form': form})\r\n","sub_path":"neurosphere/usersettings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"527519203","text":"#encoding: utf-8\nfrom OpenOrange import *\n\nParentEventLog = SuperClass(\"EventLog\", \"Record\", __file__)\nclass EventLog(ParentEventLog):\n INSERT = 0\n UPDATE = 1\n DELETE = 2\n \n @classmethod\n def getEventsOnDate(classobj, eventsdate, recordname = None, uniqueids = False):\n return 
classobj.getEventsOnPeriod(eventsdate, eventsdate, recordname, uniqueids)\n\n @classmethod\n def getEventsOnPeriod(classobj, fromdate, todate, recordname = None, uniqueids = False):\n query = Query()\n query.sql = \"SELECT {recInternalId} as recInternalId \"\n query.sql += \"FROM [EventLog] \"\n if recordname: query.sql += \"WHERE?AND {RecordName} = s|%s| \" % recordname\n query.sql += \"WHERE?AND {TransDate} BETWEEN d|%s| AND d|%s| \" % (fromdate, todate)\n if uniqueids: query.sql += \"GROUP BY {recInternalId} \"\n query.sql += \"ORDER BY {TransTime} \"\n if query.open():\n return query\n return None ","sub_path":"base/records/EventLog.py","file_name":"EventLog.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"233963152","text":"import sys, os\n# path = os.path.split(__file__)[0]\n# sys.path.append(path + \"..\\dependencies\")\n# from thread import ThreadCounter as thrC\n\n\n\ndef main():\n a = int(input(\"value\"))\n print(a+10)\nif __name__ == \"__main__\":\n main()","sub_path":"solutions/templates/python3/code/Source.py","file_name":"Source.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"325761292","text":"import time\nimport matplotlib.pyplot as plt\nimport tkinter as tk\n\nfrom tkinter import ttk\nfrom methods.str_para_funcao import *\nfrom methods.integracao.trapezios import *\n\nFONTE = (\"Verdana\", 12)\nFONTE_TEXTO = (\"Verdana\", 15)\nFONTE_TITULO = (\"Verdana\", 15, \"bold\")\n\nBUTTON_WIDTH = 40\nBUTTON_FILL = \"y\"\n\nBG_COLOR = \"#1A5276\"\n\nclass PaginaIntegracaoTrap(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n\n title_label = tk.Label(self,\n text = \"INTEGRAÇÃO TRAPÉZIOS\",\n font = FONTE_TITULO,\n foreground = \"white\",\n bg = BG_COLOR,\n height = 4\n )\n title_label.pack(side = \"top\", fill = \"x\")\n\n #---------------- input funcao -----------------#\n func_container = tk.Label(self)\n func_container.pack(side = \"top\", pady = 20)\n \n func_text = tk.Label(\n func_container,\n text = \"Função:\"\n ).pack(side = \"left\")\n\n self.func_entry = tk.Entry(\n func_container,\n width = BUTTON_WIDTH\n )\n self.func_entry.pack(side = \"right\")\n\n #------------- intervalo [a ,b] ----------------#\n interval_label = tk.Label(self)\n interval_label.pack(side = \"top\")\n ##\n\n interval_text = tk.Label(\n interval_label,\n text = \"Intervalo [a, b]:\"\n ).pack(side = \"left\")\n\n self.a_entry = tk.Entry(\n interval_label,\n width = 10\n )\n self.a_entry.pack(side = \"left\", padx = 20)\n\n self.b_entry = tk.Entry(\n interval_label,\n width = 10\n )\n self.b_entry.pack(side = \"right\", fill = BUTTON_FILL)\n\n #------------------ pontos ------------------#\n pontos_container = tk.Label(self)\n pontos_container.pack(side = \"top\", pady = (20, 0))\n\n pontos_text = tk.Label(\n pontos_container,\n text = \"Pontos [2, 1.000]:\"\n ).pack(side = \"left\")\n\n self.pontos_entry = tk.Entry(\n pontos_container,\n width = BUTTON_WIDTH\n )\n self.pontos_entry.pack(side = \"right\")\n\n #----------------- grafico (y/n) -------------------#\n self.graph = tk.IntVar()\n graph_radio = tk.Checkbutton(self, text = \"Plotar Gráfico\", variable = self.graph)\n graph_radio.pack(side = \"top\", pady = (10, 0))\n graph_radio.var = self.graph\n \n #----------------- botao calcular ------------------#\n calcular_button = tk.Button(\n self,\n text = \"Calcular\",\n bg = \"white\",\n width = 
BUTTON_WIDTH,\n command = self.integrar_por_trapezios\n )\n calcular_button.pack(side = \"top\", fill = BUTTON_FILL, pady = 10)\n\n #----------------- resultado -----------------------#\n self.resultado_area = tk.Label(\n self,\n text = \"Raiz:\\n\"\n ) \n self.resultado_area.pack(side = \"top\", fill = BUTTON_FILL, pady = 10)\n \n self.resultado_time = tk.Label(\n self,\n text = \"Tempo de execução:\\n\"\n ) \n self.resultado_time.pack(side = \"top\", fill = BUTTON_FILL, pady = 10)\n\n\n margin_label = tk.Label(self, bg = BG_COLOR)\n margin_label.pack(side = \"bottom\", fill = \"x\", pady = (10, 0))\n \n def integrar_por_trapezios(self):\n \n f = self.func_entry.get()\n f = expressao_generica(f)\n \n a = float(self.a_entry.get())\n b = float(self.b_entry.get())\n \n pontos = int(self.pontos_entry.get())\n if pontos < 2:\n pontos = 2\n if pontos > 1_000:\n pontos = 1_000\n\n inicio = time.time()\n area = regra_trapezios(f.f, a, b, pontos)\n fim = time.time()\n self.resultado_area[\"text\"] = \"Área:\\n{}\".format(area)\n\n self.resultado_time[\"text\"] = \"Tempo de execução:\\n{}\".format(fim - inicio)\n self.update_idletasks()\n\n # plot do grafico #\n if self.graph.get() == 1:\n grafico_trapezios(f.f, int(a), int(b), pontos, self.func_entry.get().replace(\"**\", \"^\").replace(\"*\", \"\"))\n plt.show()\n","sub_path":"src/app/pages/PaginaIntegracaoTrap.py","file_name":"PaginaIntegracaoTrap.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"484677680","text":"# https://github.com/spro/practical-pytorch\nimport time\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nfrom name_dataset import NameDataset\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nHIDDEN_SIZE = 100\nN_LAYERS = 1\nBATCH_SIZE = 32\nN_EPOCHS = 20\n\ntest_dataset = NameDataset(is_test_set=True)\ntest_loader = DataLoader(dataset=test_dataset,\n batch_size=BATCH_SIZE,\n shuffle=True)\n\n\ntrain_dataset = NameDataset(is_test_set=False)\ntrain_loader = DataLoader(dataset=train_dataset,\n batch_size=BATCH_SIZE,\n shuffle=True)\n\nn_countries = len(train_dataset.get_countries())\nN_CHARS = 128 # ASCII\n\n\ndef time_since(since):\n s = time.time() - since\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef cuda_variable(tensor):\n # Do cuda() before wrapping with variable\n if torch.cuda.is_available():\n return Variable(tensor.cuda())\n else:\n return Variable(tensor)\n\n\n# Sting to char tensor\ndef pad_sequences(vectorized_seqs, seq_lengths, countries):\n seq_tensor = torch.zeros((len(vectorized_seqs), seq_lengths.max())).long()\n for idx, (seq, seqlen) in enumerate(zip(vectorized_seqs, seq_lengths)):\n seq_tensor[idx, :seqlen] = torch.LongTensor(seq)\n\n # SORT YOUR TENSORS BY LENGTH!\n seq_lengths, perm_idx = seq_lengths.sort(0, descending=True)\n seq_tensor = seq_tensor[perm_idx]\n\n target = countries2tensor(countries)\n target = target[perm_idx]\n\n return cuda_variable(seq_tensor), \\\n cuda_variable(seq_lengths), \\\n cuda_variable(target)\n\n\n# vectorized_seqs, seq_lengths\ndef make_variables(names, countries):\n sequence_and_length = [str2ascii_arr(name) for name in names]\n vectorized_seqs = [sl[0] for sl in sequence_and_length]\n seq_lengths = [sl[1] for sl in sequence_and_length]\n return pad_sequences(vectorized_seqs, torch.LongTensor(seq_lengths), countries)\n\n\ndef 
str2ascii_arr(msg):\n arr = [ord(c) for c in msg]\n return arr, len(arr)\n\n\ndef countries2tensor(countries):\n country_ids = [train_dataset.get_country_id(\n country) for country in countries]\n return torch.LongTensor(country_ids)\n\n\nclass RNNClassifier(nn.Module):\n\n def __init__(self, input_size, hidden_size, output_size, n_layers=1):\n super(RNNClassifier, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.n_layers = n_layers\n\n self.embedding = nn.Embedding(input_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size, n_layers)\n self.fc = nn.Linear(hidden_size, output_size)\n\n def forward(self, input, seq_lengths):\n # Note: we run this all at once (over the whole input sequence)\n # input shape: S x B (input size)\n batch_size = len(input[1])\n\n # Get hidden\n hidden = self._init_hidden(batch_size)\n\n # input shape: S x B (input size)\n embeded = self.embedding(input) # S x B -> S x B x I (embedding size)\n\n gru_input = embeded\n # FIXME: is this a right way? It makes training and testing slower\n # With pack: [1m 0s (20 95%) 0.0596]\n # Without pack: [0m 45s (20 95%) 0.0901]\n # pack them up nicely\n # gru_input = pack_padded_sequence(embeded, seq_lengths.cpu().numpy())\n\n # To compact weights again call flatten_parameters().\n self.gru.flatten_parameters()\n\n output, hidden = self.gru(gru_input, hidden)\n fc_output = self.fc(hidden[-1]) # Use the last layer\n print(\"Final output size\", fc_output.size())\n return fc_output\n\n def _init_hidden(self, batch_size):\n hidden = torch.zeros(self.n_layers, batch_size, self.hidden_size)\n return cuda_variable(hidden)\n\n\n# Train for a given src and target\n# It feeds single string to demonstrate seq2seq\n# It's extremely slow, and we need to use (1) batch and (2) data parallelism\n# http://pytorch.org/tutorials/beginner/former_torchies/parallelism_tutorial.html.\ndef train(names, countries):\n input, seq_lengths, target = make_variables(names, countries)\n\n # transpose to make S(sequence) x B (batch)\n output = classifier(input.t(), seq_lengths)\n\n # FIXME: output size is Batch*n_GPUs * Inputsize\n print(\"output size\", output.size())\n loss = criterion(output, target)\n\n classifier.zero_grad()\n loss.backward()\n optimizer.step()\n\n return loss.data[0]\n\n\ndef test():\n print(\"evaluating ...\")\n correct = 0\n\n for i, (names, countries) in enumerate(test_loader):\n input, seq_lengths, target = make_variables(names, countries)\n\n # transpose to make S(sequence) x B (batch)\n output = classifier(input.t(), seq_lengths)\n print(i, output.size())\n print(target.size())\n\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n print('\\nTest set: Accuracy: {}/{} ({:.0f}%)\\n'.format(\n correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n\n\nif __name__ == '__main__':\n\n classifier = RNNClassifier(N_CHARS, HIDDEN_SIZE, n_countries, N_LAYERS)\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n classifier = nn.DataParallel(classifier)\n\n if torch.cuda.is_available():\n classifier.cuda()\n\n optimizer = torch.optim.Adam(classifier.parameters(), lr=0.001)\n criterion = nn.CrossEntropyLoss()\n\n start = time.time()\n print(\"Training for %d epochs...\" % N_EPOCHS)\n for epoch in range(1, N_EPOCHS + 1):\n loss = 0\n\n for i, (names, countries) in enumerate(train_loader):\n loss += train(names, countries) # Batch size is 1\n\n if i % 100 == 0:\n print('[%s (%d %d%%) %.4f]' %\n (time_since(start), epoch, i * BATCH_SIZE * 100 / len(train_loader.dataset), loss / (i + 1)))\n\n # Testing\n test()\n","sub_path":"12_4_name_classify.py","file_name":"12_4_name_classify.py","file_ext":"py","file_size_in_byte":6131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"574691492","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 2 11:08:10 2015\n\n@author: jyoung\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats\nimport SimpleITK as sitk\n\n\nclass IndexTracker(object):\n def __init__(self, ax, X, Xover=None):\n self.ax = ax\n self.X = X\n self.Xover = Xover\n self.slices, numrows, numcols = X.shape\n self.idx = self.slices//2\n \n def format_coord(x, y):\n col = int(x + 0.5)\n row = int(y + 0.5)\n if col>=0 and col=0 and row 10.0, X2)\n tracker = IndexTracker(ax, X, Xmask)\n else:\n tracker = IndexTracker(ax, X)\n fig.canvas.mpl_connect('scroll_event', tracker.onscroll)\n plt.show()\n","sub_path":"imgscroll.py","file_name":"imgscroll.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"428097218","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nimport json\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom collections import OrderedDict\nfrom PIL import Image\n\ndef load_data(data_path = \"./flowers\"):\n data_dir = data_path\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n\n test_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n\n valid_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n\n train_data = datasets.ImageFolder(train_dir, transform = train_transforms)\n test_data = datasets.ImageFolder(test_dir, transform = test_transforms)\n valid_data = datasets.ImageFolder(valid_dir, transform = valid_transforms)\n\n trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)\n testloader = torch.utils.data.DataLoader(test_data, batch_size=32)\n validloader = torch.utils.data.DataLoader(valid_data, batch_size=32)\n return 
trainloader, testloader, validloader\n\ndef load_pretrained_model(model_name, hidden_units):\n if model_name == 'densenet':\n model = models.densenet121(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(1024, hidden_units)),\n ('relu1', nn.ReLU()),\n ('drop1', nn.Dropout(p = 0.4)),\n ('fc2', nn.Linear(hidden_units, 256)),\n ('relu2', nn.ReLU()),\n ('drop2', nn.Dropout(p = 0.3)),\n ('fc3', nn.Linear(256, 102)),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n \n else:\n model = models.vgg13(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(25088, hidden_units)),\n ('relu1', nn.ReLU()),\n ('drop1', nn.Dropout(p = 0.4)),\n ('fc2', nn.Linear(hidden_units, 256)),\n ('relu2', nn.ReLU()),\n ('drop2', nn.Dropout(p = 0.3)),\n ('fc3', nn.Linear(256, 102)),\n ('output', nn.LogSoftmax(dim=1))\n ])) \n model.classifier = classifier\n return model\n\ndef get_model_and_optimizer(args):\n model = load_pretrained_model(args['architecture'], args['hidden_size'])\n if (args['optimizer'] == 'adam'):\n optimizer = optim.Adam(model.classifier.parameters(), lr=args['learning_rate'])\n \n return model, optimizer\n\ndef train_model(model, optimizer, learning_rate,train_loader,valid_loader,criterion,epoch_number, checkpoint_path):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.to(device);\n \n epochs = epoch_number\n steps = 0\n running_loss = 0\n print_every = 50\n for epoch in range(epochs):\n save_checkpoint({\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'epoch': epoch\n }, checkpoint_path)\n \n for inputs, labels in train_loader:\n steps += 1\n # Move input and label tensors to the default device\n inputs, labels = inputs.to(device), labels.to(device)\n \n optimizer.zero_grad()\n \n outputs = model.forward(inputs)\n\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if steps % print_every == 0:\n test_loss = 0\n accuracy = 0\n model.eval()\n with torch.no_grad():\n for inputs, labels in valid_loader:\n inputs, labels = inputs.to(device), labels.to(device)\n outputs = model.forward(inputs)\n batch_loss = criterion(outputs, labels)\n \n test_loss += batch_loss.item()\n \n # Calculate accuracy\n ps = torch.exp(outputs)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n \n print(f\"Epoch {epoch+1}/{epochs}.. \"\n f\"Training Loss: {running_loss/print_every:.3f}.. \"\n f\"validation Loss: {test_loss/len(valid_loader):.3f}.. 
\"\n f\"Test Accuracy: {accuracy/len(valid_loader):.3f}\")\n running_loss = 0\n model.train()\n \ndef validation(model,test_loader,criterion):\n test_loss = 0\n accuracy = 0\n total = 0 \n correct = 0\n # Turn off gradients for validation, saves memory and computations\n with torch.no_grad():\n model.eval()\n for images, labels in testloader:\n images, labels = images.to(device), labels.to(device)\n outputs = model(images)\n test_loss += criterion(outputs, labels)\n total += labels.size(0) \n ps = torch.exp(outputs)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n correct += torch.sum(equals.type(torch.FloatTensor)).item()\n accuracy = correct*100/total\n print('Accuracy of the network on the test images:{:.1f} %'.format(accuracy))\n \ndef save_checkpoint(state, file_path):\n torch.save(state, file_path)\n\ndef load_json(filename):\n with open(filename, 'r') as f:\n cat_to_name = json.load(f, object_pairs_hook=OrderedDict)\n class_labels = train_data.classes\n return cat_to_name,class_labels\n\ndef load_saved_model(model, optimizer, experiment):\n print(\"=> loading checkpoint '{}'\".format(experiment))\n \n checkpoint = torch.load(f'experiments/{experiment}/checkpoint.pt')\n\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n epoch = checkpoint['epoch']\n return model, optimizer, epoch\n\n\ndef imshow(image, ax=None, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n \n # PyTorch tensors assume the color channel is the first dimension\n # but matplotlib assumes is the third dimension\n image = image.transpose((1, 2, 0))\n \n # Undo preprocessing\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n \n # Image needs to be clipped between 0 and 1 or it looks like noise when displayed\n image = np.clip(image, 0, 1)\n if title is not None:\n ax.set_title(title)\n ax.imshow(image)\n \n return ax\n\ndef predict(image_path, model, cat_to_name, topk=5):\n # Predict the class (or classes) of an image using a trained deep learning model.\n model.eval()\n image = process_image(image_path)\n image_tensor = torch.from_numpy(image).type(torch.FloatTensor)\n image_tensor.resize_([1, 3, 224, 224])\n model.to('cpu')\n result = torch.exp(model(image_tensor))\n ps, index = result.topk(topk)\n ps, index = ps.detach(), index.detach()\n ps.resize_([topk])\n index.resize_([topk])\n \n ps, index = ps.tolist(), index.tolist()\n labels = []\n for i in index:\n labels.append(cat_to_name[str(i)])\n return ps, index, labels\n\ndef process_image(image):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n img_loader = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n \n pil_image = Image.open(image)\n pil_image = img_loader(pil_image).float()\n \n np_image = np.array(pil_image) \n \n return np_image","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":9529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"99851773","text":"import random\nimport urllib.request\nimport os\nimport requests\nimport json\n\n\npth = os.getcwd()\ndef getLink(n):\n r = requests.get(f'http://www.splashbase.co/api/v1/images/{n}')\n try:\n url2=json.loads(r.text)['url']\n 
print(json.loads(r.text)['url'])\n downloader(url2)\n except:\n print(\".\", end='')\n getLink(random.randint(1,1000))\n\ndef downloader(image_url):\n full_file_name = 'image.jpg'\n urllib.request.urlretrieve(image_url,full_file_name)\n\ndef change_wallpaper():\n string =\"reg add \\\"HKEY_CURRENT_USER\\\\Control Panel\\\\Desktop\\\" /v Wallpaper /t REG_SZ /d {0}\\\\image.jpg /f\".format(pth)\n os.system(string)\n os.system(\"RUNDLL32.EXE user32.dll,UpdatePerUserSystemParameters\")\n # os.system(\"del .\\image.jpg\")\n\ndef splashbase():\n nm = random.randint(1,7000)\n getLink(nm)\n change_wallpaper()\n\ndef picsum():\n nm = random.randint(1,1200)\n url = 'https://i.picsum.photos/id/{0}/1920/1080.jpg'.format(nm)\n print(url)\n downloader(url)\n change_wallpaper()\n\ndef run():\n v = int(input('1. splashbase\\n2. picsum (recommended)\\n'))\n os.system(\"cls\")\n if v == 1:\n splashbase()\n elif v == 2:\n picsum()\n else:\n print(\"try again2\")\n\nwhile(True):\n run()\n v = input(\"1. Press enter to continue\\n2. q to quit\")\n os.system(\"cls\")\n if v == 'q':\n break","sub_path":"wallpaper.py","file_name":"wallpaper.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"63171879","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def removeElements(self, head: ListNode, val: int) -> ListNode:\n dummyNode = ListNode(1000)\n currentNode, result = head, dummyNode\n\n while currentNode != None:\n if currentNode.val != val:\n result.next = currentNode\n result = currentNode\n currentNode = currentNode.next\n\n result.next = None\n return dummyNode.next\n\n\ndef main():\n head = ListNode(0)\n current = head\n nums = [1, 2, 3, 4, 3, 5]\n for i in range(len(nums)):\n node = ListNode(nums[i])\n current.next = node\n current = node\n\n sol = Solution()\n printer(head)\n res = sol.removeElements(head, 3)\n printer(res)\n\n\ndef printer(head: ListNode):\n while head != None:\n print(head.val, end=\",\")\n head = head.next\n print()\n\n\nmain()\n","sub_path":"leetcode/old_session/203. Remove Linked List Elements.py","file_name":"203. 
Remove Linked List Elements.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"82116461","text":"# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\nfrom typing import List, Optional\n\nimport numpy as np\n\nimport cirq\nfrom cirq_google.ops import SycamoreGate\nfrom cirq_google.transformers.analytical_decompositions import two_qubit_to_sycamore\n\n\n@cirq._compat.deprecated_class(\n deadline='v1.0',\n fix='Use cirq.optimize_for_target_gateset and cirq_google.SycamoreTargetGateset instead.',\n)\nclass ConvertToSycamoreGates(cirq.PointOptimizer):\n \"\"\"Attempts to convert non-native gates into SycamoreGates.\n\n First, checks if the given operation is already a native sycamore operation.\n\n Second, checks if the operation has a known unitary. If so, and the gate is a 1-qubit or\n 2-qubit gate, then performs circuit synthesis of the operation.\n\n Third, attempts to `cirq.decompose` to the operation.\n\n Fourth, if ignore_failures is set, gives up and returns the gate unchanged. Otherwise raises\n a TypeError.\n \"\"\"\n\n def __init__(\n self, tabulation: Optional[cirq.TwoQubitGateTabulation] = None, ignore_failures=False\n ) -> None:\n \"\"\"Inits ConvertToSycamoreGates.\n\n Args:\n tabulation: If set, a tabulation for the Sycamore gate to use for\n decomposing Matrix gates. If unset, an analytic calculation is\n used for Matrix gates. To get a TwoQubitGateTabulation, call the\n `two_qubit_gate_product_tabulation` method with a base gate (in this case,\n usually cirq_google.SYC) and a maximum infidelity.\n ignore_failures: If set, gates that fail to convert are forwarded\n unchanged. 
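(In that case convert() passes on_stuck_raise=None to\n cirq.decompose, so the unconverted operation is returned as-is.)\n 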
If not set, conversion failures raise a TypeError.\n\n Raises:\n ValueError: If the tabulation is not a `TwoQubitGateTabulation`.\n \"\"\"\n super().__init__()\n self.ignore_failures = ignore_failures\n if tabulation is not None and not isinstance(tabulation, cirq.TwoQubitGateTabulation):\n raise ValueError(\"provided tabulation must be of type cirq.TwoQubitGateTabulation\")\n self.tabulation = tabulation\n\n def _is_native_sycamore_op(self, op: cirq.Operation) -> bool:\n \"\"\"Check if the given operation is native to a Sycamore device.\n\n Args:\n op: Input operation.\n\n Returns:\n True if the operation is native to the gmon, false otherwise.\n \"\"\"\n gate = op.gate\n\n if isinstance(\n gate,\n (\n SycamoreGate,\n cirq.MeasurementGate,\n cirq.PhasedXZGate,\n cirq.PhasedXPowGate,\n cirq.XPowGate,\n cirq.YPowGate,\n cirq.ZPowGate,\n ),\n ):\n return True\n\n if (\n isinstance(gate, cirq.FSimGate)\n and math.isclose(gate.theta, np.pi / 2)\n and math.isclose(gate.phi, np.pi / 6)\n ):\n return True\n\n if gate is None and isinstance(op.untagged, cirq.CircuitOperation):\n subcircuit = op.untagged.circuit\n return all(self._is_native_sycamore_op(op) for op in subcircuit.all_operations())\n\n return False\n\n def _convert_one(self, op: cirq.Operation) -> cirq.OP_TREE:\n \"\"\"The main conversion step for the PointOptimizer.\"\"\"\n if not (cirq.has_unitary(op) and 1 <= cirq.num_qubits(op) <= 2):\n return NotImplemented\n\n if cirq.num_qubits(op) == 1:\n return [*cirq.merge_single_qubit_gates_to_phxz(cirq.Circuit(op)).all_operations()]\n\n known_decomp = two_qubit_to_sycamore.known_2q_op_to_sycamore_operations(op)\n if known_decomp is not None:\n return known_decomp\n if self.tabulation is not None:\n return two_qubit_to_sycamore._decompose_arbitrary_into_syc_tabulation(\n op, self.tabulation\n )\n return two_qubit_to_sycamore.two_qubit_matrix_to_sycamore_operations(\n op.qubits[0], op.qubits[1], cirq.unitary(op)\n )\n\n def convert(self, op: cirq.Operation) -> List[cirq.Operation]:\n def on_stuck_raise(bad):\n return TypeError(\n \"Don't know how to work with {!r}. 
\"\n \"It isn't a native xmon operation, \"\n \"a 1 or 2 qubit gate with a known unitary, \"\n \"or composite.\".format(bad)\n )\n\n return cirq.decompose(\n op,\n keep=self._is_native_sycamore_op,\n intercepting_decomposer=self._convert_one,\n on_stuck_raise=None if self.ignore_failures else on_stuck_raise,\n preserve_structure=True, # keep CircuitOps but decompose their contents\n )\n\n def optimization_at(\n self, circuit: cirq.Circuit, index: int, op: cirq.Operation\n ) -> Optional[cirq.PointOptimizationSummary]:\n if op.gate is None and not isinstance(op.untagged, cirq.CircuitOperation):\n return None\n\n # Check for a SWAP and ZZPowGate together\n if isinstance(op.gate, cirq.ZZPowGate) or op.gate == cirq.SWAP:\n op2 = None\n next_index = circuit.next_moment_operating_on(op.qubits, index + 1)\n if next_index is not None:\n ops_in_front = list({circuit.operation_at(q, next_index) for q in op.qubits})\n if len(ops_in_front) == 1 and ops_in_front[0] is not None:\n op2 = ops_in_front[0]\n else:\n next_index = 0\n if op2 is not None and (\n (op.gate == cirq.SWAP and isinstance(op2.gate, cirq.ZZPowGate))\n or (isinstance(op.gate, cirq.ZZPowGate) and op2.gate == cirq.SWAP)\n ):\n swap_rzz_decomposed = two_qubit_to_sycamore.known_2q_op_to_sycamore_operations(\n cirq.CircuitOperation(cirq.FrozenCircuit(op, op2))\n )\n assert swap_rzz_decomposed is not None\n return cirq.PointOptimizationSummary(\n clear_span=next_index - index + 1,\n clear_qubits=op.qubits,\n new_operations=swap_rzz_decomposed,\n )\n\n converted = self.convert(op)\n if len(converted) == 1 and converted[0] is op:\n return None\n\n return cirq.PointOptimizationSummary(\n clear_span=1, new_operations=converted, clear_qubits=op.qubits\n )\n","sub_path":"cirq-google/cirq_google/optimizers/convert_to_sycamore_gates.py","file_name":"convert_to_sycamore_gates.py","file_ext":"py","file_size_in_byte":6936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"153389233","text":"#!/usr/bin/env mcsplot\n# VMB2 script but using nc files\nimport sys,pickle,time\nimport numpy as np\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom matplotlib.colors import SymLogNorm,LogNorm\nfrom netCDF4 import Dataset,num2date\nimport pandas as pd\nfrom scipy.interpolate import lagrange\n\ndef moving_average(a, n):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n-1:]/n\n\n# calculate the buoyancy integrals\n# assume that the lower bound is the sea surface rather than the LCL\ng = 9.80655 # m s-2\nsst1 = 295.6\nsst2 = 295.2\n\n# load all data year by year\nbasedir = '/rigel/home/scs2229/top-secret/MCS_clim/ausgabe/meteo_clim/nc/'\n\nf1 = Dataset(basedir + 'colloc_ENDJFd1_ALL.nc','r+')\nT1 = np.asarray(f1.variables['temperature']) # (5018,32)\nqv1 = np.asarray(f1.variables['qv'])\nqc1 = np.asarray(f1.variables['qc'])\nP1 = np.asarray(f1.variables['surface pressure'])\n\nf2 = Dataset(basedir + 'colloc_LNDJFd1_ALL.nc','r+')\nT2 = np.asarray(f2.variables['temperature'])\nqv2 = np.asarray(f2.variables['qv'])\nqc2 = np.asarray(f2.variables['qc'])\nP2 = np.asarray(f2.variables['surface pressure'])\n\n# create the average profile and its spread for all sets\nalt = np.asarray([15418.43,14415.44,13470.22,12579.42,11739.87,10948.81,10191.54,\\\n 9458.63,8749.51,8064.40,7403.97,6769.08,6160.62,5579.46,5026.39,4502.09,\\\n 4007.11,3541.93,3106.86,2702.14,2327.89,1984.11,1670.7,1387.43,1133.93,\\\n 
909.7,714.05,546.11,404.74,288.57,195.85,124.48])\npress = np.asarray([122.6137,142.9017,165.0886,189.1466,215.0251,242.6523,272.0593,\\\n 303.2174,336.0439,370.4072,406.1328,443.0086,480.7907,519.2093,557.9734,\\\n 596.7774,635.3060,673.2403,710.2627,746.0635,780.3455,812.8303,843.2634,\\\n 871.4203,897.1118,920.1893,940.5511,958.1477,972.9868,985.1399,994.7472,\\\n 1002.0236])\n\n# calculate the virtual temperature profiles for each depth and phase\nTv1 = T1*((0.622+qv1)/(0.622*(1+qv1)))\nTv2 = T2*((0.622+qv2)/(0.622*(1+qv2)))\n\n# calculate the mean Tv over all systems and then perturbations\nTvmean1 = np.nanmean(T1)*((0.622+np.nanmean(qv1))/(0.622*(1+np.nanmean(qv1))))\nTvprime1 = Tv1 - Tvmean1\nTvmean2 = np.nanmean(T2)*((0.622+np.nanmean(qv2))/(0.622*(1+np.nanmean(qv2))))\nTvprime2 = Tv2 - Tvmean2\n\n# calculate condensate perturbations\nqcprime1 = qc1 - np.nanmean(qc1)\nqcprime2 = qc2 - np.nanmean(qc2)\n\n# buoyancy profiles = g Tv'/bar(Tv) - g l', [=] m2 s-1\nB1 = g*Tvprime1/Tvmean1 - g*qcprime1\nB2 = g*Tvprime2/Tvmean2 - g*qcprime2\n\n# calculate a hydrostatic pressure reference\nRair = 287.058 # J kg-1 K-1\nrhoAir = 1.2041 # this should be some empirical correlation w/ T, no?\npsurf1 = rhoAir*Rair*sst1\npsurf2 = rhoAir*Rair*sst2\n#psurf = 101325\nphydro1 = psurf1*np.exp(-g*alt/(Rair*Tv1))\nphydro2 = psurf2*np.exp(-g*alt/(Rair*Tv2))\n\n# calculate the perturbation from hydrostatic\npp1 = press*100 - phydro1\npp2 = press*100 - phydro2\n\n# density of dry air and vertical gradient of pp\nrho1 = press*100/(Rair*T1)\nrho2 = press*100/(Rair*T2)\n\n# calculate the vertical gradient of pressure perturbations\npf1 = np.zeros((rho1.shape[0],32))\npf2 = np.zeros((rho2.shape[0],32))\npfd1 = np.zeros((rho1.shape[0],32))\npfd2 = np.zeros((rho2.shape[0],32))\nfor kk in np.arange(rho1.shape[0]):\n cc = np.polyfit(alt,pp1[kk],4)\n pf1[kk] = cc[0]*alt**4 + cc[1]*alt**3 + cc[2]*alt**2 + cc[3]*alt + cc[4]\n pfpf = np.poly1d.deriv(np.poly1d(cc[:5]))\n pfd1[kk] = pfpf.c[0]*alt**3 + pfpf.c[1]*alt**2 + pfpf.c[2]*alt + pfpf.c[3]\n pfd1[kk] /= rho1[kk]\n\nfor kk in np.arange(rho2.shape[0]):\n cc = np.polyfit(alt,pp2[kk],4)\n pf2[kk] = cc[0]*alt**4 + cc[1]*alt**3 + cc[2]*alt**2 + cc[3]*alt + cc[4]\n pfpf = np.poly1d.deriv(np.poly1d(cc[:5]))\n pfd2[kk] = pfpf.c[0]*alt**3 + pfpf.c[1]*alt**2 + pfpf.c[2]*alt + pfpf.c[3]\n pfd2[kk] /= rho2[kk]\n \nnp.save('/rigel/home/scs2229/top-secret/MCS_clim/scripts/figs/dragEN_surfp.npy',pfd1)\nnp.save('/rigel/home/scs2229/top-secret/MCS_clim/scripts/figs/dragLN_surfp.npy',pfd2)\nnp.save('/rigel/home/scs2229/top-secret/MCS_clim/scripts/figs/buoyEN_surfp.npy',B1)\nnp.save('/rigel/home/scs2229/top-secret/MCS_clim/scripts/figs/buoyLN_surfp.npy',B2)\nsys.exit()\n\n# plot the buoyancy profiles\nfs = 10\ny1 = 300 # 122\ny2 = 1002\nfig = plt.figure(figsize=(11,7.5))\nax1 = plt.subplot2grid((2,3),(0,0))\nax1.plot(np.transpose(B1),press,linewidth=0.5)\nax1.plot(np.nanmean(B1,axis=0),press,linewidth=1.25,color='k')\nax1.plot([0,0],[y1,y2],color='k',linestyle='--',linewidth=0.75)\nax1.tick_params(axis='both',labelsize=fs)\nplt.xlabel(r'El Ni$\\~n$o buoyancy [m s$^{-2}$]'); plt.ylabel('P [hPa]')\nplt.text(0.05,0.9,'(a)',fontsize=fs,fontweight='bold',transform=ax1.transAxes)\nax1.set_ylim([y1,y2])\nax1.set_xlim([-3,2])\nax1.invert_yaxis()\n\nax2 = 
plt.subplot2grid((2,3),(0,1))\nax2.plot(np.transpose(B2),press,linewidth=0.5)\nax2.plot(np.nanmean(B2,axis=0),press,linewidth=1.25,color='k')\nax2.plot([0,0],[y1,y2],color='k',linestyle='--',linewidth=0.75)\nax2.tick_params(axis='both',labelsize=fs)\nplt.xlabel(r'La Ni$\\~n$a buoyancy [m s$^{-2}$]')\nplt.text(0.05,0.9,'(b)',fontsize=fs,fontweight='bold',transform=ax2.transAxes)\nax2.set_ylim([y1,y2])\nax2.set_xlim([-3,2])\nax2.invert_yaxis()\n\nax3 = plt.subplot2grid((2,3),(0,2))\nax3.plot(moving_average((np.nanmean(B1,axis=0)-np.nanmean(B2,axis=0))/np.nanmean(B2,axis=0)*100.,3),\\\n moving_average(press,3),linewidth=1.25,color='k')\nax3.plot([0,0],[y1,y2],color='k',linestyle='--',linewidth=0.75)\nplt.text(0.05,0.9,'(c)',fontsize=fs,fontweight='bold',transform=ax3.transAxes)\nplt.xlabel(r'(EN-LN)/LN $\\Delta$ B [%]')\nax3.set_ylim([y1,y2])\nax3.invert_yaxis()\n\nax4 = plt.subplot2grid((2,3),(1,0))\nax4.plot(np.transpose(pfd1[:200:]),alt/1000,linewidth=0.5)\nax4.plot(np.nanmean(pfd1,axis=0),alt/1000,linewidth=1.25,color='k')\nax4.plot([0,0],[0,15],color='k',linestyle='--',linewidth=0.75)\nplt.ylim([0,10])\nax4.set_xlim([-1,2])\nax4.tick_params(axis='both',labelsize=fs)\nplt.xlabel(r'El Ni$\\~n$o pressure '\n '\\n'\n r'gradient force [m s$^{-2}$]')\nplt.ylabel('z [km]')\nplt.text(0.05,0.9,'(d)',fontsize=fs,fontweight='bold',transform=ax4.transAxes)\n\nax5 = plt.subplot2grid((2,3),(1,1))\nax5.plot(np.transpose(pfd2[:200:]),alt/1000,linewidth=0.5)\nax5.plot(np.nanmean(pfd2,axis=0),alt/1000,color='k',linewidth=1.25)\nax5.plot([0,0],[0,10],color='k',linestyle='--',linewidth=0.75)\nplt.ylim([0,10])\nax5.set_xlim([-1,2])\nax5.tick_params(axis='both',labelsize=fs)\nplt.xlabel(r'La Ni$\\~n$a pressure '\n '\\n'\n r'gradient force [m s$^{-2}$]')\nplt.text(0.05,0.9,'(e)',fontsize=fs,fontweight='bold',transform=ax5.transAxes)\n\nax6 = plt.subplot2grid((2,3),(1,2))\nax6.plot((np.nanmean(pfd1,axis=0)-np.nanmean(pfd2,axis=0))/np.nanmean(pfd2,axis=0)*100.,\\\n alt/1000,linewidth=1.25,color='k')\nax6.plot([0,0],[0,10],color='k',linestyle='--',linewidth=0.75)\nplt.text(0.05,0.9,'(f)',fontsize=fs,fontweight='bold',transform=ax6.transAxes)\nax6.set_ylim([0,10])\nax6.set_xlim([-5,7.5])\nplt.xlabel(r'(EN-LN)/LN $\\Delta$ PGF [%]')\n#fig.savefig('/rigel/home/scs2229/top-secret/MCS_clim/ausgabe/meteo_clim/dragBuoyd1_VMB3.pdf',bbox_inches='tight')\nplt.show()\n","sub_path":"VMB3.py","file_name":"VMB3.py","file_ext":"py","file_size_in_byte":7140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"155619973","text":"import os\nimport sys\nfrom django.contrib.gis.utils import LayerMapping\nfrom models import *\n\nmapping_mapping = {\n NHDPoint: nhdpoint_mapping,\n NHDFlowline: nhdflowline_mapping,\n NHDLine: nhdline_mapping,\n NHDArea: nhdarea_mapping,\n NHDWaterbody: nhdwaterbody_mapping\n}\n\ndef run(filename, verbose=True):\n if len(sys.argv) < 2:\n print('Provide file to load as first argument.')\n return 1\n\n for mod, mapping in mapping_mapping.items():\n print(\"Loading \" + str(mod.__name__) + ' with ' + str(mapping) +'\\n')\n lm = LayerMapping(mod, filename, mapping,\n transform=True, layer=mod.__name__, source_srs=4269)\n\n\n lm.save(strict=True, verbose=verbose)\n","sub_path":"newstack/reldb/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"421339311","text":"import keras\nfrom keras.datasets import mnist\nfrom keras.models import 
Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nimport numpy as np\nimport tifffile\nimport os\nimport random\nimport matplotlib.pyplot as plt\n\n\nbatch_size = 128\nnum_classes = 11\nepochs = 12\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n\n# the data, split between train and test sets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nx_train = x_train.astype('float32').reshape(60000,28,28,1)\nx_test = x_test.astype('float32').reshape(10000,28,28,1)\n\n#load the 11th class\ngenerated_path = 'D:/ML/adversarial-machine-learning/generated-images'\n\ngenerated_train = os.listdir(generated_path + '/train')\ntrain_picks = random.choices(generated_train, k = 6000)\nfor file in train_picks:\n\timage = tifffile.imread(generated_path + '/train/' + file)\n\timage = image.reshape(28,28,1)\n\tx_train = np.append(x_train,[image],axis = 0)\n\ty_train = np.append(y_train,[10])\n\ngenerated_test = os.listdir(generated_path + '/test')\ntest_picks = random.choices(generated_test, k = 1000)\n\nfor file in test_picks:\n\timage = tifffile.imread(generated_path + '/test/' + file)\n\timage = image.reshape(28,28,1)\n\tx_test = np.append(x_test,[image],axis = 0)\n\ty_test = np.append(y_test,[10])\n\nx_train /= 255\nx_test /= 255\nprint('x_train shape:', x_train.shape)\nprint('y_train shape', y_train.shape)\n\n# convert class vectors to binary class matrices\n\ny_train = keras.utils.to_categorical(y_train)\ny_test = keras.utils.to_categorical(y_test)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n\t\t\t\t activation='relu',\n\t\t\t\t input_shape=(28,28,1)))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy',\n\t\t\t optimizer=keras.optimizers.Adadelta(),\n\t\t\t metrics=['accuracy'])\n\nhistory = model.fit(x_train, y_train,\n\t\t batch_size=batch_size,\n\t\t epochs=epochs,\n\t\t verbose=1,\n\t\t validation_data=(x_test, y_test),\n\t\t shuffle = True\n\t\t )\n\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', round(score[0],3))\nprint('Test accuracy:', round(score[1],3))\nmodel.save('model.h5')\n\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title(\"Model's Training & Validation accuracy across epochs\")\nplt.ylabel('Accuracy')\nplt.xlabel('Epochs')\nplt.legend(['Train', 'Validation'], loc='lower right')\nplt.savefig('accuracy.png')\nplt.close()\n\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title(\"Model's Training & Validation loss across epochs\")\nplt.ylabel('Loss')\nplt.xlabel('Epochs')\nplt.legend(['Train', 'Validation'], loc = 'upper right')\nplt.savefig('loss.png')\nplt.close()\n","sub_path":"eleventh-class/eleventh-class.py","file_name":"eleventh-class.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"247659909","text":"# -*- coding:utf-8 -*-\nimport time\nfrom datetime import datetime, timedelta\nfrom requests_oauthlib import OAuth1Session\nfrom . 
import TaskBase\n\n\nclass TwitterTask(TaskBase):\n oauth = None\n user_id = None\n\n def setup(self):\n twitter_config = self.config['twitter']\n self.oauth = OAuth1Session(\n twitter_config['consumer_key'],\n twitter_config['consumer_secret'],\n twitter_config['access_token'],\n twitter_config['access_token_secret'],\n )\n self.user_id = twitter_config['user_id']\n\n def clean_friendships(self, _):\n friends = self._get_friends(self.user_id)\n followers = self._get_followers(self.user_id)\n print(\"follow count: %d users\" % len(friends))\n print(\"followers count: %d users\" % len(followers))\n friends = set(friends)\n followers = set(followers)\n print(\"mutual count: %d\" % len(friends & followers))\n remove_user = friends - followers\n users = self._lookup_users(list(remove_user))\n remove_count = 0\n failure_count = 0\n for u in users:\n if \"status\" in u:\n created_at = datetime.strptime(u[\"status\"][\"created_at\"], '%a %b %d %H:%M:%S %z %Y')\n else:\n created_at = datetime(2000, 1, 1)\n now = datetime.now(created_at.tzinfo)\n if now - created_at > timedelta(days=200):\n res = self._remove_user(u[\"id\"])\n if res is None:\n failure_count += 1\n print(\"failed to remove @%s\" % u[\"screen_name\"])\n if failure_count > 3:\n print(\"something wrong happens...\")\n break\n else:\n failure_count = 0\n remove_count += 1\n print(\"remove @%s\" % u[\"screen_name\"])\n time.sleep(1)\n print(\"removed %d users\" % remove_count)\n\n follow_users = followers - friends\n friendships = self._lookup_friendships(list(follow_users))\n follow_count = 0\n failure_count = 0\n for f in friendships:\n if f[\"connections\"] == [\"followed_by\"]:\n res = self._follow_user(f[\"id\"])\n if res is None:\n failure_count += 1\n print(\"failed to follow @%s\" % f[\"screen_name\"])\n if failure_count > 3:\n print(\"something wrong happens...\")\n break\n else:\n failure_count = 0\n follow_count += 1\n print(\"follow @%s\" % f[\"screen_name\"])\n time.sleep(1)\n print(\"followed %d users\" % follow_count)\n\n def _get_friends(self, user_id):\n id_list = list()\n args = {\n 'user_id': user_id,\n 'cursor': -1,\n 'count': 5000\n }\n for _ in range(15):\n res = self.oauth.get('https://api.twitter.com/1.1/friends/ids.json', params=args).json()\n id_list.extend(res['ids'])\n if res['previous_cursor'] == res['next_cursor']:\n break\n args['cursor'] = res['next_cursor']\n return id_list\n\n def _get_followers(self, user_id):\n id_list = list()\n args = {\n 'user_id': user_id,\n 'cursor': -1,\n 'count': 5000\n }\n for _ in range(15):\n res = self.oauth.get('https://api.twitter.com/1.1/followers/ids.json', params=args).json()\n id_list.extend(res['ids'])\n if res['previous_cursor'] == res['next_cursor']:\n break\n args['cursor'] = res['next_cursor']\n return id_list\n\n def _lookup_friendships(self, user_ids):\n user_ids = list(map(str, user_ids))\n friendships_list = []\n for i in range(15):\n ids = user_ids[100*i:100*(i+1)]\n if len(ids) == 0:\n break\n args = {\n 'user_id': \",\".join(ids),\n }\n res = self.oauth.get(\"https://api.twitter.com/1.1/friendships/lookup.json\", params=args).json()\n friendships_list.extend(res)\n return friendships_list\n\n def _lookup_users(self, user_ids):\n user_ids = list(map(str, user_ids))\n user_list = []\n for i in range(15):\n ids = user_ids[100*i:100*(i+1)]\n if len(ids) == 0:\n break\n args = {\n 'user_id': \",\".join(ids),\n }\n res = self.oauth.get(\"https://api.twitter.com/1.1/users/lookup.json\", params=args).json()\n user_list.extend(res)\n return user_list\n\n def 
_follow_user(self, user_id):\n args = {\n 'user_id': user_id\n }\n res = self.oauth.post(\"https://api.twitter.com/1.1/friendships/create.json\", params=args).json()\n return res.get('screen_name')\n\n def _remove_user(self, user_id):\n args = {\n 'user_id': user_id\n }\n res = self.oauth.post(\"https://api.twitter.com/1.1/friendships/destroy.json\", params=args).json()\n return res.get('screen_name')\n","sub_path":"kurofune/task/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"585950862","text":"import sys\nsys.path.append('/git/DeepLearning/NeuralNetwork')\nimport numpy as np\nimport pandas as pd\nfrom Common.Networks.MultiLayersNN import NeuralNetwork\nfrom Common.Optimizers.SGD import *\nfrom Utils.DownloadDatasets import LoadData\n\n(X, Y), (x, y) = LoadData(normalize = True)\ntraining_size = X.shape[0]\nbatch_size = 128\niterations = 1000\noptimizer = SGD()\n\nnetwork = NeuralNetwork(input_size = 784, hidden_size_list = [100],\n output_size = 10)\n\nfor i in range(iterations):\n mask = np.random.choice(training_size, batch_size)\n x_batch = X[mask]\n y_batch = Y[mask]\n\n grads = network.Gradient(x_batch, y_batch)\n optimizer.Update(network.params, grads)\n loss = network.Loss(x_batch, y_batch)\n\n print(loss)\n","sub_path":"NeuralNetwork/Test/Optimizers/SGD.py","file_name":"SGD.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"287517902","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Config',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('key', models.CharField(unique=True, max_length=16, verbose_name='key')),\n ('value', models.CharField(max_length=128, verbose_name='value')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Transaction',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_date', models.DateTimeField(auto_now_add=True, verbose_name='created date')),\n ('modified_date', models.DateTimeField(auto_now=True, verbose_name='modified date')),\n ('order_id', models.PositiveIntegerField(verbose_name='order id', db_index=True)),\n ('transaction_id', models.PositiveIntegerField(verbose_name='order id')),\n ('raw_data', models.TextField(verbose_name='raw data')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"bcash/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"488899411","text":"import sys, os, pwd, signal, time, glob\nfrom resource_management import *\nfrom subprocess import call\n\nclass Master(Script):\n def install(self, env):\n # Install packages listed in metainfo.xml\n self.install_packages(env)\n\n import params\n import status_params\n\n #location of prebuilt package\n snapshot_package='https://s3-eu-west-1.amazonaws.com/precompiledzeppelin/zeppelin-0.5.0-b5c705790196001037e7d080b332d9664ab9b182.tar.gz'\n\n #e.g. 
/var/lib/ambari-agent/cache/stacks/HDP/2.2/services/zeppelin-stack/package\n service_packagedir = os.path.realpath(__file__).split('/scripts')[0]\n\n Execute('find '+service_packagedir+' -iname \"*.sh\" | xargs chmod +x')\n\n #create the log dir if it not already present\n if not os.path.exists(params.stack_log):\n os.makedirs(params.stack_log)\n\n #depending on whether prebuilt option is selected, execute appropriate script\n if params.download_prebuilt:\n\n #Execute('echo master config dump: ' + str(', '.join(params.config['hostLevelParams'])))\n #Execute('echo stack_version_unformatted: ' + params.stack_version_unformatted)\n #Execute('echo hdp_stack_version: ' + params.hdp_stack_version)\n\n\t #Fetch and unzip snapshot build\n Execute('rm -rf ' + params.zeppelin_dir, ignore_failures=True)\n Execute('wget '+snapshot_package+' -O zeppelin.tar.gz')\n Execute('mkdir '+params.zeppelin_dir)\n Execute('tar -zxvf zeppelin.tar.gz -C ' + params.zeppelin_dir)\n Execute('mv '+params.zeppelin_dir+'/*/* ' + params.zeppelin_dir)\n Execute('rm -rf zeppelin.tar.gz')\n\n\n #update the configs specified by user\n self.configure(env)\n\n\n #run setup_snapshot.sh in FIRSTLAUNCH mode\n #Execute(service_packagedir + '/scripts/setup_snapshot.sh '+params.zeppelin_dir+' '+str(params.stack_port)+' '+status_params.zeppelin_piddir+' '+snapshot_package+' '+str(params.executor_mem)+' '+params.stack_log+' '+params.hive_server_host+' >> ' + params.stack_logfile)\n Execute(service_packagedir + '/scripts/setup_snapshot.sh '+params.zeppelin_dir+' '+params.hive_server_host+' '+params.hive_metastore_host+' '+params.hive_metastore_port+' FIRSTLAUNCH >> ' + params.stack_logfile)\n\n else:\n #create the maven dir if not already present\n if not os.path.exists('/root/.m2'):\n os.makedirs('/root/.m2')\n Execute('cp '+service_packagedir+'/files/settings.xml /root/.m2/')\n Execute(service_packagedir + '/scripts/setup.sh '+params.install_dir+' '+str(params.stack_port)+' '+params.mvn_dir+' '+status_params.zeppelin_piddir+' '+str(params.executor_mem)+' '+params.stack_log+' '+params.hive_server_host+' >> ' + params.stack_logfile)\n\n\n def configure(self, env):\n import params\n import status_params\n env.set_params(params)\n env.set_params(status_params)\n\n #write out zeppelin-site.xml\n XmlConfig(\"zeppelin-site.xml\",\n conf_dir = params.conf_dir,\n configurations = params.config['configurations']['zeppelin-config'],\n owner=params.zeppelin_user\n )\n #write out zeppelin-env.sh\n env_content=InlineTemplate(params.zeppelin_env_content)\n File(format(\"{params.conf_dir}/zeppelin-env.sh\"), content=env_content, owner=params.zeppelin_user) # , mode=0777)\n\n #run setup_snapshot.sh in configure mode to regenerate interpreter and add back version flags\n service_packagedir = os.path.realpath(__file__).split('/scripts')[0]\n Execute(service_packagedir + '/scripts/setup_snapshot.sh '+params.zeppelin_dir+' '+params.hive_server_host+' '+params.hive_metastore_host+' '+params.hive_metastore_port+' CONFIGURE >> ' + params.stack_logfile)\n\n\n def stop(self, env):\n import params\n import status_params\n self.configure(env)\n Execute (params.zeppelin_dir+'/bin/zeppelin-daemon.sh stop >> ' + params.stack_logfile)\n\n\n def start(self, env):\n import params\n import status_params\n self.configure(env)\n Execute (params.zeppelin_dir+'/bin/zeppelin-daemon.sh start >> ' + params.stack_logfile)\n Execute ('su hdfs -c \"hadoop fs -mkdir -p /tmp/.zeppelin; echo Creating dir on HDFS\"')\n Execute ('cp 
'+params.zeppelin_dir+'/interpreter/spark/zeppelin-spark-0.5.0-SNAPSHOT.jar /tmp/')\n Execute ('su hdfs -c \"hadoop fs -put -f /tmp/zeppelin-spark-0.5.0-SNAPSHOT.jar /tmp/.zeppelin/; echo Copying zeppelin jar to HDFS\"')\n Execute ('su hdfs -c \"hadoop fs -chmod +w /user; echo Modifying HDFS permissions\"')\n\n def status(self, env):\n import status_params\n env.set_params(status_params)\n\n\n pid_file = glob.glob(status_params.zeppelin_piddir + '/zeppelin--*.pid')[0]\n check_process_status(pid_file)\n\n\nif __name__ == \"__main__\":\n Master().execute()\n","sub_path":"package/scripts/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"384458083","text":"from django.urls import path\n\nfrom . import views\n\napp_name=\"housemate\"\nurlpatterns = [\n path('', views.index, name='index'),\n path('admin_login', views.admin_login, name='admin_login'),\n path('user_login', views.user_login, name='user_login'),\n path('myboard', views.myboard, name='myboard'),\n path('register', views.register, name='register'),\n path('registerLandlord', views.registerLandlord, name='registerLandlord'),\n path('profile_view', views.profile_view, name='profile_view'),\n path('edit', views.edit, name='edit')\n]","sub_path":"HPS/housemate/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"256624720","text":"import random\r\nfnames = []\r\nsnames = []\r\nfor f in open(\"forenames.txt\"):\r\n fnames.append(f.strip())\r\nfor f in open(\"surnames.txt\"):\r\n snames.append(f.strip())\r\nfh = open('test_name.txt','w',encoding=\"utf8\")\r\nfor i in range(100):\r\n line = \"{0} {1}\".format(random.choice(fnames),random.choice(snames))\r\n print(line)\r\n fh.write(line)\r\n","sub_path":"name_gen2.py","file_name":"name_gen2.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"253654699","text":"import torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torchvision.models as models\r\n\r\n\r\nclass inceptionv3(nn.Module):\r\n def __init__(self, dimOut, *args):\r\n super().__init__()\r\n self.model = models.inception_v3(num_classes=dimOut)\r\n self.model.cuda()\r\n\r\n # self.extraFc1 = nn.Linear(1000, 200)\r\n # self.drop1 = nn.Dropout(p=dropout)\r\n # self.extraFc2 = nn.Linear(200, dimOut)\r\n\r\n def forward(self, x):\r\n # copy to 3 channels\r\n if x.shape[1] == 1:\r\n x = x.repeat(1, 3, 1, 1)\r\n x = self.model.forward(x)\r\n # x = F.relu(self.extraFc1(x))\r\n # x = self.drop1(x)\r\n # x = self.extraFc2(x)\r\n return F.log_softmax(x, dim=1)\r\n\r\nif __name__ == '__main__':\r\n test = inceptionv3(2)\r\n print('nice')","sub_path":"src/models/new_inception.py","file_name":"new_inception.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"160860100","text":"import csv\nimport os\nimport sys\n\nimport PyQt5.QtWidgets as W\nfrom PyQt5 import uic, QtCore\n\n\nclass Price:\n def __init__(self, a, b, c):\n self.name = a\n self.price = b\n self.shop = c\n\n def __str__(self):\n return f\"Товар {self.name} {self.price}р\\nПродаётся в {self.shop}\"\n\n\nclass Shop:\n def __init__(self, a):\n self.items = a\n\n def to_file(self, a):\n headers = ['Товар', 'Цена', 'Магазин']\n with open(a, 
'w', encoding='utf-8', newline='') as f:\n writer = csv.writer(f)\n\n writer.writerow(headers)\n for item in self.items:\n writer.writerow([item.name, item.price, item.shop])\n\n def append(self, a):\n self.items.append(a)\n self.items.sort(key=lambda x: x.shop)\n\n def find(self, a):\n res = []\n a = a.strip()\n for i in self.items:\n if i.shop.strip() == a:\n res.append(i)\n return Shop(res)\n\n def __str__(self):\n if len(self.items) == 0:\n return ''\n\n return '\\n\\n'.join(str(item) for item in self.items)\n\n\nshop = Shop([])\n\n\nclass MainWindow(W.QMainWindow):\n def __init__(self, *args):\n super().__init__()\n uic.loadUi('./windows/MainWindow.ui', self)\n\n self.InputWindow = InputWindow(self, [])\n self.ErrorWindow = ErrorWindow(self, [])\n self.SaveWindow = SaveWindow(self, [])\n self.SearchWindow = SearchWindow(self, [])\n self.setWindowTitle(\"Товары\")\n\n self.SearchButton.clicked.connect(self.search)\n self.DumpButton.clicked.connect(self.to_file)\n self.InputButton.clicked.connect(self.input)\n self.ExitButton.clicked.connect(self.close)\n\n self.timer = QtCore.QTimer(self)\n self.timer.setInterval(500)\n self.timer.start()\n self.timer.timeout.connect(self.show)\n\n def input(self):\n if len(shop.items) == 8:\n self.ErrorWindow.display(\"8 товаров уже введено\")\n return\n self.InputWindow.show()\n\n def show(self):\n a = Shop(shop.items[:4])\n b = Shop(shop.items[4:])\n\n self.ItemsBrowser1.setText(str(a))\n self.ItemsBrowser2.setText(str(b))\n super().show()\n\n def to_file(self):\n if len(shop.items) == 0:\n self.ErrorWindow.display(\"Товары не введены\")\n return\n self.SaveWindow.show()\n\n def search(self):\n if len(shop.items) == 0:\n self.ErrorWindow.display(\"Товары не введены\")\n return\n self.SearchWindow.show()\n\n def error(self, a):\n self.ErrorWindow.close()\n self.ErrorWindow.display(a)\n\n def close(self):\n self.ErrorWindow.close()\n self.InputWindow.close()\n self.SaveWindow.close()\n self.SearchWindow.close()\n\n super().close()\n\n\nclass InputWindow(W.QWidget):\n def __init__(self, *args):\n super().__init__()\n uic.loadUi('./windows/InputWindow.ui', self)\n\n self.EnterButton.clicked.connect(self.input)\n self.ClearButton.clicked.connect(self.clear)\n self.setWindowTitle(\"Ввод товаров\")\n\n def clear(self):\n self.NameInput.clear()\n self.PriceInput.clear()\n self.ShopInput.clear()\n\n def input(self):\n if not (len(self.NameInput.text().strip()) > 0 and\n len(self.PriceInput.text().strip()) > 0 and\n len(self.ShopInput.text().strip()) > 0):\n self.clear()\n return\n\n p = self.PriceInput.text().strip()\n try:\n p = float(p)\n except ValueError:\n self.clear()\n return\n n = self.NameInput.text().strip()\n s = self.ShopInput.text().strip()\n\n shop.append(Price(n, p, s))\n self.clear()\n\n if len(shop.items) == 8:\n self.close()\n\n self.ItemsRemain.setText(f\"Осталось ввести: {8 - len(shop.items)}\")\n\n def show(self):\n self.ItemsRemain.setText(f\"Осталось ввести: {8 - len(shop.items)}\")\n super().show()\n\n\nclass ErrorWindow(W.QWidget):\n def __init__(self, *args):\n super().__init__()\n uic.loadUi('./windows/ErrorWindow.ui', self)\n\n self.OkButton.clicked.connect(self.close)\n self.setWindowTitle(\"Ошибка\")\n\n def display(self, message):\n self.ErrorBrowser.setText(f\"Ошибка!\\n\\n{message}\")\n self.show()\n\n\nclass SaveWindow(W.QWidget):\n def __init__(self, *args):\n super().__init__()\n uic.loadUi('./windows/SaveWindow.ui', self)\n\n self.ExitButton.clicked.connect(self.close)\n self.SaveButton.clicked.connect(self.save)\n 
self.setWindowTitle(\"Сохранить\")\n\n def path(self):\n name = self.FilenameInput.text().strip()\n if len(name) == 0:\n raise ValueError(\"Имя файла не введено\")\n if not name.endswith('.csv'):\n name += '.csv'\n return os.getcwd() + '/' + name\n\n def save(self):\n try:\n path = self.path()\n except ValueError:\n path = os.getcwd() + '/' + 'file.csv'\n shop.to_file(path)\n\n self.clear()\n self.close()\n\n def clear(self):\n self.FilenameInput.clear()\n\n def close(self):\n self.clear()\n super().close()\n\n\nclass SearchWindow(W.QWidget):\n def __init__(self, *args):\n super().__init__()\n uic.loadUi('./windows/SearchWindow.ui', self)\n\n self.SearchButton.clicked.connect(self.res)\n self.ExitButton.clicked.connect(self.close)\n self.setWindowTitle(\"Поиск по магазинам\")\n\n def res(self):\n if len(self.QueryInput.text().strip()) == 0:\n self.ResultsBrowser.setText(\"Введите запрос\")\n return\n\n query = self.QueryInput.text()\n self.clear()\n\n shops = shop.find(query)\n if shops.items:\n text = f\"Товары из '{query}': \"\n self.ResultsBrowser.setText(f\"{text}\\n{shops}\")\n else:\n self.ResultsBrowser.setText(f\"Товары из '{query}' не найдены\")\n\n def clear(self):\n self.ResultsBrowser.clear()\n self.QueryInput.clear()\n\n def close(self):\n self.clear()\n super().close()\n\n\napp = W.QApplication(sys.argv)\nmain = MainWindow()\nmain.show()\nmain.input()\nexit(app.exec_())\n","sub_path":"Programming-basics/Labs-for-collegues/Alexandra-Smetanina/Lab7/Task1/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"583820593","text":"\"\"\"\nInput is all Pickle.gz. Now each pickle is a subject.\n\ntrain:validation:test = 8:1:1\n\n********************************************************\nTrain use all record, validation and test use baseline.\n********************************************************\n\nNow subjects with all records are pickle.gz, and subjects with baseline\nare pickle.gz, in different folds. So, what should we do?\n\nrandomly choose train, validation, test index. For train, directly read the\npickle.gz, for validation and test, from the index get the subjects name, then \nfind out corresponding pickle.gz. \n\n**********************************************************************\nSo if we want to expand this code to MCI, then \nEMCI, LMCI and SMC should be named also with 2 prefix charactors.\n\nRemember to add commit for experiment log. 
Didn't have the trigger to check it now.\n**********************************************************************\n\nGenerate experiment log in Experiment+time.txt\n\n@Zhewei\n5/31/2016\n\nAdd random.seed()\n@Zhewei\n6/3/2016\n\n\nCalculate the different between two time frame, and sent it as input.\n@Zhewei\n6/16/2016\n\n\"\"\"\n\nimport sys,os\nimport datetime\nimport gzip\nimport pickle as Pickle\nimport numpy as np\nnp.random.seed(1337) # for reproducibility\nfrom random import shuffle, randint\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation\nfrom keras.layers import LSTM\nfrom keras.optimizers import RMSprop, SGD\nfrom keras.initializations import normal, identity\n\niterationNo = 1\nGroups = 2\n\nbatch_size=30\n\ntimeLength = 129\n\ntotalNo = 84#190\ntrainPercent = 60#152\nvalidationPercent = 14#19\ntestpercent = 10#19\nMagicNumber = 17\n\nhd_notes = 2\nlearning_rate = 1e-5\nnb_epoch = 1000\n\n\ndef main(args):\n if len(args) < 2:\n usage( args[0] )\n pass\n else:\n work( args[1:-1], args[-1])\n pass\n pass\n\ndef usage (programm):\n print (\"usage: %s ../data/Subjects_Alltime/*.gz ../data/Subjects_Baseline/*.gz\"%(programm))\n \ndef work(fnames, comment):\n finalResults = list()\n logTime = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')\n logName = '../data/Experiments_log '+logTime+comment+'.txt'\n f_txt = open(logName, 'w')\n f_txt.write(str(sys.argv[0]))\n f_txt.write('\\n')\n f_txt.write(str(sys.argv[1:]))\n f_txt.write('\\n'*3)\n for iNo in range(iterationNo):\n index = [i for i in range(totalNo)]\n shuffle(index)\n AlltimeFile = fnames[0:totalNo]\n BaselineFile = fnames[totalNo:]\n trainIndex = index[0:trainPercent]\n trainData, trainLabel = stackData(AlltimeFile, trainIndex)\n \n\n # validation\n validationIndex = index[trainPercent:trainPercent+validationPercent]\n validationFile = BaselineFile\n validationBaselineIndex = list()\n for iva in validationIndex:\n tmpFile = os.path.basename(AlltimeFile[iva])\n tmpSubj = tmpFile[0:MagicNumber]#<=========================== magic number, to get subject value, such like 013_S_234\n validationBaseline = [sNo for sNo,s in enumerate(validationFile) if tmpSubj in s]\n if validationBaseline:\n validationBaseline = validationBaseline[0]\n validationBaselineIndex.append(validationBaseline)\n \n validationData, validationLabel = stackData(BaselineFile, validationBaselineIndex) \n \n # test \n testIndex = index[trainPercent+validationPercent:]\n testFile = BaselineFile\n testBaselineIndex = list()\n for ite in testIndex:\n tmpFile = os.path.basename(AlltimeFile[ite])\n tmpSubj = tmpFile[0:MagicNumber]#<=========================== magic number\n testBaseline = [sNo for sNo,s in enumerate(testFile) if tmpSubj in s]\n if testBaseline:\n testBaseline = testBaseline[0]\n testBaselineIndex.append(testBaseline)\n \n testData, testLabel = stackData(BaselineFile, testBaselineIndex)\n \n trainData = trainData[:,0:timeLength,:]\n validationData = validationData[:,0:timeLength,:]\n testData = testData[:,0:timeLength,:]\n \n \n print ('*'*30)\n print ('Iteration:', iNo)\n print ('Training subjects:', trainPercent)\n print ('Training samples:', trainData.shape[0])\n print ('Validation subjects:', validationPercent)\n print ('Validation samples:', validationData.shape[0])\n print ('Test subjects:', testpercent)\n print ('Test samples:', testData.shape[0])\n print ('*'*30)\n \n \n \"\"\"\n LSTM\n \"\"\"\n \n nb_classes = Groups\n timesteps = trainData.shape[1]\n 
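# the LSTM consumes sequences of shape (timesteps, featureNo); class labels are one-hot encoded below\n        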
featureNo = trainData.shape[2]\n\n Y_train = np_utils.to_categorical(trainLabel, nb_classes)\n Y_test = np_utils.to_categorical(testLabel, nb_classes)\n Y_valid = np_utils.to_categorical(validationLabel, nb_classes)\n \n print (\"Building model...\")\n model = Sequential()\n model.add(LSTM(hd_notes, input_shape=(timesteps, featureNo),\\\n init='glorot_uniform',\\\n inner_init='orthogonal',\\\n forget_bias_init='one',\\\n inner_activation='hard_sigmoid',\\\n activation='tanh', return_sequences=False,\\\n dropout_W=0.4, dropout_U=0.4))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n rmsprop = RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-06)\n # model.compile(loss='binary_crossentropy', optimizer=rmsprop, metrics=[\"accuracy\"])\n sgd = SGD(lr=learning_rate, momentum=0.0, decay=0.0, nesterov=False)\n model.compile(loss='binary_crossentropy', optimizer=rmsprop, metrics=[\"accuracy\"])\n\n print (\"Training model...\")\n\n model.fit(trainData, Y_train, \\\n batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(validationData, Y_valid))\n\n scores = model.evaluate(testData, Y_test, verbose=1)\n print('RNN test score:', scores[0])\n print('RNN test accuracy:', scores[1])\n print (testLabel)\n print (model.predict_classes(testData))\n print (model.predict_classes(trainData))\n print (trainLabel)\n finalResults.append(scores[1])\n validationScore = model.evaluate(validationData, Y_valid, verbose=0)\n \n # record all the information\n f_txt.write('*'*30)\n f_txt.write('\\n')\n f_txt.write('Iteration: ' + str(iNo))\n f_txt.write('\\n')\n f_txt.write('Training samples: ' + str(trainData.shape[0]))\n f_txt.write('\\n')\n f_txt.write('Training Index: ' + str(trainIndex))\n f_txt.write('\\n')\n f_txt.write('Validation samples: ' + str(validationData.shape[0]))\n f_txt.write('\\n')\n f_txt.write('Validation Index: ' + str(validationIndex))\n f_txt.write('\\n')\n f_txt.write('Test samples: ' + str(testData.shape[0]))\n f_txt.write('\\n')\n f_txt.write('Test Index: ' + str(testIndex))\n f_txt.write('\\n')\n f_txt.write('Training ground truth: ' + str(trainLabel))\n f_txt.write('\\n')\n f_txt.write('Training results: ' + str(model.predict_classes(trainData)))\n f_txt.write('\\n')\n f_txt.write('Validation ground truth: ' + str(validationLabel))\n f_txt.write('\\n')\n f_txt.write('Validation result: ' + str(model.predict_classes(validationData)))\n f_txt.write('\\n')\n f_txt.write('Validation accurate: ' + str(validationScore[1]))\n f_txt.write('\\n')\n f_txt.write('Test ground truth: ' + str(testLabel))\n f_txt.write('\\n')\n f_txt.write('Test result: ' + str(model.predict_classes(testData)))\n f_txt.write('\\n')\n f_txt.write('Test accurate: ' + str(scores[1]))\n f_txt.write('\\n')\n pass\n \n print ('Final results is:', finalResults)\n print ('Final accurate results of LSTM is:', sum(finalResults)/iterationNo)\n f_txt.write('\\n'*3)\n f_txt.write('Final results are: ' + str(finalResults))\n f_txt.write('\\n')\n f_txt.write('Final accurate results of LSTM is: ' + str(sum(finalResults)/iterationNo))\n f_txt.write('\\n')\n f_txt.close()\n\n\n\n \ndef stackData(fnames, index):\n Data = np.zeros([1,1])\n Label = np.zeros([1,1])\n for iNo, i in enumerate(index):\n f = gzip.open(fnames[i],'rb')\n tmpdata,tmplabel = Pickle.load(f)\n if iNo == 0:\n Data = tmpdata\n Label = tmplabel\n else:\n Data = np.vstack((Data, tmpdata))\n Label = np.append(Label, tmplabel)\n return Data, Label\n \nif __name__ == \"__main__\":\n main(sys.argv)\n pass\n 
\n","sub_path":"Outdated/Version0_RNN_ADNI_Analysis/AutoEncoder_LSTM.py","file_name":"AutoEncoder_LSTM.py","file_ext":"py","file_size_in_byte":8896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"215169126","text":"lst = [4, 5]\nprint(lst)\n\n\ndef foo(lst):\n lst.append(\"str\")\n print(lst)\n\n\ndef foo1(lst):\n lst = [1, 4, 6]\n print(lst)\n\n\nfoo(lst)\n\nfoo1(lst)\n\n\nclass MyClass:\n attr = 1234\n\n def too(self):\n return \"Hellow world\"\n\n\nprint(MyClass.attr)\n\nprint(MyClass.too(1))\n","sub_path":"PycharmProjects/examples/88888.py","file_name":"88888.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"402099034","text":"from django.contrib import admin\nfrom django.contrib.admin.widgets import AdminFileWidget, AdminURLFieldWidget\nfrom django.contrib.contenttypes.models import ContentType\nfrom django import template\nfrom django.shortcuts import render_to_response\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext as _\nfrom django.utils.html import escape\n\nimport os\n\nfrom models import Image,Video,Audio,Flash,Collection,\\\n CollectionRelation,MediaTemplate,GrabVideo,Document,Embed\nimport settings\nfrom forms import ImageCreationForm, VideoCreationForm, AudioCreationForm, \\\n FlashCreationForm, DocumentCreationForm\n\nfrom templatetags.media_widgets import snipshot_url\n\nclass AdminImageWidget(AdminFileWidget):\n def render(self, name, value, attrs=None):\n output = []\n try:\n thumbnail = value.instance.thumbnail.url\n width = value.instance.thumb_width\n height = value.instance.thumb_height\n snipshot = snipshot_url(value.instance)\n crop_tag = '''
Crop image with snipshot'''%snipshot\n tag = u'' % (thumbnail, width, height)\n except:\n crop_tag = u\"\"\n tag = u\"No Thumbnail available\"\n if value:\n file_name=str(value)\n output.append(u'%s' % (value.url, tag))\n output.append(crop_tag)\n return mark_safe(u''.join(output))\n\nclass AdminExternalURLWidget(AdminURLFieldWidget):\n def render(self, name, value, attrs=None):\n output = []\n tag = u\"No Thumbnail available\"\n if value:\n file_name=str(value)\n output.append(u'%s' % (value, tag))\n output.append(u'
%s' % (value, value))\n return mark_safe(u''.join(output))\n\nclass GenericCollectionInlineModelAdmin(admin.options.InlineModelAdmin):\n ct_field = \"content_type\"\n ct_fk_field = \"object_id\"\n def __init__(self, parent_model, admin_site):\n super(GenericCollectionInlineModelAdmin, self).__init__(parent_model, admin_site)\n ctypes = ContentType.objects.all().order_by('id').values_list('id', 'app_label', 'model')\n elements = [\"%s: '%s/%s'\" % (x, y, z) for x, y, z in ctypes]\n self.content_types = \"{%s}\" % \",\".join(elements)\n \n def get_formset(self, request, obj=None):\n result = super(GenericCollectionInlineModelAdmin, self).get_formset(request, obj)\n result.content_types = self.content_types\n result.ct_fk_field = self.ct_fk_field\n return result\n\nclass GenericCollectionTabularInline(GenericCollectionInlineModelAdmin):\n template = 'admin/edit_inlines/gen_coll_tabular.html'\n\nclass MediaAdmin(admin.ModelAdmin):\n fieldsets = (\n (None, {'fields':('title','caption')}),\n ('Content',{'fields':(('file','external_url'),)}),\n ('Credit',{'fields':('author','one_off_author','reproduction_allowed')}),\n ('Metadata',{'fields':('metadata','mime_type')}),\n ('Connections',{'fields':('public','categories','site')}),\n ('Widget',{'fields':('width','height')}),\n ('Advanced options', {\n 'classes': ('collapse',),\n 'fields': ('slug','widget_template',)\n }),\n )\n add_fieldsets = (\n (None, {'fields': ('title',)}),\n ('Content',{'fields':('external_url','file', 'caption')}),\n ('Rights', {'fields': ('public','reproduction_allowed')}),\n ('Additional Info', {\n 'classes': ('collapse',),\n 'fields': ('slug', 'creation_date', 'site')\n })\n )\n \n list_display = ('title', 'author_name', 'mime_type', 'public', 'creation_date')\n list_filter = ('site', 'creation_date', 'public')\n list_editable = ('public',)\n prepopulated_fields = {'slug': ('title',)}\n date_hierarchy = 'creation_date'\n search_fields = ('caption', 'file')\n add_form_template = 'admin/massmedia/content_add_form.html'\n \n \n def get_fieldsets(self, request, obj=None):\n \"\"\"\n Return add_fieldsets if it is a new object and the form has specified\n different fieldsets for creation vs. change. 
Otherwise punt.\n \"\"\"\n if not obj and hasattr(self, 'add_fieldsets'):\n return self.add_fieldsets\n return super(MediaAdmin, self).get_fieldsets(request, obj)\n \n def get_form(self, request, obj=None, **kwargs):\n \"\"\"\n Return a special add form if specified\n \"\"\"\n defaults = {}\n if not obj and hasattr(self, 'add_form'):\n defaults = {\n 'form': self.add_form\n }\n defaults.update(kwargs)\n return super(MediaAdmin, self).get_form(request, obj, **defaults)\n \n \n def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):\n opts = self.model._meta\n app_label = opts.app_label\n ordered_objects = opts.get_ordered_objects()\n is_popup = '_popup' in request.REQUEST or 'pop' in request.REQUEST\n context.update({\n 'add': add,\n 'change': change,\n 'has_add_permission': self.has_add_permission(request),\n 'has_change_permission': self.has_change_permission(request, obj),\n 'has_delete_permission': self.has_delete_permission(request, obj),\n 'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,\n 'has_absolute_url': hasattr(self.model, 'get_absolute_url'),\n 'ordered_objects': ordered_objects,\n 'form_url': mark_safe(form_url),\n 'opts': opts,\n 'content_type_id': ContentType.objects.get_for_model(self.model).id,\n 'save_as': self.save_as,\n 'save_on_top': self.save_on_top,\n 'root_path': self.admin_site.root_path,\n 'is_popup': is_popup,\n })\n context_instance = template.RequestContext(request, current_app=self.admin_site.name)\n if add:\n return render_to_response(self.add_form_template or [\n \"admin/%s/%s/add_form.html\" % (app_label, opts.object_name.lower()),\n \"admin/%s/add_form.html\" % app_label,\n \"admin/change_form.html\"\n ], context, context_instance=context_instance)\n else:\n return render_to_response(self.change_form_template or [\n \"admin/%s/%s/change_form.html\" % (app_label, opts.object_name.lower()),\n \"admin/%s/change_form.html\" % app_label,\n \"admin/change_form.html\"\n ], context, context_instance=context_instance)\n \n\nclass ImageAdmin(MediaAdmin):\n list_display = ('render_thumb','title','creation_date')\n list_display_links = ('render_thumb','title',)\n list_editable = tuple()\n add_fieldsets = (\n ('Content',{'fields':('external_url','file','caption')}),\n ('Rights', {'fields': ('public','reproduction_allowed')}),\n ('Additional Info', {\n 'classes': ('collapse',),\n 'fields': ('title', 'slug', 'creation_date', 'site')\n })\n )\n add_form = ImageCreationForm\n \n def formfield_for_dbfield(self, db_field, **kwargs):\n if db_field.name == 'file':\n kwargs['widget'] = AdminImageWidget\n request = kwargs.pop('request')\n return db_field.formfield(**kwargs)\n elif db_field.name == 'external_url':\n kwargs['widget'] = AdminExternalURLWidget\n request = kwargs.pop('request')\n return db_field.formfield(**kwargs)\n return super(ImageAdmin, self).formfield_for_dbfield(db_field, **kwargs)\n\nclass VideoAdmin(MediaAdmin):\n list_display = ('title','thumb','author_name','mime_type','public','creation_date')\n fieldsets = (\n (None, {'fields':('title','caption')}),\n ('Content',{'fields':(('file','external_url'),'thumbnail')}),\n ('Credit',{'fields':('author','one_off_author','reproduction_allowed')}),\n ('Metadata',{'fields':('metadata','mime_type')}),\n ('Connections',{'fields':('public','categories','site')}),\n ('Widget',{'fields':('width','height')}),\n ('Advanced options', {\n 'classes': ('collapse',),\n 'fields': ('slug','widget_template',)\n }),\n )\n \n raw_id_fields = 
('thumbnail',)\n add_fieldsets = (\n (None, {'fields': ('title', 'slug',)}),\n ('Content',{'fields':(('external_url','file'), 'thumbnail')}),\n ('Rights', {'fields': ('public','reproduction_allowed')}),\n ('Additional Info', {\n 'classes': ('collapse',),\n 'fields': ('creation_date', 'site')\n })\n )\n add_form = VideoCreationForm\n\n\nclass GrabVideoAdmin(VideoAdmin):\n search_fields = ('title','caption','keywords')\n list_filter = VideoAdmin.list_filter + ('one_off_author',)\n list_display = ('asset_id','layout_id','title','thumb','one_off_author','public','creation_date','categories')\n fieldsets = ( ('Grab',{'fields':('asset_id','layout_id','keywords')}), )\n for fieldset in VideoAdmin.fieldsets:\n if fieldset[0] == 'Content':\n continue\n fieldsets += (fieldset,)\n \nclass AudioAdmin(MediaAdmin,admin.ModelAdmin):\n add_form = AudioCreationForm\n\nclass FlashAdmin(MediaAdmin):\n add_form = FlashCreationForm\n\nclass DocumentAdmin(MediaAdmin):\n add_form = DocumentCreationForm\n\nclass CollectionInline(GenericCollectionTabularInline):\n model = CollectionRelation\n\nclass CollectionAdmin(admin.ModelAdmin):\n fields = ('title','slug','caption','zip_file','public','categories','site')\n list_display = ('title','caption', 'public', 'creation_date')\n list_filter = ('site', 'creation_date','public')\n prepopulated_fields = {'slug': ('title',)}\n date_hierarchy = 'creation_date'\n search_fields = ('caption',)\n inlines = (CollectionInline,)\n class Media:\n js = ('js/genericcollections.js',)\n\nadmin.site.register(Collection , CollectionAdmin)\nadmin.site.register(Image, ImageAdmin)\nadmin.site.register(Video, VideoAdmin)\nadmin.site.register(Audio, AudioAdmin)\nadmin.site.register(Flash, FlashAdmin)\nadmin.site.register(Document, DocumentAdmin)\nadmin.site.register(GrabVideo, GrabVideoAdmin)\nadmin.site.register(Embed)\n\nif not settings.FS_TEMPLATES:\n admin.site.register(MediaTemplate)","sub_path":"eaa2011/setup/build/massmedia/massmedia/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":10625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"298473811","text":"import numpy as np\nimport torch\n\nfrom a2c_ppo_acktr import utils\nfrom a2c_ppo_acktr.envs import make_vec_envs\nfrom collections import Counter\n\n\ndef evaluate(actor_critic, ob_rms, env_name, seed, num_processes, eval_log_dir,\n device):\n eval_envs = make_vec_envs(env_name, seed + num_processes, num_processes,\n None, eval_log_dir, device, True)\n\n vec_norm = utils.get_vec_normalize(eval_envs)\n if vec_norm is not None:\n vec_norm.eval()\n vec_norm.ob_rms = ob_rms\n\n eval_episode_rewards = []\n\n obs = eval_envs.reset()\n eval_recurrent_hidden_states = torch.zeros(num_processes, actor_critic.recurrent_hidden_state_size, device=device)\n eval_masks = torch.zeros(num_processes, 1, device=device)\n\n while len(eval_episode_rewards) < 10:\n with torch.no_grad():\n _, action, _, eval_recurrent_hidden_states = actor_critic.act(\n obs,\n eval_recurrent_hidden_states,\n eval_masks,\n deterministic=True)\n\n # Obser reward and next obs\n obs, _, done, infos = eval_envs.step(action)\n\n eval_masks = torch.tensor(\n [[0.0] if done_ else [1.0] for done_ in done],\n dtype=torch.float32,\n device=device)\n\n for info in infos:\n if 'episode' in info.keys():\n eval_episode_rewards.append(info['episode']['r'])\n\n eval_envs.close()\n\n print(\" Evaluation using {} episodes: mean reward {:.5f}\\n\".format(\n len(eval_episode_rewards), 
np.mean(eval_episode_rewards)))\n\n\ndef evaluate_carenv(actor_critic, env_name, eval_seed, num_processes, device, num_eval_episodes=100):\n env = make_vec_envs(env_name, eval_seed, num_processes, None, None, device=device,\n allow_early_resets=False, pixels=False)\n\n recurrent_hidden_states = torch.zeros(num_processes, actor_critic.recurrent_hidden_state_size).to(device)\n masks = torch.zeros(num_processes, 1).to(device)\n obs = env.reset()\n t = 0\n final_states = []\n final_positions = []\n while True:\n with torch.no_grad():\n value, action, _, recurrent_hidden_states = actor_critic.act(obs, recurrent_hidden_states, masks)\n\n # Obser reward and next obs\n obs, reward, done, info = env.step(action)\n t += 1\n for i, done_ in enumerate(done):\n if done_:\n if len(final_positions) % 10 == 0:\n print(len(final_positions))\n for key in info[i].keys():\n if 'final_position' not in key:\n final_states.append(key)\n else:\n final_positions.append(info[i][key])\n\n if len(final_positions) > num_eval_episodes:\n break\n\n # masks.fill_(0.0 if done else 1.0)\n masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]).to(device)\n\n env.close()\n print(\"Evaluation Performance:\", Counter(final_states[:num_eval_episodes]))\n return Counter(final_states[:num_eval_episodes])\n","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"177408797","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom krr_class_new import krr_class\nfrom angular_fingerprintFeature_m import Angular_Fingerprint\nfrom gaussComparator import gaussComparator\nfrom scipy.signal import argrelextrema\nfrom krr_calculator import krr_calculator\nimport time\n\nfrom ase import Atoms\nfrom ase.optimize import BFGS\nfrom ase.io import read, write\nfrom ase.visualize import view\n\ndef loadTraj():\n atoms = read('work_folder/all.traj', index=':')\n atoms = atoms[0:15000]\n #atoms = atoms[::2]\n #atoms = atoms[:Ndata]\n Ndata = len(atoms)\n Na = 24\n dim = 3\n Ntraj = len(atoms)\n \n pos = np.zeros((Ntraj,Na,dim))\n E = np.zeros(Ntraj)\n F = np.zeros((Ntraj, Na, dim))\n for i, a in enumerate(atoms):\n print('Loading: {}/{}\\r'.format(i, Ndata), end='')\n pos[i] = a.positions\n E[i] = a.get_potential_energy()\n F[i] = a.get_forces()\n\n return atoms, pos.reshape((Ntraj, Na*dim)), E, F.reshape((Ntraj, Na*dim))\n\n\ndef main():\n np.random.seed(100)\n atoms, pos, E, F = loadTraj()\n #atoms = read('work_folder/all.traj', index=':100')\n print('\\n',len(atoms))\n a = atoms[0]\n\n Ntrain = 1000\n Ntest = 500\n Ntest2 = 500\n index = np.random.permutation(10000)\n i_train = index[:Ntrain].astype(int)\n i_test = (np.random.permutation(5000)+10000)[:Ntest].astype(int)\n i_test2 = index[Ntrain:Ntrain+Ntest2]\n \n Rc1 = 5\n binwidth1 = 0.1\n sigma1 = 0.4\n\n Rc2 = 3\n Nbins2 = 30\n sigma2 = 0.1\n \n gamma = 5\n \n featureCalculator = Angular_Fingerprint(a, Rc1=Rc1, Rc2=Rc2, binwidth1=binwidth1, Nbins2=Nbins2, sigma1=sigma1, sigma2=sigma2, gamma=0, use_angular=False)\n\n fingerprints_train = []\n for num, i in enumerate(i_train):\n print('Training features: {}/{}\\r'.format(num, Ntrain), end='')\n feature_i = featureCalculator.get_features(atoms[i])\n fingerprints_train.append(feature_i)\n fingerprints_train = np.array(fingerprints_train)\n print('\\n')\n \n fingerprints_test = []\n for num, i in enumerate(i_test):\n print('Test features: {}/{}\\r'.format(num, Ntest), 
end='')\n feature_i = featureCalculator.get_features(atoms[i])\n fingerprints_test.append(feature_i)\n fingerprints_test = np.array(fingerprints_test)\n print('\\n')\n \n fingerprints_test2 = []\n for num, i in enumerate(i_test):\n print('Test2 features: {}/{}\\r'.format(num, Ntest2), end='')\n feature_i = featureCalculator.get_features(atoms[i])\n fingerprints_test2.append(feature_i)\n fingerprints_test2 = np.array(fingerprints_test2)\n\n Etrain = E[i_train]\n Etest = E[i_test]\n Etest2 = E[i_test2]\n \n # Set up KRR-model\n comparator = gaussComparator()\n krr = krr_class(comparator=comparator, featureCalculator=featureCalculator)\n \n # Perform training with cross-validation\n Npoints = 30\n Ndata = Ntrain\n N_array = np.logspace(1, np.log10(Ndata), Npoints).astype(int)\n GSkwargs = {'reg': [1e-5], 'sigma': np.logspace(1,2,10)}\n\n MAE_val = np.zeros(Npoints)\n MAE_test = np.zeros(Npoints)\n MAE_test2 = np.zeros(Npoints)\n for i, N in enumerate(N_array):\n Esub = E[:N]\n fingerprints_sub = fingerprints_train[:N]\n \n MAE_val_temp, params = krr.train(Esub, featureMat=fingerprints_sub, add_new_data=False, k=5, **GSkwargs)\n MAE_val[i] = MAE_val_temp\n print('N:', N, 'params:', params)\n Epredict = np.array([krr.predict_energy(fnew=f) for f in fingerprints_test])\n Epredict2 = np.array([krr.predict_energy(fnew=f) for f in fingerprints_test2])\n MAE_test[i] = np.mean(np.abs(Epredict - Etest))\n MAE_test2[i] = np.mean(np.abs(Epredict2 - Etest2))\n\n print(MAE_val)\n print(MAE_test)\n print(MAE_test2)\n\n plt.figure()\n plt.loglog(N_array, MAE_test)\n plt.loglog(N_array, MAE_test2)\n \n \n plt.figure()\n plt.plot(np.arange(len(fingerprints_train[0]))*Rc1/len(fingerprints_train[0]), fingerprints_train[0], label='index 0')\n plt.plot(np.arange(len(fingerprints_train[0]))*Rc1/len(fingerprints_train[0]), fingerprints_train[40], label='index 40')\n plt.plot(np.arange(len(fingerprints_train[0]))*Rc1/len(fingerprints_train[0]), fingerprints_train[90], label='index 90')\n plt.legend()\n plt.show()\n \"\"\"\n [ 19.15921531 19.86766022 21.21613115 15.96245691 16.91681297\n 14.8915245 12.6001315 11.93777066 12.33840395 14.75289874\n 9.30982123 8.98324929 7.39914261 6.48309714 6.09473549\n 6.03771978 5.64555205 5.76005092 5.60891719 5.5894711\n 5.5726956 5.40067667 5.37019041 5.33675678 5.52035752\n 5.96919136 5.6995485 6.68662729 6.13550929 5.94384627]\n[ 17.48593875 18.285844 20.43432449 14.74036565 15.77212828\n 13.26720632 11.38015665 10.37589333 10.10622903 13.31456299\n 7.87100404 7.47538609 6.44173639 5.68295358 5.44522854\n 5.48219097 5.20928989 5.33421367 5.28745139 5.34007973\n 5.314172 5.14872923 5.11827076 5.09639421 5.16014279\n 5.45981102 5.4308997 6.03696307 5.63806243 5.52051774]\n \"\"\"\n\n \n \nif __name__ == \"__main__\":\n main()\n","sub_path":"krrThomas/grapheneLC.py","file_name":"grapheneLC.py","file_ext":"py","file_size_in_byte":5172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"235309866","text":"from pylem import MorphanHolder, MorphLanguage, MorphSourceDictHolder\r\nfrom common.logging_wrapper import setup_logging\r\nfrom common.russian_fio import TRussianFioRecognizer\r\nimport json\r\nfrom collections import defaultdict\r\nimport argparse\r\n\r\nFEM_GENDER = \"1\"\r\nMASC_GENDER = \"2\"\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--input-json\", dest=\"input_json\")\r\n parser.add_argument(\"--output-slf\", dest=\"output_slf\")\r\n 
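# optional output limit, auxiliary word lists, and filtering flags for the prediction run\r\n    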
parser.add_argument(\"--max-output-count\", dest=\"max_output_count\", type=int)\r\n parser.add_argument(\"--wikipedia-titles\", dest=\"wikipedia_titles\")\r\n parser.add_argument(\"--surname-prefix\", dest=\"surname_prefix\")\r\n parser.add_argument(\"--wordform-freq-list\", dest=\"wordform_freq_list\")\r\n parser.add_argument(\"--force-ambiguity\", dest=\"force_ambiguity\", action=\"store_true\", default=False)\r\n args = parser.parse_args()\r\n return args\r\n\r\n\r\nclass TFio:\r\n def __init__(self, surname=None, name=None, patronymic=None, gender=None):\r\n self.surname = surname\r\n self.name = name\r\n self.patronymic = patronymic\r\n self.gender = gender\r\n\r\n def read_from_json(self, s):\r\n self.surname = s['s']\r\n self.name = s['n']\r\n self.patronymic = s['p']\r\n self.gender = s['g']\r\n return self\r\n\r\n\r\nclass TPredictor:\r\n def __init__(self):\r\n self.args = parse_args()\r\n self.logger = setup_logging(\"create_slf_for_surnames\")\r\n mwz_path = 'C:/tmp/RML/Source/morph_dict/data/Russian/project.mwz'\r\n self.morph_wizard = MorphSourceDictHolder(mwz_path)\r\n self.morph = MorphanHolder(MorphLanguage.Russian)\r\n self.surnames = defaultdict(list)\r\n self.wordform_freq_list = defaultdict(int)\r\n with open(self.args.wordform_freq_list) as inp:\r\n for l in inp:\r\n k, v = l.split(\"\\t\")\r\n self.wordform_freq_list[k] = int(v)\r\n\r\n def get_freq(self, wordform):\r\n return self.wordform_freq_list.get(wordform, 0)\r\n\r\n def get_freq_case_insensitive(self, wordform):\r\n return self.get_freq(wordform.lower()) + self.get_freq(wordform.upper()) + self.get_freq(wordform.title())\r\n\r\n def get_up_div_lo(self, wordform: str):\r\n lower = self.get_freq(wordform.lower())\r\n upper = self.get_freq(wordform.upper())\r\n title = self.get_freq(wordform.title())\r\n return (upper + title) / (lower + 0.00000001)\r\n\r\n def read_wiki_titles(self):\r\n self.logger.info(\"read {}\".format(self.args.wikipedia_titles))\r\n with open(self.args.wikipedia_titles) as inp:\r\n cnt = 0\r\n for l in inp:\r\n if l.count(',') != 1 and l.count('_') != 2:\r\n continue\r\n words = l.strip().split('_')\r\n if len(words) != 3:\r\n continue\r\n surname, name, patronymic = words\r\n if self.args.surname_prefix is not None and not surname.lower().startswith(self.args.surname_prefix):\r\n continue\r\n surname = surname.strip(',')\r\n fem = TRussianFioRecognizer.is_feminine_patronymic(patronymic)\r\n masc = TRussianFioRecognizer.is_masculine_patronymic(patronymic)\r\n if not fem and not masc:\r\n continue\r\n cnt += 1\r\n gender = FEM_GENDER if fem else MASC_GENDER\r\n self.surnames[surname.lower()].append(TFio(surname,name, patronymic, gender))\r\n self.logger.info(\"read {} fios from {}\".format(cnt, self.args.wikipedia_titles))\r\n\r\n def read_fio_from_disclosures(self):\r\n self.logger.info(\"read {}\".format(self.args.input_json))\r\n with open(self.args.input_json) as inp:\r\n for l in inp:\r\n fio = TFio().read_from_json(json.loads(l))\r\n if self.args.surname_prefix is not None and not fio.surname.lower().startswith(self.args.surname_prefix):\r\n continue\r\n self.surnames[fio.surname.lower()].append(fio)\r\n\r\n def check_slf(self, slf):\r\n for l in slf.split(\"\\n\"):\r\n if l.startswith('$') or l.startswith('='):\r\n continue\r\n words = l.split()\r\n if len(words) > 0:\r\n w = words[0]\r\n if self.morph.is_in_dictionary(w):\r\n freq = self.get_freq_case_insensitive(w)\r\n if freq > 100:\r\n self.logger.debug(\"additional ambiguity, word form {}, freq={}\".format(w, freq))\r\n\r\n 
def surname_count(self, predictions):\r\n        surname_count = 0\r\n        surname_index = -1\r\n        for i in range(len(predictions)):\r\n            if predictions[i].getCommonGrammems().find('surname') != -1:\r\n                surname_count += 1\r\n                surname_index = i\r\n        return surname_count, surname_index\r\n\r\n    def predict_surnames(self, surname, outp):\r\n        if surname.endswith('а') and surname[:-1] in self.surnames:\r\n            self.logger.debug(\"ignore {} since the masculine surname is already in the list\".format(surname))\r\n            return\r\n\r\n        if surname.endswith('ая') and (surname[:-2] + 'ий') in self.surnames:\r\n            self.logger.debug(\"ignore {} since the masculine surname is already in the list\".format(surname))\r\n            return\r\n\r\n        if surname.count('-') > 0:\r\n            self.logger.debug(\"ignore {} since it contains a hyphen\".format(surname))\r\n            return\r\n\r\n        genders = set(e.gender for e in self.surnames[surname])\r\n        if len(genders) == 1 and FEM_GENDER in genders:\r\n            if surname.endswith('ова') or surname.endswith('ева') or surname.endswith('ина'):\r\n                self.logger.debug(\"delete last char for popular fem surname {}\".format(surname))\r\n                surname = surname[:-1]\r\n                genders = set([MASC_GENDER])\r\n            else:\r\n                self.logger.error(\"ignore {} since no masculine surname is in the examples\".format(surname))\r\n                return\r\n\r\n        suf_len = 2\r\n        predictions = self.morph_wizard.predict_lemm(surname, suf_len, 2)\r\n        if len(predictions) == 0:\r\n            self.logger.error(\"no predictions for {}\".format(surname))\r\n        else:\r\n            surname_count, surname_index = self.surname_count(predictions)\r\n            if surname_count > 1:\r\n                suf_len = 3\r\n                predictions = self.morph_wizard.predict_lemm(surname, suf_len, 2)\r\n                surname_count, surname_index = self.surname_count(predictions)\r\n            if surname_count == 0:\r\n                self.logger.error(\"no surname prediction for {}\".format(surname))\r\n            elif surname_count > 1:\r\n                self.logger.error(\"more than one surname prediction for {}\".format(surname))\r\n            else:\r\n                prd = predictions[surname_index]\r\n                wiki = prd.getWiktionaryTemplateRef()\r\n                if wiki == \"\":\r\n                    self.logger.error(\"weak (not wiktionary) surname paradigm for {}\".format(surname))\r\n                else:\r\n                    self.logger.debug(\"predict {} flexia model={}, suffix freq={}\".format(\r\n                        surname, prd.getFlexiaModelNo(), prd.getFreq()\r\n                    ))\r\n                    slf = prd.getSlf(surname)\r\n                    self.check_slf(slf)\r\n                    outp.write(slf)\r\n\r\n    def is_a_bad_surname(self, s):\r\n        return s in {'август', 'бер', 'антон', 'борис', 'варшава', 'виктор', 'дон', 'лев', 'марк',\r\n                     'семен', 'глава', 'иван', 'супруг', 'супруга', 'муж', 'жена'}\r\n\r\n    def check_abc(self, s):\r\n        # a valid headword contains no space, dash, or apostrophe\r\n        return s.count(' ') == 0 and s.count('–') == 0 and s.count('’') == 0\r\n\r\n    def main(self):\r\n        if self.args.wikipedia_titles is not None:\r\n            self.read_wiki_titles()\r\n        self.read_fio_from_disclosures()\r\n\r\n        self.logger.info(\"processing {} surnames...\".format(len(self.surnames.keys())))\r\n        cnt = 0\r\n        with open(self.args.output_slf, \"w\") as outp:\r\n            for s in self.surnames.keys():\r\n                if not self.check_abc(s):\r\n                    continue\r\n                if self.is_a_bad_surname(s):\r\n                    self.logger.debug(\"bad surname {}\".format(s))\r\n                    continue\r\n                if self.morph.is_in_dictionary(s) and not self.args.force_ambiguity:\r\n                    if TRussianFioRecognizer.is_dictionary_surname(s):\r\n                        self.logger.debug(\"surname {} is in dictionary\".format(s))\r\n                    elif self.get_up_div_lo(s) > 1.0 or s.endswith('ов'):\r\n                        self.logger.debug(\"add new homonym for {}\".format(s))\r\n                        self.predict_surnames(s, outp)\r\n                    else:\r\n                        self.logger.debug(\"word {} is probably not a proper name\".format(s))\r\n                else:\r\n                    
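# the word is not in the morphological dictionary, so predict a surname paradigm for it\r\n                    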
self.predict_surnames(s, outp)\r\n cnt += 1\r\n if cnt % 1000 == 0:\r\n print (\".\")\r\n if self.args.max_output_count is not None and cnt >= self.args.max_output_count:\r\n break\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n TPredictor().main()\r\n","sub_path":"tools/dlrobot/central/scripts/create_slf_for_surnames.py","file_name":"create_slf_for_surnames.py","file_ext":"py","file_size_in_byte":9349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"10792845","text":"'''\nPreprocess slang dictionaries\n'''\n\nslang_dicts = [\"final_grose.txt\", \"final_hotten.txt\", \"all_online_slang.txt\"]\nidx = 2\nslang_dict = slang_dicts[idx]\n\nfile = open(slang_dict, 'r')\ncontent = file.readlines()\nfile.close()\n\nwords = []\nif idx < 2:\n words = [eval(line.strip())[0] for line in content]\nelse:\n words = [line.strip() for line in content]\n\n# everything is capitalized/uppercase, so use lower\nwords = [w.lower() for w in words]\n\n# separate words with \", or\"\ndelimiter = \", or\"\npairs = []\nfor w in [w for w in words if delimiter in w]:\n pairs.extend(w.split(delimiter))\nwords.extend(pairs)\n\n# remove words with none alphabetical characters\nimport string\nalphas = string.ascii_lowercase\nwords = [w for w in words if all([c in alphas for c in w])]\n\n# remove duplicates\nwords = set(words)\n\n'''\n# remove words in lexemes, show percentage of overlap\n# check lexemes\nimport sys\nsys.path.append(\"../../utils/\")\nfrom load_data import *\nread_data(\"../data/Lexemes.xlsx\")\nall_lexemes = emerged_when(0,2000)\nprint(\"Done reading lexemes\")\n\noverlap = set(words).intersection(all_lexemes)\nprint(\"Overlap\", len(overlap) / len(words))\nprint(\"Count before filter\", len(words))\n\n# remove compound words\nread_data(\"../data/Compound Words.xlsx\")\nall_compounds = emerged_when(0,2000)\nall_compounds2 = []\nfor w in all_compounds:\n if w[0] == '-' or w[-1] == '-':\n continue\n all_compounds2.append(w.replace('-', ''))\n\noverlap = set(words).intersection(all_compounds2)\nprint(\"Overlap\", len(overlap) / len(words))\nprint(\"Count before filter 2\", len(words))\n\n#####\nwords = words - set(all_lexemes)\nwords = words - set(all_compounds2)\nprint(\"Count after all filtering\", len(words))\n'''\n\n# sort and convert to unicode\nwords = sorted(list(words))\nfor i, w in enumerate(words):\n words[i] = u'' + w\n\n# write to file\nimport pickle\nfile = open(\"processed_\" + slang_dict.replace('.txt', '') + '.pkl', 'wb')\npickle.dump(words, file)\n","sub_path":"preprocessing/other_slang/preprocess_dicts.py","file_name":"preprocess_dicts.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"365354485","text":"\n\ndef binaryToDecimal(requiredNumber):\n sum = 0\n mul_factor = 1\n requiredNumber = requiredNumber[::-1]\n\n for digit in requiredNumber:\n if digit == '1':\n sum = sum + mul_factor\n\n mul_factor = mul_factor * 2\n\n return sum\n\n\nif __name__ == '__main__':\n requiredNumber = \"11001101011\"\n\n decimalNumber = binaryToDecimal(requiredNumber)\n\n print(\"The Binary Number Is: \"+str(decimalNumber))","sub_path":"binaryToDecimal.py","file_name":"binaryToDecimal.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"104788094","text":"##################################################################\n# Eke Wokocha #\n# September 29 , 2014 #\n# 
Period 4                                     # \n# Program Description: Import a list of nodes from a file into a#\n# dictionary, build an adjacency map of the edges, and search   #\n# for a path through the map using an A* search method          #\n##################################################################\n\n\n#----------- Functions -------------------------------------\ndef importfileintoarray(filename):\n    file1 = open(filename, 'r')\n    nodes = {}\n    while(1): \n        letter = ''\n        x = ''\n        y = ''\n        current = file1.read(7)\n        if current == '':\n            break\n        while(current != ' '):\n            letter += current\n            current = file1.read(1)\n        current = file1.read(1)\n        while(current != ' '):\n            x += current\n            current = file1.read(1)\n        current = file1.read(1)\n        while(current != '\\n'):\n            y += current\n            current = file1.read(1)\n        longi = float(x)\n        lati = float(y)\n        nodes[letter] = [longi,lati]\n    return nodes\n\nfrom math import pi , acos , sin , cos\ndef distance(y1,x1, y2,x2):\n    R = 3958.76 # miles\n    y1 *= pi/180.0\n    x1 *= pi/180.0\n    y2 *= pi/180.0\n    x2 *= pi/180.0\n    return acos( sin(y1)*sin(y2) + cos(y1)*cos(y2)*cos(x2-x1) ) * R\n    \ndef neighbors(dictt):\n    file1 = open('rrEdges.txt')\n    book = {}\n    while(1):\n        a = file1.read(7)\n        if a == '':\n            break\n        file1.read(1)\n        b = file1.read(7)\n        file1.read(1)\n        # nodes store [longitude, latitude]; distance() expects latitude first\n        dist = distance(dictt[a][1],dictt[a][0],dictt[b][1],dictt[b][0])\n        if not a in book.keys():\n            book[a] = [[[b,dist]],'xxx',0]\n        else: \n            book[a][0].append([b,dist])\n        if not b in book.keys():\n            book[b] = [[[a,dist]],'xxx',0]\n        else: \n            book[b][0].append([a,dist])\n    return book\n\ndef getCol(arr):\n    a = []\n    for x in range(len(arr)):\n        a.append(arr[x][1])\n    return a\n\ndef getIndexCol(arr, n):\n    for x in range(len(arr)):\n        if( arr[x][1] == n ):\n            return x\n    \ndef Astar(dictt, nodes, start, end):\n    closed = {}\n    popc = 0\n    path = []\n    node = start\n    gval = 0\n    fval = gval\n    queue = [[fval,node,path,gval]]\n    while queue:\n        popc+=1\n        queue.sort()\n        (fval,node,path,gval) = queue.pop(0)\n        \n        if(node == end):\n            print ( path + [node] ) \n            print ( \"Path Length : \"+str(len(path + [node])) )\n            print ( \"Distance: \" + str(gval) )\n            print ( \"Pops : \" + str(popc) )\n            break\n        \n        \n        closed[str(node)] = [fval,node,path,gval]\n        for chi in dictt[node][0]:\n            pathN = path + [node]\n            nodeN = chi[0]\n            gvalN = gval + chi[1]\n            # f = g + straight-line distance from the child node to the goal\n            fvalN = gvalN + distance(nodes[nodeN][1],nodes[nodeN][0],nodes[end][1],nodes[end][0])\n            newChi = [fvalN,nodeN,pathN,gvalN]\n            \n            if nodeN in closed:\n                if(closed[nodeN][3] > gvalN):\n                    # a cheaper path to a closed node was found, so re-open it\n                    del closed[nodeN]\n                    queue.append(newChi)\n            else:\n                if nodeN in getCol(queue):\n                    if queue[getIndexCol(queue,nodeN)][3] > gvalN :\n                        \n                        queue[getIndexCol(queue,nodeN)] = newChi\n                else:\n                    queue.append(newChi)\n                    \n# --------- Main --------------------------------------------------\ndef main():\n    nodes = importfileintoarray('rrNodes.txt')\n    book = neighbors(nodes)\n    s = input('start\\t')\n    d = input('end\\t')\n    Astar(book,nodes,s,d)\n    \n    \n    \n    \nif __name__ == '__main__':\n    from random import random, randint; from math import sqrt; from copy import deepcopy;\n    from time import perf_counter; START_TIME = perf_counter(); main(); print('\\n +======+')\n    print(' | %5.2f'%(perf_counter()-START_TIME), 'seconds |'); print(' +================+')\n    \n","sub_path":"Lab6.py","file_name":"Lab6.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"302618467","text":"import os\nimport psycopg2\nfrom flask import Flask, render_template, g, url_for, request, redirect\nfrom sqlalchemy import create_engine\nfrom 
sqlalchemy.orm import scoped_session, sessionmaker\n\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.getenv(\"DATABASE_URL\") # this connects to heroku database\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\nfrom sqlalchemy.orm import sessionmaker\n\n# tis part is needed to create session to query database. this should be JUST BELOW app.config..\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy import create_engine\nengine = create_engine(os.getenv(\"DATABASE_URL\"), echo = True)\nfrom sqlalchemy.ext.declarative import declarative_base\nBase = declarative_base()\n\nclass Review_Page(Base):\n\t__tablename__ = 'review_page'\n\tid = Column('id', Integer, primary_key=True)\n\tfirstname = Column('firstname', String(15))\n\tlastname = Column('lastname', String(15))\n\temail = Column('email', String(30))\n\treview_message = Column('review_message', String(800))\n\n\tdef __init__(self, firstname, lastname, email, review_message):\n\t\tself.firstname = firstname\n\t\tself.lastname = lastname\n\t\tself.email = email\n\t\tself.review_message = review_message\n\nclass Feedback_Page(Base):\n\t__tablename__ = 'feedback_page'\n\tid = Column('id', Integer, primary_key=True)\n\tname = Column('name', String(20))\n\texperience = Column('experience', String(12))\n\tfunctionality = Column('functionality', String(12))\n\taesthetics = Column('aesthetics', String(12))\n\tmy_cv = Column('my_cv', String(12))\n\tmy_webapp = Column('my_webapp', String(12))\n\toutstanding = Column('outstanding', String(15))\n\timprove = Column('improve', String(15))\n\temail = Column('email', String(50))\n\n\tdef __init__(self, name, experience, functionality, aesthetics, my_cv, my_webapp, outstanding, improve, email):\n\t\tself.name = name\n\t\tself.experience = experience\n\t\tself.functionality = functionality\n\t\tself.aesthetics = aesthetics\n\t\tself.my_cv = my_cv\n\t\tself.my_webapp = my_webapp\n\t\tself.outstanding = outstanding\n\t\tself.improve = improve\n\t\tself.email = email\n\nclass Contactme_Page(Base):\n\t__tablename__ = 'contactme_page'\n\tid = Column('id', Integer, primary_key=True)\n\tfirstname = Column('firstname', String(15))\n\tlastname = Column('lastname', String(15))\n\temail = Column('email', String(30))\n\texposure = Column('exposure', String(15))\n\tmessage = Column('message', String(800))\n\t\n\tdef __init__(self, firstname, lastname, email, exposure, message):\n\t\tself.firstname = firstname\n\t\tself.lastname = lastname\n\t\tself.email = email\n\t\tself.exposure = exposure\n\t\tself.message = message\n\n\nSession = sessionmaker(bind = engine)\nsession = Session()\n\n@app.route('/')\ndef intro1():\n\treturn render_template('intro1.html')\n\n@app.route('/aboutme')\ndef aboutme():\n\treturn render_template('aboutme.html')\n\n@app.route('/home')\ndef home():\n\treturn render_template('home.html')\n\n@app.route('/cv')\ndef cv():\n\treturn render_template('cv.html')\n\n@app.route('/apps')\ndef web_apps():\n\treturn render_template('web_apps.html')\n\n@app.route('/review', methods=['GET', 'POST'])\ndef review():\n\tif request.method == 'GET':\n\t\tdata = session.query(Review_Page).all()\n\t\treturn render_template('review.html', data=data)\n\telse:\n\t\tfirstname = request.form.get('firstname')\n\t\tlastname = request.form.get('lastname')\n\t\temail = request.form.get('email')\n\t\treview_message = request.form.get('subject')\n\n\t\tdb_data = Review_Page(firstname, lastname, email, 
review_message)\n\t\tsession.add(db_data)\n\t\tsession.commit()\n\t\tdata = session.query(Review_Page).all()\n\n\t\treturn render_template('review.html', data=data)\n\n@app.route('/feedback', methods=['GET', 'POST'])\ndef feedback():\n\tif request.method == 'GET':\n\t\tdata = session.query(Feedback_Page).all()\n\t\treturn render_template('feedback.html', data=data)\n\telse:\n\t\tname = request.form.get('name')\n\t\texperience = request.form.get('experience')\n\t\tfunctionality = request.form.get('functionality')\n\t\taesthetics = request.form.get('aesthetics')\n\t\tmy_cv = request.form.get('my_cv')\n\t\tmy_webapp = request.form.get('my_webapp')\n\t\toutstanding = request.form.get('outstanding')\n\t\timprove = request.form.get('improve')\n\t\temail = request.form.get('email')\n\n\t\tdb_data = Feedback_Page(name, experience, functionality, aesthetics, my_cv, my_webapp, outstanding, improve, email)\n\t\tsession.add(db_data)\n\t\tsession.commit()\n\t\tdata = session.query(Feedback_Page).all()\n\n\t\treturn render_template('feedback.html', data=data)\n\n@app.route('/contact', methods=['GET', 'POST'])\ndef contact():\n\tif request.method == 'GET':\n\t\tdata = session.query(Contactme_Page).all()\n\t\treturn render_template('contact.html', data=data)\n\telse:\n\t\tfirstname = request.form.get('firstname')\n\t\tlastname = request.form.get('lastname')\n\t\temail = request.form.get('email')\n\t\texposure = request.form.get('exposure')\n\t\tmessage = request.form.get('subject')\n\n\t\tdb_data = Contactme_Page(firstname, lastname, email, exposure, message)\n\t\tsession.add(db_data)\n\t\tsession.commit()\t\n\t\tdata = session.query(Contactme_Page).all()\n\n\t\treturn render_template('contact.html', data=data)\n\n@app.route('/interactive')\ndef interactive():\n\treturn render_template('interactive.html')","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"301305041","text":"p1x, p1y, p2x, p2y = map(int, input().split())\n\ndx = abs(p1x - p2x)\ndy = abs(p1y - p2y)\n\ndef gcd(a, b):\n while b != 0:\n a, b = b, a%b\n return a\n\nif dx == 0 and dy == 0:\n print(0)\nelse:\n ans = gcd(dx, dy) - 1\n print(ans)","sub_path":"ChallengeBook/2/2-6/kousiten.py","file_name":"kousiten.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"230422212","text":"# <>\n# Copyright 2022, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <>\n\n\"\"\"\nThis module adds the method toACE to the classes in the fudge.productData.distributions.energy module.\n\"\"\"\n\nfrom xData import enums as xDataEnumsModule\nfrom xData import XYs1d as XYs1dModule\n\nfrom fudge.productData.distributions import energy as energyModule\n\ndef ACEInterpolation( function, label ) :\n\n interpolation = function.interpolation\n INTE = -1\n if interpolation == xDataEnumsModule.Interpolation.flat:\n INTE = 1\n elif interpolation == xDataEnumsModule.Interpolation.linlin:\n INTE = 2\n if( INTE == -1 ) :\n INTE = 2\n print(' WARNING: for %s changing interpolation from \"%s\" to \"%s\"' %\n (label, interpolation, xDataEnumsModule.Interpolation.linlin))\n\n return( INTE )\n#\n# XYs2d energy (i.e., f(E'|E)) logic\n#\ndef toACE( self, label, offset, weight, **kwargs ) :\n\n header = [ 0, 4, offset + len( weight ) + 4 ] + weight\n\n INTE = 
ACEInterpolation( self, label )\n INTT = ACEInterpolation( self[0], label )\n\n NE = len( self )\n e_ins = []\n Ls = []\n epData = []\n offset += len( header ) + 3 + 1 + 2 * NE + 1 # header plus NR, NE, Es, Ls, (1-based).\n for xs_pdf_cdf in self :\n e_ins.append( xs_pdf_cdf.outerDomainValue )\n Ls.append( offset + len( epData ) )\n eps = xs_pdf_cdf.xs.values.values\n pdf = xs_pdf_cdf.pdf.values.values\n cdf = xs_pdf_cdf.cdf.values.values\n epData += [ INTT, len( eps ) ] + eps + pdf + cdf\n\n return( header + [ 1, NE, INTE, NE ] + e_ins + Ls + epData )\n\nenergyModule.XYs2d.toACE = toACE\n#\n# XYs2d energy (i.e., f(E'|E)) logic\n#\ndef toACE( self, label, offset, weight, **kwargs ) :\n\n XYs2d = energyModule.XYs2d( )\n for i1, region in enumerate( self ) :\n for i2, xs_pdf_cdf1d in enumerate( region ) :\n xs_pdf_cdf1d = xs_pdf_cdf1d.copy( )\n if( ( i1 > 0 ) and( i2 == 0 ) ) : xs_pdf_cdf1d.outerDomainValue *= ( 1.0 + 1e-8 )\n XYs2d.append( xs_pdf_cdf1d )\n return( XYs2d.toACE( label, offset, weight, **kwargs ) )\n\nenergyModule.Regions2d.toACE = toACE\n#\n# NBodyPhaseSpace logic\n#\ndef toACE( self, label, offset, weight, massUnit = None, neutronMass = None, **kwargs ) :\n\n return( [ 0, 66, offset + len( weight ) + 4 ] + weight + [ self.numberOfProducts, self.mass.getValueAs( massUnit ) / neutronMass ] )\n\nenergyModule.NBodyPhaseSpace.toACE = toACE\n\n#\n# Evaporation and SimpleMaxwellian logic\n#\ndef toACE( self, label, offset, weight, **kwargs ) :\n\n header = [ 0, self.LF, offset + len( weight ) + 4 ] + weight\n\n theta = self.parameter1.data.toPointwise_withLinearXYs( accuracy = 1e-3, lowerEps = 0, upperEps = 1e-6 )\n\n NE, e_ins, Ts = len( theta ), [], []\n for x1, y1 in theta :\n e_ins.append( x1 )\n Ts.append( y1 )\n\n return( header + [ 0, NE ] + e_ins + Ts + [ self.U.getValueAs( 'MeV' ) ] )\n\nenergyModule.Evaporation.toACE = toACE\nenergyModule.SimpleMaxwellianFission.toACE = toACE\n\n#\n# WeightedFunctionals logic\n#\ndef toACE( self, label, offset, weight, **kwargs ) :\n\n DLWs = []\n for functional in self :\n weightEs = []\n weightPs = []\n for E1, P1 in functional.weight :\n weightEs.append( E1 )\n weightPs.append( P1 )\n weight = [ 0, len( weightEs ) ] + weightEs + weightPs\n DLW = functional.functional.toACE( label, offset, weight, **kwargs )\n offset += len( DLW )\n if( functional is not self[-1] ) : DLW[0] = offset + 1\n DLWs += DLW\n\n return( DLWs ) \n\nenergyModule.WeightedFunctionals.toACE = toACE\n\n#\n# Watt spectrum logic\n#\ndef toACE( self, label, offset, weight, **kwargs ) :\n\n header = [ 0, self.LF, offset + len( weight ) + 4 ] + weight\n\n A = self.parameter1.data\n A = A.toPointwise_withLinearXYs( accuracy = 1e-3, lowerEps = 0, upperEps = 1e-6 )\n xs, ys = A.copyDataToXsAndYs( )\n data = [ 0, len( A ) ] + xs + ys\n\n B = self.parameter2.data\n B = B.toPointwise_withLinearXYs( accuracy = 1e-3, lowerEps = 0, upperEps = 1e-6 )\n xs, ys = B.copyDataToXsAndYs( )\n data += [ 0, len( B ) ] + xs + ys\n data.append( self.U.getValueAs( 'MeV' ) )\n\n return( header + data )\n\nenergyModule.Watt.toACE = toACE\n\n#\n# MadlandNix spectrum logic\n#\ndef toACE( self, label, offset, weight, **kwargs ) :\n\n return( self.toPointwise_withLinearXYs( accuracy = 1e-3, lowerEps = 0, upperEps = 1e-6 ).toACE( label, offset, weight, **kwargs ) )\n\nenergyModule.MadlandNix.toACE = toACE\n\n#\n# GeneralEvaporation logic\n#\ndef toACE( self, label, offset, weight, **kwargs ) :\n\n header = [ 0, self.LF, offset + len( weight ) + 4 ] + weight\n\n theta = self.parameter1.data\n if( not( 
isinstance( theta, XYs1dModule.XYs1d ) ) ) : raise TypeError( 'unsupported theta form: moniker = \"%s\".' % theta.moniker )\n\n function = self.parameter2.data\n xys1d = energyModule.XYs1d( [ function.xs.values.values, function.pdf.values.values ], dataForm = 'xsandys',\n interpolation = function.interpolation )\n\n xys2d = energyModule.XYs2d( )\n for e_in, T in theta :\n EpP = [ [ e_out * T, P / T ] for e_out, P in xys1d ]\n xys2d.append( energyModule.XYs1d( EpP, outerDomainValue = e_in, interpolation = xys1d.interpolation ) )\n xs_pdf_cdf1d = xys2d.to_xs_pdf_cdf1d( None, None, None )\n return( xs_pdf_cdf1d.toACE( label, offset, weight, **kwargs ) )\n\nenergyModule.GeneralEvaporation.toACE = toACE\n","sub_path":"brownies/LANL/toACE/energy.py","file_name":"energy.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"468735658","text":"import string\nimport logging\nimport threading\n\nfrom Socket import openSocket, sendWhisper, sendMessage\nfrom Parse import parseMessage\nfrom Config import CHAN, modlist\nfrom ManageCmds import addcom, delcom, enablecom, disablecom, runcom\nfrom threading import Thread\nfrom CreateLogger import createlogger\nfrom Uptime import updateuptime, getuptime, getdowntime\nfrom Cooldown import cooldown\nfrom Animals import getbun, addbun, getcat, addcat\n\nchatLog = createlogger('Chat Log', 'Chat', '#'+CHAN, logging.INFO)\ncommandsLog = createlogger('Command Usage', 'Commands', '#'+CHAN, logging.INFO)\nwcommandsLog = createlogger('Command Usage', 'Commands', 'Whispers', logging.INFO)\n\n\ndef connect():\n\tglobal s\n\ts = openSocket()\n\nreadbuffer = \"\"\n\n\ndef housekeeping():\n\tthreading.Timer(600.0, housekeeping).start()\n\t# rank update goes here\n\ttry:\n\t\ts.send('CAP\\r\\n') # pseudo keep-alive (twitch sux)\n\texcept:\n\t\tconnect()\n\nhousekeeping()\n\n\ndef uptime():\n\tthreading.Timer(30.0, uptime).start()\n\tupdateuptime()\n\nuptime()\n\n\nclass consoleInput(Thread):\n\tdef run(self):\n\t\twhile True:\n\t\t\tchatmessage = raw_input(':')\n\t\t\tsendMessage(s, chatmessage)\n\nconsoleInput().start()\n\nwhile True:\n\treadbuffer = readbuffer + s.recv(1024)\n\ttemp = string.split(readbuffer, \"\\n\")\n\treadbuffer = temp.pop()\n\t\n\tfor line in temp:\n\t\tprint(line)\n\t\tchatLog.info(line)\n\t\tif line.startswith(\"PING \"):\n\t\t\ts.send(\"PONG tmi.twitch.tv\\r\\n\")\n\t\t\tprint(\"PONG!\")\n\t\t\tbreak\n\t\tuser, message, wuser, wmessage = parseMessage(line)\n\n\t\ttry:\n\t\t\tif line.split(\" \")[2] == \"USERNOTICE\":\n\t\t\t\tif line.split(\"msg-id=\")[1].split(\";\")[0] == \"sub\":\n\t\t\t\t\tsubber = line.split(\"display-name=\",1)[1].split(\";\", 1)[0]\n\t\t\t\t\tif subber == \"\":\n\t\t\t\t\t\tsubber = line.split(\"login=\", 1)[1].split(\";\", 1)[0]\n\t\t\t\t\tsendMessage(s, \"starB Gottem! Thanks for subscribing, \"+subber+\"! starB\")\n\t\t\t\telif line.split(\"msg-id=\")[1].split(\";\")[0] == \"resub\":\n\t\t\t\t\tsubber = line.split(\"display-name=\", 1)[1].split(\";\", 1)[0]\n\t\t\t\t\tif subber == \"\":\n\t\t\t\t\t\tsubber = line.split(\"login=\", 1)[1].split(\";\", 1)[0]\n\t\t\t\t\tmonths = line.split(\"msg-param-months=\")[1].split(\";\")[0]\n\t\t\t\t\tsendMessage(s, \"starB Gottem! Thanks for resubbing for \"+months+\" months in a row, \"+subber+\"! 
starB\")\n\t\texcept IndexError:\n\t\t\tpass\n\n\t\tif user != \"\" and message != \"\":\n\t\t\tprint(user + \": \" + message)\n\t\t\tif message[0] is \"!\":\n\t\t\t\tcmd = message.split(\" \", 1)[0][1:].strip(\"\\r\").lower()\n\t\t\t\tcommandsLog.info(user + ' tried ' + message)\n\t\t\t\tif cmd == \"addcom\":\n\t\t\t\t\tif user not in modlist:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tsendMessage(s, addcom(message, user))\n\t\t\t\telif cmd == \"delcom\":\n\t\t\t\t\tif user not in modlist:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tsendMessage(s, delcom(message, user))\n\t\t\t\telif cmd == \"enablecom\":\n\t\t\t\t\tif user not in modlist:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tsendMessage(s, enablecom(message, user))\n\t\t\t\telif cmd == \"disablecom\":\n\t\t\t\t\tif user not in modlist:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tsendMessage(s, disablecom(message, user))\n\t\t\t\telif cmd == \"bunny\":\n\t\t\t\t\tif cooldown(\"!bunny\", 15):\n\t\t\t\t\t\tbreak\n\t\t\t\t\tsendMessage(s, getbun())\n\t\t\t\telif cmd == \"addbun\":\n\t\t\t\t\tif user not in modlist:\n\t\t\t\t\t\tbreak\n\t\t\t\t\taddbun(\" \".join(message.split(\" \")[1:]))\n\t\t\t\telif cmd == \"cat\":\n\t\t\t\t\tif cooldown(\"!cat\", 15):\n\t\t\t\t\t\tbreak\n\t\t\t\t\tsendMessage(s, getcat())\n\t\t\t\telif cmd == \"addcat\":\n\t\t\t\t\tif user not in modlist:\n\t\t\t\t\t\tbreak\n\t\t\t\t\taddcat(\" \".join(message.split(\" \")[1:]))\n\t\t\t\telif cmd == \"uptime\":\n\t\t\t\t\tif cooldown(\"!uptime\", 15):\n\t\t\t\t\t\tbreak\n\t\t\t\t\tupt = getuptime()\n\t\t\t\t\tif upt is not None:\n\t\t\t\t\t\tsendMessage(s, \"Ster has been live for \"+upt+\" starB\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tsendMessage(s, \"Ster is currently offline.\")\n\t\t\t\telif cmd == \"downtime\":\n\t\t\t\t\tif cooldown(\"!downtime\", 15):\n\t\t\t\t\t\tbreak\n\t\t\t\t\tdtm = getdowntime()\n\t\t\t\t\tif dtm is not None:\n\t\t\t\t\t\tsendMessage(s, \"Ster has been offline for \"+dtm+\".\")\n\t\t\t\telse:\n\t\t\t\t\tsendMessage(s, runcom(cmd, user))\n","sub_path":"BBot.py","file_name":"BBot.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"262961216","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 7 16:11:40 2020\n\n@author: e.rios.kaliman\n\"\"\"\n\nimport time\nfrom os import environ\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport pandas as pd\nfrom selenium.common.exceptions import NoSuchElementException\nfrom pandas import read_excel\n\n\npeliculas=[]\ngeneros_disponibles=[\"Biography\",\"Drama\",\"Animation\",\"Adventure\",\"Comedy\",\"Family\",\"Terror\",\"Action\",\"No encontrado\"]\n\ndata = pd.read_excel(r'C:\\Users\\e.rios.kaliman\\Desktop\\Lista_peliculas.xlsx')\n\nfor index,row in data.iterrows():\n peliculas.append(row[\"Peliculas\"])\n \nprint(peliculas)\nprint(\"BUSCADOR DE PELICULAS :)\")\ncantidad_peliculas = int(input(\"Cuantas peliculas desea buscar?: \"))\n\nfor i in range (cantidad_peliculas):\n peli = input(\"Nombre de pelicula: \")\n peliculas.append(peli)\n\nuser = environ[\"USERPROFILE\"]\nch_options = webdriver.ChromeOptions()\nch_options.add_argument(\"user-data-dir=\" + user +\n \"\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data\")\n\ndriver = webdriver.Chrome(executable_path=r'C:\\Users\\e.rios.kaliman\\Desktop\\chromedriver.exe',\n options=ch_options)\n\ndriver.get(\"https://www.imdb.com/\")\nrating=[]\nduracion=[]\nsinopsis=[]\ngenero=[]\n\nfor peli in peliculas:\n \n time.sleep(5)\n \n 
search=driver.find_element_by_xpath('//*[@id=\"suggestion-search\"]')\n \n search.send_keys(peli)\n search.send_keys(Keys.ENTER)\n \n time.sleep(5)\n \n peliculas_click = driver.find_element_by_xpath('//*[@id=\"main\"]/div/div[2]/table/tbody/tr[1]/td[2]/a')\n peliculas_click.click()\n \n try: \n rating_peli = driver.find_element_by_xpath('//*[@id=\"title-overview-widget\"]/div[1]/div[2]/div/div[1]/div[1]/div[1]/strong/span').text\n rating.append(rating_peli)\n \n duracion_peli = driver.find_element_by_xpath('//*[@id=\"title-overview-widget\"]/div[1]/div[2]/div/div[2]/div[2]/div/time').text\n duracion.append(duracion_peli)\n \n sinopsis_peli = driver.find_element_by_xpath('//*[@id=\"title-overview-widget\"]/div[2]/div[1]/div[1]').text\n sinopsis.append(sinopsis_peli)\n \n genero_peli = driver.find_elements_by_xpath('//*[@id=\"title-overview-widget\"]/div[1]/div[2]/div/div[2]/div[2]/div/a')\n genero.append([genero.text for genero in genero_peli])\n genero.pop()\n \n except NoSuchElementException: \n rating.append(\"No encontrado\")\n duracion.append(\"No encontrado\")\n sinopsis.append(\"No encontrado\")\n genero.append(\"No encontrado\")\n \n driver.get(\"https://www.imdb.com/\")\n\n\nfor i in genero:\n if type(i) ==list:\n i.pop()\n \n\nDiccionario = {\"Pelicula\":peliculas,\"Duracion\":duracion,\"Genero\":genero,\"Sinopsis\":sinopsis,\"Rating\":rating}\n\ndf_peliculas = pd.DataFrame(Diccionario)\n\ndf_peliculas.to_excel(r'C:\\Users\\e.rios.kaliman\\Desktop\\PYTHON\\Peliculas.xls',sheet_name=\"Peliculas IMDB\",index=False)\n\ndriver.quit()\n","sub_path":"Search by Excel Table.py","file_name":"Search by Excel Table.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"123229241","text":"'''\nObiettivo dell'esercizio e' sviluppare la strategia di gioco per il gioco del Mastermind. \n\nNel gioco del Mastermind un giocatore (il \"decodificatore\"), deve indovinare un codice segreto.\nIl codice segreto e' di N cifre decimali distinte (NO RIPETIZIONI, solo 10 possibili simboli per ciascuna posizione).\nIl decodificatore cerca di indovinare il codice per tentativi. Strategicamente nel tentativo \npossono essere presenti anche cifre ripetute pur sapendo che nel codice non sono presenti ripetizioni. 
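On the IMDb scraper record that ends above: `genero.append([genero.text for genero in genero_peli])` reuses the name `genero` for both the accumulator and the comprehension variable (harmless in Python 3, where comprehensions have their own scope, but confusing), and the `genero.pop()` on the next line discards the entire list that was just appended, while the clean-up loop at the bottom of the file suggests the intent was to drop only the last matched anchor (apparently the release-date link that shares the genre XPath). A hedged rewrite of that step with distinct names; the release-date assumption comes from my reading of the original, not from IMDb's page structure:

```python
def collect_genres(driver, xpath):
    # find_elements_* returns an empty list when nothing matches (no exception)
    elements = driver.find_elements_by_xpath(xpath)
    texts = [el.text for el in elements]
    if texts:
        texts.pop()  # assumed: the last matched anchor is the release date
    return texts

# usage inside the loop:  genero.append(collect_genres(driver, xpath))
```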
\nIn risposta ad ogni tentativo viene fornito un aiuto producendo una coppia di interi (a,b) dove:\n\n- a e' il numero di cifre giuste al posto giusto,\ncioe' le cifre del codice da indovinare che sono effettivamente presenti nel tentativo al posto giusto.\n- b e' il numero di cifre giuste ma al posto sbagliato,\ncioe' le cifre del codice da indovinare che sono effettivamente presenti nel tentativo solo al posto sbagliato.\n\nAd esempio per il codice\n 34670915 e il tentativo\n 93375948\nla risposta deve essere la coppia (2,3).\nIl 2 viene fuori dal contributo delle cifre 7 e 9 del codice da indovinare in quarta e sesta posizione,\nil numero 2 e' dovuto alle cifre 3,4,5 che compaiono tra le cifre del tentativo ma mai al posto giusto.\n\nNella nostra versione di Mastermind il valore di N (lunghezza del codice) puo' essere 6, 7 o 8 e nella coppia data\nin risposta ai tentativi non si vuole rivelare quale delle due due cifre rappresenta il numero di cifre giuste al posto giusto \ndi conseguenza nella coppia di risposta i valori di a e b possono risultare invertiti.\nAd esempio per il codice 34670915 e il tentativo 93375948 le risposte (2,3) e (3,2) sono entrambe possibili.\n\n\nUna configurazione del gioco viene rappresentata come lista di liste (L). \nIl primo elemento di L e' un intero N rappresentante la lunghezza del codice.\nCiascuno degli eventuali altri elementi di L rappresenta un tentativo fin qui\neffettuato dal decodificatore e la relativa risposta.\nPiu' precisamente L[i] con i>0, se presente, conterra' una tupla composta da due elementi:\n- la lista di N cifre con il tentativo effettuato dal decodificatore in un qualche passo precedente\n- la tupla di interi (a,b) ricevuta in risposta.\n\n\nIl programma che dovete realizzare contiene la seguente funzione:\n \n decodificatore(configurazione)\n\nche e' l'AI che guida il gioco. La funzione riceve come input la configurazione attuale del gioco e produce un tentativo (lista di caratteri in '0-9').\n\nPer questo esercizio la valutazione avverra' nel seguente modo: \navete 150 tentativi e 30 secondi di tempo per indovinare quanti piu' codici possibile.\nIl punteggio ottenuto e' dato dal numero di codici che riuscite ad indovinare.\n\nTutti i vostri elaborati al termine verranno valutati su di uno stesso set di codici,\nquesta operazione determinera' una classifica dei vostri elaborati.\nPer entrare in classifica bisogna indovinare almeno cinque codici.\nLa classifica viene suddivisa in 14 fasce che corrispondono ai voti da 18 a 31 ed a \nciascun programma viene assegnato il voto corrispondente.\nSe il numero di valori diversi e' minore di 14 il voto sara' proporzionale alla posizione\nnella graduatoria (con il primo che prende 31 e l'ultimo 18)\n\nIn allegato vi viene fornito un simulatore di gioco (simulatore1.py) che vi permettera' di \nautovalutare la vostra strategia. Il simulatore genera codici casuali e invoca il vostro \nprogramma per ottenere i vari tentativi. Appena un codice viene indovinato ne viene \nproposto uno nuovo. 
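The scoring rule spelled out above is easy to pin down with a reference implementation. This is only an illustration of the rule for codes with distinct digits (guesses may repeat digits), not part of the assignment's required API; it reproduces the worked example, and since the simulator may swap the pair, a solver should treat `(a, b)` and `(b, a)` as equivalent:

```python
def feedback(code, guess):
    """(a, b) for a distinct-digit code: a = right digit in the right place,
    b = code digits that appear in the guess but never in the right place."""
    exact = sum(c == g for c, g in zip(code, guess))
    guess_digits = set(guess)
    present = sum(c in guess_digits for c in code)  # code digits found anywhere
    return exact, present - exact

assert feedback("34670915", "93375948") == (2, 3)  # the worked example above
```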
Il processo termina quando avete esaurito i vostri 150 tentativi\no sono passati i 30 secondi.\n\nATTENZIONE: NON usate lettere accentate ne' nel codice ne' nei commenti\n'''\n\nimport random \nimport itertools\n\n# ESEMPIO di strategia che tira a caso una combinazione di N valori anche ripetuti\nz=0\na=[]\nzorro=0\nlol=[]\ng=[]\ncount=0\nx=[0,1,2,3,4,5,6,7,8,9]\nl=[]\nnn=[]\nasd=0\nyy=[]\nj=[]\nnice=0\njk=[]\ndic=[]\ndef decodificatore(configurazione):\n ''' inserire qui la vostra soluzione'''\n global x,yy,l,count,nn,j,z,a,g,dic,lol,nice,jk,asd,zorro\n if len(configurazione)==1:\n count=0\n zorro=0\n x=[0,1,2,3,4,5,6,7,8,9]\n l.clear()\n nn.clear()\n nice=0\n asd=0\n a.clear()\n yy.clear()\n j.clear()\n dic.clear()\n jk.clear()\n z=0\n \n l=[]\n n=configurazione[0]\n risposta=[]\n roundd=round(n/2)\n if len(yy)==n:\n x=[]\n if len(yy)==n:\n x=[]\n if len(yy)==n:\n x=[]\n if x!=[] and len(configurazione)>1 and configurazione[-1][-1][0]+configurazione[-1][-1][1]==0:\n if configurazione[-1][0].count(configurazione[-1][0][0])>5:\n j.append(x[0])\n x=x[1:]\n yy.append(x[0])\n x=x[1:]\n if configurazione[-1][0].count(configurazione[-1][0][0])<5:\n j.append(x[0])\n j.append(x[1])\n x=x[2:]\n if x!=[] and len(configurazione)>1 and configurazione[-1][-1][0]+configurazione[-1][-1][1]==1:\n if configurazione[-1][0].count(configurazione[-1][0][0])<5:\n for i in range(n):\n l.append(configurazione[-1][0][0])\n #count+=1\n return l\n if configurazione[-1][0].count(configurazione[-1][0][0])>5:\n #x.remove(configurazione[-1][0][-1])\n yy.append(x[0])\n x=x[2:]\n if x!=[] and len(configurazione)>1 and configurazione[-1][-1][0]+configurazione[-1][-1][1]==2:\n yy.append(x[0])\n yy.append(x[1])\n x=x[2:]\n if len(x)>=1:\n for i in range(roundd):\n risposta.append(x[0])\n #count+=1\n if len(x)>1:\n for i in range(n-roundd):\n risposta.append(x[1])\n #count+=1\n if len(x)==1:\n for i in range(n):\n risposta.append(x[0])\n x=[]\n if j==[] and x==[] and yy!=[]:\n if 1 not in yy: j.append(1)\n if 0 not in yy: j.append(0)\n if 2 not in yy: j.append(2)\n if 3 not in yy: j.append(3)\n if 4 not in yy: j.append(4)\n if 5 not in yy: j.append(5)\n if 6 not in yy: j.append(6)\n if 7 not in yy: j.append(7)\n if 8 not in yy: j.append(8)\n if 9 not in yy: j.append(9)\n \n if x==[] and yy!=[]:\n nn=itert(yy)\n lol=yy.copy()\n for i in range(len(lol)):\n jk.append(j[0])\n yy.clear()\n if x==[] and yy==[]:\n \n if configurazione[-1][-1]==(0,n) or configurazione[-1][-1]==(n,0):\n dex(n,nn,configurazione[-1][0])\n print(len(nn))\n if (configurazione[-1][-1]==(1,n-1) or configurazione[-1][-1]==(n-1,1)) and asd==0:\n a.clear()\n a=configurazione[-1][0].copy()\n g=configurazione[-1][0].copy()\n a[z]=jk[0]\n print(len(nn))\n return a\n if (configurazione[-1][-1]==(1,n-2) or configurazione[-1][-1]==(n-2,1)) and asd==0 and zorro==0:\n a=g.copy()\n for i in nn[:]:\n if i[z]==g[z]:nn.remove(i)\n z+=1\n a[z]=jk[0]\n print(len(nn))\n return a\n if (configurazione[-1][-1]==(0,n-1) or configurazione[-1][-1]==(n-1,0) )and asd==0 and zorro==0:\n for i in nn[:]:\n if i[z]!=g[z]:nn.remove(i)\n asd+=1\n dex(n,nn,configurazione[-1][0])\n if (configurazione[-1][-1]==(1,n-1) or configurazione[-1][-1]==(n-1,1)) and asd!=0:\n dic=configurazione[-1][0]\n dic[z]=jk[0]\n dex(n,nn,dic)\n print(len(nn))\n '''if (configurazione[-1][-1]==(2,n-2) or configurazione[-1][-1]==(n-2,2)) and asd!=0:\n a=configurazione[-1][0].copy()\n g=configurazione[-1][0].copy()\n a[z]=jk[0]\n zorro+=1\n return a\n if (configurazione[-1][-1]==(1,n-2) or 
configurazione[-1][-1]==(n-2,1) or configurazione[-1][-1]==(1,n-3) or configurazione[-1][-1]==(n-3,1)) and asd!=0:\n a=g.copy()\n a[z]=jk[0]\n a[nice]=jk[0]\n for i in nn[:]:\n if i[nice]==a[nice]:nn.remove(i)'''\n \n dot=random.choice(nn)\n for i in nn[:]:\n if i==dot:nn.remove(i)\n return dot\n return risposta\ndef itert(lista):\n cc=[]\n a=itertools.permutations(lista)\n for i in a:\n cc.append(list(i))\n return cc\ndef dex(n,nn,qls):\n if n==6:\n a,b,c,d,e,f=qls\n for i in nn[:]:\n if i[0]==a or i[1]==b or i[2]==c or i[3]==d or i[4]==e or i[5]==f: nn.remove(i)\n if n==7:\n a,b,c,d,e,f,g=qls\n for i in nn[:]:\n if i[0]==a or i[1]==b or i[2]==c or i[3]==d or i[4]==e or i[5]==f or i[6]==g: nn.remove(i)\n if n==8:\n a,b,c,d,e,f,g,h=qls\n for i in nn[:]:\n if i[0]==a or i[1]==b or i[2]==c or i[3]==d or i[4]==e or i[5]==f or i[6]==g or i[7]==h: nn.remove(i)\n","sub_path":"students/1665526/homework05/program01.py","file_name":"program01.py","file_ext":"py","file_size_in_byte":8922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"130271441","text":"import os\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport matplotlib.pyplot as plt\n\nimport chart_studio\n\nchart_studio.tools.set_credentials_file(username='tristanovsk', api_key='e2aYFdkLAwuAnCZn8aWe')\nimport chart_studio.plotly as py\nimport plotly.express as px\nimport plotly.offline as po\n\nimport coxmunk.coxmunk as coxmunk\nimport RTxploitation\n# from grs import acutils\n# import grs.auxdata as grsdata\nimport toa_simu.auxdata as ad\n\nfrom Py6S import *\n\npublish = False\nsza = 30\nvza = 10\nazi = 10\nwind = 2\nwind_azi = 0\nstats = 'bh2006'\ndirlut = os.path.abspath('/DATA/projet/VRTC/lut/norm_rad_toa')\ndirlut = os.path.abspath('/DATA/projet/VRTC/fig/nosea')\n\nwl = np.linspace(400, 2400, 200)\nN = len(wl)\n\n####################################\n# Sunglint reflectance\n####################################\nwri = ad.water_refractive_index()\nr_g = [coxmunk.sunglint(sza, vza, azi, m=m).sunglint(wind, wind_azi, stats=stats) for m in wri.n_harmel(wl)]\n\n####################################\n# Rayleigh and aerosol opt. 
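On the solver in the record that ends above: once the digit set is recovered, `itert` enumerates `itertools.permutations` of it and `dex` prunes candidates position by position. The same pruning idea can be stated more generally as consistency filtering: keep only the orderings that would have produced every reply seen so far, remembering that replies may arrive with the pair swapped. A sketch reusing the `feedback` helper from the note above (the function name is mine, not the assignment's):

```python
import itertools

def consistent_candidates(digits, history):
    """history: list of (guess, reply) pairs; replies may be (a, b) or (b, a)."""
    candidates = [list(p) for p in itertools.permutations(digits)]
    for guess, reply in history:
        accepted = (tuple(reply), tuple(reversed(reply)))
        candidates = [c for c in candidates if feedback(c, guess) in accepted]
    return candidates
```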
thickness\n####################################\nrot = ad.rot().get_data(wl)\n\n####################################\n# 6S absorbing gases transmittance\n####################################\n\ns = SixS()\ns.geometry.solar_z = sza\ns.geometry.solar_a = 0\ns.geometry.view_z = vza\ns.geometry.view_a = azi\ns.aero_profile = AeroProfile.PredefinedType(AeroProfile.Maritime)\nwavelengths, results = SixSHelpers.Wavelengths.run_wavelengths(s, wl / 1000)\nF0, trans_gas, irradiance = [], [], []\nfor i in range(N):\n res = results[i]\n F0 = np.append(F0, res.solar_spectrum)\n trans_gas = np.append(trans_gas, res.total_gaseous_transmittance)\n irradiance = np.append(irradiance,\n res.diffuse_solar_irradiance + res.direct_solar_irradiance)\n\nEs_toa = F0 * np.cos(np.radians(sza))\n\n####################################\n# Open LUT file\n####################################\n\nmodels = ['rg0.10_sig0.46_nr1.51_ni-0.0200', 'rg0.10_sig0.46_nr1.45_ni-0.0001',\n 'rg0.80_sig0.60_nr1.51_ni-0.0200', 'rg0.80_sig0.60_nr1.35_ni-0.0010']\nichoice = 2\ntiltes = ['TOA radiance', 'TOA normalized radiance', 'TOA normalized radiance corrected for gaseous absorption']\nparams = ['L_TOA', 'L_NTOA', 'L_NTOA_gascorr']\nnorms = [trans_gas * Es_toa, trans_gas, 1]\n\nnorm = norms[ichoice]\ndf_ = pd.DataFrame()\n\nfor model in models:\n\n lutfile = os.path.join(dirlut, 'lut_toa_rad_aero_' + model + '.nc')\n\n lut = xr.open_dataarray(lutfile, group='Lnorm')\n aerosol = xr.open_dataset(lutfile, group='aerosol')\n aot = lut.aot * aerosol.Cext / aerosol.Cext_ref\n\n Ln_toa_ = lut.isel(z=1).interp(sza=sza, vza=vza, azi=azi)\n Ln_toa = Ln_toa_.interp(wl=wl / 1000, method='quadratic') # 'cubic')\n # Ln_toa = Ln_toa_.interp( wl=wl / 1000, method='nearest')\n aot_ = aot.interp(wl=wl / 1000, method='quadratic')\n\n for aot550 in [0.01, 0.1, 0.2, 0.3, 0.5, 0.8]:\n trans_atmo = np.exp(\n -(rot + aot_.interp(aot=aot550)) * (1 / np.cos(np.radians(sza)) + 1 / np.cos(np.radians(vza))))\n Ltoa = norm * Ln_toa.interp(aot=aot550)\n Ltoa_g = Ltoa + norm * trans_atmo * r_g\n df = pd.DataFrame({'wl': wl, 'Ltoa': Ltoa, 'Ltoa_g': Ltoa_g})\n df['aot550'] = aot550\n df['model'] = model\n df_ = pd.concat([df_, df])\n\n####################################\n# Plot spectra\n####################################\ndf__ = df_.melt(id_vars=['wl', 'aot550', 'model'])\nby_ = 'aot550'\n\nfig = px.scatter(df__, x=\"wl\", y=\"value\", color=\"variable\", facet_col='model', facet_col_wrap=2,\n # hover_name=\"site\", hover_data=['date','satellite','Rrs_count'],\n opacity=0.5,\n # range_x=[-0.005,0.04],range_y=[-0.005,0.04],\n animation_frame=by_,\n title=tiltes[ichoice] + \" with and without sunglint, sza= {:.1f}°, vza= {:.1f}° \".format(sza, vza),\n height=950) # ,width=1200)\npo.plot(fig, filename=params[ichoice] + '_test.html')\nif publish:\n py.plot(fig, filename=params[ichoice] + '_test.html', auto_open=False)\n\n####################################\n# Plot aero fig\n####################################\nfig = px.scatter(df__[df__.variable == 'Ltoa'], x=\"aot550\", y=\"value\", color=\"model\", facet_col=('model'),\n facet_col_wrap=2,\n # hover_name=\"site\", hover_data=['date','satellite','Rrs_count'],\n opacity=0.5,\n # range_x=[-0.005,0.04],range_y=[-0.005,0.04],\n animation_frame='wl',\n title=tiltes[ichoice] + \" with and without sunglint, sza= {:.1f}°, vza= {:.1f}° \".format(sza, vza),\n height=950) # ,width=1200)\npo.plot(fig) # ,filename=params[ichoice]+'_test.html')\n\n####################################\n# Set SMAC patrameters for\n# absorbing gases 
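In the `main.py` record above, the LUT loop computes the direct atmospheric transmittance over the sun-to-surface and surface-to-sensor paths as `np.exp(-(rot + aot) * (1/cos(sza) + 1/cos(vza)))`, a two-path Beer-Lambert term in the combined Rayleigh plus aerosol optical thickness. Isolated as a function (angles in degrees, as in the script):

```python
import numpy as np

def two_path_transmittance(tau_rayleigh, tau_aerosol, sza_deg, vza_deg):
    # combined air mass of the downwelling and upwelling paths
    air_mass = 1.0 / np.cos(np.radians(sza_deg)) + 1.0 / np.cos(np.radians(vza_deg))
    return np.exp(-(tau_rayleigh + tau_aerosol) * air_mass)

# e.g. two_path_transmittance(0.1, 0.2, sza_deg=30, vza_deg=10) ~= 0.52
```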
correction\n####################################\nimport lowtran\nimport lowtran.plots as lp\n\nlowtran.nm2lt7(200, 2500, 20)\nc1 = {'model': 6,\n 'h1': 0,\n 'angle': [0, 30, 60],\n 'wlshort': 300,\n 'wllong': 2600,\n 'wlstep': 5,\n\n }\n\nTR = lowtran.transmittance(c1)\nlp.plottrans(TR, c1)\nTR = lowtran.radiance(c1)\nlp.plotradiance(TR, c1)\nTR = lowtran.irradiance(c1)\nlp.plotirrad(TR, c1)\n\ns = SixS()\ns.geometry.solar_z = sza\ns.geometry.solar_a = 0\ns.geometry.view_z = vza\ns.geometry.view_a = azi\ns.aero_profile = AeroProfile.PredefinedType(AeroProfile.Maritime)\nparameter = 'apparent_radiance'\nparameter = 'direct_solar_irradiance'\nparams = ['transmittance_no2.total', 'total_gaseous_transmittance', 'apparent_radiance', 'direct_solar_irradiance',\n 'diffuse_solar_irradiance']\n\nss = []\nfor wl_ in wl / 1000:\n s.wavelength = Wavelength(wl_)\n s.run()\n ss.append(s.outputs)\n\nwavelengths, values = SixSHelpers.Wavelengths.run_wavelengths(s, wl / 1000, n=8, output_name=params[0])\nplt.figure()\nSixSHelpers.Wavelengths.plot_wavelengths(wavelengths, values, 'Pixel Radiance (W/m^2)')\n\nsensordata = grsdata.sensordata('S2A')\nsmac = acutils.smac(sensordata.smac_bands, sensordata.smac_dir)\nsmac.set_gas_param()\n# smac.set_values(o3du=340, h2o=2)\nsmac.set_standard_values(l2h.pressure_msl)\nl2h.aux.no2 = smac.uno2\n","sub_path":"toa_simu/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"488863394","text":"\"\"\"\nflask创建时候的参数\n\"\"\"\nfrom flask import Flask\n\n# 第一个参数指代Flask所对应的模块,其可以决定静态文件从哪个位置开始找。\napp = Flask(__name__,\n\t\t\tstatic_url_path='/static', \t# 表示静态文件访问的路径,可以修改为任意的 /xxx\n\t\t\tstatic_folder='static', # 静态文件存放的目录\n\t\t\ttemplate_folder='templates' # 表示模板文件存放的目录\n\t\t\t)\n\n\n# 添加想要的配置\n# 三种配置方式\n# 三种模式\n\"\"\"\napp.config.from_object() # 从对象中\napp.config.from_pyfile() # 从py文件中\napp.config.from_envvar() # 从环境变量中\n\"\"\"\n\n# 从对象中加载配置\n# class Config(object):\n# \tDEBUG = True\n\n# 配置\n# app.config.from_object(Config) # 从对象中读取\n\n\n# 从文件中加载配置\n# 当前文件目录下的创建该文件\n# app.config.from_pyfile('config.ini')\n\n\n# 从环境变量中加载\n# 在edit configurations 中添加环境变量\n# app.config.from_envvar('ENVCONFIG')\n\n\n\n\"\"\"\napp.config 是字典的子类\napp.config['debug']\n\"\"\"\n\n# 一些常用的配置,可以直接通过app.来配置\n# eg: app.debug = True\napp.debug = True\napp.config['DEBUG'] = True # 等同于上面一句\n\n\n@app.route('/')\ndef index():\n\t# print(app.config)\n\tprint(isinstance(app.config, dict))\n\treturn \"hello world\"\n\nif __name__ == '__main__':\n\tapp.run(debug=True,port=8000)","sub_path":"demo002.py","file_name":"demo002.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"75480750","text":"\n\nfrom utils import *\nimport wandb\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor, FasterRCNN\nfrom torchvision.models.detection.backbone_utils import resnet_fpn_backbone\nimport pandas as pd\nimport numpy as np\nfrom mapcalc import *\nfrom mapcalc import calculate_map, calculate_map_range\nimport dataset\nimport pdb\nfrom dataset import *\n\n\nif torch.cuda.is_available():\n device = torch.device('cuda')\nelse:\n device = torch.device('cpu')\n\n\nbatchsize = 2\nin_dim = (300,300)\nnormalization_data = torch.load('mean-std.pt')\nnum_classes = 7\nprint_every = 10\n\ndiseases = ['akiec', 'bcc', 'bkl', 'df', 
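The short Flask record above (comments in Chinese) walks through the three standard configuration-loading mechanisms and notes that `app.config` is a `dict` subclass. An English recap using the same calls; `Config`, `config.ini` and `ENVCONFIG` are the record's own example names. Note that `from_object` copies only upper-case attributes, and `from_pyfile` expects Python syntax regardless of the file extension:

```python
from flask import Flask

app = Flask(__name__)

class Config:
    DEBUG = True  # only UPPER_CASE attributes are picked up

app.config.from_object(Config)            # 1) from an object
# app.config.from_pyfile('config.ini')    # 2) from a Python-syntax config file
# app.config.from_envvar('ENVCONFIG')     # 3) file path taken from this env var

assert isinstance(app.config, dict)       # Config is a dict subclass
assert app.config['DEBUG'] is True
```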
'mel', 'nv', 'vasc']\nmapping = {'akiec': 0, 'bcc': 1, 'bkl': 2, 'df': 3, 'mel': 4, 'nv': 5, 'vasc': 6}\ninv_mapping = {mapping[k]:k for k in mapping.keys()}\n\n\ndef calculate_AP(model, data_loader, metric, idx = 'val_mAP', th=0.5):\n \"\"\"\n Calculates and stores the average precision in the the metrics dictionary.\n\n model: (nn.Module) model\n data_loader: (nn.DataLoader) Dataloader\n metric: (Dictionary) Dictionary with Average/Class Meter\n \n Returns mAP over all classes for IOU threshold of 0.5\n \"\"\"\n model.eval()\n for dis_in, disease in enumerate(diseases):\n for i, data in enumerate(data_loader):\n image, target = data\n image = image.to(device)\n target = {k: v.numpy()[0] for k, v in target.items()}\n class_id = inv_mapping[target['labels'].item()]\n if class_id == disease:\n result = model(image)[0]\n result = {k: v.cpu() for k, v in result.items()}\n mAP = calculate_map(target, result, th)\n metric[idx].update(dis_in, mAP, n=1)\n return metric[idx].class_average()\n\n\ndef initialize_model():\n model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes + 1)\n return model\n\n\ndef train_model(model, optimizer, lr_scheduler, data_loader_train, data_loader_valid, data_loader_test, diseases, config):\n best_mAP = 0\n header = 10 * '='\n short_header = 5*'='\n metrics = {'val_mAP': ClassMeter(diseases)}\n metrics['train_loss'] = AverageMeter()\n for epoch in range(config.epochs):\n header = 10 * '='\n short_header = 5*'='\n print(header, \"Epoch {}\".format(epoch), header)\n for i, d in enumerate(data_loader_train):\n\n # Header\n header = 10*\"=\"\n short_header = 5 * '='\n\n # Training\n model.train()\n\n image, target = d\n image = torch.stack([im.to(device) for im in image])\n target = [{k: v.to(device) for k, v in t.items()} for t in target]\n losses = model(image, target)\n loss = sum(loss for loss in losses.values())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n lr_scheduler.step()\n\n metrics['train_loss'].update(loss.item(), n=len(target))\n\n # Log to weights and biases\n wandb.log({'train_loss': metrics['train_loss'].avg})\n\n # Print every 10 epochs\n if i % (print_every) == 0:\n print('[Epoch {} | {} {}] Training Loss: {}'.format(epoch, i, len(data_loader_train), metrics['train_loss'].avg))\n\n with torch.no_grad():\n calculate_AP(model, data_loader_valid, metrics)\n \n # Print Summary\n print(short_header, \"Validation\", short_header)\n current_mAP = metrics['val_mAP'].sample_based_average()\n print('[Val average mAP] : {}'.format(current_mAP)) \n # Log Values\n wandb.log({'val_mAP': current_mAP, 'epoch':epoch})\n for idx, disease in enumerate(diseases):\n wandb.log({'val_mAP' + '_' +disease: metrics['val_mAP'].avg[idx], 'epoch': epoch})\n\n # SAVE BEST MODEL\n if current_mAP > best_mAP:\n torch.save(model.state_dict(), wandb.run.dir +\n '/best.pt'.format(epoch))\n best_mAP = current_mAP\n\n # SAVE EVERY FIVE EPOCHS\n if epoch % 5 == 0:\n torch.save(model.state_dict(), wandb.run.dir +\n '/epoch_{}.pt'.format(epoch))\n\n # fig = draw_box(image, image_id, bbox, target=None, confidence=None)\n # wandb.log({'sample_figure': fig, 'epoch':epoch})\n\n # Test mAP\n test_metrics = {'test_mAP': ClassMeter(diseases)}\n with torch.no_grad():\n calculate_AP(model, data_loader_test, test_metrics, idx='test_mAP')\n print(header, \"Test\", header)\n test_mAP = 
test_metrics['test_mAP'].sample_based_average()\n print('[Test average mAP] : {}'.format(test_mAP))\n\n # LOG STUFF\n wandb.log({'test_mAP': test_mAP, 'epoch':epoch})\n for idx, disease in enumerate(diseases):\n wandb.log({'test_mAP' + '_' + disease: test_metrics['test_mAP'].avg[idx], 'epoch':epoch})\n \n\n\n \n\n\n# %%\nif __name__ == \"__main__\":\n hyperparameter_defaults = dict(\n num_workers=0,\n batch_size=2,\n learning_rate=0.001,\n epochs=10,)\n wandb.init(project = 'FSDL', config=hyperparameter_defaults )\n config = wandb.config\n dataset = SkinData('/data/kevinmiao', 'final.csv', transform=transforms.Compose([ToTensor, Normalizer(normalization_data)]))\n wandb.log({'new_metric':True})\n train_data, test_data, valid_data = torch.utils.data.random_split(dataset,[int(0.7 * len(dataset)), int(0.15 * len(dataset)), int(0.15 * len(dataset))+1], generator=torch.Generator().manual_seed(42))\n data_loader_train = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, collate_fn = collate_fn)\n data_loader_test = torch.utils.data.DataLoader(test_data, batch_size=1)\n data_loader_valid = torch.utils.data.DataLoader(valid_data, batch_size=1)\n model = initialize_model()\n model = model.to(device)\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n optimizer = torch.optim.SGD(model.parameters(), lr=config.learning_rate)\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, milestones=[15, 30], gamma=0.1, last_epoch=-1)\n train_model(model, optimizer, lr_scheduler, data_loader_train, data_loader_valid, data_loader_test, diseases, config)\n \n\n\n\n \n\n\n\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"557998927","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 30 20:46:21 2021\n\n@author: Yoshihiro Obata\n\"\"\"\n\n# %% importing packages\nimport pandas as pd\nfrom logistic_regr import log_regr\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# %% importing the data\ncols = ['variance', 'skewness', 'curtosis', 'entropy', 'label']\ntrain = pd.read_csv('bank-note/bank-note/train.csv', names=cols)\ntest = pd.read_csv('bank-note/bank-note/test.csv', names=cols)\n\n# %% testing implementation\nT = 50\nlr0 = 0.1\nd0 = 0.05\nvar = 1\nlregr = log_regr(train, T=T, lr=lr0, d=d0, var=var)\nws, Ls, _ = lregr.train_log_regr()\nerr = lregr.predict_log_regr(test)\nTs = np.arange(T)\nplt.plot(Ts, Ls)\ndesc = f'lr: {lr0}, d: {d0}, err: {err}'\nplt.annotate(desc, (1,1), fontsize=20)\n\n# %% tuning the hyperparams for MAP\nlr0s = [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001]\nd0s = [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001]\nfor lr0 in lr0s:\n for d0 in d0s:\n print(f'starting tuning with lr: {lr0}, d: {d0}')\n lregr = log_regr(train, T=T, lr=lr0, d=d0, var=var)\n ws, Ls, _ = lregr.train_log_regr()\n err = lregr.predict_log_regr(test)\n # Ts = np.arange(T)\n # plt.figure()\n # plt.plot(Ts, Ls)\n # desc = f'lr: {lr0}, d: {d0}, err: {err}'\n # plt.annotate(desc, (1,1), fontsize=20)\n\n# from looking at errs and the loss, lr=0.05, d=0.001 was chosen\n\n# %% tuning the hyperparams for MLE\nlr0s = [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001]\nd0s = [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001]\nfor lr0 in lr0s:\n for d0 in d0s:\n print(f'starting tuning with lr: {lr0}, d: {d0}')\n lregr = log_regr(train, T=T, lr=lr0, d=d0, var=var, method='MLE')\n ws, Ls, _ = lregr.train_log_regr()\n err = 
lregr.predict_log_regr(test)\n # Ts = np.arange(T)\n # plt.figure()\n # plt.plot(Ts, Ls)\n # desc = f'lr: {lr0}, d: {d0}, err: {err}'\n # plt.annotate(desc, (0,0), fontsize=20)\n# from looking at errs and the loss, lr=0.005 and d=0.05 was chosen\n\n# %% part a\nlr0 = 0.05\nd0 = 0.001\nT = 50\nvaris = [0.01, 0.1, 0.5, 1, 3, 5, 10, 100]\nMAP_train_errs = []\nMAP_test_errs = []\nfor var in varis:\n print(f'Running logistic regression with var={var}...')\n lregr = log_regr(train, T=T, lr=lr0, d=d0, var=var)\n ws, Ls, errs = lregr.train_log_regr(get_err=True, test=(train, test))\n train_err, test_err = errs\n MAP_train_errs.append(train_err)\n MAP_test_errs.append(test_err)\n\n# %% part b\nlr0 = 0.005\nd0 = 0.05 \nlregr = log_regr(train, T=T, lr=lr0, d=d0, var=var, method='MLE')\nws, Ls, errs = lregr.train_log_regr(get_err=True, test=(train, test))\ntrain_err, test_err = errs\nMLE_train_errs = train_err\nMLE_test_errs = test_err\n\n# %% plotting the errors\nfig, ax = plt.subplots(1,2,figsize=(16,4))\nTvals = np.arange(T)\nc1 = ['blue', 'red', 'black', 'cyan', 'green', 'yellow', 'pink', 'purple']\nfor i in range(4):\n ax[0].plot(Tvals, MAP_train_errs[i], linewidth=2, color=c1[i])\n ax[0].plot(Tvals, MAP_test_errs[i], linewidth=2, linestyle='--', color=c1[i])\nfor i in range(4,8,1):\n ax[1].plot(Tvals, MAP_train_errs[i], linewidth=2, color=c1[i])\n ax[1].plot(Tvals, MAP_test_errs[i], linewidth=2, linestyle='--', color=c1[i])\n\nfor i in range(2): \n ax[i].tick_params(labelsize=14, width=2, size=7)\n ax[i].set_xlim([0,50])\n ax[i].set_ylim([0,0.5])\n ax[i].set_xlabel('Epoch', fontsize=16)\n ax[i].set_ylabel('Error', fontsize=16)\n for spine in ax[i].spines:\n ax[i].spines[spine].set_linewidth(2)\n\nlstr1 = [r'Training: $\\sigma^2$=0.01', 'Test: $\\sigma^2$=0.01',\n r'Training: $\\sigma^2$=0.1', 'Test: $\\sigma^2$=0.1',\n r'Training: $\\sigma^2$=0.5', 'Test: $\\sigma^2$=0.5',\n r'Training: $\\sigma^2$=1', 'Test: $\\sigma^2$=1']\nlstr2 = [r'Training: $\\sigma^2$=3', 'Test: $\\sigma^2$=3',\n r'Training: $\\sigma^2$=5', 'Test: $\\sigma^2$=5',\n r'Training: $\\sigma^2$=10', 'Test: $\\sigma^2$=10',\n r'Training: $\\sigma^2$=100', 'Test: $\\sigma^2$=100']\nax[0].legend(lstr1, fontsize=13, loc='upper right')\nax[1].legend(lstr2, fontsize=13, loc='upper right')\n# plt.savefig('MAP_errs.png', dpi=150, bbox_inches='tight')\n# plt.savefig('MAP_errs_ZOOM.png', dpi=150, bbox_inches='tight')\n\n# %% plotting MLE\nTvals = np.arange(T)\nfig, ax = plt.subplots(figsize=(8,4))\nplt.plot(Tvals, MLE_train_errs, linewidth=2, label='Training')\nplt.plot(Tvals, MLE_test_errs, linewidth=2, label='Test')\nax.tick_params(labelsize=14, width=2, size=7)\nax.set_xlim([0,50])\nax.set_ylim([0,0.1])\nax.set_xlabel('Epoch', fontsize=16)\nax.set_ylabel('Error', fontsize=16)\nfor spine in ax.spines:\n ax.spines[spine].set_linewidth(2)\nplt.legend(fontsize=13)\nplt.savefig('MLE_errs.png', dpi=150, bbox_inches='tight')\n\n# %% writting the error values\nMAPtrain = [method[-1] for method in MAP_train_errs]\nMAPtest = [method[-1] for method in MAP_test_errs]\ndf = pd.DataFrame({'var':varis,\n 'MAPtrain': MAPtrain,\n 'MAPtest': MAPtest})\ndf['MLEtrain'] = MLE_train_errs[-1]\ndf['MLEtest'] = MLE_test_errs[-1]\n\ndf.to_csv('table_errs.csv')\n","sub_path":"Logistic Regression/log_regr_problems.py","file_name":"log_regr_problems.py","file_ext":"py","file_size_in_byte":5071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"244655039","text":"import numpy as np\nimport 
pygeos\n\npoint_polygon_testdata = (\n pygeos.points(np.arange(6), np.arange(6)),\n pygeos.box(2, 2, 4, 4),\n)\npoint = pygeos.points(2, 3)\nline_string = pygeos.linestrings([(0, 0), (1, 0), (1, 1)])\nlinear_ring = pygeos.linearrings([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])\npolygon = pygeos.polygons([(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)])\nmulti_point = pygeos.multipoints([(0, 0), (1, 2)])\nmulti_line_string = pygeos.multilinestrings([[(0, 0), (1, 2)]])\nmulti_polygon = pygeos.multipolygons(\n [\n [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)],\n [(2.1, 2.1), (2.2, 2.1), (2.2, 2.2), (2.1, 2.2), (2.1, 2.1)],\n ]\n)\ngeometry_collection = pygeos.geometrycollections(\n [pygeos.points(51, -1), pygeos.linestrings([(52, -1), (49, 2)])]\n)\npoint_z = pygeos.points(1.0, 1.0, 1.0)\npolygon_with_hole = pygeos.Geometry(\n \"POLYGON((0 0, 0 10, 10 10, 10 0, 0 0), (2 2, 2 4, 4 4, 4 2, 2 2))\"\n)\nempty_point = pygeos.Geometry(\"POINT EMPTY\")\nempty_line_string = pygeos.Geometry(\"LINESTRING EMPTY\")\nempty = pygeos.Geometry(\"GEOMETRYCOLLECTION EMPTY\")\n\nall_types = (\n point,\n line_string,\n linear_ring,\n polygon,\n multi_point,\n multi_line_string,\n multi_polygon,\n geometry_collection,\n empty,\n)\n","sub_path":"pygeos/test/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"511065261","text":"# vi: ft=python\nimport os\n\n\n# Set up environment\nvars = Variables()\nvars.Add(EnumVariable('db', 'database backend', 'bdb', ('bdb', 'leveldb')))\nvars.Add(BoolVariable('debug', 'enable debugging', False))\nenv = Environment(variables=vars, tools=['default', 'Make'])\nHelp(vars.GenerateHelpText(env))\n\nenv.Replace(CC='clang', LINK='clang')\nenv.Append(CPPPATH=['src'])\nenv.Append(CFLAGS='-Wall -Werror -std=gnu99', LINKFLAGS='-static -pthread')\n\ndebug = env['debug']\nif debug:\n env.Append(CFLAGS='-g')\n env.Append(LINKFLAGS='-g')\n\nenv.Append(LIBS=['dl', 'rt', 'pthread'])\n\nsource_list = Glob('src/*.c') + Glob('src/net/*.c') + ['src/db/main.c']\n\n# Select correct database backend\ndb = env['db']\nenv.Append(CPPDEFINES={'DB_NAME': db})\nsource_list.append('src/db/%s.c' % db)\n\nif db == 'bdb':\n env.Append(LIBS=['db'])\nelif ARGUMENTS.get('db') == 'leveldb':\n env.Append(LIBS=['leveldb', 'snappy'])\n env.Replace(LINK='clang++')\n\n# Enabled coloured output for clang\nenv['ENV']['TERM'] = os.environ['TERM']\n\nenv.Make(source=None, target=['vendor/zlog/src/libzlog.a'], MakePath=Dir('vendor/zlog/src'))\nenv.Append(CPPPATH=['vendor/zlog/src'])\nenv.Append(LIBPATH=['vendor/zlog/src'])\nenv.Append(LIBS='zlog')\nenv.Depends('bin/db', 'vendor/zlog')\n\nenv.Make(source=None, target=['vendor/libuv/libuv.a'], MakePath=Dir('vendor/libuv'))\nenv.Append(CPPPATH=['vendor/libuv/include'])\nenv.Append(LIBPATH=['vendor/libuv'])\nenv.Prepend(LIBS='uv')\nenv.Depends('bin/db', 'vendor/libuv')\n\nenv.Program('bin/db', source_list)\n","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"79205645","text":"'''\nMANEJA TODO TIPO DE INTERACCIÓN CON LA BASE DE DATOS DEL ESTADO GENERAL\n'''\n\nimport sqlite3\nimport sources.mod.vars as mi_vs\n\n'''#############################################################################################################################################\n TABLA CAJAS '''\n\n# Actualiza la caja cuando se realiza una venta 
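In the `common.py` record above, `point_polygon_testdata` pairs six diagonal points with `pygeos.box(2, 2, 4, 4)`, the natural fixture for exercising pygeos's vectorized predicates. A sketch of the element-wise use it is presumably built for (`pygeos.contains` is standard pygeos API; boundary points do not count as contained):

```python
import numpy as np
import pygeos

points = pygeos.points(np.arange(6), np.arange(6))  # (0,0) .. (5,5)
box = pygeos.box(2, 2, 4, 4)

mask = pygeos.contains(box, points)  # broadcasts: one bool per point
# (2,2) and (4,4) lie on the boundary, so only (3,3) is inside
assert list(mask) == [False, False, False, True, False, False]
```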
(Situación e Ingreso Diario), usando como referencia el ID de la caja\ndef DB_Cajas_Venta_ID(ID, Monto):\n situacion, IngDia = Dev_Datos_P_Venta(\"ID\", ID)\n situacion += Monto\n IngDia += Monto\n sql = 'UPDATE Cajas SET Situacion = {}, IngresoDia = {} WHERE ID = {}'.format(situacion, IngDia, ID)\n Realiza_consulta(mi_vs.BASE_GENERAL_SEC, sql)\n\n# Actualiza la caja cuando se realiza una venta (Situación e Ingreso Diario), usando como referencia el ORDEN de la caja\ndef DB_Cajas_Venta_Orden(Orden, Monto):\n situacion, IngDia = Dev_Datos_P_Venta(\"Orden\", Orden)\n situacion += Monto\n IngDia += Monto\n sql = 'UPDATE Cajas SET Situacion = {}, IngresoDia = {} WHERE Orden = {}'.format(situacion, IngDia, Orden)\n Realiza_consulta(mi_vs.BASE_GENERAL_SEC, sql)\n\n# Devuelve la cantidad de elementos de una tabla y la cantidad de activos\ndef DB_Cajas_Totales_Cajas(ID):\n sql = 'SELECT * FROM Totales WHERE ID = {}'.format(ID)\n Registro = Realiza_consulta(mi_vs.BASE_GENERAL_PPAL, sql)\n Total = 0\n Activos = 0\n for i in Registro:\n Total = i[2]\n Activos = i[3]\n return Total, Activos\n\n\n\n\n\n# Actualiza datos de la tabla \"Cajas\" según su ID, cuando hay ingresos, ventas\ndef Act_Cajas_Ingresos_ID(Situacion, IngresoDia, IngresoSem, IngresoMen, IngresoAnu, IngresoTot, ID_):\n sql = 'UPDATE Cajas SET Situacion = {}, IngresoDia = {}, IngresoSem = {}, IngresoMen = {}, IngresoAnu = {}, IngresoTot = {} WHERE ID = {}'.format(Situacion, IngresoDia, IngresoSem, IngresoMen, IngresoAnu, IngresoTot, ID_)\n Realiza_consulta(mi_vs.LIST_BASE_DATOS[0] + \"egen.db\", sql)\n\n# Actualiza datos de la tabla \"Cajas\" según su ID, cuando hay ingresos, ventas\ndef Act_Fondos_Ingresos_ID(Estado, IngresoDia, IngresoSem, IngresoMen, IngresoAnu, IngresoTot, ID_):\n sql = 'UPDATE Fondos SET Estado = {}, Ing_dia = {}, Ing_sem = {}, Ing_mes = {}, Ing_ano = {}, Ing_tot = {} WHERE ID = {}'.format(Estado, IngresoDia, IngresoSem, IngresoMen, IngresoAnu, IngresoTot, ID_)\n Realiza_consulta(mi_vs.LIST_BASE_DATOS[0] + \"egen.db\", sql)\n\n\n\n\n\n\n\ndef Nueva_Caja(ListaProductos, ListaStock, ListaAdicionales, SumaUnidad):\n # Devuelve 0 si se guardó correctamente, devuelve 1 si hubo algún error.\n\n # Debido a que nuestra intención es poder ir eliminando productos e ir reordenando la tabla por completo según el orden de los códigos, vamos a evitar el modo\n # \"Autoincremental\" de las tablas. Para eso debemos colocar de manera manual el valor de los ID's. En primer lugar se irán colocando según una correlatividad cargada en el\n # sistema y guardado el último valor en la tabla \"Config\", y cuando se desee reordenar todo, se volverán a ennumerar a gusto. Ésta es la función encargada de ver cuál es el\n # ID para el nuevo producto.\n\n # PARAMETROS:\n # Cada lista corresponde a las primeras 3 tablas. No pueden haber datos en la tabla Estadísticas porque es un producto nuevo, no hay estadísticas.\n # SumaUnidad, si vale cero no hace nada. 
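A note on the database module that starts above: values are spliced into SQL with `str.format`, even though the module's own `Realiza_consulta` already accepts a `parameters` tuple. Routing values through `sqlite3` placeholders avoids quoting mistakes and SQL injection; identifiers such as table and column names cannot be parameterized and stay inline. The first update, rewritten against the record's own helpers as a sketch:

```python
# Same effect as DB_Cajas_Venta_ID, but with placeholders instead of format().
def db_cajas_venta_id(ID, Monto):
    situacion, ing_dia = Dev_Datos_P_Venta("ID", ID)
    sql = 'UPDATE Cajas SET Situacion = ?, IngresoDia = ? WHERE ID = ?'
    Realiza_consulta(mi_vs.BASE_GENERAL_SEC,
                     sql, (situacion + Monto, ing_dia + Monto, ID))
```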
De lo contrario suma una unidad en la tabla \"Config\", según el número que venga indica una de las siguientes cosas:\n # 1: Suma una unidad al registro de CantPrincipal\n # 2: Suma una unidad al registro de CantAyuda\n # 3: Suma una unidad al registro de CantEliminados\n # 4: Suma una unidad a los registros de CantPrincipal y CantAyuda\n\n Id_ = Dev_Dato_Int(mi_vs.BASE_DATOS_PPAL, \"Config\", \"ID\", 6, \"Valor\")\n Id_ += 1\n Cant_ = Dev_Dato_Int(mi_vs.BASE_DATOS_PPAL, \"Config\", \"ID\", 5, \"Valor\")\n Cant_ += 1\n\n try:\n # Variables de la tabla: Productos\n Codigo = ListaProductos[0]\n CodBulto = ListaProductos[1]\n CantBulto = ListaProductos[2]\n Concepto = ListaProductos[3]\n Marca = ListaProductos[4]\n Detalle = ListaProductos[5]\n UnidadMedida = ListaProductos[6]\n Reg_Add_Productos(Id_, Codigo, CodBulto, CantBulto, Concepto, Marca, Detalle, UnidadMedida)\n\n # Variables de la tabla: Stock\n Cant1 = ListaStock[0]\n Cant2 = ListaStock[1]\n Cant3 = ListaStock[2]\n Vto1 = ListaStock[3]\n Vto2 = ListaStock[4]\n Vto3 = ListaStock[5]\n PcioCpa1 = ListaStock[6]\n PcioCpa2 = ListaStock[7]\n PcioCpa3 = ListaStock[8]\n CantidadTotal = ListaStock[9]\n PcioVta = ListaStock[10]\n StockVerificado = ListaStock[11]\n Reg_Add_Stock(Id_, Cant1, Cant2, Cant3, Vto1, Vto2, Vto3, PcioCpa1, PcioCpa2, PcioCpa3, CantidadTotal, PcioVta, StockVerificado)\n\n # Variables de la tabla: Adicionales\n CajaAsoc = ListaAdicionales[0]\n Mayorista = ListaAdicionales[1]\n UltFechaVta = ListaAdicionales[2]\n Siniestro = ListaAdicionales[3]\n Sobrante = ListaAdicionales[4]\n SinCobrar = ListaAdicionales[5]\n PorcGeneral = ListaAdicionales[6]\n PathImagen = ListaAdicionales[7]\n CantPreaviso = ListaAdicionales[8]\n DiasPreaviso = ListaAdicionales[9]\n Reg_Add_Adicionales(Id_, CajaAsoc, Mayorista, UltFechaVta, Siniestro, Sobrante, SinCobrar, PorcGeneral, PathImagen, CantPreaviso, DiasPreaviso)\n\n # Variables de la tabla: Estadísticas\n GciaSemanal = 0\n GciaMensual = 0\n GciaAnual = 0\n GciaTotal = 0\n CantVendSem = 0\n CantVendMes = 0\n CantVendAnual = 0\n CantVendTotal = 0\n SiniestrosTotal = 0\n Reg_Add_Estadisticas(Id_, GciaSemanal, GciaMensual, GciaAnual, GciaTotal, CantVendSem, CantVendMes, CantVendAnual, CantVendTotal, SiniestrosTotal)\n\n if SumaUnidad > 0:\n if SumaUnidad == 1:\n ValorAnt = Dev_Config(\"CantPrincipal\")\n ValorAnt += 1\n Act_Config(ValorAnt, \"CantPrincipal\")\n elif SumaUnidad == 2:\n ValorAnt = Dev_Config(\"CantAyuda\")\n ValorAnt += 1\n Act_Config(ValorAnt, \"CantAyuda\")\n elif SumaUnidad == 3:\n ValorAnt = Dev_Config(\"CantEliminados\")\n ValorAnt += 1\n Act_Config(ValorAnt, \"CantEliminados\")\n elif SumaUnidad == 4:\n ValorAnt = Dev_Config(\"CantPrincipal\")\n ValorAnt += 1\n Act_Config(ValorAnt, \"CantPrincipal\")\n ValorAnt = Dev_Config(\"CantAyuda\")\n ValorAnt += 1\n Act_Config(ValorAnt, \"CantAyuda\")\n Id_ = float(Id_)\n Act_Config(Id_, \"Ultimo_ID\")\n Cant_ = float(Cant_)\n Act_Config(Cant_, \"Cant_Productos\") \n return 0\n except:\n return 1\n\ndef Reg_Add_Productos(ID, Codigo, CodBulto, CantBulto, Concepto, Marca, Detalle, UnidadMedida):\n sql = 'INSERT INTO Productos VALUES(?, ?, ?, ?, ?, ?, ?, ?)'\n parametros = (ID, Codigo, CodBulto, CantBulto, Concepto, Marca, Detalle, UnidadMedida)\n Realiza_consulta(mi_vs.BASE_DATOS_SEC, sql, parametros)\n\ndef Reg_Add_Stock(ID, Cant1, Cant2, Cant3, Vto1, Vto2, Vto3, PcioCpa1, PcioCpa2, PcioCpa3, CantTotal, PcioVta, StockVerificado):\n sql = 'INSERT INTO Stock VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'\n parametros = (ID, Cant1, 
Cant2, Cant3, Vto1, Vto2, Vto3, PcioCpa1, PcioCpa2, PcioCpa3, CantTotal, PcioVta, StockVerificado)\n Realiza_consulta(mi_vs.BASE_DATOS_SEC, sql, parametros)\n\ndef Reg_Add_Adicionales(ID, CajaAsoc, Mayorista, UltFechaVta, Siniestro, Sobrante, SinCobrar, PorcGeneral, PathImagen, CantPreaviso, DiasPreaviso):\n sql = 'INSERT INTO Adicionales VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'\n parametros = (ID, CajaAsoc, Mayorista, UltFechaVta, Siniestro, Sobrante, SinCobrar, PorcGeneral, PathImagen, CantPreaviso, DiasPreaviso)\n Realiza_consulta(mi_vs.BASE_DATOS_SEC, sql, parametros)\n\ndef Reg_Add_Estadisticas(ID, GciaSemanal, GciaMensual, GciaAnual, GciaTotal, CantVendSem, CantVendMes, CantVendAnual, CantVendTotal, SiniestrosTotal):\n sql = 'INSERT INTO Estadisticas VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'\n parametros = (ID, GciaSemanal, GciaMensual, GciaAnual, GciaTotal, CantVendSem, CantVendMes, CantVendAnual, CantVendTotal, SiniestrosTotal)\n Realiza_consulta(mi_vs.BASE_DATOS_SEC, sql, parametros)\n\n'''#############################################################################################################################################\n ACTUALIZA PRODUCTO '''\n\n# ESTE BLOQUE DE CÓDIGO ESTÁ PREPARADO PERO NO TERMINADO HASTA QUE AL MENOS TENGAMOS LAS INTERFACES ASÍ SABEMOS SI CON ÉSTAS FUNCIONES ALCANZAN O SI SE NECESITAN MÁS.\n\n# Genera que una actualización que viene preparada en una lista, se pueda hacer dividida en variables para la función que está debajo\ndef Act_Productos_Segun_ID_Por_Lista(Lista):\n Codigo = Lista[1]\n CodBulto = Lista[2]\n CantBulto = Lista[3]\n Concepto = Lista[4]\n Marca = Lista[5]\n Detalle = Lista[6]\n UnidadMedida = Lista[7]\n ID_ = Lista[0]\n Act_Productos_Segun_ID(Codigo, CodBulto, CantBulto, Concepto, Marca, Detalle, UnidadMedida, ID_)\n# Actualiza datos de la tabla \"Productos\" según su ID\ndef Act_Productos_Segun_ID(Codigo, CodBulto, CantBulto, Concepto, Marca, Detalle, UnidadMedida, ID_):\n sql = 'UPDATE Productos SET Codigo = {}, CodBulto = {}, CantBulto = {}, Concepto = {}, Marca = {}, Detalle = {}, UnidadMedida = {} WHERE ID = {}' .format(Codigo, CodBulto, CantBulto, Concepto, Marca, Detalle, UnidadMedida, ID_)\n Realiza_consulta(mi_vs.BASE_DATOS_SEC, sql)\n\n# Actualiza datos de la tabla \"Productos\" según su Código\ndef Act_Productos_Segun_Codigo(CodBulto, CantBulto, Concepto, Marca, Detalle, UnidadMedida, Codigo_):\n sql = 'UPDATE Productos SET CodBulto = {}, CantBulto = {}, Concepto = {}, Marca = {}, Detalle = {}, UnidadMedida = {} WHERE Codigo = {}' .format(CodBulto, CantBulto, Concepto, Marca, Detalle, UnidadMedida, Codigo_)\n Realiza_consulta(mi_vs.BASE_DATOS_SEC, sql)\n\n\n# Genera que una actualización que viene preparada en una lista, se pueda hacer dividida en variables para la función que está debajo\ndef Act_Adicionales_Segun_ID_Por_Lista(Lista):\n CajaAsoc = Lista[1]\n Mayorista = Lista[2]\n UltFechaVta = Lista[3]\n Siniestro = Lista[4]\n Sobrante = Lista[5]\n SinCobrar = Lista[6]\n PorcGeneral = Lista[7]\n PathImagen = Lista[8]\n CantPreaviso = Lista[9]\n DiasPreaviso = Lista[10]\n ID_ = Lista[0]\n Act_Adicionales_Segun_ID(CajaAsoc, Mayorista, UltFechaVta, Siniestro, Sobrante, SinCobrar, PorcGeneral, PathImagen, CantPreaviso, DiasPreaviso, ID_)\n# Actualiza datos de la tabla \"Adicionales\" según su ID\ndef Act_Adicionales_Segun_ID(CajaAsoc, Mayorista, UltFechaVta, Siniestro, Sobrante, SinCobrar, PorcGeneral, PathImagen, CantPreaviso, DiasPreaviso, ID_):\n sql = 'UPDATE Adicionales SET CajaAsoc = {}, Mayorista = {}, UltFechaVta = {}, 
Siniestro = {}, Sobrante = {}, SinCobrar = {}, PorcGeneral = {}, PathImagen = {}, CantPreaviso = {}, DiasPreaviso = {} WHERE ID = {}' .format(CajaAsoc, Mayorista, UltFechaVta, Siniestro, Sobrante, SinCobrar, PorcGeneral, PathImagen, CantPreaviso, DiasPreaviso, ID_)\n Realiza_consulta(mi_vs.BASE_DATOS_SEC, sql)\n\n\n'''#############################################################################################################################################\n BUSCA INFORMACIÓN DEL PRODUCTO '''\n\n# Busca un código, si lo encuentra devuelve una variable con V-F, y otra con la lista de lo que hay en todas las tablas\n # Variables que devuelve:\n # Encontrado = True o False, si se pudo ejecutar todo sin novedad o no.\n # Lista_Datos: pos0 = ID\n # pos1 = Codigo\n # pos2 = Codigo del bulto\n # pos3 = Cant por bulto\n # pos4 = Concepto\n # pos5 = Marca\n # pos6 = Detalle\n # pos7 = Unidad de Medida\n\n # pos8 = Cant1\n # pos9 = Cant2\n # pos10 = Cant3\n # pos11 = Vto1\n # pos12 = Vto2\n # pos13 = Vto3\n # pos14 = PcioCpa1\n # pos15 = PcioCpa2\n # pos16 = PcioCpa3\n # pos17 = CantTotal\n # pos18 = PcioVta\n # pos19 = StockVerificado\n\n # pos20 = Caja Asociada\n # pos21 = Mayorista\n # pos22 = Ultima Fecha de Venta\n # pos23 = Siniestro\n # pos24 = Sobrante\n # pos25 = Sin Cobrar\n # pos26 = Porcentaje General (Incremento)\n # pos27 = Path Imagen\n # pos28 = Cantidad de stock de Preaviso\n # pos29 = Días de Preaviso\ndef Dev_Info_Producto(Codigo):\n Encontrado = False\n Lista_Datos = []\n try:\n # Tabla: Productos\n Reg = Reg_Un_param(mi_vs.BASE_DATOS_PPAL, \"Productos\", \"Codigo\", Codigo)\n for i in Reg:\n Lista_Datos.append(i[0])\n Lista_Datos.append(i[1])\n Lista_Datos.append(i[2])\n Lista_Datos.append(i[3])\n Lista_Datos.append(i[4])\n Lista_Datos.append(i[5])\n Lista_Datos.append(i[6])\n Lista_Datos.append(i[7])\n Id_ = Lista_Datos[0]\n \n # Tabla: Stock\n Reg = Reg_Un_param(mi_vs.BASE_DATOS_PPAL, \"Stock\", \"ID\", Id_)\n for i in Reg:\n Lista_Datos.append(i[1])\n Lista_Datos.append(i[2])\n Lista_Datos.append(i[3])\n Lista_Datos.append(i[4])\n Lista_Datos.append(i[5])\n Lista_Datos.append(i[6])\n Lista_Datos.append(i[7])\n Lista_Datos.append(i[8])\n Lista_Datos.append(i[9])\n Lista_Datos.append(i[10])\n Lista_Datos.append(i[11])\n Lista_Datos.append(i[12])\n \n # Tabla: Adicionales\n Reg = Reg_Un_param(mi_vs.BASE_DATOS_PPAL, \"Adicionales\", \"ID\", Id_)\n for i in Reg:\n Lista_Datos.append(i[1])\n Lista_Datos.append(i[2])\n Lista_Datos.append(i[3])\n Lista_Datos.append(i[4])\n Lista_Datos.append(i[5])\n Lista_Datos.append(i[6])\n Lista_Datos.append(i[7])\n Lista_Datos.append(i[8])\n Lista_Datos.append(i[9])\n Lista_Datos.append(i[10])\n\n Encontrado = True\n except:\n pass\n return Encontrado, Lista_Datos\n\n\n'''#############################################################################################################################################\n ELIMINA PRODUCTO '''\n\n'''#############################################################################################################################################\n FUNCIONES AUXILIARES '''\n\n# Devuelve el Valor solicitado del registro indicado en \"DatoTexto\", donde \"Valor\" es una columna del tipo float y \"Nombre\" es un texto en la tabla.\ndef Dev_Config(DatoTexto):\n sql = \"SELECT Valor FROM Config WHERE Nombre = '{}'\" .format(DatoTexto)\n Resultado = Realiza_consulta(\"./source/db/prod.db\",sql)\n aux = 0\n for res in Resultado:\n aux = res[0] \n return float(aux)\n\n# Actualiza cualquier valor de la tabla 
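Several helpers in this record, `Dev_Config` above among them, loop over the returned cursor just to extract a single scalar. Since `Realiza_consulta` returns the live `sqlite3` cursor (the connection's context manager commits but does not close it), `fetchone()` does the same job directly and makes the no-match case explicit. A sketch against the record's own helper and path:

```python
def dev_config(nombre):
    sql = "SELECT Valor FROM Config WHERE Nombre = ?"
    cur = Realiza_consulta("./source/db/prod.db", sql, (nombre,))
    row = cur.fetchone()            # None when no row matches
    return float(row[0]) if row else 0.0
```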
\"Config\"\ndef Act_Config(Valor, Nombre):\n sql = \"UPDATE Config SET Valor = {} WHERE Nombre = '{}'\" .format(Valor, Nombre)\n Realiza_consulta(\"./source/db/prod.db\", sql)\n\n'''#############################################################################################################################################\n EJECUCIÓN '''\n# FUNCIÓN BASE DE ACTUALIZACIÓN CUANDO LA COMPARACIÓN ES NUMÉRICA\ndef Act_Valor_Num(Tabla, Col_Actualiza, Valor_Actualiza, Col_Compara, Valor_Compara):\n sql = 'UPDATE {} SET {} = {} WHERE {} = {}'.format(Tabla, Col_Actualiza, Valor_Actualiza, Col_Compara, Valor_Compara)\n Realiza_consulta(mi_vs.BASE_DATOS_SEC, sql)\n\n# FUNCIÓN BASE DE ACTUALIZACIÓN CUANDO LA COMPARACIÓN ES TEXTO\ndef Act_Valor_Texto(Tabla, Col_Actualiza, Valor_Actualiza, Col_Compara, Texto_Compara):\n sql = \"UPDATE {} SET {} = {} WHERE {} = '{}'\".format(Tabla, Col_Actualiza, Valor_Actualiza, Col_Compara, Texto_Compara)\n Realiza_consulta(mi_vs.BASE_DATOS_SEC, sql)\n\n#CONECTA CON LA BD, REALIZA LA CONSULTA Y GUARDA LOS CAMBIOS\n # Los pasos para trabajar en la bd, son: Conectarse, realizar la consulta, cargarla en una variable y desconectarse\n # query será el parámetro que traiga el tipo de consuta que se desea, y en caso de haber parámetros, se utilizarán, de lo contrario, la tupla queda vacía\ndef Realiza_consulta( BaseDeDatos, query, parameters = ()):\n db_nombre = BaseDeDatos\n # Realizamos la conección y la almacenamos en la variable conn\n with sqlite3.connect(db_nombre) as conn:\n # Cursor, es una propiedad que nos indica en qué posición estamos dentro de la base de datos, y lo almacenamos en la variable Cur\n Cur = conn.cursor()\n # Execute, es la función que realiza la consulta, y los resultados obtenidos serán almacenados en la variable resultado\n resultado = Cur.execute(query, parameters)\n conn.commit()\n return resultado\n\n'''#############################################################################################################################################\n AYUDAS '''\n\n# Devuelve los valores de la situación actual y el ingreso diario de la Caja indicada por parámetro\ndef Dev_Datos_P_Venta(Parametro, ID):\n sql = 'SELECT * FROM Cajas WHERE {} = {}'.format(Parametro, ID)\n Registro = Realiza_consulta(mi_vs.BASE_GENERAL_PPAL, sql)\n situacion = 0\n ingDia = 0\n for i in Registro:\n situacion = i[3]\n ingDia = i[4]\n return situacion, ingDia\n\n# DEVUELVE LA TABLA COMPLETA QUE SE HAYA SOLICITADO\ndef Dev_Tabla(BaseDeDatos, Tabla, OrdenBy = \"\"):\n if OrdenBy == \"\":\n sql = 'SELECT * FROM {}' .format(Tabla)\n else:\n sql = 'SELECT * FROM {} ORDER BY {}'.format(Tabla, OrdenBy)\n Resultado = Realiza_consulta(BaseDeDatos, sql)\n return Resultado\n\n# DEVUELVE EN FORMA DE LISTA UNA COLUMNA DE UNA TABLA\n # La columna se debe indicar con un número entero siendo 0 el primero\ndef Dev_Columna_Lista(Tabla, Colum_Int):\n Tabla = Dev_Tabla(mi_vs.BASE_GENERAL_SEC, Tabla)\n Lista = []\n for reg in Tabla:\n Lista.append(reg[Colum_Int])\n return Lista\n\n# DEVUELVE UN REGISTRO BUSCADO SEGÚN UN DATO EN PARTICULAR\ndef Reg_Un_param(BaseDeDatos, Tabla, Columna, DatoCoincide):\n sql = \"SELECT * FROM {} WHERE {} = '{}'\" .format( Tabla, Columna, DatoCoincide)\n Resultado = Realiza_consulta(BaseDeDatos,sql)\n return Resultado\n\n\n\n\n\n\n\n\n\n\n# Devuelve la tabla solicitada de la base de datos de clientes\ndef Dev_Tabla_Clie(Tabla):\n return Dev_Tabla(\"./db\\\\clie.db\", Tabla)\n\n# Devuelve el total de registros de la tabla solicitada de la base de datos de clientes\ndef 
Dev_Total_Tabla_Clie(Tabla):\n reg = Reg_Un_param(\"./db\\\\clie.db\", \"sqlite_sequence\", \"name\", Tabla)\n valor = 0\n for i in reg:\n valor = i[1]\n return valor\n\ndef Dev_ID_ClienteTexto(Tabla, ColumnaCompara, DatoTexto):\n sql = \"SELECT ID FROM {} WHERE {} = '{}'\" .format(Tabla, ColumnaCompara, DatoTexto)\n Resultado = Realiza_consulta(\"./db\\\\clie.db\",sql)\n aux = 0\n for res in Resultado:\n aux = res[0] \n return int(aux)\n\n# Devuelve un dato solicitado cuando el valor de comparación es un entero\ndef Dev_Dato_Int(BaseDeDatos, Tabla, ColumnaCompara, DatoCompara, ColumnaDevuelve):\n sql = 'SELECT {} FROM {} WHERE {} = {}'.format(ColumnaDevuelve, Tabla, ColumnaCompara, DatoCompara)\n Result = Realiza_consulta(BaseDeDatos, sql)\n return Result\n\n# INSERTA UN REGISTRO EN LA BASE DE DATOS\ndef Reg_Add2(BaseDeDatos, Activo, Codigo, Linea, Tipo, Interior, Repuesto, ConceptoBazar, ConceptoAyVta, PedidosEsp, Otros,Tamanio, Litros, PcioCosto, Costo10, PSPV, PcioLista, Puntos, PuntosMG, Comentarios, Imagen, Actualizado):\n sql = 'INSERT INTO Productos VALUES(NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'\n parametros = (Activo, Codigo, Linea, Tipo, Interior, Repuesto, ConceptoBazar, ConceptoAyVta, PedidosEsp, Otros,Tamanio, Litros, PcioCosto, Costo10, PSPV, PcioLista, Puntos, PuntosMG, Comentarios, Imagen, Actualizado)\n Realiza_consulta(BaseDeDatos, sql, parametros)\n\n# Actualiza la tabla \"Config\", sirve para actualizar todos los totales de \"Registros\" que hay en cada una de las tablas\ndef Act_Reg_Cant(BaseDeDatos, Cantidad, NomTabla):\n query = 'UPDATE Config SET Registros = ? WHERE Tabla = ?'\n parameters = (Cantidad, NomTabla)\n Realiza_consulta(BaseDeDatos, query, parameters)\n\n\n","sub_path":"sources/mod/mdbegen.py","file_name":"mdbegen.py","file_ext":"py","file_size_in_byte":20971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"555872893","text":"\nfrom requests import Request\nfrom config import *\n\nclass weixinrequest(Request):\n \"\"\"\n super().__init__(a,b,c)继承类时,当子类重写init方法,遇到和父类同样的属性。直接显示引用属性\n (a,b,c)中的abc为子类init()中传入的参数,且传入的参数要与要父类中属性先后顺序一致。\n\n \"\"\"\n def __init__(self,url,callback,method='GET',headers=None,need_proxy=False,fail_time=0,timeout=TIMEOUT):\n super(weixinrequest,self).__init__(method,url,headers)#显示调用指定父类init这个三个属性,其他父类属性则相当于丢弃!!这三个属性,要和源码中属性出现的顺序对应\n self.callback=callback\n self.need_proxy=need_proxy\n self.fail_time=fail_time\n self.timeout=timeout\n\n\n\n","sub_path":"weixin_new/requestmy.py","file_name":"requestmy.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"599604873","text":"#!/usr/bin/env python\n# coding: utf-8\n\n#import for mfcc\nimport scipy\nfrom scipy import fftpack\nfrom scipy import signal\nfrom scipy.io import wavfile as wav\n\n\n\n# In[1]:\n\n\nfrom keras.models import load_model\nfrom tensorflow import keras\nimport pyaudio\nimport math\nimport struct\nimport wave \nimport time\nimport os\nimport numpy as np\n\n\n# In[2]:\n\n\n#SETTINGS\nsaved_model_path = \"/home/pi/Servus/modelCNN\"\nwav_file_path = \"/home/pi/Servus/modelCNN\"\n#silence treshold\nTRESHOLD = 15\nSAMPLE_RATE = 16000\nNUM_CHANNELS = 1\nCHUNK_SIZE = 1024\nAUDIO_FORMAT = pyaudio.paInt16\n#in seconds\nREC_LENGTH = 1\nS_WIDTH = 2\nNORMALIZATION = (1.0/32768.0)\nPADDING = 68\nNUM_MFCC = 40\nclass_label = 
[\"down\",\"go\",\"left\",\"on\",\"right\",\"stop\",\"up\"]\n\n###################################################################\n\ndef add_eps(x):\n x[scipy.where(x == 0)] = scipy.finfo(dtype=x.dtype).eps\n return x\n\n\ndef preemphasis(seq, coeff):\n return scipy.append(seq[0], seq[1:] - coeff * seq[:-1])\n\n\n# http://www.practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/\ndef freq_to_mel(freq):\n return 1125.0 * scipy.log(1.0 + freq / 700.0)\n\n\ndef mel_to_freq(mel):\n return 700.0 * (scipy.exp(mel / 1125.0) - 1.0)\n\n\ndef iter_bin(out, curr_bin, next_bins, backward=False):\n next_bin = next_bins[scipy.where(next_bins > curr_bin)][0]\n if backward:\n sign = -1\n bias = next_bin\n else:\n sign = 1\n bias = curr_bin\n for f in range(int(curr_bin), int(next_bin)):\n out[f] = sign * (f - bias) / (next_bin - curr_bin)\n\n\ndef mel_filterbank(num_bank, num_freq, sample_freq, low_freq, high_freq):\n num_fft = (num_freq - 1) * 2\n low_mel = freq_to_mel(low_freq)\n high_mel = freq_to_mel(high_freq)\n banks = scipy.linspace(low_mel, high_mel, num_bank + 2)\n bins = scipy.floor((num_fft + 1) * mel_to_freq(banks) / sample_freq)\n out = scipy.zeros((num_bank, num_fft // 2 + 1))\n for b in range(num_bank):\n iter_bin(out[b], bins[b], bins[b+1:])\n iter_bin(out[b], bins[b+1], bins[b+2:], backward=True)\n return out\n\ndef MFCC_spectro(data):\n \n # config is based on Kaldi compute-mfcc-feats\n\n # STFT conf\n frame_length = 25 # frame / msec\n frame_shift = 10 # frame / msec\n remove_dc_offset = True\n window_type = \"hamming\"\n\n # Fbank conf\n preemphasis_coeff = 0.97\n use_power = True # else use magnitude\n high_freq = 0.0 # offset from Nyquist freq [Hz]\n low_freq = 20.0 # offset from 0 [Hz]\n num_mel_bins = 80 # (default 23)\n num_ceps = 40\n num_lifter = 22\n\n sample_freq, raw_seq = wav.read(data)\n \n\n assert raw_seq.ndim == 1 # assume mono\n seq = raw_seq.astype(scipy.float64)\n if remove_dc_offset:\n seq -= scipy.mean(seq)\n\n # STFT feat\n seq = preemphasis(seq, preemphasis_coeff)\n num_samples = sample_freq // 1000\n window = signal.get_window(window_type, frame_length * num_samples)\n mode = \"psd\" if use_power else \"magnitude\"\n f, t, spectrogram = signal.spectrogram(seq, sample_freq, window=window, noverlap=frame_shift*num_samples, mode=mode)\n\n # log-fbank feat\n banks = mel_filterbank(num_mel_bins, spectrogram.shape[0], sample_freq, low_freq, sample_freq // 2 - high_freq)\n fbank_spect = scipy.dot(banks, spectrogram)\n logfbank_spect = scipy.log(add_eps(fbank_spect))\n\n # mfcc feat\n dct_feat = fftpack.dct(logfbank_spect, type=2, axis=0, norm=\"ortho\")[:num_ceps]\n lifter = 1 + num_lifter / 2.0 * scipy.sin(scipy.pi * scipy.arange(num_ceps) / num_lifter)\n mfcc_feat = lifter[:, scipy.newaxis] * dct_feat\n mfcc_feat = np.asarray(mfcc_feat, dtype=np.float32)\n\n pad = PADDING - mfcc_feat.shape[1]\n print(pad)\n print(PADDING)\n if pad<0 :\n return None\n mfcc = np.pad(mfcc_feat, pad_width=((0,0), (0,pad)), mode=\"constant\")\n #scaled =np.mean(mfcc_feat.T, axis=0)\n #plt.matshow(mfcc_feat)\n #plt.savefig(\"mfcc.png\")\n #plt.show()\n return mfcc\n###################################################################\n\n# In[3]:\n\n\nmodel = keras.models.load_model(saved_model_path)\n#record file init\npyaud = pyaudio.PyAudio()\nstream = pyaud.open(format=AUDIO_FORMAT, \n channels=NUM_CHANNELS, \n rate=SAMPLE_RATE, \n input=True, output=True, \n frames_per_buffer=CHUNK_SIZE)\n\n\n# In[4]:\n\n\n#get rms\ndef rms(frame):\n 
unpack_format = \"%dh\" % (len(frame)/S_WIDTH)\n unpacked = struct.unpack(unpack_format, frame)\n square_sum = 0\n \n for sample in unpacked:\n n = sample * NORMALIZATION\n square_sum = square_sum + n*n\n \n rms = math.sqrt(square_sum / (len(frame)/S_WIDTH))\n \n return rms*1000\n\n\n# In[5]:\n\n\n#save file as .wav\ndef save(recording):\n file = wave.open(wav_file_path,\"wb\")\n file.setnchannels(NUM_CHANNELS)\n file.setsampwidth(pyaud.get_sample_size(AUDIO_FORMAT))\n file.setframerate(SAMPLE_RATE)\n file.writeframes(recording)\n file.close()\n\n\n# In[6]:\n\n\n#make spectrogram out of .wav file\ndef get_spectrogram(file_name):\n try:\n audio, sample_rate = librosa.load(file_name,\n res_type=\"kaiser_fast\")\n mfcc = librosa.feature.mfcc(y=audio, sr=sample_rate,\n n_mfcc = NUM_MFCC)\n pad = PADDING - mfcc.shape[1]\n mfcc = np.pad(mfcc, pad_width=((0,0), (0,pad)), mode=\"constant\")\n except:\n print(\"Error with file: \", file_name)\n return None, None\n return mfcc\n\n\n# In[7]:\n\n\ndef make_prediction(file_name):\n spectro = MFCC_spectro(file_name)\n spectro = spectro.reshape(1, NUM_MFCC, PADDING, NUM_CHANNELS)\n prediction = model.predict_classes(spectro)\n print(\"Predicted class: {}\".format(class_label[int(prediction)])) \n\n\n# In[8]:\n\n\ndef record():\n print(\"rec start\")\n recording = []\n start_time = time.time()\n finish_time = start_time + REC_LENGTH\n \n while start_time <= finish_time:\n frame = stream.read(CHUNK_SIZE)\n start_time = time.time()\n recording.append(frame)\n print(\"rec stop\")\n save(b''.join(recording))\n make_prediction(wav_file_path)\n\n\n# In[9]:\n\n\ndef loop():\n print(\"starting loop\")\n while True:\n input = stream.read(CHUNK_SIZE)\n if rms(input) > TRESHOLD:\n record()\n\n\n# In[10]:\n\n\nloop()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Recognize.py","file_name":"Recognize.py","file_ext":"py","file_size_in_byte":6489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"61624684","text":"import csv\nfrom datetime import datetime\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sales.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\nclass Sales(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n transaction_date = db.Column(db.DateTime, unique=False, nullable=False)\n product = db.Column(db.String(80), unique=False, nullable=False)\n price = db.Column(db.Integer, unique=False, nullable=False)\n payment_type = db.Column(db.String(120), unique=False, nullable=False)\n\n def __repr__(self):\n return '' % self.transaction_date\n\n\ndef main():\n db.create_all()\n with open('hw.csv', encoding='utf-8') as read_files:\n data = csv.DictReader(read_files, delimiter=\";\")\n for row in data:\n sales = Sales(transaction_date=datetime.strptime(row['Transaction_date'], '%m/%d/%Y %H:%M'),\n product=row['Product'], price=int(row['Price']), payment_type=row['Payment_Type'])\n db.session.add(sales)\n db.session.commit()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"homework_4/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"92531535","text":"# -*- coding: utf-8 -*-\n\nimport pytest as pt\nfrom biosim.landscape import Square, Jungle, Savannah, Desert, Ocean, Mountain\nfrom biosim.animals import Animal, Herbivore, Carnivore\nimport numpy 
 +{"seq_id":"92531535","text":"# -*- coding: utf-8 -*-\n\nimport pytest as pt\nfrom biosim.landscape import Square, Jungle, Savannah, Desert, Ocean, Mountain\nfrom biosim.animals import Animal, Herbivore, Carnivore\nimport numpy as np\n\n__author__ = 'Samir Adrik'\n__email__ = 'saad@nmbu.no'\n\n\nclass TestSquare:\n \"\"\"\n Test methods for the Square superclass.\n \"\"\"\n\n @pt.fixture(autouse=True)\n def setup(self):\n self.default_parameters = {\n Savannah: {'f_max': 300.0, 'alpha': 0.3}, Jungle: {'f_max': 800.0}}\n self.square = Square()\n self.squares = {Jungle: Jungle(), Savannah: Savannah(),\n Desert: Desert(), Ocean: Ocean(), Mountain: Mountain()}\n self.allowed_squares = [Jungle(), Savannah(), Desert()]\n\n @pt.fixture(autouse=True)\n def teardown(self):\n \"\"\"\n Executes after all tests are run\n\n \"\"\"\n for landscape_type, square in self.squares.items():\n if landscape_type in self.allowed_squares:\n square.restore_square()\n square.set_parameters(self.default_parameters[landscape_type])\n\n @pt.fixture(autouse=True)\n def create_animals_in_jungle(self):\n \"\"\"\n Creates 200 animals (100 herbivores and 100 carnivores) and places them\n in the jungle square.\n\n \"\"\"\n self.herbivores = [Herbivore() for _ in range(100)]\n self.carnivores = [Carnivore() for _ in range(100)]\n\n self.jungle = Jungle()\n self.jungle.allocate_animals(self.herbivores + self.carnivores)\n\n def test_square_set_parameters_type(self):\n \"\"\"\n Ensure that new parameters have the correct type.\n \"\"\"\n pt.raises(TypeError, self.square.set_parameters, [])\n pt.raises(TypeError, self.square.set_parameters, ())\n\n def test_square_instance_and_subclass(self):\n \"\"\"\n All landscape types are instances and subclasses of the Square superclass.\n\n \"\"\"\n for square in self.squares.values():\n assert isinstance(square, Square)\n assert issubclass(square.__class__, Square)\n\n def test_square_default_parameters(self):\n \"\"\"\n Default parameters are dictionaries with string keys and float values\n for all square subclasses.\n\n \"\"\"\n for parameters in self.default_parameters.values():\n for param_name, param_value in parameters.items():\n assert isinstance(param_name, str)\n assert isinstance(param_value, float)\n\n def test_square_set_parameters(self):\n \"\"\"\n New parameters are added to the default parameters.\n\n \"\"\"\n new_parameters = {'f_max': 450}\n\n for landscape_type in self.squares.keys():\n if landscape_type in self.allowed_squares:\n landscape_type.set_parameters(new_parameters)\n for param_name in new_parameters.keys():\n assert landscape_type.parameters[param_name] == \\\n new_parameters[\n param_name]\n\n def test_square_unknown_parameters(self):\n \"\"\"\n Test that ValueError is raised if unknown parameter is passed to\n set_parameters.\n\n \"\"\"\n unknown_parameter = {'varphi': 200.0}\n\n for landscape_type in self.squares.keys():\n if landscape_type in self.allowed_squares:\n pt.raises(ValueError,\n landscape_type.set_parameters, unknown_parameter)\n\n def test_square_f_max_non_negative(self):\n \"\"\"\n Max fodder parameter must be non-negative.\n\n \"\"\"\n non_negative_f_max = {'f_max': -100.0}\n\n for landscape_type in self.squares.keys():\n if landscape_type in self.allowed_squares:\n pt.raises(ValueError,\n landscape_type.set_parameters, non_negative_f_max)\n\n def test_square_restore_square(self):\n \"\"\"\n Restores square to an empty one\n\n \"\"\"\n self.jungle.restore_square()\n assert self.jungle.animals_in_square == self.square.animals_in_square\n\n def test_square_invalid_animal_list(self):\n \"\"\"\n TypeError is raised if animals allocated to square are not in a list.\n\n \"\"\"\n invalid_animal_list = tuple(self.herbivores)\n\n pt.raises(TypeError, self.jungle.allocate_animals, 
invalid_animal_list)\n\n def test_square_allocate_animals(self):\n \"\"\"\n Animals get allocated to square\n\n \"\"\"\n assert len(self.jungle.animals_in_square[Herbivore]) != len(\n self.squares[Jungle].animals_in_square[Herbivore])\n assert len(self.jungle.animals_in_square[Carnivore]) != len(\n self.squares[Jungle].animals_in_square[Carnivore])\n\n def test_square_delete_animals(self):\n \"\"\"\n Animals get deleted from square\n\n \"\"\"\n for animal in self.herbivores + self.carnivores:\n self.jungle.delete_animal(animal)\n\n no_animals_in_square = self.square.animals_in_square\n\n assert self.jungle.animals_in_square == no_animals_in_square\n\n def test_square_procreate_animals(self):\n \"\"\"\n Animal count increases after procreation\n\n \"\"\"\n herbivores_with_age_and_weight = [Herbivore(np.random.randint(1, 50),\n np.random.randint(1, 50))\n for _ in range(100)]\n\n jungle = Jungle()\n jungle.allocate_animals(herbivores_with_age_and_weight)\n jungle.procreate_animals()\n\n assert len(jungle.animals_in_square[Herbivore]) > len(\n herbivores_with_age_and_weight)\n\n def test_square_procreation_num_calls(self, mocker):\n \"\"\"\n Test the number of calls to birth() method\n\n \"\"\"\n mocker.spy(Animal, 'birth')\n mocker.spy(Square, 'procreate_animals')\n\n self.jungle.procreate_animals()\n\n # 200 animals in the jungle square to begin with\n assert Square.procreate_animals.call_count == 1\n assert Animal.birth.call_count == 200\n\n def test_square_aging_animals(self):\n \"\"\"\n Aging animals in square\n\n \"\"\"\n self.jungle.aging_animals()\n sum_herbivore_age = sum(\n herb.age for herb in self.jungle.animals_in_square[Herbivore])\n sum_carnivore_age = sum(\n carn.age for carn in self.jungle.animals_in_square[Carnivore])\n\n assert len(\n self.herbivores + self.carnivores) == sum_herbivore_age + \\\n sum_carnivore_age\n\n def test_square_aging_num_calls(self, mocker):\n \"\"\"\n Count the number of calls to the aging method.\n\n \"\"\"\n mocker.spy(Square, 'aging_animals')\n mocker.spy(Animal, 'aging')\n self.jungle.aging_animals()\n\n # 200 animals in the jungle square to begin with\n assert Square.aging_animals.call_count == 1\n assert Animal.aging.call_count == 200\n\n def test_square_death_animals(self):\n \"\"\"\n Animal count decreases after death\n\n \"\"\"\n self.jungle.death_animals()\n\n assert len(self.jungle.animals_in_square[Herbivore]) < len(\n self.herbivores)\n assert len(self.jungle.animals_in_square[Carnivore]) < len(\n self.carnivores)\n\n def test_square_death_num_calls(self, mocker):\n \"\"\"\n Count the number of calls to the death method.\n\n \"\"\"\n mocker.spy(Square, 'death_animals')\n mocker.spy(Animal, 'death')\n self.jungle.death_animals()\n\n # 200 animals in the jungle square to begin with\n assert Square.death_animals.call_count == 1\n assert Animal.death.call_count == 200\n\n def test_square_grow_fodder(self):\n \"\"\"\n Grow fodder in square\n\n \"\"\"\n for square in self.squares.values():\n if square in self.allowed_squares:\n square._grow_fodder()\n assert square.fodder > self.squares[type(square)].fodder\n\n def test_square_feed_animals(self):\n \"\"\"\n Feed animals in square\n\n \"\"\"\n pre_weight_of_herbivores = np.sum(\n herb.weight for herb in self.jungle.animals_in_square[Herbivore])\n pre_weight_of_carnivores = np.sum(\n carn.weight for carn in self.jungle.animals_in_square[Carnivore])\n\n self.jungle.feed_animals()\n\n post_weight_of_herbivores = np.sum(\n herb.weight for herb in self.jungle.animals_in_square[Herbivore])\n 
post_weight_of_carnivores = np.sum(\n carn.weight for carn in self.jungle.animals_in_square[Carnivore])\n\n assert pre_weight_of_herbivores < post_weight_of_herbivores\n assert pre_weight_of_carnivores < post_weight_of_carnivores\n\n def test_square_feed_animals_call(self, mocker):\n \"\"\"\n Test the number of calls to the feed animals method for a herbivore.\n\n \"\"\"\n mocker.spy(Square, 'feed_animals')\n mocker.spy(Herbivore, 'eating')\n mocker.spy(Carnivore, 'eating')\n\n self.jungle.feed_animals()\n\n # 200 animals in the jungle square to begin with\n # expected herbivore calls = 800 ('f_max' jungle) / 10 (appetite per\n # Herbivore) = 80, i.e. the fodder runs out after 80 herbivores eat\n assert Square.feed_animals.call_count == 1\n assert Herbivore.eating.call_count == 80\n assert Carnivore.eating.call_count == 100\n\n def test_square_migrate_animals(self):\n \"\"\"\n Animal migration\n\n \"\"\"\n jungle_one = Jungle()\n jungle_two = Jungle()\n jungle_three = Jungle()\n\n self.jungle.migrate_animals([jungle_one, jungle_two, jungle_three])\n\n num_migrated_herbivores = np.sum(\n [len(square.newcomers[Herbivore]) for square in\n [jungle_one, jungle_two, jungle_three]])\n\n num_migrated_carnivores = np.sum(\n [len(square.newcomers[Carnivore]) for square in\n [jungle_one, jungle_two, jungle_three]])\n\n assert num_migrated_herbivores > 0\n assert num_migrated_carnivores > 0\n\n def test_square_add_newcomers(self):\n \"\"\"\n Newly migrated animals get added to animals already existing in square\n\n \"\"\"\n self.jungle.newcomers[Herbivore] = [Herbivore() for _ in range(5)]\n self.jungle.newcomers[Carnivore] = [Carnivore() for _ in range(5)]\n\n self.jungle.add_newcomers()\n\n # There were 100 animals of each species in the Jungle square to begin with.\n assert len(self.jungle.animals_in_square[Herbivore]) == 105\n assert len(self.jungle.animals_in_square[Carnivore]) == 105\n\n def test_square_compute_propensity(self):\n \"\"\"\n Test the compute propensity calculations\n\n \"\"\"\n assert self.jungle.compute_propensity(Herbivore) == pt.approx(\n 2.20798251162)\n assert self.jungle.compute_propensity(Carnivore) == pt.approx(\n 6461607.99132)\n","sub_path":"test_biosim/test_landscape.py","file_name":"test_landscape.py","file_ext":"py","file_size_in_byte":10946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
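A minimal standalone illustration of the mocker.spy pattern the tests above rely on (pytest-mock): spy wraps the real method, so behaviour is unchanged while calls are counted; the class names reuse the biosim imports from the test module, and the one-call-per-animal behaviour mirrors the 200-animal assertions above.

def test_spy_counts_calls(mocker):
    mocker.spy(Animal, 'aging')
    jungle = Jungle()
    jungle.allocate_animals([Herbivore(), Herbivore()])
    jungle.aging_animals()  # aging() runs once per allocated animal
    assert Animal.aging.call_count == 2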
 +{"seq_id":"193933009","text":"from jd_Crawler.client import setting\nimport traceback\nimport logging\nimport threading\nimport time\nimport queue\nimport random\nimport requests\nimport importlib\n\nclass Request:\n def __init__(self,url,headers=None,data=None,proixy=None,request_callback=None,timeout=None,cookies=None,**attr):\n self.attr=None\n self.url=url\n self.headers=headers\n self.data=data\n self.timeout=timeout\n self.proixy=proixy\n self.attr=attr\n self.request_callback=request_callback\n self.cookies=cookies\n def run_request_callback(self):\n if self.attr['spider_type'] == 2:\n data=self.request_callback(self,driver=self.attr['driver'])\n else:\n data=None\n return data\n\nclass spider_run_error(Exception):\n def __init__(self,st='spider run error'):\n self.st=st\n def __str__(self):\n return self.st\n\n\n# Crawler task scheduler\nclass crawl(threading.Thread):\n # task status codes\n NOT_RUN=0\n RUN=1\n END=2\n ERROR=3\n PASTE=4\n BREAK=5\n setting={}\n init_arg={}\n @staticmethod\n def init_setting(setting):\n crawl.setting=setting\n pass\n\n @staticmethod\n def debug_print_setting():\n print(crawl.setting)\n pass\n\n @staticmethod\n def init_originarg(arg):\n crawl.init_arg=arg\n print(crawl.init_arg)\n\n def load_data_after_request_filter(self,task_type,data_collection):\n tmps={}\n temp=[]\n if task_type in setting.DATA_AFTER_REQUEST_FILTER:\n tmps=setting.DATA_AFTER_REQUEST_FILTER[task_type]\n for tmp in sorted(tmps, key=lambda x:(x['priority'])):\n module=importlib.import_module(tmp['module'])\n module_classes=dir(module)\n for module_class in module_classes:\n try:\n if issubclass(getattr(module, module_class), filter_middlerware):\n temp.append(getattr(module,module_class))\n data_collection[getattr(module,module_class).name]={\n 'serial_number':0,\n 'limit_number':tmp['limit_serial_number']\n }\n except TypeError as e:\n continue\n return temp\n\n def run_data_after_request_filter(self,request,context,data_collecter):\n for tmp in self.data_after_request_filter:\n print('serial_number:',self.after_request_filter_collect[tmp.name]['serial_number'],'******** limit_number:',self.after_request_filter_collect[tmp.name]['limit_number'])\n if self.after_request_filter_collect[tmp.name]['serial_number'] <= self.after_request_filter_collect[tmp.name]['limit_number']:\n temp=tmp(request,context,data_collecter=data_collecter[tmp.name]).filter_data()\n else:\n return RuntimeError()\n if temp is None:\n self.after_request_filter_collect[tmp.name]['serial_number']=0\n continue\n else:\n self.after_request_filter_collect[tmp.name]['serial_number']+=1\n return temp\n return None\n\n def __init__(self,spider):\n self.runspider=spider\n self.status=self.NOT_RUN\n self.allow_remove=0\n self.sleep_high=int(self.get_spider_setting('sleep_every_request')['sleep_every_request'])\n self.use_proixy=int(self.get_spider_setting('use_proixy')['use_proixy'])\n self.proixy_time = 1\n self.after_request_filter_collect={}\n self.data_after_request_filter=self.load_data_after_request_filter(self.get_task_type(),self.after_request_filter_collect)\n\n if self.use_proixy==1:\n self.change_manage = int(self.get_spider_setting('proixy_pass_time')['proixy_pass_time'])\n super(crawl, self).__init__()\n\n def filter_middleware(self):\n #\n pass\n\n def clear_front_remove(self):\n self.runspider.clear_front_remove()\n # clean up the task pool\n\n def allow_crawl_remove(self):\n self.allow_remove=1\n\n def filter_end_remove(self):\n return self.allow_remove\n\n def get_run_status(self):\n tmp={\"status\":self.status}\n return tmp\n\n def get_guid(self):\n return self.runspider.get_guid()\n\n def get_task_type(self):\n return self.runspider.get_task_type()\n\n def get_spider_statue(self):\n tmp={}\n tmp['status'] = self.status\n if self.status==self.NOT_RUN:\n tmp['action']=\"task not run\"\n elif self.status == self.RUN:\n tmp['action'] = \"task running\"\n elif self.status == self.END:\n tmp['action']=\"task end\"\n elif self.status == self.ERROR:\n tmp['action'] = \"task error\"\n self.runspider.get_spider_status(self.status,tmp)\n return tmp\n\n # controls the state before the task starts.\n def get_start_status(self):\n return self.runspider.get_start_status()\n\n # after reporting task status to the server, hand the fields the server returns to the spider; not thread safe\n def report_ask_del(self,data):\n self.runspider.report_ask_del(data)\n\n # return scraped data to the server when reporting task status; the buffer usually has to be cleared afterwards, so not thread safe\n def cut_spiter_item(self):\n date=self.runspider.get_all_item()\n return date\n\n def get_spider_item(self):\n data=self.runspider.get_item_not_clear()\n return data\n\n def get_spider_setting(self,*arg):\n return self.runspider.get_spider_setting(*arg)\n
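    # A hedged sketch (added for clarity, not in the original settings module) of
    # what a setting.DATA_AFTER_REQUEST_FILTER entry might look like, inferred from
    # load_data_after_request_filter() above: keyed by task type, each entry is a
    # list of middleware specs ordered by 'priority'; the module path is hypothetical.
    #
    # DATA_AFTER_REQUEST_FILTER = {
    #     1: [
    #         {'module': 'jd_Crawler.client.filters.captcha',
    #          'priority': 0,
    #          'limit_serial_number': 3},
    #     ],
    # }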
\n def delay_every_request(self):\n # sleep a random interval in [0.6 * sleep_high, sleep_high] between requests\n random.seed(time.time())\n try:\n sleep_low = self.sleep_high * 0.6\n sleep_time = random.uniform(sleep_low, self.sleep_high)\n time.sleep(sleep_time)\n except Exception as e:\n print(e)\n\n def run(self): # fetch the spider and run start_up\n self.status=self.RUN\n if self.runspider:\n spider=self.runspider\n try:\n spider.start_up()\n self.run_crawl(spider)\n spider.end_spider()\n self.status=self.END\n except Exception as e:\n print(traceback.print_exc())\n spider.except_del(e)\n self.status=self.ERROR\n\n def change_newip(self,prioxy_table,ip_status):\n prioxy_table['proixy_status'] = ip_status\n self.runspider.add_bad_ip(prioxy_table) # discard the old proxy\n prioxy_table = self.runspider.proixy_pool.get() # pop a fresh one from the pool\n # print(self.runspider.proixy_pool.qsize())\n return prioxy_table\n\n def manage_proixy(self,status):\n if self.use_proixy==1:\n if status=='run':\n if self.proixy_time % self.change_manage == 0:\n self.proixy_time = 1\n self.runspider.proixy_now=self.change_newip(self.runspider.proixy_now,'not bad')\n else:\n self.proixy_time+=1\n else:\n self.proixy_time = 1\n self.runspider.proixy_now = self.change_newip(self.runspider.proixy_now, status)\n\n def run_crawl(self,spider):\n #spider\n proixy_error_stand=0 # retries allowed without switching proxy\n proixy_error_change=0\n request_pool=[]\n for page in spider.init_page:\n data=Request(page,spider.headers)\n request_pool.append(data)\n for req in spider.init_request:\n request_pool.append(req)\n\n if self.use_proixy == 1:\n self.runspider.proixy_now=self.runspider.proixy_pool.get()\n while True:\n if not request_pool:\n break\n tmp_pool=[]\n for link in request_pool:\n data=None\n while True:\n try:\n self.delay_every_request()\n data = spider.scrapy_request(link)\n self.manage_proixy('run')\n filter_jieguo=self.run_data_after_request_filter(link, data, self.after_request_filter_collect)\n if not filter_jieguo:\n for item in spider.run_spider(link,data):\n if isinstance(item, Request):\n tmp_pool.append(item)\n elif isinstance(item,list):\n spider.filter_item(item)\n spider.save_item(item)\n elif isinstance(filter_jieguo,Request):\n tmp_pool.append(filter_jieguo)\n elif isinstance(filter_jieguo,Exception):\n raise filter_jieguo\n break\n except Exception as e:\n if isinstance(e,RuntimeError):\n raise e\n if isinstance(e,spider_run_error):\n proixy_error_change=int(self.get_spider_setting('proixy_error_change')['proixy_error_change'])\n proixy_error_stand=int(self.get_spider_setting('proixy_error_stand')['proixy_error_stand'])\n if not link.request_callback:\n # print(data.url)\n # context = data.content.decode(data.apparent_encoding, 'ignore')\n # print(context)\n print(traceback.print_exc())\n if proixy_error_change < int(self.get_spider_setting('proixy_error_change')['proixy_error_change']):\n if proixy_error_stand < int(self.get_spider_setting('proixy_error_stand')['proixy_error_stand']):\n proixy_error_stand+=1\n continue\n else:\n proixy_error_change += 1\n proixy_error_stand=0\n self.manage_proixy('not bad')\n else:\n proixy_error_change=1\n for item in spider.paser_error(link,data,e):\n if item!=None:\n if isinstance(item, Request):\n tmp_pool.append(item)\n if isinstance(item,list):\n spider.filter_item(item)\n spider.save_item(item)\n break\n request_pool=tmp_pool\n\n def data_filter(self):\n pass\n
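# A hedged usage sketch (not part of the original module): building a Request
# for the type-2 (selenium-driven) path; 'spider_type' and 'driver' are the
# **attr keys that run_request_callback() above expects, and demo_parser is a
# hypothetical callback.
def demo_parser(request, driver=None):
    driver.get(request.url)
    return driver

# with a live webdriver instance this would fetch the page through the driver:
# req = Request('https://example.com', request_callback=demo_parser,
#               spider_type=2, driver=webdriver_instance)
# page = req.run_request_callback()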
\n# Crawler base class\nclass general_spider(object):\n spider_logger=''\n spider_process_type=0\n headers={\n }\n def __init__(self,**arg):\n self.item=[]\n self.spider_status={}\n self.item_lock = threading.Lock()\n \n # self.headers={\n # 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) \\\n # Chrome/51.0.2704.103 Safari/537.36'\n # }\n\n self.spider_logger = logging.getLogger('basespider')\n self.spider_logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fn=logging.FileHandler('test.log')\n fn.setFormatter(formatter)\n self.spider_logger.addHandler(fn)\n self.spider_status['task_guid']=arg['task_guid']\n self.spider_status['task_type'] = arg['task_type']\n self.spider_status['task_depict'] = arg['task_depict']\n self.task_limit=arg['task_limit']\n self.task_limit_number=arg['task_limit_number']\n self.item_save = []\n self.spider_setting = crawl.setting[arg['task_type']]\n self.tool_path=crawl.setting['tool_path']\n self.proixy_now = None\n self.init_data = None\n self.init_page=[]\n self.init_request= []\n if int(self.spider_setting['use_proixy'])==1: # initialisation\n self.bad_ip_lock=threading.Lock()\n queue_length=int(self.spider_setting['proixy_ask_num']) + int(self.spider_setting['proixy_threshold'])\n self.proixy_pool = queue.Queue(maxsize=queue_length+5)\n self.bad_ip = []\n\n def scrapy_request(self,request):\n dates=None\n if request.url is not None:\n if not request.request_callback:\n url=request.url\n headers=request.headers\n data=request.data\n timeout=request.timeout\n cookies=request.cookies\n proixy = None\n prioxy_table=None\n if request.proixy:\n prioxy_table = request.proixy\n elif self.proixy_now:\n prioxy_table=self.proixy_now\n if prioxy_table:\n ip = prioxy_table['ip'] # unpack prioxy_table\n port = prioxy_table['port']\n proixy_ip = str(ip + \":\" + port)\n proixy_type = 'http' # safe default when no type field is present\n if 'http_type' in prioxy_table.keys():\n proixy_type = prioxy_table['http_type'].split('/')[0]\n elif 'proxy_type' in prioxy_table.keys():\n proixy_type = prioxy_table['proxy_type'].split('/')[0]\n # the original `!= ... or != ...` test was always true, so socks\n # proxies were silently treated as http; use membership instead\n if proixy_type not in ('socks5', 'socks4'):\n proixy_http = \"http\" + \"://\" + proixy_ip\n proixy_https = \"http\" + \"://\" + proixy_ip\n else:\n proixy_http = proixy_type + \"://\" + proixy_ip\n proixy_https = proixy_type + \"://\" + proixy_ip\n proixy={}\n proixy['http'] = proixy_http\n proixy['https'] = proixy_https\n dates = requests.get(url, headers=headers,proxies=proixy,data=data,timeout=timeout,cookies=cookies)\n else:\n dates = request.run_request_callback()\n return dates\n\n def paser_error(self,request,data,e):\n print(traceback.print_exc())\n raise e\n\n def get_spider_setting(self,*args):\n tmp={}\n for arg in args:\n if arg in self.spider_setting.keys():\n tmp[arg]= self.spider_setting[arg]\n else:\n tmp[arg]=None\n return tmp\n\n def add_bad_ip(self,data):\n if int(self.spider_setting['use_proixy']) == 1:\n self.bad_ip_lock.acquire()\n self.bad_ip.append(data)\n self.bad_ip_lock.release()\n\n def get_guid(self):\n return self.spider_status['task_guid']\n\n def get_start_status(self):\n task_type=self.get_task_type()\n filter_data={task_type:[self.task_limit,self.task_limit_number]}\n return filter_data\n
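    # Shape of the proxy record consumed by scrapy_request() above, inferred from
    # its key accesses ('ip', 'port', and 'http_type' or 'proxy_type'); the
    # concrete values are hypothetical:
    #     {'ip': '10.0.0.1', 'port': '8080', 'http_type': 'HTTP/HTTPS'}
    # which scrapy_request() turns into
    #     {'http': 'http://10.0.0.1:8080', 'https': 'http://10.0.0.1:8080'}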
\n def get_spider_status(self,run_status,tmp):\n tmp['task_guid']=self.spider_status['task_guid']\n tmp['task_type']=self.spider_status['task_type']\n tmp['task_depict']=self.spider_status['task_depict']\n tmp['spider_self']=self.spider_process_type\n if run_status==crawl.NOT_RUN:\n pass\n elif run_status==crawl.RUN:\n pass\n elif run_status==crawl.END:\n pass\n elif run_status==crawl.ERROR:\n pass\n if int(self.spider_setting['use_proixy']) == 1:\n if self.proixy_pool.qsize() < int(self.spider_setting['proixy_threshold']) and run_status != crawl.END and run_status!=crawl.ERROR:\n tmp['proixy_lack']=1\n else:\n tmp['proixy_lack']=0\n tmp['proixy_rubbish']=self.get_bad_table()\n else:\n tmp['proixy_lack'] = 0\n tmp['proixy_rubbish'] =[]\n\n def get_task_type(self):\n return self.spider_status['task_type']\n\n def start_up(self):\n pass\n\n def run_spider(self, link, context):\n pass\n\n def end_spider(self,**kwargs):\n self.clear_front_remove()\n print('the spider end')\n\n def save_item(self,items):\n self.item_lock.acquire()\n for item in items:\n self.item_save.append(item)\n self.item_lock.release()\n\n def get_item_not_clear(self):\n self.item_lock.acquire(timeout=10)\n date = self.item_save.copy()\n self.item_lock.release()\n return date\n\n def get_all_item(self):\n if self.item_lock.acquire(timeout=3):\n # print(len(self.item_save)) # for debugging; with large json arrays you otherwise cannot tell how many items there are or whether they are correct\n date=self.item_save.copy()\n self.item_save.clear()\n self.item_lock.release()\n else:\n date=None\n return date\n\n def filter_item(self,item):\n pass\n\n def report_ask_del(self,data):\n if int(self.spider_setting['use_proixy']) == 1:\n if 'proixy_ip' in data:\n for proixy in data['proixy_ip']:\n self.proixy_pool.put(proixy)\n\n def except_del(self,excep): # the default behaviour records the error-related fields\n pass\n\n def get_bad_table(self): # this logic could also be implemented with a join\n self.bad_ip_lock.acquire()\n date=self.bad_ip.copy()\n self.bad_ip.clear()\n self.bad_ip_lock.release()\n return date\n\n def clear_front_remove(self):\n if int(self.spider_setting['use_proixy']) == 1:\n while not self.proixy_pool.empty():\n proixy_ip=self.proixy_pool.get()\n proixy_ip['proixy_status']='not bad'\n self.add_bad_ip(proixy_ip)\n pass\n pass\n\n def request_selenuim(self,request,**arg):\n # apart from the proxy, little else seems to need attention here\n # print(request.url)\n driver=arg['driver']\n driver.get(request.url)\n return driver\n\n# Task generator\nclass task_generate(object):\n def __init__(self,**arg):\n self.spider_kind=arg\n\n def create_spider(self,spider_name,**arg):\n return crawl(self.spider_kind[spider_name](**arg))\n\n def insert_spider_kind(self,**arg):\n self.spider_kind.update(**arg)\n\n def remove_spider_kind(self,*args):\n for arg in args:\n del self.spider_kind[arg]\n\n\n# filter_middlerware base class\nclass filter_middlerware(object):\n def __init__(self,Request,data,**arg):\n self.data_collecter=arg['data_collecter']\n\n def get_data_from_collecter(self,data):\n print(self.data_collecter)\n if data in self.data_collecter.keys():\n return self.data_collecter[data]\n else:\n return None\n\n def put_data_to_collecter(self,data):\n self.data_collecter.update(data)\n\n def filter_data(self):\n\n pass","sub_path":"jd_Crawler/client/general_spider.py","file_name":"general_spider.py","file_ext":"py","file_size_in_byte":18848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
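A hedged sketch (not in the original module) of a concrete filter middleware for the general_spider record above, inferred from load_data_after_request_filter() and run_data_after_request_filter(): a subclass needs a class-level name matching its collector entry, and filter_data() returns None to accept the response, a Request to schedule a retry, or an Exception to abort; the captcha marker string is hypothetical.

class captcha_retry_filter(filter_middlerware):
    name = 'captcha_retry_filter'

    def __init__(self, request, data, **arg):
        super(captcha_retry_filter, self).__init__(request, data, **arg)
        self.request = request
        self.data = data

    def filter_data(self):
        # retry the same URL when the response looks like a captcha page
        if self.data is not None and 'verify.jd.com' in self.data.text:
            return self.request
        return None  # response accepted; the serial counter resets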
 +{"seq_id":"286215847","text":"#####################################################################################################\r\n#\r\n# Creates a csv with data over time at some given resolution (e.g. number of generations)\r\n# per selection/diagnostic treatment\r\n#\r\n# Script will go through each replicate for a specific selection/diagnostic treatment\r\n#\r\n# Command Line Inputs:\r\n#\r\n# data_directory: directory where data is located\r\n# dump_directory: directory where the output is dumped\r\n# selection: selection scheme used\r\n# diagnostic: diagnostic used\r\n# seed_offset: seed offset (if any)\r\n# resolution: resolution of the over time data to gather (e.g. collect data every 100 generations)\r\n# objectives: dimensionality\r\n# accuracy: satisfactory trait accuracy %\r\n# generations: generations run\r\n# param_two: genotypic (0) or phenotypic (1) similarity for fitness sharing\r\n#\r\n# Output: csv with over time data for a specific selection scheme and diagnostic\r\n#\r\n# python3\r\n#####################################################################################################\r\n\r\n######################## IMPORTS ########################\r\nimport pandas as pd\r\nimport argparse\r\nimport sys\r\nimport os\r\n\r\n# file location for data-params.py file\r\nsys.path.insert(1, '../')\r\nimport data_params as dp\r\n\r\n # loop through the different files that exist\r\ndef DirExplore(data, dump, sel, dia, offs, pt, val):\r\n\r\n # check that selection data folder exists\r\n SEL_DIR = dp.GetDataDirectory(data,sel,pt)\r\n\r\n if val:\r\n print('Full data Dir=', SEL_DIR + 'DIA_' + dp.SetDiagnostic(dia) + '__' + dp.SetSelectionVar(sel) + '_XXX' + '__SEED_XXXXXX__MVC/')\r\n else:\r\n print('Full data Dir=', SEL_DIR + 'DIA_' + dp.SetDiagnostic(dia) + '__' + dp.SetSelectionVar(sel) + '_XXX' + '__SEED_XXXXXX/')\r\n\r\n # loop through sub data directories\r\n print('Now checking data replicates sub directories')\r\n\r\n SEEDS = dp.SetSeedSets(sel)\r\n # gens we are expecting\r\n GEN_LIST = [x for x in range(int(dp.GENERATIONS)+1) if x % dp.RESOLUTION == 0]\r\n # collect all data\r\n DF_LIST = []\r\n\r\n # iterate through the sets of seeds\r\n for i in range(len(SEEDS)):\r\n for s in SEEDS[i]:\r\n seed = str(s + offs)\r\n DATA_DIR = SEL_DIR + 'DIA_' + dp.SetDiagnostic(dia) + '__' + dp.SetSelectionVar(sel) + '_' + dp.SetVarList(sel)[i] + '__SEED_' + seed\r\n if val:\r\n DATA_DIR += '__MVC/data.csv'\r\n else:\r\n DATA_DIR += '/data.csv'\r\n\r\n print('Sub directory:', DATA_DIR)\r\n\r\n # get data from file and check if can store it\r\n df = pd.read_csv(DATA_DIR)\r\n df = df.iloc[::dp.RESOLUTION, :]\r\n\r\n # time to export the data\r\n cdf = pd.DataFrame(\r\n { 'gen': pd.Series(GEN_LIST),\r\n 'trt': pd.Series([dp.SetVarList(sel)[i]] * len(GEN_LIST)),\r\n dp.POP_FIT_AVG: pd.Series(df[dp.POP_FIT_AVG].tolist()),\r\n dp.POP_FIT_MAX: pd.Series(df[dp.POP_FIT_MAX].tolist()),\r\n dp.POP_OPT_MAX: pd.Series(df[dp.POP_OPT_MAX].tolist()),\r\n dp.POP_UNI_OBJ: pd.Series(df[dp.POP_UNI_OBJ].tolist()),\r\n dp.POP_STR_MAX: pd.Series(df[dp.POP_STR_MAX].tolist()),\r\n dp.ARC_ACTI_GENE: pd.Series(df[dp.ARC_ACTI_GENE].tolist()),\r\n dp.OVERLAP: pd.Series(df[dp.OVERLAP].tolist()),\r\n dp.ARCHIVE_CNT: pd.Series(df[dp.ARCHIVE_CNT].tolist()),\r\n dp.UNI_STR_POS: pd.Series(df[dp.UNI_STR_POS].tolist()),\r\n dp.PMIN: pd.Series(df[dp.PMIN].tolist()),\r\n dp.POP_MAX_TRT: pd.Series(df[dp.POP_MAX_TRT].tolist()),\r\n dp.POP_MAX_GENE: pd.Series(df[dp.POP_MAX_GENE].tolist()),\r\n dp.PARETO_CNT : pd.Series(df[dp.PARETO_CNT].tolist())\r\n })\r\n DF_LIST.append(cdf)\r\n\r\n fin_df = pd.concat(DF_LIST)\r\n\r\n fin_df.to_csv(path_or_buf= dump + 'over-time-' + dp.SetDiagnostic(dia).lower() + '-' + dp.SetSelection(sel,pt).lower() + '.csv', index=False)\r\n
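# Sanity sketch (illustrative numbers, not from data_params): the iloc thinning
# in DirExplore() above only lines up with GEN_LIST when data.csv holds one row
# per generation, i.e. GENERATIONS + 1 rows. E.g. with GENERATIONS = 1000 and
# RESOLUTION = 100:
#     len([x for x in range(1001) if x % 100 == 0]) == 11
#     len(list(range(1001))[::100]) == 11
# so the pd.Series columns built above align one-to-one with 'gen'.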
parser.add_argument(\"selection\", type=int, help=\"Selection scheme we are looking for? \\n0: Truncation\\n1: Tournament\\n2: Fitness Sharing\\n4: Espilon Lexicase\\n6: Nondominated Sorting\\n7: Novelty Search\")\r\n parser.add_argument(\"diagnostic\", type=int, help=\"Diagnostic we are looking for?\\n0: Exploitation\\n1: Structured Exploitation\\n2: Strong Ecology\\n3: Exploration\\n4: Weak Ecology\")\r\n parser.add_argument(\"seed_offset\", type=int, help=\"Experiment seed offset. (REPLICATION_OFFSET + PROBLEM_SEED_OFFSET\")\r\n parser.add_argument(\"param_two\", type=str, help=\"Second paramater for any selection scheme\")\r\n parser.add_argument(\"--valleys\", type=int, help=\"True (1) or False (0) on whether or not valleys are applied\", action='store', required=False)\r\n\r\n # Parse all the arguments\r\n args = parser.parse_args()\r\n data_dir = args.data_dir.strip()\r\n print('Data directory=',data_dir)\r\n dump_dir = args.dump_dir.strip()\r\n print('Dump directory=', dump_dir)\r\n selection = args.selection\r\n print('Selection scheme=', dp.SetSelection(selection,args.param_two))\r\n diagnostic = args.diagnostic\r\n print('Diagnostic=', dp.SetDiagnostic(diagnostic))\r\n offset = args.seed_offset\r\n print('Offset=', offset)\r\n param_two = args.param_two\r\n print('2nd param=', param_two)\r\n valleys = bool(args.valleys)\r\n print('valleys=', valleys)\r\n\r\n # Get to work!\r\n print(\"\\nChecking all related data directories now!\")\r\n DirExplore(data_dir, dump_dir, selection, diagnostic, offset, param_two, valleys)\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"DataTools/Collector/ot-data-collect.py","file_name":"ot-data-collect.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"647205438","text":"import discord, asyncio, random, requests, json\nfrom discord.ext import commands, tasks\nfrom datetime import datetime, timedelta\nfrom discord_slash import cog_ext, SlashContext\n\nclass Slash(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_ready(self):\n global uptime_start\n uptime_start = datetime.now()\n await self.bot.change_presence(activity=discord.Game(name=\"redémarrage...\"))\n await asyncio.sleep(3)\n self.status.start()\n\n @cog_ext.cog_slash(name=\"ping\", description=\"Afficher la latence et l'uptime du bot !.\")\n async def _ping(self, ctx):\n uptime_now = datetime.now()\n t1 = timedelta(days=uptime_start.day, hours=uptime_start.hour, minutes=uptime_start.minute, seconds=uptime_start.second)\n t2 = timedelta(days=uptime_now.day, hours=uptime_now.hour, minutes=uptime_now.minute, seconds=uptime_now.second)\n uptime = t2 - t1\n bot_latency = round(self.bot.latency * 1000)\n embed = discord.Embed(colour = discord.Colour.green())\n embed.add_field(name=':ping_pong: **ping**', value=f'{bot_latency}ms', inline=False)\n embed.add_field(name=':clock8: **uptime**', value=str(uptime).replace('day','jour').replace('days', 'jours'), inline=False)\n await ctx.send(embed=embed)\n\n @tasks.loop(seconds=600.0)\n async def status(self):\n rdnb = random.randint(1,3)\n if rdnb == 1:\n global_membercount, global_servers = 0, 0\n for guild in self.bot.guilds:\n for member in guild.members:\n if member.bot == False:\n global_membercount += 1\n global_servers += 1\n await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"{global_servers} serveurs | {global_membercount} membres\"))\n 
elif rdnb == 2:\n await self.bot.change_presence(activity=discord.Game(name=\"iso-land.org/amanager\"))\n elif rdnb == 3:\n a_file = open(\"no-move.json\", \"r\")\n json_object_nm = json.load(a_file)\n a_file.close()\n changelog_versions = json_object_nm['changelogs']\n changelog_versions = list(changelog_versions)\n changelog_versions = changelog_versions[-1]\n await self.bot.change_presence(activity=discord.Game(name=f\"v{changelog_versions}\"))\n\ndef setup(bot):\n bot.add_cog(Slash(bot))\n\ndef teardown(bot):\n bot.remove_cog(\"startup-scripts\")","sub_path":"cogs/startup-scripts.py","file_name":"startup-scripts.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"508218659","text":"from object_database.web.cells.cells import (\n # Methods\n registerDisplay,\n context,\n quoteForJs,\n multiReplace,\n augmentToBeUnique,\n sessionState,\n ensureSubscribedType,\n ensureSubscribedSchema,\n wrapCallback,\n\n # Classes\n GeventPipe,\n Cells,\n Slot,\n SessionState,\n Cell,\n Card,\n CardTitle,\n Modal,\n Octicon,\n Badge,\n CollapsiblePanel,\n Text,\n Padding,\n Span,\n Sequence,\n Columns,\n LargePendingDownloadDisplay,\n HeaderBar,\n Main,\n _NavTab,\n Tabs,\n Dropdown,\n Container,\n Scrollable,\n RootCell,\n Traceback,\n Code,\n ContextualDisplay,\n Subscribed,\n SubscribedSequence,\n Popover,\n Grid,\n SortWrapper,\n SingleLineTextBox,\n Table,\n Clickable,\n Button,\n ButtonGroup,\n LoadContentsFromUrl,\n SubscribeAndRetry,\n Expands,\n CodeEditor,\n Sheet,\n Plot,\n _PlotUpdater,\n AsyncDropdown,\n CircleLoader\n)\n\nfrom object_database.web.cells.CellsTestMixin import CellsTestMixin\n\nfrom object_database.web.cells.util import waitForCellsCondition\n\nMAX_FPS = 10\n","sub_path":"object_database/web/cells/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"29287018","text":"import iemdb\nimport numpy\nimport mx.DateTime\nCOOP = iemdb.connect('coop', bypass=True)\nccursor = COOP.cursor()\n\nMESOSITE = iemdb.connect('mesosite', bypass=True)\nmcursor = MESOSITE.cursor()\n\nelnino = []\nmcursor.execute(\"\"\"SELECT anom_34 from elnino where monthdate >= '2007-01-01'\"\"\")\nfor row in mcursor:\n elnino.append( float(row[0]) )\n\nelnino = numpy.array(elnino)\n\nclimate = []\nccursor.execute(\"\"\"\n SELECT avg(d), month from (SELECT year, month, avg((high+low)/2.0) as d from alldata_ia \n where station = 'IA2203' and day < '2012-12-01'\n GROUP by year, month) as foo GROUP by month ORDER by month ASC\n\"\"\")\nfor row in ccursor:\n climate.append( float(row[0]) )\n\ndiff = []\nccursor.execute(\"\"\"\n SELECT year, month, avg((high+low)/2.0) from alldata_ia where \n station = 'IA2203' and year > 2006 \n GROUP by year, month ORDER by year, month ASC\n\"\"\")\nfor row in ccursor:\n if row[0] == 2012 and row[1] == 18:\n diff.append( row[2] - august )\n else:\n diff.append( float(row[2]) - climate[ row[1] -1] )\n\ndiff = numpy.array(diff)\n\nimport matplotlib.pyplot as plt\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.set_title(\"Des Moines Monthly Average Temperature Departure\\nEl Nino 3.4 Index\")\n#\"\"\"\nxticks = []\nxticklabels = []\nfor i in range(0, len(diff),6):\n ts = mx.DateTime.DateTime(2007,1,1) + mx.DateTime.RelativeDateTime(months=i)\n if ts.month == 1:\n fmt = \"%b\\n%Y\"\n else:\n fmt = \"%b\"\n xticklabels.append( ts.strftime(fmt) )\n 
xticks.append( i )\n\nbars = ax.bar(numpy.arange(0, len(diff))-0.4, diff, fc='r', ec='r')\nfor bar in bars:\n if bar.get_xy()[1] < 0:\n bar.set_facecolor('b')\n bar.set_edgecolor('b')\n\nax2 = ax.twinx()\n\nax2.plot(numpy.arange(0, len(elnino)), elnino, zorder=2, color='k')\nax2.set_ylabel(\"El Nino 3.4 Index (line)\")\n\nax.set_ylabel(\"Departure $^{\\circ}\\mathrm{F}$ (bars)\")\nax.set_xlabel(\"* Thru 30 November\")\nax.grid(True)\nax.set_xticks( xticks )\nax.set_xticklabels( xticklabels )\nax.set_xlim(-0.5, len(diff)+0.5)\nax.set_ylim(-20,20)\n\"\"\"\nimport scipy.stats\nfor i in range(0,12):\n print len(diff[i:-2]), len(elnino[:-(i+1)])\n print i, numpy.corrcoef(diff[i:-2], elnino[:-(i+1)])[0,1]\n#ax.scatter(diff[2:], elnino[:-1])\n\"\"\"\nfig.savefig('test.ps')\nimport iemplot\niemplot.makefeature('test')\n","sub_path":"scripts/feature/month_depatures.py","file_name":"month_depatures.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"178185606","text":"\"\"\"\nUtility function for building an address string from a mycity request\n\n\"\"\"\n\nfrom streetaddress import StreetAddressParser\nimport mycity.intents.intent_constants as intent_constants\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_origin_address(req):\n \"\"\"\n Builds an address from an Alexa session. Assumes city is Boston if not\n specified\n \n :param req: MyCityRequestDataModel object\n :return: String containing full address\n \"\"\"\n logger.debug('MyCityRequestDataModel received:' + req.get_logger_string())\n address_parser = StreetAddressParser()\n current_address = \\\n req.session_attributes[intent_constants.CURRENT_ADDRESS_KEY]\n parsed_address = address_parser.parse(current_address)\n origin_address = \" \".join([parsed_address[\"house\"],\n parsed_address[\"street_full\"]])\n if parsed_address[\"other\"]:\n origin_address += \" {}\".format(parsed_address[\"other\"])\n else:\n origin_address += \" Boston MA\"\n\n return origin_address\n\n\n","sub_path":"mycity/mycity/utilities/address_utils.py","file_name":"address_utils.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"209030498","text":"import os\nimport sys\n\nfrom os.path import dirname, join\n\nimport pytest\n\nsys.path.insert(0, join(dirname(__file__), \"..\", \"..\"))\n\nfrom wptrunner import browsers\n\n\nproducts = browsers.product_list\nactive_products = set()\nall_products = products\n\nif \"CURRENT_TOX_ENV\" in os.environ:\n current_tox_env_split = os.environ[\"CURRENT_TOX_ENV\"].split(\"-\")\n\n tox_env_extra_browsers = {\n \"chrome\": {\"chrome_android\"},\n \"servo\": {\"servodriver\"},\n }\n\n active_products = set(products) & set(current_tox_env_split)\n for product in frozenset(active_products):\n active_products |= tox_env_extra_browsers.get(product, set())\n\n products = []\n for product in all_products:\n if product in active_products:\n products.append(product)\n else:\n products.append(pytest.param(product, marks=pytest.mark.skip))\n\n\ndef pytest_generate_tests(metafunc):\n if \"product\" in metafunc.fixturenames:\n metafunc.parametrize(\"product\", products)\n elif \"all_product\" in metafunc.fixturenames:\n metafunc.parametrize(\"all_product\", 
all_products)\n","sub_path":"tools/wptrunner/wptrunner/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"386215898","text":"#!/usr/bin/env python3\n'''\nThis is the first script in the \"lightcone halo\" pipeline. The goal of this script is to use merger\ntree information to flag halos that intersect the lightcone and make a unique determination of which\nhalo catalog epoch from which to draw the halo.\n\nUsage\n-----\n$ ./build_mt.py --help\n'''\n\nimport sys\nimport glob\nimport time\nimport gc\nimport os\nfrom pathlib import Path\n\nimport asdf\nimport numpy as np\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\nimport argparse\nimport numba as nb\nfrom astropy.table import Table\nfrom astropy.io import ascii\n\nfrom tools.InputFile import InputFile\nfrom tools.merger import simple_load, get_halos_per_slab, extract_superslab, extract_superslab_minified\nfrom tools.aid_asdf import save_asdf\nfrom tools.read_headers import get_lc_info\n\n# these are probably just for testing; should be removed for production\nDEFAULTS = {}\nDEFAULTS['sim_name'] = \"AbacusSummit_highbase_c000_ph100\" # AbacusSummit_base_c000_ph006\nDEFAULTS['merger_parent'] = Path(\"/mnt/gosling2/bigsims/merger\")\n#DEFAULTS['merger_parent'] = Path(\"/global/project/projectdirs/desi/cosmosim/Abacus/merger\")\nDEFAULTS['catalog_parent'] = Path(\"/mnt/gosling1/boryanah/light_cone_catalog/\")\n#DEFAULTS['catalog_parent'] = Path(\"/global/cscratch1/sd/boryanah/light_cone_catalog/\")\nDEFAULTS['z_start'] = 0.65 # 0.8 # 0.5\nDEFAULTS['z_stop'] = 0.72 # 1.25 # 0.8 # 0.5\nCONSTANTS = {'c': 299792.458} # km/s, speed of light\n\ndef reorder_by_slab(fns,minified):\n '''\n Reorder filenames in terms of their slab number\n '''\n if minified:\n return sorted(fns, key=extract_superslab_minified)\n else:\n return sorted(fns, key=extract_superslab)\n\ndef get_one_header(merger_dir):\n '''\n Get an example header by looking at one association\n file in a merger directory\n '''\n\n # choose one of the merger tree files\n fn = list(merger_dir.glob('associations*.asdf'))[0]\n with asdf.open(fn) as af:\n header = af['header']\n return header\n \ndef get_zs_from_headers(snap_names):\n '''\n Read redshifts from merger tree files\n '''\n \n zs = np.zeros(len(snap_names))\n for i in range(len(snap_names)):\n snap_name = snap_names[i]\n with asdf.open(snap_name) as f:\n zs[i] = np.float(f[\"header\"][\"Redshift\"])\n return zs\n\n@nb.njit\ndef dist(pos1, pos2, L=None):\n '''\n Calculate L2 norm distance between a set of points\n and either a reference point or another set of points.\n Optionally includes periodicity.\n \n Parameters\n ----------\n pos1: ndarray of shape (N,m)\n A set of points\n pos2: ndarray of shape (N,m) or (m,) or (1,m)\n A single point or set of points\n L: float, optional\n The box size. 
Will do a periodic wrap if given.\n \n Returns\n -------\n dist: ndarray of shape (N,)\n The distances between pos1 and pos2\n '''\n \n # read dimension of data\n N, nd = pos1.shape\n \n # allow pos2 to be a single point\n pos2 = np.atleast_2d(pos2)\n assert pos2.shape[-1] == nd\n broadcast = len(pos2) == 1\n \n dist = np.empty(N, dtype=pos1.dtype)\n \n i2 = 0\n for i in range(N):\n delta = 0.\n for j in range(nd):\n dx = pos1[i][j] - pos2[i2][j]\n if L is not None:\n if dx >= L/2:\n dx -= L\n elif dx < -L/2:\n dx += L\n delta += dx*dx\n dist[i] = np.sqrt(delta)\n if not broadcast:\n i2 += 1\n return dist\n\ndef unpack_inds(halo_ids):\n '''\n Unpack indices in Sownak's format of Nslice*1e12 \n + superSlabNum*1e9 + halo_position_superSlab\n '''\n \n # obtain slab number and index within slab\n id_factor = int(1e12)\n slab_factor = int(1e9)\n index = (halo_ids % slab_factor).astype(int)\n slab_number = ((halo_ids % id_factor - index) // slab_factor).astype(int)\n return slab_number, index\n\ndef correct_inds(halo_ids, N_halos_slabs, slabs, inds_fn):\n '''\n Reorder indices for given halo index array with \n corresponding n halos and slabs for its time epoch\n '''\n\n # number of halos in the loaded chunks\n N_halos_load = np.array([N_halos_slabs[i] for i in inds_fn])\n \n # unpack slab and index for each halo\n slab_ids, ids = unpack_inds(halo_ids)\n\n # total number of halos in the slabs that we have loaded\n N_halos = np.sum(N_halos_load)\n offsets = np.zeros(len(inds_fn), dtype=int)\n offsets[1:] = np.cumsum(N_halos_load)[:-1]\n \n # determine if unpacking halos for only one file (Merger_this['HaloIndex']) -- no need to offset \n if len(inds_fn) == 1: return ids\n\n '''\n # an attempt to speed up code but might be slower than currently done\n # TODO: there's a bug, ask Lehman (not necessary to fix)\n # determine if indices are contiguous in terms of their chunk number (np.unique will return slab_unique sorted)\n slab_unique, slab_first_ids = np.unique(slab_ids, return_index=True)\n contiguous = True\n for i in range(len(slab_first_ids)):\n if slab_first_ids[i] != offsets[np.argsort(inds_fn)][i] or slab_unique[i] != np.sort(inds_fn)[i]:\n contiguous = False\n if contiguous:\n for i in range(len(slab_first_ids)):\n ids[N_halos_load[i]*i:N_halos_load[i]*(i+1)] += offsets[i]\n return ids\n ''' \n \n # select the halos belonging to given slab\n for i, ind_fn in enumerate(inds_fn):\n select = np.where(slab_ids == slabs[ind_fn])[0]\n ids[select] += offsets[i]\n\n return ids\n\ndef get_mt_info(fns_load, fields, minified):\n '''\n Load merger tree and progenitors information\n '''\n \n data = simple_load(fns_load, fields=fields)\n merger = data['merger']\n \n # get number of halos in each slab and number of slabs\n # TODO: simple_load() could return this, but there's a use for this function standalone elsewhere\n # TODO: LHG is unsure if the order returned by get_halos_per_slab() matches that returned by simple_load(), and if that matters\n halos_per_slab = get_halos_per_slab(fns, minified)\n\n # if loading all progenitors\n if \"Progenitors\" in fields:\n num_progs = merger[\"NumProgenitors\"]\n # get an array with the starting indices of the progenitors array\n start_progs = np.empty(len(merger), dtype=int)\n start_progs[0] = 0\n start_progs[1:] = num_progs.cumsum()[:-1]\n merger.add_column(start_progs, name='StartProgenitors', copy=False)\n\n return data, halos_per_slab\n\ndef solve_crossing(r1, r2, pos1, pos2, chi1, chi2, Lbox, origin):\n '''\n Solve when the crossing of the light cones occurs 
and compute the\n interpolated position and velocity at the crossing\n '''\n \n # identify where the distance between this object and its main progenitor is larger than half the boxsize (or really even 4 Mpc/h since that is Sownak's boundary)\n delta_pos = np.abs(pos2 - pos1)\n delta_pos = np.where(delta_pos > 0.5 * Lbox, (delta_pos - Lbox), delta_pos)\n delta_sign = np.sign(pos1 - pos2)\n\n # move the halos so that you keep things continuous\n pos1 = pos2 + delta_sign * delta_pos\n r1 = dist(pos1, origin)\n r2 = dist(pos2, origin)\n\n # solve for chi_star, where chi = eta_0-eta\n # equation is r1+(chi1-chi)/(chi1-chi2)*(r2-r1) = chi, with solution:\n chi_star = (r1 * (chi1 - chi2) + chi1 * (r2 - r1)) / ((chi1 - chi2) + (r2 - r1))\n\n # get interpolated positions of the halos\n v_avg = (pos2 - pos1) / (chi1 - chi2) # og\n pos_star = pos1 + v_avg * (chi1 - chi_star[:, None])\n\n\n # enforce boundary conditions by periodic wrapping\n pos_star[pos_star >= Lbox/2.] = pos_star[pos_star >= Lbox/2.] - Lbox\n pos_star[pos_star < -Lbox/2.] = pos_star[pos_star < -Lbox/2.] + Lbox\n \n # interpolated velocity [km/s]\n vel_star = v_avg * CONSTANTS['c'] # vel1+a_avg*(chi1-chi_star)\n\n # mark True if closer to chi2 (this snapshot)\n bool_star = np.abs(chi1 - chi_star) > np.abs(chi2 - chi_star)\n\n # condition to check whether halo in this light cone band\n # assert np.sum((chi_star > chi1) | (chi_star < chi2)) == 0, \"Solution is out of bounds\"\n \n return chi_star, pos_star, vel_star, bool_star\n\ndef offset_pos(pos,ind_origin,all_origins):\n '''\n Offset the interpolated positions to create continuous light cones\n '''\n\n # location of initial observer\n first_observer = all_origins[0]\n current_observer = all_origins[ind_origin]\n offset = (first_observer-current_observer)\n pos += offset\n return pos\n
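# Derivation note (added for clarity; not executed by the pipeline): along the
# line of sight, r(chi) is linear between the two epochs,
#     r(chi) = r1 + (chi1 - chi) / (chi1 - chi2) * (r2 - r1),
# and the light cone is the locus r = chi, so setting r(chi) = chi yields the
# chi_star expression used in solve_crossing() above. Quick check with made-up
# distances:
#     r1, r2, chi1, chi2 = 1200., 1100., 1250., 1050.
#     chi_star = (r1*(chi1-chi2) + chi1*(r2-r1)) / ((chi1-chi2) + (r2-r1))  # 1150.0
#     r1 + (chi1-chi_star)/(chi1-chi2)*(r2-r1)  # also 1150.0, i.e. on the cone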
\ndef main(sim_name, z_start, z_stop, merger_parent, catalog_parent, resume=False, plot=False):\n '''\n Main function.\n The algorithm: for each merger tree epoch, for \n each superslab, for each light cone origin,\n compute the intersection of the light cone with\n each halo, using the interpolated position\n to the previous merger epoch (and possibly a \n velocity correction). If the intersection is\n between the current and previous merger epochs, \n then record the closer one as that halo's\n epoch and mark its progenitors as ineligible.\n Will need one padding superslab in the previous\n merger epoch. Can process in a rolling fashion.\n '''\n \n merger_dir = merger_parent / sim_name\n header = get_one_header(merger_dir)\n \n # simulation parameters\n Lbox = header['BoxSize']\n # location of the LC origins in Mpc/h\n origins = np.array(header['LightConeOrigins']).reshape(-1,3)\n\n # just for testing with highbase. remove!\n origins /= 2.\n \n # directory where we save the final outputs\n cat_lc_dir = catalog_parent / sim_name / \"halos_light_cones/\"\n os.makedirs(cat_lc_dir, exist_ok=True)\n\n # directory where we save the current state if we want to resume\n os.makedirs(cat_lc_dir / \"tmp\", exist_ok=True)\n with open(cat_lc_dir / \"tmp\" / \"tmp.log\", \"a\") as f:\n f.writelines([\"# Starting light cone catalog construction in simulation %s \\n\"%sim_name])\n \n # all redshifts, steps and comoving distances of light cones files; high z to low z\n # remove presaving after testing done (or make sure presaved can be matched with simulation)\n if not os.path.exists(\"data_headers/coord_dist.npy\") or not os.path.exists(\"data_headers/redshifts.npy\"):\n zs_all, steps, chis_all = get_lc_info(\"all_headers\")\n np.save(\"data_headers/redshifts.npy\", zs_all)\n np.save(\"data_headers/coord_dist.npy\", chis_all)\n zs_all = np.load(\"data_headers/redshifts.npy\")\n chis_all = np.load(\"data_headers/coord_dist.npy\")\n zs_all[-1] = float(\"%.1f\" % zs_all[-1]) # LHG: I guess this is trying to match up to some filename or something?\n\n # get functions relating chi and z\n chi_of_z = interp1d(zs_all, chis_all)\n z_of_chi = interp1d(chis_all, zs_all)\n\n # more accurate, slightly slower\n if not os.path.exists(\"data/zs_mt.npy\"):\n # all merger tree snapshots and corresponding redshifts\n snaps_mt = sorted(merger_dir.glob(\"associations_z*.0.asdf\"))\n zs_mt = get_zs_from_headers(snaps_mt)\n np.save(\"data/zs_mt.npy\", zs_mt)\n zs_mt = np.load(\"data/zs_mt.npy\")\n\n # number of chunks\n n_chunks = len(list(merger_dir.glob(\"associations_z%4.3f.*.asdf\"%zs_mt[0])))\n print(\"number of chunks = \",n_chunks)\n\n # starting and finishing redshift indices\n ind_start = np.argmin(np.abs(zs_mt - z_start))\n ind_stop = np.argmin(np.abs(zs_mt - z_stop))\n\n if resume:\n # if user wants to resume from previous state, create padded array for marking whether chunk has been loaded\n # one flag per (superslab, origin) pair, so the second axis is the number of origins\n resume_flags = np.ones((n_chunks, origins.shape[0]), dtype=bool)\n \n # previous redshift, distance between shells\n infile = InputFile(cat_lc_dir / \"tmp\" / \"tmp.log\")\n z_this_tmp = infile.z_prev\n delta_chi_old = infile.delta_chi\n chunk = infile.super_slab\n assert (np.abs(n_chunks-1 - chunk) < 1.0e-6), \"Your recorded state did not complete all chunks, can't resume from old\"\n assert (np.abs(zs_mt[ind_start] - z_this_tmp) < 1.0e-6), \"Your recorded state is not for the correct redshift, can't resume from old\"\n with open(cat_lc_dir / \"tmp\" / \"tmp.log\", \"a\") as f:\n f.writelines([\"# Resuming from redshift z = %4.3f \\n\"%z_this_tmp])\n else:\n # delete the existing temporary files\n tmp_files = list((cat_lc_dir / \"tmp\").glob(\"*\"))\n for i in range(len(tmp_files)):\n os.unlink(str(tmp_files[i]))\n resume_flags = np.zeros((n_chunks, origins.shape[0]), dtype=bool)\n\n # fields to extract from the merger trees\n # fields_mt = ['HaloIndex','HaloMass','Position','MainProgenitor','Progenitors','NumProgenitors']\n # lighter version\n fields_mt = ['HaloIndex', 'Position', 'MainProgenitor']\n\n # redshift of closest point on wall between original and copied box\n z1 = z_of_chi(0.5 * Lbox - origins[0][0])\n # redshift of closest point where all three boxes touch\n # z2 = z_of_chi((0.5*Lbox-origin[0])*np.sqrt(2))\n # furthest point where all three boxes touch;\n z3 = z_of_chi((0.5 * Lbox - origins[0][0]) * np.sqrt(3))\n\n # initialize difference between the conformal time of last two shells\n # (when resuming, keep the delta_chi_old read back from the log above)\n if not resume:\n delta_chi_old = 0.0\n \n for i 
in range(ind_start, ind_stop + 1):\n\n # this snapshot redshift and the previous\n z_this = zs_mt[i]\n z_prev = zs_mt[i + 1]\n print(\"redshift of this and the previous snapshot = \", z_this, z_prev)\n\n # coordinate distance of the light cone at this redshift and the previous\n assert z_this >= np.min(zs_all), \"You need to set starting redshift to the smallest value of the merger tree\"\n chi_this = chi_of_z(z_this)\n chi_prev = chi_of_z(z_prev)\n delta_chi = chi_prev - chi_this\n print(\"comoving distance between this and previous snapshot = \", delta_chi)\n\n # read merger trees file names at this and previous snapshot from minified version \n # LHG: do we need to support both minified and non-minified separately? I thought all the data was in the minifted format now.\n fns_this = list(merger_dir.glob(f'associations_z{z_this:4.3f}.*.asdf.minified'))\n fns_prev = list(merger_dir.glob(f'associations_z{z_prev:4.3f}.*.asdf.minified'))\n minified = True\n\n # if minified files not available, load the regular files\n if len(list(fns_this)) == 0 or len(list(fns_prev)) == 0:\n fns_this = list(merger_dir.glob(f'associations_z{z_this:4.3f}.*.asdf'))\n fns_prev = list(merger_dir.glob(f'associations_z{z_prev:4.3f}.*.asdf'))\n minified = False\n\n # turn file names into strings\n fns_this = [str(f) for f in fns_this]\n fns_prev = [str(f) for f in fns_prev]\n\n # number of merger tree files\n print(\"number of files = \", len(fns_this), len(fns_prev))\n assert n_chunks == len(fns_this) and n_chunks == len(fns_prev), \"Incomplete merger tree files\"\n\n # reorder file names by super slab number\n fns_this = reorder_by_slab(fns_this,minified)\n fns_prev = reorder_by_slab(fns_prev,minified)\n \n # maybe we want to support resuming from arbitrary superslab\n first_ss = 0\n \n # We're going to be loading slabs in a rolling fashion:\n # reading the \"high\" slab at the leading edge, discarding the trailing \"low\" slab\n # and moving the mid to low. 
But first we need to read all three to prime the queue\n mt_prev = {} # indexed by slab num\n mt_prev[(first_ss-1)%n_chunks] = get_mt_info(fns_prev[(first_ss-1)%n_chunks], fields=fields_mt, minified=minified)\n mt_prev[first_ss] = get_mt_info(fns_prev[first_ss], fields=fields_mt, minified=minified)\n\n # for each chunk\n for k in range(first_ss,n_chunks):\n # starting and finishing superslab chunks\n klow = (k-1)%n_chunks\n khigh = (k+1)%n_chunks\n \n # Slide down by one\n if (klow-1)%n_chunks in mt_prev:\n del mt_prev[(klow-1)%n_chunks]\n mt_prev[khigh] = get_mt_info(fns_prev[khigh], fields=fields_mt, minified=minified)\n\n print(f\"Loaded chunk {k} in this redshift, and {tuple(mt_prev)} in previous\")\n # get merger tree data for this snapshot and for the previous one\n mt_data_this, halos_per_slab_this = get_mt_info(fns_this[k], fields=fields_mt, minified=minified)\n \n # ======== LHG: haven't edited below here\n\n # number of halos in this step and previous step; this depends on the number of files requested\n N_halos_this = np.sum(N_halos_slabs_this[inds_fn_this])\n N_halos_prev = np.sum(N_halos_slabs_prev[inds_fn_prev])\n print(\"N_halos_this = \", N_halos_this)\n print(\"N_halos_prev = \", N_halos_prev)\n \n # mask where no merger tree info is available (because we don'to need to solve for eta star for those)\n noinfo_this = Merger_this['MainProgenitor'] <= 0\n info_this = Merger_this['MainProgenitor'] > 0\n \n # print percentage where no information is available or halo not eligible\n print(\"percentage no info = \", np.sum(noinfo_this) / len(noinfo_this) * 100.0)\n\n # no info is denoted by 0 or -999 (or regular if ineligible), but -999 messes with unpacking, so we set it to 0\n Merger_this['MainProgenitor'][noinfo_this] = 0\n\n # rework the main progenitor and halo indices to return in proper order\n Merger_this['HaloIndex'] = correct_inds(\n Merger_this['HaloIndex'],\n N_halos_slabs_this,\n slabs_this,\n inds_fn_this,\n )\n Merger_this['MainProgenitor'] = correct_inds(\n Merger_this['MainProgenitor'],\n N_halos_slabs_prev,\n slabs_prev,\n inds_fn_prev,\n )\n Merger_prev['HaloIndex'] = correct_inds(\n Merger_prev['HaloIndex'],\n N_halos_slabs_prev,\n slabs_prev,\n inds_fn_prev,\n )\n \n # loop over all origins\n for o in range(len(origins)):\n\n # location of the observer\n origin = origins[o]\n \n # comoving distance to observer\n Merger_this['ComovingDistance'] = dist(Merger_this['Position'], origin)\n Merger_prev['ComovingDistance'] = dist(Merger_prev['Position'], origin)\n \n # merger tree data of main progenitor halos corresponding to the halos in current snapshot\n Merger_prev_main_this = Merger_prev[Merger_this['MainProgenitor']].copy()\n \n # if eligible, can be selected for light cone redshift catalog;\n if i != ind_start or resume_flags[k, o]:\n # dealing with the fact that these files may not exist for all origins and all chunks\n if os.path.exists(cat_lc_dir / \"tmp\" / (\"eligibility_prev_z%4.3f_lc%d.%02d.npy\"%(z_this, o, k))):\n eligibility_this = np.load(cat_lc_dir / \"tmp\" / (\"eligibility_prev_z%4.3f_lc%d.%02d.npy\"%(z_this, o, k)))\n else:\n eligibility_this = np.ones(N_halos_this, dtype=bool)\n else:\n eligibility_this = np.ones(N_halos_this, dtype=bool)\n \n # for a newly opened redshift, everyone is eligible to be part of the light cone catalog\n eligibility_prev = np.ones(N_halos_prev, dtype=bool)\n\n # mask for eligible halos for light cone origin with and without information\n mask_noinfo_this = noinfo_this & eligibility_this\n mask_info_this = info_this 
& eligibility_this\n\n # halos that have merger tree information\n Merger_this_info = Merger_this[mask_info_this].copy()\n Merger_prev_main_this_info = Merger_prev_main_this[mask_info_this]\n \n # halos that don't have merger tree information\n Merger_this_noinfo = Merger_this[mask_noinfo_this].copy()\n \n # select objects that are crossing the light cones\n # TODO: revise conservative choice if stranded between two ( & \\) less conservative ( | \\ )\n mask_lc_this_info = (\n ((Merger_this_info['ComovingDistance'] > chi_this) & (Merger_this_info['ComovingDistance'] <= chi_prev))\n )\n #| ((Merger_prev_main_this_info['ComovingDistance'] > chi_this) & (Merger_prev_main_this_info['ComovingDistance'] <= chi_prev))\n\n mask_lc_this_noinfo = (\n (Merger_this_noinfo['ComovingDistance'] > chi_this - delta_chi_old / 2.0)\n & (Merger_this_noinfo['ComovingDistance'] <= chi_this + delta_chi / 2.0)\n )\n\n # spare the computer the effort and avert empty array errors\n # TODO: perhaps revise, as sometimes we might have no halos in\n # noinfo but some in info and vice versa\n if np.sum(mask_lc_this_info) == 0 or np.sum(mask_lc_this_noinfo) == 0: continue\n\n # percentage of objects that are part of this or previous snapshot\n print(\n \"percentage of halos in light cone %d with and without progenitor info = \"%o,\n np.sum(mask_lc_this_info) / len(mask_lc_this_info) * 100.0,\n np.sum(mask_lc_this_noinfo) / len(mask_lc_this_noinfo) * 100.0,\n )\n\n # select halos with mt info that have had a light cone crossing\n Merger_this_info_lc = Merger_this_info[mask_lc_this_info]\n Merger_prev_main_this_info_lc = Merger_prev_main_this_info[mask_lc_this_info]\n\n if plot:\n x_min = -Lbox/2.+k*(Lbox/n_chunks)\n x_max = x_min+(Lbox/n_chunks)\n\n x = Merger_this_info_lc['Position'][:,0]\n choice = (x > x_min) & (x < x_max)\n \n y = Merger_this_info_lc['Position'][choice,1]\n z = Merger_this_info_lc['Position'][choice,2]\n \n plt.figure(1)\n plt.scatter(y, z, color='dodgerblue', s=0.1, label='current objects')\n\n plt.legend()\n plt.axis('equal')\n\n x = Merger_prev_main_this_info_lc['Position'][:,0]\n \n choice = (x > x_min) & (x < x_max)\n\n y = Merger_prev_main_this_info_lc['Position'][choice,1]\n z = Merger_prev_main_this_info_lc['Position'][choice,2]\n \n plt.figure(2)\n plt.scatter(y, z, color='orangered', s=0.1, label='main progenitor')\n\n plt.legend()\n plt.axis('equal')\n plt.show()\n\n # select halos without mt info that have had a light cone crossing\n Merger_this_noinfo_lc = Merger_this_noinfo[mask_lc_this_noinfo]\n\n # add columns for new interpolated position, velocity and comoving distance\n Merger_this_info_lc.add_column('InterpolatedPosition',copy=False)\n Merger_this_info_lc.add_column('InterpolatedVelocity',copy=False)\n Merger_this_info_lc.add_column('InterpolatedComoving',copy=False)\n\n # get chi star where lc crosses halo trajectory; bool is False where closer to previous\n (\n Merger_this_info_lc['InterpolatedComoving'],\n Merger_this_info_lc['InterpolatedPosition'],\n Merger_this_info_lc['InterpolatedVelocity'],\n bool_star_this_info_lc,\n ) = solve_crossing(\n Merger_prev_main_this_info_lc['ComovingDistance'],\n Merger_this_info_lc['ComovingDistance'],\n Merger_prev_main_this_info_lc['Position'],\n Merger_this_info_lc['Position'],\n chi_prev,\n chi_this,\n Lbox,\n origin\n )\n\n # number of objects in this light cone\n N_this_star_lc = np.sum(bool_star_this_info_lc)\n N_this_noinfo_lc = np.sum(mask_lc_this_noinfo)\n\n if i != ind_start or resume_flags[k, o]:\n # cheap way to deal with the fact 
that sometimes we won't have information about all light cone origins for certain chunks and epochs\n if os.path.exists(cat_lc_dir / \"tmp\" / (\"Merger_next_z%4.3f_lc%d.%02d.asdf\"%(z_this,o,k))):\n # load leftover halos from previously loaded redshift\n with asdf.open(cat_lc_dir / \"tmp\" / (\"Merger_next_z%4.3f_lc%d.%02d.asdf\"%(z_this,o,k))) as f:\n Merger_next = f['data']\n\n # adding contributions from the previously loaded redshift\n N_next_lc = len(Merger_next['HaloIndex'])\n else:\n N_next_lc = 0\n else:\n N_next_lc = 0\n\n # total number of halos belonging to this light cone superslab and origin\n N_lc = N_this_star_lc + N_this_noinfo_lc + N_next_lc\n \n print(\"in this snapshot: interpolated, no info, next, total = \", N_this_star_lc * 100.0 / N_lc, N_this_noinfo_lc * 100.0 / N_lc, N_next_lc * 100.0 / N_lc, N_lc)\n \n # save those arrays\n Merger_lc = Table(\n {'HaloIndex':np.zeros(N_lc, dtype=Merger_this_info_lc['HaloIndex'].dtype),\n 'InterpolatedVelocity': np.zeros(N_lc, dtype=(np.float32,3)),\n 'InterpolatedPosition': np.zeros(N_lc, dtype=(np.float32,3)),\n 'InterpolatedComoving': np.zeros(N_lc, dtype=np.float32)\n }\n )\n\n # record interpolated position and velocity for those with info belonging to current redshift\n Merger_lc['InterpolatedPosition'][:N_this_star_lc] = Merger_this_info_lc['InterpolatedPosition'][bool_star_this_info_lc]\n Merger_lc['InterpolatedVelocity'][:N_this_star_lc] = Merger_this_info_lc['InterpolatedVelocity'][bool_star_this_info_lc]\n Merger_lc['InterpolatedComoving'][:N_this_star_lc] = Merger_this_info_lc['InterpolatedComoving'][bool_star_this_info_lc]\n Merger_lc['HaloIndex'][:N_this_star_lc] = Merger_this_info_lc['HaloIndex'][bool_star_this_info_lc]\n\n # record interpolated position and velocity of the halos in the light cone without progenitor information\n Merger_lc['InterpolatedPosition'][N_this_star_lc:N_this_star_lc+N_this_noinfo_lc] = Merger_this_noinfo_lc['Position']\n Merger_lc['InterpolatedVelocity'][N_this_star_lc:N_this_star_lc+N_this_noinfo_lc] = np.zeros_like(Merger_this_noinfo_lc['Position'])\n Merger_lc['InterpolatedComoving'][N_this_star_lc:N_this_star_lc+N_this_noinfo_lc] = np.ones(Merger_this_noinfo_lc['Position'].shape[0])*chi_this\n Merger_lc['HaloIndex'][N_this_star_lc:N_this_star_lc+N_this_noinfo_lc] = Merger_this_noinfo_lc['HaloIndex']\n del Merger_this_noinfo_lc\n\n # record information from previously loaded redshift that was postponed\n if i != ind_start or resume_flags[k, o]:\n if N_next_lc != 0:\n Merger_lc['InterpolatedPosition'][-N_next_lc:] = Merger_next['InterpolatedPosition']['data'][:]\n Merger_lc['InterpolatedVelocity'][-N_next_lc:] = Merger_next['InterpolatedVelocity']['data'][:]\n Merger_lc['InterpolatedComoving'][-N_next_lc:] = Merger_next['InterpolatedComoving']['data'][:]\n Merger_lc['HaloIndex'][-N_next_lc:] = Merger_next['HaloIndex']['data'][:]\n del Merger_next\n resume_flags[k, o] = False\n\n \n # offset position to make light cone continuous\n Merger_lc['InterpolatedPosition'] = offset_pos(Merger_lc['InterpolatedPosition'],ind_origin=o,all_origins=origins)\n \n # create directory for this redshift\n os.makedirs(cat_lc_dir / (\"z%.3f\"%z_this), exist_ok=True)\n\n # write table with interpolated information\n save_asdf(Merger_lc, (\"Merger_lc%d.%02d\"%(o,k)), header, cat_lc_dir / (\"z%.3f\"%z_this))\n\n # TODO: Need to make sure no bugs with eligibility, ask Lehman\n # version 1: only the main progenitor is marked ineligible\n # if halo belongs to this redshift catalog or the previous redshift 
catalog;\n                eligibility_prev[Merger_prev_main_this_info_lc['HaloIndex']] = False\n\n                \n                # version 2: all progenitors of halos belonging to this redshift catalog are marked ineligible \n                # run version 1 AND 2 to mark ineligible Merger_next objects to avoid multiple entries\n                # optimize with numba if possible (ask Lehman)\n                # Note that some progenitor indices are zeros;\n                # For best result perhaps combine Progs with MainProgs \n                if \"Progenitors\" in fields_mt:\n                    nums = Merger_this_info_lc['NumProgenitors'][bool_star_this_info_lc]\n                    starts = Merger_this_info_lc['StartProgenitors'][bool_star_this_info_lc]\n                    # for testing purposes (remove in final version)\n                    main_progs = Merger_this_info_lc['HaloIndex'][bool_star_this_info_lc]\n                    # loop around halos that were marked belonging to this redshift catalog\n                    for j in range(N_this_star_lc):\n                        # select all progenitors\n                        start = starts[j]\n                        num = nums[j]\n                        prog_inds = Progs_this[start : start + num]\n\n                        # remove progenitors with no info\n                        prog_inds = prog_inds[prog_inds > 0]\n                        if len(prog_inds) == 0: continue\n\n                        # correct halo indices\n                        prog_inds = correct_inds(prog_inds, N_halos_slabs_prev, slabs_prev, inds_fn_prev)\n                        halo_inds = Merger_prev['HaloIndex'][prog_inds]\n\n                        # test output; remove in final version\n                        if j < 100: print(halo_inds, Merger_prev[main_progs[j]])\n\n                        # mark ineligible\n                        eligibility_prev[halo_inds] = False\n\n                # information to keep for next redshift considered\n                N_next = np.sum(~bool_star_this_info_lc)\n                Merger_next = Table(\n                    {'HaloIndex': np.zeros(N_next, dtype=Merger_lc['HaloIndex'].dtype),\n                     'InterpolatedVelocity': np.zeros(N_next, dtype=(np.float32,3)),\n                     'InterpolatedPosition': np.zeros(N_next, dtype=(np.float32,3)),\n                     'InterpolatedComoving': np.zeros(N_next, dtype=np.float32)\n                    }\n                )\n                Merger_next['HaloIndex'][:] = Merger_prev_main_this_info_lc['HaloIndex'][~bool_star_this_info_lc]\n                Merger_next['InterpolatedVelocity'][:] = Merger_this_info_lc['InterpolatedVelocity'][~bool_star_this_info_lc]\n                Merger_next['InterpolatedPosition'][:] = Merger_this_info_lc['InterpolatedPosition'][~bool_star_this_info_lc]\n                Merger_next['InterpolatedComoving'][:] = Merger_this_info_lc['InterpolatedComoving'][~bool_star_this_info_lc]\n                del Merger_this_info_lc, Merger_prev_main_this_info_lc\n                \n                if plot:\n                    # select the halos in the light cones\n                    pos_choice = Merger_lc['InterpolatedPosition']\n\n                    # selecting thin slab\n                    pos_x_min = -Lbox/2.+k*(Lbox/n_chunks)\n                    pos_x_max = pos_x_min+(Lbox/n_chunks)\n\n                    ijk = 0\n                    choice = (pos_choice[:, ijk] >= pos_x_min) & (pos_choice[:, ijk] < pos_x_max)\n\n                    circle_this = plt.Circle(\n                        (origins[0][1], origins[0][2]), radius=chi_this, color=\"g\", fill=False\n                    )\n                    circle_prev = plt.Circle(\n                        (origins[0][1], origins[0][2]), radius=chi_prev, color=\"r\", fill=False\n                    )\n\n                    # clear things for fresh plot\n                    ax = plt.gca()\n                    ax.cla()\n\n                    # plot particles\n                    ax.scatter(pos_choice[choice, 1], pos_choice[choice, 2], s=0.1, alpha=1., color=\"dodgerblue\")\n\n                    # circles for in and prev\n                    ax.add_artist(circle_this)\n                    ax.add_artist(circle_prev)\n                    plt.xlim(-1000, 3000)\n                    plt.ylim(-1000, 3000)\n                    plt.axis(\"equal\")\n                    plt.show()\n\n            gc.collect()\n            \n            # split the eligibility array over three files for the three chunks it's made up of\n            offset = 0\n            for idx in inds_fn_prev:\n                eligibility_prev_idx = eligibility_prev[offset:offset+N_halos_slabs_prev[idx]]\n                # combine current information with previously existing\n                if os.path.exists(cat_lc_dir / \"tmp\" / (\"eligibility_prev_z%4.3f_lc%d.%02d.npy\"%(z_prev, o, idx))):\n                    
eligibility_prev_old = np.load(cat_lc_dir / \"tmp\" / (\"eligibility_prev_z%4.3f_lc%d.%02d.npy\"%(z_prev, o, idx)))\n eligibility_prev_idx = eligibility_prev_old & eligibility_prev_idx\n print(\"Exists!\")\n else:\n print(\"Doesn't exist\")\n np.save(cat_lc_dir / \"tmp\" / (\"eligibility_prev_z%4.3f_lc%d.%02d.npy\"%(z_prev, o, idx)), eligibility_prev_idx)\n offset += N_halos_slabs_prev[idx]\n\n # write as table the information about halos that are part of next loaded redshift\n save_asdf(Merger_next, (\"Merger_next_z%4.3f_lc%d.%02d\"%(z_prev, o, k)), header, cat_lc_dir / \"tmp\")\n\n # save redshift of catalog that is next to load and difference in comoving between this and prev\n # TODO: save as txt file that gets appended to and then read the last line\n with open(cat_lc_dir / \"tmp\" / \"tmp.log\", \"a\") as f:\n f.writelines([\"# Next iteration: \\n\", \"z_prev = %.8f \\n\"%z_prev, \"delta_chi = %.8f \\n\"%delta_chi, \"light_cone = %d \\n\"%o, \"super_slab = %d \\n\"%k])\n \n del Merger_this, Merger_prev\n\n # update values for difference in comoving distance\n delta_chi_old = delta_chi\n\n# dict_keys(['HaloIndex', 'HaloMass', 'HaloVmax', 'IsAssociated', 'IsPotentialSplit', 'MainProgenitor', 'MainProgenitorFrac', 'MainProgenitorPrec', 'MainProgenitorPrecFrac', 'NumProgenitors', 'Position', 'Progenitors'])\n \n\nclass ArgParseFormatter(argparse.RawDescriptionHelpFormatter, argparse.ArgumentDefaultsHelpFormatter):\n pass\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=ArgParseFormatter)\n parser.add_argument('--sim_name', help='Simulation name', default=DEFAULTS['sim_name'])\n parser.add_argument('--z_start', help='Initial redshift where we start building the trees', default=DEFAULTS['z_start'])\n parser.add_argument('--z_stop', help='Final redshift (inclusive)', default=DEFAULTS['z_stop'])\n parser.add_argument('--merger_parent', help='Merger tree directory', default=DEFAULTS['merger_parent'])\n parser.add_argument('--catalog_parent', help='Light cone catalog directory', default=DEFAULTS['catalog_parent'])\n parser.add_argument('--resume', help='Resume the calculation from the checkpoint on disk', action='store_true')\n parser.add_argument('--plot', help='Want to show plots', action='store_true')\n \n args = vars(parser.parse_args())\n main(**args)\n","sub_path":"build_mt.py","file_name":"build_mt.py","file_ext":"py","file_size_in_byte":36619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"455490562","text":"# Copyright (c) 2011 Eliot Eshelman\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n###############################################################################\n\nimport logging\nimport ogre.renderer.OGRE as ogre\n\nimport Application\nimport EntitySystem\n\ndef create(ship_position=(0, 0, 0)):\n    \"\"\"Create a HexShip entity.\"\"\"\n\n    ship_id = EntitySystem.create_entity()\n    logging.debug(\"HexShip id: \" + str(ship_id) + \" pos: (\" +\n                str(ship_position[0]) + \", \" +\n                str(ship_position[1]) + \", \" +\n                str(ship_position[2]) + \")\"\n    )\n\n    ogre_entity = Application.ogre_scene_manager.createEntity(\n        'ogreEntity-' + str(ship_id), 'HexCell.mesh')\n    ogre_node = Application.ogre_root_node.createChildSceneNode(\n        'ogreNode-' + str(ship_id))\n    ogre_node.setPosition(ogre.Vector3(ship_position[0],\n                                       ship_position[1],\n                                       ship_position[2]))\n    ogre_node.attachObject(ogre_entity)\n","sub_path":"Assemblages/HexShip.py","file_name":"HexShip.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"48510827","text":"# https://leetcode.com/problems/valid-perfect-square\nclass Solution(object):\n    # Time complexity: O(logN)\n    # Space complexity: O(1)\n    def isPerfectSquare(self, num):\n        \"\"\"\n        :type num: int\n        :rtype: bool\n        \"\"\"\n        r = num\n        while r * r > num:\n            r = (r + num/r)/2\n\n        return r*r == num\n\n    # Time complexity: O(logN)\n    # Space complexity: O(1)\n    def isPerfectSquare1(self, num):\n        if num == 0: return True\n\n        left, right = 1, num\n        while left <= right:\n            mid = left + (right - left)//2\n            if mid * mid == num:\n                return True\n            elif mid * mid > num:\n                right = mid - 1\n            else:\n                left = mid + 1\n\n        return False\n\n","sub_path":"Week_04/valid-perfect-square.py","file_name":"valid-perfect-square.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"331882608","text":"import os, sys\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\n\nimport utils\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport pandas as pd\n\n\nretry_attempts = [1, 2, 10]\nstatic_traffic_ratio = [0.8, 1, 1.2]\nconfiguration_layer = [1, 2, 3, 'all']\ncb_values = [1, 20, 1024]\ncapacity_online_boutique = 230\nchallenging_services = [\"frontend\", \"recommendationservice\", \"productcatalogservice\"]\nretry_timeouts= [\"25ms\", \"5s\", \"20s\"]\n\ndf = pd.read_csv('./logs/retry-experiments.log')\n\nfig, axs = plt.subplots(nrows=3, ncols=3, figsize=(8, 8), sharex=True, sharey=True,dpi=300)\n\n\ni = 0\nj = 0\n\n\nfor retry_to in retry_timeouts:\n    for spike in [0.2, 0.4, 0.5]:\n        # map each (retry timeout, spike ratio) pair onto its subplot column/row\n        j = retry_timeouts.index(retry_to)\n        i = [0.2, 0.4, 0.5].index(spike)\n\n        data = df.loc[(df['retry_timeout'] == retry_to) & (df['spike_ratio'] == spike)]\n        data_status_col = utils.get_status_codes_from_prometheus(\"frontend\", int(data['start'].values[0]), int(data['end'].values[0]))\n        success = {\n            \"data\": [],\n            \"timestamp\": [],\n        }\n        failed = {\n            \"data\": [],\n            
\"timestamp\": []\n        }\n        circuit_broken = {\n            \"data\": [],\n            \"timestamp\": [],\n        }\n        for item in data_status_col:\n            if item['metric']['response_code'] == \"200\" and item['metric']['response_flags'] == \"-\":\n                for val in item['values']:\n                    success[\"data\"].append(float(val[1]))\n                    success[\"timestamp\"].append(float(val[0]) - item['values'][0][0])\n            elif item['metric']['response_flags'] == \"UO\":\n                for val in item['values']:\n                    circuit_broken[\"data\"].append(float(val[1]))\n                    circuit_broken[\"timestamp\"].append(float(val[0]) - item['values'][0][0])\n            elif item['metric']['response_flags'] != \"UO\":\n                for val in item['values']:\n                    cur_val = (float(val[0]) - float(item['values'][0][0]))\n                    if cur_val in failed[\"timestamp\"]:\n                        failed['data'][failed['timestamp'].index(cur_val)] = failed['data'][failed['timestamp'].index(\n                            cur_val)] + float(val[1])\n                    else:\n                        failed[\"data\"].append(float(val[1]))\n                        failed[\"timestamp\"].append(float(val[0]) - float(item['values'][0][0]))\n        success['timestamp'] = [ii for ii in success['timestamp'] if ii <= 240]\n        success['data'] = success['data'][:len(success['timestamp'])]\n        circuit_broken['timestamp'] = [ii for ii in circuit_broken['timestamp'] if ii <= 240]\n        circuit_broken['data'] = circuit_broken['data'][:len(circuit_broken['timestamp'])]\n        failed['timestamp'] = [ii for ii in failed['timestamp'] if ii <= 240]\n        failed['data'] = failed['data'][:len(failed['timestamp'])]\n        axs[i, j].grid()\n        axs[i, j].plot(success['timestamp'], success['data'], color=\"green\", label=\"Successful\")\n        axs[i, j].plot(circuit_broken['timestamp'], circuit_broken['data'], color=\"orange\", label=\"Circuit Broken\")\n        axs[i, j].plot(failed['timestamp'], failed['data'], color=\"red\", label=\"Failed\")\n        axs[i, j].set_xticks([0, 60, 120, 180, 240])\n        axs[i, j].set_xlim(0, 240)\n\n        axs[i, j].yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())\n        if i == 2:\n            axs[i, j].set_xlabel(\"Time (sec)\")\n        if j == 2:\n            axs[i,j].legend()\n        if i == 0:\n            if retry_to == \"20s\":\n                to = \"10s\"\n            else:\n                to = retry_to\n            axs[i, j].set_title(str(to) + \" as retry timeouts\")\n        if j == 0:\n            axs[i, j].set_ylabel(str(1+spike) + \" traffic times of capacity\\\\n(req/sec)\")\n\nplt.tight_layout()\n# plt.show()\nplt.savefig(\"./charts/output/figure-11.pdf\",format='pdf', bbox_inches='tight', pad_inches = 0)","sub_path":"charts/figure-11.py","file_name":"figure-11.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"77826223","text":"from twitchio.ext import commands\r\nimport pyautogui\r\nfrom time import sleep\r\nfrom itertools import repeat\r\nfrom dotenv import load_dotenv\r\nload_dotenv()\r\nimport os\r\npag = pyautogui\r\n\r\ndelayTime = 0.25\r\n\r\nequipoUno = []\r\nequipoDos = []\r\nadmins = ['Mrklus', 'mrklus']\r\n\r\n# quick helper functions\r\ndef direcciones(arg, jugador):\r\n    if jugador == 'jugadorUno':\r\n        if arg == 'u':\r\n            return 'i'\r\n        elif arg == 'd':\r\n            return 'k'\r\n        elif arg == 'l':\r\n            return 'j'\r\n        elif arg == 'r':\r\n            return 'l'\r\n    elif jugador == 'jugadorDos':\r\n        if arg == 'u':\r\n            return 'w'\r\n        elif arg == 'd':\r\n            return 's'\r\n        elif arg == 'l':\r\n            return 'a'\r\n        elif arg == 'r':\r\n            return 'd' \r\n\r\n# check\r\ndef checkAdmin(nombre):\r\n    return nombre in admins\r\n\r\n# 
movement\r\ndef movimientoRes(nombreUsuario, direccion, tiempo=1):\r\n    # Team one\r\n    for integrante in equipoUno:\r\n        if nombreUsuario == integrante:\r\n            # perform the action for team one\r\n            dirFinal = direcciones(direccion, 'jugadorUno')\r\n            pag.keyDown(dirFinal)\r\n            sleep(0.20*int(tiempo))\r\n            pag.keyUp(dirFinal)\r\n            return\r\n    # Team two\r\n    for integrante in equipoDos:\r\n        if nombreUsuario == integrante:\r\n            # perform the action for team two\r\n            dirFinal = direcciones(direccion, 'jugadorDos')\r\n            pag.keyDown(dirFinal)\r\n            sleep(0.20*int(tiempo))\r\n            pag.keyUp(dirFinal)\r\n            return \r\n\r\n\r\nclass Bot(commands.Bot):\r\n\r\n    def __init__(self):\r\n        super().__init__(irc_token=os.getenv(\"IRC_TOKEN\"), client_id=os.getenv(\"CLIENT_ID\"), nick=os.getenv(\"NICK\"), prefix='!',\r\n                         initial_channels=[os.getenv(\"INITIAL_CHANNELS\")])\r\n\r\n    \r\n    async def event_ready(self):\r\n        print(f'Bot listo!')\r\n\r\n    async def event_message(self, message):\r\n        await self.handle_commands(message)\r\n\r\n    # Specific actions\r\n\r\n    # Command to register in a team, e.g.: !register equipoUno\r\n    @commands.command(name='register')\r\n    async def my_commandRegister(self, ctx, *args):\r\n\r\n        for integrante in equipoUno:\r\n            if(ctx.author.name == integrante):\r\n                await ctx.send(f'Jugador {ctx.author.name} ya se encuentra registrado en el equipo uno..')\r\n                return\r\n        for integrante in equipoDos:\r\n            if(ctx.author.name == integrante):\r\n                await ctx.send(f'Jugador {ctx.author.name} ya se encuentra registrado en el equipo dos..')\r\n                return\r\n        \r\n\r\n        if args[0]=='equipoUno':\r\n            if len(equipoUno) == 3:\r\n                await ctx.send('Equipo uno ya esta lleno')\r\n                return\r\n            else:\r\n                equipoUno.append(ctx.author.name)\r\n                await ctx.send(f'Jugador {ctx.author.name} registrado en equipo uno')\r\n\r\n        elif args[0]=='equipoDos':\r\n            if len(equipoDos) == 3:\r\n                await ctx.send('Equipo dos ya esta lleno')\r\n                return\r\n            else:\r\n                equipoDos.append(ctx.author.name)\r\n                await ctx.send(f'Jugador {ctx.author.name} registrado en equipo dos')\r\n\r\n\r\n# Command to show the group members in chat\r\n    @commands.command(name='teams')\r\n    async def my_commandTeams(self, ctx, *args):\r\n        await ctx.send(f'El equipo uno es {equipoUno}')\r\n        await ctx.send(f'El equipo dos es {equipoDos}')\r\n\r\n# Command to leave a group: !retire\r\n    @commands.command(name='retire')\r\n    async def my_commandRetire(self, ctx, *args):\r\n        for integrante in equipoUno:\r\n            if ctx.author.name == integrante:\r\n                equipoUno.remove(ctx.author.name)\r\n                await ctx.send(f'El usuario {ctx.author.name} se retiro del equipo uno')\r\n                return\r\n        for integrante in equipoDos:\r\n            if ctx.author.name == integrante:\r\n                equipoDos.remove(ctx.author.name)\r\n                await ctx.send(f'El usuario {ctx.author.name} se retiro del equipo dos')\r\n                return\r\n        \r\n        await ctx.send(f'El usuario {ctx.author.name} no pertenece a ningun equipo')\r\n\r\n    # Movement command\r\n    @commands.command(name='move')\r\n    async def my_commandJas(self, ctx, *args):\r\n        # Check that the movement amount argument is defined, otherwise fall back to a default \r\n        try:\r\n            tiempoMove = args[1]\r\n        except IndexError:\r\n            tiempoMove = 1\r\n\r\n\r\n        if int(tiempoMove) > 3:\r\n            return\r\n        # Move\r\n        movimientoRes(ctx.author.name, args[0], tiempoMove)\r\n\r\n    \r\n    # Reset teams \r\n    @commands.command(name='restart')\r\n    async def my_commandRestart(self, ctx):\r\n        # Check admin\r\n        if checkAdmin(ctx.author.name):\r\n            equipoUno.clear()\r\n            
equipoDos.clear()\r\n            await ctx.send(f'Equipos reiniciados!')\r\n            return\r\n        else:\r\n            await ctx.send(f'Lo siento {ctx.author.name}, no sos admin')\r\n            return\r\n\r\n    # Place bombs \r\n    @commands.command(name='bomb')\r\n    async def my_commandBomb(self, ctx):\r\n        # Team one\r\n        for integrante in equipoUno:\r\n            if ctx.author.name == integrante:\r\n                # perform the action for team one\r\n                pag.keyDown('m')\r\n                sleep(0.05)\r\n                pag.keyUp('m')\r\n                return\r\n        # Team two\r\n        for integrante in equipoDos:\r\n            if ctx.author.name == integrante:\r\n                # perform the action for team two\r\n                pag.keyDown('f')\r\n                sleep(0.05)\r\n                pag.keyUp('f')\r\n                return \r\n\r\n#    @commands.command(name='movegroup')\r\n#    async def my_commandMoveGroup(self, ctx, *args):\r\n#        if checkAdmin(ctx.author.name):\r\n\r\n\r\n    @commands.command(name='ayuda')\r\n    async def my_commandHelp(self, ctx):\r\n        await ctx.send('Instrucciones: !move l,r,u,d || !bomb [para poner bomba]')\r\n        \r\n\r\n\r\nbot = Bot()\r\nbot.run()","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"399555207","text":"import time\nfrom datetime import datetime\nfrom datetime import timezone\nimport weaviate\nfrom integration.integration_util import TestFailedException\n\ndef get_query_for_group(name):\n    return (\"\"\"\n    {\n      Get {\n        Group (where: {\n          path: [\"name\"]\n          operator: Equal\n          valueText: \"%s\"\n        }) {\n          name\n          _additional {\n            id\n          }\n          members {\n            ... on Person {\n              name\n              _additional {\n                id\n              }\n            }\n          }\n        }\n      }\n    }\n    \"\"\" % name)\n\n\nclass IntegrationTestCrud:\n\n    def __init__(self, client):\n        \"\"\"\n\n        :param client: is expected to already have the schema loaded\n        :type client: weaviate.Client\n        \"\"\"\n        if not client.schema.contains():\n            raise TestFailedException(\"Integration test crud requires a schema to be loaded.\")\n        self.client = client\n        self.chemists = [None] * 3\n\n    def test_crud(self):\n        self._create_objects_batch()\n        self._create_objects()\n        time.sleep(2.0)\n        self._create_references()\n        time.sleep(2.0)\n        self._validate_data_loading()\n        self._delete_objects()\n\n        self._delete_references()\n        self._get_data()\n\n    def _create_objects_batch(self):\n        print(\"Create a batch with data.\")\n        \n\n        self.client.batch.add_data_object({\"name\": \"John Rawls\"}, \"Person\")\n        self.client.batch.add_data_object({\"name\": \"Emanuel Kant\"}, \"Person\")\n        self.client.batch.add_data_object({\"name\": \"Plato\"}, \"Person\")\n        self.client.batch.add_data_object({\"name\": \"Pull-up\"}, \"Exercise\")\n        self.client.batch.add_data_object({\"name\": \"Squat\"}, \"Exercise\")\n        self.client.batch.add_data_object({\"name\": \"Star jump\"}, \"Exercise\")\n\n        print(\"Load batch.\")\n        self.client.batch.create_objects()\n\n    def _create_objects(self):\n        print(\"Load single objects.\")\n        self.client.data_object.create({\"name\": \"Andrew S. 
Tanenbaum\"}, \"Person\", \"28954261-0449-57a2-ade5-e9e08d11f51a\")\n self.client.data_object.create({\"name\": \"Alan Turing\"}, \"Person\", \"1c9cd584-88fe-5010-83d0-017cb3fcb446\")\n self.client.data_object.create({\"name\": \"John von Neumann\"}, \"Person\", \"b36268d4-a6b5-5274-985f-45f13ce0c642\")\n self.client.data_object.create({\"name\": \"Tim Berners-Lee\"}, \"Person\", \"d1e90d26-d82e-5ef8-84f6-ca67119c7998\")\n self.client.data_object.create({\"name\": \"Legends\"}, \"Group\", \"2db436b5-0557-5016-9c5f-531412adf9c6\")\n self.client.data_object.create({\"name\": \"Chemists\"}, \"Group\", \"577887c1-4c6b-5594-aa62-f0c17883d9bf\")\n\n self.chemists[0] = self.client.data_object.create({\"name\": \"Marie Curie\"}, \"Person\")\n self.chemists[1] = self.client.data_object.create({\"name\": \"Fritz Haber\"}, \"Person\")\n self.chemists[2] = self.client.data_object.create({\"name\": \"Walter White\"}, \"Person\")\n\n local_time = datetime.now(timezone.utc).astimezone()\n self.client.data_object.create({\"start\": local_time.isoformat()}, \"Call\",\n \"3ab05e06-2bb2-41d1-b5c5-e044f3aa9623\")\n\n def _create_references(self):\n print(\"Add reference to object.\")\n self.client.data_object.reference.add(\"2db436b5-0557-5016-9c5f-531412adf9c6\", \"members\",\n \"b36268d4-a6b5-5274-985f-45f13ce0c642\")\n self.client.data_object.reference.add(\"2db436b5-0557-5016-9c5f-531412adf9c6\", \"members\",\n \"1c9cd584-88fe-5010-83d0-017cb3fcb446\")\n\n print(\"Add reference to object in batch.\")\n\n for chemist in self.chemists:\n self.client.batch.add_reference(\"577887c1-4c6b-5594-aa62-f0c17883d9bf\", \"Group\", \"members\",\n chemist)\n\n self.client.batch.create_references()\n\n def _validate_data_loading(self):\n print(\"Validate if loading was successful\")\n legends = self.client.query.raw(get_query_for_group(\"Legends\"))['data']['Get']\n for member in legends[\"Group\"][0][\"members\"]:\n if not member[\"name\"] in [\"John von Neumann\", \"Alan Turing\"]:\n raise TestFailedException(\"Adding references to objects failed\")\n\n group_chemists = self.client.query.raw(get_query_for_group(\"Chemists\"))['data']['Get']\n for member in group_chemists[\"Group\"][0][\"members\"]:\n if not member[\"name\"] in [\"Marie Curie\", \"Fritz Haber\", \"Walter White\"]:\n raise TestFailedException(\"Adding references to objects failed\")\n if len(group_chemists[\"Group\"][0][\"members\"]) != 3:\n raise TestFailedException(\"Lengths of the `Group` do not match!\")\n\n def _delete_objects(self):\n print(\"Test Delete\")\n self.client.data_object.delete(self.chemists[2]) # Delete Walter White not a real chemist just a legend\n time.sleep(1.1)\n if self.client.data_object.get_by_id(self.chemists[2]) is not None:\n raise TestFailedException(\"Thing was not correctly deleted\")\n\n def _delete_references(self):\n # Test delete reference\n prime_ministers_group = self.client.data_object.create({\"name\": \"Prime Ministers\"}, \"Group\")\n prime_ministers = []\n prime_minister_names = [\"Wim Kok\", \"Dries van Agt\", \"Piet de Jong\"]\n for name in prime_minister_names:\n prime_ministers.append(self.client.data_object.create({\"name\": name}, \"Person\"))\n time.sleep(2.0)\n for prime_minister in prime_ministers:\n self.client.data_object.reference.add(prime_ministers_group, \"members\", prime_minister)\n time.sleep(2.0)\n self.client.data_object.reference.delete(prime_ministers_group, \"members\", prime_ministers[0])\n time.sleep(2.0)\n prime_ministers_group_object = 
self.client.data_object.get_by_id(prime_ministers_group)\n        if len(prime_ministers_group_object[\"properties\"][\"members\"]) != 2:\n            raise TestFailedException(\"Reference not deleted correctly\")\n\n    def _get_data(self):\n        self.client.data_object.create({\"name\": \"George Floyd\"}, \"Person\", \"452e3031-bdaa-4468-9980-aed60d0258bf\")\n        time.sleep(2.0)\n        person = self.client.data_object.get_by_id(\"452e3031-bdaa-4468-9980-aed60d0258bf\", [\"interpretation\"], with_vector=True)\n\n        if \"vector\" not in person:\n            raise TestFailedException(\"additional property _vector not in person\")\n        if \"interpretation\" not in person[\"additional\"]:\n            raise TestFailedException(\"additional property _interpretation not in person\")\n\n        persons = self.client.data_object.get(with_vector=True)\n        if \"vector\" not in persons[\"objects\"][0]:\n            raise TestFailedException(\"additional property _vector not in persons\")","sub_path":"integration/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":6807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"315153735","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# -*- mode: python -*-\n\"\"\"\n:mod:`mobus.tools` -- tools to build models\n\n=====\nTools\n=====\n\nTools to build recommender models\n\nThe model contains the following functions\n\n* **build_matrix** Builds matrix from a generator\n* **cosine_similarity** Calculates cosine similarities between rows in a matrix\n* **sort_rows** Sort rows of a matrix\n\"\"\"\nfrom datetime import datetime\nimport logging\nimport numpy as np\nimport scipy.sparse as scsp\nimport sklearn.preprocessing as sp\n\nlogger = logging.getLogger(__name__)\n__all__ = ['build_matrix', 'cosine_similarity', 'sort_rows']\n\n\ndef build_matrix(label_feature_generator,\n                 feature_function=None,\n                 normalize=True,\n                 norm_type='l2',\n                 norm_axis=0,\n                 label_name='label',\n                 feature_name='feature'):\n    \"\"\"\n    Creates label/feature matrix from input generator.\n\n    :param label_feature_generator:\n        Generator that produces pairs of label/features\n        For example ('label', ['feat_1', 'feat_2'])\n        or ('label', [('feat_1', 2), ('feat_2', 2)])\n\n        The first case is covered by the default feature function, which\n        will assign a value of one to each feature.\n        For the second example a feature function like this could be used:\n        lambda x: (x[0], x[1])\n\n    :param feature_function:\n        Function used to retrieve name and value from features\n        The function must return a (name, value) tuple.\n    :param normalize:\n        Normalize matrix after build. Default is True\n    :param norm_type:\n        Default is 'l2'\n    :param norm_axis:\n        Default is 0\n    :param label_name:\n        name of labels (user/work etc.) 
only used for debug purposes\n    :param feature_name:\n        name of features (user/work etc.) only used for debug purposes\n    \"\"\"\n    logger.info(\"Building matrix\")\n    start = datetime.now()\n\n    def __default_feature_function(x):\n        return (x, 1)\n\n    if not feature_function:\n        feature_function = __default_feature_function\n\n    label_map = {}\n    feature_map = {}\n    indices = []\n    data = []\n    indptr = [0]\n\n    for i, (label, features) in enumerate(label_feature_generator):\n        label_map.setdefault(label, len(label_map))\n        for feature in features:\n            fname, fval = feature_function(feature)\n            index = feature_map.setdefault(fname, len(feature_map))\n            indices.append(index)\n            data.append(fval)\n        if i != 0 and i % 10000 == 0:\n            logger.debug('handled %s rows in [%s]', i, datetime.now() - start)\n        indptr.append(len(indices))\n\n    M = scsp.csr_matrix((data, indices, indptr), dtype=float)\n    if normalize:\n        M = sp.normalize(M, norm=norm_type, axis=norm_axis, copy=False)\n    label_lst = __build_list(label_map)\n    feature_lst = __build_list(feature_map)\n    logger.info(\"matrix built in [%s]\", datetime.now() - start)\n    logger.debug(\"  matrix shape = (%d, %d)\", *M.shape)\n    logger.debug(\"  matrix shape = (%d, %d) -> %d %ss X %d %ss\", *M.shape, len(label_lst), label_name, len(feature_lst), feature_name)\n    logger.debug(\"  matrix sum=%d\", M.sum())\n\n    return M, label_lst, feature_lst\n\n\nclass DimensionReductionError(Exception):\n    pass\n\n\ndef dimension_reduction(M, alg='svd', dims=100, axis=0):\n    \"\"\"\n    Performs dimension reduction on input Matrix\n    :param M:\n        Matrix to perform reduction on\n    :param alg:\n        Algorithm to use. supported algorithms are [svd|lsi]\n    :param dims:\n        Number of dimensions in output\n    :param axis:\n        Axis to perform reduction on\n    \"\"\"\n    supported_algorithms = ['svd']\n    # supported_algorithms = ['lsi', 'svd']\n    if alg not in supported_algorithms:\n        raise DimensionReductionError(\"Unknown algorithm '%s'. 
supported algorithms are [%s]\" % (alg, \"|\".join(supported_algorithms)))\n\n    if alg == 'svd':\n        from sklearn.decomposition import TruncatedSVD\n        svd = TruncatedSVD(n_components=dims, n_iter=10)\n        return svd.fit_transform(M)\n\n    # elif alg == 'lsi':\n    #     from gensim.models import LsiModel\n    #     M = scsp.coo_matrix(M)\n\n    #     corpus = [[]] * M.shape[0]\n    #     for i, j, v in zip(cx.col, cx.row, cx.data):\n    #         corpus[j].append((i, v))\n    #     model = LsiModel(corpus, num_topics=dims)\n    #     return model[corpus]\n\n\ndef cosine_similarity(M, rows_at_a_time=None):\n    \"\"\"\n    Calculates the cosine similarity matrix for a feature matrix, and\n    returns it in chunks.\n\n    :param M:\n        The matrix to calculate cosine similarity matrix for\n    :param rows_at_a_time:\n        Rows used in each iteration (smaller memory footprint).\n        If argument not present, the whole matrix is used and the\n        calculation will finish in first iteration.\n    \"\"\"\n    if not rows_at_a_time or rows_at_a_time > M.shape[0]:\n        rows_at_a_time = M.shape[0]\n\n    for i_beg, i_end in __chunks(M.shape[0], rows_at_a_time):\n        logger.debug(\"Calculating similarities for chunk (size=%d)\", rows_at_a_time)\n        S = M * M[i_beg:i_end, :].T\n        logger.debug(\"Chunk shape (%d, %d)\", *S.shape)\n        yield scsp.csr_matrix(S.T)\n\n\ndef sort_rows(M, labels, offset=0, max_len=1000):\n    \"\"\"\n    Yields sorted rows of matrix with the given maximum length.\n    :param M:\n        Matrix to fetch rows from.\n    :param labels:\n        Labels corresponding to the matrix rows.\n    :param offset:\n        Offset in labels list\n    :param max_len:\n        Maximum returned list length\n    \"\"\"\n    labels = np.array(labels)\n    for i in range(M.shape[0]):\n        yield labels[offset + i], __sort_row(M[i].indices, M[i].data, labels, max_len)\n\n\ndef __build_list(vocabulary):\n    inv = {v: k for k, v in vocabulary.items()}\n    return [inv[n] for n in range(len(inv))]\n\n\ndef __chunks(n, size):\n    i = (0, size)\n    while i[1] < n:\n        yield i\n        i = [sum(x) for x in zip(i, [size, size])]\n        if i[1] > n:\n            i[1] = n\n    yield i\n\n\ndef __sort_row(indices, data, labels, max_len=1000):\n    ids = data.argsort()[::-1][:max_len]\n    return data[ids], labels[indices[ids]]\n","sub_path":"src/mobus/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"30210913","text":"'''\n    Help Package\n    @ Author Kuntal\n    @ Company \n    @ version 0.1\n    @date 10/12/2019\n'''\n\n# Email Config\nfrom BlankSetup import settings\nHOST_USER = settings.EMAIL_HOST_USER\nfrom django.core.mail import BadHeaderError, EmailMessage, EmailMultiAlternatives, send_mail, send_mass_mail\n\n\ndef SendOneToOneEmail(subject, message, to):\n    send_mail(subject, message, HOST_USER, to, fail_silently=False)\n\n\ndef SendMassEmail():\n    message1 = ('Subject here', 'Here is the message', 'from@example.com', ['first@example.com', 'other@example.com'])\n    message2 = ('Another Subject', 'Here is another message', 'from@example.com', ['second@test.com'])\n    send_mass_mail((message1, message2), fail_silently=False)\n\n    datatuple = (\n        ('Subject', 'Message.', 'from@example.com', ['john@example.com']),\n        ('Subject', 'Message.', 'from@example.com', ['jane@example.com']),\n    )\n    send_mass_mail(datatuple)\n\n\ndef SendMultiPartEmail():\n    subject, from_email, to = 'hello', HOST_USER, 'kuntal.samanta@cbnits.com'\n    text_content = 'This is an important message.'\n    html_content = '
<p>This is an <strong>important</strong> message.</p>
\\\n '\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()","sub_path":"Django2.2BlankSetup/App/help/emailHelp.py","file_name":"emailHelp.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"494521863","text":"import socket\ndef definir_path():\n if socket.gethostname() == 'matias-Inspiron-5570':\n path_git = '/home/matias/Documents/Tesis/tesis_licenciatura'\n path_datos_global = '/home/matias/Documents/Tesis'\n elif socket.gethostname() == 'quipus': #Compu de Susana\n path_git = '/home/mleize/tesis_licenciatura'\n path_datos_global = '/home/mleize'\n elif socket.gethostname() == 'mn328': #Cluster\n path_git = '/tmpu/dsy_g/mleiz_a/tesis_licenciatura'\n path_datos_global = '/tmpu/dsy_g/mleiz_a'\n else:\n path_git = 'mleize/tesis_licenciatura'\n path_datos_global = 'mleize'\n return path_git, path_datos_global\n\n\nif __name__ == '__main__':\n #Much better:\n import os\n import git\n path_git = git.Repo('.', search_parent_directories=True).working_tree_dir\n path_datos_global = os.path.dirname(path_git)\n\n print(path_git)\n print(path_datos_global)\n\n #For some reason this work:\n os.chdir(path_git)\n os.sys.path.append('./Software/Funcionales/')\n\n #And this not:\n os.chdir(path_git+'/Software/Funcionales/')\n\n\n #The header of all run files should have this format:\n import numpy as np; np.random.seed(42)\n import emcee\n\n import os\n import git\n path_git = git.Repo('.', search_parent_directories=True).working_tree_dir\n os.chdir(path_git); os.sys.path.append('./Software/Funcionales/')\n","sub_path":"pc_path.py","file_name":"pc_path.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"69612973","text":"# prime number module\n# Author : Ayush Gupta\n# Date created : 15-01-2021\n\nfrom random import randrange\nfrom math import sqrt\n\ndef prime_seive(n):\n \"\"\"Function that returns first n prime numbers.\n\n Args:\n n (int): Upperlimit of the search.\n\n Returns:\n list: First n prime numbers.\n \"\"\"\n seive = [True]*n\n seive[0] = False\n seive[1] = False\n \n for i in range(2, int(sqrt(n)+1)):\n pointer = i*2\n while pointer> (32 - s))) & 0xffffffff\n\n\ndef f(x, y, z):\n return (x & y) | ((~x) & z)\n\n\ndef g(x, y, z):\n return (x & y) | (x & z) | (y & z)\n\n\ndef h(x, y, z):\n return x ^ y ^ z\n\n\ndef r1(a, b, c, d, k, s, x):\n return leftrotate(a + f(b, c, d) + int(x[k], 2), s)\n\n\ndef r2(a, b, c, d, k, s, x):\n return leftrotate((a + g(b, c, d) + int(x[k], 2) + 0x5A827999), s)\n\n\ndef r3(a, b, c, d, k, s, x):\n return leftrotate(a + h(b, c, d) + int(x[k], 2) + 0x6ED9EBA1, s)\n\n\ndef int_to_bin(a, b, c, d):\n a = bin(a)[2:]\n b = bin(b)[2:]\n c = bin(c)[2:]\n d = bin(d)[2:]\n while len(a) % 8 != 0 or len(b) % 8 != 0 or len(c) % 8 != 0 or len(d) % 8 != 0:\n if len(a) % 8 != 0:\n a = '0' + a\n elif len(b) % 8 != 0:\n b = '0' + b\n elif len(c) % 8 != 0:\n c = '0' + c\n elif len(d) % 8 != 0:\n d = '0' + d\n\n a = [int(x) for x in a]\n b = [int(x) for x in b]\n c = [int(x) for x in c]\n d = [int(x) for x in d]\n\n return a + b + c + d\n\n\ndef change_random_bit(msg):\n mod_msg = ''\n for char in msg:\n temp = bin(ord(char))[2:]\n while len(temp) != 8:\n temp = '0' + temp\n mod_msg += temp\n\n r_bit = randint(0, len(mod_msg) - 1)\n\n mod_msg = [x for x in mod_msg]\n mod_msg[r_bit] = 
str(int(mod_msg[r_bit]) ^ 1)\n    mod_msg = ''.join(mod_msg)\n\n    temp = ''\n    for j in range(0, len(mod_msg), 8):\n        temp += chr(int(mod_msg[j:j + 8], 2))\n    mod_msg = temp\n    return mod_msg\n\n\ndef paint(x, y, text_y, text_x):\n    plt.plot(x, y, color='green', marker='o', ls='solid')\n    plt.title('Plot')\n    plt.ylabel(text_y)\n    plt.xlabel(text_x)\n    plt.show()\n\n\nclass md4(object):\n    def __init__(self):\n        self.A = 0x67452301\n        self.B = 0xefcdab89\n        self.C = 0x98badcfe\n        self.D = 0x10325476\n        self.result = ''\n        self.X = []\n        self.m = ''\n\n    def expand_message(self, msg):\n\n        for char in msg:\n            temp = bin(ord(char))[2:]\n            while len(temp) != 8:\n                temp = '0' + temp\n            self.m += temp\n\n        b64 = bin(len(self.m))[2:]\n        while len(b64) != 64:\n            b64 = '0' + b64\n\n        if len(self.m) % 512 == 0 and len(self.m) != 0:\n            pass\n        else:\n            self.m += '1'\n            while len(self.m) % 512 != 0:\n                while len(self.m) % 512 != 448:\n                    self.m += '0'\n                c = b64[56:64] + b64[48:56]+b64[40:48]+b64[32:40]+b64[24:32]+b64[16:24]+b64[8:16]+b64[0:8]\n                self.m += c\n\n        for i in range(0, len(self.m), 32):\n            temp = self.m[i:i+32]\n            word = ''\n            for j in range(0, 33, 8):\n                word += temp[24-j:32-j]\n            self.X.append(word)\n\n    def rounds_f(self):\n\n        for i in range(0, len(self.X) // 16):\n            a = self.A\n            b = self.B\n            c = self.C\n            d = self.D\n            x = self.X\n\n            # first round\n            a = r1(a, b, c, d, (i * 16) + 0, 3, x)\n            d = r1(d, a, b, c, (i * 16) + 1, 7, x)\n            c = r1(c, d, a, b, (i * 16) + 2, 11, x)\n            b = r1(b, c, d, a, (i * 16) + 3, 19, x)\n            a = r1(a, b, c, d, (i * 16) + 4, 3, x)\n            d = r1(d, a, b, c, (i * 16) + 5, 7, x)\n            c = r1(c, d, a, b, (i * 16) + 6, 11, x)\n            b = r1(b, c, d, a, (i * 16) + 7, 19, x)\n            a = r1(a, b, c, d, (i * 16) + 8, 3, x)\n            d = r1(d, a, b, c, (i * 16) + 9, 7, x)\n            c = r1(c, d, a, b, (i * 16) + 10, 11, x)\n            b = r1(b, c, d, a, (i * 16) + 11, 19, x)\n            a = r1(a, b, c, d, (i * 16) + 12, 3, x)\n            d = r1(d, a, b, c, (i * 16) + 13, 7, x)\n            c = r1(c, d, a, b, (i * 16) + 14, 11, x)\n            b = r1(b, c, d, a, (i * 16) + 15, 19, x)\n\n            # second round\n            a = r2(a, b, c, d, (i * 16) + 0, 3, x)\n            d = r2(d, a, b, c, (i * 16) + 4, 5, x)\n            c = r2(c, d, a, b, (i * 16) + 8, 9, x)\n            b = r2(b, c, d, a, (i * 16) + 12, 13, x)\n            a = r2(a, b, c, d, (i * 16) + 1, 3, x)\n            d = r2(d, a, b, c, (i * 16) + 5, 5, x)\n            c = r2(c, d, a, b, (i * 16) + 9, 9, x)\n            b = r2(b, c, d, a, (i * 16) + 13, 13, x)\n            a = r2(a, b, c, d, (i * 16) + 2, 3, x)\n            d = r2(d, a, b, c, (i * 16) + 6, 5, x)\n            c = r2(c, d, a, b, (i * 16) + 10, 9, x)\n            b = r2(b, c, d, a, (i * 16) + 14, 13, x)\n            a = r2(a, b, c, d, (i * 16) + 3, 3, x)\n            d = r2(d, a, b, c, (i * 16) + 7, 5, x)\n            c = r2(c, d, a, b, (i * 16) + 11, 9, x)\n            b = r2(b, c, d, a, (i * 16) + 15, 13, x)\n\n            # third round\n            a = r3(a, b, c, d, (i * 16) + 0, 3, x)\n            d = r3(d, a, b, c, (i * 16) + 8, 9, x)\n            c = r3(c, d, a, b, (i * 16) + 4, 11, x)\n            b = r3(b, c, d, a, (i * 16) + 12, 15, x)\n            a = r3(a, b, c, d, (i * 16) + 2, 3, x)\n            d = r3(d, a, b, c, (i * 16) + 10, 9, x)\n            c = r3(c, d, a, b, (i * 16) + 6, 11, x)\n            b = r3(b, c, d, a, (i * 16) + 14, 15, x)\n            a = r3(a, b, c, d, (i * 16) + 1, 3, x)\n            d = r3(d, a, b, c, (i * 16) + 9, 9, x)\n            c = r3(c, d, a, b, (i * 16) + 5, 11, x)\n            b = r3(b, c, d, a, (i * 16) + 13, 15, x)\n            a = r3(a, b, c, d, (i * 16) + 3, 3, x)\n            d = r3(d, a, b, c, (i * 16) + 11, 9, x)\n            c = r3(c, d, a, b, (i * 16) + 7, 11, x)\n            b = r3(b, c, d, a, (i * 16) + 15, 15, x)\n\n            self.A = self.A + a & 0xffffffff\n            self.B = self.B + b & 0xffffffff\n            self.C = self.C + c & 0xffffffff\n            self.D = self.D + d & 0xffffffff\n\n        result = list(pack('
r\"\"\"\n Produce a Bell state.\n\n Returns one of the following four Bell states depending on the value\n of `idx`:\n\n .. math::\n \\begin{equation}\n \\begin{aligned}\n \\frac{1}{\\sqrt{2}} \\left( |00 \\rangle + |11 \\rangle \\right) &\n \\qquad &\n \\frac{1}{\\sqrt{2}} \\left( |00 \\rangle - |11 \\rangle \\right) \\\\\n \\frac{1}{\\sqrt{2}} \\left( |01 \\rangle + |10 \\rangle \\right) &\n \\qquad &\n \\frac{1}{\\sqrt{2}} \\left( |01 \\rangle - |10 \\rangle \\right)\n \\end{aligned}\n \\end{equation}\n\n References:\n [1] Wikipedia: Bell state\n https://en.wikipedia.org/wiki/Bell_state\n\n :param idx: A parameter in [0, 1, 2, 3]\n \"\"\"\n e_0, e_1 = ket(2, 0), ket(2, 1)\n if idx == 0:\n return 1 / np.sqrt(2) * (np.kron(e_0, e_0) + np.kron(e_1, e_1))\n if idx == 1:\n return 1 / np.sqrt(2) * (np.kron(e_0, e_0) - np.kron(e_1, e_1))\n if idx == 2:\n return 1 / np.sqrt(2) * (np.kron(e_0, e_1) + np.kron(e_1, e_0))\n if idx == 3:\n return 1 / np.sqrt(2) * (np.kron(e_0, e_1) - np.kron(e_1, e_0))\n raise ValueError(\"Invalid integer value for Bell state.\")\n","sub_path":"toqito/state/states/bell.py","file_name":"bell.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"49243476","text":"\"\"\"\n# Definition for Employee.\nclass Employee:\n def __init__(self, id: int, importance: int, subordinates: List[int]):\n self.id = id\n self.importance = importance\n self.subordinates = subordinates\n\"\"\"\nfrom collections import deque\nclass Solution:\n def getImportance(self, employees: List['Employee'], id: int) -> int:\n if not employees: return 0\n hashmap = {}\n for emp in employees:\n hashmap[emp.id] = emp\n \n q = deque()\n q.append(id)\n \n total_importance = 0\n \n while q:\n size = len(q)\n for i in range(size):\n eid = q.popleft()\n e = hashmap[eid]\n total_importance += e.importance\n for sub in e.subordinates:\n q.append(sub)\n return total_importance\n \n#Time complexity is O(n) and space complexity is O(n)\n#BFS approach used for evaluating all the subordiantes of the given id.\n ","sub_path":"problem2-bfs.py","file_name":"problem2-bfs.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"34070038","text":"## \n# This program gives a description of an earthquake, given the Richter scale \n# magnitude.\n#\n\ndef main() :\n # Extract the user input.\n richter = float(input(\"Enter a magnitude on the Richter scale: \"))\n \n # Get the description and print it.\n description = getDescription(richter)\n print(description)\n \n## Gets the description of an earthquake for a given magnitude on the Richter scale.\n# @param richter a float indicating the magnitude on the Richter scale\n# @return a string containing the description of the damage\n#\ndef getDescription(richter) :\n if richter >= 8.0 :\n result = \"Most structures fall\"\n elif richter >= 7.0 :\n result = \"Many buildings destroyed\"\n elif richter >= 6.0 :\n result = \"Many buildings considerably damaged, some collapse\"\n elif richter >= 4.5 :\n result = \"Damage to poorly constructed buildings\"\n else : \n result = \"No destruction of buildings\"\n \n return result\n \n## Gets the description of an earthquake for a given magnitude on the Richter\n# scale. 
This implementation uses a shorter form of the if statement \n# (see Special Topic 5.1)\n# @param richter a float indicating the magnitude on the Richter scale\n# @return a string containing the description of the damage\n#\ndef getDescription2(richter) :\n if richter >= 8.0 : return \"Most structures fall\"\n if richter >= 7.0 : return \"Many buildings destroyed\"\n if richter >= 6.0 : return \"Many buildings considerably damaged, some collapse\"\n if richter >= 4.5 : return \"Damage to poorly constructed buildings\"\n return \"No destruction of buildings\"\n\n# Start the program.\nmain()\n","sub_path":"P4EO_source/ch05/sec04/earthquake.py","file_name":"earthquake.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"292414007","text":"from vk_bot import VkBot\r\nfrom vk_api.longpoll import VkEventType\r\n\r\nbot = VkBot()\r\n\r\nfor event in bot.talk.longpoll.listen():\r\n if event.type == VkEventType.MESSAGE_NEW:\r\n if event.to_me:\r\n if not bot.user.is_defined(event.user_id):\r\n bot.set_user(event.user_id)\r\n bot.write_reply(event.text)\r\n\r\n\r\n\r\n\r\n","sub_path":"Python/netology_adpy_qualification/bot_main.py","file_name":"bot_main.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"190940569","text":"import shutil\nimport tempfile\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom common import SplearnTestCase\nfrom nose.tools import (assert_equal, assert_is_instance, assert_raises,\n assert_true)\nfrom numpy.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom splearn.rdd import ArrayRDD, TupleRDD, block\n\n\ndef assert_equal_tuple(tpl1, tpl2):\n assert_equal(len(tpl1), len(tpl2))\n for i in range(len(tpl1)):\n assert_array_equal(tpl1[i], tpl2[i])\n\n\ndef assert_equal_multiple_tuples(tpls1, tpls2):\n assert_equal(len(tpls1), len(tpls2))\n for i, tpl1 in enumerate(tpls1):\n assert_equal_tuple(tpl1, tpls2[i])\n\n\nclass RDDTestCase(SplearnTestCase):\n\n def setUp(self):\n super(RDDTestCase, self).setUp()\n self.outputdir = tempfile.mkdtemp()\n\n def tearDown(self):\n super(RDDTestCase, self).tearDown()\n shutil.rmtree(self.outputdir)\n\n\nclass TestBlockRDD(RDDTestCase):\n\n def test_block_rdd_tuple(self):\n n_partitions = 10\n n_samples = 100\n sparse_row = sp.csr_matrix([[0, 0, 1, 0, 1]])\n data = self.sc.parallelize(\n [(np.array([1., 2.]), 0, sparse_row) for i in range(n_samples)],\n n_partitions)\n blocked_data = block(data)\n\n expected_first_block = np.array([[1., 2.]] * 10)\n expected_second_block = np.zeros(10, dtype=np.int)\n expected_third_block = sp.vstack([sparse_row] * 10)\n\n first_block_tuple = blocked_data.first()\n assert_array_almost_equal(expected_first_block, first_block_tuple[0])\n assert_array_almost_equal(expected_second_block, first_block_tuple[1])\n assert_array_almost_equal(expected_third_block.toarray(),\n first_block_tuple[2].toarray())\n\n tuple_blocks = blocked_data.collect()\n assert_equal(len(tuple_blocks), n_partitions)\n assert_equal(sum(len(b[0]) for b in tuple_blocks), n_samples)\n assert_equal(sum(len(b[1]) for b in tuple_blocks), n_samples)\n\n def test_block_rdd_sp_matrix(self):\n n_partitions = 10\n n_samples = 100\n sparse_row = sp.csr_matrix([[0, 0, 1, 0, 1]])\n data = self.sc.parallelize([sparse_row for i in range(n_samples)],\n n_partitions)\n blocked_data = block(data)\n 
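        # block() stacks the sparse rows of each partition into a single scipy sparse matrix,
        # so the first collected block is expected to be sparse itself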
assert_true(sp.issparse(blocked_data.first()))\n\n        expected_block = sp.vstack([sparse_row] * 10)\n        assert_array_almost_equal(expected_block.toarray(),\n                                  blocked_data.first().toarray())\n\n    def test_block_rdd_array(self):\n        n_partitions = 10\n        n_samples = 100\n        data = self.sc.parallelize([np.array([1]) for i in range(n_samples)],\n                                   n_partitions)\n        blocked_data = block(data)\n        assert_array_almost_equal(np.ones((10, 1)), blocked_data.first())\n        blocks = blocked_data.collect()\n        assert_equal(len(blocks), n_partitions)\n        assert_array_almost_equal(np.ones((10, 1)), blocks[-1])\n        assert_equal(sum(len(b) for b in blocks), n_samples)\n\n        n_partitions = 17\n        data = self.sc.parallelize([np.array([1]) for i in range(n_samples)],\n                                   n_partitions)\n        blocked_data = block(data)\n        assert_array_almost_equal(np.ones((n_samples // n_partitions, 1)),\n                                  blocked_data.first())\n        blocks = blocked_data.collect()\n        assert_equal(len(blocks), n_partitions)\n        assert_equal(sum(len(b) for b in blocks), n_samples)\n\n    def test_block_rdd_array_block_size(self):\n        n_partitions = 10\n        n_samples = 107\n        data = self.sc.parallelize([np.array([1]) for i in range(n_samples)],\n                                   n_partitions)\n\n        block_data_5 = block(data, block_size=5)\n        blocks = block_data_5.collect()\n        assert_true(all(len(b) <= 5 for b in blocks))\n\n        block_data_10 = block(data, block_size=10)\n        blocks = block_data_10.collect()\n        assert_true(all(len(b) <= 10 for b in blocks))\n\n    def test_block_empty_rdd(self):\n        n_partitions = 3\n        empty_data = self.sc.parallelize([], n_partitions)\n        assert_raises(ValueError, block, empty_data)\n\n    def test_block_rdd_dict(self):\n        n_partitions = 3\n        n_samples = 57\n        dicts = [{'a': i, 'b': float(i) ** 2} for i in range(n_samples)]\n        data = self.sc.parallelize(dicts, n_partitions)\n\n        block_data_5 = block(data, block_size=5)\n        blocks = block_data_5.collect()\n        assert_true(all(len(b) <= 5 for b in blocks))\n        assert_array_almost_equal(blocks[0][0], np.arange(5))\n        assert_array_almost_equal(blocks[0][1],\n                                  np.arange(5, dtype=float) ** 2)\n\n\nclass TestArrayRDD(RDDTestCase):\n\n    def test_initialization(self):\n        n_partitions = 4\n        n_samples = 100\n\n        data = [np.array([1, 2]) for i in range(n_samples)]\n        rdd = self.sc.parallelize(data, n_partitions)\n\n        assert_raises(TypeError, ArrayRDD, data)\n        assert_raises(TypeError, ArrayRDD, data, False)\n        assert_raises(TypeError, ArrayRDD, data, 10)\n\n        assert_is_instance(ArrayRDD(rdd), ArrayRDD)\n        assert_is_instance(ArrayRDD(rdd, 10), ArrayRDD)\n        assert_is_instance(ArrayRDD(rdd, None), ArrayRDD)\n\n    def test_partitions_number(self):\n        data = np.arange(400).reshape((100, 4))\n        rdd = self.sc.parallelize(data, 4)\n        assert_equal(ArrayRDD(rdd, 5).partitions, 4)\n        assert_equal(ArrayRDD(rdd, 10).partitions, 4)\n        assert_equal(ArrayRDD(rdd, 20).partitions, 4)\n\n        data = np.arange(400).reshape((100, 4))\n        rdd = self.sc.parallelize(data, 7)\n        assert_equal(ArrayRDD(rdd, 5).partitions, 7)\n        assert_equal(ArrayRDD(rdd, 10).partitions, 7)\n        assert_equal(ArrayRDD(rdd, 20).partitions, 7)\n\n    def test_blocks_number(self):\n        n_partitions = 10\n        n_samples = 1000\n\n        data = [np.array([1, 2]) for i in range(n_samples)]\n        rdd = self.sc.parallelize(data, n_partitions)\n\n        assert_equal(1000, ArrayRDD(rdd, False).blocks)\n        assert_equal(10, ArrayRDD(rdd).blocks)\n        assert_equal(20, ArrayRDD(rdd, 50).blocks)\n        assert_equal(20, ArrayRDD(rdd, 66).blocks)\n        assert_equal(10, ArrayRDD(rdd, 100).blocks)\n        assert_equal(10, ArrayRDD(rdd, 300).blocks)\n        assert_equal(200, ArrayRDD(rdd, 5).blocks)\n        assert_equal(100, ArrayRDD(rdd, 
10).blocks)\n\n def test_blocks_size(self):\n n_partitions = 10\n n_samples = 1000\n\n data = [np.array([1, 2]) for i in range(n_samples)]\n rdd = self.sc.parallelize(data, n_partitions)\n\n shapes = ArrayRDD(rdd).map(lambda x: x.shape[0]).collect()\n assert_true(all(np.array(shapes) == 100))\n shapes = ArrayRDD(rdd, 5).map(lambda x: x.shape[0]).collect()\n assert_true(all(np.array(shapes) == 5))\n shapes = ArrayRDD(rdd, 50).map(lambda x: x.shape[0]).collect()\n assert_true(all(np.array(shapes) == 50))\n shapes = ArrayRDD(rdd, 250).map(lambda x: x.shape[0]).collect()\n assert_true(all(np.array(shapes) == 100))\n shapes = ArrayRDD(rdd, 66).map(lambda x: x.shape[0]).collect()\n assert_true(all(np.in1d(shapes, [66, 34])))\n\n def test_shape(self):\n data = np.arange(4000)\n shapes = [(1000, 4),\n (200, 20),\n (100, 40),\n (2000, 2)]\n for shape in shapes:\n rdd = self.sc.parallelize(data.reshape(shape))\n assert_equal(ArrayRDD(rdd).shape, shape)\n\n def test_unblocking_rdd(self):\n data = np.arange(400)\n rdd = self.sc.parallelize(data, 4)\n X = ArrayRDD(rdd, 5)\n X_unblocked = X.unblock()\n assert_is_instance(X_unblocked, ArrayRDD)\n assert_array_equal(X_unblocked.take(12), np.arange(12))\n\n def test_convert_tolist(self):\n data = np.arange(400)\n rdd = self.sc.parallelize(data, 4)\n X = ArrayRDD(rdd, 5)\n X_list = X.tolist()\n assert_is_instance(X_list, list)\n assert_equal(X_list, data.tolist())\n\n data = [2, 3, 5, 1, 6, 7, 9, 9]\n rdd = self.sc.parallelize(data, 2)\n X = ArrayRDD(rdd)\n X_list = X.tolist()\n assert_is_instance(X_list, list)\n assert_equal(X_list, data)\n\n def test_convert_toarray(self):\n data = np.arange(400)\n rdd = self.sc.parallelize(data, 4)\n X = ArrayRDD(rdd, 5)\n X_array = X.toarray()\n assert_array_equal(X_array, data)\n\n data = [2, 3, 5, 1, 6, 7, 9, 9]\n rdd = self.sc.parallelize(data, 2)\n X = ArrayRDD(rdd)\n X_array = X.toarray()\n assert_array_equal(X_array, np.array(data))\n\n def test_get_single_item(self):\n data = np.arange(400).reshape((100, 4))\n rdd = self.sc.parallelize(data, 4)\n X = ArrayRDD(rdd, 5)\n\n expected = np.arange(0, 20).reshape((5, 4))\n assert_array_equal(X.first(), expected)\n assert_array_equal(X[0].first(), expected)\n assert_array_equal(X.ix(0).first(), expected)\n\n expected = np.arange(20, 40).reshape((5, 4))\n assert_array_equal(X[1].first(), expected)\n assert_array_equal(X.ix(1).first(), expected)\n\n expected = np.arange(380, 400).reshape((5, 4))\n assert_array_equal(X[19].first(), expected)\n assert_array_equal(X.ix(19).first(), expected)\n assert_array_equal(X[-1].first(), expected)\n assert_array_equal(X.ix(-1).first(), expected)\n\n expected = np.arange(340, 360).reshape((5, 4))\n assert_array_equal(X[17].first(), expected)\n assert_array_equal(X.ix(17).first(), expected)\n assert_array_equal(X[-3].first(), expected)\n assert_array_equal(X.ix(-3).first(), expected)\n\n def test_get_multiple_item(self):\n data = np.arange(400).reshape((100, 4))\n rdd = self.sc.parallelize(data, 4)\n X = ArrayRDD(rdd, 5)\n\n exp0th = np.arange(0, 20).reshape((5, 4))\n exp1st = np.arange(20, 40).reshape((5, 4))\n exp2nd = np.arange(40, 60).reshape((5, 4))\n exp7th = np.arange(140, 160).reshape((5, 4))\n exp18th = np.arange(360, 380).reshape((5, 4))\n exp19th = np.arange(380, 400).reshape((5, 4))\n\n assert_array_equal(X[[0, 1]].collect(), [exp0th, exp1st])\n assert_array_equal(X[[0, 2]].collect(), [exp0th, exp2nd])\n assert_array_equal(X[[0, -1]].collect(), [exp0th, exp19th])\n assert_array_equal(X[[0, -2]].collect(), [exp0th, exp18th])\n 
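        # negative block indices count from the end of the blocked RDD, mirroring Python list indexing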
assert_array_equal(X[[1, -2]].collect(), [exp1st, exp18th])\n assert_array_equal(X[[7, 0]].collect(), [exp7th, exp0th])\n assert_array_equal(X[[1, 2, 7, 19]].collect(),\n [exp1st, exp2nd, exp7th, exp19th])\n\n def test_array_slice_syntax(self):\n data = np.arange(400).reshape((100, 4))\n rdd = self.sc.parallelize(data, 4)\n X = ArrayRDD(rdd, 5)\n\n exp0th = np.arange(0, 20).reshape((5, 4))\n exp1st = np.arange(20, 40).reshape((5, 4))\n exp7th = np.arange(140, 160).reshape((5, 4))\n exp8th = np.arange(160, 180).reshape((5, 4))\n exp9th = np.arange(180, 200).reshape((5, 4))\n exp18th = np.arange(360, 380).reshape((5, 4))\n exp19th = np.arange(380, 400).reshape((5, 4))\n\n assert_array_equal(X[:1].collect(), [exp0th])\n assert_array_equal(X[:2].collect(), [exp0th, exp1st])\n assert_array_equal(X[18:].collect(), [exp18th, exp19th])\n assert_array_equal(X[-1:].collect(), [exp19th])\n assert_array_equal(X[-2:].collect(), [exp18th, exp19th])\n assert_array_equal(X[7:10].collect(), [exp7th, exp8th, exp9th])\n assert_array_equal(X[7:10:2].collect(), [exp7th, exp9th])\n assert_array_equal(X[::9].collect(), [exp0th, exp9th, exp18th])\n assert_array_equal(X[::-10].collect(), [exp19th, exp9th])\n assert_array_equal(X[-1:1].collect(), [])\n\n def test_transform(self):\n data = np.arange(400).reshape((100, 4))\n rdd = self.sc.parallelize(data, 4)\n X = ArrayRDD(rdd, 5)\n\n fn = lambda x: x**2\n X1 = map(fn, X.collect())\n X2 = X.transform(fn).collect()\n\n assert_array_equal(X1, X2)\n\n\nclass TestTupleRDD(RDDTestCase):\n\n def test_initialization(self):\n n_partitions = 4\n n_samples = 100\n\n data = [(1, 2) for i in range(n_samples)]\n rdd = self.sc.parallelize(data, n_partitions)\n\n assert_raises(TypeError, TupleRDD, data)\n assert_raises(TypeError, TupleRDD, data, False)\n assert_raises(TypeError, TupleRDD, data, 10)\n\n assert_is_instance(TupleRDD(rdd), TupleRDD)\n assert_is_instance(TupleRDD(rdd), ArrayRDD)\n assert_is_instance(TupleRDD(rdd, 10), TupleRDD)\n assert_is_instance(TupleRDD(rdd), ArrayRDD)\n assert_is_instance(TupleRDD(rdd, None), TupleRDD)\n assert_is_instance(TupleRDD(rdd), ArrayRDD)\n\n def test_shape(self):\n data = np.arange(4000)\n shapes = [(1000, 4),\n (200, 20),\n (100, 40),\n (2000, 2)]\n for shape in shapes:\n rdd = self.sc.parallelize(data.reshape(shape))\n rdd1 = rdd.zipWithIndex()\n rdd2 = rdd.map(lambda x: (x, 1, 2, 3, 4, True))\n assert_equal(TupleRDD(rdd1).shape, (shape[0], 2))\n assert_equal(TupleRDD(rdd2).shape, (shape[0], 6))\n\n def test_get_single_tuple(self):\n x, y = np.arange(80).reshape((40, 2)), np.arange(40)\n x_rdd = self.sc.parallelize(x, 2)\n y_rdd = self.sc.parallelize(y, 2)\n z_rdd = x_rdd.zip(y_rdd)\n z = TupleRDD(z_rdd, 5)\n\n expected = np.arange(0, 10).reshape((5, 2)), np.arange(5)\n for tpl in [z.first(), z.ix(0).first(), z[0].first()]:\n assert_equal_tuple(tpl, expected)\n\n expected = np.arange(30, 40).reshape((5, 2)), np.arange(15, 20)\n for tpl in [z.ix(3).first(), z[3].first(), z[-5].first()]:\n assert_equal_tuple(tpl, expected)\n\n expected = np.arange(70, 80).reshape((5, 2)), np.arange(35, 40)\n for tpl in [z.ix(7).first(), z[7].first(), z[-1].first()]:\n assert_equal_tuple(tpl, expected)\n\n def test_get_single_item(self):\n x, y = np.arange(80).reshape((40, 2)), np.arange(40)\n x_rdd = self.sc.parallelize(x, 2)\n y_rdd = self.sc.parallelize(y, 2)\n z_rdd = x_rdd.zip(y_rdd)\n z = TupleRDD(z_rdd, 5)\n\n assert_array_equal(z[0, 0].first(), np.arange(0, 10).reshape((5, 2)))\n assert_array_equal(z[0, 1].first(), np.arange(5))\n\n 
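        # two-axis indexing on TupleRDD: the first index picks the block, the second picks the tuple field inside each block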
assert_array_equal(z[3, 0].first(), np.arange(30, 40).reshape((5, 2)))\n assert_array_equal(z[3, 1].first(), np.arange(15, 20))\n assert_array_equal(z[3, -1].first(), np.arange(15, 20))\n\n assert_array_equal(z[7, 0].first(), np.arange(70, 80).reshape((5, 2)))\n assert_array_equal(z[-1, 0].first(), np.arange(70, 80).reshape((5, 2)))\n assert_array_equal(z[7, 1].first(), np.arange(35, 40))\n assert_array_equal(z[-1, -1].first(), np.arange(35, 40))\n\n def test_get_multiple_tuples(self):\n x, y = np.arange(80).reshape((40, 2)), np.arange(40)\n x_rdd = self.sc.parallelize(x, 2)\n y_rdd = self.sc.parallelize(y, 2)\n z_rdd = x_rdd.zip(y_rdd)\n z = TupleRDD(z_rdd, 5)\n\n expected = [(np.arange(0, 10).reshape((5, 2)), np.arange(0, 5)),\n (np.arange(10, 20).reshape((5, 2)), np.arange(5, 10))]\n assert_equal_multiple_tuples(z[:2].collect(), expected)\n assert_equal_multiple_tuples(z[:2, :].collect(), expected)\n assert_equal_multiple_tuples(z[[0, 1]].collect(), expected)\n assert_equal_multiple_tuples(z[[0, 1], :].collect(), expected)\n assert_equal_multiple_tuples(z[[1, 0]].collect(), expected[::-1])\n\n expected = [(np.arange(50, 60).reshape((5, 2)), np.arange(25, 30)),\n (np.arange(60, 70).reshape((5, 2)), np.arange(30, 35)),\n (np.arange(70, 80).reshape((5, 2)), np.arange(35, 40))]\n assert_equal_multiple_tuples(z[-3:].collect(), expected)\n assert_equal_multiple_tuples(z[-3:, :].collect(), expected)\n assert_equal_multiple_tuples(z[[5, 6, 7]].collect(), expected)\n assert_equal_multiple_tuples(z[[5, 6, 7], :].collect(), expected)\n assert_equal_multiple_tuples(z[[7, 6, 5]].collect(), expected[::-1])\n assert_equal_multiple_tuples(z[[7, 6, 5], :].collect(), expected[::-1])\n assert_equal_multiple_tuples(z[[5, 7, 6]].collect(),\n [expected[0], expected[2], expected[1]])\n\n def test_get_multiple_items(self):\n x, y = np.arange(80).reshape((40, 2)), np.arange(40)\n x_rdd = self.sc.parallelize(x, 2)\n y_rdd = self.sc.parallelize(y, 2)\n z_rdd = x_rdd.zip(y_rdd)\n z = TupleRDD(z_rdd, 5)\n\n expected = [(np.arange(0, 10).reshape((5, 2)), np.arange(0, 5)),\n (np.arange(10, 20).reshape((5, 2)), np.arange(5, 10))]\n assert_array_equal(z[:2, 1].collect(),\n [expected[0][1], expected[1][1]])\n assert_array_equal(z[[0, 1], 0].collect(),\n [expected[0][0], expected[1][0]])\n assert_equal_multiple_tuples(z[[0, 1], -1:].collect(),\n [(expected[0][1],),\n (expected[1][1],)])\n assert_equal_multiple_tuples(z[[0, 1], -1:].collect(),\n [(expected[0][1],),\n (expected[1][1],)])\n assert_equal_multiple_tuples(z[[1, 0], [1, 0]].collect(),\n [expected[1][::-1], expected[0][::-1]])\n\n\nclass TestDictRDD(RDDTestCase):\n pass\n","sub_path":"python/test/test_rdd.py","file_name":"test_rdd.py","file_ext":"py","file_size_in_byte":17890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"619509522","text":"from mpi4py import MPI\nimport os\nimport csv\nimport cv2\nimport numpy as np\nimport sys\nimport time\nfrom six.moves import cPickle as pickle\nimport shutil\n\ndef get_csv_id(filename):\n \"\"\"Function that returns the name of the \n csv file with the corresponding labels for\n a given video file.\n Args:\n x: String name of a video.\n Returns:\n A string csv file name for x.\n \"\"\"\n return filename[:-6] + \".csv\"\n\n\ndef modify_image(image, target_w, target_h, grayscale=False):\n \"\"\"Function that performs some image preprocessing\n operations.\n Args:\n image: OpenCV frame array that will be processed.\n target_h: Integer height to which the video will be\n 
resized.\n target_w: Integer width to which the image will be\n resized.\n grayscale: Boolean to indicate whether to convert the \n image into grayscale.\n Returns:\n Numpy array of modified image.\n \"\"\"\n \n mod = cv2.resize(image, (target_w, target_h), interpolation=cv2.INTER_AREA)\n if grayscale:\n mod = cv2.cvtColor(mod, cv2.COLOR_BGR2GRAY)\n return np.expand_dims(np.array(mod, dtype=np.float32), axis=0)\n\n\ndef broadcastList(files, jobs, offset=0):\n \"\"\"Function that returns a dictionary with\n rank numbers as keys and list of jobs\n as values. Each job is defined as an array\n that contains the start and end index of the\n list of videos to be processed by the \n specified rank number, \n i.e. {0: [14, 20], ...}.\n The function will first assign the same number\n of files as the first job to each rank. Then, \n the function will be called by itself to\n assign the remaining items as a new job to\n each rank, starting from rank 0,\n i.e. {0: [[14,20], [220,226]], ...}. The\n function adapts to the number of threads\n available at runtime.\n Args:\n files: String list of videos to be\n divided per rank.\n jobs: Dictionary of current job\n assignments per rank.\n offset: Integer number offset for\n job assignation when called\n recursively.\n Returns:\n A dictionary of job assignments per rank.\n \"\"\"\n \n comm = MPI.COMM_WORLD\n size = comm.Get_size()\n step_size = 0\n \n # When list size is less than number of threads\n if size >= len(files):\n used_ranks = np.arange(0,len(files))\n step_size = 1\n for rank in used_ranks:\n start = rank\n stop = rank+1\n if rank in jobs.keys():\n jobs[rank] = np.append([jobs[rank]], [[start+offset,stop+offset]], axis=0)\n else:\n jobs[rank] = [start,stop]\n else: \n step_size = len(files)//size\n used_ranks = np.arange(0, len(files)//step_size)\n for rank in used_ranks:\n start = max(0,rank*step_size-1)\n stop = rank*step_size-1+step_size\n if rank in jobs.keys():\n jobs[rank] = np.append([jobs[rank]], [[start+offset,stop+offset]], axis=0)\n else:\n jobs[rank] = [start,stop]\n\n # If there are pending files to assign\n if step_size*max(used_ranks)+offset-1+step_size <= len(files)-1:\n broadcastList(files[step_size*max(used_ranks)+offset-1+step_size:], jobs, step_size*max(used_ranks)+offset-1+step_size)\n\n return jobs\n \n\ndef process_information(jobs, video_list, video_dir, label_dir, result_dir,\n dict_dir, target_h, target_w, skips=0, pack=3, \n grayscale=False, label_index=0):\n \"\"\"Function that creates a dictionary of file locations and\n labels to be used with a modified Keras ImageDataGenerator\n class that allows multi-label learning. This function also\n samples a video file into appended contiguous frames with a\n skips number of frames that are omitted between samples. \n Frames are resized to (target_w, target_h) and saved to result_dir. \n Args:\n jobs: Dictionary of job assignments per rank.\n video_list: String list of videos to be processed. 
They\n should be inside the directory video_dir.\n video_dir: String path to where video_list videos are\n located.\n label_dir: String path to where the csv label files are\n located.\n result_dir: String path to where the video frames will \n be saved.\n dict_dir: String path to where the train and validation\n dictionaries will be saved.\n target_h: Integer height to which each video frame will be\n resized.\n target_w: Integer width to which each video frame will be\n resized.\n skips: Integer number of frames to skip after saving a\n video frame.\n pack: Integer number of images to append to single \n instance.\n grayscale: Boolean to indicate whether to convert the \n image into grayscale.\n label_index: Integer number of frame in the pack of \n images whose label will be assigned to the whole\n pack of images.\n \"\"\"\n\n if isinstance(jobs, list):\n jobs = [jobs]\n \n for limits in jobs:\n files = video_list[limits[0]:limits[1]]\n dictionary = {}\n \n for filename in files:\n y = np.zeros((0, 10))\n X = np.zeros((0, target_h, target_w, 3))\n vid_path = os.path.join(video_dir, filename)\n csv_path = os.path.join(label_dir, get_csv_id(filename))\n\n # Capture frames from video\n cap = cv2.VideoCapture(vid_path)\n total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n assert pack <= total_frames\n exit = False\n instances = 0\n frames_count = 0\n while (cap.isOpened() and not exit):\n ret, frame = cap.read()\n frames_count += 1\n if ret != False:\n # Merge pack if there are enough images to read\n if frames_count + (pack-1) <= total_frames:\n merged = modify_image(frame, target_w, target_h)\n for _ in range(pack-1):\n r, frame2 = cap.read()\n frames_count += 1\n merged = np.append(merged, \n modify_image(frame2, target_w, target_h),\n axis=0)\n\n # Create image path\n im_path = result_dir+filename.split(\".\")[0]+\"_%05d\"%(instances)+\".pickle\"\n\n # Path of image generator\n dict_key = result_dir.split(\"/\")[-2:-1][0]+\"/\"+filename.split(\".\")[0]+\"_%05d\"%(instances)+\".pickle\"\n \n # Get label for instance\n label = []\n with open(csv_path, newline='') as csvfile:\n reader = csv.reader(csvfile)\n label = list(map(int,list(reader)\n [frames_count-pack+label_index]))[1:]\n \n # Save packed image\n try:\n f = open(im_path, 'wb')\n pickle.dump(merged, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n except Exception as e:\n print('Unable to save data to',\n im_path, ':', e)\n raise\n \n # Include instance and label to dictionary\n dictionary[dict_key] = label\n \n # Skip frames if selected\n for skip in range(skips):\n cap.read()\n frames_count += 1\n instances += 1\n else:\n exit = True\n cap.release()\n \n\n # Save dictionary\n try:\n f = open(dict_dir + str(limits[0]) + \"_\" + str(limits[1]) + \n \".pickle\", 'wb')\n pickle.dump(dictionary, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n except Exception as e:\n print('Unable to save data to',\n dict_dir + str(limits[0]) + \"_\" + str(limits[1]) + \n \".pickle\", ':', e)\n raise\n\ndef create_dataset(orig_db_path=\"~\", # Path to original db\n main_dir=\"~\", # Location of new dataset\n target_h=112, target_w=112,\n view=6, skips=50, pack=16,\n grayscale=False, label_index=8):\n \n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n\n # Set hyperparameters\n target_h = 112\n target_w = 112\n view = 6 # Number from 1 to 9 or -1 for all views\n skips = 50\n pack = 16\n grayscale = False\n label_index = 8\n\n # Name of directory for new dataset\n dataset_id = 
'FERA17DB_3D-h'+str(target_h)+'-w'+str(target_w)+'-v'+str(view)+'-s'+str(skips)+'-p'+str(pack)+'-gs'+str(grayscale)[0]+'-li'+str(label_index)+'/'\n\n    # Defining input files' paths\n    # Names are the default ones from the original FERA17 db\n    train_video_dir = orig_db_path+\"FERA17_Train_MV/\"\n    valid_video_dir = orig_db_path+\"FERA17_Valid_MV/\"\n    train_label_dir = orig_db_path+\"Train_Occ/\"\n    valid_label_dir = orig_db_path+\"Valid_Occ/\"\n\n    # Defining output files' paths\n    main_dir = main_dir+dataset_id\n    au_list = [\"AU1\", \"AU4\", \"AU6\", \"AU7\", \"AU10\", \"AU12\", \"AU14\", \"AU15\", \"AU17\", \"AU23\"]\n    images_dir = main_dir + \"images/\"\n    train_dir = images_dir + \"train/\"\n    valid_dir = images_dir + \"valid/\"\n    train_imgs_dir = train_dir + str(\n        au_list[0]) + \"/\" # Folder where every training image will be saved\n    valid_imgs_dir = valid_dir + str(\n        au_list[0]) + \"/\" # Folder where every validation image will be saved\n    dictionary_dir = main_dir + \"meta/\"\n    train_dict_dir = dictionary_dir + \"dicts_train/\"\n    valid_dict_dir = dictionary_dir + \"dicts_valid/\"\n\n    # Create required directories\n    if rank == 0:\n        try:\n            assert view in np.delete(np.arange(-1,10),1)\n        except AssertionError as e:\n            e.args += ('View must be a number between 1-9 or -1 for all views.',)  # args is a tuple, so the appended message must be a one-element tuple\n            raise\n        starttime = time.time()\n        print(\"\\nCreating folders...\")\n        if not os.path.exists(train_dir):\n            os.makedirs(train_dir)\n        if not os.path.exists(valid_dir):\n            os.makedirs(valid_dir)\n\n        for au in au_list:\n            if not os.path.exists(train_dir + au):\n                os.makedirs(train_dir + au)\n            if not os.path.exists(valid_dir + au):\n                os.makedirs(valid_dir + au)\n\n        if not os.path.exists(train_dict_dir):\n            os.makedirs(train_dict_dir)\n        if not os.path.exists(valid_dict_dir):\n            os.makedirs(valid_dict_dir)\n        print(\"Done.\\n\")\n\n    comm.Barrier()\n\n    # Filter training and validation videos according to view\n    if view != -1:\n        training_vid_list = [file for file in sorted(os.listdir(train_video_dir)) if int(file[-5]) == view]\n        valid_vid_list = [file for file in sorted(os.listdir(valid_video_dir)) if int(file[-5]) == view]\n    else:\n        training_vid_list = [file for file in sorted(os.listdir(train_video_dir))]\n        valid_vid_list = [file for file in sorted(os.listdir(valid_video_dir))]\n\n    # Create jobs for each rank and broadcast them\n    jobs_train = {}\n    jobs_valid = {}\n\n    if rank == 0:\n        jobs_train = broadcastList(training_vid_list, jobs_train)\n        jobs_valid = broadcastList(valid_vid_list, jobs_valid)\n    else:\n        jobs_train = None\n        jobs_valid = None\n\n    comm.Barrier()\n\n    jobs_train = comm.bcast(jobs_train, root=0)\n    jobs_valid = comm.bcast(jobs_valid, root=0)\n\n    # Create training set\n    if rank == 0:\n        print(\"Creating training set.\")\n    for worker, job in jobs_train.items():\n        if rank == int(worker):\n            process_information(job,\n                                training_vid_list,\n                                train_video_dir,\n                                train_label_dir,\n                                train_imgs_dir,\n                                train_dict_dir,\n                                target_h,\n                                target_w,\n                                skips,\n                                pack,\n                                grayscale,\n                                label_index)\n            break\n\n    comm.Barrier()\n\n    if rank == 0:\n        print(\"Done.\\n\")\n        print(\"Creating validation set.\")\n\n    # Create validation set\n    for worker, job in jobs_valid.items():\n        if rank == int(worker):\n            process_information(job,\n                                valid_vid_list,\n                                valid_video_dir,\n                                valid_label_dir,\n                                valid_imgs_dir,\n                                valid_dict_dir,\n                                target_h,\n                                target_w,\n                                skips,\n                                pack,\n                                grayscale,\n                                label_index)\n            break\n\n    comm.Barrier()\n\n    if rank == 0:\n        print(\"Done.\\n\")\n        print(\"Finishing up...\")\n\n        # Merge train and validation dictionaries\n        files_valid = sorted(os.listdir(valid_dict_dir))
\n        files_train = sorted(os.listdir(train_dict_dir))\n\n        valid_dict = {}\n        for filename in files_valid:\n            temp_dict = pickle.load(open(valid_dict_dir + filename, 'rb'))\n            valid_dict = {**valid_dict, **temp_dict}\n\n        train_dict = {}\n        for filename in files_train:\n            temp_dict = pickle.load(open(train_dict_dir + filename, 'rb'))\n            train_dict = {**train_dict, **temp_dict}\n\n        all_y_labels = {**train_dict, **valid_dict}\n\n        try:\n            f = open(dictionary_dir + \"all_y_labels.p\", 'wb')\n            pickle.dump(all_y_labels, f, pickle.HIGHEST_PROTOCOL)\n            f.close()\n        except Exception as e:\n            print('Unable to save data to', dictionary_dir + \"all_y_labels.p:\", e)\n            raise\n\n        # Create a classes dictionary\n        classes = {\n            \"AU1\": 0,\n            \"AU4\": 1,\n            \"AU6\": 2,\n            \"AU7\": 3,\n            \"AU10\": 4,\n            \"AU12\": 5,\n            \"AU14\": 6,\n            \"AU15\": 7,\n            \"AU17\": 8,\n            \"AU23\": 9\n        }\n\n        try:\n            f = open(dictionary_dir + \"classes_dict.p\", 'wb')\n            pickle.dump(classes, f, pickle.HIGHEST_PROTOCOL)\n            f.close()\n        except Exception as e:\n            print('Unable to save data to', dictionary_dir + \"classes_dict.p:\", e)\n            raise\n        print(\"Done.\\n\")\n\n        # Remove temporary directories created\n        if os.path.exists(train_dict_dir):\n            shutil.rmtree(train_dict_dir)\n        if os.path.exists(valid_dict_dir):\n            shutil.rmtree(valid_dict_dir)\n\n        print(\"Duration %.1f minutes.\\n\" % ((time.time() - starttime)/60))\n\n\ndef main(argv):\n    create_dataset(orig_db_path=str(argv[0]),\n                   main_dir=str(argv[1]),\n                   target_h=int(argv[2]),\n                   target_w=int(argv[3]),\n                   view=int(argv[4]),\n                   skips=int(argv[5]),\n                   pack=int(argv[6]),\n                   grayscale=argv[7].lower() in ('true', '1'),  # bool() of any non-empty string is True, so parse the flag text\n                   label_index=int(argv[8]))\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n","sub_path":"gen_3D_db.py","file_name":"gen_3D_db.py","file_ext":"py","file_size_in_byte":15791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"487432065","text":"from __future__ import print_function\nimport sys\n\nimport os\nimport json\nimport copy\nimport csv\n\ndef warning(*objs):\n    print(\"WARNING: \", *objs, file=sys.stderr)\n\nclass EuglenaData(object):\n    def __init__(self,path):\n        if not path.endswith(os.path.sep):\n            path += os.path.sep\n\n        with open(path+\"lightdata_meta.json\") as fp:\n            self._lightData = json.load(fp)\n\n        with open(path+\"tracks.json\") as fp:\n            self._tracks = json.load(fp)\n\n        ## Converting all frame numbers from 1 to N\n        for i in xrange(len(self._tracks)):\n            self._tracks[i]['startFrame'] += 1\n            self._tracks[i]['lastFrame'] += 1\n            samples = self._tracks[i]['samples']\n            for j in xrange(len(samples)):\n                samples[j]['frame'] += 1\n\n        self._zoom = self._lightData['metaData']['magnification']\n        self._events = self._lightData['eventsToRun']\n        baseTime = self._events[0]['time']\n        for i in xrange(len(self._events)):\n            self._events[i]['time'] = (self._events[i]['time'] - baseTime) / 1000.0\n\n        self._totalTime = (self._events[-1]['time'])\n        self._trackIDtoIndexMap = { t['trackID']:i for i,t in enumerate(self._tracks) }\n\n    def getUMPP(self):\n        return 100.0/30.0 * 4.0 / float(self._zoom)\n\n\n    def getNumTracks(self):\n        return len(self._tracks)\n\n    def getNumFrames(self):\n        if \"numFrames\" in self._lightData['metaData']:\n            return self._lightData['metaData']['numFrames']\n        else:\n            warning(\"Use newer version of data, numFrames information doesn't exist\")\n            return 0\n\n    def getTotalTime(self):\n        return self._totalTime\n\n    def getFPS(self):\n        return self.getNumFrames() / self.getTotalTime()\n\n    def getMagnification(self):\n        return self._zoom
\n\n    def findTrackID(self,id):\n        return self._trackIDtoIndexMap.get(id,-1)\n\n    def findTracksBetweenFrames(self,startFrame, endFrame):\n        return [ t for t in self._tracks if not (( t['startFrame']) > endFrame or (t['lastFrame']) < startFrame) ]\n\n\n    def getTrackAt(self,index):\n        return self._tracks[index]\n\n    def getTrackByID(self,id):\n        idx = self.findTrackID(id)\n        if idx >= 0:\n            return self.getTrackAt( idx )\n        else:\n            return None\n\n\n    def extractEuglenaBetweenFrames(self,startFrame,endFrame):\n        x = []\n        y = []\n        w = []\n        h = []\n        a = []\n        f = []\n\n        for t in self._tracks:\n            for s in t['samples']:\n                if startFrame <= (s['frame']) <= endFrame:\n                    rect = s['rect']\n                    x.append(rect[0])\n                    y.append(rect[1])\n                    w.append(rect[2])\n                    h.append(rect[3])\n                    a.append(rect[4])\n                    f.append(s['frame'])\n\n        return [x,y,w,h,a,f]\n\n    def getLedStateFromTime(self,t):\n        i = 0\n        while i< len(self._events) and self._events[i]['time'] <= t:\n            i += 1\n\n        i -= 1\n        if i >= 0:\n            return [self._events[i]['topValue'], self._events[i]['rightValue'], self._events[i]['bottomValue'], self._events[i]['leftValue'] ]\n\n        return 0,0,0,0\n\n    def getLedStateFromFrame(self,frame):\n        return self.getLedStateFromTime( frame / self.getFPS() )\n\n    @staticmethod\n    def clipTracksBetweenFrames(tracks,startFrame, endFrame):\n        clippedTracks = []\n        for t in tracks:\n            if not ((t['startFrame']) > endFrame or (t['lastFrame']) < startFrame):\n                if (t['startFrame']) >= startFrame and (t['lastFrame']) <= endFrame:\n                    #completely inside\n                    clippedTracks.append( copy.deepcopy(t) )\n                else:\n                    newTrack = copy.deepcopy(t)\n                    newTrack['samples'] = filter(lambda ss: startFrame <= (ss['frame']) <= endFrame, t['samples'] )\n                    frames = [ss['frame'] for ss in newTrack['samples']]\n                    newTrack[\"startFrame\"] = min(frames)\n                    newTrack[\"lastFrame\"] = max(frames)\n                    newTrack[\"numSamples\"] = len(frames)\n                    clippedTracks.append(newTrack)\n            else:\n                newTrack = copy.deepcopy(t)\n                newTrack['samples'] = []\n                newTrack['startFrame'] = -1\n                newTrack['lastFrame'] = -1\n                newTrack['numSamples'] = 0\n                clippedTracks.append(newTrack)\n\n        return clippedTracks\n\n    @staticmethod\n    def extractTrackData(track):\n        # returns x,y,w,h,a,f\n        return zip(*[(s['rect'][0], s['rect'][1], s['rect'][2], s['rect'][3], s['rect'][4], s['frame']) for s in track['samples'] ])\n\n    def writeTrackDataToCSV(self,track,csvfile):\n        x,y,w,h,a,f = EuglenaData.extractTrackData(track)\n\n        umpp = self.getUMPP()\n        period = 1.0 / self.getFPS()\n\n        x = [ xx * umpp for xx in x ]\n        y = [ yy * umpp for yy in y ]\n        w = [ ww * umpp for ww in w ]\n        h = [ hh * umpp for hh in h ]\n        t = [ tt * period for tt in f]\n\n        ledStates = [self.getLedStateFromFrame(fr) for fr in f]\n\n        with open(csvfile,'wb') as fb:\n            header = ('frame#','time (sec)','center_x (um)','center_y (um)','width (um)','height (um)','angle (degrees)','topLED','rightLED','bottomLED','leftLED')\n            wr = csv.writer(fb)\n            rows = zip(f,t,x,y,w,h,a)\n            wr.writerow(header)\n            for i,r in enumerate(rows):\n                data = list(r)\n                data.extend(ledStates[i])\n                wr.writerow(data)\n","sub_path":"server/cgi/downloadTrack/EuglenaData.py","file_name":"EuglenaData.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"501274875","text":"\n\nfrom xai.brain.wordbase.nouns._scarf import _SCARF\n\n#class header\nclass _SCARFS(_SCARF):\n\tdef __init__(self):
\n\t\t_SCARF.__init__(self)\n\t\tself.name = \"SCARFS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"scarf\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_scarfs.py","file_name":"_scarfs.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"24415369","text":"\"\"\"\nUnit tests for the noun phrase tagger\n\"\"\"\n\nimport unittest\nimport warnings\nfrom sys import executable\nimport socket\nimport time\nfrom subprocess import Popen, CREATE_NEW_CONSOLE\nfrom noun_phrase_tagger import (\n    get_stanford_nps,\n    get_spacy_nps,\n    get_nltk_nps,\n    get_noun_phrases,\n)\n\n\ndef ignore_warnings(test_func):\n    \"\"\"\n    Function to ignore test warnings that are not relevant\n    \"\"\"\n\n    def do_test(self, *args, **kwargs):\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"ignore\")\n            test_func(self, *args, **kwargs)\n\n    return do_test\n\n\nclass TestNP(unittest.TestCase):\n    \"\"\"\n    Class for testing noun phrase tagging functions\n    \"\"\"\n\n    @ignore_warnings\n    def test_stanford(self):\n        \"\"\"\n        Test for Stanford function.\n        \"\"\"\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        result = sock.connect_ex((\"localhost\", 9000))\n\n        if result != 0:\n            Popen(\n                [executable, \"core_nlp.py\"],\n                cwd=\"c:\\stanford-corenlp-full-2018-02-27\",\n                creationflags=CREATE_NEW_CONSOLE,\n            )\n            print(\n                \"Initializing CoreNLP....\"\n            ) # Give CoreNLP some time to get going before accepting input.\n            time.sleep(120)\n\n        self.assertEqual(\n            get_stanford_nps(\n                \"The little yellow dog chased the boy in the red car.\"\n            ),\n            ['The little yellow dog', 'the boy', 'the red car'],\n        )\n        sock.close()\n\n    @ignore_warnings\n    def test_spacy(self):\n        \"\"\"\n        Test for Spacy\n        \"\"\"\n        self.assertEqual(\n            get_spacy_nps(\n                \"The little yellow dog chased the boy in the red car.\"\n            ),\n            ['The little yellow dog', 'the boy', 'the red car'],\n        )\n\n    @ignore_warnings\n    def test_nltk(self):\n        \"\"\"\n        Test for NLTK\n        \"\"\"\n        self.assertEqual(\n            get_nltk_nps(\n                \"The little yellow dog chased the boy in the red car.\"\n            ),\n            ['The little yellow dog', 'the boy', 'the red car'],\n        )\n\n    @ignore_warnings\n    def test_consolidated(self):\n        \"\"\"\n        Test for the consolidation function\n        \"\"\"\n        self.assertEqual(\n            get_noun_phrases(\n                \"The little yellow dog chased the boy in the red car.\"\n            ),\n            ['The little yellow dog', 'the boy', 'the red car'],\n        )\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"test_np_unittest.py","file_name":"test_np_unittest.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"350117309","text":"import json\nimport uuid\nfrom functools import wraps\n\nimport redis\nfrom flask import request, abort\n\nfrom models.user import User\nfrom utils import log\n\ncache = redis.StrictRedis()\n\n\ndef current_user():\n    if 'session_id' in request.cookies:\n        session_id = request.cookies['session_id']\n        key = 'session_id_{}'.format(session_id)\n        log(key, 'keykeykeykey')\n\n        user_id = cache.get(key)\n        log(user_id, 'user_id')\n\n        user_id = json.loads(user_id)\n        user_id = int(user_id)\n        log('current_user key <{}> user_id <{}>'.format(key, user_id))\n        u = User.one(id=user_id)\n        return u\n    else:\n        return None\n\n\ndef csrf_required(f):\n    @wraps(f)\n    def wrapper(*args, **kwargs):\n        # Step 1: pull the CSRF token from the query string\n        token = request.args['token']\n        # Step 2: look up the matching user id in the cache\n        key = 'token_{}'.format(token)\n        u_id = cache.get(key)
\n        u_id = json.loads(u_id)\n        u = current_user()\n\n        if u_id is not None and u_id == u.id:\n            cache.delete(key)\n            return f(*args, **kwargs)\n        else:\n            abort(401)\n\n    return wrapper\n\n\ndef new_csrf_token():\n    # new_csrf_token works as follows: a fresh token is stored in the cache, keyed to the current user's id\n\n    # Step 1: build the cache key\n    u = current_user()\n    token = str(uuid.uuid4())\n    k = 'token_{}'.format(token)\n    v = json.dumps(u.id)\n    cache.set(k, v)\n    return token","sub_path":"routes/base_route.py","file_name":"base_route.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"473959107","text":"import sys\nimport threading\nimport daemon\n\nsys.path.append('..')\nfrom pathConvertor import *\n\n\n# a read thread, read data from remote\nclass WakeSocketExchanger(threading.Thread):\n    def __init__(self, client, master, string=None):\n        threading.Thread.__init__(self)\n        self.client = client\n        self.master = master\n        self.string = string\n\n    def run(self):\n        if self.string is None:\n            req = self.client.recv(BUFSIZE)\n        else:\n            req = self.string\n        if req:\n            try:\n                self.master.sendall(req)\n                print('Send succeeded')\n            except (BrokenPipeError, TimeoutError, ConnectionResetError):\n                print('Peer is offline or errored')\n                daemon.sendError(self.client)\n        self.client.close()\n        self.master.close()\n\n\ndef startWake():\n    # listen forever and accept connections\n    lst = daemon.MasterHolderd(WAKE_ON_LAN_DEV_PORT) # create a listen thread\n    lst.start() # then start\n\n    # accept client connection requests and bridge them to the server side\n    rcv = daemon.ClientHolderd(WAKE_ON_LAN_DEV_PORT,WAKE_ON_LAN_CLIENT_PORT, WakeSocketExchanger)\n    rcv.start()\n\n    # report live client connections\n    d = daemon.reportd(WAKE_ON_LAN_DEV_PORT)\n    d.setDaemon(True)\n    d.start()\n\n\nclass wake_tr(threading.Thread):\n    def __init__(self):\n        super().__init__()\n\n    def run(self):\n        startWake()\n\n\nif __name__ == '__main__':\n    startWake()\n","sub_path":"Linux-Release/server_2/wakeOnLanServer_nat_server.py","file_name":"wakeOnLanServer_nat_server.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"434783804","text":"import sys\nimport time\nimport numpy as np\nimport json\nimport dynamical_system\nimport ds_func\n\n\ndef main():\n    if len(sys.argv) != 2:\n        print(f\"Usage: python {sys.argv[0]} filename\")\n        sys.exit(0)\n    fd = open(sys.argv[1], 'r')\n    json_data = json.load(fd)\n    ds = dynamical_system.DynamicalSystem(json_data)\n\n    vp = np.append(ds.x0, ds.p[ds.var_param])\n\n    for p in range(ds.inc_iter):\n        start = time.time()\n        for i in range(ds.max_iter):\n            ds_func.store_state(vp, ds)\n            F = ds_func.func_newton(ds)\n            J = ds_func.jac_newton(ds)\n            vn = np.linalg.solve(J, -F) + vp\n            norm = np.linalg.norm(vn - vp)\n            if (norm < ds.eps):\n                end = time.time()\n                dur = end - start\n                msec = dur\n                print(\"**************************************************\")\n                print(str(p) + \" : converged (iter = \" +\n                      str(i+1) + \", time = \" + str(msec)[0:8] + \"[sec])\")\n                print(\"params : \" + str(ds.p))\n                print(\"x0 : \" + str(vn[0:ds.xdim]))\n                print(\"(Re(μ), Im(μ)), abs(μ) :\")\n                for k in range(ds.xdim):\n                    print(str(ds.eigvals[k]) + \", \" +\n                          str(abs(ds.eigvals[k]))[0:6])\n                print(\"**************************************************\")\n                vp = vn\n                ds.p[ds.var_param] = vn[ds.xdim]\n                ds.p[ds.inc_param] += ds.delta_inc\n                break\n            elif (norm > ds.explode):\n                print(\"explode\")\n                sys.exit()\n            vp = vn\n            ds.p[ds.var_param] = vn[ds.xdim]\n        else:\n            print(\"iter over\")\n            print(F)\n            exit()\n\nif __name__ == '__main__':
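    # Usage sketch (assumed JSON layout): python bif.py params.json, where params.json
    # presumably supplies the fields read above via DynamicalSystem
    # (x0, p, var_param, inc_param, eps, max_iter, inc_iter, delta_inc, explode).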
main()\n","sub_path":"bif/bif.py","file_name":"bif.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"627318866","text":"from sklearn.manifold import MDS, TSNE, SpectralEmbedding\n\n\ndef manifold_embedding(pos,method='tsne'):\n n_components = 2\n n_neighbors = 100\n\n if method == 'tsne':\n tsne = TSNE(n_components=n_components, init='pca', random_state=0, perplexity=5.0)\n Y = tsne.fit_transform(pos)\n elif method == 'spectral':\n se = SpectralEmbedding(n_components=n_components,n_neighbors=n_neighbors)\n Y = se.fit_transform(pos)\n elif method == 'mds':\n mds = MDS(n_components, max_iter=100, n_init=1)\n Y = mds.fit_transform(pos)\n return Y\n","sub_path":"deeprank2/tools/visualization/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"4220533","text":"# -*- coding: utf-8 -*-\n\"\"\"\nGeometry Plotting Utilities\n===========================\n\nDefines geometry plotting utilities objects:\n\n- :func:`colour.plotting.quad`\n- :func:`colour.plotting.grid`\n- :func:`colour.plotting.cube`\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'\n__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = 'colour-science@googlegroups.com'\n__status__ = 'Production'\n\n__all__ = ['quad', 'grid', 'cube']\n\n\ndef quad(plane='xy', origin=None, width=1, height=1, depth=0):\n \"\"\"\n Returns the vertices of a quad geometric element in counter-clockwise\n order.\n\n Parameters\n ----------\n plane : array_like, optional\n **{'xy', 'xz', 'yz'}**,\n Construction plane of the quad.\n origin: array_like, optional\n Quad origin on the construction plane.\n width: numeric, optional\n Quad width.\n height: numeric, optional\n Quad height.\n depth: numeric, optional\n Quad depth.\n\n Returns\n -------\n ndarray\n Quad vertices.\n\n Examples\n --------\n >>> quad()\n array([[0, 0, 0],\n [1, 0, 0],\n [1, 1, 0],\n [0, 1, 0]])\n \"\"\"\n\n u, v = (0, 0) if origin is None else origin\n\n plane = plane.lower()\n if plane == 'xy':\n vertices = ((u, v, depth), (u + width, v, depth),\n (u + width, v + height, depth), (u, v + height, depth))\n elif plane == 'xz':\n vertices = ((u, depth, v), (u + width, depth, v),\n (u + width, depth, v + height), (u, depth, v + height))\n elif plane == 'yz':\n vertices = ((depth, u, v), (depth, u + width, v),\n (depth, u + width, v + height), (depth, u, v + height))\n else:\n raise ValueError('\"{0}\" is not a supported plane!'.format(plane))\n\n return np.array(vertices)\n\n\ndef grid(plane='xy',\n origin=None,\n width=1,\n height=1,\n depth=0,\n width_segments=1,\n height_segments=1):\n \"\"\"\n Returns the vertices of a grid made of quads.\n\n Parameters\n ----------\n plane : array_like, optional\n **{'xy', 'xz', 'yz'}**,\n Construction plane of the grid.\n origin: array_like, optional\n Grid origin on the construction plane.\n width: numeric, optional\n Grid width.\n height: numeric, optional\n Grid height.\n depth: numeric, optional\n Grid depth.\n width_segments: int, optional\n Grid segments, quad counts along the width.\n height_segments: int, optional\n Grid segments, quad counts along the height.\n\n Returns\n -------\n ndarray\n Grid vertices.\n\n Examples\n 
--------\n >>> grid(width_segments=2, height_segments=2)\n array([[[ 0. , 0. , 0. ],\n [ 0.5, 0. , 0. ],\n [ 0.5, 0.5, 0. ],\n [ 0. , 0.5, 0. ]],\n \n [[ 0. , 0.5, 0. ],\n [ 0.5, 0.5, 0. ],\n [ 0.5, 1. , 0. ],\n [ 0. , 1. , 0. ]],\n \n [[ 0.5, 0. , 0. ],\n [ 1. , 0. , 0. ],\n [ 1. , 0.5, 0. ],\n [ 0.5, 0.5, 0. ]],\n \n [[ 0.5, 0.5, 0. ],\n [ 1. , 0.5, 0. ],\n [ 1. , 1. , 0. ],\n [ 0.5, 1. , 0. ]]])\n \"\"\"\n\n u, v = (0, 0) if origin is None else origin\n\n w_x, h_y = width / width_segments, height / height_segments\n\n quads = []\n for i in range(width_segments):\n for j in range(height_segments):\n quads.append(\n quad(plane, (i * w_x + u, j * h_y + v), w_x, h_y, depth))\n\n return np.array(quads)\n\n\ndef cube(plane=None,\n origin=None,\n width=1,\n height=1,\n depth=1,\n width_segments=1,\n height_segments=1,\n depth_segments=1):\n \"\"\"\n Returns the vertices of a cube made of grids.\n\n Parameters\n ----------\n plane : array_like, optional\n Any combination of **{'+x', '-x', '+y', '-y', '+z', '-z'}**,\n Included grids in the cube construction.\n origin: array_like, optional\n Cube origin.\n width: numeric, optional\n Cube width.\n height: numeric, optional\n Cube height.\n depth: numeric, optional\n Cube depth.\n width_segments: int, optional\n Cube segments, quad counts along the width.\n height_segments: int, optional\n Cube segments, quad counts along the height.\n depth_segments: int, optional\n Cube segments, quad counts along the depth.\n\n Returns\n -------\n ndarray\n Cube vertices.\n\n Examples\n --------\n >>> cube()\n array([[[ 0., 0., 0.],\n [ 1., 0., 0.],\n [ 1., 1., 0.],\n [ 0., 1., 0.]],\n \n [[ 0., 0., 1.],\n [ 1., 0., 1.],\n [ 1., 1., 1.],\n [ 0., 1., 1.]],\n \n [[ 0., 0., 0.],\n [ 1., 0., 0.],\n [ 1., 0., 1.],\n [ 0., 0., 1.]],\n \n [[ 0., 1., 0.],\n [ 1., 1., 0.],\n [ 1., 1., 1.],\n [ 0., 1., 1.]],\n \n [[ 0., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 1., 1.],\n [ 0., 0., 1.]],\n \n [[ 1., 0., 0.],\n [ 1., 1., 0.],\n [ 1., 1., 1.],\n [ 1., 0., 1.]]])\n \"\"\"\n\n plane = (('+x', '-x', '+y', '-y', '+z', '-z')\n if plane is None else [p.lower() for p in plane])\n u, v, w = (0, 0, 0) if origin is None else origin\n\n w_s, h_s, d_s = width_segments, height_segments, depth_segments\n\n grids = []\n if '-z' in plane:\n grids.extend(grid('xy', (u, w), width, depth, v, w_s, d_s))\n if '+z' in plane:\n grids.extend(grid('xy', (u, w), width, depth, v + height, w_s, d_s))\n\n if '-y' in plane:\n grids.extend(grid('xz', (u, v), width, height, w, w_s, h_s))\n if '+y' in plane:\n grids.extend(grid('xz', (u, v), width, height, w + depth, w_s, h_s))\n\n if '-x' in plane:\n grids.extend(grid('yz', (w, v), depth, height, u, d_s, h_s))\n if '+x' in plane:\n grids.extend(grid('yz', (w, v), depth, height, u + width, d_s, h_s))\n\n return np.array(grids)\n","sub_path":"colour/plotting/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"304707541","text":"import argparse\nfrom diceSimulation import die\n\ndef sum_two_dice(prb_dist=None):\n return die(prb_dist) + die(prb_dist)\n\n\ndef crap_game(prb_dist=None):\n s = sum_two_dice(prb_dist)\n if s in {7,11}:\n return True\n if s in {2, 3, 12}:\n return False\n while True:\n nxt = sum_two_dice()\n if nxt == s:\n return True\n if nxt == 7:\n return False\n\ndef loaded_dice_prob(p):\n prob = [(1-1/6)/4] * 6\n prob[0] = p\n prob[5] = (1/6)-p\n return prob\n\ndef prob_one_face(p):\n prob = float(p)\n if not 
0<=prob<=1/6:\n msg = \"probability of a face must be between 0 and 1/6\"\n raise argparse.ArgumentTypeError(msg)\n return prob\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Simulate crap game')\n parser.add_argument('nb_trials', type=int, help='number of times the experiment must be carried out')\n parser.add_argument('--prob_1', type=prob_one_face, help='probability of 1 (between 0 and 1/6)')\n args = parser.parse_args()\n nb_trials = args.nb_trials\n prob_1 = args.prob_1\n # print(loaded_dice_prob(0.1))\n # print(sum(loaded_dice_prob(0.1)))\n if args.prob_1:\n wins=sum([crap_game(loaded_dice_prob(prob_1)) for i in range(nb_trials)])\n else:\n wins=sum([crap_game() for i in range(nb_trials)])\n print(wins, nb_trials)\n","sub_path":"craps.py","file_name":"craps.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"631024838","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\n\nclass Gui(Tk):\n def __init__(self):\n super().__init__()\n\n self.title(\"Tikets\")\n self.configure(bg=\"#eee\", height=500, width=500)\n\n self.header()\n self.line1()\n self.check1()\n #self.check2()\n #self.line2()\n \n\n def header(self):\n self.header1 = Label()\n self.header1.grid(row=0, column=0)\n self.header1.configure(font=\"Arial 25\", text=\"Passport Checker\")\n def line1(self):\n self.q1 = Label()\n self.q1.grid(row = 1, column = 0)\n self.q1.configure(font=\"Arial 15\", text=\"1.Photo matches face?\")\n def check1(self):\n self.yesbox = Checkbutton()\n self.yesbox.grid(row=2, column=1)\n #def line2(self):\n #self.line2 = Label()\n #self.line2.grid(row=2, column=2)\n #self.line2.configure(font = \"Arial 9\", text=\"Yes\")\n","sub_path":"2-guis/3-events/3-checkbutton/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"393792442","text":"from django.conf.urls import patterns, include, url\r\nfrom django.contrib import admin\r\n#from .views import index, index2,inicio,RegistrarNoticia,Buscar\r\nfrom .views import Servicios,Produccion,Postproduccion,Publicidad,Distribucion,Buscar,Ajax\r\n\r\nurlpatterns = [\r\n #url(r'^$' , 'apps.inicio.views.index'),\r\n url(r'^$' , Servicios,name=\"servicios\"),\r\n url(r'^produccion' , Produccion,name=\"servicios\"),\r\n url(r'^postproduccion' , 'apps.servicios.views.Postproduccion'),\r\n url(r'^publicidad' , 'apps.servicios.views.Publicidad'),\r\n url(r'^distribucion' , 'apps.servicios.views.Distribucion',name=\"distribucion\"),\r\n url(r'^buscar' , Buscar.as_view()),\r\n url(r'^ajax' , Ajax.as_view()),\r\n]","sub_path":"apps/servicios/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"95668312","text":"import urllib3\n\nwith open('1.txt',mode='r+',encoding='utf-8') as f:\n file_read = f.read()\n\nheaders={\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'\n}\nurl = \"http://httpbin.org/post\"\npools = urllib3.PoolManager()\nresponse = pools.request(\"POST\",url=url,headers=headers,\n 
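                         # a (filename, file_data, content_type) tuple in `fields` makes
                         # urllib3 encode the field as a multipart/form-data file upload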
fields={'mytxt':('1.txt',file_read,'text/plain')})\nprint(response.data.decode())","sub_path":"爬虫案例/爬虫4/案例4-上传文件.py","file_name":"案例4-上传文件.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"410294119","text":"import wx\nimport subprocess\nimport os\n\nPATH = '/home/cites/cites-meetings'\nPYTHON = '%s/sandbox/bin/python' % PATH\n\ndef main():\n    ex = wx.App()\n    dial = wx.MessageDialog(None, 'This will overwrite the local database with the production database. All local changes will be lost.'\n                            'Are you sure you want to do this?', 'Warning',\n                            wx.YES_NO | wx.NO_DEFAULT | wx.ICON_WARNING)\n    if dial.ShowModal() == wx.ID_YES:\n        dial.Destroy()\n        try:\n            subprocess.check_call([PYTHON, '%s/extras/backup.py' % PATH,\n                                   '%s/instance/cites-backups.bup' % PATH])\n            subprocess.check_call(['sudo', '-u', 'postgres', 'dropdb', 'cites'])\n            subprocess.check_call(['sudo', '-u', 'postgres', 'createdb', 'cites'])\n\n            subprocess.check_call(['sudo', '-u', 'postgres', 'psql', 'cites', '-c'\n                                   'GRANT ALL PRIVILEGES ON DATABASE cites TO cites;'])\n            subprocess.check_call(['sudo', '-u', 'postgres', 'psql', 'cites', '-c' 'create extension hstore;'])\n\n            enviroment = dict(os.environ)\n            enviroment['BUP_DIR'] = '%s/instance/cites-backups.bup' % PATH\n            subprocess.check_call(['bup join production-psql | pg_restore -d cites -x -O'],\n                                  shell=True, env=enviroment)\n        except subprocess.CalledProcessError:\n            pass\n    else:\n        dial.Destroy()\n\nif __name__ == '__main__':\n    main()","sub_path":"offline-scripts/update_local_database.py","file_name":"update_local_database.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"133902549","text":"print(\"+--------------MENU-----------------+\")\nprint(\"|Press C to create an invoice       |\")\nprint(\"|Press R to view invoice details    |\")\nprint(\"|Press T to compute total revenue   |\")\nprint(\"|Press A to compute total items sold|\")\nprint(\"|Press E to exit                    |\")\nprint(\"+-----------------------------------+\")\nso=[]\ntien=[]\nwhile True:\n    danhsachhoahon=[]\n    x=input(\"=> choose a function:\")\n    print(\"=> you chose function:\",x)\n    if x == 'C':\n        tiep=input(\"continue?y/n?\")\n        while tiep == 'y':\n            print(\"please create an invoice\")\n            hoadon={}\n            banghoadon={}\n            stt = input(\"enter the line number:\")\n            stt_x=str(stt)\n            for i in range(len(stt_x),7):\n                stt_x= ' '+stt_x\n            tenhanghoa= input(\"enter the item name to buy :\")\n            for i in range(len(tenhanghoa),16):\n                tenhanghoa = tenhanghoa + ' '\n            soluong=input(\"enter the quantity:\")\n            soluong_x=str(soluong)\n            for i in range(len(soluong_x),8):\n                soluong_x = ' '+soluong_x\n            so.append(int(soluong))\n            dongia= input(\"enter the unit price:\")\n            dongia_x=str(dongia)\n            for i in range(len(dongia_x),13):\n                dongia_x=' '+dongia_x\n            thanhtien=int(dongia)*int(soluong)\n            thanhtien_x=str(thanhtien)\n            for i in range(len(thanhtien_x),16):\n                thanhtien_x=' '+thanhtien_x\n            tien.append(thanhtien)\n            hoadon[\"sohoadon\"]=input(\"enter the invoice number :\")\n            hoadon[\"ngaysuat\"]=input(\"enter the invoice date:\")\n            hoadon[\"tenkhachhang\"]=input(\"enter the customer name:\")\n            tiep=input(\"do you want to continue?y/n?\")\n    if x== 'R':\n        print(\"              PURCHASE INVOICE             \")\n        print(\"invoice number:\",hoadon[\"sohoadon\"])\n        print(\"issue date:\",hoadon[\"ngaysuat\"])\n        print(\"customer name:\",hoadon[\"tenkhachhang\"])\n        print(\"_______________________________invoice details_______________________________\")
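        # the fixed-width columns printed below rely on the padding applied to
        # stt_x, tenhanghoa, soluong_x, dongia_x and thanhtien_x at input time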
print(\"+----------+------------------+----------+---------------+------------------+\")\n print(\"| STT | hang hoa | so luong | don gia | thanh tien |\")\n print(\"+----------+------------------+----------+---------------+------------------+\")\n print(\"| \"+stt_x+\" | \" +tenhanghoa+ \" | \"+soluong_x+\" | \"+dongia_x+\" | \"+thanhtien_x+\" |\")\n print(\"+----------+------------------+----------+---------------+------------------+\")\n print(\"| \"+stt_x+\" | \" +tenhanghoa+ \" | \"+soluong_x+\" | \"+dongia_x+\" | \"+thanhtien_x+\" |\")\n print(\"+----------+------------------+----------+---------------+------------------+\")\n if x== 'T':\n print(\"tong doanh thu bang\")\n t_sum = 0\n tien=[]\n for num in tien:\n t_sum = t_sum + num\n print(t_sum)\n if x== 'A':\n print(\"so hang hoa ban ra\")\n a_sum = 0\n so=[]\n for j in so:\n a_sum = a_sum + j\n print(a_sum)\n if x== 'E':\n print(\"^_^ bye ^_^\")\n break\n","sub_path":"minhduc_day03.py","file_name":"minhduc_day03.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"102758449","text":"from sys import argv\nfrom sys import stdin\nfrom math import floor\nfrom PIL import Image\nimport PIL\n\nclass HashThings:\n internalStates = [0, 0, 0]\n def add(self, nums):\n self.internalStates = [self.internalStates[i] + nums[i] for i in range(3)]\n for i, state in enumerate(self.internalStates):\n if state > 255:\n self.internalStates[i] = state - ((state // 255) * 255)\n def reset(self):\n self.internalStates = [0, 0, 0]\n def getPix(self):\n return tuple(self.internalStates)\n\nif '-f' in argv:\n outFile = argv[argv.index('-f') + 1]\n BIter = False\nelse:\n outFile = '1.png'\n BIter = True\nif ('-d' in argv) and not ('-f' in argv):\n outDir = argv[argv.index('-d') + 1]\nelse:\n outDir = ''\nif '-i' in argv:\n iterations = int(argv[argv.index('-i') + 1])\nelse:\n iterations = 2\nif '-w' in argv:\n w = int(argv[argv.index('-w') + 1])\nelse:\n w = 100\nif '-h' in argv:\n h = int(argv[argv.index('-h') + 1])\nelse:\n h = 100\nreset = not ('-nre' in argv)\nverbose = ('-v' in argv)\n\nim = Image.new('RGB', (w, h))\nimArray = []\nhashThing = HashThings()\nvbArray = [25, 50, 75, 100]\n\nfor inBytes in range(1, iterations+1):\n if verbose:\n print('Starting iteration ' + str(inBytes))\n for x in range(w):\n for y in range(h):\n if verbose and (floor((((x * w) + y) / ((w * h) - 1)) * 100)) in vbArray:\n print('Picture ' + str(inBytes) + ': Pixel ' + str((x * w) + y) + ' out of ' + str((w * h) - 1))\n vbArray.pop(0)\n if reset:\n hashThing.reset()\n hashThing.add((int.from_bytes(stdin.buffer.read(inBytes), byteorder='little'),\n int.from_bytes(stdin.buffer.read(inBytes), byteorder='little'),\n int.from_bytes(stdin.buffer.read(inBytes), byteorder='little')))\n imArray.append(hashThing.getPix())\n im.putdata(imArray)\n im.save(outDir + outFile)\n imArray = []\n vbArray = [25, 50, 75, 100]\n if BIter:\n outFile = str(int(outFile[:-4])+1) + outFile[-4:]\n","sub_path":"picturegen.py","file_name":"picturegen.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"423441318","text":"from flask import Flask, jsonify\nfrom flask_cors import CORS\n\n\napp = Flask(__name__)\nCORS(app, resources=r'/*')\n\n@app.route('/cmt', methods=['GET'])\ndef get_cmt():\n context = jsonify(\n code=200,\n data=[\n {\"id\": 1, \"name\": '张三', \"title\": '沙发', \"content\": \"我是1楼\"},\n {\"id\": 2, 
\"name\": '李四', \"title\": '板凳', \"content\": \"我是2楼\"},\n {\"id\": 3, \"name\": '王五', \"title\": '茶几', \"content\": \"我是3楼\"},\n ])\n return context\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"337177966","text":"\n# coding: utf-8\n\n# In[19]:\n\nfrom scipy.misc import comb\n \nwith open('/Users/huxin/Downloads/rosalind_iprb.txt') as input_data:\n hom, het, rec = map(int, input_data.read().split())\ntotal = 4*comb(hom+het+rec, 2)\ndef mendels_first_law(hom, het, rec):\n total = 4*comb(hom+het+rec, 2)\n total_rec = 4*comb(rec, 2) + 2*rec*het + comb(het,2)\n return 1-total_rec/total\n \nwith open('/Users/huxin/Downloads/007_IRPB.txt', 'w') as output_data:\n\toutput_data.write(str(1 - total_rec/total))\ndef main():\n with open('/Users/huxin/Downloads/rosalind_iprb.txt') as input_data:\n k,m,n = map(int, input_data.read().strip().split())\n prob = str(mendels_first_law(k,m,n))\n print (prob)\n with open('/Users/huxin/Downloads/007_IRPB.txt', 'w') as output_data:\n output_data.write(prob)\n\nif __name__ == '__main__':\n main()\n\n\n# In[ ]:\n\n\n\n","sub_path":"IPRB.py","file_name":"IPRB.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"517594451","text":"\nimport smbus \nimport time\nimport RPi.GPIO as GPIO\nimport time\n \nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(7, GPIO.OUT)\npwm = GPIO.PWM(7,320) \n\nbus = smbus.SMBus(1) \ndef setup(Addr):\n global address\n address = Addr\n\ndef read(chn): #channel\n if chn == 0:\n bus.write_byte(address,0x40) #\n if chn == 1:\n bus.write_byte(address,0x41)\n if chn == 2:\n bus.write_byte(address,0x42)\n if chn == 3:\n bus.write_byte(address,0x43)\n bus.read_byte(address) # \n return bus.read_byte(address) #\n\ndef write(val):\n temp = val # \n temp = int(temp) #\n # print temp to see on terminal else comment out\n bus.write_byte_data(address, 0x40, temp) \n #\n\nif __name__ == \"__main__\":\n setup(0x48) \n \n while True:\n# print ('kk AIN0 = ', read(0)) \n \n tmp = read(0)\n tmp = tmp*3\n print ('tmp = ', tmp)\n if(tmp>50):\n GPIO.output(7,GPIO.HIGH)\n if(tmp<50):\n GPIO.output(7,GPIO.LOW)\n write(tmp)\n time.sleep(0.1)\n\n","sub_path":"individual/YangSiyu/week4.py","file_name":"week4.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"269119800","text":"N = int(input())\nans = 0\n\nl, m = [], []\n\nfor _ in range(N):\n s, t = input().split()\n t = int(t)\n l.append(s)\n m.append(t)\n\nX = input()\n\nidx = l.index(X)\nfor i in range(idx+1, N):\n ans += m[i]\n\nprint(ans)\n","sub_path":"problems/Others/dwacon6th_prelims_a.py","file_name":"dwacon6th_prelims_a.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"599206542","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef get_pad_layer(pad_type):\n if(pad_type in ['refl','reflect']):\n PadLayer = nn.ReflectionPad2d\n elif(pad_type in ['repl','replicate']):\n PadLayer = nn.ReplicationPad2d\n elif(pad_type=='zero'):\n PadLayer = nn.ZeroPad2d\n 
else:\n print('Pad type [%s] not recognized'%pad_type)\n return PadLayer\n\nclass BlurPool(nn.Module):\n def __init__(self, pad_type='reflect', filter_size=2, stride=2, channels=None, pad_off=0):\n super(BlurPool, self).__init__()\n self.filter_size = filter_size\n self.pad_off = pad_off\n self.pad_sizes = [int(1.*(filter_size-1)/2), int(np.ceil(1.*(filter_size-1)/2)), int(1.*(filter_size-1)/2), int(np.ceil(1.*(filter_size-1)/2))]\n self.channels = channels\n self.pad_sizes = [pad_size+pad_off for pad_size in self.pad_sizes]\n self.stride = stride\n self.off = int((self.stride-1)/2.)\n \n if(self.filter_size==1):\n a = np.array([1.,])\n elif(self.filter_size==2):\n a = np.array([1., 1.])\n elif(self.filter_size==3):\n a = np.array([1., 2., 1.])\n elif(self.filter_size==4): \n a = np.array([1., 3., 3., 1.])\n elif(self.filter_size==5): \n a = np.array([1., 4., 6., 4., 1.])\n elif(self.filter_size==6): \n a = np.array([1., 5., 10., 10., 5., 1.])\n elif(self.filter_size==7): \n a = np.array([1., 6., 15., 20., 15., 6., 1.])\n \n filt = torch.Tensor(a[:,None]*a[None,:])\n filt = filt/torch.sum(filt)\n self.register_buffer('filt', filt[None,None,:,:].repeat((self.channels,1,1,1)))\n \n self.pad = get_pad_layer(pad_type)(self.pad_sizes)\n \n def forward(self, inp):\n if(self.filter_size==1):\n if(self.pad_off==0):\n return inp[:,:,::self.stride,::self.stride] \n else:\n return self.pad(inp)[:,:,::self.stride,::self.stride]\n else:\n return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])\n \n\nclass AlexNet(nn.Module):\n\n def __init__(self, num_classes=2):\n super(AlexNet, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(64, 192, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(192, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n self.avgpool = nn.AdaptiveAvgPool2d((6, 6))\n self.classifier = nn.Sequential(\n nn.Dropout(),\n nn.Linear(256 * 6 * 6, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n nn.Linear(4096, num_classes),\n )\n\n def forward(self, x):\n x = self.features(x)\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n\nclass AlexNetNMP(nn.Module):\n\n def __init__(self, num_classes=2, filter_size=3):\n super(AlexNetNMP, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),\n nn.ReLU(inplace=True),\n BlurPool(filter_size=filter_size, stride=2, channels=64, pad_off=-1),\n nn.Conv2d(64, 192, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n BlurPool(filter_size=filter_size, stride=2, channels=192, pad_off=-1),\n nn.Conv2d(192, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n BlurPool(filter_size=filter_size, stride=2, channels=256, pad_off=-1),\n )\n self.avgpool = nn.AdaptiveAvgPool2d((6, 6))\n self.classifier = nn.Sequential(\n nn.Dropout(),\n nn.Linear(256 * 6 * 6, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n 
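            # final fully connected layer: 4096-d features to num_classes logits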
nn.Linear(4096, num_classes),\n )\n\n def forward(self, x):\n x = self.features(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), 256 * 6 * 6)\n x = self.classifier(x)\n return x\n\nif __name__ == \"__main__\":\n\n nrEpochs = 5\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n transform = transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ],\n std = [ 0.229, 0.224, 0.225 ]),\n ])\n\n data = torchvision.datasets.ImageFolder(root=\"./cats_dogs\", transform=transform)\n # hold out an exact fifth for testing so the two lengths always sum to len(data)\n test_len = len(data) // 5\n trainset, testset = torch.utils.data.random_split(data, [len(data) - test_len, test_len])\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=32,\n shuffle=True, num_workers=4)\n testloader = torch.utils.data.DataLoader(testset, batch_size=32,\n shuffle=False, num_workers=4)\n\n net = AlexNet()\n net.to(device)\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(net.parameters(), lr=0.02, momentum=0.9)\n for epoch in range(nrEpochs): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data[0].to(device), data[1].to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 100 == 99: # print every 100 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 100))\n running_loss = 0.0\n print('Finished Training without blurpool')\n\n netNMP = AlexNetNMP()\n netNMP.to(device)\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(netNMP.parameters(), lr=0.02, momentum=0.9)\n for epoch in range(nrEpochs): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data[0].to(device), data[1].to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = netNMP(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 100 == 99: # print every 100 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 100))\n running_loss = 0.0\n print('Finished Training with blurpool')\n\n correct = 0\n total = 0\n with torch.no_grad():\n for data in testloader:\n images, labels = (data[0].to(device), data[1].to(device))\n outputs = net(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Accuracy of the AlexNet on the test images: %d %%' % (\n 100 * correct / total))\n\n correct = 0\n total = 0\n with torch.no_grad():\n for data in testloader:\n images, labels = (data[0].to(device), data[1].to(device))\n outputs = netNMP(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Accuracy of the AlexNetNMP on the test images: %d %%' % (\n 100 * correct / total))\n\n\n torch.save(net.state_dict(), \"./models/AlexNet\")\n torch.save(netNMP.state_dict(), \"./models/AlexNetNMP\")\n\n # for loading later use:\n # model = TheModelClass(*args, **kwargs)\n # 
model.load_state_dict(torch.load(PATH))\n # model.eval()","sub_path":"CatsDogsCNN.py","file_name":"CatsDogsCNN.py","file_ext":"py","file_size_in_byte":9075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"462721133","text":"'''\r\nCreated on 25 Oct 2017\r\n\r\n@author: neeraj.mahajan\r\n'''\r\nfrom delivery_db_api.exception import ObjectNotFound\r\n\r\n\r\nclass AbstractModel:\r\n '''\r\n Abstract model class for defining common methods\r\n '''\r\n @classmethod\r\n def find_generic(cls, **kwargs):\r\n '''\r\n This method is used to lookup component type from database, based on provided parameters\r\n '''\r\n model_type = cls.query.filter_by(**kwargs).all()\r\n if not model_type:\r\n raise ObjectNotFound(\r\n {'message': \"No {} found.\".format(cls.__name__).replace('Model', '')})\r\n return model_type\r\n","sub_path":"flask/delivery_db_api/models/abstract_model.py","file_name":"abstract_model.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"339257953","text":"#coding:utf-8\n\ndef trim_dataset():\n f = open(\"../dataset/result\",\"r\")\n raw_data_list = f.read().split(\"#\")\n raw_data_list.pop(0)\n f.close()\n\n f = open(\"../dataset/club_account\",\"r\")\n club_list = f.read().split(\"\\n\")\n club_dict = {}\n f.close()\n\n for i in raw_data_list:\n tmp_list = i.split(\"\\n\")\n first_flag = False\n\n for j in tmp_list:\n if first_flag:\n if j not in club_list and j != \"\":\n club_dict[tmp_list[0]].append(j)\n else:\n first_flag = True\n club_dict.update({tmp_list[0]:[]})\n\n f = open(\"../dataset/user\",\"w\")\n users = []\n user_ranking_dict = {}\n\n for k,v in club_dict.items():\n for i in v:\n if i not in users:\n users.append(i)\n user_ranking_dict.update({i:0})\n f.write(i+\"\\n\")\n \"\"\"\n for user in users:\n for k,v in club_dict.items():\n if user in v:\n user_ranking_dict[user] += 1\n \"\"\"\n","sub_path":"trim_dataset.py","file_name":"trim_dataset.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"595829782","text":"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"The SecondQuantizedProperty base class.\"\"\"\n\nfrom abc import abstractmethod\nfrom typing import Any, List, TypeVar, Union\n\nfrom qiskit_nature import QiskitNatureError\nfrom qiskit_nature.drivers import QMolecule as LegacyQMolecule\nfrom qiskit_nature.drivers import WatsonHamiltonian as LegacyWatsonHamiltonian\nfrom qiskit_nature.drivers.second_quantization import QMolecule, WatsonHamiltonian\nfrom qiskit_nature.operators.second_quantization import SecondQuantizedOp\n\nfrom ..grouped_property import GroupedProperty\nfrom ..property import Property\n\nLegacyElectronicStructureDriverResult = Union[QMolecule, LegacyQMolecule]\nLegacyVibrationalStructureDriverResult = Union[WatsonHamiltonian, LegacyWatsonHamiltonian]\nLegacyDriverResult = Union[\n LegacyElectronicStructureDriverResult, LegacyVibrationalStructureDriverResult\n]\n\n\nclass SecondQuantizedProperty(Property):\n \"\"\"The SecondQuantizedProperty base class.\n\n A second-quantization property provides the logic to transform a raw data (as e.g. produced by a\n `qiskit_nature.second_quantization.drivers.BaseDriver`) into a\n `qiskit_nature.operators.second_quantization.SecondQuantizedOp`.\n \"\"\"\n\n @abstractmethod\n def second_q_ops(self) -> List[SecondQuantizedOp]:\n \"\"\"Returns the (list of) second quantized operators associated with this Property.\"\"\"\n\n @classmethod\n @abstractmethod\n def from_legacy_driver_result(cls, result: LegacyDriverResult) -> \"Property\":\n \"\"\"Construct a Property instance from a driver result.\n\n This method should implement the logic which is required to extract the raw data for a\n certain property from the result produced by a driver.\n\n Args:\n result: the driver result from which to extract the raw data.\n\n Returns:\n An instance of this property.\n\n Raises:\n QiskitNatureError: if an invalid driver result type is passed.\n \"\"\"\n\n @classmethod\n def _validate_input_type(cls, result: LegacyDriverResult, valid_type: Any) -> None:\n # The type hint of `valid_type` is not easy to determine because we are passing a typing\n # alias which is a type hint itself. So what is the type hint for a type hint...\n # For the time being this should be fine because the logic around from_legacy_driver_result\n # will need to be slightly adapted *before* the next release anyways when we continue with\n # the integration of the `Property` objects.\n if not isinstance(result, valid_type.__args__):\n raise QiskitNatureError(\n f\"You cannot construct an {cls.__name__} from a {result.__class__.__name__}. 
\"\n \"Please provide an object of any of these types instead: \"\n f\"{typ.__name__ for typ in valid_type.__args__}\"\n )\n\n\n# pylint: disable=invalid-name\nT = TypeVar(\"T\", bound=SecondQuantizedProperty, covariant=True)\n\n\nclass GroupedSecondQuantizedProperty(GroupedProperty[T], SecondQuantizedProperty):\n \"\"\"A GroupedProperty subtype containing purely second-quantized properties.\"\"\"\n\n @abstractmethod\n def second_q_ops(self) -> List[SecondQuantizedOp]:\n \"\"\"\n Returns the list of second quantized operators given by the properties contained in this\n group.\n \"\"\"\n","sub_path":"qiskit_nature/properties/second_quantization/second_quantized_property.py","file_name":"second_quantized_property.py","file_ext":"py","file_size_in_byte":3767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"202558066","text":"from django.conf import settings\nfrom django.db import transaction\n\nfrom .base import AuthorizationRequiredAPITestMixin, MyGeneRankTestCase\n\nfrom ... import models\n\n\nclass HealthSamplesFeedAPIViewTestCase(AuthorizationRequiredAPITestMixin, MyGeneRankTestCase):\n RESOURCE_URL = '/api/health-samples/'\n\n def setUp(self):\n sample_identifier = models.HealthSampleIdentifier.objects.create(value='HKStepCount')\n models.HealthSampleIdentifier.objects.create(value='HKQuantityTypeIdentifierFlightsClimbed')\n super(HealthSamplesFeedAPIViewTestCase, self).setUp()\n\n # GET\n\n def test_authorized_get(self):\n r = self.client.get(self.RESOURCE_URL, **self.auth_headers)\n self.assertEqual(r.status_code, 404)\n\n def test_authorized_get_with_results(self):\n data = {\n 'identifier': 'HKStepCount',\n 'value': '10',\n 'start_date': '2017-09-13 00:32:33.617238+00:00',\n 'end_date': '2017-09-13 00:34:33.617238+00:00',\n 'units': 'steps'\n }\n r = self.client.post(self.RESOURCE_URL, data, **self.auth_headers)\n self.assertEqual(r.status_code, 201)\n\n r = self.client.get(self.RESOURCE_URL, **self.auth_headers)\n print(r.content)\n self.assertEqual(r.status_code, 200)\n\n # POST\n\n def test_invalid_post(self):\n r = self.client.post(self.RESOURCE_URL, **self.auth_headers)\n self.assertEqual(r.status_code, 400)\n\n def test_authorized_post(self):\n data = {\n 'identifier': 'HKStepCount',\n 'value': '10',\n 'start_date': '2017-09-13 00:32:33.617238+00:00',\n 'end_date': '2017-09-13 00:34:33.617238+00:00',\n 'units': 'steps'\n }\n r = self.client.post(self.RESOURCE_URL, data, **self.auth_headers)\n self.assertEqual(r.status_code, 201)\n\n def test_authorized_post2(self):\n data = {\n \"identifier\": \"HKQuantityTypeIdentifierFlightsClimbed\",\n \"user\": \"https:\\/\\/mygenerank.scripps.edu\\/api\\/users\\/80550d42-d499-4052-9911-8f2fd173db9f\\/\",\n \"value\": 1,\n \"start_date\": \"2017-09-26T18:47:56\",\n \"end_date\": \"2017-09-26T18:47:56\",\n \"units\": \"count\"\n }\n # Intentionally creating integrity exceptions breaks unit tests.\n # https://stackoverflow.com/a/23326971/2085172\n with transaction.atomic():\n r = self.client.post(self.RESOURCE_URL, data, **self.auth_headers)\n r = self.client.post(self.RESOURCE_URL, data, **self.auth_headers)\n self.assertEqual(r.status_code, 400)\n","sub_path":"generank/api/tests/views/health_samples.py","file_name":"health_samples.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"408766884","text":"\nfrom common import *\nimport httplib\n\n\nheaders = {'Content-Type': 
'application/json'}\n\nconst.SYS_ERROR_SEARCH_PROJECT = 3\nconst.SYS_ERROR_CREATE_PROJECT = 4\n\n\ndef create_project(key, name, branch):\n params = {\n 'projects': key\n }\n resp = requests.get(url=expand_url(base_url + 'projects/search', params), auth=auth)\n\n if resp.status_code != httplib.OK:\n print_response_error(resp)\n sys.exit(const.SYS_ERROR_SEARCH_PROJECT)\n\n page_index, page_count, page_length = get_paging_info(resp)\n if page_count == 0:\n params = {\n 'name': name,\n 'project': key,\n 'branch': branch\n }\n url = expand_url(base_url + 'projects/create', params)\n resp = requests.post(url=url, headers=headers, auth=auth)\n\n if resp.status_code != httplib.OK:\n print('Url : {}'.format(url))\n print_response_error(resp)\n sys.exit(const.SYS_ERROR_CREATE_PROJECT)\n else:\n print(pretty_print_json(resp.json()))\n\n\ndef create_project_group(project_key, name, description, permissions=[]):\n params = {\n 'q': name\n }\n url = expand_url(base_url + 'user_groups/search', params)\n\n resp = send_request(url=url, headers=headers, auth=auth)\n page_index, page_count, page_length = get_paging_info(resp)\n\n if page_count == 0:\n params = {\n 'name': name,\n 'description': description\n }\n url = expand_url(base_url + 'user_groups/create', params)\n\n send_request(url=url, headers=headers, auth=auth)\n\n params = {\n 'groupName': name,\n 'projectKey': project_key,\n 'permission': ''\n }\n for perm in permissions:\n params['permission'] = perm\n url = expand_url(base_url + 'permissions/add_group', params)\n send_request(url=url, headers=headers, auth=auth)\n\n\ndef show_plugin_list(installed, pending, updates, available):\n # keys must be the string names; using the boolean parameters as keys\n # would make the assignments below create a second, disjoint set of keys\n results = {\n 'installed': [],\n 'updates': [],\n 'pending': [],\n 'available': []\n }\n\n if installed:\n print(\"Installed:\")\n print(\" The list of all the plugins installed on the SonarQube instance, sorted by plugin name.\")\n url = expand_url(base_url + 'plugins/installed')\n resp = send_request(url=url, headers=headers, auth=auth)\n\n if resp.status_code == httplib.OK:\n results['installed'] = decode_json(resp.json())['plugins']\n\n if updates:\n print(\"Updates:\")\n print(\" The list of plugins installed on the SonarQube instance for which at least one newer version is available, sorted by plugin name.\")\n url = expand_url(base_url + 'plugins/updates')\n resp = send_request(url=url, headers=headers, auth=auth)\n\n if resp.status_code == httplib.OK:\n results['updates'] = decode_json(resp.json())['plugins']\n\n if pending:\n print(\"Pending:\")\n print(\" The list of plugins which will either be installed or removed at the next startup of the SonarQube instance, sorted by plugin name.\")\n url = expand_url(base_url + 'plugins/pending')\n resp = send_request(url=url, headers=headers, auth=auth)\n\n if resp.status_code == httplib.OK:\n results['pending'] = decode_json(resp.json())['installing']\n\n if available:\n print(\"Available:\")\n print(\" The list of all the plugins available for installation on the SonarQube instance, sorted by plugin name.\")\n url = expand_url(base_url + 'plugins/available')\n resp = send_request(url=url, headers=headers, auth=auth)\n\n if resp.status_code == httplib.OK:\n results['available'] = decode_json(resp.json())['plugins']\n\n return results\n","sub_path":"sonarcube.py","file_name":"sonarcube.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"113778762","text":"\"\"\"\nHackerRank - Problem 
Name\n\nhttps://www.hackerrank.com/problem_URL\n\nProblem description\n\"\"\"\n\nclass SinglyLinkedListNode(object):\n def __init__(self, data):\n self.data = data\n self.next = None\n\n def insert_node(self, data):\n if self.next is None:\n self.next = SinglyLinkedListNode(data)\n else:\n self.next.insert_node(data)\n \nclass SinglyLinkedList(object):\n def __init__(self):\n self.head = None\n\n def insert_node(self, data):\n if self.head is None:\n self.head = SinglyLinkedListNode(data)\n else:\n self.head.insert_node(data)\n\ndef printLinkedList(head):\n node = head\n while node != None:\n print(node.data)\n node = node.next\n\n\n\ndef main():\n \"\"\" main \"\"\"\n llist_count = int(input())\n\n llist = SinglyLinkedList()\n\n for _ in range(llist_count):\n llist_item = int(input())\n llist.insert_node(llist_item)\n\n printLinkedList(llist.head)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"other/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"632981231","text":"from __future__ import print_function, division\nimport numpy as np\nfrom treetime.treetime import TreeAnc as ta\nfrom treetime.treetime import TreeTime as tt\nfrom treetime.gtr import GTR\nfrom treetime import io\nimport datetime\nimport os,sys,copy\nfrom Bio import Phylo, AlignIO\nimport matplotlib.pyplot as plt\nplt.ion()\n\npolytomies = True\n\ndef str2date_time(instr):\n \"\"\"\n Convert input string to datetime object.\n\n Args:\n - instr (str): input string. Accepts one of the formats:\n {MM.DD.YYYY, MM.YYYY, MM/DD/YYYY, MM/YYYY, YYYY}.\n\n Returns:\n - date (datetime.datetime): parsed date object. If the parsing failed,\n None is returned\n \"\"\"\n\n instr = instr.replace('/', '.')\n # import ipdb; ipdb.set_trace()\n for fmt in [\"%m.%d.%Y\", \"%m.%Y\", \"%Y\"]:\n try:\n date = datetime.datetime.strptime(instr, fmt)\n except ValueError:\n date = None\n if date is not None:\n break\n return date\n\ndef date_from_seq_name(name):\n\n date = str2date_time(name.split('|')[2].strip())\n return date.year + date.timetuple().tm_yday / 365.25\n\n\nif __name__=='__main__':\n root_dir = os.path.dirname(os.path.realpath(__file__))\n file_base = '../data/H3N2_NA_allyears_NA.200'\n fasta = os.path.join(root_dir, file_base+'.fasta')\n nwk = os.path.join(root_dir, file_base+'.nwk')\n mdf = os.path.join(root_dir, file_base+'.metadata.csv')\n\n # read tree from file\n gtr = GTR.standard()\n t = io.treetime_from_newick(gtr, nwk)\n # set alignment to the tree\n io.set_seqs_to_leaves(t, AlignIO.read(fasta, 'fasta'))\n io.read_metadata(t, mdf)\n t.reroot_to_best_root(infer_gtr=True)\n t.init_date_constraints()\n t.ml_t()\n # plotting the results\n t._score_branches()\n t.tree.ladderize()\n\n if polytomies:\n #Phylo.draw(t.tree, label_func = lambda x:'', show_confidence=False, branch_labels='')\n t1 = copy.deepcopy(t)\n t1.resolve_polytomies()\n t1.tree.ladderize()\n t.print_lh()\n print (\"Prior branch len: {0}\".format((t.tree.total_branch_length())))\n t1.print_lh()\n print (\"Posterior branch len: {0}\".format((t1.tree.total_branch_length())))\n\n #traveling_wave(t1.tree, Tc=0.005)\n #t1.init_date_constraints(gtr, slope=slope)\n #t1.ml_t(gtr)\n t1.coalescent_model(optimize_Tc=True)\n t1.print_lh()\n print (\"coalescent model branch len: {0}\".format((t1.tree.total_branch_length())))\n\n gtr = GTR.standard()\n t2 = io.treetime_from_newick(gtr, nwk)\n # set alignment to the tree\n 
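# (this second TreeTime instance repeats the setup so the relaxed-clock fit\n # below can be compared against the coalescent-model run on t1 above)\n 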
io.set_seqs_to_leaves(t2, AlignIO.read(fasta, 'fasta'))\n io.read_metadata(t2, mdf)\n t2.reroot_to_best_root(infer_gtr=True)\n t2.init_date_constraints()\n t2.ml_t()\n t2.tree.ladderize()\n t2.relaxed_clock(slack=.1, coupling=1)\n t2.ml_t()\n\n from matplotlib.cm import jet as cmap\n for n in t2.tree.find_clades():\n n.color = [int(x*255) for x in cmap(max(0, min(0.5*n.gamma, 1.0)))[:3]]\n\n Phylo.draw(t2.tree, label_func = lambda x:'', show_confidence=False, branch_labels='')\n","sub_path":"examples/flu_example.py","file_name":"flu_example.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"312098143","text":"with open('./rawText/james.txt') as james:\n jamesData = james.readline()\nwith open('./rawText/julie.txt') as julie:\n julieData = julie.readline()\nwith open('./rawText/mikey.txt') as mikey:\n mikeyData = mikey.readline()\nwith open('./rawText/sarah.txt') as sarah:\n sarahData = sarah.readline()\n\npeople = [{\n 'old': jamesData,\n 'new': []\n},\n {\n 'old': julieData,\n 'new': []\n},\n {\n 'old': mikeyData,\n 'new': []\n},\n {\n 'old': sarahData,\n 'new': []\n}]\n\nfor person in people:\n data = person['old'].split(',')\n for v in data:\n person['new'].append(v.strip())\n\njamesList = people[0]['new']\njulieList = people[1]['new']\nmikeyList = people[2]['new']\nsarahList = people[3]['new']\n\n# normalize the \":\" and \"-\" separators inside a time string\n\n\ndef sanitize(time_string):\n if ':' in time_string:\n splitter = ':'\n elif '-' in time_string:\n splitter = '-'\n else:\n return (time_string)\n (mins, secs) = time_string.split(splitter)\n return (mins + '.' + secs)\n\n\njamesListCleanedSortedUnique = sorted(set([sanitize(t) for t in jamesList])) # sorted() turns the set back into a list and sorts it\njulieListCleanedSortedUnique = sorted(set([sanitize(t) for t in julieList]))\nmikeyListCleanedSortedUnique = sorted(set([sanitize(t) for t in mikeyList]))\nsarahListCleanedSortedUnique = sorted(set([sanitize(t) for t in sarahList]))\n\n\n# use a set to remove duplicates\n# Python's set data structure is unordered and disallows duplicate items\n\nprint(jamesListCleanedSortedUnique[0:3])\nprint(julieListCleanedSortedUnique[0:3]) \nprint(mikeyListCleanedSortedUnique[0:3])\nprint(sarahListCleanedSortedUnique[0:3])\n","sub_path":"head-first/ch05/test-data.py","file_name":"test-data.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"201906539","text":"import cv2\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\n# more info on callbacks: https://keras.io/callbacks/ model saver is cool too.\nfrom tensorflow.keras.callbacks import TensorBoard\nimport pickle\nimport time\nimport numpy as np\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nmnist = tf.keras.datasets.mnist\n(x_train, y_train),(x_test,y_test) = mnist.load_data()\n\n\ndense_layers = [0]\nlayer_sizes = [64]\nconv_layers = [2]\nrow = 28\ncol = 28\nif tf.keras.backend.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, row,col)\n x_test = x_test.reshape(x_test.shape[0], 1, row,col)\n input_size = (1,row,col)\nelse:\n x_train = x_train.reshape(x_train.shape[0], row,col,1)\n x_test = x_test.reshape(x_test.shape[0], row,col,1)\n input_size = (row,col,1)\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_test /=255\nx_train/=255\n
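# at this point x_train has shape (60000, 28, 28, 1) and x_test (10000, 28, 28, 1)\n# with the default channels-last backend, and pixel values now lie in [0, 1]\n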
filters = [128,64,64,32,32]\nk_sizes = [3,5,4,3,4,5]\nnum_Conv = 4\ny_train = tf.keras.utils.to_categorical(y_train, 10)\ny_test = tf.keras.utils.to_categorical(y_test, 10)\nmodel = tf.keras.models.Sequential()\n# model.add(tf.keras.layers.Flatten()) # matrix ==> one array as input to NN\nfor i in range(num_Conv-1):\n conv_kwargs = {'input_shape': input_size} if i == 0 else {} # only the first layer needs input_shape\n model.add(Conv2D(filters[i], (k_sizes[i], k_sizes[i]), padding = \"same\", activation = tf.nn.relu, **conv_kwargs))\n model.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Flatten())\nmodel.add(tf.keras.layers.Dense(10, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10,activation = 'softmax'))\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.categorical_crossentropy,\n metrics=['accuracy'])\n\ncur_time = time.time()\nmodel.fit(x_train, y_train,batch_size = 128, epochs=3)\ntm = time.time()\nprint(\"\\n********Time used for training :\", tm - cur_time)\ncur_time = tm\n\n \nscore = model.evaluate(x_test,y_test,verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n \n\nmodel.save('MultiGPUs.model')","sub_path":"OneGPU.py","file_name":"OneGPU.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"427772344","text":"import subprocess\nimport pickle\nimport datetime\nimport os\nimport socket\nimport threading\nimport time\nimport sys\n\n# Define the global variables that we will use\nmaxRedirect = 4\nscanresults = []\n\nrunningCtr = 0\ntotalCtr = 0\nopenCtr = 0\nclosedCtr = 0\ndataCtr = 0\nzeroCtr = 0\nTimeout = 3\nredirectCtr = 0\nwin_path = \".\\\\openIPs\"\n\n# Clear the terminal so that we have a clear area to display script info\nos.system('cls')\n\n\ndef updateScreen():\n '''\n Displays realtime stats about the currently running scan\n '''\n\n print_pos(1, 1, \"-*- NETSCAN -*-\")\n print_pos(3, 1, \"Total: \"+str(totalCtr))\n print_pos(4, 1, \"Running: \"+str(runningCtr)+\" \")\n print_pos(5, 1, \"Timeout: \"+str(Timeout)+\"s\")\n print_pos(6, 1, \"Open: \"+str(openCtr))\n print_pos(7, 1, \"Closed: \"+str(closedCtr))\n print_pos(8, 1, \"Data avail: \"+str(dataCtr))\n print_pos(9, 1, \"Data none: \"+str(zeroCtr))\n\n\nFNULL = open(os.devnull, 'w')\n\n# a single shared lock guarding the counters; a fresh Lock per thread would never actually exclude anyone\nlock = threading.Lock()\n\n\nclass myThread (threading.Thread):\n def __init__(self, ip, port, gui_mode):\n threading.Thread.__init__(self)\n self.ip = ip\n self.port = port\n\n # If we're running the script from the GUI we don't need to display anything\n self.gui_mode = gui_mode\n\n def run(self):\n global scanresults\n global runningCtr\n global totalCtr\n global openCtr\n global closedCtr\n global dataCtr\n global zeroCtr\n global maxRedirect\n global Timeout\n global FNULL\n global redirectCtr\n runningCtr += 1\n totalCtr += 1\n if not self.gui_mode:\n updateScreen()\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.settimeout(Timeout)\n self.result = self.sock.connect_ex((self.ip, self.port))\n\n # Set the timeout back to None so the socket goes back into blocking mode\n self.sock.settimeout(None)\n self.sock.close()\n\n # If we can connect to the IP address, try to process it\n if self.result == 0:\n openCtr += 1\n scanresults.append(self.ip+\":\"+str(self.port))\n address = self.ip+\":\"+str(self.port)\n\n tryAgain = True\n\n while tryAgain and redirectCtr < maxRedirect:\n create_ip_folder(address, FNULL)\n process_ip(address, FNULL)\n tryAgain, address = process_webpage(address, FNULL)\n\n 
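# note: redirectCtr is module-level state, so this redirect budget is\n # shared across every scanner thread rather than tracked per host\n 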
# If we still need to follow the redirect, increment our redirect counter\n if tryAgain:\n redirectCtr += 1\n else:\n lock.acquire()\n closedCtr += 1\n lock.release()\n\n # Our thread is done, so we need to update our script stats\n lock.acquire()\n runningCtr -= 1\n lock.release()\n if not self.gui_mode:\n updateScreen()\n\n\ndef print_pos(y, x, text):\n '''\n Prints the most up to date information about the current scan to the console\n '''\n\n sys.stdout.write(\"\\x1b7\\x1b[%d;%df%s\\x1b8\" % (y, x, text))\n sys.stdout.flush()\n\n\ndef find_between(s, first, last):\n '''\n Finds all the information between the first and last parameters\n\n For example, if htmldata contains: <title>Hello</title>\n\n find_between(htmldata, \"<title>\", \"</title>\")\n >>> 'Hello'\n\n '''\n\n try:\n start = s.index(first) + len(first)\n end = s.index(last, start)\n return s[start:end]\n except ValueError:\n return \"\"\n\n\ndef prepare_env():\n '''\n Set up our environment that the script will run in by creating the directory that will store all of our scanned IP addresses\n\n The openIPs folder will be created in the location that the findi_scan script is run\n '''\n\n create_dir = \"mkdir \" + \"\\\"\" + win_path + \"\\\"\"\n # print(create_dir)\n subprocess.call(create_dir, shell=True)\n # print(make_dir)\n\n\ndef create_ip_folder(address, FNULL):\n '''\n Creates the folder hierarchy the program will use\n\n Each open IP address will have its own folder created with the IP address as the name. If a folder with the same name exists, it will be removed\n\n A subdirectory called content will also be created which will hold the webpage we download called data.html\n '''\n\n # Convert the passed in IP address into a directory name\n address_file = address.replace(\"/\", \"\\\\\")\n address_as_dir_name = address_file.replace(\":\", \".\")\n\n # If the directory we want to create exists, remove it\n remove = \"rmdir /s /q \" + \"\\\"\" + win_path + \"\\\\\" + address_as_dir_name + \"\\\"\"\n subprocess.call(remove, stdout=FNULL, stderr=FNULL, shell=True)\n\n # Create the directory that will store our IP address information\n add_dir = \"mkdir \" + \"\\\"\" + win_path + \"\\\\\" + address_as_dir_name + \"\\\"\"\n subprocess.call(add_dir, stdout=FNULL, stderr=FNULL, shell=True)\n # Create the content subdirectory that will hold our webpage data.html\n subprocess.call([\"mkdir\", win_path + \"\\\\\" + address_as_dir_name +\n \"\\\\content\"], stdout=FNULL, stderr=FNULL, shell=True)\n\n\ndef process_ip(address, FNULL):\n '''\n Use wget to access the IP address we found, and store the contents of what we find in a file called data.html\n '''\n # Convert the passed in IP address into a directory name\n address_file = address.replace(\"/\", \"\\\\\")\n address_as_dir_name = address_file.replace(\":\", \".\")\n\n # Use wget to access our IP address and download the contents of the webpage to a file called data.html\n wget_str = \"wget --max-redirect=5 -T 10 -t 1 -P \" + \"\\\"\" + win_path + \"\\\\\" + address_as_dir_name + \\\n \"\\\\content\\\"\" + \" -O \" + \"\\\"\" + win_path + \"\\\\\" + \\\n address_as_dir_name + \"\\\\content\\\\data.html\\\" \" + address\n\n subprocess.call(wget_str, stdout=FNULL, stderr=FNULL, shell=True)\n\n\ndef process_webpage(address, FNULL):\n '''\n Process our downloaded webpage and gather the information we find in a text file called info.txt\n\n info.txt will store the following website information: IP address, date scanned, size of the webpage, webpage title, and comments about the scanned IP\n\n Returns a (tryAgain, address) pair: tryAgain is True when a redirect should be followed, and address is the possibly-updated target\n '''\n
 # ints passed as arguments would only be copied, so update the module-level counters directly\n global dataCtr, zeroCtr\n\n # Convert the passed in IP address into a directory name\n address_file = address.replace(\"/\", \"\\\\\")\n address_as_dir_name = address_file.replace(\":\", \".\")\n\n # Create website info\n pageData = {}\n pageData[\"address\"] = address\n pageData[\"dateOfScan\"] = str(datetime.datetime.now())\n\n data_html_str = win_path + \"\\\\\" + address_as_dir_name + \"\\\\content\\\\data.html\"\n\n # Attempt to figure out the size (in bytes) of the webpage\n try:\n pageData[\"size\"] = os.stat(data_html_str).st_size\n except:\n pageData[\"size\"] = -1\n\n # If our webpage isn't empty, it will have a size larger than 0, so we'll increment our counter that keeps track of the number of webpages we've found that have data\n if pageData[\"size\"] > 0:\n dataCtr += 1\n else:\n zeroCtr += 1\n\n pageComment = \"\"\n pageTitle = \"\"\n tryAgain = False\n\n # Attempt to parse the webpage we found to determine what type of device we found along with other information\n try:\n # Convert the webpage to lowercase and change double quotes to single quotes for easier processing\n htmldata = open(data_html_str).read().lower()\n htmldataSingleQTs = htmldata.replace(\"\\\"\", \"'\")\n\n # Try to figure out what the title of the webpage is\n if pageTitle == \"\":\n pageTitle = find_between(\n htmldata, \"<title>\", \"</title>\")\n if pageTitle == \"\":\n # fall back to title tags that carry attributes, e.g. <title id='t'>\n pageTitle = find_between(htmldata, \"<title\", \"</title>\").split(\">\", 1)[-1]\n\n # Try to determine what type of device we've scanned and if the device is password protected\n if \"printer\" in htmldata:\n pageComment += \"printer found; \"\n if \"password\" in htmldata:\n pageComment += \"login form found; \"\n if \"router\" in htmldata:\n pageComment += \"router found; \"\n if \"dreambox\" in htmldata:\n pageComment += \"Dreambox receiver found; \"\n\n # Mark if we get redirected or encounter any javascript\n if ('http-equiv=\"refresh\"' in htmldata) or (\"http-equiv='refresh'\" in htmldata):\n pageComment += \"redirect found; \"\n\n if (\"<script type='text/javascript'>\" in htmldata) or ('<script type=\"text/javascript\">' in htmldata):\n pageComment += \"javascript found; \"\n\n redirect = [\"window.location='\", \"window.location ='\", \"window.location= '\", \"window.location = '\",\n \"window.location.href='\", \"window.location.href ='\", \"window.location.href= '\", \"window.location.href = '\"]\n\n # If our IP address redirects us somewhere, try to follow it\n for startredirect in redirect:\n if startredirect in htmldataSingleQTs:\n pageComment += \"location-change found [FOLLOW]; \"\n redirectAddress = find_between(\n htmldataSingleQTs, startredirect, \"'\").strip()\n if redirectAddress[:7] == \"http://\":\n address = redirectAddress\n elif redirectAddress[:1] == \"/\":\n address += redirectAddress\n else:\n address += \"/\" + redirectAddress\n\n tryAgain = True\n\n except:\n pageTitle = \"[parseError]\"\n\n # Store the title and all of our comments\n pageData[\"title\"] = pageTitle\n pageData[\"comment\"] = pageComment\n\n # Create the info.txt file and use pickle to write all of our collected information into the file\n open_file_str = win_path + \"\\\\\" + address_as_dir_name + \"\\\\info.txt\"\n pageDataFile = open(open_file_str, \"wb\")\n pickle.dump(pageData, pageDataFile)\n pageDataFile.close()\n\n return tryAgain, address\n\n\ndef get_page_size(address):\n '''\n Analyze data.html to determine the size of the webpage at the passed in IP address in bytes\n\n If the data.html is empty, this method will return -1\n '''\n\n # Convert the passed in IP address into a 
directory name\n address_file = address.replace(\"/\", \"\\\\\")\n address_as_dir_name = address_file.replace(\":\", \".\")\n\n data_html_str = win_path + \"\\\\\" + address_as_dir_name + \"\\\\content\\\\data.html\"\n\n page_size = 0\n\n # Attempt to figure out the size (in bytes) of the webpage\n try:\n page_size = os.stat(data_html_str).st_size\n except:\n page_size = -1\n return page_size\n\n\nthreads = []\n\n\ndef main():\n for i in range(144, 150):\n for j in range(45, 80):\n thread = myThread('24.160.'+str(i)+'.'+str(j), 80, False)\n thread.start()\n threads.append(thread)\n\n while(runningCtr >= 40):\n time.sleep(0.02)\n\n while(runningCtr > 0):\n time.sleep(0.02)\n\n\ndef test_main(ip_address_str):\n '''\n The starting point for our test classes. This allows us to run the script with a predetermined IP address string\n '''\n thread = myThread(ip_address_str, 80, False)\n thread.start()\n threads.append(thread)\n\n while(runningCtr >= 1):\n time.sleep(0.02)\n\n\ndef gui_scan(ip_address_str):\n '''\n The starting point for our GUI. This allows us to run the script with a user supplied IP address\n '''\n thread = myThread(ip_address_str, 80, True)\n thread.start()\n threads.append(thread)\n\n while(runningCtr >= 1):\n time.sleep(0.02)\n\n print(\"Successfully scanned \" + ip_address_str)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Findi/findi_scan.py","file_name":"findi_scan.py","file_ext":"py","file_size_in_byte":11782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"230367767","text":"# -*- coding: utf-8 -*- \n\n# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:\n#Copyright (c) 2005 Ali Afshar aafshar@gmail.com\n\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n\n#The above copyright notice and this permission notice shall be included in\n#all copies or substantial portions of the Software.\n\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE.\nimport gtk\nimport gobject\nimport subprocess\nimport threading\nimport pty\nimport sys\nimport os\nimport pida.core.service as service\nimport pida.pidagtk.contentview as contentview\nimport pida.core.registry as registry\nimport rlcompleter\nimport pida.utils.pyconsole as pyconsole\n\nclass Holder(object):\n def __init__(self, name):\n self.__name = name\n\n def __repr__(self):\n return self.__name\n\n\nclass PytermContent(contentview.content_view):\n\n ICON_NAME = 'manhole'\n\n SHORT_TITLE = 'manhole'\n\n LONG_TITLE = 'pIDA internal shell'\n\n BORDER = 4\n\n def init(self, localdict):\n console = pyconsole.Console(locals=localdict,\n banner=\"pIDA Shell. 
Keep breathing.\",\n use_rlcompleter=False)\n sw = gtk.ScrolledWindow()\n self.widget.pack_start(sw)\n sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n sw.add(console)\n self.show_all()\n\nclass Manhole(service.service):\n \"\"\"Debugging Python shell.\"\"\"\n NAME = 'manhole'\n\n multi_view_type = PytermContent\n multi_view_book = 'content'\n\n def cmd_run(self):\n localdict = {'pida':self.boss,\n 'cmd':self.get_commands()}\n content = self.create_multi_view(localdict=localdict)\n self.log.debug('%s ,', content)\n #content.start(localdict)\n\n def act_run_manhole(self, action):\n \"\"\"Run the internal manhole shell.\"\"\"\n self.call('run')\n\n def get_commands(self, group=None):\n L = []\n H = Holder('cmd')\n command_groups = self.boss.services\n if group is not None:\n command_groups = filter(lambda c: c.name == group, command_groups)\n for command_group in command_groups:\n setattr(H, command_group.NAME, Holder(command_group.NAME))\n J = getattr(H, command_group.NAME)\n for command in command_group.commands:\n setattr(J, command.name.replace('-', '_'), command)\n L.append((command_group.NAME, command.name))\n return H\n\n def ex(self, commandline):\n self.boss.command('terminal', 'execute-line', commandline=commandline)\n\n def test_vcs(self):\n self.boss.command('versioncontrol', 'call', command='status',\n directory = '/home/ali/working/pida/pida/trunk')\n\n def test_hidden(self):\n def p(data):\n self.log_debug(\"&&\" + data)\n self.boss.command('versioncontrol', 'get-statuses',\n datacallback=p,\n directory='/home/ali/working/pida/pida/trunk')\n\n def toolbar_action_commands(self):\n self.list_commands()\n\n def get_menu_definition(self):\n return \"\"\"\n <menubar>\n <menu name=\"base_tools\" action=\"base_tools_menu\">\n <menu name=\"base_pida\" action=\"base_pida_menu\">\n <separator />\n <menuitem name=\"manhole+run\" action=\"manhole+run_manhole\" />\n </menu>\n </menu>\n </menubar> \n \"\"\"\n\n\nimport code\nimport termios\nimport tty\nclass Interpreter(code.InteractiveConsole, object):\n\n def __init__(self, fd, localdict={}):\n self.__fd = fd\n code.InteractiveConsole.__init__(self, localdict)\n gobject.io_add_watch(fd, gobject.IO_IN, self.cb_stdin)\n self.__buffer = ''\n self.write_banner()\n self.write_ps()\n self.__completer = Completer(localdict)\n\n def write(self, data):\n os.write(self.__fd, data)\n\n def cb_stdin(self, fd, cond):\n data = os.read(fd, 1024)\n self.__buffer = '%s%s' % (self.__buffer, data)\n if '\\n' in self.__buffer:\n lines = self.__buffer.splitlines()\n command = lines.pop(0)\n self.__buffer = '\\n'.join(lines)\n t = threading.Thread(target=self.push, args=[command])\n t.run()\n return True\n\n def write_ps(self, more=False):\n if more:\n self.write('... 
')\n else:\n self.write('>>> ')\n\n def write_banner(self):\n self.write(\"PIDA Python Shell.\\n\"\n \"Be careful, you are inside the PIDA main loop.\\n\")\n\n def __get_completer(self):\n return self.__completer\n completer = property(__get_completer)\n\n def push(self, command):\n tempstdout = sys.stdout\n sys.stdout = Stdout(self)\n more = code.InteractiveConsole.push(self, command)\n sys.stdout = tempstdout\n self.write_ps(more)\n \nclass Stdout(object):\n\n def __init__(self, interpreter):\n self.__interpreter = interpreter\n\n def write(self, data):\n self.__interpreter.write(data)\n\nclass Completer(object):\n \"\"\"\n Taken from rlcompleter, with readline references stripped, and a local dictionary to use.\n \"\"\"\n def __init__(self,locals):\n self.locals = locals\n\n def complete(self, text, state):\n \"\"\"Return the next possible completion for 'text'.\n This is called successively with state == 0, 1, 2, ... until it\n returns None. The completion should begin with 'text'.\n\n \"\"\"\n if state == 0:\n if \".\" in text:\n self.matches = self.attr_matches(text)\n else:\n self.matches = self.global_matches(text)\n try:\n return self.matches[state]\n except IndexError:\n return None\n\n def global_matches(self, text):\n \"\"\"Compute matches when text is a simple name.\n\n Return a list of all keywords, built-in functions and names\n currently defined in __main__ that match.\n\n \"\"\"\n import keyword\n import __builtin__\n matches = []\n n = len(text)\n for word_list in [keyword.kwlist, __builtin__.__dict__.keys(), self.locals.keys()]:\n for word in word_list:\n if word[:n] == text and word != \"__builtins__\":\n matches.append(word)\n return matches\n\n def attr_matches(self, text):\n \"\"\"Compute matches when text contains a dot.\n\n Assuming the text is of the form NAME.NAME....[NAME], and is\n evaluatable in the globals of __main__, it will be evaluated\n and its attributes (as revealed by dir()) are used as possible\n completions. (For class instances, class members are also\n considered.)\n\n WARNING: this can still invoke arbitrary C code, if an object\n with a __getattr__ hook is evaluated.\n\n \"\"\"\n
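 # e.g. with self.locals == {'np': numpy}, attr_matches('np.ar') would offer\n # names like 'np.arange' and 'np.array'\n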
 import re\n m = re.match(r\"(\\w+(\\.\\w+)*)\\.(\\w*)\", text)\n if not m:\n return\n expr, attr = m.group(1, 3)\n object = eval(expr, self.locals, self.locals)\n words = dir(object)\n if hasattr(object,'__class__'):\n words.append('__class__')\n words = words + get_class_members(object.__class__)\n matches = []\n n = len(attr)\n for word in words:\n if word[:n] == attr and not word.startswith('__'):\n matches.append(\"%s.%s\" % (expr, word))\n return matches\n\ndef get_class_members(klass):\n ret = dir(klass)\n if hasattr(klass,'__bases__'):\n for base in klass.__bases__:\n ret = ret + get_class_members(base)\n return ret\n\nService = Manhole\n\n","sub_path":"tags/release-0.3.1/trunk/pida/services/manhole.py","file_name":"manhole.py","file_ext":"py","file_size_in_byte":8139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"292594466","text":"#ex3.1\r\nimport sys\r\nimport string\r\nimport logging\r\n\r\nfrom util import mapper_logfile\r\nlogging.basicConfig(filename=mapper_logfile, format='%(message)s',\r\n level=logging.INFO, filemode='w')\r\n\r\ndef mapper():\r\n \r\n keys = []\r\n\r\n for line in sys.stdin:\r\n data = line.split(',')\r\n if data[0] == \"\":\r\n keys = data\r\n else:\r\n data_point = dict(zip(keys,data))\r\n logging.info(str(data_point['UNIT']) + '\\t' + str(data_point['ENTRIESn_hourly']))\r\n\r\n# redirect stdin/stdout to the data files before running the mapper, not after\r\nsys.stdin=open('turnstile_data_master_with_weather.csv')\r\nsys.stdout=open('mapper_result.txt','w')\r\nmapper()\r\n","sub_path":"solutions to ipynb file/ex3.1-mapper.py","file_name":"ex3.1-mapper.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"124973587","text":"# a function is a block of organized & reusable code that performs an action or returns a result\n# Function definition with parameters\ndef add(x,y):\n x+y\n\n#calling the function with actual values\nprint(add(5,10))\n#This prints None because the function does not return a value. For that we need the return keyword\n\ndef add(x,y):\n return x+y\n\nsum=add(5,10)\nprint(sum)\n\n#return is not the same as printing\ndef add(x,y):\n print(x+y)\nadd(100,10)\nanswer=add(100,10)\n#answer is None because this version of add only prints\nprint(type(answer))\n\n#another example\na=print(\"hello\")\nprint(type(a))\n\n##Exercise- Create a function to multiply 2 numbers & return the answer\n
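\n#one possible solution to the exercise:\ndef multiply(x,y):\n return x*y\n\nprint(multiply(6,7)) #prints 42\n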
\n#reverse a word\nword=\"pen\"\nprint(word[::-1])\n#define a function- take any word & reverse it\ndef rev(word):\n return word[::-1]\n\nx=rev(\"anu\")\nprint(x)","sub_path":"Python_Funtion/Python_funtions_Part1.py","file_name":"Python_funtions_Part1.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"355544585","text":"import os\nimport random\ni=0\ndef rename(path):\n print(\"?\")\n oldpath=path\n global i\n i+=1\n newpath=\"t_\"+str(i)+\".jpg\"\n print(\"newpath\"+str(i))\n os.rename(oldpath,newpath) # rename the file\n '''\n oldpath = path\n pos1 = path.find('_num_')\n pos2 = path.find('_',pos1+5)\n newpath = path[:pos1]+path[pos2:]\n os.rename(path,newpath) # rename the file\n '''\n\ndef find(stringin):\n print(stringin[len(stringin)-3:])\n return stringin[len(stringin)-3:]\n\ndef test(path):\n files = os.listdir(path) # list every file and folder in this directory\n for file in files:\n try:\n file_path = os.path.join(path, file) # build the absolute path\n if os.path.isdir(file_path): # is it a folder?\n test(file_path) # if so, recurse into it\n else:\n #extension_name = os.path.splitext(file_path) # split the extension off the absolute path\n #print(extension_name)\n if find(file_path)== 'jpg':\n rename(file_path)\n except:\n continue # this may raise, hence the try-except; remove the handler if you want errors surfaced while debugging\n\n\ntest(r'Z:\\\\New folder')\nprint(\"success\")\n","sub_path":"thumbs/change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"592557064","text":"#user_0 = {\n\t#'username': 'enfermi',\n\t #'first': 'enrico',\n\t #'last': 'fermi',\n#}\n\n#for key, value in user_0.items():\n\t#print(\"\\nKey: \" + key)\n\t#print(\"Value: \" + value)\n\n\nfavorite_languages = {\n\t'jen': 'python',\n\t'sarah':'c',\n\t'edward': 'ruby',\n\t'phil':'python',\n}\n\n#for name, language in favorite_languages.items():\n\t#print(\"\\nName: \"+ name)\n\t#print(\"Language: \"+ language)\n\n#Looping through all the keys in a dictionary\n\n#for name in favorite_languages.keys():\n\t#print(name.title())\n\n#Looping through the keys is actually the default behavior when looping through a dictionary\n\nfriends = ['phil', 'sarah']\n\nfor name in favorite_languages.keys():\n\tprint(name.title())\n\tif name in friends:\n \t\tprint(\" Hi \"+ name.title() +\n \t\t\t\", I see your favorite language is \" + favorite_languages[name].title() + \"!\")\n\n#You can also use the keys method to find out if a particular person was polled. This time, let's find out if Erin took the poll:\n\nfavorite_languages = {\n\t'jen': 'python',\n\t'sarah': 'c',\n\t'edward': 'ruby',\n\t'phil': 'python',\n\t}\n\nif 'erin' not in favorite_languages.keys():\n\tprint(\"\\nErin, please take our poll!\")\n\n\n#Looping Through a Dictionary's Keys in Order\n\nfavorite_languages = {\n\t'jen': 'python',\n\t'sarah': 'c',\n\t'edward': 'ruby',\n\t'phil': 'python',\n}\n\n\nfor name in sorted(favorite_languages.keys()):\n\tprint(\"\\n\" +name.title()+ \", thank you for taking the poll.\")\n\n\nfavorite_languages = {\n\t'jen': 'python',\n\t'sarah': 'c',\n\t'edward': 'ruby',\n\t'phil': 'python',\n}\n\nprint(\"\\n\"+\"The following languages have been mentioned:\")\n\nfor value in sorted(favorite_languages.values()):\n\tprint('\\n'+ value.title())\n\n\n\n#Try It Yourself 6-4\n\n# Glossary 2\n\nglossary = {\n 'string': 'A series of characters.',\n 'comment': 'A note in a program that the Python interpreter ignores.',\n 'list': 'A collection of items in a particular order.',\n 'loop': 'Work through a collection of items, one at a time.',\n 'dictionary': \"A collection of key-value pairs.\",\n 'key': 'The first item in a key-value pair in a dictionary.',\n 'value': 'An item associated with a key in a dictionary.',\n 'conditional test': 'A comparison between two values.',\n 'float': 'A numerical value with a decimal component.',\n 'boolean expression': 'An expression that evaluates to True or False.',\n}\n\n\n#The for loop allows me to define the key and value (word and definition)\nfor word, definition in glossary.items():\n\tprint(\"\\n\"+ word.title() + \": \"+ definition)\n\n\n\nrivers = {\n\t'nile': 'egypt',\n\t'rio grande': 'united states',\n\t'amazon': 'south america',\n}\n\n\nfor river, country in rivers.items():\n\tprint(\"\\nThe \"+ river.title()+ \" flows through \"+ country.title())\n\nprint(\"\\nThe following rivers are included in this data set:\")\nfor river in rivers.keys():\n print(\"- \" + 
river.title())\n\nprint(\"\\nThe following countries are included in this data set:\")\nfor country in rivers.values():\n print(\"- \" + country.title())\n\n\n#Polling \n\nfavorite_languages = {\n\t'jen': 'python',\n\t'sarah': 'c',\n\t'edward': 'ruby',\n\t'phil': 'python',\n}\n\nprint(\"\\n\")\n\nfor name, language in favorite_languages.items():\n\tprint(name.title() + \"'s favorite language is \"+ language.title()+\".\")\n\n\nprint(\"\\n\")\n\n\ncoders = ['phil', 'josh', 'david', 'becca', 'sarah', 'matt', 'danielle']\nfor coder in coders:\n\tif coder in favorite_languages.keys():\n\t\tprint(\"Thank you for taking the poll, \"+ coder.title()+\"!\\n\")\n\telse:\n\t\tprint(coder.title()+\", what's your favorite programming language?\\n\")\n\n\n\n\n","sub_path":"chapter_6/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"390246325","text":"from yahoo_finance import Share\nimport csv\nimport numpy as np\nimport pandas as pd\n\n\n# variables named after the actual tickers, so 'GE' is no longer bound to a variable called yahoo\nge = Share('GE')\nGE_t = ge.get_historical('2011-01-01', '2016-01-01')\n\ngs = Share('GS')\nGS_t = gs.get_historical('2011-01-01', '2016-01-01')\n\nhal = Share('HAL')\nHAL_t = hal.get_historical('2011-01-01', '2016-01-01')\n\njpm = Share('JPM')\nJPM_t = jpm.get_historical('2011-01-01', '2016-01-01')\n\nkellogg = Share('K')\nK_t = kellogg.get_historical('2011-01-01', '2016-01-01')\n\n\nA1= pd.DataFrame(GE_t)\nA1_close=A1.set_index('Date').Close\n\nA2= pd.DataFrame(GS_t)\nA2_close=A2.set_index('Date').Close\n\nA3= pd.DataFrame(HAL_t)\nA3_close=A3.set_index('Date').Close\n\nA4= pd.DataFrame(JPM_t)\nA4_close=A4.set_index('Date').Close\n\nA5= pd.DataFrame(K_t)\nA5_close=A5.set_index('Date').Close\n\n\nOut= pd.DataFrame()\nOut['Date']=A1.Date\nOut['General Electric']=A1.Close\nOut['GS']=A2.Close\nOut['Halliburton']=A3.Close\nOut['JP Morgan']=A4.Close\nOut['Kellogg']=A5.Close\n\nOut=Out.sort_values('Date') # DataFrame.sort was removed; sort_values is the current API\nOut.to_csv('data.csv')\n
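# the resulting data.csv holds one row per trading day and one closing-price\n# column per ticker, written alongside the Date column\n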
\n\n","sub_path":"Ipython_data/extract_flask.py","file_name":"extract_flask.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"222615940","text":"#-*- coding:utf-8 -*-\n\nimport os,django\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"jwxzs_2.settings\")# project_name: the name of the Django project\ndjango.setup()\n\nfrom users.models import UserProfile\n\n\ndef test():\n me=UserProfile.objects.get(username='201626703079')\n me.image='http://jwc.jxnu.edu.cn/StudentPhoto/201626703079.jpg'\n me.save()\n\n\n","sub_path":"db_tools/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"648859879","text":"from flask import Flask, request\nfrom flask import render_template\nfrom day11.myheidi01 import getList\n\napp = Flask(__name__)\n\n@app.route('/')\ndef myindex():\n return render_template('index.html')\n\n@app.route('/stock', methods =['GET','POST'])\ndef mylist():\n s_name = request.form[\"s_name\"]\n stock_list = getList(s_name)\n return render_template('stock.html', list = stock_list, s_name = s_name)\n\nif __name__ == '__main__':\n app.run(host=\"127.0.0.1\", port='80')","sub_path":"HELLOPYTHON/day11/myflask05stock.py","file_name":"myflask05stock.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"259349718","text":"import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\nclass DDPGActor(nn.Module):\n def __init__(self, obs_size, act_size):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(obs_size, 400), nn.ReLU(),\n nn.Linear(400, 300), nn.ReLU(),\n nn.Linear(300, act_size), nn.Tanh()\n )\n\n def forward(self, x):\n return self.net(x)\n \n\nclass DDPGCritic(nn.Module):\n def __init__(self, obs_size, act_size):\n super().__init__()\n self.obs_net = nn.Sequential(\n nn.Linear(obs_size, 400),\n nn.ReLU()\n )\n self.out_net = nn.Sequential(\n nn.Linear(400 + act_size, 300), nn.ReLU(),\n nn.Linear(300, 1)\n )\n\n def forward(self, x, a):\n obs = self.obs_net(x)\n return self.out_net(torch.cat([obs, a], dim=1))\n","sub_path":"DDPG/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"263564210","text":"import requests\nfrom json import dumps, loads\nfrom re import match\n\n\nclass SentineloneAPI:\n def __init__(self, url, make_token_header):\n self.url = url\n self.token_header = make_token_header\n\n def search_agents(self, agent_details: str, results_length: int = 0) -> list:\n results = []\n for search in self.__get_searches(agent_details):\n endpoint = f\"{self.url}web/api/v2.0/agents?{search}={agent_details}\"\n output = requests.get(endpoint, headers=self.token_header)\n\n if output.status_code == 200 and output.json()[\"pagination\"][\"totalItems\"] >= 1:\n results.append(loads(dumps(output.json()['data'][0]).replace('null', '\"None\"')))\n if results_length:\n if len(results) >= results_length:\n return results\n\n return results\n\n @staticmethod\n def __get_searches(agent_details: str) -> list:\n if len(agent_details) == 18 and agent_details.isdigit():\n return [\"ids\"]\n if match(r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\", agent_details):\n return [\"networkInterfaceInet__contains\", \"externalIp__contains\"]\n if match(r\"((?:(\\d{1,2}|[a-fA-F]{1,2}){2})(?::|-*)){6}\", agent_details):\n return [\"networkInterfacePhysical__contains\", \"uuid\"]\n else:\n return [\"computerName\"]\n","sub_path":"sentinelone/komand_sentinelone/util/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"204678949","text":"#!/usr/bin/env python\n\nimport rospy\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom point_stamped_sample.msg import VR_dist\nclass VRDistPlotter: # distinct name, so the VR_dist message class imported above stays usable\n ''' \n Subscribes to the VR dist topic and constantly plots the distribution for the \n ergodic controller\n '''\n \n def __init__(self):\n rospy.init_node('vr_distr', anonymous=True)\n self.distr_sub = rospy.Subscriber('/vr_dist', VR_dist, self.update_dist)\n\n self.nx = 10\n self.ny = 10\n self.dist = np.zeros((self.nx, self.ny))\n self.im = plt.imshow(self.dist) \n\n def update_dist(self, arr):\n # assumes the VR_dist message carries a flat 'data' array; adjust the\n # field name to match the actual message definition\n self.dist = np.array(arr.data).reshape(self.nx, self.ny)\n self.im.set_data(self.dist)\n plt.draw()\n\n\nif __name__ == '__main__':\n x = VRDistPlotter()\n rospy.spin()\n 
\n","sub_path":"ROS/point_stamped_sample/scripts/vr_distr_sub.py","file_name":"vr_distr_sub.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"450473236","text":"from django.urls import path\nfrom . import views\nurlpatterns = [\n path('mine/',views.ManageCourseListView.as_view(),name='manage_course_list'),\n path('create/',views.CourseCreateView.as_view(),name='course_create'),\n path('<pk>/edit/',views.CourseUpdateView.as_view(),name='course_edit'),\n path('<pk>/delete/',views.CourseDeleteView.as_view(),name='course_delete'),\n path('<pk>/module/',views.CourseModuleUpdateView.as_view(),name='course_module_update'),\n path('module/<int:module_id>/content/<model_name>/create/',views.ContentCreateUpdateView.as_view(),name='module_content_create'),\n path('module/<int:module_id>/content/<model_name>/<id>/',views.ContentCreateUpdateView.as_view(),name='module_content_update'),\n path('content/<int:id>/delete/',views.ContentDeleteView.as_view(),name='module_content_delete'),\n path('module/<int:module_id>/',views.ModuleContentListView.as_view(), name='module_content_list'),\n path('module/order/',views.ModuleOrderView.as_view(),name='module_order'),\n path('content/order/',views.ContentOrderView.as_view(),name='content_order'),\n path('subject/<slug:subject>/',views.CourseListView.as_view(),name='course_list_subject'), # course_list_subject: For displaying all courses for a subject\n path('<slug:slug>/',views.CourseDetailView.as_view(),name='course_detail'), # course_detail: For displaying a single course overview\n]\n\n# module_content_create: To create new text, video, image, or file objects\n# and add them to a module. It includes the module_id and model_name\n# parameters. The first one allows linking the new content object to the given\n# module. The latter specifies the content model to build the form for.\n\n# module_content_update: To update an existing text, video, image, or file\n# object. 
It includes the module_id and model_name parameters and an id\n# parameter to identify the content that is being updated.","sub_path":"courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"449160237","text":"import unittest\n\nfrom recommend import search, rate_all,best_predictor, find_predictor, make_restaurant, make_review, group_by_centroid, find_closest, find_centroid, k_means\nfrom abstractions import restaurant_price, make_user, make_review, make_restaurant,restaurant_ratings, restaurant_name, restaurant_num_ratings, restaurant_mean_rating\n\nclass Test_test1(unittest.TestCase):\n\n r1 = 0\n r2 = 0\n r3 = 0\n r4 = 0\n r5 = 0\n c1 = 0\n c2 = 0\n restaurants1 = 0\n restaurants2 =0\n\n\n\n def test_restaurant_ratings(self):\n soda_reviews = [make_review('Soda', 4.5),\n make_review('Soda', 4)]\n soda = make_restaurant('Soda', [127.0, 0.1],\n ['Restaurants', 'Breakfast & Brunch'],\n 1, soda_reviews)\n val =restaurant_ratings(soda)\n self.assertEqual(val,[4.5, 4])\n\n def test_restaurant_num_ratings(self):\n woz_reviews = [make_review('Wozniak Lounge', 4),\n make_review('Wozniak Lounge', 3),\n make_review('Wozniak Lounge', 5)]\n woz = make_restaurant('Wozniak Lounge', [127.0, 0.1],\n ['Restaurants', 'Pizza'],\n 1, woz_reviews)\n val = restaurant_num_ratings(woz)\n self.assertEqual(val,3)\n\n def test_restaurant_mean_rating(self):\n woz_reviews = [make_review('Wozniak Lounge', 4),\n make_review('Wozniak Lounge', 3),\n make_review('Wozniak Lounge', 5)]\n woz = make_restaurant('Wozniak Lounge', [127.0, 0.1],\n ['Restaurants', 'Pizza'],\n 1, woz_reviews)\n val = restaurant_mean_rating(woz)\n self.assertEqual(val,4.0)\n\n def test_find_closest_1(self):\n val = find_closest([6, 1], [[1, 5], [3, 3]])\n self.assertEqual(val,[3, 3])\n\n def test_find_closest_2(self):\n val = find_closest([1, 6], [[1, 5], [3, 3]])\n self.assertEqual(val,[1, 5])\n\n def test_find_closest_3(self):\n val = find_closest([0, 0], [[-2, 0], [2, 0]])\n self.assertEqual(val,[-2, 0])\n\n def test_find_closest_4(self):\n val = find_closest([0, 0], [[1000, 1000]])\n self.assertEqual(val,[1000, 1000])\n\n def test_find_centroid(self):\n cluster1 = [\n make_restaurant('A', [-3, -4], [], 3, [make_review('A', 2)]),\n make_restaurant('B', [1, -1], [], 1, [make_review('B', 1)]),\n make_restaurant('C', [2, -4], [], 1, [make_review('C', 5)]),\n ]\n val = find_centroid(cluster1) # should be a pair of decimals\n self.assertEqual(val,[0.0, -3.0])\n\n def test_group_by_centroid(self):\n print(\"test1\")\n print(self.r1)\n groups = group_by_centroid([self.r1, self.r2, self.r3, self.r4, self.r5], [self.c1, self.c2])\n # correct grouping is [[r1, r2], [r3, r4, r5]])\n val = [list (map (lambda r: r ['name'], c)) for c in groups]\n self.assertEqual(val,[['A', 'B'], ['C', 'D', 'E']])\n\n def test_kmeans1(self):\n centroids = k_means(self.restaurants1, 1)\n self.assertEqual(centroids,[[0.0, -3.0]])\n\n def test_kmeans2(self):\n centroids = k_means(self.restaurants2, 1)\n self.assertEqual(centroids,[[1.0, 3.0]])\n\n def test_kmeans3(self):\n centroids = k_means(self.restaurants1 + self.restaurants2, 1)\n centroids.sort(key=lambda tup: tup[0]+tup[1])\n self.assertEqual(centroids,[[0.4, -0.6]])\n\n def test_kmeans4(self):\n centroids = k_means(self.restaurants1 + self.restaurants2, 2)\n centroids.sort(key=lambda tup: tup[0]+tup[1])\n self.assertEqual(centroids,[[0.0, -3.0], [1.0, 3.0]])\n\n def test_kmeans5(self):\n 
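The tests above pin down `find_closest` and `find_centroid` exactly, including the tie-break in `test_find_closest_3` (equidistant centroids resolve to the earlier one). A reference sketch consistent with those expected values; `restaurant_location` is assumed to be an accessor alongside the others imported from `abstractions`:

from math import sqrt


def find_closest(location, centroids):
    # min() keeps the first minimum, so ties go to the earlier centroid,
    # which is what test_find_closest_3 requires ([-2, 0] beats [2, 0]).
    def dist(centroid):
        return sqrt((location[0] - centroid[0]) ** 2 +
                    (location[1] - centroid[1]) ** 2)
    return min(centroids, key=dist)


def find_centroid(cluster):
    # component-wise mean of the member restaurants' positions
    xs = [restaurant_location(r)[0] for r in cluster]
    ys = [restaurant_location(r)[1] for r in cluster]
    return [sum(xs) / len(xs), sum(ys) / len(ys)]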
centroids = k_means(self.restaurants1 + self.restaurants2, 3)\n centroids.sort(key=lambda tup: tup[0]+tup[1])\n self.assertEqual(centroids,[[-0.5, -4.0], [1.0, -1.0], [1.0, 3.0]])\n\n def test_kmeans6(self):\n centroids = k_means(self.restaurants1 + self.restaurants2, 4)\n centroids.sort(key=lambda tup: tup[0]+tup[1])\n self.assertEqual(centroids,[[-3.0, -4.0],[1.5, -2.5], [0.0, 3.0], [2.0, 3.0]])\n\n def test_kmeans7(self):\n centroids = k_means(self.restaurants1 + self.restaurants2, 5)\n centroids.sort(key=lambda tup: tup[0]+tup[1])\n self.assertEqual(centroids,[[-3.0, -4.0], [2.0, -4.0], [1.0, -1.0], [0.0, 3.0], [2.0, 3.0]])\n\n def test_find_predictor(self):\n user = make_user('John D.', [\n make_review('A', 1),\n make_review('B', 5),\n make_review('C', 2),\n make_review('D', 2.5),])\n restaurant = make_restaurant('New', [-10, 2], [], 2, [make_review('New', 4),])\n cluster = [make_restaurant('B', [4, 2], [], 1, [make_review('B', 5)]),\n make_restaurant('C', [-2, 6], [], 4, [make_review('C', 2)]),\n make_restaurant('D', [4, 2], [], 3.5, [make_review('D', 2.5),\n make_review('D', 3),]),]\n pred, r_squared = find_predictor(user, cluster, restaurant_mean_rating)\n val1 = round(pred(restaurant), 5)\n print(\"val1 is: \"+str(val1))\n self.assertAlmostEqual(val1, 3.9359,4)\n val2 = round(r_squared, 5)\n print(\"val2 is: \"+str(val2))\n self.assertAlmostEqual(val2, 0.99256,4)\n\n def test_best_predictor(self):\n user = make_user('Cheapskate', [\n make_review('A', 2),\n make_review('B', 5),\n make_review('C', 2),\n make_review('D', 5),\n ])\n\n cluster = [\n make_restaurant('A', [5, 2], [], 4, [\n make_review('A', 5) ]),\n make_restaurant('B', [3, 2], [], 2, [\n make_review('B', 5) ]),\n make_restaurant('C', [-2, 6], [], 4, [\n make_review('C', 4) ]), ]\n\n fns = [restaurant_price, restaurant_mean_rating]\n\n pred = best_predictor(user, cluster, fns)\n print ([round(pred(r), 5) for r in cluster], \"SHOULD =\",[2.0, 5.0, 2.0])\n self.assertEqual([round(pred(r), 5) for r in cluster], [2.0, 5.0, 2.0])\n\n def test_rate_all(self):\n user = make_user('Mr. 
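`test_find_predictor` implies that `find_predictor` fits a simple least-squares line of the user's ratings against one feature function and returns the fit's R^2 alongside the predictor. A sketch of that computation; `user_rating` is assumed to be the `abstractions` accessor for a user's rating of a named restaurant:

def find_predictor(user, restaurants, feature_fn):
    xs = [feature_fn(r) for r in restaurants]
    ys = [user_rating(user, restaurant_name(r)) for r in restaurants]
    mean_x, mean_y = sum(xs) / len(xs), sum(ys) / len(ys)
    sxx = sum((x - mean_x) ** 2 for x in xs)
    syy = sum((y - mean_y) ** 2 for y in ys)
    sxy = sum((x - mean_x) * (y - mean_y) for x, y in zip(xs, ys))
    b = sxy / sxx                       # slope
    a = mean_y - b * mean_x             # intercept
    r_squared = sxy ** 2 / (sxx * syy)  # goodness of fit in [0, 1]

    def predictor(restaurant):
        return b * feature_fn(restaurant) + a

    return predictor, r_squared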
Mean Rating Minus One', [make_review('A', 3),make_review('B', 4), make_review('C', 1),])\n\n cluster = [make_restaurant('A', [1, 2], [], 4, \n [make_review('A', 4), make_review('A', 4) ]),\n make_restaurant('B', [4, 2], [], 3, [make_review('B', 5)]),\n make_restaurant('C', [-2, 6], [], 4, [make_review('C', 2) ]),\n make_restaurant('D', [4, 4], [], 3.5, [\n make_review('D', 2.5), make_review('D', 3.5), ]),]\n\n restaurants = {restaurant_name(r): r for r in cluster}\n\n ALL_RESTAURANTS = cluster\n\n to_rate = cluster[2:]\n\n fns = [restaurant_price, restaurant_mean_rating]\n\n ratings = rate_all(user, to_rate, fns,ALL_RESTAURANTS)\n print(type(ratings), \"Should be \", \"dict\")\n\n print(len(ratings), \"Should be \", 2)\n\n print(ratings['C'], \"Should be\", 1)\n self.assertEqual(ratings['C'],1)\n print(round(ratings['D'], 5), \"Should be \", 2.0)\n self.assertEqual(ratings['D'],2.0)\n\n def test_search(self):\n val = search('Thai', [self.r1, self.r2, self.r3, self.r4, self.r5])\n print(val,'==', ['A','D'])\n self.assertEqual(val, ['A','D'])\n\n def setUp(self):\n print(\"setUp\")\n\n\n def tearDown(self):\n print(\"tearDown\")\n\n @classmethod\n def setUpClass(cls):\n print(\"setUpClass\")\n cls.r1 = make_restaurant('A', [-10, 2], ['Fast Food','Thai'], 2, [make_review('A', 4),])\n cls.r2 = make_restaurant('B', [-9, 1], ['Fast Food','American'], 3, [make_review('B', 5),make_review('B', 3.5),])\n cls.r3 = make_restaurant('C', [4, 2], ['Fast Food',], 1, [make_review('C', 5) ])\n cls.r4 = make_restaurant('D', [-2, 6], ['Sit Down','Thai'], 4, [make_review('D', 2)])\n cls.r5 = make_restaurant('E', [4, 2], ['Italian','German'], 3.5, [make_review('E', 2.5), make_review('E', 3),])\n cls.c1 = [0, 0]\n cls.c2 = [3, 4]\n cls.restaurants1 = [\n make_restaurant('A', [-3, -4], [], 3, [make_review('A', 2)]),\n make_restaurant('B', [1, -1], [], 1, [make_review('B', 1)]),\n make_restaurant('C', [2, -4], [], 1, [make_review('C', 5)])]\n\n cls.restaurants2 = [\n make_restaurant('D', [2, 3], [], 2, [make_review('D', 2)]),\n make_restaurant('E', [0, 3], [], 3, [make_review('E', 1)])]\n\n @classmethod\n def tearDownClass(cls):\n print(\"tearDownClass\")\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"HW/Tests/test1-hw2.py","file_name":"test1-hw2.py","file_ext":"py","file_size_in_byte":8550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"290884505","text":"\"\"\"Module to check if any sentence in a document is Portuguese.\"\"\"\nimport os\nfrom os.path import abspath\nimport win32com.client as win32\nimport xlrd\nimport pptx\nimport csv\nimport re\nfrom polyglot.detect import Detector\nbase_path = os.path.dirname(abspath('__file__'))\navail_exts = ['docx', 'doc', 'pptx', 'xls', 'xlsx', 'csv', 'txt', 'rtf']\npassed_exts = ['py', 'git', 'spec', 'exe', 'md', 'gitattributes',\n 'gitignore', 'zip']\n\n\ndef extract_text(fname, path=base_path):\n \"\"\"Extract text from given document.\"\"\"\n if fname.split('.')[-1] in ['doc', 'docx', 'rtf']:\n word = win32.Dispatch('Word.Application')\n doc = word.Documents.Open(path+'\\\\'+fname)\n txt = doc.Content.Text\n doc.Close(False)\n elif fname.split('.')[-1] in ['xls', 'xlsx']:\n workbook = xlrd.open_workbook(path+'\\\\'+fname)\n sheets_name = workbook.sheet_names()\n txt = '\\n'\n for names in sheets_name:\n worksheet = workbook.sheet_by_name(names)\n num_rows = worksheet.nrows\n num_cells = worksheet.ncols\n for curr_row in range(num_rows):\n new_output = []\n for index_col in range(num_cells):\n 
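`test_search` expects `search('Thai', ...)` to return the names of the restaurants whose category list contains the query. Given the dict-based restaurant representation visible in `test_group_by_centroid` (`r['name']`), one sketch that satisfies the test; the `'categories'` key is an assumption inferred from the factory arguments:

def search(query, restaurants):
    # e.g. search('Thai', [r1, ..., r5]) -> ['A', 'D'] in the tests above
    return [r['name'] for r in restaurants if query in r['categories']]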
value = worksheet.cell_value(curr_row, index_col)\n if value:\n new_output.append(value)\n if new_output:\n txt += ' '.join([str(i) for i in new_output]) + '\\n'\n elif fname.endswith('.pptx'):\n presentation = pptx.Presentation(path+'\\\\'+fname)\n text_runs = []\n for slide in presentation.slides:\n for shape in slide.shapes:\n if not shape.has_text_frame:\n continue\n for paragraph in shape.text_frame.paragraphs:\n for run in paragraph.runs:\n text_runs.append(run.text)\n txt = '\\n\\n'.join(text_runs)\n elif fname.endswith('.txt'):\n text_doc = open(path+'\\\\'+fname, 'r', encoding='utf8')\n txt = text_doc.read()\n text_doc.close()\n elif fname.endswith('.csv'):\n csv_doc = open(path+'\\\\'+fname, 'r', encoding='utf8')\n csv_reader = csv.reader(csv_doc, delimiter=',')\n txt = '\\n'.join(['\\t'.join(row) for row in csv_reader])\n return txt\n\n\ndef lbl_langcheck(txt):\n \"\"\"Check language of document sentence by sentence.\"\"\"\n tokens = re.split('[?.;!:,\\\\n\\\\r]', txt)\n failed_sents = []\n for i in tokens:\n tx = i.strip()\n detector = Detector(tx, quiet=True)\n lan = detector.language.code\n if lan == 'pt':\n failed_sents.append(tx)\n if len(failed_sents) != 0:\n msg = \"Some sentences may be in Portuguese. Check following sentences.\"\n else:\n msg = \"No suspected Portuguese sentences found.\"\n return msg, failed_sents\n\n\ndef final_report(msg, fname, sents=[]):\n \"\"\"Format for writing to result file.\"\"\"\n msg_head = '*' * 20 + '\\n' + 'Result for {}:'.format(fname) + '\\n'\n result_msg = 'RESULT :: ' + msg + '\\n'\n if len(sents) != 0:\n sents_msg = \"\\n**********\\n\".join(sents)\n else:\n sents_msg = \"\"\n return msg_head + result_msg + sents_msg\n\n\ndef directory_check(path=base_path):\n \"\"\"Run whole module on a given directory.\"\"\"\n msg_list = []\n for i in os.listdir(path):\n if os.path.isdir(i):\n pass\n elif i == 'script_result.txt':\n pass\n elif i.split('.')[-1] in passed_exts:\n pass\n elif i.endswith('.ppt'):\n msg_1 = 'ppt format not supported.\\n'\n msg_2 = 'Chen convert {} to pptx and run script again.'.format(i)\n msg_ppt = msg_1 + msg_2\n msg_list.append(final_report(msg_ppt, i))\n elif i.split('.')[-1] in avail_exts:\n trans_text = extract_text(i)\n msg_trans, sentences = lbl_langcheck(trans_text)\n msg_list.append(final_report(msg_trans, i, sentences))\n else:\n null_msg = '{} is not one of the acceptable formats.'.format(i)\n msg_list.append(final_report(null_msg, i))\n result = open('script_result.txt', 'a', encoding='utf8')\n for j in msg_list:\n result.write(j)\n result.close()\n return\n\n\nif __name__ == '__main__':\n directory_check()\n","sub_path":"langcheck.py","file_name":"langcheck.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"426892823","text":"import keras\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom keras.layers import Dense, Flatten\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils.vis_utils import plot_model\nfrom src.Models.model_utils import *\nfrom keras.models import load_model\n\n'''\nClass to make MLP models\n'''\n\n\nclass Mlp:\n\n def __init__(self, data):\n self.data = data\n self.model = None\n self.model_name = ''\n self.x = np.zeros((NUMBER_OF_YEARS - LOOK_BACK_TIME_STEPS, LOOK_BACK_TIME_STEPS, NUMBER_OF_COUNTRIES))\n self.y = np.zeros((NUMBER_OF_YEARS - LOOK_BACK_TIME_STEPS, NUMBER_OF_COUNTRIES))\n self.x_main_model = np.zeros((NUMBER_OF_YEARS - LOOK_BACK_TIME_STEPS, 
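`lbl_langcheck` above splits on punctuation before running polyglot's `Detector` on every fragment, but detectors are unreliable on very short fragments, so flagging everything labelled `pt` produces false positives. A guarded variant (the length and confidence thresholds are my own, not from the source):

from polyglot.detect import Detector


def suspected_portuguese(fragments, min_chars=20, min_confidence=90.0):
    suspects = []
    for fragment in fragments:
        text = fragment.strip()
        if len(text) < min_chars:
            continue  # too short for a trustworthy guess
        language = Detector(text, quiet=True).language
        if language.code == 'pt' and language.confidence >= min_confidence:
            suspects.append(text)
    return suspects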
LOOK_BACK_TIME_STEPS, NUMBER_OF_COUNTRIES,\n TOTAL_NUMBER_OF_FEATURES))\n self.y_main_model = np.zeros((NUMBER_OF_YEARS - LOOK_BACK_TIME_STEPS, 1))\n self.r2_score = dict()\n self.feature_name = ''\n self.years = list(set(data['Year']))\n\n def make_mlp_model(self, train, main_model):\n \"\"\"\n This method is used to restructure our data required to feed into MLP models. 'train' is the flag to\n distinguish between training and evaluation, 'main_model' is the flag which signifies whether we are\n evaluating for the main model or the feature models\n \"\"\"\n if not main_model:\n for feature_name in self.data.columns:\n if feature_name not in ('Year', 'Country', 'Country_Code', 'Energy_CO2_Emissions',\n 'Industrial_Process_Emissions', 'Land_Use_Emissions', 'Transport_Emissions'):\n self.feature_name = feature_name\n util = ModelUtil(self.data, NUMBER_OF_YEARS, feature_name)\n util.prepare_feature(self.years) # 29*14\n self.x, self.y = util.create_time_series_dataset()\n x_train, x_test, y_train, y_test = \\\n train_test_split(self.x, self.y, shuffle=True, random_state=42, test_size=TEST_SPLIT_SIZE)\n self.x = self.x[:, :, 1:15]\n self.y = self.y[:, 1:15]\n x_train = x_train[:, :, 1:15]\n y_train = y_train[:, 1:15]\n self.build_mlp_features_model()\n print(self.model.summary())\n self.model_name = 'src/Models/model_checkpoints/mlp/model_mlp_' + self.feature_name\n if train:\n self.train_mlp_model(x_train, y_train)\n else:\n self.test_mlp_model(x_test, y_test, False)\n else:\n self.feature_name = 'main_model'\n main_util = ModelUtil(self.data, NUMBER_OF_YEARS, feature_name=None)\n main_util.prepare_data_main_model()\n self.x_main_model, self.y_main_model = main_util.create_time_series_dataset_main_model()\n self.x_main_model = self.x_main_model.reshape(self.x_main_model.shape[0], self.x_main_model.shape[1], -1)\n x_train, x_test, y_train, y_test = train_test_split(self.x_main_model, self.y_main_model, shuffle=True,\n random_state=42,\n test_size=TEST_SPLIT_SIZE)\n y_train = y_train[:, :, 0]\n self.build_mlp_main_model()\n print(self.model.summary())\n self.model_name = 'src/Models/model_checkpoints/mlp/model_mlp_' + self.feature_name\n if train:\n self.train_mlp_model(x_train, y_train)\n else:\n self.test_mlp_model(x_test, y_test, True)\n\n return self.r2_score\n\n def train_mlp_model(self, x_train, y_train):\n \"\"\"\n This method is used to train mlp models given training data\n \"\"\"\n print('-------------Training-----------------: ', self.model_name)\n es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10, restore_best_weights=True)\n mc = ModelCheckpoint(self.model_name, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n callbacks_list = [es, mc]\n history = self.model.fit(x_train, y_train, epochs=EPOCHS, batch_size=BATCH_SIZE,\n validation_split=VALIDATION_SPLIT_SIZE, callbacks=callbacks_list, verbose=1)\n trained_epoch = len(history.history['r2_keras'])\n self.r2_score[self.feature_name] = ((history.history['r2_keras'][trained_epoch - 1]),\n (history.history['val_r2_keras'])[trained_epoch - 1])\n self.save_model()\n\n def build_mlp_features_model(self):\n \"\"\"\n Builds MLP models for features\n \"\"\"\n self.model = keras.Sequential()\n self.model.add(Flatten(input_shape=(self.x.shape[1], self.x.shape[2])))\n self.model.add(Dense(128, activation=RELU_ACTIVATION, name=\"Hidden_Layer_1\"))\n self.model.add(Dense(64, activation=RELU_ACTIVATION, name=\"Hidden_Layer_2\"))\n self.model.add(Dense(16, activation=RELU_ACTIVATION, 
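`ModelUtil.create_time_series_dataset` is not included in this record; for a look-back MLP it is most plausibly a sliding window over the yearly series, pairing each `LOOK_BACK_TIME_STEPS`-year window with the year that follows it. A reconstruction of the idea (my sketch, not the project's code):

import numpy as np


def create_time_series_dataset(series, look_back):
    # series: (n_years, n_countries)
    # x: (n_years - look_back, look_back, n_countries)
    # y: (n_years - look_back, n_countries)
    x = np.stack([series[i:i + look_back]
                  for i in range(len(series) - look_back)])
    y = series[look_back:]  # the year immediately after each window
    return x, y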
name=\"Hidden_Layer_3\"))\n self.model.add(Dense(units=NUMBER_OF_COUNTRIES, activation=LINEAR_ACTIVATION, name=\"output\"))\n self.model.compile(optimizer=ADAM_OPTIMIZER, loss=root_mean_squared_error, metrics=[r2_keras])\n\n def build_mlp_main_model(self):\n \"\"\"\n Builds MLP model for the main CO2 emission data.\n \"\"\"\n self.model = keras.Sequential()\n self.model.add(Flatten(input_shape=(self.x_main_model.shape[1], self.x_main_model.shape[2])))\n self.model.add(Dense(128, activation=RELU_ACTIVATION, name=\"Hidden_Layer_1\"))\n self.model.add(Dense(64, activation=RELU_ACTIVATION, name=\"Hidden_Layer_2\"))\n self.model.add(Dense(16, activation=RELU_ACTIVATION, name=\"Hidden_Layer_3\"))\n self.model.add(Dense(units=NUMBER_OF_COUNTRIES, activation=LINEAR_ACTIVATION, name=\"output\"))\n self.model.compile(loss=root_mean_squared_error, optimizer=ADAM_OPTIMIZER, metrics=[r2_keras])\n # plot_model(self.model, show_shapes=True, show_layer_names=True, dpi=128, to_file=MLP_MAIN_MODEL_STRUCTURE)\n\n def save_model(self):\n \"\"\"\n This method saves a keras model into disk\n \"\"\"\n self.model.save(self.model_name)\n print('Saved model to disk ', self.model_name)\n\n def test_mlp_model(self, x_test, y_test, main_model):\n \"\"\"\n This method is used to evaluate the MLP models. 'main_model' is the flag which signifies whether we are\n evaluating for the main model or the feature models\n \"\"\"\n print('-------Evaluating------------:', self.model_name)\n self.model = load_model(self.model_name, custom_objects={'root_mean_squared_error': root_mean_squared_error,\n 'OPTIMIZER': ADAM_OPTIMIZER, 'r2_keras': r2_keras})\n if not main_model:\n x_test = x_test[:, :, 1:15]\n y_test = y_test[:, 1:15]\n else:\n y_test = y_test[:, :, 0]\n self.r2_score[self.feature_name] = self.model.evaluate(x_test, y_test)\n","sub_path":"src/Models/mlp_model.py","file_name":"mlp_model.py","file_ext":"py","file_size_in_byte":7226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"619022050","text":"@override_settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=True)\ndef test_non_localized_pk(self):\n 'If USE_THOUSAND_SEPARATOR is set, make sure that the ids for\\n the objects selected for deletion are rendered without separators.\\n Refs #14895.\\n '\n s = ExternalSubscriber.objects.create(id=9999)\n action_data = {\n ACTION_CHECKBOX_NAME: [s.pk, self.s2.pk],\n 'action': 'delete_selected',\n 'index': 0,\n }\n response = self.client.post(reverse('admin:admin_views_subscriber_changelist'), action_data)\n self.assertTemplateUsed(response, 'admin/delete_selected_confirmation.html')\n self.assertContains(response, 'value=\"9999\"')\n self.assertContains(response, ('value=\"%s\"' % self.s2.pk))","sub_path":"Data Set/bug-fixing-5/0841a31baf649f33d2cb7983705ae15fdcfea5f2-<test_non_localized_pk>-fix.py","file_name":"0841a31baf649f33d2cb7983705ae15fdcfea5f2-<test_non_localized_pk>-fix.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"166249823","text":"# Package import\nfrom __future__ import print_function, division\nfrom warnings import warn\nfrom nilmtk.disaggregate import Disaggregator\nimport os\nimport pickle\nimport pandas as pd\nimport numpy as np\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nimport random\nimport sys\nimport torch\nfrom torchsummary import summary\nimport torch.nn as nn\nimport 
torch.utils.data as tud\nfrom torch.utils.data.dataset import TensorDataset\nfrom torch.utils.tensorboard import SummaryWriter\nimport time\n\n# Fix the random seed to ensure the reproducibility of the experiment\nrandom_seed = 10\nrandom.seed(random_seed)\nnp.random.seed(random_seed)\ntorch.manual_seed(random_seed)\ntorch.cuda.manual_seed_all(random_seed)\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n# Use cuda or not\nUSE_CUDA = torch.cuda.is_available\n\nclass seq2point_Pytorch_MultiChannel(nn.Module):\n def __init__(self, sequence_length, ndim = 2):\n # Refer to \"ZHANG C, ZHONG M, WANG Z, et al. Sequence-to-point learning with neural networks for non-intrusive load monitoring[C].The 32nd AAAI Conference on Artificial Intelligence\"\n super(seq2point_Pytorch_MultiChannel, self).__init__()\n self.seq_length = sequence_length\n\n self.conv = nn.Sequential(\n nn.ConstantPad1d((4, 5), 0),\n nn.Conv1d(ndim, 30, 10, stride=1),\n nn.ReLU(True),\n nn.ConstantPad1d((3, 4), 0),\n nn.Conv1d(30, 30, 8, stride=1),\n nn.ReLU(True),\n nn.ConstantPad1d((2, 3), 0),\n nn.Conv1d(30, 40, 6, stride=1),\n nn.ReLU(True),\n nn.ConstantPad1d((2, 2), 0),\n nn.Conv1d(40, 50, 5, stride=1),\n nn.ReLU(True),\n nn.ConstantPad1d((2, 2), 0),\n nn.Conv1d(50, 50, 5, stride=1),\n nn.ReLU(True)\n )\n\n self.dense = nn.Sequential(\n nn.Linear(50 * sequence_length, 1024), \n nn.ReLU(),\n nn.Linear(1024, 1)\n )\n\n def forward(self, x):\n x = self.conv(x)\n x = self.dense(x.view(-1, 50 * self.seq_length))\n return x.view(-1, 1)\n\n\ndef initialize(layer):\n # Xavier_uniform will be applied to conv1d and dense layer, to be sonsistent with Keras and Tensorflow\n if isinstance(layer,nn.Conv1d) or isinstance(layer, nn.Linear): \n torch.nn.init.xavier_uniform_(layer.weight.data)\n if layer.bias is not None:\n torch.nn.init.constant_(layer.bias.data, val = 0.0)\n\ndef train(appliance_name, model, mains, appliance, epochs, batch_size, pretrain = False,checkpoint_interval = None, train_patience = 3):\n # Model configuration\n if USE_CUDA:\n model = model.cuda()\n if not pretrain:\n model.apply(initialize)\n # summary(model, (1, mains.shape[1]))\n # Split the train and validation set\n train_mains,valid_mains,train_appliance,valid_appliance = train_test_split(mains, appliance, test_size=.2, random_state = random_seed)\n\n # Create optimizer, loss function, and dataloader\n optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)\n loss_fn = torch.nn.MSELoss(reduction = 'mean')\n\n train_dataset = TensorDataset(torch.from_numpy(train_mains).float().permute(0,2,1), torch.from_numpy(train_appliance).float())\n train_loader = tud.DataLoader(train_dataset, batch_size = batch_size, shuffle = True, num_workers = 0, drop_last = True)\n\n valid_dataset = TensorDataset(torch.from_numpy(valid_mains).float().permute(0,2,1), torch.from_numpy(valid_appliance).float())\n valid_loader = tud.DataLoader(valid_dataset, batch_size = batch_size, shuffle = True, num_workers = 0, drop_last = True)\n\n writer = SummaryWriter(comment='train_visual')\n patience, best_loss = 0, None\n\n for epoch in range(epochs):\n # Earlystopping\n if(patience == train_patience):\n print(\"val_loss did not improve after {} Epochs, thus Earlystopping is calling\".format(train_patience))\n break \n # train the model\n model.train()\n st = time.time() \n for i, (batch_mains, batch_appliance) in enumerate(train_loader):\n if USE_CUDA:\n batch_mains = batch_mains.cuda()\n batch_appliance = batch_appliance.cuda()\n \n batch_pred = 
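The asymmetric `ConstantPad1d` values above implement 'same'-length convolutions for even kernel sizes: with stride 1, a kernel of size k needs k - 1 total padding, split as (k - 1) // 2 on the left and k // 2 on the right, hence (4, 5) for k = 10 and (3, 4) for k = 8. The sequence length therefore survives every layer, which the `view(-1, 50 * self.seq_length)` reshape depends on. (Note also that `USE_CUDA = torch.cuda.is_available` stores the function object itself, which is always truthy; the intended expression is the call `torch.cuda.is_available()`.) A quick shape check:

import torch
import torch.nn as nn

pad_conv = nn.Sequential(nn.ConstantPad1d((4, 5), 0), nn.Conv1d(2, 30, 10, stride=1))
x = torch.randn(8, 2, 129)  # (batch, channels, sequence_length)
print(pad_conv(x).shape)    # torch.Size([8, 30, 129]): length preserved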
model(batch_mains)\n loss = loss_fn(batch_appliance, batch_pred)\n\n model.zero_grad() \n loss.backward()\n optimizer.step()\n ed = time.time()\n\n # Evaluate the model \n model.eval()\n with torch.no_grad():\n cnt, loss_sum = 0, 0\n for i, (batch_mains, batch_appliance) in enumerate(valid_loader):\n if USE_CUDA:\n batch_mains = batch_mains.cuda()\n batch_appliance = batch_appliance.cuda()\n \n batch_pred = model(batch_mains)\n loss = loss_fn(batch_appliance, batch_pred)\n loss_sum += loss\n cnt += 1\n \n final_loss = loss_sum / cnt\n final_loss = loss_sum / cnt\n # Save best only\n if best_loss is None or final_loss < best_loss:\n best_loss = final_loss\n patience = 0\n net_state_dict = model.state_dict()\n path_state_dict = \"./\"+appliance_name+\"_mul_seq2point_best_state_dict.pt\"\n torch.save(net_state_dict, path_state_dict)\n else:\n patience = patience + 1 \n\n print(\"Epoch: {}, Valid_Loss: {}, Time consumption: {}s.\".format(epoch, final_loss, ed - st))\n\n # For the visualization of training process\n for name,param in model.named_parameters():\n writer.add_histogram(name + '_grad', param.grad, epoch)\n writer.add_histogram(name + '_data', param, epoch)\n writer.add_scalars(\"MSELoss\", {\"Valid\":final_loss}, epoch)\n\n # Save checkpoint\n if (checkpoint_interval != None) and ((epoch + 1) % checkpoint_interval == 0):\n checkpoint = {\"model_state_dict\": model.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch}\n path_checkpoint = \"./\"+appliance_name+\"_mul_seq2point_{}_epoch.pkl\".format(epoch)\n torch.save(checkpoint, path_checkpoint)\n\ndef test(model, test_mains, batch_size = 512):\n # Model test\n st = time.time()\n model.eval()\n # Create test dataset and dataloader\n batch_size = test_mains.shape[0] if batch_size > test_mains.shape[0] else batch_size\n test_dataset = TensorDataset(torch.from_numpy(test_mains).float().permute(0,2,1))\n test_loader = tud.DataLoader(test_dataset, batch_size = batch_size, shuffle = False, num_workers = 0)\n with torch.no_grad():\n for i, batch_mains in enumerate(test_loader):\n batch_pred = model(batch_mains[0])\n if i == 0:\n res = batch_pred\n else:\n res = torch.cat((res, batch_pred), dim = 0)\n ed = time.time()\n print(\"Inference Time consumption: {}s.\".format(ed - st))\n return res.numpy()\n\nclass MUL_Seq2Point(Disaggregator):\n\n def __init__(self, params):\n self.MODEL_NAME = \"MUL_Seq2Point\"\n self.models = OrderedDict()\n self.sequence_length = params.get('sequence_length',129)\n self.n_epochs = params.get('n_epochs', 10 )\n self.batch_size = params.get('batch_size',512)\n self.appliance_params = params.get('appliance_params',{})\n self.mains_mean = params.get('mains_mean',None)\n self.mains_std = params.get('mains_std',None)\n if self.sequence_length % 2 == 0:\n print (\"Sequence length should be odd!\")\n raise (SequenceLengthError)\n\n def partial_fit(self,train_main,train_appliances,pretrain = False, do_preprocessing=True, **load_kwargs):\n # Seq2Point version\n # If no appliance wise parameters are provided, then copmute them using the first chunk\n if len(self.appliance_params) == 0:\n self.set_appliance_params(train_appliances)\n\n print(\"...............Seq2Point partial_fit running...............\")\n # Preprocess the data and bring it to a valid shape\n\n if do_preprocessing:\n print(train_main)\n train_main, train_appliances = self.call_preprocessing(\n train_main, train_appliances, 'train')\n\n train_main = np.concatenate(train_main, axis=0)\n # train_main = 
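The checkpoint dict saved above restores symmetrically when a run is resumed; a short sketch mirroring the keys used in the save call:

checkpoint = torch.load(path_checkpoint)
model.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
start_epoch = checkpoint["epoch"] + 1  # continue from the epoch after the snapshot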
train_main.values.reshape((-1,self.sequence_length,1))\n \n new_train_appliances = []\n for app_name, app_arr in train_appliances:\n app_arr = np.concatenate(app_arr, axis = 0)\n # app_df_values = app_df.values.reshape((-1,1))\n new_train_appliances.append((app_name, app_arr))\n train_appliances = new_train_appliances\n\n for appliance_name, power in train_appliances:\n if appliance_name not in self.models:\n print(\"First model training for \", appliance_name)\n self.models[appliance_name] = seq2point_Pytorch_MultiChannel(self.sequence_length, ndim = train_main.shape[2])\n # Load pretrain dict or not\n if pretrain is True:\n self.models[appliance_name].load_state_dict(torch.load(\"./\"+appliance_name+\"_mul_seq2point_pre_state_dict.pt\"))\n\n model = self.models[appliance_name]\n train(appliance_name, model, train_main, power, self.n_epochs, self.batch_size,pretrain = False,checkpoint_interval = None)\n # Model test will be based on the best model\n self.models[appliance_name].load_state_dict(torch.load(\"./\"+appliance_name+\"_mul_seq2point_best_state_dict.pt\"))\n\n\n def disaggregate_chunk(self,test_main_list,model=None,do_preprocessing=True):\n # Disaggregate (test process)\n if do_preprocessing:\n test_main_list = self.call_preprocessing(test_main_list, submeters_lst = None, method='test')\n\n test_predictions = []\n for test_main in test_main_list:\n # test_main = test_main.values\n # test_main = test_main.reshape((-1, self.sequence_length, 1))\n disggregation_dict = {}\n for appliance in self.models:\n # Move the model to cpu, and then test it\n model = self.models[appliance].to('cpu')\n prediction = test(model, test_main)\n prediction = self.appliance_params[appliance]['mean'] + prediction * self.appliance_params[appliance]['std']\n valid_predictions = prediction.flatten()\n valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)\n df = pd.Series(valid_predictions)\n disggregation_dict[appliance] = df\n results = pd.DataFrame(disggregation_dict, dtype='float32')\n test_predictions.append(results)\n return test_predictions\n\n def call_preprocessing(self, mains_lst, submeters_lst, method):\n # Seq2Point Version\n n = self.sequence_length\n units_to_pad = n // 2\n if method == 'train':\n # Preprocess the main and appliance data, the parameter 'overlapping' will be set 'True'\n mains_list = []\n for mains in mains_lst:\n new_mains = mains.values\n power_all = []\n for i in range(mains.shape[1]):\n power = new_mains[:,i].flatten()\n mains_mean, mains_std = np.mean(power), np.std(power) \n power = np.pad(power, (units_to_pad, units_to_pad),'constant',constant_values=(0, 0))\n power = np.array([power[j:j + n] for j in range(len(power) - n + 1)])\n power = (power - mains_mean) / mains_std\n power_all.append(power.reshape(-1, n, 1))\n power_all = np.concatenate(power_all, axis = 2)\n mains_list.append(power_all)\n\n appliance_list = []\n for app_index, (app_name, app_df_list) in enumerate(submeters_lst):\n if app_name in self.appliance_params:\n app_mean = self.appliance_params[app_name]['mean']\n app_std = self.appliance_params[app_name]['std']\n else:\n print (\"Parameters for \", app_name ,\" were not found!\")\n raise ApplianceNotFoundError()\n\n processed_appliance = []\n\n for app_df in app_df_list:\n new_app_readings = app_df.values.reshape((-1, 1))\n new_app_readings = (new_app_readings - app_mean) / app_std \n processed_appliance.append(new_app_readings)\n appliance_list.append((app_name, processed_appliance))\n return mains_list, appliance_list\n\n else:\n # 
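The padding arithmetic in the train branch yields exactly one centred window per original timestep: with odd n = `sequence_length` and `units_to_pad = n // 2`, a series of length T grows to T + n - 1 samples, and the comprehension then produces (T + n - 1) - n + 1 = T windows, matching the T point-wise targets. For example:

import numpy as np

n = 129
units_to_pad = n // 2                 # 64
power = np.arange(1000, dtype=float)  # T = 1000 samples
padded = np.pad(power, (units_to_pad, units_to_pad), 'constant',
                constant_values=(0, 0))
windows = np.array([padded[j:j + n] for j in range(len(padded) - n + 1)])
print(windows.shape)                  # (1000, 129): one window per sample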
Preprocess the main data only, the parameter 'overlapping' will be set 'False'\n mains_list = []\n for mains in mains_lst:\n new_mains = mains.values\n power_all = []\n for i in range(mains.shape[1]):\n power = new_mains[:,i].flatten()\n mains_mean, mains_std = np.mean(power), np.std(power) \n power = np.pad(power, (units_to_pad, units_to_pad),'constant',constant_values=(0, 0))\n power = np.array([power[j:j + n] for j in range(len(power) - n + 1)])\n power = (power - mains_mean) / mains_std\n power_all.append(power.reshape(-1, n, 1))\n power_all = np.concatenate(power_all, axis = 2)\n mains_list.append(power_all)\n return mains_list\n\n def set_appliance_params(self, train_appliances):\n # Set appliance mean and std to normalize the label(appliance data)\n for (app_name, df_list) in train_appliances:\n l = np.array(pd.concat(df_list, axis = 0))\n app_mean = np.mean(l)\n app_std = np.std(l)\n self.appliance_params.update({app_name:{'mean':app_mean,'std':app_std}})","sub_path":"nilmtk/disaggregate/seq2point_pytorch_multidim.py","file_name":"seq2point_pytorch_multidim.py","file_ext":"py","file_size_in_byte":14198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"521854462","text":"#!/usr/bin/python \n#https://w3resource.com/PostgreSQL/postgresql-subqueries.php\nimport psycopg2\nfrom config import config\n\ndef sql_creer_table_articles():\n return \"\"\"CREATE TABLE IF NOT EXISTS Articles (\n id SERIAL PRIMARY KEY,\n nom VARCHAR(255) NOT NULL,\n description VARCHAR,\n prix REAL\n )\n \"\"\"\n\ndef sql_creer_table_clients():\n return \"\"\" CREATE TABLE IF NOT EXISTS Clients(\n id SERIAL PRIMARY KEY,\n nom VARCHAR NOT NULL,\n addresse VARCHAR(255) NOT NULL,\n telephone INT NOT NULL,\n email VARCHAR\n )\n \"\"\"\n\ndef sql_creer_table_vendeurs():\n return \"\"\"\n CREATE TABLE IF NOT EXISTS Vendeurs (\n id SERIAL PRIMARY KEY,\n nom VARCHAR NOT NULL,\n addresse VARCHAR(255) NOT NULL,\n telephone INT NOT NULL,\n email VARCHAR\n )\n \"\"\"\n\ndef sql_creer_table_utilisateurs():\n return \"\"\"\n CREATE TABLE IF NOT EXISTS Utilisateurs(\n id SERIAL PRIMARY KEY,\n nom VARCHAR NOT NULL,\n addresse VARCHAR(255),\n email VARCHAR NOT NULL,\n mot_de_passe text NOT NULL,\n telephone INT )\"\"\"\n\ndef creer_tables():\n \"\"\" create tables in the PostgreSQL database\"\"\"\n\n # Articles\n commands = (\n sql_creer_table_articles(),\n sql_creer_table_clients(),\n sql_creer_table_vendeurs(),\n sql_creer_table_utilisateurs())\n \n conn = None\n try:\n # read the connection parameters\n params = config()\n # connect to the PostgreSQL server\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n # create table one by one\n for command in commands:\n cur.execute(command)\n # commit the changes\n conn.commit()\n # close communication with the PostgreSQL database server\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\n print(\"TABS CREATION DONE SUCCESFULLY\") \n \ndef supprimer_table(table):\n sql = \" DROP TABLE {}\".format(table)\n \n conn = None\n try:\n # read the connection parameters\n params = config()\n # connect to the PostgreSQL server\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n # create table one by one\n cur.execute(sql)\n # commit the changes\n conn.commit()\n # close communication with the PostgreSQL database server\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n 
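The helpers in this record create, drop, and display tables but never insert rows. With psycopg2, values should always travel through `%s` placeholders so the driver handles quoting; string formatting, as in the `DROP TABLE {}` helper, is only tolerable there because table names cannot be parameterized and the callers are hard-coded. A sketch in the file's own style; `ajouter_article` is a hypothetical name:

def ajouter_article(nom, description, prix):
    sql = "INSERT INTO Articles (nom, description, prix) VALUES (%s, %s, %s) RETURNING id"
    conn = None
    try:
        conn = psycopg2.connect(**config())
        cur = conn.cursor()
        cur.execute(sql, (nom, description, prix))  # driver-side quoting
        article_id = cur.fetchone()[0]
        conn.commit()
        cur.close()
        return article_id
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()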
conn.close()\n\n print(\"TABLE DROP DONE SUCCESFULLY\") \n\ndef afficher_table(table):\n sql = \"SELECT * FROM {}\".format(table)\n \n conn = None\n try:\n # read the connection parameters\n params = config()\n # connect to the PostgreSQL server\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n # create table one by one\n cur.execute(sql)\n vendeurs = cur.fetchall()\n for vendeur in vendeurs:\n print(vendeur)\n\n # close communication with the PostgreSQL database server\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\ndef afficher_base_de_donnees():\n pass\n\n \nif __name__ == '__main__':\n # supprimer_table(\"Utilisateurs\")\n # creer_tables()\n afficher_table(\"Articles\")\n # afficher_base_de_donnees()\n #pass\n","sub_path":"tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"444824826","text":"#!/usr/bin/env python2\r\n# -*- coding: utf-8 -*-\r\n\r\n# Implementing and plotting various distance metrics between distributions\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim\r\nfrom torch.autograd import Variable\r\nfrom torch.autograd import grad\r\nimport numpy as np\r\n\r\nfrom samplers import distribution1, distribution3, distribution4\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n# Utils function\r\n\r\n# calculation of the jensen shannon divergence objective function\r\n\r\n\r\ndef jsd_objective(Discrim, x_p, y_q):\r\n jsd_objectiv = torch.log(torch.Tensor([2])) + 0.5 * torch.log(\r\n Discrim(x_p)).mean() + 0.5 * torch.log(1 - Discrim(y_q)).mean()\r\n return jsd_objectiv\r\n\r\n# calculation of the wasserstein distance objective function\r\n\r\n\r\ndef wd_objective(Critic, x_p, y_q):\r\n wd_objectiv = Critic(x_p).mean() - Critic(y_q).mean()\r\n return wd_objectiv\r\n\r\n# inspired by https://github.com/EmilienDupont/wgan-gp/blob/master/training.py\r\n\r\n\r\ndef gradient_penalty(Critic, x_p, y_q, lamda):\r\n alfa = x_p.size()[0]\r\n alfa = torch.rand(alfa, 1, device=x_p.device)\r\n alfa = alfa.expand_as(x_p)\r\n\r\n interpolate_z = Variable(alfa * x_p + (1 - alfa) * y_q, requires_grad=True)\r\n\r\n inputs = interpolate_z\r\n outputs = Critic(interpolate_z)\r\n\r\n gradients = grad(outputs, inputs, torch.ones(Critic(interpolate_z).size()),\r\n create_graph=True, retain_graph=True, only_inputs=True)[0]\r\n\r\n gradient_norm = gradients.norm(2, dim=1)\r\n\r\n GP = lamda * ((gradient_norm - 1) ** 2).mean()\r\n return GP\r\n\r\n\r\nclass MLP(nn.Module):\r\n def __init__(self, input_dim):\r\n super(MLP, self).__init__()\r\n\r\n self.model = nn.Sequential(\r\n nn.Linear(input_dim, 64),\r\n nn.LeakyReLU(),\r\n nn.Linear(64, 128),\r\n nn.LeakyReLU(),\r\n nn.Linear(128, 128),\r\n nn.LeakyReLU(),\r\n nn.Linear(128, 1),\r\n )\r\n\r\n def forward(self, x):\r\n return self.model(x)\r\n\r\n# JSD based on the MLP with sigmoid at the output\r\n\r\n\r\nclass jsd_mlp(nn.Module):\r\n def __init__(self, input_dim):\r\n super(jsd_mlp, self).__init__()\r\n self.model = nn.Sequential(\r\n MLP(input_dim),\r\n nn.Sigmoid()\r\n )\r\n\r\n def forward(self, x):\r\n return self.model(x)\r\n\r\n########### Question 1.1 ############\r\n\r\n\r\ndef js_divergence(p, q, m_minibatch=1000):\r\n x_p = next(p)\r\n y_q = next(q)\r\n x_p = torch.Tensor(x_p)\r\n y_q = torch.Tensor(y_q)\r\n\r\n Discrim = jsd_mlp(input_dim=x_p.size()[1])\r\n\r\n optimizer_D = 
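In symbols, the quantities the two training loops maximize are the Jensen-Shannon lower bound and the gradient-penalized Wasserstein (WGAN-GP) objective, matching `jsd_objective`, `wd_objective`, and `gradient_penalty` above:

\hat{J}(D) = \log 2 + \frac{1}{2}\,\mathbb{E}_{x \sim p}[\log D(x)] + \frac{1}{2}\,\mathbb{E}_{y \sim q}[\log(1 - D(y))]

\hat{W}(T) = \mathbb{E}_{x \sim p}[T(x)] - \mathbb{E}_{y \sim q}[T(y)] - \lambda\,\mathbb{E}_{\hat{z}}\big[(\lVert \nabla_{\hat{z}} T(\hat{z}) \rVert_2 - 1)^2\big], \quad \hat{z} = \alpha x + (1 - \alpha) y,\ \alpha \sim U[0, 1]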
torch.optim.Adagrad(Discrim.parameters())\r\n\r\n for mini_batch in range(m_minibatch):\r\n optimizer_D.zero_grad()\r\n jsd_loss = jsd_objective(Discrim, x_p, y_q)\r\n\r\n jsd_loss.backward(torch.FloatTensor([-1]))\r\n optimizer_D.step()\r\n Jsd = jsd_objective(Discrim, x_p, y_q)\r\n return Discrim, Jsd\r\n\r\n########### Question 1.2 ############\r\n\r\n\r\ndef w_distance(p, q, m_minibatch=1000, lamda=10):\r\n x_p = next(p)\r\n y_q = next(q)\r\n x_p = torch.Tensor(x_p)\r\n y_q = torch.Tensor(y_q)\r\n # based on mlp with no activation added\r\n Critic = MLP(input_dim=x_p.size()[1])\r\n\r\n optimizer_T = torch.optim.Adagrad(Critic.parameters())\r\n\r\n for mini_batch in range(m_minibatch):\r\n optimizer_T.zero_grad()\r\n wd = wd_objective(Critic, x_p, y_q)\r\n wd_loss = wd - gradient_penalty(Critic, x_p, y_q, lamda=10)\r\n\r\n wd_loss.backward(torch.FloatTensor([-1]))\r\n optimizer_T.step()\r\n Wd = wd_objective(Critic, x_p, y_q)\r\n penalty = gradient_penalty(Critic, x_p, y_q, lamda)\r\n Wd = Wd - penalty\r\n return Critic, Wd\r\n\r\n\r\n########### Question 1.3 ############\r\n\r\nPhi_values = [-1 + 0.1 * i for i in range(21)]\r\n\r\nestimated_jsd, estimated_wd = [], []\r\n\r\nfor Phi in Phi_values:\r\n\r\n dist_p = distribution1(0, batch_size=512)\r\n\r\n dist_q = distribution1(Phi, batch_size=512)\r\n\r\n Discrim, jsd = js_divergence(dist_p, dist_q, m_minibatch=1000)\r\n estimated_jsd.append(jsd)\r\n\r\n Critic, wd = w_distance(dist_p, dist_q, m_minibatch=1000, lamda=10)\r\n estimated_wd.append(wd)\r\n\r\n # TO DO\r\n print(\r\n f\"Phi: {Phi:.2f} estimated JSD: {jsd.item():.6f} estimated WD: {wd.item():.6f}\")\r\n\r\nplt.figure(figsize=(8, 4))\r\nplt.plot(Phi_values, estimated_jsd)\r\nplt.plot(Phi_values, estimated_wd)\r\nplt.title('JSD and WD in terms of phi')\r\nplt.xlabel('Phi values')\r\nplt.ylabel('estimate')\r\nplt.legend([\"estimated JSD\", \"estimated WD\"])\r\n\r\nplt.savefig('estimated JSD & WD.png')\r\nplt.show()\r\n\r\n\r\n########### Question 1.4 ############\r\n\r\n# plot p0 and p1\r\nplt.figure()\r\n\r\n# empirical\r\nxx = torch.randn(10000)\r\n\r\n\r\ndef f(x):\r\n return torch.tanh(x * 2 + 1) + x * 0.75\r\n\r\n\r\ndef d(x):\r\n return (1 - torch.tanh(x * 2 + 1)**2) * 2 + 0.75\r\n\r\n\r\nplt.hist(f(xx), 100, alpha=0.5, density=1)\r\nplt.hist(xx, 100, alpha=0.5, density=1)\r\nplt.xlim(-5, 5)\r\n# exact\r\nxx = np.linspace(-5, 5, 1000)\r\n\r\n\r\ndef N(x):\r\n return np.exp(-x**2 / 2.) 
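Question 1.4 below leans on the optimal-discriminator identity: a discriminator trained between p (the unknown density, `distribution4`) and q (the standard normal, `distribution3`) converges to

D^*(x) = \frac{p(x)}{p(x) + q(x)} \quad\Longrightarrow\quad p(x) = q(x)\,\frac{D^*(x)}{1 - D^*(x)},

which is exactly what the `estimate = N(xx) * r / (1 - r)` line computes from the learned discriminator output r.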
/ ((2 * np.pi)**0.5)\r\n\r\n\r\nplt.plot(f(torch.from_numpy(xx)).numpy(), d(\r\n torch.from_numpy(xx)).numpy()**(-1) * N(xx))\r\nplt.plot(xx, N(xx))\r\n\r\nbatch_size = 512\r\nm_minibatch = 100\r\n\r\np_iter = iter(distribution3(batch_size))\r\nfo = p_iter\r\n\r\nq_iter = iter(distribution4(batch_size))\r\nf1 = q_iter\r\n\r\nDiscrim, jsd = js_divergence(f1, fo, m_minibatch)\r\nDiscrim = Discrim(torch.Tensor(xx).unsqueeze(dim=1))\r\nr = Discrim.detach().numpy().reshape(-1)\r\n\r\nplt.figure(figsize=(8, 4))\r\nplt.subplot(1, 2, 1)\r\nplt.plot(xx, r)\r\nplt.title(r'$D(x)$')\r\n# estimate the density of distribution4 (on xx) using the discriminator;\r\nestimate = N(xx) * r / (1 - r)\r\n\r\nplt.subplot(1, 2, 2)\r\nplt.plot(xx, estimate)\r\nplt.plot(f(torch.from_numpy(xx)).numpy(), d(\r\n torch.from_numpy(xx)).numpy()**(-1) * N(xx))\r\nplt.legend(['Estimated', 'True'])\r\nplt.title('Estimated vs True')\r\n","sub_path":"image_tasks/distance_metrics/density_estimation.py","file_name":"density_estimation.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"418595153","text":"from typing import List, Tuple, Union, Dict, Any, Optional\nfrom box_embeddings.common.registrable import Registrable\nimport tensorflow as tf\nfrom box_embeddings.parameterizations.tf_box_tensor import TFBoxTensor\n\n\nclass TFBoxRegularizer(tf.Module, Registrable):\n\n \"\"\"Base box-regularizer class\"\"\"\n\n def __init__(\n self,\n weight: float,\n log_scale: bool = True,\n reduction: str = 'sum',\n **kwargs: Any,\n ) -> None:\n \"\"\"\n Args:\n weight: Weight (hyperparameter) given to this regularization in the overall loss.\n log_scale: Whether the output should be in log scale or not.\n Should be true in almost any practical case where box_dim>5.\n reduction: Specifies the reduction to apply to the output: 'mean': the sum of the output will be divided by\n the number of elements in the output, 'sum': the output will be summed. 
Default: 'sum'\n kwargs: Unused\n \"\"\"\n super().__init__() # type:ignore\n self.weight = weight\n self.log_scale = log_scale\n self.reduction = reduction\n\n def __call__(self, box_tensor: TFBoxTensor) -> Union[float, tf.Tensor]:\n \"\"\"Calls the _forward and multiplies the weight\n\n Args:\n box_tensor: Input box tensor\n\n Returns:\n scalar regularization loss\n \"\"\"\n\n return self.weight * self._reduce(self._forward(box_tensor))\n\n def _forward(self, box_tensor: TFBoxTensor) -> tf.Tensor:\n raise NotImplementedError\n\n def _reduce(self, reg_unreduced: tf.Tensor) -> tf.Tensor:\n if self.reduction == \"sum\":\n return tf.reduce_sum(reg_unreduced)\n elif self.reduction == \"mean\":\n return tf.reduce_mean(reg_unreduced)\n else:\n raise ValueError\n","sub_path":"box_embeddings/modules/regularization/tf_regularizer.py","file_name":"tf_regularizer.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"141468547","text":"######################################################################\n# Author: Concepta Njolima,Ben Maynard\n# Username: njolimac, Maynardb\n#\n# Assignment: T02: Exploring Turtles in Python\n# Purpose: Draw two letters that represent our team.\n#\n######################################################################\n\nimport turtle #importing the turtle class\nwn = turtle.Screen()\n# set the background color to blue\nwn.bgcolor(\"blue\")\n\n# Assigning variables design, concepta and ben to the turtles\nconcepta = turtle.Turtle()\nben = turtle.Turtle()\ndesign = turtle.Turtle()\nframe = turtle.Turtle()\n\n# Assigning turtle shape to turtle\nconcepta.shape(\"turtle\")\nben.shape(\"turtle\")\n\n#Assigning color to the turtles\nconcepta.color(\"red\")\nben.color(\"black\")\nframe.color(\"red\")\n# Choose a pen size and size\npen = 7\nsize = 1\nconcepta.pensize(pen)\nben.pensize(pen)\nframe.pensize(10)\n#Draw a background design\n\nfor i in range(130):\n design.stamp() # Leave an impression on the canvas\n design.penup()\n design.speed(0)\n size = size + 2 # Increase the size on every iteration by 2\n design.forward(size) # Move tess along\n design.right(56) # ... 
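A concrete regularizer only has to implement `_forward`; the weighting and reduction come from the base class above. A sketch that penalizes squared side lengths, assuming the library's usual `z`/`Z` (min/max corner) attributes on `TFBoxTensor` and ignoring the `log_scale` flag for brevity, so treat it as illustrative rather than drop-in:

class TFL2SideRegularizer(TFBoxRegularizer):
    """Discourages boxes from growing without bound."""

    def _forward(self, box_tensor: TFBoxTensor) -> tf.Tensor:
        side_lengths = box_tensor.Z - box_tensor.z  # per-dimension extents
        return tf.reduce_sum(side_lengths ** 2, axis=-1)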
and turn her\n\n# draw the frame\nframe.penup()\nframe.forward(280)\nframe.left(90)\nframe.forward(280)\nframe.pendown()\nfor k in range(4):\n frame.left(90)\n frame.forward(600)\n# Draw the letters\n\nconcepta.penup()\nconcepta.left(90)\nconcepta.forward(100)\nconcepta.pendown()\nconcepta.left(90)\nconcepta.forward(100)\nconcepta.left(90)\nconcepta.forward(100)\nconcepta.left(90)\nconcepta.forward(100)\nconcepta.penup()\n\nben.penup()\nben.forward(100)\nben.left(180)\nben.forward(80)\nben.pendown()\nben.right(180)\nben.forward(80)\nben.left(90)\nben.forward(75)\nben.left(90)\nben.forward(150)\nben.left(90)\nben.forward(150)\nben.left(90)\nben.forward(150)\nben.left(90)\nben.forward(75)\nben.penup()\n\n\n\n\n\n\n\n\n\n","sub_path":"Ben,Concepta-Turtle.py","file_name":"Ben,Concepta-Turtle.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"336049812","text":"import numpy as np\nimport pylab as pl\n\nimport pycbc.psd\nimport pycbc.filter\nimport pycbc.waveform\nfrom pycbc import frame\n\nimport configparser\n\n\ndef get_trigger_time(configfile, framefile, channel_name):\n \"\"\"\n Given a frame file with GW data and parameters used to create the\n frame file specified in an ini file this function computes the\n matched filter output and outputs the time of the trigger as \n detected in that interferometer.\n \"\"\"\n \n configParser = configparser.ConfigParser()\n configParser.read(configfile)\n # Intrinsic parameters\n mass1 = configParser.getfloat('intrinsic', 'mass1')\n mass2 = configParser.getfloat('intrinsic', 'mass2')\n spin1x = configParser.getfloat('intrinsic', 'spin1x')\n spin1y = configParser.getfloat('intrinsic', 'spin1y')\n spin1z = configParser.getfloat('intrinsic', 'spin1z')\n spin2x = configParser.getfloat('intrinsic', 'spin2x')\n spin2y = configParser.getfloat('intrinsic', 'spin2y')\n spin2z = configParser.getfloat('intrinsic', 'spin2z')\n lambda1 = configParser.getfloat('intrinsic', 'lambda1')\n lambda2 = configParser.getfloat('intrinsic', 'lambda2')\n \n # Extrinsic parameter\n inclination = configParser.getfloat('extrinsic', 'inclination')\n distance = configParser.getfloat('extrinsic', 'distance')\n ra = configParser.getfloat('extrinsic', 'ra')\n dec = configParser.getfloat('extrinsic', 'dec')\n polarization = configParser.getfloat('extrinsic', 'polarization')\n coa_phase = configParser.getfloat('extrinsic', 'coa_phase')\n t_coa = configParser.getfloat('extrinsic', 't_coa')\n\n # Other parameters\n f_lower = configParser.getfloat('other', 'f_lower')\n f_ref = configParser.getfloat('other', 'f_ref')\n approximant = configParser.get('other', 'approximant')\n srate = configParser.getint('other', 'srate')\n asdfile_ligo = configParser.get('other', 'asdfile_ligo')\n asdfile_virgo = configParser.get('other', 'asdfile_virgo')\n \n # Reading in the frame file and converting strain data to frequency series\n frame_data = frame.read_frame(framefile, channel_name)\n stilde = frame_data.to_frequencyseries()\n \n # Creating template with exactly same parameter used to make frame\n delta_t = 1/srate\n hp, hc = pycbc.waveform.get_td_waveform(approximant='IMRPhenomPv2_NRTidalv2',\n mass1=mass1, mass2=mass2,\n lambda1=lambda1, lambda2=lambda2,\n spin1x=spin1x, spin1y=spin1y, spin1z=spin1z,\n spin2x=spin2x, spin2y=spin2y, spin2z=spin2z,\n distance=distance, inclination=inclination,\n coa_phase=coa_phase, f_lower=f_lower, f_ref=f_ref,\n polarization=polarization, delta_t=1/srate)\n \n # 
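Up to conventions, the complex SNR series that `pycbc.filter.matched_filter` evaluates below is the noise-weighted correlation of template and data,

\rho(t) = \frac{4}{\sigma} \int_{f_{\mathrm{low}}}^{\infty} \frac{\tilde{h}^{*}(f)\,\tilde{s}(f)}{S_n(f)}\, e^{2\pi i f t}\, df, \qquad \sigma^{2} = 4 \int_{f_{\mathrm{low}}}^{\infty} \frac{|\tilde{h}(f)|^{2}}{S_n(f)}\, df,

and the trigger time is the sample at which |\rho(t)| peaks, which is what the `np.argmax(rho)` line extracts.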
Converting template to frequency series of same delta_f as the frame\n htilde = hp.to_frequencyseries(delta_f=stilde.delta_f)\n \n # Creating LIGO and Virgo PSD objects from ASD data files\n ligo_psd = pycbc.psd.from_txt(asdfile_ligo, int(4096/stilde.delta_f) + 1,\n stilde.delta_f, low_freq_cutoff=11)\n virgo_psd = pycbc.psd.from_txt(asdfile_virgo, int(4096/stilde.delta_f) + 1,\n stilde.delta_f, low_freq_cutoff=11)\n \n # Resizing both template and data frequency series to that of the PSD\n htilde.resize(len(ligo_psd))\n stilde.resize(len(ligo_psd))\n \n # Compute matched-fliter output for the frame\n snr_L = pycbc.filter.matched_filter(htilde, stilde, psd=ligo_psd, low_frequency_cutoff=f_lower)\n \n rho = np.abs(snr_L.data)\n times = snr_L.sample_times.data\n \n pl.rcParams.update({'font.size': 18})\n pl.figure(figsize=(12,10))\n pl.plot(times, rho, 'r')\n im_file_name = framefile.split(\".\")[0] + \".png\"\n pl.savefig(im_file_name)\n \n trigger_time = times[np.argmax(rho)]\n print(\"Trigger arrived at t = {}\".format(trigger_time))\n print(\"Time of coalescence = {}\".format(t_coa))\n observed_coa_time = trigger_time + hp.duration\n print(\"Observed coalescence time = {}\".format(observed_coa_time))\n print(\"Time difference = {} ms\".format((observed_coa_time - t_coa)*1e3))\n\n return trigger_time\n\n","sub_path":"get_injection_times.py","file_name":"get_injection_times.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"34656606","text":"__author__ = 'cabarca'\n\nclass Graph():\n def __init__(self):\n from os import path\n location = path.dirname(path.abspath(__file__))+\"/\"\n try:\n fhandler = open(location+\"ascii_art.txt\")\n except IOError:\n return None\n\n self.__hangmanArray = list()\n graphStr = None\n for line in fhandler:\n if line.startswith(\"#\"):\n if graphStr is not None:\n self.__hangmanArray.append(graphStr)\n graphStr = \"\\n\"\n else:\n graphStr += line\n\n def getHangman(self, guess):\n if guess > len(self.__hangmanArray):\n return None\n return self.__hangmanArray[guess-1]\n\n @property\n def availableGuesses(self):\n return len(self.__hangmanArray)\n","sub_path":"projects/hangman/resources/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"407996451","text":"# Calculate the payments for a fixed term mortgage over N terms at\n# given interest rate. Allow users to select compounding interval.\n\ndef mortgage_calc(principal = 0, N = 0, rate = 0, compound = 12, interactive = False):\n '''User may specify the number of compoundings per year. 
Default is 12 (monthly).\n The rate should be yearly rate entered in decimal form.\n If interactive is True, the user is prompted for input values.'''\n if interactive == False:\n payment = principal*rate/compound*(1+rate/compound)**N/((1+rate/compound)**N-1)\n else:\n principal = float(input('Enter the principal: '))\n N = float(input('Enter the number of payments: '))\n rate = float(input('Enter the yearly interest rate in decimal form: '))\n compound = float(input('Enter the number of compoundings per year (12 for monthly): '))\n payment = principal*rate/compound*(1+rate/compound)**N/((1+rate/compound)** N-1)\n return payment","sub_path":"Numbers/mortgage_calc.py","file_name":"mortgage_calc.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"515250255","text":"from uuid import uuid4\n\nimport pytest\n\nfrom atat.domain.applications import Applications\nfrom atat.domain.exceptions import NotFoundError\nfrom atat.domain.permission_sets import PermissionSets\nfrom atat.domain.portfolios import (\n PortfolioDeletionApplicationsExistError,\n Portfolios,\n PortfolioStateMachines,\n)\nfrom atat.models import ApplicationRoleStatus, PortfolioRoleStatus, PortfolioStates\nfrom tests.factories import (\n ApplicationFactory,\n ApplicationRoleFactory,\n PortfolioFactory,\n PortfolioRoleFactory,\n UserFactory,\n get_all_portfolio_permission_sets,\n)\nfrom tests.utils import EnvQueryTest\n\n\n@pytest.fixture(scope=\"function\")\ndef portfolio_owner():\n return UserFactory.create()\n\n\n@pytest.fixture(scope=\"function\")\ndef portfolio(portfolio_owner):\n portfolio = PortfolioFactory.create(owner=portfolio_owner)\n return portfolio\n\n\ndef test_can_create_portfolio():\n portfolio = PortfolioFactory.create(name=\"frugal-whale\")\n assert portfolio.name == \"frugal-whale\"\n\n\ndef test_get_nonexistent_portfolio_raises():\n with pytest.raises(NotFoundError):\n Portfolios.get(UserFactory.build(), uuid4())\n\n\ndef test_creating_portfolio_adds_owner(portfolio, portfolio_owner):\n assert portfolio.roles[0].user == portfolio_owner\n\n\ndef test_portfolio_has_timestamps(portfolio):\n assert portfolio.time_created == portfolio.time_updated\n\n\ndef test_update_portfolio_role_role(portfolio, portfolio_owner):\n PortfolioRoleFactory._meta.sqlalchemy_session_persistence = \"flush\"\n member = PortfolioRoleFactory.create(portfolio=portfolio)\n permission_sets = [PermissionSets.EDIT_PORTFOLIO_FUNDING]\n\n updated_member = Portfolios.update_member(member, permission_sets=permission_sets)\n assert updated_member.portfolio == portfolio\n\n\ndef test_scoped_portfolio_for_admin_missing_view_apps_perms(portfolio_owner, portfolio):\n Applications.create(\n portfolio.owner,\n portfolio,\n \"My Application 2\",\n \"My application 2\",\n [\"dev\", \"staging\", \"prod\"],\n )\n restricted_admin = UserFactory.create()\n PortfolioRoleFactory.create(\n portfolio=portfolio,\n user=restricted_admin,\n permission_sets=[PermissionSets.get(PermissionSets.VIEW_PORTFOLIO)],\n )\n scoped_portfolio = Portfolios.get(restricted_admin, portfolio.id)\n assert scoped_portfolio.id == portfolio.id\n assert len(portfolio.applications) == 1\n assert len(scoped_portfolio.applications) == 0\n\n\ndef test_scoped_portfolio_returns_all_applications_for_portfolio_admin(\n portfolio, portfolio_owner\n):\n for i in range(5):\n Applications.create(\n portfolio.owner,\n portfolio,\n f\"My Application {i}\",\n \"My application\",\n [\"dev\", \"staging\", 
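A worked instance of the annuity formula `mortgage_calc` implements: for a principal of 300,000 at a 4% yearly rate compounded monthly (per-period rate 0.04 / 12 ≈ 0.0033333) over N = 360 payments, (1 + 0.04/12)^360 ≈ 3.3135, so the payment is 300000 · 0.0033333 · 3.3135 / (3.3135 - 1) ≈ 1432.25 per month. Equivalently:

payment = mortgage_calc(principal=300_000, N=360, rate=0.04)
print(round(payment, 2))  # ~1432.25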
\"prod\"],\n )\n\n admin = UserFactory.create()\n perm_sets = get_all_portfolio_permission_sets()\n PortfolioRoleFactory.create(\n user=admin, portfolio=portfolio, permission_sets=perm_sets\n )\n scoped_portfolio = Portfolios.get(admin, portfolio.id)\n\n assert len(scoped_portfolio.applications) == 5\n assert len(scoped_portfolio.applications[0].environments) == 3\n\n\ndef test_scoped_portfolio_returns_all_applications_for_portfolio_owner(\n portfolio, portfolio_owner\n):\n for i in range(5):\n Applications.create(\n portfolio.owner,\n portfolio,\n f\"My Application {i}\",\n \"My application\",\n [\"dev\", \"staging\", \"prod\"],\n )\n\n scoped_portfolio = Portfolios.get(portfolio_owner, portfolio.id)\n\n assert len(scoped_portfolio.applications) == 5\n assert len(scoped_portfolio.applications[0].environments) == 3\n\n\ndef test_for_user_returns_portfolios_for_applications_user_invited_to():\n bob = UserFactory.create()\n portfolio = PortfolioFactory.create()\n application = ApplicationFactory.create(portfolio=portfolio)\n ApplicationRoleFactory.create(\n application=application, user=bob, status=ApplicationRoleStatus.ACTIVE\n )\n\n assert portfolio in Portfolios.for_user(user=bob)\n\n\ndef test_for_user_returns_active_portfolios_for_user(portfolio, portfolio_owner):\n bob = UserFactory.create()\n PortfolioRoleFactory.create(\n user=bob, portfolio=portfolio, status=PortfolioRoleStatus.ACTIVE\n )\n PortfolioFactory.create()\n\n bobs_portfolios = Portfolios.for_user(bob)\n\n assert len(bobs_portfolios) == 1\n\n\ndef test_for_user_does_not_return_inactive_portfolios(portfolio, portfolio_owner):\n bob = UserFactory.create()\n Portfolios.add_member(portfolio, bob)\n PortfolioFactory.create()\n bobs_portfolios = Portfolios.for_user(bob)\n\n assert len(bobs_portfolios) == 0\n\n\ndef test_for_user_returns_all_portfolios_for_ccpo(portfolio, portfolio_owner):\n sam = UserFactory.create_ccpo()\n PortfolioFactory.create()\n\n sams_portfolios = Portfolios.for_user(sam)\n assert len(sams_portfolios) == 2\n\n\ndef test_can_create_portfolios_with_matching_names():\n portfolio_name = \"Great Portfolio\"\n PortfolioFactory.create(name=portfolio_name)\n PortfolioFactory.create(name=portfolio_name)\n\n\ndef test_disabled_members_dont_show_up(session):\n portfolio = PortfolioFactory.create()\n PortfolioRoleFactory.create(portfolio=portfolio, status=PortfolioRoleStatus.ACTIVE)\n PortfolioRoleFactory.create(\n portfolio=portfolio, status=PortfolioRoleStatus.DISABLED\n )\n\n # should only return portfolio owner and ACTIVE member\n assert len(portfolio.members) == 2\n\n\ndef test_does_not_count_disabled_members(session):\n portfolio = PortfolioFactory.create()\n PortfolioRoleFactory.create(portfolio=portfolio, status=PortfolioRoleStatus.ACTIVE)\n PortfolioRoleFactory.create(portfolio=portfolio)\n PortfolioRoleFactory.create(\n portfolio=portfolio, status=PortfolioRoleStatus.DISABLED\n )\n\n assert portfolio.user_count == 3\n\n\ndef test_invite():\n portfolio = PortfolioFactory.create()\n inviter = UserFactory.create()\n member_data = UserFactory.dictionary()\n\n invitation = Portfolios.invite(portfolio, inviter, {\"user_data\": member_data})\n\n assert invitation.role\n assert invitation.role.portfolio == portfolio\n assert invitation.role.user is None\n assert invitation.dod_id == member_data[\"dod_id\"]\n\n\ndef test_delete_success():\n portfolio = PortfolioFactory.create()\n\n assert not portfolio.deleted\n\n Portfolios.delete(portfolio=portfolio)\n\n assert portfolio.deleted\n\n\ndef 
test_delete_failure_with_applications():\n portfolio = PortfolioFactory.create()\n ApplicationFactory.create(portfolio=portfolio)\n\n assert not portfolio.deleted\n\n with pytest.raises(PortfolioDeletionApplicationsExistError):\n Portfolios.delete(portfolio=portfolio)\n\n assert not portfolio.deleted\n\n\ndef test_for_user_does_not_include_deleted_portfolios():\n user = UserFactory.create()\n PortfolioFactory.create(owner=user, deleted=True)\n assert len(Portfolios.for_user(user)) == 0\n\n\ndef test_for_user_does_not_include_deleted_application_roles():\n user1 = UserFactory.create()\n user2 = UserFactory.create()\n portfolio = PortfolioFactory.create()\n app = ApplicationFactory.create(portfolio=portfolio)\n ApplicationRoleFactory.create(\n status=ApplicationRoleStatus.ACTIVE, user=user1, application=app\n )\n assert len(Portfolios.for_user(user1)) == 1\n ApplicationRoleFactory.create(\n status=ApplicationRoleStatus.ACTIVE, user=user2, application=app, deleted=True\n )\n assert len(Portfolios.for_user(user2)) == 0\n\n\ndef test_create_state_machine(portfolio):\n fsm = PortfolioStateMachines.create(portfolio)\n assert fsm\n\n\nclass TestGetPortfoliosPendingCreate(EnvQueryTest):\n def test_finds_unstarted(self):\n # Given: A portfolio is in its period of performance\n # Given: The portfolio's state machine is in its \"UNSTARTED\" stage\n self.create_portfolio_with_clins(\n [(self.YESTERDAY, self.TOMORROW)],\n state_machine_status=PortfolioStates.UNSTARTED.name,\n )\n # When I query for portfolios pending provisioning\n portfolios_pending = Portfolios.get_portfolios_pending_provisioning(self.NOW)\n # Then the query will return the portfolio\n assert len(portfolios_pending) == 1\n\n def test_finds_created(self):\n # Given: A portfolio is in its period of performance\n # Given: The portfolio's state machine is in a _CREATED stage\n self.create_portfolio_with_clins(\n [(self.YESTERDAY, self.TOMORROW)],\n state_machine_status=PortfolioStates.TENANT_CREATED.name,\n )\n # When I query for portfolios pending provisioning\n portfolios_pending = Portfolios.get_portfolios_pending_provisioning(self.NOW)\n # Then the query will return the portfolio\n assert len(portfolios_pending) == 1\n\n def test_does_not_find_failed(self):\n # Given: A portfolio is in its period of performance\n # Given: The portfolio's state machine is in a _FAILED stage\n self.create_portfolio_with_clins(\n [(self.YESTERDAY, self.TOMORROW)],\n state_machine_status=PortfolioStates.TENANT_FAILED.name,\n )\n # When I query for portfolios pending provisioning\n portfolios_pending = Portfolios.get_portfolios_pending_provisioning(self.NOW)\n # Then the query will not return the portfolio\n assert len(portfolios_pending) == 0\n\n def test_with_future_clins_and_no_state_machine(self):\n # Given: The portfolio has not entered its period of performance\n # Given: The portfolio has not begun the provisioning process\n self.create_portfolio_with_clins([(self.TOMORROW, self.TOMORROW)])\n # When I query for portfolios pending provisioning\n portfolios_pending = Portfolios.get_portfolios_pending_provisioning(self.NOW)\n # Then the query will return 0 portfolios\n assert len(portfolios_pending) == 0\n\n def test_with_future_clins_and_state_machine(self):\n # Given: The portfolio has not entered a period of performance\n # Given: The portfolio has begun the provisioning process\n self.create_portfolio_with_clins(\n [(self.TOMORROW, self.TOMORROW)],\n state_machine_status=PortfolioStates.TENANT_CREATED.name,\n )\n # When I query for portfolios 
pending provisioning\n portfolios_pending = Portfolios.get_portfolios_pending_provisioning(self.NOW)\n # Then the query will return 0 portfolios\n assert len(portfolios_pending) == 0\n\n def test_with_expired_clins_and_no_state_machine(self):\n # Given: A portfolio has exited its period of performance\n # Given: The portfolio has not started the provisioning process\n self.create_portfolio_with_clins([(self.YESTERDAY, self.YESTERDAY)])\n # When I query for portfolios pending provisioning\n portfolios_pending = Portfolios.get_portfolios_pending_provisioning(self.NOW)\n # Then the query will return 0 portfolios\n assert len(portfolios_pending) == 0\n\n def test_with_expired_clins_and_state_machine(self):\n # Given: A portfolio has exited its period of performance\n # Given: The portfolio is in the middle of the provisioning process\n self.create_portfolio_with_clins(\n [(self.YESTERDAY, self.YESTERDAY)],\n state_machine_status=PortfolioStates.TENANT_CREATED.name,\n )\n # When I query for portfolios pending provisioning\n portfolios_pending = Portfolios.get_portfolios_pending_provisioning(self.NOW)\n # Then the query will return 0 portfolios\n assert len(portfolios_pending) == 0\n\n def test_with_active_clins_and_no_state_machine(self):\n # Given: A portfolio is in its period of performance\n # Given: The portfolio has begun the provisioning process\n self.create_portfolio_with_clins([(self.YESTERDAY, self.TOMORROW)])\n # When I query for portfolios pending provisioning\n pending_portfolios = Portfolios.get_portfolios_pending_provisioning(self.NOW)\n # Then the query will return the pending portfolio\n assert len(pending_portfolios) == 1\n\n def test_with_active_clins_and_state_machine(self):\n # Given: A portfolio is in its period of performance\n # Given: The portfolio has begun the provisioning process\n self.create_portfolio_with_clins(\n [(self.YESTERDAY, self.TOMORROW)],\n state_machine_status=PortfolioStates.TENANT_CREATED.name,\n )\n # When I query for portfolios pending provisioning\n portfolios_pending = Portfolios.get_portfolios_pending_provisioning(self.NOW)\n # Then the query will return the pending portfolio\n assert len(portfolios_pending) == 1\n\n def test_with_unsigned_task_order(self):\n # Given: A portfolio is in its period of performance\n # Given: The portfolio has begun the provisioning process\n # Given: Portfolio is associated with an unsigned task order\n self.create_portfolio_with_clins(\n [(self.YESTERDAY, self.TOMORROW)],\n state_machine_status=PortfolioStates.UNSTARTED.name,\n task_order_signed_at=None,\n )\n # When I query for portfolios pending provisioning\n portfolios_pending = Portfolios.get_portfolios_pending_provisioning(self.NOW)\n # Then the query will NOT return the pending portfolio\n assert len(portfolios_pending) == 0\n","sub_path":"tests/domain/test_portfolios.py","file_name":"test_portfolios.py","file_ext":"py","file_size_in_byte":13486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"431301219","text":"\"\"\"\n=================== TASK 1 ====================\n* Name: Power to the Number\n*\n* Write a function `numpower()` that will for the\n* passed based number `num` and exponent `expo`\n* return the value of the number `num` raised to\n* the power of `expo`.\n*\n* Note: Please describe in details possible cases\n* in which your solution might not work. 
It is not\n* allowed to use built-in operators and functions\n* for this task.\n*\n* Use main() function to test your solution.\n===================================================\n\"\"\"\n\n\ndef num_power(num, expo):\n powered = num\n if expo == 0:\n return 1\n if expo == 1:\n return num\n if expo != 1:\n powered = (num * num_power(num, expo-1))\n return powered\n\n\ndef main():\n num_on_expo = num_power(1, 3)\n print(\"Number on the exponent is: \", num_on_expo)\n\nmain()\n\n#This function will not work if passed \"expo\" value is a fraction or less than 0.","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"31649009","text":"from math import hypot\n\nclass MagnetCluster:\n\n def extract(matches, epsilon, minVotes):\n\n matchesCluster = {}\n\n nextClusterIndex = 0\n\n for match in matches:\n matchesCluster[id(match)] = nextClusterIndex\n nextClusterIndex += 1\n\n merge_occured = True\n while merge_occured:\n merge_occured = False\n\n transformationMap = {}\n\n for match in matches:\n for targetMatch in matches:\n\n matchCluster = matchesCluster[id(match)]\n targetMatchCluster = matchesCluster[id(targetMatch)]\n\n if id(match) != id(targetMatch) and matchCluster != targetMatchCluster:\n\n matchCenter = match.center()\n targetMatchCenter = targetMatch.center()\n\n dist = hypot(targetMatchCenter[0] - matchCenter[0], targetMatchCenter[1] - matchCenter[1])\n\n if dist <= epsilon:\n if matchCluster < targetMatchCluster:\n transformationMap[targetMatchCluster] = matchCluster\n else:\n transformationMap[matchCluster] = targetMatchCluster\n\n merge_occured = True\n\n\n # Transitivity\n\n # Merge\n\n for match in matches:\n matchCluster = matchesCluster[id(match)]\n if matchCluster in transformationMap:\n matchesCluster[id(match)] = transformationMap[matchCluster]\n\n # Clusters\n clusters = {}\n\n for match in matches:\n clusterIndex = matchesCluster[id(match)]\n\n if not(clusterIndex in clusters):\n clusters[clusterIndex] = []\n\n clusters[clusterIndex].append(match)\n\n # Best matches\n\n bestMatches = []\n\n for clusterIndex in clusters:\n\n clusterMatches = clusters[clusterIndex]\n\n if len(clusterMatches) > 0:\n\n bestMatch = clusterMatches[0]\n\n for match in clusterMatches:\n if match.probability > bestMatch.probability:\n bestMatch = match\n\n bestMatch.nbVotes = len(clusterMatches)\n if(bestMatch.nbVotes >= minVotes):\n bestMatches.append(bestMatch)\n\n return bestMatches\n","sub_path":"src/MagnetCluster.py","file_name":"MagnetCluster.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"554653916","text":"import functools\nimport uuid\nfrom string import Template\nfrom database.repositories.ListingRepository import ListingRepository\nfrom database.repositories.UserRepository import UserRepository\nfrom database.models.Listing import Listing\nfrom database.models.User import User\n\nfrom flask import (\n Blueprint, flash, g, redirect, render_template, request, session, url_for, current_app, jsonify\n)\n\nbp = Blueprint('market', __name__, url_prefix='/market')\n\nlistingRepository = ListingRepository()\nuserRepository = UserRepository()\n\n@bp.route('', methods=['GET'], strict_slashes=False)\ndef getAllListings():\n json = request.get_json()\n \n if json is None or 'RowKey' not in json.keys() or json['RowKey'] is None:\n result = 
listingRepository.read()\n result = [i.__dict__ for i in result]\n return jsonify(result)\n \n return getListing(json['RowKey'])\n\ndef getListing(RowKey):\n try:\n result = listingRepository.read(RowKey=RowKey)\n except Exception as e:\n print(e)\n return 'Not Found', 404\n\n result = result.__dict__\n \n return jsonify(result)\n\n@bp.route('', methods=['POST'], strict_slashes=False)\ndef createListing():\n json = request.get_json()\n\n try:\n user = User(json)\n except Exception as e:\n print(e)\n return 'Bad Request', 400\n\n try:\n user = userRepository.read(RowKey=user.RowKey)\n except Exception as e:\n print('Creating new user')\n userRepository.create(user)\n\n try:\n newListing = Listing(json)\n except Exception as e:\n print(e)\n return 'Bad Request', 400\n \n try:\n etag = listingRepository.create(newListing)\n except Exception as e:\n print(e)\n return jsonify({ 'success': False, 'etag': '' })\n\n return jsonify({ 'success': True, 'etag': etag })\n\n# @bp.route('updateListing', methods=['POST'], strict_slashes=False)\n# def updateListing():\n# json = request.get_json()\n\n# try:\n# user = User(json)\n# except Exception as e:\n# print(e)\n# return 'Bad Request', 400\n\n# try:\n# user = userRepository.read(RowKey=user.RowKey)\n# except Exception as e:\n# return 'Bad Request', 400\n\n# try:\n# newListing = Listing(json)\n# except Exception as e:\n# print(e)\n# return 'Bad Request', 400\n \n# try:\n# if (getListing(newListing.RowKey)):\n# print(\"already exist, update lisging\")\n# etag = listingRepository.updateListing(newListing)\n# else:\n# return 'Bad Request', 400\n# except Exception as e:\n# print(e)\n# return 'Bad Request', 500\n\n# return jsonify({ 'success': True, 'etag': etag })\n\n@bp.route('', methods=['DELETE'], strict_slashes=False)\ndef deleteListing():\n json = request.get_json()\n\n try:\n rowKey = json['RowKey']\n except Exception as e:\n print(e)\n return 'Bad Request', 400\n\n try:\n listingRepository.delete(rowKey)\n except Exception as e:\n print(e)\n return jsonify({ 'success': False })\n\n return jsonify({ 'success': True })\n","sub_path":"market.py","file_name":"market.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"607072878","text":"# -*- encoding: utf-8 -*-\n\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.models.rnn.ptb import reader\n\n\nfile_name = 'tinyshakespeare.txt'\n\nwith open(file_name, \"r\") as f:\n raw_data = f.read()\n\nvocab = set(raw_data)\nvocab_size = len(vocab)\nidx_to_vocab = dict(enumerate(vocab))\nvocab_to_idx = dict(zip(idx_to_vocab.values(), idx_to_vocab.keys()))\n\ndata = [vocab_to_idx[c] for c in raw_data]\ndel raw_data\n\ndef gen_epochs(n, num_steps, batch_size):\n for i in range(n):\n yield reader.ptb_iterator(data, batch_size, num_steps)","sub_path":"tf_test/day5/rnn_more_more.py","file_name":"rnn_more_more.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"2161860","text":"\"\"\"\n206. 
Reverse Linked List\nhttps://leetcode.com/problems/reverse-linked-list/\n\n_author: Kashif Memon\n_python_version: 3.7.2\n\"\"\"\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n if not head: return []\n before, current = None, head\n while current is not None:\n after = current.next\n current.next = before\n before = current\n current = after\n return before\n\n\ndef main():\n input1 = ListNode(1)\n input1.next = ListNode(2)\n out = Solution().reverseList(input1)\n print(out.val, out.next.val)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"solutions-to-leetcode/206_reverse_linked_list.py","file_name":"206_reverse_linked_list.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"429810594","text":"import colorama\nfrom colorama import Fore\nimport time\nimport json\nimport requests\nprint(f\"\"\"{Fore.BLUE}\n\n╦┌─┐ ┬ ┌─┐┌─┐┬┌─┬ ┬┌─┐\n║├─┘ │ │ ││ │├┴┐│ │├─┘\n╩┴ ┴─┘└─┘└─┘┴ ┴└─┘┴ \n\\n\\n\"\"\")\n\nip = input('Enter IP: ')\nr = requests.get(f'http://extreme-ip-lookup.com/json/{ip}')\ngeo = r.json()\nfields = [\n {'name': 'IP', 'value': geo['query']},\n {'name': 'Type', 'value': geo['ipType']},\n {'name': 'Country', 'value': geo['country']},\n {'name': 'City', 'value': geo['city']},\n {'name': 'Continent', 'value': geo['continent']},\n {'name': 'Country', 'value': geo['country']},\n {'name': 'Hostname', 'value': geo['ipName']},\n {'name': 'ISP', 'value': geo['isp']},\n {'name': 'Latitute', 'value': geo['lat']},\n {'name': 'Longitude', 'value': geo['lon']},\n {'name': 'Org', 'value': geo['org']},\n {'name': 'Region', 'value': geo['region']},\n ]\nfor field in fields:\n if field['value']:\n print(Fore.CYAN + field['name'] + ': ' + field['value'])\n time.sleep(0.2)\n","sub_path":"IP.py","file_name":"IP.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"112749203","text":"\"\"\" es_runners for single point calculations\n\"\"\"\n\nimport sys\nimport automol\nimport elstruct\nimport autofile\nfrom routines.es import runner as es_runner\nfrom lib.phydat import phycon\nfrom lib.phydat import symm\n\n\n# Can't read an energy if output placed in filesys, will immediately try to run\n\ndef run_energy(zma, geo, spc_info, thy_info,\n geo_save_fs, geo_run_path, geo_save_path, locs,\n script_str, overwrite, **kwargs):\n \"\"\" Find the energy for the given structure\n \"\"\"\n\n # geo_save_fs and locs unneeded for this\n _, _ = geo_save_fs, locs\n\n # Prepare unique filesystem since many energies may be under same directory\n sp_run_fs = autofile.fs.single_point(geo_run_path)\n sp_save_fs = autofile.fs.single_point(geo_save_path)\n sp_run_fs[-1].create(thy_info[1:4])\n sp_run_path = sp_run_fs[-1].path(thy_info[1:4])\n sp_save_fs[-1].create(thy_info[1:4])\n run_fs = autofile.fs.run(sp_run_path)\n\n # Set input geom\n if zma is not None:\n job_geo = zma\n else:\n job_geo = geo\n\n if not sp_save_fs[-1].file.energy.exists(thy_info[1:4]) or overwrite:\n\n print('No energy found in save filesys. 
Running energy...')\n # Add options matrix for energy runs for molpro\n if thy_info[0] == 'molpro2015':\n errs, optmat = es_runner.par.set_molpro_options_mat(spc_info, geo)\n else:\n errs = ()\n optmat = ()\n\n es_runner.run_job(\n job='energy',\n script_str=script_str,\n run_fs=run_fs,\n geom=job_geo,\n spc_info=spc_info,\n thy_info=thy_info,\n errors=errs,\n options_mat=optmat,\n overwrite=overwrite,\n **kwargs,\n )\n\n ret = es_runner.read_job(\n job='energy',\n run_fs=run_fs,\n )\n\n if ret is not None:\n inf_obj, inp_str, out_str = ret\n\n print(\" - Reading energy from output...\")\n ene = elstruct.reader.energy(inf_obj.prog, inf_obj.method, out_str)\n\n print(\" - Saving energy...\")\n sp_save_fs[-1].file.input.write(inp_str, thy_info[1:4])\n sp_save_fs[-1].file.info.write(inf_obj, thy_info[1:4])\n sp_save_fs[-1].file.energy.write(ene, thy_info[1:4])\n\n else:\n print('Energy found and saved previously at {}'.format(\n sp_save_fs[-1].file.energy.path(thy_info[1:4])))\n\n\ndef run_gradient(zma, geo, spc_info, thy_info,\n geo_save_fs, geo_run_path, geo_save_path, locs,\n script_str, overwrite, **kwargs):\n \"\"\" Determine the gradient for the geometry in the given location\n \"\"\"\n\n # Set the run filesystem information\n run_fs = autofile.fs.run(geo_run_path)\n\n # Set input geom\n if zma is not None:\n job_geo = zma\n is_atom = automol.geom.is_atom(automol.zmatrix.geometry(zma))\n else:\n job_geo = geo\n is_atom = automol.geom.is_atom(geo)\n\n run_fs = autofile.fs.run(geo_run_path)\n if not geo_save_fs[-1].file.gradient.exists(locs) or overwrite:\n\n print('No gradient found in save filesys. Running gradient...')\n es_runner.run_job(\n job='gradient',\n script_str=script_str,\n run_fs=run_fs,\n geom=job_geo,\n spc_info=spc_info,\n thy_info=thy_info,\n overwrite=overwrite,\n **kwargs,\n )\n\n ret = es_runner.read_job(\n job='gradient',\n run_fs=run_fs,\n )\n\n if ret is not None:\n inf_obj, inp_str, out_str = ret\n\n if is_atom:\n grad = ()\n else:\n print(\" - Reading gradient from output...\")\n grad = elstruct.reader.gradient(inf_obj.prog, out_str)\n\n print(\" - Saving gradient...\")\n print(\" - Save path: {}\".format(geo_save_path))\n geo_save_fs[-1].file.gradient_info.write(inf_obj, locs)\n geo_save_fs[-1].file.gradient_input.write(inp_str, locs)\n geo_save_fs[-1].file.gradient.write(grad, locs)\n\n else:\n print('Gradient found and saved previously at {}'.format(\n geo_save_fs[-1].file.gradient.path(locs)))\n\n\ndef run_hessian(zma, geo, spc_info, thy_info,\n geo_save_fs, geo_run_path, geo_save_path, locs,\n script_str, overwrite, **kwargs):\n \"\"\" Determine the hessian for the geometry in the given location\n \"\"\"\n\n # Set the run filesystem information\n run_fs = autofile.fs.run(geo_run_path)\n\n # if prog == 'molpro2015':\n # geo = hess_geometry(out_str)\n # scn_save_fs[-1].file.geometry.write(geo, locs)\n\n # Set input geom\n if zma is not None:\n job_geo = zma\n is_atom = automol.geom.is_atom(automol.zmatrix.geometry(zma))\n else:\n job_geo = geo\n is_atom = automol.geom.is_atom(geo)\n\n if not geo_save_fs[-1].file.hessian.exists(locs) or overwrite:\n\n print('No Hessian found in save filesys. 
Running Hessian...')\n        es_runner.run_job(\n            job='hessian',\n            script_str=script_str,\n            run_fs=run_fs,\n            geom=job_geo,\n            spc_info=spc_info,\n            thy_info=thy_info,\n            overwrite=overwrite,\n            **kwargs,\n        )\n\n        ret = es_runner.read_job(\n            job='hessian',\n            run_fs=run_fs,\n        )\n\n        if ret is not None:\n            inf_obj, inp_str, out_str = ret\n\n            if is_atom:\n                # A lone atom has no vibrational modes; store empty results\n                hess = ()\n                freqs = ()\n            else:\n                print(\" - Reading hessian from output...\")\n                hess = elstruct.reader.hessian(inf_obj.prog, out_str)\n                freqs = elstruct.util.harmonic_frequencies(\n                    geo, hess, project=False)\n\n            print(\" - Saving Hessian...\")\n            print(\" - Save path: {}\".format(geo_save_path))\n            geo_save_fs[-1].file.hessian_info.write(inf_obj, locs)\n            geo_save_fs[-1].file.hessian_input.write(inp_str, locs)\n            geo_save_fs[-1].file.hessian.write(hess, locs)\n            geo_save_fs[-1].file.harmonic_frequencies.write(freqs, locs)\n\n    else:\n        print('Hessian found and saved previously at {}'.format(\n            geo_save_fs[-1].file.hessian.path(locs)))\n\n\ndef run_vpt2(zma, geo, spc_info, thy_info,\n             geo_save_fs, geo_run_path, geo_save_path, locs,\n             script_str, overwrite, **kwargs):\n    \"\"\" Perform vpt2 analysis for the geometry in the given location\n    \"\"\"\n\n    # Set the run filesystem information\n    run_fs = autofile.fs.run(geo_run_path)\n\n    # Assess if symmetry needs to be broken for the calculation\n    # Add program check because this might only be an issue for gaussian\n    if spc_info[0] in symm.HIGH:\n        if zma is None:\n            print('Need a zma for high-symmetry of ', spc_info[0])\n            sys.exit()\n        else:\n            disp = symm.HIGH[spc_info[0]] * phycon.ANG2BOHR\n            vals = automol.zmatrix.values(zma)\n            zma = automol.zmatrix.set_values(zma, {'R1': vals['R1'] + disp})\n            # Run the job with the symmetry-broken zmatrix\n            job_geo = zma\n            is_atom = automol.geom.is_atom(automol.zmatrix.geometry(zma))\n    else:\n        if zma is not None:\n            job_geo = zma\n            is_atom = automol.geom.is_atom(automol.zmatrix.geometry(zma))\n        else:\n            job_geo = geo\n            is_atom = automol.geom.is_atom(geo)\n\n    run_vpt2_job = bool(\n        not geo_save_fs[-1].file.anharmonicity_matrix.exists(locs) or\n        not is_atom or\n        overwrite)\n    if run_vpt2_job:\n        print('Running vpt2')\n        es_runner.run_job(\n            job='vpt2',\n            script_str=script_str,\n            run_fs=run_fs,\n            geom=job_geo,\n            spc_info=spc_info,\n            thy_info=thy_info,\n            overwrite=overwrite,\n            **kwargs,\n        )\n\n        ret = es_runner.read_job(\n            job='vpt2',\n            run_fs=run_fs,\n        )\n\n        if ret is not None:\n            inf_obj, inp_str, out_str = ret\n\n            if not geo_save_fs[-1].file.hessian.exists(locs):\n                print(\" - No Hessian in filesys. 
Reading it from output...\")\n hess = elstruct.reader.hessian(inf_obj.prog, out_str)\n print(\" - Saving Hessian...\")\n print(\" - Save path: {}\".format(geo_save_path))\n geo_save_fs[-1].file.hessian_info.write(inf_obj, locs)\n geo_save_fs[-1].file.hessian_input.write(inp_str, locs)\n geo_save_fs[-1].file.hessian.write(hess, locs)\n\n print(\" - Reading anharmonicities from output...\")\n vpt2_dct = elstruct.reader.vpt2(inf_obj.prog, out_str)\n hess = elstruct.reader.hessian(inf_obj.prog, out_str)\n\n # Write the VPT2 file specifying the Fermi Treatments\n # fermi_treatment = '{} Defaults'.format(inf_obj.prog)\n # vpt2_inf_obj = autofile.system.info.vpt2(\n # fermi_treatment=fermi_treatment)\n\n print(\" - Saving anharmonicities...\")\n print(\" - Save path: {}\".format(geo_save_path))\n # geo_save_fs[-1].file.vpt2_info.write(inf_obj, locs)\n geo_save_fs[-1].file.vpt2_input.write(inp_str, locs)\n geo_save_fs[-1].file.anharmonic_frequencies.write(\n vpt2_dct['freqs'], locs)\n geo_save_fs[-1].file.anharmonic_zpve.write(\n vpt2_dct['zpve'], locs)\n geo_save_fs[-1].file.vibro_rot_alpha_matrix.write(\n vpt2_dct['vibrot_mat'], locs)\n geo_save_fs[-1].file.quartic_centrifugal_dist_consts.write(\n vpt2_dct['cent_dist_const'], locs)\n geo_save_fs[-1].file.anharmonicity_matrix.write(\n vpt2_dct['x_mat'], locs)\n\n else:\n print('VPT2 information found and saved previously at {}'.format(\n geo_save_fs[-1].file.anharmonicity_matrix.exists(locs)))\n","sub_path":"routines/es/_routines/sp.py","file_name":"sp.py","file_ext":"py","file_size_in_byte":9870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"410126512","text":"from math import ceil\n\n\ndef check(board, player):\n diag1 = [board[i][i] for i in range(3)]\n diag2 = [board[i][2 - i] for i in range(3)]\n cols = [[board[i][k] for k in range(3)] for i in range(3)]\n for i in range(3):\n cols[i] = [max(cols[i].count(\"X\"), cols[i].count(\"O\"))]\n if (diag1.count(\"X\") == 3 or diag1.count(\"O\") == 3 or\n diag2.count(\"X\") == 3 or diag2.count(\"O\") == 3 or\n 3 in [board[i].count(\"X\") for i in range(3)] or 3 in [board[i].count(\"O\") for i in range(3)] or\n 3 in [num for row in cols for num in row]):\n return True, player\n if \" \" not in [num for row in board for num in row]:\n return True, \"Draw\"\n return False, player\n\n\ndef game(row, col, board, symbol):\n board[row][col] = symbol\n return board\n\n\ndef draw_board(board):\n for row in board:\n print(\"| \", end=\"\")\n print(' | '.join([i for i in row]), end=\"\")\n print(' |')\n\n\nboard = [[\" \" for i in range(3)] for k in range(3)]\nplayer_1 = input(\"\\n\\nPlayer one name: \")\nplayer_2 = input(\"\\nPlayer two name: \")\nplayer_1_symbol = input(f\"\\n{player_1} would you like to play with 'X' or 'O'? 
\")\nplayer_2_symbol = \"X\" if player_1_symbol == \"O\" else \"O\"\nboard_nums = \"\\n| 1 | 2 | 3 |\\n| 4 | 5 | 6 |\\n| 7 | 8 | 9 |\"\nprint(\"\\nThis is the numeration of the board\")\nprint(board_nums)\nprint(f\"\\n{player_1} starts first!\")\nwinner = None\nwhile True:\n player_1_move = int(input(f\"\\n{player_1} choose a free position [1-9]: \"))\n board = game(ceil(player_1_move / 3) - 1, player_1_move % 3 - 1, board, player_1_symbol)\n draw_board(board)\n status = (check(board, player_1))\n if status[0]:\n winner = player_1 if status[1] == player_1 else \"Draw\"\n break\n player_2_move = int(input(f\"\\n{player_2} choose a free position [1-9]: \"))\n board = game(ceil(player_2_move / 3) - 1, player_2_move % 3 - 1, board, player_2_symbol)\n draw_board(board)\n status = (check(board, player_2))\n if status[0]:\n winner = player_2 if status[1] == player_2 else \"Draw\"\n break\nprint(f\"{winner} won!\" if winner != \"Draw\" else \"Draw!\")\n","sub_path":"Workshop/Tic-Tac-Toe.py","file_name":"Tic-Tac-Toe.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"294715233","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/arctic/__init__.py\n# Compiled at: 2019-02-02 17:02:31\n# Size of source mod 2**32: 1080 bytes\n\"\"\" The Arctic TimeSeries and Tick store.\"\"\"\nfrom .arctic import Arctic, register_library_type\nfrom .arctic import VERSION_STORE, TICK_STORE, CHUNK_STORE\nfrom store._ndarray_store import NdarrayStore\nfrom store._pandas_ndarray_store import PandasDataFrameStore, PandasSeriesStore, PandasPanelStore\nfrom store.version_store import register_versioned_storage, register_version\ntry:\n from pkg_resources import get_distribution\n str_version = get_distribution(__name__).version.strip()\n int_parts = tuple((int(x) for x in str_version.split('.')))\n num_version = sum([1000 ** i * v for i, v in enumerate(reversed(int_parts))])\n register_version(str_version, num_version)\nexcept Exception:\n __version__ = None\n __version_parts__ = tuple()\n __version_numerical__ = 0\nelse:\n __version__ = str_version\n __version_parts__ = int_parts\n __version_numerical__ = num_version\nregister_versioned_storage(PandasDataFrameStore)\nregister_versioned_storage(PandasSeriesStore)\nregister_versioned_storage(PandasPanelStore)\nregister_versioned_storage(NdarrayStore)","sub_path":"pycfiles/arctic-1.79.3-py3.7/__init__.cpython-37.py","file_name":"__init__.cpython-37.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"369793989","text":"#!/usr/bin/python\n\n# Jared Henry Oviatt\n\n# create raw data\n\nimport random\n\n# pull prices from api csv\ndef parse_csv():\n# for if you download csv froma browser and then want to use it. 
Deprecated.\n    filename = 'csv/per_day_all_time_history.csv'\n    f = open(filename, 'r')\n    data = f.readlines()\n    prices = []\n    for each in data[1:]:\n        split = each.split(',')\n        prices.append(float(split[3]))\n    f.close()\n\n    prices = prices[-1000:]\n    \n    f = open(\"csv/prices.csv\", \"w\")\n    f.write(str(prices).replace(\"[\", \"\").replace(\"]\", \"\").replace(\",\", \"\\n\").strip())\n    f.close()\n    \n    return prices\n\ndef fake_data():\n# generate fake bitcoin historical data\n# this is the one you want\n    random.seed()\n    count = 0\n    value = 200.0\n    data = []\n    while count <= 3500:\n        # increase by rand1 * 10 * rand2 * rand3(+/-) for rand4 times\n        rand1 = random.random() * 10\n        rand2 = float(random.randint(1, 5))\n        rand3 = random.random()\n        rand4 = random.randint(1, 10)\n        \n        # get change\n        change = rand1 * rand2 * (rand3 - .5)\n        for i in range(0, rand4):\n            count += 1\n            value += change\n            data.append(abs(round(value, 2))+100)\n\n    return data[:3500]\n\ndef fake_forex():\n# something I tried but I don't think it works. I never use it.\n    random.seed()\n    count = 0\n    value = 1.0\n    data = []\n    while count <= 3500:\n        # increase by rand1 * rand2 * rand3(+/-) for rand4 times\n        rand1 = random.random()\n        rand2 = float(random.randint(10, 200))\n        rand3 = random.random()\n        rand4 = random.randint(1, 3)\n        \n        # get change\n        change = rand1 / rand2 * (rand3 - .5)\n        for i in range(0, rand4):\n            count += 1\n            if abs(value + change) >= .5 and abs(value + change) <= 1.5:\n                value += change\n            data.append(abs(round(value, 2)))\n\n    return data[:3500]\n\ndef save_fake_data(data):\n\n    f = open(\"../csv/fake_prices.csv\", \"w\")\n    f.write(str(data).replace(\"[\", \"\").replace(\"]\", \"\").replace(\",\", \"\\n\").strip())\n    f.close()\n\ndef main():\n    #parse_csv()\n    save_fake_data(fake_data())\n    #save_fake_data(fake_forex())\n    #print(fake_data())\n    #print(fake_forex())\n    return\n\nif __name__ == '__main__':\n    main()\n","sub_path":"test/src/fake_data.py","file_name":"fake_data.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"581551616","text":"import sys\nimport codecs\nimport os\nimport smtplib\nfrom conf import credentials\nfrom email import encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom jinja2 import Template\n\n#https://realpython.com/python-send-email/\n\nCOMMASPACE = ', '\n\n'''\nsubject: subject line of the email\nrecipients: list of recipient addresses\nattachments: list of files to attach\n'''\ndef send_email(smtp_server,smtp_port,subject,sender,recipients,attachments,params_email,logger):\n    logger.info(\"Proceeding to send email\")\n    html_text = open('templates/email.html.j2').read()\n    html_template = Template(html_text)\n    html = html_template.render(params=params_email)\n    \n    gmail_password = credentials.login['password']\n    #recipients = ['jamorena@essiprojects.com','jos3mor3na@gmail.com']\n    \n    # Create the enclosing (outer) message\n    outer = MIMEMultipart()\n    #outer['Subject'] = 'Execution of Smart PV Containarized Replicator -' + '{:%Y-%m-%d}'.format(datetime.now())\n    outer['Subject'] = subject\n    outer['To'] = COMMASPACE.join(recipients)\n    outer['From'] = sender\n\n    # Attach the rendered HTML body once, before the file attachments\n    outer.attach(MIMEText(html, \"html\"))\n\n    # List of attachments\n    #attachments = ['./ocprsyncer.log']\n\n    # Add the attachments to the message\n    for file in attachments:\n        try:\n            with open(file, 'rb') as fp:\n                msg = MIMEBase('application', \"octet-stream\")\n                msg.set_payload(fp.read())\n            encoders.encode_base64(msg)\n            msg.add_header('Content-Disposition', 'attachment', filename=os.path.basename(file))\n            outer.attach(msg)\n        except Exception:\n            logger.error(\"Could not attach file %s. Error: %s\", file, sys.exc_info()[0])\n\n    composed = outer.as_string()\n    # Send the email\n    try:\n        with smtplib.SMTP(smtp_server, smtp_port) as s:\n            s.ehlo()\n            #s.starttls()\n            s.ehlo()\n            #s.login(sender, gmail_password)\n            s.sendmail(sender, recipients, composed)\n            s.close()\n            logger.debug(\"Email sent!\")\n    except Exception:\n        logger.error(\"Could not send the email. Error: %s\", sys.exc_info()[0])\n        raise\n\n#if __name__ == '__main__':\n#    main()\n","sub_path":"emailSender.py","file_name":"emailSender.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"515368276","text":"from odoo import models, fields, api, _\nfrom datetime import datetime, timedelta\nimport odoo.addons.decimal_precision as dp\nfrom calendar import monthrange\nimport dateutil\nimport logging\nfrom odoo.exceptions import UserError\n_logger = logging.getLogger(__name__)\n\nclass contract(models.Model):\n    _inherit ='hr.contract'\n\n    start_date = fields.Date('Start Date')\n    date_end = fields.Date('End Date', required=True)\n    break_meal_allowance = fields.Float('Meal Allowance',default=13.96)\n    payslip_date = fields.Date('Payslip Start Date')\n    classification =fields.Char('Classification')\n    hour_wage =fields.Float('Hour_wage',default=None,digits=dp.get_precision('Leave_dp'))\n    scheduled_hours = fields.Float('Scheduled Hour')\n    wage =fields.Float('Schedule Wage')\n\n    @api.multi\n    def saturday_shift(self,payslip):\n        saturday =0.0\n        wages = payslip.contract_id.hour_wage or 0.0\n        employee_id = payslip.contract_id.employee_id\n        if employee_id.shift_worker == True:\n            total_working_hours = payslip.ordinary_hour + payslip.overtime_hour + payslip.double_hour + payslip.afternoon_shift + payslip.night_shift\n            # if payslip.contract_id.schedule_pay == 'bi-weekly':\n            if payslip.saturday_shift:\n                if total_working_hours < payslip.contract_id.scheduled_hours:\n                    if employee_id.employee_basis == 'casual_employment':\n                        saturday += (payslip.saturday_shift * 175 * wages) / 100\n                    elif employee_id.employee_basis != 'casual_employment':\n                        saturday += (payslip.saturday_shift * 150 * wages) / 100\n                elif total_working_hours >= payslip.contract_id.scheduled_hours:\n                    if employee_id.employee_basis == 'casual_employment':\n                        if payslip.saturday_shift > 2:\n                            saturday += (2 * 150 * wages) / 100\n                            saturday += ((payslip.saturday_shift - 2) * 200 * wages) / 100\n                        else:\n                            saturday += (payslip.saturday_shift * 150 * wages) / 100\n                    elif employee_id.employee_basis != 'casual_employment':\n                        if payslip.saturday_shift > 2:\n                            saturday += (2 * 150 * wages) / 100\n                            saturday += ((payslip.saturday_shift - 2) * 200 * wages) / 100\n                        else:\n                            saturday += (payslip.saturday_shift * 150 * wages) / 100\n\n        else:\n            saturday += 0.0\n        return saturday\n\n    @api.multi\n    def timesheet_pay(self,contract_id,hour,ordinary_hour_check):\n        amount = 0\n        if contract_id.employee_id.shift_worker:\n            if contract_id.employee_id.employee_basis != 'casual_employment':\n                if hour == 0.0 or ordinary_hour_check == True:\n                    amount = 0.0\n                else:\n                    amount = contract_id.hour_wage\n            # elif ordinary_hour_check == True:\n            #     amount = 0.0\n            elif contract_id.employee_id.employee_basis == 'casual_employment':\n                if hour == 0.0 or ordinary_hour_check == True:\n                    amount = 0.0\n                else:\n                    amount = 
contract_id.hour_wage\n else:\n # result_qty = ordinary_hour\n if contract_id.employee_id.employee_basis != 'casual_employment':\n if hour == 0.0 or ordinary_hour_check == True:\n amount = 0.0\n else:\n amount = contract_id.hour_wage\n\n elif contract_id.employee_id.employee_basis == 'casual_employment':\n if hour == 0.0 or ordinary_hour_check == True:\n amount = 0.0\n else:\n amount = contract_id.hour_wage\n return amount\n\n @api.multi\n def shift(self,contract_id,shift,shift_check):\n if contract_id.employee_id.shift_worker:\n if shift == 0.0 or shift_check == True:\n amount = 0.0\n else:\n amount = contract_id.hour_wage\n return amount\n\n\n #based on scheduled period it will change number of hours.\n @api.onchange('schedule_pay')\n def scheduled_hour(self):\n hours = 0.0\n if self.schedule_pay == 'bi-weekly':\n hours += 76\n elif self.schedule_pay == 'weekly':\n hours += 38\n elif self.schedule_pay == 'monthly':\n hours += 152\n else:\n hours += 0.0\n if not self.scheduled_hours:\n self.scheduled_hours = hours\n\n @api.multi\n def used_sick_leave(self, date_from, date_to, contract_id, emp_id):\n employee_id = contract_id.employee_id\n leave_hour = 0.0\n hour_wage = 0.0\n leaves = self.env['hr.leave'].search([\n ('employee_id', '=', employee_id.id),\n ('date_from', '>=', date_from),\n ('date_to', '<=', date_to),\n ('state', '=', 'validate')])\n if employee_id.employee_basis != 'casual_employment':\n if leaves:\n if contract_id.timesheet_payroll == False:\n wage = contract_id.wage\n for leave in leaves:\n if leave.holiday_status_id.name == 'Sick/Carer Leave':\n leave_hour += leave.number_of_hours_temp\n if contract_id.schedule_pay == 'bi-weekly':\n hour_wage = wage /contract_id.scheduled_hours\n # hour_wage = hour_wage*1.175\n # amount = hour_wage*leave_hour\n elif contract_id.schedule_pay == 'monthly':\n hour_wage = wage /contract_id.scheduled_hours\n # hour_wage = hour_wage*1.175\n # amount = hour_wage*leave_hour\n elif contract_id.schedule_pay == 'weekly':\n hour_wage = wage /contract_id.scheduled_hours\n else:\n hour_wage = contract_id.hour_wage\n for leave in leaves:\n if leave.holiday_status_id.name == 'Sick/Carer Leave':\n leave_hour += leave.number_of_hours_temp\n else:\n leave_hour = 0.0\n else:\n pass\n # hour_wage = 0.0\n # leave_hour = 0.0\n # hour_wage = hour_wage*1.175\n # amount = hour_wage*leave_hour\n return {'amount': hour_wage, 'leave': leave_hour}\n\n # it will check if any annual leave taken in that particular period.\n\n @api.multi\n def cal_leaves(self, date_from, date_to, contract_id, emp_id):\n\n employee_id = contract_id.employee_id\n leave_hour = 0.0\n hour_wage = 0.0\n leaves = self.env['hr.leave'].search([('holiday_status_id', '=', \"Annual leave\"),\n # leaves = self.env['hr.holidays'].search([\n ('employee_id', '=', employee_id.id),\n ('date_from', '>=', date_from),\n ('date_to', '<=', date_to),\n ('state', '=', 'validate')])\n if leaves:\n if contract_id.timesheet_payroll == False:\n wage = contract_id.wage\n for leave in leaves:\n leave_hour += leave.number_of_hours_temp\n if contract_id.schedule_pay == 'bi-weekly':\n hour_wage = wage /contract_id.scheduled_hours\n # hour_wage = hour_wage*1.175\n # amount = hour_wage*leave_hour\n elif contract_id.schedule_pay == 'monthly':\n hour_wage = wage /contract_id.scheduled_hours\n # hour_wage = hour_wage*1.175\n # amount = hour_wage*leave_hour\n elif contract_id.schedule_pay == 'weekly':\n hour_wage = wage /contract_id.scheduled_hours\n else:\n hour_wage = contract_id.hour_wage\n for leave in leaves:\n leave_hour 
+= leave.number_of_hours_temp\n else:\n leave_hour = 0.0\n # hour_wage = hour_wage*1.175\n # amount = hour_wage*leave_hour\n return {'amount':hour_wage,'leave':leave_hour}\n\n # calculate tax for timesheet employee\n @api.multi\n def tax_payslip(self,payslip):\n\n fortnight_wages = self.total_basic(payslip)\n if payslip.taxable_adjustment:\n fortnight_wage = fortnight_wages.get('with_manual_amount')\n else:\n fortnight_wage = fortnight_wages.get('without_manual_amount')\n if payslip.contract_id.schedule_pay == 'bi-weekly':\n annual_wage = fortnight_wage * 26\n _logger.info('=================annual_waage==>%s', annual_wage)\n elif payslip.contract_id.schedule_pay == 'monthly':\n annual_wage = fortnight_wage * 12\n elif payslip.contract_id.schedule_pay == 'weekly':\n annual_wage = fortnight_wage * 52\n medicare = 0.0\n if annual_wage >= 0 and annual_wage <= 18200:\n vals = annual_wage\n vals = 0.0\n _logger.info('==1111111111111111111111111111111111111==>%s', vals)\n\n elif annual_wage >= 18201 and annual_wage <= 37000:\n vals = annual_wage - 18201\n medicare = (2 * annual_wage / 100)\n vals = (19 * vals / 100)\n vals = vals + medicare\n _logger.info('==1111111111111111111111111_________________-111111111111==>%s', vals)\n\n elif annual_wage >= 37001 and annual_wage <= 87000:\n vals = annual_wage - 37001\n medicare = (2 * annual_wage / 100)\n vals = (32.5 * vals / 100)\n vals = 3572 + vals + medicare\n _logger.info('==1111111111111111111111111111111111111==>%s', vals)\n\n elif annual_wage >= 87001 and annual_wage <= 180000:\n vals = annual_wage - 87001\n medicare = (2 * annual_wage / 100)\n vals = (37 * vals / 100)\n vals = 19822 + vals + medicare\n\n else:\n vals = annual_wage - 180001\n medicare = (2 * annual_wage / 100)\n vals = (45 * vals / 100)\n vals = 54232 + vals + medicare\n\n if payslip.contract_id.schedule_pay == 'bi-weekly':\n tax = vals / 26\n elif payslip.contract_id.schedule_pay == 'monthly':\n tax = vals / 12\n elif payslip.contract_id.schedule_pay == 'weekly':\n tax = vals / 52\n return tax\n\n\n #calculate how much leave accured on this period for timesheet employee\n @api.multi\n def leave_allocation(self,date_from,date_to,contract_id,employee_id):\n # wage = contract_id.wage\n employee_id = contract_id.employee_id\n allocated_hour = 0.0\n hour_wage = 0.0\n leaves = self.env['hr.leave.allocation'].search([('employee_id', '=', employee_id.id),\n # ('type','=','add'),\n ('approval_date','>=',date_from),\n ('approval_date','<=',date_to),\n ('state', '=', 'validate')])\n if contract_id.timesheet_payroll == True:\n # wage = contract_id.wage\n if leaves:\n for leave in leaves:\n if leave.holiday_status_id == 'Annual leave':\n allocated_hour += leave.number_of_hours_temp\n else:\n allocated_hour = 0.0\n return allocated_hour\n\n # it will show no of hours leave remain of annual leaves\n @api.multi\n def balance_leave(self, date_from, date_to, contract_id, emp_id):\n employee_id = contract_id.employee_id\n leave_hour = 0.0\n if employee_id.employee_basis != 'casual_employment':\n leaves = employee_id.remaining_hours_ids\n wage = contract_id.wage\n if leaves:\n for leave in leaves:\n if leave.leave_type == 'Annual leave':\n leave_hour += leave.no_of_leaves\n else:\n leave_hour = 0.0\n return leave_hour\n\n\n\n # calculate gross salary of timesheet based employee it will calcuate based on types of leave and\n # shift and type of employee\n @api.multi\n def total_basic(self,payslip):\n wages = payslip.contract_id.hour_wage\n leave_hour = 0.0\n sick_leave_hours = 0.0\n hour_wage = 
0.0\n ordinary = 0\n overtime = 0\n double = 0\n hour_wage = 0.0\n leave_amount = 0.0\n meal_allowance_pay = 0.0\n employee_id = payslip.employee_id\n employee_id = self.env['hr.employee'].search([('id', '=', employee_id)])\n leaves =self.cal_leaves(payslip.date_from, payslip.date_to, payslip.contract_id,employee_id)\n sick_leaves = self.used_sick_leave(payslip.date_from, payslip.date_to, payslip.contract_id,employee_id)\n # public_leave = self.public_leave(contract_id, employee_id, date_from, date_to)\n meal_allowance = self.meal_allowance(payslip.date_from, payslip.date_to,employee_id.id)\n if leaves or sick_leaves or payslip.public_holiday_hours:\n if not payslip.contract_id.timesheet_payroll== True:\n # wage = payslip.contract_id.wage\n #\n # if payslip.contract_id.schedule_pay == 'bi-weekly':\n # hour_wage = wage /payslip.contract_id.scheduled_hours\n # # hour_wage = hour_wage*1.175\n # # amount = hour_wage*leave_hour\n # elif payslip.contract_id.schedule_pay == 'monthly':\n # hour_wage = wage /payslip.contract_id.scheduled_hours\n # # hour_wage = hour_wage*1.175\n # # amount = hour_wage*leave_hour\n # elif payslip.contract_id.schedule_pay == 'weekly':\n # hour_wage = wage /payslip.contract_id.scheduled_hours\n pass\n else:\n hour_wage = payslip.contract_id.hour_wage\n\n\n leave_hour = leaves.get('leave') or 0.0\n sick_leave_hours = sick_leaves.get('leave') or 0.0\n leave_amount = (wages*leave_hour*117.5)/100\n leave_amount += (wages*sick_leave_hours*100)/100\n\n if employee_id.shift_worker == True:\n overtime = 0\n double = 0\n meal_allowance_pay = (meal_allowance['total'] * 13.96 * 100) / 100\n if employee_id.employee_basis != 'casual_employment':\n leave_amount += (hour_wage * payslip.public_holiday_hours * 250) / 100\n overtime_wage = wages + (wages / 2)\n overtime = payslip.overtime_hour * overtime_wage\n double = payslip.double_hour * (wages * 2)\n\n rate = (wages * 150 / 100) / 2\n ordinary_meal_break = meal_allowance['ordinary'] * rate\n if payslip.ordinary_hour > 0:\n ordinary = payslip.ordinary_hour * wages\n\n\n else:\n ordinary = 0\n # ordinary_meal_break = 0\n\n afternoon_meal_break = ((0.50 * meal_allowance['afternoon']) * 115 * wages) / 100\n if payslip.afternoon_shift > 0:\n # afternoon_shift = afternoon_shift\n afternoon = (payslip.afternoon_shift * 115 * wages) / 100\n\n else:\n afternoon = 0\n # afternoon_meal_break = 0\n\n night_meal_break = ((0.50 * meal_allowance['night']) * 115 * wages) / 100\n if payslip.night_shift > 0:\n # night_shift = night_shift\n night = (payslip.night_shift * 115 * wages) / 100\n\n else:\n night = 0\n # night_meal_break = 0\n\n if payslip.saturday_shift > 0:\n # saturday = (payslip.saturday_shift * 150 * wages) / 100\n saturday = self.saturday_shift(payslip)\n else:\n saturday = 0\n if payslip.sunday_shift > 0:\n sunday = payslip.sunday_shift * (wages * 2)\n else:\n sunday = 0\n fortnight = ordinary + overtime + double + afternoon + night + saturday + sunday + leave_amount + afternoon_meal_break + night_meal_break + meal_allowance_pay + ordinary_meal_break\n\n else:\n leave_amount += (hour_wage * payslip.public_holiday_hours * 275) / 100\n overtime_wage = wages + (wages / 2)\n overtime = payslip.overtime_hour * overtime_wage\n double = payslip.double_hour * (wages * 2)\n rate = (wages * 150 / 100) / 2\n ordinary_meal_break = meal_allowance['ordinary'] * rate\n if payslip.ordinary_hour > 0:\n ordinary = (payslip.ordinary_hour * 125 * wages) / 100\n\n\n else:\n ordinary = 0\n # overtime = 0\n # double = 0\n # ordinary_meal_break = 0\n\n\n 
if payslip.afternoon_shift > 0:\n afternoon_shift = payslip.afternoon_shift\n afternoon = (afternoon_shift * 140 * wages) / 100\n afternoon_meal_break = ((0.50 * meal_allowance['afternoon']) * 115 * wages) / 100\n else:\n afternoon = 0\n afternoon_meal_break = 0\n\n night_meal_break = ((0.50 * meal_allowance['night']) * 115 * wages) / 100\n if payslip.night_shift > 0:\n # night_shift = night_shift\n night = (payslip.night_shift * 140 * wages) / 100\n\n else:\n night = 0\n # night_meal_break = 0\n\n if payslip.saturday_shift > 0.0:\n saturday = self.saturday_shift(payslip)\n else:\n saturday = 0\n if payslip.sunday_shift > 0:\n sunday = (payslip.sunday_shift * 225 * wages) / 100\n else:\n sunday = 0\n fortnight = ordinary + overtime + double + afternoon + night + saturday + sunday + leave_amount + afternoon_meal_break + night_meal_break + meal_allowance_pay + ordinary_meal_break\n else:\n meal_allowance_pay = (meal_allowance['ordinary'] * 13.96 * 100) / 100\n if not employee_id.employee_basis == 'casual_employment':\n ordinary = payslip.ordinary_hour * wages\n else:\n ordinary = payslip.ordinary_hour * wages * 125 /100\n overtime_wage = wages + (wages / 2)\n overtime = payslip.overtime_hour * overtime_wage\n double = payslip.double_hour * (wages * 2)\n rate = (wages * 150 / 100) / 2\n ordinary_meal_break = meal_allowance['ordinary'] * rate\n fortnight = ordinary + overtime + double + leave_amount + meal_allowance_pay + ordinary_meal_break\n without_manual_amount_fortnight = fortnight\n if payslip.salary_adjust == True:\n if payslip.add == True:\n fortnight = fortnight + payslip.manual_amount\n if payslip.sub == True:\n fortnight = fortnight - payslip.manual_amount_sub\n return {'with_manual_amount':fortnight,'without_manual_amount':without_manual_amount_fortnight}\n\n\n @api.multi\n def accu_leave(self, contract_id,employee_id,date_from,date_to):\n employee_id = contract_id.employee_id\n leave =0.0\n unpaid_leave = 0.0\n leaves = self.env['hr.leave'].search(\n [('holiday_status_id', 'in', ('Unpaid Leave')),\n ('employee_id', '=', employee_id.id),\n ('date', '>=', date_from),\n ('date', '<=', date_to),\n ('state', '=', 'validate')])\n if leaves:\n for unpaid in leaves:\n unpaid_leave += unpaid.number_of_hours_temp\n if not employee_id.employee_basis == 'casual_employment':\n if contract_id.timesheet_payroll == False:\n if not contract_id.schedule_pay == 'monthly':\n leave = contract_id.scheduled_hours - unpaid_leave\n amount = 0.07692307692\n # result_qty = hours\n else:\n leave = contract_id.scheduled_hours - unpaid_leave\n # leave = 12.66\n amount = 0.07692307692\n # result = leave\n else:\n # leaves = self.env['hr.holidays'].search([('employee_id', '=', employee_id.id),\n # ('holiday_status_id', '=', \"Annual leave\"),\n # ('type','=','add'),\n # ('date_from','>=',date_from),\n # ('date_from','<=',date_to),\n # ('state', '=', 'validate')])\n sheets = self.env['account.analytic.line'].search([\n ('employee_id', '=', employee_id.id),\n ('date', '>=', date_from),\n ('date', '<=', date_to),\n ])\n amount = 1\n if sheets:\n for sheet in sheets:\n leave += sheet.hr_holiday.number_of_hours\n else:\n leave = 0.0\n amount = 0.0\n return {'qty':leave,'amount':amount}\n\n @api.multi\n def public_leave(self, contract_id, employee_id, date_from, date_to,holiday_hours):\n employee_id = contract_id.employee_id\n working_hours = 0.0\n public_holiday_hours = holiday_hours\n amount = 0.0\n working_hours = holiday_hours\n # leaves = self.env['public.holiday'].search([\n # ('date', '>=', date_from),\n # 
('date', '<=', date_to),\n # ('state', '=', 'confirmed')])\n # sheets = self.env['hr_timesheet_sheet.sheet'].search([\n # ('employee_id', '=', employee_id.id),\n # ('date_from', '>=', date_from),\n # ('date_to', '<=', date_to),\n # ('state', '=', 'done')])\n # if leaves:\n # for leave in leaves:\n # for sheet in sheets:\n # if sheet.public_holiday == True:\n # line_ids = self.env['account.analytic.line'].search([('sheet_id', '=', sheet.id)])\n # for line in line_ids:\n # if line.date == leave.date:\n # working_hours += line.unit_amount\n # amount = contract_id.hour_wage\n\n return public_holiday_hours\n\n #tax calculation for monthly employee or fixed wage employee\n @api.multi\n def month_tax_payslip(self,payslip):\n wages = 0.0\n wage = self.month_salary_payslip(payslip)\n if payslip.taxable_adjustment:\n wages = wage.get('with_manual_amount')\n else:\n wages = wage.get('without_manual_amount')\n if payslip.contract_id.schedule_pay == 'bi-weekly':\n annual_wage = wages * 26\n elif payslip.contract_id.schedule_pay == 'monthly':\n annual_wage = wages * 12\n elif payslip.contract_id.schedule_pay == 'weekly':\n annual_wage = wages * 52\n medicare = 0.0\n if annual_wage >= 0 and annual_wage <= 18200:\n vals = annual_wage\n vals = 0.0\n elif annual_wage >= 18201 and annual_wage <= 37000:\n vals = annual_wage - 18201\n medicare = (2 * annual_wage / 100)\n vals = (19 * vals / 100)\n vals = vals + medicare\n\n elif annual_wage >= 37001 and annual_wage <= 87000:\n vals = annual_wage - 37001\n medicare = (2 * annual_wage / 100)\n vals = (32.5 * vals / 100)\n vals = 3572 + vals + medicare\n\n elif annual_wage >= 87001 and annual_wage <= 180000:\n vals = annual_wage - 87001\n medicare = (2 * annual_wage / 100)\n vals = (37 * vals / 100)\n vals = 19822 + vals + medicare\n else:\n vals = annual_wage - 180001\n medicare = (2 * annual_wage / 100)\n vals = (45 * vals / 100)\n vals = 54232 + vals + medicare\n if payslip.contract_id.schedule_pay == 'bi-weekly':\n tax = vals / 26\n elif payslip.contract_id.schedule_pay == 'monthly':\n tax = vals / 12\n elif payslip.contract_id.schedule_pay == 'weekly':\n tax = vals / 52\n else:\n tax = 0.0\n return tax\n\n\n @api.multi\n def month_salary_payslip(self,payslip):\n leave_amount = 0.0\n base_salary = self.month_salary(payslip)\n _logger.error(\"-----------------base_salary-----------------------------------\", base_salary)\n\n total_hours = payslip.contract_id.scheduled_hours\n total_hours = total_hours - base_salary['leave_hour']\n leave_amount = (base_salary.get('wages') * base_salary.get('leave').get('annual_leave_hours') * 117.5) / 100\n leave_amount += (base_salary.get('wages') * base_salary.get('leave').get('sick_leave_hours') * 100) / 100\n wage_without_leave = total_hours * base_salary.get('wages')\n wages = wage_without_leave + leave_amount\n without_manual_amount = wages\n if payslip.salary_adjust == True:\n if payslip.add == True:\n wages = wages + payslip.manual_amount\n if payslip.sub == True:\n wages = wages - payslip.manual_amount_sub\n return {'with_manual_amount':wages,'without_manual_amount':without_manual_amount}\n\n @api.multi\n def month_salary(self,payslip):\n leave_hour = 0.0\n sick_leave_hour = 0.0\n total_hours = payslip.contract_id.scheduled_hours\n hour_wage = 0\n leave_amount = 0.0\n unpaid_total_hours = 0\n employee_id = self.env['hr.employee'].search([('id', '=', payslip.employee_id)])\n leaves = self.env['hr.leave'].search([('holiday_status_id', 'in', ('Annual leave','Sick/Carer Leave','Unpaid Leave')),\n ('employee_id', '=', 
employee_id.id),\n                                              ('date_from', '>=', payslip.date_from),\n                                              ('date_to', '<=', payslip.date_to),\n                                              ('state', '=', 'validate')])\n        leave_dict = {'annual_leave_hours':0,'sick_leave_hours':0, 'unpaid_leave_hours':0}\n        for leave in leaves:\n            if leave.holiday_status_id.name == 'Annual leave':\n                leave_dict['annual_leave_hours'] += leave.number_of_hours_temp\n            elif leave.holiday_status_id.name == 'Sick/Carer Leave':\n                leave_dict['sick_leave_hours'] += leave.number_of_hours_temp\n            elif leave.holiday_status_id.name == 'Unpaid Leave':\n                leave_dict['unpaid_leave_hours'] += leave.number_of_hours_temp\n\n        _logger.error(\"leave_dict: %s\", leave_dict)\n\n        leave_hour = leave_dict['annual_leave_hours'] + leave_dict['sick_leave_hours'] + leave_dict[\n            'unpaid_leave_hours']\n        unpaid_leave_hours = leave_dict['unpaid_leave_hours']\n\n        if payslip.contract_id.timesheet_payroll == False:\n            wage = payslip.contract_id.wage\n\n            if payslip.contract_id.schedule_pay == 'bi-weekly':\n                hour_wage = wage / payslip.contract_id.scheduled_hours\n                _logger.error(\"hour_wage: %s\", hour_wage)\n\n            elif payslip.contract_id.schedule_pay == 'monthly':\n                if payslip.contract_id.scheduled_hours != 0:\n                    hour_wage = wage / payslip.contract_id.scheduled_hours\n\n            elif payslip.contract_id.schedule_pay == 'weekly':\n                hour_wage = wage / payslip.contract_id.scheduled_hours\n        elif payslip.contract_id.timesheet_payroll == True:\n            hour_wage = payslip.contract_id.hour_wage\n\n        else:\n            pass\n\n        if leave_hour:\n            unpaid_total_hours = payslip.contract_id.scheduled_hours - unpaid_leave_hours\n\n        total_hours = payslip.contract_id.scheduled_hours - leave_hour\n        _logger.error(\"total_hours: %s\", total_hours)\n\n        return {'qty':total_hours,'wages':hour_wage,'leave_hour':leave_hour,'leave':leave_dict,'unpaid_hours':unpaid_total_hours,'sick_leave':leave_dict.get('sick_leave_hours')}\n\n    #night shift values based on employee type\n    @api.multi\n    def night_shift(self,afternoon_shift,night_shift_check,contract_id,night_shift,employee_id):\n        print(afternoon_shift)\n        amount = 0.0  # default when neither shift branch applies\n        if contract_id.employee_id.shift_worker == True and contract_id.employee_id.employee_basis != 'casual_employment':\n            if night_shift == 0.0 or night_shift_check == True:\n                amount = 0.0\n            else:\n                amount = contract_id.hour_wage\n            # result_rate = 115\n            # result_qty = night_shift - 0.50\n        elif contract_id.employee_id.shift_worker == True and contract_id.employee_id.employee_basis == 'casual_employment':\n            if night_shift == 0.0 or night_shift_check == True:\n                amount = 0.0\n            else:\n                amount = contract_id.hour_wage\n                # result_rate = 125\n                # result_qty = night_shift - 0.50\n        return amount\n\n    # meal allowance calculation based on time sheet #edited by Mayur\n    @api.multi\n    def meal_allowance(self,date_from,date_to,employee_id):\n        total = 0.0\n        ordinary = 0.0\n        afternoon = 0.0\n        night = 0.0\n        sheets = self.env['account.analytic.line'].search([\n            ('employee_id', '=', employee_id),\n            ('date', '>=', date_from),\n            ('date', '<=', date_to),\n            ('break_time','=', True)\n        ])\n        if sheets:\n            for sheet in sheets:\n                if sheet.project_id.name == 'Ordinary Shift':\n                    ordinary += sheet.break_time\n                if sheet.project_id.name == 'Afternoon Shift':\n                    afternoon += sheet.break_time\n                if sheet.project_id.name == 'Night Shift':\n                    night += sheet.break_time\n        total = ordinary + afternoon + night\n        print(ordinary,afternoon,night,total)\n        return {'ordinary':ordinary,'afternoon':afternoon,'night':night, 'total':total}\n\n\n    # 
calculate superannuation for timesheet based employee\n @api.multi\n def superannuation_timesheet(self,contract_id,ordinary_shift,afternoon_shift,saturday_shift,sunday_shift,night_shift,ordinary_shift_check,\n afternoon_shift_check,night_shift_check,saturday_shift_check,sunday_shift_check,date_from,date_to,public_holiday_hours):\n ordinary = 0\n afternoon = 0\n night = 0\n saturday = 0\n sunday = 0\n leave = 0\n annual_leave = 0.0\n hour_wage = contract_id.hour_wage\n wages = contract_id.hour_wage\n # public_leave = self.public_leave(contract_id, contract_id.employee_id, date_from, date_to)\n if saturday_shift >= 7.6:\n if saturday_shift_check == True:\n saturday_hour = 0.0\n else:\n saturday_hour = 7.6\n else:\n saturday_hour = saturday_shift\n\n if sunday_shift >= 7.6:\n if sunday_shift_check == True:\n sunday_hour = 0.0\n else:\n sunday_hour = 7.6\n else:\n sunday_hour = sunday_shift\n if ordinary_shift_check == True:\n ordinary_hour = 0.0\n else:\n ordinary_hour = ordinary_shift\n\n if afternoon_shift_check == True:\n afternoon_hour = 0.0\n else:\n afternoon_hour = afternoon_shift\n\n if night_shift_check == True:\n night_hour = 0.0\n else:\n night_hour = night_shift\n if contract_id.timesheet_payroll == True:\n if contract_id.employee_id.shift_worker == True:\n if contract_id.employee_id.employee_basis != 'casual_employment':\n ordinary = ordinary_hour * wages\n afternoon = (afternoon_hour * 115 * wages) / 100\n night = (night_hour * 115 * wages) / 100\n saturday = (saturday_hour * 150 * wages) / 100\n sunday = sunday_hour * (wages * 2)\n leave = (hour_wage * public_holiday_hours * 250) / 100\n else:\n leave = (hour_wage * public_holiday_hours * 275) / 100\n ordinary = ordinary_hour * wages * 125 /100\n afternoon = (afternoon_hour * 140 * wages) / 100\n night = (night_hour * 140 * wages) / 100\n saturday = (saturday_hour * 175 * wages) / 100\n sunday = (sunday_hour * 225 * wages) / 100\n else:\n if not contract_id.employee_id.employee_basis == 'casual_employment':\n ordinary = ordinary_hour * wages\n else:\n ordinary = ordinary_hour * wages * 125 / 100\n if contract_id.employee_id.employee_basis == 'full_time_employment':\n annual_leave_hours = self.cal_leaves(date_from, date_to, contract_id, contract_id.employee_id.id)\n annual_leave = (wages * annual_leave_hours.get('leave') * 117.5) / 100\n fortnight = ordinary + afternoon +night +saturday + sunday + leave + annual_leave\n\n return fortnight\n\n # check if any unpaid leave record for payslip time period.\n @api.multi\n def unpaid_leave(self, contract_id, employee_id, date_from, date_to):\n employee_id = contract_id.employee_id\n unpaid_leave = 0.0\n\n leaves = self.env['hr.leave'].search([('holiday_status_id', 'in', ('Unpaid Leave')),\n ('employee_id', '=', employee_id.id),\n ('date_from', '>=', date_from),\n ('date_to', '<=', date_to),\n ('state', '=', 'validate')])\n for leave in leaves:\n unpaid_leave += leave.number_of_hours_temp\n\n return unpaid_leave\n","sub_path":"australian_payroll/model/contract.py","file_name":"contract.py","file_ext":"py","file_size_in_byte":35181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"626286210","text":"from selenium.webdriver.common.by import By\nfrom behave import when, then\nfrom time import sleep\n\n\nSEARCH_FIELD = (By.CSS_SELECTOR, '#helpsearch')\nSEARCH_SUBMIT_BUTTON = (By.CSS_SELECTOR, '#helpSearchSubmit input[type=\"submit\"]')\nCONTENT_H1 = (By.CSS_SELECTOR, '.help-content h1')\n\n\n@when('Input {search_text} to the search 
field')\ndef input_search_field(context, search_text):\n search_field = context.driver.find_element(*SEARCH_FIELD)\n search_field.click()\n search_field.clear()\n sleep(0.5)\n search_field.send_keys(search_text)\n sleep(0.5)\n\n\n@when('Click on search button')\ndef submit_search(context):\n submit = context.driver.find_element(*SEARCH_SUBMIT_BUTTON)\n submit.click()\n sleep(1)\n\n\n@then('Search block shows {text} in the title')\ndef search_block_shows_expected_text(context, text):\n title = context.driver.find_element(*CONTENT_H1)\n assert title.text == text","sub_path":"features/steps/help_center.py","file_name":"help_center.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"459710204","text":"import mysql.connector\nimport logging.config\nimport database\nimport utils\nimport concepts_file_parser\nimport os\nimport sys\n\n_files_read = 0\n_concepts_read = 0\n_concepts_inserted = 0\n_concepts_updated = 0\n_concepts_errors = 0\n_results_file = None\n_errors_file = None\n\ndef _get_logger():\n logger = logging.getLogger(__name__)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n dir_name, filename = os.path.split(os.path.abspath(__file__))\n output_file = dir_name + \"/concepts_etl.log\"\n handler = logging.FileHandler(output_file)\n handler.setFormatter(formatter)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n return logger\n\n_logger = _get_logger() \n\ndef run(file_path, domains, vocabularies, concepts, cnx):\n global _concepts_read\n global _concepts_inserted\n global _concepts_updated\n global _concepts_errors\n global _errors_file\n vocabulary = utils.extract_vocabulary_data_from_filename(file_path)\n if vocabulary['name'] not in vocabularies:\n vocabulary_id = database.add_vocabulary(vocabulary['name'],\n vocabulary['version'],\n cnx)\n vocabularies[vocabulary['name']] = vocabulary_id\n\n row = 2\n for line in utils.read_csv_file(file_path, delimiter='\\t'):\n concept = concepts_file_parser.get_concept(line)\n _concepts_read += 1\n try:\n if (len(concept['pxordx']) != 0 and\n len(concept['code_type']) != 0 and\n len(concept['code']) != 0 and\n len(concept['concept_id']) != 0 and\n len(concept['vocabulary_id']) != 0 and \n len(concept['domain_id']) != 0):\n\n # Add new domain to dictionary\n domain_name = concept['domain_id'].strip()\n if domain_name not in domains:\n domains[domain_name] = database.add_domain(domain_name,\n cnx)\n\n # Create key to identify uniquely the concept\n key = '{0}|{1}|{2}|{3}|{4}|{5}|{6}|{7}'.format(concept['pxordx'], \n concept['code_type'], \n concept['code'],\n concept['concept_id'], \n concept['vocabulary_id'],\n concept['domain_id'],\n 'public',\n 0)\n concept = filters(concept)\n\n if key not in concepts:\n _logger.info(\"Adding new concept_id {0} to dictionary...\".format(concept['concept_id']))\n\n concepts[key] = {\n \"oldpxordx\": concept['old_pxordx'],\n \"concept_class_id\": concept['concept_class_id'],\n \"track\": concept['track'],\n \"standard_concept\": concept['standard_concept'],\n \"codewithperiods\": concept['code_with_periods'],\n \"codescheme\": concept['code_scheme'],\n \"long_desc\": concept['long_description'],\n \"short_desc\": concept['short_description'],\n \"code_status\": concept['code_status'],\n \"code_change\": concept['code_change'],\n \"code_change_year\": concept['code_change_year'],\n \"code_planned_type\": concept['code_planned_type'],\n \"code_billing_status\": 
concept['code_billing_status'],\n                        \"code_cms_claim_status\": concept['code_cms_claim_status'],\n                        \"sex_cd\": concept['sex_code'],\n                        \"anat_or_cond\": concept['anatomy_or_condition'],\n                        \"pl_cond_class_cd1\": concept['pl_condition_class_code1'],\n                        \"pl_cond_class_desc1\": concept['pl_condition_class_description1'],\n                        \"pl_cond_class_cd2\": concept['pl_condition_class_code2'],\n                        \"pl_cond_class_desc2\": concept['pl_condition_class_description2'],\n                        \"pl_cond_class_cd3\": concept['pl_condition_class_code3'],\n                        \"pl_cond_class_desc3\": concept['pl_condition_class_description3'],\n                        \"poa_code_status\": concept['poa_code_status'],\n                        \"poa_code_change\": concept['poa_code_change'],\n                        \"poa_code_change_year\": concept['poa_code_change_year'],\n                        \"valid_start_date\": concept['valid_start_date'],\n                        \"valid_end_date\": concept['valid_end_date'],\n                        \"invalid_reason\": concept['invalid_reason']\n                    }\n                    _logger.info(\"Inserting concept_id {0} in database.\".format(concept['concept_id']))\n                    database.add_concept(concept, cnx)\n                    _concepts_inserted += 1\n                else:\n                    _logger.info(\"Updating concept_id {0}\".format(concept['concept_id']))\n                    # look up the stored record by the same composite key built above\n                    database.update_concept(concept, concepts[key], cnx)\n                    _concepts_updated += 1\n            else:\n                message = \"Error in row: %d, missing fields to create the key.\" % row\n                _logger.error(message)\n                print(message)\n                _concepts_errors += 1\n                _errors_file.write(message + '\\n')\n        except Exception as e:\n            message = str(e) + \" file: {0} - row: {1}\".format(file_path, row)\n            _logger.error(message)\n            print(message)\n            _concepts_errors += 1\n            _errors_file.write(message + '\\n')\n        row += 1\n\ndef filters(concept):\n    if concept['standard_concept'] == 'Standard':\n        concept['standard_concept'] = 'S'\n    elif concept['standard_concept'] == 'Non-standard':\n        concept['standard_concept'] = 'N'\n\n    if concept['invalid_reason'] == 'Valid':\n        concept['invalid_reason'] = 'V'\n    elif concept['invalid_reason'] == 'Invalid':\n        concept['invalid_reason'] = 'I'\n    return concept\n\ndef main():\n    global _concepts_read\n    global _concepts_inserted\n    global _concepts_updated\n    global _concepts_errors\n    global _files_read\n    global _results_file\n    global _errors_file\n\n    config = {\n        'user': 'dev',\n        'password': '4g83ytxKJvb1y8p4',\n        'host': '35.231.30.58',\n        'database': 'allconcepts_omop_api',\n        'raise_on_warnings': True\n    }\n\n    cnx = mysql.connector.connect(**config)\n\n    _logger.info('Getting all current domains from database')\n    domains = database.get_current_domains(cnx)\n    _logger.info('Getting all current vocabularies from database')\n    vocabularies = database.get_current_vocabularies(cnx)\n    _logger.info('Getting all current concepts from database')\n    concepts = database.get_current_concepts(cnx)\n\n    _errors_file = open('concepts_etl_errors.log', 'a')\n\n    dir_path = '/home/callanor/Dropbox/my_works/oiga/spa_concepts_groups_etls2/allconcepts_files/'\n    list_files = map(lambda file_name: os.path.join(dir_path, file_name), os.listdir(dir_path))\n    for file_path in list_files:\n        print(\"*********** processing file %s *****************\" % file_path)\n        _logger.info('processing file %s' % file_path)\n        run(file_path, domains, vocabularies, concepts, cnx)\n        _files_read += 1\n    print(\"completed processing of the concepts\")\n    _logger.info('Completed processing of the concepts')\n\n    _errors_file.close()\n\n    _results_file = open('concepts_etl_results.log', 'a')\n    _results_file.write(\"Total files read: {0}\\n\".format(_files_read))\n    _results_file.write(\"Total concepts read: {0}\\n\".format(_concepts_read))\n    _results_file.write(\"Total concepts inserted: {0}\\n\".format(_concepts_inserted))\n    _results_file.write(\"Total concepts updated: {0}\\n\".format(_concepts_updated))\n    _results_file.write(\"Total concepts with errors: {0}\\n\".format(_concepts_errors))\n    _results_file.close()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"medical_etls/concepts_etl.py","file_name":"concepts_etl.py","file_ext":"py","file_size_in_byte":8325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"159534069","text":"import os\nimport dicom\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nfrom glob import glob\nimport re\nfrom skimage import measure, morphology\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom utils.preprocess import ROI\n\nMIN_BOUND = -1000.0\nMAX_BOUND = 300.0\n\ndef normalize(image):\n    image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)\n    image[image>1] = 1.\n    image[image<0] = 0.\n    return zero_center(image)\n\nPIXEL_MEAN = 0.25\n\ndef zero_center(image):\n    image = image - PIXEL_MEAN\n    return image\n\ndef resample(image, scan, new_spacing=[1,1,1]):\n    # Determine current pixel spacing\n    spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))\n    spacing = np.array(list(spacing))\n\n    resize_factor = spacing / new_spacing\n    new_real_shape = image.shape * resize_factor\n    new_shape = np.round(new_real_shape)\n    real_resize_factor = new_shape / image.shape\n    new_spacing = spacing / real_resize_factor\n\n    image = scipy.ndimage.interpolation.zoom(image, real_resize_factor)\n\n    return image, new_spacing\n\ndef load_patient(baseDir, hu=True):\n    files = glob(os.path.join(baseDir, '*', '*'))\n    mask_keyword = re.compile('ROI_MASK')\n    isMask = lambda fileName: len(mask_keyword.findall(fileName)) != 0\n    slicefs = list(filter(lambda f: not isMask(f), files))\n    maskfs = list(filter(isMask, files))\n\n    slices = [dicom.read_file(f) for f in slicefs]\n    masks = [dicom.read_file(f) for f in maskfs]\n\n    slices.sort(key = lambda x: int(x.InstanceNumber))\n    masks.sort(key = lambda x: int(x.InstanceNumber))\n\n    _masks = []\n    for s in slices:\n        _m = None\n        for m in masks:\n            if s.InstanceNumber == m.InstanceNumber:\n                _m = m.pixel_array\n                break\n        if _m is None:\n            _m = np.zeros(s.pixel_array.shape)\n        _masks.append(_m)\n\n    masks = _masks\n\n    try:\n        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])\n    except Exception:\n        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)\n\n    return {\n        'slice_thickness': slice_thickness,\n        'image' : get_pixels_hu(slices) if hu else np.array([s.pixel_array for s in slices]),\n        'mask' : np.array(masks)\n    }\n\ndef get_pixels_hu(scans):\n    image = np.stack([s.pixel_array for s in scans])\n    # Convert to int16 (from sometimes int16), \n    # should be possible as values should always be low enough (<32k)\n    image = image.astype(np.int16)\n\n    # Set outside-of-scan pixels to 0\n    # The intercept is usually -1024, so air is approximately 0\n    image[image == -2000] = 0\n\n    # Convert to Hounsfield units (HU)\n    intercept = scans[0].RescaleIntercept\n    slope = scans[0].RescaleSlope\n\n    if slope != 1:\n        image = slope * image.astype(np.float64)\n        image = image.astype(np.int16)\n\n    image += np.int16(intercept)\n\n    return np.array(image, dtype=np.int16)\n\n
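# Worked example of the HU conversion above: a voxel stored as 1000 with\n# RescaleSlope 1 and RescaleIntercept -1024 maps to 1000 + (-1024) = -24 HU.\n\n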
def plot_3d(image, threshold=-300):\n\n    # Position the scan upright, \n    # so the head of the patient would be at the top facing the camera\n    p = image.transpose(2,1,0)\n    p = p[:,:,::-1]\n\n    verts, faces = measure.marching_cubes(p, threshold)\n\n    fig = plt.figure(figsize=(10, 10))\n    ax = fig.add_subplot(111, projection='3d')\n\n    # Fancy indexing: `verts[faces]` to generate a collection of triangles\n    mesh = Poly3DCollection(verts[faces], alpha=0.1)\n    face_color = [0.5, 0.5, 1]\n    mesh.set_facecolor(face_color)\n    ax.add_collection3d(mesh)\n\n    ax.set_xlim(0, p.shape[0])\n    ax.set_ylim(0, p.shape[1])\n    ax.set_zlim(0, p.shape[2])\n\n    plt.show()\n\n\nclass Patient:\n    def __init__(self, patient):\n        self.patient = patient\n        self.benign = patient['benign']\n        self.image = patient['image']\n        self.num_slice = self.image.shape[0]\n        self.mask = patient['mask']\n        self.background = patient['roi']\n        ### Do some initialization\n        ### like extract lesion coords and save\n        ###\n\n        ### get nodules(either benign or malign) and save\n\n        self.lesions = []\n        self.lesionArea = 0\n        for _idx, mask_layer in enumerate(self.mask[1:-1]):\n            idx = _idx + 1  ## first layer is stripped\n            regions = measure.regionprops(measure.label(self.mask[idx]))\n            if len(regions) == 0 or regions[0].area == 54:\n                pass\n            else:\n                region = regions[0]\n                bbox = region.bbox\n                centroid = region.centroid\n                ## save lesion's centroid\n                x, y = int(centroid[0]), int(centroid[1])\n                self.lesions.append((idx, x, y, region.area))\n                self.lesionArea += region.area\n                ## remove region around lesion from roi(==background)\n                self.background[idx-1:idx+2][:, bbox[0]-48:bbox[2]+48, bbox[1]-48:bbox[3]+48] = 0\n                ## TODO\n\n        self.background[0,:,:] = 0\n        self.background[-1,:,:] = 0\n\n        self.regions = measure.regionprops(measure.label(self.background))\n        self.totalRoiArea = np.sum(list(map(lambda r: r.area, self.regions)))\n        ### prepare non_nodule region(but how? efficiently?)\n        ### np.random.choice((roi - lesionArea).coords) ???\n\n    ### recommended shift = 32\n    def getRandomLesion(self, shift=0, hflip=False, vflip=False):\n        idx, x, y, area = self.lesions[np.random.randint(len(self.lesions))]\n        #print(idx, x, y)\n        xdev, ydev = (0,0) if shift == 0 else np.random.randint(shift * 2, size=2) - shift\n        return self.image[idx][x+xdev-48:x+xdev+48, y+ydev-48:y+ydev+48]\n\n    def getRandomLesion3D(self, shift=0, hflip=False, vflip=False):\n        while True:\n            rand = np.random.randint(self.lesionArea)\n            idx, x, y = 0, None, None\n            for lesion in self.lesions:\n                if rand < lesion[3]:\n                    idx, x, y, area = lesion\n                else:\n                    rand -= lesion[3]\n\n            if x < 48 + shift or x > 512 - 48 - shift or y < 48 + shift or y > 512 - 48 - shift:\n                continue\n\n            #print(idx, x, y)\n            xdev, ydev = (0,0) if shift == 0 else np.random.randint(shift * 2, size=2) - shift\n            ret = self.image[idx-1:idx+2][:,x+xdev-48:x+xdev+48, y+ydev-48:y+ydev+48]\n            if hflip and np.random.randint(2):  # flip with probability 1/2\n                ret = ret[::-1,:]\n            if vflip and np.random.randint(2):\n                ret = ret[:,::-1]\n            return normalize(ret.reshape((3,96,96,1)))\n\n    def isBenign(self):\n        return self.benign\n\n    ## DO USE 'tag' of patient object to rip off unnecessary region\n    ## Exclude EXTERNAL_AIR area, inclu\n    def getRandomBackground3D(self):\n        #roi = self.roi[np.random.randint(self.num_slice - 2) + 1,:,:]\n        rand = np.random.randint(self.totalRoiArea)\n        for region in self.regions:\n            if rand < region.area:\n                coords = region.coords\n                idx, x, y = coords[np.random.randint(coords.shape[0])]\n                if x < 48:\n                    x = 48\n                if x > 512 - 48:\n                    x = 512 - 48\n                if y < 48:\n                    y = 48\n                if y > 512 - 48:\n                    y = 512 - 48\n                ret = self.image[idx-1:idx+2][:,x-48:x+48, y-48:y+48]\n                if ret.shape != (3, 96, 96):\n                    print(idx, x, y)\n                return normalize(ret.reshape((3,96,96,1)))\n            else:\n                rand -= 
region.area\n        pass\n\n    ## To stress on specific regions\n    def memorizeCoord(self, coord):\n        pass\n\nget_fname = lambda p: p['filename']\nload_function = lambda fname: {'filename': fname, 'data': Patient(np.load(fname).all())}\n\ndef data_generator(files, nodule_ratio = 0.5, get_size=1, get_num=16, hand_size = 16, turnover = 1):\n    pool = files[:]\n    hand = []\n    while True:\n        np.random.shuffle(hand)\n        expire = hand[:turnover]\n        expired = list(map(get_fname, expire))\n        hand = hand[turnover:]\n        for elem in expire:\n            del elem\n        del expire\n\n        num_fill = hand_size - len(hand)\n        np.random.shuffle(pool)\n        fill = pool[:num_fill]\n\n        for p in list(map(load_function, fill)):\n            hand.append(p)\n\n        ## return expired files to pool\n        pool = pool + expired\n\n        for i in range(get_num):\n            X = []\n            y = []\n            for p in np.array(hand)[np.random.choice(range(len(hand)), get_size)]:\n                if np.random.rand() < nodule_ratio:\n                    X.append(p['data'].getRandomLesion3D(shift=10, hflip=True, vflip=True))\n                    if p['data'].isBenign():\n                        y.append([0,1,0,1])\n                    else:\n                        y.append([0,0,1,1])\n                else:\n                    X.append(p['data'].getRandomBackground3D())\n                    y.append([1,0,0,0])\n            yield np.stack(X), np.vstack(y)\n\n\ndef symmetric_data_generator(xList, yList, batch_num, sym=False, generate_all=False):\n    num_category = len(xList)\n    while True:\n        w = np.maximum((np.random.randn(num_category) + 1.5), 0) + 0.001\n        nums = np.floor(w / w.sum() * batch_num)\n        nums[np.random.randint(num_category)] += (batch_num - nums.sum())\n        X = []\n        y = []\n        for i in range(num_category):\n            X.append(xList[i][np.random.randint(0,xList[i].shape[0], int(nums[i]))])\n            y.append(np.tile(yList[i], (int(nums[i]), 1)))\n        X = np.vstack(X)\n        y = np.vstack(y)\n        yield (X, y)\n    ","sub_path":"utils/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":9850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"635876648","text":"# -*- coding:utf-8 -*-\nimport json\n\nimport logging\nfrom bs4 import BeautifulSoup\n\nunicode = str  # setuptools.compat is unavailable on Python 3; str is the text type\n\n\ndef find_hoteldetails_tag(tag):\n    return tag.has_attr('heading') and tag['heading'] == '酒店详情'\n\n\ndef find_hotelsettings_tag(tag):\n    return tag.has_attr('heading') and tag['heading'] == '酒店设置'\n\n\ndef find_hotelinfomation_tag(tag):\n    return tag.has_attr('heading') and tag['heading'] == '客房信息'\n\n\ndef json_maker(iterator):\n    return json.dumps(list(iterator), ensure_ascii=False)\n\n\nclass DaoLvHtmlParser:\n    soup = BeautifulSoup(open('/Users/lincanhan/Desktop/道旅酒店详情.html', 'r'))\n\n    def __init__(self, html_doc):\n        \"\"\"\n\n        :type html_doc: the HTML text to parse\n        \"\"\"\n        super().__init__()\n        self.soup = BeautifulSoup(html_doc)\n\n    def get_hotel_details(self):\n        hoteldetais_json = ''\n        try:\n            # extract the hotel details section\n            hoteldetails_tags = self.soup.find_all(find_hoteldetails_tag)\n            hoteldetails_p_list = hoteldetails_tags[0].find_all('p')\n            hoteldetais = {}\n            for p in hoteldetails_p_list:\n                key = unicode(p.b.string)\n                value = unicode(p.b.next_sibling.next_sibling.next_sibling.string)\n                hoteldetais[key] = value\n            hoteldetais_json = json.dumps(hoteldetais, ensure_ascii=False)\n            # print(hoteldetais_json)\n        except BaseException as e:\n            print('Failed to parse the hotel details section')\n        finally:\n            return hoteldetais_json\n\n    def get_hotel_policy(self):\n        hotelpolicy_json = ''\n        try:\n            # extract the hotel policy section\n            hotelpolicy_tbody_tag_list = self.soup.find_all('tbody')\n            hotelpolicy_tbody_th_list = hotelpolicy_tbody_tag_list[0].find_all('th')\n            hotelpolicy = {}\n            
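# BeautifulSoup note: tag.next_sibling is often a whitespace-only text node in\n            # pretty-printed HTML, which is why the loop below hops two siblings to\n            # reach the next element tag.\n            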
for hotelpolicy_tbody_th in hotelpolicy_tbody_th_list:\n                # values for this key\n                temp_list = []\n                # the key text\n                th_name = unicode(hotelpolicy_tbody_th.string)\n                ul = hotelpolicy_tbody_th.next_sibling.next_sibling.ul\n                for string in ul.stripped_strings:\n                    temp_list.append(unicode(string))\n                hotelpolicy[th_name] = temp_list\n            hotelpolicy_json = json.dumps(hotelpolicy, ensure_ascii=False)\n            # print(hotelpolicy_json)\n        except BaseException as e:\n            print('Failed to parse the hotel policy section')\n        finally:\n            return hotelpolicy_json\n\n    def get_hotel_setting(self):\n        hotelsettings_json = ''\n        try:\n            # extract the hotel settings section\n            hotelsetting_tags = self.soup.find_all(find_hotelsettings_tag)\n            hotelsettings_strings = hotelsetting_tags[0].div.div.div.ul.stripped_strings\n            hotelsettings_json = json_maker(hotelsettings_strings)\n            # print(hotelsettings_json)\n        except BaseException as e:\n            print('Failed to parse the hotel settings section')\n        finally:\n            return hotelsettings_json\n\n    def get_hotel_info(self):\n        hotelinfo_json = ''\n        try:\n            # extract the room information section\n            hotelinfo_tags = self.soup.find_all(find_hotelinfomation_tag)\n            hotelinfo_strings = hotelinfo_tags[0].div.div.div.ul.stripped_strings\n            hotelinfo_json = json_maker(hotelinfo_strings)\n            # print(hotelinfo_json)\n        except BaseException as e:\n            print('Failed to parse the room information section')\n        finally:\n            return hotelinfo_json\n\n    def parse_html(self):\n        parsed_dict_data = {}\n        parsed_dict_data['HotelDetails'] = self.get_hotel_details()\n        parsed_dict_data['HotelInformation'] = self.get_hotel_info()\n        parsed_dict_data['HotelPolicy'] = self.get_hotel_policy()\n        parsed_dict_data['HotelSettings'] = self.get_hotel_setting()\n        return parsed_dict_data\n\n# daoLvHtmlParser = DaoLvHtmlParser()\n# daoLvHtmlParser.get_hotel_detais()\n# daoLvHtmlParser.get_hotel_info()\n# daoLvHtmlParser.get_hotel_policy()\n# daoLvHtmlParser.get_hotel_setting()\n","sub_path":"daolv_html_parser.py","file_name":"daolv_html_parser.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"637813707","text":"import pandas as pd\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n\ntraining_set = pd.read_csv(\"train_ZoGVYWq.csv\")\n\n\ninput_fn = tf.estimator.inputs.pandas_input_fn(training_set, shuffle=True, target_column='sourcing_channel')\n# sanitize data: a vocabulary column is keyed by the feature name, not by the input function\nsourcing_channel_vocab = tf.feature_column.categorical_column_with_vocabulary_list(key='sourcing_channel', vocabulary_list=[\"A\", \"B\", \"C\", \"D\"])\n# the feature column above is meant to be handed to an estimator, not stored back into the DataFrame\n\n# train_test_split returns one train/test pair per array passed in\nx_train, x_test = train_test_split(\n    training_set, test_size=0.33, random_state=42)\n\n\ndef preprocess_features(training_set):\n\n    selected_features = training_set[\n        [\"perc_premium_paid_by_cash_credit\",\n         \"age_in_days\",\n         \"Count_3-6_months_late\",\n         \"Count_6-12_months_late\",\n         \"Count_more_than_12_months_late\",\n         \"application_underwriting_score\",\n         \"no_of_premiums_paid\",\n         \"sourcing_channel\",\n         \"residence_area_type\",\n         \"premium\"]]\n\n    processed_features = selected_features.copy()\n\n    # add synthetic features here\n\n    return processed_features\n","sub_path":"kckinsey2.py","file_name":"kckinsey2.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"176699511","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2019/12/16 11:15 AM\n# @Author : MaybeShewill-CV\n# @Site : https://github.com/MaybeShewill-CV/bisenetv2-tensorflow\n# @File : test_bisenetv2_cityscapes.py\n# @IDE: PyCharm\n\n# Refactor : Janghoo Lee, 
ProtossDragoon\n# IDE : Google COLAB, VSCode\n# Refactor site : https://github.com/ProtossDragoon/bisenetv2-tensorflow\n\"\"\"\nSet Environment for Google COLAB, VSCode\n\"\"\"\n# import essential library\nimport os\np = os.path.dirname(os.path.abspath(__file__))\nprint('Current File Path : {}'.format(p))\nhard_coded_project_root_path = os.path.abspath(os.path.join(p, os.pardir, os.pardir))\nprint('Project Root Path : {} (Hardcoded)'.format(hard_coded_project_root_path))\nimport sys\nif sys.path[0] != hard_coded_project_root_path:\n sys.path.insert(0, hard_coded_project_root_path)\n\n\"\"\"\nTest bisenetv2 on cityspaces dataset\n\"\"\"\n# import essential library\nimport argparse\nimport cv2\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom sklearn.metrics import confusion_matrix\n\n# import bisenetv2-tensorflow module\nfrom bisenet_model import bisenet_v2\nfrom local_utils.config_utils import parse_config_utils\n\nCFG = parse_config_utils.cityscapes_cfg_v2\nLABEL_CONTOURS = [(0, 0, 0), # 0=road\n # 1=sidewalk, 2=building, 3=wall, 4=fence, 5=pole\n (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128),\n # 6=traffic light, 7=traffic sign, 8=vegetation, 9=terrain, 10=sky\n (0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0),\n # 11=person, 12=rider, 13=car, 14=truck, 15=bus\n (192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),\n # 16=train, 17=motorcycle, 18=bicycle\n (0, 64, 0), (128, 64, 0), (0, 192, 0)]\n\n\ndef init_args():\n \"\"\"\n\n :return:\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--src_image_path', type=str, help='The input source image')\n parser.add_argument('-w', '--weights_path', type=str, help='The model weights file path')\n\n return parser.parse_args()\n\n\ndef decode_prediction_mask(mask):\n \"\"\"\n\n :param mask:\n :return:\n \"\"\"\n mask_shape = mask.shape\n mask_color = np.zeros(shape=[mask_shape[0], mask_shape[1], 3], dtype=np.uint8)\n\n unique_label_ids = [v for v in np.unique(mask) if v != 0 and v != 255]\n\n for label_id in unique_label_ids:\n idx = np.where(mask == label_id)\n mask_color[idx] = LABEL_CONTOURS[label_id]\n\n return mask_color\n\n\ndef preprocess_image(src_image, input_tensor_size):\n \"\"\"\n\n :param src_image:\n :param input_tensor_size:\n :return:\n \"\"\"\n output_image = src_image[:, :, (2, 1, 0)]\n output_image = cv2.resize(\n output_image,\n dsize=(input_tensor_size[0], input_tensor_size[1]),\n interpolation=cv2.INTER_LINEAR\n )\n output_image = output_image.astype('float32') / 255.0\n img_mean = np.array(CFG.DATASET.MEAN_VALUE).reshape((1, 1, len(CFG.DATASET.MEAN_VALUE)))\n img_std = np.array(CFG.DATASET.STD_VALUE).reshape((1, 1, len(CFG.DATASET.STD_VALUE)))\n output_image -= img_mean\n output_image /= img_std\n return output_image\n\n\ndef compute_iou(y_pred, y_true, num_classes):\n \"\"\"\n\n :param y_pred:\n :param y_true:\n :param num_classes:\n :return:\n \"\"\"\n y_pred = y_pred.flatten()\n y_true = y_true.flatten()\n idx = np.where(y_true <= num_classes - 1)\n y_pred = y_pred[idx]\n y_true = y_true[idx]\n current = confusion_matrix(y_true, y_pred)\n # compute mean iou\n intersection = np.diag(current)\n ground_truth_set = current.sum(axis=1)\n predicted_set = current.sum(axis=0)\n union = ground_truth_set + predicted_set - intersection\n iou = intersection / union.astype(np.float32)\n\n return np.mean(iou)\n\n\ndef test_bisenet_cityspaces(image_path, image_path_isdir, weights_path):\n \"\"\"\n\n 
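Run BiseNetV2 inference on a single image, or on every image in a directory, timing the forward passes and saving color-coded prediction masks.\n\n    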
:param image_path:\n :param image_path_isdir:\n :param weights_path:\n :return:\n \"\"\"\n # define bisenet\n input_tensor_size = CFG.AUG.EVAL_CROP_SIZE\n input_tensor_size = [int(tmp / 2) for tmp in input_tensor_size]\n input_tensor = tf.placeholder(\n dtype=tf.float32,\n shape=[1, input_tensor_size[1], input_tensor_size[0], 3],\n name='input_tensor'\n )\n bisenet_model = bisenet_v2.BiseNetV2(phase='test', cfg=CFG)\n prediction = bisenet_model.inference(\n input_tensor=input_tensor,\n name='BiseNetV2',\n reuse=False\n )\n\n # define session and gpu config\n sess_config = tf.ConfigProto(allow_soft_placement=True)\n sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.GPU.GPU_MEMORY_FRACTION\n sess_config.gpu_options.allow_growth = CFG.GPU.TF_ALLOW_GROWTH\n sess_config.gpu_options.allocator_type = 'BFC'\n sess = tf.Session(config=sess_config)\n\n # define moving average version of the learned variables for eval\n with tf.variable_scope(name_or_scope='moving_avg'):\n variable_averages = tf.train.ExponentialMovingAverage(\n CFG.SOLVER.MOVING_AVE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n\n # define saver\n saver = tf.train.Saver(variables_to_restore)\n\n # run net and decode output prediction\n if image_path_isdir:\n \n image_list = os.listdir(image_path)\n # remove dummy file\n print(image_list)\n if '.ipynb_checkpoints' in image_list:\n image_list.pop(image_list.index('.ipynb_checkpoints'))\n loop_times = len(image_list)\n print('{} image(s) detected'.format(loop_times))\n t_cumm_cost = 0\n\n with sess.as_default():\n saver.restore(sess, weights_path)\n for imname in image_list:\n # prepare input images\n print('image {} reading'.format(imname))\n src_image = cv2.imread(os.path.join(image_path, imname), cv2.IMREAD_COLOR)\n print('image {} : shape {}'.format(imname, src_image.shape))\n preprocessed_image = preprocess_image(src_image, input_tensor_size) \n\n t_loop_start = time.time()\n prediction_value = sess.run(\n fetches=prediction,\n feed_dict={\n input_tensor: [preprocessed_image]\n }\n )\n prediction_value = np.squeeze(prediction_value, axis=0)\n prediction_value = cv2.resize(\n prediction_value,\n dsize=(input_tensor_size[0] * 2, input_tensor_size[1] * 2),\n interpolation=cv2.INTER_NEAREST\n )\n print('Prediction mask unique label ids: {}'.format(np.unique(prediction_value)))\n prediction_mask_color = decode_prediction_mask(prediction_value)\n\n t_loop_end = time.time()\n t_cumm_cost += (t_loop_end - t_loop_start)\n \n data_src_dirname = image_path.split('/')[-1]\n save_dir = os.path.join(hard_coded_project_root_path, 'data', 'test_image', data_src_dirname, 'output.'+imname)\n cv2.imwrite(save_dir, prediction_mask_color)\n print('save as : {}\\n'.format(save_dir))\n\n t_cost = (t_cumm_cost) / loop_times\n print('Mean cost time (inference ~ reshape ~ mapping): {:.5f}s'.format(t_cost))\n print('Mean fps (inference ~ reshape ~ mapping): {:.5f}fps'.format(1.0 / t_cost))\n\n\n else:\n # prepare input image\n src_image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n preprocessed_image = preprocess_image(src_image, input_tensor_size) \n with sess.as_default():\n saver.restore(sess, weights_path)\n\n t_start = time.time()\n loop_times = 2000\n for i in range(loop_times):\n prediction_value = sess.run(\n fetches=prediction,\n feed_dict={\n input_tensor: [preprocessed_image]\n }\n )\n t_cost = (time.time() - t_start) / loop_times\n print('Mean cost time: {:.5f}s'.format(t_cost))\n print('Mean fps: {:.5f}fps'.format(1.0 / t_cost))\n prediction_value = 
np.squeeze(prediction_value, axis=0)\n prediction_value = cv2.resize(\n prediction_value,\n dsize=(input_tensor_size[0] * 2, input_tensor_size[1] * 2),\n interpolation=cv2.INTER_NEAREST\n )\n\n print('Prediction mask unique label ids: {}'.format(np.unique(prediction_value)))\n\n prediction_mask_color = decode_prediction_mask(prediction_value)\n plt.figure('src_image')\n plt.imshow(src_image[:, :, (2, 1, 0)])\n plt.figure('prediction_mask_color')\n plt.imshow(prediction_mask_color[:, :, (2, 1, 0)])\n plt.show()\n\n data_src_dirname = image_path.split('/')[-2]\n save_dir=os.path.join(hard_coded_project_root_path, 'data', 'test_image', data_src_dirname, 'output.'+imname)\n\n cv2.imwrite(save_dir, prediction_mask_color)\n print('save as : {}'.format(save_dir))\n\nif __name__ == '__main__':\n \"\"\"\n test code\n \"\"\"\n args = init_args()\n\n print('-------version-------')\n print('tensorflow version {}'.format(tf.__version__))\n print('---------------------\\n')\n\n print('-------path parsing-------')\n print(hard_coded_project_root_path)\n parsed_path = args.weights_path\n if parsed_path[0] == '.':\n parsed_path = parsed_path[1:]\n if parsed_path[0] == '/':\n parsed_path = parsed_path[1:]\n print(parsed_path)\n weights_path = hard_coded_project_root_path + '/' + parsed_path\n print(weights_path)\n parsed_path = args.src_image_path\n if parsed_path[0] == '.':\n parsed_path = parsed_path[1:]\n if parsed_path[0] == '/':\n parsed_path = parsed_path[1:]\n print(parsed_path)\n image_path = hard_coded_project_root_path + '/' + parsed_path\n print(image_path)\n if os.path.isdir(image_path) == True:\n print('{} is a path'.format(image_path))\n else:\n print('{} is is a file'.format(image_path))\n print('--------------------------\\n')\n\n test_bisenet_cityspaces(\n # image_path=args.src_image_path,\n image_path=image_path,\n image_path_isdir=os.path.isdir(image_path),\n # weights_path=args.weights_path\n weights_path=weights_path\n )","sub_path":"tools/cityscapes/test_bisenetv2_cityscapes.py","file_name":"test_bisenetv2_cityscapes.py","file_ext":"py","file_size_in_byte":10590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"118940155","text":"import argparse\nimport json\nimport os\nfrom collections import OrderedDict\n\n\ndef argparse_config_filename():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\", type=str, help=\"Service level agreement config\")\n args = parser.parse_args()\n return args.config\n\n\ndef get_arg(self, arg):\n return getattr(self, arg)\n\n\ndef set_arg(self, arg, value):\n setattr(self, arg, value)\n return self\n\n\ndef return_classname(classname):\n return 'class {}(object):\\n'.format(classname)\n\n\ndef return__init(argnames):\n s = \"\\n\\tdef __init__(self, \"\n for arg in argnames:\n s = s + \"{}=None, \".format(arg)\n s = s + \"**kwargs):\\n\"\n for arg in argnames:\n s = s + \"\\t\\tself.{} = {}\\n\".format(arg, arg)\n return s.replace('\\t', ' ')\n\n\ndef return_get_methods(argnames):\n ret_methods = \"\"\n for arg in argnames:\n s = \"\\tdef get_{}(self):\\n\\t\\treturn self.{}\\n\".format(arg, arg)\n ret_methods = ret_methods + \"\\n\" + s\n return ret_methods.replace('\\t', ' ')\n\n\ndef return_set_methods(argnames):\n ret_methods = \"\"\n for arg in argnames:\n s = \"\\tdef set_{}(self, {}):\\n\\t\\tself.{} = {}\\n\\t\\treturn self\\n\".format(arg, arg, arg, arg)\n ret_methods = ret_methods + \"\\n\" + s\n return ret_methods.replace('\\t', ' ')\n\n\ndef create_classes(classname, 
argnames):\n    body = {\"__module__\": \"types.service\"}\n    for arg in argnames:\n        body[\"get_{}\".format(arg)] = get_arg\n        body[\"set_{}\".format(arg)] = set_arg\n    newclass = type(str(classname), (), body)\n    directory_path = 'service/types/'\n    directory = os.path.dirname(directory_path)\n    if not os.path.exists(directory_path):\n        os.makedirs(directory_path)\n    fp = open('service/types/{}.py'.format(classname), 'w+')\n    fp.write(return_classname(classname))\n    fp.write(return__init(argnames))\n    fp.write(return_get_methods(argnames))\n    fp.write(return_set_methods(argnames))\n    fp.close()\n    return newclass\n\n\ndef process_types(types):\n    klass_list = []\n    for t in types:\n        fields = []\n        for field in t.get('fields').keys():\n            fields.append(field)\n        klass = create_classes(t.get(\"name\"), fields)\n        klass_list.append(klass)\n    return klass_list\n\n\ndef main():\n    config_filename = argparse_config_filename()\n    fp = open(config_filename, 'r')\n    sla_config = json.load(fp, object_pairs_hook=OrderedDict)\n    klass_list = process_types(sla_config.get(\"types\"))\n\n\nmain()\n","sub_path":"types/generate_type_classes.py","file_name":"generate_type_classes.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"641414829","text":"from django.shortcuts import (\n    render, get_object_or_404,\n)\nfrom django.db.models import Max\n\nfrom surfers.models import (\n    Surfer, Shaper, Surfboard\n)\n\ndef index(request):\n    return render(request, 'index.html', {\n        'new_surfboards': Surfboard.objects.order_by('-created_at')[:3],\n        'updated_shapers': Shaper.objects.annotate(recent=Max('surfboard__created_at')).order_by('-recent')[:2]\n    })\n\ndef surfers(request):\n    surfers = Surfer.objects.all()\n    return render(request, 'surfers.html', {\n        'surfers': surfers,\n    })\n\ndef surfer(request, surfer_pk):\n    surfer = get_object_or_404(Surfer, pk=surfer_pk)\n    return render(request, 'surfer.html', {\n        'surfer': surfer,\n    })\n\ndef shapers(request):\n    shapers = Shaper.objects.all()\n    return render(request, 'shapers.html', {\n        'shapers': shapers,\n    })\n\n\ndef shaper(request, shaper_pk):\n    shaper = get_object_or_404(Shaper, pk=shaper_pk)\n    return render(request, 'shaper.html', {\n        'shaper': shaper,\n    })\n\ndef surfboards(request):\n    surfboards = Surfboard.objects.all()\n    return render(request, 'surfboards.html', {\n        'surfboards': surfboards,\n    })\n\n\ndef surfboard(request, surfboard_pk):\n    surfboard = get_object_or_404(Surfboard, pk=surfboard_pk)\n    return render(request, 'surfboard.html', {\n        'surfboard': surfboard,\n    })\n","sub_path":"surfers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"150462194","text":"htonum={1:5,13:5,2:10,14:10,3:15,15:15,4:20,16:20,5:25,17:25,6:30,18:30,7:35,19:35,8:40,20:40,9:45,21:45,10:50,22:50,11:55,23:55,12:0,24:0,00:0}\r\ndef clock_angle(time):\r\n    try:\r\n        minnum = int(time[3:])\r\n    except Exception:\r\n        minnum = 0\r\n    try:\r\n        sideh = int(time[:2])\r\n    except Exception:\r\n        sideh = 0\r\n    for hour, num in htonum.items():\r\n        if sideh == hour:\r\n            diff = abs((num)+(minnum/60)*5-minnum)\r\n            if diff > 30:\r\n                return round(6*(60-diff),1)\r\n            elif diff == 0:\r\n                return 0\r\n            else:\r\n                return round(6*diff,1)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    # These \"asserts\" are used only for self-checking and are not necessary for auto-testing\r\n    assert clock_angle(\"02:30\") == 105, 
\"02:30\"\r\n assert clock_angle(\"13:42\") == 159, \"13:42\"\r\n assert clock_angle(\"01:42\") == 159, \"01:42\"\r\n assert clock_angle(\"01:43\") == 153.5, \"01:43\"\r\n assert clock_angle(\"00:00\") == 0, \"Zero\"\r\n assert clock_angle(\"12:01\") == 5.5, \"Little later\"\r\n assert clock_angle(\"18:00\") == 180, \"Opposite\"\r\n\r\n print(\"Now that you're finished, hit the 'Check' button to review your code and earn sweet rewards!\")\r\n","sub_path":"G-clock_angle.py","file_name":"G-clock_angle.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"307293275","text":"from pyrogram import Client, filters\nfrom pyrogram.types import CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup\nfrom solidAPI import emoji, get_sudos\nfrom solidAPI.chat import add_chat, set_lang\nfrom solidAPI.other import get_message as gm\n\nfrom base.player import player\nfrom utils.functions import res_music\nfrom utils.pyro_utils import music_result\n\n\ndef play_next_keyboard(user_id: int):\n i = 5\n for j in range(5):\n i += 1\n yield InlineKeyboardButton(f\"{i}\", callback_data=f\"nextplay {j}|{user_id}\")\n j += 1\n\n\ndef play_back_keyboard(user_id: int):\n i = 0\n for j in range(5):\n i += 1\n yield InlineKeyboardButton(f\"{i}\", callback_data=f\"play {j}|{user_id}\")\n j += 1\n\n\nasync def edit_inline_text(\n inline_board: list[InlineKeyboardButton],\n temp: list,\n keyboard: list,\n cb: CallbackQuery,\n user_id: int,\n stats: str,\n k: int,\n music: list,\n bot_username: str,\n):\n chat_id = cb.message.chat.id\n results = res_music(k, music, bot_username, chat_id)\n for count, j in enumerate(inline_board, start=1):\n temp.append(j)\n if count % 3 == 0:\n keyboard.append(temp)\n temp = []\n if count == len(inline_board):\n keyboard.append(temp)\n await cb.edit_message_text(\n f\"{results}\",\n reply_markup=InlineKeyboardMarkup(\n [\n keyboard[0],\n keyboard[1],\n [\n InlineKeyboardButton(f\"{emoji.RIGHT_ARROW}\", f\"next|{user_id}\")\n if stats == \"next\"\n else InlineKeyboardButton(\n f\"{emoji.LEFT_ARROW}\", callback_data=f\"back|{user_id}\"\n ),\n InlineKeyboardButton(\n f\"{gm(chat_id, 'close_btn_name')} {emoji.WASTEBASKET}\", f\"close|{user_id}\"\n ),\n ],\n ]\n ),\n disable_web_page_preview=True,\n )\n\n\nasync def play_music(cb, music, index, chat_id, user_id):\n title: str = music[index][\"title\"]\n uri: str = music[index][\"url\"]\n duration = music[index][\"duration\"]\n yt_id = music[index][\"id\"]\n result = {\"title\": title, \"uri\": uri, \"duration\": duration, \"user_id\": user_id, \"yt_id\": yt_id}\n music_result[chat_id].clear()\n await player.play(cb, result)\n\n\nasync def get_infos(client, cb, k):\n bot_username = (await client.get_me()).username\n chat_id = cb.message.chat.id\n user_id = int(cb.data.split(\"|\")[1])\n music = music_result[chat_id][k]\n return bot_username, user_id, music\n\n\n@Client.on_callback_query(filters.regex(pattern=r\"close\"))\nasync def close_button(_, cb: CallbackQuery):\n callback = cb.data.split(\"|\")\n user_id = int(callback[1])\n message = cb.message\n from_user_id = cb.from_user.id\n chat_id = message.chat.id\n person = await message.chat.get_member(from_user_id)\n if from_user_id != user_id:\n return await cb.answer(gm(chat_id, \"not_for_you\"), show_alert=True)\n music_result[chat_id].clear()\n if person.status in [\"creator\", \"administrator\", get_sudos(chat_id)]:\n return await message.delete()\n return await 
message.delete()\n\n\n@Client.on_callback_query(filters.regex(pattern=r\"cls\"))\nasync def close_private_button(_, cb: CallbackQuery):\n return await cb.message.delete()\n\n\n@Client.on_callback_query(filters.regex(pattern=r\"set_lang_(.*)\"))\nasync def change_language_(_, cb: CallbackQuery):\n lang = cb.matches[0].group(1)\n chat = cb.message.chat\n try:\n set_lang(chat.id, lang)\n await cb.edit_message_text(gm(chat.id, \"lang_changed\"))\n except KeyError:\n add_chat(chat.id, lang)\n await cb.edit_message_text(gm(chat.id, \"lang_changed\"))\n\n\n@Client.on_callback_query(filters.regex(pattern=r\"(.*)play\"))\nasync def play_music_(_, cb: CallbackQuery):\n match = cb.matches[0].group(1)\n data = cb.data.split(\"|\")\n user_id = int(data[1])\n index = int(data[0].split(\" \")[1])\n chat_id = cb.message.chat.id\n from_id = cb.from_user.id\n if from_id != user_id:\n return await cb.answer(gm(chat_id, \"not_for_you\"), show_alert=True)\n if not match:\n music = music_result[chat_id][0]\n await play_music(cb, music, index, chat_id, user_id)\n if match:\n music = music_result[chat_id][1]\n await play_music(cb, music, index, chat_id, user_id)\n\n\n@Client.on_callback_query(filters.regex(pattern=r\"next\"))\nasync def next_music_(client: Client, cb: CallbackQuery):\n bot_username, user_id, music = await get_infos(client, cb, 1)\n from_id = cb.from_user.id\n chat_id = cb.message.chat.id\n if from_id != user_id:\n return await cb.answer(gm(chat_id, \"not_for_you\"), show_alert=True)\n k = 5\n temp = []\n keyboard = []\n inline_board = list(play_next_keyboard(user_id))\n await edit_inline_text(\n inline_board, temp, keyboard, cb, user_id, \"back\", k, music, bot_username\n )\n\n\n@Client.on_callback_query(filters.regex(pattern=r\"back\"))\nasync def back_music_(client: Client, cb: CallbackQuery):\n bot_username, user_id, music = await get_infos(client, cb, 0)\n from_id = cb.from_user.id\n chat_id = cb.message.chat.id\n if from_id != user_id:\n return await cb.answer(gm(chat_id, \"not_for_you\"), show_alert=True)\n k = 0\n temp = []\n keyboard = []\n inline_board = list(play_back_keyboard(user_id))\n await edit_inline_text(\n inline_board, temp, keyboard, cb, user_id, \"next\", k, music, bot_username\n )\n","sub_path":"handlers/callbackhandlers.py","file_name":"callbackhandlers.py","file_ext":"py","file_size_in_byte":5528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"332720588","text":"import requests\nimport json\nimport random\nimport time\n\nfrom player import Player\nfrom api import url, key, opposite, Queue\n\n\ndef explore_random():\n \"\"\"\n Returns a random unexplored exit direction from the current room\n \"\"\"\n directions = player.current_room[\"exits\"]\n room_id = str(player.current_room[\"room_id\"])\n unexplored = [d for d in directions if player.graph[room_id][d] == '?']\n return unexplored[random.randint(0, len(unexplored)-1)]\n\n\ndef dft_for_dead_end():\n \"\"\"\n Performs depth-first traversal to explore random unexplored paths until\n finding a dead end (either no other exits at all, or no unexplored exits)\n \"\"\"\n while '?' in list(player.graph[str(player.current_room[\"room_id\"])].values()):\n # Grab direction that leads to unexplored exit\n next_dir = explore_random()\n # Travel there\n player.travel(next_dir)\n\n\ndef generate_path(target):\n \"\"\"\n Performs BFS to find shortest path to target room. 
If \"?\" passed instead of target room id,\n finds closest room with an unexplored exit.\n Returns the first path to meet the specified criteria.\n \"\"\"\n # Create an empty queue and enqueue a PATH to the current room\n q = Queue()\n q.enqueue([str(player.current_room[\"room_id\"])])\n # Create a Set to store visited rooms\n v = set()\n\n while q.size() > 0:\n p = q.dequeue()\n last_room = str(p[-1])\n if last_room not in v:\n # Check if target among exits (either a \"?\" or specific ID)\n if target in list(player.graph[last_room].values()):\n # >>> IF YES, RETURN PATH (excluding starting room)\n if target != \"?\":\n # final_dir = next(\n # (k for k, v in player.graph[last_room].items() if str(v) == target), '?')\n # final_dir ='?'\n\n # for d in player.graph[last_room]:\n # if player.graph[last_room][d] is target:\n # final_dir=d\n p.append(target)\n print(p[1:])\n\n return p[1:]\n # Else mark it as visited\n v.add(last_room)\n # Then add a PATH to its neighbors to the back of the queue\n for direction in player.graph[last_room]:\n if player.graph[last_room][direction] != '?':\n path_copy = p.copy()\n path_copy.append(player.graph[last_room][direction])\n q.enqueue(path_copy)\n\n\ndef travel_to_target(target='?'):\n \"\"\"\n Runs a BFS to specific room or to nearest room with unexplored exit,\n then moves through that path in order.\n \"\"\"\n if player.current_room[\"room_id\"] == target:\n return\n bfs_path = generate_path(target)\n print(f\"new path to follow! {bfs_path}\")\n while bfs_path is not None and len(bfs_path) > 0:\n next_room = bfs_path.pop(0)\n current_id = str(player.current_room[\"room_id\"])\n next_direction = next(\n (k for k, v in player.graph[current_id].items() if v == next_room), None)\n player.travel(next_direction)\n\n\ndef explore_maze():\n \"\"\"\n While the player's map is shorter than the number of rooms, continue looping\n through DFT until a dead end OR already fully-explored room is found,\n then perform BFS to find shortest path to room with unexplored path and go there.\n \"\"\"\n while len(player.graph) < 500:\n dft_for_dead_end()\n travel_to_target()\n print(\"Map complete!\")\n\n\n\ndef get_name(name):\n\n #Make list of treasure rooms\n treasure_rooms = []\n for k, v in player.map.items():\n if \"tiny treasure\" in v[\"items\"]:\n treasure_rooms.append(k)\n print(\"The following rooms have treasure:\", treasure_rooms)\n\n while player.gold < 1000: #This is automatically updated, otherwise have to check server\n while player.encumbrance < player.strength:\n #find room with treasure\n # go there\n print\n current_treasure_room = treasure_rooms[0]\n travel_to_target(int(current_treasure_room))\n\n # pick up treasure\n # while there are still items to pick up:\n #while len(player.map[str(player.current_room[\"room_id\"])][\"items\"]) > 0:\n player.pick_up_loot(\"tiny treasure\")\n\n # update map entry for room to reflect taken treasure\n player.map[current_treasure_room][\"items\"] = []\n player._write_file('map.txt', player.map)\n treasure_rooms = treasure_rooms[1:]\n\n # If all treasure in map has been taken, go straight to shop\n if len(treasure_rooms) < 1:\n break\n\n # travel to shop\n # sell all items in inventory\n sell_loot()\n # travel to Pirate Ry's\n travel_to_target(467)\n # purchase name \n player.buy_name(name)\n\ndef sell_loot():\n travel_to_target(1)\n time.sleep(player.cooldown)\n print(player.inventory)\n for item in player.inventory:\n print(\"in for loop\")\n json = {\"name\": item}\n print(json)\n r1 = 
requests.post(f\"{url}/api/adv/sell/\", headers={'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}, json = json).json()\n time.sleep(r1['cooldown'])\n json['confirm'] = \"yes\"\n r1_conf = requests.post(f\"{url}/api/adv/sell/\", headers={'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}, json = json).json()\n print(r1_conf)\n time.sleep(r1_conf['cooldown'])\n player.check_self()\n\n\ndef acquire_powers():\n \"\"\"\n After maze has been generated, now go to shrines and acquire powers by praying.\n Order of importance is flight -> dash -> everything else if ready.\n \"\"\"\n\n\n\n\nplayer = Player()\n# get_name(\"Madera\") # to my teammates... change this.\nget_name('Khaled')\n\n\nif __name__ == '__main__':\n print(player.current_room)\n running = True\n command_list = {\n \"moveTo\": {\"call\": player.travel, \"arg_count\": 1}, # moveTo n\n \"buildMap\": {\"call\": explore_maze, \"arg_count\": 0},\n \"travelTo\": {\"call\": travel_to_target, \"arg_count\": 1}, # travelTo roomid\n \"loot\": {\"call\": player.pick_up_loot, \"arg_count\": 1}, # loot 'tiny treasure'\n \"drop\": {\"call\": player.drop_loot, \"arg_count\": 1}, # drop 'tiny treasure'\n # \"mine\": {\"call\": player.mine, \"arg_count\": 0},\n \"sellLoot\":{\"call\": sell_loot, \"arg_count\": 0},\n \"roomDetails\": {\"call\": player.check_room, \"arg_count\": 0}\n }\n\n while running:\n user_data = input('Enter command: ').split(' ')\n\n cmd = user_data[0]\n args = user_data[1:]\n\n for i, v in enumerate(args):\n if v.isdigit():\n args[i] = int(v)\n\n if cmd == 'quit':\n running = False\n\n elif cmd not in command_list:\n print(\"That Command is not part of our command list try again.\")\n\n else:\n if command_list[cmd][\"arg_count\"] == 1:\n command_list[cmd]['call'](\" \".join(args) if len(args) > 1 else args[0])\n elif command_list[cmd][\"arg_count\"] == 0:\n command_list[cmd]['call']()","sub_path":"adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"25862927","text":"#Mohammed, Nolan, Rami, Abhi\r\nimport turtle as trtl\r\nimport time\r\nt = trtl.Turtle()\r\nwn = trtl.Screen()\r\n\r\n#screen size set to be proportional to coordinates.\r\nwn.setup(800, 800)\r\n#Max speed\r\nt.speed(0)\r\n#pensize and turtle shape\r\nt.pensize(3)\r\n\r\nmult = 40.0\r\n\r\n\r\n\r\ndef graph():\r\n wn.bgpic('800x800_coord.png')\r\n while True:\r\n try:\r\n eq = trtl.textinput('Calculator', (\"What is the equation? (Please enter eq with terms in order of degree. 
If you do not, your equation will not be right): \"))\r\n            eq = eq.replace(' ', '')\r\n            eq = eq.replace('x', '')\r\n            eq = eq.replace('y=', '')\r\n            eq = eq.replace('+', '')\r\n            eq = list(eq)\r\n            global a\r\n            global b\r\n\r\n            if len(eq) == 2:\r\n                while True:\r\n                    try:\r\n                        neg_1 = eq.index('-')\r\n                        eq.pop(neg_1)\r\n                        if neg_1 == 0:\r\n                            a = float(eq[2])*-1\r\n                        break\r\n                    except ValueError:\r\n                        a = float(eq[0])\r\n                        b = float(eq[1])\r\n                        break\r\n            elif len(eq) == 3:\r\n                while True:\r\n                    try:\r\n                        a = float(eq[0])\r\n                        neg_1 = eq.index('-')\r\n                        if neg_1 == 1:\r\n                            b = float(eq[2])*-1\r\n                        break\r\n                    except ValueError:\r\n                        neg_1 = eq.index('-')\r\n                        a = float(eq[1])*-1\r\n                        b = float(eq[2])\r\n                        break\r\n            elif len(eq) == 4:\r\n                neg_1 = eq.index('-')\r\n                eq.pop(neg_1)\r\n                if neg_1 == 0:\r\n                    a = float(eq[2])*-1\r\n                neg_1 = eq.index('-')\r\n                if neg_1 == 1:\r\n                    b = float(eq[2])*-1\r\n\r\n            break\r\n\r\n        except Exception:\r\n            print(\"Invalid Format of eq. Format as: y=mx+b\")\r\n            continue\r\n\r\n\r\n\r\n    mult = float(40)\r\n\r\n    t.penup()\r\n    x = (-10-b)/a\r\n    t.goto(x*mult, -10*mult)\r\n    top_heading = -90-(45/a)\r\n\r\n    t.setheading(top_heading)\r\n    t.backward(21.66)\r\n    t.setheading(top_heading + 90)\r\n    t.pendown()\r\n    l = 25\r\n    t.backward(l/2)\r\n    t.fillcolor('black')\r\n    t.begin_fill()\r\n    for i in range(3):\r\n        t.fd(l)\r\n        t.right(120)\r\n    t.end_fill()\r\n\r\n    x_neg = float(-10)\r\n    y_neg = float(a*x_neg + b)\r\n\r\n    while y_neg <= -10:\r\n        wn.update()\r\n        y_neg = float(-1*(abs(a*x_neg)) + b)\r\n        x_neg += 0.01\r\n\r\n    t.penup()\r\n    t.goto(x_neg*mult, y_neg*mult)\r\n    t.pendown()\r\n\r\n    x = x_neg\r\n    while True:\r\n        t.pendown()\r\n        y = a*x + b\r\n        t.goto(x*mult, y*mult)\r\n        x += 1\r\n        lol = t.ycor()\r\n        limit = float((int(lol)/40))\r\n        xint = x\r\n        xint = xint//1\r\n        yint = y\r\n        yint = yint//1\r\n        if xint == 0:\r\n            # t.write(0, t.ycor(), font = (25))\r\n            print(x, y)\r\n\r\n\r\n        # t.write(t.xcor(), 0, font = (25))\r\n\r\n        if a >= 0:\r\n            if limit >= 10:\r\n                print('done+')\r\n                break\r\n        else:\r\n            if limit <= -10:\r\n                print('done-')\r\n                break\r\n\r\n\r\n    t.penup()\r\n    x = (10-b)/a\r\n    t.goto(x*mult, 10*mult)\r\n    top_heading = 90-(45/a)\r\n\r\n    t.setheading(top_heading)\r\n    t.backward(21.66)\r\n    t.setheading(top_heading + 90)\r\n    t.pendown()\r\n    l = 25\r\n    t.backward(l/2)\r\n    #t.fillcolor('black')\r\n    t.begin_fill()\r\n    for i in range(3):\r\n        t.fd(l)\r\n        t.right(120)\r\n    t.end_fill()\r\n\r\ngraph()\r\nprint()\r\nt.penup()\r\nt.goto(0, 40*b)\r\nt.write(' (0' + ',' + str(b) + ')', font = (30))\r\nx = (0-b/a)\r\nprint(str(x) + ',' + '0')\r\nt.penup()\r\nt.goto(40*x, 0)\r\nt.write(' (' + str(x) + ',' + '0)', font = (30))\r\nwn.mainloop()","sub_path":"graphing_calc_g2.py","file_name":"graphing_calc_g2.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"95280402","text":"import logging\nimport multiprocessing\nimport os\nimport signal\nfrom importlib import reload, import_module\nfrom logging.handlers import TimedRotatingFileHandler\n\nfrom pyx_gutils.util.misc_util import r_replace_str\nfrom pyx_scrapy_exts.const import *\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.spiderloader import SpiderLoader\nfrom scrapy.utils.log import TopLevelFormatter\nfrom scrapy.utils.project import get_project_settings\n\n\"\"\"\nsignal handlers can only be installed in the main thread.\nReap zombie children: otherwise, when this process is killed directly, its\nchild processes would become zombies and never release their pids.\n\"\"\"\nsignal.signal(signal.SIGCHLD, signal.SIG_IGN)\n\nMGR_RUNABLE = \"mgr_runable\"\n\nlogger = logging.getLogger(__name__)\n\n\n
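# Usage note: get_spiderloader() builds a SpiderLoader from the frozen project\n# settings; enreload=True re-imports every spider module, which lets a running\n# manager pick up spider code edited on disk.\n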
def get_spiderloader(settings=get_project_settings().frozencopy(), enreload=False):\n    spider_loader = SpiderLoader(settings)\n    if enreload:\n        for key in spider_loader.list():\n            cls = spider_loader.load(key)\n            m = import_module(cls.__module__)\n            reload(m)\n    return spider_loader\n\n\ndef spider_name2cls_mapper(settings=get_project_settings().frozencopy()):\n    spider_loader = SpiderLoader(settings)\n    name_cls = {}\n    for key in spider_loader.list():\n        cls = spider_loader.load(key)\n        import_module(cls.__module__)\n        name_cls[key] = cls\n    return name_cls\n\n\ndef _get_time_rorate_handler(settings, sub_filename=None):\n    \"\"\"\n    Return a log handler object according to settings\n    \"\"\"\n    # init handler\n    filename = settings.get('LOG_FILE')\n    if filename:\n        if sub_filename is not None:\n            filename = r_replace_str(filename, sub_filename)\n        encoding = settings.get('LOG_ENCODING')\n        when = settings.get(LOG_ROTATE_FILE_WHEN, 'MIDNIGHT')\n        backup_count = settings.get(LOG_ROTATE_FILE_BACKUP_COUNT, 14)\n        handler = TimedRotatingFileHandler(filename=filename, when=when, backupCount=backup_count, encoding=encoding)\n    elif settings.getbool('LOG_ENABLED'):\n        handler = logging.StreamHandler()\n    else:\n        handler = logging.NullHandler()\n\n    handler.setLevel(settings.get(LOG_ROTATE_FILE_LEVEL) or settings.get(\"LOG_LEVEL\", \"DEBUG\"))\n\n    # handler.formatter\n    formatter = logging.Formatter(\n        fmt=settings.get('LOG_FORMAT'),\n        datefmt=settings.get('LOG_DATEFORMAT')\n    )\n    handler.setFormatter(formatter)\n    # handler.filter\n    if settings.getbool('LOG_SHORT_NAMES'):\n        handler.addFilter(TopLevelFormatter(['scrapy']))\n    return handler\n\n\ndef _change_logging_handler(settings, name):\n    logger = logging.getLogger()\n    for handler in logger.handlers:\n        if isinstance(handler, logging.FileHandler):\n            logger.removeHandler(handler)\n    _handler = _get_time_rorate_handler(settings, name)\n    logger.addHandler(_handler)\n\n\ndef _crawler_start_(name, modify_config=None):\n    if modify_config is None:\n        modify_config = {}\n    settings = get_project_settings()\n    if isinstance(modify_config, dict) and len(modify_config) > 0:\n        for k, v in modify_config.items():\n            settings.set(k, v)\n    process = CrawlerProcess(settings)\n    _change_logging_handler(settings, name)\n    process.crawl(name)\n    process.start()\n\n\ndef _terminate_pid(spider_name, p):\n    pid = p.pid\n    p.terminate()\n    try:\n        ret = os.system('kill -9 %d' % pid)\n        logger.info('stop %s process, pid: %d and ret: %s' % (spider_name, pid, ret))\n    except Exception as err:\n        print(err)\n\n\ndef _kill_spider_process(spider_name, ps, pid):\n    pid_int = int(pid)\n    for p in ps:\n        if p.pid == pid_int:\n            ps.remove(p)\n            _terminate_pid(spider_name, p)\n            break\n\n\ndef _reload_spider_name(spider_name):\n    spiderloader = get_spiderloader(enreload=True)\n\n    try:\n        cls = spiderloader.load(spider_name)\n        if hasattr(cls, MGR_RUNABLE) and cls.mgr_runable and cls.name == spider_name:\n            return True\n        else:\n            return False\n    except Exception as ex:\n        logger.exception(ex)\n        return False\n\n\n
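# Minimal usage sketch (the spider name here is hypothetical):\n#     manager = SpiderManager()\n#     manager.start_spider('example_spider')\n#     manager.list_spider_names()\n#     manager.stop_all()\n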
class SpiderManager(object):\n    def __init__(self):\n        # use the 'spawn' context to avoid exceptions caused by sharing data across processes\n        self.mp_ctx = multiprocessing.get_context('spawn')\n        self.spiders_map = {}\n        # start_all may only run once; after stop_all everything can be started again\n        self.startall_execed = False\n\n        self.spider_names = []\n\n    def start_spider(self, spider_name, check_spider_name=False, modify_config=None):\n        \"\"\"\n        Start a spider process for the given spider name\n        :param spider_name:\n        :param check_spider_name:\n        :param modify_config:\n        \"\"\"\n        if modify_config is None:\n            modify_config = {}\n\n        if check_spider_name and spider_name not in self.spiders_map:\n            valid = _reload_spider_name(spider_name)\n            logger.info(\"++++ reload spider name: %s and result %s ++++\" % (spider_name, valid))\n            if not valid:\n                return None\n\n        p = self.mp_ctx.Process(target=_crawler_start_, args=(spider_name, modify_config))\n        p.start()\n\n        ps = self.spiders_map.setdefault(spider_name, [])\n        ps.append(p)\n\n        logger.info('start %s process, pid: %d' % (spider_name, p.pid))\n        return p\n\n    def stop_spider(self, spider_name, num=None, pid=None):\n        \"\"\"\n        Stop the given number of processes for this spider; if num is None, stop them all\n        :param spider_name:\n        :param pid:\n        :param num:\n        \"\"\"\n        ps = self.spiders_map.get(spider_name, [])\n        if pid is not None:\n            _kill_spider_process(spider_name, ps, pid)\n        else:\n            if num is None:\n                num = len(ps)\n            count = 0\n            while True:\n                if len(ps) == 0 or count == num:\n                    break\n                p = ps.pop()\n                _terminate_pid(spider_name, p)\n                count += 1\n        if len(ps) == 0 and spider_name in self.spiders_map:\n            del self.spiders_map[spider_name]\n\n    def stop_all(self):\n        \"\"\"\n        Stop all currently running spider processes\n        \"\"\"\n        spider_names = list(self.spiders_map.keys())\n        for spider_name in spider_names:\n            self.stop_spider(spider_name)\n        self.startall_execed = False\n\n    def start_all(self):\n        \"\"\"\n        Start every spider marked as manager-runnable\n        \"\"\"\n        if self.startall_execed:\n            return\n        spiderloader = get_spiderloader(enreload=True)\n        for spider_name in spiderloader.list():\n            cls = spiderloader.load(spider_name)\n            if hasattr(cls, MGR_RUNABLE) and cls.mgr_runable:\n                self.start_spider(spider_name)\n        self.startall_execed = True\n\n    def restart_all(self):\n        \"\"\"\n        Restart all spiders\n        \"\"\"\n\n        self.stop_all()\n        self.start_all()\n\n    def list_spider_names(self, reloaded=False):\n        \"\"\"\n        List all spider names\n        :param reloaded:\n        \"\"\"\n        if reloaded or len(self.spider_names) == 0:\n            spiderloader = get_spiderloader(enreload=True)\n            self.spider_names = spiderloader.list()\n\n        return self.spider_names\n","sub_path":"pyx_scrapy_exts/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":7164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"100872435","text":"import webapp2\nimport cgi\nimport jinja2\nimport os\nimport re\nfrom google.appengine.ext import db\n\n# set up jinja\ntemplate_dir = os.path.join(os.path.dirname(__file__), \"templates\")\njinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape=True)\n# Using autoescape so I don't have to manually do CGI escape\n\ndef render_str(template, **params):\n    t = jinja_env.get_template(template)\n    return t.render(params)\n\nclass Post(db.Model): # This is where I am defining the Post (database).\n    title = db.StringProperty(required = True)\n    post = db.TextProperty(required = True)\n    created = db.DateTimeProperty(auto_now_add = True)\n\nclass Handler(webapp2.RequestHandler):\n    \"\"\" A base RequestHandler class for the site.\n    The other handlers inherit from this one.\n    \"\"\"\n    def write(self, *a, **kw):\n        self.response.out.write(*a, **kw)\n\n    def render_str(self, template, **params):\n        return render_str(template, **params)\n\n    def render(self, template, **kw):\n        self.write(self.render_str(template, **kw))\n\nclass Index(Handler):\n    def get(self):\n        welcome_message = \"Welcome! But you need to visit the \" + \"<a href='/blog'>blog</a>\" + \" page!\"\n        self.response.write(welcome_message)\n\nclass MainHandler(Handler):\n    \"\"\" Handles request to the main page of the blog. 
So '/blog' page\n \"\"\"\n def get(self):\n posts = db.GqlQuery(\"SELECT * FROM Post ORDER BY created DESC LIMIT 5\")\n \n self.render('frontpage.html',posts=posts)\n\nclass NewEntry(Handler):\n \"\"\" Handles request to the new post writing of the blog. So '/blog/newpost' page.\n \"\"\"\n\n def get(self):\n self.render(\"newpost.html\")\n\n def post(self):\n title = self.request.get(\"title\")\n post = self.request.get(\"post\")\n\n if title and post:\n new_entry = Post(title = title, post = post)\n new_entry.put()\n self.redirect('/blog/%s' % str(new_entry.key().id()))\n else:\n error = \"Be sure to enter title and post!\"\n self.render(\"newpost.html\",title=title,post=post,error=error)\n\nclass ViewPostHandler(Handler):\n def get(self, id):\n entry = Post.get_by_id(int(id))\n\n if not entry:\n error = \"<h3>Not a valid post ID! Return to <a href='/blog'>Main</a></h3>\"\n self.response.write(error)\n return\n\n self.render(\"permalink.html\",entry=entry)\n\napp = webapp2.WSGIApplication([\n ('/',Index),\n ('/blog', MainHandler),\n ('/blog/newpost', NewEntry),\n webapp2.Route('/blog/<id:\\d+>', ViewPostHandler)],\n debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"158805688","text":"import requests\nfrom celery.utils.log import get_task_logger\n\nfrom fct.broker import celery\n\nLOG = get_task_logger(__name__)\n\n\n@celery.task(rate_limit='600/m')\ndef sleep(x: int):\n LOG.info('start request')\n url = 'https://httpstat.us/200?sleep=%s' % x\n r = requests.get(url)\n LOG.info('end request: r[%s]' % r.text)\n\n return r.text\n","sub_path":"fct/messaging/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"50905990","text":"import xir\nfrom .pci import PciClass\n\ndef nic10G(neth, npci):\n x = xir.Node({\n 'model': 'Generic 10G NIC',\n 'pci_class': PciClass.network_controller\n })\n\n for _ in range(neth):\n x.endpoint({'protocol': 'ethernet', 'speed': xir.gbps(10)})\n\n #for _ in range(npci):\n # x.endpoint({'protocol': 'pcie3', 'lanes': xir.eq(1)})\n\n x.endpoint({'protocol': 'pcie3', 'phy': 'pcie_x%d'%npci, 'lanes': npci})\n\n return x\n\ndef switch(name, ports):\n\n x = xir.Node({\n 'name': name,\n 'model': 'Generic %dG %d radix switch'%( ports[0].props['speed'], len(ports) )\n }, ports)\n\n return x\n\n","sub_path":"lib/steam/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"152017197","text":"# Imports\r\nfrom vosk import Model, KaldiRecognizer, SetLogLevel\r\nSetLogLevel(-1) # The higher the more vosk logs\r\nimport pyaudio\r\nimport time\r\nimport os\r\nos.path.dirname(os.path.abspath(__file__))\r\n\r\nprint(\"\\nLoading...\\n\")\r\n\r\n# Define the model\r\nmodel = Model(\"Model\")\r\nframerate = 16000\r\nrec = KaldiRecognizer(model, framerate)\r\n\r\n# Launch the audio stream\r\np = pyaudio.PyAudio()\r\nstream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=8000)\r\nstream.start_stream()\r\n\r\nprint(\"\\nListening...\\n\")\r\n\r\nt0 = time.time()\r\ntmax = 5 # sec, acquisition time \r\n\r\nwhile time.time() - t0 < tmax:\r\n # Collect the audio stream\r\n data = stream.read(framerate//4)\r\n # Process data\r\n rec.AcceptWaveform(data) \r\n\r\n# 
Result\r\nprint(rec.FinalResult())\r\n\r\n# Stop the recording\r\nstream.stop_stream()\r\nstream.close()\r\np.terminate()\r\n","sub_path":"Vosk_micro.py","file_name":"Vosk_micro.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"618942461","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport itertools\nfrom pyspark import SparkContext\nfrom pyspark import SparkConf\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql import Row\nfrom pyspark.ml.recommendation import ALS\nfrom pyspark.mllib.evaluation import RankingMetrics\nimport pyspark.sql.functions as F\n\n\ndef main(spark, train_file, val_file, model_file):\n\n train_df = spark.read.parquet(train_file)\n #train_df = spark.read.parquet('hdfs:/user/xx852/cf_train_small.parquet')\n val_df = spark.read.parquet(val_file)\n #val_df = spark.read.parquet('hdfs:/user/xx852/cf_val_small.parquet')\n train_df = train_df.select('user_label', 'track_label', 'count')\n val_df = val_df.select('user_label', 'track_label', 'count')\n val_grouped = val_df.groupBy('user_label').agg(F.collect_list(F.col('track_label')).alias('track_label'))\n\n # ALS for implicit feedback\n als = ALS(maxIter = 5, regParam = 0.01, implicitPrefs = True, \\\n userCol = 'user_label', itemCol = 'track_label', ratingCol = 'count')\n\n als_model = als.fit(train_df)\n predictions = als_model.recommendForAllUsers(10)\n prediction_df = predictions.rdd.map(lambda r: (r.user_label, [i[0] for i in r.recommendations])).toDF()\n prediction_df = prediction_df.selectExpr('_1 as user_label', '_2 as recommendations')\n\n # Join table\n val_pred = val_grouped.join(prediction_df, 'user_label', 'inner')\n rdd = val_pred.select('recommendations', 'track_label').rdd\n ranking_metrics = RankingMetrics(rdd)\n print('Before tuning, MAP = %s' % ranking_metrics.meanAveragePrecision)\n\n\n # hyperparameter tuning\n #ranks = [10, 20, 40, 60]\n #reg_params = [0.005, 0.01, 0.05]\n ranks = [20]\n reg_params = [0.01]\n alphas = [0.10, 0.20, 0.40]\n best_rank = None\n best_reg_param = None\n best_alpha = None\n best_model = None\n best_map = 0\n\n for rank_i, alpha_i, reg_param_i in itertools.product(ranks, alphas, reg_params):\n\n als = ALS(maxIter = 5, regParam = reg_param_i, implicitPrefs = True, alpha = alpha_i,\n rank = rank_i, userCol = 'user_label', itemCol = 'track_label', ratingCol = 'count')\n\n als_model = als.fit(train_df)\n predictions = als_model.recommendForAllUsers(100)\n prediction_df = predictions.rdd.map(lambda r: (r.user_label, [i[0] for i in r.recommendations])).toDF()\n prediction_df = prediction_df.selectExpr('_1 as user_label', '_2 as recommendations')\n\n # Join table\n val_pred = val_grouped.join(prediction_df, 'user_label', 'inner')\n rdd = val_pred.select('recommendations', 'track_label').rdd\n ranking_metrics = RankingMetrics(rdd)\n map_ = ranking_metrics.meanAveragePrecision\n print('Current rank:', rank_i)\n print('Current alpha:', alpha_i)\n print('Current reg:', reg_param_i)\n print('Current map:', map_)\n\n if map_ > best_map:\n best_rank = rank_i\n best_reg_param = reg_param_i\n best_alpha = alpha_i\n best_model = als_model\n best_map = map_\n\n print('Best rank:', best_rank)\n print('Best regParam:', best_reg_param)\n print('Best alpha:', best_alpha)\n print('Best map:', best_map)\n\n # save the best model\n best_model.save(model_file)\n\n\nif __name__ == '__main__':\n\n conf = SparkConf()\n conf.set('spark.executor.memory', '16g')\n 
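# NOTE: 16g memory and parallelism 4 are placeholder resource values; tune them to the cluster actually used\n    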
conf.set('spark.driver.memory', '16g')\n conf.set('spark.default.parallelism', '4')\n\n spark = SparkSession.builder.config(conf = conf).appName('modeling and tuning').getOrCreate()\n\n train_file = sys.argv[1]\n val_file = sys.argv[2]\n model_file = sys.argv[3]\n\n main(spark, train_file, val_file, model_file)\n\n","sub_path":"Baseline/Individual Tuning/tuning_set2.py","file_name":"tuning_set2.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"433602792","text":"\"\"\"Apply different (unconstrained) optimzation methods to find minimum of \"gauss2d\" function specified below\n ncg1: Newton-CG with approximate Hessian\n ncg2: Newton-CG with exact Hessian\n bfgs1: BFGS with approximate Jacobian\n bfgs2: BFGS with exact Jacobian\n\"\"\"\n\n#import necessary libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n#objective function-------\ndef gauss2d(xf,x0,y0,a,b):\n \"\"\"Compute 2d gaussian function, exp(-a*(x-x0)^2-b*(y-y0)^2)\n x = xf[0], y = xf[1]\n \"\"\"\n x = xf[0]\n y = xf[1]\n\n return -np.exp(-a*(x-x0)**2 - b*(y-y0)**2)\n#----------------------------------------------\n\n#gradient of objective function\ndef gauss2d_grad(xf,x0,y0,a,b):\n \"\"\"Compute gradietn of 2d gaussian function\n defined in gauss2d. Returns two-element tuple\n containing (df/dx,df/dy)\n \"\"\"\n\n #compute function\n f = gauss2d(xf,x0,y0,a,b)\n\n x = xf[0]\n y = xf[1]\n\n return np.array([-2.0*a*(x-x0)*f,-2.0*b*(y-y0)*f])\n#------------------------------------------------------\n\n#Hessian for objective function\ndef gauss2d_hess(xf,x0,y0,a,b):\n \"\"\"Compute Hessian for function defined in guass2d.\n Return Hessian matrix, [[fxx fxy],[fxy fyy]]\"\"\"\n\n f = gauss2d(xf,x0,y0,a,b)\n fx,fy = gauss2d_grad(xf,x0,y0,a,b)\n\n #unpack input\n x = xf[0]\n y = xf[1]\n\n #compute 2nd derivatives\n fxx = -2.0*a*(f + (x-x0)*fx)\n fxy = -2.0*b*(y-y0)*fx\n fyy = -2.0*b*(f + (y-y0)*fy)\n\n #construct Hessian\n H = np.zeros((2,2))\n H[0,0] = fxx\n H[0,1] = fxy\n H[1,0] = fxy\n H[1,1] = fyy\n return H\n#------------\n\n#display objective\ndef display_gauss2d(args):\n\n x = np.linspace(-5,5,101)\n y = np.linspace(-5,5,101)\n\n xg,yg=np.meshgrid(x,y)\n xf = (xg,yg)\n\n x0,y0,a,b=args\n\n f = gauss2d(xf,x0,y0,a,b)\n\n# plt.figure()\n# plt.contour(xg,yg,f)\n plt.figure()\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n surf = ax.plot_surface(xg, yg, f,rstride=1,cstride=5)\n plt.show()\n#-------------\nif __name__ == \"__main__\":\n parameters = (0.5,0.25,1.0,1.0) #x0,y0,a,b\n# display_gauss2d(parameters) #uncomment to create surface plot\n\n (ncg1,ncg2,bfgs1,bfgs2)=(True,True,True,True) #specify which methods to run\n xguess = (0.0,0.0)\n\n if ncg1:\n result_newt = minimize(gauss2d,xguess,args=parameters,method='Newton-CG',jac=gauss2d_grad)\n print(\"Gauss2d, Newton-CG, approximate Hessian\")\n print(\"location of optimum:\",result_newt.x)\n print(\"info: \", result_newt)\n print(\"---------------------------------------\")\n\n if ncg2:\n result_newt2 = minimize(gauss2d,xguess,args=parameters,method='Newton-CG',jac=gauss2d_grad,hess=gauss2d_hess)\n print(\"Gauss2d, Newton-CG, exact Hessian\")\n print(\"location of optimum:\",result_newt2.x)\n print(\"info: \", result_newt2)\n print(\"---------------------------------------\")\n\n if bfgs1:\n result_bfgs1 = minimize(gauss2d,xguess,args=parameters,method='BFGS',jac=False)\n 
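# with jac=False, scipy's minimize approximates the gradient by finite differences at extra function-evaluation cost\n        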
print(\"Gauss2d, BFGS, approximate gradient\")\n print(\"location of optimum:\",result_bfgs1.x)\n print(\"info: \", result_bfgs1)\n print(\"---------------------------------------\")\n\n if bfgs2:\n result_bfgs2 = minimize(gauss2d,xguess,args=parameters,method='BFGS',jac=gauss2d_grad)\n print(\"Gauss2d, BFGS, exact gradient\")\n print(\"location of optimum:\",result_bfgs2.x)\n print(\"info: \", result_bfgs2)\n print(\"---------------------------------------\")\n","sub_path":"lecture_notes/lecture5/gauss2d.py","file_name":"gauss2d.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"406031073","text":"from random import shuffle, choice\nfrom glob import glob\nimport logging\nimport pickle\nimport os\nimport concurrent.futures\nfrom tqdm.auto import tqdm\nimport numpy as np\n\nfrom scipy.ndimage.morphology import binary_erosion\nfrom skimage.morphology import disk\n\nfrom .jittering import tasks, jitter_axes_4d\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(levelname)s %(message)s')\n\n\nclass DataGenerator:\n \"\"\" Class to read batches \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.patchlets = None\n self.train_set = None\n self.cval_set = None\n self.set_idx = None\n if os.path.exists(os.path.join(self.config.log_dir, self.config.train_cval_log)):\n self.load_train_cval()\n else:\n self.train_cval_split()\n self.workers = config.workers\n print('Loading training data')\n self.train_patchlets = self.load_to_ram_parallel(self.train_set)\n print('Loading cross-validation data')\n self.cval_patchlets = self.load_to_ram_parallel(self.cval_set)\n\n def load_train_cval(self):\n logging.debug(\"Loading existing train/test split\")\n with open(os.path.join(self.config.log_dir, self.config.train_cval_log), 'rb') as f:\n self.train_set, self.cval_set = pickle.load(f)\n\n def train_cval_split(self):\n logging.debug(\"Creating train/test sets split\")\n # data with EOPatch folders\n data_dir = self.config.data_dir\n # prefix name of eopatch folders\n data_prefix = self.config.data_prefix\n # train/test split ratio. Check sum is lower than 1\n train_ratio = self.config.train_ratio\n cval_ratio = self.config.cval_ratio\n if train_ratio + cval_ratio > 1:\n raise ValueError(\"Wrong train/test split ratios\")\n # read data folders\n data_dirs = glob(os.path.join(data_dir, data_prefix + '*'))\n if not data_dirs:\n raise ValueError(\"Error loading data. 
Either non-existing or empty folder\")\n # shuffle in place\n shuffle(data_dirs)\n # split eopatch dir names into train and test\n n_train, n_cval = int(np.round(len(data_dirs) * train_ratio)), int(np.round(len(data_dirs) * cval_ratio))\n self.train_set = data_dirs[:n_train]\n self.cval_set = data_dirs[n_train:n_train + n_cval]\n # pickle lists for reproducibility\n with open(os.path.join(self.config.log_dir, self.config.train_cval_log), 'wb') as f:\n pickle.dump([self.train_set, self.cval_set], f)\n\n @staticmethod\n def read_one(fname):\n with open(fname, 'rb') as f:\n return pickle.load(f)\n\n def load_to_ram_parallel(self, file_names):\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.workers) as executor:\n batches = list(tqdm(executor.map(self.read_one, file_names), total=len(file_names)))\n return batches\n\n @staticmethod\n def jitter_x_y(data, labels, axes_dict, config):\n # expand first dimension of labels to use the jitter correctly\n labels = labels.reshape(((1,) + tuple(config.lb_size) + (config.n_classes,)))\n # Get sorted identifiers for jittering functions\n jitter_ids = sorted(list(tasks.keys()))\n # Randomly selct one action\n jitter = choice(jitter_ids)\n # jitter data and labels\n data, labels = tasks[jitter](data, axes_dict[jitter]), tasks[jitter](labels, axes_dict[jitter])\n labels = labels.squeeze(axis=0)\n return data, labels\n\n def get_data(self, patchlets):\n batch_x, batch_y = [], []\n # loop through patchlets\n for patchlet in patchlets:\n tmp_batch_x, tmp_batch_y = patchlet\n batch_x.append(tmp_batch_x)\n batch_y.append(tmp_batch_y)\n del tmp_batch_x, tmp_batch_y\n # return data and labels\n return np.stack(batch_x, axis=0), np.stack(batch_y, axis=0)\n\n def erode_label(self, label, radius):\n for n_class in range(1, self.config.n_classes):\n label[..., n_class] = binary_erosion(label[..., n_class], structure=disk(radius))\n label[..., 0] = np.where(np.sum(label, axis=-1) == 0, 1, label[..., 0])\n return label\n\n\nclass ExampleDataGenerator:\n \"\"\" Class to create random example batches \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.state_size = config.state_size\n\n def next_batch(self, batch_size):\n i_s = [batch_size] + self.state_size\n l_s = [batch_size, 10]\n\n # input data\n input_data = np.random.rand(*i_s)\n\n # one hot labels\n I = np.eye(10)\n indices = np.random.randint(10, size=batch_size)\n labels = I[indices]\n\n yield input_data, labels\n\n\nclass MultiTempBatchGenerator(DataGenerator):\n def __init__(self, config):\n super(MultiTempBatchGenerator, self).__init__(config)\n\n def next_batch(self, batch_size, is_training=True):\n # training or testing\n self.patchlets = self.train_patchlets if is_training else self.cval_patchlets\n if self.patchlets:\n # select batches randomly or loop through datasets\n self.set_idx = np.random.choice(len(self.patchlets), batch_size)\n batch_files = [self.patchlets[ii] for ii in self.set_idx]\n if self.config.jitter:\n batch_files = [self.jitter_x_y(*patchlet, jitter_axes_4d, self.config) for patchlet in batch_files]\n batch_x, batch_y = self.get_data(batch_files)\n if self.config.erode_labels is not None:\n eroded = [self.erode_label(label, self.config.erode_labels) for label in batch_y]\n batch_y = np.stack(eroded, axis=0)\n yield batch_x, batch_y\n else:\n yield None, 
None\n","sub_path":"eoflow/data_loader/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":5838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"181068415","text":"import sys\nimport requests\nimport json\nimport time\nimport asyncio\nfrom flask import Flask, request\nfrom pprint import pprint\nfrom ya_api import TranslateApi\n\napp = Flask(__name__)\n\n\ndef get_config():\n try:\n with open(\"config.json\") as f:\n config = json.loads(f.read())\n except FileNotFoundError as e:\n sys.exit(e)\n\n return config\n\n\nconfig = get_config()\nbot_token = config[\"bot_token\"]\n\n\ndef get_url(method):\n return f\"https://api.telegram.org/bot{bot_token}/{method}\"\n\n\n# DEFAULT TRANSLATION INTO RUSSIAN\ndef process_message(update):\n message = update[\"message\"]\n response = {\"chat_id\": message[\"from\"][\"id\"]}\n\n if \"text\" in message:\n user_text = message[\"text\"]\n translated_text = translate_api.translate(user_text, \"ru\")\n response[\"text\"] = translated_text\n\n else:\n response[\"text\"] = \"Sorry, but that kind of content is not supported.\"\n\n requests.post(get_url(\"sendMessage\"), data=response)\n\n\ndef process_inline_query(update):\n inline_query = update[\"inline_query\"]\n user_query = inline_query[\"query\"]\n\n if user_query:\n presented_langs = (\"ru\", \"en\", \"de\", \"fr\", \"es\", \"it\")\n results = []\n text = \"Translated by Yandex.Translate\"\n\n s_t = time.perf_counter()\n\n for i in range(len(presented_langs)):\n lang = presented_langs[i]\n translated_text = \"\".join(translate_api.translate(user_query, lang))\n result = {\n \"type\": \"contact\", \"id\": str(i),\n \"first_name\": lang, \"phone_number\": translated_text,\n \"input_message_content\": {\"message_text\": f\"{user_query}\\n\\n{translated_text}\"}\n }\n\n results.append(result)\n\n response = {\"inline_query_id\": inline_query[\"id\"], \"results\": json.dumps(results)}\n r = requests.post(get_url(\"answerInlineQuery\"), data=response)\n time_spent = time.perf_counter() - s_t\n print(f\"{time_spent:.3f}secs spent\")\n pprint(r.json())\n\n\n@app.route(f\"/{bot_token}\", methods=[\"POST\"])\ndef process_update():\n update = request.get_json()\n pprint(update)\n if \"message\" in update:\n process_message(update)\n return \"OK\", 200\n\n elif \"inline_query\" in update:\n process_inline_query(update)\n return \"OK\", 200\n\n else:\n return \"Cannot process such JSON\", 500\n\n\nif __name__ == '__main__':\n translate_api = TranslateApi(config[\"ya_api_trnsl_key\"])\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"558447919","text":"from django.contrib import admin\n\nfrom .models import UserFavorite,UserComments,ZyxOrder,UserOrder\n\n\nclass UserFavoriteAdmin(admin.ModelAdmin):\n list_display = ['user','fav_id','fav_type','add_time']\n\n\nadmin.site.register(UserFavorite,UserFavoriteAdmin)\n\n\nclass UserCommentsAdmin(admin.ModelAdmin):\n list_display = ['user','comments','comment_id','comment_type','add_time']\n\n\nadmin.site.register(UserComments,UserCommentsAdmin)\n\n\nclass ZyxOrderAdmin(admin.ModelAdmin):\n list_display = ['user', 'zyx_name','city', 'zyx','zyx_id','name_id','order_type','add_time', 'times', 'sale']\n\n\nadmin.site.register(ZyxOrder,ZyxOrderAdmin)\n\n\nclass UserOrderAdmin(admin.ModelAdmin):\n list_display = 
['user','city','name','order_name','istype','is_order','name_id','zyx_id','order_type','code','times','sale','user_order',\n 'pinyin','card','country','phone','wechat','email','detail','add_time']\n\n\nadmin.site.register(UserOrder,UserOrderAdmin)","sub_path":"apps/operation/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"651821514","text":"from flask import (Flask, g, render_template, flash, redirect, url_for,\n abort)\nfrom flask.ext.bcrypt import check_password_hash\nfrom flask.ext.login import (LoginManager, login_user, logout_user,\n login_required, current_user)\n\nimport forms\nimport models\n\n\nDEBUG = True\nPORT = 8000\nHOST = '127.0.0.1'\n\napp = Flask(__name__)\napp.secret_key = '' # Enter secret key, because we'll use sessions'\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app) # Sets up a login manager for app\nlogin_manager.login_view = 'login' # View - if they are not logged in we redirect to the login page, which will be called login\n\n\n@login_manager.user_loader\ndef load_user(userid):\n \"\"\"user_loader - A decorator to mark the function responsible for loading a user from whatever data source we use.\"\"\"\n try:\n return models.User.get(models.User.id == userid)\n except models.DoesNotExist: # That exception is from peewee\n return None\n\n@app.before_request\ndef before_request():\n \"\"\"Connect to a database before each request.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user\n\n@app.after_request\ndef after_request(response):\n \"\"\"Close the database connection after each request\"\"\"\n g.db.close()\n return response\n\n\n@app.route('/register', methods=('GET', 'POST')) # Load a view and see the form and post back to the view so it processes a form\ndef register():\n form = forms.RegisterForm()\n if form.validate_on_submit(): # When the form is submitted through POST, make sure the data is valid\n flash('Congratulations! You registererd!', 'success')\n models.User.create_user(\n username=form.username.data,\n email=form.email.data,\n password=form.password.data\n )\n return redirect(url_for('index'))\n return render_template('register.html', form=form)\n\n\n@app.route('/login', methods=('GET', 'POST'))\ndef login():\n form = forms.LoginForm()\n if form.validate_on_submit():\n try:\n user = models.User.get(models.User.email == form.email.data)\n except models.DoesNotExist:\n flash('Your email or password does not match!', 'error')\n else:\n if check_password_hash(user.password, form.password.data):\n login_user(user)\n flash('You have been logged in', 'success')\n return redirect(url_for('index'))\n else:\n flash('Your email or password does not match!', 'error')\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\n@login_required # Decorator to mark a view as requiring a user to be logged in before they can access the view\ndef logout():\n logout_user() # Method to remove a user's session and sign them out.\n flash('You have been logged out.', 'success')\n return redirect(url_for('index'))\n\n\n@app.route('/new_post', methods=('GET', 'POST'))\n@login_required\ndef post():\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(user=g.user._get_current_object(),\n content=form.content.data.strip())\n flash('Message posted! 
Thanks!', 'success')\n return redirect(url_for('index'))\n return render_template('post.html', form=form)\n \n\n@app.route('/')\ndef index():\n stream = models.Post.select().limit(100)\n return render_template('stream.html', stream=stream)\n\n\n@app.route('/stream')\n@app.route('/stream/<username>')\ndef stream(username=None):\n template = 'stream.html'\n if username and username != current_user.username:\n try:\n user = models.User.select().where(\n models.User.username**username).get() # ** compare if match\n except models.DoesNotExist:\n abort(404)\n else:\n stream = user.posts.limit(100)\n else: # our username\n stream = current_user.get_stream().limit(100)\n user = current_user\n if username:\n template = 'user_stream.html'\n return render_template(template, stream=stream, user=user)\n\n\n@app.route('/post/<int:post_id>')\ndef view_post(post_id):\n posts = models.Post.select().where(models.Post.id == post_id)\n if posts.count() == 0:\n abort(404)\n return render_template('stream.html', stream=posts)\n\n\n@app.route('/follow/<username>')\n@login_required\ndef follow(username):\n try:\n to_user = models.User.get(models.User.username**username)\n except models.DoesNotExist:\n abort(404)\n else:\n try:\n models.Relationship.create(\n from_user=g.user._get_current_object(),\n to_user=to_user\n )\n except models.IntegrityError:\n pass\n else:\n flash('You are now following {}!'.format(to_user.username), 'success')\n return redirect(url_for('stream', username=to_user.username))\n\n\n@app.route('/unfollow/<username>')\n@login_required\ndef unfollow(username):\n try:\n to_user = models.User.get(models.User.username**username)\n except models.DoesNotExist:\n abort(404)\n else:\n try:\n models.Relationship.get(\n from_user=g.user._get_current_object(),\n to_user=to_user\n ).delete_instance()\n except models.IntegrityError:\n pass\n else:\n flash('You have unfollowed {}!'.format(to_user.username), 'success')\n return redirect(url_for('stream', username=to_user.username))\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return render_template('404.html'), 404\n\n\nif __name__ == '__main__':\n models.initialize()\n app.run(debug=DEBUG, host=HOST, port=PORT)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"54237944","text":"from django.db import models\n\n\nclass City(models.Model):\n\n name = models.TextField()\n lat = models.FloatField()\n lon = models.FloatField()\n\n class Meta:\n verbose_name_plural = 'cities'\n indexes = [\n models.Index(fields=['name'], name='city_name_idx'),\n ]\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"336105992","text":"import click\nimport torch\nimport logging\nimport random\nimport numpy as np\nimport sys\nfrom utils.config import Config\nfrom utils.visualization.plot_images_grid import plot_images_grid\nfrom baselines.kde import KDE\nfrom datasets.main import load_dataset\n\n\n################################################################################\n# Settings\n################################################################################\n@click.command()\n@click.argument('dataset_name', type=click.Choice(['mnist', 'fmnist', 'cifar10', 'arrhythmia', 'cardio', 'satellite',\n 'satimage-2', 'shuttle', 'thyroid']))\n@click.argument('xp_path', 
type=click.Path(exists=True))\n@click.argument('data_path', type=click.Path(exists=True))\n@click.option('--load_config', type=click.Path(exists=True), default=None,\n help='Config JSON-file path (default: None).')\n@click.option('--load_model', type=click.Path(exists=True), default=None,\n help='Model file path (default: None).')\n@click.option('--ratio_known_normal', type=float, default=0.0,\n help='Ratio of known (labeled) normal training examples.')\n@click.option('--ratio_known_outlier', type=float, default=0.0,\n help='Ratio of known (labeled) anomalous training examples.')\n@click.option('--ratio_pollution', type=float, default=0.0,\n help='Pollution ratio of unlabeled training data with unknown (unlabeled) anomalies.')\n@click.option('--seed', type=int, default=-1, help='Set seed. If -1, use randomization.')\n@click.option('--kernel', type=click.Choice(['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine']),\n default='gaussian', help='Kernel for the KDE')\n@click.option('--grid_search_cv', type=bool, default=True,\n help='Use sklearn GridSearchCV to determine optimal bandwidth')\n@click.option('--n_jobs_model', type=int, default=-1, help='Number of jobs for model training.')\n@click.option('--hybrid', type=bool, default=False,\n help='Train KDE on features extracted from an autoencoder. If True, load_ae must be specified.')\n@click.option('--load_ae', type=click.Path(exists=True), default=None,\n help='Model file path to load autoencoder weights (default: None).')\n@click.option('--n_jobs_dataloader', type=int, default=0,\n help='Number of workers for data loading. 0 means that the data will be loaded in the main process.')\n@click.option('--normal_class', type=int, default=0,\n help='Specify the normal class of the dataset (all other classes are considered anomalous).')\n@click.option('--known_outlier_class', type=int, default=1,\n help='Specify the known outlier class of the dataset for semi-supervised anomaly detection.')\n@click.option('--n_known_outlier_classes', type=int, default=0,\n help='Number of known outlier classes.'\n 'If 0, no anomalies are known.'\n 'If 1, outlier class as specified in --known_outlier_class option.'\n 'If > 1, the specified number of outlier classes will be sampled at random.')\ndef main(dataset_name, xp_path, data_path, load_config, load_model, ratio_known_normal, ratio_known_outlier,\n ratio_pollution, seed, kernel, grid_search_cv, n_jobs_model, hybrid, load_ae, n_jobs_dataloader, normal_class,\n known_outlier_class, n_known_outlier_classes):\n \"\"\"\n (Hybrid) KDE for anomaly detection.\n\n :arg DATASET_NAME: Name of the dataset to load.\n :arg XP_PATH: Export path for logging the experiment.\n :arg DATA_PATH: Root path of data.\n \"\"\"\n\n # Get configuration\n cfg = Config(locals().copy())\n\n # Set up logging\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n log_file = xp_path + '/log.txt'\n file_handler = logging.FileHandler(log_file)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n # Print paths\n logger.info('Log file is %s.' % log_file)\n logger.info('Data path is %s.' % data_path)\n logger.info('Export path is %s.' 
% xp_path)\n\n # Print experimental setup\n logger.info('Dataset: %s' % dataset_name)\n logger.info('Normal class: %d' % normal_class)\n logger.info('Ratio of labeled normal train samples: %.2f' % ratio_known_normal)\n logger.info('Ratio of labeled anomalous samples: %.2f' % ratio_known_outlier)\n logger.info('Pollution ratio of unlabeled train data: %.2f' % ratio_pollution)\n if n_known_outlier_classes == 1:\n logger.info('Known anomaly class: %d' % known_outlier_class)\n else:\n logger.info('Number of known anomaly classes: %d' % n_known_outlier_classes)\n\n # If specified, load experiment config from JSON-file\n if load_config:\n cfg.load_config(import_json=load_config)\n logger.info('Loaded configuration from %s.' % load_config)\n\n # Print KDE configuration\n logger.info('KDE kernel: %s' % cfg.settings['kernel'])\n logger.info('Use GridSearchCV for bandwidth selection: %s' % cfg.settings['grid_search_cv'])\n logger.info('Number of jobs for model training: %d' % n_jobs_model)\n logger.info('Hybrid model: %s' % cfg.settings['hybrid'])\n\n # Set seed\n if cfg.settings['seed'] != -1:\n random.seed(cfg.settings['seed'])\n np.random.seed(cfg.settings['seed'])\n torch.manual_seed(cfg.settings['seed'])\n torch.cuda.manual_seed(cfg.settings['seed'])\n torch.backends.cudnn.deterministic = True\n logger.info('Set seed to %d.' % cfg.settings['seed'])\n\n # Use 'cpu' as device for KDE\n device = 'cpu'\n torch.multiprocessing.set_sharing_strategy('file_system') # fix multiprocessing issue for ubuntu\n logger.info('Computation device: %s' % device)\n logger.info('Number of dataloader workers: %d' % n_jobs_dataloader)\n\n # Load data\n dataset = load_dataset(dataset_name, data_path, normal_class, known_outlier_class, n_known_outlier_classes,\n ratio_known_normal, ratio_known_outlier, ratio_pollution,\n random_state=np.random.RandomState(cfg.settings['seed']))\n # Log random sample of known anomaly classes if more than 1 class\n if n_known_outlier_classes > 1:\n logger.info('Known anomaly classes: %s' % (dataset.known_outlier_classes,))\n\n # Initialize KDE model\n kde = KDE(hybrid=cfg.settings['hybrid'], kernel=cfg.settings['kernel'], n_jobs=n_jobs_model,\n seed=cfg.settings['seed'])\n\n # If specified, load model parameters from already trained model\n if load_model:\n kde.load_model(import_path=load_model, device=device)\n logger.info('Loading model from %s.' % load_model)\n\n # If specified, load model autoencoder weights for a hybrid approach\n if hybrid and load_ae is not None:\n kde.load_ae(dataset_name, model_path=load_ae)\n logger.info('Loaded pretrained autoencoder for features from %s.' 
% load_ae)\n\n # Train model on dataset\n kde.train(dataset, device=device, n_jobs_dataloader=n_jobs_dataloader,\n bandwidth_GridSearchCV=cfg.settings['grid_search_cv'])\n\n # Test model\n kde.test(dataset, device=device, n_jobs_dataloader=n_jobs_dataloader)\n\n # Save results and configuration\n kde.save_results(export_json=xp_path + '/results.json')\n cfg.save_config(export_json=xp_path + '/config.json')\n\n # Plot most anomalous and most normal test samples\n indices, labels, scores = zip(*kde.results['test_scores'])\n indices, labels, scores = np.array(indices), np.array(labels), np.array(scores)\n idx_all_sorted = indices[np.argsort(scores)] # from lowest to highest score\n idx_normal_sorted = indices[labels == 0][np.argsort(scores[labels == 0])] # from lowest to highest score\n\n if dataset_name in ('mnist', 'fmnist', 'cifar10'):\n\n if dataset_name in ('mnist', 'fmnist'):\n X_all_low = dataset.test_set.data[idx_all_sorted[:32], ...].unsqueeze(1)\n X_all_high = dataset.test_set.data[idx_all_sorted[-32:], ...].unsqueeze(1)\n X_normal_low = dataset.test_set.data[idx_normal_sorted[:32], ...].unsqueeze(1)\n X_normal_high = dataset.test_set.data[idx_normal_sorted[-32:], ...].unsqueeze(1)\n\n if dataset_name == 'cifar10':\n X_all_low = torch.tensor(np.transpose(dataset.test_set.data[idx_all_sorted[:32], ...], (0, 3, 1, 2)))\n X_all_high = torch.tensor(np.transpose(dataset.test_set.data[idx_all_sorted[-32:], ...], (0, 3, 1, 2)))\n X_normal_low = torch.tensor(np.transpose(dataset.test_set.data[idx_normal_sorted[:32], ...], (0, 3, 1, 2)))\n X_normal_high = torch.tensor(\n np.transpose(dataset.test_set.data[idx_normal_sorted[-32:], ...], (0, 3, 1, 2)))\n\n plot_images_grid(X_all_low, export_img=xp_path + '/all_low', padding=2)\n plot_images_grid(X_all_high, export_img=xp_path + '/all_high', padding=2)\n plot_images_grid(X_normal_low, export_img=xp_path + '/normals_low', padding=2)\n plot_images_grid(X_normal_high, export_img=xp_path + '/normals_high', padding=2)\n\n\nif __name__ == '__main__':\n sys.argv = ['', 'dataset_name', 'mnist', '--count', '3']\n main()\n","sub_path":"12.kde-based-ad/src/baseline_kde.py","file_name":"baseline_kde.py","file_ext":"py","file_size_in_byte":9369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"569185893","text":"\"\"\"\n Class which allows to train the neural network built by the ModelBuilder. Accuracy is the rate \n of well classified samples. 
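Training minimizes a softmax cross-entropy loss with the Adam optimizer.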
\n\"\"\"\n\nimport DataLoaderTf, ModelBuilder\nimport tensorflow as tf\nimport numpy as np \nfrom sklearn.metrics import accuracy_score\nimport sys \n\n\nclass TrainerOpt:\n\n\n    def __init__(self, logits, should_drop, dropout_rate1_placeholder, iterator, next_element, train_dataset, test_dataset):\n        \"\"\"\n        Build a trainer \n        Parameters : \n            - logits : output tensor (logits) of the network \n            - should_drop : boolean placeholder to apply or not dropout layer 1 (depending on training or inference mode)\n            - dropout_rate1_placeholder : dropout rate of the first dropout layer\n            - iterator, next_element : shared dataset iterator and its next (features, labels) element\n            - train_dataset, test_dataset : training and testing datasets fed through the iterator \n        \"\"\"\n        self.logits = logits\n        self.should_drop = should_drop\n        self.dropout_rate1_placeholder = dropout_rate1_placeholder\n        self.iterator = iterator\n        self.next_element = next_element\n        self.train_dataset = train_dataset\n        self.test_dataset = test_dataset\n\n\n    \n    def train(self, learning_rate, iterations, do_rate1, backup_folder, drop1):\n        \"\"\"\n        Train the model. Optimizer is Adam, loss is the softmax cross entropy with logits, and predictions are \n        checked with argmax on logits. \n        Parameters : \n            - learning_rate : learning rate used by the optimizer \n            - iterations : number of training iterations \n            - do_rate1 : dropout rate of the first dropout layer\n            - backup_folder : folder to which save the current trained model\n            - drop1 : whether the first dropout layer is active during training\n        \"\"\"\n\n        # Make datasets that we can initialize separately, but using the same structure via the common iterator\n        training_init_op = self.iterator.make_initializer(self.train_dataset)\n        testing_init_op = self.iterator.make_initializer(self.test_dataset)\n\n        # Define a loss function\n        with tf.variable_scope('loss', reuse=tf.AUTO_REUSE):\n            loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(labels = self.next_element[1], \n                                                                            logits = self.logits))\n        # Define an optimizer \n        with tf.variable_scope('train_op', reuse=tf.AUTO_REUSE):\n            train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n\n        # Convert logits to label indexes\n        with tf.variable_scope('correct_pred', reuse=tf.AUTO_REUSE):\n            correct_pred = tf.argmax(self.logits, axis=1)\n\n        # Equality between prediction and target prediction \n        with tf.variable_scope('equality', reuse=tf.AUTO_REUSE):\n            equality = tf.equal(correct_pred, tf.argmax(self.next_element[1],axis=1)) #tf.argmax(self.current_labels, 0))\n\n        # Accuracy of the prediction \n        with tf.variable_scope('accuracy', reuse=tf.AUTO_REUSE):\n            accuracy = tf.reduce_mean(tf.cast(equality, tf.float32))\n\n        # Add ops to save and restore all the variables.\n        saver = tf.train.Saver()\n\n        # Record the different training and testing figures \n        losses, accuracies_it, train_accuracies, test_accuracies = [], [], [], []\n\n        ##### Create and run a session #####\n\n        tf.set_random_seed(1234)\n        \n        config = tf.ConfigProto()\n        config.gpu_options.allow_growth = True\n\n\n        with tf.Session(config=config) as sess:\n\n            writer = tf.summary.FileWriter(\"output\", sess.graph)\n\n            sess.run(tf.global_variables_initializer())\n\n            sess.run(training_init_op)\n\n            for i in range(iterations+1):\n\n                if (i==0):\n                    print('\\n\\nTRAINING & TESTING : ')\n                    test_acc = 0.\n\n                _, loss_val, train_acc = sess.run([train_op, loss, accuracy], feed_dict={self.should_drop : drop1,\n                                                                                         self.dropout_rate1_placeholder : do_rate1})\n                losses.append(loss_val)\n                
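# keep per-iteration loss (and the accuracy recorded below) so learning curves can be inspected after training\n                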
train_accuracies.append(train_acc)\n\n # Accuracy on testing set \n \"\"\"if (i%50 == 0):\n accuracies_it.append(i)\n sess.run(testing_init_op)\n test_acc = sess.run([accuracy], feed_dict={self.should_drop : False,\n self.dropout_rate1_placeholder : do_rate1})[0]\n test_accuracies.append(test_acc)\"\"\"\n\n # Display the results \n arrow_length = int(10*(i/iterations))\n progress_percent = (int(1000*(i/iterations)))/10\n sys.stdout.write('\\r \\_ EPOCH : {0} ; ITERATION : {1} / {2} ; loss = {3} ; training_acc = {4}% ; [{6}>{7}{8}%]'.format(\n i//100+1, i, iterations, '{0:.6f}'.format(loss_val), '{0:.1f}'.format(train_acc*100), 0,\n '='*arrow_length,' '*(10-arrow_length), progress_percent))\n sys.stdout.flush()\n \n\n ################## now setup the validation run ##################\n valid_iters = 20\n # re-initialize the iterator, but this time with validation data\n sess.run(testing_init_op)\n avg_acc = 0\n for i in range(valid_iters):\n acc = sess.run([accuracy], feed_dict={self.should_drop : False,\n self.dropout_rate1_placeholder : do_rate1})\n avg_acc += acc[0]\n avg_acc = (avg_acc / valid_iters) * 100\n print(\"\\n\\nAverage validation set accuracy over {} iterations is {:.2f}%\\n\".format(valid_iters, avg_acc))\n ##################################################################\n \n save_path = saver.save(sess, \"{0}/model.ckpt\".format(backup_folder))\n print('\\n\\nModel saved in %s' % save_path)\n \n\n writer.close()\n\n return losses, accuracies_it, train_accuracies, avg_acc","sub_path":"TrainerOpt.py","file_name":"TrainerOpt.py","file_ext":"py","file_size_in_byte":6519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"359537279","text":"from torchtext.vocab import GloVe\nimport torch\nfrom torch.utils.data import Dataset\nimport pickle\nimport numpy as np\n\nclass Term():\n def __init__(self, user: int, sentence: str, time_stamp: int):\n self.user = user\n self.sentence = sentence\n self.time_stamp = time_stamp\n\n def __lt__(self, other):\n return self.time_stamp < other.time_stamp\n\n def __gt__(self, other):\n return self.time_stamp > other.time_stamp\n\nclass RecommendationData(Dataset):\n @staticmethod\n def save_metadata(train_data_files, validate_file, test_file, embedding_word_num, pkl_file_name):\n train_dialogtag_id = dict()\n train_id_dialogtag = []\n validate_dialogtag_id = dict()\n validate_id_dialogtag = []\n test_dialogtag_id = dict()\n test_id_dialogtag = []\n username_id = dict()\n id_username = []\n username_id['-<unk-user>-'] = 0\n id_username.append('-<unk-user>-')\n max_sentence_len = 0\n glove = GloVe(name='840B', dim=300, cache='glove', max_vectors=embedding_word_num)\n stoi = dict()\n itos = []\n stoi['<UNK>'] = 0\n itos.append('<UNK>')\n embedding_vectors = torch.zeros([1, 300])\n sentence_num = 0\n index = 1\n index1 = 0\n index2 = 1\n unk_num = 0\n for data_file_name in train_data_files:\n with open(data_file_name, 'r') as f:\n for line in f:\n [dialogtag, _, _, _, sentence, username, _, _, _] = line.strip('\\n').split('\\t')\n if dialogtag not in train_dialogtag_id:\n train_dialogtag_id[dialogtag] = index1\n train_id_dialogtag.append(dialogtag)\n index1 += 1\n assert username != '-<unk-user>-'\n if username not in username_id:\n username_id[username] = index2\n id_username.append(username)\n index2 += 1\n words = sentence.strip(' ').split(' ')\n max_sentence_len = max(len(words), max_sentence_len)\n for word in words:\n assert word != '<UNK>'\n if word not in stoi:\n stoi[word] = index\n 
itos.append(word)\n if word in glove.stoi:\n embedding_vectors = torch.cat([embedding_vectors, glove.vectors[glove.stoi[word]].unsqueeze(0)], dim=0)\n else:\n vector = torch.zeros([1, 300])\n vector.normal_(mean=0, std=0.1)\n embedding_vectors = torch.cat([embedding_vectors, vector], dim=0)\n unk_num += 1\n index += 1\n sentence_num += 1\n embedding_vectors[0, :] = embedding_vectors[1:, :].mean(dim=0)\n index1 = 0\n with open(validate_file, 'r') as f:\n for line in f:\n items = line.strip('\\n').split('\\t')\n if len(items) == 9:\n dialogtag = items[0]\n if dialogtag not in validate_dialogtag_id:\n validate_dialogtag_id[dialogtag] = index1\n validate_id_dialogtag.append(dialogtag)\n index1 += 1\n index1 = 0\n with open(test_file, 'r') as f:\n for line in f:\n items = line.strip('\\n').split('\\t')\n if len(items) == 9:\n dialogtag = items[0]\n if dialogtag not in test_dialogtag_id:\n test_dialogtag_id[dialogtag] = index1\n test_id_dialogtag.append(dialogtag)\n index1 += 1\n print('totol word num :', len(embedding_vectors))\n print('unknown word num :', unk_num)\n with open(pkl_file_name, 'wb') as f:\n pickle.dump({\n 'train_dialogtag_id': train_dialogtag_id,\n 'train_id_dialogtag': train_id_dialogtag,\n 'validate_dialogtag_id': validate_dialogtag_id,\n 'validate_id_dialogtag': validate_id_dialogtag,\n 'test_dialogtag_id': test_dialogtag_id,\n 'test_id_dialogtag': test_id_dialogtag,\n 'username_id': username_id,\n 'id_username': id_username,\n 'max_sentence_len': max_sentence_len,\n 'stoi': stoi,\n 'itos': itos,\n 'embedding_vectors': embedding_vectors,\n 'sentence_num': sentence_num\n }, f)\n\n @staticmethod\n def load_metadata(pkl_file_name):\n meta_data = pickle.load(open(pkl_file_name, 'rb'))\n train_dialogtag_id = meta_data['train_dialogtag_id']\n train_id_dialogtag = meta_data['train_id_dialogtag']\n validate_dialogtag_id = meta_data['validate_dialogtag_id']\n validate_id_dialogtag = meta_data['validate_id_dialogtag']\n test_dialogtag_id = meta_data['test_dialogtag_id']\n test_id_dialogtag = meta_data['test_id_dialogtag']\n username_id = meta_data['username_id']\n id_username = meta_data['id_username']\n max_sentence_len = meta_data['max_sentence_len']\n stoi = meta_data['stoi']\n itos = meta_data['itos']\n embedding_vectors = meta_data['embedding_vectors']\n sentence_num = meta_data['sentence_num']\n return train_dialogtag_id, train_id_dialogtag, validate_dialogtag_id, validate_id_dialogtag, test_dialogtag_id, test_id_dialogtag, username_id, id_username, max_sentence_len, stoi, itos, embedding_vectors, sentence_num\n\n def __init__(self, data_file, mode, dialogtag_id, username_id, stoi, max_sentence_len):\n assert mode in ['train', 'test'], 'mode must be [train | test]'\n self.mode = mode\n self.max_dialog_len = 32\n self.max_sentence_len = max_sentence_len\n self.num = len(dialogtag_id)\n self.dialog_users = np.zeros([self.num, self.max_dialog_len], dtype=np.int64)\n self.dialog_sentences = np.zeros([self.num, self.max_dialog_len, self.max_sentence_len], dtype=np.int64)\n self.dialog_sentence_lens = np.zeros([self.num, self.max_dialog_len], dtype=np.int64)\n dialogs = [[] for _ in range(self.num)]\n\n if self.mode == 'train':\n self.target_masks = np.zeros([self.num, self.max_dialog_len - 1], dtype=np.float32)\n for data_file_name in data_file:\n with open(data_file_name, 'r') as f:\n for line in f:\n [dialogtag, _, _, _, sentence, username, time_stamp, _, _] = line.strip('\\n').split('\\t')\n dialogs[dialogtag_id[dialogtag]].append(Term(username_id[username], sentence.strip(' '), 
int(time_stamp)))\n for i in range(self.num):\n assert len(dialogs[i]) > 0, 'dialog %d is empty' % i\n dialogs[i].sort()\n for j in range(len(dialogs[i])):\n self.dialog_users[i][j] = dialogs[i][j].user\n words = dialogs[i][j].sentence.split(' ')\n self.dialog_sentence_lens[i][j] = len(words) - 1\n for k, word in enumerate(words):\n if word in stoi:\n self.dialog_sentences[i][j][k] = stoi[word]\n for j in range(len(dialogs[i]) - 1):\n self.target_masks[i][j] = 1\n else:\n self.targets = [[] for _ in range(self.num)]\n self.dialog_lens = [0 for _ in range(self.num)]\n with open(data_file, 'r') as f:\n for line in f:\n items = line.strip('\\n').split('\\t')\n dialog_id = dialogtag_id[items[0]]\n if len(items) == 9:\n if items[5] in username_id:\n dialogs[dialog_id].append(Term(username_id[items[5]], items[4].strip(' '), int(items[6])))\n else:\n dialogs[dialog_id].append(Term(0, items[4].strip(' '), int(items[6])))\n else:\n items = items[1].split(' ')\n for item in items:\n if item in username_id:\n if username_id[item] not in self.targets[dialog_id]:\n self.targets[dialog_id].append(username_id[item])\n else:\n if 0 not in self.targets[dialog_id]:\n self.targets[dialog_id].append(0)\n for i in range(self.num):\n dialogs[i].sort()\n for j in range(len(dialogs[i])):\n self.dialog_users[i][j] = dialogs[i][j].user\n words = dialogs[i][j].sentence.split(' ')\n self.dialog_sentence_lens[i][j] = len(words) - 1\n for k, word in enumerate(words):\n if word in stoi:\n self.dialog_sentences[i][j][k] = stoi[word]\n self.dialog_lens[i] = len(dialogs[i]) - 1\n\n def __getitem__(self, index):\n if self.mode == 'train':\n return torch.from_numpy(self.dialog_users[index]), torch.from_numpy(self.dialog_sentences[index]), torch.from_numpy(self.dialog_sentence_lens[index]), torch.from_numpy(self.target_masks[index])\n else:\n return torch.from_numpy(self.dialog_users[index]), torch.from_numpy(self.dialog_sentences[index]), torch.from_numpy(self.dialog_sentence_lens[index]), self.dialog_lens[index], index \n\n def __len__(self):\n return self.num\n","sub_path":"baseline2_impl2/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":9849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"208053676","text":"from mwpyeditor.core import mwglobals\nfrom mwpyeditor.core.mwrecord import MwRecord\n\n\nclass MwREPA(MwRecord):\n def __init__(self):\n MwRecord.__init__(self)\n self.id_ = ''\n self.model = ''\n self.name = None\n self.weight = 0.0\n self.value = 0\n self.uses = 0\n self.quality = 0.0\n self.icon = None\n self.script = None\n\n def load(self):\n self.id_ = self.parse_string('NAME')\n self.model = self.parse_string('MODL')\n self.name = self.parse_string('FNAM')\n\n self.weight = self.parse_float('RIDT')\n self.value = self.parse_uint('RIDT', start=4)\n self.uses = self.parse_uint('RIDT', start=8)\n self.quality = self.parse_float('RIDT', start=12)\n\n self.icon = self.parse_string('ITEX')\n self.script = self.parse_string('SCRI')\n\n mwglobals.object_ids[self.id_] = self\n\n def record_details(self):\n return MwRecord.format_record_details(self, [\n (\"|Name|\", '__str__'),\n (\"\\n|Script|\", 'script'),\n (\"\\n|Weight| {:.2f}\", 'weight'),\n (\"\\n|Value|\", 'value'),\n (\"\\n|Uses|\", 'uses'),\n (\"\\n|Quality| {:.2f}\", 'quality'),\n (\"\\n|Model|\", 'model'),\n (\"\\n|Icon|\", 'icon')\n ])\n\n def __str__(self):\n return f\"{self.name} [{self.id_}]\"\n\n def diff(self, other):\n return MwRecord.diff(self, other, ['model', 'name', 
'weight', 'value', 'uses', 'quality', 'icon', 'script'])\n","sub_path":"mwpyeditor/record/mwrepa.py","file_name":"mwrepa.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"454844255","text":"# Convert 2018 CDC Mortality Data to csv format. Data source:\n# ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/Datasets/DVS/mortality/mort2018us.zip\n# Variable mapping was derived from the 2018 data documentation that can be found at:\n# https://www.cdc.gov/nchs/data/dvs/Multiple_Cause_Record_Layout_2018-508.pdf\n\ndef parse_2018mort(in_file, out_file):\n \n print('Running...')\n mort_data = open(in_file,'r')\n csv_out = open(out_file,\"a\")\n\n csv_out.write('Resident_Status, Education_89_Rev, Education_03_Rev, Education_Rep_Flag, Month_Of_Death, Sex, \\\n Age_Key, Age_Value, Age_Sub_Flag, Age_Recode_52, Age_Recode_27, Age_Recode_12, Infant_Age_Recode_22, \\\n Place_Of_Death, Marital_Status, DOW_of_Death, Current_Data_Year, Injured_At_Work, Manner_Of_Death, \\\n Method_Of_Disposition, Autopsy, Activity_Code, Place_Of_Causal_Injury, ICD10, Cause_Recode_358, \\\n Cause_Recode_113, Infant_Cause_Recode_130, Cause_Recode_39, Entity_Axis_Conditions, EAC1, EAC2, EAC3, \\\n EAC4, EAC5, EAC6, EAC7, EAC8, EAC9, EAC10, EAC11, EAC12, EAC13, EAC14, EAC15, EAC16, EAC17, EAC18, EAC19, \\\n EAC20, Num_Rec_Axis_Cond, RA1, RA2, RA3, RA4, RA5, RA6, RA7, RA8, RA9, RA10, RA11, RA12, RA13, RA14, RA15, \\\n RA16, RA17, RA18, RA19, RA20, Bridged_Race, Bridged_Race_Flag, Race_Imput_Flag, Race_Recode_3, \\\n Race_Recode_5, Hispanic_Origin, Hispanic_Origin_Recode, Race_Recode_40\\n')\n\n iter_out = \"\"\n line_count = 0\n\n for line in mort_data:\n\n Resident_Status = line[19].strip()\n Education_89_Rev = line[60:62].strip()\n Education_03_Rev = line[62].strip()\n Education_Rep_Flag = line[63].strip()\n Month_Of_Death = line[64:66].strip()\n Sex = line[68].strip()\n Age_Key = line[69].strip()\n Age_Value = line[70:73].strip()\n Age_Sub_Flag = line[73].strip()\n Age_Recode_52 = line[74:76].strip()\n Age_Recode_27 = line[76:78].strip()\n Age_Recode_12 = line[78:80].strip()\n Infant_Age_Recode_22 = line[80:82].strip()\n Place_Of_Death = line[82].strip()\n Marital_Status = line[83].strip()\n DOW_of_Death = line[84].strip()\n Current_Data_Year = line[101:105].strip()\n Injured_At_Work = line[105].strip()\n Manner_Of_Death = line[106].strip()\n Method_Of_Disposition = line[107].strip()\n Autopsy = line[108].strip()\n Activity_Code = line[143].strip()\n Place_Of_Causal_Injury = line[144].strip()\n ICD10 = line[145:149].strip()\n Cause_Recode_358 = line[149:152].strip()\n Cause_Recode_113 = line[153:156].strip()\n Infant_Cause_Recode_130 = line[156:159].strip()\n Cause_Recode_39 = line[159:161].strip()\n Entity_Axis_Conditions = line[162:164].strip()\n EAC1 = line[164:171].strip()\n EAC2 = line[171:178].strip()\n EAC3 = line[178:185].strip()\n EAC4 = line[185:192].strip()\n EAC5 = line[192:199].strip()\n EAC6 = line[199:206].strip()\n EAC7 = line[206:213].strip()\n EAC8 = line[213:220].strip()\n EAC9 = line[220:227].strip()\n EAC10 = line[227:234].strip()\n EAC11 = line[234:241].strip()\n EAC12 = line[241:248].strip()\n EAC13 = line[248:255].strip()\n EAC14 = line[255:262].strip()\n EAC15 = line[262:269].strip()\n EAC16 = line[269:276].strip()\n EAC17 = line[276:283].strip()\n EAC18 = line[283:290].strip()\n EAC19 = line[290:297].strip()\n EAC20 = line[297:304].strip()\n Num_Rec_Axis_Cond = line[340:342]\n RA1 = line[343:348].strip()\n 
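# RA1-RA20 are twenty consecutive 5-byte fixed-width record-axis condition fields (positions 344-443 in the 2018 layout)\n        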
RA2 = line[348:353].strip()\n RA3 = line[353:358].strip()\n RA4 = line[358:363].strip()\n RA5 = line[363:368].strip()\n RA6 = line[368:373].strip()\n RA7 = line[373:378].strip()\n RA8 = line[378:383].strip()\n RA9 = line[383:388].strip()\n RA10 = line[388:393].strip()\n RA11 = line[393:398].strip()\n RA12 = line[398:403].strip()\n RA13 = line[403:408].strip()\n RA14 = line[408:413].strip()\n RA15 = line[413:418].strip()\n RA16 = line[418:423].strip()\n RA17 = line[423:428].strip()\n RA18 = line[428:433].strip()\n RA19 = line[433:438].strip()\n RA20 = line[438:443].strip()\n Bridged_Race = line[444:446].strip()\n Bridged_Race_Flag = line[446].strip()\n Race_Imput_Flag = line[447].strip()\n Race_Recode_3 = line[448].strip()\n Race_Recode_5 = line[449].strip()\n Hispanic_Origin = line[483:486].strip()\n Hispanic_Origin_Recode = line[487].strip()\n Race_Recode_40 = line[488:490].strip()\n \n iter_out = (Resident_Status+','+Education_89_Rev+','+Education_03_Rev+','+Education_Rep_Flag+','+\n Month_Of_Death+','+Sex+','+Age_Key+','+Age_Value+','+Age_Sub_Flag+','+Age_Recode_52+','+\n Age_Recode_27+','+Age_Recode_12+','+Infant_Age_Recode_22+','+Place_Of_Death+','+\n Marital_Status+','+DOW_of_Death+','+Current_Data_Year+','+Injured_At_Work+','+\n Manner_Of_Death+','+Method_Of_Disposition+','+Autopsy+','+Activity_Code+','+\n Place_Of_Causal_Injury+','+ ICD10+','+Cause_Recode_358+','+Cause_Recode_113+','+\n Infant_Cause_Recode_130+','+Cause_Recode_39+','+Entity_Axis_Conditions+','+EAC1+','+\n EAC2+','+EAC3+','+EAC4+','+EAC5+','+EAC6+','+EAC7+','+EAC8+','+EAC9+','+EAC10+','+\n EAC11+','+EAC12+','+EAC13+','+EAC14+','+EAC15+','+EAC16+','+EAC17+','+EAC18+','+EAC19+','+\n EAC20+','+Num_Rec_Axis_Cond+','+RA1+','+RA2+','+RA3+','+RA4+','+RA5+','+RA6+','+RA7+','+\n RA8+','+RA9+','+RA10+','+RA11+','+RA12+','+RA13+','+RA14+','+RA15+','+RA16+','+RA17+','+\n RA18+','+RA19+','+RA20+','+Bridged_Race+','+Bridged_Race_Flag+','+Race_Imput_Flag+','+\n Race_Recode_3+','+Race_Recode_5+','+Hispanic_Origin+','+Hispanic_Origin_Recode+','+\n Race_Recode_40+'\\n')\n \n csv_out.write(iter_out)\n line_count += 1\n\n\n csv_out.close()\n mort_data.close()\n print('Finished.\\n'+'Parsed '+str(line_count)+' lines from '+str.split(in_file, '/')[-1])\n\n","sub_path":"mort/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"605774230","text":"#!/usr/bin/python\n################################################################################\n# Filename: T2CFR.py\n# Created: ?? May 2017\n# Author : Zeke Steer \n# Purpose: Take a text string from \"Speech to Text\" and use TextRazor to\n# convert to CFR format. \n################################################################################\n# Updates:\n# 22 Nov 2018 Derek - removed duplicate \"def\" get_actions_by_verb.\n# - added \"file exits\" check for training verbs file\n# \n# 25 Oct 2018 Derek - Fixed bug where client.analyze(s) would cease python program\n# with no msgs if it was passed unlikely words. 
The case was \n#              \"Bangalore bengaluru\". On TextRazor.com demo page this produces\n#              an error which I could not trap with the documented error name.\n# \n# 23 Nov 2017 Derek - Implement \"Python Support Library\" ROS package \n#              First shared module is tag_topics.py\n#\n# 07 Aug 2017 Derek - Make completion data path separate from the one used to test\n#              the code using runmode = TH e.g. test harness.\n#\n# 11 Jul 2017 Derek - Removed debug/dev print statements\n#\n# 26 Jun 2017 Derek - Add results files for mic input as needed by ERL competition\n# \n################################################################################\nimport rospy \nfrom std_msgs.msg import String \nimport operator\nimport random\nimport textrazor\nimport python_support_library.tag_topics as TT\nimport python_support_library.text_colours as TC\nimport os\n\n\no_tt = TT.tag_topics()\nprt = TC.tc()\n\nrun_mode = rospy.get_param('SR_TH')\nERLDATAPATHOUT = rospy.get_param(\"SR_ERL_DATAPATHOUT\")\n\nclass Command:\n\n    def __init__(self):\n        self.verb = None\n        self.action = None\n        self.theme = None\n        self.conjunction = None\n        self.goal = None\n        self.source = None\n        self.ground = None\n        self.beneficiary = None\n        self.path = None\n    \n    def __str__(self):\n        s = self.action + \"(\"\n        has_args = False\n        if not self.beneficiary is None:\n            s += \"beneficiary:\\\"%s\\\",\" %self.beneficiary\n            has_args = True\n        if not self.theme is None:\n            s += \"theme:\\\"%s\\\",\" %self.theme\n            has_args = True\n        if not self.goal is None:\n            s += \"goal:\\\"%s\\\",\" %self.goal\n            has_args = True\n        if not self.source is None:\n            s += \"source:\\\"%s\\\",\" %self.source\n            has_args = True\n        if not self.ground is None:\n            s += \"ground:\\\"%s\\\",\" %self.ground\n            has_args = True\n        if not self.path is None:\n            s += \"path:\\\"%s\\\",\" %self.path\n            has_args = True\n        if has_args: \n            s = s[:len(s) - 1]\n        s += \")\"\n        return s\n\ndef get_root(words):\n    for word in words:\n        if word.parent is None:\n            return word\n    return None\n\ndef get_goal_source_theme(parent, tokens):\n    n_tokens = len(tokens)\n\n    if (parent.relation_to_parent == \"nn\" or parent.relation_to_parent == \"amod\") and n_tokens > 0:\n        tokens.insert(n_tokens - 1, parent.token)\n    else:\n        # add proposition e.g. to, of, etc.\n        if not parent.parent is None and parent.relation_to_parent == \"pobj\" and (n_tokens == 0 or parent.parent.token != tokens[n_tokens - 1]):\n            #if parent.parent.parent is None or parent.parent.parent.relation_to_parent != \"ccomp\":\n            if parent.parent.parent is None or parent.parent.parent.relation_to_parent != \"ccomp\" or parent.parent.token == \"for\":\n                tokens.append(parent.parent.token)\n\n        # add determiner e.g. 
the, my, etc.\n for child in parent.children:\n if child.relation_to_parent == \"det\" or child.relation_to_parent == \"poss\":\n if len(child.children) > 0:\n for grandchild in child.children:\n # combine any possessives\n if grandchild.relation_to_parent == \"possessive\":\n tokens.append(child.token + grandchild.token)\n break\n tokens.append(child.token)\n break\n else:\n tokens.append(child.token)\n\n tokens.append(parent.token)\n\n for child in parent.children:\n if child.relation_to_parent == \"pobj\" or child.relation_to_parent == \"prep\" or child.relation_to_parent == \"nn\" or child.relation_to_parent == \"amod\":\n get_goal_source_theme(child, tokens)\n\ndef get_goal_source_theme_excluded(parent, tokens, exclude):\n n_tokens = len(tokens)\n\n if (parent.relation_to_parent == \"nn\" or parent.relation_to_parent == \"amod\") and n_tokens > 0:\n tokens.insert(n_tokens - 1, parent.token)\n else:\n # add proposition e.g. to, of, etc.\n if not parent.parent is None and parent.relation_to_parent == \"pobj\" and (n_tokens == 0 or parent.parent.token != tokens[n_tokens - 1]):\n #if parent.parent.parent is None or parent.parent.parent.relation_to_parent != \"ccomp\":\n if parent.parent.parent is None or parent.parent.parent.relation_to_parent != \"ccomp\" or parent.parent.token == \"for\":\n tokens.append(parent.parent.token)\n\n # add determiner e.g. the, my, etc.\n for child in parent.children:\n if child.relation_to_parent == \"det\" or child.relation_to_parent == \"poss\":\n if len(child.children) > 0:\n for grandchild in child.children:\n # combine any possessives\n if grandchild.relation_to_parent == \"possessive\":\n tokens.append(child.token + grandchild.token)\n break\n tokens.append(child.token)\n break\n else:\n tokens.append(child.token)\n\n tokens.append(parent.token)\n\n for child in parent.children:\n if child.relation_to_parent == \"pobj\" or child.relation_to_parent == \"prep\" or child.relation_to_parent == \"nn\" or child.relation_to_parent == \"amod\" or child.relation_to_parent == \"poss\":\n if not child is exclude:\n get_goal_source_theme(child, tokens)\n\ndef get_arg(parent):\n tokens = [ ]\n get_goal_source_theme(parent, tokens)\n return ' '.join(tokens)\n\ndef get_arg_excluded(parent, exclude):\n tokens = [ ]\n get_goal_source_theme_excluded(parent, tokens, exclude)\n return ' '.join(tokens)\n\n# should only generate bringing goal if this method returns false, otherwise should append to theme\ndef get_prep(parent):\n if parent.relation_to_parent == \"prep\":\n return parent\n\n for child in parent.children:\n prep = get_prep(child)\n if not prep is None:\n return prep\n \n return None\n\ndef get_child_prep(parent):\n for child in parent.children:\n prep = get_prep(child)\n if not prep is None:\n return prep\n\n return None\n\ndef set_cmd(child, cmd):\n if child.relation_to_parent == \"dobj\" or child.relation_to_parent == \"pobj\": \n if cmd.action == \"MOTION\":\n cmd.goal = get_arg(child) \n elif cmd.action == \"BRINGING\":\n cmd.theme = get_arg(child)\n elif cmd.action == \"SEARCHING\":\n cmd.theme = get_arg(child)\n elif cmd.action == \"PLACING\":\n cmd.theme = get_arg(child)\n elif cmd.action == \"TAKING\":\n cmd.theme = get_arg(child)\n elif child.relation_to_parent == \"cc\":\n cmd.conjunction = child.token\n elif child.relation_to_parent == \"prep\": \n if len(child.children) > 0:\n if cmd.action == \"MOTION\":\n if child.token == \"in\" or child.token == \"into\" or child.token == \"to\" or child.token == \"near\":\n if cmd.goal is None: \n cmd.goal = 
get_arg(child.children[0])\n else:\n cmd.goal = cmd.goal + \" \" + get_arg(child.children[0])\n else:\n if child.token == \"from\":\n cmd.path = child.token + \" \" + get_arg(child.children[0])\n else:\n cmd.path = get_arg(child.children[0])\n elif cmd.action == \"BRINGING\":\n if child.token == \"from\":\n cmd.source = get_arg(child.children[0])\n else:\n child_prep = get_child_prep(child)\n if child_prep is None:\n # e.g. \"onto\"\n cmd.goal = get_arg(child.children[0])\n else:\n cmd.theme = cmd.theme + \" \" + get_arg_excluded(child.children[0], child_prep)\n set_cmd(child_prep, cmd)\n elif cmd.action == \"SEARCHING\":\n if child.token == \"for\":\n cmd.theme = get_arg(child.children[0])\n else:\n cmd.ground = get_arg(child.children[0])\n elif cmd.action == \"PLACING\":\n cmd.goal = get_arg(child.children[0])\n elif cmd.action == \"TAKING\":\n if child.token == \"with\":\n cmd.theme = cmd.theme + \" \" + get_arg(child.children[0])\n else:\n cmd.source = get_arg(child.children[0])\n else:\n if cmd.action == \"MOTION\":\n if child.token == \"around\":\n cmd.path = child.token\n elif child.relation_to_parent == \"iobj\":\n cmd.beneficiary = child.token\n elif child.relation_to_parent == \"acomp\":\n if (len(child.children) > 0) and child.token == \"next\":\n cmd.goal = \"next \" + get_arg(child.children[0])\n elif child.relation_to_parent == \"expl\":\n if child.token == \"there\":\n cmd.goal = child.token\n elif child.relation_to_parent == \"advmod\":\n if (len(child.children) > 0) and (child.token == \"next\" or child.token == \"away\"):\n cmd.goal = child.token + \" \" + get_arg(child.children[0])\n elif child.relation_to_parent == \"nsubj\":\n if child.token == \"it\":\n cmd.theme = child.token\n\nplacing = 0\n\ndef get_cmds(verb, actions_by_verb, cmds):\n global placing\n \n if verb.token == \"go\" and verb.parent != None and verb.parent.token == \"let\":\n action = ''\n placing = 1\n elif placing == 1 and verb.parent != None and verb.parent.token == \"go\":\n action = \"PLACING\"\n vb = \"let go\"\n placing = 0\n elif verb.token in actions_by_verb:\n action = actions_by_verb[verb.token]\n placing = 0\n vb = verb.token\n else:\n action = ''\n placing = 0\n\n cmd = None\n\n if action != '':\n cmd = Command()\n cmd.action = action\n cmd.verb = vb\n cmds.append(cmd)\n \n for child in verb.children:\n if cmd != None and child.token == \"close\":\n cmd.goal = get_arg(child)\n elif cmd != None and child.token == \"please\" and (len(child.children) == 1):\n set_cmd(child.children[0], cmd)\n elif placing or child.token in actions_by_verb.keys():\n get_cmds(child, actions_by_verb, cmds) \n elif cmd != None:\n set_cmd(child, cmd)\n \ndef get_actions_by_verb():\n actions_by_verb = { }\n VERBSFILE=rospy.get_param('SR_VERBSFILE')\n\n if os.path.isfile(VERBSFILE):\n with open(VERBSFILE, \"r\") as file:\n for line in file.readlines():\n tokens = line.strip().split(\",\") \n actions_by_verb[tokens[0]] = tokens[1]\n else:\n prt.error(\"\\nError in \"+cname+\": Verbs file not found\\n\"+\"\\n\"+VERBSFILE+\"\\n\")\n #rospy.signal_shutdown(\"########## Forced Shutdown #########\")\n prt.info(\"\\n##### KILLing ALL Nodes but .....\\n# please use CTRL-C to complete the node shutdown process\\n\")\n os.system(\"rosnode kill -a\")\n \n\n return actions_by_verb\n\ndef get_cfr(s):\n try:\n response = client.analyze(s)\n except:\n #except TextRazorAnalysisException, rc: # didn't work - inknown global name???\n prt.error(\"##### Error in TextRazor: client.analyze(s) Failed!\")\n prt.error(\"##### text used was, 
s= \"+s)\n response = None\n \n if response != None:\n actions_by_verb = get_actions_by_verb()\n root = get_root(response.words())\n cmds = [ ]\n get_cmds(root, actions_by_verb, cmds)\n\n indexes_by_cmd = { }\n for cmd in cmds:\n index = 0\n while (index != -1):\n index = s.find(cmd.verb, index)\n if index in indexes_by_cmd.values():\n index = index + 1\n else:\n indexes_by_cmd[cmd] = index\n break\n sorted_cmds = sorted(indexes_by_cmd.items(), key=operator.itemgetter(1))\n\n cmd_strs = [ ]\n for cmd in sorted_cmds:\n cmd_strs.append(str(cmd[0]))\n\n cfr = \"#\".join(cmd_strs)\n \n else:\n cfr = \"ERROR IN TEXTRAZOR()\" # \"()\" used to trigger \"NO INTERPRETATION\"\n \n return cfr\n\n\n# def kill_nodes(): \n\n# nodes = os.popen(\"rosnode list\").readlines()\n\n# for i in range(len(nodes)):\n# nodes[i] = nodes[i].replace(\"\\n\",\"\")\n# prt.debug(\"i: \"+str(i)+\" \"+nodes[i])\n\n# os.system(\"rosnode kill -a\")\n\n\npub_topic = \"CFR_Out\"\nsub_topic = \"/hearts/stt\"\n\n# zeke's key:\n#textrazor.api_key = \"9a7bb531e0ea81b43e8fba17a517152d4d016847ab88a63a54c6cb36\"\n# alex's key:\ntextrazor.api_key = \"211ef8b5891adff67b329277425597a000d96bd443eeca0fe977819e\"\nclient = textrazor.TextRazor(extractors=[\"entities\", \"dependency-trees\", \"phrases\", \"words\"])\n\npub = rospy.Publisher(pub_topic, String, queue_size=10) \n\nrospy.init_node('cfr_node', anonymous=True)\n\ndef writeresults(WAV,S2T,CFR):\n S2T = S2T.lower()\n resultsfile = ERLDATAPATHOUT+\"results.txt\"\n # ERL only want raw \"file name\" on USB stick\n wavfile = os.path.basename(WAV)\n fh = open (resultsfile,'a')\n str = wavfile+'|'+S2T.strip()+'|'+CFR+'\\n'\n fh.write(str)\n fh.close() \n\ndef callback(s):\n \n rospy.loginfo(rospy.get_name()+\": Received text message: %s\"%s.data)\n #TOPIC is in format <Test from speech string> ~ <wav file name> \n str1, str2 = s.data.split('~')\n\n if(run_mode == 'TH'): \n # strip off #index# prefix used by TH mode \n index, str1 = o_tt.get_key(str1)\n\n prt.info(\"*** T2CFR.py - calling msg.data = get_cfr(str1)\\n*** to derive CFR syntax from interpreted Text\")\n msg = String()\n\n msg.data = get_cfr(str1)\n \n # check the CFR command has arguments ie NOT Motion()\n # OR not zero length\n if \"()\" not in msg.data: \n if len(msg.data) > 0 : \n pass\n else:\n msg.data = 'NO_INTERPRETATION'\n else:\n msg.data = 'NO_INTERPRETATION'\n\n if(run_mode == 'TH'): \n # add back #index# prefix used by TH mode \n rtndata = o_tt.add_key(index,msg.data)\n else: \n rtndata = msg.data\n \n pub.publish(rtndata)\n rospy.loginfo(rospy.get_name()+\": Published CFR message: %s\"%msg.data+'\\nmaybe changed to: '+rtndata)\n \n # ERL formatted results file\n writeresults(str2,str1,msg.data) \n \ncname = \"t2cfr.py\"\nrospy.Subscriber(sub_topic, String, callback) \nrospy.spin()\n","sub_path":"fbm3_speech_understanding/scripts/t2cfr.py","file_name":"t2cfr.py","file_ext":"py","file_size_in_byte":15444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"315822320","text":"\r\n# --- Define your functions below! ---\r\nfrom random import *\r\ndef intro():\r\n print(\"Hey I'm Chatbot 3000! 
I am here to entertain ya!\")\r\ndef say_hello(name):\r\n print(\"Hi \" + name)\r\n\r\ndef menugenerator():\r\n aListMainCourse = [\"Hamburger\", \"Pizza\", \"Pasta\", \"Salad\"]\r\n bListSides = [\"Fries\", \"Onion Rings\", \"Mashed Potatoes\", \"Bread Slices\"]\r\n aRandomIndex = randint(0, len(aListMainCourse)-1)\r\n bRandomIndex = randint(0, len(bListSides)-1)\r\n print(aListMainCourse[aRandomIndex])\r\n print(bListSides[bRandomIndex])\r\n\r\ndef haikugenerator():\r\n aListFiveSyllable = [\"You are amazing\", \"I love you so much\", \"You are beautiful\"]\r\n aListSevenSyllable = [\"You are the moon to my stars\", \"Your caring heart makes my day\"]\r\n aListFiveSyllable2 = [\"Thanks for being you\", \"You are loved dearly\"]\r\n # use a separate random index per list so each line is drawn from its full range\r\n aRandomIndex = randint(0, len(aListFiveSyllable)-1)\r\n bRandomIndex = randint(0, len(aListSevenSyllable)-1)\r\n cRandomIndex = randint(0, len(aListFiveSyllable2)-1)\r\n print(aListFiveSyllable[aRandomIndex])\r\n print(aListSevenSyllable[bRandomIndex])\r\n print(aListFiveSyllable2[cRandomIndex])\r\n# --- Put your main program below! ---\r\ndef main():\r\n for i in range (1):\r\n intro()\r\n user_name = input(\"What's your name?\")\r\n say_hello(user_name)\r\n answer = input(\"How are you feeling?\")\r\n print(\"Okay!\")\r\n answer = input(\"Wanna hear a joke?(yes or no)\")\r\n if answer == \"yes\":\r\n print(\"aight!\")\r\n answer = input (\"What do you call a guy with a rubber toe?\")\r\n print(\"ROBERTOOOOOOO\")\r\n print(\"ahahahahahahaha...sorry if it was bad\")\r\n else:\r\n print(\"aww that's no fun\")\r\n answer = input (\"Do you want me to generate your meal?(yes or no)\")\r\n if answer == \"yes\":\r\n menugenerator()\r\n print(\"You are welcome!\")\r\n else:\r\n print(\"Okay fine!\")\r\n answer = input (\"Do you want me to generate a haiku? (yes or no)\")\r\n if answer == \"yes\":\r\n haikugenerator()\r\n else:\r\n print(\"aight your choice\")\r\n answer = input(\"I'm getting a little bit tired, but thanks for talking with me! BYEEE!\")\r\n# DON'T TOUCH! 
Setup code that runs your main() function.\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n\r\n# def intro(name):\r\n# my_name = name\r\n# return my_name\r\n# def turn(result):\r\n# if result == \"emily\":\r\n# print(\"go left\")\r\n# else:\r\n# print(\"go right\")\r\n#\r\n# turn(intro(\"emily\"))\r\n","sub_path":"ChatBot.py","file_name":"ChatBot.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"576454085","text":"from django.contrib import admin\nfrom django.urls import path\nfrom django.urls import re_path\nimport mainapp.views as mainapp\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.conf.urls import include\nfrom django.views.decorators.cache import cache_page\n\nurlpatterns = [\n path('', mainapp.home, name='home'),\n path('product/<int:pid>/', mainapp.product, name='product'),\n path('product/<int:pid>/ajax/', cache_page(3600)(mainapp.product_ajax), name='product_ajax'),\n path('category/<int:cid>/', mainapp.category, name='category'),\n path('categories/', mainapp.categories, name='categories'),\n path('admin/', admin.site.urls),\n path('about/', mainapp.about, name='about'),\n path('pages/', include('django.contrib.flatpages.urls')),\n path('summernote/', include('django_summernote.urls')),\n re_path(r'^auth/', include(('authapp.urls', 'authapp'), namespace='auth')),\n re_path(r'^cart/', include(('cartapp.urls', 'cartapp'), namespace='cart')),\n re_path(r'^dashboard/', include(('dashboardapp.urls', 'dashboardapp'), namespace='dashboard')),\n re_path(r'^category/(?P<cid>\\d+)/page/(?P<page>\\d+)/$', mainapp.category, name='page'),\n re_path(r'^auth/verify/google/oauth2/',\n include(('social_django.urls', 'social_django'), namespace=\"social\")),\n re_path(r'^orders/', include(('ordersapp.urls', 'ordersapp'), namespace='orders')),\n path('product/price/<int:pid>/', mainapp.get_price, name='get_price'),\n ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n urlpatterns += [re_path(r'^__debug__/', include(debug_toolbar.urls))]\n","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"623754717","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass PurchaseApplyInfoDTO(object):\n\n def __init__(self):\n self._account_code = None\n self._begin_cycle = None\n self._biz_budget_apply_code = None\n self._biz_budget_id = None\n self._biz_type = None\n self._budget_strategy = None\n self._currency = None\n self._end_cycle = None\n self._gmt_create = None\n self._gmt_modified = None\n self._id = None\n self._pool_code = None\n self._remain_amount = None\n self._status = None\n self._use = None\n\n @property\n def account_code(self):\n return self._account_code\n\n @account_code.setter\n def account_code(self, value):\n self._account_code = value\n @property\n def begin_cycle(self):\n return self._begin_cycle\n\n @begin_cycle.setter\n def begin_cycle(self, value):\n self._begin_cycle = value\n @property\n def biz_budget_apply_code(self):\n return self._biz_budget_apply_code\n\n @biz_budget_apply_code.setter\n def biz_budget_apply_code(self, value):\n self._biz_budget_apply_code = value\n 
@property\n def biz_budget_id(self):\n return self._biz_budget_id\n\n @biz_budget_id.setter\n def biz_budget_id(self, value):\n self._biz_budget_id = value\n @property\n def biz_type(self):\n return self._biz_type\n\n @biz_type.setter\n def biz_type(self, value):\n self._biz_type = value\n @property\n def budget_strategy(self):\n return self._budget_strategy\n\n @budget_strategy.setter\n def budget_strategy(self, value):\n self._budget_strategy = value\n @property\n def currency(self):\n return self._currency\n\n @currency.setter\n def currency(self, value):\n self._currency = value\n @property\n def end_cycle(self):\n return self._end_cycle\n\n @end_cycle.setter\n def end_cycle(self, value):\n self._end_cycle = value\n @property\n def gmt_create(self):\n return self._gmt_create\n\n @gmt_create.setter\n def gmt_create(self, value):\n self._gmt_create = value\n @property\n def gmt_modified(self):\n return self._gmt_modified\n\n @gmt_modified.setter\n def gmt_modified(self, value):\n self._gmt_modified = value\n @property\n def id(self):\n return self._id\n\n @id.setter\n def id(self, value):\n self._id = value\n @property\n def pool_code(self):\n return self._pool_code\n\n @pool_code.setter\n def pool_code(self, value):\n self._pool_code = value\n @property\n def remain_amount(self):\n return self._remain_amount\n\n @remain_amount.setter\n def remain_amount(self, value):\n self._remain_amount = value\n @property\n def status(self):\n return self._status\n\n @status.setter\n def status(self, value):\n self._status = value\n @property\n def use(self):\n return self._use\n\n @use.setter\n def use(self, value):\n self._use = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.account_code:\n if hasattr(self.account_code, 'to_alipay_dict'):\n params['account_code'] = self.account_code.to_alipay_dict()\n else:\n params['account_code'] = self.account_code\n if self.begin_cycle:\n if hasattr(self.begin_cycle, 'to_alipay_dict'):\n params['begin_cycle'] = self.begin_cycle.to_alipay_dict()\n else:\n params['begin_cycle'] = self.begin_cycle\n if self.biz_budget_apply_code:\n if hasattr(self.biz_budget_apply_code, 'to_alipay_dict'):\n params['biz_budget_apply_code'] = self.biz_budget_apply_code.to_alipay_dict()\n else:\n params['biz_budget_apply_code'] = self.biz_budget_apply_code\n if self.biz_budget_id:\n if hasattr(self.biz_budget_id, 'to_alipay_dict'):\n params['biz_budget_id'] = self.biz_budget_id.to_alipay_dict()\n else:\n params['biz_budget_id'] = self.biz_budget_id\n if self.biz_type:\n if hasattr(self.biz_type, 'to_alipay_dict'):\n params['biz_type'] = self.biz_type.to_alipay_dict()\n else:\n params['biz_type'] = self.biz_type\n if self.budget_strategy:\n if hasattr(self.budget_strategy, 'to_alipay_dict'):\n params['budget_strategy'] = self.budget_strategy.to_alipay_dict()\n else:\n params['budget_strategy'] = self.budget_strategy\n if self.currency:\n if hasattr(self.currency, 'to_alipay_dict'):\n params['currency'] = self.currency.to_alipay_dict()\n else:\n params['currency'] = self.currency\n if self.end_cycle:\n if hasattr(self.end_cycle, 'to_alipay_dict'):\n params['end_cycle'] = self.end_cycle.to_alipay_dict()\n else:\n params['end_cycle'] = self.end_cycle\n if self.gmt_create:\n if hasattr(self.gmt_create, 'to_alipay_dict'):\n params['gmt_create'] = self.gmt_create.to_alipay_dict()\n else:\n params['gmt_create'] = self.gmt_create\n if self.gmt_modified:\n if hasattr(self.gmt_modified, 'to_alipay_dict'):\n params['gmt_modified'] = self.gmt_modified.to_alipay_dict()\n else:\n 
params['gmt_modified'] = self.gmt_modified\n if self.id:\n if hasattr(self.id, 'to_alipay_dict'):\n params['id'] = self.id.to_alipay_dict()\n else:\n params['id'] = self.id\n if self.pool_code:\n if hasattr(self.pool_code, 'to_alipay_dict'):\n params['pool_code'] = self.pool_code.to_alipay_dict()\n else:\n params['pool_code'] = self.pool_code\n if self.remain_amount:\n if hasattr(self.remain_amount, 'to_alipay_dict'):\n params['remain_amount'] = self.remain_amount.to_alipay_dict()\n else:\n params['remain_amount'] = self.remain_amount\n if self.status:\n if hasattr(self.status, 'to_alipay_dict'):\n params['status'] = self.status.to_alipay_dict()\n else:\n params['status'] = self.status\n if self.use:\n if hasattr(self.use, 'to_alipay_dict'):\n params['use'] = self.use.to_alipay_dict()\n else:\n params['use'] = self.use\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = PurchaseApplyInfoDTO()\n if 'account_code' in d:\n o.account_code = d['account_code']\n if 'begin_cycle' in d:\n o.begin_cycle = d['begin_cycle']\n if 'biz_budget_apply_code' in d:\n o.biz_budget_apply_code = d['biz_budget_apply_code']\n if 'biz_budget_id' in d:\n o.biz_budget_id = d['biz_budget_id']\n if 'biz_type' in d:\n o.biz_type = d['biz_type']\n if 'budget_strategy' in d:\n o.budget_strategy = d['budget_strategy']\n if 'currency' in d:\n o.currency = d['currency']\n if 'end_cycle' in d:\n o.end_cycle = d['end_cycle']\n if 'gmt_create' in d:\n o.gmt_create = d['gmt_create']\n if 'gmt_modified' in d:\n o.gmt_modified = d['gmt_modified']\n if 'id' in d:\n o.id = d['id']\n if 'pool_code' in d:\n o.pool_code = d['pool_code']\n if 'remain_amount' in d:\n o.remain_amount = d['remain_amount']\n if 'status' in d:\n o.status = d['status']\n if 'use' in d:\n o.use = d['use']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/PurchaseApplyInfoDTO.py","file_name":"PurchaseApplyInfoDTO.py","file_ext":"py","file_size_in_byte":7913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"119265514","text":"from downloader import Downloader\nfrom imdb_parser import IMDB_Parser\n\n\n\ndef main():\n\n\tdownloader=Downloader()\n\thtml=downloader.download('http://www.imdb.com/title/tt0121955/reviews?ref_=tt_urv')\n\n\tif html == None:\n\t\texit(1)\n\n\n\tparser=IMDB_Parser(html)\n\n\treviews=parser.get_movie_reviews()\n\n\n\n\n\t\n\n\n\tfor review in reviews:\n\t\t\n\t\tprint(review)\n\n\t\n\t\n\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"246442552","text":"from django.urls import path\nfrom . 
import views\nfrom django.contrib.auth.views import LoginView\nfrom django.contrib.auth.decorators import login_required\n\napp_name = \"music\"\n\nurlpatterns = [\n\n path('', views.IndexView.as_view(), name=\"index\"),\n path('album/<int:pk>/detail', views.DetailView.as_view(), name=\"detail\"),\n path('album/store/',login_required(views.AlbumCreate.as_view()), name=\"store\"),\n path('album/<int:pk>/update',views.AlbumCreate.as_view(), name=\"update\"),\n path('album/<int:pk>/delete/',views.AlbumDelete.as_view(), name=\"delete\"),\n path('login', LoginView.as_view(template_name='music/login.html'), name=\"login\"),\n path('sign-up', views.signup, name=\"register\"),\n path('dashboard', views.dashboardView, name=\"dashboard\"),\n path('logout', views.Logout, name=\"logout\"),\n]","sub_path":"music/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"206217063","text":"# Libs\r\nfrom pandas import read_table, crosstab\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn\r\nfrom sklearn.metrics import precision_recall_curve, f1_score, accuracy_score, confusion_matrix\r\n\r\n# Classifiers\r\nfrom sklearn.svm import LinearSVC, NuSVC\r\nfrom sklearn.ensemble import AdaBoostClassifier\r\n\r\ndef read_data():\r\n frame = read_table(\"eyeState.txt\", encoding='latin-1', sep=',', skipinitialspace=True, index_col=None, header=None)\r\n return frame\r\n\r\ndef get_features_and_labels(frame):\r\n arr = np.array(frame, dtype=float)\r\n\r\n # Select the target column.\r\n X, y = arr[:, :-1], arr[:, -1]\r\n \r\n # Split the data into train and test sets.\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.6)\r\n\r\n # Normalize the attribute values\r\n scaler = StandardScaler()\r\n scaler.fit(X_train)\r\n X_train = scaler.transform(X_train)\r\n X_test = scaler.transform(X_test)\r\n\r\n return X_train, X_test, y_train, y_test\r\n\r\n\r\ndef evaluate_classifier(X_train, X_test, y_train, y_test):\r\n # Classification with Linear SVC\r\n classifier = LinearSVC(C=1)\r\n classifier.fit(X_train, y_train)\r\n score = f1_score(y_test, classifier.predict(X_test))\r\n accuracy = accuracy_score(y_test, classifier.predict(X_test))\r\n matrix = confusion_matrix(y_test, classifier.predict(X_test))\r\n \r\n # Generate precision-recall curves\r\n y_prob = classifier.decision_function(X_test)\r\n precision, recall, _ = precision_recall_curve(y_test, y_prob)\r\n\r\n yield 'Linear SVC (F1 score={:.3f})'.format(score), precision, recall\r\n print(\"LinearSVC results\\n\")\r\n print(\"F-Measure: \" + str(score) + \"\\nAccuracy: \"+ str(accuracy)+\"\\n\")\r\n print(\"Confusion matrix:\\n\")\r\n print(crosstab(y_test, classifier.predict(X_test), rownames = ['Actual'], colnames = ['Predicted'], margins = True))\r\n\r\n # Classification with NuSVC\r\n classifier = NuSVC(kernel='rbf', nu=0.5, gamma=1e-3)\r\n classifier.fit(X_train, y_train)\r\n score = f1_score(y_test, classifier.predict(X_test))\r\n accuracy = accuracy_score(y_test, classifier.predict(X_test))\r\n matrix = confusion_matrix(y_test, classifier.predict(X_test))\r\n \r\n # Generate precision-recall curves\r\n y_prob = classifier.decision_function(X_test)\r\n precision, recall, _ = precision_recall_curve(y_test, y_prob)\r\n\r\n print(\"\\nNu SVC results\\n\")\r\n print(\"F-Measure: \" + 
str(score) + \"\\nAccuracy: \"+ str(accuracy)+\"\\n\")\r\n print(\"Matrix de confusao:\\n\")\r\n print(crosstab(y_test, classifier.predict(X_test), rownames = ['Real'], colnames = ['Classificado'], margins = True))\r\n yield 'NuSVC (F1 score={:.3f})'.format(score), precision, recall\r\n\r\n # Classificação por AdaBoost\r\n classifier = AdaBoostClassifier(n_estimators=50, learning_rate=1.0, algorithm='SAMME.R')\r\n classifier.fit(X_train, y_train)\r\n score = f1_score(y_test, classifier.predict(X_test))\r\n accuracy = accuracy_score(y_test, classifier.predict(X_test))\r\n matrix = confusion_matrix(y_test, classifier.predict(X_test))\r\n \r\n # Gerando curvas precision-recall\r\n y_prob = classifier.decision_function(X_test)\r\n precision, recall, _ = precision_recall_curve(y_test, y_prob)\r\n\r\n print(\"\\nResultado Ada Booster\\n\")\r\n print(\"F-Mesure: \" + str(score) + \"\\nAccuracy: \"+ str(accuracy)+\"\\n\")\r\n print(\"Matrix de confusao:\\n\")\r\n # print(matrix)\r\n print(crosstab(y_test, classifier.predict(X_test), rownames = ['Real'], colnames = ['Classificado'], margins = True))\r\n yield 'Ada Boost (F1 score={:.3f})'.format(score), precision, recall\r\n\r\n\r\ndef plot(results):\r\n fig = plt.figure(figsize=(6, 6))\r\n fig.canvas.set_window_title('Classificacao de olhos')\r\n\r\n for label, precision, recall in results:\r\n plt.plot(recall, precision, label=label)\r\n\r\n plt.title('Curva de Precisao-Recall')\r\n plt.xlabel('Precisao')\r\n plt.ylabel('Recall')\r\n plt.legend(loc='lower left')\r\n\r\n plt.tight_layout()\r\n plt.show()\r\n plt.close()\r\n\r\nif __name__ == '__main__':\r\n print(\"Obtendo base de dados\")\r\n frame = read_data()\r\n\r\n print(\"Processando {} instancias com {} atributos\".format(len(frame.index), len(frame.columns)))\r\n X_train, X_test, y_train, y_test = get_features_and_labels(frame)\r\n\r\n print(\"Avaliando classificadores\")\r\n results = list(evaluate_classifier(X_train, X_test, y_train, y_test))\r\n\r\n print(\"Resultados no Grafico\")\r\n plot(results)","sub_path":"trabalho_2/src/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"213266045","text":" \n#!/usr/bin/python3 \nimport copy\nimport sys, os\nimport pathlib\nimport numpy as np\nimport matplotlib as mpl \nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nfrom matplotlib.colors import LogNorm\nimport matplotlib.gridspec as gridspec\nfrom scipy.interpolate import interp2d\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n# Stellapy package\nsys.path.append(os.path.abspath(pathlib.Path(os.environ.get('STELLAPY')).parent)+os.path.sep) \nfrom stellapy.calculations.calculate_inverseFourierTransform import calculate_inverseFourierTransform \nfrom stellapy.plot.utils.labels.add_timeAndZFramesToLabel import add_timeAndZFramesToLabel \nfrom stellapy.plot.utils.style.create_figure import update_figure_style\nfrom stellapy.plot.utils.surface.get_colorMap import get_colorMap\nfrom stellapy.utils.decorators.exit_program import exit_program\nfrom stellapy.simulations.Research import create_research\nfrom stellapy.plot.utils.labels import standardLabels\nfrom stellapy.utils.commandprompt.bash import Bash \n\n#===============================================================================\n# Plot phi(x,y) #\n#===============================================================================\n\ndef plot_quantity_vs_xy(folder, 
z_quantity=\"phi\", specie=None, tstart=None, tend=None, z=None, \n remove_zonal_modes=True, log=False, crange=None, ordersOfMagnitude=2, interpolation_step=20): \n \n # Create <simulations> based on the given <folder>\n research = create_research(folders=folder) \n \n # Plot each simulation separately \n for experiment in research.experiments:\n for simulation in experiment.simulations: \n \n # Moments can be plotted for each species\n nspecies = simulation.input.nspec if (specie==None and \"phi\" not in z_quantity) else 1\n \n # Create a figure \n fig = plt.figure(figsize=(6*nspecies, 5)); axes = []\n grid_specifications = gridspec.GridSpec(1, nspecies)\n if nspecies==1: grid_specifications.update(top=0.9, left=0.15, right=0.85, bottom=0.15)\n if nspecies==2: grid_specifications.update(top=0.93, left=0.08, right=0.97, bottom=0.1, wspace=0.2, hspace=0.3)\n if nspecies==3: grid_specifications.update(top=0.9, left=0.07, right=0.95, bottom=0.15, wspace=0.4, hspace=0.3)\n for i in range(nspecies): axes.append(plt.subplot(grid_specifications[i]))\n update_figure_style(fig, axes)\n \n # Plot phi2(t)\n for i in range(nspecies):\n if specie!=None: i = specie\n subplot_quantity_vs_xy(axes[i], simulation, z_quantity, specie=i, t_range=[tstart, tend], z=z, interpolation_step=interpolation_step,\n remove_zonal_modes=remove_zonal_modes, log=log, crange=crange, ordersOfMagnitude=ordersOfMagnitude) \n \n # Show the figure \n mpl.rcParams[\"savefig.directory\"] = folder\n plt.show()\n return\n\n#---------------------------------------- \ndef subplot_quantity_vs_xy(ax, simulation, z_quantity, specie, t_range=None, z=None, interpolation_step=None,\n remove_zonal_modes=False, log=False, crange=None, ordersOfMagnitude=2):\n\n # Get the data \n z_vs_xy, tstart, tend = get_realSpaceData(simulation, z_quantity, specie, t_range, z, remove_zonal_modes) \n x, y = simulation.vec.x, simulation.vec.y\n \n # Interpolate the data\n if interpolation_step: \n function = interp2d(x, y, z_vs_xy.T, kind='cubic')\n x = np.linspace(x[0], x[-1], int(len(x))*interpolation_step)\n y = np.linspace(y[0], y[-1], int(len(y))*interpolation_step)\n z_vs_xy = function(x,y).T \n\n # Value x=0 is plotted from x=0 to the next x, correct this by shifting x half a tile left\n x = x-(x[-1]-x[-2])/2; y = y-(y[-1]-y[-2])/2\n x = list(x); x.append(x[-1]+(x[-1]-x[-2]))\n y = list(y); y.append(y[-1]+(y[-1]-y[-2]))\n \n # For quantities squared use a different map\n squared = True if \"2\" in z_quantity else False\n \n # Get the color map\n if squared: cmap = copy.copy(plt.get_cmap(\"jet\")) \n if not squared: cmap = get_colorMap(color_map=\"red-white-blue\")\n cmap.set_bad(color='black') \n \n # Get the range of the color bar\n if log:\n z_vs_xy[z_vs_xy < 1.E-25] = np.NaN\n crange = [ np.nanmin(abs(z_vs_xy)), np.nanmax(abs(z_vs_xy)) ] \n if ordersOfMagnitude: crange[0] = 10**(np.log10(crange[1])-ordersOfMagnitude)\n norm = LogNorm(vmin=crange[0], vmax=crange[1]) \n if not log:\n norm = None\n if crange!=None: \n vmin = crange[0]; vmax = crange[1]\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n if not squared: \n vmin = np.nanmin(z_vs_xy, axis=(0,1)); vmax = np.nanmax(z_vs_xy, axis=(0,1)) \n vmin = np.min([vmin, -vmax]); vmax = np.max([vmax, -vmin]) \n norm = mcolors.TwoSlopeNorm(vmin=vmin, vcenter=0, vmax=vmax)\n \n # Plot the surface z(x,y) \n img = ax.pcolormesh(x, y, np.real(z_vs_xy).T, cmap=cmap, norm=norm) \n \n # Add colorbar \n divider = make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.1)\n cbar = 
plt.colorbar(img, cax=cax) \n cbar.update_ticks(); cbar.set_label(\"\")\n \n # Add the colorbar label as title\n nozonal = \"_nozonal\" if remove_zonal_modes==True else \"\"\n z_label = add_timeAndZFramesToLabel(standardLabels[\"normalized\"][z_quantity+nozonal].replace(\"\\\\hat\",\"\"), tstart, tend, z) \n ax.set_title(z_label.replace(\"_s\",\"_\"+str(specie))) \n \n # Change the axis style\n if not log:\n cbar.formatter.set_powerlimits((0,0))\n cbar.formatter.set_scientific(True)\n cbar.ax.yaxis.set_offset_position('left')\n cbar.update_ticks() \n\n # Set axis\n ax.set_xlabel(standardLabels[\"normalized\"][\"x\"])\n ax.set_ylabel(standardLabels[\"normalized\"][\"y\"])\n ax.set_xlim(xmin=np.min(x), xmax=np.max(x))\n ax.set_ylim(ymin=np.min(y), ymax=np.max(y)) \n return ax, cbar\n\n#-------------------------------\ndef get_realSpaceData(simulation, z_quantity, specie, t_range, z, remove_zonal_modes=False): \n \n # Try to read the data from the big 4D files\n try: \n z_vs_kxky, tstart, tend = get_fourierSpaceDatafrom4DFiles(simulation, z_quantity, specie, z, t_range)\n \n # If this fails, read the saturated files instead.\n except: \n if (t_range[0]!=None and t_range[0]==t_range[1]) or (t_range[0]!=None and t_range[1]==None): \n dimension = \"4D\" if z==None else \"5D\"\n exit_reason = \"A specific time instance is chosen but the data files were not available.\\n\"\n if \"phi\" in z_quantity: exit_reason += \"Please write the required files through: write_dataFiles -s pot\"+dimension\n elif z_quantity in [\"dens\", \"temp\", \"upar\"]: exit_reason += \"Please write the required files through: write_dataFiles -s mom\"+dimension\n exit_program(exit_reason, get_realSpaceData, sys._getframe().f_lineno)\n z_vs_kxky, tstart, tend = get_fourierSpaceDataFromSaturatedFiles(simulation, z_quantity, specie, z)\n \n # Check the time frame\n if t_range!=None:\n if t_range[0]!=None:\n if tstart!=t_range[0] or (t_range[1]!=-1 and tend!=t_range[1]):\n dimension = \"4D\" if z==None else \"5D\"\n print(\"WARNING: the \"+dimension+\" files were not written, the data is plotted from\")\n print(\"the saturated file instead. 
If you want control over the time frame,\")\n if \"phi\" in z_quantity: print(\"write the data files through >> write_dataFiles -s pot\"+dimension)\n elif z_quantity in [\"dens\", \"temp\", \"upar\"]: print(\"write the data files through >> write_dataFiles -s mom\"+dimension)\n else: exit_program(\"Add other options\", get_realSpaceData, sys._getframe().f_lineno)\n \n # Remove the zonal modes\n if remove_zonal_modes==True: z_vs_kxky[:,0] = 0\n \n # Calculate the quantity in real space\n z_vs_xy = calculate_inverseFourierTransform(simulation, z_vs_kxky, axis_kx=0, axis_ky=1) \n \n # If we want the quantity squared, square it after the IFFT\n if \"2\" in z_quantity: z_vs_xy = np.abs(z_vs_xy)**2\n return z_vs_xy, tstart, tend\n\n#-------------------------------\ndef get_fourierSpaceDatafrom4DFiles(simulation, z_quantity, specie, z, t_range):\n \n # Averaged over the field line\n if z==None:\n \n # Get the potential\n if \"phi\" in z_quantity: \n z_vs_tkxky = simulation.potential.phi_vs_tkxky.phi[:,:,:]\n t = simulation.potential.phi_vs_tkxky.t\n \n # Get the moments\n if z_quantity in [\"dens\", \"temp\", \"upar\"]:\n t = simulation.moments.dens_vs_tskxky.t\n if z_quantity==\"dens\": z_vs_tkxky = simulation.moments.dens_vs_tskxky.dens[:,specie,:,:]\n if z_quantity==\"temp\": z_vs_tkxky = simulation.moments.temp_vs_tskxky.temp[:,specie,:,:]\n if z_quantity==\"upar\": z_vs_tkxky = simulation.moments.upar_vs_tskxky.upar[:,specie,:,:]\n \n # Specific point along the field line\n if z!=None:\n \n # Get the potential\n if \"phi\" in z_quantity: \n z_vs_tzkxky = simulation.potential.phi_vs_tzkxky.phi[:,:,:,:]\n t = simulation.potential.phi_vs_tzkxky.t\n \n # Get the moments\n if z_quantity in [\"dens\", \"temp\", \"upar\"]:\n t = simulation.moments.dens_vs_tskxky.t\n if z_quantity==\"dens\": z_vs_tzkxky = simulation.moments.dens_vs_tszkxky.dens[:,specie,:,:,:]\n if z_quantity==\"temp\": z_vs_tzkxky = simulation.moments.temp_vs_tszkxky.temp[:,specie,:,:,:]\n if z_quantity==\"upar\": z_vs_tzkxky = simulation.moments.upar_vs_tszkxky.upar[:,specie,:,:,:]\n \n # Grab the specific z-point\n iz = np.where(simulation.vec.z>=z)[0][0]\n z_vs_tkxky = z_vs_tzkxky[:,iz,:,:]\n \n # Average out the time dimension\n if t_range==None:\n t_range = simulation.time.t_range\n if t_range[1]!=None:\n z_vs_kxky = np.mean(z_vs_tkxky[(t>=t_range[0])&(t<=t_range[1]),:],axis=0) \n \n # Grab a specific time point\n if t_range[1]==None:\n try: it = np.where(t>=t_range[0])[0][0]\n except: exit_program(\"The chosen time instance t = \"+str(t_range[0])+\" does not exist.\", get_fourierSpaceDatafrom4DFiles, sys._getframe().f_lineno)\n t_range[0] = t[it]\n z_vs_kxky = z_vs_tkxky[it,:,:] \n return z_vs_kxky, t_range[0], t_range[1]\n \n#-------------------------------\ndef get_fourierSpaceDataFromSaturatedFiles(simulation, z_quantity, specie, z): \n \n # Get the potential\n if \"phi\" in z_quantity: \n z_vs_zkxky = simulation.saturated.phi_vs_zkxky.phi[:,:,:] \n \n # Get the moments\n if z_quantity in [\"dens\", \"temp\", \"upar\"]: \n if z_quantity==\"dens\": z_vs_zkxky = simulation.saturated.dens_vs_szkxky.dens[specie,:,:,:]\n if z_quantity==\"temp\": z_vs_zkxky = simulation.saturated.temp_vs_szkxky.temp[specie,:,:,:]\n if z_quantity==\"upar\": z_vs_zkxky = simulation.saturated.upar_vs_szkxky.upar[specie,:,:,:]\n \n # Average away the z-dimension or select a specific z\n if z==None:\n dl_over_B = simulation.geometry.dl_over_B \n z_vs_kxky = np.sum(z_vs_zkxky[:,:,:]*dl_over_B[:,np.newaxis,np.newaxis], axis=0) \n if z!=None:\n iz = 
np.where(simulation.vec.z>=z)[0][0]\n z_vs_kxky = z_vs_zkxky[iz,:,:]\n if z!=simulation.vec.z[iz]: print(\"WARNING: Choosen value of z is \"+str(z)+\", selected value of z is \"+str(simulation.vec.z[iz]))\n \n # Read the time range over which the saturated file has been averaged\n t_range = simulation.saturated.trange\n return z_vs_kxky, t_range[0], t_range[1] \n\n#===============================================================================\n# RUN AS BASH COMMAND #\n#=============================================================================== \n \nif __name__ == \"__main__\" and False: \n \n # Launch the bash interface\n bash = Bash(plot_quantity_vs_xy, __doc__) \n \n # Get the arguments and execute the script\n plot_quantity_vs_xy(**bash.get_arguments()) \n\n################################################################################\n# DEBUG MODE #\n################################################################################\n \nif __name__ == \"__main__\":\n import pathlib\n folder = pathlib.Path(\"/home/hanne/CIEMAT/RUNS/test_CBC2/biggrid\")\n plot_quantity_vs_xy(folder, z=0, tstart=310)\n sys.exit()\n \n \n\n","sub_path":"stellapy/plot/nonlinear/quantity_vs_xy.py","file_name":"quantity_vs_xy.py","file_ext":"py","file_size_in_byte":12813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"175682397","text":"# Ce programme doit être lancé avec les permissions 'root'\nimport socket\n\nip = \"206.167.46.234\"\nport = 420\n\n# Création de l'objet 'socket'\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# Connexion au port local\ns.bind((ip,port))\n\nfd = open(\"/etc/test/test_serveur.conf\", 'r')\nposition = fd.read()\nposition2=position.split('\\n')\nfd.close()\n\n# Boucle d'écoute\nwhile True:\n# Réception des données et affichage\n\tdonnees,addr = s.recvfrom(1024)\n\tprint(\"Message UDP de\", addr, \": \", donnees)\n\n\tf=open(position2[0],'r')\n\tfile=f.read()\n\tfile=(file + (\"Message UDP de\" + str(addr) + \":\" + str(donnees) + '\\n'))\n\tf.close()\n\n\tf=open(position2[0],'w')\n\tf.write(file)\n\tf.close()\n\ns.close()\nprint('Termine.')\n","sub_path":"sock_serveur.py","file_name":"sock_serveur.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"470516759","text":"from __future__ import (print_function, division, absolute_import)\n\nimport logging\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.ndimage as ndi\nfrom skimage.feature import peak_local_max\nfrom skimage.morphology import watershed\n\nfrom scpye.track.bounding_box import extract_bbox\nfrom scpye.improc.image_processing import (fill_bw, scale_array, u8_from_bw,\n gray_from_bgr, bgr_from_gray,\n enhance_contrast)\nfrom scpye.improc.contour_analysis import (contour_bounding_rect,\n analyze_contours_bw, Blob,\n find_contours)\nfrom scpye.utils.drawing import draw_contours, Colors, imshow\n\n\ndef mean_blob_area(blobs):\n areas = [blob.prop['area'] for blob in blobs]\n return np.mean(areas)\n\n\nclass BlobAnalyzer(object):\n def __init__(self, max_aspect=1.3, min_extent=0.62,\n min_solidity=0.91, gauss_filter_sigma=2,\n max_filter_size=4, gray_edt_ratio=1.5, min_peak_distance=5,\n exclude_border=True, vis=False):\n # Parameters for extracting single blob\n self.max_aspect = max_aspect # 1.3\n self.min_extent = min_extent # 0.62\n self.min_solidity = min_solidity # 0.91\n\n # Parameters for splitting multi blob\n 
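# (Gaussian smoothing sigma, max-filter window size, intensity/EDT blend ratio\n # and minimum peak spacing consumed by prepare_watershed when splitting merged blobs)\n 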
self.gauss_filter_sigma = gauss_filter_sigma\n self.max_filter_size = max_filter_size\n self.gray_edt_ratio = gray_edt_ratio\n self.gray_max = 100\n self.min_peak_distance = min_peak_distance\n self.exclude_border = exclude_border\n\n self.logger = logging.getLogger(__name__)\n\n # Drawing\n self.vis = vis\n self.disp_bw = None\n self.disp_bgr = None\n\n def analyze(self, bgr, bw):\n \"\"\"\n :param bgr: color image\n :param bw: binary image\n :return: fruits\n \"\"\"\n gray = gray_from_bgr(bgr)\n blobs = analyze_contours_bw(bw, min_area=4)\n\n cntrs = [blob.cntr for blob in blobs]\n bw_fill = fill_bw(bw, cntrs, in_place=False)\n\n if self.vis:\n self.disp_bw = bgr_from_gray(bw_fill)\n good_bgr = enhance_contrast(bgr)\n self.disp_bgr = bgr_from_gray(gray_from_bgr(good_bgr))\n self.disp_bgr[bw > 0] = good_bgr[bw > 0]\n\n # Get single bboxes (fruits)\n single_blobs, multi_blobs = self.extract_single(blobs)\n\n if self.vis:\n single_cntrs = [blob.cntr for blob in single_blobs]\n draw_contours(self.disp_bgr, single_cntrs, thickness=2,\n color=Colors.blue)\n\n # Split them to single bbox and add to fruits\n more_single_blobs, split_blobs = self.split_multi(multi_blobs, gray)\n\n if self.vis:\n more_single_cntrs = [blob.cntr for blob in more_single_blobs]\n draw_contours(self.disp_bgr, more_single_cntrs, color=Colors.cyan,\n thickness=2)\n split_cntrs = [blob.cntr for blob in split_blobs]\n draw_contours(self.disp_bgr, split_cntrs, color=Colors.green,\n thickness=2)\n\n self.logger.debug(\n \"single/more/split: {}/{}/{}\".format(len(single_blobs),\n len(more_single_blobs),\n len(split_blobs)))\n\n split_blobs.extend(single_blobs)\n split_blobs.extend(more_single_blobs)\n fruits = np.array([blob.bbox for blob in split_blobs])\n\n return fruits, bw_fill\n\n def is_single_blob(self, blob, mean_area):\n \"\"\"\n Check if this blob is a single blob\n :param blob:\n :param mean_area:\n :return:\n \"\"\"\n prop = blob.prop\n\n # If area is less than average then it is a single blob\n if prop['area'] < mean_area:\n return True\n\n # For a blob that is big enough, if it is solid then it is a single blob\n if prop['solidity'] > self.min_solidity:\n return True\n\n # For the rest blobs, if it is a relative filled square,\n # it is a single blob\n if prop['extent'] > self.min_extent \\\n and prop['aspect'] < self.max_aspect:\n return True\n\n return False\n\n def extract_single(self, blobs):\n \"\"\"\n Extract potential multi-blobs\n :param blobs:\n :return: list of potential multi-blobs\n \"\"\"\n mean_area = mean_blob_area(blobs)\n\n single_blobs, multi_blobs = [], []\n for blob in blobs:\n\n if self.is_single_blob(blob, mean_area):\n single_blobs.append(blob)\n else:\n multi_blobs.append(blob)\n\n return single_blobs, multi_blobs\n\n def split_multi(self, blobs, gray):\n \"\"\"\n Split potential multi-blobs into separate bounding boxes\n :param blobs:\n :param gray:\n \"\"\"\n single_blobs, split_blobs = [], []\n\n for blob in blobs:\n gray_bbox = extract_gray(gray, blob)\n\n # calculate distance measure for watershed\n dist_max, markers, n_peaks = self.prepare_watershed(gray_bbox)\n\n if n_peaks < 2:\n single_blobs.append(blob)\n else:\n labels = watershed(-dist_max, markers, mask=gray_bbox)\n # VIS\n if self.vis:\n imshow(labels, dist_max, markers, figsize=(12, 12),\n interp=\"none\", cmap=plt.cm.viridis)\n\n each_blobs = blobs_from_labels(labels, n_peaks, blob)\n split_blobs.extend(each_blobs)\n\n return single_blobs, split_blobs\n\n def prepare_watershed(self, gray):\n \"\"\"\n Prepare for 
watershed\n :param gray:\n :return:\n \"\"\"\n gray_blur = ndi.gaussian_filter(gray, self.gauss_filter_sigma)\n # gray will be converted to binary when performing edt\n euclid_dist = ndi.distance_transform_edt(gray)\n dist = scale_array(gray_blur, val=self.gray_max)\n # combination of intensity and distance transform\n dist += scale_array(euclid_dist,\n val=self.gray_max / self.gray_edt_ratio)\n\n dist_max = ndi.maximum_filter(dist, size=self.max_filter_size,\n mode='constant')\n local_max = peak_local_max(dist_max,\n min_distance=self.min_peak_distance,\n indices=False,\n exclude_border=self.exclude_border)\n markers, n_peaks = ndi.label(local_max)\n\n return dist_max, markers, n_peaks\n\n\ndef blobs_from_labels(labels, n_peaks, blob):\n \"\"\"\n Extract bboxes in labels and convert to global bboxes\n :param labels:\n :param n_peaks:\n :param blob:\n :return:\n \"\"\"\n each_blobs = []\n\n for i in range(n_peaks):\n label_i1 = u8_from_bw(labels == i + 1) # 0 is background\n local_bbox = contour_bounding_rect(label_i1)\n local_cntr = find_contours(label_i1)[0]\n # shift bbox from local to global\n local_bbox[:2] += blob.bbox[:2]\n local_cntr += blob.bbox[:2]\n each_blobs.append(Blob(bbox=local_bbox, prop=None, cntr=local_cntr))\n\n return each_blobs\n\n\ndef extract_gray(gray, blob):\n \"\"\"\n Extract gray and binary image from rprop\n :param gray:\n :param blob:\n :return: gray bbox\n \"\"\"\n bbox = blob.bbox\n cntr = blob.cntr\n\n gray_bbox = extract_bbox(gray, bbox, copy=True)\n # redraw contour so that we don't accidentally grab pixels from other blobs\n # and because cntr is global, we need to draw it onto full image\n bw_cntr = fill_bw(gray, [cntr])\n bw_bbox = extract_bbox(bw_cntr, bbox)\n gray_bbox[bw_bbox == 0] = 0\n\n return gray_bbox\n","sub_path":"scpye/improc/blob_analyzer.py","file_name":"blob_analyzer.py","file_ext":"py","file_size_in_byte":7893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"502326168","text":"import base64\nimport asyncio\nimport logging\n\nfrom abc import ABC\n\nLOG = logging.getLogger(__name__)\n\n\nINDOOR_SENSOR = 'indoor_sensor'\nDOOR_LOCK_SENSOR = 'door_lock_sensor'\n\n\nclass Decoder(ABC):\n ''' Base class for decoders. Abstact. '''\n def __init__(self, data):\n self.raw = data\n self.data = None\n\n async def decode(self):\n '''\n Decodes the raw data passed to the constructor.\n Override this method with the custom decoder requirements for specific needs.\n '''\n return {k: v for k, v in self.raw}\n\n async def is_valid(self):\n '''\n Initializes the decoding and returns the decoded data.\n Yo should use this method prior to accessing the data of your decoder and\n verify that it yields a positive result.\n '''\n\n self.data = await self.decode()\n\n if self.data:\n return True\n\n return False\n\n @property\n def sensor_type(self):\n ''' Optionally set a sensor type for your decoder. '''\n pass\n\n\nclass SensitiviaDecoder(Decoder):\n ''' Decoder for sensitivia sensors. '''\n key_map = {\n 'Battery': 'b',\n 'AvgTemperature': 't',\n 'Humidity': 'h',\n 'Lux': 'l',\n 'fcnt': 'fcnt',\n 'rssi': 'rssi',\n 'port': 'port',\n }\n\n async def decode(self):\n data = {}\n\n for k, v in self.raw:\n try:\n data[self.key_map[k]] = v\n except KeyError:\n continue\n\n return data\n\n async def is_valid(self):\n return await super().is_valid()\n\n @property\n def sensor_type(self):\n return INDOOR_SENSOR\n\n\nclass DoorLockDecoder(Decoder):\n ''' Decoder for door lock sensors. 
'''\n async def decode(self):\n data = await super().decode()\n\n open_state = data.get('DI1', None)\n locked_state = data.get('DI2', None)\n\n return {\n 'open': None if open_state is None else open_state == 0,\n 'locked': None if locked_state is None else locked_state == 0,\n }\n\n async def is_valid(self):\n return await super().is_valid()\n\n @property\n def sensor_type(self):\n return DOOR_LOCK_SENSOR\n\n\nclass ElsysDecoder(Decoder):\n ''' Decoder used to decode all sensor types from Elsys '''\n TYPE_TEMP = 0x01 # Temp 2 bytes -3276. -->\n TYPE_RH = 0x02 # Humidity 1 byte 0-100%\n TYPE_ACC = 0x03 # Acceleration 3 bytes X,Y,Z -128 --> 127 +/-63=1G\n TYPE_LIGHT = 0x04 # Light 2 bytes 0-->65535 Lux\n TYPE_MOTION = 0x05 # No of motion 1 byte 0-255\n TYPE_CO2 = 0x06 # Co2 2 bytes 0-65535 ppm\n TYPE_VDD = 0x07 # VDD 2byte 0-65535mV\n TYPE_ANALOG1 = 0x08 # VDD 2byte 0-65535mV\n TYPE_GPS = 0x09 # 3bytes lat 3bytes long binary\n TYPE_PULSE1 = 0x0A # 2bytes relative pulse count\n TYPE_PULSE1_ABS = 0x0B # 4bytes no 0->0xFFFFFFFF\n TYPE_EXT_TEMP1 = 0x0C # 2bytes -3276.5C-->3276.5C\n TYPE_EXT_DIGITAL = 0x0D # 1bytes value 1 or 0\n TYPE_EXT_DISTANCE = 0x0E # 2bytes distance in mm\n TYPE_ACC_MOTION = 0x0F # 1byte number of vibration/motion\n TYPE_IR_TEMP = 0x10 # 2bytes internal temp 2bytes external temp -3276.5C-->3276.5C\n TYPE_OCCUPANCY = 0x11 # 1byte data\n TYPE_WATERLEAK = 0x12 # 1byte data 0-255\n TYPE_GRIDEYE = 0x13 # 65byte temperature data 1byte ref+64byte external temp\n TYPE_PRESSURE = 0x14 # 4byte pressure data (hPa)\n TYPE_SOUND = 0x15 # 2byte sound data (peak/avg)\n TYPE_PULSE2 = 0x16 # 2bytes 0-->0xFFFF\n TYPE_PULSE2_ABS = 0x17 # 4bytes no 0->0xFFFFFFFF\n TYPE_ANALOG2 = 0x18 # 2bytes voltage in mV\n TYPE_EXT_TEMP2 = 0x19\n\n async def is_valid(self):\n try:\n self.data = await self.decode()\n\n if self.data:\n return True\n\n except IndexError:\n return False # TODO: reraise a better error message and handle in caller.\n\n return False\n\n @property\n def sensor_type(self):\n return INDOOR_SENSOR\n\n async def _decode_hex(self, data):\n obj = {}\n i = 0\n while i < len(data):\n if data[i] == self.TYPE_TEMP:\n num = int(data[i+1] << 8 | data[i+2])\n if num > 0x7FFF:\n num -= 0x10000\n obj['t'] = num/10\n i += 2\n elif data[i] == self.TYPE_RH:\n obj['h'] = data[i+1]\n i += 1\n elif data[i] == self.TYPE_ACC:\n i += 2\n elif data[i] == self.TYPE_LIGHT:\n obj['l'] = int(data[i+1] << 8 | data[i+2])\n i += 2\n elif data[i] == self.TYPE_MOTION:\n obj['m'] = data[i+1]\n i += 1\n elif data[i] == self.TYPE_CO2:\n obj['c'] = int(data[i+1] << 8 | data[i+2])\n i += 2\n elif data[i] == self.TYPE_VDD:\n obj['b'] = int(data[i+1] << 8 | data[i+2])\n i += 2\n elif data[i] == self.TYPE_ANALOG1:\n obj['a1'] = int(data[i+1] << 8 | data[i+2])\n i += 2\n elif data[i] == self.TYPE_GPS:\n i += 6\n elif data[i] == self.TYPE_PULSE1:\n obj['p1'] = int(data[i+1] << 8 | data[i+2])\n i += 2\n elif data[i] == self.TYPE_PULSE1_ABS:\n i += 4\n elif data[i] == self.TYPE_EXT_TEMP1:\n i += 2\n elif data[i] == self.TYPE_EXT_DIGITAL:\n obj['edig'] = data[i+1]\n i += 1\n elif data[i] == self.TYPE_EXT_DISTANCE:\n obj['edis'] = int(data[i+1] << 8 | data[i+2])\n i += 2\n elif data[i] == self.TYPE_ACC_MOTION:\n obj['am'] = data[i+1]\n i += 1\n elif data[i] == self.TYPE_IR_TEMP:\n internal_temp = int(data[i+1] << 8 | data[i+2])\n if internal_temp > 0x7FFF:\n internal_temp -= 0x10000\n\n external_temp = int(data[i+3] << 8 | data[i+4])\n if external_temp > 0x7FFF:\n external_temp -= 0x10000\n\n obj['irit'] = internal_temp/10\n 
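# the external (probe) IR temperature is stored alongside the internal one,\n # both sign-extended above from 16-bit two's-complement values\n 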
obj['iret'] = external_temp/10\n\n i += 4\n elif data[i] == self.TYPE_OCCUPANCY:\n obj['o'] = data[i+1]\n i += 1\n elif data[i] == self.TYPE_WATERLEAK:\n obj['w'] = data[i+1]\n i += 1\n elif data[i] == self.TYPE_GRIDEYE:\n i += 65\n elif data[i] == self.TYPE_PRESSURE:\n i += 4\n elif data[i] == self.TYPE_SOUND:\n i += 2\n elif data[i] == self.TYPE_PULSE2:\n obj['p2'] = int(data[i+1] << 8 | data[i+2])\n i += 2\n elif data[i] == self.TYPE_PULSE2_ABS:\n i += 4\n elif data[i] == self.TYPE_ANALOG2:\n obj['a2'] = int(data[i+1] << 8 | data[i+2])\n i += 2\n elif data[i] == self.TYPE_EXT_TEMP2:\n i += 2\n else:\n LOG.error('Could not decode hex, at pos %s value %s', i, data[i])\n i = len(data)\n i = i+1\n\n return obj\n\n\nclass AdvantechElsysDecoder(ElsysDecoder):\n ''' Decodes elsys data coming from Advantech Wise-PaaS. '''\n async def decode(self):\n data = {}\n\n for k, v in self.raw:\n if k == 'hex':\n data.update(await self._decode_hex(\n data=bytes.fromhex(v)\n ))\n else:\n data[k] = v\n\n LOG.debug('Decoded %s from %s', data, self.raw)\n return data\n\n\nclass ChirpElsysDecoder(ElsysDecoder):\n ''' Decodes elsys data coming from a Chirpstack server. '''\n async def decode(self):\n data = await self._decode_hex(\n data=base64.b64decode(self.raw)\n )\n\n LOG.debug('Decoded %s from %s', data, self.raw)\n return data\n","sub_path":"pipeline/decoders.py","file_name":"decoders.py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"23538228","text":"\"\"\"\nmake classes here \n\"\"\"\n\nfrom random import randint\nimport unittest\n\n\nclass Product:\n \"part 1 stuff here \"\n\n def __init__(self, name, price=10, weight=20, flannability=.5):\n self.name = name\n self.price = price\n self.weight = weight\n self.flannability = flannability\n self.identifier = randint(1000000, 9999999)\n\n # part 2\n def stealability(self):\n \"\"\"Methods for part 2, first one is - calculates the price divided by the weight, and then\n returns a message: if the ratio is less than 0.5 return Not so stealable...,\n if it is greater or equal to 0.5 but less than 1.0 return Kinda stealable.,\n and otherwise return Very stealable! \"\"\"\n\n Price_weight = self.price / self.weight\n if Price_weight < 0.5:\n return \"Not so stealable...\"\n elif Price_weight < 1.0:\n return 'Kinda stealable.'\n else:\n return 'Very stealable!'\n\n def explode(self):\n \"\"\"Second method is - calculates the flammability times the weight, and then\n returns a message: if the product is less than 10 return ...fizzle., if it is\n greater or equal to 10 but less than 50 return ...boom!, and otherwise\n return ...BABOOM!!\"\"\"\n fire_potential = self.flannability * self.weight\n if fire_potential < 10:\n return '...fizzle'\n elif fire_potential < 50:\n return '...boom!'\n else:\n return '...BABOOM!!'\n\n # part 3 subclass\n\n\nclass BoxingGlove(Product):\n \"\"\"Subclass for the class parent. - Change the default `weight` to 10 (but leave other defaults unchanged)\n- Override the `explode` method to always return ...it's a glove.\n- Add a `punch` method that returns \"That tickles.\" if the weight is below 5,\n Hey that hurt! 
otherwise\n \"\"\"\n\n def __init__(self, name, price=100, weight=10, flannability=.5):\n self.name = name\n self.price = price\n self.weight = weight\n self.flannability = flannability\n self.identifier = randint(1000000, 9999999)\n # change the price for giggles, and did not work. further investigation needed.\n\n def explode(self):\n # \"\"\"A doc string needs to be here?Nah\"\"\"\n return \"...it's a glove.\"\n\n def punch(self):\n # you are not working, further investigation needed...\n \"\"\"a method of a BoxingGlove\"\"\"\n if self.weight < 5:\n return \"That tickles.\"\n elif self.weight < 15:\n return \"Hey that hurt!\"\n else:\n return \"OUCH!\"\n","sub_path":"sprint-challenge/part1_acme.py","file_name":"part1_acme.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"538831853","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required, permission_required\n\nfrom .models import DcWellitem, ElectricTag\nfrom django.conf import settings\n\n\nfrom . import forms\nfrom . import table_utils\n\n\n@login_required\ndef index(request):\n if request.method == 'GET':\n name = request.GET.get('name', None)\n if not name:\n query_set = DcWellitem.objects.all().values()\n else:\n query_set = DcWellitem.objects.filter(jh__contains=name).all().values()\n\n paginator_list, page_range, left_has_more, right_has_more, page_sum = table_utils.data_paginator(request, query_set)\n\n # Update attributes on the paginated objects\n for query in paginator_list.object_list:\n row = ElectricTag.objects.filter(id=query['id']).first()\n if row:\n query['look'] = '已查看'\n query['look_time'] = row.create_time\n else:\n query['look'] = None\n query['look_time'] = None\n absurl = request.get_full_path()\n return render(request, 'index.html',\n context={'elecs': paginator_list,\n 'page_range': page_range,\n 'name': name,\n 'SWITCH': settings.SWITCH,\n 'left_has_more': left_has_more,\n 'right_has_more': right_has_more,\n 'page_sum': page_sum,\n })\n else:\n # Search by well number\n name = request.POST.get('name')\n return HttpResponseRedirect('/index/?name={0}'.format(name))\n\n\n@permission_required('label.view_electrictag', login_url='/users/login/', raise_exception=True)\ndef tag(request, id):\n if request.method == 'GET':\n form = forms.TagForm()\n # Fetch the curve details\n img_path, savepath, data_tag = table_utils.get_tag(id)\n # Fetch the previous and next curve ids\n prev_image_id, next_image_id = table_utils.get_tag_id(id)\n\n return render(request, 'tag.html', {'form': form,\n 'prev_image_id': prev_image_id,\n 'next_image_id': next_image_id,\n 'data_tag': data_tag,\n 'img_path': img_path,\n 'savepath': savepath})\n else:\n form = forms.TagForm(request.POST)\n # Form data submitted via POST\n wellbore_tag_1, wellbore_tag_2, motor_belt_tag, balance_tag, slowdown_box_tag, remarks = table_utils.post_tag_form(form)\n\n data_tag = ElectricTag.objects.filter(id=id).first()\n data_tag.wellbore_tag_1 = wellbore_tag_1\n data_tag.wellbore_tag_2 = wellbore_tag_2\n data_tag.motor_belt_tag = motor_belt_tag\n data_tag.balance_tag = balance_tag\n data_tag.slowdown_box_tag = slowdown_box_tag\n data_tag.remarks = remarks\n data_tag.labeled = True\n data_tag.save()\n\n return HttpResponseRedirect('/tag/{0}/'.format(id))\n\n\n@permission_required('label.view_electrictag', login_url='/users/login/', raise_exception=True)\ndef filter(request, filter_tag='all'):\n if request.method == 'GET':\n name = request.GET.get('name', None)\n if filter_tag == 
'all':\n            query_set = ElectricTag.objects\n        elif filter_tag == 'labeled':\n            query_set = ElectricTag.objects.filter(labeled=True)\n        elif filter_tag == 'unlabeled':\n            query_set = ElectricTag.objects.filter(labeled=False)\n        else:\n            return render(request, '404.html')\n\n        # Build the filtered, paginated data set\n        pagination = table_utils.get_pagination(name, query_set)\n        paginator_list, page_range, left_has_more, right_has_more, page_sum = table_utils.data_paginator(request, pagination.values())\n\n        return render(request, 'filter.html',\n                      context={\n                          'elecs': paginator_list,\n                          'filter_tag': filter_tag,\n                          'page_range': page_range,\n                          'name': name,\n                          'left_has_more': left_has_more,\n                          'right_has_more': right_has_more,\n                          'page_sum': page_sum\n                      })\n    else:\n        wellname = request.POST.get('name')\n        # Strip the query string from the current path\n        url = request.get_full_path()\n        if '?' in url:\n            new_url = url.split('?')[0]\n        else:\n            new_url = url\n\n        return HttpResponseRedirect('{0}?name={1}'.format(new_url, wellname))\n\n\n@permission_required('label.view_electrictag', login_url='/users/login/', raise_exception=True)\ndef export_excel(request):\n    filter_tag = request.GET.get('filter_tag', None)\n    name = request.GET.get('name', None)\n\n    # Get the filtered query sets\n    query_sets = table_utils.get_query_sets(filter_tag, name)\n\n    # Export the filtered data\n    response = table_utils.export_func(request, query_sets)\n\n    return response\n","sub_path":"power_label/apps/label/table_views.py","file_name":"table_views.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"136391731","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright (c) Microsoft Corporation and Dapr Contributors.\nLicensed under the MIT License.\n\"\"\"\n\nimport unittest\n\nfrom datetime import timedelta\n\nfrom dapr.actor.runtime.runtime import ActorRuntime\nfrom dapr.actor.runtime.config import ActorRuntimeConfig\nfrom dapr.serializers import DefaultJSONSerializer\n\nfrom tests.actor.fake_actor_classes import (\n    FakeSimpleActor,\n    FakeMultiInterfacesActor,\n    FakeSimpleTimerActor,\n)\n\nfrom tests.actor.utils import _run\n\n\nclass ActorRuntimeTests(unittest.TestCase):\n    def setUp(self):\n        ActorRuntime._actor_managers = {}\n        ActorRuntime.set_actor_config(ActorRuntimeConfig())\n        self._serializer = DefaultJSONSerializer()\n        _run(ActorRuntime.register_actor(FakeSimpleActor))\n        _run(ActorRuntime.register_actor(FakeMultiInterfacesActor))\n        _run(ActorRuntime.register_actor(FakeSimpleTimerActor))\n\n    def test_get_registered_actor_types(self):\n        actor_types = ActorRuntime.get_registered_actor_types()\n        self.assertTrue(actor_types.index('FakeSimpleActor') >= 0)\n        self.assertTrue(actor_types.index(FakeMultiInterfacesActor.__name__) >= 0)\n        self.assertTrue(actor_types.index(FakeSimpleTimerActor.__name__) >= 0)\n\n    def test_actor_config(self):\n        config = ActorRuntime.get_actor_config()\n\n        self.assertTrue(config._drain_rebalanced_actors)\n        self.assertEqual(timedelta(hours=1), config._actor_idle_timeout)\n        self.assertEqual(timedelta(seconds=30), config._actor_scan_interval)\n        self.assertEqual(timedelta(minutes=1), config._drain_ongoing_call_timeout)\n        self.assertEqual(3, len(config._entities))\n\n        # apply new config\n        new_config = ActorRuntimeConfig(\n            timedelta(hours=3), timedelta(seconds=10),\n            timedelta(minutes=1), False)\n\n        ActorRuntime.set_actor_config(new_config)\n        config = ActorRuntime.get_actor_config()\n\n        self.assertFalse(config._drain_rebalanced_actors)\n        self.assertEqual(timedelta(hours=3), config._actor_idle_timeout)\n        
self.assertEqual(timedelta(seconds=10), config._actor_scan_interval)\n        self.assertEqual(timedelta(minutes=1), config._drain_ongoing_call_timeout)\n        self.assertEqual(3, len(config._entities))\n\n    def test_entities_update(self):\n        # Clean up managers\n        ActorRuntime._actor_managers = {}\n        ActorRuntime.set_actor_config(ActorRuntimeConfig())\n\n        config = ActorRuntime.get_actor_config()\n        with self.assertRaises(ValueError):\n            config._entities.index(FakeSimpleActor.__name__)\n\n        _run(ActorRuntime.register_actor(FakeSimpleActor))\n        config = ActorRuntime.get_actor_config()\n        self.assertTrue(config._entities.index(FakeSimpleActor.__name__) >= 0)\n\n    def test_dispatch(self):\n        _run(ActorRuntime.register_actor(FakeMultiInterfacesActor))\n\n        request_body = {\n            \"message\": \"hello dapr\",\n        }\n\n        test_request_body = self._serializer.serialize(request_body)\n        response = _run(ActorRuntime.dispatch(\n            FakeMultiInterfacesActor.__name__, 'test-id',\n            \"ActionMethod\", test_request_body))\n\n        self.assertEqual(b'\"hello dapr\"', response)\n\n        _run(ActorRuntime.deactivate(FakeMultiInterfacesActor.__name__, 'test-id'))\n\n        # Ensure test-id is deactivated\n        with self.assertRaises(ValueError):\n            _run(ActorRuntime.deactivate(FakeMultiInterfacesActor.__name__, 'test-id'))\n\n    def test_fire_timer_success(self):\n        # Fire timer\n        _run(ActorRuntime.fire_timer(\n            FakeSimpleTimerActor.__name__,\n            'test-id',\n            'test_timer',\n            '{ \"callback\": \"timer_callback\", \"data\": \"timer call\" }'.encode('UTF8')))\n\n        manager = ActorRuntime._actor_managers[FakeSimpleTimerActor.__name__]\n        actor = manager._active_actors['test-id']\n        self.assertTrue(actor.timer_called)\n\n    def test_fire_timer_unregistered(self):\n        with self.assertRaises(ValueError):\n            _run(ActorRuntime.fire_timer(\n                'UnknownType',\n                'test-id',\n                'test_timer',\n                '{ \"callback\": \"timer_callback\", \"data\": \"timer call\" }'.encode('UTF8')))\n","sub_path":"tests/actor/test_actor_runtime.py","file_name":"test_actor_runtime.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"302353491","text":"from django.utils.functional import cached_property\n\nfrom base.descriptors.subfields import Subfield\nfrom base.json import json_freeze\nfrom base.types import Type, RelType\nfrom base.constant import LOOKUP_SEP, COMPUTED_REL_SUBFIELD_POSTFIX\n\n\ndef computed_rel_subfield_name(rsf_name):\n    return '{}{}'.format(rsf_name, COMPUTED_REL_SUBFIELD_POSTFIX)\n\n\ndef computed_rel_count_subfield_name(rsf_name):\n    return '{}__count{}'.format(rsf_name, COMPUTED_REL_SUBFIELD_POSTFIX)\n\n\ndef is_computed_rel_subfield_name(subfield_name):\n    return subfield_name[-1] == COMPUTED_REL_SUBFIELD_POSTFIX\n\n\nclass RelSubfield(Subfield):\n    IS_PARTIAL = False\n\n    def __init__(self, field_name, subfield_type, multiplicity, source_name=None, rel_type=None, create_only=False):\n        assert field_name in ('sources', 'targets')\n        assert isinstance(subfield_type, Type)\n        assert multiplicity in ('*:*', '1:*', '1:1'), 'multiplicity must be set to an appropriate value.'\n        assert not field_name == 'targets' or source_name, 'source_name must be specified when field_name is targets.'\n        super().__init__(field_name, subfield_type)\n        self.is_source = field_name == 'sources'\n        self.multiplicity = multiplicity\n        self.source_name = source_name\n        self._rel_type = rel_type\n        self.create_only = create_only\n\n    @cached_property\n    def rel_type(self):\n        assert self.owner\n        if self._rel_type:\n            return self._rel_type\n        elif self.is_source:\n            return 
getattr(RelType, self.subfield_name)\n        else:\n            return getattr(RelType, self.source_name)\n\n    @cached_property\n    def related_subfields(self):\n        subfield_model, is_source = self.subfield_model, self.is_source\n        related_field_name = 'targets' if is_source else 'sources'\n        d = {}\n        for rsfn, related_subfield in subfield_model.subfields[related_field_name].items():\n            assert related_subfield.is_source is not is_source\n            if related_subfield.rel_type == self.rel_type:\n                if issubclass(self.owner, related_subfield.subfield_model) and issubclass(self.subfield_model, related_subfield.owner):\n                    assert related_subfield.owner not in d\n                    d[related_subfield.owner] = related_subfield\n        return json_freeze(d)\n\n    def get_related_subfield(self, related_model):\n        result, related_subfields = None, self.related_subfields\n        for model in related_subfields:\n            if issubclass(related_model, model):\n                if not result:\n                    result = model\n                elif issubclass(model, result):\n                    result = model\n        return related_subfields[result] if result else None\n\n    def _check_validation(self):\n        super()._check_validation()\n        for related_subfield in self.related_subfields.values():\n            assert related_subfield.rel_type == self.rel_type\n            assert related_subfield.is_source == (not self.is_source)\n            assert issubclass(self.subfield_type.model, related_subfield.owner)\n            assert issubclass(self.owner, related_subfield.subfield_type.model)\n\n    @cached_property\n    def subfield_model(self):\n        return self.subfield_type.model\n\n    @cached_property\n    def my_multiplicity(self):\n        return self.multiplicity.split(':')[0 if self.is_source else 1]\n\n    @cached_property\n    def related_multiplicity(self):\n        return self.multiplicity.split(':')[1 if self.is_source else 0]\n\n    def get_manager(self, instance):\n        is_source, rel_type, subfield_model = self.is_source, self.rel_type, self.subfield_model\n        manager = instance.sources if is_source else instance.targets\n        manager.model = subfield_model\n        manager.is_source = is_source\n        manager.rel_type = rel_type\n        manager.multiplicity = self.multiplicity\n        sfn = 'source' if is_source else 'target'\n        assert sfn == manager.target_field_name\n        manager.core_filters['{}_rels__type'.format(sfn)] = rel_type\n        subfield_types = subfield_model.types\n        if subfield_types:\n            manager.core_filters['{}_rels__computed_{}_type__in'.format(sfn, sfn)] = subfield_types\n        manager.subfield = self\n        manager.related_subfield = self.get_related_subfield(subfield_model)\n        return manager\n\n    def __get__(self, instance, owner):\n        if not instance:\n            return self\n        return self.get_manager(instance)\n\n    def __set__(self, instance, value):\n        if value is None:\n            manager = self.get_manager(instance)\n            return manager.unlink_all()\n        raise AssertionError('Only setting None (to unlink for delete()) is allowed.')\n\n    def convert_filter(self, model, parts, value, lookup_type, raw):\n        is_source, rel_type, subfield_model = self.is_source, self.rel_type, self.subfield_model\n        filter = {}\n        tfn = 'target' if is_source else 'source'\n        sfn = 'source' if is_source else 'target'\n        filter['{}_rels__type'.format(tfn)] = rel_type\n        filter['{}_rels__computed_{}_type__in'.format(tfn, sfn)] = subfield_model.types\n        parts[0] = '{}s'.format(sfn)\n        new_filter = '{}__{}'.format(LOOKUP_SEP.join(parts), lookup_type)\n        filter[new_filter] = value\n        return filter\n\n    @cached_property\n    def computed_rel_subfield(self):\n        return getattr(self.owner, computed_rel_subfield_name(self.subfield_name))\n\n
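    # Multiplicity cheat sheet (a summary of the asserts above, not new rules):\n    # '1:*' behaves like a foreign key, '1:1' like a one-to-one and '*:*' like\n    # a many-to-many; the source side reads the left figure and the target\n    # side the right one (see my_multiplicity/related_multiplicity).\n\n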
    @cached_property\n    def computed_rel_count_subfield(self):\n        assert self.IS_PARTIAL, 'This property is only meaningful for a partial subfield.'\n        return getattr(self.owner, computed_rel_count_subfield_name(self.subfield_name))\n\n\nclass ForeignKeySubfield(RelSubfield):\n    def __init__(self, field_name, subfield_type, rel_type=None, create_only=False):\n        assert field_name == 'sources'\n        super().__init__(field_name, subfield_type, multiplicity='1:*', rel_type=rel_type, create_only=create_only)\n\n    def __get__(self, instance, owner):\n        if not instance:\n            return self\n        manager = self.get_manager(instance)\n        qs = manager.all()[:2]\n        assert len(qs) in (0, 1)\n        return qs.first()\n\n    def __set__(self, instance, value):\n        manager = self.get_manager(instance)\n        # TODO: tune by caching\n        old = self.__get__(instance, instance.__class__)\n        if old and old != value:\n            manager.unlink(old)\n        if value:\n            manager.link(value)\n\n    def _check_validation(self):\n        super()._check_validation()\n        assert self.is_source is True\n        assert self.my_multiplicity == '1'\n        assert self.related_multiplicity == '*'\n        for related_subfield in self.related_subfields.values():\n            assert isinstance(related_subfield, ReverseForeignKeySubfield)\n        subfields = self.owner.subfields['sources']\n        assert self in subfields.values()\n        for fksf in [fksf for fksf in subfields.values() if fksf != self and isinstance(fksf, ForeignKeySubfield)]:\n            if self.rel_type == fksf.rel_type:\n                a = set(self.subfield_model.types)\n                b = set(fksf.subfield_model.types)\n                assert not a.intersection(b), \\\n                    'Overlapping subfield_model.types can break the multiplicity guarantee; the rel_type must be changed.'\n\n\nclass ReverseForeignKeySubfield(RelSubfield):\n    def __init__(self, field_name, subfield_type, source_name, rel_type=None, create_only=False):\n        assert field_name == 'targets'\n        super().__init__(field_name, subfield_type, multiplicity='1:*', source_name=source_name, rel_type=rel_type, create_only=create_only)\n\n    def _check_validation(self):\n        super()._check_validation()\n        assert self.is_source is False\n        assert self.my_multiplicity == '*'\n        assert self.related_multiplicity == '1'\n        for related_subfield in self.related_subfields.values():\n            assert isinstance(related_subfield, ForeignKeySubfield)\n\n\nclass OneToOneSubfield(ForeignKeySubfield):\n    def __init__(self, field_name, subfield_type, rel_type=None, create_only=False):\n        assert field_name == 'sources'\n        RelSubfield.__init__(\n            self, field_name, subfield_type, multiplicity='1:1', rel_type=rel_type, create_only=create_only)\n\n    def _check_validation(self):\n        RelSubfield._check_validation(self)\n        assert self.my_multiplicity == '1'\n        # due to inheritance this may be '*' rather than '1'\n        # assert self.related_multiplicity == '1'\n        for related_subfield in self.related_subfields.values():\n            assert isinstance(related_subfield, OneToOneSubfield)\n\n\nclass ReverseOneToOneSubfield(OneToOneSubfield):\n    def __init__(self, field_name, subfield_type, source_name, rel_type=None, create_only=False):\n        assert field_name == 'targets'\n        RelSubfield.__init__(\n            self, field_name, subfield_type, multiplicity='1:1', source_name=source_name, rel_type=rel_type, create_only=create_only)\n\n\nclass ManyToManySubfield(RelSubfield):\n    def __init__(self, field_name, subfield_type, rel_type=None, create_only=False):\n        assert field_name == 'sources'\n        super().__init__(field_name, subfield_type, multiplicity='*:*', rel_type=rel_type, create_only=create_only)\n\n    def _check_validation(self):\n        super()._check_validation()\n        assert self.my_multiplicity == '*'\n        assert self.related_multiplicity == '*'\n        for related_subfield in self.related_subfields.values():\n            assert isinstance(related_subfield, ManyToManySubfield)\n\n\nclass 
ReverseManyToManySubfield(ManyToManySubfield):\n def __init__(self, field_name, subfield_type, source_name, rel_type=None, create_only=False):\n assert field_name == 'targets'\n RelSubfield.__init__(\n self, field_name, subfield_type, multiplicity='*:*', source_name=source_name, rel_type=rel_type, create_only=create_only)\n\n\nclass PartialMappingSubfield(ManyToManySubfield):\n IS_PARTIAL = True\n\n def __init__(self, field_name, subfield_type, create_only=False):\n assert field_name == 'sources'\n super().__init__(field_name, subfield_type, create_only=create_only)\n\n\nclass ReversePartialMappingSubfield(ReverseManyToManySubfield):\n IS_PARTIAL = True\n\n def __init__(self, field_name, subfield_type, source_name, create_only=False):\n assert field_name == 'targets'\n super().__init__(field_name, subfield_type, source_name=source_name, create_only=create_only)\n","sub_path":"base/descriptors/rel_subfields.py","file_name":"rel_subfields.py","file_ext":"py","file_size_in_byte":10629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"324438322","text":"import signal\nimport time\n\nimport requests\n\nfrom js9 import j\n\n\n\n\nclass Grafana():\n def __init__(self, container, ip, port, url):\n\n self.container = container\n self.ip = ip\n self.port = port\n self.url = url\n self._client = None\n\n @property\n def client(self):\n if not self._client:\n self._client = j.clients.grafana.get(url='http://%s:%d' % (\n self.ip, self.port), username='admin', password='admin')\n return self._client\n\n def apply_config(self):\n f = self.container.client.filesystem.open('/opt/grafana/conf/defaults.ini')\n try:\n template = self.container.client.filesystem.read(f)\n finally:\n self.container.client.filesystem.close(f)\n\n template = template.replace(b'3000', str(self.port).encode())\n if self.url:\n template = template.replace(b'root_url = %(protocol)s://%(domain)s:%(http_port)s/', b'root_url = %s' % self.url.encode())\n self.container.client.filesystem.mkdir('/etc/grafana/')\n self.container.upload_content('/etc/grafana/grafana.ini', template)\n\n @property\n def PID(self):\n for process in self.container.client.process.list():\n if 'grafana-server' in process['cmdline']:\n return process['pid']\n return None\n\n def is_running(self):\n if self.client.ping():\n return True\n return False\n\n def stop(self, timeout=30):\n if not self.is_running():\n return\n\n self.container.client.process.kill(self.PID, signal.SIGTERM)\n start = time.time()\n end = start + timeout\n is_running = self.is_running()\n while is_running and time.time() < end:\n time.sleep(1)\n is_running = self.is_running()\n\n if is_running:\n raise RuntimeError('Failed to stop grafana.')\n\n if self.container.node.client.nft.rule_exists(self.port):\n self.container.node.client.nft.drop_port(self.port)\n\n def start(self, timeout=45):\n is_running = self.is_running()\n if is_running:\n return\n\n self.apply_config()\n\n if not self.container.node.client.nft.rule_exists(self.port):\n self.container.node.client.nft.open_port(self.port)\n\n self.container.client.system(\n 'grafana-server -config /etc/grafana/grafana.ini -homepath /opt/grafana')\n time.sleep(1)\n\n start = time.time()\n end = start + timeout\n is_running = self.is_running()\n while not is_running and time.time() < end:\n time.sleep(1)\n is_running = self.is_running()\n\n if not is_running:\n if self.container.node.client.nft.rule_exists(self.port):\n self.container.node.client.nft.drop_port(self.port)\n raise 
RuntimeError('Failed to start grafana.')\n\n    def add_data_source(self, database, name, ip, port, count):\n        data = {\n            'type': 'influxdb',\n            'access': 'proxy',\n            'database': database,\n            'name': name,\n            'url': 'http://%s:%u' % (ip, port),\n            'user': 'admin',\n            'password': 'passwd',\n            'default': True,\n        }\n\n        now = time.time()\n        while time.time() - now < 10:\n            try:\n                self.client.addDataSource(data)\n                # stop retrying once the new source shows up\n                if len(self.client.listDataSources()) == count + 1:\n                    break\n            except requests.exceptions.ConnectionError:\n                time.sleep(1)\n\n    def delete_data_source(self, name):\n        count = len(self.client.listDataSources())\n        now = time.time()\n        while time.time() - now < 10:\n            try:\n                self.client.deleteDataSource(name)\n                # stop retrying once the source is gone\n                if len(self.client.listDataSources()) == count - 1:\n                    break\n            except requests.exceptions.ConnectionError:\n                time.sleep(1)\n","sub_path":"JumpScale9Lib/clients/zero_os/sal/grafana/grafana.py","file_name":"grafana.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"144098521","text":"__author__ = 'ericweiner'\nfrom bs4 import BeautifulSoup\nimport urllib2\nclass Page():\n    def __init__(self, url, domain='google'):\n        self.pageRating = 0\n        self.url = url\n        self.domain = domain\n        opener = urllib2.build_opener()\n        opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n        self.page = opener.open(url)\n        self.soup = BeautifulSoup(self.page, 'html.parser')\n\n    def gSoupToLinks(self):\n        cites = [cite.findAll('a', href=True) for cite in self.soup.findAll('h3')]\n        toReturn = [str(cite)[str(cite).find('/url?q=') + 7:str(cite).find('&sa=U&ved=')]\n                    for cite in cites]\n        return toReturn\n\n    def aSoupToLinks(self):\n        pages = [page for page in self.soup.findAll(\"a\", {\"class\": \"s-access-detail-page\"})]\n        toReturn = [str(link)[str(link).find('\" href=') + 8: str(link).find('\" title=')]\n                    for link in pages]\n        toReturn = [toReturn[i] for i in range(len(toReturn)) if toReturn[i][:4] == 'http']\n        return toReturn\n\n    def __str__(self):\n        if self.domain == 'google':\n            links = self.gSoupToLinks()\n        else:\n            links = self.aSoupToLinks()\n        # __str__ must return a string, so join the link list\n        return '\\n'.join(links)\n\n","sub_path":"page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"194756588","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 19 11:52:14 2019\r\n\r\n@author: arje\r\n\"\"\"\r\nimport os\r\nimport imageio\r\nimport numpy as np\r\nfrom sklearn import metrics\r\nimport cloudpickle as pickle\r\nimport utility_detect as utility\r\n\r\n\"\"\"\r\nReading in the new Detect data\r\n\"\"\"\r\nfolder = './data/Expo_2000_Ap_3.8/Expo_2000_Ap_3.8'\r\n\r\n\r\n#Read in the classes\r\nlabels = os.listdir(folder)\r\nlabels.sort()\r\n\r\n#For each label, create annotations for the individuals\r\n#Annotation = id, label, species, genus, im_id\r\nann = []\r\nim_data = []\r\nfor index, label in enumerate(labels):\r\n    lab = label\r\n    spe_where = lab.index(' ')\r\n    spe = lab[(spe_where+1):]\r\n    gen = lab[:spe_where]\r\n    \r\n    #Let's go through the traps\r\n    sub_fold = folder + '/' + lab\r\n    traps = os.listdir(sub_fold)\r\n    traps.sort()\r\n    for trap in traps:\r\n        sub_sub_fold = sub_fold + '/' + trap\r\n        id_inds = os.listdir(sub_sub_fold)\r\n        id_inds.sort()\r\n        #Let's go through the images of the individual\r\n        for id_ind in id_inds:\r\n            im_fold = sub_sub_fold + '/' + id_ind\r\n            images = os.listdir(im_fold)\r\n            images.sort()\r\n            #Finally, let's save the 
annotation information for each image\r\n #And get the .npy file\r\n dim = imageio.imread(im_fold+'/'+images[0]).shape\r\n for im in images:\r\n ann.append(str(id_ind) + ',' + str(lab) + ',' + str(spe) +',' + str(gen) + \\\r\n ',' + str(im) + '\\n')\r\n \r\n im_data.append(imageio.imread(im_fold+'/'+im)[0:64,0:64,:])\r\n \r\nfid = open(folder+'/annotations_bg.txt','w')\r\nfor row in ann:\r\n fid.write(row)\r\nfid.close()\r\n\r\n#fid = open(folder+'/image_data.npy','wb')\r\n#np.save(fid, im_data)\r\n#fid.close()\r\n\r\n\r\nall_data = utility.proc_annotation(folder+'/annotations_bg.txt', im_data, ratio=[0.7,0.1,0.2])\r\n\r\nfid = open(folder+'/all_data_bg.pickle','wb')\r\npickle.dump(all_data,fid)\r\nfid.close()\r\n","sub_path":"Johanna/background_data.py","file_name":"background_data.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"475146070","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom pprint import pprint\n\nclass Events:\n def __init__(self):\n pass\n\n def fetch_events(self):\n url = \"http://www-intern.cispa.uni-saarland.de/wordpress/feed/atom\"\n with urlopen(url) as f:\n xml = BeautifulSoup(f.read().decode(), \"xml\")\n\n for entry in xml.find_all(\"entry\"):\n event = dict(\n category = entry.category[\"term\"],\n title = entry.title.string,\n summary = entry.summary.string\n )\n pprint(event)\n\n\n\nif (__name__ == \"__main__\"):\n events = Events()\n events.fetch_events()\n\n \n","sub_path":"src/py/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"270093157","text":"import pandas as pd\nimport numpy as np\n\n\ndef OCWRes(file1, file2):\n Dense1 = pd.read_csv(file1)\n Dense2 = pd.read_csv(file2)\n Dense1.pop('Unnamed: 0')\n Dense1.pop('bias')\n Dense2.pop('Unnamed: 0')\n Dense2.pop('bias')\n columns = Dense1.columns.tolist()\n Dense1Value = Dense1.values\n Dense2Value = Dense2.values\n OCW = Dense1Value * Dense2Value.T\n\n return np.mean(OCW, axis=0)\n\nif __name__ == '__main__':\n file1 = 'weights/dense_1.csv'\n file2 = 'weights/dense_2.csv'\n result = OCWRes(file1, file2)\n print(result)\n","sub_path":"OCW.py","file_name":"OCW.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"650749445","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 2 14:12:04 2018\n\n@author: pfjarschel\n\"\"\"\n\nimport sys, time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import optimize\nimport functools\n\nfrom PyQt5.QtCore import Qt, QTimer, QDir, QCoreApplication\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\n\nimport MainWindow\nimport esa4407b\nimport n5173b\nimport edfakps\nimport hp34401a\n\n\nclass MainWindow(QMainWindow, MainWindow.Ui_MainWindow):\n # Devices Stuff\n esa = None\n esaBusy = False\n esaOpen = False\n\n gen = None\n genOpen = False\n genBusy = False\n\n edfa = None\n edfaOpen = False\n edfaBusy = False\n\n multi = None\n multiOpen = False\n multiBusy = False\n\n # Graphs\n figureESA = None\n graphESA = None\n axESA = None\n graphToolbarESA = None\n esax = []\n showesay = []\n\n 
figurePump = None\n graphPump = None\n axPump = None\n graphToolbarPump = None\n pumpy = []\n showpumpy = []\n pumpfreqs = []\n pumpy_curr = []\n\n figureStk = None\n graphStk = None\n axStk = None\n graphToolbarStk = None\n stky = []\n showstky = []\n stkfreqs = []\n carry = []\n showcarry = []\n stky_curr = []\n\n # Measure stuff\n measTimer = None\n measBusy = False\n counterGen = 0\n counterEDFA = 0\n freqs = []\n specs = []\n currs = []\n volts = []\n specs_curr = []\n\n # Other stuff\n filename = \"\"\n simulate = True\n updateFreq = False\n\n def __init__(self):\n super(self.__class__, self).__init__()\n self.setupUi(self)\n self.setupOtherUi()\n self.setupActions()\n self.show()\n\n def setupOtherUi(self):\n self.figureESA = plt.figure()\n self.graphESA = FigureCanvas(self.figureESA)\n self.graphToolbarESA = NavigationToolbar(self.graphESA, self)\n self.ESAGraphHolder.addWidget(self.graphToolbarESA)\n self.ESAGraphHolder.addWidget(self.graphESA)\n\n self.figurePump = plt.figure()\n self.graphPump = FigureCanvas(self.figurePump)\n self.graphToolbarPump = NavigationToolbar(self.graphPump, self)\n self.PumpGraphHolder.addWidget(self.graphToolbarPump)\n self.PumpGraphHolder.addWidget(self.graphPump)\n\n self.figureStk = plt.figure()\n self.graphStk = FigureCanvas(self.figureStk)\n self.graphToolbarStk = NavigationToolbar(self.graphStk, self)\n self.StokesGraphHolder.addWidget(self.graphToolbarStk)\n self.StokesGraphHolder.addWidget(self.graphStk)\n self.initPlots()\n\n def setupActions(self):\n # Buttons and etc\n self.connectBut.clicked.connect(self.initializeDevices)\n self.startBut.clicked.connect(self.onStartButClicked)\n self.stopBut.clicked.connect(self.onStopButClicked)\n self.saveBut.clicked.connect(self.saveData)\n self.pointviewSpin.valueChanged.connect(self.changeSpectrum)\n self.freqviewSpin.valueChanged.connect(self.chooseFreq)\n self.pointviewEdfaSpin.valueChanged.connect(self.changeCurrent)\n self.currviewSpin.valueChanged.connect(self.chooseCurrent)\n self.fulldbCheck.stateChanged.connect(self.changeSpecScale)\n self.powdbCheck.stateChanged.connect(self.changePowScale)\n self.autolevelCheck.stateChanged.connect(functools.partial(self.updatePlots, self.pointviewSpin.value()))\n self.showfitCheck.stateChanged.connect(functools.partial(self.updatePlots, self.pointviewSpin.value()))\n\n # Timers\n self.measTimer = QTimer()\n self.measTimer.timeout.connect(self.measLoop)\n self.measTimer.setInterval(100)\n\n def initializeDevices(self):\n if not self.simulate:\n self.esa = esa4407b.ESA4407B()\n self.esa.connectESA(address=self.esaAddrSpin.value())\n time.sleep(0.2)\n self.esa.initESA(tracen=self.esaPtsSpin.value())\n time.sleep(0.1)\n\n self.gen = n5173b.N5173B()\n self.gen.connectGen(address=self.genAddrSpin.value())\n time.sleep(0.2)\n self.gen.initGen()\n time.sleep(0.1)\n\n self.edfa = edfakps.EDFAKps()\n self.edfa.connectEDFA(address=self.edfaAddrSpin.value())\n time.sleep(0.2)\n self.edfa.initEDFA(mode='ACC', enabled=self.edfa.getState())\n time.sleep(0.1)\n\n self.multi = hp34401a.HP34401A()\n self.multi.connectMult(address=self.multAddrSpin.value())\n time.sleep(0.2)\n self.multi.initMult()\n time.sleep(0.1)\n\n if self.esa.esaOK:\n self.esaOpen = True\n self.statusbar.showMessage(\"ESA OK!\")\n self.getConfigs()\n else:\n self.esaOpen = False\n self.statusbar.showMessage(\"ESA Error!\")\n\n if self.gen.genOK:\n self.genOpen = True\n self.statusbar.showMessage(\"Generator OK!\")\n else:\n self.genOpen = False\n self.statusbar.showMessage(\"Generator Error!\")\n\n if 
self.edfa.edfaOK:\n self.edfaOpen = True\n self.statusbar.showMessage(\"EDFA OK!\")\n else:\n self.edfaOpen = False\n self.statusbar.showMessage(\"EDFA Error!\")\n\n if self.multi.multOK:\n self.multiOpen = True\n self.statusbar.showMessage(\"Multimeter OK!\")\n else:\n self.multiOpen = False\n self.statusbar.showMessage(\"Multimeter Error!\")\n\n if self.esaOpen and self.genOpen and self.edfaOpen:\n self.statusbar.showMessage(\"Devices OK!\")\n else:\n self.statusbar.showMessage(\"Error on some devices!!!!\")\n else:\n self.genOpen = True\n self.esaOpen = True\n self.edfaOpen = True\n self.multiOpen = True\n self.statusbar.showMessage(\"Simulating!! Do not take results seriously!\")\n\n def getConfigs(self):\n if (not self.esaBusy) and self.esaOpen and (not self.simulate):\n self.esaBusy = True\n self.esaSpanSpin.setValue(self.esa.getSpanFreq()/1e6)\n self.esaPtsSpin.setValue(self.esa.getTraceLength())\n self.esaAvgSpin.setValue(self.esa.getAvrg())\n self.esaRbwSpin.setValue(self.esa.getResBW()/1e6)\n self.esaVbwSpin.setValue(self.esa.getVBW()/1e6)\n self.esaBusy = False\n return 0\n\n def onStartButClicked(self):\n if not self.measTimer.isActive() and self.genOpen and self.esaOpen:\n genstart = self.genCenterSpin.value() - self.genSpanSpin.value()/2.0\n genstop = self.genCenterSpin.value() + self.genSpanSpin.value()/2.0\n self.freqs = np.linspace(genstart, genstop, self.genPtsSpin.value())\n self.currs = np.linspace(self.edfaStartSpin.value(), self.edfaStopSpin.value(), self.edfaPtsSpin.value())\n\n if not self.simulate:\n self.edfa.setCurr(self.currs[0])\n\n if not self.simulate:\n self.gen.setFreq(self.freqs[0]*1e6)\n\n if not self.simulate:\n self.esa.setCenterFreq(self.genCenterSpin.value()*1e6)\n self.esa.setSpanFreq(self.esaSpanSpin.value()*1e6)\n self.esa.setTraceLength(self.esaPtsSpin.value())\n self.esa.setAvrg(self.esaAvgSpin.value())\n self.esa.setResBW(self.esaRbwSpin.value()*1e6)\n self.esa.setVBW(self.esaVbwSpin.value()*1e6)\n esastart = self.genCenterSpin.value() - self.esaSpanSpin.value()/2\n esastop = self.genCenterSpin.value() + self.esaSpanSpin.value()/2\n self.esax = np.linspace(esastart, esastop, self.esaPtsSpin.value())\n\n self.specs = np.zeros([self.genPtsSpin.value(), self.esaPtsSpin.value()], dtype=np.float32)\n self.pumpy = np.zeros([self.genPtsSpin.value()], dtype=np.float32)\n self.pumpfreqs = np.zeros([self.genPtsSpin.value()], dtype=np.float32)\n self.stky = np.zeros([self.genPtsSpin.value()], dtype=np.float32)\n self.stkfreqs = np.zeros([self.genPtsSpin.value()], dtype=np.float32)\n self.carry = np.zeros([self.genPtsSpin.value()], dtype=np.float32)\n self.specs_curr = np.zeros([self.edfaPtsSpin.value(), self.genPtsSpin.value(), self.esaPtsSpin.value()], dtype=np.float32)\n self.pumpy_curr = np.zeros([self.edfaPtsSpin.value(), self.genPtsSpin.value()], dtype=np.float32)\n self.stky_curr = np.zeros([self.edfaPtsSpin.value(), self.genPtsSpin.value()], dtype=np.float32)\n self.volts =np.zeros([self.edfaPtsSpin.value()], dtype=np.float32)\n self.counterGen = 0\n self.counterEDFA = 0\n\n self.statusbar.showMessage(\"Preparing...\")\n\n time.sleep(3)\n\n self.measTimer.setInterval(self.waitSpin.value())\n self.measTimer.start()\n self.statusbar.showMessage(\"Measuring...\")\n else:\n self.statusbar.showMessage(\"Already running, or devices disconnected!\")\n\n def onStopButClicked(self):\n self.statusbar.showMessage(\"Stopped.\")\n self.stopAll()\n\n def initPlots(self):\n self.figureESA.clear()\n self.axESA = self.figureESA.add_subplot(111)\n 
self.axESA.plot(self.esax, self.showesay)\n self.axESA.set_title(\"Full Spectrum\")\n self.axESA.grid(True)\n self.figureESA.set_tight_layout(True)\n\n self.figurePump.clear()\n self.axPump = self.figurePump.add_subplot(111)\n self.axPump.plot(self.freqs, self.showpumpy)\n self.axPump.set_title(\"Pump\")\n self.axPump.grid(True)\n self.figurePump.set_tight_layout(True)\n\n self.figureStk.clear()\n self.axStk = self.figureStk.add_subplot(111)\n self.axStk.plot(self.freqs, self.showstky)\n self.axStk.set_title(\"Stokes\")\n self.axStk.grid(True)\n self.figureStk.set_tight_layout(True)\n\n if self.fulldbCheck.isChecked():\n self.axESA.set_xlabel(\"Frequency (MHz)\")\n self.axESA.set_ylabel(\"Power (dBm)\")\n else:\n self.axESA.set_xlabel(\"Frequency (MHz)\")\n self.axESA.set_ylabel(\"Power (mW)\")\n\n if self.powdbCheck.isChecked():\n self.axPump.set_xlabel(\"Frequency (MHz)\")\n self.axPump.set_ylabel(\"Power (dBm)\")\n self.axStk.set_xlabel(\"Frequency (MHz)\")\n self.axStk.set_ylabel(\"Power (dBm)\")\n else:\n self.axPump.set_xlabel(\"Frequency (MHz)\")\n self.axPump.set_ylabel(\"Power (mW)\")\n self.axStk.set_xlabel(\"Frequency (MHz)\")\n self.axStk.set_ylabel(\"Power (mW)\")\n\n self.graphESA.draw()\n self.graphPump.draw()\n self.graphStk.draw()\n\n def levelData(self, x, y, x0):\n fitfunc = lambda p, x: p[0] + p[1]*np.exp(-((x - p[2])**2)/(2*p[3]**2)) + p[4]*x + p[5]*x**2 + p[6]*x**3 + p[7]*x**4 + p[8]*x**5\n errfunc = lambda p, x, y: fitfunc(p, x) - y\n p0 = [0, 0, x0, 1, 0.0, 0.0, 0.0, 0.0, 0.0]\n\n tol = 1e-9\n p1, success1 = optimize.leastsq(errfunc, p0[:], args=(x, y), maxfev=10000, ftol=tol, gtol=tol, xtol=tol)\n\n ybase = p1[4] * x + p1[5] * x ** 2 + p1[6] * x ** 3 + p1[7] * x ** 4 + p1[8] * x ** 5\n ylevel = y - ybase\n ylmean = ylevel.mean()\n ymean = y.mean()\n meandiff = ylmean - ymean\n ylevel = ylevel - meandiff\n\n return ylevel\n\n def fitGaussPol(self, x, y, x0):\n fitfunc = lambda p, x: p[0] + p[1] * np.exp(-((x - p[2]) ** 2) / (2 * p[3] ** 2)) + p[4] * x + p[5] * x ** 2 + p[6] * x ** 3 + p[\n 7] * x ** 4 + p[8] * x ** 5\n errfunc = lambda p, x, y: fitfunc(p, x) - y\n p0 = [0, 0, x0, 1, 0.0, 0.0, 0.0, 0.0, 0.0]\n\n tol = 1e-9\n p1, success1 = optimize.leastsq(errfunc, p0[:], args=(x, y), maxfev=10000, ftol=tol, gtol=tol, xtol=tol)\n\n return fitfunc(p1, x)\n\n def updatePlots(self, freq_i):\n if len(self.freqs) > 1:\n self.showesay = self.specs[freq_i]\n self.showpumpy = self.pumpy[:self.counterGen + 1]\n self.showstky = self.stky[:self.counterGen + 1]\n self.showcarry = self.carry[:self.counterGen + 1]\n\n freqs = self.freqs[:self.counterGen + 1]\n\n if self.counterGen > 10 and self.autolevelCheck.isChecked():\n self.showpumpy = self.levelData(freqs, self.showpumpy, self.genCenterSpin.value())\n self.showstky = self.levelData(freqs, self.showstky, self.genCenterSpin.value())\n\n if not self.fulldbCheck.isChecked():\n self.showesay = np.power(10, self.showesay/10.0)\n if not self.powdbCheck.isChecked():\n self.showpumpy = np.power(10, self.showpumpy/10.0)\n self.showstky = np.power(10, self.showstky/10.0)\n\n self.figureESA.clear()\n self.axESA = self.figureESA.add_subplot(111)\n self.axESA.plot(self.esax, self.showesay)\n self.axESA.set_title(\"Full Spectrum\")\n self.axESA.grid(True)\n self.axESA.axvline(self.pumpfreqs[freq_i], color=\"r\", linestyle=\"--\")\n self.axESA.axvline(self.stkfreqs[freq_i], color=\"g\", linestyle=\"--\")\n self.figureESA.set_tight_layout(True)\n\n self.figurePump.clear()\n self.axPump = self.figurePump.add_subplot(111)\n # 
self.axPump.plot(freqs, self.showcarry, 'k')\n self.axPump.plot(freqs, self.showpumpy)\n self.axPump.set_title(\"Pump\")\n self.axPump.grid(True)\n self.axPump.axvline(self.freqs[freq_i], color=\"r\", linestyle=\"--\")\n self.figurePump.set_tight_layout(True)\n\n self.figureStk.clear()\n self.axStk = self.figureStk.add_subplot(111)\n self.axStk.plot(freqs, self.showstky)\n self.axStk.set_title(\"Stokes\")\n self.axStk.grid(True)\n self.axStk.axvline(self.freqs[freq_i], color=\"g\", linestyle=\"--\")\n self.figureStk.set_tight_layout(True)\n\n if self.fulldbCheck.isChecked():\n self.axESA.set_xlabel(\"Frequency (MHz)\")\n self.axESA.set_ylabel(\"Power (dBm)\")\n else:\n self.axESA.set_xlabel(\"Frequency (MHz)\")\n self.axESA.set_ylabel(\"Power (mW)\")\n\n if self.powdbCheck.isChecked():\n self.axPump.set_xlabel(\"Frequency (MHz)\")\n self.axPump.set_ylabel(\"Power (dBm)\")\n self.axStk.set_xlabel(\"Frequency (MHz)\")\n self.axStk.set_ylabel(\"Power (dBm)\")\n else:\n self.axPump.set_xlabel(\"Frequency (MHz)\")\n self.axPump.set_ylabel(\"Power (mW)\")\n self.axStk.set_xlabel(\"Frequency (MHz)\")\n self.axStk.set_ylabel(\"Power (mW)\")\n\n if self.counterGen > 10 and self.showfitCheck.isChecked():\n pumpfit = self.fitGaussPol(freqs, self.showpumpy, self.genCenterSpin.value())\n stokesfit = self.fitGaussPol(freqs, self.showstky, self.genCenterSpin.value())\n self.axPump.plot(freqs, pumpfit)\n self.axStk.plot(freqs, stokesfit)\n\n self.graphESA.draw()\n self.graphPump.draw()\n self.graphStk.draw()\n\n def findPeaks(self, f0p, f0s, specx, specy):\n specy = np.power(10, specy/10.0)\n\n midf = np.abs(f0p + f0s) / 2\n midind = np.abs(specx - midf).argmin()\n lx = specx[:int(0.8*midind)]\n ly = specy[:int(0.8*midind)]\n rx = specx[int(1.2*midind):]\n ry = specy[int(1.2*midind):]\n lmax = ly.max()\n rmax = ry.max()\n\n fitfunc = lambda p, x: p[0] * ((p[1] ** 2) / ((x - p[2]) ** 2 + p[1] ** 2)) + p[3]\n errfunc = lambda p, x, y: fitfunc(p, x) - y\n p01 = [lmax, 1, f0p, 0]\n p02 = [rmax, 1, f0s, 0]\n\n p1, success1 = optimize.leastsq(errfunc, p01[:], args=(lx, ly), maxfev=10000)\n p2, success2 = optimize.leastsq(errfunc, p02[:], args=(rx, ry), maxfev=10000)\n\n pmpi = np.abs((specx - p1[2]) / 2).argmin()\n stki = np.abs((specx - p2[2]) / 2).argmin()\n # pmp = 10*np.log10(specy[pmpi])\n # stk = 10*np.log10(specy[stki])\n pmp = 10 * np.log10(p1[0])\n stk = 10 * np.log10(p2[0])\n\n return [[p1[2], pmp], [p2[2], stk], [midf, specy[midind]]]\n\n def measLoop(self):\n if self.esaOpen and self.genOpen and not self.measBusy:\n self.measBusy = True\n\n f0p = self.freqs[self.counterGen] - self.shiftSpin.value()\n f0s = self.freqs[self.counterGen] + self.shiftSpin.value()\n\n if not self.simulate:\n self.specs[self.counterGen] = self.esa.getData()\n else:\n esastart = self.esax[0]\n esastop = self.esax[len(self.esax) - 1]\n self.specs[self.counterGen] = self.getRandomSpec(esastart, esastop, self.esaPtsSpin.value(), f0p, f0s, self.freqs[self.counterGen])\n\n if not self.simulate and self.counterGen < len(self.freqs) - 1:\n self.gen.setFreq(self.freqs[self.counterGen + 1]*1e6)\n\n peaks = self.findPeaks(f0p, f0s, self.esax, self.specs[self.counterGen])\n self.pumpfreqs[self.counterGen] = peaks[0][0]\n self.stkfreqs[self.counterGen] = peaks[1][0]\n self.pumpy[self.counterGen] = peaks[0][1]\n self.stky[self.counterGen] = peaks[1][1]\n # self.carry[self.counterGen] = peaks[2][1]\n self.updatePlots(self.counterGen)\n self.pointviewSpin.setValue(self.counterGen)\n 
self.freqviewSpin.setValue(self.freqs[self.counterGen])\n\n self.counterGen = self.counterGen + 1\n if self.counterGen >= self.genPtsSpin.value():\n self.specs_curr[self.counterEDFA] = self.specs\n self.pumpy_curr[self.counterEDFA] = self.pumpy\n self.stky_curr[self.counterEDFA] = self.stky\n\n if not self.simulate:\n self.volts[self.counterEDFA] = self.multi.getVolt()\n\n self.counterEDFA = self.counterEDFA + 1\n if self.counterEDFA >= self.edfaPtsSpin.value():\n self.onStopButClicked()\n else:\n if not self.simulate:\n self.gen.setFreq(self.freqs[0] * 1e6)\n self.edfa.setCurr(self.currs[self.counterEDFA])\n\n self.counterGen = 0\n\n self.specs = np.zeros([self.genPtsSpin.value(), self.esaPtsSpin.value()], dtype=np.float32)\n self.pumpy = np.zeros([self.genPtsSpin.value()])\n self.stky = np.zeros([self.genPtsSpin.value()])\n\n self.pointviewEdfaSpin.setValue(self.counterEDFA)\n self.currviewSpin.setValue(self.currs[self.counterEDFA])\n\n time.sleep(3)\n self.measBusy = False\n\n return 0\n\n def saveData(self):\n file = QFileDialog.getSaveFileName(self, \"Save file\", QDir.homePath(), \"Text files (*.txt)\")\n self.filename = file[0]\n prefix = \"\"\n if self.filename != \"\":\n if self.filename[-4:] != \".txt\" and self.filename[-4:] != \".TXT\":\n prefix = self.filename\n self.filename = self.filename + \".txt\"\n else:\n prefix = self.filename[:-4]\n else:\n self.filename = QDir.homePath() + \"/noname.txt\"\n prefix = QDir.homePath() + \"/noname\"\n\n for currindex in range(0, self.edfaPtsSpin.value()):\n self.specs = self.specs_curr[currindex]\n self.pumpy = self.pumpy_curr[currindex]\n self.stky = self.stky_curr[currindex]\n\n thisfile = prefix + \"_\" + str(currindex) + \".txt\"\n\n if len(self.freqs) != 0 and len(self.pumpy) != 0 and len(self.stky) != 0:\n if self.autolevelCheck.isChecked():\n pumplevel = self.levelData(self.freqs, self.pumpy, self.genCenterSpin.value())\n stokeslevel = self.levelData(self.freqs, self.stky, self.genCenterSpin.value())\n with open(thisfile, \"w\") as file:\n file.write(\"Sweep Freq. (MHz)\" + \"\\t\" + \"Pump Power (dBm)\" + \"\\t\" + \"Stokes Power (dBm)\" + \"\\t\" +\n \"Leveled Pump Power (dBm)\" + \"\\t\" + \"Leveled Stokes Power (dBm)\" + \"\\n\")\n for i in range(0, len(self.freqs)):\n file.write(str(self.freqs[i]) + \"\\t\" + str(self.pumpy[i]) + \"\\t\" + str(self.stky[i]) + \"\\t\" +\n str(pumplevel[i]) + \"\\t\" + str(stokeslevel[i]) + \"\\n\")\n file.close()\n else:\n with open(thisfile, \"w\") as file:\n file.write(\"Sweep Freq. (MHz)\" + \"\\t\" + \"Pump Power (dBm)\" + \"\\t\" + \"Stokes Power (dBm)\" + \"\\n\")\n for i in range(0, len(self.freqs)):\n file.write(str(self.freqs[i]) + \"\\t\" + str(self.pumpy[i]) + \"\\t\" + str(self.stky[i]) + \"\\n\")\n file.close()\n if len(self.freqs) != 0 and len(self.specs) != 0 and len(self.specs[0]) != 0:\n specfile = prefix + \"_spectra\" + \"_\" + str(currindex) + \".txt\"\n with open(specfile, \"w\") as file:\n header = \"Freq. (MHz)\"\n for i in range(0, len(self.freqs)):\n header = header + \"\\tFreq. 
\" + str(self.freqs[i]) + \" Power (dBm)\"\n header = header + \"\\n\"\n file.write(header)\n for i in range(0, len(self.esax)):\n file.write(str(self.esax[i]))\n for j in range(0, len(self.freqs)):\n file.write(\"\\t\" + str(self.specs[j][i]))\n file.write(\"\\n\")\n file.close()\n\n if len(self.currs) != 0 and len(self.volts) != 0:\n currvoltsfile = prefix + \"_currs_volts.txt\"\n with open(currvoltsfile, \"w\") as file:\n file.write(\"EDFA Current (mA)\" + \"\\t\" + \"DC Voltage (V)\" + \"\\n\")\n for i in range(0, len(self.currs)):\n file.write(str(self.currs[i]) + \"\\t\" + str(self.volts[i]) + \"\\n\")\n file.close()\n\n def chooseCurrent(self):\n if not self.updateFreq and len(self.currs) > 0:\n self.updateFreq = True\n if not self.measTimer.isActive():\n curri = np.abs(self.currs - self.currviewSpin.value()).argmin()\n self.pointviewEdfaSpin.setValue(curri)\n self.currviewSpin.setValue(self.currs[curri])\n else:\n self.currviewSpin.setValue(self.currs[self.counterEDFA])\n self.updateFreq = False\n\n def changeCurrent(self):\n if len(self.currs) > 0:\n if not self.measTimer.isActive():\n self.updateFreq = True\n if self.pointviewEdfaSpin.value() >= len(self.specs_curr):\n self.pointviewEdfaSpin.setValue(len(self.specs_curr) - 1)\n self.currviewSpin.setValue(self.currs[self.pointviewEdfaSpin.value()])\n\n self.specs = self.specs_curr[self.pointviewEdfaSpin.value()]\n self.pumpy = self.pumpy_curr[self.pointviewEdfaSpin.value()]\n self.stky = self.stky_curr[self.pointviewEdfaSpin.value()]\n\n self.updateFreq = False\n\n self.changeSpectrum()\n else:\n self.pointviewEdfaSpin.setValue(self.counterEDFA)\n\n def chooseFreq(self):\n if not self.updateFreq and len(self.freqs) > 0:\n self.updateFreq = True\n if not self.measTimer.isActive():\n freqi = np.abs(self.freqs - self.freqviewSpin.value()).argmin()\n self.pointviewSpin.setValue(freqi)\n self.freqviewSpin.setValue(self.freqs[freqi])\n else:\n self.freqviewSpin.setValue(self.freqs[self.counterGen])\n self.updateFreq = False\n\n def changeSpectrum(self):\n if len(self.freqs) > 0:\n if not self.measTimer.isActive():\n self.updateFreq = True\n if self.pointviewSpin.value() >= len(self.specs):\n self.pointviewSpin.setValue(len(self.specs) - 1)\n self.updatePlots(self.pointviewSpin.value())\n self.freqviewSpin.setValue(self.freqs[self.pointviewSpin.value()])\n self.updateFreq = False\n else:\n self.pointviewSpin.setValue(self.counterGen)\n\n self.pumpFreqView.setValue(self.pumpfreqs[self.pointviewSpin.value()])\n self.stokesFreqView.setValue(self.stkfreqs[self.pointviewSpin.value()])\n self.pumpPowView.setValue(self.showpumpy[self.pointviewSpin.value()])\n self.stokesPowView.setValue(self.showstky[self.pointviewSpin.value()])\n\n def changeSpecScale(self):\n if len(self.freqs) > 0:\n self.updatePlots(self.pointviewSpin.value())\n\n def changePowScale(self):\n if len(self.freqs) > 0:\n self.updatePlots(self.pointviewSpin.value())\n if self.powdbCheck.isChecked():\n self.pumpPowView.setSuffix(\" dBm\")\n self.stokesPowView.setSuffix(\" dBm\")\n else:\n self.pumpPowView.setSuffix(\" mW\")\n self.stokesPowView.setSuffix(\" mW\")\n self.pumpPowView.setValue(self.showpumpy[self.pointviewSpin.value()])\n self.stokesPowView.setValue(self.showstky[self.pointviewSpin.value()])\n\n def stopAll(self):\n self.measTimer.stop()\n\n def closeAll(self):\n if not self.simulate:\n self.esa.closeESA()\n self.gen.closeGen()\n\n def exit(self):\n self.stopAll()\n self.closeAll()\n QCoreApplication.quit()\n sys.exit()\n\n def closeEvent(self, event):\n 
self.stopAll()\n        self.closeAll()\n\n    def getRandomSpec(self, start, stop, n, f0p, f0s, fsw):\n        x = np.linspace(start, stop, n, dtype=np.float)\n        center = (start + stop)/2\n        noise = np.random.randint(0, 1000000, n) / 100000000.0\n\n        a1 = 1 - 1*np.exp(-((fsw - center)**2)/(2*(10**2)))\n        w1 = 10\n        x1 = f0p\n\n        a2 = 1*np.exp(-((fsw - center)**2)/(2*(10**2)))\n        w2 = 10\n        x2 = f0s\n\n        offset = np.random.randint(1, 1000000) / 10000000.0\n\n        y = (a1 * (w1 ** 2) / ((x - x1) ** 2 + w1 ** 2)) + (a2 * (w2 ** 2) / ((x - x2) ** 2 + w2 ** 2)) + offset + noise\n        y = 10*np.log10(y)\n\n        return y\n\n    def dummy(self):\n        print(\"haha\")\n\n\n# Run\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    w = MainWindow()\n\n    sys.exit(app.exec_())","sub_path":"python/Brillouin/ESAGainCapture/ESAGainCapture.py","file_name":"ESAGainCapture.py","file_ext":"py","file_size_in_byte":27113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"306024429","text":"\"\"\"Builds the Kinect Depth Data network.\n\nImplements the inference/loss/training pattern for model building.\n\n1. inference() - Builds the model as far as is required for running the network\nforward to make predictions.\n2. loss() - Adds to the inference model the layers required to generate loss.\n3. training() - Adds to the loss model the Ops required to generate and\napply gradients.\n\nThis file is used by the various \"fully_connected_*.py\" files and not meant to\nbe run.\n\nLiberally borrowed from here:\nhttps://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10.py\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport tensorflow as tf\n\nNUM_CLASSES = 25\nIMAGE_SIZE = 34\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 41310\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000 # same as CIFAR-10\n\nMOVING_AVERAGE_DECAY = 0.9999\nNUM_EPOCHS_PER_DECAY = 350.0\nLEARNING_RATE_DECAY_FACTOR = 0.1\nINITIAL_LEARNING_RATE = 0.1\n\ndef get_lossy_variable(name, shape, stddev, wd):\n\tvar = tf.get_variable(name, shape, initializer=tf.truncated_normal_initializer(stddev=stddev))\n\tweight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n\ttf.add_to_collection('losses', weight_decay)\n\treturn var\n\ndef layer_conv(x, kernel_shape):\n\t\"\"\"Simple convolution layer with nonlinearity. All HP guesses.\n\tArguments:\n\t`x`: input tensor, shape = [batchLength, height, width, channels]\n\t`kernel_shape`: [height, width, channelsInput, channelsOutput]\n\tReference: https://www.tensorflow.org/api_docs/python/nn/convolution#conv2d\n\t\"\"\"\n\t# bias_shape = [kernel_shape[-1]]\n\t# bias = tf.get_variable(\"biases\", bias_shape, initializer=tf.constant_initializer(0.0))\n\t# outp = tf.nn.relu(conv + biases)\n\twait = get_lossy_variable(\"weights\", kernel_shape, 0.05, 0.0)\n\tconv = tf.nn.conv2d(x, wait, strides=([1]*4), padding=\"SAME\")\n\tpool = tf.nn.max_pool(conv, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool')\n\tnorm = tf.nn.lrn(pool, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm')\n\treturn norm\n\n
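# Shape sketch for layer_conv (illustrative; the 5x5 kernel and 64 filters are\n# made-up example values, not constants from this file): a [batch, 34, 34, 1]\n# input with kernel_shape=[5, 5, 1, 64] stays 34x34 through conv2d ('SAME',\n# stride 1) and is halved by the stride-2 max_pool, giving [batch, 17, 17, 64].\n\n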
def layer_fc(x, qty_outputs, stddev=0.04, wd=0.004):\n\t\"\"\"Simple fully-connected layer with nonlinearity. All HP guesses.\n\tArguments:\n\t`x`: input tensor, shape = [batchLength, height, width, channels]\n\t`qty_outputs`: length of next vector in chain\n\t\"\"\"\n\tdims = x.get_shape()[1].value\n\twait = get_lossy_variable('weights', shape=[dims, qty_outputs], stddev=stddev, wd=wd)\n\tbias = tf.get_variable('biases', [qty_outputs], initializer=tf.constant_initializer(0.1))\n\toutp = tf.nn.relu(tf.matmul(x, wait) + bias, name=\"ReLU\")\n\treturn outp\n\ndef inference(images, c1, stack_height_c1, c2, stack_height_c2, fc1, fc2):\n\tp = images\n\n\twith tf.variable_scope('conv1') as scope:\n\t\tp = layer_conv(p, [c1, c1, 1, stack_height_c1])\n\n\twith tf.variable_scope('conv2') as scope:\n\t\tp = layer_conv(p, [c2, c2, stack_height_c1, stack_height_c2])\n\n\tp = tf.reshape(p, [int(p.get_shape()[0]), -1]) # images -> vector\n\n\twith tf.variable_scope(\"fc1\") as scope:\n\t\tp = layer_fc(p, fc1)\n\n\twith tf.variable_scope(\"fc2\") as scope:\n\t\tp = layer_fc(p, fc2)\n\n\twith tf.variable_scope(\"fc3\") as scope:\n\t\tp = layer_fc(p, NUM_CLASSES, stddev=(1/NUM_CLASSES), wd=0.0)\n\n\t# no softmax because loss() will do it for us\n\n\treturn p\n\ndef loss(logits, labels):\n\t\"\"\"Calculates the loss from the logits and the labels.\n\n  Args:\n    logits: Logits tensor, float - [batch_size, NUM_CLASSES].\n    labels: Labels tensor, int32 - [batch_size].\n\n  Returns: Loss tensor of type float.\n\t\"\"\"\n\tlabels = tf.to_int64(labels)\n\t# http://stackoverflow.com/a/37317322\n\tcross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels, name='xentropy')\n\tcross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')\n\ttf.add_to_collection(\"losses\", cross_entropy_mean)\n\treturn tf.add_n(tf.get_collection(\"losses\"), name=\"total_loss\")\n\ndef training(loss, global_step, batch_size):\n\t\"\"\"Sets up the training Ops.\n\n  Creates an optimizer and applies the gradients to all trainable variables.\n\n  The Op returned by this function is what must be passed to the\n  `sess.run()` call to cause the model to train.\n\n  Args:\n    loss: Loss tensor, from loss().\n    global_step: int32 Variable counting how many steps we have taken so far\n\n  Returns:\n    train_op: The Op for training.\n\t\"\"\"\n\tnum_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / batch_size\n\tdecay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)\n\t# https://www.tensorflow.org/api_docs/python/train/decaying_the_learning_rate#exponential_decay\n\tlr = tf.train.exponential_decay(\\\n\t\tINITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True)\n\n\t# generate the moving averages of all of the losses\n\tloss_averages 
= tf.train.ExponentialMovingAverage(0.9, name='avg')\n\tlosses = tf.get_collection('losses')\n\tloss_averages_op = loss_averages.apply(losses + [loss])\n\n\twith tf.control_dependencies([loss_averages_op]):\n\t\topt = tf.train.GradientDescentOptimizer(lr)\n\t\tgrads = opt.compute_gradients(loss)\n\n\tapply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n\tvariable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)\n\tvariables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n\twith tf.control_dependencies([apply_gradient_op, variables_averages_op]):\n\t\ttrain_op = tf.no_op(name='train')\n\n\treturn train_op\n\ndef evaluation(logits, labels):\n\t\"\"\"Evaluate the quality of the logits at predicting the label.\n\n  Args:\n    logits: Logits tensor, float - [batch_size, NUM_CLASSES].\n    labels: Labels tensor, int32 - [batch_size], with values in the\n      range [0, NUM_CLASSES).\n\n  Returns:\n    A scalar int32 tensor with the number of examples (out of batch_size)\n    that were predicted correctly.\n\t\"\"\"\n\tcorrect = tf.nn.in_top_k(logits, labels, 1)\n\t# Return the number of true entries.\n\treturn tf.reduce_sum(tf.cast(correct, tf.int32))\n\n","sub_path":"final/kdd.py","file_name":"kdd.py","file_ext":"py","file_size_in_byte":5917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"261138436","text":"import weakref\nfrom types import MethodType, BuiltinMethodType\nfrom qtodb.reflect.weakmethod import WeakMethod\n\n\nclass Reflective(object):\n    \"\"\"\n    Reflect changes in instance attributes for registered callbacks\n    \"\"\"\n\n\n    def RegisterAttributeReflection(self, attr_name, callback):\n        if not hasattr(self, \"_v_callbacks\"):\n            self._v_callbacks = {}\n        if not hasattr(self, attr_name):\n            import warnings\n            warnings.warn(\"{0} has no attribute '{1}'\".format(self.__class__.__name__, attr_name))\n        if isinstance(callback, (MethodType, BuiltinMethodType)):\n            callback_ref = WeakMethod(callback)\n        else:\n            callback_ref = weakref.ref(callback)\n        attr_callbacks = self._v_callbacks.setdefault(attr_name, [])\n        attr_callbacks.append(callback_ref)\n\n\n    def UnregisterReflection(self, callback):\n        for callback_list in self._v_callbacks.values():\n            # iterate over a copy, since matching refs are removed below\n            for callback_ref in list(callback_list):\n                if callback == callback_ref():\n                    callback_list.remove(callback_ref)\n\n\n    def __setattr__(self, key, value):\n        super(Reflective, self).__setattr__(key, value)\n        if hasattr(self, \"_v_callbacks\") and key in self._v_callbacks:\n            # iterate over a copy, since dead refs are pruned below\n            for callback_weakref in list(self._v_callbacks[key]):\n                callback = callback_weakref()\n                if callback:\n                    callback(value)\n                else:\n                    self._v_callbacks[key].remove(callback_weakref)\n
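\n\n# Usage sketch (illustrative; only Reflective itself comes from this module):\n#\n#   class Model(Reflective):\n#       def __init__(self):\n#           self.value = 0\n#\n#   def on_change(new_value):\n#       print('value is now', new_value)\n#\n#   model = Model()\n#   model.RegisterAttributeReflection('value', on_change)\n#   model.value = 42  # __setattr__ invokes on_change(42)\n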
","sub_path":"src/python/qtodb/reflect/reflective.py","file_name":"reflective.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"313973307","text":"#!/usr/bin/python\n\nimport socket\nfrom termcolor import colored\n\nsocket.setdefaulttimeout(1)\n# socket.SOCK_STREAM means TCP\n\nhost = raw_input(\"[*] Enter the host to SCAN: \")\n#port = int(raw_input(\"[*] Enter the port to SCAN: \"))\n\n\ndef portscanner(port):\n    # create a fresh socket per probe; a socket cannot be reused after connect_ex\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    if sock.connect_ex((host, port)):\n        print(colored(\"[!!] Port %s is closed\" % (port), 'red'))\n    else:\n        print(colored(\"[+] Port %s is open\" % (port), 'green'))\n    sock.close()\n\nfor port in range(1, 1000):\n    portscanner(port)\n","sub_path":"portscan1.py","file_name":"portscan1.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"582170855","text":"import mysql.connector\nimport pathlib\nimport json\nimport csv\n\nfrom datetime import datetime\n\nfrom etl import utils\n\nPATH_TO_STATE = f\"{pathlib.Path(__file__).parent}/state\"\nPATH_TO_EXPORTS = f\"{pathlib.Path(__file__).parent}/../exports\"\n\n\ndef load_data():\n    \"\"\"\n    Loads the transferred books into the database.\n    :return: None\n    \"\"\"\n    with open(f'{PATH_TO_STATE}/current_state.json', 'r') as f:\n        cur_state = json.load(f)\n\n    if cur_state['current_state'] != 1:\n        print(\"You have to perform transfer operations first!\")\n        return\n    books = cur_state['books']\n    BooksDBOperator().save_books_to_db(books)\n    utils.clear_data()\n    print(\"Data successfully saved in database\")\n\n\n
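# Usage sketch (illustrative; the table name below is invented, real names are\n# timestamped as lubimyczytac_top100books_YYYYmmdd_HHMM_SS):\n#\n#   load_data()  # requires the transfer step to be done (current_state == 1)\n#   op = BooksDBOperator()\n#   op.show_existing_tables()\n#   op.export_table_elements_csv('lubimyczytac_top100books_20200101_1200_00')\n\n\n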
tables:\")\n for table_tuple in result:\n print(table_tuple[0])\n\n def export_table_elements(self, table_name):\n \"\"\"\n Exports table elements to different files in exports folder\n :param table_name: Name of the table to be exported\n :return:\n \"\"\"\n my_db = self.connect_to_db()\n my_cursor = my_db.cursor()\n my_cursor.execute(f\"SELECT * FROM {table_name}\")\n records = my_cursor.fetchall()\n for i, row in enumerate(records):\n with open(f'{PATH_TO_EXPORTS}/books/{i + 1}.txt', 'w') as file:\n print(f\"Saving details for book {i + 1}\")\n file.write(self._get_row_str(row))\n\n def export_table_elements_csv(self, table_name):\n \"\"\"\n Exports table elements to csv file\n :param table_name: Name of the table to be exported\n :return:\n \"\"\"\n my_db = self.connect_to_db()\n my_cursor = my_db.cursor()\n my_cursor.execute(f\"SELECT * FROM {table_name}\")\n records = my_cursor.fetchall()\n with open(f'{PATH_TO_EXPORTS}/books_{table_name}.csv', mode='w') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n\n fieldnames = ['ID', 'book_name', 'author_name', 'book_rating', 'nmb_of_ratings',\n 'genres', 'nmb_of_opinions', 'nmb_of_pages', 'apx_reading_time',\n 'avg_book_price']\n csv_writer.writerow(fieldnames)\n for row in records:\n csv_writer.writerow(row)\n\n\n def show_table_elements(self, table_name):\n \"\"\"\n Show records for given table\n :param table_name: Name of the table, that will have the record displayed\n :return: None\n \"\"\"\n my_db = self.connect_to_db()\n my_cursor = my_db.cursor()\n my_cursor.execute(f\"SELECT * FROM {table_name}\")\n records = my_cursor.fetchall()\n print(f\"{table_name} records:\")\n for row in records:\n print(self._get_row_str(row))\n # print()\n # print(f\"ID {row[0]}\")\n # print(f\"book_name {row[1]}\")\n # print(f\"author_name {row[2]}\")\n # print(f\"book_rating {row[3]}\")\n # print(f\"nmb_of_ratings {row[4]}\")\n # print(f\"genres {row[5]}\")\n # print(f\"nmb_of_opinions {row[6]}\")\n # print(f\"nmb_of_pages {row[7]}\")\n # print(f\"apx_reading_time {row[8]}\")\n # print(f\"avg_book_price {row[9]}\")\n # print()\n\n def _get_row_str(self, row):\n row_str = f\"\"\"\nbook_name {row[1]}\nauthor_name {row[2]}\nbook_rating {row[3]}\nnmb_of_ratings {row[4]}\ngenres {row[5]}\nnmb_of_opinions {row[6]}\nnmb_of_pages {row[7]}\napx_reading_time {row[8]}\navg_book_price {row[9]} \n\"\"\"\n return row_str\n\n def remove_table(self, table_name):\n \"\"\"\n Removed given table from database\n :param table_name: Name of the table, that will be removed\n :return: None\n \"\"\"\n my_db = self.connect_to_db()\n my_cursor = my_db.cursor()\n my_cursor.execute(f\"DROP TABLE {table_name}\")\n print(f\"Table: {table_name} is dropped\")\n","sub_path":"etl/books_database.py","file_name":"books_database.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"342260596","text":"# _*_ coding: utf-8 _*_\nfrom flask import Flask, render_template, session, request, redirect, g\nfrom flask_socketio import SocketIO, emit\nimport sqlite3\n\napp = Flask(__name__)\nsocketio = SocketIO(app)\nDATABASE = 'test.db'\n\n#db functions\ndef init_db():\n db = sqlite3.connect(\"test.db\")\n cur = db.cursor()\n cur.execute(\"SELECT count(name) FROM sqlite_master WHERE type='table';\")\n tb_lst = cur.fetchone()[0]\n if(tb_lst == 0):\n print(\"> created DB\")\n cur.execute(\"CREATE TABLE user(id INTEGER PRIMARY KEY AUTOINCREMENT, userid 
VARCHAR(12) NOT NULL, pwd TEXT NOT NULL, email TEXT NOT NULL, username TEXT);\")\n        db.commit()\n    cur.close()\n    db.close()\n    \n@app.route('/')\ndef index():\n    if session.get(\"account_id\") is not None:\n        return render_template('signup.html')\n    else:\n        return render_template('login.html')\n\n# login function\n@app.route('/account/login', methods=[\"POST\"])\ndef login():\n    db = sqlite3.connect(\"test.db\")\n    cur = db.cursor()\n    user_id = request.form[\"user_id\"]\n    user_pw = request.form[\"user_pw\"]\n    \n    \n    cur.execute(\"SELECT EXISTS (SELECT * FROM user WHERE userid = ? AND pwd = ?);\", (user_id, user_pw))\n    flag = cur.fetchone()[0]\n    if flag == 1:\n        session[\"account_id\"] = user_id\n        print('> session : ' + session['account_id'])\n        return redirect('/', code=302)\n    else:\n        return redirect('/', code=302)\n\n@app.route('/account/signup', methods=[\"GET\"])\ndef signup():\n    return render_template(\"signup.html\")\n\n@app.route('/account/create', methods=[\"POST\"])\ndef create():\n    user_id = request.form[\"user_id\"]\n    user_pw = request.form[\"user_pw\"]\n    user_em = request.form[\"user_email\"]\n    user_name = request.form[\"user_name\"]\n    \n    db = sqlite3.connect('test.db')\n    cur = db.cursor()\n    cur.execute(\"INSERT INTO user(userid, pwd, email, username) VALUES(?, ?, ?, ?)\", (user_id, user_pw, user_em, user_name))\n    db.commit()\n    cur.close()\n    db.close()\n\n    return redirect('/', code=302)\n\n#app start\nif __name__ == '__main__':\n    # Flask sessions require a signing key; without one, session[...] raises a RuntimeError\n    app.secret_key = 'change-this-dev-key'  # placeholder only, use a strong random key in production\n    init_db()\n    socketio.run(app, port=9001, debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"62432557","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the analysis front-end object.\"\"\"\n\nimport unittest\n\nfrom plaso.frontend import analysis_frontend\nfrom plaso.frontend import frontend\nfrom plaso.frontend import test_lib\nfrom plaso.lib import errors\nfrom plaso.lib import storage\n\n\nclass AnalysisFrontendTests(test_lib.FrontendTestCase):\n  \"\"\"Tests for the analysis front-end object.\"\"\"\n\n  def setUp(self):\n    \"\"\"Sets up the objects used throughout the test.\"\"\"\n    self._input_reader = frontend.StdinFrontendInputReader()\n    self._output_writer = frontend.StdoutFrontendOutputWriter()\n\n  def testOpenStorageFile(self):\n    \"\"\"Tests the open storage file function.\"\"\"\n    test_front_end = analysis_frontend.AnalysisFrontend(\n        self._input_reader, self._output_writer)\n\n    options = frontend.Options()\n    options.storage_file = self._GetTestFilePath([u'psort_test.out'])\n\n    test_front_end.ParseOptions(options)\n    storage_file = test_front_end.OpenStorageFile()\n\n    self.assertIsInstance(storage_file, storage.StorageFile)\n\n    storage_file.Close()\n\n  def testParseOptions(self):\n    \"\"\"Tests the parse options function.\"\"\"\n    test_front_end = analysis_frontend.AnalysisFrontend(\n        self._input_reader, self._output_writer)\n\n    options = frontend.Options()\n\n    with self.assertRaises(errors.BadConfigOption):\n      test_front_end.ParseOptions(options)\n\n    options.storage_file = self._GetTestFilePath([u'no_such_file.out'])\n\n    with self.assertRaises(errors.BadConfigOption):\n      test_front_end.ParseOptions(options)\n\n    options.storage_file = self._GetTestFilePath([u'psort_test.out'])\n\n    test_front_end.ParseOptions(options)\n\n\nif __name__ == '__main__':\n  
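# standard unittest entry point: run the test cases defined above\n  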
unittest.main()\n","sub_path":"plaso/frontend/analysis_frontend_test.py","file_name":"analysis_frontend_test.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"402638395","text":"import torch\nimport torch.nn as nn\nimport rdkit.Chem as Chem\nimport torch.nn.functional as F\nfrom .nnutils import *\nfrom .chemutils import get_mol\nfrom networkx import Graph, DiGraph, line_graph, convert_node_labels_to_integers\nfrom dgl import DGLGraph, line_graph, batch, unbatch\nimport dgl.function as DGLF\nfrom functools import partial\nfrom .line_profiler_integration import profile\n\nELEM_LIST = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca', 'Fe', 'Al', 'I', 'B', 'K', 'Se', 'Zn', 'H', 'Cu', 'Mn', 'unknown']\n\nATOM_FDIM = len(ELEM_LIST) + 6 + 5 + 4 + 1\nBOND_FDIM = 5 + 6\nMAX_NB = 6\n\ndef onek_encoding_unk(x, allowable_set):\n if x not in allowable_set:\n x = allowable_set[-1]\n return [x == s for s in allowable_set]\n\ndef atom_features(atom):\n return cuda(torch.Tensor(onek_encoding_unk(atom.GetSymbol(), ELEM_LIST) \n + onek_encoding_unk(atom.GetDegree(), [0,1,2,3,4,5]) \n + onek_encoding_unk(atom.GetFormalCharge(), [-1,-2,1,2,0])\n + onek_encoding_unk(int(atom.GetChiralTag()), [0,1,2,3])\n + [atom.GetIsAromatic()]))\n\ndef bond_features(bond):\n bt = bond.GetBondType()\n stereo = int(bond.GetStereo())\n fbond = [bt == Chem.rdchem.BondType.SINGLE, bt == Chem.rdchem.BondType.DOUBLE, bt == Chem.rdchem.BondType.TRIPLE, bt == Chem.rdchem.BondType.AROMATIC, bond.IsInRing()]\n fstereo = onek_encoding_unk(stereo, [0,1,2,3,4,5])\n return cuda(torch.Tensor(fbond + fstereo))\n\n@profile\ndef mol2graph(mol_batch):\n padding = cuda(torch.zeros(ATOM_FDIM + BOND_FDIM))\n fatoms,fbonds = [],[padding] #Ensure bond is 1-indexed\n in_bonds,all_bonds = [],[(-1,-1)] #Ensure bond is 1-indexed\n scope = []\n total_atoms = 0\n\n for smiles in mol_batch:\n mol = get_mol(smiles)\n #mol = Chem.MolFromSmiles(smiles)\n n_atoms = mol.GetNumAtoms()\n for atom in mol.GetAtoms():\n fatoms.append( atom_features(atom) )\n in_bonds.append([])\n\n for bond in mol.GetBonds():\n a1 = bond.GetBeginAtom()\n a2 = bond.GetEndAtom()\n x = a1.GetIdx() + total_atoms\n y = a2.GetIdx() + total_atoms\n\n b = len(all_bonds) \n all_bonds.append((x,y))\n fbonds.append( torch.cat([fatoms[x], bond_features(bond)], 0) )\n in_bonds[y].append(b)\n\n b = len(all_bonds)\n all_bonds.append((y,x))\n fbonds.append( torch.cat([fatoms[y], bond_features(bond)], 0) )\n in_bonds[x].append(b)\n \n scope.append((total_atoms,n_atoms))\n total_atoms += n_atoms\n\n total_bonds = len(all_bonds)\n fatoms = torch.stack(fatoms, 0)\n fbonds = torch.stack(fbonds, 0)\n agraph = torch.zeros(total_atoms,MAX_NB).long()\n bgraph = torch.zeros(total_bonds,MAX_NB).long()\n\n for a in range(total_atoms):\n for i,b in enumerate(in_bonds[a]):\n agraph[a,i] = b\n\n for b1 in range(1, total_bonds):\n x,y = all_bonds[b1]\n for i,b2 in enumerate(in_bonds[x]):\n if all_bonds[b2][0] != y:\n bgraph[b1,i] = b2\n\n return fatoms, fbonds, cuda(agraph), cuda(bgraph), scope\n\n@profile\ndef mol2dgl(smiles_batch):\n n_nodes = 0\n graph_list = []\n for smiles in smiles_batch:\n atom_feature_list = []\n bond_feature_list = []\n bond_source_feature_list = []\n graph = DGLGraph()\n mol = get_mol(smiles)\n for atom in mol.GetAtoms():\n graph.add_node(atom.GetIdx())\n atom_feature_list.append(atom_features(atom))\n for bond in mol.GetBonds():\n begin_idx = bond.GetBeginAtom().GetIdx()\n 
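# each bond contributes two directed edges (forward and reverse), so its features are appended twice below\n            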
end_idx = bond.GetEndAtom().GetIdx()\n features = bond_features(bond)\n graph.add_edge(begin_idx, end_idx)\n bond_feature_list.append(features)\n # set up the reverse direction\n graph.add_edge(end_idx, begin_idx)\n bond_feature_list.append(features)\n\n atom_x = torch.stack(atom_feature_list)\n graph.set_n_repr({'x': atom_x})\n if len(bond_feature_list) > 0:\n bond_x = torch.stack(bond_feature_list)\n graph.set_e_repr({\n 'x': bond_x,\n 'src_x': atom_x.new(len(bond_feature_list), ATOM_FDIM).zero_()\n })\n graph_list.append(graph)\n\n return graph_list\n\n\nclass MPN(nn.Module):\n\n def __init__(self, hidden_size, depth):\n super(MPN, self).__init__()\n self.hidden_size = hidden_size\n self.depth = depth\n\n self.W_i = nn.Linear(ATOM_FDIM + BOND_FDIM, hidden_size, bias=False)\n self.W_h = nn.Linear(hidden_size, hidden_size, bias=False)\n self.W_o = nn.Linear(ATOM_FDIM + hidden_size, hidden_size)\n\n @profile\n def forward(self, mol_graph):\n fatoms,fbonds,agraph,bgraph,scope = mol_graph\n fatoms = create_var(fatoms)\n fbonds = create_var(fbonds)\n agraph = create_var(agraph)\n bgraph = create_var(bgraph)\n\n binput = self.W_i(fbonds)\n message = nn.ReLU()(binput)\n\n for i in range(self.depth - 1):\n nei_message = index_select_ND(message, 0, bgraph)\n nei_message = nei_message.sum(dim=1)\n nei_message = self.W_h(nei_message)\n message = nn.ReLU()(binput + nei_message)\n\n nei_message = index_select_ND(message, 0, agraph)\n nei_message = nei_message.sum(dim=1)\n ainput = torch.cat([fatoms, nei_message], dim=1)\n atom_hiddens = nn.ReLU()(self.W_o(ainput))\n \n mol_vecs = []\n for st,le in scope:\n mol_vec = atom_hiddens.narrow(0, st, le).sum(dim=0) / le\n mol_vecs.append(mol_vec)\n\n mol_vecs = torch.stack(mol_vecs, dim=0)\n return mol_vecs\n\n# TODO: use SPMV\n#def mpn_loopy_bp_msg(src, edge):\n# return src['msg']\nmpn_loopy_bp_msg = DGLF.copy_src(src='msg', out='msg')\n\n\n#def mpn_loopy_bp_reduce(node, msgs):\n# return {'accum_msg': torch.sum(msgs, 1)}\nmpn_loopy_bp_reduce = DGLF.sum(msgs='msg', out='accum_msg')\n\n\nclass LoopyBPUpdate(nn.Module):\n def __init__(self, hidden_size):\n super(LoopyBPUpdate, self).__init__()\n self.hidden_size = hidden_size\n\n self.W_h = nn.Linear(hidden_size, hidden_size, bias=False)\n\n def forward(self, node):\n msg_input = node['msg_input']\n msg_delta = self.W_h(node['accum_msg'])\n msg = F.relu(msg_input + msg_delta)\n return {'msg': msg}\n\n\n# TODO: can we use SPMV?\n#def mpn_gather_msg(src, edge):\n# return edge['msg']\nmpn_gather_msg = DGLF.copy_edge(edge='msg', out='msg')\n\n\n#def mpn_gather_reduce(node, msgs):\n# return {'m': torch.sum(msgs, 1)}\nmpn_gather_reduce = DGLF.sum(msgs='msg', out='m')\n\n\nclass GatherUpdate(nn.Module):\n def __init__(self, hidden_size):\n super(GatherUpdate, self).__init__()\n self.hidden_size = hidden_size\n\n self.W_o = nn.Linear(ATOM_FDIM + hidden_size, hidden_size)\n\n def forward(self, node):\n m = node['m']\n return {\n 'h': F.relu(self.W_o(torch.cat([node['x'], m], 1))),\n }\n\n\nclass DGLMPN(nn.Module):\n def __init__(self, hidden_size, depth):\n super(DGLMPN, self).__init__()\n\n self.depth = depth\n\n self.W_i = nn.Linear(ATOM_FDIM + BOND_FDIM, hidden_size, bias=False)\n\n self.loopy_bp_updater = LoopyBPUpdate(hidden_size)\n self.gather_updater = GatherUpdate(hidden_size)\n self.hidden_size = hidden_size\n\n self.n_samples_total = 0\n self.n_nodes_total = 0\n self.n_edges_total = 0\n self.n_passes = 0\n\n @profile\n def forward(self, mol_graph_list):\n n_samples = len(mol_graph_list)\n\n mol_graph = 
batch(mol_graph_list)\n mol_line_graph = line_graph(mol_graph, no_backtracking=True)\n\n n_nodes = len(mol_graph.nodes)\n n_edges = len(mol_graph.edges)\n\n mol_graph = self.run(mol_graph, mol_line_graph)\n mol_graph_list = unbatch(mol_graph)\n g_repr = torch.stack([g.get_n_repr()['h'].mean(0) for g in mol_graph_list], 0)\n\n self.n_samples_total += n_samples\n self.n_nodes_total += n_nodes\n self.n_edges_total += n_edges\n self.n_passes += 1\n\n return g_repr\n\n @profile\n def run(self, mol_graph, mol_line_graph):\n n_nodes = len(mol_graph.nodes)\n\n mol_graph.update_edge(\n #*zip(*mol_graph.edge_list),\n edge_func=lambda src, dst, edge: {'src_x': src['x']},\n batchable=True,\n )\n\n bond_features = mol_line_graph.get_n_repr()['x']\n source_features = mol_line_graph.get_n_repr()['src_x']\n\n features = torch.cat([source_features, bond_features], 1)\n msg_input = self.W_i(features)\n mol_line_graph.set_n_repr({\n 'msg_input': msg_input,\n 'msg': F.relu(msg_input),\n 'accum_msg': torch.zeros_like(msg_input),\n })\n mol_graph.set_n_repr({\n 'm': bond_features.new(n_nodes, self.hidden_size).zero_(),\n 'h': bond_features.new(n_nodes, self.hidden_size).zero_(),\n })\n\n for i in range(self.depth - 1):\n mol_line_graph.update_all(\n mpn_loopy_bp_msg,\n mpn_loopy_bp_reduce,\n self.loopy_bp_updater,\n True\n )\n\n mol_graph.update_all(\n mpn_gather_msg,\n mpn_gather_reduce,\n self.gather_updater,\n True\n )\n\n return mol_graph\n","sub_path":"jtnn/mpn.py","file_name":"mpn.py","file_ext":"py","file_size_in_byte":9479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"268712751","text":"import math\nfrom opentrons.protocol_api.labware import OutOfTipsError\nfrom opentrons import types\n\nmetadata = {\n 'protocolName': '''NEBNext Quarter Volume Library Prep Step 8:\n Sample Clean Up''',\n 'author': 'Steve Plonk <protocols@opentrons.com>',\n 'apiLevel': '2.11'\n}\n\n\ndef run(ctx):\n\n # get parameter values from json above\n [sample_count, labware_plates, engage_height, engage_time, dry_time,\n offset_x] = get_values( # noqa: F821\n 'sample_count', 'labware_plates', 'engage_height',\n 'engage_time', 'dry_time', 'offset_x')\n\n ctx.set_rail_lights(True)\n ctx.delay(seconds=10)\n\n if sample_count < 48 or sample_count > 96:\n raise Exception('Number of samples must be 48-96')\n\n # tips, p20 multi, p300 multi\n tips20 = [\n ctx.load_labware(\"opentrons_96_filtertiprack_20ul\", str(\n slot)) for slot in [10, 11]]\n tips300 = [\n ctx.load_labware(\"opentrons_96_filtertiprack_200ul\", str(\n slot)) for slot in [2, 4, 7]]\n p20m = ctx.load_instrument(\n \"p20_multi_gen2\", 'left', tip_racks=tips20)\n p300m = ctx.load_instrument(\n \"p300_multi_gen2\", 'right', tip_racks=tips300)\n\n num_cols = math.ceil(sample_count / 8)\n\n # temperature module empty\n ctx.load_module('Temperature Module', '1')\n\n mag = ctx.load_module('magnetic module gen2', '9')\n mag_plate = mag.load_labware(labware_plates, 'Library Prep Plate (96xA)')\n mag.disengage()\n\n reservoir = ctx.load_labware(\n 'nest_12_reservoir_15ml', '5', '12-well Reservoir')\n [beads, elutionbf] = [\n reservoir.wells_by_name()[name] for name in ['A1', 'A10']]\n deadvol_reservoir_1 = 1800\n\n etoh = ctx.load_labware(\n 'agilent_1_reservoir_290ml', '6', '80 Percent Ethanol').wells()[0]\n deadvol_reservoir_2 = 10000\n\n waste = ctx.load_labware(\n 'agilent_1_reservoir_290ml', '8', 'Waste Reservoir').wells()[0]\n\n output_plate = ctx.load_labware(labware_plates, '3', 'Output Plate')\n\n beads.liq_vol = 
num_cols*8*21.6 + deadvol_reservoir_1\n elutionbf.liq_vol = num_cols*8*20 + deadvol_reservoir_1\n etoh.liq_vol = num_cols*8*200 + deadvol_reservoir_2\n\n # alert user to reagent volumes needed\n ctx.comment(\n \"\\n***\\nEnsure reagents in sufficient volume are present on deck\\n***\\n\")\n for volume, units, reagent, location in zip([round(\n beads.liq_vol / 1000, 1),\n math.ceil(elutionbf.liq_vol / 1000),\n math.ceil(etoh.liq_vol / 1000)],\n ['mL', 'mL', 'mL'],\n ['beads', 'elution buffer', 'etoh'],\n [beads, elutionbf, etoh]):\n ctx.comment(\n \"\\n***\\n{0} {1} {2} in {3}\\n***\\n\".format(\n str(volume), units, reagent.upper(), location))\n\n # notify user to replenish tips\n def pick_up_or_refill(pip):\n try:\n pip.pick_up_tip()\n except OutOfTipsError:\n ctx.pause(\n \"\"\"\\n***\\nPlease Refill the {} Tip Boxes\n and Empty the Tip Waste\\n***\\n\"\"\".format(pip))\n pip.reset_tipracks()\n pip.pick_up_tip()\n\n # return liquid height in a well\n def liq_height(well, effective_diameter=None):\n if well.diameter:\n if effective_diameter:\n radius = effective_diameter / 2\n else:\n radius = well.diameter / 2\n csa = math.pi*(radius**2)\n else:\n csa = well.length*well.width\n return well.liq_vol / csa\n\n # apply speed limit to departing tip\n def slow_tip_withdrawal(pipette, well_location, to_center=False):\n if pipette.mount == 'right':\n axis = 'A'\n else:\n axis = 'Z'\n ctx.max_speeds[axis] = 10\n if to_center is False:\n pipette.move_to(well_location.top())\n else:\n pipette.move_to(well_location.center())\n ctx.max_speeds[axis] = None\n\n ctx.comment(\"\\n***\\nStep - add beads to sample and mix\\n***\\n\")\n\n for index, column in enumerate(mag_plate.columns()[:num_cols]):\n\n pick_up_or_refill(p300m)\n\n ht_premix = liq_height(beads) if liq_height(beads) > 1 else 1\n\n # bead premix - aspirate 2 mm, dispense at top of liquid\n if not index % 2: # sets frequency of premixing\n ctx.comment(\"\\n***\\nStep - pre-mixing beads\\n***\\n\")\n for rep in range(5):\n p300m.aspirate(\n 200, beads.bottom(2), rate=0.5)\n p300m.dispense(200, beads.bottom(ht_premix), rate=0.5)\n\n # aspirate beads\n p300m.aspirate(\n 21.6, beads.bottom(1), rate=0.5)\n ctx.delay(seconds=1)\n slow_tip_withdrawal(p300m, beads)\n\n # reservoir tip touch\n p300m.move_to(\n beads.top(-2).move(types.Point(x=beads.length / 2, y=0, z=0)))\n p300m.move_to(beads.top())\n\n # dispense beads\n p300m.dispense(21.6, column[0].bottom(2))\n\n # mix\n p300m.mix(10, 40, column[0].bottom(2))\n\n # tip touch and blowout\n p300m.move_to(\n column[0].top(-2).move(types.Point(\n x=column[0].diameter / 2, y=0, z=0)))\n p300m.blow_out()\n p300m.move_to(column[0].top())\n\n p300m.drop_tip()\n\n ctx.comment(\"\\n***\\nStep - incubate 5 minutes\\n***\\n\")\n\n ctx.delay(minutes=5)\n\n ctx.comment(\"\\n***\\nStep - engage magnets and wait\\n***\\n\")\n\n mag.engage(height_from_base=engage_height)\n ctx.delay(minutes=engage_time)\n\n ctx.comment(\"\\n***\\nStep - discard supernatant\\n***\\n\")\n\n for index, column in enumerate(mag_plate.columns()[:num_cols]):\n pick_up_or_refill(p300m)\n\n # pre air gap\n p300m.move_to(column[0].top())\n p300m.air_gap(20)\n\n # take most liquid with tip at 4 mm and slow flow rate\n p300m.aspirate(90, column[0].bottom(4), rate=0.33)\n\n # take remaining with tip at 1 mm and offset_x mm to side\n p300m.aspirate(\n 90, column[0].bottom(1).move(types.Point(\n x={True: -1}.get(not index % 2, 1)*offset_x, y=0, z=0)), rate=0.33)\n\n # top dispense liquid plus air at fast flow rate\n p300m.dispense(200, 
waste.top(), rate=2)\n\n # delayed blow out\n ctx.delay(seconds=1)\n p300m.blow_out()\n\n p300m.drop_tip()\n\n ctx.comment(\"\\n***\\nStep - wash twice with 80 percent ethanol\\n***\\n\")\n\n for repeat in range(2):\n\n pick_up_or_refill(p300m)\n for column in mag_plate.columns()[:num_cols]:\n\n # increment etoh volume downward for each aspiration\n etoh.liq_vol -= 800\n\n # height of top of etoh\n ht = liq_height(etoh) - 3 if liq_height(etoh) - 3 > 1 else 1\n\n # at ht mm - avoid overimmersion, avoid ridge in reservoir bottom\n p300m.aspirate(\n 100, etoh.bottom(ht).move(types.Point(x=4.5, y=0, z=0)))\n p300m.air_gap(20) # post air gap\n\n # etoh top dispense with delayed blow out\n p300m.dispense(120, column[0].top())\n ctx.delay(seconds=0.5)\n p300m.blow_out()\n\n # post-dispense air gap to avoid drips\n p300m.air_gap(20)\n\n p300m.drop_tip()\n\n ctx.delay(seconds=30)\n\n # remove sup\n for index, column in enumerate(mag_plate.columns()[:num_cols]):\n\n pick_up_or_refill(p300m)\n\n # aspiration location offset to side to avoid bead pellet\n loc = column[0].bottom(1).move(types.Point(x={True: -1}.get(\n not index % 2, 1)*offset_x, y=0, z=0))\n\n # take most liquid with tip at 4 mm, slow flow rate\n p300m.aspirate(100, column[0].bottom(4), rate=0.2)\n\n # take remaining at 1 mm, avoid beads, slow flow rate, post air gap\n p300m.aspirate(60, loc, rate=0.2)\n p300m.air_gap(20)\n\n # top dispense to waste with delayed blowout\n p300m.dispense(180, waste.top())\n ctx.delay(seconds=0.5)\n p300m.blow_out()\n\n # post-dispense air gap to avoid drips\n p300m.air_gap(20)\n\n # to improve completeness of removal\n if repeat:\n p300m.move_to(column[0].top())\n for clearance in [0.7, 0.4, 0.2, 0]:\n loc = column[0].bottom(clearance).move(types.Point(\n x={True: -1}.get(not index % 2, 1)*offset_x, y=0, z=0))\n p300m.aspirate(25, loc)\n\n p300m.drop_tip()\n\n ctx.comment(\"\\n***\\nStep - wait for beads to dry\\n***\\n\")\n\n ctx.delay(minutes=dry_time)\n\n ctx.comment(\"\\n***\\nStep - resuspend beads in elution buffer\\n***\\n\")\n\n mag.disengage()\n\n for index, column in enumerate(mag_plate.columns()[:num_cols]):\n pick_up_or_refill(p20m)\n p20m.aspirate(20, elutionbf.bottom(1))\n\n # location targeting bead pellet for resuspension\n loc = column[0].bottom(1).move(types.Point(\n x={True: 1}.get(not index % 2, -1)*offset_x, y=0, z=0))\n\n p20m.dispense(20, loc, rate=3)\n\n # mix with dispenses targeting bead pellet\n for rep in range(10):\n p20m.aspirate(16, column[0].bottom(1))\n rt = 3 if rep < 9 else 0.5\n p20m.dispense(16, loc, rate=rt)\n\n # wait, depart slowly, tip touch and blowout after final mix\n if rep == 9:\n ctx.delay(seconds=1)\n slow_tip_withdrawal(p20m, column[0])\n p20m.move_to(\n column[0].top(-2).move(types.Point(\n x=column[0].diameter / 2, y=0, z=0)))\n p20m.blow_out()\n p20m.move_to(column[0].top())\n\n p20m.drop_tip()\n\n ctx.comment(\"\\n***\\nStep - incubate 2 minutes\\n***\\n\")\n\n ctx.delay(minutes=2)\n\n ctx.comment(\"\\n***\\nStep - engage magnets and wait\\n***\\n\")\n\n mag.engage(height_from_base=engage_height)\n ctx.delay(minutes=engage_time)\n\n ctx.comment(\"\\n***\\nStep - transfer eluate to output plate\\n***\\n\")\n\n p20m.flow_rate.aspirate = 3.5\n\n for index, column in enumerate(mag_plate.columns()[:num_cols]):\n p20m.pick_up_tip()\n\n loc_asp = column[0].bottom(1).move(types.Point(\n x={True: -1}.get(not index % 2, 1)*offset_x, y=0, z=0))\n\n p20m.aspirate(18, loc_asp)\n\n p20m.move_to(loc_asp.move(types.Point(x=0, y=0, z=1)))\n ctx.delay(seconds=1)\n\n 
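# dispense the eluate into the matching column of the output plate\n        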
p20m.dispense(18, output_plate.columns()[index][0].bottom(1))\n\n p20m.blow_out()\n p20m.drop_tip()\n\n p20m.flow_rate.aspirate = 7.6\n\n mag.disengage()\n\n ctx.comment(\"\\n***\\nFinished\\n***\\n\")\n","sub_path":"protocols/11bb6a-part-8/11bb6a-part-8.ot2.apiv2.py","file_name":"11bb6a-part-8.ot2.apiv2.py","file_ext":"py","file_size_in_byte":10491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"248615191","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom ckeditor.fields import RichTextField\n\n\n\n#KILWOO\nclass Introduction(models.Model):\n text = RichTextField()\n image = models.ImageField(null=True, blank=True, upload_to=\"images/\")\n location = RichTextField(blank=True)\n\n\n#PRODUCTS\nclass Team(models.Model):\n #team number\n title = models.CharField(max_length=50)\n image = models.ImageField(null=True, blank=True, upload_to=\"images/\")\n\nclass Subcategory(models.Model):\n name = models.CharField(max_length=30)\n companyimage = models.ImageField(null=True, blank=True, upload_to=\"images/\")\n image = models.ImageField(null=True, blank=True, upload_to=\"images/\")\n caption = RichTextField()\n team = models.ForeignKey(Team, on_delete=models.CASCADE)\n\n\n\n#SERVICES\nclass Services(models.Model):\n\n #service name\n title = models.CharField(max_length=50)\n\n heading = models.TextField(max_length=50)\n caption = RichTextField()\n image = models.ImageField(null=True, blank=True, upload_to=\"images/\")\n","sub_path":"kilwoo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"109759949","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]\n# Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\retail\\retail_balance_transfer_dialog.py\n# Compiled at: 2016-04-08 00:46:58\n# Size of source mod 2**32: 5372 bytes\nfrom protocolbuffers import Dialog_pb2, DistributorOps_pb2\nfrom distributor.ops import GenericProtocolBufferOp\nfrom distributor.rollback import ProtocolBufferRollback\nfrom distributor.system import Distributor\nfrom sims4.localization import TunableLocalizedString, LocalizationHelperTuning\nimport services, sims4.log\nlogger = sims4.log.Logger('Business', default_owner='trevor')\n\nclass FundsTransferDialog:\n PLAYER_HOUSEHOLD_TITLE = TunableLocalizedString(description='\\n This is the text that will show for the players home lot. 
Typically,\\n the lot name would show but the home lot should say something along the\\n lines of \"Player Household\" to avoid confusion.\\n ')\n\n @classmethod\n def show_dialog(cls, first_time_buyer=False):\n business_managers = services.business_service().get_business_managers_for_household()\n if not business_managers:\n logger.error('Trying to show the balance transfer dialog but failed to find any owned businesses for the active household.')\n return False\n active_household = services.active_household()\n current_zone_id = services.current_zone_id()\n current_business_manager = business_managers.get(current_zone_id, None)\n balance_transfer_msg = Dialog_pb2.BalanceTransferDialog()\n balance_transfer_msg.transfer_amount = min(active_household.funds.money, current_business_manager.tuning_data.initial_funds_transfer_amount) if first_time_buyer else 0\n if first_time_buyer or current_business_manager is None:\n cls._add_household(balance_transfer_msg, active_household)\n cls._try_add_current_business_lot(balance_transfer_msg, business_managers, current_zone_id)\n else:\n cls._try_add_current_business_lot(balance_transfer_msg, business_managers, current_zone_id)\n cls._add_household(balance_transfer_msg, active_household)\n for zone_id, business_manager in business_managers.items():\n if zone_id == current_zone_id:\n continue\n zone_data = services.get_persistence_service().get_zone_proto_buff(zone_id)\n if zone_data is None:\n logger.error(\"Business tracker thinks a zone exists that doesn't. Zone id:{}\", zone_id)\n continue\n with ProtocolBufferRollback(balance_transfer_msg.lot_data) as (lot_data):\n lot_data.lot_name = LocalizationHelperTuning.get_raw_text(zone_data.name)\n lot_data.zone_id = zone_id\n lot_data.balance = business_manager.funds.money\n\n transfer_op = GenericProtocolBufferOp(DistributorOps_pb2.Operation.RETAIL_BALANCE_TRANSFER_DIALOG, balance_transfer_msg)\n Distributor.instance().add_op_with_no_owner(transfer_op)\n\n @classmethod\n def _add_household(cls, balance_transfer_msg, active_household):\n home_lot_data = balance_transfer_msg.lot_data.add()\n home_lot_data.lot_name = cls.PLAYER_HOUSEHOLD_TITLE\n home_lot_data.zone_id = active_household.home_zone_id\n home_lot_data.balance = active_household.funds.money\n\n @classmethod\n def _try_add_current_business_lot(cls, balance_transfer_msg, business_managers, current_zone_id):\n business_manager = business_managers.get(current_zone_id, None)\n if business_manager is not None:\n business_data = balance_transfer_msg.lot_data.add()\n business_data.lot_name = business_manager.tuning_data.current_business_lot_transfer_dialog_entry\n business_data.zone_id = current_zone_id\n business_data.balance = business_manager.funds.money","sub_path":"Scripts/simulation/retail/retail_balance_transfer_dialog.py","file_name":"retail_balance_transfer_dialog.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"184473261","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask, flash, redirect, render_template, \\\n request, session, url_for, g\nfrom functools import wraps\nimport sqlite3\nfrom forms import AddTaskForm\n\napp = Flask(__name__)\napp.config.from_object('config')\n\ndef connect_db():\n return sqlite3.connect(app.config['DATABASE'])\n\n# Functools is a module used for extending the capabilities of functions with other\n# functions, which is exactly what decorators accomplish.\ndef login_required(test):\n @wraps(test)\n def 
wrap(*args, **kwargs):\n if 'logged_in' in session:\n return test(*args, **kwargs)\n else:\n flash('You need to login first.')\n return redirect(url_for('login'))\n return wrap\n# This tests to see if logged_in is in the session. If it is, then we call\n# the appropriate function (e.g., the function that the decorator is\n# applied to), and if not, the user is redirected back to the login screen\n# with a message stating that a login is required. Add the decorator to the\n# top of the main() function:\n\n@app.route('/logout')\ndef logout():\n session.pop('logged_in', None)\n flash('You were logged out')\n return redirect(url_for('login'))\n# The logout() function uses the pop() method to reset the session key to the\n# default value when the user logs out. The user is then redirected back to\n# the login screen and a message is flashed indicating that they were logged out.\n\n# sessions and login required decorators\n# Now that users are able to login and logout, we need to protect main.html from\n# unauthorized access.\n#\n# To prevent unauthorized access to main.html ,\n# we need to set up sessions, as well as utilize the login_required decorator.\n# Sessions store user information in a secure manner, usually as a token,\n# within a cookie. In this case, when the session key, logged_in , is set to True ,\n# the user has the rights to view the main.html page.\n#\n# The login_required decorator, meanwhile, checks to make sure that a user is\n# authorized (e.g.,logged_in ) before allowing access to certain pages.\n\n@app.route('/', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n if request.form['username'] != app.config['USERNAME'] or \\\n request.form['password'] != app.config['PASSWORD']:\n error = 'Invalid Credentials. Try again.'\n return render_template('login.html', error=error)\n else:\n session['logged_in'] = True\n return redirect(url_for('tasks'))\n if request.method == 'GET':\n return render_template('login.html')\n# This function compares the username and password entered against those from\n# the configuration section. If the correct username and password are entered,\n# the user is redirected to the main page and the session key, logged_in ,\n# is set to True . If the wrong information is entered, an error message\n# is flashed to the user.\n# The url_for() function generates an endpoint for the provided method.\n\n# Making the task view. 
On the tasks.html page, the user will have full CRUD\n# access: the ability to delete tasks (delete) and mark tasks as complete\n# (update), rather than just being able to add new tasks (create) to the\n# database table and view (read) such tasks.\n@app.route('/tasks')\n@login_required\ndef tasks():\n    g.db = connect_db()\n    cur = g.db.execute(\n        'select name, due_date, priority, task_id from tasks where status=1'\n    )\n\n    open_tasks = [dict(name=row[0], due_date=row[1], priority=row[2],\n                       task_id=row[3]) for row in cur.fetchall()]\n    cur = g.db.execute(\n        'select name, due_date, priority, task_id from tasks where status=0'\n    )\n\n    closed_tasks = [dict(name=row[0], due_date=row[1], priority=row[2],\n                         task_id=row[3]) for row in cur.fetchall()]\n    g.db.close()\n    return render_template(\n        'tasks.html',\n        form=AddTaskForm(request.form),\n        open_tasks=open_tasks,\n        closed_tasks=closed_tasks\n    )\n# make sure to add g to the imports\n# We’re querying the database for open and closed tasks, assigning the\n# results to two variables, open_tasks and closed_tasks, and then\n# passing those variables to the tasks.html page. These variables will\n# then be used to populate the open and closed task lists, respectively.\n# Make sense? Also, you may have noticed this line:\n# form=AddTaskForm(request.form). AddTaskForm() will be the name of\n# a form used to, well, add tasks. It has not been created yet.\n\n# Add, Update, and Delete Tasks\n@app.route('/add/', methods=['POST'])\n@login_required\ndef new_task():\n    g.db = connect_db()\n    name = request.form['name']\n    date = request.form['due_date']\n    priority = request.form['priority']\n    if not name or not date or not priority:\n        flash(\"All fields are required. Please try again.\")\n        return redirect(url_for('tasks'))\n    else:\n        g.db.execute('insert into tasks (name, due_date, priority, '\n                     'status) values (?, ?, ?, 1)',\n                     [request.form['name'], request.form['due_date'],\n                      request.form['priority']])\n        g.db.commit()\n        g.db.close()\n        flash('New entry was successfully posted. Thanks.')\n        return redirect(url_for('tasks'))\n\n# Mark tasks as complete:\n@app.route('/complete/<int:task_id>/')\n@login_required\ndef complete(task_id):\n    g.db = connect_db()\n    g.db.execute(\n        'update tasks set status = 0 where task_id='+str(task_id)\n    )\n    g.db.commit()\n    g.db.close()\n    flash('The task was marked as complete.')\n    return redirect(url_for('tasks'))\n\n# Delete Tasks:\n@app.route('/delete/<int:task_id>/')\n@login_required\ndef delete_entry(task_id):\n    g.db = connect_db()\n    g.db.execute('delete from tasks where task_id='+str(task_id))\n    g.db.commit()\n    g.db.close()\n    flash('The task was deleted.')\n    return redirect(url_for('tasks'))\n\n\"\"\"\n1. The last two functions pass in a variable parameter, task_id, from the tasks.html page\n(which we will create next). This variable is equal to the unique task_id field in the\ndatabase. A query is then performed and the appropriate action takes place. In this\ncase, an action means either marking a task as complete or deleting a task. 
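(As an aside, sqlite3 also supports parameterized queries, e.g.\ng.db.execute('delete from tasks where task_id=?', [task_id]), which would avoid the string\nconcatenation used above.) 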
Notice how\nwe have to convert the task_id variable to a string, since we are using concatenation\nto combine the SQL query to the task_id , which is an integer.\n\n\"\"\"","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"577734224","text":"#!/usr/bin/env python\n\nfrom optparse import OptionParser\nimport signal\nimport sys\nimport logging\n\nfrom meerkat_backend_interface.katportal_server import BLKATPortalClient\nfrom meerkat_backend_interface.logger import log, set_logger\n\ndef cli(prog = sys.argv[0]):\n usage = 'usage: %prog [options]'\n parser = OptionParser(usage = usage)\n parser.add_option('-c', '--config', type = str,\n help = 'Config filename (yaml)', default = 'config.yml')\n (opts, args) = parser.parse_args()\n main(config = opts.config)\n\ndef on_shutdown():\n log.info(\"Shutting Down Katportal Clients\")\n sys.exit()\n\ndef main(config):\n log = set_logger(log_level = logging.DEBUG)\n log.info(\"Starting Katportal Client\")\n client = BLKATPortalClient(config)\n signal.signal(signal.SIGINT, lambda sig, frame: on_shutdown())\n client.start()\n\nif __name__ == '__main__':\n cli()\n","sub_path":"katportal_start.py","file_name":"katportal_start.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"123510689","text":"# -*- coding: utf-8 -*-\nfrom qc_service.utils import parse_family\n\n\ndef test_parse_family_exomes():\n output_dir = (\"tests/fixtures/analysis/cust002/F0002732/analysis/\"\n \"exomes/F0002732\")\n samples = list(parse_family(output_dir))\n assert len(samples) == 1\n assert samples[0]['sample_id'] == 'ADM1011A1'\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"268791029","text":"from os import listdir\nfrom os.path import isfile, join\nimport discord\nfrom discord.ext import commands\nfrom manager import Manager\nimport config\n\n\nclass IngoBot(commands.Bot):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n print(\"Loading database..\")\n self.db = Manager()\n\n print(\"Loading extensions..\")\n cogs = [f.replace('.py', '') for f in listdir(config.COGS_DIR) if isfile(join(config.COGS_DIR, f))]\n for extension in cogs:\n try:\n self.load_extension(config.COGS_DIR + \".\" + extension)\n print(\"Loaded\", extension)\n except Exception as e:\n exc = '{}: {}'.format(type(e).__name__, e)\n print('Failed to load extension {}\\n{}'.format(extension, exc)) \n\n \n async def on_ready(self):\n print('Bot ready.')\n\n\nbot = IngoBot(command_prefix=config.DISCORD_PREFIX)\nbot.run(config.DISCORD_TOKEN)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"610505844","text":"\n\nfrom xai.brain.wordbase.nouns._dialect import _DIALECT\n\n#calss header\nclass _DIALECTS(_DIALECT, ):\n\tdef __init__(self,): \n\t\t_DIALECT.__init__(self)\n\t\tself.name = \"DIALECTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"dialect\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_dialects.py","file_name":"_dialects.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"602915","text":"\"\"\"\nobjects for interacting with dancohenio api\n\"\"\"\n#pylint: disable=no-self-use\nimport os\nfrom datetime import datetime\nimport json\nfrom functools import wraps\n\nimport pytz\nimport requests\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import AppRegistryNotReady\n\nfrom dancohenio.settings import (\n BORIS_FOLDER, BORIS_CONF, BORIS_DB, BORIS_POST_DIR, BORIS_QUEUE_DIR\n)\n\n\ndef build_config():\n \"\"\"\n Set up the local .boris config files.\n \"\"\"\n if not os.path.exists(BORIS_FOLDER):\n os.mkdir(BORIS_FOLDER)\n\n if os.path.exists(BORIS_CONF):\n yn = raw_input('Boris config already exists. Overwrite it? (y/n)')\n if not yn.startswith('y'):\n return None\n fqdn = raw_input('Fully qualified domain name of your site:')\n tz = raw_input('IANA Timezone Code:')\n token = raw_input('token:')\n secret = raw_input('secret:')\n config_data = {\n 'fqdn': fqdn,\n 'tz': tz,\n 'token': token,\n 'secret': secret,\n }\n with open(BORIS_CONF, 'w') as stream:\n json.dump(config_data, stream)\n #os.chmod(BORIS_FOLDER, 700)\n\ndef setup(func):\n \"\"\"\n If the function fails because django is not set up, set up django and\n retry.\n \"\"\"\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n result = func(*args, **kwargs)\n except AppRegistryNotReady:\n import django\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"dancohenio.settings\")\n django.setup()\n result = func(*args, **kwargs)\n return result\n return wrapper\n\nclass Client(object):\n \"\"\"Interacts with the API.\"\"\"\n def __init__(self, rebuild=False):\n \"\"\"Client needs to be told where the website is installed.\"\"\"\n if rebuild or not os.path.exists(BORIS_CONF):\n build_config()\n\n with open(BORIS_CONF, 'r') as stream:\n config_data = json.load(stream)\n\n self.fqdn = config_data['fqdn']\n self.tz = pytz.timezone(config_data['tz'])\n self.token = config_data['token']\n self.secret = config_data['secret']\n self.queue_dir = BORIS_QUEUE_DIR\n self.post_dir = BORIS_POST_DIR\n\n if os.path.exists(BORIS_DB):\n with open(BORIS_DB, 'r') as stream:\n self.db = json.load(stream)\n else:\n self.db = {}\n\n def clear_db(self):\n \"\"\"Delete the current db file.\"\"\"\n if os.path.exists(BORIS_DB):\n os.remove(BORIS_DB)\n\n def _watch_for_mtime(self, directory, extension):\n \"\"\"Watch a directory for.\"\"\"\n i = 0\n for i, filepath in enumerate(find_files(directory, extension)):\n timestamp = datetime.fromtimestamp(os.path.getmtime(filepath))\n timestamp = self.tz.localize(timestamp)\n print('Found %s at %s' % (filepath, timestamp))\n try:\n self.db[filepath]\n except KeyError:\n yield filepath, timestamp\n print('Found %d files in %s ending with %s' % (i + 1, directory, extension))\n\n def watch_for_entries(self, task, directory, extension, persist=True):\n for filepath, timestamp in self._watch_for_mtime(directory, extension):\n filename = os.path.basename(filepath)\n r = self._post_entry(task, timestamp, 1, ref=filename)\n if eval_response(r) and persist:\n self.db[filepath] = timestamp.strftime('%Y-%m-%d %H:%m:%s')\n self._save_db()\n\n def watch_for_posts(self, directory, ext, persist=True):\n for filepath, timestamp in self._watch_for_mtime(directory, ext):\n with open(filepath, 'r') as stream:\n content = stream.read()\n try:\n title, 
content = parse_title(content)\n except AttributeError:\n title, ext = os.path.splitext(os.path.basename(filepath))\n r = self._post_blog(title, content)\n if eval_response(r) and persist:\n self.db[filepath] = str(timestamp)\n self._save_db()\n\n def _save_db(self):\n \"\"\"Stores the current db on the filesystem.\"\"\"\n with open(BORIS_DB, 'w') as stream:\n json.dump(self.db, stream)\n\n @setup\n def _post_entry(self, task_name, date, value, ref=None):\n \"\"\"Posts an entry to the api endpoint.\"\"\"\n payload = {\n 'task_name': task_name,\n 'date': date,\n 'value': value,\n 'token': self.token,\n 'secret': self.secret,\n 'reference': ref\n }\n url = self.fqdn + reverse('api:create_entry')\n response = requests.post(url, data=payload)\n print(response.text)\n return response\n\n @setup\n def _post_blog(self, title, content):\n \"\"\"Receives a new blog entry via api.\"\"\"\n payload = {'title': title, 'content': content}\n url = self.fqdn + reverse('api:create_blog_post')\n return self._post(url, data=payload)\n\n def _post(self, url, data=None):\n if data is None: data = {}\n data['token'] = self.token\n data['secret'] = self.secret\n response = requests.post(url, data=data)\n print(response.text)\n return response\n\ndef eval_response(response):\n \"\"\"Evaluates if the response should be considered successful.\"\"\"\n if '<body>' in response.text:\n text = response.text.split('<body>')[1].split('</body>')[0]\n try:\n response = json.loads(text)\n except ValueError as e:\n raise ValueError('%s:\\n\\n%s\\n\\n' % (e, text))\n else:\n response = response.json()\n \n if 'error' in response.keys():\n raise ApiError(response['error'])\n elif 'success' in response.keys() or 'info' in response.keys():\n return True\n else:\n raise ApiError('An unknown error has occurred.')\n\ndef find_files(directory, ext):\n \"\"\"\n Generator that walks through a directory and returns all the files of a\n certain extension at any depth.\n \"\"\"\n for root, _, files in os.walk(directory):\n for f in files:\n if f.endswith(ext) or ext == '*':\n yield os.path.join(root, f)\n\ndef parse_title(content):\n lines = content.split('\\n')\n if lines[0].startswith('#'):\n title = lines[0].replace('#', '')\n content = '\\n'.join([l for l in lines[1:] if l.strip()])\n elif lines[1].startswith('='):\n title = lines[0]\n content = '\\n'.join([l for l in lines[2:] if l.strip()])\n else:\n raise AttributeError('No title found for: %s' % content)\n return title, content\n\nclass ApiError(Exception):\n pass\n","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"11281872","text":"# 1. 
A number that reads the same from the front as from the back is called a palindrome.\n# The largest palindrome that can be made as the product of two 2-digit numbers is 9009 (= 91 X 99).\n# What is the largest palindrome that can be made as the product of two 3-digit numbers?\n\ndef isPalindrome(a):\n    for i in range(len(a)//2):\n        if a[i] != a[-1-i]:\n            return False\n    return True\n\nmaxNum = 0\nfor i in range(100, 1000):\n    for k in range(100, 1000):\n        if isPalindrome(str(i*k)):\n            if i*k > maxNum:\n                maxNum = i*k\nprint(maxNum)","sub_path":"과제17(190719)/연습문제01.py","file_name":"연습문제01.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"474637572","text":"# add environment variable: export JAVA_HOME=$(/usr/libexec/java_home)\n# reference: https://nogadaworks.tistory.com/193\n# install with: conda install -c conda-forge jpype1\n# pip install konlpy\n# pip install selenium\nimport time\n# Twitter (Okt) module used to analyze the hashtags\n# from konlpy.tag import Twitter\nfrom konlpy.tag import Okt\n# module for driving the Chrome browser\nfrom selenium import webdriver\n# module for page scrolling / key presses\nfrom selenium.webdriver.common.keys import Keys\n\n# url address to crawl\nurl = \"https://www.instagram.com/explore/tags/%EC%86%A1%ED%8C%8C/\"\n# path of the downloaded driver\n# the Chrome driver launches a Chrome browser that we can control\ndriver = webdriver.Chrome('./chromedriver')\n# implicitly wait (up to 3 seconds) for web resources\ndriver.implicitly_wait(3)\n# the Chrome browser starts and navigates to the url\ndriver.get(url)\ntime.sleep(2)\n# find the total number of posts by class name\ntotalCount = driver.find_element_by_class_name('g47SY ').text\nprint(\"Total posts:\", totalCount)\n# find the body tag by tag name\nelem = driver.find_element_by_tag_name(\"body\")\n# declare an empty list to hold the alt attribute values\n# alt_list = []\n# select the first image\ndriver.execute_script(\"document.getElementsByClassName('_9AhH0')[0].click()\")\nhash_data = {}\nnoun_data = {}\nadject_data = {}\ntw = Okt()\n# temporary counter used to step through the posts\npagedowns = 1\n# step through the posts (the loop below runs up to 29 iterations)\ntry:\n    while pagedowns < 30:\n        alt_list = []\n        img = driver.find_elements_by_css_selector('div.C4VMK > span')\n        # for i in img:\n        #     alt_list.append(i.text)\n        # the selector also matches the comments, so keep only the first match (the caption)\n        if len(img) > 0:\n            alt_list.append(img[0].text)\n        pagedowns += 1\n        if pagedowns % 10 == 0:\n            print(\"Progress:\", pagedowns)\n\n        alt_list = list(set(alt_list))\n        for alt in alt_list:\n            # the pos method returns the caption as a list of (value, part-of-speech) tuples\n            temp = tw.pos(alt, norm=True)\n            # iterate over the list\n            for data in temp:\n                # if the part of speech is a hashtag\n                if data[1] == \"Hashtag\":\n                    # if the key is not yet in the result dict, add it with 0, then increment it by 1\n                    if not (data[0] in hash_data):\n                        hash_data[data[0]] = 0\n                    hash_data[data[0]] += 1\n                # if the part of speech is a plain word\n                elif data[1] == \"Noun\":\n                    if not (data[0] in noun_data):\n                        noun_data[data[0]] = 0\n                    noun_data[data[0]] += 1\n                elif data[1] == \"Adjective\":\n                    if not (data[0] in adject_data):\n                        adject_data[data[0]] = 0\n                    adject_data[data[0]] += 1\n\n        elem.send_keys(Keys.RIGHT)\nfinally:\n    # sort the dictionary by count in descending order\n    keys = sorted(hash_data.items(), key=lambda x: x[1], reverse=True)\n    # print the key:value pairs ranked 1-15\n    for k, v in keys[:15]:\n        print(\"{}({})\".format(k, v))\n    print(\"### Noun ###\")\n    keys = sorted(noun_data.items(), key=lambda x: x[1], reverse=True)\n    for k, v in keys[:15]:\n        print(\"{}({})\".format(k, v))\n    print(\"### adject ###\")\n    keys = sorted(adject_data.items(), key=lambda x: x[1], reverse=True)\n    for k, v in keys[:15]:\n        print(\"{}({})\".format(k, v))\n    # close the driver\n    driver.close()\n
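# Illustrative note (hypothetical tokens, not from a real run): with norm=True, a call like\n# tw.pos('#카페 라떼') would return tuples along the lines of [('#카페', 'Hashtag'), ('라떼', 'Noun')],\n# which is why the loop above branches on data[1] == 'Hashtag' / 'Noun' / 'Adjective'.\n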
# To avoid duplicate values, convert the list to a set and back to a list\n# alt_list = list(set(alt_list))\n# empty dict declared to store key:hashtag, value:count\n# hash_data = {}\n# empty dict declared to store key:word, value:count\n# noun_data = {}\n# adject_data = {}\n# Twitter object created to pull only the hashtags out of the alt values (title plus hashtags)\n# tw = Okt()\n# iterate over the contents of alt_list\n# for alt in alt_list:\n# the pos method returns all values of the alt attribute as a list of (value, part-of-speech) tuples\n# temp = tw.pos(alt, norm = True)\n# iterate over the list\n# for data in temp:\n# if the part of speech is a hashtag\n# if data[1] == \"Hashtag\":\n# if the key is not yet in the result dict, add it with 0, then add 1 to the existing key\n# if not (data[0] in hash_data):\n# hash_data[data[0]] = 0\n# hash_data[data[0]] += 1\n# if the part of speech is a plain word\n# elif data[1] == \"Noun\":\n# if not (data[0] in noun_data):\n# noun_data[data[0]] = 0\n# noun_data[data[0]] += 1\n# elif data[1] == \"Adjective\":\n# if not (data[0] in adject_data):\n# adject_data[data[0]] = 0\n# adject_data[data[0]] += 1\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"437003843","text":"import sys\nclass node: \n\n    def __init__(self, info): \n        self.info = info \n        self.prev = None\n        self.next = None \n\nclass LinkedList: \n\n    def __init__(self): \n        self.head = None\n\n\n    def display(self):\n        temp = self.head \n        while (temp): \n            print(temp.info) \n            temp = temp.next\n    \n    def insert_at_beg(self,data):\n        self.temp = node(data)\n        if self.head is None:\n            self.head = self.temp\n            return\n        self.temp.next=self.head\n        self.head.prev=self.temp\n        self.head= self.temp\n    \n    def insert_at_end(self,data):\n        self.temp = node(data)\n        if self.head is None:\n            self.head = self.temp\n            return\n        self.p=self.head\n        while(self.p.next):\n            self.p=self.p.next\n        self.p.next=self.temp\n        self.temp.prev=self.p\n    \n    def insert_after_given_node(self,data,item):\n        self.p=self.head\n        while self.p is not None:\n            if(self.p.info==item):\n                self.temp=node(data)\n                self.temp.prev=self.p\n                self.temp.next=self.p.next\n                if (self.p.next):\n                    self.p.next.prev=self.temp\n                self.p.next=self.temp\n                return\n            self.p=self.p.next\n    def delete(self,data):\n        if self.head is None:\n            print(\"List is empty\")\n            return\n        if self.head.next is None:\n            if self.head.info==data:\n                self.temp=self.head\n                self.head=None\n                return\n            \n            else:\n                print(\"element not found\")\n                return\n        if self.head.info==data:\n            self.temp=self.head\n            self.head=self.head.next\n            self.head.prev=None\n            return\n        self.temp=self.head\n        while self.temp.next is not None:\n            if self.temp.info==data:\n                self.temp.prev.next=self.temp.next\n                self.temp.next.prev=self.temp.prev\n                return\n            self.temp=self.temp.next\n        if(self.temp.info==data):\n            self.temp.prev.next=None\n            return \n        print(\"element not found\")\n    \n    def reverse(self):\n        # guard: reversing an empty list would dereference None below\n        if self.head is None:\n            print(\"List is empty\")\n            return\n        self.p1=self.head\n        self.p2=self.p1.next\n        self.p1.next=None\n        self.p1.prev=self.p2\n        while self.p2 is not None:\n            self.p2.prev=self.p2.next\n            self.p2.next=self.p1\n            self.p1=self.p2\n            self.p2=self.p2.prev\n        self.head=self.p1\n        print(\"List reversed\\n\")\n    \n    \nif __name__=='__main__': \n\n    llist = LinkedList() \n    while(1):\n        print(\"1.Display\\n\")\n        print(\"2.Insert new node at the beginning\\n\")\n        print(\"3.Insert new node at the end\\n\")\n        print(\"4.Insert new node after the given node\\n\")\n        print(\"5.Delete node\\n\")\n        print(\"6.Reverse list\\n\")\n        print(\"7.Quit\\n\\n\")\n        print(\"Enter your choice : \")\n        choice=int(input())\n        if(choice==1):\n            llist.display()\n        elif(choice==2):\n            value=int(input())\n            llist.insert_at_beg(value)\n        elif(choice==3):\n            value=int(input())\n            llist.insert_at_end(value)\n        elif(choice==4):\n            print(\"enter the value\")\n            value=int(input())\n            print(\"Enter the element after which to insert : \")\n            item=int(input())\n            
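# insert the new value right after the first node whose info equals item\n            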
llist.insert_after_given_node(value,item)\n elif(choice==5):\n value=int(input())\n llist.delete(value) \n elif(choice==6):\n llist.reverse() \n else:\n sys.exit(0)\n \n\n","sub_path":"doublylinkedlist.py","file_name":"doublylinkedlist.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"436839489","text":"from rest_framework import serializers\nfrom . import models\nfrom .randomString import generator\nimport re\nimport threading\n\n\nfrom api.scraping import getTitle,getTitle2\n\nfrom .tasks import printHi, printHi2, get_Title\n\n\nclass UrlSerializer(serializers.Serializer):\n original_url = serializers.CharField(max_length=500)\n\n #field level validator\n def validate_original_url(self, value):\n \"\"\"\n Check and update the link format.\n \"\"\"\n if not (value.startswith(\"https://\") or value.startswith(\"http://\")):\n value = \"https://\" + value\n # return value\n # return serializers.ValidationError(\"HI\")\n\n regex = r'^((https?|ftp|smtp):\\/\\/)?(www.)?[a-zA-Z0-9.]+?[a-zA-Z0-9]+\\.[a-z]+(\\/[a-zA-Z0-9#.w-]+\\/?)*$'\n x = re.match(regex, value)\n print (\"url\", x)\n\n # print (\"url\", x)\n\n if x:\n return value\n else:\n raise serializers.ValidationError(\"Invalid URL.\")\n\n\n def create(self, validated_data):\n print ('4')\n print ('url', validated_data['original_url'])\n\n\n #url, created = models.Url.objects.get_or_create(original_url=validated_data['original_url'])\n urls = models.Url.objects.filter(original_url=validated_data['original_url'])\n print (urls)\n print (len(urls))\n print ('5')\n if len(urls) > 0:\n print ('6')\n urls[0].created_count += 1\n urls[0].save()\n return urls[0]\n\n print ('7')\n # url.url_title = getTitle(url).get('title', \"\")\n\n # t = threading.Thread(target=getTitle2, args=[url])\n # t.setDaemon(True)\n # t.start()\n\n\n #printHi.delay()\n\n #printHi2()\n\n\n # print (\"title\", url.url_title)\n print('8')\n unique_id = generator()\n url = models.Url(original_url=validated_data['original_url'], unique_id=unique_id)\n url.save()\n\n get_Title(url.id)\n\n return url\n\n","sub_path":"urlshortener/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"243768998","text":"'''preprocessing'''\n\nfrom __future__ import print_function\n\nimport csv\nimport sys\nimport numpy as np\nimport cPickle as pickle\nimport time\nimport datetime\n\nfrom constants import SECS_PER_DAY, SECS_PER_YEAR\nfrom utils import extract_lat_and_lon\n\n################################################################################\n# New preprocessing\n################################################################################\n\n#---------------------\n# downsampling helpers\n#---------------------\n\ndef downsample(ts, factor):\n '''Takes a time series and returns a downsampling where only one point every\n factor points is kept'''\n if factor < 1:\n raise Exception(\"Downsampling factor must be >= 1\")\n return ts[0:len(ts):factor]\n\ndef downsample_all(tss, factor):\n '''wrapper to downsample a bunch of time series using the above method'''\n new_tss = []\n for ts in tss:\n new_tss.append(downsample(ts, factor))\n return new_tss\n\n#--------------\n# Other helpers\n#--------------\ndef get_col_index(col_titles, title):\n for i, curr_title in enumerate(col_titles):\n if curr_title == title:\n return i\n return -1\n\ndef 
get_unix_ts(ts_str):\n '''converts a string timestamp to a unix timestamp\n string format: 2015-06-19 12:34:42.000 or 2001-07-31 2:37:41'''\n year_str, month_str, rest = ts_str.split('-')\n day_str, rest = rest.split(' ')\n if '.' in rest:\n clock_time, milliseconds = rest.split('.')\n else:\n clock_time = rest\n hour_str, minute_str, second_str = clock_time.split(':')\n\n day = int(day_str)\n month = int(month_str)\n year = int(year_str)\n hour = int(hour_str)\n minute = int(minute_str)\n second = int(second_str)\n\n dt = datetime.datetime(year, month, day, hour, minute, second)\n\n return time.mktime(dt.timetuple())\n\ndef sort_by_time(data):\n '''for each individual in data, sorts the time series by time'''\n for indiv_id in data:\n data[indiv_id].sort(key=lambda x: x[2])\n return data\n\ndef get_data_by_individual(fname):\n '''given a filename, returns a map of individual to data'''\n data = []\n with open(fname, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n col_titles = reader.next()\n timestamp_col = get_col_index(col_titles, 'timestamp')\n if timestamp_col == -1:\n print('Could not find column `timestamp`')\n sys.exit(1)\n ili_col = get_col_index(col_titles, 'individual-local-identifier')\n if ili_col == -1:\n print('Could not find column `individual-local-identifier`')\n sys.exit(1)\n lat_col = get_col_index(col_titles, 'location-lat')\n if lat_col == -1:\n print('Could not find column `location-lat`')\n sys.exit(1)\n lon_col = get_col_index(col_titles, 'location-long')\n if lon_col == -1:\n print('Could not find column `location-long`')\n sys.exit(1)\n\n # create a map of individual-local-identifier to time series\n data = {}\n for row in reader:\n # get the timestamp\n timestamp = row[timestamp_col]\n # convert the timestamp string into a utc timestamp\n time = get_unix_ts(timestamp)\n # get the individual\n individual = row[ili_col]\n # get the location\n # get the data\n try:\n lat = float(row[lat_col])\n lon = float(row[lon_col])\n pt = [lon, lat, time]\n # add the point to the time series of the appropriate individual\n if individual not in data:\n data[individual] = []\n data[individual].append(pt)\n except ValueError:\n print('cannot cast `%s` to float' % row[lat_col])\n\n # let's be sure that all the data is properly sorted...\n return sort_by_time(data)\n\ndef pretty_time(ts):\n return datetime.datetime.fromtimestamp(ts).strftime('%d/%m/%Y')\n\nclass TimeSeries(object):\n def __init__(self, id, series):\n self.id = id\n self.series = np.array(series)\n self.loc_series = extract_lat_and_lon(series)\n self.year = datetime.datetime.fromtimestamp(self.series[0][2]).year\n\n def set_interpolated_series(self, interpolated_series):\n self.interpolated_series = np.array(interpolated_series)\n self.interpolated_loc_series = extract_lat_and_lon(interpolated_series)\n\n def time_normalize(self):\n start_time = self.interpolated_series[0][2]\n self.normalized_interpolated_series = np.array([[lon, lat, time-start_time] for lon, lat, time in self.interpolated_series])\n\n def __str__(self):\n return self.id + ', ' + pretty_time(self.series[0][2]) + ' - ' + pretty_time(self.series[-1][2])\n\nclass RelativeDate(object):\n def __init__(self, month, day):\n self.month = month\n self.day = day\n\nclass RelativeDateRange(object):\n def __init__(self, start, end):\n self.start = start\n self.end = end\n\n def contains(self, date):\n if date.month > self.start.month and date.month < self.end.month:\n return True\n elif date.month == self.start.month:\n if date.month == 
self.end.month:\n return date.day >= self.start.day and date.day <= self.end.day\n else:\n return date.day >= self.start.day\n elif date.month == self.end.month:\n return date.day <= self.end.day\n else:\n return False\n\n def get_total_time(self):\n # Assume 30 day months\n num_months = self.end.month - self.start.month\n num_days = self.end.day - self.start.day\n return float((num_months*30 + num_days) * SECS_PER_DAY)\n\ndef get_total_time(series):\n return series[-1][2] - series[0][2]\n\ndef should_add(series, rdr=None, inclusion_threshold=0.5):\n if len(series) == 0:\n return False\n series_time = get_total_time(series)\n if rdr:\n range_time = rdr.get_total_time()\n else:\n range_time = SECS_PER_YEAR\n\n if (series_time / range_time) >= inclusion_threshold:\n return True\n\n return False\n\ndef split_time_series(indiv_id, time_series, relative_date_range=None, inclusion_threshold=0.5):\n '''given an id and a time series, splits the time series according to the\n relative date range and returns a list of the splits. Uses some extrapolation if need be'''\n if relative_date_range:\n split = []\n curr_series = []\n for pt in time_series:\n curr_date = datetime.datetime.fromtimestamp(pt[2])\n if relative_date_range.contains(curr_date):\n curr_series.append(pt)\n elif len(curr_series) != 0:\n # decide if curr_series contains enough points to be added\n if should_add(curr_series, relative_date_range, inclusion_threshold):\n split.append(TimeSeries(indiv_id, curr_series))\n else:\n print('Rejecting series for individual %s' % indiv_id)\n curr_series = []\n if should_add(curr_series, relative_date_range, inclusion_threshold):\n split.append(TimeSeries(indiv_id, curr_series))\n else:\n print('Rejecting series for individual %s' % indiv_id)\n return split\n else:\n # split according to years\n year_to_series = {}\n for pt in time_series:\n curr_date = datetime.datetime.fromtimestamp(pt[2])\n if curr_date.year not in year_to_series:\n year_to_series[curr_date.year] = []\n year_to_series[curr_date.year].append(pt)\n split = []\n for year, series in year_to_series.iteritems():\n if should_add(series, inclusion_threshold=inclusion_threshold):\n split.append(TimeSeries(indiv_id, series))\n else:\n print('Rejecting series for individual %s' % indiv_id)\n return split\n\ndef get_time_series(data, relative_date_range=None, inclusion_threshold=0.5):\n '''takes an mapping of individual id to the time series for the individual and\n returns a list of TimeSeries objects constructed according to the relative date range'''\n tsos = [] # time series objects\n for indiv_id, time_series in data.iteritems():\n tsos += split_time_series(indiv_id, time_series, relative_date_range, inclusion_threshold)\n return tsos\n\n################################################################################\n# Old preprocessing\n################################################################################\n\n''' Struture\n{\n year: {\n individual_id: [data points],\n ...\n },\n ...\n}\n'''\n\ndef read_data(fname):\n data_by_year = {}\n data_by_individual = {}\n with open(fname, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n # get the timestamp and individual-local-identifier columns\n col_titles = reader.next()\n timestamp_col = get_col_index(col_titles, 'timestamp')\n if timestamp_col == -1:\n print('Could not find column `timestamp`')\n sys.exit(1)\n ili_col = get_col_index(col_titles, 'individual-local-identifier')\n if ili_col == -1:\n print('Could not find column `individual-local-identifier`')\n 
sys.exit(1)\n        lat_col = get_col_index(col_titles, 'location-lat')\n        lon_col = get_col_index(col_titles, 'location-long')\n        # iterate row by row and create the data object\n        for row in reader:\n            # get the year\n            timestamp = row[timestamp_col]\n            try:\n                year = int(timestamp.split('-')[0])\n            except ValueError:\n                print('Unable to cast year to an integer')\n                sys.exit(1)\n\n            # get the individual\n            individual = row[ili_col]\n\n            if year not in data_by_year:\n                data_by_year[year] = {}\n\n            if individual not in data_by_year[year]:\n                data_by_year[year][individual] = []\n\n            if individual not in data_by_individual:\n                data_by_individual[individual] = {}\n\n            if year not in data_by_individual[individual]:\n                data_by_individual[individual][year] = []\n\n            # get the data\n            try:\n                lat = float(row[lat_col])\n                lon = float(row[lon_col])\n                # convert the timestamp string into a utc timestamp\n                time = get_unix_ts(timestamp)\n\n                feature_vec = [lon, lat, time]\n\n                data_by_year[year][individual].append(feature_vec)\n                data_by_individual[individual][year].append(feature_vec)\n            except ValueError:\n                print('cannot cast %s to float' % row[lat_col])\n\n    return data_by_year, data_by_individual\n\n# Note: this is a pretty inefficient data structure - stores the same data twice\nclass Data(object):\n    '''A wrapper class for data'''\n\n    def __init__(self, data_by_year, data_by_individual):\n        self.data_by_year = data_by_year\n        self.data_by_individual = data_by_individual\n\n    def get_data(self):\n        return self.get_data_by_year(), self.get_data_by_individual()\n\n    def get_year(self, year):\n        '''returns the data for all the individuals for a single year'''\n        if year not in self.data_by_year:\n            raise Exception('No such year')\n        return self.data_by_year[year]\n\n    def get_data_by_year(self):\n        '''returns an iterable over all the years according to the following format\n        [(year, [(individual_id, data) for each individual]) for each year]'''\n        retval = []\n        for year, individuals in self.data_by_year.iteritems():\n            l_indivs = []\n            for individual_id, data in individuals.iteritems():\n                l_indivs.append((individual_id, np.array(data)))\n            retval.append((year, l_indivs))\n        retval.sort()\n        return retval\n\n    def get_data_by_individual(self):\n        '''returns data for individuals across years according to the following format\n        [(individual_id, [(year, data) for each year]) for each individual]'''\n        retval = []\n        for individual, years in self.data_by_individual.iteritems():\n            l_years = []\n            for year, data in years.iteritems():\n                l_years.append((year, np.array(data)))\n            l_years.sort()\n            retval.append((individual, l_years))\n        return retval\n\n################################################################################\n# Pickling\n################################################################################\n\ndef load_pickle(name):\n    with open(name, 'rb') as f:\n        p = pickle.load(f)\n    return p\n\ndef save_pickle(name, content):\n    if '.pkl' not in name:\n        name = 'preprocessed_data/' + name + '.pkl'\n    else:\n        name = 'preprocessed_data/' + name\n    print('saving data as %s' % name)\n    with open(name, 'wb') as f:\n        pickle.dump(content, f)\n\ndef preprocess(source_name, dest_name):\n    '''creates a Data object from csv'''\n    # get the data\n    data_by_year, data_by_individual = read_data(source_name)\n    # wrap the data in an object\n    data = Data(data_by_year, data_by_individual)\n    # save it to disk\n    save_pickle(dest_name, data)\n\ndef load(fname):\n    '''returns a pre-pickled Data object'''\n    return load_pickle(fname)\n\n## 
End\n","sub_path":"project_code/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":13477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"212963557","text":"class Bakesale(object):\n\n    def __init__(self, bakesale_id, teacher_id, group_name, group_size, items_desc, date, teacher_name, description, requested_day):\n        self.bakesale_id = bakesale_id\n        self.teacher_id = teacher_id\n        self.group_name = group_name\n        self.group_size = group_size\n        self.items_desc = items_desc\n        self.date = date\n        self.teacher_name = teacher_name\n        self.description = description\n        self.requested_day = requested_day\n\nclass Request(object):\n\n    def __init__(self, group_name, group_size, items_desc, requested_day):\n        self.group_name = group_name\n        self.group_size = group_size\n        self.items_desc = items_desc\n        self.requested_day = requested_day\n\n","sub_path":"app/bakesale/teacher/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"455022848","text":"# coding=utf-8\n\"\"\"The root of the ``string_matcher`` namespace.\"\"\"\n\n\ndef kmp_matcher(text, pattern):\n    \"\"\"Find the positions at which ``pattern`` is present in ``text``.\n\n    Return an iterable of zero-based indices indicating where ``pattern``\n    occurs in ``text``. Implemented with the Knuth-Morris-Pratt algorithm.\n    \"\"\"\n    indices = []\n    prefix_func = compute_prefix_function(pattern)\n    chars_matched = 0\n    for i, char in enumerate(text):\n        while chars_matched > 0 and pattern[chars_matched] != char:\n            chars_matched = prefix_func[chars_matched - 1] + 1\n        if pattern[chars_matched] == char:\n            chars_matched += 1\n        if chars_matched == len(pattern):\n            indices.append(i - len(pattern) + 1)\n            chars_matched = prefix_func[chars_matched - 1] + 1\n    return indices\n\n\ndef compute_prefix_function(pattern):\n    \"\"\"Compute the prefix function used by :func:`kmp_matcher`.\"\"\"\n    prefix_func = [-1 for _ in range(len(pattern))]\n    k = -1\n    # `i` is used to walk through each character in `pattern` and\n    # `prefix_func`. For example, given 'ababaca', i ranges from 1 to 6.\n    # prefix_func[0] is always given a value of -1, and prefix_func[1..6] are\n    # computed.\n    #\n    # `k` determines the value placed in each element of prefix_func.\n    for i in range(1, len(pattern)):\n        while k >= 0 and pattern[k + 1] != pattern[i]:\n            k = prefix_func[k]\n        if pattern[k + 1] == pattern[i]:\n            k += 1\n        prefix_func[i] = k\n    return prefix_func\n","sub_path":"python/string-matcher/string_matcher/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"293466062","text":"import pika\n\n#Establishes the connection to the RabbitMQ server\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\nchannel = connection.channel()\n\n#Creates the queue that receives the message\nchannel.queue_declare(queue='ola')\n\n#Function that prints the message to the screen\ndef callback(ch, method, properties, body):\n    print(\" [x] Recebido %r\" % body)\n\n#Tells RabbitMQ that this function should receive messages only from the 'ola' queue\nchannel.basic_consume(queue='ola', on_message_callback=callback, auto_ack=True)\n\n#Loop that runs the callbacks whenever necessary\nprint(' [*] Aguardando mensagens. 
Para sair, pressione CTRL + C')\nchannel.start_consuming()","sub_path":"enviarReceber/recebe.py","file_name":"recebe.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"548656537","text":"from cosmoslik import *\nfrom numpy import interp, identity, exp, inf, arange, hstack, loadtxt, zeros, ones, log, invert\nimport sys\nimport math \nparam = param_shortcut('start','scale')\n\n\n\nimport os.path as osp\n \nparam = param_shortcut('start','scale')\n \nclass main(SlikPlugin):\n\n def __init__(self):\n super(SlikPlugin,self).__init__()\n\n self.cosmo = get_plugin('models.cosmology')(\n logA = 3.0828303593955471,\n ns = 0.95995802437054512,\n k_c = param(-9, scale = 1, range = (-13,-7)),\n alpha_exp = param(0.8, scale = 0.1, range=(0.4,0.9)),\n ombh2 = 0.022062077811123734,\n omch2 = 0.11974442183029269,\n tau = 0.086558937968886043,\n theta = 0.010415977212164643,\n #omnuh2 = 0,\t#0.000645,\n #massive_neutrinos=0,#param( 3, .2),\n massless_neutrinos=3.046, #param(3,.2)\n l_max_scalar=3000,\n l_max_tensor=3000,\n pivot_scalar=0.05,\n w=-1.0,\n r=None,\n nrun=None,\n omk=0,\n Yp=None,\n Tcmb=2.7255,\n #P_k_ini type = analytic_Pk,\n lensing = 'yes'#,\n #P_k_max_hinvMpc = 1.\n )\n\n\t#print 'setting phase template'\n #self.phase_template = SlikDict()\n #self.phase_template.alpha = 5.5\n #self.phase_template.A = 8.43\n #self.phase_template.B = 6.5e-4\n #if 'neff' in model: self.cosmo.massless_neutrinos = param(3,.2)\n #if 'yp' in model: self.cosmo.Yp = param(.24,0.1)\n #if 'mnu' in model: self.cosmo.omnuh2 = param(0,0.001,range=(0,1))\n\n\t#print 'loading likelihoods'\n self.camspec = get_plugin('likelihoods.clik')(\n clik_file='/software/mint15/cosmomc/likelihoods/clik_0313/data/CAMspec_v6.2TN_2013_02_26_dist.clik',\n A_ps_100=155.74974564586893,\n A_ps_143=42.990837159987066,\n A_ps_217=97.695142281342967,\n A_cib_143=9.0610630022180896,\n A_cib_217=35.756240161532823,\n A_sz=7.6487534282036265,\n r_ps=0.84792479189221115,\n r_cib=0.48718759077123552,\n n_Dl_cib=0.61145370143786038,\n cal_100=1.0005742128940385,\n cal_217=0.99635794254317223,\n xi_sz_cib=-0.11312275434269781,\n A_ksz=2.4495876748338397,\n Bm_1_1=0.48688296219336374\n )\n\n self.lowl = get_plugin('likelihoods.clik')(\n clik_file='/software/mint15/cosmomc/likelihoods/clik_0313/data/commander_v4.1_lm49.clik'\n )\n\n # self.s12 = get_plugin('likelihoods.spt_lowl')(\n # which='s12',\n # cal = param(1,0.02),\n # fgs = get_plugin('models.clust_poisson_egfs')\n # (\n # Aps = param(10,10,min=0),\n # Acib = param(10,10,min=0),\n # ncib = 0.8\n # )\n # )\n \t#print 'loading cosmology'\n\n self.get_cmb = get_plugin('models.classy')()\n\n\t#print 'loading derivers'\n self.bbn = get_plugin('models.bbn_consistency')()\n self.hubble_theta = get_plugin('models.hubble_theta')()\n self.priors = get_plugin('likelihoods.priors')(self)\n\n\t#print 'loading sampler'\n self.sampler = get_plugin('samplers.metropolis_hastings')(\n self,\n num_samples=1000000,\n output_file='chains/andyAAfitv6.chain',\n proposal_cov='../data/proposal.covmat',\n proposal_scale=1,\n #print_level=0,\n output_extra_params=['cosmo.Yp','cosmo.H0','cosmo.kcactual','cosmo.alphaactual','cl_TT2','cl_TT3','cl_TT4','cl_TT5','cl_TT6','cl_TT7','cl_TT8','cl_TT20','cl_TT40','cl_TT80','cl_TT120','cl_TT200','cl_TT500','cl_TT1000','cl_TT2000','cl_TT2999']\n\t)\n\n\n \n def __call__(self):\n self.cosmo.As = exp(self.cosmo.logA)*1e-10\n self.cosmo.Yp = self.bbn(**self.cosmo)\n self.cosmo.H0 = 
self.hubble_theta.theta_to_hubble(**self.cosmo)\n self.cosmo.kcactual = exp(self.cosmo.k_c)\n self.cosmo.alphaactual = 1./(1.-self.cosmo.alpha_exp)-1.\n #self.cosmo.neff_phase = self.amp_to_neff()\n #self.cosmo.leq = 125\n\t #print 'getting cmb'\n self.cmb_result = self.get_cmb(force = True, outputs=['cl_TT'],**self.cosmo)\n self.cl_TT = self.cmb_result['cl_TT']\n self.cl_TT2 = self.cl_TT[2]\n self.cl_TT3 = self.cl_TT[3]\n self.cl_TT4 = self.cl_TT[4]\n self.cl_TT5 = self.cl_TT[5]\n self.cl_TT6 = self.cl_TT[6]\n self.cl_TT7 = self.cl_TT[7]\n self.cl_TT8 = self.cl_TT[8]\n self.cl_TT20 = self.cl_TT[20]\n self.cl_TT40 = self.cl_TT[40]\n self.cl_TT80 = self.cl_TT[80]\n self.cl_TT120 = self.cl_TT[120]\n self.cl_TT200 = self.cl_TT[200]\n self.cl_TT500 = self.cl_TT[500]\n self.cl_TT1000 = self.cl_TT[1000]\n self.cl_TT2000 = self.cl_TT[2000]\n self.cl_TT2999 = self.cl_TT[2999]\n \n return lsum(lambda: self.priors(self),\n lambda: self.camspec(self.cmb_result),\n lambda: self.lowl(self.cmb_result))\n\n\nif __name__=='__main__':\n #run the chain\n for _ in Slik(main()).sample(): pass\n","sub_path":"cosmoslik/AAfitv6slice.py","file_name":"AAfitv6slice.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"617214535","text":"from utility import gpib\nimport pandas as pd\nimport time\nimport matplotlib.pyplot as plt\n\n\ndef daqMeasure(df):\n daq=gpib.AgilentDAQ(10,110,111,103,101)\n daq.ReadVin()\n daq.ReadIin()\n daq.ReadVout()\n daq.ReadRemoteVout()\n pdtemp={}\n pdtemp['Vin']=daq.vinValue\n pdtemp['Iin']=daq.iinValue\n pdtemp['Vout']=daq.voutValue\n pdtemp['VoutRemote']=daq.voutRemoteValue\n df=df.append(pdtemp,ignore_index=True)\n return df \n\ndef measureEfficiency(ComboEload,Ioutmax,Ioutstep,delaytime,onlivecurve=1): \n #ComboEload.loadoff()\n \n df=pd.DataFrame()\n i=0\n ax=[]\n ay=[]\n ay1=[] \n \n ComboEload.getCurrentCapability() \n for current in range(0,Ioutmax+1,Ioutstep):\n ComboEload.loadoff()\n ComboEload.setcurrent2device(current)\n ComboEload.loadon() \n df=daqMeasure(df)\n time.sleep(delaytime)\n ComboEload.measureCurrent()\n Iout=ComboEload.meascurrentvalue\n \n df.loc[i,\"Iout\"]=Iout\n df.loc[i,\"Effi\"]=(df.loc[i,\"Vout\"]*df.loc[i,\"Iout\"])/(df.loc[i,\"Vin\"]*df.loc[i,\"Iin\"])\n df.loc[i,\"EffiRemote\"]=(df.loc[i,\"VoutRemote\"]*df.loc[i,\"Iout\"])/(df.loc[i,\"Vin\"]*df.loc[i,\"Iin\"])\n\n ComboEload.loadoff() \n time.sleep(delaytime)\n \n #plt cruve\n print(df)\n if onlivecurve==1:\n ax.append(df.loc[i,'Iout'])\n ay.append(df.loc[i,'Effi'])\n ay1.append(df.loc[i,'EffiRemote']) \n \n plt.clf()\n plt.ion()\n plt.plot(ax,ay)\n plt.plot(ax,ay1)\n plt.pause(0.1)\n plt.ioff()\n else:\n None\n i+=1\n ComboEload.loadoff() # off all eload\n return df \n \n \n \n\nif __name__ ==\"__main__\":\n#\n load1=gpib.ChromaEload(7,1,80,0)\n load3=gpib.ChromaEload(7,3,80,0)\n load7=gpib.ChromaEload(7,5,80,0)\n #load63=gpib.ChromaEload(9,1,220,0)\n cobload=gpib.ComboEload()\n cobload.addEload(load1)\n cobload.addEload(load3)\n cobload.addEload(load7)\n #cobload.addEload(load63)\n df=measureEfficiency(cobload,230,23,5,0)\n \n","sub_path":"utility/measure.py","file_name":"measure.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"251029625","text":"import cv2\nimport numpy as np\n\nboundaries = [\n\t([17, 15, 100], [50, 56, 200]),\n\t([86, 31, 4], [220, 88, 50]),\n\t([25, 146, 190], [62, 174, 250]),\n\t([103, 
86, 65], [145, 133, 128])\n]\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n if cap.grab():\n ret, frame = cap.retrieve()\n if not ret:\n continue\n else:\n \n #gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n #laplacian = cv2.Laplacian(frame,cv2.CV_64F) #need gray\n #edges = cv2.Canny(frame,100,200)\n \n img_hsv=cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)\n\n\n cv2.imshow('video', img_hsv)\n if cv2.waitKey(10) == 27:\n break\n","sub_path":"cv_cam.py","file_name":"cv_cam.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"436726390","text":"import socketserver\n\nclass OpenComputersRemoteShell(socketserver.StreamRequestHandler):\n def shell_input(self, op, args):\n working_dir = args[0]\n cmd = bytes(\n input(working_dir + \" $ \"),\n \"ascii\")\n return cmd\n\n\n def shell_out(self, op, args):\n out_lines = []\n while True:\n line = self.rfile.readline().decode('ascii').rstrip()\n if line.strip() == '%SHOUTEND':\n break\n out_lines.append(line)\n print('\\n'.join(out_lines).strip())\n return b'%OK'\n\n\n def command(self, data):\n op, *args = data\n ops = {\n 'READY': self.shell_input,\n 'SHOUT': self.shell_out,\n }\n \n try:\n return ops[op](op, args)\n except Exception as err:\n print(\"OP ERROR:\", err)\n return b\"\"\n\n\n def handle(self):\n message = self.rfile.readline().strip().decode('ascii')\n\n if message[:1] == '%':\n response = self.command(message[1:].split())\n else: \n print(message)\n response = b'%OK'\n \n self.wfile.write(response)\n\nif __name__ == \"__main__\":\n HOST, PORT = \"localhost\", 9987\n server = socketserver.TCPServer((HOST, PORT), OpenComputersRemoteShell)\n server.serve_forever()\n\n\n\n## ocsc-client 0.tcp.ngrok.io:19705\n","sub_path":"mctcpsrv.py","file_name":"mctcpsrv.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"352159251","text":"#!/usr/bin/python3\n\n\n\n\n# Imports\nimport os\nimport sys\nimport argparse\nimport logging\n\n\n\n\n# Local imports\n# (Can't use relative imports because this is a top-level script)\nimport edgecase_article\n\n\n\n\n# Shortcuts\nisfile = os.path.isfile\nisdir = os.path.isdir\ndatajack = edgecase_article.submodules.datajack\nstateless_gpg = edgecase_article.submodules.stateless_gpg\ngpg = stateless_gpg.gpg\nutil = edgecase_article.util\n\n\n\n\n# Notes:\n# - Using keyword function arguments, each of which is on its own line,\n# makes Python code easier to maintain. Arguments can be changed and\n# rearranged much more easily.\n# - I use \"validate\" to mean \"check that this data is in the expected format\".\n# - I use \"verify\" to mean that \"check that a mathematical operation produces the expected result\". Example: Check a digital signature.\n# - An article and the corresponding signed article have the same filenames. To distinguish between them, a newly created signed article has the additional extension '.signed'. It will fail filename verification but will pass all other checks. This extra extension means that an article and its signed equivalent can exist in the same directory.\n# - If the --verifyAssets option is used, this tool will look for a directory named \"assets\" in the article directory (in addition to looking for a directory with the same name as the article (minus the extension)).\n\n\n\n\n# Set up logger for this module. 
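(setup() below reconfigures it from the CLI options.) 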
By default, it produces no output.\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\nlogger.setLevel(logging.ERROR)\nlog = logger.info\ndeb = logger.debug\n\n\n\n\ndef setup(\n    log_level = 'error',\n    debug = False,\n    log_timestamp = False,\n    log_filepath = None,\n    ):\n  logger_name = 'cli'\n  # Configure logger for this module.\n  edgecase_article.util.module_logger.configure_module_logger(\n    logger = logger,\n    logger_name = logger_name,\n    log_level = log_level,\n    debug = debug,\n    log_timestamp = log_timestamp,\n    log_filepath = log_filepath,\n  )\n  deb('Setup complete.')\n  # Configure logging levels for edgecase_article package.\n  # By default, without setup, it logs at ERROR level.\n  # Optionally, the package could be configured here to use a different log level, by e.g. passing in 'error' instead of log_level.\n  edgecase_article.setup(\n    log_level = log_level,\n    debug = debug,\n    log_timestamp = log_timestamp,\n    log_filepath = log_filepath,\n  )\n\n\n\n\ndef main():\n\n  # Note: We use camelCase for option names because it's faster to type.\n\n  parser = argparse.ArgumentParser(\n    description='Command-Line Interface (CLI) for using the edgecase_article package.'\n  )\n\n  parser.add_argument(\n    '-t', '--task',\n    help=\"Task to perform (default: '%(default)s').\",\n    default='hello',\n  )\n\n  parser.add_argument(\n    '-a', '--articleType',\n    help=\"Type of article (default: '%(default)s').\",\n    default='unspecified',\n  )\n\n  parser.add_argument(\n    '-p', '--articlePath',\n    help=\"Path to article file (default: '%(default)s').\",\n    default='new_articles/new_article.txt',\n  )\n\n  # Technically, this should be \"validateFileName\", but it seems more user-friendly to always use \"verify\" in the options.\n  parser.add_argument(\n    '-n', '--verifyFileName',\n    action='store_true',\n    help=\"Checks that the article's filename is in the proper format.\",\n  )\n\n  parser.add_argument(\n    '-v', '--verifySignature',\n    action='store_true',\n    help=\"Checks that the article's signature(s) are valid.\",\n  )\n\n  parser.add_argument(\n    '-c', '--verifyContent',\n    action='store_true',\n    help=\"Validates the content element within an article.\",\n  )\n\n  parser.add_argument(\n    '-e', '--verifyAssets',\n    action='store_true',\n    help=\"Verifies the asset files associated with an article.\",\n  )\n\n  parser.add_argument(\n    '--assetDir',\n    help=\"Path to directory containing assets of this article (default: '%(default)s'). If not supplied, the article path (minus the .txt extension) is used.\",\n    default=None,\n  )\n\n  parser.add_argument(\n    '--publicKeyDir',\n    help=\"Path to directory containing public keys (default: '%(default)s').\",\n    default=None,\n  )\n\n  parser.add_argument(\n    '--privateKeyDir',\n    help=\"Path to directory containing private keys (default: '%(default)s').\",\n    default=None,\n  )\n\n  parser.add_argument(\n    '-o', '--outputDir',\n    help=\"Specify an output directory. (default: '%(default)s').\",\n    default=None,\n  )\n\n  parser.add_argument(\n    '-l', '--logLevel', type=str,\n    choices=['debug', 'info', 'warning', 'error'],\n    help=\"Choose logging level (default: '%(default)s').\",\n    default='error',\n  )\n\n  parser.add_argument(\n    '-d', '--debug',\n    action='store_true',\n    help=\"Sets logLevel to 'debug'. 
This overrides --logLevel.\",\n  )\n\n  parser.add_argument(\n    '-s', '--logTimestamp',\n    action='store_true',\n    help=\"Choose whether to prepend a timestamp to each log line.\",\n  )\n\n  parser.add_argument(\n    '-x', '--logToFile',\n    action='store_true',\n    help=\"Choose whether to save log output to a file.\",\n  )\n\n  parser.add_argument(\n    '-z', '--logFilepath',\n    help=\"The path to the file that log output will be written to.\",\n    default='log_edgecase_article.txt',\n  )\n\n  a = parser.parse_args()\n\n  log_filepath = a.logFilepath if a.logToFile else None\n\n  # Check and analyse arguments\n  if not isfile(a.articlePath):\n    msg = \"File not found at articlePath {}\".format(repr(a.articlePath))\n    raise FileNotFoundError(msg)\n  if a.verifySignature:\n    if not a.publicKeyDir:\n      msg = \"To use verifySignature, need to specify a publicKeyDir.\"\n      raise ValueError(msg)\n    if not isdir(a.publicKeyDir):\n      msg = \"Directory not found at publicKeyDir {}\".format(repr(a.publicKeyDir))\n      raise FileNotFoundError(msg)\n  if a.task == 'sign':\n    if not a.publicKeyDir:\n      msg = \"To use the 'sign' task, need to specify a publicKeyDir.\"\n      raise ValueError(msg)\n    if not isdir(a.publicKeyDir):\n      msg = \"Directory not found at publicKeyDir {}\".format(repr(a.publicKeyDir))\n      raise FileNotFoundError(msg)\n    if not a.privateKeyDir:\n      msg = \"To use the 'sign' task, need to specify a privateKeyDir.\"\n      raise ValueError(msg)\n    if not isdir(a.privateKeyDir):\n      msg = \"Directory not found at privateKeyDir {}\".format(repr(a.privateKeyDir))\n      raise FileNotFoundError(msg)\n  if a.verifyAssets:\n    if not util.misc.shell_tool_exists('shasum'):\n      msg = \"Could not find shell tool 'shasum' on system.\"\n      raise ValueError(msg)\n\n  # Setup\n  setup(\n    log_level = a.logLevel,\n    debug = a.debug,\n    log_timestamp = a.logTimestamp,\n    log_filepath = log_filepath,\n  )\n\n  # Run top-level function (i.e. 
the appropriate task).\n tasks = \"\"\"\nhello hello2 hello3 hello4 hello5\nverify sign\n\"\"\".split()\n if a.task not in tasks:\n print(\"Unrecognised task: {}\".format(a.task))\n stop()\n globals()[a.task](a) # run task.\n\n\n\n\ndef hello(a):\n # Confirm:\n # - that we can run a simple task.\n # - that this tool has working logging.\n log('Log statement at INFO level')\n deb('Log statement at DEBUG level')\n print('hello world')\n\n\n\n\ndef hello2(a):\n # Confirm:\n # - that we can run a simple task from within the package.\n # - that the package has working logging.\n edgecase_article.code.hello.hello()\n\n\n\n\ndef hello3(a):\n # Confirm:\n # - that we can run a simple package task that loads a resource file.\n edgecase_article.code.hello.hello_resource()\n\n\n\n\ndef hello4(a):\n # Confirm:\n # - that the datajack submodule can be accessed.\n e = datajack.Element()\n value = e.hello()\n print(value)\n\n\n\n\ndef hello5(a):\n # Confirm:\n # - that we can use the stateless_gpg submodule\n data = \"hello world\\n\"\n log(\"data = \" + data.strip())\n data_dir = 'edgecase_article/submodules/stateless_gpg/stateless_gpg/data'\n private_key_file = data_dir + '/test_key_1_private_key.txt'\n private_key = open(private_key_file).read()\n signature = gpg.make_signature(private_key, data)\n public_key_file = data_dir + '/test_key_1_public_key.txt'\n public_key = open(public_key_file).read()\n result = gpg.verify_signature(public_key, data, signature)\n log(\"result = \" + str(result))\n if not result:\n raise Exception(\"Failed to create and verify signature.\")\n print(\"Signature created and verified.\")\n\n\n\n\ndef verify(a):\n edgecase_article.code.verify.verify(\n article_path = a.articlePath,\n article_type = a.articleType,\n verify_file_name = a.verifyFileName,\n verify_signature = a.verifySignature,\n verify_content = a.verifyContent,\n public_key_dir = a.publicKeyDir,\n verify_assets = a.verifyAssets,\n asset_dir = a.assetDir,\n )\n\n\n\n\ndef sign(a):\n signed_article = edgecase_article.code.sign.sign(\n article_path = a.articlePath,\n public_key_dir = a.publicKeyDir,\n private_key_dir = a.privateKeyDir,\n )\n # By default, write the output file to the same directory as the original article.\n output_file = signed_article.file_path + '.signed'\n output_dir = a.outputDir\n if output_dir:\n # If an output directory was specified, write the file there instead.\n if not isdir(output_dir):\n msg = \"Output dir ({}) not found.\".format(output_dir)\n raise FileNotFoundError(msg)\n output_file_name = signed_article.file_name + '.signed'\n output_file = os.path.join(output_dir, output_file_name)\n if isfile(output_file):\n msg = \"Error: Output file ({}) already exists.\".format(output_file)\n stop(msg)\n with open(output_file, 'w') as f:\n f.write(signed_article.data + '\\n')\n msg = \"Signed article written to {}\".format(output_file)\n log(msg)\n\n\n\n\ndef stop(msg=None):\n if msg is not None:\n print(msg)\n import sys\n sys.exit()\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":9651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"634345964","text":"##Pattern Recognition: Implementation of the \"forward part\" of forward/backward\n##algorithm. Reports observation probability of input sequence. 
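Usage: python recognize.py <hmm_file> <obs_file>. 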
\n\nimport sys\n\n# -------------------------------\n# HMM CLASS\n# -------------------------------\nclass HMM:\n def __init__(self):\n pass\n\n # Reads HMM values from a given file\n def read_hmm_from_file(self, file_name):\n fmm_fd = open(file_name, \"r\")\n # N - number of states\n # M - number of observation symbols\n # T - number of time steps or length of oberservation sequences\n self.N, self.M, self.T = [int(i) for i in fmm_fd.readline().rstrip().split(\" \")]\n\n # Read state from the 2nd line\n self.states = fmm_fd.readline().rstrip().split(\" \")\n # Read vocab from the 3rd line\n self.vocab = fmm_fd.readline().rstrip().split(\" \")\n\n self.a = []\n B_by_lines = []\n\n if (fmm_fd.readline().rstrip() == \"a:\"):\n for i in range(0, self.N):\n self.a.append([float(i) for i in fmm_fd.readline().rstrip().split(\" \")])\n if (fmm_fd.readline().rstrip() == \"b:\"): \n for i in range(0, self.N):\n B_by_lines.append([float(i) for i in fmm_fd.readline().rstrip().split(\" \")])\n\n self.b = {}\n for i in range(0, self.M):\n tmp_arr = []\n for j in range(0, self.N):\n tmp_arr.append(B_by_lines[j][i])\n self.b[self.vocab[i]] = tmp_arr \n\n if (fmm_fd.readline().rstrip() == \"pi:\"): \n self.pi = [float(i) for i in fmm_fd.readline().rstrip().split(\" \")]\n \n\n\ndef forward(hmm, T, obs):\n a = hmm.a\n b = hmm.b\n pi = hmm.pi\n N = hmm.N\n M = hmm.M\n\n #(1) : Initialization\n \n alpha = [[0 for item2 in range(N)] for item in range(T)]\n for state_num in range(N):\n alpha[0][state_num] = pi[state_num] * b[obs[0]][state_num]\n \n #(2): Induction\n for i in range(1,T):\n for current_state in range(N):\n alpha[i][current_state] = sum(alpha[i-1][prev_state]*a[prev_state][current_state] for prev_state in range(N))* b[obs[i]][current_state]\n \n #(3): Termination\n prob = sum(alpha[T-1][states] for states in range(N))\n \n return prob\n \n# -------------------------------\n# MAIN PROGRAM\n# -------------------------------\n\nif __name__ == '__main__':\n\n # Get names of files\n hmm_fname = sys.argv[1] # Original HMM\n obser_fname = sys.argv[2] # Observations\n #optimized_fname = sys.argv[3] # Output HMM\n\n # Initialize new HMM and read it from file\n hmm = HMM()\n hmm.read_hmm_from_file(hmm_fname)\n\n # Reading observation file\n obser_fd = open(obser_fname, \"r\")\n num_of_obser_sets = int(obser_fd.readline().rstrip())\n observations = []\n\n for i in range(0, num_of_obser_sets):\n observation = {}\n observation[\"T\"] = int(obser_fd.readline().rstrip())\n observation[\"seq\"] = obser_fd.readline().rstrip().split(\" \")\n observations.append(observation)\n \n #implement forward algorithm for each observation sequence in the obs file\n for observation in observations:\n prob = forward(hmm, observation[\"T\"],observation[\"seq\"])\n print(prob)\n\n \n","sub_path":"HMM/recognize.py","file_name":"recognize.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"121843772","text":"\"\"\"\nA test of the AverageWeightings kernel used for the Averager.\n\"\"\"\n\nfrom firedrake import (IntervalMesh, Function, RectangleMesh,\n VectorFunctionSpace)\n\nfrom gusto import kernels\nimport pytest\n\n\n@pytest.fixture\ndef mesh(geometry):\n\n L = 3.0\n n = 3\n\n if geometry == \"1D\":\n m = IntervalMesh(n, L)\n elif geometry == \"2D\":\n m = RectangleMesh(n, n, L, L, quadrilateral=True)\n\n return m\n\n\ndef setup_values(geometry, true_values):\n\n # The true values can be determined by the number of elements\n # that 
the DoF is shared between.\n\n if geometry == \"1D\":\n # The numbering of DoFs for CG1 in this mesh is\n # | | CG1 | |\n # 0------1------2-------3\n\n edge_indices = [0, 3]\n internal_indices = [1, 2]\n\n for index in edge_indices:\n true_values.dat.data[index] = 1.0\n for index in internal_indices:\n true_values.dat.data[index] = 2.0\n\n elif geometry == \"2D\":\n # The numbering of DoFs for DG1 and CG1 near the origin in this mesh is\n # | CG1 |\n # 11-----12-----14-----15\n # | | | |\n # | | | |\n # | | | |\n # 6------7------10-----13\n # | | | |\n # | | | |\n # | | | |\n # 1------2------4------8\n # | | | |\n # | | | |\n # | | | |\n # 0------3------5------9\n\n # List indices for corners\n corner_indices = [0, 9, 11, 15]\n edge_indices = [1, 3, 5, 6, 8, 12, 13, 14]\n internal_indices = [2, 4, 7, 10]\n\n for index in corner_indices:\n true_values.dat.data[index] = [1.0, 1.0]\n for index in edge_indices:\n true_values.dat.data[index] = [2.0, 2.0]\n for index in internal_indices:\n true_values.dat.data[index] = [4.0, 4.0]\n\n return true_values\n\n\n@pytest.mark.parametrize(\"geometry\", [\"1D\", \"2D\"])\ndef test_average(geometry, mesh):\n\n vec_CG1 = VectorFunctionSpace(mesh, \"CG\", 1)\n\n # We will fill DG_field with values, and average them to CG_field\n weights = Function(vec_CG1)\n true_values = Function(vec_CG1)\n\n true_values = setup_values(geometry, true_values)\n\n kernel = kernels.AverageWeightings(vec_CG1)\n kernel.apply(weights)\n\n tolerance = 1e-12\n if geometry == \"1D\":\n for i, (weight, true) in enumerate(zip(weights.dat.data[:], true_values.dat.data[:])):\n assert abs(weight - true) < tolerance, \"Weight not correct at position %i\" % i\n elif geometry == \"2D\":\n for i, (weight, true) in enumerate(zip(weights.dat.data[:], true_values.dat.data[:])):\n for weight_j, true_j in zip(weight, true):\n assert abs(weight_j - true_j) < tolerance, \"Weight not correct at position %i\" % i\n","sub_path":"tests/kernel_tests/test_weighting_kernel.py","file_name":"test_weighting_kernel.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"430769744","text":"#Create a function that takes a string as a parameter and returns a list.\n\n#The function should remove all vowels and sort the consonants in alphabetic order, and the return the result.\n\n#--------------------------\nStr1 = 'hej med dig'\ndef sortcons(s):\n for i in ['a','e','i','o','u','y', ' ']:\n s = s.lower().replace(i,'')\n \n return sorted(s)\n\n\nprint(sortcons(\"hello world\"))\n#--------------------------\n\n#Create a list of strings with names in it. 
(l = [‘Claus’, ‘Ib’, ‘Per’])\n\nl = ['Claus', 'Ib', 'Per']\nprint(l)\n\n#Sort this list by using the sorted() built-in function.\nprint(sorted(l))\n\n\n#Sort the list in reversed order.\n\nprint(sorted(l, reverse=True))\n\n#Sort the list on the length of the name.\n\nprint(sorted(l, key=len))\n\n#Sort the list based on the last letter in the name.\n\n\ndef getLastLetter(l):\n    a = [x[-1] for x in l]\n    return a\n\nprint(getLastLetter(l))\n\ndef reverse_list(s):\n    return s[::-1]\nprint(sorted(l,key=reverse_list))\n\n#print(sorted(l, reverse=True))\n\n#Sort the list with the names where the letter ‘a’ is in the name first.\n\n# sort a list\n\ndef a_in(x):\n    if 'a' in x.lower():\n        return True\n    return False\n\n# reverse=True so the names containing 'a' (key True) come first\nprint(sorted(l, key = a_in, reverse=True))","sub_path":"List and tuples/1.ListAndTuple.py","file_name":"1.ListAndTuple.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"63535705","text":"\"\"\"\nsilly GUI that makes a word uppercase\n\"\"\"\n\nfrom kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.label import Label\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.filechooser import FileChooserListView\nfrom kivy.uix.listview import ListView\nfrom kivy.config import Config\n\n#Config.set( 'graphics', 'width', '250' )\n#Config.set( 'graphics', 'height', '300' )\n\npath = \"../python/poesie\"\n\nimport lex_stat\n\nclass Widg( BoxLayout ):\n\n    def on_text( self, *args ):\n        print( \"text entered\" )\n        self.s = self.t.text\n\n    def on_conv( self, *args ):\n        words = []\n        for f in self.f.selection:\n            print( 'doing file', f )\n            words += lex_stat.leggi( f )\n        freq = lex_stat.freq( words )\n        less = lex_stat.less( freq )\n        items = []\n        for w in less:\n            items.append( w + ': ' + str( freq[ w ] ) )\n        self.lles.item_strings = items\n        most = lex_stat.most( freq )\n        items = []\n        for w in most:\n            items.append( w + ': ' + str( freq[ w ] ) )\n        self.lmos.item_strings = items\n\n    def on_files( self, *args ):\n        print( 'selected files' )\n        self.lsel.item_strings = self.f.selection\n\n\n    def __init__( self ):\n        super( Widg, self ).__init__( orientation='vertical', spacing=20 )\n        self.s = \"\"\n        self.txt = TextInput( multiline=False, font_size=24 )\n        self.bsel = Button( text='select files', font_size=24, background_color= [ 0.8, 0.2, 0.1, 1 ] )\n        self.bfrq = Button( text='compute frequency', font_size=24, background_color= [ 0.2, 0.8, 0.1, 1 ] )\n        self.lsel = ListView( item_strings=[ \"\" ] )\n        self.lles = ListView( item_strings=[ \"\" ] )\n        self.lmos = ListView( item_strings=[ \"\" ] )\n        self.f = FileChooserListView( path=path, filters=[ '*.txt' ], multiselect=True )\n        self.bsel.bind( on_release=self.on_files )\n        self.bfrq.bind( on_release=self.on_conv )\n        self.bb = BoxLayout( size_hint= ( 1, 0.2 ) )\n        self.ff = BoxLayout()\n        self.lm = BoxLayout( size_hint= ( 1, 0.6 ) )\n        self.ff.add_widget( self.f )\n        self.ff.add_widget( self.lsel )\n        self.bb.add_widget( self.bsel )\n        self.bb.add_widget( self.bfrq )\n        self.lm.add_widget( self.lles )\n        self.lm.add_widget( self.lmos )\n        self.add_widget( self.lm )\n        self.add_widget( self.bb )\n        self.add_widget( self.ff )\n\nclass bu( App ):\n    def build( self ):\n        return Widg()\n\nif __name__ == '__main__':\n    bu().run()\n","sub_path":"kivy/k_lex.py","file_name":"k_lex.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"231277667","text":"from __future__ import print_function\nimport os\nimport numpy as np\nfrom pathlib import Path\n\ndef find_files(directory,extension):\n print(\"Scanning for files...\")\n ext_files = []\n for folder, subs, files in os.walk(directory):\n for filename in files:\n if filename.split('.')[-1] == extension:\n filename = os.path.join(folder, filename)\n ext_files.append(filename)\n \n print(\"Found {} files\".format(len(ext_files)))\n return sorted(ext_files)\n\ndef read_obj_file(filename):\n vertices = []\n triangles = []\n quads = []\n with open(filename, 'r') as f:\n for line in f:\n splited = line.split()\n if splited and splited[0] == 'f':\n size = len(splited) - 1\n point = [] \n for i in range(size):\n point.append(int(splited[i+1].split('/')[0])-1)\n if size == 3:\n triangles.append(point)\n elif size==4:\n quads.append(point)\n elif splited and splited[0] == 'v':\n vertices.append([float(splited[1]), float(splited[3]),float(splited[2])])\n vertices = rescale_to_unit_sphere(centralize(np.array(vertices)))\n return vertices, np.array(triangles), np.array(quads)\n\ndef read_off_file(filename):\n vertices = []\n triangles = []\n quads = []\n with open(filename, 'r') as f:\n \n line = f.readline().strip()\n\n if line=='OFF':\n line = f.readline() \n else:\n line = line[3:]\n if not line.strip():\n line = f.readline() \n n_vertices, n_faces, _ = [int(x) for x in line.split()]\n for _ in range(n_vertices):\n line = f.readline()\n vertices.append([float(x) for x in line.split()])\n for _ in range(n_faces):\n line = f.readline()\n splited = line.split()\n if splited[0] == \"3\":\n triangles.append([int(x) for x in splited[1:4]])\n elif splited[0] == \"4\":\n quads.append([int(x) for x in splited[1:5]])\n vertices = rescale_to_unit_sphere(centralize(np.array(vertices)))\n\n return vertices, np.array(triangles), np.array(quads)\n \n\ndef rescale_to_unit_sphere(vertices):\n return vertices / np.max(np.linalg.norm(vertices, axis=1))\n\ndef centralize(vertices):\n return vertices - np.mean(vertices, axis=0)\n\ndef off2obj(file):\n vertices, triangles, quads = read_off_file(file)\n obj_file_name = os.path.join(os.path.split(file)[0] , Path(file).stem + \".obj\")\n with open(obj_file_name, 'w') as f:\n for xyz in vertices:\n f.write('v {:6f} {:6f} {:6f}\\n'.format(xyz[0],xyz[2],xyz[1]))\n f.write('\\n')\n for ijk in triangles:\n f.write('f %d %d %d\\n' % (ijk[0]+1, ijk[1]+1, ijk[2]+1))\n for ijkl in quads:\n f.write('f %d %d %d %d\\n' % (ijkl[0]+1, ijkl[1]+1, ijkl[2]+1, ijkl[3]+1))\n \n ","sub_path":"dockers/data_conversion/vrnens_data/mesh_files.py","file_name":"mesh_files.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"335659868","text":"\"\"\"\r\nID: kqanto1\r\nLANG: PYTHON3\r\nTASK: gift1\r\n\"\"\"\r\n\r\nnames = []\r\nid = []\r\nwith open('gift1.in', 'r') as f:\r\n n = int(f.readline())\r\n for i in range(n):\r\n names.append(f.readline().split())\r\n names[i].append(0)\r\n id.append(names[i][0])\r\n for i in range(len(id)):\r\n name = f.readline().split()\r\n money = f.readline().split()\r\n if int(money[1]) > 0:\r\n cost = int(money[0]) // int(money[1])\r\n else:\r\n cost = 0\r\n for j in range(int(money[1])):\r\n whom = f.readline().split()\r\n names[id.index(whom[0])][1] += cost\r\n names[id.index(name[0])][1] -= cost * int(money[1])\r\n\r\nwith open('gift1.out', 'w') as f:\r\n for i in range(len(names)):\r\n print(names[i][0],end=' ',file=f)\r\n 
print(names[i][1],file=f)\r\n\r\n","sub_path":"usaco/1.2/gift1.py","file_name":"gift1.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"142861196","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2015 via680\n#\n# Licensed under a BSD 3-Clause License. See LICENSE file.\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.core.management.commands import migrate\nfrom django.test import SimpleTestCase\nfrom django.test.runner import dependency_ordered, DiscoverRunner\n\nclass MigrateTestCase(SimpleTestCase):\n test = lambda self: None\n\n\nclass TestMigrationExecutor(migrate.MigrationExecutor):\n def __init__(self, connection, progress_callback=None):\n self._progress_callback = progress_callback\n super(TestMigrationExecutor, self).__init__(connection, progress_callback=self.progress_callback)\n \n def progress_callback(self, action, migration, *args, **kwargs):\n if self._progress_callback:\n kwargs['executor'] = self\n self._progress_callback(action, migration, *args, **kwargs)\n\n\nclass TestMigrateCommand(migrate.Command):\n def run_test_func(self, action, migration, executor, test_func, test_func_name):\n if self.verbosity > 0:\n whitespace = '' if action.startswith('apply') else ' '\n self.stdout.write(' %sRunning %s.%s.%s...' % (whitespace, migration.app_label, migration.name, test_func_name,), self.style.MIGRATE_SUCCESS, ending='')\n \n project_state = executor.loader.project_state((migration.app_label, migration.name), at_end=False)\n \n test_func(project_state, self.testcase)\n \n if self.verbosity > 0:\n self.stdout.write(' OK', self.style.MIGRATE_SUCCESS)\n \n \n def migration_progress_callback(self, action, migration, *args, **kwargs):\n executor = kwargs.pop('executor')\n \n test_func_name = 'test_%s' % action\n \n test_func = getattr(migration, test_func_name, None)\n \n if test_func and action.endswith('start'):\n self.run_test_func(action, migration, executor, test_func, test_func_name)\n \n super(TestMigrateCommand, self).migration_progress_callback(action, migration, *args, **kwargs)\n \n if test_func and action.endswith('success'):\n self.run_test_func(action, migration, executor, test_func, test_func_name)\n \n def handle(self, *args, **kwargs):\n \n self.testcase = MigrateTestCase('test')\n \n old = migrate.MigrationExecutor\n migrate.MigrationExecutor = TestMigrationExecutor\n \n super(TestMigrateCommand, self).handle(*args, **kwargs)\n \n migrate.MigrationExecutor = old\n\n\nclass TestMigrationRunner(DiscoverRunner):\n def setup_databases(self, **kwargs):\n return setup_databases(self.verbosity, self.interactive, **kwargs)\n\n\n# the following code mostly copy+pasted from Django, used under a BSD license.\n\n\ndef create_test_db(self, verbosity=1, autoclobber=False, serialize=True, db_name_suffix=None):\n \"\"\"\n Creates a test database, prompting the user for confirmation if the\n database already exists. 
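When db_name_suffix is given, it is appended to the test database name. 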
Returns the name of the test database created.\n \"\"\"\n # Don't import django.core.management if it isn't needed.\n from django.core.management import call_command\n\n test_database_name = self._get_test_db_name()\n \n if db_name_suffix:\n test_database_name += db_name_suffix\n\n if verbosity >= 1:\n test_db_repr = ''\n if verbosity >= 2:\n test_db_repr = \" ('%s')\" % test_database_name\n print(\"Creating test database for alias '%s'%s...\" % (\n self.connection.alias, test_db_repr))\n\n self._create_test_db(verbosity, autoclobber)\n\n self.connection.close()\n settings.DATABASES[self.connection.alias][\"NAME\"] = test_database_name\n self.connection.settings_dict[\"NAME\"] = test_database_name\n\n TestMigrateCommand().execute(\n verbosity=verbosity,\n interactive=False,\n database=self.connection.alias,\n test_database=True,\n test_flush=True,\n )\n\n # We then serialize the current state of the database into a string\n # and store it on the connection. This slightly horrific process is so people\n # who are testing on databases without transactions or who are using\n # a TransactionTestCase still get a clean database on every test run.\n if serialize:\n self.connection._test_serialized_contents = self.serialize_db_to_string()\n\n call_command('createcachetable', database=self.connection.alias)\n\n # Ensure a connection for the side effect of initializing the test database.\n self.connection.ensure_connection()\n\n return test_database_name\n\n\ndef setup_databases(verbosity, interactive, **kwargs):\n from django.db import connections, DEFAULT_DB_ALIAS\n\n # First pass -- work out which databases actually need to be created,\n # and which ones are test mirrors or duplicate entries in DATABASES\n mirrored_aliases = {}\n test_databases = {}\n dependencies = {}\n default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()\n for alias in connections:\n connection = connections[alias]\n test_settings = connection.settings_dict['TEST']\n if test_settings['MIRROR']:\n # If the database is marked as a test mirror, save\n # the alias.\n mirrored_aliases[alias] = test_settings['MIRROR']\n else:\n # Store a tuple with DB parameters that uniquely identify it.\n # If we have two aliases with the same values for that tuple,\n # we only need to create the test database once.\n item = test_databases.setdefault(\n connection.creation.test_db_signature(),\n (connection.settings_dict['NAME'], set())\n )\n item[1].add(alias)\n\n if 'DEPENDENCIES' in test_settings:\n dependencies[alias] = test_settings['DEPENDENCIES']\n else:\n if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:\n dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])\n\n # Second pass -- actually create the databases.\n old_names = []\n mirrors = []\n\n for signature, (db_name, aliases) in dependency_ordered(\n test_databases.items(), dependencies):\n test_db_name = None\n # Actually create the database for the first connection\n for alias in aliases:\n connection = connections[alias]\n if test_db_name is None:\n test_db_name = create_test_db(connection.creation,\n verbosity,\n autoclobber=not interactive,\n serialize=connection.settings_dict.get(\"TEST\", {}).get(\"SERIALIZE\", True),\n db_name_suffix=kwargs.get('db_name_suffix'),\n )\n destroy = True\n else:\n connection.settings_dict['NAME'] = test_db_name\n destroy = False\n old_names.append((connection, db_name, destroy))\n\n for alias, mirror_alias in mirrored_aliases.items():\n mirrors.append((alias, 
connections[alias].settings_dict['NAME']))\n        connections[alias].settings_dict['NAME'] = (\n            connections[mirror_alias].settings_dict['NAME'])\n\n    return old_names, mirrors\n\n","sub_path":"django_testmigrate/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"395521561","text":"class Solution:\n    def intervalListIntersection(self, A, B):\n        ans = []\n        i = j = 0\n\n        while i < len(A) and j < len(B):\n            left = max(A[i][0], B[j][0])\n            right = min(A[i][1], B[j][1])\n            if left <= right:\n                ans.append([left, right])\n            if A[i][1] < B[j][1]:\n                i += 1\n            else:\n                j += 1\n\n        return ans\n","sub_path":"996-Interval-List-Intersections/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"429544338","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nwith open('hail/hail_pip_version') as f:\n    hail_pip_version = f.read().strip()\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetup(\n    name=\"hail\",\n    version=hail_pip_version,\n    author=\"Hail Team\",\n    author_email=\"hail-team@broadinstitute.org\",\n    description=\"Scalable library for exploring and analyzing genomic data.\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://hail.is\",\n    packages=find_packages(),\n    package_data={\n        '': ['hail-all-spark.jar', 'hail_pip_version', 'hail_version']},\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n    ],\n    python_requires=\">=3.6\",\n    install_requires=[\n        'numpy<2',\n        'pandas>0.22,<0.24',\n        'matplotlib<3',\n        'seaborn<0.9',\n        'bokeh<0.14',\n        'pyspark>=2.2,<2.3',\n        'parsimonious<0.9',\n        'ipykernel<5',\n        'decorator<5',\n        'requests>=2.21.0,<2.21.1',\n    ]\n)\n","sub_path":"hail/python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"156812715","text":"from model.user import User\n\n\nclass Accounting(User):\n\n    @staticmethod\n    def init_db(db):\n        sql = \"\"\"\n        CREATE TABLE IF NOT EXISTS factor (\n            factor_id int NOT NULL AUTO_INCREMENT UNIQUE PRIMARY KEY,\n            patient_id int NOT NULL,\n            price int,\n            payed int,\n            description text,\n            FOREIGN KEY (patient_id) REFERENCES user(user_id)\n        )\n        \"\"\"\n        cursor = db.cursor()\n        cursor.execute(sql)\n        cursor.close()\n        db.commit()\n\n    def calculate_user_drug_debt(self, db, user_id):\n\n        drugs_debt_sql = \"\"\"\n        SELECT SUM(d.price)\n        FROM drug d\n        INNER JOIN prescription_drug p_d\n        ON p_d.drug_id = d.drug_id\n        INNER JOIN prescription p\n        ON p.prescription_id = p_d.prescription_id\n        WHERE p.patient_id = %s\n        \"\"\"\n        cursor = db.cursor()\n        cursor.execute(drugs_debt_sql, (user_id,))\n        # fetchone() returns a 1-tuple; SUM() is NULL when no rows match, so default to 0\n        price = int(cursor.fetchone()[0] or 0)\n        cursor.close()\n        return price\n\n    def calculate_user_test_debt(self, db, user_id):\n        tests_debt_sql = \"\"\"\n        SELECT SUM(t.price)\n        FROM test t\n        INNER JOIN diagnosis_test d_t\n        ON d_t.test_id = t.test_id\n        INNER JOIN diagnosis d\n        ON d.diagnosis_id = d_t.diagnosis_id\n        WHERE d.patient_id = %s\n        \"\"\"\n        cursor = db.cursor()\n        cursor.execute(tests_debt_sql, (user_id,))\n        # same NULL/empty-result handling as above\n        price = int(cursor.fetchone()[0] or 0)\n        cursor.close()\n        return price\n\n\n    def calculate_user_debt(self, db):\n        patient_id = input(\"What is Patient ID ?\")\n        tests_debt = 
self.calculate_user_test_debt(db, patient_id)\n drugs_debt = self.calculate_user_drug_debt(db, patient_id)\n print(tests_debt + drugs_debt)\n\n def show_menu(self, db):\n print(\" 1 - How much should patient pay?\")\n choice = int(input())\n if choice == 1:\n self.calculate_user_debt(db)","sub_path":"model/accounting.py","file_name":"accounting.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"197636169","text":"import nbformat\r\nimport sys\r\nimport os\r\nimport pickle\r\nimport numpy as np\r\nimport pandas as pd\r\nimport torch\r\nimport random\r\nimport collections\r\nfrom nbconvert import PythonExporter\r\nimport json, copy\r\nimport itertools\r\npd.set_option('display.max_columns', None)\r\npd.set_option('precision', 4)\r\nnp.set_printoptions(precision=4)\r\n\r\nsys.argv.append(\"notebooks/debug_example.ipynb\")\r\n\r\ndir_path = os.path.dirname(os.path.realpath(sys.argv[1]))\r\nfilename = sys.argv[1].split('\\\\')[-1].split('/')[-1]\r\nfilename_no_suffix = filename[:filename.rfind(\".\")]\r\nsuffix = filename[filename.rfind(\".\"):]\r\n\r\ndata_path = os.path.join(dir_path, filename_no_suffix)\r\noutput_path = os.path.join(dir_path, filename_no_suffix + \"_m\" + suffix)\r\njson_path = os.path.join(dir_path, filename_no_suffix + \"_comment.json\")\r\njson_out_path = os.path.join(data_path, \"result.json\")\r\n\r\nblanks = \"\\t- \"\r\npostfix = \"[auto]\"\r\ngraph = collections.defaultdict(list)\r\n\r\n### sample\r\n'''\r\ninput:\r\n- x, int\r\n- y, shape(1,2) of float\r\noutput:\r\n- z, class MyClass\r\n'''\r\n\r\n\r\ndef highlight_text(text):\r\n return \"<p style='color:Tomato;'>\" + text + \"</p>\"\r\n\r\n\r\ndef add_emphasis(table):\r\n for col in table:\r\n if col.endswith(postfix):\r\n table[col] = table[col].map('<b>{}</b>'.format)\r\n # table[col] = table[col].map('**{}**'.format)\r\n\r\ndef print_error(msg):\r\n print(\"\\033[91m\", msg, \"\\033[0m\")\r\n\r\n\r\nclass Variable(object):\r\n def __init__(self, var, name, cellnum, outflag):\r\n self.var = var\r\n self.name = name\r\n self.cellnum = cellnum\r\n self.outflag = outflag\r\n self.json_map = {\r\n \"type\": str(type(var))[8:-2],\r\n \"shape\": \"\",\r\n \"hint\": \"\",\r\n \"value\": \"\"\r\n }\r\n self.comment = \"- \" + name + \", \" + self.initial_comment()\r\n\r\n def initial_comment(self):\r\n var = self.var\r\n # if str(type(var)) == \"<class 'sklearn.pipeline.Pipeline'>\":\r\n # return \"transforms: \" + str(var.steps)\r\n if str(type(var)) == \"<class 'sklearn.utils.Bunch'>\":\r\n return str(type(var))\r\n if self.outflag:\r\n self.json_map[\"value\"] = str(var)\r\n return str(type(var)) + \", \" + str(var)\r\n else:\r\n return str(type(var))\r\n\r\n def add_data_distribute(self):\r\n pass\r\n\r\n def check_rel(self, variable):\r\n return 5\r\n\r\n def check_copy(self, variable):\r\n pass\r\n\r\n def compare_to(self, variable):\r\n pass\r\n\r\n\r\nclass List(Variable):\r\n def __init__(self, var, name, cellnum, outflag):\r\n super().__init__(var, name, cellnum, outflag)\r\n # self.comment = \"- \" + name + \", \" + self.initial_comment()\r\n\r\n def initial_comment(self):\r\n length = min(len(self.var), 5)\r\n comments = [\r\n dispatch_gen(self.var[i], self.name + \"[\" + str(i) + \"]\", -1,\r\n -1).comment for i in range(length)\r\n ]\r\n self.json_map[\"value\"] = comments\r\n self.json_map[\"shape\"] = \"(1, {})\".format(str(length))\r\n return \"list length of \" + str(length) + \", sample:\\n\\t\" + 
\"\\n\\t\".join(\r\n comments)\r\n\r\n def check_rel(self, variable):\r\n rel_score = 5\r\n if type(variable.var) != list:\r\n return rel_score\r\n if self.name == variable.name:\r\n rel_score = 3\r\n elif len(self.var) == len(variable.var):\r\n rel_score = 4\r\n return rel_score\r\n\r\n def compare_to(self, variable):\r\n if len(self.var) == len(variable.var):\r\n example = [\r\n str(variable.var[i]) + \" -> \" + str(self.var[i])\r\n for i in range(min(len(self.var), 5))\r\n ]\r\n self.json_map[\"value\"] = str(example)\r\n self.comment += \"\\n\" + blanks + \"example changes: \" + str(example)\r\n\r\n\r\nclass NdArray(Variable):\r\n def __init__(self, var, name, cellnum, outflag):\r\n super().__init__(var, name, cellnum, outflag)\r\n # self.comment = \"- \" + name + \", \" + self.initial_comment()\r\n\r\n def initial_comment(self):\r\n self.json_map[\"shape\"] = str(np.shape(self.var))\r\n self.json_map[\"type\"] += \", dtype: \" + str(np.array(self.var).dtype)\r\n return \"shape\" + str(np.shape(self.var)) + \" of \" + str(\r\n np.array(self.var).dtype)\r\n\r\n def add_data_distribute(self):\r\n # blanks = \" \" * len(\"- \" + self.name + \", \")\r\n blanks = \"\\t- \"\r\n array = np.asarray(self.var)\r\n # only support all numerical values\r\n if not np.issubdtype(array.dtype, np.number):\r\n return\r\n _mean = np.mean(array)\r\n _variance = np.var(array)\r\n _max, _min = np.max(array), np.min(array)\r\n comment_str = \"mean: \" + \"%.4f\" % _mean + \", variance: \" + \"%.4f\" % _variance + \", range: [\"\r\n if int(_min) == float(_min):\r\n comment_str += str(_min) + \", \" + str(_max) + \"]\"\r\n else:\r\n comment_str += \"%.4f, %.4f]\" % (_min, _max)\r\n self.json_map[\"value\"] = comment_str\r\n self.comment += \"\\n\" + blanks + comment_str\r\n\r\n def check_rel(self, variable):\r\n rel_score = 5\r\n if not type(variable.var) in [np.ndarray, pd.DataFrame]:\r\n return rel_score\r\n if np.shape(self.var)[0] == np.shape(variable.var)[0]:\r\n rel_score = 4\r\n return rel_score\r\n\r\n def check_copy(self, variable):\r\n if np.array_equal(self.var, variable.var):\r\n self.comment += \"\\n\" + blanks\r\n if self.name == variable.name:\r\n self.comment += highlight_text(\"no change in the cell\")\r\n # self.json_map[\"hint\"] += \"no change in the cell; \"\r\n else:\r\n self.comment += highlight_text(\"copy of \" + variable.name)\r\n self.json_map[\"hint\"] += \"copy of \" + variable.name + \"; \"\r\n return True\r\n return False\r\n\r\n def compare_to(self, variable):\r\n if self.check_copy(variable):\r\n return\r\n ## check submatrix\r\n var_a = np.asarray(self.var)\r\n var_b = np.asarray(variable.var)\r\n if len(np.shape(var_a)) != 2 or len(np.shape(var_b)) != 2:\r\n return\r\n if np.shape(var_a)[0] == np.shape(var_b)[0]:\r\n if np.shape(var_a)[1] < np.shape(var_b)[1]:\r\n ls1 = var_a.T.tolist()\r\n ls2 = var_b.T.tolist()\r\n r1 = [element for element in ls1 if element in ls2]\r\n r2 = [element for element in ls2 if element in ls1]\r\n if r1 == r2:\r\n self.comment += \"\\n\" + blanks + highlight_text(\r\n \"truncated from \" + variable.name)\r\n self.json_map[\r\n \"hint\"] += \"truncated from \" + variable.name + \"; \"\r\n\r\n\r\nclass DataFrame(Variable):\r\n def __init__(self, var, name, cellnum, outflag):\r\n super().__init__(var, name, cellnum, outflag)\r\n self.change_exp = []\r\n self.copy = False\r\n self.columns = list(map(lambda x: str(x), var.columns))\r\n # self.comment = \"- \" + name + \", \" + self.initial_comment()\r\n\r\n def initial_comment(self):\r\n ret = 
\"shape\" + str(np.shape(self.var))\r\n # count column by type\r\n type_cnt = {}\r\n for t in self.var.dtypes:\r\n if t not in type_cnt.keys():\r\n type_cnt[t] = 1\r\n else:\r\n type_cnt[t] += 1\r\n col_types = \", column types: {\"\r\n type_ls = [str(key) + \": \" + str(type_cnt[key]) for key in type_cnt]\r\n col_types += \", \".join(type_ls) + \"}\"\r\n # ret += \", sample:\\n\" + str(var.head(1))\r\n self.json_map[\"shape\"] = str(np.shape(self.var))\r\n self.json_map[\"type\"] = \"DataFrame\" + col_types\r\n return ret + col_types\r\n\r\n def add_data_distribute(self):\r\n if self.copy:\r\n return\r\n array = np.asarray(self.var)\r\n if len(self.change_exp) > 0:\r\n _examples = self.change_exp\r\n _example_names = [\r\n \"example_\" + str(i) for i in range(len(_examples))\r\n ]\r\n else:\r\n max_len = min(self.var.shape[0], 5)\r\n _examples = [self.var.iloc[i] for i in range(max_len)]\r\n _example_names = [\"example_\" + str(i) for i in range(max_len)]\r\n\r\n def get_range(col):\r\n if str(col.dtype) == \"category\":\r\n return len(col.unique())\r\n if np.issubdtype(col.dtype, np.number):\r\n return [np.min(col), np.max(col)]\r\n else:\r\n return len(col.unique())\r\n\r\n _type = [str(self.var[col].dtype) for col in self.var]\r\n _range = [str(get_range(self.var[col])) for col in self.var]\r\n\r\n table = pd.DataFrame([_type] + _examples + [_range],\r\n columns=self.columns)\r\n\r\n table.insert(0, self.name + postfix, [\"type\"] + _example_names + [\"range\"])\r\n\r\n # add_emphasis(table)\r\n\r\n def reindex_column(columns):\r\n ls1 = list(filter(lambda col: col.endswith(postfix), columns))\r\n ls2 = list(filter(lambda col: not col.endswith(postfix), columns))\r\n return ls1 + ls2\r\n\r\n table = table.reindex(columns=reindex_column(table.columns))\r\n comment_str = \"\\n\\n\" + table.to_markdown()\r\n self.json_map[\"value\"] = json.loads(table.to_json())\r\n self.comment += comment_str\r\n\r\n def check_rel(self, variable):\r\n '''\r\n Score:\r\n 0 - identical name\r\n 1 - identical content\r\n 2 - identical shape and type\r\n 3 - identical shape and different type\r\n 4 - different shape but relevant\r\n 5 - irrelevant\r\n '''\r\n if type(variable.var) != pd.core.frame.DataFrame:\r\n return 5\r\n rel_score = 5\r\n if self.name == variable.name:\r\n rel_score = 0\r\n elif self.var.equals(variable.var):\r\n rel_score = 1\r\n elif np.shape(self.var) == np.shape(variable.var):\r\n if self.var.dtypes.equals(variable.var.dtypes):\r\n rel_score = 2\r\n else:\r\n rel_score = 3\r\n else:\r\n if np.shape(self.var)[0] == np.shape(variable.var)[0] or np.shape(\r\n self.var)[1] == np.shape(variable.var)[1]:\r\n rel_score = 4\r\n return rel_score\r\n\r\n def check_copy(self, variable):\r\n if self.var.equals(variable.var):\r\n self.comment += \"\\n\" + blanks\r\n if self.name == variable.name:\r\n self.comment += highlight_text(\"no change in the cell\")\r\n # self.json_map[\"hint\"] += \"no change in the cell; \"\r\n self.copy = True\r\n else:\r\n self.comment += highlight_text(\"copy of \" + variable.name)\r\n self.json_map[\"hint\"] += \"copy of \" + variable.name + \"; \"\r\n return True\r\n return False\r\n\r\n def add_change_comment(self, variable, convert, change, diffset):\r\n if change:\r\n self.comment += \"\\n\" + blanks\r\n comment_str = \"\"\r\n for key in change:\r\n comment_str += str(\r\n change[key]) + \" \" + str(key) + \" columns changed\"\r\n self.comment += highlight_text(comment_str)\r\n self.json_map[\"hint\"] += comment_str + \"; \"\r\n if convert:\r\n self.comment += 
\"\n\" + blanks\r\n            comment_str = \"\"\r\n            for key in convert:\r\n                comment_str += str(convert[key]) + \" \" + str(\r\n                    key[1]) + \" columns converted to \" + str(key[0])\r\n            self.comment += highlight_text(comment_str)\r\n            self.json_map[\"hint\"] += comment_str + \"; \"\r\n\r\n        indices = set()\r\n        values = set()\r\n        for col in self.columns:\r\n            if not col.endswith(postfix):\r\n                continue\r\n            # strip the whole postfix marker, not just its last character\r\n            col = col[:-len(postfix)]\r\n            for i in self.var.index:\r\n                try:\r\n                    if str(self.var[col][i]) not in values:\r\n                        if col in diffset or str(variable.var[col][i]) != str(\r\n                                self.var[col][i]):\r\n                            indices.add(i)\r\n                            values.add(str(self.var[col][i]))\r\n                except:\r\n                    pass\r\n            # break after enough sample points\r\n            if len(indices) >= 5:\r\n                break\r\n        row_num = self.var.shape[0]\r\n\r\n        # disable random choice\r\n        # if row_num >= 5:\r\n        #     while len(indices) < 5:\r\n        #         i = random.randint(0, row_num - 1)\r\n        #         indices.add(i)\r\n\r\n        def change_str(col, idx):\r\n            if not col.endswith(postfix):\r\n                return str(self.var[col][idx])\r\n            col = col[:-len(postfix)]\r\n            if col in diffset:\r\n                return str(self.var[col][idx])\r\n            return str(variable.var[col][idx]) + \" -> \" + str(\r\n                self.var[col][idx])\r\n\r\n        for idx in indices:\r\n            self.change_exp.append(\r\n                [change_str(col, idx) for col in self.columns])\r\n\r\n    def check_difference(self, variable):\r\n        col_a = set(self.var.columns)\r\n        col_b = set(variable.columns)\r\n        a_minus_b = col_a.difference(col_b)\r\n        b_minus_a = col_b.difference(col_a)\r\n        # if a_minus_b and b_minus_a:\r\n        #     self.comment += \"\n\" + blanks\r\n        #     comment_str = \"\"\r\n        #     if len(b_minus_a) == 1:\r\n        #         item = list(b_minus_a)[0]\r\n        #         filter(lambda x: )\r\n        if a_minus_b or b_minus_a:\r\n            self.comment += \"\n\" + blanks\r\n            comment_str = \"\"\r\n            if a_minus_b:\r\n                comment_str += \"add {0} columns; \".format(len(a_minus_b))\r\n            if b_minus_a:\r\n                comment_str += \"remove {0} columns; \".format(len(b_minus_a))\r\n\r\n            # add *s for such cols\r\n            self.comment += highlight_text(comment_str)\r\n            self.json_map[\"hint\"] += comment_str\r\n\r\n            for i in range(len(self.var.dtypes)):\r\n                if self.var.columns[i] in a_minus_b:\r\n                    self.columns[i] += postfix\r\n        # return outside the if-block so callers can always unpack the pair\r\n        return a_minus_b, b_minus_a\r\n\r\n    def check_change(self, variable, diffset):\r\n        convert = {}\r\n        change = {}\r\n        var_a = self.var\r\n        var_b = variable.var\r\n        for i in range(len(var_a.dtypes)):\r\n            column_name = var_a.columns[i]\r\n            if column_name in diffset:\r\n                continue\r\n            if str(var_b[column_name].dtype) != str(var_a[column_name].dtype):\r\n                type_pair = (var_a[column_name].dtype,\r\n                             var_b[column_name].dtype)\r\n                self.columns[i] += postfix\r\n                if type_pair not in convert.keys():\r\n                    convert[type_pair] = 1\r\n                else:\r\n                    convert[type_pair] += 1\r\n            elif not var_b[column_name].equals(var_a[column_name]):\r\n                self.columns[i] += postfix\r\n                if var_a.dtypes[i] not in change.keys():\r\n                    change[var_a.dtypes[i]] = 1\r\n                else:\r\n                    change[var_a.dtypes[i]] += 1\r\n        self.add_change_comment(variable, convert, change, diffset)\r\n\r\n    def compare_to(self, variable):\r\n        if self.check_copy(variable):\r\n            return\r\n        # only column changed\r\n        if np.shape(self.var)[0] == np.shape(variable.var)[0]:\r\n            # check difference first\r\n            a_minus_b, b_minus_a = self.check_difference(variable)\r\n            # check convert/change in common columns\r\n            self.check_change(variable, a_minus_b)\r\n        elif np.shape(self.var)[1] == np.shape(variable.var)[1]:\r\n            if np.shape(self.var)[0] < np.shape(variable.var)[0]:\r\n                l = len(self.var)\r\n                # if self.var.equals(variable.var.iloc[:l]) or self.var.equals(\r\n                # 
variable.var.iloc[-l:]):\r\n\r\n self.comment += \"\\n\" + blanks\r\n comment_str = \"remove \" + str(\r\n np.shape(variable.var)[0] -\r\n np.shape(self.var)[0]) + \" rows from \" + variable.name\r\n self.comment += highlight_text(comment_str)\r\n self.json_map[\"hint\"] += comment_str + \"; \"\r\n if list(self.var.columns) != list(variable.columns):\r\n set_a = set(self.var.columns)\r\n set_b = set(variable.columns)\r\n if set_a == set_b:\r\n self.comment += \"\\n\" + blanks\r\n self.comment += highlight_text(\"rearrange columns\")\r\n self.json_map[\"hint\"] += \"rearrange columns\" + \"; \"\r\n\r\n\r\nclass PatternSynthesizer(object):\r\n\r\n '''\r\n df1: before, df2: after, col: the target column\r\n '''\r\n def __init__(self, DF1, DF2, info):\r\n self.df1 = DF1.var\r\n self.df2 = DF2.var\r\n self.df1_name = DF1.name\r\n self.df2_name = DF2.name\r\n self.cols1 = list(self.df1.columns)\r\n self.cols2 = list(self.df2.columns)\r\n self.srccols = [col for col in info.get if col in self.cols1] \r\n self.descols = [col for col in info.set if col in self.cols1] \r\n self.partition = collections.defaultdict(list)\r\n if self.df1_name in info.par:\r\n self.partition = copy.deepcopy(info.par[self.df1_name])\r\n self.syn_stack = []\r\n self.summary = collections.defaultdict(list)\r\n self.markers = {}\r\n \r\n def synthesis_append(self, pattern, from_col, to_col):\r\n self.syn_stack.append((pattern, from_col, to_col))\r\n if pattern == \"rearrange\":\r\n self.summary[\"other_patterns\"].append({pattern: \r\n ','.join(from_col) + '|' + ','.join(to_col)})\r\n elif len(to_col) > 0:\r\n self.summary[','.join(from_col) + '|' + ','.join(to_col)].append(pattern)\r\n else:\r\n self.summary[\"other_patterns\"].append({pattern: ','.join(from_col)})\r\n\r\n def check_fillna_only(self, df1, df2, from_col, to_col):\r\n cmp_df = df2[to_col].compare(df1[from_col])\r\n return cmp_df[\"other\"].isnull().all()\r\n\r\n def check_fillna(self, df1, df2, from_col, to_col):\r\n return df1[from_col].isnull().values.any() and not df2[to_col].isnull().values.any()\r\n \r\n def check_str(self, df, col):\r\n return pd.api.types.is_string_dtype(df[col])\r\n \r\n def check_int(self, df, col):\r\n return pd.api.types.is_integer_dtype(df[col])\r\n\r\n def check_float(self, df, col):\r\n return pd.api.types.is_float_dtype(df[col])\r\n\r\n def check_num(self, df, col):\r\n return pd.api.types.is_numeric_dtype(df[col])\r\n\r\n def check_cat(self, df, col):\r\n return pd.api.types.is_categorical_dtype(df[col])\r\n\r\n def check_typeconvert(self, df1, df2, from_col, to_col):\r\n def check_transform(f):\r\n try:\r\n if df1[from_col].map(f).equals(df2[to_col]):\r\n return\r\n except:\r\n pass\r\n # print_error(\"error when check transform: \" + str(df1[from_col].dtype) + \"->\" + str(df2[to_col].dtype))\r\n if df1[from_col].nunique() > df2[to_col].nunique():\r\n self.synthesis_append(\"merge\", [from_col], [to_col])\r\n elif self.check_num(df1, from_col):\r\n self.synthesis_append(\"num_transform\", [from_col], [to_col])\r\n elif self.check_str(df1, from_col):\r\n self.synthesis_append(\"str_transform\", [from_col], [to_col])\r\n else:\r\n self.synthesis_append(\"map\", [from_col], [to_col]) \r\n\r\n # converted to str to avoid bugs when dtype == Categorical\r\n if str(df1[from_col].dtype) != str(df2[to_col].dtype):\r\n if self.check_float(df2, to_col):\r\n self.synthesis_append(\"float\", [from_col], [to_col])\r\n check_transform(float)\r\n elif self.check_cat(df2, to_col):\r\n self.synthesis_append(\"discretize\", [from_col], 
[to_col])\r\n elif self.check_int(df2, to_col):\r\n l = df2[to_col].unique()\r\n if sorted(l) == list(range(min(l), max(l)+1)):\r\n self.synthesis_append(\"encode\", [from_col], [to_col])\r\n else:\r\n self.synthesis_append(\"int\", [from_col], [to_col])\r\n check_transform(int)\r\n elif self.check_str(df2, to_col):\r\n self.synthesis_append(\"str\", [from_col], [to_col])\r\n check_transform(str)\r\n else:\r\n self.synthesis_append(\"type_convert\", [from_col], [to_col])\r\n return True\r\n return False\r\n\r\n def check_column(self, df1, df2, from_col, to_col):\r\n \r\n if not self.check_typeconvert(df1, df2, from_col, to_col):\r\n # check the case when only different values are null values\r\n if self.check_fillna_only(df1, df2, from_col, to_col):\r\n self.synthesis_append(\"fillna\", [from_col], [to_col])\r\n return\r\n if df1[from_col].nunique() > df2[to_col].nunique():\r\n self.synthesis_append(\"merge\", [from_col], [to_col])\r\n elif self.check_num(df1, from_col):\r\n self.synthesis_append(\"num_transform\", [from_col], [to_col])\r\n elif self.check_str(df1, from_col):\r\n self.synthesis_append(\"str_transform\", [from_col], [to_col])\r\n else:\r\n self.synthesis_append(\"map\", [from_col], [to_col]) \r\n \r\n if self.check_fillna(df1, df2, from_col, to_col):\r\n self.synthesis_append(\"fillna\", [from_col], [to_col])\r\n\r\n def check_removecol(self, df1, df2):\r\n self.removedcols = [x for x in self.cols1 if x not in self.cols2]\r\n if self.removedcols: \r\n self.cols1 = [x for x in self.cols1 if x in self.cols2]\r\n self.synthesis_append(\"removecol\", self.removedcols, [])\r\n return True\r\n return False\r\n \r\n def check_rearrange(self, df1, df2):\r\n if self.cols1 != self.cols2 and set(self.cols1) == set(self.cols2):\r\n self.synthesis_append(\"rearrange\", self.cols1, self.cols2)\r\n return True\r\n return False\r\n\r\n def check_removerow(self, df1, df2):\r\n # [TODO] add other cases: duplicates/etc.\r\n # use index to track row mappings\r\n if len(df2) < len(df1) and set(df2.index).issubset(set(df1.index)):\r\n removed = df1.loc[~df1.index.isin(df2.index)]\r\n left = df1.loc[df1.index.isin(df2.index)]\r\n removed_null = removed.isnull()\r\n left_null = left.isnull()\r\n # all removed rows contain nan\r\n if removed_null.any(axis=1).all():\r\n # select columns that removed rows all contain nan & remaining rows contain no nan\r\n all_nan = set(removed_null.columns[removed_null.all()])\r\n no_nan = set(left_null.columns[~left_null.any()])\r\n self.synthesis_append(\"removerow_null\", list(all_nan & no_nan), [])\r\n elif len(left.merge(removed)) == len(left):\r\n self.synthesis_append(\"removerow_dup\", [], [])\r\n else:\r\n self.synthesis_append(\"removerow\", [str(len(removed))], [])\r\n return True\r\n return False\r\n\r\n def search(self, df1, df2):\r\n cols_dummy = [col for col in self.colsnew if set(df2[col].unique()).issubset({0, 1})]\r\n cols_left = [col for col in self.colsnew if col not in cols_dummy]\r\n if cols_dummy:\r\n # should check whether dummies are true\r\n self.synthesis_append(\"one_hot_encoding\", self.srccols, cols_dummy)\r\n for col in cols_left:\r\n if col in graph:\r\n src = list(set(self.srccols) & set(graph[col]))\r\n else:\r\n src = self.srccols\r\n if len(src) == 1:\r\n self.check_column(df1, df2, src[0], col)\r\n elif self.check_num(df2, col):\r\n self.synthesis_append(\"num_transform\", src, [col])\r\n elif self.check_str(df2, col):\r\n self.synthesis_append(\"str_transform\", src, [col])\r\n else:\r\n self.synthesis_append(\"create\", src, 
[col])\r\n\r\n for col in self.colschange:\r\n self.check_column(df1, df2, col, col)\r\n \r\n # generate default partition\r\n MAGIC_BOUND = 25\r\n if not self.partition:\r\n paths = collections.defaultdict(list)\r\n for col in self.colsnew:\r\n if df2[col].nunique() > MAGIC_BOUND:\r\n continue\r\n for i in df2.index:\r\n paths[i].append(str(df2[col].at[i]))\r\n for col in self.colschange:\r\n # look at diff\r\n if df2[col].compare(df1[col])[\"self\"].nunique() > MAGIC_BOUND:\r\n continue\r\n for i in df2.index:\r\n if df2[col].at[i] == df1[col].at[i]:\r\n paths[i].append(\"DUMMY\")\r\n else:\r\n paths[i].append(str(df2[col].at[i]))\r\n for k, v in paths.items():\r\n self.partition[str(tuple(v))].append(k)\r\n # print(self.partition.keys())\r\n\r\n\r\n def check(self, df1, df2):\r\n if set(self.cols1).isdisjoint(set(self.cols2)):\r\n return\r\n if len(df1) < len(df2):\r\n return\r\n if self.check_removerow(df1, df2):\r\n # if index is reset this might lead to error?\r\n df1 = df1.loc[df2.index]\r\n # rows not removed -> index not subset -> irrelevant dfs\r\n if len(df1) > len(df2):\r\n return\r\n self.check_removecol(df1, df2)\r\n self.check_rearrange(df1, df2)\r\n self.colsnew = [col for col in self.cols2 if col not in self.cols1] # set(self.cols2).difference(set(self.cols1))\r\n self.colschange = [col for col in self.cols1 if not df1[col].equals(df2[col])]\r\n # print(self.colsnew, self.colschange)\r\n if self.colsnew or self.colschange:\r\n self.search(df1, df2)\r\n if self.syn_stack:\r\n return self.gen_table(df1, df2)\r\n return\r\n\r\n def gen_table(self, df1, df2):\r\n df = df2.copy()\r\n\r\n \r\n # [TODO] special info for removed col & src cols\r\n for col in self.removedcols:\r\n df[col] = df1[col]\r\n\r\n # generate extra info\r\n def get_range(col):\r\n if str(col.dtype) == \"category\":\r\n return len(col.unique())\r\n if np.issubdtype(col.dtype, np.number):\r\n return [np.min(col), np.max(col)]\r\n else:\r\n return len(col.unique())\r\n _type = {col:str(df[col].dtype) for col in df if col not in self.colschange}\r\n _range = {col: str(get_range(df[col])) for col in df if col not in self.colschange}\r\n\r\n # build data flow\r\n for col in self.colschange:\r\n _type[col] = str(df1[col].dtype) + \"->\" + str(df[col].dtype)\r\n _range[col] = str(get_range(df1[col])) + \"->\" + str(get_range(df[col]))\r\n df[col] = df1[col].astype(str) + ['->']*len(df1) + df[col].astype(str)\r\n\r\n \r\n # sort examples \r\n new_df = pd.DataFrame()\r\n if self.partition:\r\n # sort self.partition first by frequency\r\n self.partition = dict(sorted(self.partition.items(), key=lambda item: len(item[1]), reverse=True))\r\n for k, l in dict(self.partition).items():\r\n self.markers[k] = len(new_df)\r\n new_df = new_df.append(df.loc[l])\r\n else:\r\n new_df = df\r\n \r\n df = pd.concat([pd.DataFrame([_type, _range]), new_df], ignore_index=True)\r\n\r\n # rearrange cols to make changed/new cols first\r\n colsleft = [col for col in df.columns if col not in self.colsnew + self.colschange + self.removedcols]\r\n colssrc = [col for col in colsleft if col in self.srccols]\r\n colsleft = [col for col in colsleft if col not in self.srccols]\r\n df = df.reindex(columns = self.removedcols + self.colschange + self.colsnew + colssrc + colsleft)\r\n\r\n def rename(col):\r\n if col in self.colschange:\r\n return col + \"*\" + postfix\r\n elif col in self.colsnew:\r\n return col + \"+\" + postfix\r\n elif col in self.removedcols:\r\n return col + \"-\" + postfix\r\n elif col in self.srccols:\r\n return col + \">\" + 
postfix\r\n return col\r\n\r\n df.rename(rename, axis =1 ,inplace = True)\r\n\r\n # print(self.markers)\r\n return json.loads(df.to_json())\r\n \r\n\r\nclass Info(object):\r\n def __init__(self, info, cellnum):\r\n super().__init__()\r\n self.get = []\r\n self.set = []\r\n self.par = collections.defaultdict(lambda : collections.defaultdict(list))\r\n if info == None:\r\n return\r\n if str(cellnum) in info[\"get\"]:\r\n self.get = info[\"get\"][str(cellnum)]\r\n if str(cellnum) in info[\"set\"]:\r\n self.set = info[\"set\"][str(cellnum)]\r\n if str(cellnum) in info[\"par\"]:\r\n self.par = info[\"par\"][str(cellnum)]\r\n\r\n\r\ndef handlecell(myvars, st, ed, info):\r\n # comments = [\"\\'\\'\\'\"]\r\n comments = []\r\n\r\n # find the first input and output\r\n flags = [var.outflag for var in myvars[st:ed+1]]\r\n first_in = flags.index(0) + st if 0 in flags else -1\r\n first_out = flags.index(1) + st if 1 in flags else -1\r\n\r\n # build json maps\r\n json_map = {\"input\": {}, \"output\": {}, \"summary\": {}, \"partition\": {}, \"table\":{}}\r\n for i in range(st, ed + 1):\r\n if myvars[i].outflag == 0:\r\n json_map[\"input\"][myvars[i].name] = myvars[i].json_map\r\n elif myvars[i].outflag == 1:\r\n json_map[\"output\"][myvars[i].name] = myvars[i].json_map\r\n\r\n '''\r\n for each output variable, find the input that is closest to it\r\n find rel within in/out group\r\n '''\r\n if first_out != -1 and first_in != -1:\r\n for i in range(first_out, ed + 1):\r\n # choose_idx = -1\r\n # cur_score = 5\r\n for j in range(first_in, first_out):\r\n # score = myvars[i].check_rel(myvars[j])\r\n # print(myvars[i].name, myvars[j].name, score)\r\n if type(myvars[i].var) == pd.core.frame.DataFrame:\r\n if type(myvars[j].var) == pd.core.frame.DataFrame:\r\n checker = PatternSynthesizer(myvars[j], myvars[i], info)\r\n result = checker.check(myvars[j].var, myvars[i].var)\r\n if result:\r\n flow = ' '.join([myvars[j].name, \"->\", myvars[i].name])\r\n json_map[\"summary\"][flow] = dict(checker.summary)\r\n json_map[\"partition\"][flow] = checker.markers\r\n json_map[\"table\"][flow] = result\r\n print(myvars[i].cellnum, \":\", flow, \"\\033[96m\", \r\n dict(checker.summary), len(checker.markers), \"\\033[0m\")\r\n \r\n # comments.append(\"\\'\\'\\'\\n\")\r\n\r\n return \"\\n\".join(comments), json_map\r\n\r\n\r\n# def gen_comments(labels, tmpvars):\r\n# comment_str = {}\r\n# max_len = len(labels)\r\n# intervals = {}\r\n# for i in range(max_len):\r\n# curcell = labels[i][0]\r\n# if curcell not in intervals.keys():\r\n# intervals[curcell] = (i, i)\r\n# else:\r\n# intervals[curcell] = (intervals[curcell][0], i)\r\n# json_map = {}\r\n# for key in intervals:\r\n# comment_str[key], inner_json_map = handlecell(key, intervals[key][0],\r\n# intervals[key][1])\r\n# json_map[code_indices[key - 1]] = inner_json_map\r\n# return comment_str, json_map\r\n\r\n\r\ndef dispatch_gen(var, name, cellnum, outflag):\r\n if type(var) == list:\r\n return List(var, name, cellnum, outflag)\r\n elif type(var) in [np.ndarray, pd.Index, pd.Series]:\r\n return NdArray(var, name, cellnum, outflag)\r\n elif type(var) == pd.DataFrame:\r\n return DataFrame(var, name, cellnum, outflag)\r\n else:\r\n return Variable(var, name, cellnum, outflag)\r\n\r\n\r\n# def gen_func_comment(fun_name, fun_map):\r\n# # not considering multiple return types from branches\r\n\r\n# _type = []\r\n# for k, path_map in fun_map.items():\r\n# if k == \"loc\":\r\n# continue\r\n# _type = [\r\n# k + \": \" + str(type(v)) for k, v in path_map[\"args\"][0].items()\r\n# ] + 
[str(type(x)) for x in path_map[\"rets\"][0]]\r\n# break\r\n\r\n# total = sum([path_map[\"count\"] for path_map in list(fun_map.values())[1:]])\r\n\r\n# args_len, rets_len = 0, 0\r\n# examples = []\r\n# for k, path_map in fun_map.items():\r\n# if k == \"loc\":\r\n# continue\r\n# args_len = max(args_len, len(path_map[\"args\"][0]))\r\n# rets_len = max(rets_len, len(path_map[\"rets\"][0]))\r\n# args_list = [[v for k, v in args.items()] for args in path_map[\"args\"]]\r\n# args = [[args[i] for args in args_list] for i in range(args_len)]\r\n# rets = [[rets[i] for rets in path_map[\"rets\"]]\r\n# for i in range(rets_len)]\r\n# examples.append(args + rets +\r\n# ['{:.2g}'.format(path_map[\"count\"] / total)] +\r\n# [path_map[\"count\"]])\r\n\r\n# _columns = [\"args[{:d}]\".format(i) for i in range(args_len)\r\n# ] + [\"rets[{:d}]\".format(i)\r\n# for i in range(rets_len)] + [\"frequency\", \"counts\"]\r\n\r\n# table = pd.DataFrame([_type] +\r\n# sorted(examples, key=lambda x: x[-1], reverse=True),\r\n# columns=_columns)\r\n\r\n# table.insert(0, fun_name + postfix, [\"type\"] +\r\n# [\"example_\" + str(i) for i in range(len(fun_map.keys()) - 1)])\r\n\r\n# # comment = \"'''\\n[function table]\\n\" + str(table) + \"\\n'''\\n\"\r\n# comment = \"\"\r\n# json_map = json.loads(table.to_json())\r\n# return comment, json_map\r\n\r\nif __name__ == \"__main__\":\r\n with open(sys.argv[1], encoding=\"UTF-8\") as f:\r\n file_content = f.read()\r\n notebook = nbformat.reads(file_content, as_version=4)\r\n\r\n # estabish map from line in .py to line in .ipynb\r\n lines = PythonExporter().from_notebook_node(notebook)[0].split(\"\\n\")\r\n code_cells = list(\r\n filter(lambda cell: cell[\"cell_type\"] == \"code\", notebook.cells))\r\n code_indices = list(\r\n filter(lambda i: notebook.cells[i] in code_cells,\r\n range(len(notebook.cells))))\r\n # begin_indices = [\r\n # i + 3 for i in range(len(lines)) if lines[i].startswith(\"# In[\")\r\n # ]\r\n # line_to_idx = {}\r\n # for i, idx in enumerate(begin_indices):\r\n # l = len(notebook.cells[code_indices[i]].source.split(\"\\n\"))\r\n # for j in range(l):\r\n # line_to_idx[idx + j] = (code_indices[i], j)\r\n\r\n # load static comments\r\n # static_comments = {}\r\n # with open(json_path) as f:\r\n # json_tmp_list = json.load(f)\r\n # for [idx, content] in json_tmp_list:\r\n # static_comments[idx] = content\r\n\r\n json_map = {}\r\n info = {}\r\n # funcs = {}\r\n with open(os.path.join(data_path, \"info.json\"), 'r') as j:\r\n info = json.loads(j.read())\r\n graph = info[\"graph\"]\r\n\r\n\r\n for file in os.listdir(data_path):\r\n myvars = []\r\n if file == \"info.json\" or file.endswith(\"_f.dat\"):\r\n continue\r\n elif file.endswith(\".dat\"):\r\n with open(os.path.join(data_path, file), \"rb\") as f:\r\n try:\r\n vars = pickle.load(f)\r\n except: \r\n print_error(\"error when pickle from \" + file)\r\n continue\r\n for i in range(len(vars)):\r\n try:\r\n myvars.append(\r\n dispatch_gen(vars[i][0], vars[i][1][2], vars[i][1][0], vars[i][1][1]))\r\n except:\r\n print_error(\"error when dispatch var \" + vars[i][1][2])\r\n pass\r\n # comments = static_comments[vars[0][1][0]] if vars[0][1][0] in static_comments.keys() else []\r\n _, json_map = handlecell(myvars, 0, len(vars)-1, Info(info, vars[0][1][0]))\r\n \r\n # distributed\r\n with open(os.path.join(data_path, f\"result_{code_indices[vars[0][1][0] - 1]}.json\"), \"w\") as f:\r\n f.write(json.dumps(json_map))\r\n \r\n\r\n # fill not existing entries\r\n # for key, value in json_map.items():\r\n # cat_list = 
["input", "output", "summary", "partition", "table"]\r\n    #     for cat in cat_list:\r\n    #         if cat not in value.keys():\r\n    #             json_map[key][cat] = {}\r\n\r\n    # with open(json_out_path, \"w\") as f:\r\n    #     f.write(json.dumps(json_map))\r\n\r\n\r\n    # comment_str, json_map = gen_comments(labels, tmpvars)\r\n\r\n    # format: [[cellnum, comment] or [funcname, cellnum]]\r\n    # comment should be used later, along with cell number\r\n\r\n    # def insert_to_map(json_map, cell_num, cat, name, value):\r\n    #     if cell_num not in json_map.keys():\r\n    #         json_map[cell_num] = {cat: {name: value}}\r\n    #     elif cat not in json_map[cell_num].keys():\r\n    #         json_map[cell_num][cat] = {name: value}\r\n    #     else:\r\n    #         json_map[cell_num][cat][name] = value\r\n\r\n    # # add function info\r\n    # insert_map = collections.defaultdict(list)\r\n    # for fun_name, fun_map in funcs.items():\r\n    #     # print(lines[fun_map[\"loc\"] - 1])\r\n    #     # affected by \"-s\"\r\n    #     # (i, j) = line_to_idx[fun_map[\"loc\"] -3]\r\n    #     fun_name_no_idx = fun_name[:fun_name.rfind(\"_\")]\r\n    #     cell_num = [v for k, v in static_comments.items() if k == fun_name_no_idx]\r\n    #     assert(len(cell_num) == 1)\r\n    #     comment, func_json_map = gen_func_comment(fun_name, fun_map)\r\n    #     insert_to_map(json_map, cell_num[0], \"function\", fun_name, func_json_map)\r\n    #     # insert_map[i].append((j, comment))\r\n\r\n    # for comment in static_comments:\r\n    #     (i, j) = line_to_idx[comment[0] - 3]\r\n    #     insert_to_map(json_map, i, \"comment\", j, comment[1])\r\n    #     insert_map[i].append((j, \"# [autodocs] \" + comment[1] + \"\\n\"))\r\n\r\n    # for key, value in insert_map.items():\r\n    #     code = notebook.cells[key].source.split(\"\\n\")\r\n    #     for (j, comment) in value:\r\n    #         code[j] = comment + code[j]\r\n    #     notebook.cells[key].source = \"\\n\".join(code)\r\n\r\n    # write comments to new notebooks\r\n    # cur_cell = 0\r\n    # cur_idx = 0\r\n    # insert_list = []\r\n    # for cell in notebook.cells:\r\n    #     if cell[\"cell_type\"] == \"code\":\r\n    #         cur_cell += 1\r\n    #         if cur_cell in comment_str.keys():\r\n    #             comment_cell = nbformat.v4.new_markdown_cell(\r\n    #                 comment_str[cur_cell])\r\n    #             insert_list.append((cur_idx, comment_cell))\r\n    #             cur_idx += 1\r\n    #         cur_idx += 1\r\n\r\n    # for item in insert_list:\r\n    #     notebook.cells.insert(item[0], item[1])\r\n\r\n    # nbformat.write(notebook, output_path)\r\n","sub_path":"analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":39378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"375716521","text":"import time\nimport random\nfrom verify_sorted import verify_sorted\n\ndef split_list(lst):\n    \"\"\"\n    Divide the unsorted list at midpoint into sublists\n    Returns two sublists - left and right\n    Slicing copies each half, so splitting takes O(n) time\n    \"\"\"\n    length = len(lst)\n    midpoint = length//2\n\n    left = lst[:midpoint]\n    right = lst[midpoint:]\n    return left,right\n\n\ndef merge(left, right):\n    \"\"\"\n    Merges two lists, sorting them in the process\n    Returns a new merged list\n\n    Runs in O(n) time\n    \"\"\"\n\n    l = []\n    i = 0\n    j = 0\n\n    while i<len(left) and j<len(right):\n        if left[i] < right[j]:\n            l.append(left[i])\n            i+=1\n        else:\n            l.append(right[j])\n            j+=1\n\n    while i < len(left):\n        l.append(left[i])\n        i+=1\n\n    while j < len(right):\n        l.append(right[j])\n        j+=1\n    return l\n
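\n# (added note) a quick sanity check for merge(); both inputs are assumed\n# to already be sorted, e.g.:\n#   merge([1, 4, 9], [2, 3, 10])  ->  [1, 2, 3, 4, 9, 10]\n#   merge([], [5])                ->  [5]\n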
\ndef merge_sort(lst):\n    \"\"\"\n    Sorts a list in ascending order\n    Returns a new sorted list\n\n    Divide: Find the midpoint of the list and divide into sublists\n    Conquer: Recursively sort the sublists created in previous step\n    Combine: Merge the sorted sublists created in previous step\n\n    Overall sorting time is O(n log(n))\n    \"\"\"\n    if len(lst) <= 1:\n        return lst\n\n    left_lst, right_lst = split_list(lst)\n    left = merge_sort(left_lst)\n    right = merge_sort(right_lst)\n\n    return merge(left, right)\n\n\nlst = [random.randint(0, 1000) for i in range(50)]\n\n\nstart_merge = time.time()\nsorted_list = merge_sort(lst)\nend_merge = time.time()\nprint('Merge sort runtime (s):', end_merge - start_merge)\nprint('Sorted?', verify_sorted(sorted_list))","sub_path":"sorting algorithms/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"42330723","text":"#!/usr/bin/python\n\nimport os\nimport argparse\nimport sqlite3\nimport json\nfrom pathlib import Path\n\n\"\"\"\nVulnerability Management Tracking Tools\n\ninstall and setup script\n\n\"\"\"\n\n#\n# Default Values\n#\nverbose = False\n\ndb_file = \"data.db\"\ndb_schema_file = \"db_schema.json\"\ndb_lookup_tables = \"db_lookups.json\"\n\nargParser = argparse.ArgumentParser()\n\ndef debug( message ):\n\t\"\"\" If verbosity is set, print the message\n\n\t:param message: message to display\n\t:return:\n\t\"\"\"\n\tglobal verbose\n\tif verbose:\n\t\tprint( message )\n\ndef create_db_connection(db_file):\n\t\"\"\" Create a database connection to the SQLite database\n\t\tspecified by the db_file\n\t:param db_file: database file name\n\t:return: Connection object or None\n\t\"\"\"\n\n\ttry:\n\t\tconn = sqlite3.connect(db_file)\n\t\treturn conn\n\texcept NameError as e:\n\t\tprint(e)\n\n\treturn None\n\ndef init():\n\t\"\"\" Initialization function\n\t:param : none\n\t:return: none\n\t\"\"\"\n\tglobal verbose, db_file, db_schema_file, db_lookup_tables\n\n\targParser = argparse.ArgumentParser()\n\n\tdebug( \"Enter init() function.\")\n\t# Parse the command line arguments\n\targParser.add_argument( \"-v\", \"--verbose\", action=\"store_true\", help=\"Turn on verbose mode.\" )\n\targParser.add_argument( \"-d\", \"--db\", help=\"Specify the name of the database file.\")\n\targParser.add_argument( \"-c\", \"--clean\", action=\"store_true\", help=\"Delete the database file.\")\n\targParser.add_argument( \"-s\", \"--schema\", help=\"Set a database schema description file.\")\n\targParser.add_argument( \"-l\", \"--lookup\", help=\"Set lookup table values.\")\n\n\targs = vars( argParser.parse_args() )\n\n\t# Turn on verbose mode\n\tif args['verbose']:\n\t\tverbose = True\n\n\t# Set the database file name\n\tif args['db'] != None:\n\t\tdb_file = args['db']\n\t\tdebug(\"Database file set to \" + db_file + \".\")\n\n\t# Delete the database file if cleaning up\n\tif args['clean']:\n\t\tdebug(\"About to delete the database file \" + db_file + \".\")\n\t\tfile = Path(db_file)\n\t\tif file.exists():\n\t\t\tdebug(\"The database file \" + db_file + \" exists, about to delete it.\")\n\t\t\tos.unlink( db_file )\n\t\t\tif not file.exists():\n\t\t\t\tdebug( \"Database file was deleted.\")\n\t\t\telse:\n\t\t\t\tdebug(\"Unable to delete the database file.\")\n\t\telse:\n\t\t\tdebug(\"No need to delete the database file \" + db_file + \" because it did not exist in the first place.\")\n\n\t# Set the database schema description file\n\tif args['schema'] != None:\n\t\tdb_schema_file = args['schema']\n\tdebug(\"Database schema description file set to \" + db_schema_file + \".\")\n\n\t# Set the lookup table values file\n\tif args['lookup'] != None:\n
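\t\t# (added note) the lookup file is assumed to follow the shape read by\n\t\t# load_lookup_tables() below; the table and value names here are only\n\t\t# illustrative:\n\t\t# {\"lookup-tables\": [{\"table\": \"severity\", \"rows\": [{\"field\": \"name\", \"value\": \"High\"}]}]}\n\t\tdb_lookup_tables = 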
args['lookup']\n\tdebug(\"Lookup table file is \" + db_lookup_tables + \".\")\n\n\tdebug( \"Exiting init() function.\")\n\treturn\n\ndef create_database():\n\t\"\"\"\n\tCreate the database\n\t:param: none\n\t:return: exit code\n\t\"\"\"\n\t# Create the database\n\tdebug(\"Checking to see if the database file \" + db_file + \" already exist...\")\n\tfile = Path(db_file)\n\tif file.exists():\n\t\tdebug(\"The database already exists.\")\n\telse:\n\t\tdebug(\"Database file does not exist. Creating the database file . . .\")\n\n\t# Open the database file\n\tdbConn = create_db_connection(db_file)\n\tdebug(\"Creating database cursor.\")\n\tdbCursor = dbConn.cursor()\n\n\t# Open the schema description file\n\tschemaFilepath = Path(db_schema_file)\n\tif not schemaFilepath.exists():\n\t\tdebug(\"The schema description file [\" + db_schema_file + \"] does not exist.\")\n\t\treturn 1\n\n\tdebug(\"Reading the database schema file.\")\n\twith open(db_schema_file, 'r') as fp:\n\t\ttables = json.load(fp)\n\t\tsql = \"CREATE TABLE \"\n\t\tfor table in tables['tables']:\n\t\t\tdebug(\"Building SQL statement to create the table [\" + table['table'] + \"] . . .\")\n\t\t\tsql = \"CREATE TABLE IF NOT EXISTS \" + table['table'] + \" (\"\n\t\t\t## print( table['fields'] )\n\t\t\tfor field in table['fields']:\n\t\t\t\tdebug(\"Adding field \" + field['name'] + \" of type \" + field['type'])\n\t\t\t\tsql = sql + field['name'] + \" \" + field['type']\n\t\t\t\ttry:\n\t\t\t\t\tfor constraint in field['constraints']:\n\t\t\t\t\t\tdebug(\" with constraint \" + constraint['constraint'])\n\t\t\t\t\t\tsql = sql + \" \" + constraint['constraint'] + \" \"\n\t\t\t\texcept:\n\t\t\t\t\tdebug(\"No constraints\")\n\n\t\t\t\tsql = sql + \",\"\n\t\t\tsql = sql[:-1] + \")\"\n\t\t\tdebug(\"SQL = \" + sql)\n\n\t\t\ttry:\n\t\t\t\tdebug(\"Executing SQL statement:\")\n\t\t\t\tdebug(\" \\\"\" + sql + \"\\\"\")\n\t\t\t\tdbCursor.execute(sql)\n\t\t\texcept NameError as e:\n\t\t\t\tdebug(\"Table create failed for \" + tables['tables'] + \" with error:\")\n\t\t\t\tdebug(e)\n\t\t\t\treturn 1\n\treturn 0\n\ndef load_lookup_tables():\n\t\"\"\"\n\tLoad lookup table in the database\n\n\t:return: exit code\n\t\"\"\"\n\t\n\treturn 0\n\t# Open the database file\n\tdbConn = create_db_connection(db_file)\n\tdbCursor = dbConn.cursor()\n\n\t# Open the lookup table values file\n\tschemaFilepath = Path(db_lookup_tables)\n\tif not schemaFilepath.exists():\n\t\tdebug(\"The lookup table values file [\" + db_lookup_tables + \"] does not exist.\")\n\t\treturn 1\n\n\tdebug(\"Reading lookup table file.\")\n\twith open(db_lookup_tables, 'r') as fp:\n\t\ttables = json.load(fp)\n\n\tfor lookup_table in tables['lookup-tables']:\n\t\tdebug(\"Load values into [\" + lookup_table['table'] + \"]\")\n\t\tfor row in lookup_table['rows']:\n\t\t\tsql = \"INSERT INTO \" + lookup_table['table'] + \" (\"+ row['field'] + \") VALUES (\\\"\" + row['value'] +\"\\\")\"\n\n\t\t\ttry:\n\t\t\t\tdebug(\"Executing SQL statement:\")\n\t\t\t\tdebug(\" \\\"\" + sql + \"\\\"\")\n\t\t\t\t# dbCursor.execute(sql)\n\t\t\texcept NameError as e:\n\t\t\t\tdebug(\"Inserting into \" + tables['tables'] + \" failed with an error:\")\n\t\t\t\tdebug(e)\n\t\t\t\treturn 1\n\tdbConn.commit()\n\n\treturn 0\n\ndef main():\n\t\"\"\" Main procedure goes here\n\t:return: exit code\n\t\"\"\"\n\n\t# Initialize the program\n\tinit()\n\n\t# Create the database\n\tif ( create_database() > 0 ):\n\t\tdebug(\"Failed to create the database.\")\n\t\treturn 1\n\n\t# Load lookup tables\n\tif ( load_lookup_tables() > 0 
):\n\t\tdebug(\"Failed to load lookup tables.\")\n\t\treturn 1\n\n\treturn 0\n\n\nif __name__ == '__main__':\n\texit(main())","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"654532646","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nimport cv2\nimport matplotlib.pyplot as plt\nfrom collections import deque\n\nclass Line():\n \"\"\" Class to receive the characteristics of each line detection \"\"\"\n\n def __init__(self):\n\n self.n = 10\n\n # was the line detected in the last iteration?\n self.detected = False\n\n self.weights = deque(maxlen = self.n)\n\n # x values of the last n fits of the line\n self.recent_xfit = deque(maxlen = self.n)\n\n self.current_xfit = None\n\n #polynomial coefficients of the last n iterations\n self.recent_fit = deque(maxlen = self.n)\n\n #polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])]\n\n #radius of curvature of the line in some units\n self.radius_of_curvature = None\n\n #distance in meters of vehicle center from the line\n self.line_base_pos = None\n\n #difference in fit coefficients between last and new fits\n self.diffs = np.array([0,0,0], dtype='float')\n\n #x values for detected line pixels\n self.allx = None\n\n #y values for detected line pixels\n self.ally = None\n\n def fit(self, image):\n\n originaly, originalx = image.nonzero()\n try:\n fit = np.polyfit(originaly, originalx, 2)\n # weight data according to how much of a y spread we have\n # less information affects averages less\n weight = np.max(originaly) - np.min(originaly)\n self.weights.append(weight)\n except Exception as e:\n fit = self.current_fit\n self.weights.append(self.weights[-1])\n\n self.recent_fit.append(fit)\n self.current_fit = np.average(self.recent_fit, axis=0, weights=self.weights)\n\n fx = lambda y, p: p[0] * y ** 2 + p[1] * y + p[2]\n\n # generate new data from the fit so we can visually extend the overlay beyond the data\n ally, allx = [], []\n for yi in np.arange(image.shape[0], 0, -10):\n xi = fx(yi, self.current_fit)\n ally.append(yi)\n allx.append(xi)\n\n ally, allx = np.array(ally), np.array(allx)\n fitx = fx(ally, fit)\n\n # starting point of the line\n self.origin = allx[0]\n self.height = image.shape[0]\n self.width = image.shape[1]\n\n self.recent_xfit.append(fitx)\n fitx = np.average(self.recent_xfit, axis=0, weights=self.weights)\n\n # decay weights as we get further away from frame\n self.weights = deque(np.multiply(self.weights, 0.4), maxlen=self.n)\n\n # store both generated and original x/y values\n # both are needed for plotting and or overlays\n self.originaly = originaly\n self.originalx = originalx\n\n self.ally = ally\n self.allx = allx\n\n self.current_xfit = fitx\n\n def get_curvature(self, meters = True):\n \"\"\" get line curvature \"\"\"\n\n y = self.ally.astype('float64')\n x = self.allx.astype('float64')\n\n # final curvature is in meters\n if meters:\n x = self.pixels_to_meters(x, dimension='x')\n y = self.pixels_to_meters(y, dimension='y')\n\n y_eval = np.max(y)\n fit = np.polyfit(y, x, 2)\n\n curvature = ((1 + (2 * fit[0] * y_eval + fit[1]) ** 2) ** 1.5) / np.absolute(2*fit[0])\n return curvature\n\n def get_distance_from_center(self):\n distance = int(self.origin - self.width / 2)\n return self.pixels_to_meters(distance, dimension='x')\n\n def pixels_to_meters(self, vals, dimension='x'):\n \"\"\" If radians, define 
conversions in x and y from pixel space to meters \"\"\"\n        if dimension == 'x':\n            xm_per_pix = 3.7/700 # meters per pixel in x dimension\n            vals *= xm_per_pix\n        if dimension == 'y':\n            ym_per_pix = 30/720 # meters per pixel in y dimension\n            vals *= ym_per_pix\n\n        return vals\n\n    def plot(self, color='red'):\n        plt.plot(self.originalx, self.originaly, 'o', color=color)\n        plt.plot(self.current_xfit, self.ally, color='green', linewidth=3)\n","sub_path":"advanced_lane_lines/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"330736298","text":"# COMMAND CHEATSHEET\n\n# django-admin startproject NewsPortal\n# python3 manage.py startapp news\n\n# python3 manage.py makemigrations\n# python3 manage.py migrate\n\n# command to run this file from the Django shell, since running the file from the terminal fails - no time to dig into why.\n# exec(open('comshell.py').read())\n\n# DATA GENERATION\n# Script for quickly generating test data.\n# The from_the_scratch parameter determines whether the data is generated from scratch.\n# If generation has already run once, from_the_scratch = 1 will raise errors, since those authors and users already exist.\n# In that case set from_the_scratch = 0.\n\nfrom news.models import *\n\nimport random\n\n# base values for how many objects to generate\nptype = ['NS', 'AL']\naindex = [1, 2]\nusersQTY = 11\nauthQTY = 3\npostQTY = 16\ncommQTY = 26\ncatQTY = 5\nfrom_the_scratch = 1\n\n# Create two users (using the User.objects.create_user method)\nif from_the_scratch:\n    for n in range(1, usersQTY):\n        globals()[f'user{n}'] = User.objects.create_user(username = f\"UserName{n}\", password = f\"UN{n}pass\")\n\n    # Create two Author model objects linked to the users.\n    for n in range(1, authQTY):\n        globals()[f'author{n}'] = Author.objects.create(authorUser = globals()[f'user{n}'])\n\n    # Add 4 categories to the Category model\n    for n in range(1, catQTY):\n        globals()[f'cat{n}'] = Category.objects.create(name = f'Category{n}')\n
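\n# (added note) an alternative to dynamic globals() names is collecting the\n# objects in a plain list, e.g.:\n#   users = [User.objects.create_user(username=f\"UserName{n}\", password=f\"UN{n}pass\")\n#            for n in range(1, usersQTY)]\n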
\n# Add 2 articles and 1 news item\nfor n in range(1, postQTY):\n    globals()[f'post{n}'] = Post.objects.create(postType = f'{random.choice(ptype)}',\n                                                postName = f'Post {n} Name',\n                                                postBody = f'Very long post Nr.{n} body created for tests just to fill the database. '\n                                                           f'It will be longer than 124 characters, to make sure the preview '\n                                                           f'method does the job. It does not need to have any meaning. '\n                                                           f'Just ID1 to make sure this post belongs to Post {n}',\n                                                postAuthor = Author.objects.get(id = random.choice(aindex)),\n                                                )\n# Assign categories to them\nfor n in range(1, postQTY):\n    Post.objects.get(id = n).postCategory.add(Category.objects.get(id = random.choice(range(1, catQTY))))\n\n# (at least one article/news item must have no fewer than 2 categories)\nfor n in [3, 5, 7]:\n    Post.objects.get(id = n).postCategory.add(Category.objects.get(id = random.choice(range(1, catQTY))))\n\n# Create at least 4 comments on different Post objects (each object must have at least one comment).\nfor n in range(1, commQTY):\n    globals()[f'comment{n}'] = Comment.objects.create(post = globals()[f'post{random.choice(range(1, postQTY))}'],\n                                                      user = globals()[f'user{random.choice(range(1, usersQTY))}'],\n                                                      commentBody = f'Generated comment {n} body.')\n\n# Apply the like() and dislike() functions to articles/news and comments to adjust these objects' ratings.\nfor n in range(1, 100):\n    Comment.like(Comment.objects.get(id = random.choice(range(1, commQTY))))\n    Post.like(Post.objects.get(id = random.choice(range(1, postQTY))))\n\n\n# Update user ratings.\nfor n in range(1, 3):\n    Author.objects.get(id = n).update_rating()\n\n\n# DATA OUTPUT\n\n# Print the username and rating of the best user (sorting and returning the fields of the first object).\nAuthor.objects.order_by('-autorRating')[:1]\n\n# Print the creation date, author username, rating, title and preview of the best article, based on its likes/dislikes.\npid = 0 # store the post id here to use later when printing the comments\np = Post.objects.order_by('-postRating')[:1].values()\nfor i in p:\n    print(f\"Post date: {i['postDate']}\")\n    print(f\"Post author: {Author.objects.get(id=i['postAuthor_id']).authorUser.username}\")\n    print(f\"Post rating: {i['postRating']}\")\n    print(f\"Post title: {i['postName']}\")\n    print(f\"Post preview: {Post.objects.get(id=i['id']).preview()}\")\n    pid = i['id']\n\n# Print all comments (date, user, rating, text) on this article.\nc = Comment.objects.filter(post=pid) # use the saved post id\nfor i in c.values():\n    print(f\"Comment date: {i['commentDate']}\")\n    usr = i[\"user_id\"]\n    print(f\"Comment user: {User.objects.get(id=usr).username}\")\n    print(f\"Comment rating: {i['commentRating']}\")\n    print(f\"Comment: {i['commentBody']}\")\n    print(\"----------------\")\n\n# print the post's category\n# cat=Post.objects.get(id=1).postCategory.values(\"name\")\n# for i in cat.values():\n#     print(i['name'])\n\n# ready-made command for creating individual test posts\n#Post.objects.create(postType = 'AR', postName = 'Post for type checking', postBody = f'Checking how the default post type works.', postAuthor = Author.objects.get(id = 1))\n\n\n","sub_path":"NewsPortal/comshell.py","file_name":"comshell.py","file_ext":"py","file_size_in_byte":6402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"303547924","text":"from __future__ import print_function\nimport sys\n\nfrom Ziggeo import Ziggeo\nimport json\nif(len(sys.argv) < 5):\n\tprint (\"Error\\n\")\n\tprint (\"Usage: $>python stream_get.py YOUR_API_TOKEN YOUR_PRIVATE_KEY VIDEO_TOKEN STREAM_TOKEN\\n\")\n\tsys.exit()\n\napi_token = sys.argv[1]\nprivate_key = sys.argv[2]\nvideo_token = sys.argv[3]\nstream_token = sys.argv[4]\n\nziggeo = Ziggeo(api_token, private_key)\n\n
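# (added note) example invocation; the token values are placeholders:\n#   python stream_get.py <api_token> <private_key> <video_token> <stream_token>\n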
print (ziggeo.streams().get(video_token, stream_token))\n","sub_path":"demos/stream_get.py","file_name":"stream_get.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"130200378","text":"\r\n# python -m rpctool result.json instances_test2019.json\r\n\r\nimport argparse\r\nfrom data import *\r\nimport json\r\nimport torch\r\nimport torchvision\r\nfrom torchvision.models.detection.rpn import AnchorGenerator\r\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\r\nfrom torchvision.models.detection.rpn import RPNHead\r\n\r\nparser = argparse.ArgumentParser(\r\n    description='Checkout Detector Training With Pytorch')\r\ntrain_set = parser.add_mutually_exclusive_group()\r\nparser.add_argument('--dataset_root', default=CHECKOUT_ROOT,\r\n                    help='Dataset root directory path')\r\nparser.add_argument('--trained_model', default='weights/faster_steps_600.pth',\r\n                    type=str, help='Trained state_dict file path to open')\r\nparser.add_argument('--visual_threshold', default=0.5, type=float,\r\n                    help='Final confidence threshold')\r\nparser.add_argument('--cuda', default=True, type=bool,\r\n                    help='Use cuda to train model')\r\nargs = parser.parse_args()\r\n\r\n\r\ndef get_model_detection(num_classes):\r\n    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\r\n\r\n    in_features = model.roi_heads.box_predictor.cls_score.in_features\r\n    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\r\n\r\n    anchor_generator = AnchorGenerator(\r\n        sizes=tuple([(16, 32, 64, 128, 256, 512) for _ in range(5)]),\r\n        aspect_ratios=tuple([(0.5, 1.0, 2.0) for _ in range(5)]))\r\n    model.rpn.anchor_generator = anchor_generator\r\n\r\n    # 256 because that's the number of features that resnet_fpn_backbone returns\r\n    model.rpn.head = RPNHead(256, anchor_generator.num_anchors_per_location()[0])\r\n    return model\r\n\r\n\r\nif args.cuda and torch.cuda.is_available():\r\n    torch.set_default_tensor_type('torch.cuda.FloatTensor')\r\n    print(\"CUDA is available\")\r\nelse:\r\n    torch.set_default_tensor_type('torch.FloatTensor')\r\n    print(\"CUDA is not available\")\r\n
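\r\n# (added note) an alternative to the global default-tensor switch above is an\r\n# explicit device object, e.g.:\r\n#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n#   net.to(device); x = x.to(device)\r\n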
\r\n\r\ndef test_net(net, cuda, testset, thresh):\r\n    # dump predictions and assoc. ground truth to text file for now\r\n    filename = 'result.json'\r\n    num_images = len(testset)\r\n    result = list()\r\n    for i in range(num_images):\r\n        print('Testing image {:d}/{:d}....'.format(i + 1, num_images))\r\n        # randint is inclusive at both ends, so cap the index at len - 1\r\n        image_id, img = testset.pull_image(random.randint(0, testset.__len__() - 1))\r\n        img_copy = np.copy(img)\r\n        # img_path, labels, boxes = testset.pull_annotation(i)\r\n        # img_copy = testset.visualize_bbox(img_copy, labels, boxes, display=False)\r\n        x = testset.base_transform(img)\r\n\r\n        if cuda and torch.cuda.is_available():\r\n            x = x.cuda()\r\n\r\n        x = [x, ]  # one image a batch\r\n        with torch.no_grad():\r\n            y = net(x)  # forward pass\r\n\r\n        detections = y[0]\r\n        print(detections)\r\n        predicted_labels = detections['labels'].cpu().numpy().astype(int)\r\n        predicted_boxes = detections['boxes'].cpu().numpy().astype(int)\r\n        predicted_scores = detections['scores'].cpu().numpy()\r\n        for label, box, score in zip(predicted_labels, predicted_boxes, predicted_scores):\r\n            if score < thresh:\r\n                continue\r\n            result.append({\r\n                \"image_id\": image_id,\r\n                \"category_id\": label,\r\n                \"bbox\": [box[0], box[1], box[2]-box[0], box[3]-box[1]],\r\n                \"score\": score,\r\n            })\r\n\r\n        testset.visualize_bbox(img_copy, predicted_labels, predicted_boxes, predicted_scores, display=True, thresh=thresh)\r\n    with open(filename, 'w') as f:\r\n        json.dump(result, f, cls=MyEncoder)\r\n\r\n\r\ndef test_checkout():\r\n    # load net\r\n    net = get_model_detection(201)\r\n    net.load_state_dict(torch.load(args.trained_model, map_location=lambda storage, location: storage))\r\n    net.eval()\r\n    print('Finished loading model!')\r\n    # load data\r\n    testset = CheckoutDetection(CHECKOUT_ROOT, 'test', show_images=True)\r\n    # evaluation\r\n    test_net(net, args.cuda, testset, thresh=args.visual_threshold)\r\n\r\n\r\nclass MyEncoder(json.JSONEncoder):\r\n    def default(self, obj):\r\n        if isinstance(obj, np.integer):\r\n            return int(obj)\r\n        elif isinstance(obj, np.floating):\r\n            return float(obj)\r\n        elif isinstance(obj, np.ndarray):\r\n            return obj.tolist()\r\n        else:\r\n            return super(MyEncoder, self).default(obj)\r\n\r\n\r\nif __name__ == '__main__':\r\n    test_checkout()\r\n","sub_path":"faster-test.py","file_name":"faster-test.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"171613528","text":"import sys\n\nif len(sys.argv) < 2:\n\tprint(\"Usage:\\npython3 identify_dup.py <text file>\")\n\t# exit instead of falling through to sys.argv[1] and crashing\n\tsys.exit(1)\n\nfilename = sys.argv[1]\n\nclass line(object):\n\tpath = \"\"\n\tmd5 = \"\"\n\tisUnique = True #assume there are no copies until proven otherwise\n\n\tdef __init__(self, p, h):\n\t\tself.path = p\n\t\tself.md5 = h\n\n\n\nfile = open(filename, \"rb\")\narr = []\n\nfor l in file:\n\tli = str(l).replace(\"\\\\x00\", \"\")\n\tli = li.replace(\"\\\\\\\\\", \"/\")\n\tli = li.replace(\"\\r\", \"\")\n\tli = li.replace(\"\\n\", \"\")\n\tarr.append( line(li[94:-1], li[2:94]) )\n\nprint(\"Hash,File A,File B\")\n# range ends at len(arr) so the last entry is compared as well\nfor l in range(1, len(arr)):\n\tif arr[l-1].md5 == arr[l].md5 and arr[l-1].path != arr[l].path:# and arr[l-1].path.split(\"\\\\\")[-1] != arr[l].path.split(\"\\\\\")[-1]:\n\t\tprint(str(arr[l-1].md5)+\",\"+str(arr[l-1].path)[:-4].rstrip()+\",\"+str(arr[l].path)[:-4].rstrip())\n\t\tarr[l-1].isUnique = False\n\t\tarr[l].isUnique = False\n
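\n# (added note) the adjacent comparison above only finds duplicates when the\n# input is sorted by hash; a dict keyed on the md5 would also catch\n# non-adjacent copies, e.g.:\n#   groups = {}\n#   for entry in arr:\n#       groups.setdefault(entry.md5, []).append(entry.path)\n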
","sub_path":"identify_dup.py","file_name":"identify_dup.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"560406090","text":"from cryptography.fernet import Fernet\n\nfrom main.util.get_env.secrets_key import get_secrets_key_from_env\n\n\nclass ConfigCrypter:\n\n    def __init__(self, keys: list = None):\n        self.keys = keys\n        if self.keys is None:\n            self.keys = get_secrets_key_from_env()\n\n        if self.keys is None:\n            raise ValueError('Keys missing!')\n\n        self.__fernet_1 = Fernet(self.keys[0].encode())\n        self.__fernet_2 = Fernet(self.keys[1].encode())\n\n    def encrypt_value(self, text: str) -> str:\n        first_encrypted_text = self.__fernet_1.encrypt(text.encode())\n        return self.__fernet_2.encrypt(first_encrypted_text).decode()\n\n    def decrypt_value(self, text: str) -> str:\n        first_decrypted_text = self.__fernet_2.decrypt(text.encode())\n        return self.__fernet_1.decrypt(first_decrypted_text).decode()\n\n    @staticmethod\n    def generate_random_keys():\n        keys = [Fernet.generate_key().decode(), Fernet.generate_key().decode()]\n        return keys","sub_path":"main/util/encrypt/config_crypter.py","file_name":"config_crypter.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"437529011","text":"# 9. Write a program that:\n\n# INPUT\n# reads an employee's base salary,\nsalario = float(input('Salario: '))\n\n# PROCESSING\n# calculates and displays the salary to be received,\n# given that this employee gets a 5% bonus on top of the base salary\n# and pays some tax (%) on the base salary.\n### I DID NOT UNDERSTAND WHERE THE TAX VALUE COMES FROM! LEFT IT AS 0\ngratificacao = salario * 5 / 100\nimposto = salario * 0\nsalario_a_receber = salario + gratificacao - imposto\n\n# OUTPUT\n# display the salary\nprint('Salario a receber: R$ ', salario_a_receber)","sub_path":"lista01_ex09.py","file_name":"lista01_ex09.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"374940898","text":"from itertools import cycle\nimport random\nimport sys\n\nimport pygame\nfrom pygame.constants import QUIT, K_ESCAPE, KEYDOWN, K_SPACE, K_UP\n\nFPS = 30\nSCREEN_WIDTH = 288\nSCREEN_HEIGHT = 512\nPIPE_GAP_SIZE = 100 # gap between upper and lower part of pipe\nBASE_Y = SCREEN_HEIGHT * 0.79\n# image, sound and hit mask dicts\nIMAGES, SOUNDS, HIT_MASKS = {}, {}, {}\n\n# list of all possible players (tuple of 3 positions of flap)\nPLAYERS_LIST = (\n    # red bird\n    (\n        \"assets/sprites/redbird-upflap.png\",\n        \"assets/sprites/redbird-midflap.png\",\n        \"assets/sprites/redbird-downflap.png\",\n    ),\n    # blue bird\n    (\n        \"assets/sprites/bluebird-upflap.png\",\n        \"assets/sprites/bluebird-midflap.png\",\n        \"assets/sprites/bluebird-downflap.png\",\n    ),\n    # yellow bird\n    (\n        \"assets/sprites/yellowbird-upflap.png\",\n        \"assets/sprites/yellowbird-midflap.png\",\n        \"assets/sprites/yellowbird-downflap.png\",\n    ),\n)\n\n# list of backgrounds\nBACKGROUNDS_LIST = (\n    \"assets/sprites/background-day.png\",\n    \"assets/sprites/background-night.png\",\n)\n\n# list of pipes\nPIPES_LIST = (\"assets/sprites/pipe-green.png\", \"assets/sprites/pipe-red.png\")\n\n\ndef main():\n    global SCREEN, FPS_CLOCK\n    pygame.init()\n    FPS_CLOCK = pygame.time.Clock()\n    SCREEN = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n    pygame.display.set_caption(\"Flappy Bird\")\n\n    # numbers sprites for score display\n    IMAGES[\"numbers\"] = (\n        pygame.image.load(\"assets/sprites/0.png\").convert_alpha(),\n        pygame.image.load(\"assets/sprites/1.png\").convert_alpha(),\n        
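# (added note) digit sprites are listed in ascending order, so the tuple index matches the digit\n        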
pygame.image.load(\"assets/sprites/2.png\").convert_alpha(),\n pygame.image.load(\"assets/sprites/3.png\").convert_alpha(),\n pygame.image.load(\"assets/sprites/4.png\").convert_alpha(),\n pygame.image.load(\"assets/sprites/5.png\").convert_alpha(),\n pygame.image.load(\"assets/sprites/6.png\").convert_alpha(),\n pygame.image.load(\"assets/sprites/7.png\").convert_alpha(),\n pygame.image.load(\"assets/sprites/8.png\").convert_alpha(),\n pygame.image.load(\"assets/sprites/9.png\").convert_alpha(),\n )\n\n # game over sprite\n IMAGES[\"game_over\"] = pygame.image.load(\n \"assets/sprites/game_over.png\"\n ).convert_alpha()\n # message sprite for welcome screen\n IMAGES[\"message\"] = pygame.image.load(\n \"assets/sprites/message.png\"\n ).convert_alpha()\n # base (ground) sprite\n IMAGES[\"base\"] = pygame.image.load(\n \"assets/sprites/base.png\"\n ).convert_alpha()\n\n sound_ext = \".ogg\"\n\n SOUNDS[\"die\"] = pygame.mixer.Sound(\"assets/audio/die\" + sound_ext)\n SOUNDS[\"hit\"] = pygame.mixer.Sound(\"assets/audio/hit\" + sound_ext)\n SOUNDS[\"point\"] = pygame.mixer.Sound(\"assets/audio/point\" + sound_ext)\n SOUNDS[\"swoosh\"] = pygame.mixer.Sound(\"assets/audio/swoosh\" + sound_ext)\n SOUNDS[\"wing\"] = pygame.mixer.Sound(\"assets/audio/wing\" + sound_ext)\n\n while True:\n # select random background sprites\n rand_bg = random.randint(0, len(BACKGROUNDS_LIST) - 1)\n IMAGES[\"background\"] = pygame.image.load(\n BACKGROUNDS_LIST[rand_bg]\n ).convert()\n\n # select random player sprites\n rand_player = random.randint(0, len(PLAYERS_LIST) - 1)\n IMAGES[\"player\"] = (\n pygame.image.load(PLAYERS_LIST[rand_player][0]).convert_alpha(),\n pygame.image.load(PLAYERS_LIST[rand_player][1]).convert_alpha(),\n pygame.image.load(PLAYERS_LIST[rand_player][2]).convert_alpha(),\n )\n\n # select random pipe sprites\n pipe_index = random.randint(0, len(PIPES_LIST) - 1)\n IMAGES[\"pipe\"] = (\n pygame.transform.flip(\n pygame.image.load(PIPES_LIST[pipe_index]).convert_alpha(),\n False,\n True,\n ),\n pygame.image.load(PIPES_LIST[pipe_index]).convert_alpha(),\n )\n\n # hit_mask for pipes\n HIT_MASKS[\"pipe\"] = (\n get_hit_mask(IMAGES[\"pipe\"][0]),\n get_hit_mask(IMAGES[\"pipe\"][1]),\n )\n\n # hit_mask for player\n HIT_MASKS[\"player\"] = (\n get_hit_mask(IMAGES[\"player\"][0]),\n get_hit_mask(IMAGES[\"player\"][1]),\n get_hit_mask(IMAGES[\"player\"][2]),\n )\n\n movement_info = show_welcome_animation()\n crash_info = main_game(movement_info)\n show_game_over_screen(crash_info)\n\n\ndef show_welcome_animation():\n \"\"\"Shows welcome screen animation of flappy bird\"\"\"\n # index of player to show on screen\n player_index = 0\n player_index_gen = cycle([0, 1, 2, 1])\n # iterator used to change player_index after every 5th iteration\n loop_iter = 0\n\n player_x = int(SCREEN_WIDTH * 0.2)\n player_y = int((SCREEN_HEIGHT - IMAGES[\"player\"][0].get_height()) / 2)\n\n message_x = int((SCREEN_WIDTH - IMAGES[\"message\"].get_width()) / 2)\n message_y = int(SCREEN_HEIGHT * 0.12)\n\n base_x = 0\n # amount by which base can maximum shift to left\n base_shift = IMAGES[\"base\"].get_width() - IMAGES[\"background\"].get_width()\n\n # player shm for up-down motion on welcome screen\n player_shm_vals = {\"val\": 0, \"dir\": 1}\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT or (\n event.type == KEYDOWN and event.key == K_ESCAPE\n ):\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN and (\n event.key == K_SPACE or event.key == K_UP\n ):\n # make first flap sound and return values for 
mainGame\n SOUNDS[\"wing\"].play()\n return {\n \"player_y\": player_y + player_shm_vals[\"val\"],\n \"base_x\": base_x,\n \"player_index_gen\": player_index_gen,\n }\n\n # adjust player_y, player_index, base_x\n if (loop_iter + 1) % 5 == 0:\n player_index = next(player_index_gen)\n loop_iter = (loop_iter + 1) % 30\n base_x = -((-base_x + 4) % base_shift)\n player_shm(player_shm_vals)\n\n # draw sprites\n SCREEN.blit(IMAGES[\"background\"], (0, 0))\n SCREEN.blit(\n IMAGES[\"player\"][player_index],\n (player_x, player_y + player_shm_vals[\"val\"]),\n )\n SCREEN.blit(IMAGES[\"message\"], (message_x, message_y))\n SCREEN.blit(IMAGES[\"base\"], (base_x, BASE_Y))\n\n pygame.display.update()\n FPS_CLOCK.tick(FPS)\n\n\ndef main_game(movement_info):\n score = player_index = loop_iter = 0\n player_index_gen = movement_info[\"player_index_gen\"]\n player_x, player_y = int(SCREEN_WIDTH * 0.2), movement_info[\"player_y\"]\n\n base_x = movement_info[\"base_x\"]\n base_shift = IMAGES[\"base\"].get_width() - IMAGES[\"background\"].get_width()\n\n # get 2 new pipes to add to upper_pipes lower_pipes list\n new_pipe1 = get_random_pipe()\n new_pipe2 = get_random_pipe()\n\n # list of upper pipes\n upper_pipes = [\n {\"x\": SCREEN_WIDTH + 200, \"y\": new_pipe1[0][\"y\"]},\n {\"x\": SCREEN_WIDTH + 200 + (SCREEN_WIDTH / 2), \"y\": new_pipe2[0][\"y\"]},\n ]\n\n # list of lower pipes\n lower_pipes = [\n {\"x\": SCREEN_WIDTH + 200, \"y\": new_pipe1[1][\"y\"]},\n {\"x\": SCREEN_WIDTH + 200 + (SCREEN_WIDTH / 2), \"y\": new_pipe2[1][\"y\"]},\n ]\n\n pipe_vel_x = -4\n\n # player velocity, max velocity, downward acceleration, acceleration on flap\n player_vel_y = -9\n\n # player's velocity along Y, default same as player_flapped\n player_max_vel_y = 10 # max vel along Y, max descend speed\n player_acc_y = 1 # players downward acceleration\n player_rot = 45 # player's rotation\n player_vel_rot = 3 # angular speed\n player_rot_thr = 20 # rotation threshold\n player_flap_acc = -9 # players speed on flapping\n player_flapped = False # True when player flaps\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT or (\n event.type == KEYDOWN and event.key == K_ESCAPE\n ):\n pygame.quit()\n sys.exit()\n if (\n event.type == KEYDOWN\n and (event.key == K_SPACE or event.key == K_UP)\n and player_y > -2 * IMAGES[\"player\"][0].get_height()\n ):\n player_vel_y = player_flap_acc\n player_flapped = True\n SOUNDS[\"wing\"].play()\n\n # check for crash here\n crash_test = check_crash(\n {\"x\": player_x, \"y\": player_y, \"index\": player_index},\n upper_pipes,\n lower_pipes,\n )\n if crash_test[0]:\n return {\n \"y\": player_y,\n \"groundCrash\": crash_test[1],\n \"base_x\": base_x,\n \"upper_pipes\": upper_pipes,\n \"lower_pipes\": lower_pipes,\n \"score\": score,\n \"player_vel_y\": player_vel_y,\n \"player_rot\": player_rot,\n }\n\n # check for score\n player_mid_pos = player_x + IMAGES[\"player\"][0].get_width() / 2\n for pipe in upper_pipes:\n pipe_mid_pos = pipe[\"x\"] + IMAGES[\"pipe\"][0].get_width() / 2\n if pipe_mid_pos <= player_mid_pos < pipe_mid_pos + 4:\n score += 1\n SOUNDS[\"point\"].play()\n\n # player_index base_x change\n if (loop_iter + 1) % 3 == 0:\n player_index = next(player_index_gen)\n loop_iter = (loop_iter + 1) % 30\n base_x = -((-base_x + 100) % base_shift)\n\n # rotate the player\n if player_rot > -90:\n player_rot -= player_vel_rot\n\n # player's movement\n if player_vel_y < player_max_vel_y and not player_flapped:\n player_vel_y += player_acc_y\n if player_flapped:\n player_flapped 
= False\n\n # more rotation to cover the threshold (calculated in visible rotation)\n player_rot = 45\n\n player_height = IMAGES[\"player\"][player_index].get_height()\n player_y += min(player_vel_y, BASE_Y - player_y - player_height)\n\n # move pipes to left\n for u_pipe, l_pipe in zip(upper_pipes, lower_pipes):\n u_pipe[\"x\"] += pipe_vel_x\n l_pipe[\"x\"] += pipe_vel_x\n\n # add new pipe when first pipe is about to touch left of screen\n if 0 < upper_pipes[0][\"x\"] < 5:\n new_pipe = get_random_pipe()\n upper_pipes.append(new_pipe[0])\n lower_pipes.append(new_pipe[1])\n\n # remove first pipe if its out of the screen\n if upper_pipes[0][\"x\"] < -IMAGES[\"pipe\"][0].get_width():\n upper_pipes.pop(0)\n lower_pipes.pop(0)\n\n # draw sprites\n SCREEN.blit(IMAGES[\"background\"], (0, 0))\n\n for u_pipe, l_pipe in zip(upper_pipes, lower_pipes):\n SCREEN.blit(IMAGES[\"pipe\"][0], (u_pipe[\"x\"], u_pipe[\"y\"]))\n SCREEN.blit(IMAGES[\"pipe\"][1], (l_pipe[\"x\"], l_pipe[\"y\"]))\n\n SCREEN.blit(IMAGES[\"base\"], (base_x, BASE_Y))\n # print score so player overlaps the score\n show_score(score)\n\n # Player rotation has a threshold\n visible_rot = player_rot_thr\n if player_rot <= player_rot_thr:\n visible_rot = player_rot\n\n player_surface = pygame.transform.rotate(\n IMAGES[\"player\"][player_index], visible_rot\n )\n SCREEN.blit(player_surface, (player_x, player_y))\n\n pygame.display.update()\n FPS_CLOCK.tick(FPS)\n\n # what we need for the model #\n print(score)\n print(player_y)\n\n if lower_pipes[0][\"x\"] > 0:\n print(lower_pipes[0][\"x\"], lower_pipes[0][\"y\"])\n else:\n print(lower_pipes[1][\"x\"], lower_pipes[1][\"y\"])\n\n # test to see if the keys are working\n if player_y > lower_pipes[0][\"x\"]:\n new_event = pygame.event.Event(\n pygame.constants.KEYDOWN, {\"key\": K_SPACE}\n )\n pygame.event.post(new_event)\n\n\ndef show_game_over_screen(crash_info):\n \"\"\"crashes the player down ans shows game over image\"\"\"\n score = crash_info[\"score\"]\n player_x = SCREEN_WIDTH * 0.2\n player_y = crash_info[\"y\"]\n player_height = IMAGES[\"player\"][0].get_height()\n player_vel_y = crash_info[\"player_vel_y\"]\n player_acc_y = 2\n player_rot = crash_info[\"player_rot\"]\n player_vel_rot = 7\n\n base_x = crash_info[\"base_x\"]\n\n upper_pipes, lower_pipes = (\n crash_info[\"upper_pipes\"],\n crash_info[\"lower_pipes\"],\n )\n\n # play hit and die sounds\n SOUNDS[\"hit\"].play()\n if not crash_info[\"groundCrash\"]:\n SOUNDS[\"die\"].play()\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT or (\n event.type == KEYDOWN and event.key == K_ESCAPE\n ):\n pygame.quit()\n sys.exit()\n if (\n event.type == KEYDOWN\n and (event.key == K_SPACE or event.key == K_UP)\n and player_y + player_height >= BASE_Y - 1\n ):\n return\n\n # player y shift\n if player_y + player_height < BASE_Y - 1:\n player_y += min(player_vel_y, BASE_Y - player_y - player_height)\n\n # player velocity change\n if player_vel_y < 15:\n player_vel_y += player_acc_y\n\n # rotate only when it's a pipe crash\n if not crash_info[\"groundCrash\"] and player_rot > -90:\n player_rot -= player_vel_rot\n\n # draw sprites\n SCREEN.blit(IMAGES[\"background\"], (0, 0))\n\n for u_pipe, l_pipe in zip(upper_pipes, lower_pipes):\n SCREEN.blit(IMAGES[\"pipe\"][0], (u_pipe[\"x\"], u_pipe[\"y\"]))\n SCREEN.blit(IMAGES[\"pipe\"][1], (l_pipe[\"x\"], l_pipe[\"y\"]))\n\n SCREEN.blit(IMAGES[\"base\"], (base_x, BASE_Y))\n show_score(score)\n\n player_surface = pygame.transform.rotate(\n IMAGES[\"player\"][1], player_rot\n )\n 
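# draw the tumbling bird over the frozen scene, then stamp the game over banner\n        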
SCREEN.blit(player_surface, (player_x, player_y))\n SCREEN.blit(IMAGES[\"game_over\"], (50, 180))\n\n FPS_CLOCK.tick(FPS)\n pygame.display.update()\n\n\ndef player_shm(player_shm):\n \"\"\"oscillates the value of player_shm['val'] between 8 and -8\"\"\"\n if abs(player_shm[\"val\"]) == 8:\n player_shm[\"dir\"] *= -1\n\n if player_shm[\"dir\"] == 1:\n player_shm[\"val\"] += 1\n else:\n player_shm[\"val\"] -= 1\n\n\ndef get_random_pipe():\n \"\"\"returns a randomly generated pipe\"\"\"\n # y of gap between upper and lower pipe\n gap_y = random.randrange(0, int(BASE_Y * 0.6 - PIPE_GAP_SIZE))\n gap_y += int(BASE_Y * 0.2)\n pipe_height = IMAGES[\"pipe\"][0].get_height()\n pipe_x = SCREEN_WIDTH + 10\n\n return [\n {\"x\": pipe_x, \"y\": gap_y - pipe_height}, # upper pipe\n {\"x\": pipe_x, \"y\": gap_y + PIPE_GAP_SIZE}, # lower pipe\n ]\n\n\ndef show_score(score):\n \"\"\"displays score in center of screen\"\"\"\n score_digits = [int(x) for x in list(str(score))]\n total_width = 0 # total width of all numbers to be printed\n\n for digit in score_digits:\n total_width += IMAGES[\"numbers\"][digit].get_width()\n\n x_offset = (SCREEN_WIDTH - total_width) / 2\n\n for digit in score_digits:\n SCREEN.blit(IMAGES[\"numbers\"][digit], (x_offset, SCREEN_HEIGHT * 0.1))\n x_offset += IMAGES[\"numbers\"][digit].get_width()\n\n\ndef check_crash(player, upper_pipes, lower_pipes):\n \"\"\"returns True if player collides with base or pipes.\"\"\"\n pi = player[\"index\"]\n player[\"w\"] = IMAGES[\"player\"][0].get_width()\n player[\"h\"] = IMAGES[\"player\"][0].get_height()\n\n # if player crashes into ground\n if player[\"y\"] + player[\"h\"] >= BASE_Y - 1:\n return [True, True]\n else:\n\n player_rect = pygame.Rect(\n player[\"x\"], player[\"y\"], player[\"w\"], player[\"h\"]\n )\n pipe_w = IMAGES[\"pipe\"][0].get_width()\n pipe_h = IMAGES[\"pipe\"][0].get_height()\n\n for u_pipe, l_pipe in zip(upper_pipes, lower_pipes):\n # upper and lower pipe rects\n u_pipe_rect = pygame.Rect(u_pipe[\"x\"], u_pipe[\"y\"], pipe_w, pipe_h)\n l_pipe_rect = pygame.Rect(l_pipe[\"x\"], l_pipe[\"y\"], pipe_w, pipe_h)\n\n # player and upper/lower pipe hit masks\n p_hit_mask = HIT_MASKS[\"player\"][pi]\n u_hit_mask = HIT_MASKS[\"pipe\"][0]\n l_hit_mask = HIT_MASKS[\"pipe\"][1]\n\n # if bird collided with u_pipe or l_pipe\n u_collide = pixel_collision(\n player_rect, u_pipe_rect, p_hit_mask, u_hit_mask\n )\n l_collide = pixel_collision(\n player_rect, l_pipe_rect, p_hit_mask, l_hit_mask\n )\n\n if u_collide or l_collide:\n return [True, False]\n\n return [False, False]\n\n\ndef pixel_collision(rect1, rect2, hit_mask1, hit_mask2):\n \"\"\"Checks if two objects collide and not just their rects\"\"\"\n rect = rect1.clip(rect2)\n\n if rect.width == 0 or rect.height == 0:\n return False\n\n x1, y1 = rect.x - rect1.x, rect.y - rect1.y\n x2, y2 = rect.x - rect2.x, rect.y - rect2.y\n\n for x in range(rect.width):\n for y in range(rect.height):\n if hit_mask1[x1 + x][y1 + y] and hit_mask2[x2 + x][y2 + y]:\n return True\n return False\n\n\ndef get_hit_mask(image):\n \"\"\"returns a hit mask using an image's alpha.\"\"\"\n mask = []\n for x in range(image.get_width()):\n mask.append([])\n for y in range(image.get_height()):\n mask[x].append(bool(image.get_at((x, y))[3]))\n return mask\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"flappy.py","file_name":"flappy.py","file_ext":"py","file_size_in_byte":17577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"571548426","text":"from 
datetime import datetime\n\ndef convert_data(data):\n\ttry:\n\t\tnewShow = data[0]['Headline display text (required)']\n\texcept:\n\t\tnewShow = 'invalid_data'\n\treturn {\n\t\t'createdData': datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n\t\t'newShow': newShow,\n\t\t'rawData': data\n\t}\n\n","sub_path":"app/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"478350188","text":"word=input()\nletter,digit=0,0\n\nfor i in word:\n if('a'<=i and i<='z') or ('A'<=i and i<='Z'):\n letter+=1\n if '0'<=i and i<='9':\n digit+=1\n\nprint(\"LETTERS {0} \\n DIGITS {1}\".format(letter,digit))\n\n","sub_path":"13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"363855580","text":"# from threading import Thread\n#\n#\n# class Patten(Thread):\n#\n# def __init__(self, string, count):\n# super(Patten, self).__init__()\n# self._string = string\n# self._count = count\n#\n# def run(self):\n# # for _ in range(self._count):\n# # print(self._string, end='', flush=True)\n# inner = 0\n# while inner < self._count:\n# inner += 1\n# print(self._string, end='', flush=True)\n# return inner\n#\n#\n# def main():\n# p1 = Patten('上', 10000)\n# p1.start()\n# p2 = Patten('右', 10000)\n# p2.start()\n# p3 = Patten('下', 10000)\n# p3.start()\n# p4 = Patten('左', 10000)\n# p4.start()\n# p5 = Patten('中', 10000)\n# p5.start()\n# total = int(p1._count) + int(p2._count) + int(p3._count) + int(p4._count) + int(p5._count)\n# print(total)\n#\n# if __name__ == '__main__':\n# main()\n\n\n\nfrom multiprocessing import Process\n\n\nclass Patten(Process):\n\n def __init__(self, string, count):\n self._string = string\n self._count = count\n super(Patten, self).__init__()\n\n @property\n def count(self):\n return self._count\n\n def run(self):\n for _ in range(self._count):\n print(self._string, end='', flush=True)\n\n\ndef main():\n patten = Patten('👌', 10000)\n patten2 = Patten('😵', 10000)\n patten3 = Patten('😭', 10000)\n patten4 = Patten('ヾ(•ω•`)o', 10000)\n patten5 = Patten('😀', 10000)\n total = patten.count + patten2.count + patten3.count + patten4.count + patten5.count\n print(total)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"day17/text3.py","file_name":"text3.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"608238233","text":"# -*- coding: utf-8 -*-\nfrom irc3.plugins.command import command\nimport irc3\nimport sys\nimport re\nfrom CachingDB import CachingDB\n\n\n@irc3.plugin\nclass GiveawayBot:\n \"\"\"A plugin is a class which take the IrcBot as argument\n \"\"\"\n\n requires = [\n 'irc3.plugins.core',\n 'irc3.plugins.userlist',\n 'irc3.plugins.command',\n 'irc3.plugins.human',\n ]\n\n def __init__(self, bot):\n self.bot = bot\n self.log = self.bot.log\n self.log.debug('Bot initialisation completed')\n\n def connection_made(self):\n \"\"\"triggered when connection is up\"\"\"\n self.log.debug('Connection completed')\n\n def server_ready(self):\n \"\"\"triggered after the server sent the MOTD (require core plugin)\"\"\"\n self.log.debug('Server ready')\n\n def connection_lost(self):\n \"\"\"triggered when connection is lost\"\"\"\n\n # MY_PRIVMSG definition has a spurious '{nick}[:,\\s]\\s*' after the : on the second line. 
This causes it to not\n # match (on ngircd at least). This may need revisiting when used with twitch.\n @irc3.event(r'^:(?P<mask>\\S+!\\S+@\\S+) (?P<event>(PRIVMSG|NOTICE)) '\n r'(?P<target>(#\\S+|{nick})) :'\n r'(?P<data>\\S+.*)$')\n def enter_privmsg(self, mask=None, event=None, target=None, data=None):\n if target == self.bot.nick:\n pat = re.compile(r'!?enter (\\w+)')\n m = pat.match(data)\n if m:\n code = m.group(1)\n self.enter_giveaway(mask.nick, code)\n else:\n # TODO(cricalix) This method should actually be a handler to check all known commands. Kinda ugly if\n # there's more than a handful to handle.\n self.bot.privmsg(mask.nick, 'Your command of \"{}\" was not understood'.format(data))\n\n @command\n def enter(self, mask, target, args):\n \"\"\"Enter an ongoing giveaway\n %%enter <code>\n \"\"\"\n # Take mask.nick, check the cache to see if the nick is registered for the current giveaway.\n # If not registered, register the nick; else, do nothing.\n giveaway_code = args['<code>']\n giveaway_code = giveaway_code.split(' ')[0]\n self.enter_giveaway(mask.nick, giveaway_code)\n\n def enter_giveaway(self, nick, giveaway_code):\n db = CachingDB()\n self.log.info('Giveaway: {0} is trying to enter giveaway \"{1}\"'.format(nick, giveaway_code))\n active_code = db.getone('SELECT id FROM giveaways WHERE active=%s and code=%s', (1, giveaway_code))\n if active_code:\n sql = 'SELECT 1 FROM giveaway_entrants WHERE giveaway=%s AND nick=%s'\n is_entered = db.getone(sql, (active_code, nick))\n if is_entered:\n self.log.info('Giveaway: {0} is already entered in giveaway \"{1}\"'.format(nick, giveaway_code))\n else:\n sql = 'INSERT INTO giveaway_entrants (giveaway, nick) values (%s, %s)'\n if db.set(sql, (active_code, nick)):\n self.log.info('Giveaway: Entered {0} into giveaway \"{1}\"'.format(nick, giveaway_code))\n else:\n self.log.info(\n 'Giveaway: Failure entering {0} into giveaway {1}: {2}'.format(nick, giveaway_code,\n db.last_error()))\n else:\n self.log.info('Giveaway: No giveaway matched {0}'.format(giveaway_code))\n self.bot.privmsg(nick, 'Sorry, \"{0}\" is not an active giveaway.'.format(giveaway_code))\n\n\ndef main():\n config = dict(\n nick='irc3', autojoins=['#irc3'],\n host='192.168.0.17', port=6667, ssl=False,\n includes=[\n 'irc3.plugins.core',\n 'irc3.plugins.command',\n 'irc3.plugins.human',\n __name__, # this register MyPlugin\n ],\n )\n try:\n bot = irc3.IrcBot.from_config(config)\n bot.run(forever=True)\n except ConnectionRefusedError as e:\n print('Failed to connect: {0}'.format(e))\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"GiveawayBot/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"159640022","text":"\"\"\"\nA quack is a data structure combining properties of both stacks and queues.\nIt can be viewed as a list of elements written left to right such that three\noperations are possible:\n • push(x): add a new item x to the left end of the list\n • pop(): remove and return the item on the left end of the list\n • pull(): remove the item on the right end of the list.\n\nImplement a quack using three stacks and O(1) additional memory, so that the\namortized time for any push, pop, or pull operation is O(1).\n\"\"\"\n\n\"\"\"\nLet Q1, Q2, Q3 be the three stacks.\nAfter N pushes Q1 = {1,2,3,4,5,6}\nTransform it so that its divided equally among two stacks as {4,5,6} and {3,2,1}\nFor example :\nPop 3 elements into q2, 
and 3 into q3.\nSo Q2 = {6,5,4} Q3 = {3,2,1}\nPop back Q2 into Q1 , so that Q1 = {4,5,6}\n\nNow, popFront() will take O(1), and its a pop on Q3.\nPop() will take O(1), and its a pop on Q1.\nPush ()will take O(1) and its push on Q1.\n\"\"\"\n\n\nclass Quack:\n def __str__(self):\n print(self.left)\n print(self.right)\n print(self.temp)\n return ''\n\n def __init__(self):\n self.left = []\n self.right = []\n self.temp = []\n\n def push(self, x):\n self.left.append(x)\n\n def pop(self):\n if not self.left and not self.right:\n raise IndexError('pop from empty quack')\n\n # Re-balance stacks\n if not self.left:\n self.balance(self.right, self.left)\n\n return self.left.pop()\n\n def pull(self):\n if not self.left and not self.right:\n raise IndexError('pull from empty quack')\n\n # Re-balance stacks\n if not self.right:\n self.balance(self.left, self.right)\n\n # If you really want to implement pull() as the question is stated remove the return\n return self.right.pop()\n\n def balance(self, primary, secondary):\n size = len(primary)\n # Move half of primary stack to buffer\n for _ in range(size // 2):\n self.temp.append(primary.pop())\n # Move remainder of primary to secondary\n while primary:\n secondary.append(primary.pop())\n # Move temp elements back to primary\n while self.temp:\n primary.append(self.temp.pop())\n\n\nquack = Quack()\n\nfor i in range(6):\n quack.push(i + 1)\n\nprint(quack)\nprint(quack.pull())\nprint(quack)\nprint(quack.pop())\nprint(quack)\n","sub_path":"Python/quack.py","file_name":"quack.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"479903159","text":"#!/usr/bin/python3\n\n# pylint: disable=too-many-locals, too-many-statements, too-many-branches\n\nfrom pathlib import Path\nfrom shutil import copyfile, rmtree\nfrom sys import argv, exit as sys_exit\n\nfrom git import Repo\nfrom git.exc import InvalidGitRepositoryError\n\nfrom twrpdtgen import __version__ as version, aik_path\nfrom twrpdtgen import current_path, working_path\nfrom twrpdtgen.aik_manager import AIKManager\nfrom twrpdtgen.info_readers.buildprop_reader import BuildPropReader\nfrom twrpdtgen.misc import error, get_device_arch, \\\n make_twrp_fstab, open_file_and_read, print_help, render_template\n\nTWRPDTGEN_REPO = None\ntry:\n TWRPDTGEN_REPO = Repo(current_path)\nexcept InvalidGitRepositoryError:\n error(\"Please clone the script with Git instead of downloading it as a zip\")\n sys_exit()\nlast_commit = TWRPDTGEN_REPO.head.object.hexsha[:7]\n\n\ndef main():\n print(f\"TWRP device tree generator\\n\"\n \"Python Edition\\n\"\n f\"Version {version}\\n\")\n recovery_image = Path()\n try:\n recovery_image = Path(argv[1])\n except IndexError:\n error(\"Recovery image not provided\")\n print_help()\n sys_exit()\n\n if not recovery_image.is_file():\n error(\"Recovery image doesn't exist\")\n print_help()\n sys_exit()\n\n aik = AIKManager(aik_path)\n aik_ramdisk_path, aik_images_path = aik.extract_recovery(recovery_image)\n\n print(\"Getting device infos...\")\n arch_binary = None\n if Path(aik_ramdisk_path / \"sbin\" / \"recovery\").is_file():\n arch_binary = aik_ramdisk_path / \"sbin\" / \"recovery\"\n elif Path(aik_ramdisk_path / \"sbin\" / \"setlockstate\").is_file():\n arch_binary = aik_ramdisk_path / \"sbin\" / \"setlockstate\"\n elif Path(aik_ramdisk_path / \"init\").is_file():\n arch_binary = aik_ramdisk_path / \"init\"\n else:\n error(\"No expected binary has been found\")\n sys_exit()\n\n build_prop = 
BuildPropReader(aik_ramdisk_path / \"default.prop\")\n\n aik_images_path_base = str(aik_images_path / \"recovery.img-\")\n device_tree_path = working_path / build_prop.manufacturer / build_prop.codename\n device_tree_prebuilt_path = device_tree_path / \"prebuilt\"\n device_tree_recovery_root_path = device_tree_path / \"recovery\" / \"root\"\n # device_tree_files = [\"Android.mk\", \"AndroidProducts.mk\", \"BoardConfig.mk\", \"device.mk\",\n # \"omni_\" + device_codename + \".mk\", \"vendorsetup.sh\"]\n\n device_arch = get_device_arch(arch_binary)\n device_have_kernel = Path(aik_images_path_base + \"zImage\").is_file()\n device_have_dt_image = Path(aik_images_path_base + \"dt\").is_file()\n device_have_dtb_image = Path(aik_images_path_base + \"dtb\").is_file()\n device_have_dtbo_image = Path(aik_images_path_base + \"dtbo\").is_file()\n device_base_address = open_file_and_read(aik_images_path_base + \"base\")\n device_board_name = open_file_and_read(aik_images_path_base + \"board\")\n device_cmdline = open_file_and_read(aik_images_path_base + \"cmdline\")\n # device_hash_type = open_file_and_read(aik_images_path_base + \"hashtype\")\n device_header_version = open_file_and_read(aik_images_path_base + \"header_version\")\n # device_image_type = open_file_and_read(aik_images_path_base + \"imgtype\")\n # device_kernel_offset = open_file_and_read(aik_images_path_base + \"kernel_offset\")\n device_recovery_size = open_file_and_read(aik_images_path_base + \"origsize\")\n # device_recovery_sp = open_file_and_read(aik_images_path_base + \"os_patch_level\")\n # device_recovery_version = open_file_and_read(aik_images_path_base + \"os_version\")\n device_pagesize = open_file_and_read(aik_images_path_base + \"pagesize\")\n device_ramdisk_compression = open_file_and_read(aik_images_path_base + \"ramdiskcomp\")\n device_ramdisk_offset = open_file_and_read(aik_images_path_base + \"ramdisk_offset\")\n # device_second_offset = open_file_and_read(aik_images_path_base + \"second_offset\")\n device_tags_offset = open_file_and_read(aik_images_path_base + \"tags_offset\")\n\n if not device_arch:\n error(\"Device architecture not supported\")\n sys_exit()\n\n device_have_64bit_arch = device_arch in (\"arm64\", \"x86_64\")\n\n print(\"Creating device tree folders...\")\n if device_tree_path.is_dir():\n rmtree(device_tree_path, ignore_errors=True)\n device_tree_path.mkdir(parents=True)\n device_tree_prebuilt_path.mkdir(parents=True)\n device_tree_recovery_root_path.mkdir(parents=True)\n\n print(\"Copying kernel...\")\n device_kernel_name = \"\"\n if device_have_kernel:\n if device_arch == \"arm\":\n device_kernel_name = \"zImage\"\n elif device_arch == \"arm64\":\n device_kernel_name = \"Image.gz\"\n elif device_arch in (\"x86\", \"x86_64\"):\n device_kernel_name = \"bzImage\"\n else:\n device_kernel_name = \"zImage\"\n if device_arch in (\"arm\", \"arm64\") and (\n not device_have_dt_image and not device_have_dtb_image):\n device_kernel_name += \"-dtb\"\n copyfile(aik_images_path_base + \"zImage\", device_tree_prebuilt_path / device_kernel_name)\n if device_have_dt_image:\n copyfile(aik_images_path_base + \"dt\", device_tree_prebuilt_path / \"dt.img\")\n if device_have_dtb_image:\n copyfile(aik_images_path_base + \"dtb\", device_tree_prebuilt_path / \"dtb.img\")\n if device_have_dtbo_image:\n copyfile(aik_images_path_base + \"dtbo\", device_tree_prebuilt_path / \"dtbo.img\")\n\n if Path(aik_ramdisk_path / \"etc\" / \"twrp.fstab\").is_file():\n print(\"Found a TWRP fstab, copying it...\")\n copyfile(aik_ramdisk_path 
/ \"etc\" / \"twrp.fstab\", device_tree_path / \"recovery.fstab\")\n else:\n print(\"Generating fstab...\")\n make_twrp_fstab(aik_ramdisk_path / \"etc\" / \"recovery.fstab\",\n device_tree_path / \"recovery.fstab\")\n\n for file in aik_ramdisk_path.iterdir():\n if file.name.endswith(\".rc\") and file != \"init.rc\":\n copyfile(aik_ramdisk_path / file,\n device_tree_recovery_root_path / file.name, follow_symlinks=True)\n\n print(\"Creating Android.mk...\")\n render_template(device_tree_path, \"Android.mk.jinja2\", device_codename=build_prop.codename)\n\n print(\"Creating AndroidProducts.mk...\")\n render_template(device_tree_path, \"AndroidProducts.mk.jinja2\", device_codename=build_prop.codename)\n\n print(\"Creating BoardConfig.mk...\")\n render_template(device_tree_path, \"BoardConfig.mk.jinja2\",\n device_manufacturer=build_prop.manufacturer,\n device_codename=build_prop.codename,\n device_is_ab=build_prop.device_is_ab,\n device_platform=build_prop.platform,\n device_arch=device_arch,\n device_board_name=device_board_name,\n device_recovery_size=device_recovery_size,\n device_cmdline=device_cmdline,\n device_have_kernel=device_have_kernel,\n device_kernel_name=device_kernel_name,\n device_have_dt_image=device_have_dt_image,\n device_have_dtb_image=device_have_dtb_image,\n device_have_dtbo_image=device_have_dtbo_image,\n device_header_version=device_header_version,\n device_base_address=device_base_address,\n device_pagesize=device_pagesize,\n device_ramdisk_offset=device_ramdisk_offset,\n device_tags_offset=device_tags_offset,\n device_ramdisk_compression=device_ramdisk_compression,\n flash_block_size=str(int(device_pagesize) * 64)\n )\n\n print(\"Creating device.mk...\")\n render_template(device_tree_path, \"device.mk.jinja2\",\n device_codename=build_prop.codename,\n device_manufacturer=build_prop.manufacturer,\n device_platform=build_prop.platform,\n device_is_ab=build_prop.device_is_ab)\n\n print(f\"Creating omni_{build_prop.codename}.mk...\")\n render_template(device_tree_path, \"omni.mk.jinja2\", out_file=f\"omni_{build_prop.codename}.mk\",\n device_codename=build_prop.codename,\n device_manufacturer=build_prop.manufacturer,\n device_brand=build_prop.brand,\n device_model=build_prop.model,\n device_have_64bit_arch=device_have_64bit_arch\n )\n\n print(\"Creating vendorsetup.sh...\")\n render_template(device_tree_path, \"vendorsetup.sh.jinja2\", device_codename=build_prop.codename)\n\n dt_repo = Repo.init(device_tree_path)\n with dt_repo.config_writer() as git_config:\n git_config.set_value('user', 'email', 'barezzisebastiano@gmail.com')\n git_config.set_value('user', 'name', 'Sebastiano Barezzi')\n dt_repo.index.add([\"*\"])\n commit_message = render_template(None, \"commit_message.jinja2\", to_file=False,\n device_codename=build_prop.codename,\n device_arch=device_arch,\n device_manufacturer=build_prop.manufacturer,\n device_brand=build_prop.brand,\n device_model=build_prop.model,\n last_commit=last_commit)\n dt_repo.index.commit(commit_message)\n print(f\"\\nDone! You can find the device tree in {str(device_tree_path)}\")\n","sub_path":"twrpdtgen/twrp_dt_gen.py","file_name":"twrp_dt_gen.py","file_ext":"py","file_size_in_byte":9504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"69730483","text":"# myapi/urls.py\nfrom django.urls import include, path\nfrom rest_framework import routers\nfrom . 
import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'vendedor', views.VendedorViewSet)\nrouter.register(r'venta', views.VentaViewSet)\nrouter.register(r'producto', views.ProductoViewSet)\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\nurlpatterns = [\n path('', include(router.urls)),\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n path('login/', views.UserLogin),\n path('user/', views.UserCrud),\n path('product/', views.ProductCrud),\n path('sales/', views.SalesCrud),\n path('statistics/', views.Statistics)\n]\n","sub_path":"backend/myapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"544817651","text":"import subprocess\nimport os\n\n\ndef compare_files(answer_file, program_file):\n answer_text = str(answer_file, 'utf-8')\n temp_origin = \"my_program.py\"\n with open(\"my_program.py\", 'wb') as original_file:\n original_file.write(program_file)\n program = 'python my_program.py'\n data_text = subprocess.check_output(program, encoding='utf-8', shell=True)\n os.remove(temp_origin)\n if answer_text == data_text:\n mark = 5\n else:\n mark = 2\n return mark\n","sub_path":"testing/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"13350033","text":"import multiprocessing\nimport socket\nimport sys\nimport time\n\ndef handle(connection, address, key_list):\n import logging\n logging.basicConfig(level=logging.DEBUG)\n logger = logging.getLogger(\"process-%r\" % (address,))\n\n found = False\n try:\n logger.debug(\"Connected %r at %r\", connection, address)\n while True:\n # recv returns bytes on Python 3; decode so the string checks below work\n data = connection.recv(1024).decode('utf-8')\n if data == \"\":\n logger.debug(\"Socket closed remotely\")\n break\n logger.debug(\"Received data %r\", data)\n # if a key in the list is found, then remove that key;\n # iterate over a snapshot since keys are removed from the shared list\n for key in list(key_list):\n if key in data:\n logger.debug(\"Found key {}\".format(key))\n key_list.remove(key)\n\n # an empty list denotes all keywords are found\n if len(key_list) == 0:\n found = True\n sys.exit(0)\n\n # connection.sendall(data)\n # logger.debug(\"Sent data\")\n except:\n # sys.exit is an exception. Successful exit\n # should not have exception message.\n if not found:\n logger.exception(\"Problem handling request\")\n finally:\n logger.debug(\"Closing socket\")\n connection.shutdown(socket.SHUT_RD)\n connection.close()\n\n\nclass Server(object):\n def __init__(self, hostname, port, keywords):\n import logging\n self.logger = logging.getLogger(\"server\")\n self.hostname = hostname\n self.port = port\n self.keywords = keywords\n\n def start(self):\n self.logger.debug(\"listening\")\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.bind((self.hostname, self.port))\n self.socket.listen(1)\n\n self.socket.settimeout(60)\n conn, address = self.socket.accept()\n self.logger.debug(\"Got connection\")\n\n # initialize a list for keeping track of keywords not yet found\n # using Manager, which allows an object to be shared\n # between processes. 
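Every read and write on the proxied list goes through the manager process. 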
It is slower than using shared memory.\n manager = multiprocessing.Manager()\n key_list = manager.list(self.keywords)\n\n process = multiprocessing.Process(\n target=handle, args=(conn, address, key_list))\n process.daemon = True\n process.start()\n start_time = time.time() # keep track of time passed\n self.logger.debug(\"Started process %r\", process)\n\n while process.is_alive():\n end_time = time.time()\n # exceed two cycles of collectd agent report\n if end_time - start_time > 65:\n self.logger.debug(\"Closing socket by server object\")\n conn.shutdown(socket.SHUT_RD)\n conn.close()\n process.terminate()\n process.join()\n\n for key in key_list:\n self.logger.debug(\"Failed to find {}\".format(key))\n sys.exit(1)\n\nif __name__ == \"__main__\":\n import logging\n logging.basicConfig(level=logging.DEBUG)\n arg_len = len(sys.argv)\n if arg_len != 2:\n logging.debug(\"Usage: plugin_tester.py [keymetrics].\")\n sys.exit(1)\n\n keywords = sys.argv[1].split(' ')\n server = Server(\"127.0.0.1\", 4242, keywords)\n\n try:\n logging.info(\"Listening\")\n server.start()\n except:\n logging.exception(\"Unexpected exception\")\n sys.exit(1)\n finally:\n logging.info(\"Shutting down\")\n for process in multiprocessing.active_children():\n logging.info(\"Shutting down process %r\", process)\n process.terminate()\n process.join()\n logging.info(\"All done\")\n\n","sub_path":"app-config/integration_test/docker_dir/test_script/plugin_tester.py","file_name":"plugin_tester.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"419064585","text":"import csv, os\nfrom tqdm import tqdm\n\ndef two_loss( clos, op, low, high, ticker, dat):\n\twith open('log2stop.txt','a') as f:\n\t\t\tf.write('it was a bearish in %s in company %s\\n__________________\\n'%(dat, ticker))\n\t\t\tmin_elem = low[0]\n\t\t\tmax_elem = high[0]\n\t\t\tmy_list = [0,0,0]\n\t\t\ti = 1\n\t\t\tfor line, row in zip(low, high):\n\t\t\t\tif line < min_elem:\n\t\t\t\t\tmin_elem = line\n\t\t\t\tif row > max_elem:\n\t\t\t\t\tmax_elem = row\n\t\t\t\tif (min_elem/op)-1 <= -0.02:\n\t\t\t\t\tf.write('%d-day VERY BAD return: -0.02\\n'%(i))\n\t\t\t\t\tmy_list[i-1]-=0.02\n\t\t\t\t\tbreak\n\t\t\t\telif (max_elem) >= clos[0]:\n\t\t\t\t\tf.write('%d-day return: %f\\n'%(i, abs((op/clos[0])-1)))\n\t\t\t\t\tmy_list[i-1] += abs((op/clos[0])-1)\n\t\t\t\telse: \n\t\t\t\t\tf.write('%d-day return: %f\\n'%(i, clos[i-1]/op-1))\t\n\t\t\t\t\tmy_list[i-1] += clos[i]/op-1\n\t\t\t\ti=i+1\t\t\t\n\t\t\tf.write('\\n')\n\t\t\tf.close()\n\t\t\toutfile = open('tmp.csv', 'a')\n\t\t\twriter = csv.writer(outfile)\n\t\t\twriter.writerow(my_list)\n\t\t\toutfile.close()\n\ndef main(ticker):\n\tclosing_price = []\n\topen_price= []\n\tlow_price = []\n\tdata_of_trade= []\n\thigh_price= []\n\twith open('../../history/csv/'+ticker+'.csv','r') as csvfile:\n\t\tspamreader = csv.reader(csvfile)\n\t\tfor row in spamreader:\n\t\t\tclosing_price.append(row[4])\n\t\t\topen_price.append(row[1])\n\t\t\tlow_price.append(row[3])\n\t\t\tdata_of_trade.append(row[0])\n\t\t\thigh_price.append(row[2])\n\t\t\t\n\tdel closing_price[0]\n\tdel open_price[0]\n\tdel low_price[0]\n\tdel data_of_trade[0]\n\tdel high_price[0]\n\tclosing_price = list(reversed(list(map(float, closing_price))))\n\topen_price = list(reversed( list(map(float, open_price))))\n\tlow_price = list(reversed(list(map(float, low_price))))\n\thigh_price = list(reversed(list(map(float, high_price))))\n\tdata_of_trade = list(reversed( 
data_of_trade))\n\tf = open('tmp.csv','w')\n\tf.close()\n\tlist_my = [0,0,0,'s',0,0]\n\tj=0\n\n\t#print closing_price\n\tfor i,enum in enumerate(closing_price[:-4]):\n\t\tif open_price[i+1] < closing_price[i]:\n\t\t\tj= j+1\n\t\t\tif (abs((open_price[i+1])/closing_price[i]-1)) <= 0.04 and (abs((open_price[i+1])/closing_price[i]-1)) >= 0.02:\n\t\t\t\t#clos, op, low, high, ticker, dat\n\t\t\t\ttwo_loss(closing_price[i:i+4], open_price[i+1], low_price[i+1:i+4], high_price[i+1:i+4], ticker, data_of_trade[i])\n\n\toutfile = open('tmp.csv', 'r')\n\twriter = csv.reader(outfile)\n\tkostyl=0\n\tfor x in writer:\n\t\tkostyl= kostyl+1\n\t\tlist_my[0]+=float(x[0])\n\t\tlist_my[1]+=float(x[1])\n\t\tlist_my[2]+=float(x[2])\n\tif kostyl:\n\t\t# average only when at least one entry was logged, avoiding ZeroDivisionError\n\t\tlist_my[0] = list_my[0]/kostyl\n\t\tlist_my[1] = list_my[1]/kostyl\n\t\tlist_my[2] = list_my[2]/kostyl\n\toutfile.close()\n\tlist_my[3] = ticker\n\tlist_my[4] = j\n\tlist_my[5] = kostyl\n\twith open('olymp.csv','a') as f:\n\t\twriter = csv.writer(f)\n\t\twriter.writerow(list_my)\n\tf.close()\n\tos.system('rm tmp.csv')\n\nwith open('../../sp500.txt','r') as w:\n\tfor symbol in tqdm(w.readlines()):\n\t\tstring = symbol[:-1]\n\t\tmain(string)\n","sub_path":"strategy/bearish/bearish.py","file_name":"bearish.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"39442920","text":"import sys\r\n\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5 import QtCore\r\nfrom PyQt5.QtCore import QDate\r\n\r\nclass CalendarExample( QWidget):\r\n def __init__(self):\r\n super(CalendarExample, self).__init__()\r\n self.initUI()\r\n def initUI(self):\r\n # instantiate the calendar widget\r\n self.cal = QCalendarWidget(self)\r\n # set the minimum selectable date\r\n self.cal.setMinimumDate(QDate(1980,1,1))\r\n # set the maximum selectable date\r\n self.cal.setMaximumDate(QDate(3000,1,1))\r\n # make the calendar grid visible\r\n self.cal.setGridVisible(True)\r\n # move the widget into place\r\n self.cal.move(20,20)\r\n # connect the clicked signal to a custom slot\r\n self.cal.clicked[QtCore.QDate].connect(self.showDate)\r\n\r\n # create a label\r\n self.lb1=QLabel(self)\r\n # set the label text to the date currently selected in the calendar, shown as a string\r\n date=self.cal.selectedDate()\r\n self.lb1.setText(date.toString('yyyy-MM-dd dddd'))\r\n\r\n # move the label into place\r\n self.lb1.move(20,300)\r\n # set the main window position, initial size and title\r\n self.setGeometry(200,100,400,350)\r\n self.setWindowTitle('Calendar Example')\r\n\r\n def showDate(self,date):\r\n # update the label text\r\n self.lb1.setText(date.toString('yyyy-MM-dd dddd'))\r\nif __name__ == '__main__':\r\n app=QApplication(sys.argv)\r\n demo=CalendarExample()\r\n demo.show()\r\n sys.exit(app.exec_())\r\n","sub_path":"pyqt/日历控件.py","file_name":"日历控件.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"483314283","text":"import sys\nr_input = sys.stdin.readline\n\nN, M = map(int, r_input().split())\n\ndp = [0] * M\ndp[0] = 1\n\nfor _ in range(N):\n s = r_input().rstrip()\n if s[0] == 'X':\n dp[0] = 0\n\n for i in range(1, M):\n if s[i] == '.':\n dp[i] += dp[i - 1]\n\n else:\n dp[i] = 0\n\nprint(dp[-1] % 1000000007)\n","sub_path":"Algorithm/Baekjoon/15241 Counting paths/15241.py","file_name":"15241.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"558573633","text":"import csv\n\nimport numpy as np\n\nimport constants as C\n\n\n# import matplotlib.pyplot as plt\n\ndef __get_list_of_pops():\n populations = []\n with open(C.SATPOP_MAIN_DATA_FILE, \"r\") as fd:\n tsvreader 
= csv.reader(fd, delimiter='\\t', quotechar='|')\n for line in tsvreader:\n populations.append(float(line[3]))\n return populations\n\n\ndef generate_histogram(divisions, populations=None):\n if not populations:\n populations = __get_list_of_pops()\n print(np.histogram(populations, bins=divisions))\n\n\ndef generate_even_divisions(num_of_divisions):\n populations = __get_list_of_pops()\n populations.sort()\n bucket_size = len(populations) / num_of_divisions\n upper_barriers = []\n for i in range(1, num_of_divisions):\n upper_barriers.append(populations[i * bucket_size])\n upper_barriers.append(populations[-1] + 1)\n return upper_barriers\n\n\nif __name__ == \"__main__\":\n pops = __get_list_of_pops()\n less = 0\n more = 0\n for pop in pops:\n if pop < 1.0:\n less += 1\n else:\n more += 1\n print(less, more)\n","sub_path":"metadata_utils.py","file_name":"metadata_utils.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"280095266","text":"# Copyright 2015-2016 F5 Networks Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom f5_heat.resources import f5_sys_iapptemplate\nfrom heat.common import exception\nfrom heat.common import template_format\nfrom heat.engine.hot.template import HOTemplate20150430\nfrom heat.engine import rsrc_defn\nfrom heat.engine import template\n\nimport mock\nimport pytest\n\n\niapp_template_defn = '''\nheat_template_version: 2015-04-30\ndescription: Testing iAppTemplate plugin\nresources:\n bigip_rsrc:\n type: F5::BigIP::Device\n properties:\n ip: 10.0.0.1\n username: admin\n password: admin\n iapp_template:\n type: F5::Sys::iAppTemplate\n depends_on: bigip_rsrc\n properties:\n name: testing_template\n bigip_server: bigip_rsrc\n requires_modules: [ ltm ]\n implementation: |\n hello\n presentation: |\n hello\n'''\n\nbad_iapp_template_defn = '''\nheat_template_version: 2015-04-30\ndescription: Testing iAppTemplate plugin\nresources:\n bigip_rsrc:\n type: F5::BigIP\n properties:\n ip: 10.0.0.1\n username: admin\n password: admin\n iapp_template:\n type: F5::Sys::iAppTemplate\n depends_on: bigip_rsrc\n properties:\n name: testing_template\n bigip_server: bigip_rsrc\n requires_modules: not_a_list\n bad_iplementation: |\n hello\n presentation: |\n hello\n'''\n\niapp_actions_dict = {\n 'name': u'testing_template',\n 'actions': {\n 'definition': {\n 'implementation': u'hello\\n',\n 'presentation': u'hello\\n',\n }\n },\n 'requiresModules': [u'ltm']\n}\n\n\nversions = ('2015-04-30', '2015-04-30')\n\n\n@mock.patch.object(template, 'get_version', return_value=versions)\n@mock.patch.object(\n template,\n 'get_template_class',\n return_value=HOTemplate20150430\n)\ndef mock_template(templ_vers, templ_class, test_templ=iapp_template_defn):\n '''Mock a Heat template for the Kilo version.'''\n templ_dict = template_format.parse(test_templ)\n return templ_dict\n\n\ndef create_resource_definition(templ_dict):\n '''Create a resource definition.'''\n rsrc_def = 
rsrc_defn.ResourceDefinition(\n 'test_stack',\n templ_dict['resources']['iapp_template']['type'],\n properties=templ_dict['resources']['iapp_template']['properties']\n )\n return rsrc_def\n\n\n@pytest.fixture\ndef F5SysiAppTemplate():\n '''Instantiate the F5SysiAppTemplate resource.'''\n template_dict = mock_template()\n rsrc_def = create_resource_definition(template_dict)\n return f5_sys_iapptemplate.F5SysiAppTemplate(\n \"iapp_template\", rsrc_def, mock.MagicMock()\n )\n\n\n@pytest.fixture\ndef CreateTemplateSideEffect(F5SysiAppTemplate):\n F5SysiAppTemplate.get_bigip()\n F5SysiAppTemplate.bigip.iapp.create_template.side_effect = \\\n Exception()\n return F5SysiAppTemplate\n\n\n@pytest.fixture\ndef DeleteTemplateSideEffect(F5SysiAppTemplate):\n F5SysiAppTemplate.get_bigip()\n F5SysiAppTemplate.bigip.iapp.delete_template.side_effect = \\\n Exception()\n return F5SysiAppTemplate\n\n# Tests\n\n\ndef test_handle_create(F5SysiAppTemplate):\n create_result = F5SysiAppTemplate.handle_create()\n assert create_result is None\n assert F5SysiAppTemplate.bigip.iapp.create_template.call_args == \\\n mock.call(\n name='testing_template',\n template=iapp_actions_dict\n )\n\n\ndef test_handle_create_error(CreateTemplateSideEffect):\n '''Currently, test exists to satisfy 100% code coverage.'''\n with pytest.raises(exception.ResourceFailure):\n CreateTemplateSideEffect.handle_create()\n\n\ndef test_handle_delete(F5SysiAppTemplate):\n delete_result = F5SysiAppTemplate.handle_delete()\n assert delete_result is None\n assert F5SysiAppTemplate.bigip.iapp.delete_template.call_args == \\\n mock.call('testing_template')\n\n\ndef test_handle_delete_error(DeleteTemplateSideEffect):\n '''Currently, test exists to satisfy 100% code coverage.'''\n with pytest.raises(exception.ResourceFailure):\n DeleteTemplateSideEffect.handle_delete()\n\n\ndef test_resource_mapping():\n rsrc_map = f5_sys_iapptemplate.resource_mapping()\n assert rsrc_map == {\n 'F5::Sys::iAppTemplate': f5_sys_iapptemplate.F5SysiAppTemplate\n }\n\n\ndef test_bad_property():\n template_dict = mock_template(test_templ=bad_iapp_template_defn)\n rsrc_def = create_resource_definition(template_dict)\n f5_sys_iapptemplate_obj = f5_sys_iapptemplate.F5SysiAppTemplate(\n 'test',\n rsrc_def,\n mock.MagicMock()\n )\n with pytest.raises(exception.StackValidationFailed):\n f5_sys_iapptemplate_obj.validate()\n","sub_path":"f5_heat/resources/test/test_f5_sys_iapptemplate.py","file_name":"test_f5_sys_iapptemplate.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"9539566","text":"import matplotlib.pyplot as plt \nimport numpy as np \nfrom numpy.linalg import inv\nfrom functools import partial\n\ndef plot_predict(func, range, label='', resolution=0.02, color='red'):\n _x = np.arange(range[0], range[1], resolution)\n _y = [ func(x) for x in _x ]\n plt.plot(_x, _y, color=color, label=label)\n\n\ndef phi(x, m):\n _phi = [ 1 ]\n for i in range(m):\n _phi.append(x**(i+1))\n return _phi\n\ndef Phi(X, m):\n _Phi = [ ]\n for x in X:\n _Phi.append(phi(x, m))\n return np.array(_Phi)\n\ndef predict(x, omega):\n return omega.dot(phi(x, len(omega)-1))\n\nRANGE = [0, 1]\nX = np.linspace(*RANGE, 10)\ntarget = np.sin(2*np.pi*X) + np.random.normal(0, 0.2, len(X))\n\nfor idx, dim in enumerate([2, 3, 6, 9]):\n # compute the model parameters\n _Phi = Phi(X, dim)\n omega = inv(_Phi.T.dot(_Phi)).dot(_Phi.T).dot(target)\n ## regularization\n lambda_ = 1e-3\n omega_r = inv(lambda_*np.eye(dim+1)+ _Phi.T.dot(_Phi)).dot(_Phi.T).dot(target)\n\n # prediction functions: bind the fitted weights without rebinding predict itself\n predict_fit = partial(predict, omega=omega)\n predict_r = partial(predict, omega=omega_r)\n\n # plot the results\n ax = plt.subplot(2, 2, idx+1)\n plot_predict(predict_fit, RANGE, label='no regularization', color='red')\n plot_predict(predict_r, RANGE, label='regularization', color='green')\n plt.scatter(X, target)\n plt.title('Dim = $%d$' % dim)\n plt.legend()\n\nplt.show()","sub_path":"Polynomial curve fitting.py","file_name":"Polynomial curve fitting.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"424426371","text":"def romanToInteger(s):\n dict_map = {\n 'I':1,\n 'V':5,\n 'X':10,\n 'L':50,\n 'C':100,\n 'D':500,\n 'M':1000 }\n result = 0\n curr = 0 \n prev = 0\n s = s[::-1]\n for i in range(0,len(s)):\n curr = dict_map[s[i]]\n if i!=0:\n prev = dict_map[s[i-1]]\n if prev > curr:\n result = result - curr\n else:\n result = result + curr\n return result\n \nprint (romanToInteger('LIV')) ","sub_path":"LatestCodes/LeetCodePractise/romanToInteger.py","file_name":"romanToInteger.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"253944110","text":"casosTeste = int(input())\n\nfor i in range(casosTeste):\n alfabeto = [0 for i in range(26)]\n\n string = input()\n for j in range(len(string)):\n if (ord(string[j]) >= 97 and ord(string[j]) <= 122):\n alfabeto[ord(string[j]) - 97] = alfabeto[ord(string[j]) - 97] + 1\n\n nLetras = 0\n for j in range(len(alfabeto)):\n if (alfabeto[j] >= 1):\n nLetras = nLetras + 1\n\n if (nLetras == len(alfabeto)):\n print(\"frase completa\")\n elif (nLetras >= len(alfabeto) / 2):\n print(\"frase quase completa\")\n else:\n print(\"frase mal elaborada\")\n","sub_path":"strings/1551.py","file_name":"1551.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"494086845","text":"# built-in modules\nimport os\nimport math\nimport shlex\nimport json\nfrom subprocess import Popen, PIPE\nfrom collections import OrderedDict\nfrom tempfile import NamedTemporaryFile\n\n\nTREC_EVAL_PATH = 'bin/trec_eval'\nSAMPLE_EVAL_PATH = 'bin/sample_eval.pl'\nUBIRE_PATH = 'bin/ubire-v0.1.0.jar'\n\n\ndef __guess_type(e):\n try:\n e = float(e)\n if e == math.floor(e):\n e = int(e)\n except ValueError:\n pass\n return e\n\n\ndef call_trec_eval(results, qrels_path,\n trec_eval_path=TREC_EVAL_PATH, trec_eval_flags=None):\n\n if type(results) == str and os.path.exists(results):\n # the results are already a path to the result file\n results_path = results\n delete_result_path = False\n else:\n with NamedTemporaryFile(mode='w', delete=False) as tmpf:\n tmpf.write('\\n'.join(results))\n results_path = tmpf.name\n delete_result_path = True\n\n if trec_eval_flags is None:\n trec_eval_flags = []\n cmd = (['./{}'.format(trec_eval_path)] +\n ['-q'] + trec_eval_flags +\n [qrels_path, results_path])\n\n try:\n proc = Popen(cmd, stdout=PIPE, stderr=PIPE)\n resp = proc.communicate()\n msg_out, msg_err = (msg.decode('utf-8') for msg in resp)\n except Exception:\n raise\n finally:\n if delete_result_path:\n os.remove(results_path)\n\n if len(msg_err) > 0:\n raise IOError(msg_err)\n\n results = {}\n for ln in msg_out.split('\\n'):\n ln = ln.strip()\n if not ln:\n continue\n ln_split = ln.split()[:3]\n metric, qid, value = (__guess_type(elem.strip()) for elem in ln_split)\n 
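# each trec_eval -q output line looks like <metric> <qid> <value>; __guess_type coerces numeric strings to int/float\n        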
results.setdefault(qid, OrderedDict()).setdefault(metric, value)\n return results\n\n\ndef __call_sample_eval(\n formatted_results, qrels_path, sample_eval_path,\n sample_eval_metrics=None):\n\n if sample_eval_metrics is not None:\n sample_eval_metrics = set(sample_eval_metrics)\n\n with NamedTemporaryFile(mode='w', delete=False) as tmpf:\n tmpf.write('\\n'.join(formatted_results))\n results_path = tmpf.name\n\n cmd = (['./{}'.format(sample_eval_path), '-q', qrels_path, results_path])\n\n try:\n proc = Popen(cmd, stdout=PIPE, stderr=PIPE)\n resp = proc.communicate()\n msg_out, msg_err = (msg.decode('utf-8') for msg in resp)\n except Exception:\n raise\n finally:\n os.remove(results_path)\n\n if len(msg_err) > 0:\n raise IOError(msg_err)\n\n results = {}\n for ln in msg_out.split('\\n'):\n ln = ln.strip()\n if not ln:\n continue\n ln_split = ln.split()[:3]\n metric, qid, value = (__guess_type(elem.strip()) for elem in ln_split)\n\n valid_metric = (\n sample_eval_metrics is None or\n metric in sample_eval_metrics\n )\n if not valid_metric:\n continue\n\n results.setdefault(qid, OrderedDict()).setdefault(metric, value)\n return results\n\n\ndef __call_ubire(\n formatted_results, qrels_path, ubire_path,\n qread_path, ubire_flags=None):\n\n with NamedTemporaryFile(mode='w', delete=False) as tmpf:\n tmpf.write('\\n'.join(formatted_results))\n results_path = tmpf.name\n\n if ubire_flags is None:\n ubire_flags = []\n cmd = (\n 'java -jar {ubire} --qrels-file={qrels} --qread-file={qread} '\n '--ranking-file={results} {flags}'.format(\n ubire=ubire_path, qrels=qrels_path, qread=qread_path,\n results=results_path, flags=' '.join(ubire_flags)))\n\n try:\n proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)\n resp = proc.communicate()\n msg_out, msg_err = (msg.decode('utf-8') for msg in resp)\n except Exception:\n raise\n finally:\n os.remove(results_path)\n\n if msg_err:\n raise IOError(msg_err)\n\n results = {}\n for ln in msg_out.split('\\n'):\n ln = ln.strip()\n if not ln:\n continue\n data = ln.split()\n\n # the data on the line with the average metrics has 3 entries,\n # the data on the per-query metric has four entries.\n if len(data) == 3:\n metric, qid, value = data\n else:\n metric, qid, _, value = data\n\n results.setdefault(__guess_type(qid), OrderedDict()).setdefault(\n metric.strip().lower(), __guess_type(value.strip()))\n return results\n\n\ndef __make_trec_eval_results(run_name, queries_ids, elasticsearch_results):\n lines = []\n\n if all(len(r) == 0 for r in elasticsearch_results):\n raise ValueError(\"No result retrieved for any query\")\n\n for qid, results in zip(queries_ids, elasticsearch_results):\n for rank, result in enumerate(results, start=0):\n lines.append('{q} 0 {d} {r} {s} {n}'.format(\n q=qid, d=result['_id'], r=rank,\n s=result['_score'], n=run_name))\n\n return lines\n\n\ndef run_ubire(\n run_name, queries_ids, elasticsearch_results,\n qrels_path, qread_path, ubire_path=UBIRE_PATH, ubire_flags=None):\n\n if ubire_flags is None:\n ubire_flags = ['--readability', '--rbp-p=0.8']\n\n formatted_results = __make_trec_eval_results(\n run_name, queries_ids, elasticsearch_results)\n\n output = __call_ubire(\n formatted_results, qrels_path, ubire_path, qread_path, ubire_flags)\n\n return output\n\n\ndef run_trec_eval(\n run_name, queries_ids, elasticsearch_results,\n qrels_path, trec_eval_path=TREC_EVAL_PATH, trec_eval_flags=None):\n\n formatted_results = __make_trec_eval_results(\n run_name, queries_ids, elasticsearch_results)\n\n output = call_trec_eval(\n 
formatted_results, qrels_path, trec_eval_path, trec_eval_flags)\n\n return output\n\n\ndef run_sample_eval(\n run_name, queries_ids, elasticsearch_results,\n qrels_path, sample_eval_path=SAMPLE_EVAL_PATH,\n sample_eval_metrics=None):\n\n formatted_results = __make_trec_eval_results(\n run_name, queries_ids, elasticsearch_results)\n\n output = __call_sample_eval(\n formatted_results, qrels_path, sample_eval_path, sample_eval_metrics)\n\n return output\n\n\ndef print_performance(performance, per_query=False):\n print(json.dumps(performance.pop('all'), indent=2, sort_keys=True))\n\n if not per_query:\n return\n\n print('')\n\n for query, results in performance.items():\n print('{}: {}'.format(\n str(query).rjust(2),\n ' '.join(\n '{}: {}'.format(k, str(v).ljust(6))\n for k, v in sorted(results.items())\n )\n ))\n","sub_path":"evalutils.py","file_name":"evalutils.py","file_ext":"py","file_size_in_byte":6617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"417772475","text":"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n#\n# Modifications Copyright 2021 Arm Inc. 
All Rights Reserved.\n# Modified to use TensorFlow 2.0 and data pipelines.\n#\n\"\"\"Functions for loading and preparing data for keyword spotting.\"\"\"\n\nimport os\nimport re\nimport sys\nimport urllib\nfrom pathlib import Path\nimport tarfile\nimport hashlib\nimport random\nimport math\nfrom enum import Enum\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops import gen_audio_ops as audio_ops\n\nMAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M\nRANDOM_SEED = 59185\nBACKGROUND_NOISE_DIR_NAME = '_background_noise_'\nSILENCE_LABEL = '_silence_'\nSILENCE_INDEX = 0\nUNKNOWN_WORD_INDEX = 1\nUNKNOWN_WORD_LABEL = '_unknown_'\n\n\ndef load_wav_file(wav_filename, desired_samples):\n \"\"\"Loads and then decodes a given 16bit PCM wav file.\n\n Decoded audio is scaled to the range [-1, 1] and padded or cropped to the desired number of samples.\n\n Args:\n wav_filename: 16bit PCM wav file to load.\n desired_samples: Number of samples wanted from the audio file.\n\n Returns:\n Tuple consisting of the decoded audio and sample rate.\n \"\"\"\n wav_file = tf.io.read_file(wav_filename)\n decoded_wav = audio_ops.decode_wav(wav_file, desired_channels=1, desired_samples=desired_samples)\n\n return decoded_wav.audio, decoded_wav.sample_rate\n\n\ndef calculate_mfcc(audio_signal, audio_sample_rate, window_size, window_stride, num_mfcc):\n \"\"\"Returns Mel Frequency Cepstral Coefficients (MFCC) for a given audio signal.\n\n Args:\n audio_signal: Raw audio signal in range [-1, 1]\n audio_sample_rate: Audio signal sample rate\n window_size: Window size in samples for calculating spectrogram\n window_stride: Window stride in samples for calculating spectrogram\n num_mfcc: The number of MFCC features wanted.\n\n Returns:\n Calculated mffc features.\n \"\"\"\n spectrogram = audio_ops.audio_spectrogram(input=audio_signal, window_size=window_size, stride=window_stride,\n magnitude_squared=True)\n\n mfcc_features = audio_ops.mfcc(spectrogram, audio_sample_rate, dct_coefficient_count=num_mfcc)\n\n return mfcc_features\n\n\ndef which_set(filename, validation_percentage, testing_percentage):\n \"\"\"Determines which data partition the file should belong to.\n\n We want to keep files in the same training, validation, or testing sets even\n if new ones are added over time. This makes it less likely that testing\n samples will accidentally be reused in training when long runs are restarted\n for example. To keep this stability, a hash of the filename is taken and used\n to determine which set it should belong to. This determination only depends on\n the name and the set proportions, so it won't change as other files are added.\n It's also useful to associate particular files as related (for example words\n spoken by the same person), so anything after '_nohash_' in a filename is\n ignored for set determination. 
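(Concretely, everything from '_nohash_' to the end of the name is stripped with a regular expression before hashing.) 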
This ensures that 'bobby_nohash_0.wav' and\n    'bobby_nohash_1.wav' are always in the same set, for example.\n\n    Args:\n        filename: File path of the data sample.\n        validation_percentage: How much of the data set to use for validation.\n        testing_percentage: How much of the data set to use for testing.\n\n    Returns:\n        String, one of 'training', 'validation', or 'testing'.\n    \"\"\"\n    base_name = os.path.basename(filename)\n    # We want to ignore anything after '_nohash_' in the file name when\n    # deciding which set to put a wav in, so the data set creator has a way of\n    # grouping wavs that are close variations of each other.\n    hash_name = re.sub(r'_nohash_.*$', '', base_name)\n    # This looks a bit magical, but we need to decide whether this file should\n    # go into the training, testing, or validation sets, and we want to keep\n    # existing files in the same set even if more files are subsequently\n    # added.\n    # To do that, we need a stable way of deciding based on just the file name\n    # itself, so we do a hash of that and then use that to generate a\n    # probability value that we use to assign it.\n    hash_name_hashed = hashlib.sha1(tf.compat.as_bytes(hash_name)).hexdigest()\n    percentage_hash = ((int(hash_name_hashed, 16) %\n                        (MAX_NUM_WAVS_PER_CLASS + 1)) *\n                       (100.0 / MAX_NUM_WAVS_PER_CLASS))\n    if percentage_hash < validation_percentage:\n        result = 'validation'\n    elif percentage_hash < (testing_percentage + validation_percentage):\n        result = 'testing'\n    else:\n        result = 'training'\n    return result\n\n\ndef prepare_words_list(wanted_words):\n    \"\"\"Prepends common tokens to the custom word list.\n\n    Args:\n        wanted_words: List of strings containing custom words to spot.\n\n    Returns:\n        List of words with silence and unknown tokens added.\n    \"\"\"\n    return [SILENCE_LABEL, UNKNOWN_WORD_LABEL] + wanted_words\n\n\nclass AudioProcessor:\n    \"\"\"Handles loading, partitioning, and preparing audio training data.\"\"\"\n\n    class Modes(Enum):\n        TRAINING = 1\n        VALIDATION = 2\n        TESTING = 3\n\n    def __init__(self, data_url, data_dir, silence_percentage, unknown_percentage,\n                 wanted_words, validation_percentage, testing_percentage, model_settings):\n        self.data_dir = Path(data_dir)\n        self.model_settings = model_settings\n        self.words_list = prepare_words_list(wanted_words)\n\n        self._tf_datasets = {}\n        self.background_data = None\n        self._set_size = {'training': 0, 'validation': 0, 'testing': 0}\n\n        self._download_and_extract_data(data_url, data_dir)\n        self._prepare_datasets(silence_percentage, unknown_percentage, wanted_words,\n                               validation_percentage, testing_percentage)\n        self._prepare_background_data()\n\n    def get_data(self, mode, background_frequency=0, background_volume_range=0, time_shift=0):\n        \"\"\"Returns the train, validation or test set for KWS as a TF Dataset.\n\n        Args:\n            mode: The set to return, see AudioProcessor.Modes enumeration.\n            background_frequency: How many of the samples have background noise mixed in.\n            background_volume_range: How loud the background noise should be, between 0 and 1.\n            time_shift: Range to randomly shift the training audio by in time.\n\n        Returns:\n            TF dataset that will generate tuples containing an mfcc and corresponding label.\n\n        Raises:\n            ValueError: If mode is not recognised.\n        \"\"\"\n        if mode == AudioProcessor.Modes.TRAINING:\n            dataset = self._tf_datasets['training']\n        elif mode == AudioProcessor.Modes.VALIDATION:\n            dataset = self._tf_datasets['validation']\n        elif mode == AudioProcessor.Modes.TESTING:\n            dataset = self._tf_datasets['testing']\n        else:\n            raise ValueError(\"Incorrect 
dataset type given\")\n\n        use_background = (self.background_data is not None) and (mode == AudioProcessor.Modes.TRAINING)\n        dataset = dataset.map(lambda path, label: self._process_path(path, label, self.model_settings,\n                                                                     background_frequency, background_volume_range,\n                                                                     time_shift, use_background, self.background_data),\n                              num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n        return dataset\n\n    def set_size(self, mode):\n        \"\"\"Get the number of samples in the requested dataset partition.\n\n        Args:\n            mode: Which partition, see AudioProcessor.Modes enumeration.\n\n        Returns:\n            Number of samples in the partition.\n\n        Raises:\n            ValueError: If mode is not recognised.\n        \"\"\"\n        if mode == AudioProcessor.Modes.TRAINING:\n            return self._set_size['training']\n        elif mode == AudioProcessor.Modes.VALIDATION:\n            return self._set_size['validation']\n        elif mode == AudioProcessor.Modes.TESTING:\n            return self._set_size['testing']\n        else:\n            raise ValueError('Incorrect dataset type given')\n\n    @staticmethod\n    def _process_path(path, label, model_settings, background_frequency, background_volume_range, time_shift_samples,\n                      use_background, background_data):\n        \"\"\"Load wav files and calculate mfcc features.\n\n        Random shifting of samples and adding in background noise is done within this function as well.\n        This function is meant to be mapped onto a TF Dataset by using a lambda function.\n\n        Args:\n            path: Path to the wav file to load.\n            label: Integer label for classifying the audio clip.\n            model_settings: Dictionary of settings for model being trained.\n            background_frequency: How many clips will have background noise, 0.0 to 1.0.\n            background_volume_range: How loud the background noise will be.\n            time_shift_samples: How much to randomly shift the clips by.\n            use_background: Add in background noise to audio clips or not.\n            background_data: Ragged tensor of loaded background noise samples.\n\n        Returns:\n            Tuple of calculated flattened mfcc and its class label.\n        \"\"\"\n\n        desired_samples = model_settings['desired_samples']\n        audio, sample_rate = load_wav_file(path, desired_samples=desired_samples)\n\n        # Make our own silence audio data.\n        if label == SILENCE_INDEX:\n            audio = tf.multiply(audio, 0)\n\n        # Shift samples start position and pad any gaps with zeros.\n        if time_shift_samples > 0:\n            time_shift_amount = tf.random.uniform(shape=(), minval=-time_shift_samples, maxval=time_shift_samples,\n                                                  dtype=tf.int32)\n        else:\n            time_shift_amount = 0\n        if time_shift_amount > 0:\n            time_shift_padding = [[time_shift_amount, 0], [0, 0]]\n            time_shift_offset = [0, 0]\n        else:\n            time_shift_padding = [[0, -time_shift_amount], [0, 0]]\n            time_shift_offset = [-time_shift_amount, 0]\n\n        padded_foreground = tf.pad(audio, time_shift_padding, mode='CONSTANT')\n        sliced_foreground = tf.slice(padded_foreground, time_shift_offset, [desired_samples, -1])\n\n        # Get a random section of background noise.\n        if use_background:\n            background_index = tf.random.uniform(shape=(), maxval=background_data.shape[0], dtype=tf.int32)\n            background_sample = background_data[background_index]\n            background_offset = tf.random.uniform(shape=(), maxval=len(background_sample)-desired_samples,\n                                                  dtype=tf.int32)\n            background_clipped = background_sample[background_offset:(background_offset + desired_samples)]\n            background_reshaped = tf.reshape(background_clipped, [desired_samples, 1])\n            if tf.random.uniform(shape=(), maxval=1) < background_frequency:\n                background_volume = tf.random.uniform(shape=(), maxval=background_volume_range)\n            else:\n                background_volume = tf.constant(0, 
dtype='float32')\n else:\n background_reshaped = np.zeros([desired_samples, 1], dtype=np.float32)\n background_volume = tf.constant(0, dtype='float32')\n\n # Mix in background noise.\n background_mul = tf.multiply(background_reshaped, background_volume)\n background_add = tf.add(background_mul, sliced_foreground)\n background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)\n\n mfcc = calculate_mfcc(background_clamp, sample_rate, model_settings['window_size_samples'],\n model_settings['window_stride_samples'],\n model_settings['dct_coefficient_count'])\n mfcc = tf.reshape(mfcc, [-1])\n\n return mfcc, label\n\n def _download_and_extract_data(self, data_url, target_directory):\n \"\"\"Downloads and extracts file to target directory.\n\n If the file does not already exist download it and then untar into the target directory.\n\n Args:\n data_url: Web link to the tarred data to download.\n target_directory: Directory to download and extract to.\n \"\"\"\n target_directory = Path(target_directory)\n target_directory.mkdir(exist_ok=True)\n\n filename = data_url.split('/')[-1]\n filepath = target_directory / filename\n\n if not filepath.exists():\n def _report_hook(block_num, block_size, total_size):\n \"\"\"Function to track download progress in urllib\"\"\"\n read_so_far = block_num * block_size\n percent = (read_so_far / total_size) * 100.0\n\n s = f\"\\rDownloading {filename} {percent:.1f}%\"\n\n sys.stdout.write(s)\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(data_url, filepath, _report_hook)\n print()\n\n print(f'Untarring {filename}...')\n tarfile.open(filepath, 'r:gz').extractall(target_directory)\n\n def _prepare_datasets(self, silence_percentage, unknown_percentage, wanted_words,\n validation_percentage, testing_percentage):\n \"\"\"Split the data into train, validation and testing sets.\n\n Silence and unknown data is added, then sets are converted to TF Datasets.\n\n Args:\n silence_percentage: Percent of words should be silence.\n unknown_percentage: Percent of words that should be unknown.\n wanted_words: List of words wanted to classify.\n validation_percentage: Percent to split off for validation.\n testing_percentage: Percent to split off for testing.\n \"\"\"\n # Make sure the shuffling and picking of unknowns is deterministic.\n random.seed(RANDOM_SEED)\n wanted_words_index = {}\n\n for index, wanted_word in enumerate(wanted_words):\n wanted_words_index[wanted_word] = index + 2\n\n # Find all wav files in subfolders.\n search_path = self.data_dir / '*' / '*.wav'\n data_index, unknown_index, all_words = self._find_and_sort_wavs(search_path, validation_percentage,\n testing_percentage, wanted_words_index)\n\n for index, wanted_word in enumerate(wanted_words):\n if wanted_word not in all_words:\n raise Exception(f'Tried to find {wanted_word} in labels but only found: {\", \".join(all_words.keys())}')\n\n word_to_index = {}\n for word in all_words:\n if word in wanted_words_index:\n word_to_index[word] = wanted_words_index[word]\n else:\n word_to_index[word] = UNKNOWN_WORD_INDEX\n word_to_index[SILENCE_LABEL] = SILENCE_INDEX\n\n # We need an arbitrary file to load as the input for the silence samples.\n # It's multiplied by zero later, so the content doesn't matter.\n silence_wav_path = data_index['training'][0]['file']\n for set_index in ['validation', 'testing', 'training']:\n set_size = len(data_index[set_index]) # Size before adding silence and unknown samples.\n silence_size = int(math.ceil(set_size * silence_percentage / 100))\n for _ in 
range(silence_size):\n data_index[set_index].append({\n 'label': SILENCE_LABEL,\n 'file': silence_wav_path\n })\n # Pick some unknowns to add to each partition of the data set.\n random.shuffle(unknown_index[set_index])\n unknown_size = int(math.ceil(set_size * unknown_percentage / 100))\n data_index[set_index].extend(unknown_index[set_index][:unknown_size])\n\n self._set_size[set_index] = len(data_index[set_index]) # Size after adding silence and unknown samples.\n\n # Make sure the ordering is random.\n random.shuffle(data_index[set_index])\n\n # Transform into TF Datasets ready for easier processing later.\n labels, paths = list(zip(*[d.values() for d in data_index[set_index]]))\n labels = [word_to_index[label] for label in labels]\n self._tf_datasets[set_index] = tf.data.Dataset.from_tensor_slices((list(paths), labels))\n\n def _find_and_sort_wavs(self, search_pattern, validation_percentage, testing_percentage, wanted_words_index):\n \"\"\"Find and sort wav files into known and unknown word sets.\n\n Known words are files containing words in the list of wanted words.\n Any other clip goes to the unknown label set. Labels come from the folder names.\n All clips are also assigned to train, test and validation sets.\n\n Args:\n search_pattern: Path pattern used by glob to find wav files.\n validation_percentage: Percent to split off for validation.\n testing_percentage: Percent to split off for testing.\n wanted_words_index: Dict mapping wanted words to their label index.\n\n Returns:\n 3-tuple of known words, unknown words and mapping of all word labels.\n \"\"\"\n data_index = {'validation': [], 'testing': [], 'training': []}\n unknown_index = {'validation': [], 'testing': [], 'training': []}\n all_words = {}\n\n for wav_path in tf.io.gfile.glob(str(search_pattern)):\n word = Path(wav_path).parent.name.lower()\n\n # Treat the '_background_noise_' folder as a special case, since we expect\n # it to contain long audio samples we mix in to improve training.\n if word == BACKGROUND_NOISE_DIR_NAME:\n continue\n\n all_words[word] = True\n set_index = which_set(wav_path, validation_percentage, testing_percentage)\n # If it's a known class, store its detail, otherwise add it to the list\n # we'll use to train the unknown label.\n if word in wanted_words_index:\n data_index[set_index].append({'label': word, 'file': wav_path})\n else:\n unknown_index[set_index].append({'label': word, 'file': wav_path})\n if not all_words:\n raise Exception('No .wavs found at ' + str(search_pattern))\n\n return data_index, unknown_index, all_words\n\n def _prepare_background_data(self):\n \"\"\"Searches a folder for background noise audio, and loads it into memory.\n\n It's expected that the background audio samples will be in a subdirectory\n named '_background_noise_' inside the 'data_dir' folder, as .wavs that match\n the sample rate of the training data, but can be much longer in duration.\n\n If the '_background_noise_' folder doesn't exist at all, this isn't an\n error, it's just taken to mean that no background noise augmentation should\n be used. 
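# Illustrative aside (not part of the original data.py): the silence/unknown
# padding performed by _prepare_datasets above, reduced to a pure-Python toy so
# the bookkeeping can be checked without TensorFlow. Data values are made up.
import math
import random

SILENCE = '_silence_'

def pad_partition(data_index, unknown_index, silence_percentage, unknown_percentage, seed=59185):
    random.seed(seed)                            # deterministic, like RANDOM_SEED above
    set_size = len(data_index)                   # size before padding
    silence_wav = data_index[0]['file']          # arbitrary clip, muted at load time
    for _ in range(int(math.ceil(set_size * silence_percentage / 100))):
        data_index.append({'label': SILENCE, 'file': silence_wav})
    random.shuffle(unknown_index)                # pick unknowns at random
    data_index.extend(unknown_index[:int(math.ceil(set_size * unknown_percentage / 100))])
    random.shuffle(data_index)                   # final ordering is random
    return data_index

training = [{'label': 'yes', 'file': 'yes/a.wav'}, {'label': 'no', 'file': 'no/b.wav'}]
unknowns = [{'label': 'cat', 'file': 'cat/c.wav'}, {'label': 'dog', 'file': 'dog/d.wav'}]
print(pad_partition(training, unknowns, silence_percentage=50, unknown_percentage=50))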
If the folder does exist, but it's empty, that's treated as an\n    error.\n\n    Returns:\n        Ragged tensor of raw PCM-encoded audio samples of background noise.\n        None if '_background_noise_' folder doesn't exist.\n\n    Raises:\n        Exception: If files aren't found in the folder.\n    \"\"\"\n    background_data = []\n    background_dir = Path(self.data_dir / BACKGROUND_NOISE_DIR_NAME)\n    if not background_dir.exists():\n        self.background_data = None\n        return\n\n    search_path = Path(background_dir / '*.wav')\n    for wav_path in tf.io.gfile.glob(str(search_path)):\n        wav_data, _ = load_wav_file(wav_path, desired_samples=-1)\n        background_data.append(tf.reshape(wav_data, [-1]))\n\n    if not background_data:\n        raise Exception('No background wav files were found in ' + str(search_path))\n\n    # Ragged tensor as we can't use lists in tf dataset map functions.\n    self.background_data = tf.ragged.stack(background_data)\n","sub_path":"tflu-kws-cortex-m/Training/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":20712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"230572832","text":"import jdatetime\nfrom django.db.models import Sum, F, FloatField, Avg, Count, Q\nfrom django_jalali.db import models as jmodels\nimport base64\n\nfrom django import template\n\nfrom django.contrib.auth import get_user_model\n\nfrom req_track.forms import ChangeRequestForm\n\nUser = get_user_model()\nfrom req_track.models import ReqEntered\nfrom request.models import Requests, Xpref, ReqSpec, PrefSpec, Payment, Comment\n\nregister = template.Library()\n\n\n@register.simple_tag()\ndef reqs_to_follow(user, on):\n    # req = Requests.objects.filter(is_active=True, owner=user, to_follow=True, on=on).order_by('date_modified').reverse()\n    req = Requests.objects.filter(is_active=True, owner=user, to_follow=True).order_by('date_modified').reverse()\n    return req\n\n\n@register.simple_tag()\ndef reqs_to_entered(user):\n    reqs = ReqEntered.objects.filter(is_entered=False, is_request=True)\n    if not user.is_superuser:\n        reqs = reqs.filter(owner_text__contains=user)\n    return reqs\n\n\n@register.simple_tag()\ndef reqs_noxp(user):\n    date = jdatetime.date(month=10, day=1, year=1399)\n    reqs = Requests.objects.filter(\n        is_active=True,\n        finished=False,\n        xpref__isnull=True,\n        date_fa__gte=date\n    ).distinct().order_by('date_fa').reverse()\n    if not user.is_superuser:\n        reqs = reqs.filter(owner=user)\n    return reqs\n\n\n@register.simple_tag()\ndef reqs_no_xp(user):\n    date = jdatetime.date(month=10, day=1, year=1399)\n    reqs = Requests.objects.filter(\n        is_active=True,\n        finished=False,\n        date_fa__gte=date,\n        xpref__isnull=True,\n    ).order_by('date_fa').reverse()\n    if not user.is_superuser:\n        reqs = reqs.filter(owner=user)\n    return reqs\n\n\n@register.simple_tag()\ndef unread_comments(status, user):\n    filter_query = Q(req_comment__owner=user, req_comment__is_active=True) | \\\n                   Q(req_comment__colleagues=user, req_comment__is_active=True) | \\\n                   Q(xpref_comment__owner=user, xpref_comment__is_active=True)\n\n    comments = Comment.objects.filter(is_read=status).filter(filter_query).exclude(author=user).distinct().order_by('pub_date').reverse()\n    return comments\n\n\n@register.simple_tag()\ndef expert_remaining_reqs_not_entered(pk):\n    account = User.objects.get(pk=pk)\n    reqs = ReqEntered.objects.filter(owner_text__contains=account.last_name, is_request=True, is_entered=False)\n    return reqs.count()\n\n\n@register.simple_tag()\ndef expert_remaining_reqs_no_xp(pk):\n    account = User.objects.get(pk=pk)\n    date = 
jdatetime.date(month=10, day=1, year=1399)\n    reqs = Requests.objects.filter(\n        is_active=True,\n        finished=False,\n        date_fa__gte=date,\n        owner=account,\n        xpref__isnull=True\n    )\n    return reqs.count()\n\n\n@register.simple_tag()\ndef all_expert_reqs(pk):\n    account = User.objects.get(pk=pk)\n    reqs = Requests.objects.filter(is_active=True, owner=account)\n    return reqs.count()\n\n\n@register.simple_tag()\ndef expert_reqs_percent(account):\n    reqs = Requests.objects.filter(is_active=True, owner=account)\n    all_reqs = Requests.objects.filter(is_active=True)\n    return 100 * reqs.count() / all_reqs.count()\n\n\n@register.simple_tag()\ndef reqs_to_entered_forms(req):\n    form = ChangeRequestForm(initial={'rtrack': req.id})\n    return form\n","sub_path":"app/request/templatetags/sales_ex_dashboard.py","file_name":"sales_ex_dashboard.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"363810319","text":"# character.py\n\nimport weapons\nimport items\n\n\nclass Character(object):\n    \"\"\"\n    This is the base class for both Player and Non-Player characters, i.e. enemies and heroes.\n    To create a character, you need to call the Character class with both a name and a factor\n    to allot the character a certain amount of 'life'.\n    The default factor is 1 and allots 100 life to the Character.\n    \"\"\"\n    def __init__(self, name, factor=None):\n        # super(Character, self).__init__()\n        self.name = name\n        self.factor = factor\n        self.life = int(self._life(factor))\n\n    def _life(self, factor):\n        if self.factor is None:\n            self.factor = 1\n        return 100 * self.factor\n\n\nclass Player(Character):\n    \"\"\"\n    This is the base class for the player character.\n    The base Player class gives the Player an inventory.\n    \"\"\"\n    inventory = {\n        \"weapon\": weapons.weapons['Dull Sword'],\n        \"armor\": \"Leather Armor\",\n        \"boots\": \"Leather Boots\",\n        \"keys\": None,\n        \"tonic\": None\n    }\n\n    _type = \"PC\"\n\n\nclass Zombie(Character):\n    \"\"\"\n    This is the base class for Zombies throughout the game.\n    Zombies have a factor of 0.1 giving them 10 life.\n    \"\"\"\n\n    def __init__(self, name, factor=0.1):\n        self.name = name\n        self.factor = factor\n        self.life = int(self._life(factor))\n\n    inventory = {\n        \"weapon\": weapons.weapons['Gnashing Teeth'],\n        \"item\": None\n    }\n\n    _type = \"NPC\"\n\n\nclass Dwarf(Character):\n    \"\"\"\n    This is the base class for Dwarves throughout the game.\n    Dwarves have a factor of 0.5 giving them 50 life.\n    \"\"\"\n\n    def __init__(self, name, factor=0.5):\n        self.name = name\n        self.factor = factor\n        self.life = int(self._life(factor))\n\n    inventory = {\n        \"weapon\": weapons.weapons['Dwarfish Sword'],\n        \"item\": None\n    }\n\n    _type = \"NPC\"\n\n\nclass Skeleton(Character):\n    \"\"\"\n    This is the base class for Skeletons throughout the game.\n    Skeletons have a factor of 0.8 giving them 80 life.\n    \"\"\"\n\n    def __init__(self, name, factor=0.8):\n        self.name = name\n        self.factor = factor\n        self.life = int(self._life(factor))\n\n    inventory = {\n        \"weapon\": weapons.weapons['Skeletal Sword'],\n        \"item\": items.items['Health Tonic']\n    }\n\n    _type = \"NPC\"\n\n\nclass Dragon(Character):\n    \"\"\"\n    This is the base class for Dragons throughout the game.\n    Dragons have a factor of 5 giving them 500 life.\n    \"\"\"\n\n    def __init__(self, name, factor=5):\n        self.name = name\n        self.factor = factor\n        self.life = int(self._life(factor))\n\n    inventory = {\n        \"weapon\": weapons.weapons['Dragon Breath'],\n        \"item\": None\n    }\n\n    _type = 
\"NPC\"\n","sub_path":"character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"344357853","text":"#!/usr/bin/env python3\n\"\"\" Tips for python coding \"\"\"\nimport sys\n# import timeit\nfrom collections import Counter\n\n\nclass PyTips:\n my_list = [17, 12, 13]\n my_set = {25, 23, 29}\n my_dict = {'name': 'A.B', 'age': 37, 'gender': 'm'}\n my_data = [{'name': 'Bill', 'age': 32},\n {'name': 'Ada', 'age': 22},\n {'name': 'Tim', 'age': 63}]\n\n # 1) Iterate with enumerate instead of range(len(x))\n def demo_enumerate(self):\n print(enumerate(self.my_list))\n for i, v in enumerate(self.my_list):\n print(i, v)\n for i, v in enumerate(self.my_set):\n print(i, v)\n for i, v in enumerate(self.my_dict):\n print(i, v)\n\n # 2) Use list comprehension instead of raw for loops\n def demo_comprehension(self):\n comp_list = [i for i in range(10) if i % 2 == 0]\n print(comp_list)\n comp_dict = {v: k for k, v in self.my_dict.items()}\n print(comp_dict)\n\n # 3) Sort complex iterables with sorted()\n def demo_sort(self):\n sorted_list = sorted(self.my_list, reverse=True)\n print(sorted_list)\n self.my_list.sort()\n print(self.my_list)\n sorted_data = sorted(self.my_data, key=lambda item: item['name'])\n print(sorted_data)\n\n # 4) Store unique values with Sets\n\n # 5) Save memory with Generators\n def demo_gene(self):\n large_list = [i for i in range(1000)]\n print(sum(large_list))\n print(sys.getsizeof(large_list, 'bytes'))\n\n large_gen = (i for i in range(1000))\n print(sum(large_gen))\n print(sys.getsizeof(large_gen, 'bytes'))\n\n # 6) Define default values in Dictionaries with .get() and .setdefault()\n def demo_dict_get(self):\n try:\n print(self.my_dict['title'])\n except KeyError as e:\n print(f'missing key: {e}')\n\n print(f'Original dict: {self.my_dict}')\n print(self.my_dict.get('title', 'Mr.'))\n self.my_dict.setdefault('title', 'Mr.')\n print(f'After dict: {self.my_dict}')\n\n # 7) Count hashable objects with collections.Counter\n def demo_count_hashable(self):\n count = Counter('abrehwbobwqhghreaq')\n most_common = count.most_common(3)\n print(count)\n print(most_common)\n\n # 8) Format strings with f-Strings (Python 3.6+)\n\n # 9) Concatenate strings with .join()\n def demo_join(self):\n bad = ''\n for i in range(10):\n bad += str(i) + '+'\n print(bad)\n\n good = '+'.join([str(i) for i in range(10)])\n print(good)\n\n # 10) Merge dictionaries with {**d1, **d2} (Python 3.5+)\n def demo_dict_merge(self):\n dic1 = {'one': 1, 'two': 2}\n dic2 = {'four': 4, 'two': 3}\n dic_merged = {**dic1, **dic2}\n print(dic_merged)\n\n # 11) Simplify if-statements with if x in list\n\n\ndef main():\n my_tip = PyTips()\n # my_tip.demo_enumerate()\n # my_tip.demo_comprehension()\n # my_tip.demo_sort()\n # my_tip.demo_gene()\n # my_tip.demo_dict_get()\n # my_tip.demo_count_hashable()\n # my_tip.demo_join()\n my_tip.demo_dict_merge()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"py_ex/tips.py","file_name":"tips.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"564231009","text":"from manimlib.imports import *\nfrom manim_projects.tony_useful.imports import *\nfrom random import randint\n\n'''\n这个文件中是群友问问题时我写的测试代码(2020.02.03开始)\n一些目的和效果已经通过文档字符串的形式给出\n'''\n\nclass Test0(Scene):\n def construct(self):\n circle = Circle(radius=3)\n poly = []\n for i in range(3, 11):\n po = 
Polygon(\n *[\n UP * np.sin(j * 2 * PI / i) + RIGHT * np.cos(j * 2 * PI / i)\n for j in range(i)\n ]\n )\n poly.append(po.scale(3, about_point=ORIGIN))\n self.play(ShowCreation(circle))\n self.play(ShowCreation(poly[0]))\n self.wait()\n for i in range(1, 8):\n self.play(Transform(poly[0], poly[i]))\n self.wait()\n self.wait(2)\n\nclass Test1(Scene):\n '''Matrix类中间元素的下标布局'''\n def construct(self):\n mat = Matrix([['0', '-1', '2'], ['1', '0', '12'], ['3', '2', 'x']])\n self.add(mat)\n debugTeX(self, mat[0])\n\nclass Test2(Scene):\n '''使用\\tt调TextMobject打字机字体'''\n def construct(self):\n text = VGroup(\n TextMobject(\"\\\\tt UR=np.array([ 1, 1, 0])\", tex_to_color_map={\"=\":RED, \"array\":BLUE}),\n TextMobject(\"\\\\tt UL=np.array([-1, 1, 0])\", tex_to_color_map={\"=\":RED, \"array\":BLUE}),\n TextMobject(\"\\\\tt DR=np.array([ 1,-1, 0])\", tex_to_color_map={\"=\":RED, \"array\":BLUE}),\n TextMobject(\"\\\\tt DL=np.array([-1,-1, 0])\", tex_to_color_map={\"=\":RED, \"array\":BLUE})\n ).arrange_submobjects(DOWN)\n self.add(text)\n\nclass Test3(Scene):\n '''坐标可以用ndarray,也可以用列表'''\n def construct(self):\n l = Line([0, 0, 0], [3, 3, 0])\n self.add(l)\n\nclass Test4(Scene):\n '''aligned_edge的用法'''\n def construct(self):\n sq1 = Square().shift(LEFT * 2)\n sq2 = Square().next_to(sq1.get_corner(DR), DOWN)\n sq3 = Square().shift(RIGHT * 2)\n sq4 = Square().next_to(sq3.get_corner(DR), DOWN, aligned_edge=LEFT)\n\n self.add(sq1, sq2, sq3, sq4)\n\nclass Test5(Scene):\n '''加号强制next_to对齐'''\n def construct(self):\n text = TextMobject(\"LOVE\\\\ DEATH\\\\ \", \"$+$\", \"\\\\ ROBOTS\", color=RED)\n text[1].next_to(text[0], RIGHT)\n text[2].next_to(text[1], RIGHT)\n self.add(text)\n\nclass Test6(Scene):\n '''FocusOn和Flash的动画效果'''\n def construct(self):\n title1 = TextMobject(\"FocusOn\").scale(2).to_corner(UL)\n self.add(title1)\n dot = Dot(radius=0.5, color=BLUE)\n self.play(ShowCreation(dot))\n self.wait()\n self.play(FocusOn(dot))\n self.wait(2)\n title2 = TextMobject(\"Flash\").scale(2).to_corner(UL)\n self.play(Transform(title1, title2))\n self.wait()\n self.play(Flash(dot, flash_radius=0.55))\n self.wait(3)\n\nclass Test7(Scene):\n '''白底黑字'''\n def construct(self):\n txt = TexMobject(\"0\",\n fill_color=BLACK,\n fill_opacity=1.0,\n stroke_color=BLACK,\n stroke_opacity=1.0,\n ).scale(3)\n self.add(txt)\n\nclass Test8(Scene):\n '''使用Rectangle或者Line来强制Brace宽度'''\n def construct(self):\n rec = Rectangle(width=4)\n brac = Brace(rec, DOWN)\n self.add(brac)\n\nclass Test9(ThreeDScene):\n '''立方体三维旋转'''\n def construct(self):\n self.set_to_default_angled_camera_orientation()\n cube = Cube()\n self.add(cube)\n self.wait()\n self.play(Rotating(cube, axis=UP, radians=PI / 6))\n self.wait(2)\n\nclass Test10(Scene):\n '''文字渐变色'''\n def construct(self):\n text = TextMobject(\"test\").scale(2).set_color_by_gradient(BLUE, RED)\n self.add(text)\n\nclass Test11(Scene):\n '''LaTeX的cases可行'''\n def construct(self):\n text = TexMobject(\n r\"\"\"\n \\begin{cases}\n u^3+v^3=-q\\\\\n uv=-\\frac{p}{3}\\\\\n \\end{cases}\n \"\"\"\n )\n self.add(text)\n\nclass Test12(Scene):\n def construct(self):\n circle0 = Circle(color=WHITE,radius=2)\n text0 = TextMobject(\"Gaussian \\\\\\\\ Elimination\")\n vec1 =Vector(1.4*LEFT).move_to(circle0.get_center()+2.8*LEFT)\n circle1 = Circle(color=RED,radius=1.6).next_to(vec1, LEFT)\n text1 = TextMobject(\"System of \\\\\\\\ linear equation\").move_to(circle1.get_center()+ORIGIN).scale(0.8)\n\n vgr1 = VGroup(text1, circle1)\n self.add(circle0, text0)\n self.add(vec1)\n self.add(vgr1)\n 
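# Illustrative aside (not part of the original Test.py): the add_updater
# pattern Test12 uses just below, reduced to a standalone scene; a label tracks
# a moving anchor on every frame. Assumes the same manimlib imports as this file.
class UpdaterFollowDemo(Scene):
    def construct(self):
        anchor = Dot().shift(RIGHT * 2)
        label = TextMobject("follow me")
        label.add_updater(lambda m: m.next_to(anchor, UP))  # re-evaluated each frame
        self.add(anchor, label)
        self.play(Rotating(anchor, radians=2 * PI, about_point=ORIGIN), run_time=4)
        self.wait()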
self.wait(2)\n pos = Dot(fill_opacity=0).move_to(circle1.get_center())\n def update_text(obj):\n obj.move_to(pos)\n\n vgr1.add_updater(update_text)\n self.add(vgr1) \n self.play(\n Rotating(vec1, radians = 6 * PI, about_point = ORIGIN, axis = IN),\n Rotating(pos , radians = 6 * PI, about_point = ORIGIN, axis = IN),\n run_time=20\n )\n\nclass Test13(Scene):\n '''Uncreate效果,注意不是UnCreate'''\n def construct(self):\n sq = Square()\n self.add(sq)\n self.wait()\n self.play(Uncreate(sq))\n self.wait()\n\nclass Test14(Scene):\n def construct(self):\n rec1 = Rectangle(height=2, width=6)\n rec2 = Rectangle(height=1, width=1).shift(LEFT*2)\n rec3 = Rectangle(height=1, width=1).shift(RIGHT*2)\n rec4 = Rectangle(height=1, width=1)\n recs = VGroup(rec1, rec2, rec3, rec4)\n self.add(recs)\n self.wait()\n self.play(recs.shift, UP*2.5)\n self.wait()\n circle = Circle(radius=0.5).move_to(rec3)\n self.play(Transform(rec3, circle))\n self.wait()\n\nclass Test15(GraphScene):\n '''GraphScene的坐标轴可以FadeOut'''\n def construct(self):\n self.setup_axes(animate=True)\n self.wait()\n self.play(FadeOut(self.axes))\n self.wait()\n\nclass Test16(Scene):\n def construct(self):\n objs = [\n Square().shift(LEFT * 3),\n Square(),\n Square().shift(RIGHT * 3)\n ]\n self.add(*objs)\n self.wait()\n self.play(\n *[\n ApplyMethod(obj.shift, UP)\n for obj in objs\n ]\n )\n self.wait()\n\nclass Test17(Scene):\n '''使用index_of_submobject_to_align来对齐,注意要get_center()'''\n def construct(self):\n vg1 = VGroup(\n Circle(radius = 0.5).shift(LEFT*2),\n Circle(radius = 0.5).shift(LEFT*1),\n Circle(radius = 0.5),\n Circle(radius = 0.5).shift(RIGHT*1),\n Circle(radius = 0.5).shift(RIGHT*2),\n )\n vg2 = VGroup(\n Square(side_length=1).shift(LEFT*1),\n Square(side_length=1),\n Square(side_length=1).shift(RIGHT*1),\n )\n vg2.next_to(vg1[3].get_center(), DOWN, index_of_submobject_to_align=1)\n self.add(vg1, vg2)\n\nclass Test18(Scene):\n '''使用tex[0]来对TexMobject的每个字符进行分解'''\n def construct(self):\n tex = TexMobject(\"a^2+b^2=c^2\")\n self.add(tex)\n debugTeX(self, tex[0])\n\nclass Test19(Scene):\n '''用AnimatedBoundary实现Line的颜色变化'''\n def construct(self):\n l = Line(LEFT * 3, RIGHT * 3)\n self.add(l)\n self.wait()\n l2 = AnimatedBoundary(l, colors=[BLUE])\n self.add(l2)\n self.wait(3)\n\nclass Test20(Scene):\n '''使用set_opacity实现闪烁效果'''\n def construct(self):\n text = TextMobject(\"颓废最不要脸\")\n self.add(text)\n for i in range(20):\n self.play(text.set_opacity, 0, run_time=0.2)\n self.play(text.set_opacity, 1, run_time=0.2)\n self.wait()\n\nclass Test21(Scene):\n '''圆弧flip的默认轴'''\n def construct(self):\n grid = NumberPlane()\n arc = Arc(0, PI / 2, color = BLUE)\n arc2 = arc.copy().flip().set_color(YELLOW)\n self.add(grid, arc, arc2)\n\nclass Test22(Scene):\n def construct(self):\n text = TextMobject(\"abcd\")\n self.add(text)\n\nclass Test23(Scene):\n '''move_arc_center_to和不同run_time的动画同时播放'''\n def construct(self):\n sq = Square(side_length=4)\n ci = Arc(0, PI / 2, color=BLUE, radius=4).move_arc_center_to(sq.get_corner(DL))\n self.wait()\n self.play(ShowCreation(sq, run_time=2), ShowCreation(ci, run_time=4))\n self.wait()\n\nclass Test24(Scene):\n '''环绕每个字符'''\n def construct(self):\n text = TextMobject(\"abcdefgh\")\n rec = VGroup()\n for i in text[0]:\n rec.add(SurroundingRectangle(i, buff=0))\n self.add(text, rec)\n\nclass Test25(Scene):\n '''使用LaTeX的表格'''\n def construct(self):\n tab = TextMobject(\n r\"\"\"\n \\begin{table}[]\n \\begin{tabular}{|l|l|l|l|l|l|}\n \\hline\n a & b & c & d & e & f \\\\ \\hline\n \\end{tabular}\n \\end{table}\n \"\"\"\n 
)\n self.add(tab)\n debugTeX(self, tab[0])\n\nclass Test26(Scene):\n '''Succession,其实和多个play没什么区别'''\n def construct(self):\n group = VGroup(\n Circle(radius = 0.5).shift(LEFT*2),\n Circle(radius = 0.5).shift(LEFT*1),\n Circle(radius = 0.5),\n Circle(radius = 0.5).shift(RIGHT*1),\n Circle(radius = 0.5).shift(RIGHT*2),\n ).set_opacity(0)\n self.wait()\n self.play(\n Succession(\n *[\n ApplyMethod(obj.set_opacity, 1)\n for obj in group\n ]\n )\n )\n self.wait()\n\nclass Test27(Scene):\n '''UP和TOP在to_corner时的区别'''\n def construct(self):\n text = TextMobject(\"to\\_corner UP\").to_corner(UP)\n text2 = TextMobject(\"to\\_corner TOP\").to_corner(TOP) # 非标准用法\n text3 = TextMobject(\"move\\_to TOP\").move_to(TOP).set_color(YELLOW)\n self.add(text, text2, text3)\n\nclass Test28(Scene):\n '''将所有物体都FadeOut,没有add的物体也不会强制add再FadeOut'''\n def construct(self):\n sq = Square()\n ci = Circle()\n self.add(sq)\n self.wait()\n self.play(\n *[\n FadeOut(obj)\n for obj in self.mobjects\n ]\n )\n self.wait()\n\nclass Test29(Scene):\n '''Text不会自动换行'''\n def construct(self):\n text = Text(\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\", font=\"Consolas\")\n self.add(text)\n\nclass Test30(Scene):\n '''字符上弧'''\n def construct(self):\n text = TextMobject(\"\\\\overarc{AB}\")\n self.add(text)\n\nclass Test31(Scene):\n def construct(self):\n Gc = VGroup()\n colors = color_gradient([BLACK, WHITE], 9)\n Gc.add(Square(side_length=1).shift(LEFT*6).set_fill(colors[0], 1).set_color(colors[0]))\n for i in range(1, 9):\n Gc.add(Gc[-1].copy().set_fill(colors[i], 1).set_color(colors[i]).shift(RIGHT*1.2))\n self.play(Transform(Gc[-2], Gc[-1], rate_func=linear))\n self.wait()\n\nclass Test32(GraphScene):\n '''get_graph必须在setup_axes之后'''\n def construct(self):\n self.setup_axes(animate=True)\n graph = self.get_graph(lambda x : x**2, \n color = GREEN,\n x_min = None, \n x_max = None\n )\n self.play(\n ShowCreation(graph),\n run_time = 2\n )\n self.wait()\n\nclass Test33(Scene):\n '''用颜色灰度来实现透明度的效果,防止两透明度颜色相叠加,导致亮度突变'''\n def construct(self):\n colors = color_gradient([\"#6C6C00\", YELLOW], 9)\n sq = Square(side_length=1).shift(LEFT*6).set_fill(colors[0], 1).set_color(colors[0])\n for i in range(1, 9):\n self.play(\n ApplyMethod(sq.shift, RIGHT * 1.2, rate_func=linear),\n ApplyMethod(sq.set_color, colors[i]),\n ApplyMethod(sq.set_fill, colors[i], 1)\n )\n self.wait()\n\nclass Test34(ThreeDScene):\n '''cube的面'''\n def construct(self):\n self.set_to_default_angled_camera_orientation()\n cube = Cube(fill_opacity=0, stroke_width=3).set_fill(opacity=0).set_color(WHITE)\n cube[5].set_color(BLUE)\n self.add(cube)\n debugTeX(self, cube)\n\nclass Test35(GraphScene):\n '''使用updater来实现graph的更新'''\n def construct(self):\n self.setup_axes()\n line = self.get_graph(lambda x: x + 2)\n val = ValueTracker(1)\n line.add_updater(lambda m: m.become(self.get_graph(lambda x: val.get_value() * x + 2, color=BLUE)))\n self.add(line)\n self.play(val.increment_value, 4)\n self.wait()\n\nclass Test36(ThreeDScene):\n '''抛物面'''\n def construct(self):\n self.set_to_default_angled_camera_orientation()\n 颓废曲面 = ParametricSurface(\n lambda u, v: [u, v, u ** 2 + v ** 2],\n u_min=-1, u_max=1, v_min=-1, v_max=1\n )\n self.add(颓废曲面)\n\nclass Test37(Scene):\n '''Transfrom前统一方向,使动画更顺滑'''\n def construct(self):\n ci = Circle()\n # sq = Square()\n sq = Square().flip()\n self.play(ShowCreation(ci))\n self.play(Transform(ci, sq))\n self.wait()\n\nclass Test38(Scene):\n '''根式上色'''\n def construct(self):\n text = TexMobject(\"\\\\sqrt{x^2+y^2+z^2}\")\n 
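# Illustrative aside (not part of the original Test.py): the glyph slicing that
# Test38 demonstrates around this point, as a standalone scene. A TexMobject
# keeps every glyph of one string in submobject [0], so characters are
# addressable by index (the layout can be inspected with debugTeX).
class GlyphColorDemo(Scene):
    def construct(self):
        tex = TexMobject("a^2+b^2=c^2")
        tex[0][0].set_color(RED)       # 'a'
        tex[0][3].set_color(BLUE)      # 'b'
        tex[0][6:8].set_color(YELLOW)  # 'c^2'
        self.add(tex)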
text[0][2:4].set_color(RED)\n self.add(text)\n debugTeX(self, text[0])\n\nclass Test39(Scene):\n '''上色'''\n def construct(self):\n text4 = TexMobject(\n r\"ds=\\vert d\\vec r \\vert=\",\n r\"\\sqrt{x^2+y^2+z^2}\"\n )\n VGroup(\n text4\n ).set_color(YELLOW)\n VGroup(\n text4[1][2:4]\n ).set_color(RED)\n self.add(text4)\n debugTeX(self, text4[1])\n\nclass Test40(Scene):\n '''一个self.play中无法处理两个针对同一物体的ApplyMethod,但不加ApplyMethod可以'''\n def construct(self):\n dot = Dot(color=BLUE)\n up = Dot(color=YELLOW).to_edge(UP)\n self.add(dot)\n self.wait()\n # self.play(\n # ApplyMethod(dot.next_to, up, DOWN),\n # ApplyMethod(dot.scale, 3)\n # )\n self.play(\n dot.next_to, up, DOWN,\n dot.scale, 3\n )\n self.wait()\n\nclass Test41(Scene):\n '''replace的作用'''\n def construct(self):\n sq = Square().scale(2)\n ci = Circle().shift(RIGHT*3)\n self.add(sq, ci)\n self.play(sq.replace, ci)\n self.wait()\n\nclass Test42(Scene):\n '''使用updater时不能使用循环变量i'''\n def construct(self):\n ups = VGroup(\n *[\n Dot(color=BLUE).move_to([i, 1, 0])\n for i in range(-3, 4)\n ]\n )\n downs = VGroup(\n *[\n Dot(color=YELLOW).move_to([i, -1, 0])\n for i in range(-3, 4)\n ]\n )\n lines = VGroup(\n *[\n Line(ups[i], downs[i])\n for i in range(0, 7)\n ]\n )\n lines.add_updater(\n lambda m: m.become(\n VGroup(\n *[\n Line(ups[i], downs[i])\n for i in range(0, 7)\n ]\n )\n )\n )\n # for i in range(7):\n # lines[i].add_updater(lambda m: m.put_start_and_end_on(ups[i].get_bottom(), downs[i].get_top()))\n self.add(ups, downs, lines)\n self.wait()\n self.play(\n ups.shift, LEFT * 2\n )\n self.play(\n downs.shift, RIGHT * 2\n )\n self.wait()\n \nclass Test43(Scene):\n '''和Test40同理'''\n def construct(self):\n dot = Dot(color=BLUE)\n self.add(dot)\n self.wait()\n self.play(\n ApplyMethod(dot.scale, 3), # 这个被淹没了\n ApplyMethod(dot.set_color, YELLOW)\n )\n self.wait()\n\nclass Test44(ThreeDScene):\n def construct(self):\n axes = ThreeDAxes()\n spheres = VGroup(\n *[\n Sphere(radius=i, opacity=0.5, resolution=(20, 40))\n for i in np.arange(1, 3.1, 0.4)\n ]\n )\n self.set_to_default_angled_camera_orientation()\n self.add(axes)\n old = VGroup()\n new = VGroup()\n for i in range(len(spheres[0])):\n old.add(spheres[randint(1, 5)][i].set_opacity(randint(1, 6) / 10))\n new.add(spheres[0][i])\n self.wait()\n self.wait()\n self.play(\n FadeIn(old),\n *[\n Transform(i, j)\n for i, j in zip(old, new)\n ],\n run_time=6\n )\n self.wait()\n\nclass Test45(ThreeDScene):\n def construct(self):\n axes = ThreeDAxes()\n self.set_to_default_angled_camera_orientation()\n self.add(axes)\n surface = ParametricSurface(\n lambda y, z: [\n -np.sqrt(\n 1 - 9 * y ** 2 / 4 + (320 * 2 ** (1 / 3) * z ** 3) / ((\n 99532800 * y ** 2 * z ** 2 + 884736000 * z ** 3 - \\\n 1990656000 * y ** 2 * z ** 3 - 884736000 * z ** 5 + np.sqrt(\n (-115964116992000000 * z ** 9 + (99532800 * y ** 2 * z ** 2 + \\\n 884736000 * z ** 3 - 1990656000 * y ** 2 * z ** 3 - 884736000 * z ** 5) ** 2) ** 2)\n ) ** (1 / 3)) + (1 / 960 * 2 ** (1 / 3)) * (99532800 * y ** 2 * z ** 2 + \\\n 884736000 * z ** 3 - 1990656000 * y ** 2 * z ** 3 - 884736000 * z ** 5 + np.sqrt(\n (-115964116992000000 * z ** 9 + (99532800 * y ** 2 * z ** 2 + 884736000 * z ** 3 -\\\n 1990656000 * y ** 2 * z ** 3 - 884736000 * z ** 5) ** 2)\n )) ** (1 / 3)\n ),\n y, z\n ]\n )\n self.add(surface)\n\nclass Test46(Scene):\n '''Brace'''\n def construct(self):\n text = TextMobject(\"test\")\n brace = Brace(text, DOWN)\n self.play(Write(brace))\n self.play(Write(text))\n\nclass Test47(Scene):\n '''LaTeX的dancers小人,需要下载字体包并且更改ctex_template'''\n def 
construct(self):\n Test = VGroup()\n for i in range(51):\n test = TextMobject(\"\\\\Pisymbol{dancers}{%d}\" % i, stroke_width=1, fill_opacity=1, stroke_opacity=1).scale(200)\n Test.add(test)\n self.wait()\n self.play(Write(Test[0]))\n for i in range(1, 51):\n self.wait(0.8)\n self.play(Transform(Test[0], Test[i]))\n self.wait(2)\n\nclass Test48(Scene):\n '''plot_depth'''\n CONFIG = {\n \"camera_config\": {\"use_plot_depth\": True}\n }\n def construct(self):\n sq = Square(stroke_width=5).set_plot_depth(1)\n sq2 = Square(side_length=1, stroke_width=5).shift(RIGHT).set_color(BLUE).set_plot_depth(0)\n self.add(sq, sq2)\n self.wait()\n self.play(sq2.set_plot_depth, 2)\n self.wait()\n\nclass Test49(Scene):\n '''使用LaTeX的lstlisting写代码,需要改ctex_template'''\n def construct(self):\n text = TextMobject(\"\"\"\n \\\\begin{lstlisting}\n int main() {\n\n }\n \\\\end{lstlisting}\n \"\"\")\n self.add(text)\n\nclass Test50(ThreeDScene):\n '''正劈锥体,渲染贼慢'''\n def construct(self):\n axes = ThreeDAxes()\n self.set_camera_orientation(phi=70 * DEGREES, theta=45 * DEGREES)\n self.add(axes)\n a = VGroup()\n b = VGroup()\n c = VGroup()\n for i in np.arange(-1, 1.00001, 0.0005):\n tri = Polygon([i, np.sqrt(1 - i ** 2), 0], \n [i, -np.sqrt(1 - i ** 2), 0],\n [i, 0, 2], stroke_width=0, fill_color=BLUE, fill_opacity=0.75)\n a.add(tri)\n cnt = 1\n self.begin_ambient_camera_rotation(rate=0.5)\n for tri in a:\n if cnt % 2 == 0:\n self.add(tri.set_fill(color=YELLOW, opacity=0.5))\n self.wait(0.01)\n tri.set_fill(color=BLUE, opacity=0.75)\n else:\n self.add(tri)\n cnt += 1\n self.wait(5)\n\nclass Test51(ThreeDScene):\n '''棱锥到近似圆锥'''\n def construct(self):\n axes = ThreeDAxes()\n self.set_camera_orientation(phi=70 * DEGREES, theta=45 * DEGREES)\n self.add(axes)\n circle = Circle(radius=2)\n polys = []\n faces = []\n for i in range(3, 16):\n po = Polygon(\n *[\n UP * np.sin(j * 2 * PI / i) + RIGHT * np.cos(j * 2 * PI / i)\n for j in range(i)\n ], stroke_width=1, stroke_color=BLUE, fill_color=BLUE, fill_opacity=0.75\n ).scale(2, about_point=ORIGIN)\n polys.append(po)\n verts = po.get_vertices()\n faces_ = VGroup()\n for j in range(i):\n if j == i - 1:\n face = Polygon(verts[j], verts[0], [0, 0, 3])\n else:\n face = Polygon(verts[j], verts[j + 1], [0, 0, 3])\n face.set_stroke(width=1, color=BLUE)\n face.set_fill(color=BLUE, opacity=0.75)\n faces_.add(face)\n faces.append(faces_)\n self.play(ShowCreation(circle))\n self.play(ShowCreation(polys[0]), ShowCreation(faces[0]))\n self.wait()\n self.begin_ambient_camera_rotation(rate=0.5)\n self.wait()\n for i in range(1, 13):\n self.play(\n Transform(polys[0], polys[i]),\n Transform(faces[0], faces[i])\n )\n self.wait()\n self.wait(2)\n\nclass Test52(SpecialThreeDScene):\n '''Boxes类的test'''\n CONFIG = {\n \"default_angled_camera_position\": {\n \"phi\": 70 * DEGREES,\n \"theta\": -45 * DEGREES,\n \"distance\": 50,\n },\n }\n def construct(self):\n\n self.set_camera_to_default_position()\n axes = self.get_axes()\n boxes = MyBoxes(fill_color=GRAY, resolution=(20, 20), bottom_size=(0.25, 0.25), gap=0.05)\n self.var_phi = 0\n func_01 = lambda x, y: np.sin(x ** 2 / 2.4 + y ** 2 / 2.4 + self.var_phi) * 1.\n func_02 = lambda x, y: np.sin(x ** 2 / 2.4 + y ** 2 / 2.4 + self.var_phi) * 1. 
- 0.25\n\n boxes.update_top_and_bottom_by_func(func_01, func_02)\n boxes.update_color_by_func(func_01)\n def update_boxes(b, dt):\n b.update_top_and_bottom_by_func(func_01, func_02)\n b.update_color_by_func(func_01)\n self.var_phi += 1 * DEGREES\n self.add(boxes)\n boxes.add_updater(update_boxes)\n # self.wait(2)\n # boxes.remove_updater(update_boxes)\n self.wait(12)\n\nclass Test53(ThreeDScene):\n '''MyBoxes的序号分布'''\n def construct(self):\n axes = ThreeDAxes()\n # self.set_camera_orientation(phi=70 * DEGREES, theta=225 * DEGREES)\n self.add(axes)\n boxes = MyBoxes(fill_color=GRAY, resolution=(9, 18), bottom_size=(0.5, 0.7), gap=0.2, box_height=0.5)\n self.add(boxes)\n debugTeX(self, boxes)\n\nclass Test54(ThreeDScene):\n '''测试元素周期表'''\n CONFIG = {\n \"camera_config\": {\n \"background_color\": WHITE,\n \"should_apply_shading\": False,\n }\n }\n def construct(self):\n self.set_camera_orientation(phi=50 * DEGREES, theta=240 * DEGREES, distance=50)\n boxes = ChemicalBoxes(fill_color=BLUE_E).add_label().set_block_color()\n self.add(boxes)\n self.begin_ambient_camera_rotation(rate=1)\n # self.wait(10)\n\nclass Test55(Scene):\n '''无法像这样获取圆上某一方向的点'''\n def construct(self):\n ci = Circle()\n self.add(ci)\n dot = Dot().move_to(ci.get_boundary_point(UP * 2 * np.sqrt(5) / 5 + RIGHT * np.sqrt(5) / 5))\n self.add(dot)\n\nclass Test56(Scene):\n '''带dt的updater'''\n def construct(self):\n dot = Dot().to_edge(UP)\n dot.add_updater(lambda m, dt: m.shift(0.1 * DOWN))\n self.add(dot)\n self.wait(6)\n\nclass Test57(Scene):\n '''文字上下标'''\n def construct(self):\n text = TextMobject(\"正文A$_{\\\\text{下标B}}^{\\\\text{上标C}}$\").scale(3)\n self.add(text)\n\nclass Test58(Scene):\n '''rate_func'''\n def construct(self):\n func = ParametricFunction(\n lambda x: [x, smooth(x), 0],\n t_min=0, t_max=1\n ).scale(3, about_point=ORIGIN)\n self.add(func)\n\nclass Test59(Scene):\n '''save_image'''\n def construct(self):\n sq = Square()\n sq.save_image()\n self.add(sq)\n\nclass Test60(Scene):\n '''根据等号对齐'''\n def construct(self):\n tex1 = TexMobject(\"A=\\\\frac{\\\\displaystyle\\\\sum^n_{i=0}}{x}\")\n tex2 = TexMobject(\"=\", \"\\\\frac{x}{\\\\displaystyle\\\\sum^n_{i=0}}\")\n tex2.next_to(tex1, RIGHT)\n tex2.next_to(tex1[0][1].get_center(), RIGHT, index_of_submobject_to_align=0, coor_mask=np.array([0, 1, 1]))\n self.add(tex1, tex2)\n texs = [\n \"A=\\\\frac{\\\\displaystyle\\\\sum^n_{i=0}}{x}\",\n \"=\\\\frac{x}{\\\\displaystyle\\\\sum^n_{i=0}}\"\n ]\n tex = TexMobject(*texs)\n self.add(tex)\n\nclass Test61(Scene):\n def construct(self):\n for1 = TexMobject(r\"G(x)=\\displaystyle\\sum_{p=0}^{\\infty}{\\left( \\frac{S^{p}(n)}{p!}x^p\\right)}\").scale(0.7).to_edge(UP+LEFT)\n for1_bg = SurroundingRectangle(for1, fill_opacity = .2)\n for2 = TexMobject(r\"G(x) = \\left( \\frac{e^{(n+1)x}-1}{x} \\right) \\left( \\frac{x}{e^x-1} \\right)\")\n\n forrs = [\n r\"\\frac{e^{(n+1)x}-1}{x}\", # for3\n r\"= \\frac{ \\left( \\displaystyle\\sum_{p=0}^{\\infty}{\\frac{{((n+1)x)}^p}{p!}} \\right) -1}{x}}\", #for4\n r\"=\\frac{1+\\left( \\displaystyle\\sum_{p=1}^{\\infty}{\\frac{{((n+1)x)}^p}{p!}} \\right) -1}{x}}\",#for5\n r\"=\\displaystyle\\sum_{p=1}^{\\infty}{\\frac{(n+1)^p}{p!}x^{p-1}}\",#for6\n r\"=\\displaystyle\\sum_{p=0}^{\\infty}{\\frac{(n+1)^{p+1}}{(p+1)!}x^{p}}\"#for7\n ]\n forr = TexMobject(*forrs).scale(0.9)\n self.add(forr)\n\nclass Test62(Scene):\n '''三角形绕边翻转'''\n def construct(self):\n tri = Triangle()\n vert = tri.get_vertices()\n tri.generate_target()\n tri.target.flip(axis=vert[0]-vert[1], about_point=(vert[0]+vert[1])/2)\n 
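# Illustrative aside (not part of the original Test.py): the edge flip Test62
# performs here, wrapped as a reusable helper; the flip axis is the edge
# direction and the pivot is the edge midpoint.
def flip_about_edge(mobject, p1, p2):
    mobject.flip(axis=p2 - p1, about_point=(p1 + p2) / 2)
    return mobject
# e.g. flip_about_edge(tri.copy(), vert[1], vert[2]) mirrors across another edge.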
self.add(tri)\n self.wait()\n self.play(MoveToTarget(tri))\n self.wait()\n\nclass Test63(Scene):\n '''文字渐变色的区别'''\n def construct(self):\n vg = VGroup(\n TextMobject(\"abcde\").set_color([RED, BLUE, WHITE]),\n TextMobject(\"abcde\").set_color_by_gradient(RED, BLUE, WHITE),\n TextMobject(\"abcde\")\n ).arrange(DOWN)\n vg[2].shuffle(True)\n vg[2].set_color_by_gradient(RED, BLUE, WHITE)\n self.add(vg)\n\nclass Test64(Scene):\n '''CubicBezier的points只有四个点,即锚点和控制点,但ParametricFunction是好多贝塞尔曲线,好多点'''\n def construct(self):\n # line = CubicBezier([np.array([ -3, -1.5, 0]), np.array([-3.6, 1.5, 0]), np.array([ 0, 1.5, 0]), np.array([ 3, -1.5, 0])])\n line = ParametricFunction(\n bezier([np.array([ -3, -1.5, 0]), np.array([-3.6, 1.5, 0]), np.array([ 0, 1.5, 0]), np.array([ 3, -1.5, 0])]),\n t_min=0, t_max=1\n )\n self.add(line)\n points = line.get_points()\n debugTeX(self, points)\n\nclass Test65(Scene):\n '''渐变色的方向,用sheen_direction来设定'''\n def construct(self):\n sq = Square()\n sq.set_color([RED, BLUE])\n # sq.set_opacity([0, 1])\n # sq.set_fill([RED, BLUE], [0, 1])\n sq.set_sheen_direction(UP)\n self.add(sq)\n # self.wait()\n # self.play(sq.flip)\n # self.wait()\n\nclass Test66(Scene):\n '''digest_config的很愚蠢的用法'''\n CONFIG = {\n \"stroke_width\": 15,\n }\n def construct(self):\n line = Line()\n digest_config(line, self.CONFIG)\n self.add(line)\n\nclass Test67(Scene):\n '''arc的points,用好多贝塞尔曲线来拟合的'''\n def construct(self):\n arc = Arc().scale(3)\n self.add(arc)\n points = arc.get_points()\n debugTeX(self, points, 0.3)\n\nclass Test68(Scene):\n def construct(self):\n tex = TexMobject(\"{\\\\sin\\\\alpha\\\\over\\\\sin\\\\gamma}={n_1\\\\over n_2}\")\n self.add(tex)\n debugTeX(self, tex[0])\n\nclass Test69(ThreeDScene):\n '''无法将三维物体Transform到fixed_in_frame_mobjects的二维物体,但可以通过z_to_vector等变换得到类似的效果'''\n def construct(self):\n self.set_camera_orientation(phi=60*DEGREES, theta=45*DEGREES)\n vec = [\n np.cos(45*DEGREES) * np.sin(60*DEGREES),\n np.sin(45*DEGREES) * np.sin(60*DEGREES),\n np.cos(60*DEGREES)\n ]\n n = z_to_vector(vec)\n tex = TexMobject(\"a\").apply_matrix(n).rotate(PI/2, vec)\n # self.camera.add_fixed_in_frame_mobjects(tex)\n # tex.to_corner(UL)\n surface = Cube()\n self.add(surface)\n self.play(Transform(surface, tex), run_time=2)\n self.wait()\n\nclass Test70(Scene):\n '''无法通过get_points获取TexMobject的点'''\n def construct(self):\n tex = TexMobject(\"S\").scale(2)\n self.add(tex)\n p = tex.get_points()\n print(p)\n\nclass Test71(Scene):\n def construct(self):\n grid = NumberPlane()\n vector = np.array([1, 2, 0])\n matrix = np.identity(3) - np.outer(vector, vector)\n self.add(grid, Dot([1, 2, 0], color=RED))\n self.wait()\n self.play(grid.apply_matrix, matrix, run_time=3)\n self.wait()\n\nclass Test72(Scene):\n '''光源'''\n def construct(self):\n light = AmbientLight()\n self.add(light)\n\nclass Test73(Scene):\n '''running_start的写法是六次贝塞尔曲线'''\n def construct(self):\n grid = NumberPlane().scale(3)\n func = ParametricFunction(\n lambda x: [x, running_start(x), 0],\n t_min=0, t_max=1\n ).scale(3, about_point=ORIGIN)\n func2 = ParametricFunction(\n bezier([\n np.array([0/6, 0, 0]), \n np.array([1/6, 0, 0]), \n np.array([2/6, -0.5, 0]), \n np.array([3/6, -0.5, 0]),\n np.array([4/6, 1, 0]),\n np.array([5/6, 1, 0]),\n np.array([6/6, 1, 0]),\n ]),\n t_min=0, t_max=1, color=RED\n ).scale(3, about_point=ORIGIN)\n self.add(grid, func, func2)\n\nclass Test74(Scene):\n '''幼儿园小练习1'''\n CONFIG = {\n \"camera_config\": {\n \"use_plot_depth\": True,\n },\n }\n\n def setup(self):\n self.A = np.array([1, 0, 0])\n 
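# Illustrative aside (not part of the original Test.py): following Test73's
# observation that running_start is a Bezier curve over scalar control values,
# a custom rate_func can be built the same way; this one dips below 0 before
# easing to 1 (a "wind up"). Assumes manimlib's bezier helper.
wind_up = bezier([0, 0, -0.5, -0.5, 1, 1, 1])

class WindUpDemo(Scene):
    def construct(self):
        dot = Dot()
        self.play(dot.shift, RIGHT * 3, rate_func=wind_up, run_time=2)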
self.B = np.array([-1, 0, 0])\n self.C = np.array([-0.3, 1.3, 0])\n\n self.main_tri = Polygon(\n self.A, self.B, self.C,\n color=BLUE, fill_color=BLUE, fill_opacity=0.8\n )\n\n label_a = TexMobject(\"a\").scale(0.7).next_to((self.B+self.C)/2, UL, buff=0.08)\n label_b = TexMobject(\"b\").scale(0.7).next_to((self.A+self.C)/2, UR, buff=0.08)\n label_c = TexMobject(\"c\").scale(0.7).next_to((self.B+self.A)/2, DOWN, buff=0.08)\n self.labels = VGroup(label_a, label_b, label_c).set_plot_depth(5)\n\n sq_a = Polygon(self.B, self.C, np.array([-1.6, 2, 0]), np.array([-2.3, 0.7, 0]), color=WHITE)\n sq_b = Polygon(self.C, self.A, np.array([2.3, 1.3, 0]), np.array([1, 2.6, 0]), color=WHITE)\n sq_c = Polygon(self.A, self.B, np.array([-1, -2, 0]), np.array([1, -2, 0]), color=WHITE)\n self.sq = VGroup(sq_a, sq_b, sq_c).set_plot_depth(-1)\n\n tri_a = Polygon(self.A, np.array([1, -2, 0]), np.array([2.3, 1.3, 0]), color=RED, fill_color=RED, fill_opacity=0.8)\n tri_b = Polygon(self.B, np.array([-2.3, 0.7, 0]), np.array([-1, -2, 0]), color=YELLOW, fill_color=YELLOW, fill_opacity=0.8)\n tri_c = Polygon(self.C, np.array([1, 2.6, 0]), np.array([-1.6, 2, 0]), color=GREEN, fill_color=GREEN, fill_opacity=0.8)\n self.tri = VGroup(tri_a, tri_b, tri_c)\n\n equation = TexMobject(\"S_{\\\\ } = S_{\\\\ } = S_{\\\\ } = S_{\\\\ }\").scale(1.5).to_corner(UR, buff=1.1)\n tri_1 = self.main_tri.copy().set_stroke(width=0).set_fill(opacity=1).scale(0.2).next_to(equation[0][0], RIGHT+DOWN*3, buff=-0.08)\n tri_2 = tri_a.copy().rotate(PI/2).set_stroke(width=0).set_fill(opacity=1).scale(0.2).next_to(equation[0][2], RIGHT+DOWN*3, buff=-0.08)\n tri_3 = tri_b.copy().rotate(PI/2, axis=IN).set_stroke(width=0).set_fill(opacity=1).scale(0.2).next_to(equation[0][4], RIGHT+DOWN*3, buff=-0.08)\n tri_4 = tri_c.copy().rotate(PI/4, axis=IN).set_stroke(width=0).set_fill(opacity=1).scale(0.2).next_to(equation[0][6], RIGHT+DOWN*3, buff=-0.08)\n self.equation = VGroup(equation, tri_1, tri_2, tri_3, tri_4)\n\n # self.add(self.main_tri, self.labels, self.sq, self.tri, equation, tri_1, tri_2, tri_3, tri_4)\n\n def construct(self):\n self.wait()\n self.play(ShowCreation(self.main_tri))\n self.wait()\n self.play(FadeIn(self.labels))\n self.wait(2)\n self.play(*[ShowCreation(i.set_plot_depth(-5)) for i in self.sq], run_time=2)\n self.wait()\n self.play(*[ShowCreation(i) for i in self.tri], run_time=2)\n self.wait()\n self.play(\n *[\n WiggleOutThenIn(i)\n for i in self.tri\n ], run_time=2\n )\n self.wait(2)\n self.play(\n FadeOut(self.sq),\n Rotating(self.tri[0], radians=PI/2, about_point=self.A),\n Rotating(self.tri[1], radians=PI/2, about_point=self.B),\n Rotating(self.tri[2], radians=PI/2, about_point=self.C),\n run_time=3\n )\n self.wait()\n self.play(\n self.main_tri.shift, LEFT*2.5+DOWN,\n self.tri.shift, LEFT*2.5+DOWN,\n self.labels.shift, LEFT*2.5+DOWN,\n )\n self.labels.set_plot_depth(6)\n self.wait(2)\n self.play(\n WiggleOutThenIn(self.tri[0]), \n WiggleOutThenIn(self.main_tri)\n )\n self.play(\n FadeIn(self.equation[0][0][:3]),\n TransformFromCopy(self.main_tri, self.equation[1]),\n TransformFromCopy(self.tri[0], self.equation[2]),\n run_time=2\n )\n self.wait(2)\n self.play(\n WiggleOutThenIn(self.tri[1]), \n WiggleOutThenIn(self.main_tri)\n )\n equation_copy_1 = self.equation[1].copy()\n equation_copy_2 = self.equation[1].copy()\n self.play(\n FadeIn(self.equation[0][0][3:5]),\n TransformFromCopy(self.main_tri, equation_copy_1),\n TransformFromCopy(self.tri[1], self.equation[3]),\n run_time=2\n )\n self.wait(2)\n self.play(\n 
WiggleOutThenIn(self.tri[2]), \n WiggleOutThenIn(self.main_tri)\n )\n self.play(\n FadeIn(self.equation[0][0][5:]),\n TransformFromCopy(self.main_tri, equation_copy_2),\n TransformFromCopy(self.tri[2], self.equation[4]),\n run_time=2\n )\n self.wait(3)\n self.play(FadeOut(VGroup(self.equation[0][0][:2], self.equation[1], equation_copy_1, equation_copy_2)))\n self.equation[0][0][:2].set_opacity(0)\n self.equation[1].set_fill(opacity=0)\n self.equation.generate_target()\n self.equation.target.scale(1.3).shift(DOWN+LEFT)\n self.play(MoveToTarget(self.equation))\n self.wait(5)\n\nclass Test75(Scene):\n '''对坐标轴的非线性变换'''\n def construct(self):\n grid = ComplexPlane().prepare_for_nonlinear_transform(50)\n self.add(grid)\n self.wait()\n self.play(\n grid.apply_function,\n lambda point: complex_to_R3(R3_to_complex(point)**2),\n run_time=5\n )\n self.wait()\n\nclass Test76(Scene):\n '''交换点的顺序实现五角星'''\n def construct(self):\n poly = RegularPolygon(5)\n verts = poly.get_vertices()\n poly2 = Polygon(verts[0], verts[2], verts[4], verts[1], verts[3]).set_fill(BLUE, opacity=0.5)\n self.add(poly2)\n debugTeX(self, verts)\n\nclass Test77(Scene):\n '''对Imageset_color,所有rgb均替换为指定颜色,但保留alpha'''\n def construct(self):\n image = ImageMobject(\"GZTime.png\").set_color(RED)\n self.add(image)\n\nclass MyTransform(Animation):\n '''继承Animation类,自定义动画,用于下一个场景'''\n CONFIG = {\n \"radians\": PI/2,\n \"axis\": OUT,\n \"about_point\": None,\n \"remover\": True,\n }\n def __init__(self, mobject, target, **kwargs):\n digest_config(self, kwargs)\n self.mobject = mobject.copy()\n self.target = target\n def clean_up_from_scene(self, scene):\n if self.is_remover():\n scene.remove(self.mobject)\n scene.add(self.target)\n def interpolate_mobject(self, alpha):\n now = self.starting_mobject.copy()\n now.rotate(\n alpha * self.radians,\n axis=self.axis,\n about_point=self.about_point,\n )\n for i in range(3):\n now[i].set_color(interpolate_color(self.starting_mobject[i].get_color(), self.target[i].get_color(), alpha))\n self.mobject.become(now)\n\nclass Test78(Scene):\n '''logo的一种动画方案'''\n def construct(self):\n logo1 = VGroup(\n Polygon(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 2, 0])),\n Polygon(np.array([1.5, 0, 0]), np.array([3, 3, 0]), np.array([0, 3, 0])),\n Polygon(np.array([2, 0, 0]), np.array([3, 0, 0]), np.array([3, 2, 0])),\n ).set_stroke(width=0).center()\n logo1[0].set_fill(WHITE, 1)\n logo1[1].set_fill(BLUE_B, 1)\n logo1[2].set_fill(BLUE_C, 1)\n logo1.move_to(np.array([2.5, 1, 0]))\n logo2 = logo1.copy().rotate(PI/2, about_point=ORIGIN)\n logo3 = logo2.copy().rotate(PI/2, about_point=ORIGIN)\n logo4 = logo3.copy().rotate(PI/2, about_point=ORIGIN)\n logo = VGroup(logo1, logo2, logo3, logo4).scale(1/3)\n logo[0][1].set_fill(\"#C59978\", 1)\n logo[0][2].set_fill(\"#8D5630\", 1)\n text = VGroup(\n Text(\"Manim\", font=\"Nexa Bold\"),\n Text(\"Kindergarten\", font=\"Nexa Bold\")\n ).arrange(DOWN, aligned_edge=LEFT, buff=0.3).set_height(2).next_to(logo, buff=0.8).shift(DOWN*0.2)\n all_logo = VGroup(logo, text).center()\n bg = Rectangle(height=10, width=10, fill_color=BLACK, fill_opacity=1, stroke_width=0)\n bg.add_updater(lambda m: m.move_to(logo, aligned_edge=RIGHT))\n text.save_state()\n text.shift((text.get_right()[0]-bg.get_right()[0]+0.2)*LEFT)\n logo.save_state()\n logo.center().scale(1.5)\n \n self.add(text, bg)\n self.play(FadeIn(logo[0]))\n self.wait()\n for i in range(3):\n self.play(MyTransform(logo[i], logo[i+1], about_point=logo.get_center()), run_time=0.25, rate_func=smooth)\n self.wait(2)\n 
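# Illustrative aside (not part of the original Test.py): a minimal custom
# Animation in the style of MyTransform above. Only interpolate_mobject is
# overridden; the colour blends from the start state to target_color as alpha
# runs 0 -> 1. Assumes the same manimlib version as these tests.
class FadeToColor(Animation):
    def __init__(self, mobject, target_color, **kwargs):
        self.target_color = target_color
        Animation.__init__(self, mobject, **kwargs)

    def interpolate_mobject(self, alpha):
        self.mobject.set_color(
            interpolate_color(self.starting_mobject.get_color(), self.target_color, alpha)
        )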
self.play(\n text.restore, logo.restore,\n rate_func=smooth, run_time=1\n )\n self.wait()\n\nclass Test79(Scene):\n '''逐字上颜色'''\n def construct(self):\n tex = TextMobject(\"text or object\")\n self.add(tex)\n self.wait(0.5)\n for letter in tex:\n self.play(\n LaggedStart(\n *[\n ApplyMethod(i.set_color, YELLOW)\n for i in letter\n ],\n run_time=2\n )\n )\n self.wait()\n\nclass Test80(Scene):\n '''rate_func的细节效果'''\n def construct(self):\n dot = Dot()\n self.add(dot)\n self.wait()\n self.play(dot.shift, RIGHT*3, rate_func=func, run_time=2)\n self.wait()\n\nclass Test81(Scene):\n '''白底logo的配色方案'''\n CONFIG = {\n \"camera_config\": {\n \"background_color\": WHITE,\n },\n }\n def construct(self):\n logo1 = VGroup(\n Polygon(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 2, 0])),\n Polygon(np.array([1.5, 0, 0]), np.array([3, 3, 0]), np.array([0, 3, 0])),\n Polygon(np.array([2, 0, 0]), np.array([3, 0, 0]), np.array([3, 2, 0])),\n ).set_stroke(width=0).center()\n logo1[0].set_fill(\"#cccccc\", 1)\n logo1[1].set_fill(BLUE_D, 1)\n logo1[2].set_fill(BLUE_E, 1)\n logo1.move_to(np.array([2.5, 1, 0]))\n logo2 = logo1.copy().rotate(PI/2, about_point=ORIGIN)\n logo3 = logo2.copy().rotate(PI/2, about_point=ORIGIN)\n logo4 = logo3.copy().rotate(PI/2, about_point=ORIGIN)\n logo = VGroup(logo1, logo2, logo3, logo4).scale(0.7).center()\n logo[0][1].set_fill(\"#C59978\", 1)\n logo[0][2].set_fill(\"#8D5630\", 1)\n self.add(logo)\n\nclass Test82(Scene):\n def construct(self):\n tex = TextMobject(\"ab\")\n self.add(tex)\n\nclass Test83(LogoGenerationTemplate):\n '''3B1B的logo动效,并不是想要的效果'''\n CONFIG = {\n \"random_seed\": 2,\n }\n\n def get_logo_animations(self, logo):\n layers = logo.spike_layers\n for layer in layers:\n random.shuffle(layer.submobjects)\n for spike in layer:\n spike.save_state()\n spike.scale(0.5)\n spike.apply_complex_function(np.log)\n spike.rotate(-90 * DEGREES, about_point=ORIGIN)\n spike.set_fill(opacity=0)\n\n return [\n FadeIn(\n logo.iris_background,\n rate_func=squish_rate_func(smooth, 0.25, 1),\n run_time=3,\n ),\n AnimationGroup(*[\n LaggedStartMap(\n Restore, layer,\n run_time=3,\n path_arc=180 * DEGREES,\n rate_func=squish_rate_func(smooth, a / 3.0, (a + 0.9) / 3.0),\n lag_ratio=0.8,\n )\n for layer, a in zip(layers, [0, 2, 1, 0])\n ]),\n Animation(logo.pupil),\n ]\n\nclass Test84(Scene):\n '''坐标系非线性复变换'''\n def construct(self):\n grid = ComplexPlane().prepare_for_nonlinear_transform(50)\n self.add(grid)\n self.wait()\n self.play(grid.apply_complex_function, np.exp, run_time=3, rate_func=linear)\n self.wait()\n\nclass Test85(Scene):\n '''由Line+VGroup拼成的多边形无法上色'''\n def construct(self):\n vg = VGroup(\n Line(ORIGIN, RIGHT),\n Line(RIGHT, UP),\n Line(UP, ORIGIN)\n ).set_fill(BLUE, 1)\n self.add(vg)\n\nclass Test86(Scene):\n '''PointCouldDot的细节,有一个个像素点构成的点'''\n def construct(self):\n test = PointCloudDot().scale(30)\n self.add(test)\n\nclass Test87(Scene):\n '''无法用Polygon表示折线,因为Polygon强制首尾相接'''\n def construct(self):\n lines = Polygon(ORIGIN, UP, RIGHT)\n self.add(lines)\n\nclass Lines(VMobject):\n '''利用set_points_as_corner实现的折线类'''\n def __init__(self, *points, **kwargs):\n VMobject.__init__(self, **kwargs)\n self.set_points_as_corners(points)\n\nclass Test88(Scene):\n '''上面的折线类和VGroup+Line构造的折线的ShowCreation效果相同'''\n def construct(self):\n # lines = Lines(ORIGIN, UP, RIGHT)\n lines = VGroup(\n Line(ORIGIN, UP),\n Line(UP, RIGHT)\n )\n 
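# Illustrative aside (not part of the original Test.py): the Lines helper
# defined above accepts any number of corner points, so an open zig-zag
# renders as a single stroke:
zigzag = Lines(LEFT * 3, LEFT + UP, RIGHT + DOWN, RIGHT * 3, color=YELLOW)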
self.play(ShowCreation(lines))\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":44179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"495213550","text":"import numpy as np\n\nfrom sklearn.linear_model._base import LinearModel\nfrom sklearn.linear_model import LinearRegression, Lasso\nfrom sklearn.preprocessing import StandardScaler\n\n# https://gist.github.com/agramfort/2351057\n\n\n__all__ = ['NNGRegressor']\n\n\ndef non_negative_garotte(X, y, alpha, tol=1e-6, max_iter=1000):\n\n # Ordinart Least Squares coefficients\n coef_ols = LinearRegression(fit_intercept=False).fit(X, y).coef_\n X = X * coef_ols[np.newaxis, :]\n\n # Shrunken betas\n shrink_coef = Lasso(alpha=alpha, fit_intercept=False, normalize=False,\n positive=True, tol=tol, max_iter=max_iter).fit(X, y).coef_\n coef = coef_ols * shrink_coef\n\n # Residual Sum of Squares\n rss = np.sum((y - np.dot(X, coef)) ** 2)\n return coef, shrink_coef, rss\n\n\n\nclass NNGRegressor(LinearModel):\n \"\"\"Non-Negative Garrote Regressor\n\n Code source : https://gist.github.com/agramfort/2351057\n\n Ref:\n Breiman, L. (1995), \"Better Subset Regression Using the Nonnegative\n Garrote,\" Technometrics, 37, 373-384. [349,351]\n\n Parameters\n ----------\n alpha : float, optional (default 1e-3)\n Constant that multiplies the L1 term. Defaults to 1.0. alpha = 0 is\n equivalent to an ordinary least square,\n solved by the LinearRegression object. For numerical reasons, using\n alpha = 0 with the Lasso object is not\n advised. Given this, you should use the LinearRegression object.\n\n fit_intercept : boolean, optional (default True)\n Whether to calculate the intercept for this model. If set to False, no\n intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n normalize : boolean, optional (default False)\n This parameter is ignored when fit_intercept is set to False. 
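# A minimal end-to-end run of the non-negative garrote steps defined above, on
# synthetic data. One caveat: Lasso's `normalize` argument was deprecated in
# scikit-learn 1.0 and removed in 1.2, so this sketch simply omits it; with a
# recent scikit-learn the function above needs the same edit to run.
import numpy as np
from sklearn.linear_model import LinearRegression, Lasso

rng = np.random.RandomState(0)
X = rng.randn(200, 5)
y = X @ np.array([3.0, 1.5, 0.0, 0.0, 2.0]) + 0.1 * rng.randn(200)

coef_ols = LinearRegression(fit_intercept=False).fit(X, y).coef_
shrink = Lasso(alpha=0.01, fit_intercept=False, positive=True).fit(
    X * coef_ols[np.newaxis, :], y).coef_
print(np.round(coef_ols * shrink, 2))  # near [3, 1.5, 0, 0, 2]; zero weights drop features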
If True,\n the regressors X will be normalized\n before regression by subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n sklearn.preprocessing.StandardScaler before calling fit on an estimator\n with normalize=False.\n\n tol : float, optional (default: 1e-6)\n The tolerance for the optimization: if the updates are smaller than\n tol, the optimization code checks\n the dual gap for optimality and continues until it is smaller than tol.\n\n max_iter : int, optional (default: 1000)\n The maximum number of iterations.\n\n copy_X : boolean, optional (default True)\n If True, X will be copied; else, it may be overwritten.\n\n Attributes\n ----------\n coef_ : array, shape (n_features, ) or (n_targets, n_features)\n Estimated coefficients for the linear regression problem.\n If multiple targets are passed during the fit (y 2D), this\n is a 2D array of shape (n_targets, n_features), while if only\n one target is passed, this is a 1D array of length n_features.\n\n intercept_ : array\n Independent term in the linear model.\n\n \"\"\"\n def __init__(self, alpha=1e-3, fit_intercept=True, normalize=False,\n tol=1e-4, max_iter=1000, copy_X=True):\n self.alpha = alpha\n self.fit_intercept = fit_intercept\n self.tol = tol\n self.normalize = normalize\n self.copy_X = copy_X\n self.max_iter = max_iter\n self.tol = tol\n\n\n def fit(self, X, y):\n '''\n X : array-like, shape = (n_samples, n_features)\n y : array-like, shape = (n_samples, )\n\n '''\n\n X, y, X_mean, y_mean, X_std = self._preprocess_data(X, y,\n self.fit_intercept, self.normalize, self.copy_X)\n\n self.coef_, self.shrink_coef_, self.rss_ = non_negative_garotte(X, y,\n alpha=self.alpha, tol=self.tol, max_iter=self.max_iter)\n\n self._set_intercept(X_mean, y_mean, X_std)\n return self\n","sub_path":"linear_model/nng.py","file_name":"nng.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"171715086","text":"#coding=utf-8\n#作者:曾祥卫\n#时间:2018.06.12\n#描述:cloud system-upgrade的控制层\n\n\nimport time\nfrom data import data\nfrom publicControl.public_control import PublicControl\nfrom connect.ssh import SSH\n\n\n\nclass UpgradeControl(PublicControl):\n\n def __init__(self, s):\n #继承PublicControl类的属性和方法\n PublicControl.__init__(self, s)\n\n ######################################################################\n #获取对应型号和版本的fileid\n def get_version_fileid(self, apType, version):\n APType = apType.upper()\n api = self.loadApi()['sysApUpgradeShow']\n request = PublicControl(self.s)\n recvdata = request.apiRequest_get(api+str(APType))\n version_lists = recvdata['data']['apFirmwares']\n for i in range(len(version_lists)):\n if version == version_lists[i]['firmwareNum']:\n fileid = version_lists[i]['fileId']\n print(\"fileid is {}\".format(fileid))\n return fileid\n\n #获取ap当前的版本\n def get_ap_current_version(self, ap_mac):\n AP_mac = ap_mac.upper()\n api = self.loadApi()['sysUpgradeApList']\n request = PublicControl(self.s)\n recvdata = request.apiRequest(api, {'filter': {'apType': \"all\"},\n 'pageNum':1,\n 'pageSize':10})\n ap_lists = recvdata['data']['result']\n for i in range(len(ap_lists)):\n if AP_mac == ap_lists[i]['mac']:\n version = ap_lists[i]['versionFirmware']\n print (\"version is {}\".format(version))\n return version\n\n\n #选择现在时间升级ap\n def set_ap_upgrade_now(self, apType, version, ap_mac, \\\n cloud_ssh_ip, cloud_ssh_user, cloud_ssh_pwd):\n APType = apType.upper()\n AP_mac = ap_mac.upper()\n fileid = 
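# get_version_fileid() above scans a list of firmware dicts and implicitly returns
# None when nothing matches (it falls off the end of the loop). The same lookup
# with next() makes both the early exit and the miss case explicit. The field
# names mirror the API response handled above; the values here are made up.
firmwares = [
    {"firmwareNum": "1.0.2", "fileId": 101},
    {"firmwareNum": "1.1.0", "fileId": 202},
]

def find_fileid(firmwares, version):
    return next((fw["fileId"] for fw in firmwares if fw["firmwareNum"] == version), None)

print(find_fileid(firmwares, "1.1.0"))  # 202
print(find_fileid(firmwares, "9.9.9"))  # None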
self.get_version_fileid(apType, version)\n #获取cloud的当前时间,并按标准格式输出\n ssh = SSH(cloud_ssh_ip, cloud_ssh_pwd)\n result = ssh.ssh_cmd_noKey(cloud_ssh_user, '\\'date -d today +\"%Y/%m/%d %H:%M\"\\'')\n now_time = result.strip(\"\\r\\n\")\n api = self.loadApi()['sysApUpgrade']\n request = PublicControl(self.s)\n recvdata = request.apiRequest(api, {'apType':APType,\n 'deviceNum':1,\n 'fileId':fileid,\n 'mac':AP_mac,\n 'name':u\"升级\",\n 'schedule':\"0\",\n 'scheduleEndTime':now_time,\n 'scheduleTime':now_time,\n 'targetVersion':version,\n 'timezone':\"Etc/GMT\"})\n return recvdata\n\n\n\n\n\n","sub_path":"system/upgrade/upgrade_control.py","file_name":"upgrade_control.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"377158054","text":"from django.contrib import admin\nfrom .models import (Patient, Practitioner, Hospital, PractitionerMembership,\n MedicalRecord, Observation,\n DiagnosticReport)\nfrom transfer_api.models import TransHospitalTransact, InHospitalTransact\nfrom auth_api.models import UserProfile\n\n\nclass PatientAdmin(admin.ModelAdmin):\n list_display = (\n 'id', 'name', 'id_no', 'telecom', 'gender', 'birth_date', 'address',\n 'photo', 'contact_relationship', 'contact_name',\n 'contact_telecom', 'contact_gender', 'contact_address')\n\n\nclass PractitionerAdmin(admin.ModelAdmin):\n list_display = (\n 'id', 'name', 'id_no', 'name', 'telecom', 'qualification_id',\n 'qualification_period', 'qualification_issuer')\n\n\nclass HospitalAdmin(admin.ModelAdmin):\n list_display = ('id', 'name', 'id_no', 'telecom',\n 'qualification_level')\n\n\nclass PractitionerMembershipAdmin(admin.ModelAdmin):\n pass\n\n\nclass MedicalRecordAdmin(admin.ModelAdmin):\n list_display = ('id', 'patient', 'initial_date', 'update_date')\n\n\nclass ObservationInline(admin.StackedInline):\n model = Observation\n extra = 1\n\n\nclass ObservationAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'name',\n 'result',\n 'diagnostic_report',\n )\n\n\nclass DiagnosticReportAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'serial_no',\n 'name',\n 'patient',\n 'practitioner',\n 'comments',\n )\n inlines = [ObservationInline]\n\n\nclass TransHospitalTransactAdmin(admin.ModelAdmin):\n list_display = (\n 'id', 'from_hospital', 'to_hospital','from_practitioner', 'to_practitioner', 'PatientRecord', 'order_sn', 'trade_sn', 'open_time',\n 'transact_status', 'pay_time'\n )\n\n\nclass InHospitalTransactAdmin(admin.ModelAdmin):\n list_display = (\n 'id', 'order_sn', 'trade_sn', 'open_time', 'transact_status',\n 'transact_comments', 'pay_time', 'next_order'\n )\n\n\nadmin.site.register(Patient, PatientAdmin)\nadmin.site.register(Practitioner, PractitionerAdmin)\nadmin.site.register(Hospital, HospitalAdmin)\nadmin.site.register(PractitionerMembership, PractitionerMembershipAdmin)\nadmin.site.register(MedicalRecord, MedicalRecordAdmin)\nadmin.site.register(TransHospitalTransact, TransHospitalTransactAdmin)\nadmin.site.register(InHospitalTransact, InHospitalTransactAdmin)\nadmin.site.register(Observation, ObservationAdmin)\nadmin.site.register(DiagnosticReport, DiagnosticReportAdmin)\n","sub_path":"data_api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"385149482","text":"import numpy as np\nimport pandas as pd\nimport re\nimport itertools\nfrom collections import Counter\nfrom sklearn import 
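# set_ap_upgrade_now() above reads the controller's clock over SSH with
# `date -d today +"%Y/%m/%d %H:%M"`. For comparison, the same format string
# applied to the local clock in pure Python (only equivalent when both machines
# share a timezone, which the SSH round-trip deliberately avoids assuming):
from datetime import datetime

now_time = datetime.now().strftime("%Y/%m/%d %H:%M")
print(now_time)  # e.g. "2024/05/07 09:15"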
preprocessing\nfrom keras.utils import np_utils\nfrom keras.preprocessing import sequence\nfrom keras.preprocessing.text import Tokenizer\nimport s3fs\nimport sys\nimport os\nfrom nltk.corpus import stopwords\nimport sklearn.metrics\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport seaborn as sns\n\ndef clean_str(string):\n\n string = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', string) # addition #flags=re.MULTILINE\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()\n\ndef text_preprocessing(x_text, additional_title, MAX_NB_WORDS, MAX_LEN_DOC, SPLIT_INDEX, INSPECTION_ROW=4):\n \n print (\"Clean up texts...\")\n \n # data type\n x_text = x_text.astype(str)\n additional_title = additional_title.astype(str)\n \n if isinstance(x_text, pd.Series):\n x_text = x_text.values\n \n if isinstance(x_text, pd.Series):\n additional_title = additional_title.values\n \n clean_func = np.vectorize(lambda x: clean_str(x))\n x_text = clean_func(x_text)\n \n print (\"Tokenizing...\")\n x_train, x_test =x_text[0:SPLIT_INDEX], x_text[SPLIT_INDEX:]\n tokenizer = Tokenizer(num_words=MAX_NB_WORDS)\n tokenizer.fit_on_texts(x_train)\n \n word2id = tokenizer.word_index\n id2word = {v:k for k, v in word2id.items()}\n \n vocab_size = len(word2id.keys()) + 1\n\n print (\"Integer encoding...\")\n # print (tokenizer.word_index)\n x_train = tokenizer.texts_to_sequences(x_train)\n x_test = tokenizer.texts_to_sequences(x_test)\n x_text = np.concatenate((x_train, x_test), axis=0)\n additional_title = tokenizer.texts_to_sequences(additional_title)\n\n print (\"Padding...\")\n x_text = sequence.pad_sequences(\n x_text, maxlen=MAX_LEN_DOC, dtype='int32',\n padding=\"post\", truncating=\"post\")\n additional_title = sequence.pad_sequences(\n additional_title, maxlen=MAX_LEN_DOC, dtype='int32',\n padding=\"post\", truncating=\"post\")\n \n \n return x_text, additional_title ,vocab_size, id2word, word2id\n\ndef label_processing(y):\n \n \n num_class = len(np.unique(y))\n y = np_utils.to_categorical(y, num_class)\n \n return y\n\n\ndef load_data(MAX_LEN_DOC=100, MAX_NB_WORDS=5000, \n url1='s3://smart-newsdev-dmp/tmp/data/classification/data.csv',\n url2='s3://smart-newsdev-dmp/tmp/data/classification/data.csv',\n url3='s3://smart-newsdev-dmp/tmp/data/classification/data.csv',\n TEXT_TITLE_COMBINED=False): # if false use only 'title'\n \"\"\"\n \n \"\"\"\n # Load data from files\n print(\"Loading data...\")\n train = pd.read_csv(url1, sep='|', error_bad_lines=False)\n test = pd.read_csv(url2, sep='|', error_bad_lines=False)\n additional_title = pd.read_csv(url3, sep='|', error_bad_lines=False)\n split_index = train.shape[0]\n \n data = pd.concat([train, test])\n \n # combine two columns\n if TEXT_TITLE_COMBINED:\n pre_x = data['title'] + \" \" + data['text']\n else:\n pre_x = data['title']\n \n additional_title = additional_title['title']\n \n \n # Generate encoded text sequence\n x_text, additional_title, 
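# A quick check of what clean_str() above actually produces, using a trimmed copy
# of its rules (character filtering, contraction splitting, punctuation padding,
# whitespace collapsing, lower-casing):
import re

def clean_str_demo(string):
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()

print(clean_str_demo("Don't miss THIS!"))  # -> "do n't miss this !"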
vocab_size, id2word, word2id = text_preprocessing(\n x_text=pre_x, additional_title = additional_title, MAX_NB_WORDS=MAX_NB_WORDS,\n MAX_LEN_DOC=MAX_LEN_DOC, SPLIT_INDEX=split_index)\n \n # Generate labels\n y = label_processing(data['category'])\n \n return [x_text, y, split_index, vocab_size, id2word, word2id, pre_x, data, additional_title]\n\ndef ints2setences(sequence_array, id2word):\n result = []\n for int_enc in sequence_array:\n if int_enc != 0:\n# print(int_enc)\n result.append(id2word[int(int_enc)])\n \n return ' '.join(result)\n\ndef evaluation(y_pred, y_test, history):\n print ('---------Confusion Matrix Report -------------------- \\n')\n \n print (sklearn.metrics.confusion_matrix(y_test, y_pred))\n print ('\\n---------Classificaiton Report ---------------------- \\n')\n print (sklearn.metrics.classification_report(y_test, y_pred))\n \n if history:\n print(history.history.keys())\n # summarize history for accuracy\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n # summarize history for loss\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()","sub_path":"notebooks/clickbait/data_helpers_v8.py","file_name":"data_helpers_v8.py","file_ext":"py","file_size_in_byte":5411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"165812501","text":"import requests\nimport xml.etree.ElementTree\nimport random\n\ndef make_wordimfo_file(needed_word, file):\n '''\n '''\n parameters = { 'key': \"84AD3BB0C4BF3809A9CF3CCA68FAF946\", 'q': needed_word, 'part': 'word', 'translated': 'y',\\\n 'trans_lang': '1'} \n url = \"https://krdict.korean.go.kr/api/search\"\n url1 = requests.get(url, params = parameters)\n\n content = url1.text\n\n fl = open(file,'w')\n fl.write(content)\n fl.close()\n\ndef get_translation(file):\n '''\n '''\n fl = open(file)\n tx = fl.read()\n xmlData = str(tx)\n fl.close()\n\n translation = []\n import xml.etree.ElementTree as ET\n xml = ET.fromstring(xmlData)\n\n for table in xml.getiterator('channel'):\n for child in table[7][5][2]:\n translation.append(child.text.strip())\n return translation\n\n\ndef form_words_dict():\n '''\n '''\n translated_words = {}\n korean_word = '사과'\n kor_word = '오다'\n make_wordimfo_file(korean_word, 'data_example.xml')\n translation = get_translation('data_example.xml')\n translated_words[korean_word] = translation\n make_wordimfo_file(kor_word, 'data_example.xml')\n translation = get_translation('data_example.xml')\n translated_words[kor_word] = translation\n return translated_words\n\n\ndef random_word(words):\n '''\n '''\n word = random.choice([key for key in words])\n return word\n\nif __name__ == \"__main__\":\n m = form_words_dict()\n print(m)\n print(\"Random word: \")\n print(random_word(m))","sub_path":"final_learn_korean/module_use.py","file_name":"module_use.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"23264118","text":"import cv2 \nimport mediapipe as mp\nimport time\n\nframe = cv2.VideoCapture(0) # 0 é o num da webcam utilizada\nhandsDect = mp.solutions.hands\nhands = handsDect.Hands() #confidences=0.5, max num of hands=2, static mode=false são parametros 
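# get_translation() above walks the XML response with Element.getiterator(),
# which was deprecated long ago and removed in Python 3.9; Element.iter() is the
# drop-in replacement. A self-contained illustration on an inline snippet (the
# tag names here are invented, not the real krdict response layout):
import xml.etree.ElementTree as ET

xml_data = "<channel><item><word>사과</word><trans>apple</trans></item></channel>"
root = ET.fromstring(xml_data)
print([el.text.strip() for el in root.iter("trans")])  # ['apple']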
padrão\nconnections = mp.solutions.drawing_utils\n\nprevTime = 0 # tempo anterior\ncurrTime = 0 # tempo atual\n\nwhile True:\n success, image = frame.read()\n imageRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n results = hands.process(imageRGB)\n #print(results.multi_hand_landmarks) #testa se está detectando a mão\n\n if results.multi_hand_landmarks:\n for handLandmarks in results.multi_hand_landmarks: # para saber se é uma mão ou várias\n for id, points in enumerate(handLandmarks.landmark): # id identifica o landmark correspondente\n #print(id,points) #id é dado em núm decimal pq é tipo a razão do frame em que se encontra o pixel\n h, w, p = image.shape # pega as dimensões do video \n cx, cy = int(points.x*w), int(points.y*h) # lm.x*w = pega a posição do pixel em x\n print(id,cx,cy) # identifica qual a posição de cada landmark\n if id == 0: # testa se é o ponto mais embaixo\n cv2.circle(image, (cx,cy), 8, (255,0,100), cv2.FILLED) # desenha um circulo no ponto 0 identificado\n connections.draw_landmarks(image, handLandmarks, handsDect.HAND_CONNECTIONS) # aponta os landmarks das mãos, HAND_CONNECTIONS faz conexões\n\n currTime = time.time() # pega o tempo atual\n fps = 1/(currTime-prevTime) # cálculo do fps \n prevTime = currTime\n cv2.putText(image,str(int(fps)), (10,460), cv2.FONT_HERSHEY_PLAIN,3,(255,255,255),3)\n\n cv2.imshow(\"Imagem em tempo real\", image)\n cv2.waitKey(1)","sub_path":"handTracking.py","file_name":"handTracking.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"159907720","text":"import os\nfrom config import db\nfrom models import Party, Office, Electorate, Election, Ballot, BallotBox, InvoiceItem\n\n# Data to initialize database with\nPEOPLE = [\n {'fname': 'Doug', 'lname': 'Farrell'},\n {'fname': 'Kent', 'lname': 'Brockman'},\n {'fname': 'Bunny', 'lname': 'Easter'}\n]\n\n# Delete database file if it exists currently\nif os.path.exists('tallysheet.db'):\n os.remove('tallysheet.db')\n\n# Create the database\ndb.create_all()\n\nfor i in range(1, 6):\n db.session.add(Party())\n db.session.add(Election())\n\n for j in range(1, 5):\n db.session.add(Office(electionId=i))\n db.session.add(Electorate(electionId=i))\n\nfor i in range(1, 20):\n invoice_item = InvoiceItem()\n\n db.session.add(invoice_item)\n db.session.commit()\n\n db.session.add(Ballot(\n ballotId=\"pre-ballot-%d\" % i,\n invoiceItemId=invoice_item.invoiceItemId\n ))\n\nfor i in range(1, 200):\n invoice_item = InvoiceItem()\n\n db.session.add(invoice_item)\n db.session.commit()\n\n db.session.add(BallotBox(\n ballotBoxId=\"pre-ballot-box-%d\" % i,\n invoiceItemId=invoice_item.invoiceItemId\n ))\n\ndb.session.commit()\n","sub_path":"build_database.py","file_name":"build_database.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"65849826","text":"# Copyright (c) 2018, Kevin Spiteri\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# 2. 
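# handTracking.py above converts MediaPipe's normalized landmark coordinates to
# pixel positions inline (cx = x * width, cy = y * height). The same conversion
# as a small reusable helper, runnable without MediaPipe:
def to_pixels(norm_x, norm_y, frame_width, frame_height):
    # MediaPipe landmark coordinates are fractions of the frame size in [0, 1].
    return int(norm_x * frame_width), int(norm_y * frame_height)

print(to_pixels(0.5, 0.25, 640, 480))  # (320, 120)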
Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport math\nimport os\nimport subprocess\nimport json\nimport sys\nimport threading\n\n# TODO: Clean up to give an easier-to-understand example how to use Sabre!\n\ndef load_json(path):\n with open(path) as file:\n obj = json.load(file)\n return obj\n\ndef cdf(l, margin = 0.025):\n l = sorted(l)\n range = l[-1] - l[0]\n if range > 0:\n margin *= range\n inc = 1 / len(l)\n c = []\n y = 0\n if range == 0:\n c += [[l[0] - margin, y]]\n for x in l:\n c += [[x, y]]\n y += inc\n c += [[x, y]]\n if range == 0:\n c += [[l[-1] + margin, y]]\n return c\n\ndef mean_stddev(l):\n mean = sum(l) / len(l)\n var = sum([(x - mean) * (x - mean) for x in l]) / len(l)\n stddev = math.sqrt(var)\n return (mean, stddev)\n\ndef thread_run_sabre(results, command):\n completed = subprocess.run(command, stdout = subprocess.PIPE)\n for line in completed.stdout.decode('ascii').split('\\n'):\n l = line.split(':')\n if len(l) != 2:\n continue\n if l[0] in results:\n results[l[0]].append(float(l[1]))\n\ndef thread_run_gnuplot(plotting):\n subprocess.run('gnuplot', input = plotting.encode('ascii'))\n\ndef do_figure(prefix, subfigs, algorithms, metrics, term = None):\n print(prefix + ' ', end = '')\n\n plotting_threads = []\n\n for subfig in subfigs:\n title = subfig[0]\n dir = subfig[1]\n args1 = subfig[2]\n\n print(title + ' ', end = '')\n\n # info['metric_name']['algorithm_name'] = (mean, stddev, 'name of .dat file')\n info = {m[0]: {} for m in metrics}\n \n plot_mark_offset = 0\n for algorithm in algorithms:\n plot_mark_offset += 1\n name = algorithm[0]\n args = args1 + algorithm[1]\n \n print(name + ' ', end = '')\n \n results = {m[1]: [] for m in metrics}\n \n cnt = 0\n max_threads = 5\n threads = []\n for trace in os.listdir(dir)[:]: # use this line to limit directory size\n cnt += 1\n print('%d' % cnt, end = '')\n sys.stdout.flush()\n \n if len(threads) >= max_threads:\n for t in threads:\n if not t.is_alive():\n t.join()\n threads.remove(t)\n break\n \n if len(threads) >= max_threads:\n threads[0].join()\n threads.pop(0)\n \n command = ['python3', './svc.py', '-n', dir + '/' + trace] + args\n print (command)\n t = threading.Thread(target = thread_run_sabre, args = (results, command))\n threads.append(t)\n t.start()\n \n print('\\b' * len(str(cnt)), end = '')\n print(' ' * len(str(cnt)), end = '')\n print('\\b' * len(str(cnt)), end = '')\n for t in threads:\n t.join()\n \n print('\\b' * (len(name) + 1), end = '')\n print(' ' * (len(name) + 1), end = '')\n print('\\b' * (len(name) + 1), end = '')\n \n for metric in metrics:\n config 
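# cdf() above builds the point list for an empirical CDF: samples are sorted and
# each contributes a step of height 1/len(l). The core of that construction on a
# tiny sample, without the zero-range margin handling:
samples = [3.0, 1.0, 2.0, 2.0]
xs = sorted(samples)
inc = 1.0 / len(xs)
curve, y = [], 0.0
for x in xs:
    curve.append((x, y))   # bottom of the riser at this sample
    y += inc
    curve.append((x, y))   # top of the riser
print(curve)  # [(1.0, 0.0), (1.0, 0.25), (2.0, 0.25), ..., (3.0, 1.0)]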
= metric[2] if len(metric) > 2 else {}\n samples = results[metric[1]]\n points = cdf(samples)\n median = (points[(len(points) - 1) // 2][0] + points[len(points) // 2][0]) / 2\n stats = (median, ) + mean_stddev(samples)\n datname = ('tmp/' + prefix + '-' +\n title.replace(' ', '-') + '-' +\n metric[0].replace(' ', '-') + '-' +\n algorithm[0].replace(' ', '-') + '.dat')\n info[metric[0]][algorithm[0]] = stats + (datname, )\n with open(datname, 'w') as f:\n for l in points:\n xoffset = config['xoffset'] if 'xoffset' in config else 0\n f.write('%f %f\\n' % (xoffset + l[0], l[1]))\n \n dot_count = 4\n step = math.floor(len(points) / dot_count)\n # plot_mark_offset in [1, len(algorithms)]\n first = math.ceil(plot_mark_offset / (len(algorithms) + 1) * step)\n with open(datname + '.dot', 'w') as f:\n for l in points[first::step]:\n xoffset = config['xoffset'] if 'xoffset' in config else 0\n f.write('%f %f\\n\\n' % (xoffset + l[0], l[1]))\n \n statname = ('stats/' + prefix + '-' + title.replace(' ', '-') + '.txt')\n delim = ''\n with open(statname, 'w') as f:\n for metric in metrics:\n f.write(delim)\n delim = '\\n'\n f.write('%s:\\n' % metric[0])\n for algorithm in algorithms:\n i = info[metric[0]][algorithm[0]]\n f.write('%s: %f %f %f\\n' % (algorithm[0], i[0], i[1], i[2]))\n\n xranges = subfig[3][:] if len(subfig) > 3 else None\n mi = -1\n for metric in metrics:\n mi += 1\n config = metric[2] if len(metric) > 2 else {}\n pdfname = ('figures/' + prefix + '-' +\n title.replace(' ', '-') + '-' +\n metric[0].replace(' ', '-') +\n '.pdf')\n key = config['key'] if 'key' in config else 'bottom right'\n xtics = str(config['xtics']) if 'xtics' in config else 'autofreq'\n #xlabel = title + ' ' + metric[0]\n xlabel = metric[0]\n if 'time' in xlabel:\n xlabel += ' (s)'\n elif 'bitrate' in xlabel:\n xlabel += ' (kbps)'\n if xranges:\n xrange = '[0:%f]' % xranges.pop(0)\n else:\n xrange = '[0:*]'\n plot_list = []\n point_types = [1, 2, 4, 6, 8, 10]\n pti = 0\n for algorithm in algorithms:\n pt = point_types[pti]\n pti += 1\n alg_pars = algorithm[2]\n if alg_pars.startswith('notitle'):\n alg_pars = alg_pars[len('notitle'):]\n # HACK\n if isinstance(term, list) and term[mi] != None:\n do_title = ' notitle '\n else:\n do_title = ' title \"' + algorithm[0] + '\" '\n else:\n do_title = ' title \"' + algorithm[0] + '\" '\n datname = info[metric[0]][algorithm[0]][-1]\n plot_list += ['\"' + datname + '\" notitle ' + alg_pars + ' lw 2']\n plot_list += ['\"' + datname + '.dot\" ' + do_title +\n ' with linespoints pt ' + str(pt) + ' ' + alg_pars + ' lw 2']\n\n trm = term[mi] if isinstance(term, list) else term\n if trm == None:\n trm = 'pdf size 2.3, 1.75 font \",8\"'\n\n plotting = '''set term ''' + trm + '''\nset bmargin 3.5\n\nset style data lines\nset key ''' + key + '''\n\nset xlabel \"''' + xlabel + '''\"\nset xtics ''' + xtics + '''\n\nset xrange ''' + xrange + '''\n\nset output \"''' + pdfname + '''\"\n\nplot ''' + ', '.join(plot_list) + '''\n\nset output\n'''\n #subprocess.run('gnuplot', input = plotting.encode('ascii'))\n t = threading.Thread(target = thread_run_gnuplot, args = (plotting, ))\n plotting_threads.append(t)\n t.start()\n\n print('\\b' * (len(title) + 1), end = '')\n print(' ' * (len(title) + 1), end = '')\n print('\\b' * (len(title) + 1), end = '')\n\n for t in plotting_threads:\n t.join()\n\n print('\\b' * (len(prefix) + 1), end = '')\n print(' ' * (len(prefix) + 1), end = '')\n print('\\b' * (len(prefix) + 1), end = '')\n\ndef figure12_write_network():\n with open('tmp/network.json', 'w') as 
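# do_figure() above caps concurrency by hand: a threads list, a max_threads
# limit, and join/pop housekeeping. The standard library's ThreadPoolExecutor
# gives the same bounded parallelism with far less bookkeeping; run_one here is
# a stand-in for the per-trace sabre subprocess call.
from concurrent.futures import ThreadPoolExecutor

def run_one(trace):
    return len(trace)  # placeholder work

traces = ["trace_a", "trace_b", "trace_c"]
with ThreadPoolExecutor(max_workers=5) as pool:
    print(list(pool.map(run_one, traces)))  # [7, 7, 7]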
f:\n f.write('[ {\"duration_ms\": 60000, \"bandwidth_kbps\": 8000, \"latency_ms\": 0} ]')\n\n\ndef figure6a():\n figure12_write_network()\n\n completed = subprocess.run(['python3', './svc.py', '-v',\n '-m', 'bbb.json', '-n', 'tmp/network.json',\n '-a', 'bola', '-ab'],\n stdout = subprocess.PIPE)\n basic = completed.stdout.decode('ascii')\n\n completed = subprocess.run(['python3', './svc.py', '-v',\n '-m', 'bbb.json', '-n', 'tmp/network.json',\n '-a', 'bolae'],\n stdout = subprocess.PIPE)\n bolapl = completed.stdout.decode('ascii')\n\n fig1 = []\n for out in [basic, bolapl]:\n fig = []\n for line in out.split('\\n'):\n if not '[' in line or 'Network' in line:\n continue\n l = line.split()\n \n index = int(l[1].split(':')[0])\n quality = int(l[2].split('=')[1])\n #print('%d %d' % (index, quality))\n fig += [(index * 3, bbb['bitrates_kbps'][quality])]\n fig += [((index + 1) * 3, bbb['bitrates_kbps'][quality])]\n if index == 9:\n break\n\n fig1 += [fig]\n\n for i in [0, 1]:\n name = 'fig1%s.dat' % ['a', 'b'][i]\n with open('tmp/%s' % name, 'w') as f:\n for l in fig1[i]:\n f.write('%f %f\\n' % (l[0], l[1]))\n\n plotting = '''set term pdf size 1.9, 1.75 font \",16\"\nset bmargin 3.5\n\nset style data lines\nset yrange[0:6500]\n\nset xlabel 'play time (s)'\nset ylabel 'bitrate (kbps)'\n\nset xtics 10\n\n#set key bottom right\nset key out top center\n\nset output \"figures/fig6a.pdf\"\n\n#plot \"tmp/fig1a.dat\" title \"BOLA\" lc 7 dt 4 lw 2, \"tmp/fig1b.dat\" title \"BOLA-PL\" lc 6 lw 2\nplot \"tmp/fig1a.dat\" title \"BOLA\" lc 7 dt 4 lw 2, \"tmp/fig1b.dat\" notitle lc 6 lw 2\n\nset output\n'''\n subprocess.run('gnuplot', input = plotting.encode('ascii'))\n\ndef figure6b():\n figure12_write_network()\n\n completed = subprocess.run(['python3', './sabre-mmsys18.py', '-v',\n '-m', 'bbb.json', '-n', 'tmp/network.json',\n '-s', '120', '180',\n '-a', 'bola', '-ab'],\n stdout = subprocess.PIPE)\n basic = completed.stdout.decode('ascii')\n\n completed = subprocess.run(['python3', './sabre-mmsys18.py', '-v',\n '-m', 'bbb.json', '-n', 'tmp/network.json',\n '-s', '120', '180',\n '-a', 'bolae'],\n stdout = subprocess.PIPE)\n bolapl = completed.stdout.decode('ascii')\n\n fig2 = []\n for out in [basic, bolapl]:\n fig = []\n for line in out.split('\\n'):\n if not '[' in line or 'Network' in line:\n continue\n l = line.split()\n index = int(l[1].split(':')[0])\n quality = int(l[2].split('=')[1])\n if index < 35:\n continue\n if index == 60:\n fig += [None]\n #print('%d %d' % (index, quality))\n fig += [(index * 3, bbb['bitrates_kbps'][quality])]\n fig += [((index + 1) * 3, bbb['bitrates_kbps'][quality])]\n if index == 69:\n break\n\n fig2 += [fig]\n\n for i in [0, 1]:\n name = 'fig2%s.dat' % ['a', 'b'][i]\n with open('tmp/%s' % name, 'w') as f:\n for l in fig2[i]:\n if l == None:\n f.write('\\n')\n else:\n f.write('%f %f\\n' % (l[0], l[1]))\n\n plotting = '''set term pdf size 1.47, 1.75 font \",16\"\nset bmargin 3.5\n\nset style data lines\nset xrange[180:]\nset yrange[0:6500]\n\nset xlabel 'play time (s)'\n\n#set ylabel 'bitrate (kbps)'\nset ytics format \"\"\nset xtics 10\n\n#set key bottom right\nset key out top center\n\nset output \"figures/fig6b.pdf\"\n\n#plot \"tmp/fig2a.dat\" title \"BOLA\" lc 7 dt 4 lw 2, \"tmp/fig2b.dat\" title \"BOLA-PL\" lc 6 lw 2\nplot \"tmp/fig2a.dat\" notitle lc 7 dt 4 lw 2, \"tmp/fig2b.dat\" title \"BOLA-PL\" lc 6 lw 2\n\nset output\n'''\n subprocess.run('gnuplot', input = plotting.encode('ascii'))\n\ndef figure_1_4():\n\n with open('tmp/egbuf.dat', 'w') as f:\n f.write('''0 
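# figure6a()/figure6b() above turn each (segment index, quality) pair into two
# points so gnuplot draws a flat 3-second tread at that segment's bitrate. The
# transformation in isolation, with an invented bitrate ladder:
bitrates_kbps = [230, 477, 991, 2056, 5027, 6000]
segments = [(0, 0), (1, 2), (2, 2), (3, 5)]  # (segment index, quality level)

step_points = []
for index, quality in segments:
    step_points.append((index * 3, bitrates_kbps[quality]))
    step_points.append(((index + 1) * 3, bitrates_kbps[quality]))
print(step_points)  # [(0, 230), (3, 230), (3, 991), (6, 991), ...]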
1000\n5 1000\n5 2500\n10 2500\n10 5000\n15 5000\n15 0\n18 0\n''')\n\n with open('tmp/lowbufa.dat', 'w') as f:\n f.write('''0 0\n0 230\n3.534 230\n3.534 331\n3.843 331\n3.843 477\n4.153 477\n4.153 688\n4.462 688\n4.462 991\n4.771 991\n4.771 1427\n5.081 1427\n5.081 2056\n5.390 2056\n5.390 2962\n5.759 2962\n5.759 5027\n6.075 5027\n6.075 6000\n7.000 6000\n7.000 0\n10 0\n''')\n with open('tmp/lowbufb.dat', 'w') as f:\n f.write('''0 0\n0 230\n11.048 230\n11.048 331\n13.284 331\n13.284 477\n15.527 477\n15.527 688\n17.770 688\n17.770 991\n20.007 991\n20.007 1427\n22.244 1427\n22.244 2056\n24.483 2056\n24.483 2962\n27.150 2962\n27.150 5027\n29.441 5027\n29.441 6000\n36.132 6000\n36.132 0\n40 0\n''')\n\n plotting1 = '''set term pdf size 3.35, 1.5 font \",16\"\nset bmargin 3.5\n\nset style data lines\n\nset xlabel 'buffer level (s)'\nset ylabel 'bitrate (kbps)'\n\nset output \"figures/fig-1.pdf\"\n\nset xrange[0:18]\nset yrange[0:6000]\n\n#set arrow from 12.5,3500 to 5,0\n#set arrow from 12.5,3500 to 10,0\n#set arrow from 12.5,3500 to 15,0\n#\n#set arrow from 2,4000 to 0,5000\n#set arrow from 2,4000 to 0,2500\n#set arrow from 2,4000 to 0,1000\n\nset arrow from 5,0 to 5,1000 nohead dt 2\nset arrow from 10,0 to 10,2500 nohead dt 2\n\nset arrow from 0,2500 to 5,2500 nohead dt 2\nset arrow from 0,5000 to 10,5000 nohead dt 2\n\nplot \"tmp/egbuf.dat\" lc 7 lw 2 notitle\n\nset output\n'''\n\n plotting2 = '''set term pdf size 3.35, 1.5 font \",16\"\nset bmargin 3.5\n\nset style data lines\n\nset xlabel 'buffer level (s)'\nset ylabel 'bitrate (kbps)'\n\nset output \"figures/fig-4a.pdf\"\n\nset xrange[0:10]\nset yrange[0:6500]\n\nplot \"tmp/lowbufa.dat\" lc 7 lw 2 notitle\n\nset output\n\nset output \"figures/fig-4b.pdf\"\n\nset xrange[0:40]\nset yrange[0:6500]\n\nset grid noxtics noytics noztics front\nset style rect fc lt -1 fs solid 0.25 noborder\nset obj rect from 16, 0 to 26, 6500\nset arrow from 0.1,3500 to 15.9,3500 heads\nset arrow from 16.1,3500 to 25.9,3500 heads\nset label \"virtual\\\\nplaceholder\\\\nsegments\" at 8,5600 center\nset label \"actual\\\\nvideo\\\\nsegments\" at 21,5600 center\n\nplot \"tmp/lowbufb.dat\" lc 6 lw 2 notitle\n\nset output\n'''\n subprocess.run('gnuplot', input = plotting1.encode('ascii'))\n subprocess.run('gnuplot', input = plotting2.encode('ascii'))\n\ndef figure_7_10():\n subfigs = [\n #('12dash vod' , '12dash', ['-m', 'bbb.json' , '-b', '25']),\n #('3Glogs vod' , '3Glogs', ['-m', 'bbb.json' , '-b', '25']),\n ('4G VOD' , '4Glogs', ['-m', 'bbb4k.json', '-b', '25']),\n #('12dash live10', '12dash', ['-m', 'bbb.json' , '-b', '10']),\n #('3G LIVE 10s', '3Glogs', ['-m', 'bbb.json' , '-b', '10']),\n #('4Glogs live10', '4Glogs', ['-m', 'bbb4k.json', '-b', '10']),\n #('12dash live5' , '12dash', ['-m', 'bbb.json' , '-b', '5' ]),\n #('3Glogs live5' , '3Glogs', ['-m', 'bbb.json' , '-b', '5' ]),\n #('4Glogs live5' , '4Glogs', ['-m', 'bbb4k.json', '-b', '5' ]),\n ]\n\n metrics = [\n #('rebuffer' , 'rebuffer ratio'),\n #('oscillation', 'time average bitrate change'),\n #('bitrate' , 'time average played bitrate'),\n ('reaction time' , 'rampup time', {'key': 'out top center horizontal', 'xtics': 10}),\n ]\n\n prefix = 'fig7a'\n algorithms = [\n ('BOLA' , ['-ao', '-a', 'bola', '-ab'] , 'lc 7'),\n ('BOLA-PL', ['-ao', '-a', 'bolae', '-noibr'], 'notitle lc 6'),\n ]\n\n term = 'pdf size 1.8, 1.75 font \",16\"'\n do_figure(prefix, subfigs, algorithms, metrics, term = term)\n\n prefix = 'fig7b'\n algorithms = [\n ('BOLA' , ['-ao', '-a', 'bola', '-ab' , '-s', '120', '180'], 'notitle lc 7'),\n 
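# All the plotting above feeds an inline gnuplot script to the gnuplot binary on
# stdin via subprocess.run(..., input=...). The pattern reduced to its minimum
# (assumes gnuplot is installed and on PATH; otherwise this raises):
import subprocess

script = '''set term pdf
set output "demo.pdf"
plot sin(x) notitle
set output
'''
subprocess.run("gnuplot", input=script.encode("ascii"), check=True)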
('BOLA-PL', ['-ao', '-a', 'bolae', '-noibr', '-s', '120', '180'], 'lc 6'),\n ]\n term = 'pdf size 1.5, 1.75 font \",16\"\\nset ytics format \"\"'\n do_figure(prefix, subfigs, algorithms, metrics, term = term)\n\n\n prefix = 'fig10a'\n algorithms = [\n ('BOLA' , ['-ao', '-a', 'bola', '-ab'], 'lc 4'),\n ('TPUT' , ['-ao', '-a', 'throughput'], 'lc 2'),\n ('DYNAMIC' , ['-ao', '-a', 'dynamic', '-ab'], 'notitle lc 1'),\n ]\n term = 'pdf size 1.8, 1.75 font \",16\"'\n do_figure(prefix, subfigs, algorithms, metrics, term = term)\n\n prefix = 'fig10b'\n algorithms = [\n ('BOLA' , ['-ao', '-a', 'bola', '-ab', '-s', '120', '180'], 'notitle lc 4'),\n ('TPUT' , ['-ao', '-a', 'throughput', '-s', '120', '180'], 'notitle lc 2'),\n ('DYNAMIC' , ['-ao', '-a', 'dynamic', '-ab', '-s', '120', '180'], 'lc 1'),\n ]\n term = 'pdf size 1.5, 1.75 font \",16\"\\nset ytics format \"\"'\n do_figure(prefix, subfigs, algorithms, metrics, term = term)\n\ndef figure8():\n prefix = 'fig8'\n\n algorithms = [\n ('BOLA', ['-a', 'bola', '-ao', '-ab'], ' lc 4'),\n ('BOLA-PL', ['-a', 'bolae', '-ao', '-noibr'], ' lc 7'),\n ('BOLA-E' , ['-a', 'bolae', '-ao' ], ' lc 6'),\n ]\n\n metrics = [\n ('rebuffer ratio' , 'rebuffer ratio', {'xtics' : 0.1}),\n ('average bitrate oscillation', 'time average bitrate change', {'xtics': 150}),\n ('average bitrate' , 'time average played bitrate', {'xtics': 500}),\n ]\n\n subfigs = [\n ('3G Live 10s' , '3Glogs', ['-m', 'bbb.json' , '-b', '10'], [0.6, 600, 2000]),\n ]\n\n do_figure(prefix, subfigs, algorithms, metrics)\n\n# metrics = [\n# ('rebuffer ratio' , 'rebuffer ratio'),\n# ('average bitrate oscillation', 'time average bitrate change', {'xtics': 500}),\n# ('average bitrate' , 'time average played bitrate', {'xtics': 10000}),\n# ]\n#\n# subfigs = [\n# ('4G Live 10s', '4Glogs', ['-m', 'bbb4k.json', '-b', '10']),\n# ('4G VOD', '4Glogs', ['-m', 'bbb4k.json', '-b', '25']),\n# ]\n#\n# do_figure(prefix, subfigs, algorithms, metrics)\n\n\ndef figure11():\n prefix = 'fig11'\n\n subfigs = [\n #('12dash vod' , '12dash', ['-m', 'bbb.json' , '-b', '25']),\n #('3Glogs vod' , '3Glogs', ['-m', 'bbb.json' , '-b', '25']),\n ('4G VOD' , '4Glogs', ['-m', 'bbb4k.json', '-b', '25'], [0.1, 2200, 34000]),\n #('12dash live10', '12dash', ['-m', 'bbb.json' , '-b', '10']),\n #('3Glogs live10', '3Glogs', ['-m', 'bbb.json' , '-b', '10']),\n ('4G Live 10s', '4Glogs', ['-m', 'bbb4k.json', '-b', '10'], [0.1, 4600, 31500]),\n #('12dash live5' , '12dash', ['-m', 'bbb.json' , '-b', '5' ]),\n #('3Glogs live5' , '3Glogs', ['-m', 'bbb.json' , '-b', '5' ]),\n #('4Glogs live5' , '4Glogs', ['-m', 'bbb4k.json', '-b', '5' ], [0.1, 4000, 35000]),\n ]\n\n algorithms = [\n #('BOLA-E' , ['-ao', '-a', 'bolae' ], 'lc 7'),\n #('DYNAMIC-DASH' , ['-ao', '-a', 'dynamicdash'], 'lc 1'),\n ('BOLA' , ['-ao', '-a', 'bola' ], 'lc 4'),\n ('THROUGHPUT' , ['-ao', '-a', 'throughput'], 'lc 2'),\n ('DYNAMIC' , ['-ao', '-a', 'dynamic' ], 'lc 1'),\n ]\n\n metrics = [\n ('rebuffer ratio' , 'rebuffer ratio'),\n ('average bitrate oscillation', 'time average bitrate change', {'xtics': 1000, 'key': 'bottom right font \",12\"'}),\n ('average bitrate' , 'time average played bitrate', {'xtics': 10000, 'key': 'bottom right font \",12\"'}),\n ]\n\n do_figure(prefix, subfigs, algorithms, metrics)\n\ndef figure_12_13():\n\n prefix = '12_13'\n\n subfigs = [\n ('FCC SD', 'sd_fs', ['-m', 'bbb.json' , '-b', '25'], [0.01, 450, 4500, 120]),\n ]\n\n algorithms = [\n #('BOLA-E' , ['-ao', '-r', 'none', '-a', 'bolae' , '-rmp', '9', '-ml', '180'], 'lc 7'),\n ('BOLA' , ['-ao', '-r', 
'left', '-a', 'bolae' , '-rmp', '9', '-ml', '180'], 'lc 1'),\n ('BOLA-SVC' , ['-ao', '-r', 'left', '-a', 'bolae' , '-rmp', '9', '-ml', '180', '-svc'], 'lc 1'),\n #('DYNAMIC' , ['-ao', '-r', 'none', '-a', 'dynamic', '-rmp', '9', '-ml', '180'], 'notitle lc 3'),\n ('DYNAMIC', ['-ao', '-r', 'left', '-a', 'dynamic', '-rmp', '9', '-ml', '180'], 'notitle lc 4'),\n ('DYNAMIC-SVC', ['-ao', '-r', 'left', '-a', 'dynamic', '-rmp', '9', '-ml', '180', '-svc'], 'notitle lc 4'),\n ]\n\n metrics = [\n ('rebuffer ratio' , 'rebuffer ratio'),\n ('average bitrate oscillation' , 'time average bitrate change', {'xtics': 150}),\n ('average bitrate' , 'time average played bitrate',\n {'key': 'top left reverse Left font \",12\"', 'xtics': 1500}),\n ('reaction time', 'rampup time', {'xoffset': -60, 'key': 'out top center vertical', 'xtics': 40}),\n ]\n\n term = 'pdf size 1.8, 1.75 font \",8\"\\n'\n do_figure(prefix, subfigs, algorithms, metrics, term = [None, None, None, term])\n\n prefix = '12_13'\n\n subfigs = [\n ('FCC HD', 'hd_fs', ['-m', 'bbb4k.json', '-b', '25'], [0.01, 1200, 12000, 120]),\n ]\n\n algorithms = [\n #('BOLA-E' , ['-ao', '-r', 'none', '-a', 'bolae' , '-rmp', '4', '-ml', '180'], 'notitle lc 7'),\n ('BOLA' , ['-ao', '-r', 'left', '-a', 'bolae' , '-rmp', '4', '-ml', '180'], 'notitle lc 1'),\n ('BOLA-SVC' , ['-ao', '-r', 'left', '-a', 'bolae' , '-rmp', '4', '-ml', '180', '-svc'], 'notitle lc 1'),\n #('DYNAMIC' , ['-ao', '-r', 'none', '-a', 'dynamic', '-rmp', '4', '-ml', '180'], 'lc 3'),\n ('DYNAMIC', ['-ao', '-r', 'left', '-a', 'dynamic', '-rmp', '4', '-ml', '180'], 'lc 4'),\n ('DYNAMIC-SVC', ['-ao', '-r', 'left', '-a', 'dynamic', '-rmp', '4', '-ml', '180', '-svc'], 'lc 4'),\n ]\n\n metrics = [\n ('rebuffer ratio' , 'rebuffer ratio'),\n ('average bitrate oscillation' , 'time average bitrate change', {'xtics': 400}),\n ('average bitrate' , 'time average played bitrate',\n {'key': 'top left reverse Left font \",12\"', 'xtics': 4000}),\n ('reaction time', 'rampup time', {'xoffset': -60, 'key': 'out top center vertical', 'xtics': 40}),\n ]\n\n term = 'pdf size 1.5, 1.75 font \",8\"\\nset ytics format \"\"'\n do_figure(prefix, subfigs, algorithms, metrics, term = [None, None, None, term])\n \nif __name__ == '__main__':\n\n bbb = load_json('bbb.json')\n bbb4k = load_json('bbb4k.json')\n\n os.makedirs('tmp', exist_ok = True)\n os.makedirs('figures', exist_ok = True)\n os.makedirs('stats', exist_ok = True)\n\n figure_12_13()\n","sub_path":"simulation/example/svc/simulate-svc.py","file_name":"simulate-svc.py","file_ext":"py","file_size_in_byte":23765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"191994137","text":"from tkinter import *\n\n\nclass Manual(Toplevel):\n def __init__(self, root, **kwargs):\n Toplevel.__init__(self, master=root, **kwargs)\n self.title(\"Справка\")\n self.resizable(0, 0)\n file = open('manual.txt', 'tr', encoding='utf-8')\n self.file_as_string = file.read()\n file.close()\n\n self.TextBox = Text(self, width=100, height=35, wrap=WORD)\n self.TextBox.grid(row=0, column=0)\n self.TextBox.insert('end', self.file_as_string)\n self.TextBox.config(state=DISABLED)\n\n self.YScrollBar = Scrollbar(self, orient=VERTICAL)\n self.YScrollBar.grid(row=0, column=1, sticky='ns')\n\n self.YScrollBar.configure(command=self.TextBox.yview)\n 
self.TextBox.configure(yscrollcommand=self.YScrollBar.set)\n","sub_path":"manual.py","file_name":"manual.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"620233743","text":"from __future__ import division\nfrom joblib import load, dump\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\nspenc_dir = '/home/mboos/SpeechEncoding/'\n\nduration = np.array([902,882,876,976,924,878,1084,676])\n\npatches = load(spenc_dir+'MaThe/'+\\\n 'transformed_data/dict_learning_patches.pkl')\n\n# keep only every 10th sample so there's no overlap between the patches\npatches = patches[::10].copy()\n\n# the length of the movie segments without the transition TRs \n# (like they are saved in patches)\nmovieseg_duration = duration[:]\nmovieseg_duration[0] -= 8\nmovieseg_duration[-1] -= 8\nmovieseg_duration[1:-1] -= 16\n\nmvcs = np.cumsum(movieseg_duration)\n\n# we need to remove the last last 2s of the second to last stimulus\nto_delete = (mvcs[-2]-2)*10 + np.arange(20)\n\npatches = np.delete(patches, to_delete, axis=0)\n# shape of TR samples\n# note: column ordering is now oldest --> newest in steps of 50\npatches = np.reshape(patches, (-1, 200*20))\n\nstrides = (patches.strides[0],) + patches.strides\n\n# rolling window of length 4 samples\nshape = (patches.shape[0] - 4 + 1, 4, patches.shape[1])\n\npatches = np.lib.stride_tricks.as_strided(patches[::-1,:].copy(),\n shape=shape,\n strides=strides)[::-1, :, :]\n\npatches = np.reshape(patches, (patches.shape[0], -1))\n\n# we kick out the most recent sample\npatches = patches[:, :-4000]\n\npatches = StandardScaler().fit_transform(patches)\n\ndump(patches,\n spenc_dir+'MaThe/prepro/dict_stimuli.pkl')\n\n","sub_path":"prepare_dict_splits.py","file_name":"prepare_dict_splits.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"74259943","text":"import numpy as np\nfrom matplotlib.lines import Line2D\nfrom matplotlib.artist import Artist\n\nfrom ifs_operators_plot import * \n\nfrom matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n\nfrom ifs_properties_plot import PropertiesIFS, PropertiesPath\n\n\nclass TriangularInteractorBasic(object):\n line_active__ = 0\n index_active__ = 1\n# \n# slider_length__ = 0.1\n# slider_hight__ = 0.015\n# \n# button_length__ = 0.1\n# button_height__ = 0.1\n\n def __init__(self, ax01, ax02, axlines, widgets,\n epsilon=0.02):\n \n self._epsilon = epsilon # max pixel distance to count as a vertex hit\n\n\n\n self.axlines = axlines\n\n self.ax02 = ax02\n for prop_ifs in self.axlines[self.ax02]:\n prop_ifs.init_default(ax02)\n\n self.ax01 = ax01\n for prop_ifs in self.axlines[self.ax01]:\n prop_ifs.init_default(ax01)\n\n ### (line_number, active idx in that line)\n self.active_lines_idx = {self.ax01: [self.axlines[self.ax01][0], None],\n self.ax02: [self.axlines[self.ax02][0], None]}\n\n self.ax_active = self.ax01\n\n canvas = self.ax01.figure.canvas\n \n self.canvas = canvas\n self.mpl_connect_init()\n \n\n self.colors_ifs = { }\n for ax, properties in self.axlines.items():\n colors_map = {i: prop.holder.get_color() \\\n for i, prop in enumerate(properties)}\n self.colors_ifs[ax] = colors_map \n\n \n self.widgets = widgets\n self.widgets.canvas = self.canvas \n self.recreate_widgets()\n \n self.ax01.set_aspect('equal', 'datalim')\n self.ax02.set_aspect('equal', 'datalim')\n\n def mpl_connect_init(self):\n 
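# prepare_dict_splits.py above builds its rolling 4-sample window with
# np.lib.stride_tricks.as_strided, which silently reads garbage if the shape or
# strides are wrong. Since NumPy 1.20, sliding_window_view produces the same
# windows safely; a small shape check of the idea:
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

x = np.arange(12).reshape(6, 2)                    # 6 samples, 2 features
windows = sliding_window_view(x, window_shape=4, axis=0)
print(windows.shape)                               # (3, 2, 4): 3 windows of 4 samples
flat = windows.transpose(0, 2, 1).reshape(len(windows), -1)
print(flat.shape)                                  # (3, 8): one flattened window per row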
self.canvas.mpl_connect('draw_event',\n self.draw_callback)\n self.canvas.mpl_connect('button_press_event',\n self.button_press_callback)\n self.canvas.mpl_connect('key_press_event',\n self.key_press_callback)\n self.canvas.mpl_connect('button_release_event',\n self.button_release_callback)\n self.canvas.mpl_connect('motion_notify_event',\n self.motion_notify_callback)\n\n# \n# @property\n# def _prop_idx_active(self):\n# return self.active_lines_idx.get(self.ax_active, [None,None])\n\n\n\n def draw_callback(self, event):\n# print(vars(event))\n# \n #self.ax01.draw_artist(self.poly)\n# map(lambda ann: self.ax01.draw_artist(ann), self.marker_ann)\n\n for prop_ifs in self.axlines[self.ax01]:\n prop_ifs.draw_holder_annotations(self.ax01)\n \n for prop_ifs in self.axlines[self.ax02]:\n prop_ifs.draw_holder_annotations(self.ax02)\n\n if self.ax_active is not None:\n self.canvas.blit(self.ax_active.bbox) \n self.background = \\\n self.canvas.copy_from_bbox(self.ax_active.figure.bbox)\n\n\n def motion_notify_callback(self, event):\n# print('on mouse movement')\n if event.inaxes is None:\n return\n if event.button != 1:\n return\n\n if event.inaxes in self.active_lines_idx.keys():\n# print('on mouse movement')\n self.ax_active = event.inaxes\n \n prop_ifs, idx_act = self.active_lines_idx[self.ax_active]\n \n if not prop_ifs.holder.get_visible():\n print(\"visible..\")\n return\n if idx_act is None:\n print('idx_act %d' % -1 if idx_act is None else idx_act)\n return\n\n # The drop & drag point should stay\n # within the triangular area\n\n xdata = min(max(0.0, event.xdata), 1.0) \n ydata = min(max(0.0, event.ydata), 1.0) \n print('on mouse movement')\n self.update_holder_annotation(prop_ifs, idx_act, xdata, ydata)\n self.canvas.restore_region(self.background)\n\n prop_ifs.draw_holder_annotations(self.ax_active)\n\n self.canvas.blit(self.ax_active.bbox)\n\n\n def update_holder_annotation(self, prop_ifs, idx_act, xdata, ydata):\n# print(\"..in update holder annotations...\")\n if xdata + ydata >= 1.0:\n pos = ((xdata-ydata+1)/2, (ydata-xdata+1)/2)\n else:\n pos = (xdata,ydata)\n \n line_xy = list(zip(*prop_ifs.get_data_pair())) \n line_xy[idx_act] = pos\n data = list(zip(*line_xy))\n\n prop_ifs.set_data(data[0], data[1]) \n \n \n if prop_ifs.show_ann:\n prop_ifs.set_data_annotations_single(idx_act, pos)\n# annotation = prop_ifs.annotations[idx_act]\n# annotation.xyann = pos\n# annotation.xy = pos\n# annotation.xytext = pos\n\n\n def line_changes(self, line):\n '''\n This method is called whenever the line object is called\n '''\n pass\n\n def poly_changed(self, poly):\n 'this method is called whenever the polygon object is called'\n # only copy the artist props to the line (except visibility)\n vis = self.line1_01.get_visible()\n Artist.update_from(self.line1_01, poly)\n self.line1_01.set_visible(vis) # don't use the poly visibility state\n# self.line.set_linestyle(' ')\n\n def get_ind_under_point(self, xdata, ydata, prop_ifs):\n 'get the index of the vertex under point if within epsilon tolerance'\n\n # display coords\n# print(event.inaxes)\n xy = np.asarray(prop_ifs.get_data_pair())\n print('get the index..')\n print(prop_ifs.get_data_pair())\n print(xy)\n x, y = xy[0], xy[1]\n# print(x)\n# print(y)\n# print(event.xdata, event.ydata)\n# print(event.x, event.y)\n# print(self.ax01.transData.inverted().transform((event.x, event.y)))\n# print(event.xdata, event.ydata)\n\n d = np.sqrt((x - xdata)**2 + (y - ydata)**2)\n indseq = np.nonzero(np.equal(d, np.amin(d)))[0]\n ind = indseq[0]\n \n ind = None if 
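# update_holder_annotation() above keeps dragged points inside the IFS triangle:
# when mu + nu would exceed 1, the point is replaced by its orthogonal projection
# onto the line mu + nu = 1. That projection, isolated from the plotting code:
def clamp_to_triangle(x, y):
    if x + y >= 1.0:
        return ((x - y + 1) / 2, (y - x + 1) / 2)  # nearest point on x + y = 1
    return (x, y)

print(clamp_to_triangle(0.9, 0.8))  # (0.55, 0.45) -- sums to 1
print(clamp_to_triangle(0.2, 0.3))  # (0.2, 0.3) -- already inside, unchanged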
(d[ind] >= self._epsilon) else ind\n return ind\n\n def recreate_widgets(self):\n print(\"in reacreate widgets...\")\n self.widgets.colors_ifs = self.colors_ifs[self.ax_active]\n prop_ifs, _ = self.active_lines_idx[self.ax_active]\n idx = self.axlines[self.ax_active].index(prop_ifs)\n \n self.widgets.recreate_widgets(#prop_ifs,\n idx,\n self.axlines[self.ax_active],\n self.active_lines_idx[self.ax_active])\n if prop_ifs != self.widgets.prop_ifs:\n self.active_lines_idx[self.ax_active] =[self.widgets.active_prop[0],\n None]\n\n if not self.widgets.active_prop[0].holder.get_visible():\n self.active_lines_idx[self.ax_active][self.index_active__] = None\n\n print(\"active prop %s\" % self.ax_active)\n print(\"active prop %s\" % self.active_lines_idx[self.ax_active][0].label)\n print(\"widgets prop %s\" % self.widgets.prop_ifs.label)\n print(\"-------------\")\n\n\n def button_press_callback(self, event):\n 'whenever a mouse button is pressed'\n print('button_pres - event in axis')\n print(event.inaxes)\n\n if event.inaxes is None:\n return\n if event.button != 1:\n return\n\n if event.inaxes == self.widgets.rax_activeifs:\n self.recreate_widgets()\n\n if event.inaxes in self.active_lines_idx.keys():\n if self.ax_active != event.inaxes:\n self.ax_active = event.inaxes\n self.recreate_widgets()\n \n\n# self.refresh_widgets()\n prop_ifs, _ = self.active_lines_idx[self.ax_active]\n\n if not prop_ifs.holder.get_visible():\n return\n\n idx_active = self.get_ind_under_point(event.xdata,\n event.ydata,\n prop_ifs)\n print('button press callback: idx = %d' % -1 if idx_active is None else idx_active)\n self.active_lines_idx[self.ax_active][self.index_active__] =\\\n idx_active\n\n\n def button_release_callback(self, event):\n 'whenever a mouse button is released'\n\n if event.button != 1:\n return\n\n if self.ax_active in self.active_lines_idx.keys():\n prop_ifs, idx_act = self.active_lines_idx[self.ax_active]\n \n if not prop_ifs.holder.get_visible():\n return\n# print(\"active prop %s\" % self.ax_active)\n# print(\"active prop %s\" % self.active_lines_idx[self.ax_active][0].label)\n \n self.active_lines_idx[self.ax_active][self.index_active__] = None\n\n if self.ax_active == self.ax01:\n prop1_ifs01 = self.axlines[self.ax01][0]\n prop1_ifs02 = self.axlines[self.ax01][1]\n \n# mus, nus = supStd(prop1_ifs01.line.get_data(),\n# prop1_ifs02.line.get_data())\n# print(prop1_ifs01.holder.get_data())\n mus, nus = incGeneral(prop1_ifs01.holder.get_data(),\n 0.6, 0.2, 0.5)\n\n prop2_ifs01 = self.axlines[self.ax02][0]\n prop2_ifs01.holder.set_data(mus,nus)\n prop2_ifs01.set_data_annotations(list(zip(mus,nus)))\n\n\n self.canvas.draw()\n\n\n\n\n def key_press_callback(self, event):\n 'whenever a key is pressed'\n print('## key press callback')\n print('nothing doing...')\n print(type(event.inaxes))\n print(event.inaxes)\n print(event.xdata, event.ydata)\n print(event.x, event.y)\n print(event)\n \n# if not event.inaxes:\n# return\n# \n# if event.key == 't':\n# self.update_show_components(self._showverts)\n# self.check_components.set_active(0)\n# # self._flip_markers()\n# if event.key == '-':\n# self.update_show_components(self._showedges)\n# self.check_components.set_active(1)\n# \n# if event.key == 'a':\n# self.update_show_components(self._showann)\n# self.check_components.set_active(2)\n\n\n self.canvas.draw()\n\n\n###################################################################\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n from intuitionistic_fuzzy_set import *\n from universal_set 
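# get_ind_under_point() above finds the vertex nearest the click and rejects it
# if it is farther than epsilon. The same hit test with np.argmin in place of
# the nonzero/equal/amin chain, stripped of the Matplotlib bookkeeping:
import numpy as np

def nearest_index(xs, ys, x, y, epsilon=0.02):
    d = np.hypot(np.asarray(xs) - x, np.asarray(ys) - y)
    ind = int(np.argmin(d))
    return ind if d[ind] < epsilon else None

print(nearest_index([0.1, 0.5, 0.9], [0.1, 0.5, 0.9], 0.51, 0.5))  # 1
print(nearest_index([0.1, 0.5, 0.9], [0.1, 0.5, 0.9], 0.3, 0.3))   # None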
import UniversalSet\n from ifs_2Dplot import *\n from ifs_operators_topo import *\n \n \n\n# fig, ax = plt.subplots()\n\n universe = UniversalSet(set(range(50)))\n\n\n\n fig = plt.figure()\n plt.subplots_adjust(hspace=0.1, wspace=0.1)\n \n\n\n ifs01 = IFS.random(universe, 1, randseed=1)\n\n indices, mus, nus, pis = ifs01.elements_split()\n \n ax = plt.subplot2grid((4,6), (0,0), rowspan=3, colspan=3)\n ax_01, line2d1_01 = plot_triangular_(ax,\n mus, nus, ifs01.get_range(), bins=19,\n rotation={'x':45, 'y':0})\n\n line2d1_01.set_linestyle(' ')\n line2d1_01.set_markersize(5)\n line2d1_01.set_markerfacecolor('r')\n line2d1_01.set_color('r')\n# line2d1_01.set_marker(marker=r'$\\odot$')\n line2d1_01.set_marker(marker=r'o') \n line2d1_01.set_zorder(15)\n\n\n ifs01 = IFS.random(universe, 1, randseed=2)\n\n indices, mus, nus, pis = ifs01.elements_split()\n\n colors={'mu':'b', 'nu':'g', 'elem':'r'}\n ax_01, line2d1_02 = plot_triangular_(ax,\n mus, nus,\n ifs01.get_range(),\n colors=colors,\n bins=19,\n rotation={'x':45, 'y':0})\n\n line2d1_02.set_linestyle(' ')\n line2d1_02.set_markersize(5)\n line2d1_02.set_markerfacecolor('g')\n line2d1_02.set_color('g')\n line2d1_02.set_marker(marker=r'o')\n line2d1_02.set_visible(True)\n line2d1_02.set_zorder(15)\n \n\n\n# line2d_01.set_alpha(0.5)\n# fontsize = 12\n# linepoints = list(zip(*line2d_01.get_data())) \n# marker_ann = [ax_01.annotate(str(idx), pt, fontsize=fontsize) \n# for idx, pt in enumerate(linepoints) ]\n# \n# \n# prop_ifs01 = PlotPropertiesIFS( ifsname='ifs01',\n# line=line2d_01.copy(),\n# annotations=marker_ann.copy(),\n# radius=5,\n# alpha_marker=0.5, \n# labels_size=12, \n# show_ifs=True, show_edges=True, show_ann=True)\n# \n# # \n ifs02 = IFS.random(universe, 1, randseed=3)\n indices, mus, nus, pis = ifs02.elements_split()\n \n ax02 = plt.subplot2grid((4,6), (0,3), rowspan=3, colspan=3)\n \n\n \n _, line2d2_02 = plot_triangular_(ax02,\n mus, nus, ifs02.get_range(), bins=19,\n rotation={'x':45, 'y':0})\n\n ax02.get_yaxis().tick_right()\n ax02.set_ylabel('')\n\n line2d2_02.set_linestyle('-')\n line2d2_02.set_markersize(5)\n line2d2_02.set_markerfacecolor('c')\n line2d2_02.set_color('c')\n line2d2_02.set_marker(marker=r'o')\n\n# line2d_01.set_markersize(20)\n# line2d_01.set_markerfacecolor('r')\n# line2d_01.set_marker(marker=r'$\\odot$')\n from widgets_basic import WidgetsBasic\n\n widgets = WidgetsBasic(None)\n\n axlines = {ax_01:[PropertiesIFS(label='ifs01_ax01', holder=line2d1_01),\n PropertiesIFS(label='ifs02_ax01', holder=line2d1_02)],\n ax02:[PropertiesIFS(label='ifs01_ax02', holder=line2d2_02)]\n }\n\n \n p = TriangularInteractorBasic(ax_01, ax02, axlines, widgets)\n \n# p = TriangularInteractor(ax_01, line2d_01)\n \n plt.show()\n \n a =10\n","sub_path":"test_interaction_path.py","file_name":"test_interaction_path.py","file_ext":"py","file_size_in_byte":14218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"614944616","text":"import requests\n\n\ndef save_issues(db, issues):\n if \"milestones\" not in db.table_names():\n db[\"milestones\"].create({\"id\": int}, pk=\"id\")\n for original in issues:\n # Ignore all of the _url fields\n issue = {\n key: value for key, value in original.items() if not key.endswith(\"url\")\n }\n # Add repo key\n issue[\"repo\"] = original[\"repository_url\"].split(\n \"https://api.github.com/repos/\"\n )[1]\n # Pull request can be flattened to just their URL\n if issue.get(\"pull_request\"):\n issue[\"pull_request\"] = 
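# save_issues() above trims GitHub's verbose payloads with a dict comprehension
# that drops every field ending in "url". The pattern in isolation, on a
# made-up record:
original = {"id": 1, "title": "Bug", "url": "x", "comments_url": "y", "state": "open"}
issue = {k: v for k, v in original.items() if not k.endswith("url")}
print(issue)  # {'id': 1, 'title': 'Bug', 'state': 'open'}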
issue[\"pull_request\"][\"url\"].split(\n \"https://api.github.com/repos/\"\n )[1]\n # Extract user\n issue[\"user\"] = save_user(db, issue[\"user\"])\n labels = issue.pop(\"labels\")\n # Extract milestone\n if issue[\"milestone\"]:\n issue[\"milestone\"] = save_milestone(db, issue[\"milestone\"])\n # For the moment we ignore the assignees=[] array but we DO turn assignee\n # singular into a foreign key reference\n issue.pop(\"assignees\", None)\n if issue[\"assignee\"]:\n issue[\"assignee\"] = save_user(db, issue[\"assignee\"])\n # Add a type field to distinguish issues from pulls\n issue[\"type\"] = \"pull\" if issue.get(\"pull_request\") else \"issue\"\n # Insert record\n table = db[\"issues\"].upsert(\n issue,\n pk=\"id\",\n foreign_keys=[\n (\"user\", \"users\", \"id\"),\n (\"assignee\", \"users\", \"id\"),\n (\"milestone\", \"milestones\", \"id\"),\n ],\n alter=True,\n )\n # m2m for labels\n for label in labels:\n table.m2m(\"labels\", label, pk=\"id\")\n\n\ndef save_user(db, user):\n # Remove all url fields except avatar_url and html_url\n to_save = {\n key: value\n for key, value in user.items()\n if (key in (\"avatar_url\", \"html_url\") or not key.endswith(\"url\"))\n }\n # If this user was nested in repo they will be missing several fields\n # so fill in 'name' from 'login' so Datasette foreign keys display\n if to_save.get(\"name\") is None:\n to_save[\"name\"] = to_save[\"login\"]\n return db[\"users\"].upsert(to_save, pk=\"id\", alter=True).last_pk\n\n\ndef save_milestone(db, milestone):\n milestone = dict(milestone)\n milestone[\"creator\"] = save_user(db, milestone[\"creator\"])\n milestone.pop(\"labels_url\", None)\n milestone.pop(\"url\", None)\n return (\n db[\"milestones\"]\n .upsert(\n milestone, pk=\"id\", foreign_keys=[(\"creator\", \"users\", \"id\")], alter=True\n )\n .last_pk\n )\n\n\ndef save_issue_comment(db, comment):\n comment = dict(comment)\n comment[\"user\"] = save_user(db, comment[\"user\"])\n # We set up a 'issue' foreign key, but only if issue is in the DB\n comment[\"issue\"] = None\n issue_url = comment[\"issue_url\"]\n bits = issue_url.split(\"/\")\n user_slug, repo_slug, issue_number = bits[-4], bits[-3], bits[-1]\n # Is the issue in the DB already?\n issue_rows = list(\n db[\"issues\"].rows_where(\n \"number = :number and repo = :repo\",\n {\"repo\": \"{}/{}\".format(user_slug, repo_slug), \"number\": issue_number},\n )\n )\n if len(issue_rows) == 1:\n comment[\"issue\"] = issue_rows[0][\"id\"]\n comment.pop(\"url\", None)\n if \"url\" in comment.get(\"reactions\", {}):\n comment[\"reactions\"].pop(\"url\")\n last_pk = (\n db[\"issue_comments\"]\n .upsert(comment, pk=\"id\", foreign_keys=(\"user\", \"issue\"), alter=True)\n .last_pk\n )\n return last_pk\n\n\ndef fetch_repo(repo, token=None):\n headers = make_headers(token)\n owner, slug = repo.split(\"/\")\n url = \"https://api.github.com/repos/{}/{}\".format(owner, slug)\n return requests.get(url, headers=headers).json()\n\n\ndef save_repo(db, repo):\n # Remove all url fields except html_url\n to_save = {\n key: value\n for key, value in repo.items()\n if (key == \"html_url\") or not key.endswith(\"url\")\n }\n to_save[\"owner\"] = save_user(db, to_save[\"owner\"])\n to_save[\"license\"] = save_license(db, to_save[\"license\"])\n repo_id = (\n db[\"repos\"]\n .upsert(to_save, pk=\"id\", foreign_keys=((\"owner\", \"users\", \"id\"),), alter=True)\n .last_pk\n )\n return repo_id\n\n\ndef save_license(db, license):\n if license is None:\n return None\n return db[\"licenses\"].upsert(license, 
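# Every save_* helper above funnels records through sqlite-utils' upsert(),
# keyed on the GitHub id, so re-running an import updates rows instead of
# duplicating them. A self-contained demo against an in-memory database
# (requires the sqlite-utils package):
import sqlite_utils

db = sqlite_utils.Database(memory=True)
db["users"].upsert({"id": 1, "login": "alice", "name": "Alice"}, pk="id")
db["users"].upsert({"id": 1, "login": "alice", "name": "Alice B."}, pk="id")
print(list(db["users"].rows))  # one row, with the updated name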
pk=\"key\").last_pk\n\n\ndef ensure_repo_fts(db):\n if \"repos_fts\" not in db.table_names():\n db[\"repos\"].enable_fts([\"name\", \"description\"], create_triggers=True)\n\n\ndef ensure_releases_fts(db):\n if \"releases_fts\" not in db.table_names():\n db[\"releases\"].enable_fts([\"name\", \"body\"], create_triggers=True)\n\n\ndef ensure_foreign_keys(db):\n for expected_key in ((\"repos\", \"license\", \"licenses\", \"key\"),):\n if expected_key not in db[expected_key[0]].foreign_keys:\n db[expected_key[0]].add_foreign_key(*expected_key[1:])\n\n\ndef fetch_issues(repo, token=None, issue=None):\n headers = make_headers(token)\n if issue is not None:\n url = \"https://api.github.com/repos/{}/issues/{}\".format(repo, issue)\n yield from [requests.get(url).json()]\n else:\n url = \"https://api.github.com/repos/{}/issues?state=all&filter=all\".format(repo)\n for issues in paginate(url, headers):\n yield from issues\n\n\ndef fetch_issue_comments(repo, token=None, issue=None):\n assert \"/\" in repo\n headers = make_headers(token)\n # Get reactions:\n headers[\"Accept\"] = \"application/vnd.github.squirrel-girl-preview\"\n path = \"/repos/{}/issues/comments\".format(repo)\n if issue is not None:\n path = \"/repos/{}/issues/{}/comments\".format(repo, issue)\n url = \"https://api.github.com{}\".format(path)\n for comments in paginate(url, headers):\n yield from comments\n\n\ndef fetch_releases(repo, token=None, issue=None):\n headers = make_headers(token)\n url = \"https://api.github.com/repos/{}/releases\".format(repo)\n for releases in paginate(url, headers):\n yield from releases\n\n\ndef fetch_all_starred(username=None, token=None):\n assert username or token, \"Must provide username= or token= or both\"\n headers = make_headers(token)\n headers[\"Accept\"] = \"application/vnd.github.v3.star+json\"\n if username:\n url = \"https://api.github.com/users/{}/starred\".format(username)\n else:\n url = \"https://api.github.com/user/starred\"\n for stars in paginate(url, headers):\n yield from stars\n\n\ndef fetch_all_repos(username=None, token=None):\n assert username or token, \"Must provide username= or token= or both\"\n headers = make_headers(token)\n # Get topics for each repo:\n headers[\"Accept\"] = \"application/vnd.github.mercy-preview+json\"\n if username:\n url = \"https://api.github.com/users/{}/repos\".format(username)\n else:\n url = \"https://api.github.com/user/repos\"\n for repos in paginate(url, headers):\n yield from repos\n\n\ndef fetch_user(username=None, token=None):\n assert username or token, \"Must provide username= or token= or both\"\n headers = make_headers(token)\n if username:\n url = \"https://api.github.com/users/{}\".format(username)\n else:\n url = \"https://api.github.com/user\"\n return requests.get(url, headers=headers).json()\n\n\ndef paginate(url, headers=None):\n while url:\n response = requests.get(url, headers=headers)\n try:\n url = response.links.get(\"next\").get(\"url\")\n except AttributeError:\n url = None\n yield response.json()\n\n\ndef make_headers(token=None):\n headers = {}\n if token is not None:\n headers[\"Authorization\"] = \"token {}\".format(token)\n return headers\n\n\ndef save_stars(db, user, stars):\n user_id = save_user(db, user)\n\n for star in stars:\n starred_at = star[\"starred_at\"]\n repo = star[\"repo\"]\n repo_id = save_repo(db, repo)\n db[\"stars\"].upsert(\n {\"user\": user_id, \"repo\": repo_id, \"starred_at\": starred_at},\n pk=(\"user\", \"repo\"),\n foreign_keys=(\"user\", \"repo\"),\n )\n\n\ndef save_releases(db, 
releases, repo_id=None):\n foreign_keys = [(\"author\", \"users\", \"id\")]\n if repo_id:\n foreign_keys.append((\"repo\", \"repos\", \"id\"))\n for original in releases:\n # Ignore all of the _url fields except html_url\n issue = {\n key: value\n for key, value in original.items()\n if key == \"html_url\" or not key.endswith(\"url\")\n }\n issue[\"repo\"] = repo_id\n issue[\"author\"] = save_user(db, issue[\"author\"])\n db[\"releases\"].upsert(issue, pk=\"id\", foreign_keys=foreign_keys, alter=True)\n","sub_path":"github_to_sqlite/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"45122652","text":"from flask import Blueprint, redirect, render_template, url_for, flash\nfrom sqlalchemy import and_, not_\nfrom sqlalchemy.sql import func, case\n\nimport config\nfrom app.core.env import DB\nfrom app.models.datas import BibDatasTypes, TReleasedDatas\nfrom app.models.dynamic_content import TDynamicPages, BibDynamicPagesCategory\nfrom app.models.ref_geo import BibAreasTypes, LAreas, LAreasTypeSelection\nfrom app.models.territory import MVTerritoryGeneralStats, MVAreaNtileLimit\nimport re\n\nrendered = Blueprint(\"rendered\", __name__)\n\n\ndef get_legend_classes(type):\n query = MVAreaNtileLimit.query.filter_by(type=type).order_by(MVAreaNtileLimit.ntile)\n ntiles = query.all()\n datas = []\n for r in ntiles:\n datas.append(r.as_dict())\n return datas\n\n\n@rendered.context_processor\ndef global_variables():\n values = {}\n values[\"site_name\"] = config.SITE_NAME\n values[\"site_desc\"] = config.SITE_DESC\n values[\"default_grid\"] = config.DEFAULT_GRID\n values[\"default_buffer\"] = config.DEFAULT_BUFFER\n values[\"special_pages\"] = (\n DB.session.query(TDynamicPages.link_name, TDynamicPages.url)\n .filter(TDynamicPages.is_active == True)\n .filter(TDynamicPages.navbar_link == True)\n .filter(TDynamicPages.url != None)\n .order_by(TDynamicPages.navbar_link_order.asc())\n .all()\n )\n\n categories = (\n DB.session.query(\n BibDynamicPagesCategory.category_name, BibDynamicPagesCategory.category_desc\n )\n .join(\n TDynamicPages,\n TDynamicPages.id_category == BibDynamicPagesCategory.id_category,\n )\n .all()\n )\n\n dynamic_pages = []\n for c in categories:\n c_content = c._asdict()\n c_content[\"pages\"] = []\n pages = (\n DB.session.query(\n TDynamicPages.link_name,\n TDynamicPages.id_category,\n TDynamicPages.link_name,\n )\n .filter(TDynamicPages.id_category == c.id_category)\n .all()\n )\n for p in pages:\n c_content[\"pages\"].append(p._asdict())\n\n values[\"dynamic_pages\"] = dynamic_pages\n return values\n\n\n@rendered.route(\"/\")\ndef index():\n bonus_block = (\n DB.session.query(\n TDynamicPages.title, TDynamicPages.short_desc, TDynamicPages.content\n )\n .filter(TDynamicPages.url == \"home-bonus\")\n .first()\n )\n\n print(\"bonus\", bonus_block)\n # re_grid_codes = r\"M(\\d+)\"\n\n return render_template(\"home.html\", name=config.SITE_NAME, bonus_block=bonus_block)\n\n\n@rendered.route(\"/<string:url>\")\ndef special_pages(url):\n \"\"\"\n\n :return:\n \"\"\"\n page = TDynamicPages.query.filter(TDynamicPages.url == url).first()\n return render_template(\"dynamic_page.html\", page=page)\n\n\n@rendered.route(\"/datas\")\ndef datas():\n qdatas = DB.session.query(\n BibDatasTypes.type_desc,\n BibDatasTypes.type_name,\n BibDatasTypes.type_protocol,\n TReleasedDatas.data_desc,\n TReleasedDatas.data_name,\n TReleasedDatas.data_type,\n TReleasedDatas.data_url,\n 
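# the outer join below keeps released datasets even when their type row is missing\n 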
).join(BibDatasTypes, TReleasedDatas.id_type == BibDatasTypes.id_type, isouter=True)\n datas = qdatas.all()\n intro = (\n DB.session.query(\n TDynamicPages.title, TDynamicPages.short_desc, TDynamicPages.content\n )\n .filter(TDynamicPages.url == \"datas-intro\")\n .first()\n )\n return render_template(\"datas.html\", datas=datas, intro=intro)\n\n\n@rendered.route(\"/territory/<string:type_code>/<string:area_code>\")\ndef territory(type_code, area_code):\n \"\"\"\n \"\"\"\n try:\n q_area_info = (\n DB.session.query(\n BibAreasTypes.type_code,\n BibAreasTypes.type_name,\n BibAreasTypes.type_desc,\n LAreas.id_area,\n LAreas.area_name,\n LAreas.area_code,\n )\n .join(LAreas, LAreas.id_type == BibAreasTypes.id_type, isouter=True)\n .filter(\n and_(BibAreasTypes.type_code == type_code.upper()),\n LAreas.area_code == area_code,\n )\n )\n area_info = q_area_info.one()\n\n # Retrieve general stats\n q_gen_stats = DB.session.query(MVTerritoryGeneralStats).filter(\n MVTerritoryGeneralStats.id_area == area_info.id_area\n )\n gen_stats = q_gen_stats.one()\n\n # generate Legend Dict\n legend_dict = {}\n for type in DB.session.query(MVAreaNtileLimit.type).distinct():\n legend_dict[type[0]] = get_legend_classes(type[0])\n\n intro = (\n DB.session.query(\n TDynamicPages.title, TDynamicPages.short_desc, TDynamicPages.content\n )\n .filter(TDynamicPages.url == \"territory-intro\")\n .first()\n )\n\n return render_template(\n \"territory/_main.html\",\n area_info=area_info,\n gen_stats=gen_stats,\n legend_dict=legend_dict,\n intro=intro,\n )\n\n except Exception as e:\n flash(\"Error: {}\".format(e))\n print(\"<territory> ERROR: \", e)\n return redirect(url_for(\"rendered.index\"))\n","sub_path":"app/core/frontend/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"223739629","text":"def process_ratings(sheet_results):\n result = {}\n for person in sheet_results:\n result[_get_name(person)] = _process_person(person)\n return result\n\n\ndef _process_person(person):\n # simple check if person structure is valid\n if len(person) % 2 == 1:\n print(\"Invalid person - unexpected amount of columns\")\n return {}\n\n result = {}\n person = person[2:] # trim out name and group\n for n in range(len(person) // 2):\n try:\n result[person[n * 2]] = float(person[n * 2 + 1])\n except ValueError:\n print(\"Invalid value \\\"{}\\\" for movie {}\".format(person[n * 2 + 1], person[n * 2]))\n\n return result\n\n\ndef _get_name(person):\n # name is stored in the first column\n return person[0]\n","sub_path":"ratings_adapter.py","file_name":"ratings_adapter.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"254041783","text":"\"\"\" MarketBeat Model \"\"\"\n__docformat__ = \"numpy\"\n\nfrom typing import List\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom gamestonk_terminal.helper_funcs import get_user_agent\n\n\ndef get_ratings_html(url_ratings: str) -> str:\n \"\"\"Wraps HTTP requests.get for testability\n\n Parameters\n ----------\n url_ratings : str\n Ratings URL\n\n Returns\n -------\n str\n HTML page of ratings\n \"\"\"\n ratings_html = requests.get(\n url_ratings, headers={\"User-Agent\": get_user_agent()}\n ).text\n\n return ratings_html\n\n\ndef get_ratings() -> List[dict]:\n \"\"\"Returns a list of ratings\n\n Parameters\n ----------\n None\n\n Returns\n -------\n list\n Ratings list\n 
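Each rating dict has the keys: company, ticker, action, brokerage,\n analyst, current_price, target_price and rate.\n 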
\"\"\"\n\n ratings = []\n url_ratings = \"https://www.marketbeat.com/ratings/\"\n\n text_soup_ratings = BeautifulSoup(\n get_ratings_html(url_ratings),\n \"lxml\",\n )\n\n for stock_rows in text_soup_ratings.findAll(\"tr\"):\n tds = stock_rows.findAll(\"td\")\n if len(tds) == 8:\n rating = {}\n # company\n td = tds[0]\n company = td.find(\"div\", {\"class\": \"title-area\"}).text\n ticker = td.find(\"div\", {\"class\": \"ticker-area\"}).text\n rating[\"company\"] = company\n rating[\"ticker\"] = ticker\n # action\n td = tds[1]\n action = td.text\n rating[\"action\"] = action\n # brokerage\n td = tds[2]\n brokerage = td.find(\"a\").text\n rating[\"brokerage\"] = brokerage\n # current price\n td = tds[3]\n if td.find(\"span\"):\n td.span.extract()\n analyst = td.text\n rating[\"analyst\"] = analyst\n # target price\n td = tds[4]\n current_price = td.text\n rating[\"current_price\"] = current_price\n # new rating\n td = tds[5]\n target_price = td.text\n rating[\"target_price\"] = target_price\n # impact on price\n td = tds[6]\n if td.find(\"a\"):\n rate = td.find(\"a\").text\n else:\n rate = td.text\n rating[\"rate\"] = rate\n ratings.append(rating)\n\n return ratings\n","sub_path":"gamestonk_terminal/stocks/discovery/marketbeat_model.py","file_name":"marketbeat_model.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"17235945","text":"import socket,random,urllib.parse,urllib.request,json,http.client,gzip\r\nrandomUserAgents=[\"You should really make an API\",\"Mozilla/420blazeit\",\"Mozilla/69 HAR HAR\",\"Not Guggle Chreme\",\"We need some bangin tunes on in here\",\"I'm very bored\",\"Your service is rather useful. Thankyou\",\"Gotta make dat Steam wallet money\",\"I'm very poor\",\"Not Java\",\"The Greatest Web Browser\",\"Ur mum har har\",\"Shite\",\"Shity Shity Shite Shite\",\"Goddamit\",\"Crappy Browser\",\"Mozilla/FIRE!fox\",\"Shouldve used a fyrewall.\",\"Please don't block this.\",\"Definitely a web browser\"]\r\nheads={\"Connection\":\"close\",\"User-Agent\":\"Mozilla/420\"}\r\nconn=http.client.HTTPConnection(\"steamcommunity.com\")\r\nconn.request(\"GET\",\"/market/itemordershistogram?country=GB&language=english¤cy=2&item_nameid=1115503&two_factor=0\",headers=heads)\r\nresp=conn.getresponse()\r\ndata=str(resp.read().decode(\"utf-8\")).replace(\"\\u00a3\",\"\")\r\nconn.close()\r\nprint(data)\r\n\r\ndef Dechunkify(body):\r\n actualBody=bytearray()\r\n hexAmount=\"\"\r\n hexTime=0\r\n bodyLength=len(body)\r\n i=0\r\n while i<bodyLength:\r\n if(i==hexTime):\r\n hexAmount=\"\"\r\n if(i>=hexTime):\r\n if(body[i]==ord('\\r') and body[i+1]==ord('\\n')):\r\n if hexAmount==\"0\":break\r\n hexTime=i+3+int(hexAmount,16)\r\n i+=1\r\n else:\r\n hexAmount+=chr(body[i])\r\n else:\r\n actualBody.append(body[i])\r\n i+=1\r\n return actualBody[:-2]\r\nsceip=\"212.48.114.218\"\r\nsteamip=\"2.16.238.67\"\r\ndef GetExchangeSessionCookie():\r\n heads={\"Accept\":\"text/html\",\"Host\":\"www.steamcardexchange.net\",\"Connection\":\"close\",\"User-Agent\":random.choice(randomUserAgents)}\r\n h1=http.client.HTTPConnection(sceip)\r\n h1.request(\"GET\",\"/index.php\",headers=heads)\r\n headers=h1.getresponse().getheaders()\r\n h1.close()\r\n for h in headers:\r\n if h[0]==\"Set-Cookie\":return h[1]\r\n return None\r\ndef GetExchangeSession(ignoreMaxAge=False):\r\n rawExchange=GetExchangeSessionCookie()\r\n seshid=None\r\n maxAge=None\r\n if rawExchange:\r\n splet=rawExchange.split(\"; \")\r\n for s in 
splet:\r\n if \"PHPSESSID\" in s:seshid=s.split(\"=\")[1]\r\n elif not ignoreMaxAge and \"Max-Age\" in s:maxAge=int(s.split(\"=\")[1])\r\n if seshid and maxAge:break\r\n if ignoreMaxAge:return seshid\r\n else:return seshid,maxAge\r\ndef DownloadExchangePage(appID,cardAmount,sessionCookie=None):\r\n while not sessionCookie:sessionCookie=GetExchangeSession(True)\r\n heads={\"Accept\":\"text/html\",\"Connection\":\"close\",\"Host\":\"www.steamcardexchange.net\",\"User-Agent\":random.choice(randomUserAgents),\"Cookie\":\"PHPSESSID=\"+sessionCookie}\r\n conn=http.client.HTTPConnection(sceip,80)\r\n conn.connect()\r\n conn.request(\"GET\",\"/index.php?inventorygame-appid-\"+str(appID),headers=heads)\r\n response=conn.getresponse()\r\n dechunked=response.read().decode()\r\n conn.close()\r\n p=0\r\n exchangeInfo=[]\r\n try:\r\n for i in range(cardAmount):\r\n shiteInfo={}\r\n p=dechunked.index(\"Stock:\",p+1)\r\n stock=dechunked[p+7:dechunked.index(\"<\",p)].split(\" \")[0]\r\n shiteInfo[\"stock\"]=int(stock)\r\n p=dechunked.index(\"Worth:\",p+1)\r\n worth=dechunked[p+7:dechunked.index(\"<\",p)]\r\n if \" Credits\" in worth:worth=worth.replace(\" Credits\",\"\")\r\n if worth.lower()==\"overstocked\":worth=0\r\n elif \"not available\" in worth.lower():worth=-1\r\n else:worth=int(worth)\r\n shiteInfo[\"worth\"]=worth\r\n p=dechunked.index(\"Price:\",p+1)\r\n price=dechunked[p+7:dechunked.index(\"<\",p)]\r\n if \" Credits\" in price:price=price.replace(\" Credits\",\"\")\r\n elif \"not available\" in price.lower():price=0\r\n shiteInfo[\"price\"]=int(price)\r\n exchangeInfo.append(shiteInfo)\r\n except ValueError as ve:print(ve)\r\n return exchangeInfo\r\n#http://steamcommunity.com/market/priceoverview/?appid=730&currency=3&market_hash_name=StatTrak%E2%84%A2 M4A1-S | Hyper Beast (Minimal Wear)\r\nclass SteamCard:\r\n def __init__(self,name=None,stock=None,worthCredits=None,priceCredits=None,marketPrice=None):\r\n self.name=name;self.stock=stock;self.worthCredits=worthCredits;self.priceCredits=priceCredits;self.marketPrice=marketPrice\r\nclass SteamCardSet:\r\n def __init__(self,gameID=None,gameName=None):\r\n self.gameID=gameID;self.gameName=gameName;self.gameCards=[]\r\n def addNames(self,*names):\r\n for n in names:\r\n self.gameCards.append(SteamCard(n))\r\n return self\r\n def setExchangeInfo(self,prices=[]):\r\n for i,p in enumerate(prices):\r\n if i<len(self.gameCards):\r\n if p:\r\n gameCard=self.gameCards[i]\r\n gameCard.worthCredits=p[\"worth\"]\r\n gameCard.priceCredits=p[\"price\"]\r\n gameCard.stock=p[\"stock\"]\r\n return self\r\n def updateExchange(self,sessionCookie=None):\r\n self.setExchangeInfo(DownloadExchangePage(self.gameID,len(self.gameCards),sessionCookie))\r\n return self\r\n def updateMarketPrices(self):\r\n conn=http.client.HTTPConnection(steamip,80)\r\n conn.connect()\r\n for i in range(len(self.gameCards)):\r\n heads={\"User-Agent\":random.choice(randomUserAgents),\"Host\":\"steamcommunity.com\"}\r\n if i<len(self.gameCards)-1:heads[\"Connection\"]=\"keep-alive\"\r\n else:heads[\"Connection\"]=\"close\"\r\n conn.request(\"GET\",\"/market/priceoverview?appid=753&currency=1&market_hash_name=\"+str(self.gameID)+\"-\"+urllib.parse.quote(self.gameCards[i].name),headers=heads)\r\n resp=conn.getresponse()\r\n data=resp.read()\r\n for h in resp.getheaders():\r\n if h[0].lower()==\"content-encoding\":data=gzip.decompress(data)\r\n if data:\r\n jsondata=json.JSONDecoder().decode(data.decode())\r\n if jsondata:\r\n if \"success\" in jsondata and jsondata[\"success\"]:\r\n 
self.gameCards[i].marketPrice=float(jsondata[\"lowest_price\"][1:])\r\n conn.close()\r\n return self\r\n def getMarketHashes(self):\r\n marketHashes=[]\r\n for c in self.gameCards:marketHashes.append(str(self.gameID)+\"-\"+urllib.parse.quote(c.name))\r\n return marketHashes\r\n def toString(self):\r\n s=\"\"\r\n for sc in self.gameCards:\r\n if len(s)>0:s+=\"\\n\"\r\n s+=sc.name\r\n return s\r\ndef GetMarketValues(marketHashes):\r\n marketValues=[]\r\n conn=http.client.HTTPConnection(steamip,80)\r\n conn.connect()\r\n setAmount=len(marketHashes)\r\n heads={\"User-Agent\":random.choice(randomUserAgents),\"Host\":\"steamcommunity.com\"}\r\n for i in range(setAmount):\r\n hashAmount=len(marketHashes[i])\r\n setValues=[]\r\n for i2 in range(hashAmount):\r\n if i<setAmount-1:heads[\"Connection\"]=\"keep-alive\"\r\n else:\r\n if i2<hashAmount-1:heads[\"Connection\"]=\"keep-alive\"\r\n else:heads[\"Connection\"]=\"close\"\r\n conn.request(\"GET\",\"/market/priceoverview?appid=753&currency=2&market_hash_name=\"+marketHashes[i][i2],headers=heads)\r\n resp=conn.getresponse()\r\n data=resp.read()\r\n for h in resp.getheaders():\r\n if h[0].lower()==\"content-encoding\":data=gzip.decompress(data)\r\n if data:\r\n jsondata=json.JSONDecoder().decode(data.decode())\r\n if jsondata:\r\n if \"success\" in jsondata and jsondata[\"success\"]:setValues.append(float(jsondata[\"lowest_price\"].strip(\"\\u00a3\")))\r\n else:setValues.append(float(0))\r\n else:setValues.append(float(0))\r\n else:setValues.append(float(0))\r\n marketValues.append(setValues)\r\n conn.close()\r\n return marketValues\r\ncsgoSet=SteamCardSet(730,\"Counter-Strike: Global Offensive\")\r\ncsgoSet.addNames(\"Anarchist\",\"Balkan\",\"FBI\",\"IDF\",\"SWAT\")\r\ntf2Set=SteamCardSet(440)\r\ntf2Set.addNames(\"DEMOMAN (Trading Card)\",\"ENGINEER (Trading Card)\",\"MEDIC (Trading Card)\",\"PYRO (Trading Card)\",\"SCOUT (Trading Card)\",\"SNIPER (Trading Card)\",\"SOLDIER (Trading Card)\",\"SPY (Trading Card)\",\"HEAVY (Trading Card)\")\r\nhl2Set=SteamCardSet(220)\r\nhl2Set.addNames(\"Alyx Vance\",\"G-Man\",\"Gordon Freeman\",\"Gordon & Alyx\",\"Bring the fight to them\",\"Trouble Underground\",\"Respite\",\"City 17 Metrocop\")\r\nsets=[csgoSet,tf2Set,hl2Set]\r\nfor s in sets:\r\n s.updateMarketPrices()\r\n for gc in s.gameCards:\r\n print(gc.marketPrice)\r\n","sub_path":"unsorted_guff/Steam Card Exchanging/DB Generator.py","file_name":"DB Generator.py","file_ext":"py","file_size_in_byte":8573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"74815460","text":"#==================================================================================================\r\n# send_notice.py\r\n# \r\n# This script tests the STL send notice command.\r\n#\r\n# Non-DBMS Specific\r\n#\r\n# Author: Ryan Donahue\r\n# $Header: /Vigilert/Tests/Non-DBMS Specific/send_notice.py 12 4/30/02 10:20a Tom $\r\n#==================================================================================================\r\nimport time\r\nfrom Tests.testapi import *\r\n \r\necho(\"\")\r\necho(\"=================================\")\r\necho(\" Test the send notice command.\")\r\necho(\"=================================\")\r\necho(\"\")\r\n\r\nresetter = VigilertResetApi.Resetter()\r\n# Start the notification agent if it's not started\r\nif not using_vigilert_service():\r\n resetter.StartVigilert()\r\n\r\nif noticesys_open(sessionInfo.host) == VL_ERROR:\r\n echo(\"Error: could not open notice session on host \" + sessionInfo.host)\r\n 
echo(get_noticesys_error ()[1])\r\n\r\nif register_notice(\"foo\") == VL_ERROR:\r\n echo(\"Error: could not register for notice foo.\")\r\n echo(get_noticesys_error ()[1])\r\ntime.sleep(2)\r\n\r\necho(\"Here follow three syntactically bad notices.\")\r\ntl(\"send notice ('This notice is malformed.')\")\r\ntl(\"send notice ''()\")\r\ntl(\"send notice '%s', 'bar'()\")\r\n\r\nimport string\r\n\r\nRECEIPT_TIMEOUT = 4\r\ntl(\"send notice foo('This script was brought to you by the number %d',15)\")\r\nreceive_pending_notice(RECEIPT_TIMEOUT)\r\ntl(\"send notice foo('This script was brought to you by the letters %c, %c and the bang operator %c', 65, 90, 33)\")\r\nreceive_pending_notice(RECEIPT_TIMEOUT)\r\ntl(\"send notice foo('%s','This script was brought to you by this string.')\")\r\nreceive_pending_notice(RECEIPT_TIMEOUT)\r\ntl(\"send notice foo('%v',date('9/27/2001'))\")\r\nreceive_pending_notice(RECEIPT_TIMEOUT)\r\ntl(\"send notice foo('%v %v %v %v %v %v', 1, 1.1, 'One', date('9/27/2001'), datetime('2001-9-27 14:33:30', 'year to second'), interval('2:34:31.54321', 'hour to fraction(5)'))\")\r\nreceive_pending_notice(RECEIPT_TIMEOUT)\r\ntl(\"send notice foo\")\r\nreceive_pending_notice(RECEIPT_TIMEOUT)\r\n\r\n# Cleanup\r\nif unregister_notice(\"foo\") != VL_OK:\r\n echo(\"Error: could not unregister for notice foo.\")\r\n echo(get_noticesys_error ()[1])\r\nnoticesys_close()\r\n\r\nif not using_vigilert_service():\r\n resetter.StopVigilert()\r\n ","sub_path":"Vigilert/Tests/Non-DBMS Specific/send_notice.py","file_name":"send_notice.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"248869539","text":"import json\nimport telebot\nfrom answer import create_answer, create_hello_message\n\nwith open('tg.json', 'r') as file:\n data = file.read()\nobj = json.loads(data)\nbot = telebot.TeleBot(obj['token'])\n\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n bot.send_message(message.chat.id, create_hello_message('ru'), disable_web_page_preview=True)\n\n\n@bot.message_handler(content_types=['text'])\ndef send_text(message):\n bot.send_message(chat_id=message.chat.id, text=create_answer(message.text, 'ru'))\n\n\nbot.polling()\n","sub_path":"main_tg.py","file_name":"main_tg.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"372515647","text":"#!/usr/bin/env python3\n\n\"\"\"\nFujitsu Package Extractor\nFujitsu SFX Packager Extractor\nCopyright (C) 2019 Plato Mavropoulos\n\"\"\"\n\nprint('Fujitsu SFX Packager Extractor v1.0')\n\nimport os\nimport re\nimport sys\nimport subprocess\n\nif len(sys.argv) >= 2 :\n\t# Drag & Drop or CLI\n\tfjsfx_exec = sys.argv[1:]\nelse :\n\t# Folder path\n\tfjsfx_exec = []\n\tin_path = input('\\nEnter the full folder path: ')\n\tprint('\\nWorking...')\n\tfor root, dirs, files in os.walk(in_path):\n\t\tfor name in files :\n\t\t\tfjsfx_exec.append(os.path.join(root, name))\n\n# \"FjSfxBinay\" + Microsoft CAB Header XOR 0xFF (Tag[4] + Res[4] + Size[4] + Res[4] + Offset[4] + Res[4] + Ver[2]) pattern\nmscf_pattern = re.compile(br'\\x46\\x6A\\x53\\x66\\x78\\x42\\x69\\x6E\\x61\\x79\\xB2\\xAC\\xBC\\xB9\\xFF{4}.{4}\\xFF{4}.{4}\\xFF{4}\\xFC\\xFE', re.DOTALL)\n\t\t\t\nfor input_file in fjsfx_exec :\n\tfile_path = os.path.abspath(input_file)\n\tfile_dir = os.path.dirname(file_path)\n\tfile_name = os.path.basename(file_path)\n\t\n\tprint('\\nFile: ' + file_name)\n\t\n\t# 
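The SFX payload is a Microsoft CAB whose bytes are XOR-ed with 0xFF; the steps below locate it, decode it in place and hand it to 7-Zip.\n\t# 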
Open Fujitsu SFX Binary Packager executable as mutable bytearray\n\twith open(input_file, 'rb') as in_file : FjSfx = bytearray(in_file.read())\n\t\n\tmatch_mscf = mscf_pattern.search(FjSfx) # Search for Fujitsu Microsoft CAB Header XOR 0xFF pattern\n\t\n\t# Check if Microsoft CAB Header XOR 0xFF pattern exists\n\tif match_mscf :\n\t\tprint('\\n Detected obfuscated Microsoft CAB image.')\n\t\t\n\t\tmscf_start = match_mscf.start() + 0xA # Microsoft CAB Header XOR 0xFF starts after \"FjSfxBinay\" signature\n\t\t\n\t\t# Determine the Microsoft CAB image Size\n\t\tcab_size_hex = bytearray(4) # Initialize LE Hex CAB Size as mutable bytearray\n\t\tcab_size_xor = FjSfx[mscf_start + 0x8:mscf_start + 0xC] # Get LE XOR-ed CAB Size\n\t\tfor idx in range(4) : # Parse each CAB Size byte\n\t\t\tcab_size_hex[idx] = cab_size_xor[idx] ^ 0xFF # Perform XOR 0xFF\n\t\tcab_size = int.from_bytes(cab_size_hex, 'little') # Get Actual CAB Size\n\t\t\n\t\tprint('\\n Removing Obfuscation...') # May take a while\n\t\t\n\t\t# Determine the Microsoft CAB image Data\n\t\tcab_data = bytearray(cab_size) # Initialize CAB Data as mutable bytearray\n\t\tcab_data_xor = FjSfx[mscf_start:mscf_start + cab_size] # Get XOR-ed CAB Data\n\t\tfor idx in range(cab_size) : # Parse each CAB Data byte\n\t\t\tcab_data[idx] = cab_data_xor[idx] ^ 0xFF # Perform XOR 0xFF and get Actual CAB Data\n\t\t\n\t\tprint('\\n Extracting...')\n\t\t\n\t\twith open('fjsfx_temp.cab', 'wb') as cab_file : cab_file.write(cab_data) # Create temporary CAB image\n\t\t\n\t\textr_path = os.path.join(file_dir, file_name[:-4], '') # Create CAB image extraction path\n\t\t\n\t\ttry :\n\t\t\tdecomp = subprocess.run(['7z', 'x', '-aou', '-bso0', '-bse0', '-bsp0', '-o' + extr_path, 'fjsfx_temp.cab']) # 7-Zip\n\t\texcept :\n\t\t\tprint('\\n Error: Could not decompress Microsoft CAB image!')\n\t\t\tprint(' Make sure that \"7z\" executable exists!\\n')\n\t\t\t\n\t\tos.remove('fjsfx_temp.cab') # Remove temporary CAB image\n\t\t\n\t\tprint('\\n Extracted!')\n\t\t\n\telse :\n\t\tprint('\\n Error: No Fujitsu SFX Packager found!')\n\t\tcontinue # Next input file\n\nelse :\n\tinput('\\nDone!')","sub_path":"Fujitsu SFX Packager Extractor/Fujitsu_Package_Extract.py","file_name":"Fujitsu_Package_Extract.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"74314157","text":"'''\n#3-4. Palindrome\nA string that reads the same from the front as from the back is called a palindrome.\nWrite code that determines whether each of the following strings is a palindrome.\n1) Anna\n2) Radar\n3) Step on no Pets\n4) No lemon, no melon\n'''\n\nimport re\n\nprint(\"Please enter a string.\")\npalindrome = input()\n\npalindrome = re.sub(\"[-=.,#/?!:&]\", \"\", palindrome) # remove special characters\npalindrome = palindrome.lower().strip().replace(\" \",\"\") # ignore case, strip whitespace, drop spaces\n\nanswer = True\n\nfor i, c in enumerate(palindrome):\n if c != palindrome[-i-1]: # break only on a mismatch; enumerate advances the index automatically\n answer = False\n break\nif answer:\n print(\"It is a palindrome.\")\nelse:\n print(\"It is not a palindrome.\")\n \n","sub_path":"day3/assignment3-4.py","file_name":"assignment3-4.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"358117843","text":"\nfrom django.urls import path\n\nfrom . 
import views\n\napp_name = 'disease'\n\nurlpatterns = [\n # ex: /\n path('', views.index, name='index'),\n # ex: /contact/\n path('contact/', views.contact, name='contact'),\n # ex: /uploadeye/\n path('uploadeye/', views.uploadeye, name='uploadeye'),\n # ex: /uploadskin/\n path('uploadskin/', views.uploadskin, name='uploadskin'),\n # ex: /resultseye/\n path('resultseye/', views.resultseye, name='resultseye'),\n path('resultsskin/', views.resultsskin, name='resultsskin'),\n]\n","sub_path":"hackwithus/disease/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"444723760","text":"# Example: We want to print a rectangle of stars, where the user\n# specifies the height and width\n#\n# Width 5, height 3\n# *****\n# *****\n# *****\n\nbredde = int(input(\"Width: \"))\nhoyde = int(input(\"Height: \"))\nfor linje in range(hoyde):\n for tall in range(bredde):\n print(\"*\", end=\"\")\n print()\n","sub_path":"introduksjon_lerer/firkant_stjerner.py","file_name":"firkant_stjerner.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"524898220","text":"import json\nimport uuid\n\nimport six\n\nfrom django.core.urlresolvers import reverse\nfrom django.forms import FileField, ClearableFileInput, CharField, HiddenInput\nfrom django.template.loader import render_to_string\nfrom django.utils.safestring import mark_safe\nfrom django.core import validators\n\nfrom .models import UploadedFile\n\n\nclass FileFormMixin(object):\n def __init__(self, *args, **kwargs):\n super(FileFormMixin, self).__init__(*args, **kwargs)\n\n self.add_hidden_field('form_id', uuid.uuid4)\n self.add_hidden_field('upload_url', self.get_upload_url())\n self.add_hidden_field('delete_url', self.get_delete_url())\n\n def add_hidden_field(self, name, initial):\n self.fields[name] = CharField(widget=HiddenInput, initial=initial, required=False)\n\n def get_delete_url(self):\n return reverse('file_form_handle_delete_no_args')\n\n def get_upload_url(self):\n return reverse('file_form_handle_upload')\n\n def full_clean(self):\n if not self.is_bound:\n # Form is unbound; just call super\n super(FileFormMixin, self).full_clean()\n else:\n # Update file data of the form\n self.update_files_data()\n\n # Call super\n super(FileFormMixin, self).full_clean()\n\n def update_files_data(self):\n form_id = self.data.get('form_id')\n\n if form_id:\n for field_name, field in six.iteritems(self.fields):\n if hasattr(field, 'get_file_data'):\n file_data = field.get_file_data(field_name, form_id)\n\n if file_data:\n self.files[field_name] = file_data\n\n def delete_temporary_files(self):\n form_id = self.data.get('form_id')\n\n if form_id:\n for field_name, field in six.iteritems(self.fields):\n if hasattr(field, 'delete_file_data'):\n field.delete_file_data(field_name, form_id)\n\n\nclass UploadWidget(ClearableFileInput):\n def render(self, name, value, attrs=None):\n def get_file_value(f):\n if hasattr(f, 'file_id'):\n return f.get_values()\n else:\n return dict(name=f.name)\n\n files_data = None\n\n if value:\n if isinstance(value, list):\n values = value\n else:\n values = [value]\n\n files_data = json.dumps(\n [\n get_file_value(v) for v in values\n ]\n )\n\n return mark_safe(\n render_to_string(\n 'django_file_form/upload_widget.html',\n dict(\n input=super(UploadWidget, self).render(name, value, attrs),\n files_data=files_data\n )\n )\n 
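# note: mark_safe treats the rendered widget template output as trusted HTML\n 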
)\n\n\nclass UploadedFileField(FileField):\n widget = UploadWidget\n\n def get_file_qs(self, field_name, form_id):\n return UploadedFile.objects.filter(\n form_id=form_id,\n field_name=field_name\n )\n\n def get_file_data(self, field_name, form_id):\n qs = self.get_file_qs(field_name, form_id)\n\n if qs.exists():\n return qs.latest('created').get_uploaded_file()\n else:\n return None\n\n def delete_file_data(self, field_name, form_id):\n qs = self.get_file_qs(field_name, form_id)\n\n for f in qs:\n f.delete()\n\n\nclass MultipleUploadedFileField(UploadedFileField):\n def widget_attrs(self, widget):\n attrs = super(MultipleUploadedFileField, self).widget_attrs(widget)\n\n attrs['multiple'] = 'multiple'\n return attrs\n\n def get_file_data(self, field_name, form_id):\n qs = self.get_file_qs(field_name, form_id)\n\n return [\n f.get_uploaded_file() for f in qs\n ]\n\n def to_python(self, data):\n if data in validators.EMPTY_VALUES:\n return None\n elif isinstance(data, list):\n for f in data:\n super(MultipleUploadedFileField, self).to_python(f)\n\n return data\n else:\n return [data]\n\n\nclass ExistingFile(object):\n def __init__(self, name, file_id):\n self.name = name\n self.file_id = file_id\n self.is_existing = True\n\n def get_values(self):\n return dict(name=self.name, id=self.file_id)","sub_path":"django_file_form/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"618950581","text":"import pytest\nfrom flask import Flask, Response\nfrom flask.testing import FlaskClient\n\nfrom yafcorse import Yafcorse\n\n\n@pytest.fixture()\ndef local_app():\n app = Flask(__name__)\n\n cors = Yafcorse({\n 'allowed_methods': ['GET', 'POST', 'PUT'],\n 'allowed_headers': ['Content-Type', 'X-Test-Header'],\n 'origins': lambda origin: origin == 'https://from_lambda'\n })\n cors.init_app(app)\n\n return app\n\n\n@pytest.fixture()\ndef local_client(local_app: Flask):\n return local_app.test_client()\n\n\ndef test_origin_function(local_client: FlaskClient):\n response: Response = local_client.options('/some-request', headers={\n 'Origin': 'https://from_lambda'\n })\n assert response.status_code == 404\n assert 'Access-Control-Allow-Origin'.lower() in response.headers\n assert 'Access-Control-Max-Age'.lower() in response.headers\n assert response.headers.get('Access-Control-Allow-Origin') is not None\n assert response.headers.get('Access-Control-Allow-Origin') == 'https://from_lambda'\n assert response.headers.get('Access-Control-Max-Age') is not None\n assert response.headers.get('Access-Control-Max-Age') != ''\n\n\ndef test_origin_function_fail(local_client: FlaskClient):\n response: Response = local_client.options('/some-request', headers={\n 'Origin': 'https://other_than_lambda'\n })\n assert response.status_code == 404\n assert 'Access-Control-Allow-Origin'.lower() not in response.headers\n assert 'Access-Control-Max-Age'.lower() not in response.headers\n","sub_path":"tests/test_origins_function.py","file_name":"test_origins_function.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"203685512","text":"# lessons: \n# - graph the matrix as indices, it can be helpful\n# - Sometimes there simply isn't a visible pattern, and time is gold,\n# so implement the brute-force solution and optimize later if time allows\n# - Give only a few minutes to the task of finding a pattern\ndef 
solve(matrix = None):\n\n # condition of the problem\n if matrix is None: \n return None\n\n unique = []\n map = dict()\n\n len_x = len(matrix[0])\n len_y = len(matrix)\n # left to right\n # all rows\n for i in range(len_y):\n j = 0\n for j in range(len_x):\n target = i-j\n\n if target in map:\n if map[target] != matrix[i][j]:\n return -1\n else:\n map[target] = matrix[i][j]\n if matrix[i][j] not in unique:\n unique.append(matrix[i][j])\n\n\n \n return len(unique)\n\ndef test():\n test_cases = [\n [[1, 2, 3],\n [3, 1, 2],\n [4, 4, 1],],\n [[1, 2, 3],\n [3, 1, 2],\n [4, 3, 1],],\n None, # no input will be null\n [[1, 2, 3, 4, 8, 1],\n [5, 1, 2, 3, 4, 8],\n [4, 5, 1, 2, 3, 4],\n [7, 4, 5, 1, 2, 3],],\n ]\n expected = [\n -1,\n 4,\n None,\n 7\n ]\n for i in range(len(test_cases)):\n solution = None\n try:\n solution = solve(test_cases[i])\n assert solution == expected[i]\n print(\"OK\")\n except Exception as error:\n print(f\"Error, test {test_cases[i]}, expected {expected[i]}, calculated {solution}\")\n\ntest()","sub_path":"devsu/2019/13_solved.py","file_name":"13_solved.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"54750163","text":"import numpy as np\r\nimport pandas as pd\r\n\r\n#import modified features and labels\r\nX = pd.read_csv('X_aug.csv')\r\nX_test = pd.read_csv('X_test_aug.csv')\r\n\r\ntrain = pd.read_csv(\"training_variants\")\r\ntest = pd.read_csv(\"test_variants\")\r\n\r\nID_train = train.ID\r\nID_test = test.ID\r\ny = train.Class.values-1\r\ny_test = np.zeros((X_test.shape[0], max(y)+1))\r\n\r\n\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import StratifiedKFold\r\nn_folds = 5\r\nkf = StratifiedKFold(n_splits=n_folds, random_state=1, shuffle=True)\r\nkf.get_n_splits(X, y)\r\nX = np.array(X)\r\ny = np.array(y)\r\n\r\nfold = 0\r\nfrom sklearn.linear_model import LogisticRegression\r\nclf = LogisticRegression(C=4)\r\nfor train_index, test_index in kf.split(X, y):\r\n fold += 1\r\n X_train, X_valid = X[train_index], X[test_index]\r\n y_train, y_valid = y[train_index], y[test_index]\r\n print(\"Fold\", fold) \r\n clf.fit(X_train, y_train)\r\n p_train = clf.predict_proba(X_train)\r\n p_valid = clf.predict_proba(X_valid)\r\n p_test = clf.predict_proba(X_test)\r\n print(metrics.log_loss(y_train, p_train))\r\n print(metrics.log_loss(y_valid, p_valid))\r\n y_test += p_test/n_folds\r\n\r\n\r\n#output result as csv\r\n#classes = \"class1,class2,class3,class4,class5,class6,class7,class8,class9\".split(',')\r\n#subm = pd.DataFrame(y_test, columns=classes)\r\n#subm['ID'] = ID_test\r\n#subm.to_csv('tfidfLr.csv', index=False)\r\n\r\n","sub_path":"LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"99009320","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 13 18:01:38 2019\n\n@author: alextseng\n\"\"\"\n\nimport torch\n#import torchvision\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.autograd as Variable\nimport numpy as np\n\nfrom dataloader import load_data\nfrom classes import Net\nimport imageio\n\nn_epochs = 3\nbatch_size_train = 64\nbatch_size_test = 1000\nlearning_rate = 1e-4\nmomentum = 0.5\nlog_interval = 10\n\n\n\npara_list, iead_list = load_data()\npara_tensor = 
torch.FloatTensor(para_list)\niead_tensor = torch.FloatTensor(iead_list)\n\nnet = Net()\n\nnet.train()\ncriterion = nn.MSELoss()\noptimizer = optim.Adam(net.parameters(), lr=learning_rate)\n\nprint(iead_list.shape)\nprint(iead_list[0])\n\n\ndef train(num_epochs):\n print('Training Network')\n #losses= []\n for epoch in range(num_epochs):\n running_loss = 0.0\n #train_acc = 0.0\n for i, data in enumerate(para_tensor, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs = data\n labels = iead_tensor[i]\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n #print(outputs.shape)\n\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n \n running_loss += loss.item()\n \n running_loss /= para_list.shape[0]\n #print(losses)\n print(running_loss)\n \n #print(\"Epoch {}, Train Accuracy: {} , TrainLoss: {} \".format(epoch, train_acc, train_loss))\n print('Finished Training')\n\ntrain(100)\n\ntorch.save(net, \"model.pth\")\n\n#print(predictions.shape)\n\n\n#Produce figures \nprint('Producing figures...')\nfor i, para in enumerate(para_list):\n fig = plt.figure(i)\n #fig.suptitle('Ti/Te: '+str(simulation_list[-i].Ti_Te) +\\\n # ' B: '+str(round(simulation_list[-i].B,2)) +\\\n # ' psi: '+str(round(simulation_list[-i].psi,2)))\n \n outputs = net(torch.FloatTensor(para))\n ax = fig.subplots(1,2,False,True,True)\n ax[0].imshow(outputs.data.numpy().reshape((500,180)))\n ax[0].set_title('NN')\n ax[0].set_xlabel(\"incident angle [deg]\")\n ax[0].set_ylabel(\"incident energy [eV]\")\n ax[1].imshow(iead_list[i].reshape((500,180)))\n ax[1].set_title('hPIC')\n ax[1].set_xlabel(\"incident angle [deg]\")\n ax[1].set_ylabel(\"incident energy [eV]\")\n #error = iead_list[-i,:]/np.max(iead_list[-i,:]) - predictions[-i,:]/np.max(predictions[-i,:])\n #ax[2].imshow(error.reshape((240,90))[:50,:])\n #ax[2].set_title('error')\n plt.savefig(str(i)+'.png')\n plt.close()\n#end for\n\n#Produce animated figure\nprint('Producing animated figure...')\nimages = []\nnum_figures = len(para_list) # one figure was saved per parameter set above\nfor filename in [str(i)+'.png' for i in range(num_figures)]:\n images.append(imageio.imread(filename))\n#end for\nimageio.mimsave('movie.gif',images,duration=0.5)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"576495674","text":"# Copyright (c) 2019, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.\n#\n# WSO2 Inc. licenses this file to you under the Apache License,\n# Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport sys\nfrom multiprocessing.dummy import Pool\nimport os\nimport pickle\nimport time\nimport yaml\nfrom datetime import datetime\nfrom utils import util_methods\nimport random\nfrom utils.util_methods import generate_biased_random\n\n\ndef execute_scenario(scenario):\n \"\"\"\n Execute scenarios from the scenario pool to simulate abnormal token usage\n :param scenario: A list containing a scenario\n :return: none\n \"\"\"\n global attack_duration, protocol, host, port, payloads, user_agents, start_time, max_request_multiplier, min_request_multiplier, dataset_path\n\n up_time = datetime.now() - start_time\n\n if up_time.seconds < attack_duration:\n # multiply normal request count by a random value between user defined min and max value\n request_target = scenario[0] * random.randint(min_request_multiplier, max_request_multiplier)\n context = scenario[1]\n version = scenario[2]\n resource_path = scenario[3]\n token = scenario[4]\n method = scenario[5]\n ip = scenario[6]\n cookie = scenario[7]\n user_agent = scenario[10]\n\n request_path = \"{}://{}:{}/{}/{}/{}\".format(protocol, host, port, context, version, resource_path)\n random_payload = random.choice(payloads)\n\n # sending requests until the request target achieved or attack duration elapses\n for i in range(request_target):\n up_time = datetime.now() - start_time\n if up_time.seconds >= attack_duration:\n break\n\n response = util_methods.send_simple_request(request_path, method, token, ip, cookie, user_agent, payload=random_payload)\n\n request_info = \"{},{},{},{},{},{},{},\\\"{}\\\"\".format(datetime.now(), request_path, method, token, ip, cookie, response.status_code, user_agent)\n util_methods.log(dataset_path, request_info, \"a\")\n\n # sleep the process for a random period of time between 0 and 3 seconds but biased to 0\n time.sleep(generate_biased_random(0, 3, 2))\n\n\n# Program Execution\nif __name__ == '__main__':\n\n attack_tool_log_path = \"../../../../../../logs/attack-tool.log\"\n\n try:\n with open(os.path.abspath(os.path.join(__file__, \"../../../../traffic-tool/data/runtime_data/user_scenario_pool.sav\")), \"rb\") as scenario_file:\n scenario_pool = pickle.load(scenario_file, )\n\n with open(os.path.abspath(os.path.join(__file__, \"../../../../../config/attack-tool.yaml\")), \"r\") as attack_config_file:\n attack_config = yaml.load(attack_config_file, Loader=yaml.FullLoader)\n except FileNotFoundError as ex:\n error_string = \"[ERROR] {} - {}: \\'{}\\'\".format(datetime.now(), ex.strerror, ex.filename)\n print(error_string)\n util_methods.log(attack_tool_log_path, error_string, \"a\")\n sys.exit()\n\n # Reading configurations from attack-tool.yaml\n protocol = attack_config['general_config']['api_host']['protocol']\n host = attack_config['general_config']['api_host']['ip']\n port = attack_config['general_config']['api_host']['port']\n attack_duration = attack_config['general_config']['attack_duration']\n payloads = attack_config['general_config']['payloads']\n user_agents = attack_config['general_config']['user_agents']\n process_count = attack_config['general_config']['number_of_processes']\n max_request_multiplier = attack_config['attacks']['abnormal_token_usage']['max_request_scalar']\n min_request_multiplier = attack_config['attacks']['abnormal_token_usage']['min_request_scalar']\n\n start_time = datetime.now()\n\n # Recording column names in the dataset csv file\n dataset_path = 
\"../../../../../../dataset/attack/abnormal_token.csv\"\n util_methods.log(dataset_path, \"Timestamp, Request path, Method,Access Token, IP Address, Cookie, Response Code\", \"w\")\n\n log_string = \"[INFO] {} - Abnormal token usage attack started \".format(start_time)\n print(log_string)\n util_methods.log(attack_tool_log_path, log_string, \"a\")\n\n process_pool = Pool(processes=process_count)\n\n # Executing scenarios until the attack duration elapses\n while True:\n time_elapsed = datetime.now() - start_time\n if time_elapsed.seconds >= attack_duration:\n log_string = \"[INFO] {} - Attack terminated successfully. Time elapsed: {} minutes\".format(datetime.now(), time_elapsed.seconds / 60.0)\n print(log_string)\n util_methods.log(attack_tool_log_path, log_string, \"a\")\n break\n else:\n process_pool.map(execute_scenario, scenario_pool)\n\n # closes the process pool and wait for the processes to finish\n process_pool.close()\n process_pool.join()\n","sub_path":"lib/attack-tool/src/python/abnormal_token_usage.py","file_name":"abnormal_token_usage.py","file_ext":"py","file_size_in_byte":5316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"92765682","text":"def main ():\n file = getAFile()\n image = makeImage(file)\n changeDot(image)\n displayImage(image)\n #showInfo(image)\n\ndef changeDot(image):\n pixel_1 = getPixel(image, 100, 102)\n pixel_2 = getPixel(image, 101, 103)\n pixel_3 = getPixel(image, 20, 20)\n pixel_4 = getPixel(image, 40, 40)\n newColor = makeColor (255, 255, 255)\n \n setColor(pixel_1, newColor)\n setColor(pixel_2, newColor)\n setColor(pixel_3, newColor)\n setColor(pixel_4, newColor)\ndef getAFile():\n file = pickAFile()\n return file \n\ndef makeImage(file):\n image = makePicture(file)\n return image\n\ndef displayImage(image):\n show(image)\n \ndef showInfo(image):\n printNow(\"beach.jpn\")\n printNow(getWidth(image))\n printNow(getHeight(image))\n \n pixel= getPixel (image, 150, 80)\n printNow(pixel)\n printNow(getColor(pixel))\nmain()","sub_path":"WhitePixelEditted.py","file_name":"WhitePixelEditted.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"295909878","text":"import unittest, os, subprocess, re, json, imp, tempfile, shutil\nfrom pkg_resources import resource_filename\nfrom hashlib import sha224\nfrom random import random\nfrom hermes.GrammarFileParser import GrammarFileParser, HermesParserFactory\nfrom hermes.GrammarCodeGenerator import FactoryFactory as TemplateFactoryFactory\nfrom hermes.GrammarCodeGenerator import TemplateWriter\nfrom hermes.Morpheme import NonTerminal\ndirectory = 'test/cases'\n\nclass terminal:\n def __init__(self, id, string):\n self.__dict__.update(locals())\n\nclass token:\n def __init__(self, terminal, lineno=0, colno=0, source_string=''):\n self.__dict__.update(locals())\n\nclass HermesTest(unittest.TestCase):\n pass\n\nclass HermesFirstSetTest(HermesTest):\n\n def __init__(self, testCaseDir=None, nonterminal=None, expected=None, actual=None):\n super().__init__()\n self.__dict__.update(locals())\n self.maxDiff = None\n\n def runTest(self):\n self.assertEqual(self.actual, self.expected, \\\n 'First set for nonterminal %s in test %s does not match expected value\\n\\nexpected: %s\\nactual:%s' % ( self.nonterminal, self.testCaseDir, self.expected, self.actual))\n\nclass HermesFollowSetTest(HermesTest):\n\n def __init__(self, testCaseDir=None, nonterminal=None, expected=None, 
actual=None):\n super().__init__()\n self.__dict__.update(locals())\n self.maxDiff = None\n\n def runTest(self):\n self.assertEqual(self.actual, self.expected, \\\n 'Follow set for nonterminal %s in test %s does not match expected value\\n\\nexpected: %s\\nactual:%s' % ( self.nonterminal, self.testCaseDir, self.expected, self.actual))\n\nclass HermesConflictTest(HermesTest):\n\n def __init__(self, testCaseDir=None, nonterminal=None, expected=None, actual=None):\n super().__init__()\n self.__dict__.update(locals())\n self.maxDiff = None\n\n def runTest(self):\n self.assertEqual(self.actual, self.expected, 'expected conflicts to match')\n\nclass HermesPythonParseTreeTest(HermesTest):\n\n def __init__(self, testCaseDir=None, grammar=None, expected=None):\n super().__init__()\n self.__dict__.update(locals())\n self.maxDiff = None\n\n def runTest(self):\n tree = getParseTree(self.grammar, self.testCaseDir)\n self.assertEqual(self.expected, tree, 'expected parse trees to match (test %s)' % (self.testCaseDir))\n\nclass HermesPythonAbstractSyntaxTreeTest(HermesTest):\n\n def __init__(self, testCaseDir=None, grammar=None, expected=None):\n super().__init__()\n self.__dict__.update(locals())\n self.maxDiff = None\n\n def runTest(self):\n self.assertEqual(self.expected, getAst(self.grammar, self.testCaseDir), 'expected ASTs to match (test %s)' % (self.testCaseDir))\n\nclass HermesJavaTest(HermesTest):\n def runJavaParser(self, grammar, testCaseDir, arg):\n tmpDir = tempfile.mkdtemp()\n shutil.copy(os.path.join(testCaseDir, 'tokens'), tmpDir)\n shutil.copytree(os.path.join(testCaseDir, '..', 'javacp', 'org'), os.path.join(tmpDir, 'org'))\n templateFactory = TemplateFactoryFactory().create('java')\n templateWriter = TemplateWriter(templateFactory)\n templateWriter.write([grammar], tmpDir, addMain=True)\n\n javaSourceFiles = list(filter(lambda filename: filename.endswith('.java'), os.listdir(tmpDir)))\n\n try:\n compileCmd = 'javac *.java 2>/dev/null'\n subprocess.check_call(compileCmd, cwd=tmpDir, shell=True, stderr=None)\n except subprocess.CalledProcessError as error:\n print('FAILED TO COMPILE', testCaseDir)\n shutil.rmtree(tmpDir)\n return error.output.decode('utf-8').strip()\n\n try:\n runCmd = 'java ParserMain grammar {type} 2>&1 <tokens'.format(type=arg)\n return subprocess.check_output(runCmd, shell=True, stderr=None, cwd=tmpDir).decode('utf-8').strip()\n except subprocess.CalledProcessError as exception:\n return exception.output.decode('utf-8').strip()\n finally:\n shutil.rmtree(tmpDir)\n\nclass HermesJavaParseTreeTest(HermesJavaTest):\n\n def __init__(self, testCaseDir=None, grammar=None, expected=None):\n super().__init__()\n self.__dict__.update(locals())\n self.maxDiff = None\n\n def getJavaParseTree(self):\n return self.runJavaParser(self.grammar, self.testCaseDir, 'parsetree')\n\n def runTest(self):\n self.assertEqual(self.expected, self.getJavaParseTree(), 'expected parse trees to match (test %s)' % (self.testCaseDir))\n\nclass HermesJavaAbstractSyntaxTreeTest(HermesJavaTest):\n\n def __init__(self, testCaseDir=None, grammar=None, expected=None):\n super().__init__()\n self.__dict__.update(locals())\n self.maxDiff = None\n\n def getJavaAst(self):\n return self.runJavaParser(self.grammar, self.testCaseDir, 'ast')\n\n def runTest(self):\n self.assertEqual(self.expected, self.getJavaAst(), 'expected ASTs to match (test %s)' % (self.testCaseDir))\n\nclass HermesCTest(HermesTest):\n def runCParser(self, grammar, testCaseDir, arg):\n tmpDir = tempfile.mkdtemp()\n 
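# stage the token fixture next to the generated parser sources\n 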
shutil.copy(os.path.join(testCaseDir, 'tokens'), tmpDir)\n templateFactory = TemplateFactoryFactory().create('c')\n templateWriter = TemplateWriter(templateFactory)\n templateWriter.write([grammar], tmpDir, addMain=True)\n\n cSourceFiles = list(filter(lambda x: x != 'tokens', os.listdir(tmpDir)))\n\n try:\n compileCmd = 'gcc -o parser {sources} -g -Wall -pedantic -ansi -std=c99 2>/dev/null'.format(sources=' '.join(cSourceFiles))\n subprocess.check_call(compileCmd, cwd=tmpDir, shell=True, stderr=None)\n except subprocess.CalledProcessError as error:\n shutil.rmtree(tmpDir)\n return error.output.decode('utf-8').strip()\n\n try:\n runCmd = './parser grammar {type} < tokens'.format(type=arg)\n return subprocess.check_output(runCmd, shell=True, stderr=None, cwd=tmpDir).decode('utf-8').strip()\n except subprocess.CalledProcessError as exception:\n return exception.output.decode('utf-8').strip()\n finally:\n shutil.rmtree(tmpDir)\n\nclass HermesCParseTreeTest(HermesCTest):\n\n def __init__(self, testCaseDir=None, grammar=None, expected=None):\n super().__init__()\n self.__dict__.update(locals())\n self.maxDiff = None\n\n def getCParseTree(self):\n return self.runCParser(self.grammar, self.testCaseDir, 'parsetree')\n\n def runTest(self):\n tree = self.getCParseTree()\n self.assertEqual(self.expected, tree, 'expected parse trees to match (test %s)' % (self.testCaseDir))\n\nclass HermesCAbstractSyntaxTreeTest(HermesCTest):\n\n def __init__(self, testCaseDir=None, grammar=None, expected=None):\n super().__init__()\n self.__dict__.update(locals())\n self.maxDiff = None\n\n def getCAst(self):\n return self.runCParser(self.grammar, self.testCaseDir, 'ast')\n\n def runTest(self):\n self.assertEqual(self.expected, self.getCAst(), 'expected ASTs to match (test %s)' % (self.testCaseDir))\n\ndef getParseTree(grammar, testCaseDir):\n tmpDir = tempfile.mkdtemp()\n shutil.copy(os.path.join(testCaseDir, 'tokens'), tmpDir)\n\n templateFactory = TemplateFactoryFactory().create('python')\n templateWriter = TemplateWriter(templateFactory)\n templateWriter.write([grammar], tmpDir, addMain=True)\n\n try:\n runCmd = 'python ParserMain.py grammar parsetree < tokens 2>&1'\n return subprocess.check_output(runCmd, shell=True, stderr=None, cwd=tmpDir).decode('utf-8').strip()\n except subprocess.CalledProcessError as exception:\n return exception.output.decode('utf-8').strip()\n finally:\n shutil.rmtree(tmpDir)\n\ndef getAst(grammar, testCaseDir):\n tmpDir = tempfile.mkdtemp()\n shutil.copy(os.path.join(testCaseDir, 'tokens'), tmpDir)\n\n templateFactory = TemplateFactoryFactory().create('python')\n templateWriter = TemplateWriter(templateFactory)\n templateWriter.write([grammar], tmpDir, addMain=True)\n\n try:\n runCmd = 'python ParserMain.py grammar ast < tokens 2>&1'\n return subprocess.check_output(runCmd, shell=True, stderr=None, cwd=tmpDir).decode('utf-8').strip()\n except subprocess.CalledProcessError as exception:\n return exception.output.decode('utf-8').strip()\n finally:\n shutil.rmtree(tmpDir)\n\n\ndef getPythonParser(grammar):\n global hermesparser\n\n templateFactory = TemplateFactoryFactory().create('python')\n templateWriter = TemplateWriter(templateFactory)\n templateWriter.write([grammar], '.', addMain=True)\n\n try:\n os.remove('__pycache__/hermesparser.cpython-32.pyc')\n except OSError:\n pass\n\n hermesparser = imp.load_source('hermesparser', 'grammar_Parser.py')\n parser = hermesparser.grammar_Parser()\n return parser\n\ndef load_tests(loader, tests, pattern):\n grammarTestsDirectory = 
os.path.join(directory, 'grammar')\n parsingTestsDirectory = os.path.join(directory, 'parsing')\n suite = unittest.TestSuite()\n jsonifySets = lambda arg:'{\\n%s\\n}' % (',\\n'.join([' \"%s\": [%s]' % (nt, ', '.join(['\"'+z+'\"' for z in theSet])) for nt, theSet in arg.items()]))\n for parsingTest in os.listdir(parsingTestsDirectory):\n try:\n int(parsingTest)\n except ValueError:\n continue\n\n testDirectory = os.path.join(parsingTestsDirectory, parsingTest)\n grammarFile = os.path.join(testDirectory, 'grammar.zgr')\n tokensFile = os.path.join(testDirectory, 'tokens')\n grammarParser = GrammarFileParser(HermesParserFactory().create())\n grammar = grammarParser.parse( 'grammar', open(grammarFile) )\n\n path = os.path.join(testDirectory, 'parsetree')\n if os.path.exists(path):\n expectedParseTree = open(path).read().strip()\n if len(expectedParseTree):\n suite.addTest(HermesPythonParseTreeTest(testDirectory, grammar, expectedParseTree))\n suite.addTest(HermesCParseTreeTest(testDirectory, grammar, expectedParseTree))\n suite.addTest(HermesJavaParseTreeTest(testDirectory, grammar, expectedParseTree))\n else:\n fp = open(path, 'w')\n fp.write(getParseTree(grammar, testDirectory))\n fp.close()\n print('generated %s' % (path))\n\n path = os.path.join(testDirectory, 'ast')\n if os.path.exists(path):\n expectedAst = open(path).read().strip()\n suite.addTest(HermesPythonAbstractSyntaxTreeTest(testDirectory, grammar, expectedAst))\n suite.addTest(HermesCAbstractSyntaxTreeTest(testDirectory, grammar, expectedAst))\n suite.addTest(HermesJavaAbstractSyntaxTreeTest(testDirectory, grammar, expectedAst))\n else:\n fp = open(path, 'w')\n fp.write(getAst(grammar, testDirectory))\n fp.close()\n print('generated %s' % (path))\n\n\n for grammarTest in os.listdir(grammarTestsDirectory):\n try:\n int(grammarTest)\n except ValueError:\n continue\n testDirectory = os.path.join(grammarTestsDirectory, grammarTest)\n grammarParser = GrammarFileParser(HermesParserFactory().create())\n grammar = grammarParser.parse( 'grammar', open(os.path.join(testDirectory, 'grammar.zgr')) )\n grammarFirst = dict()\n for k,v in grammar.first.items():\n if isinstance(k, NonTerminal):\n grammarFirst[k.string] = set(map(lambda x: x.string, v))\n grammarFollow = dict()\n for k,v in grammar.follow.items():\n if isinstance(k, NonTerminal):\n grammarFollow[k.string] = set(map(lambda x: x.string, v))\n\n path = os.path.join(testDirectory, 'conflicts.json')\n if os.path.exists(path):\n contents = open(path).read()\n if len(contents):\n expected = json.loads(contents)\n for k,v in expected.items():\n suite.addTest(HermesConflictTest(testDirectory, k, contents, '\\n'.join([x.toJson() for x in grammar.conflicts])))\n else:\n if len(grammar.conflicts):\n fp = open(path, 'w')\n fp.write('\\n'.join([x.toJson() for x in grammar.conflicts]))\n fp.close()\n else:\n fp = open(path, 'w')\n fp.close()\n print('generated %s/conflicts.json (%d conflicts)' % (path, len(grammar.conflicts)))\n\n path = os.path.join(testDirectory, 'first.json')\n if os.path.exists(path):\n contents = open(path).read()\n if len(contents):\n expected = json.loads(contents)\n for k,v in expected.items():\n suite.addTest(HermesFirstSetTest(testDirectory, k, set(expected[k]), grammarFirst[k]))\n else:\n if len(grammar.conflicts):\n fp = open(path, 'w')\n fp.close()\n print('generated %s/first.json (empty file because of conflicts)' % (path))\n else:\n for k,v in grammarFirst.items():\n grammarFirst[k] = list(v)\n fp = open(path, 'w')\n fp.write(jsonifySets(grammarFirst))\n 
fp.close()\n print('generated %s/first.json' % (path))\n\n path = os.path.join(testDirectory, 'follow.json')\n if os.path.exists(path):\n contents = open(path).read()\n if len(contents):\n expected = json.loads(contents)\n for k,v in expected.items():\n suite.addTest(HermesFollowSetTest(testDirectory, k, set(expected[k]), grammarFollow[k]))\n else:\n if len(grammar.conflicts):\n fp = open(path, 'w')\n fp.close()\n print('generated %s/follow.json (empty file because of conflicts)' % (path))\n else:\n for k,v in grammarFollow.items():\n grammarFollow[k] = list(v)\n fp = open(path, 'w')\n fp.write(jsonifySets(grammarFollow))\n fp.close()\n print('generated %s/follow.json' % (path))\n\n return suite\n","sub_path":"test/HermesTest.py","file_name":"HermesTest.py","file_ext":"py","file_size_in_byte":13134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"157078852","text":"# This is an implementation of the \"Depth Greedy Algorithm\" as described in\n# Algorithm Design, Tarjan/Robert.\n#\n# The list R contains tuples on the form: (\"name\", \"start time\", \"end time\")\n#\n# The list A will, at the end of the execution, contain the tuples that gives\n# the optimal scheduling.\n#\n\nR = [(\"i1\",0,1),(\"i2\",0,2),(\"i3\",1.5,3),(\"i4\",2.5,4),(\"i5\",4.5,5),(\"i6\",0,6),(\"i7\",4.75,7),(\"i8\",7.2,8),(\"i9\",7.1,9)]\nEx = []\nA = []\nd = 3\ndepth = list(range(d))\n\nfor i in depth:\n Ex.append((list(range(d))))\n\nR.sort(key=lambda tup: tup[1]) # Sort R according to starting time.\n\nfor j in range(len(R)):\n for i in range(len(R)):\n if R[i][1] < R[j][1] and not R[i][2] < R[j][1]: # This was tricky...\n i_list_label = Ex[i]\n for l in i_list_label:\n Ex[j][i].remove(l)\n\nprint(\"Result:\" + str(A))\n\n","sub_path":"algorithms/greedy_depth.py","file_name":"greedy_depth.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"161571437","text":"#!/usr/bin/python3.4\n# -*- coding: utf-8 -*-\n# Pavel Ostyakov\n# pavelosta@gmail.com\n\nimport argparse\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom crawler.common import *\nfrom crawler.database import Database, url_iter\n\n\ndef get_and_check_thread_n(thread_n):\n \"\"\"Check the number of threads for correctness and return it.\n\n :param thread_n: str/int.\n :return: int.\n\n \"\"\"\n threads_count = int(thread_n)\n\n if threads_count < 1 or 1000 < threads_count:\n raise ValueError(\"Error - invalid count of threads. 
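The `greedy_depth.py` record above sketches the interval-partitioning ("depth") greedy but leaves the bookkeeping unfinished: `A` is printed yet never filled, and the nested loop mutates `Ex` in a fragile way. For comparison, a known-correct version of the same greedy — sort requests by start time and reuse whichever machine frees up earliest, tracked with a min-heap of finish times; names here are illustrative:

```python
import heapq


def partition_intervals(requests):
    """requests: (name, start, end) tuples; returns (n_machines, assignment)."""
    ends = []                                 # min-heap of (finish_time, machine)
    assignment = {}
    n_machines = 0
    for name, start, end in sorted(requests, key=lambda r: r[1]):
        if ends and ends[0][0] <= start:      # earliest machine is already free
            _, machine = heapq.heappop(ends)
        else:                                 # all busy: open a new machine
            machine = n_machines
            n_machines += 1
        assignment[name] = machine
        heapq.heappush(ends, (end, machine))
    return n_machines, assignment
```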
It must be in range from 1 to 1000\")\n\n return threads_count\n\n\ndef urls_check(url):\n \"\"\"Check the URL for the correctness\n\n :param url: str.\n :return: str.\n\n \"\"\"\n if url.count(\"/\") != SLASH_COUNT:\n raise ValueError(\"Invalid URL\")\n\n return url\n\n\ndef parse_arguments():\n \"\"\"\"Parse command line arguments.\n\n :return: namespace with parsed args.\n\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Download articles from the web site\")\n\n parser.add_argument(\n \"urls\",\n nargs=\"*\",\n type=urls_check,\n help=\"List of URLS to download\",\n )\n\n parser.add_argument(\n \"--all\",\n action=\"store_true\",\n dest=\"all\",\n help=\"Download all unloaded articles (optional)\",\n )\n\n parser.add_argument(\n \"--t\",\n action=\"store\",\n dest=\"thread_count\",\n help=\"Count of threads (optional)\",\n type=get_and_check_thread_n,\n default=DEFAULT_THREAD_COUNT,\n )\n\n parser.add_argument(\n \"--log\",\n action=\"store\",\n dest=\"log\",\n help=\"Level of log message: [all, error, critical]. Default: error\",\n choices=[\"all\", \"error\", \"critical\"],\n type=str,\n default=\"error\",\n )\n\n args = parser.parse_args()\n\n if not args.all:\n if len(args.urls) == 0:\n raise ValueError(\"Invalid URLs. Type the list of the URLs or use option '--all'\")\n\n else:\n args.urls = []\n\n return args\n\n\nclass Downloader:\n def __init__(self, threads_count, database, urls=list()):\n \"\"\"Create an object of class Downloader.\n\n :param threads_count: int - number of threads.\n :param database: database.Database.\n :param urls: list.\n\n \"\"\"\n self._database = database\n self._executor = ThreadPoolExecutor(threads_count)\n self._queue = url_iter(database)\n self._urls = urls\n\n def start(self):\n \"\"\"Starts download.\n\n \"\"\"\n with self._executor as executor:\n try:\n for _ in executor.map(self.download_page, self._urls or self._queue):\n pass\n\n except KeyboardInterrupt:\n log.debug(\"Stop download...\")\n self.stop()\n\n def stop(self):\n \"\"\"Stop download.\n\n \"\"\"\n self._executor.shutdown()\n\n def download_page(self, url):\n \"\"\"Download html page.\n\n :param url: str.\n\n \"\"\"\n page_text = page_get(SITE_URL + url).content.decode()\n self._database.add_html(url, page_text)\n self._database.delete_from_download_queue(url)\n\n\ndef main():\n try:\n args = parse_arguments()\n log.config(args.log)\n log.debug(\"Start download...\")\n Downloader(args.thread_count, Database(), args.urls).start()\n\n except Exception:\n log.critical(\"%s: %s\\n%s\", *sys.exc_info())\n\n finally:\n log.debug(\"Shutdown download\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"crawler/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"473803120","text":"#!/usr/bin/env python\nimport re\nimport operator\n\n\nclass Group:\n def __init__(self, n_units: int, hp: int, weak: set,\n immune: set, attak: int, attak_type: str, initiative: int):\n self.n_units = n_units\n self.hp = hp\n self.weak = weak\n self.immune = immune\n self.attak = attak\n self.attak_type = attak_type\n self.initiative = initiative\n self.id = None\n\n @classmethod\n def from_str(cls, line):\n regex = r'^(\\d+) units each with (\\d+) hit points (\\(.*\\) )*with an attack that does (\\d+) (\\w+) damage at initiative (\\d+)$'\n matchhes = re.match(regex, line)\n _n_units, hp, specs, attak, attak_type, initiative = matchhes.groups()\n n_units, hp, attak, 
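The validators above (`get_and_check_thread_n`, `urls_check`) raise plain `ValueError`, which `argparse` reports as a generic "invalid value" message. Raising `argparse.ArgumentTypeError` instead surfaces the custom text verbatim in the usage error; a minimal sketch of the same `--t` thread-count check:

```python
import argparse


def thread_count(value):
    """argparse type callable: validated int in 1..1000."""
    n = int(value)
    if not 1 <= n <= 1000:
        raise argparse.ArgumentTypeError(
            'thread count must be in range 1..1000, got %d' % n)
    return n


parser = argparse.ArgumentParser()
parser.add_argument('--t', dest='thread_count', type=thread_count, default=4)
```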
initiative = map(\n int, (_n_units, hp, attak, initiative))\n weak = set()\n immune = set()\n if specs:\n specs = specs.replace('(', '').replace(')', '')\n for s in specs.split('; '):\n if s.startswith('weak'):\n for t in s[7:].split(', '):\n weak.add(t.strip())\n elif s.startswith('immune'):\n for t in s[9:].split(', '):\n immune.add(t.strip())\n else:\n raise Exception('Wrong spec!')\n\n return cls(n_units, hp, weak, immune, attak, attak_type, initiative)\n\n def __str__(self):\n return f'{self.__class__.__name__} contains {self.n_units} units'\n\n def __repr__(self):\n return f'{self.__class__.__name__}{self.n_units, self.hp, self.weak, self.immune, self.attak, self.attak_type, self.initiative }'\n\n def __lt__(self, other):\n return (self.effective_power, self.initiative) < (other.effective_power, other.initiative)\n\n @property\n def effective_power(self) -> int:\n return self.n_units * self.attak\n\n def estimate_damage(self, other) -> int:\n if self.attak_type in other.immune:\n return 0\n elif self.attak_type in other.weak:\n return self.effective_power * 2\n else:\n return self.effective_power\n\n\nclass Immune(Group):\n pass\n\n\nclass Infection(Group):\n pass\n\n\ndef madness(path, boost):\n with open(path) as f:\n immune_raw, infection_raw = f.read().split('\\n\\n')\n immune_raw = immune_raw[1:]\n infection_raw = infection_raw[1:]\n immune = [Immune.from_str(line) for line in immune_raw.splitlines()[1:]]\n \n for g in immune:\n g.attak += boost\n\n infection = [Infection.from_str(line)\n for line in infection_raw.splitlines()[1:]]\n\n groups = infection + immune\n\n for idx, group in enumerate(groups):\n group.id = idx\n\n def alive(groups):\n immune = sum(1 for group in groups\n if not isinstance(group, Immune) and group.hp > 0)\n\n infection = sum(1 for group in groups\n if not isinstance(group, Infection) and group.hp > 0)\n return immune > 0 and infection > 0\n\n while alive(groups):\n # print('New round')\n choosen = set()\n pairs = []\n for attaker in sorted(groups, reverse=True):\n enemies = (group for group in groups\n if not isinstance(group, attaker.__class__)\n and group.id not in choosen)\n targets = []\n for enemy in enemies:\n damage = attaker.estimate_damage(enemy)\n if damage > 0:\n targets.append([damage, enemy.effective_power, enemy.initiative, enemy])\n try:\n target = max(targets)[-1]\n pairs.append([attaker, target])\n choosen.add(target.id)\n except ValueError:\n pass\n pairs.sort(reverse=True, key=lambda p: p[0].initiative)\n \n \n total_kills = 0\n for attaker, target in pairs:\n # print(f'{repr(attaker)} attaks {repr(target)}', end='')\n killed = attaker.estimate_damage(target) // target.hp\n if target.n_units < killed:\n killed = target.n_units\n target.n_units -= killed\n total_kills += killed\n # print(f' kills {killed} units')\n if total_kills == 0:\n return -float('inf')\n groups = [g for g in groups if g.n_units > 0]\n \n if all(isinstance(g, Immune) for g in groups):\n return sum(g.n_units for g in groups)\n return -sum(g.n_units for g in groups)\n\n\nassert madness('Day 24/example.0.txt',1570) == 51\n\nboost = 0\nwhile True:\n res = madness('Day 24/input.txt', boost)\n print(boost, res)\n if res > 0:\n print(boost)\n break\n else:\n boost += 1 ","sub_path":"2018/Day 25/Part 2.py","file_name":"Part 2.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"41958962","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport LatticeDefinitions as 
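`Group.from_str` above (note the `matchhes` typo) parses each line with one positional regex and then slices the weak/immune spec by hand with magic offsets (`s[7:]`, `s[9:]`). An equivalent hedged sketch using named groups and explicit prefixes, mirroring the pattern in the record:

```python
import re

GROUP_RE = re.compile(
    r'^(?P<units>\d+) units each with (?P<hp>\d+) hit points '
    r'(?:\((?P<specs>.*)\) )?with an attack that does '
    r'(?P<attack>\d+) (?P<attack_type>\w+) damage at initiative (?P<init>\d+)$')


def parse_specs(specs):
    """Split '(weak to a, b; immune to c)' content into two sets."""
    weak, immune = set(), set()
    for part in (specs or '').split('; '):
        if part.startswith('weak to '):
            weak.update(t.strip() for t in part[len('weak to '):].split(', '))
        elif part.startswith('immune to '):
            immune.update(t.strip() for t in part[len('immune to '):].split(', '))
    return weak, immune
```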
ld\nimport GeometryFunctions as gf\nimport GeneralLattice as gl\nimport LAMMPSTool as LT\nimport sys\nfrom mpl_toolkits.mplot3d import Axes3D \nimport copy as cp\n\n\n# strDirectory = str(sys.argv[1])\n# intSigma = int(sys.argv[2])\n# fltFactor = float(sys.argv[4])\n# lstAxis = eval(str(sys.argv[3]))\n# arrAxis = np.array(lstAxis)\narrAxis = np.array([0,0,1])\nobjSigma = gl.SigmaCell(np.array([5,1,1]),ld.FCCCell)\nobjSigma.MakeCSLCell(9)\nfltAngle1, fltAngle2 = objSigma.GetLatticeRotations()\nprint((fltAngle1-fltAngle2)*180/np.pi)\narrSigmaBasis = objSigma.GetBasisVectors()\n\nintMax = 60\nintHeight = 5\ns1 = np.linalg.norm(arrSigmaBasis, axis=1)[0]\ns2 = np.linalg.norm(arrSigmaBasis, axis=1)[1]\ns3 = np.linalg.norm(arrSigmaBasis, axis=1)[2]\na = 4.05 ##lattice parameter\nx = np.round(intMax/s1,0)\nif np.mod(x,2) !=0: #ensure an even number of CSL unit cells in the x direction\n x += 1\ny = np.round(intMax/s2,0)\nif np.mod(y,2) !=0: \n y += 1\nw = x*a\nl = y*a\nh = a*np.round(intHeight/s3,0)\narrX = w*arrSigmaBasis[0]\narrXY = l*arrSigmaBasis[1]\nz = h*arrSigmaBasis[2]\nif np.all(arrAxis == np.array([0,0,1])):\n arrBasisVectors = gf.StandardBasisVectors(3)\nelse:\n fltAngle3, arrRotation = gf.FindRotationVectorAndAngle(arrAxis,np.array([0,0,1]))\n arrBasisVectors = gf.RotateVectors(fltAngle3, arrRotation,gf.StandardBasisVectors(3))\narrLatticeParameters= np.array([a,a,a])\nfltDatum = -3.36\narrShift = a*(0.5-np.random.ranf())*arrSigmaBasis[1]+a*(0.5-np.random.ranf())*arrSigmaBasis[2]\narrCentre = 0.5*(arrX+arrXY+z) + arrShift\n#np.savetxt(strDirectory + 'GrainCentre.txt',arrCentre)\nstrConstraint = str(arrXY[0])+ '*(y -' + str(arrCentre[1]) + ') - ' + str(arrXY[1]) + '*(x -' + str(arrCentre[0]) + ')' \nMySimulationCell = gl.SimulationCell(np.array([arrX,arrXY, z])) \nobjFullCell1 = gl.ExtrudedParallelogram(arrX,arrXY,s3*h, gf.RotateVectors(fltAngle1,z,arrBasisVectors), ld.FCCCell, arrLatticeParameters,np.zeros(3))\nobjFullCell2 = gl.ExtrudedParallelogram(arrX,arrXY, s3*h, gf.RotateVectors(fltAngle2,z,arrBasisVectors), ld.FCCCell, arrLatticeParameters,np.zeros(3))\nobjFullCell3 = gl.ExtrudedParallelogram(arrX,arrXY, s3*h, gf.RotateVectors(np.mean([fltAngle1,fltAngle2]),z,arrBasisVectors), ld.FCCCell, arrLatticeParameters,arrShift)\nobjFullCell1.ApplyLatticeShift(np.array([1.2,-4.5,-3.0]))\nobjFullCell1.SetPeriodicity(['n','p','p'])\nobjFullCell2.SetPeriodicity(['n','p','p'])\nobjFullCell3.SetPeriodicity(['n','n','n'])\nobjLeftCell1 = cp.deepcopy(objFullCell1)\nobjLeftCell1.ApplyGeneralConstraint(gf.InvertRegion(strConstraint))\nobjRightCell2 = cp.deepcopy(objFullCell2)\nobjRightCell2.ApplyGeneralConstraint(strConstraint)\n\nfltDistance = objFullCell1.GetNearestNeighbourDistance()\n\nobjBaseLeft = cp.deepcopy(objLeftCell1)\nobjBaseRight = cp.deepcopy(objRightCell2)\nMySimulationCell.AddGrain(objBaseLeft)\nMySimulationCell.AddGrain(objBaseRight)\nMySimulationCell.RemoveAtomsOnOpenBoundaries()\nMySimulationCell.RemovePeriodicDuplicates()\nMySimulationCell.RemoveTooCloseAtoms(fltDistance*fltFactor, ['1','2'])\nMySimulationCell.WrapAllAtomsIntoSimulationCell()\nMySimulationCell.WriteLAMMPSDataFile(strDirectory + 'read0.dat')\nMySimulationCell.RemoveAllGrains()\nMySimulationCell.RemoveNonGrainAtomPositons()\nfIn = open(strDirectory + 'TemplateMin.in', 'rt')\nfData = fIn.read()\nfData = fData.replace('read.dat', 'read0.dat')\nfData = fData.replace('read.dmp', 'read0.dmp')\nfData = fData.replace('logfile', 'logfile0')\nfData = fData.replace('read.lst', 'read0.lst') \nfIn.close()\nfIn = 
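Note that the grain-growth record above comments out its `sys.argv` handling yet still references `strDirectory` and `fltFactor` later, so as written it stops with a `NameError`; those two assignments need restoring (or hard-coding) before the template step runs. The `TemplateMin.in` rewriting itself chains four `str.replace` calls over the whole file. A hedged sketch of the same step with `string.Template`, assuming the template files were switched to `$name`-style placeholders (the real files use literal tokens like `read.dat`):

```python
from string import Template


def write_min_input(template_path, out_path, index):
    """Render one TemplateMin file for iteration `index` (assumed $-placeholders)."""
    with open(template_path) as f:
        template = Template(f.read())
    with open(out_path, 'w') as f:
        f.write(template.substitute(dat='read%d.dat' % index,
                                    dmp='read%d.dmp' % index,
                                    lst='read%d.lst' % index,
                                    log='logfile%d' % index))
```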
open(strDirectory + 'TemplateMin0.in', 'wt')\nfIn.write(fData)\nfIn.close()\n\n\nfor j in range(1,np.round(intMax/4,0).astype('int')+1):\n r = a*j\n strCylinder = gf.ParseConic([arrCentre[0],arrCentre[1]],[r,r],[2,2])\n objCylinder3 = cp.deepcopy(objFullCell3)\n objCylinder3.ApplyGeneralConstraint(strCylinder)\n objLeftChopped1 = cp.deepcopy(objLeftCell1)\n objLeftChopped1.ApplyGeneralConstraint(gf.InvertRegion(strCylinder))\n objRightChopped2 = cp.deepcopy(objRightCell2)\n objRightChopped2.ApplyGeneralConstraint(gf.InvertRegion(strCylinder))\n MySimulationCell.AddGrain(objLeftChopped1)\n MySimulationCell.AddGrain(objRightChopped2)\n MySimulationCell.AddGrain(objCylinder3)\n MySimulationCell.RemoveAtomsOnOpenBoundaries()\n MySimulationCell.RemovePeriodicDuplicates()\n MySimulationCell.RemoveTooCloseAtoms(fltDistance*fltFactor, ['1','2'])\n MySimulationCell.RemoveTooCloseAtoms(0.2*fltDistance,['1','3'])\n # MySimulationCell.RemoveTooCloseAtoms(2,['3','1'])\n MySimulationCell.WrapAllAtomsIntoSimulationCell()\n MySimulationCell.SetFileHeader('Grain centre is ' +str(arrCentre))\n MySimulationCell.WriteLAMMPSDataFile(strDirectory + 'read' + str(j) + '.dat')\n #arrPoints = MySimulationCell.GetDuplicatePoints()\n MySimulationCell.RemoveAllGrains()\n MySimulationCell.RemoveNonGrainAtomPositons()\n fIn = open(strDirectory + 'TemplateMin.in', 'rt')\n fData = fIn.read()\n fData = fData.replace('read.dat', 'read' + str(j) + '.dat')\n fData = fData.replace('read.dmp', 'read' + str(j) + '.dmp')\n fData = fData.replace('read.lst', 'read' + str(j) + '.lst')\n fData = fData.replace('logfile', 'logfile' + str(j))\n fIn.close()\n fIn = open(strDirectory + 'TemplateMin' + str(j) + '.in', 'wt')\n fIn.write(fData)\n fIn.close()\n #ax.scatter(*tuple(zip(*arrPoints)))\nplt.show()","sub_path":"TJEnergyGrowthCylinderAll.py","file_name":"TJEnergyGrowthCylinderAll.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"575027134","text":"import pymysql\n\nfrom Spider.checker import Checker\nfrom config import *\n\n\"\"\"\n表结构:\n+--------+-------------+------+-----+-------------------+----------------+\n| Field | Type | Null | Key | Default | Extra |\n+--------+-------------+------+-----+-------------------+----------------+\n| id | int(11) | NO | PRI | NULL | auto_increment |\n| domain | varchar(45) | NO | | NULL | |\n| price | varchar(45) | NO | | NULL | |\n| date | varchar(45) | NO | | NULL | |\n| status | int(11) | NO | | 3 | |\n| update | timestamp | NO | | CURRENT_TIMESTAMP | |\n+--------+-------------+------+-----+-------------------+----------------+\n\"\"\"\n\n\nclass DomainDB(object):\n def __init__(self, host=MYSQL_HOST, port=MYSQL_PORT, user=MYSQL_USER, password=MYSQL_PASSWORD, db=MYSQL_DB):\n \"\"\"\n MYSQL初始化\n :param host: 地址\n :param port: 端口\n :param user: 用户名\n :param password: 密码\n :param db: 数据库\n \"\"\"\n try:\n self.db = pymysql.connect(\n host=host,\n port=port,\n user=user,\n passwd=password,\n db=db,\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor\n )\n except pymysql.MySQLError as e:\n print(e.args)\n\n def __del__(self):\n self.db.close()\n\n def insert(self, data):\n \"\"\"\n 向数据库中插入数据\n :param data: 需要插入的数据(dict)\n :return:\n \"\"\"\n # 检查重复域名\n with self.db.cursor() as cursor:\n sql_query = 'select domain from domain where domain=%s'\n cursor.execute(sql_query, data['domain'])\n self.db.commit()\n duplicate = cursor.fetchone()\n # 如果没有重复\n if not duplicate:\n\n # 获取域名状态\n c = 
Checker()\n data = c.check_status(data)\n if not data:\n return False # 获取失败则直接返回\n\n # 构造sql语句\n keys = ', '.join(data.keys())\n values = ', '.join(['%s'] * len(data))\n # 插入数据\n try:\n with self.db.cursor() as cursor:\n sql_query = 'insert into domain (%s) values (%s)' % (keys, values)\n cursor.execute(sql_query, tuple(data.values()))\n self.db.commit()\n except pymysql.MySQLError as e:\n print(e.args)\n return False\n\n else:\n # 遇到重复域名则更新数据\n condition = 'set'\n for key, value in data.items():\n condition += \" {0}='{1}',\".format(key, value)\n condition = condition[:-1]\n try:\n with self.db.cursor() as cursor:\n sql_query = \"update domain %s where domain='%s'\" % (condition, data['domain'])\n cursor.execute(sql_query)\n self.db.commit()\n return True\n except pymysql.MySQLError as e:\n print(e.args)\n return False\n\n def get_domain(self, where_condition, row_count, offset):\n \"\"\"\n 获取数据\n :param where_condition: 筛选规则\n :param row_count: 起始行\n :param offset: 偏移量\n :return: 数据总数和数据内容\n \"\"\"\n with self.db.cursor() as cursor:\n # 获取总数\n sql_query = \"select count(*) from domain %s\" % where_condition\n cursor.execute(sql_query)\n count = cursor.fetchone()['count(*)']\n # 获取数据\n sql_query = \"select * from domain %s order by id desc limit %s, %s\" % (where_condition, row_count, offset)\n cursor.execute(sql_query)\n data_list = cursor.fetchall()\n\n for data in data_list:\n # 把update格式化为字符串\n data['update'] = data['update'].strftime('%Y-%m-%d %H:%M:%S')\n return count, data_list\n\n def update(self, data):\n \"\"\"\n 更新数据\n :param data:\n :return:\n \"\"\"\n condition = 'set'\n del data['update'] # update字段会自动更新, 不能手动修改\n for key, value in data.items():\n condition += \" {0}='{1}',\".format(key, value)\n condition = condition[:-1]\n try:\n with self.db.cursor() as cursor:\n sql_query = \"update domain %s where domain='%s'\" % (condition, data['domain'])\n cursor.execute(sql_query)\n self.db.commit()\n return True\n except pymysql.MySQLError as e:\n print(e.args)\n return False\n","sub_path":"Spider/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"579420909","text":"from peewee import *\nfrom flask import Flask, request, session, g, redirect, url_for, abort, \\\n\trender_template, flash, current_app\nfrom models import *\n\n\napp = Flask(__name__,template_folder=\"templates\", static_url_path=\"/static\",\n static_folder=\"static\")\napp.config.from_object(__name__)\n\n\ndef init_db():\n\tConnectDatabase.db.connect()\n\tConnectDatabase.db.drop_tables([Story], safe=True)\n\tConnectDatabase.db.create_tables([Story], safe=True)\n\n\n@app.route('/')\n@app.route('/list')\ndef show_stories():\n\tstories = Story.select().order_by(Story.id)\n\treturn render_template('list.html', stories=stories)\n\n\n@app.route('/story', methods=['GET'])\ndef add_new_story():\n\tstory = []\n\treturn render_template('form.html', story=story, header='Create story', button='Create')\n\n@app.route('/story', methods=['POST'])\ndef save_new_story():\n\tnew_record = Story.create(title=request.form['title'],\n\t\t\t\t\t\t\t text=request.form['text'],\n\t\t\t\t\t\t\t criteria=request.form['criteria'],\n\t\t\t\t\t\t\t business_value=request.form['business_value'],\n\t\t\t\t\t\t\t estimation=request.form['estimation'],\n\t\t\t\t\t\t\t status=request.form['status'])\n\treturn redirect(url_for('show_stories'))\n\n@app.route('/story/<story_id>', methods=['GET'])\ndef edit_story(story_id):\n\tstory = 
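`DomainDB.update` and the duplicate branch of `insert` above splice values straight into the SQL with `str.format`, which both breaks on quotes in the data and is injectable. A hedged sketch of the same row update with every value bound through a `%s` placeholder — column names still come from the trusted dict keys, as in the original:

```python
def update_domain(db, data):
    """Update one row of `domain`; values are bound, never interpolated."""
    fields = [k for k in data if k not in ('domain', 'update')]  # `update` is auto
    assignments = ', '.join('`{}`=%s'.format(k) for k in fields)
    sql = 'update domain set {} where domain=%s'.format(assignments)
    with db.cursor() as cursor:
        cursor.execute(sql, [data[k] for k in fields] + [data['domain']])
    db.commit()
```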
Story.get(Story.id==story_id)\n\treturn render_template(\"form.html\", story=story, header=\"Edit story\", button=\"Update\")\n\n\n@app.route('/story/<story_id>', methods=['POST'])\ndef update_story(story_id):\n\tedit_record = Story.update(title=request.form['title'],\n\t\t\t\t\t\t\t text=request.form['text'],\n\t\t\t\t\t\t\t criteria=request.form['criteria'],\n\t\t\t\t\t\t\t business_value=request.form['business_value'],\n\t\t\t\t\t\t\t estimation=request.form['estimation'],\n\t\t\t\t\t\t\t status=request.form['status']).where(Story.id==story_id)\n\treturn redirect(url_for('show_stories'))\n\n@app.route('/delete/<story_id>', methods=['POST'])\ndef delete_story(story_id):\n\tstory = Story.select().where(Story.id==story_id).get()\n\tstory.delete_instance()\n\treturn redirect(url_for('show_stories'))\n\nif __name__=='__main__':\n\tinit_db()\n\tapp.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"225392845","text":"import torch\r\nimport argparse, time\r\nimport numpy as np\r\n\r\nimport dgl\r\nimport networkx as nx\r\nimport scipy.sparse as sp\r\nfrom dgl import DGLGraph\r\n\r\n\r\nclass WebKBset(object):\r\n r\"\"\"Cora citation network dataset. Nodes mean author and edges mean citation\r\n relationships.\r\n \"\"\"\r\n\r\n def __init__(self, name):\r\n self.name = name\r\n self._load()\r\n\r\n def _load(self):\r\n idx_features_labels = np.genfromtxt(\"data/{}.content\".format(self.name), dtype=np.dtype(str))\r\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\r\n labels = self._encode_onehot(idx_features_labels[:, -1])\r\n self.num_labels = labels.shape[1]\r\n # build graph\r\n idx = np.array(idx_features_labels[:, 0], dtype=str)\r\n idx_map = {j: i for i, j in enumerate(idx)}\r\n edges_unordered = np.genfromtxt(\"data/{}.cites\".format(self.name), dtype=np.dtype(str))\r\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\r\n dtype=np.int32).reshape(edges_unordered.shape)\r\n adj = sp.coo_matrix((np.ones(edges.shape[0]),\r\n (edges[:, 0], edges[:, 1])),\r\n shape=(labels.shape[0], labels.shape[0]),\r\n dtype=np.float32)\r\n # build symmetric adjacency matrix\r\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\r\n self.graph = nx.from_scipy_sparse_matrix(adj, create_using=nx.DiGraph())\r\n\r\n features = self._normalize(features)\r\n self.features = np.array(features.todense())\r\n self.labels = np.where(labels)[1]\r\n self.train_mask = self._sample_mask(range(int(0.5 * labels.shape[0])), labels.shape[0])\r\n\r\n self.val_mask = self._sample_mask(range(int(0.5 * labels.shape[0]) + 1, int(0.8 * labels.shape[0])),\r\n labels.shape[0])\r\n\r\n self.test_mask = self._sample_mask(range(int(0.8 * labels.shape[0]) + 1, int(labels.shape[0])), labels.shape[0])\r\n\r\n def __getitem__(self, idx):\r\n assert idx == 0, \"This dataset has only one graph\"\r\n g = DGLGraph(self.graph)\r\n g.ndata['train_mask'] = self.train_mask\r\n g.ndata['val_mask'] = self.val_mask\r\n g.ndata['test_mask'] = self.test_mask\r\n g.ndata['label'] = self.labels\r\n g.ndata['feat'] = self.features\r\n return g\r\n\r\n def __len__(self):\r\n return 1\r\n\r\n @staticmethod\r\n def _sample_mask(idx, l):\r\n \"\"\"Create mask.\"\"\"\r\n mask = np.zeros(l)\r\n mask[idx] = 1\r\n return mask\r\n\r\n @staticmethod\r\n def _normalize(mx):\r\n \"\"\"Row-normalize sparse matrix\"\"\"\r\n rowsum = 
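One subtle bug in the Flask record above: in peewee, `Model.update(...).where(...)` only builds a query object, and `update_story` never calls `.execute()`, so the edit silently never reaches the database. A corrected sketch of that view, with the field names exactly as in the `Story` model above:

```python
@app.route('/story/<story_id>', methods=['POST'])
def update_story(story_id):
    (Story
     .update(title=request.form['title'],
             text=request.form['text'],
             criteria=request.form['criteria'],
             business_value=request.form['business_value'],
             estimation=request.form['estimation'],
             status=request.form['status'])
     .where(Story.id == story_id)
     .execute())                 # without this, nothing is written
    return redirect(url_for('show_stories'))
```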
np.array(mx.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n mx = r_mat_inv.dot(mx)\r\n return mx\r\n\r\n @staticmethod\r\n def _encode_onehot(labels):\r\n classes = list(sorted(set(labels)))\r\n classes_dict = {c: np.identity(len(classes))[i, :] for i, c in\r\n enumerate(classes)}\r\n labels_onehot = np.array(list(map(classes_dict.get, labels)),\r\n dtype=np.int32)\r\n return labels_onehot\r\n\r\n @staticmethod\r\n def evaluate(model, graph, features, labels, mask):\r\n model.eval()\r\n with torch.no_grad():\r\n logits = model(graph, features)\r\n logits = logits[mask]\r\n labels = labels[mask]\r\n _, indices = torch.max(logits, dim=1)\r\n correct = torch.sum(indices == labels)\r\n return correct.item() * 1.0 / len(labels)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # filename = \"cornell\"\r\n filename = \"texas\"\r\n # filename=\"washington\"\r\n # filename=\"wisconsin\"\r\n data = WebKBset(filename)\r\n use_intermediate_embedding = True\r\n use_linear_comb = False\r\n\r\n graph = dgl.DGLGraph(data.graph)\r\n features = torch.Tensor(data.features)\r\n train_mask = torch.LongTensor(data.train_mask).bool()\r\n test_mask = torch.LongTensor(data.test_mask).bool()\r\n val_mask = torch.LongTensor(data.val_mask).bool()\r\n labels = torch.LongTensor(data.labels)\r\n num_labels = data.num_labels\r\n","sub_path":"data/webkb.py","file_name":"webkb.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"172776684","text":"__docformat__ = \"numpy\"\n\nimport argparse\nimport os\nimport pandas as pd\nfrom prompt_toolkit.completion import NestedCompleter\n\nfrom gamestonk_terminal import feature_flags as gtff\nfrom gamestonk_terminal.helper_funcs import get_flair\nfrom gamestonk_terminal.menu import session\n\nfrom gamestonk_terminal.portfolio.portfolio_analysis import portfolio_parser\n\n\nclass PortfolioController:\n \"\"\"Portfolio Controller\"\"\"\n\n CHOICES = [\n \"cls\",\n \"?\",\n \"help\",\n \"q\",\n \"quit\",\n \"load\",\n \"group\",\n ]\n\n def __init__(self):\n self.pa_parser = argparse.ArgumentParser(add_help=False, prog=\"pa\")\n self.pa_parser.add_argument(\"cmd\", choices=self.CHOICES)\n self.portfolio_name = \"\"\n self.portfolio = pd.DataFrame()\n\n def print_help(self):\n \"\"\"Print help\"\"\"\n print(\n \"https://github.com/GamestonkTerminal/GamestonkTerminal/tree/main/gamestonk_terminal/portfolio_analysis\"\n )\n print(\"\\nPortfolio Analysis:\")\n print(\" cls clear screen\")\n print(\" ?/help show this menu again\")\n print(\" q quit this menu, and shows back to main menu\")\n print(\" quit quit to abandon program\")\n print(\"\")\n print(\" load load portfolio from csv file\")\n print(\"\")\n if self.portfolio_name:\n print(f\"Portfolio: {self.portfolio_name}\")\n print(\"\")\n print(\" group view holdings by a user input group\")\n print(\"\")\n\n def switch(self, an_input: str):\n \"\"\"Process and dispatch input\n\n Returns\n -------\n True, False or None\n False - quit the menu\n True - quit the program\n None - continue in the menu\n \"\"\"\n\n # Empty command\n if not an_input:\n print(\"\")\n return None\n\n (known_args, other_args) = self.pa_parser.parse_known_args(an_input.split())\n\n # Help menu again\n if known_args.cmd == \"?\":\n self.print_help()\n return None\n\n # Clear screen\n if known_args.cmd == \"cls\":\n os.system(\"cls||clear\")\n return None\n\n return getattr(\n self, \"call_\" + 
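`PortfolioController.switch` above resolves commands dynamically; the `getattr` call that completes just below falls back to `lambda: "Command not recognized!"`, which takes no parameters yet would be invoked with `other_args` — a `TypeError` if ever reached (the `choices=` constraint on the parser normally blocks unknown commands first). A sketch of the same dispatch pattern with an explicit, safe fallback:

```python
class Dispatcher:
    """Same getattr-based dispatch as PortfolioController.switch above."""

    def switch(self, cmd, other_args):
        handler = getattr(self, 'call_' + cmd, None)
        if handler is None:              # explicit fallback instead of a lambda
            print('Command not recognized!')
            return None
        return handler(other_args)

    def call_q(self, _):
        return False
```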
known_args.cmd, lambda: \"Command not recognized!\"\n )(other_args)\n\n def call_help(self, _):\n \"\"\"Process Help command\"\"\"\n self.print_help()\n\n def call_q(self, _):\n \"\"\"Process Q command - quit the menu\"\"\"\n return False\n\n def call_quit(self, _):\n \"\"\"Process Quit command - quit the program\"\"\"\n return True\n\n def call_load(self, other_args):\n \"\"\"Process csv command\"\"\"\n self.portfolio_name, self.portfolio = portfolio_parser.load_csv_portfolio(\n other_args\n )\n\n if self.portfolio_name:\n print(f\"Successfully loaded: {self.portfolio_name}\\n\")\n\n def call_group(self, other_args):\n \"\"\"Process group command\"\"\"\n if self.portfolio_name:\n portfolio_parser.breakdown_by_group(self.portfolio, other_args)\n else:\n print(\"Please load a portfolio\")\n\n\ndef menu():\n \"\"\"Portfolio Analysis Menu\"\"\"\n pa_controller = PortfolioController()\n pa_controller.print_help()\n\n while True:\n # Get input command from user\n if session and gtff.USE_PROMPT_TOOLKIT:\n completer = NestedCompleter.from_nested_dict(\n {c: None for c in pa_controller.CHOICES}\n )\n an_input = session.prompt(\n f\"{get_flair()} (portfolio)>(pa)> \",\n completer=completer,\n )\n else:\n an_input = input(f\"{get_flair()} (portfolio)>(pa)> \")\n\n try:\n process_input = pa_controller.switch(an_input)\n\n if process_input is not None:\n return process_input\n\n except SystemExit:\n print(\"The command selected doesn't exist\\n\")\n continue\n","sub_path":"gamestonk_terminal/portfolio/portfolio_analysis/pa_controller.py","file_name":"pa_controller.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"152428312","text":"def analisador(grammar, action, input, print_table = True):\n\n stack = []\n pos = 0\n \n if print_table:\n print(\"|{:>20}|{:>20}|\".format(\"input\", \"stack\"))\n print(\"|{}|{}|\".format(\"-\"*20, \"-\"*20))\n print(\"|{:>20}|{:>20}|\".format(input[pos:], \"\".join([ str(s) for s in stack])))\n \n \n for i in range(len(action)):\n if actions[i] == \"s\":\n stack.append( input[pos])\n pos = pos + 1\n else:\n rule_index = action[i][1]\n rhs = grammar[rule_index][1]\n lhs = grammar[rule_index][0]\n \n for i in range(len(rhs)-1, -1, -1):\n \n if rhs[i] == stack[-1]:\n stack.pop()\n else:\n print(\"wrong parser\")\n stack.append(lhs)\n if print_table:\n print(\"|{:>20}|{:>20}|\".format(input[pos:], \"\".join([ str(s) for s in stack])))\n\n\ngrammar = [ \n (\"E\", \"E+T\" ), #rule 0 \n (\"E\", \"T\" ), #rule 1\n (\"T\", \"T*F\" ), #rule 2 \n (\"T\", \"F\" ), #rule 3 \n (\"F\", \"(E)\" ), #rule 4 \n (\"F\", \"a\" ), #rule 5 \n]\n\ninput = \"a*(a+a)$\"\n\nactions = [\"s\", (\"r\", 5), (\"r\", 3), \"s\", \"s\", \"s\", (\"r\", 5), (\"r\", 3), (\"r\", 1), \"s\", \"s\", (\"r\", 5), (\"r\", 3), (\"r\", 0), \"s\", (\"r\", 4), (\"r\", 2), (\"r\", 1)]\n\nanalisador(grammar, actions, input )\n\n","sub_path":"Semestre_3/linguagem/Analisador Sintático Ascendente II/quest.py","file_name":"quest.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"156500798","text":"# -*- coding: utf-8 -*-\n#\n# MainController.py\n# Imagr\n#\n# Created by Graham Gilbert on 04/04/2015.\n# Copyright (c) 2015 Graham Gilbert. 
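The `analisador` in the `quest.py` record above has a few slips: it reads the global `actions` instead of its `action` parameter, reuses `i` for both loops, and on a symbol mismatch prints "wrong parser" but keeps going with a corrupted stack. A hedged, corrected sketch of the same shift/reduce replay for the grammar defined in that record:

```python
def replay(grammar, actions, tokens):
    """Replay a shift/reduce action sequence over the input token string."""
    stack, pos = [], 0
    print('|{:>20}|{:>20}|'.format(tokens[pos:], ''.join(stack)))
    for act in actions:
        if act == 's':                        # shift the next input symbol
            stack.append(tokens[pos])
            pos += 1
        else:                                 # ('r', k): reduce by rule k
            lhs, rhs = grammar[act[1]]
            assert stack[-len(rhs):] == list(rhs), 'wrong parse'
            del stack[-len(rhs):]             # pop the whole right-hand side
            stack.append(lhs)
        print('|{:>20}|{:>20}|'.format(tokens[pos:], ''.join(stack)))
```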
All rights reserved.\n#\n\nimport objc\nimport FoundationPlist\nimport os\nfrom SystemConfiguration import *\nfrom Foundation import *\nfrom AppKit import *\nfrom Cocoa import *\nimport subprocess\nimport sys\nimport macdisk\nimport urllib2\nimport Utils\nimport PyObjCTools\n\nclass MainController(NSObject):\n password = objc.IBOutlet()\n passwordLabel = objc.IBOutlet()\n loginLabel = objc.IBOutlet()\n loginButton = objc.IBOutlet()\n errorField = objc.IBOutlet()\n mainWindow = objc.IBOutlet()\n\n mainView = objc.IBOutlet()\n loginView = objc.IBOutlet()\n\n progressPanel = objc.IBOutlet()\n progressIndicator = objc.IBOutlet()\n progressText = objc.IBOutlet()\n\n startUpDiskPanel = objc.IBOutlet()\n startUpDiskText = objc.IBOutlet()\n startupDiskCancelButton = objc.IBOutlet()\n startupDiskDropdown = objc.IBOutlet()\n startupDiskRestartButton = objc.IBOutlet()\n\n chooseTargetPanel = objc.IBOutlet()\n chooseTargetDropDown = objc.IBOutlet()\n chooseTargetCancelButton = objc.IBOutlet()\n chooseTargetPanelSelectTarget = objc.IBOutlet()\n\n cancelAndRestartButton = objc.IBOutlet()\n chooseWorkflowDropDown = objc.IBOutlet()\n chooseWorkflowLabel = objc.IBOutlet()\n\n runWorkflowButton = objc.IBOutlet()\n workflowDescriptionView = objc.IBOutlet()\n workflowDescription = objc.IBOutlet()\n\n imagingProgress = objc.IBOutlet()\n imagingLabel = objc.IBOutlet()\n imagingProgressPanel = objc.IBOutlet()\n imagingProgressDetail = objc.IBOutlet()\n\n # former globals, now instance variables\n volumes = None\n passwordHash = None\n workflows = None\n targetVolume = None\n workVolume = None\n selectedWorkflow = None\n packages_to_install = None\n restartAction = None\n blessTarget = None\n\n def runStartupTasks(self):\n self.loginView.setHidden_(self)\n self.mainWindow.center()\n #self.progressPanel.center()\n #self.password.becomeFirstResponder()\n # Run app startup - get the images, password, volumes - anything that takes a while\n\n self.progressText.setStringValue_(\"Application Starting...\")\n NSApp.beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_(\n self.progressPanel, self.mainWindow, self, None, None)\n self.progressIndicator.setIndeterminate_(True)\n self.progressIndicator.setUsesThreadedAnimation_(True)\n self.progressIndicator.startAnimation_(self)\n NSThread.detachNewThreadSelector_toTarget_withObject_(self.loadData, self, None)\n\n def loadData(self):\n\n pool = NSAutoreleasePool.alloc().init()\n self.volumes = macdisk.MountedVolumes()\n\n theURL = Utils.getServerURL()\n if theURL:\n plistData = Utils.downloadFile(theURL)\n converted_plist = FoundationPlist.readPlistFromString(plistData)\n self.passwordHash = converted_plist['password']\n self.workflows = converted_plist['workflows']\n else:\n self.passwordHash = False\n\n self.performSelectorOnMainThread_withObject_waitUntilDone_(\n self.loadDataComplete, None, YES)\n del pool\n\n def loadDataComplete(self):\n # end modal sheet and close the panel\n NSApp.endSheet_(self.progressPanel)\n self.progressPanel.orderOut_(self)\n if not self.passwordHash:\n self.password.setEnabled_(False)\n self.loginButton.setEnabled_(False)\n self.disableAllButtons(self)\n self.startUpDiskText.setStringValue_(\n \"No Server URL has been set. 
Please contact your administrator.\")\n self.setStartupDisk_(self)\n self.loginView.setHidden_(False)\n self.mainView.setHidden_(True)\n self.mainWindow.makeFirstResponder_(self.password)\n\n @objc.IBAction\n def login_(self, sender):\n if self.passwordHash:\n password_value = self.password.stringValue()\n if Utils.getPasswordHash(password_value) != self.passwordHash or password_value == \"\":\n self.errorField.setEnabled_(sender)\n self.errorField.setStringValue_(\"Incorrect password\")\n else:\n self.loginView.setHidden_(sender)\n self.mainView.setHidden_(False)\n self.chooseImagingTarget_(sender)\n self.enableAllButtons_(self)\n\n @objc.IBAction\n def setStartupDisk_(self, sender):\n # This stops the console being spammed with: unlockFocus called too many times. Called on <NSButton\n NSGraphicsContext.saveGraphicsState()\n self.disableAllButtons(sender)\n # clear out the default junk in the dropdown\n self.startupDiskDropdown.removeAllItems()\n list = []\n for volume in self.volumes:\n list.append(volume.mountpoint)\n\n # Let's add the items to the popup\n self.startupDiskDropdown.addItemsWithTitles_(list)\n NSApp.beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_(\n self.startUpDiskPanel, self.mainWindow, self, None, None)\n NSGraphicsContext.restoreGraphicsState()\n\n @objc.IBAction\n def closeStartUpDisk_(self, sender):\n self.enableAllButtons_(sender)\n NSApp.endSheet_(self.startUpDiskPanel)\n self.startUpDiskPanel.orderOut_(self)\n\n @objc.IBAction\n def openProgress_(self, sender):\n NSApp.beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_(\n self.progressPanel, self.mainWindow, self, None, None)\n\n @objc.IBAction\n def chooseImagingTarget_(self, sender):\n self.disableAllButtons(sender)\n NSGraphicsContext.saveGraphicsState()\n self.chooseTargetDropDown.removeAllItems()\n list = []\n for volume in self.volumes:\n if volume.mountpoint != '/':\n if volume.mountpoint.startswith(\"/Volumes\"):\n if volume.mountpoint != '/Volumes':\n if volume.writable:\n list.append(volume.mountpoint)\n # No writable volumes, this is bad.\n if len(list) == 0:\n alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(\n NSLocalizedString(u\"No writable volumes found\", None),\n NSLocalizedString(u\"Restart\", None),\n NSLocalizedString(u\"Open Disk Utility\", None),\n objc.nil,\n NSLocalizedString(u\"No writable volumes were found on this Mac.\", None))\n\n alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(\n self.mainWindow, self, self.noVolAlertDidEnd_returnCode_contextInfo_, objc.nil)\n # If there's only one volume, we're going to use that and move on to selecting the workflow\n self.enableAllButtons_(self)\n if len(list) == 1:\n self.targetVolume = list[0]\n self.selectWorkflow_(sender)\n for volume in self.volumes:\n if str(volume.mountpoint) == str(self.targetVolume):\n imaging_target = volume\n self.workVolume = volume\n break\n # We'll move on to the select workflow bit when it exists\n else:\n self.chooseTargetDropDown.addItemsWithTitles_(list)\n NSApp.beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_(\n self.chooseTargetPanel, self.mainWindow, self, None, None)\n NSGraphicsContext.restoreGraphicsState()\n\n @PyObjCTools.AppHelper.endSheetMethod\n def noVolAlertDidEnd_returnCode_contextInfo_(self, alert, returncode, contextinfo):\n if returncode == NSAlertDefaultReturn:\n self.setStartupDisk_(sender)\n else:\n cmd = ['/Applications/Utilities/Disk 
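`chooseImagingTarget_` above filters candidate volumes with three nested mountpoint checks; they collapse to one predicate, since any mountpoint under `/Volumes/` is by construction neither `/` nor `/Volumes` itself. A compact restatement, assuming the `macdisk` volume objects with `.mountpoint` and `.writable` used throughout this file (note also that the alert callback just below passes an undefined `sender` — a latent `NameError` on that path):

```python
def writable_targets(volumes):
    """Mountpoints eligible as imaging targets, per the checks above."""
    return [v.mountpoint for v in volumes
            if v.mountpoint.startswith('/Volumes/') and v.writable]
```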
Utility.app/Contents/MacOS/Disk Utility']\n proc = subprocess.call(cmd)\n alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(\n NSLocalizedString(u\"Rescan for volumes\", None),\n NSLocalizedString(u\"Rescan\", None),\n objc.nil,\n objc.nil,\n NSLocalizedString(u\"Rescan for volumes.\", None))\n\n alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(\n self.mainWindow, self, self.rescanAlertDidEnd_returnCode_contextInfo_, objc.nil)\n\n @PyObjCTools.AppHelper.endSheetMethod\n def rescanAlertDidEnd_returnCode_contextInfo_(self, alert, returncode, contextinfo):\n self.progressText.setStringValue_(\"Reloading Volumes...\")\n NSApp.beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_(\n self.progressPanel, self.mainWindow, self, None, None)\n NSThread.detachNewThreadSelector_toTarget_withObject_(self.loadData, self, None)\n\n\n @objc.IBAction\n def selectImagingTarget_(self, sender):\n self.targetVolume = self.chooseTargetDropDown.titleOfSelectedItem()\n for volume in self.volumes:\n if str(volume.mountpoint) == str(self.targetVolume):\n self.workVolume = volume\n break\n self.enableAllButtons_(sender)\n NSApp.endSheet_(self.chooseTargetPanel)\n self.chooseTargetPanel.orderOut_(self)\n self.selectWorkflow_(self)\n\n\n @objc.IBAction\n def closeImagingTarget_(self, sender):\n self.enableAllButtons_(sender)\n NSApp.endSheet_(self.chooseTargetPanel)\n self.chooseTargetPanel.orderOut_(self)\n self.setStartupDisk_(sender)\n\n @objc.IBAction\n def selectWorkflow_(self, sender):\n self.chooseWorkflowDropDown.removeAllItems()\n list = []\n for workflow in self.workflows:\n list.append(workflow['name'])\n\n self.chooseWorkflowDropDown.addItemsWithTitles_(list)\n self.chooseWorkflowLabel.setHidden_(False)\n self.chooseWorkflowDropDown.setHidden_(False)\n self.workflowDescriptionView.setHidden_(False)\n self.runWorkflowButton.setHidden_(False)\n self.chooseWorkflowDropDownDidChange_(sender)\n\n @objc.IBAction\n def chooseWorkflowDropDownDidChange_(self, sender):\n selected_workflow = self.chooseWorkflowDropDown.titleOfSelectedItem()\n for workflow in self.workflows:\n if selected_workflow == workflow['name']:\n try:\n self.workflowDescription.setString_(workflow['description'])\n except:\n self.workflowDescription.setString_(\"\")\n break\n\n @objc.IBAction\n def runWorkflow_(self, sender):\n '''Set up the selected workflow to run on secondary thread'''\n self.imagingProgress.setHidden_(False)\n self.imagingLabel.setHidden_(False)\n self.cancelAndRestartButton.setEnabled_(False)\n self.chooseWorkflowLabel.setEnabled_(True)\n self.chooseWorkflowDropDown.setEnabled_(False)\n # self.workflowDescriptionView.setEnabled_(True)\n self.runWorkflowButton.setEnabled_(False)\n self.cancelAndRestartButton.setEnabled_(False)\n self.imagingLabel.setStringValue_(\"Preparing to run workflow...\")\n self.imagingProgressDetail.setStringValue_('')\n NSApp.beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_(\n self.imagingProgressPanel, self.mainWindow, self, None, None)\n # initialize the progress bar\n self.imagingProgress.setMinValue_(0.0)\n self.imagingProgress.setMaxValue_(100.0)\n self.imagingProgress.setIndeterminate_(True)\n self.imagingProgress.setUsesThreadedAnimation_(True)\n self.imagingProgress.startAnimation_(self)\n NSThread.detachNewThreadSelector_toTarget_withObject_(\n self.processWorkflowOnThread, self, None)\n\n def updateProgressWithInfo_(self, info):\n '''UI stuff should be done on the main thread. 
Yet we do all our interesting work\n on a secondary thread. So to update the UI, the secondary thread should call this\n method using performSelectorOnMainThread_withObject_waitUntilDone_'''\n if 'title' in info.keys():\n self.imagingLabel.setStringValue_(info['title'])\n if 'percent' in info.keys():\n if float(info['percent']) < 0:\n if not self.imagingProgress.isIndeterminate():\n self.imagingProgress.setIndeterminate_(True)\n self.imagingProgress.startAnimation_(self)\n else:\n if self.imagingProgress.isIndeterminate():\n self.imagingProgress.stopAnimation_(self)\n self.imagingProgress.setIndeterminate_(False)\n self.imagingProgress.setDoubleValue_(float(info['percent']))\n if 'detail' in info.keys():\n self.imagingProgressDetail.setStringValue_(info['detail'])\n\n def updateProgressTitle_Percent_Detail_(self, title, percent, detail):\n '''Wrapper method that calls the UI updadte method on the main thread'''\n info = {}\n if title is not None:\n info['title'] = title\n if percent is not None:\n info['percent'] = percent\n if detail is not None:\n info['detail'] = detail\n self.performSelectorOnMainThread_withObject_waitUntilDone_(\n self.updateProgressWithInfo_, info, objc.NO)\n\n def processWorkflowOnThread(self, sender):\n '''Process the selected workflow'''\n pool = NSAutoreleasePool.alloc().init()\n selected_workflow = self.chooseWorkflowDropDown.titleOfSelectedItem()\n # let's get the workflow\n self.selectedWorkflow = None\n for workflow in self.workflows:\n if selected_workflow == workflow['name']:\n self.selectedWorkflow = workflow\n break\n if self.selectedWorkflow:\n if 'restart_action' in self.selectedWorkflow:\n self.restartAction = self.selectedWorkflow['restart_action']\n if 'bless_target' in self.selectedWorkflow:\n self.blessTarget = self.selectedWorkflow['bless_target']\n else:\n self.blessTarget = True\n self.restoreImage()\n self.downloadAndInstallPackages()\n self.downloadAndCopyPackages()\n\n self.performSelectorOnMainThread_withObject_waitUntilDone_(\n self.processWorkflowOnThreadComplete, None, YES)\n del pool\n\n def processWorkflowOnThreadComplete(self):\n '''Done running workflow, restart to imaged volume'''\n NSApp.endSheet_(self.imagingProgressPanel)\n self.imagingProgressPanel.orderOut_(self)\n if self.restartAction == 'restart' or self.restartAction == 'shutdown':\n self.restartToImagedVolume()\n else:\n self.openEndWorkflowPanel()\n\n def restoreImage(self):\n dmgs_to_restore = [item.get('url') for item in self.selectedWorkflow['components']\n if item.get('type') == 'image' and item.get('url')]\n if dmgs_to_restore:\n self.Clone(dmgs_to_restore[0], self.targetVolume)\n\n def Clone(self, source, target, erase=True, verify=True, show_activity=True):\n \"\"\"A wrapper around 'asr' to clone one disk object onto another.\n\n We run with --puppetstrings so that we get non-buffered output that we can\n actually read when show_activity=True.\n\n Args:\n source: A Disk or Image object.\n target: A Disk object (including a Disk from a mounted Image)\n erase: Whether to erase the target. Defaults to True.\n verify: Whether to verify the clone operation. 
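The convention above is worth isolating: secondary threads never touch AppKit directly; they bundle progress into a plain dict and hand it to the main thread via `performSelectorOnMainThread_withObject_waitUntilDone_`. A condensed sketch of `updateProgressTitle_Percent_Detail_` as a free function — a restatement of the file's own pattern, not new API:

```python
def report_progress(controller, title=None, percent=None, detail=None):
    """Pack the optional progress fields and marshal them to the main thread."""
    info = {key: value
            for key, value in (('title', title), ('percent', percent),
                               ('detail', detail))
            if value is not None}
    controller.performSelectorOnMainThread_withObject_waitUntilDone_(
        controller.updateProgressWithInfo_, info, False)
```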
Defaults to True.\n show_activity: whether to print the progress to the screen.\n Returns:\n boolean: whether the operation succeeded.\n Raises:\n MacDiskError: source is not a Disk or Image object\n MacDiskError: target is not a Disk object\n \"\"\"\n\n for volume in self.volumes:\n if str(volume.mountpoint) == str(target):\n imaging_target = volume\n self.workVolume = volume\n break\n\n if isinstance(imaging_target, macdisk.Disk):\n target_ref = \"/dev/%s\" % imaging_target.deviceidentifier\n else:\n raise macdisk.MacDiskError(\"target is not a Disk object\")\n\n command = [\"/usr/sbin/asr\", \"restore\", \"--source\", str(source),\n \"--target\", target_ref, \"--noprompt\", \"--puppetstrings\"]\n\n if erase:\n # check we can unmount the target... may as well fail here than later.\n if imaging_target.Mounted():\n imaging_target.Unmount()\n command.append(\"--erase\")\n\n if not verify:\n command.append(\"--noverify\")\n\n self.updateProgressTitle_Percent_Detail_('Restoring %s' % source, -1, '')\n\n NSLog(str(command))\n task = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n message = \"\"\n while task.poll() is None:\n output = task.stdout.readline().strip()\n try:\n percent = int(output.split(\"\\t\")[1])\n except:\n percent = 0.001\n if len(output.split(\"\\t\")) == 4:\n if output.split(\"\\t\")[3] == \"restore\":\n message = \"Restoring: \"+ str(percent) + \"%\"\n elif output.split(\"\\t\")[3] == \"verify\":\n message = \"Verifying: \"+ str(percent) + \"%\"\n else:\n message = \"\"\n else:\n message = \"\"\n if percent == 0:\n percent = 0.001\n info = {'detail': message, 'percent': float(percent)}\n self.performSelectorOnMainThread_withObject_waitUntilDone_(\n self.updateProgressWithInfo_, info, objc.NO)\n\n (unused_stdout, stderr) = task.communicate()\n\n if task.returncode:\n raise macdisk.MacDiskError(\"Cloning Error: %s\" % stderr)\n if task.poll() == 0:\n return True\n\n def downloadAndInstallPackages(self):\n self.updateProgressTitle_Percent_Detail_('Installing packages...', -1, '')\n # mount the target\n if not self.workVolume.Mounted():\n self.workVolume.Mount()\n\n pkgs_to_install = [item for item in self.selectedWorkflow['components']\n if item.get('type') == 'package' and item.get('pre_first_boot')]\n package_count = len(pkgs_to_install)\n counter = 0.0\n for item in pkgs_to_install:\n counter = counter + 1.0\n package_name = os.path.basename(item['url'])\n percent_done = (counter / package_count) * 100\n self.updateProgressTitle_Percent_Detail_(\n 'Installing packages...', percent_done, package_name)\n Utils.downloadAndInstallPackage(\n item['url'], self.workVolume.mountpoint, counter, package_count)\n\n def downloadAndCopyPackages(self):\n self.updateProgressTitle_Percent_Detail_(\n 'Copying packages for install on first boot...', -1, '')\n # mount the target\n if not self.workVolume.Mounted():\n self.workVolume.Mount()\n\n packages_dir = os.path.join(self.workVolume.mountpoint, 'usr/local/first-boot/')\n if not os.path.exists(packages_dir):\n os.makedirs(packages_dir)\n pkgs_to_install = [item for item in self.selectedWorkflow['components']\n if item.get('type') == 'package' and not item.get('pre_first_boot')]\n package_count = len(pkgs_to_install)\n counter = 0.0\n # download packages to /usr/local/first-boot - prepend number\n for item in pkgs_to_install:\n counter = counter + 1.0\n package_name = os.path.basename(item['url'])\n percent_done = (counter / package_count) * 100\n self.updateProgressTitle_Percent_Detail_(\n 'Copying packages 
for install on first boot...', percent_done, package_name)\n Utils.downloadPackage(item['url'], self.workVolume.mountpoint, counter, package_count)\n if package_count:\n # copy bits for first boot script\n Utils.copyFirstBoot(self.workVolume.mountpoint)\n\n def restartToImagedVolume(self):\n # set the startup disk to the restored volume\n if self.blessTarget == True:\n self.workVolume.SetStartupDisk()\n if self.restartAction == 'restart':\n cmd = ['/sbin/reboot']\n elif self.restartAction == 'shutdown':\n cmd = ['/sbin/reboot', '-h', 'now']\n task = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n task.communicate()\n\n def openEndWorkflowPanel(self):\n label_string = \"%s completed.\" % self.selectedWorkflow['name']\n alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(\n NSLocalizedString(label_string, None),\n NSLocalizedString(u\"Restart\", None),\n NSLocalizedString(u\"Run another workflow\", None),\n NSLocalizedString(u\"Shutdown\", None),\n NSLocalizedString(u\"\", None),)\n\n alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(\n self.mainWindow, self, self.endWorkflowAlertDidEnd_returnCode_contextInfo_, objc.nil)\n\n @PyObjCTools.AppHelper.endSheetMethod\n def endWorkflowAlertDidEnd_returnCode_contextInfo_(self, alert, returncode, contextinfo):\n # -1 = Shutdown\n # 0 = another workflow\n # 1 = Restart\n if returncode == -1:\n self.restartAction = 'shutdown'\n self.restartToImagedVolume()\n elif returncode == 1:\n self.restartAction = 'restart'\n self.restartToImagedVolume()\n elif returncode == 0:\n self.chooseWorkflowDropDown.setEnabled_(True)\n self.chooseImagingTarget_(contextinfo)\n\n\n def enableAllButtons_(self, sender):\n self.cancelAndRestartButton.setEnabled_(True)\n self.runWorkflowButton.setEnabled_(True)\n\n def disableAllButtons(self, sender):\n self.cancelAndRestartButton.setEnabled_(False)\n self.runWorkflowButton.setEnabled_(False)\n","sub_path":"Imagr/MainController.py","file_name":"MainController.py","file_ext":"py","file_size_in_byte":22437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"271827604","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\nf=open('../lab0_spectral_data.txt', \"r\")\nlines=f.readlines()[1:]\n\n\nAm241=[] # 59.54 keV\nBa133=[] # 80.997 keV (34%) 302.853 keV (18%) 356.017 keV (62%)\nCs137=[] # 661.66 keV\nCo60=[] # 1173.2 keV and 1332.51 keV\nEu152=[] # 344.28 keV (27%) 1112.1 keV (14%) 1408.1 keV (21%) 121.78 keV (26%)\n\n\nfor x in lines:\n x.split('\\t')\n y=[int(s) for s in x.split() if s.isdigit()]\n Am241.append(y[0])\n Ba133.append(y[1])\n Cs137.append(y[2])\n Co60.append(y[3])\n Eu152.append(y[4])\n# testing that input vectors have the same length\nf.close()\n\n\nlen(Cs137)==len(Am241)==len(Cs137)==len(Co60)==len(Eu152)==8192\n\nchan=list(range(1,len(Cs137)+1)) #number of channels:8192\n#sources=[ for i in chan Am241[i]+Ba133[i]+Cs137[i]+Co60[i]+Eu152[i]]\n\ncentroid_Cs=np.argmax(Cs137) # 661.66 keV\ncentroid_Am=np.argmax(Am241) # 59.54 keV\n\nx1=centroid_Am\nx2=centroid_Cs\n\ny1=59.5409\ny2=661.657\n\nenergies=[]\nm=float(y2-y1)/(x2-x1)\nb=float(-m*x1+y1)\n#print(m,b)\nchan_array=np.array(chan)\n\nfor i in chan:\n energies.append(np.multiply(m, chan_array[i-1])+b)\n\n\n#makes pretty of raw data\nplt.semilogy(chan, Am241, label='Am-241')\nplt.semilogy(chan, Ba133, label='Ba-133')\nplt.semilogy(chan, Cs137, label='Cs-137')\nplt.semilogy(chan,Co60, 
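The calibration in the `data_analysis.py` record above builds the channel-to-energy map element by element in a Python loop; the two-point line is a single vectorised expression over the whole channel array. A restatement with the same anchor peaks:

```python
import numpy as np


def calibrate(channels, chan_lo, e_lo, chan_hi, e_hi):
    """Two-point linear energy calibration, e.g. the Am-241 (59.5409 keV)
    and Cs-137 (661.657 keV) centroids used as anchors above."""
    slope = (e_hi - e_lo) / float(chan_hi - chan_lo)
    return slope * (np.asarray(channels, dtype=float) - chan_lo) + e_lo
```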
label='Co-60')\nplt.semilogy(chan, Eu152, label='Eu-152')\nplt.legend()\nplt.xlabel('Channel Number')\nplt.ylabel('Number of Counts')\nplt.title('Imported Raw Data (Uncalibrated)')\nplt.savefig('../images/rawdata.png')\n#plt.show()\nplt.close()\n\n#plot of calibrated data_analysis\nplt.semilogy(energies, Am241, label='Am-241')\nplt.semilogy(energies, Cs137, label='Cs-137')\nplt.legend()\nplt.xlabel('Energy (keV)')\nplt.ylabel('Number of Counts')\nplt.title('Calibrated Plot of Am-241 and Cs-137')\nplt.savefig('../images/cal_AmCs.png')\nplt.close()\n#plt.show()\n\nfrom find_nearest import find_near\n\n#intensity of 80.99 (32.9%) and 79.61 (2.6%)\n\n\nBa_a=[80.9979,276.3989,302.8508, 356.0129,383.8485] #Bactual intensities of Ba133\nBa_e=[] #expected intensities of Ba133\npercent_diff=[]\ntable=[]\ndE=10\n\n#this loop finds the max energy in the range around each Ba_a value (or the centroid)\nfor x in range(0,5):\n minE=Ba_a[int(x)]-dE\n maxE=Ba_a[int(x)]+dE\n\n nearestmin=find_near(energies,minE)\n nearestmax=find_near(energies,maxE)\n\n Erange=energies[nearestmin[0]:nearestmax[0]]\n Countrange=Ba133[nearestmin[0]:nearestmax[0]]\n #print(Erange, Countrange)\n maxcounts=max(Countrange)\n ECentroid=Erange[Countrange.index(maxcounts)]\n Ba_e.append(ECentroid)\n percent_diff.append((Ba_e[x]-Ba_a[x])/Ba_a[x])\n table.append([Ba_a[x], Ba_e[x], percent_diff[x]])\n\n\nnp.savetxt('../images/peakdiffquant.csv', table)\n","sub_path":"code/data_analysis.py","file_name":"data_analysis.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"372208058","text":"# Definition for a binary tree node.\r\n# class TreeNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\nimport Queue\r\n\r\nclass Solution(object):\r\n def __init__(self):\r\n self.s=[]\r\n self.q=Queue.Queue()\r\n self.level=0\r\n \r\n def levelOrder(self, root):\r\n \"\"\"\r\n :type root: TreeNode\r\n :rtype: List[List[int]]\r\n \"\"\"\r\n if root is None:\r\n return []\r\n if(len(self.s)<self.level+1):\r\n self.s.append([root.val])\r\n else:\r\n self.s[self.level].append(root.val)\r\n if root.left is not None:\r\n self.q.put((root.left,self.level+1))\r\n if root.right is not None:\r\n self.q.put((root.right,self.level+1))\r\n if not self.q.empty():\r\n thenext=self.q.get()\r\n thenextnode=thenext[0]\r\n self.level=thenext[1]\r\n self.levelOrder(thenextnode)\r\n return self.s\r\n \r\n def levelOrderBottom(self, root):\r\n \"\"\"\r\n :type root: TreeNode\r\n :rtype: List[List[int]]\r\n \"\"\"\r\n temp=self.levelOrder(root)\r\n print(temp)\r\n temp.reverse()\r\n return temp\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"binary_tree_level_order_traversal_ii.py","file_name":"binary_tree_level_order_traversal_ii.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"644416393","text":"from typing import List, Callable, Union, Optional, TypeVar, Tuple, Dict\nTensor = TypeVar('torch.tensor')\nfrom argparse import ArgumentParser\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.nn import functional as F\nfrom pytorch_lightning.metrics import Accuracy\n\nfrom .base import BaseVAE\nfrom src.models.convnet import conv_blocks, deconv_blocks\nfrom src.models.resnet import ResNet\nfrom src.models.resnet_deconv import 
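The level-order record above threads a `Queue` through recursive calls and mutable instance state, which makes the solver single-use. The same traversal as a plain iterative BFS, one level per loop iteration; reversing the result gives the bottom-up order `levelOrderBottom` asks for:

```python
from collections import deque


def level_order(root):
    """Breadth-first traversal returning one list of values per tree level."""
    levels, queue = [], deque([root] if root else [])
    while queue:
        levels.append([node.val for node in queue])
        queue = deque(child for node in queue
                      for child in (node.left, node.right) if child)
    return levels            # levels[::-1] for the bottom-up variant
```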
ResNetDecoder\nfrom .utils import compute_kld\n\nclass BiVAE(BaseVAE):\n def __init__(self, *,\n in_shape: Union[torch.Size, Tuple[int,int,int]],\n n_styles: int,\n\n latent_dim: int,\n hidden_dims: Optional[List[int]],\n adversary_dims: Optional[List[int]],\n learning_rate: float,\n act_fn: Callable= nn.LeakyReLU(),\n out_fn: Callable = nn.Tanh(),\n\n size_average: bool = False,\n\n is_contrasive: bool = True,\n kld_weight: float=1.0,\n adv_loss_weight: float=1.0,\n\n enc_type: str = 'conv',\n dec_type: str = 'conv',\n **kwargs) -> None:\n \"\"\"\n VAE with extra adversarial loss from a style discriminator to enforce the information from original data to be\n encoded into two independent subspaces of the latent space, \\mathcal{Z_c} and \\mathcal{Z_s}\n aka. Bi-latent VAE\n TODO: how about Bilinear VAE\n\n :param in_shape: model(x)'s input x's shape w/o batch dimension, in order of (c, h, w). Note no batch dimension.\n :param latent_dim:\n :param hidden_dims:\n :param act_fn: Default is LeakyReLU()\n :param learning_rate: initial learning rate. Default: 1e-3.\n :param size_average: bool; whether to average the recon_loss across the pixel dimension. Default: False\n :param is_contrasive bool; True to use both adversarial losses from the content and style codes\n If False, use only the loss from the style code's classification prediction as the adversarial loss\n :param kld_weight (float); Beta in BetaVAE that is a relative weight of the kld vs. recon-loss\n vae_loss = recon_loss + kld_weight * kld\n :param adv_loss_weight (float); Weight btw vae_loss and adv_loss\n loss = vae_loss + adv_loss_weight * adv_loss\n :param enc_type (str); One of ['resnet', 'conv']\n :param dec_type(str); One of ['resnet', 'conv']\n :param kwargs: will be part of self.hparams\n Eg. 
batch_size, kld_weight\n \"\"\"\n super().__init__()\n # About input x\n self.dims = in_shape\n self.in_channels, self.in_h, self.in_w = in_shape\n # About label y\n self.n_styles = n_styles # num of styles from which the adversary to predict\n # About model configs\n self.latent_dim = latent_dim\n self.content_dim = int(self.latent_dim/2)\n self.style_dim = self.content_dim\n self.act_fn = act_fn\n self.out_fn = out_fn\n self.learning_rate = learning_rate\n self.size_average = size_average\n self.hidden_dims = hidden_dims or [32, 64, 128, 256, 512]\n self.adversary_dims = adversary_dims or [32, 32, 32]\n # Loss\n self.is_contrasive = is_contrasive\n self.kld_weight = kld_weight\n self.adv_loss_weight = adv_loss_weight\n # Encoder, Decoder type\n self.enc_type = enc_type\n self.dec_type = dec_type\n # Save kwargs to tensorboard's hparams\n self.save_hyperparameters()\n\n # Compute last feature map's height, width\n # In case of resnet, the second convlayer doesn't shrink the resolution (h,w)\n self.n_downsampling_layers = self.get_n_downsampling_layers()\n self.last_h = int(self.in_h / 2 ** self.n_downsampling_layers)\n self.last_w = int(self.in_w / 2 ** self.n_downsampling_layers)\n\n # Build Encoder\n if self.enc_type == 'conv':\n self.encoder = conv_blocks(self.in_channels,\n self.hidden_dims,\n has_bn=True,\n act_fn=act_fn)\n elif self.enc_type == 'resnet':\n self.encoder = ResNet(self.in_channels,\n self.hidden_dims,\n act_fn=act_fn)\n else:\n raise NotImplementedError(\"Currently supports convnet, resnet as encoder\")\n\n self.len_flatten = self.hidden_dims[-1] * self.last_h * self.last_w\n self.fc_flatten2qparams = nn.Linear(self.len_flatten, 2*self.content_dim+2*self.style_dim) # mu_qc, std_qc, mu_qs, std_qs (both c, s have the same dim)\n\n # Build Decoder\n # modules = []\n # self.fc_latent2decoder = nn.Linear(self.latent_dim, self.len_flatten)\n # rev_hidden_dims = self.hidden_dims[::-1]\n #\n # for i in range(len(rev_hidden_dims) - 1):\n # modules.append(\n # nn.Sequential(\n # nn.ConvTranspose2d(rev_hidden_dims[i],\n # rev_hidden_dims[i + 1],\n # kernel_size=3,\n # stride = 2,\n # padding=1,\n # output_padding=1),\n # nn.BatchNorm2d(rev_hidden_dims[i + 1]),\n # self.act_fn)\n # )\n #\n # self.decoder = nn.Sequential(*modules)\n #\n # self.final_layer = nn.Sequential(\n # nn.ConvTranspose2d(rev_hidden_dims[-1],\n # self.n_channels,\n # kernel_size=3,\n # stride=2,\n # padding=1,\n # output_padding=1),\n # nn.BatchNorm2d(self.n_channels),\n # self.act_fn,\n #\n # nn.Conv2d(self.n_channels, self.n_channels,\n # kernel_size=3, stride=1, padding= 1),\n # nn.Tanh()) #todo: sigmoid? maybe Tanh is better given we normalize inputs by mean and std\n self.fc_latent2flatten = nn.Linear(self.latent_dim, self.len_flatten)\n\n if self.enc_type == 'resnet':\n decoder_dims = [*hidden_dims[1:][::-1], self.in_channels]\n else:\n decoder_dims = [*hidden_dims[::-1], self.in_channels]\n\n if self.dec_type == 'conv':\n self.decoder = deconv_blocks(decoder_dims[0],\n decoder_dims[1:],\n has_bn=True,\n act_fn=act_fn) # bs, (len_flatten,) -> ... -> bs, (n_channels, h,w)\n elif self.dec_type == 'resnet':\n self.decoder = ResNetDecoder(decoder_dims,\n act_fn=act_fn) # todo\n else:\n raise NotImplementedError(\"Currently supports convnet, resnet as decoder\")\n\n self.out_layer = nn.Sequential(\n nn.Conv2d(self.in_channels, self.in_channels,\n kernel_size=3, stride=1, padding=1),\n self.out_fn) # todo: sigmoid? 
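The spatial bookkeeping in `__init__` above reduces to: every stride-2 layer halves the feature map, so the flattened encoder output has length `hidden_dims[-1] * (H / 2**L) * (W / 2**L)`. A one-function restatement of that arithmetic, with an illustrative check:

```python
def flatten_len(in_h, in_w, last_channels, n_downsampling):
    """Flattened encoder-output length after n stride-2 downsamplings."""
    return (last_channels
            * (in_h // 2 ** n_downsampling)
            * (in_w // 2 ** n_downsampling))


assert flatten_len(64, 64, 512, 5) == 512 * 2 * 2   # e.g. 64x64 input, 5 blocks
```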
maybe Tanh is better given we normalize inputs by mean and std\n\n # Build style classifier:\n # Given a content or style code, predict its style label\n # zc or zs --> scores (a vector of len = n_classes)\n _adv_dims = [self.content_dim, *self.adversary_dims, self.n_styles]\n adv_layers = []\n for num_in, num_out in zip(_adv_dims, _adv_dims[1:]):\n adv_layers.append(nn.Sequential(nn.Linear(num_in, num_out), self.act_fn))\n self.adversary = nn.Sequential(*adv_layers)\n\n # Add the accuracy metric for the style-classification based on the style code\n self.train_style_acc = Accuracy()\n self.val_style_acc = Accuracy()\n self.test_style_acc = Accuracy()\n\n @property\n def name(self):\n bn = \"BiVAE-C\" if self.is_contrasive else \"BiVAE\"\n return f'{bn}-{self.enc_type}-{self.dec_type}-{self.kld_weight:.1f}-{self.adv_loss_weight:.1f}'\n\n def input_dim(self):\n return np.prod(self.dims)\n\n def get_n_downsampling_layers(self):\n if self.enc_type == 'conv':\n return len(self.hidden_dims)\n elif self.enc_type == 'resnet':\n return len(self.hidden_dims) - 1\n else:\n raise NotImplementedError(\"Use a valid enc_type: 'conv', 'resnet'\")\n\n def encode(self, input: Tensor) -> Dict[str, Tensor]:\n \"\"\"\n Encodes the input by passing through the encoder network\n and returns the latent codes.\n :param input: (Tensor) Input tensor to encoder [N x C x H x W]\n :return: (Dict[std,Tensor]) Dict of parameters for variational distributions q_content and q_style\n dict_q_params = {\n \"mu_qc\": mu_qc,\n \"logvar_qc\": logvar_qc,\n \"mu_qs\": mu_qs,\n \"logvar_qs\": logvar_qs\n }\n \"\"\"\n out = self.encoder(input)\n out = torch.flatten(out, start_dim=1)\n\n # Split the result into mu and var components\n # of the latent Gaussian distribution\n q_params = self.fc_flatten2qparams(out) #(bs, 2*content_dim + 2*style_dim]\n mu_qc = q_params[:, 0:self.content_dim]\n logvar_qc = q_params[:, self.content_dim:self.content_dim*2]\n mu_qs = q_params[:, self.content_dim*2:self.content_dim*2+self.style_dim]\n logvar_qs = q_params[:, self.content_dim*2+self.style_dim:]\n\n dict_q_params = {\n \"mu_qc\": mu_qc,\n \"logvar_qc\": logvar_qc,\n \"mu_qs\": mu_qs,\n \"logvar_qs\": logvar_qs\n }\n return dict_q_params\n\n def rsample(self, dict_q_params: Dict[str, Tensor]) -> Dict[str,Tensor]:\n \"\"\"\n Sample latent codes from N(mu, var) by using the reparam. trick.\n\n :param dict_q_params: output of the encoder network\n :return: dict_z_samples (Dict[str, Tensor])\n keys: 'c', 's'\n value of dict_zsample['c']: samples of content codes; [BS, self.latent_dim]\n - same for key='s'\n \"\"\"\n mu_qc = dict_q_params[\"mu_qc\"] #(BS, self.content_dim)\n logvar_qc = dict_q_params[\"logvar_qc\"] #(BS, self.content_dim)\n std_qc = logvar_qc.exp()\n mu_qs = dict_q_params[\"mu_qs\"] #(BS, self.style_dim)\n logvar_qs = dict_q_params[\"logvar_qs\"] #(BS, self.style_dim)\n std_qs = logvar_qs.exp()\n\n # Reparam. 
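trick below. Note (added, illustrative): for a Gaussian parameterized by its log-variance,\n        # std = exp(0.5 * logvar); a bare .exp() yields the variance instead. Assuming mu/logvar\n        # follow the usual VAE convention, recompute the stds here so the sampling below uses\n        # true standard deviations (a minimal correction sketch, not the original author's code):\n        std_qc = (0.5 * logvar_qc).exp()\n        std_qs = (0.5 * logvar_qs).exp()\n        # Reparam. 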
trick\n eps_c = torch.randn_like(mu_qc)\n c_samples = eps_c * std_qc + mu_qc\n\n eps_s = torch.randn_like(mu_qs)\n s_samples = eps_s * std_qs + mu_qs\n\n dict_z_samples = {\"c\": c_samples,\"s\": s_samples}\n return dict_z_samples\n\n def decode(self, z: Tensor) -> Tensor:\n \"\"\"\n Maps a batch of latent codes onto the image space, when each row contains\n a single latent code corresponding to the rowth input datapoint\n\n :param z: (Tensor) [B, latent_dim]\n :return: (Tensor) [B, C, H, W]\n \"\"\"\n out = self.fc_latent2flatten(z) # latent_dim -> len_flatten; 1dim tensor\n out = out.view(-1, self.hidden_dims[-1], self.last_h, self.last_w) # back to a mini-batch of 3dim tensors\n out = self.decoder(out); #print(out.shape)\n out = self.out_layer(out); #print(out.shape)\n return out\n\n def create_labels(self, z):\n \"\"\"\n Create proper target_labels for c and s\n\n :param z:\n :return:\n \"\"\"\n pass\n\n def discriminate(self, z: Tensor) -> Tensor:\n \"\"\"\n - Divide z into c and s\n -\n :param z:\n :return: y_pred: style label prediction (BS, )\n \"\"\"\n pass\n\n def combine_content_style(self, dict_z: Dict[str, Tensor]) -> Tensor:\n \"\"\"\n Combine a mini-batch of content codes and a mini-batch of style codes\n to get a \"full\" sample of z that can be fed into the decoder\n :param dict_z: Dict with keys \"c\", \"s\". dict_z[\"c\"] returns a mini-batch of content codes.\n :return: a mini-batch of z = [zc, zs] vectors\n \"\"\"\n c = dict_z[\"c\"] # (BS, content_dim)\n s = dict_z[\"s\"] # (BS, style_dim)\n assert len(c) == len(s), \"Number of content and style codes must be the same\"\n return torch.cat([c, s], dim=1)\n\n def forward(self, x: Tensor, **kwargs) -> Dict[str, Tensor]:\n \"\"\"\n Full forwardpass of VAE: x -> enc -> rsample(z's) -> dec\n :param x: mini-batch of inputs (BS, *in_shape)\n :param kwargs:\n :return: Dict[str,Tensor] with keys\n \"mu_qc\", \"logvar_qc\", \"mu_qs\", \"logvar_qs\", \"c\", \"s\", mu_x_pred\"\n \"\"\"\n dict_q_params = self.encode(x)\n dict_z_samples = self.rsample(dict_q_params)\n z = self.combine_content_style(dict_z_samples) # (BS, self.latent)\n mu_x_pred = self.decode(z)\n out_dict = {**dict_q_params, **dict_z_samples, \"mu_x_pred\":mu_x_pred}\n return out_dict\n\n # ------------------------------------------------------------------------\n # Methods for adversary\n # ------------------------------------------------------------------------\n def partition_z(self, z: Tensor) -> Dict[str, Tensor]:\n \"\"\"\n Reverse operation of `combine_content_style`.\n Given a (batch of) latent code z, divide it into content and style codes.\n :param z:\n :return: dict_z\n \"\"\"\n dict_z = {\n \"c\": z[:, :self.content_dim],\n \"s\": z[:, self.content_dim:]\n }\n return dict_z\n\n def predict_y(self, z_partition):\n \"\"\"\n Use the style classifer to predict the style label given either content or style code.\n :param z_partition: (BS, self.content_dim), same as (BS, style_dim)\n :return: y_scores: predicted styles (BS, n_styles)\n \"\"\"\n y_scores = self.adversary(z_partition) #(BS, n_styles)\n return y_scores\n\n def compute_loss_c(self, c:torch.Tensor) -> Tensor:\n \"\"\"\n Using the current adversary, compute the prediction loss of style\n given the content codes.\n - Set the target to be a uniform dist. over classes. 
ie (BS, n_styles)\n with values = 1/n_styles\n\n :param c:\n :return: loss_c (torch.float32)\n \"\"\"\n bs = len(c)\n # target = torch.ones((bs, self.n_styles), device=c.device)\n # target /= self.n_styles # TODO: possible to not create this as a tensor, as it has all the same value ie. 1/self.n_styles\n scores = self.predict_y(c); #print(\"score_c: \", scores.shape) #(bs, n_styles)\n log_probs = nn.LogSoftmax(dim=1)(scores) #(bs, n_styles)\n loss_c = - log_probs.mean(dim=1) # same as: log_probs.sum(dim=1) / self.n_styles\n loss_c = loss_c.mean(dim=0) # adversarial loss per content code\n\n return loss_c\n\n def compute_loss_s(self, s:torch.Tensor, target_y) -> Tensor:\n \"\"\"\n\n :param s: style code; (bs, style_dim)\n :param target_y: target style index (ie. one-hot style target); (bs,)\n :return: loss_s (torch.float32)\n \"\"\"\n scores = self.predict_y(s)\n loss_s = nn.CrossEntropyLoss(reduction='mean')(scores, target_y) # estimated loss computed as averaged loss (over batch)\n return loss_s\n\n def loss_function(self,\n out_dict,\n batch,\n mode:str,\n **kwargs) -> dict:\n \"\"\"\n Computes the VAE loss function from a mini-batch of pred and target\n KL(N(\\mu, \\sigma), N(0, 1)) = \\log \\frac{1}{\\sigma} + \\frac{\\sigma^2 + \\mu^2}{2} - \\frac{1}{2}\n\n :param out_dict: output of the full forward pass\n :param target: Tuple[ Tensor, Tensor]. mini-batch of inputs and labels\n :param mode: (str) one of \"train\", \"val\", \"test\"\n :param kwargs:\n eg. has a key \"kld_weight\" to multiply the (negative) kl-divergence\n :return: loss_dict\n \"\"\"\n\n # Uppack the batch into a batch of img, content_labels, style_labels\n # target_x = batch['img'].detach().clone()\n # # label_c = batch['digit'] # digit/content label (int) -- currently not used\n # label_s = batch['color'].detach().clone() # color/style label (int) -- used for adversarial loss_s\n target_x, label_c, label_s = self.trainer.datamodule.unpack(batch)\n target_x = target_x.detach().clone()\n label_s = label_s.detach().clone()\n\n\n # qparams\n mu_qc, logvar_qc = out_dict[\"mu_qc\"], out_dict[\"logvar_qc\"]\n mu_qs, logvar_qs = out_dict[\"mu_qs\"], out_dict[\"logvar_qs\"]\n # samples\n c, s, = out_dict[\"c\"], out_dict[\"s\"]\n # output of decoder\n mu_x_pred = out_dict[\"mu_x_pred\"]\n\n # Monitor kld of each latent subspace to see how the content/style latent's KLD's changes individually\n with torch.no_grad():\n kld_c = compute_kld(mu_qc, logvar_qc)\n kld_s = compute_kld(mu_qs, logvar_qs)\n\n # Combine mu_qc and mu_qs. 
Same for logvars\n mu_z = self.combine_content_style({\"c\": mu_qc, \"s\": mu_qs})\n logvar_z = self.combine_content_style({\"c\": logvar_qc, \"s\": logvar_qs})\n\n # Compute losses\n recon_loss = F.mse_loss(mu_x_pred, target_x, reduction='mean', size_average=self.size_average) # see https://github.com/pytorch/examples/commit/963f7d1777cd20af3be30df40633356ba82a6b0c\n # kld = torch.mean(-0.5 * torch.sum(1 + logvar_z - mu_z ** 2 - logvar_z.exp(), dim = 1), dim = 0)\n kld = compute_kld(mu_z, logvar_z)\n vae_loss = recon_loss + self.kld_weight * kld\n\n # Compute adversarial loss\n #adv_loss_s = self.compute_loss_s(s, target_y) #loss from \"positives\"\n score_s = self.predict_y(s); #print(\"score_s: \", score_s.shape) #(bs, n_styles)\n adv_loss_s = nn.CrossEntropyLoss(reduction='mean')(score_s, label_s) # estimated loss computed as averaged loss (over batch)\n if self.is_contrasive:\n adv_loss_c = self.compute_loss_c(c) # loss from \"negatives\"\n adv_loss = adv_loss_c + adv_loss_s\n else:\n adv_loss = adv_loss_s\n\n # Finally, full loss\n # Estimates for per-datapoint (ie. image), computed as an average over mini-batch\n # TODO: Noisy gradient estimate of the (full-batch) gradient thus need to be multipled by num_datapoints N\n loss = vae_loss + self.adv_loss_weight * adv_loss\n\n loss_dict = {\n \"kld_c\": kld_c,\n \"kld_s\": kld_s,\n 'recon_loss': recon_loss,\n 'kld': kld,\n 'vae_loss': vae_loss,\n \"score_s\": score_s,# (BS, n_styles); needed to compute the style accuracy at `training_step`\n 'adv_loss_s': adv_loss_s,\n \"adv_loss\": adv_loss,\n \"loss\": loss\n }\n if self.is_contrasive:\n loss_dict[\"adv_loss_c\"] = adv_loss_c\n\n if self.current_epoch % 10 == 0 and self.trainer.batch_idx % 300 == 0:\n print(f\"Ep: {self.current_epoch}, batch: {self.trainer.batch_idx}\")\n # pprint(loss_dict)\n\n return loss_dict\n\n def decode_sample(self,\n num_samples:int,\n current_device: int,\n **kwargs) -> Tensor:\n \"\"\"\n Samples from the latent space and return the corresponding\n image space map.\n :param num_samples: (Int) Number of samples\n :param current_device: (Int) Device to run the model\n :return: (Tensor)\n \"\"\"\n z = torch.randn(num_samples, self.latent_dim)\n z = z.to(current_device)\n samples = self.decode(z)\n return samples\n\n def generate(self, x: Tensor, **kwargs) -> Tensor:\n \"\"\"\n Given an input image x, returns the reconstructed image\n :param x: (Tensor) [B x C x H x W]\n :return: (Tensor) [B x C x H x W]\n TODO:\n CHECK IF taking mean makes sense\n \"\"\"\n mu_x_pred = self.forward(x)[\"mu_x_pred\"] # (BS, C, H, W)\n return mu_x_pred\n\n def get_embedding(self, x: Tensor, **kwargs) -> List[Tensor]:\n self.eval()\n with torch.no_grad():\n dict_q_params = self.encode(x)\n dict_z = self.rsample(dict_q_params)\n c = dict_z[\"c\"] # (BS, content_dim)\n s = dict_z[\"s\"] # (BS, style_dim)\n return {\"c\": c, \"s\": s}\n\n def training_step(self, batch, batch_idx):\n \"\"\"\n Implements one mini-batch iteration: x -> model(x) -> loss or loss_dict\n `loss` is the last node of the model's computational graph, ie. 
starting node of\n        backprop.\n        \"\"\"\n        # x = batch['img']\n        # # label_c = batch['digit'] # digit/content label (int) -- currently not used\n        # label_s = batch['color'] # color/style label (int) -- used for adversarial loss_s\n\n        x, label_c, label_s = self.trainer.datamodule.unpack(batch) # \"unpack\" must be implemented in the DataModule class -- Jan 22, 2021\n        out_dict = self(x)\n        loss_dict = self.loss_function(out_dict, batch, mode=\"train\")\n        # breakpoint()\n\n        # Log using tensorboard logger\n        #-- for scalar metrics, self.log will do\n        self.log('train/loss', loss_dict[\"loss\"]) # Default: on_step=True, on_epoch=True, prog_bar=True, logger=True)\n        #-- log each component of the loss\n        self.log('train/vae_loss', loss_dict[\"vae_loss\"])\n        self.log('train/recon_loss', loss_dict[\"recon_loss\"])\n        self.log('train/kld', loss_dict[\"kld\"])\n        self.log('train/kld_c', loss_dict[\"kld_c\"])\n        self.log('train/kld_s', loss_dict[\"kld_s\"])\n\n        self.log('train/adv_loss', loss_dict[\"adv_loss\"])\n        self.log('train/adv_loss_s', loss_dict[\"adv_loss_s\"])\n        if self.is_contrasive:\n            self.log('train/adv_loss_c', loss_dict[\"adv_loss_c\"])\n\n        #-- update and log the style_acc metric\n        score_s = loss_dict.pop(\"score_s\").detach().clone() # we don't want to compute the metric on the loss computational graph\n        self.train_style_acc(score_s, label_s)\n        self.log('train/style_acc', self.train_style_acc)# Note: we pass in the Metric object, rather than the value tensor\n\n        return {'loss': loss_dict[\"loss\"]}\n\n\n    def validation_step(self, batch, batch_idx):\n        # x = batch['img']\n        # # label_c = batch['digit'] # digit/content label (int) -- currently not used\n        # label_s = batch['color'] # color/style label (int) -- used for adversarial loss_s\n        x, label_c, label_s = self.trainer.datamodule.unpack(batch) # \"unpack\" must be implemented in the DataModule class -- Jan 22, 2021\n        out_dict = self(x)\n        loss_dict = self.loss_function(out_dict, batch, mode=\"val\")\n\n        # Log the validation loss\n        self.log('val/loss', loss_dict[\"loss\"]) # Default: on_step=True, on_epoch=True, prog_bar=True, logger=True)\n        # -- log each component of the loss\n        self.log('val/vae_loss', loss_dict[\"vae_loss\"])\n        self.log('val/recon_loss', loss_dict[\"recon_loss\"])\n        self.log('val/kld', loss_dict[\"kld\"])\n        self.log('val/kld_c', loss_dict[\"kld_c\"])\n        self.log('val/kld_s', loss_dict[\"kld_s\"])\n\n        self.log('val/adv_loss', loss_dict[\"adv_loss\"])\n        self.log('val/adv_loss_s', loss_dict[\"adv_loss_s\"])\n        if self.is_contrasive:\n            self.log('val/adv_loss_c', loss_dict[\"adv_loss_c\"])\n\n        # Update and log val_style_acc metric\n        score_s = loss_dict.pop('score_s').detach().clone()\n        self.val_style_acc(score_s, label_s)\n        self.log('val/style_acc', self.val_style_acc)\n        # print(self.trainer.callback_metrics.keys())\n        # ['val_loss', 'train/loss', 'train/vae_loss', 'train/recon_loss', 'train/kld', 'train/kld_c', 'train/kld_s', 'train/adv_loss', 'train/adv_loss_s', 'train/adv_loss_c', 'train/style_acc']\n        return {\"val_loss\": loss_dict[\"loss\"]}\n\n\n    def test_step(self, batch, batch_idx):\n        # x = batch['img']\n        # # label_c = batch['digit'] # digit/content label (int) -- currently not used\n        # label_s = batch['color'] # color/style label (int) -- used for adversarial loss_s\n        x, label_c, label_s = self.trainer.datamodule.unpack(batch) # \"unpack\" must be implemented in the DataModule class -- Jan 22, 2021\n        out_dict = self(x)\n        loss_dict = self.loss_function(out_dict, batch, mode=\"test\")\n\n        self.log('test/loss', 
loss_dict[\"loss\"])\n\n # Update and log test_style_acc metric\n score_s = loss_dict.pop('score_s').detach().clone()\n self.test_style_acc(score_s, label_s)\n self.log('test/style_acc', self.test_style_acc, on_step=False, on_epoch=True)\n\n return {\"test_loss\": loss_dict[\"loss\"]}\n\n def configure_optimizers(self):\n #TODO: ADD optimizer for discriminator\n optimizer = optim.Adam(self.parameters(), lr=self.learning_rate)\n lr_scheduler = {\n 'scheduler': optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n mode='min',\n patience=10,\n verbose=True),\n 'monitor': 'val_loss',\n 'name': \"train/lr/Adam\",\n }\n\n return [optimizer], [lr_scheduler]\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n # parser.add_argument('--in_shape', nargs=\"3\", type=int, default=[3,64,64])\n # Required\n parser.add_argument('--latent_dim', type=int, required=True)\n parser.add_argument('--n_styles', type=int, required=True)\n # Recommended\n # -- Model architecture\n parser.add_argument('--hidden_dims', nargs=\"+\", type=int) #None as default\n parser.add_argument('--adv_dims', dest=\"adversary_dims\", nargs=\"+\", type=int) #None as default\n parser.add_argument('--act_fn', type=str, default=\"leaky_relu\")\n parser.add_argument('--enc_type', type=str, default=\"conv\")\n parser.add_argument('--dec_type', type=str, default=\"conv\")\n\n # -- Loss function\n parser.add_argument('--kld_weight', type=float, default=1.0)\n parser.add_argument('--adv_weight', dest=\"adv_loss_weight\", type=float, default=1.0)\n # Add boolean argument switches for contrasive loss\n # src: https://stackoverflow.com/a/31347222\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument('--is_contrasive', dest='is_contrasive', action='store_true')\n group.add_argument('--not_contrasive', dest='is_contrasive', action='store_false')\n parser.set_defaults(is_contrasive=True)\n\n # -- Optimizer(s)\n parser.add_argument('-lr', '--learning_rate', type=float, default=1e-3)\n return parser\n\n\nclass Encoder(nn.Module):\n \"\"\"\n Input of (BS, C, H, W); Outputs (BS, *out_shape)\n - Each layer is implemented as a conv-block, conv2D -> (BN2D) -> out_fn (eg. 
ReLU)\n\n Args:\n -\n \"\"\"\n def __init__(self):\n super().__init__()\n # self.convs =\n\n\nclass Decoder(nn.Module):\n pass\n\n\nclass Adversary(nn.Module):\n pass\n\n\nclass FCAdversary(Adversary):\n pass\n\n\nclass CNNAdversary(Adversary):\n pass","sub_path":"src/models/plmodules/bilatent_vae.py","file_name":"bilatent_vae.py","file_ext":"py","file_size_in_byte":27698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"394478503","text":"# Returns the forecast, news, and calendar events by parsing through html and\n# using google API\n\nfrom __future__ import print_function\nimport requests\nfrom bs4 import BeautifulSoup\nfrom newspaper import fulltext\nimport newspaper\nfrom apiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\nimport datetime\nimport logging\n\n\n# Returns a dictionary mapping periods of time to their weather\ndef returnForecast():\n \n forecast = {}\n \n # Sometimes the website is down and causes errors\n try:\n \n page = requests.get(\"https://forecast.weather.gov/MapClick.php?x=212&y=\" + \n \"132&site=pbz&zmx=&zmy=&map_x=212&map_y=132#.WtfexYjwbD4\")\n soup = BeautifulSoup(page.content, \"html.parser\")\n sevenDay = soup.find(id=\"seven-day-forecast\")\n \n # Finds all the periods/days listed with information on the website\n periodTags = sevenDay.select(\".tombstone-container .period-name\")\n periods = [pt.get_text() for pt in periodTags]\n \n # Finds descriptions of the weather and the temp for each period on the webiste\n shortDescs = [sd.get_text() for sd in sevenDay.select(\".tombstone-container .short-desc\")]\n temps = [t.get_text() for t in sevenDay.select(\".tombstone-container .temp\")]\n descs = [d[\"title\"] for d in sevenDay.select(\".tombstone-container img\")]\n \n # Puts each day in the dictionary mapping to its weather and temp\n for item in periods:\n for i in range(len(periods)):\n if item == periods[i]:\n forecast[item] = [shortDescs[i], temps[i], descs[i]]\n \n # Adds current temperature to the dictionary as well\n current = soup.find(id=\"current_conditions-summary\")\n currTemp = current.find(class_=\"myforecast-current-lrg\").get_text()\n forecast[\"Temp\"] = currTemp\n \n except:\n \n return {}\n \n return forecast\n\n\n\"\"\"followed this documentation to use newspaper module to get headlines\n http://newspaper.readthedocs.io/en/latest/\"\"\"\n \n# Returns a list of news headlines\ndef returnNews():\n \n news = {}\n \n # Parses through the google news headlines\n page = newspaper.build(\"https://news.google.com/news/?ned=us&gl=US&hl=en\")\n \n for article in page.articles:\n try:\n url = article.url\n html = requests.get(url).text\n text = fulltext(html)\n # Creates a dictionary mappying titles to full text\n if article.title != None:\n news[article.title] = text\n except:\n pass\n return news\n \n\"\"\"followed this tutorial to use the google api to extract calendar events\n https://developers.google.com/calendar/quickstart/python\"\"\"\n \n# Returns a list of calendar events\ndef returnCalendar():\n \n calendar = []\n \n # Setup the Calendar API\n SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'\n store = file.Storage('credentials.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('calendar', 'v3', http=creds.authorize(Http()), cache_discovery=False)\n \n # Call the Calendar API\n now 
= datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n\n    events_result = service.events().list(calendarId='primary', timeMin=now,\n                                        maxResults=5, singleEvents=True,\n                                        orderBy='startTime').execute()\n    events = events_result.get('items', [])\n    \n    if not events:\n        events = ['No upcoming events found.']\n    for event in events:\n        start = event['start'].get('dateTime', event['start'].get('date'))\n        #print(start, event['summary'])\n        calendar.append((start, event['summary']))\n    return calendar\n    ","sub_path":"webscraping.py","file_name":"webscraping.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"642583551","text":"from django import forms\nfrom .models import Deal, Cactus\nclass DealForm(forms.ModelForm):\n    class Meta:\n        model = Deal\n        exclude = (\"slug\",)\n\n    def clean_price(self):\n        price = self.cleaned_data.get(\"price\")\n        if price<0:\n            raise forms.ValidationError(\"Price must be positive.\")\n        return price\n\n    def clean_weight(self):\n        weight = self.cleaned_data.get(\"weight\")\n        if weight<0:\n            raise forms.ValidationError(\"Weight must be positive.\")\n        return weight\n\nclass CactusForm(forms.ModelForm):\n    class Meta:\n        model = Cactus\n        exclude = ()","sub_path":"cactus/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"36430491","text":"\"\"\"\nWrite a program that asks a teacher for the number of students in his or her class. Next, ask the teacher\nhow many assignments are given in this class. With this information prompt the user to enter in scores for each\nstudent and compute their average grade in the class. Here's a sample running of your program:\n\nHow many students in the class? 2\nHow many assignments in the class? 2\n\nStudent #1\nAssignment #1: 100\nAssignment #2: 90\nStudent #1 earned a 95\n\nStudent #2\nAssignment #1: 90\nAssignment #2: 80\nStudent #2 earned a 85\n\n\"\"\"\n\n\ndef get_num_students_and_assignments():\n    num_students = int(input(\"how many students are in the class? \"))\n    num_assignments = int(input(\"how many assignments are in the class? \"))\n    return num_students, num_assignments\n\n\ndef calc_grades(num_students, num_assignments):\n    for stud in range(num_students):\n        print(f\"\\nstudent #{stud + 1}\")\n\n        sum_grade = 0\n        for a in range(num_assignments):\n            grade = int(input(f\"\\tassignment #{a + 1}: \"))\n            sum_grade += grade\n\n        avg = sum_grade / num_assignments\n        print(f\"student #{stud + 1} earned a {avg:.2f}\")\n\n    return\n\n\ndef main():\n    num_students, num_assignments = get_num_students_and_assignments()\n    calc_grades(num_students, num_assignments)\n\n\nmain()\n","sub_path":"homework/looping/challenge_6.py","file_name":"challenge_6.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"375571952","text":"# Helper functions for parsing specific kinds of data\nimport sys\nimport re\n\n# supported phone formats ['+33 (0)8 06 39 06 32', '05 94 40 87 21', '0256932111', '+33 5 49 50 46 69']\n# Lengths: 1st = 20, 2nd = 14, 3rd = 10, 4th = 17\n\n\ndef telephone(str_tel: str):\n    \"\"\"\n    :param str_tel: A phone number in an arbitrary format\n    :return: A phone number in the format +33 X XX XX XX XX\n    \"\"\"\n    if str_tel in [\"\", None]:  # The field was empty\n        return \"\"\n\n    len_tel = len(str_tel)  # Identify the format from the string length\n\n    if len_tel == 17:\n        return str_tel  # Already in the target format\n    elif len_tel == 20:\n        return str_tel.replace(\"(0)\", \"\")  # Strip the \"(0)\"\n    elif len_tel == 14:\n        return \"+33 \" + str_tel[1:]  # Prepend the \"+33\"\n    elif len_tel == 10:\n        new_tel = \" \".join([str_tel[:2], str_tel[2:4], str_tel[4:6], str_tel[6:8], str_tel[8:10]])  # insert spaces\n        return \"+33 \" + new_tel[1:]  # Prepend the +33\n    else:  # unsupported format (not supposed to happen)\n        print(\"The phone number is not in a supported format\", file=sys.stderr)\n        return str_tel\n\n\ndef findVoie(filename: str):\n    if \"_MP\" in filename:\n        return \"MP\"\n    if \"_PC\" in filename:\n        return \"PC\"\n    if \"_PSI\" in filename:\n        return \"PSI\"\n    if \"_PT\" in filename:\n        return \"PT\"\n    if \"_TSI\" in filename:\n        return \"TSI\"\n    if \"_ATS\" in filename:\n        return \"ATS\"\n    return None\n\ndef ParsMatName(matname: str):\n    matname = matname.replace(\")\",\"\")\n    ret = matname.split(\"(\")\n    ret[0] = ret[0].strip()\n    ret[1] = ret[1].strip().lower()\n    return ret\n\n\n\"\"\"\nTest:\ntel_possible = ['+33 (0)8 06 39 06 32', '05 94 40 87 21', '0256932111', '+33 5 49 50 46 69']\nfor tel in tel_possible:\n    print(\"Before: \" + tel + \"\\nAfter: \" + telephone(tel))\n\"\"\"\n\n\ndef checkTelephoneFr(tel: str):\n    \"\"\"Check if tel is a phone number in the format +33 X XX XX XX XX\n\n    :return 0 if it's a phone number in the correct format\n            1 if it's detected as a phone number but not in the good format\n            -1 otherwise\"\"\"\n\n    if tel[0] == \"+\":\n        no_pref = tel[1:]\n        if len(no_pref) == 16:\n            num = no_pref.replace(\" \", \"\")\n            if num.isdecimal() and len(num) == 11:\n                return 0\n    # If we reach this code, tel wasn't in the correct format\n    num = tel.replace(\" \", \"\").replace(\"+\", \"\").replace(\"(0)\", \"\")\n    if (num.startswith(\"33\") and len(num) == 11) or (num.startswith(\"0\") and len(num) == 10):\n        return 1\n    else:\n        return -1\n\n\ndef checkMail(email: str):\n    \"\"\"Check if 'email' is a valid address: string1@string2.suffix\n\n    :param email: str to test\n    :return: 0 if it's a valid address\n            -1 otherwise\n    \"\"\"\n\n    # Note: Thorough validation of an e-mail address is a genuinely hard\n    # problem, given everything the RFCs accept.\n
    pattern = r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\"\n    res = re.findall(pattern, email)\n    if len(res) == 1 and res[0] == email:\n        return 0\n    else:\n        return -1\n\n\n
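# Added illustration (not part of the original module): a small, hedged\n# sanity check that exercises telephone() and checkTelephoneFr() on the four\n# documented input formats. It only runs when called explicitly.\ndef _demo_telephone():\n    for raw in ['+33 (0)8 06 39 06 32', '05 94 40 87 21', '0256932111', '+33 5 49 50 46 69']:\n        norm = telephone(raw)\n        # After normalization, checkTelephoneFr() should return 0 for each one.\n        print(raw, '->', norm, checkTelephoneFr(norm))\n\n\n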
def checkDate(date):\n    \"\"\"Check if 'date' is a valid date : YYYYMM\n\n    :param date: str to test\n    :return: 0 if it's a valid date\n            -1 otherwise\n    \"\"\"\n\n    if type(date) is int:\n        if len(str(date)) == 6:  # an int has no len(); compare its digit count\n            return 0\n    if type(date) is str:\n        if date.isdecimal() and len(date) == 6:\n            return 0\n    return -1\n","sub_path":"src/ParserDonnee.py","file_name":"ParserDonnee.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"422876457","text":"from django.test import TransactionTestCase\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db.models import signals\n\nfrom url_tracker.trackers import lookup_previous_url, track_changed_url, track_url_changes_for_model, add_old_url\nfrom url_tracker.models import URLChangeMethod\n\nfrom .models import TestModel, reverse_model, RemoveSignals\n\n\nclass TestTrackUrlForModel(RemoveSignals, TransactionTestCase):\n\n    def setUp(self):\n        self.function_from_signals = lambda signal: list(map(lambda _: _[1], signal.receivers))\n\n    def test_model_without_url_method(self):\n        TestModel.url_tracking_methods = []\n        self.assertRaises(\n            ImproperlyConfigured,\n            track_url_changes_for_model,\n            TestModel,\n        )\n\n    def test_adds_pre_save_signal(self):\n        self.assertFalse(self.function_from_signals(signals.pre_save))\n        track_url_changes_for_model(TestModel)\n        self.assertIn(lookup_previous_url, self.function_from_signals(signals.pre_save))\n\n    def test_adds_post_save_signal(self):\n        self.assertFalse(self.function_from_signals(signals.pre_save))\n        track_url_changes_for_model(TestModel)\n        self.assertIn(track_changed_url, self.function_from_signals(signals.post_save))\n\n\nclass TestLookupUrl(RemoveSignals, TransactionTestCase):\n\n    def test_new_instance_dont_create(self):\n        unsaved_instance = TestModel(slug='initial')\n        lookup_previous_url(unsaved_instance)\n        self.assertFalse(URLChangeMethod.objects.count())\n\n    def test_url_blank_dont_create(self):\n        instance = TestModel.objects.create(text='')\n        lookup_previous_url(instance)\n\n        self.assertFalse(URLChangeMethod.objects.filter(\n            method_name__exact='get_text').count())\n\n    def test_new_instance_create(self):\n        instance = TestModel.objects.create(slug='initial')\n        instance.get_absolute_url()\n        lookup_previous_url(instance)\n\n        self.assertEqual(URLChangeMethod.objects.count(), 1)\n        url_method = URLChangeMethod.objects.all()[0]\n        self.assertEqual(url_method.method_name, 'get_absolute_url')\n\n        self.assertEqual(url_method.old_urls.count(), 1)\n        old_url = url_method.old_urls.all()[0]\n        self.assertEqual(old_url.url, reverse_model('initial'))\n\n\nclass TestChangedUrl(RemoveSignals, TransactionTestCase):\n    def test_no_url_change_method(self):\n        instance = TestModel.objects.create()\n        track_changed_url(instance)\n\n        self.assertFalse(URLChangeMethod.objects.count())\n\n    def test_url_no_reverse_dont_create(self):\n        instance = TestModel.objects.create(slug='//')\n        track_changed_url(instance)\n\n        self.assertFalse(URLChangeMethod.objects.count())\n\n    def test_same_urls_delete_old_url(self):\n        instance = TestModel.objects.create(slug='initial')\n        url_method = URLChangeMethod.objects.create(\n            content_object=instance,\n            method_name='get_absolute_url',\n        )\n        url_method.old_urls.create(url=reverse_model('initial'))\n        url_method.old_urls.create(url=reverse_model('another'))\n\n        track_changed_url(instance)\n        self.assertEqual(url_method.old_urls.count(), 1)\n\n    def test_save_current_url(self):\n        instance = TestModel.objects.create(slug='current')\n        url_method = URLChangeMethod.objects.create(\n            content_object=instance,\n            method_name='get_absolute_url',\n        )\n        url_method.old_urls.create(url=reverse_model('another'))\n        track_changed_url(instance)\n        self.assertEqual(URLChangeMethod.objects.count(), 1)\n        url_method = URLChangeMethod.objects.all()[0]\n        self.assertEqual(url_method.current_url, reverse_model('current'))\n\n    def test_same_url_dont_create(self):\n        instance = TestModel.objects.create(slug='initial')\n        url_method = URLChangeMethod.objects.create(\n            content_object=instance,\n            method_name='get_absolute_url',\n        )\n        url_method.old_urls.create(url=reverse_model('initial'))\n        track_changed_url(instance)\n        self.assertEqual(URLChangeMethod.objects.count(), 0)\n\n\nclass TestAddOldUrl(RemoveSignals, TransactionTestCase):\n    def test_without_previous_tracking(self):\n        instance = TestModel.objects.create(slug='initial')\n        add_old_url(instance, 'get_absolute_url', 'old_url')\n        self.assertEqual(URLChangeMethod.objects.count(), 1)\n        url_method = URLChangeMethod.objects.all()[0]\n        self.assertEqual(url_method.method_name, 'get_absolute_url')\n        self.assertEqual(url_method.old_urls.count(), 1)\n        old_url = url_method.old_urls.all()[0]\n        self.assertEqual(old_url.url, 'old_url')\n","sub_path":"tests/test_trackers.py","file_name":"test_trackers.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"471419186","text":"# Exercise 34\r\n# Write a program that reads an integer\r\n# entered by the user. The program must\r\n# display a message indicating whether\r\n# the integer is even or odd\r\n\r\nentero = float(int(input('enter an integer: ')))\r\n\r\na = (entero / 2)\r\nb = (entero % 2)\r\n\r\nif b <= 0.0:\r\n\tprint('the number is even')\r\nelif b >= 1:\r\n\tprint('the number is odd')","sub_path":"ejercicio34.py","file_name":"ejercicio34.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"204322019","text":"'''THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND\nNON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE\nDISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY,\nWHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.'''\n\n# Bitcoin Cash (BCH) qpz32c4lg7x7lnk9jg6qg7s4uavdce89myax5v5nuk\n# Ether (ETH) - 0x843d3DEC2A4705BD4f45F674F641cE2D0022c9FB\n# Litecoin (LTC) - Lfk5y4F7KZa9oRxpazETwjQnHszEPvqPvu\n# Bitcoin (BTC) - 34L8qWiQyKr8k4TnHDacfjbaSqQASbBtTd\n\n# contact :- github@jamessawyer.co.uk\n\n\n\n#############################\n# Author: Aravind Kashyap\n# File: lis.py\n# comments: This programme outputs the Longest Strictly Increasing Subsequence in O(NLogN)\n#           Where N is the Number of elements in the list\n#############################\n\n\ndef CeilIndex(v, l, r, key):\n    while r - l > 1:\n        m = (l + r) // 2  # integer division keeps m a valid list index on Python 3\n        if v[m] >= key:\n            r = m\n        else:\n            l = m\n\n    return r\n\n\ndef LongestIncreasingSubsequenceLength(v):\n    if len(v) == 0:\n        return 0\n\n    tail = [0] * len(v)\n    length = 1\n\n    tail[0] = v[0]\n\n    for i in range(1, len(v)):\n        if v[i] < tail[0]:\n            tail[0] = v[i]\n        elif v[i] > tail[length - 1]:\n            tail[length] = v[i]\n            length += 1\n        else:\n            tail[CeilIndex(tail, -1, length - 1, v[i])] = v[i]\n\n    return length\n\n\n
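# Added walk-through (illustrative): for v = [2, 5, 3, 7, 11, 8, 10, 13, 6]\n# the tail array evolves as\n#   [2] -> [2,5] -> [2,3] -> [2,3,7] -> [2,3,7,11] -> [2,3,7,8]\n#   -> [2,3,7,8,10] -> [2,3,7,8,10,13] -> [2,3,6,8,10,13]\n# so the function returns 6 (one LIS is 2, 3, 7, 8, 10, 13). Note that tail\n# stores the smallest possible end value for each subsequence length, not an\n# actual subsequence.\n\n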
if __name__ == \"__main__\":\n    v = [2, 5, 3, 7, 11, 8, 10, 13, 6]\n    print(LongestIncreasingSubsequenceLength(v))\n","sub_path":"dynamic_programming/longest_increasing_subsequence_o(nlogn).py","file_name":"longest_increasing_subsequence_o(nlogn).py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"608852823","text":"import numpy as np\nimport math\nfrom numpy import *\nimport matplotlib.pyplot as mpl\nimport h5py\nimport sys\nimport os\nfrom matplotlib.pyplot import *\nimport matplotlib as mpll\nimport symmetrize_Nc4x4\n\n\nclass BSE:\n\n\n    def __init__(self,fileG4,fileG=\"dca_tp.hdf5\",draw=True,useG0=False,symmetrize_G4=False,phSymmetry=False,calcRedVertex=False,calcCluster=True,nkfine=100):\n        self.fileG4 = fileG4\n        self.fileG = fileG\n        self.draw = draw\n        self.useG0 = useG0\n        self.symmetrize_G4 = symmetrize_G4\n        print(\"self.symmetrize_G4=\", self.symmetrize_G4)\n        self.calcCluster = calcCluster\n        self.calcRedVertex = calcRedVertex\n        self.phSymmetry = phSymmetry\n        self.readData()\n        #self.calcPS()\n        self.reorderG4()\n        self.setupMomentumTables()\n        self.determine_iKPiPi()\n        print (\"Index of (pi,pi): \",self.iKPiPi)\n        self.determine_specialK()\n        print (\"Index of (pi,pi): \",self.iKPiPi)\n        print (\"Index of (pi,0): \",self.iKPi0)\n        self.calcDwaveSCClusterSus()\n        self.calcChi0Cluster()\n        self.calcGammaIrr()\n        if calcCluster == False: self.buildChi0Lattice(nkfine)\n        self.buildKernelMatrix()\n        self.calcKernelEigenValues() \n        if self.draw: self.plotLeadingSolutions(self.Kvecs,self.lambdas,self.evecs[:,:,0,0,:],\"Cu-Cu\")\n        #if calcRedVertex: self.calcReducibleLatticeVertex()\n        #if self.vertex_channel in (\"PARTICLE_PARTICLE_SUPERCONDUCTING\",\"PARTICLE_PARTICLE_UP_DOWN\"):\n        #    if calcCluster == False: self.calcSCSus()\n        #    self.calcDwaveSCClusterSus()\n\n    # read basic parameters from the data and the cluster one and two particle Green's function\n    def readData(self):\n        f = h5py.File(self.fileG4,'r')\n        # self.iwm = array(f['parameters']['vertex-channel']['w-channel'])[0] # transferred frequency in units of 2*pi*temp\n        self.iwm = array(f['parameters']['four-point']['frequency-transfer'])[0] # transferred frequency in units of 2*pi*temp\n        
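# (Orientation comment added; the layout is inferred from the reads below.)\n        # The HDF5 output groups run parameters under 'parameters/<group>' and\n        # measured functions under 'functions/<name>/data'; scalar entries are\n        # unwrapped with a trailing [0] after the array(...) conversion.\n        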
print(\"Transferred frequency iwm = \",self.iwm)\n # self.qchannel = array(f['parameters']['vertex-channel']['q-channel'])\n self.qchannel = array(f['parameters']['four-point']['momentum-transfer'])\n print(\"Transferred momentum q = \",self.qchannel)\n # a = array(f['parameters']['vertex-channel']['vertex-measurement-type'])[:]\n #a = array(f['parameters']['four-point']['channels']['data'])[0]\n #self.vertex_channel = ''.join(chr(i) for i in a)\n self.vertex_channel = 'PARTICLE_PARTICLE_UP_DOWN'\n print(\"Vertex channel = \",self.vertex_channel)\n self.invT = array(f['parameters']['physics']['beta'])[0]\n print(\"Inverse temperature = \",self.invT)\n self.temp = 1.0/self.invT\n self.Upp = array(f['parameters']['threebands-Hubbard-model']['U_pp'])[0]\n print(\"U_pp = \",self.Upp)\n self.Udd = array(f['parameters']['threebands-Hubbard-model']['U_dd'])[0]\n print(\"U_dd = \",self.Udd)\n self.tpp = array(f['parameters']['threebands-Hubbard-model']['t_pp'])[0]\n print(\"t_pp = \",self.tpp)\n self.tpd = array(f['parameters']['threebands-Hubbard-model']['t_pd'])[0]\n print(\"t_pd = \",self.tpd)\n self.epp = array(f['parameters']['threebands-Hubbard-model']['ep_p'])[0]\n print(\"ep_p = \",self.epp)\n self.epd = array(f['parameters']['threebands-Hubbard-model']['ep_d'])[0]\n print(\"ep_d = \",self.epd)\n self.fill = array(f['parameters']['physics']['density'])[0]\n print(\"filling = \",self.fill)\n self.dens = array(f['DCA-loop-functions']['density']['data'])\n print(\"actual filling:\",self.dens)\n self.nk = array(f['DCA-loop-functions']['n_k']['data'])\n self.sign = array(f['DCA-loop-functions']['sign']['data'])\n print(\"sign:\",self.sign)\n self.orbital=array(f['DCA-loop-functions']['orbital-occupancies']['data'])\n print(\"orbital occupancy:\",self.orbital[self.orbital.shape[0]-1])\n print(\"Cu filling =\", self.orbital[self.orbital.shape[0]-1,0,0]+self.orbital[self.orbital.shape[0]-1,1,0])\n print(\"Ox filling =\", self.orbital[self.orbital.shape[0]-1,0,1]+self.orbital[self.orbital.shape[0]-1,1,1])\n print(\"Oy filling =\", self.orbital[self.orbital.shape[0]-1,0,2]+self.orbital[self.orbital.shape[0]-1,1,2])\n self.sigmaarray=array(f['DCA-loop-functions']['L2_Sigma_difference']['data'])\n print(\"L2_Sigma_difference =\", self.sigmaarray)\n # Now read the 4-point Green's function\n # G4Re = array(f['functions']['G4_k_k_w_w']['data'])[:,:,:,:,:,:,:,:,0]\n # G4Im = array(f['functions']['G4_k_k_w_w']['data'])[:,:,:,:,:,:,:,:,1]\n \n G4Re = array(f['functions']['G4_PARTICLE_PARTICLE_UP_DOWN']['data'])[0,0,:,:,:,:,:,:,:,:,0]\n G4Im = array(f['functions']['G4_PARTICLE_PARTICLE_UP_DOWN']['data'])[0,0,:,:,:,:,:,:,:,:,1]\n #G4Re = array(f['functions']['G4']['data'])[0,0,:,:,:,:,:,:,:,:,0]\n #G4Im = array(f['functions']['G4']['data'])[0,0,:,:,:,:,:,:,:,:,1]\n self.G4 = G4Re+1j*G4Im\n # Now read the cluster Green's function\n GRe = array(f['functions']['cluster_greens_function_G_k_w']['data'])[:,:,0,:,0,:,0]\n GIm = array(f['functions']['cluster_greens_function_G_k_w']['data'])[:,:,0,:,0,:,1]\n #GRe = array(f['functions']['free_cluster_greens_function_G0_k_w']['data'])[:,:,0,:,0,:,0]\n #GIm = array(f['functions']['free_cluster_greens_function_G0_k_w']['data'])[:,:,0,:,0,:,1]\n self.Green = GRe + 1j * GIm\n #print(\"self.Green =\",self.Green[512:1024,0,0,0])\n \n \n #GRe = array(f['functions']['cluster_greens_function_G_k_t']['data'])[:,:,0,:,0,:,0]\n #GIm = array(f['functions']['cluster_greens_function_G_k_t']['data'])[:,:,0,:,0,:,1]\n GRe = 
array(f['functions']['free_cluster_greens_function_G0_k_t']['data'])[:,:,0,:,0,:,0]\n GIm = array(f['functions']['free_cluster_greens_function_G0_k_t']['data'])[:,:,0,:,0,:,1]\n self.Greenkt = GRe + 1j * GIm\n #print(\"self.Greenkt =\",self.Greenkt[120:137,0,0,0])\n self.Greenrt = array(f['functions']['cluster_greens_function_G_r_t']['data'])[:,:,0,:,0,:]\n #self.Greenrt = array(f['functions']['free_cluster_greens_function_G0_r_t']['data'])[:,:,0,:,0,:]\n #GIm = array(f['functions']['free_cluster_greens_function_G0_k_t']['data'])[:,:,0,:,0,:]\n #self.Greenrt = GRe + 1j * GIm\n # f2 = h5py.File(self.fileG,'r')\n # bare cluster Green's function G0(k,t)\n # self.G0kt = array(f2['functions']['free_cluster_greens_function_G0_k_t']['data'][:,:,0,:,0,:])\n # self.ntau = G0kt.shape[0]\n\n # bare cluster Green's function G0(k,w)\n # G0kwRe = array(f2['functions']['free_cluster_greens_function_G0_k_w']['data'][:,:,0,:,0,:,0])\n # G0kwIm = array(f2['functions']['free_cluster_greens_function_G0_k_w']['data'][:,:,0,:,0,:,1])\n # self.G0kw = G0kwRe + 1j*G0kwIm\n\n # Now read the self-energy\n s = np.array(f['functions']['Self_Energy']['data'])\n self.sigmaoriginal = s[:,:,0,:,0,:,0] + 1j *s[:,:,0,:,0,:,1]\n print(\"Imsimga=\",s[127:138,1,0,0,0,0,1])\n nOrb = self.Green.shape[2]\n nw = self.Green.shape[0]\n nk = self.Green.shape[1]\n \n\n # Now load frequency data\n self.wn = np.array(f['domains']['frequency-domain']['elements'])\n self.wnSet = np.array(f['domains']['vertex-frequency-domain (COMPACT)']['elements'])\n\n # Now read the K-vectors\n self.Kvecs = array(f['domains']['CLUSTER']['MOMENTUM_SPACE']['elements']['data'])\n print (\"K-vectors: \",self.Kvecs)\n\n # Now read other Hubbard parameters\n self.mu = np.array(f['DCA-loop-functions']['chemical-potential']['data'])[0]\n self.nOrb = self.Green.shape[2]\n self.NwG4 = self.G4.shape[0]\n self.Nc = self.Green.shape[1]\n self.NwG = self.Green.shape[0]\n self.NtG = self.Greenkt.shape[0]\n self.nt = self.Nc*self.NwG4*self.nOrb*self.nOrb\n\n print (\"NwG4: \",self.NwG4)\n print (\"NtG: \",self.NtG)\n print (\"NwG : \",self.NwG)\n print (\"Nc : \",self.Nc)\n print (\"nOrb: \",self.nOrb)\n print (\"G4shape0 = \", self.G4.shape[0], \", G4shape1 = \", self.G4.shape[1], \", G4shape2 = \", self.G4.shape[2], \", G4shape3 = \", self.G4.shape[3], \", G4shape4 = \", self.G4.shape[4], \", G4shape5 = \", self.G4.shape[5], \", G4shape6 = \", self.G4.shape[6], \", G4shape7 = \", self.G4.shape[7])\n self.NwTP = 2*np.array(f['parameters']['domains']['imaginary-frequency']['four-point-fermionic-frequencies'])[0]\n self.iQ = self.K_2_iK(self.qchannel[0],self.qchannel[1])\n print (\"Index of transferred momentum: \", self.iQ)\n #self.ddwave = array(f['CT-AUX-SOLVER-functions']['dwave-pp-correlator']['data'])\n #print('shape0 of ddwave=',self.ddwave.shape[0])\n #print('shape1 of ddwave=',self.ddwave.shape[1])\n #print('Cu-Cu ddwave=',self.ddwave[:,0])\n #print('Ox-Ox ddwave=',self.ddwave[:,1])\n #print('Oy-Oy ddwave=',self.ddwave[:,2])\n self.iwG40 = self.NwG4/2\n self.iwG0 = self.NwG/2\n\n f.close()\n\n # Now remove vacuum part of charge G4\n if (self.vertex_channel==\"PARTICLE_HOLE_CHARGE\"):\n if (self.qchannel[0] == 0) & (self.qchannel[1] == 0):\n for ik1 in range(self.Nc):\n for ik2 in range(self.Nc):\n for iw1 in range(self.NwG4):\n for iw2 in range(self.NwG4):\n for l1 in range(self.nOrb):\n for l2 in range(self.nOrb):\n for l3 in range(self.nOrb):\n for l4 in range(self.nOrb):\n iw1Green = iw1 - self.iwG40 + self.iwG0\n iw2Green = iw2 - self.iwG40 + self.iwG0\n 
self.G4[iw1,ik1,iw2,ik2,l1,l2,l3,l4] -= 2.0 * self.Green[iw1Green,ik1,l1,l2] * self.Green[iw2Green,ik2,l4,l3]\n\n\n def K_2_iK(self,Kx,Ky):\n delta=1.0e-4\n # First map (Kx,Ky) into [0...2pi,0...2pi] region where Kvecs are defined\n if Kx < -delta : Kx += 2*pi\n if Ky < -delta : Ky += 2*pi\n if Kx > 2.*pi-delta : Kx -= 2*pi\n if Ky > 2.*pi-delta : Ky -= 2*pi\n # Now find index of Kvec = (Kx,Ky)\n for iK in range(0,self.Nc):\n if (abs(float(self.Kvecs[iK,0]-Kx)) < delta) & (abs(float(self.Kvecs[iK,1]-Ky)) < delta): return iK\n print(\"No Kvec found!!!\")\n\n\n def reorderG4(self):\n # In Peter's code:\n # PARTICLE_HOLE_MAGNETIC:\n #\n # k1,l1 k2,l4 k1,l1 k2,l3\n # ----->------------->------ ----->------------->------\n # | | | |\n # | G4 | mapped onto | G4 |\n # -----<-------------<------ -----<-------------<------\n # k1+q,l3 k2+q,l2 k1+q,l2 k2+q,l4\n #\n \n Nc=self.Nc; NwG4=self.NwG4; NwG=self.NwG; nOrb = self.nOrb\n self.G4r=np.zeros((NwG4,Nc,nOrb,nOrb,NwG4,Nc,nOrb,nOrb),dtype='complex')\n G4susQz0 = 0.0\n G4susQzPi = 0.0\n for ik1 in range(self.Nc):\n for ik2 in range(self.Nc):\n for iw1 in range(self.NwG4):\n for iw2 in range(self.NwG4):\n for l1 in range(self.nOrb):\n for l2 in range(self.nOrb):\n for l3 in range(self.nOrb):\n for l4 in range(self.nOrb):\n if self.vertex_channel==\"PARTICLE_HOLE_MAGNETIC\":\n c1= self.G4[iw1,iw2,ik1,ik2,l1,l2,l3,l4]\n self.G4r[iw1,ik1,l1,l3,iw2,ik2,l4,l2] = c1\n if (l1==l3) & (l4==l2):\n G4susQz0 += c1\n G4susQzPi += c1*exp(1j*np.pi*(l2-l3))\n elif self.vertex_channel==\"PARTICLE_PARTICLE_UP_DOWN\":\n c1 = self.G4[iw2,ik2,iw1,ik1,l4,l3,l2,l1]\n self.G4r[iw1,ik1,l1,l2,iw2,ik2,l3,l4] = c1 \n if (l1!=l2) & (l3!=l4):\n G4susQz0 += c1\n \n G4rtemp = self.G4r.copy()\n \n \n \n \n \n PS=0.0; PScc=0.0; PSoxox=0.0; PSoyoy=0.0; PScox=0.0; PScoy=0.0; PSoxoy=0.0; \n for iw1 in range(NwG4):\n for ik1 in range(Nc):\n for iw2 in range(NwG4):\n for ik2 in range(Nc):\n PScc += self.G4r[iw1,ik1,0,0,iw2,ik2,0,0]\n PSoxox += self.G4r[iw1,ik1,1,1,iw2,ik2,1,1]\n PSoyoy += self.G4r[iw1,ik1,2,2,iw2,ik2,2,2]\n PScox += self.G4r[iw1,ik1,0,0,iw2,ik2,1,1] + self.G4r[iw1,ik1,1,1,iw2,ik2,0,0]\n PScoy += self.G4r[iw1,ik1,0,0,iw2,ik2,2,2] + self.G4r[iw1,ik1,2,2,iw2,ik2,0,0]\n PSoxoy += self.G4r[iw1,ik1,1,1,iw2,ik2,2,2] + self.G4r[iw1,ik1,2,2,iw2,ik2,1,1]\n\n PScc /= self.Nc*self.invT\n print(\"G4 s-wave Pairfield susceptibility for Cu-Cu is \",PScc)\n #print(\"chi0kiw s-wave Pairfield susceptibility error2 for Cu-Cu is \",PSccerror2)\n PSoxox /= self.Nc*self.invT\n print(\"G4 s-wave Pairfield susceptibility for Ox-Ox is \",PSoxox)\n PSoyoy /= self.Nc*self.invT \n print(\"G4 s-wave Pairfield susceptibility for Oy-Oy is \",PSoyoy)\n PScox /= self.Nc*self.invT \n print(\"G4 s-wave Pairfield susceptibility for Cu-Ox is \",PScox)\n PScoy /= self.Nc*self.invT \n print(\"G4 s-wave Pairfield susceptibility for Cu-Oy is \",PScoy)\n PSoxoy /= self.Nc*self.invT \n print(\"G4 s-wave Pairfield susceptibility for Ox-Oy is \",PSoxoy)\n PS = PScc+PSoxox+PSoyoy+PScox+PScoy+PSoxoy\n print(\"G4 s-wave Pairfield susceptibility is \",PS)\n \n \n \n \n \n #PS=0.0; PScc=0.0; PSoxox=0.0; PSoyoy=0.0; PScox=0.0; PScoy=0.0; PSoxoy=0.0; testG4susQz0=0.0; testG4susQz1=0.0;ep=0.0;\n #for iw in range(0,1024):\n #for iw in range(NwG):\n # for ik in range(self.Nc):\n # PScc += self.chic0kiw[iw,ik,0,0,iw,ik,0,0]\n # PSoxox += self.chic0kiw[iw,ik,1,1,iw,ik,1,1]\n # PSoyoy += self.chic0kiw[iw,ik,2,2,iw,ik,2,2]\n # PScox += self.chic0kiw[iw,ik,0,0,iw,ik,1,1] + self.chic0kiw[iw,ik,1,1,iw,ik,0,0]\n # PScoy += 
self.chic0kiw[iw,ik,0,0,iw,ik,2,2] + self.chic0kiw[iw,ik,2,2,iw,ik,0,0]\n # PSoxoy += self.chic0kiw[iw,ik,1,1,iw,ik,2,2] + self.chic0kiw[iw,ik,2,2,iw,ik,1,1]\n #PSccerror1=0.0;PSccerror2=0.0;\n #for iw in range(-512,512):\n # PSccerror1+=1.0/((2*iw+1)*1j*3.1415926/self.invT-4.25-0.9)/(-(2*iw+1)*1j*3.1415926/self.invT-4.25-0.9)\n #PSccerror1*=1.0/(self.invT)\n #PSccerror2=(1-2.0/(math.exp(self.invT*(4.25+0.9))+1))/2/(4.25+0.9)\n #PScc = PScc*(self.invT-ep)/(float(Nc)*self.invT*self.invT)\n #print(\"chi0kiw (sum over 64 w) s-wave Pairfield susceptibility for Cu-Cu is \",PScc)\n #print(\"chi0kiw s-wave Pairfield susceptibility error1 for Cu-Cu is \",PSccerror2)\n #print(\"chi0kiw s-wave Pairfield susceptibility error2 for Cu-Cu is \",PSccerror2)\n #PSoxox = PSoxox*(self.invT-ep)/(float(Nc)*self.invT*self.invT) \n #print(\"chi0kiw (sum over 512 w) s-wave Pairfield susceptibility for Ox-Ox is \",PSoxox)\n #PSoyoy = PSoyoy*(self.invT-ep)/(float(Nc)*self.invT*self.invT) \n #print(\"chi0kiw (sum over 512 w) s-wave Pairfield susceptibility for Oy-Oy is \",PSoyoy)\n #PScox = PScox*(self.invT-ep)/(float(Nc)*self.invT*self.invT) \n #print(\"chi0kiw (sum over 512 w) s-wave Pairfield susceptibility for Cu-Ox is \",PScox)\n #PScoy = PScoy*(self.invT-ep)/(float(Nc)*self.invT*self.invT) \n #print(\"chi0kiw (sum over 512 w) s-wave Pairfield susceptibility for Cu-Oy is \",PScoy)\n #PSoxoy = PSoxoy*(self.invT-ep)/(float(Nc)*self.invT*self.invT) \n #print(\"chi0kiw (sum over 512 w) s-wave Pairfield susceptibility for Ox-Oy is \",PSoxoy)\n #PS = PScc+PSoxox+PSoyoy+PScox+PScoy+PSoxoy\n #print(\"chi0kiw (sum over 512 w) s-wave Pairfield susceptibility is \",PS) \n \n if self.symmetrize_G4:\n nwn = self.G4r.shape[0]\n \n \n sym=symmetrize_Nc4x4.symmetrize()\n type=dtype(self.G4r[0,0,0,0,0,0,0,0])\n for iK1 in range(0,Nc):\n for iK2 in range(0,Nc):\n #if (iK1==iK2):\n tmp = zeros((nwn,nwn),dtype=type)\n for iSym in [0]: # Apply every point-group symmetry operation\n iK1Trans = sym.symmTrans_of_iK(iK1,iSym)\n iK2Trans = sym.symmTrans_of_iK(iK2,iSym)\n tmp += self.G4r[:,iK1Trans,0,0,:,iK2Trans,0,0] \n for iSym in [0]:\n iK1Trans = sym.symmTrans_of_iK(iK1,iSym)\n iK2Trans = sym.symmTrans_of_iK(iK2,iSym)\n self.G4r[:,iK1Trans,0,0,:,iK2Trans,0,0] = tmp\n \n #for iw1 in range(nwn):\n # for iw2 in range(nwn):\n # imw1 = nwn-1-iw1\n # imw2 = nwn-1-iw2\n # tmp1 = self.G4r[iw1,:,:,:,iw2,:,:,:]\n # tmp2 = self.G4r[imw1,:,:,:,imw2,:,:,:]\n # self.G4r[iw1,:,:,:,iw2,:,:,:] = 0.5*(tmp1+conj(tmp2))\n # self.G4r[imw1,:,:,:,imw2,:,:,:] = 0.5*(conj(tmp1)+tmp2)\n\n #GP = self.G4r.reshape(self.nt,self.nt)\n #GP = 0.5*(GP + GP.transpose())\n #self.G4r = GP.reshape(nwn,Nc,nOrb,nOrb,nwn,Nc,nOrb,nOrb)\n\n self.G4M = self.G4r.reshape(self.nt,self.nt)\n \n if self.vertex_channel==\"PARTICLE_HOLE_MAGNETIC\":\n print(\"Cluster Chi(q,qz=0) :\", G4susQz0/(self.invT*self.Nc*2.0))\n print(\"Cluster Chi(q,qz=pi):\", G4susQzPi/(self.invT*self.Nc*2.0)) \n \n if self.vertex_channel==\"PARTICLE_PARTICLE_UP_DOWN\":\n print(\"Cluster inter-orbital Chi(q=0):\", G4susQz0/(self.invT*self.Nc*4.0))\n\n def setupMomentumTables(self):\n # build tables for K+K' and K-K'\n self.iKDiff = zeros((self.Nc,self.Nc),dtype='int')\n self.iKSum = zeros((self.Nc,self.Nc),dtype='int')\n Nc = self.Nc\n for iK1 in range(Nc):\n Kx1 = self.Kvecs[iK1,0]; Ky1 = self.Kvecs[iK1,1]\n for iK2 in range(0,Nc):\n Kx2 = self.Kvecs[iK2,0]; Ky2 = self.Kvecs[iK2,1]\n iKS = self.K_2_iK(Kx1+Kx2,Ky1+Ky2)\n iKD = self.K_2_iK(Kx1-Kx2,Ky1-Ky2)\n self.iKDiff[iK1,iK2] = iKD\n self.iKSum[iK1,iK2] = 
iKS\n\n def determine_iKPiPi(self):\n self.iKPiPi = 0\n Nc=self.Nc\n for iK in range(Nc):\n kx = abs(self.Kvecs[iK,0] - np.pi)\n ky = abs(self.Kvecs[iK,1] - np.pi)\n if kx >= 2*np.pi: kx-=2.*pi\n if ky >= 2*np.pi: ky-=2.*pi\n if kx**2+ky**2 <= 1.0e-5:\n self.iKPiPi = iK\n break\n\n def calcChi0Cluster(self):\n print (\"Now calculating chi0 on cluster\")\n self.chic0 = zeros((self.NwG4,self.Nc,self.nOrb,self.nOrb,self.NwG4,self.Nc,self.nOrb,self.nOrb),dtype='complex')\n #self.chic0ktau = zeros((self.NtG,self.Nc,self.nOrb,self.nOrb,self.NtG,self.Nc,self.nOrb,self.nOrb),dtype='complex')\n #self.chic0kiw = zeros((self.NwG,self.Nc,self.nOrb,self.nOrb,self.NwG,self.Nc,self.nOrb,self.nOrb),dtype='complex')\n\n Nc=self.Nc; NwG4=self.NwG4; NwG=self.NwG; nOrb = self.nOrb; NtG=self.NtG; c2=0.0;\n \n if (self.vertex_channel == \"PARTICLE_PARTICLE_UP_DOWN\"):\n for iw in range(0,NwG4):\n for ik in range(Nc):\n for l1 in range(nOrb):\n for l2 in range(nOrb):\n for l3 in range(nOrb):\n for l4 in range(nOrb):\n iw1 = int(iw - NwG4/2 + NwG/2)\n ikPlusQ = int(self.iKSum[self.iKDiff[0,ik],self.iQ]) # -k+Q\n minusiwPlusiwm = int(min(max(NwG-iw1-1 + self.iwm,0),NwG-1)) # -iwn + iwm\n #if (l1==l2==l3==l4==0) & (iw==0): print(\"ik=\",ik,\" ikPlusQ=\",ikPlusQ)\n if (l2 != l4 and l2 == 0) or (l2 != l4 and l4 == 0): \n c1 = -self.Green[iw1,ik,l3,l1] * self.Green[minusiwPlusiwm,ikPlusQ,l4,l2]\n else:\n c1 = self.Green[iw1,ik,l3,l1] * self.Green[minusiwPlusiwm,ikPlusQ,l4,l2]\n \n self.chic0[iw,ik,l1,l2,iw,ik,l3,l4] = c1\n else:\n G4susQz0 = 0.0\n G4susQzPi = 0.0\n for iw in range(NwG4):\n for ik in range(Nc):\n for l1 in range(nOrb):\n for l2 in range(nOrb):\n for l3 in range(nOrb):\n for l4 in range(nOrb):\n iw1 = int(iw - NwG4/2 + NwG/2)\n ikPlusQ = int(self.iKSum[ik,self.iQ]) # k+Q\n iwPlusiwm = int(min(max(iw1 + self.iwm,0),NwG-1)) # iwn+iwm\n #print(\"iw1,ik,iwPlusiwm,ikPlusQ\",iw1,ik,iwPlusiwm,ikPlusQ)\n c1 = - self.Green[iw1,ik,l1,l3] * self.Green[iwPlusiwm,ikPlusQ,l4,l2]\n self.chic0[iw,ik,l1,l2,iw,ik,l3,l4] = c1\n if (l1==l2) & (l3==l4):\n G4susQz0 += c1\n G4susQzPi += c1*exp(1j*np.pi*(l2-l3))\n\n self.chic0M = self.chic0.reshape(self.nt,self.nt)\n \n\n if self.vertex_channel==\"PARTICLE_HOLE_MAGNETIC\":\n print(\"Cluster Chi0(q,qz=0) :\", G4susQz0/(self.invT*self.Nc*2.0))\n print(\"Cluster Chi0(q,qz=pi):\", G4susQzPi/(self.invT*self.Nc*2.0))\n \n \n \n \n \n \n \n \n def calcDwaveSCClusterSus(self):\n print (\" \")\n print (\" \")\n gksx = cos(self.Kvecs[:,0]) + cos(self.Kvecs[:,1])\n csumCuCu = 0.0; csumOxOx = 0.0; csumOyOy = 0.0; csumCuOx = 0.0; csumCuOy = 0.0; csumOxOy = 0.0;\n \n\n for iK1 in range(self.Nc):\n for iK2 in range(self.Nc):\n csumCuCu += gksx[iK1]*sum(self.G4r[:,iK1,0,0,:,iK2,0,0])*gksx[iK2]\n csumOxOx += gksx[iK1]*sum(self.G4r[:,iK1,1,1,:,iK2,1,1])*gksx[iK2]\n csumOyOy += gksx[iK1]*sum(self.G4r[:,iK1,2,2,:,iK2,2,2])*gksx[iK2]\n csumCuOx += gksx[iK1]*sum(self.G4r[:,iK1,0,0,:,iK2,1,1])*gksx[iK2] + gksx[iK1]*sum(self.G4r[:,iK1,1,1,:,iK2,0,0])*gksx[iK2]\n csumCuOy += gksx[iK1]*sum(self.G4r[:,iK1,0,0,:,iK2,2,2])*gksx[iK2] + gksx[iK1]*sum(self.G4r[:,iK1,2,2,:,iK2,0,0])*gksx[iK2]\n csumOxOy += gksx[iK1]*sum(self.G4r[:,iK1,1,1,:,iK2,2,2])*gksx[iK2] + gksx[iK1]*sum(self.G4r[:,iK1,2,2,:,iK2,1,1])*gksx[iK2]\n\n csumCuCu /= self.Nc*self.invT\n csumOxOx /= self.Nc*self.invT\n csumOyOy /= self.Nc*self.invT\n csumCuOx /= self.Nc*self.invT\n csumCuOy /= self.Nc*self.invT\n csumOxOy /= self.Nc*self.invT \n #self.Pdc = real(csum)\n print (\"G4 sx-wave Cu-Cu Pairfield susceptibility: \",csumCuCu)\n print 
(\"G4 sx-wave Ox-Ox Pairfield susceptibility: \",csumOxOx)\n print (\"G4 sx-wave Oy-Oy Pairfield susceptibility: \",csumOyOy)\n print (\"G4 sx-wave Cu-Ox Pairfield susceptibility: \",csumCuOx)\n print (\"G4 sx-wave Cu-Oy Pairfield susceptibility: \",csumCuOy)\n print (\"G4 sx-wave Ox-Oy Pairfield susceptibility: \",csumOxOy)\n \n print (\" \")\n print (\" \")\n gkd = cos(self.Kvecs[:,0]) - cos(self.Kvecs[:,1])\n csumCuCu = 0.0; csumOxOx = 0.0; csumOyOy = 0.0; csumCuOx = 0.0; csumCuOy = 0.0; csumOxOy = 0.0;\n \n\n for iK1 in range(self.Nc):\n for iK2 in range(self.Nc):\n csumCuCu += gkd[iK1]*sum(self.G4r[:,iK1,0,0,:,iK2,0,0])*gkd[iK2]\n csumOxOx += gkd[iK1]*sum(self.G4r[:,iK1,1,1,:,iK2,1,1])*gkd[iK2]\n csumOyOy += gkd[iK1]*sum(self.G4r[:,iK1,2,2,:,iK2,2,2])*gkd[iK2]\n csumCuOx += gkd[iK1]*sum(self.G4r[:,iK1,0,0,:,iK2,1,1])*gkd[iK2] + gkd[iK1]*sum(self.G4r[:,iK1,1,1,:,iK2,0,0])*gkd[iK2]\n csumCuOy += gkd[iK1]*sum(self.G4r[:,iK1,0,0,:,iK2,2,2])*gkd[iK2] + gkd[iK1]*sum(self.G4r[:,iK1,2,2,:,iK2,0,0])*gkd[iK2]\n csumOxOy += gkd[iK1]*sum(self.G4r[:,iK1,1,1,:,iK2,2,2])*gkd[iK2] + gkd[iK1]*sum(self.G4r[:,iK1,2,2,:,iK2,1,1])*gkd[iK2]\n\n csumCuCu /= self.Nc*self.invT\n csumOxOx /= self.Nc*self.invT\n csumOyOy /= self.Nc*self.invT\n csumCuOx /= self.Nc*self.invT\n csumCuOy /= self.Nc*self.invT\n csumOxOy /= self.Nc*self.invT \n #self.Pdc = real(csum)\n print (\"G4 d-wave Cu-Cu Pairfield susceptibility: \",csumCuCu)\n print (\"G4 d-wave Ox-Ox Pairfield susceptibility: \",csumOxOx)\n print (\"G4 d-wave Oy-Oy Pairfield susceptibility: \",csumOyOy)\n print (\"G4 d-wave Cu-Ox Pairfield susceptibility: \",csumCuOx)\n print (\"G4 d-wave Cu-Oy Pairfield susceptibility: \",csumCuOy)\n print (\"G4 d-wave Ox-Oy Pairfield susceptibility: \",csumOxOy)\n \n print (\" \")\n print (\" \")\n \n \n \n def calcPS(self):\n # Calculate the S wave pairfield susp\n Nc=self.Nc; NwG4=self.NwG4; NwG=self.NwG; nt = self.nt; nOrb = self.nOrb; PS=0.0; PScc=0.0;\n PSoxox=0.0; PSoyoy=0.0; PScox=0.0; PScoy=0.0; PSoxoy=0.0; testG4susQz0=0.0; testG4susQz1=0.0;\n for ik1 in range(self.Nc):\n for ik2 in range(self.Nc):\n for iw1 in range(self.NwG4):\n for iw2 in range(self.NwG4):\n for l1 in range(nOrb):\n for l2 in range(nOrb):\n for l3 in range(nOrb):\n for l4 in range(nOrb):\n if (l1!=l2) & (l3!=l4):\n testG4susQz0 += self.G4[iw2,ik2,iw1,ik1,l4,l3,l2,l1]\n if (l1==l2) & (l3==l4) & (l1==l3):\n testG4susQz1 += self.G4[iw2,ik2,iw1,ik1,l4,l3,l2,l1]\n print(\"Test Cluster inter-orbital Chi(q=0):\", testG4susQz0/(self.invT*self.Nc*4.0))\n print(\"Test Cluster intra-orbital Chi(q=0):\", testG4susQz1/(self.invT*self.Nc*4.0))\n \n \n \n \n \n \n def buildChi0Lattice(self,nkfine):\n print (\"Now calculating chi0 on lattice\")\n\n NwG=self.NwG\n # Cluster K-grid\n Kset = self.Kvecs.copy() # copy() since Kset will be modified\n\n # Fine mesh\n klin = np.arange(-pi,pi,2*pi/nkfine)\n kx,ky = np.meshgrid(klin,klin)\n kset = np.column_stack((kx.flatten(),ky.flatten()))\n\n kPatch = []\n\n # shift to 1. 
BZ\n Nc = Kset.shape[0]\n for iK in range(Nc):\n if Kset[iK,0] > np.pi: Kset[iK,0] -= 2*np.pi\n if Kset[iK,1] > np.pi: Kset[iK,1] -= 2*np.pi\n\n self.Kset = Kset\n\n #Determine k-points in patch\n for k in kset:\n distance0 = k[0]**2 + k[1]**2\n newPoint = True\n for K in Kset:\n distanceKx = k[0] - K[0]; distanceKy = k[1] - K[1]\n if distanceKx >= pi: distanceKx -= 2*pi\n if distanceKy >= pi: distanceKy -= 2*pi\n if distanceKx <= -pi: distanceKx += 2*pi\n if distanceKy <= -pi: distanceKy += 2*pi\n distanceK = distanceKx**2 + distanceKy**2\n if distanceK < distance0:\n newPoint = False\n break\n if newPoint: kPatch.append(k.tolist())\n\n kPatch = np.array(kPatch)\n self.kPatch = kPatch\n\n # Load frequency domain\n wnSet = self.wnSet\n\n # Load parameters: t,mu\n # t1 = self.t1\n # t2 = self.t2\n mu = self.mu \n\n self.sigma = np.zeros((self.NwG,self.Nc,self.nOrb,self.nOrb),dtype='complex')\n self.sigma = self.sigmaoriginal\n \n\n self.sigmanegk = np.zeros((self.NwG,self.Nc,self.nOrb,self.nOrb),dtype='complex')\n for l2 in range(self.nOrb):\n for l4 in range(self.nOrb):\n if (l2 != l4 and l2 == 0) or (l2 != l4 and l4 == 0): \n self.sigmanegk[:,:,l2,l4] = -self.sigma[:,:,l2,l4]\n else:\n self.sigmanegk[:,:,l2,l4] = self.sigma[:,:,l2,l4]\n\n # Now coarse-grain G*G to build chi0(K) = Nc/N sum_k Gc(K+k')Gc(-K-k')\n nOrb = self.nOrb; nw = wnSet.shape[0]; nk=Kset.shape[0]\n self.chi0 = np.zeros((nw,nk,nOrb,nOrb,nw,nk,nOrb,nOrb),dtype='complex')\n self.chi0D = np.zeros((nw,nk,nOrb,nOrb,nw,nk,nOrb,nOrb),dtype='complex')\n self.chi0D2 = np.zeros((nw,nk,nOrb,nOrb,nw,nk,nOrb,nOrb),dtype='complex')\n self.chi0XS = np.zeros((nw,nk,nOrb,nOrb,nw,nk,nOrb,nOrb),dtype='complex')\n self.chi0XS2 = np.zeros((nw,nk,nOrb,nOrb,nw,nk,nOrb,nOrb),dtype='complex')\n self.gkdNorm = 0.0\n #self.cG = np.zeros((nw,nk,nOrb,nOrb),dtype='complex')\n #self.cG0 = np.zeros((nw,nk,nOrb,nOrb),dtype='complex')\n for iwn,wn in enumerate(wnSet): # reduced tp frequencies !!\n print(\"iwn = \",iwn)\n iwG = int(iwn - self.iwG40 + self.iwG0)\n for iK,K in enumerate(Kset):\n c0 = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n c1 = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n c2 = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n c3 = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n c4 = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n c5 = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n #cG = np.zeros((nOrb,nOrb),dtype='complex')\n #cG0 = np.zeros((nOrb,nOrb),dtype='complex')\n for k in kPatch:\n kx = K[0]+k[0]; ky = K[1]+k[1]\n if (K[0]<-0.01):\n kx += 2*np.pi\n if (K[1]<-0.01):\n ky += 2*np.pi\n ek = self.dispersion(kx,ky)\n gkd = cos(kx) - cos(ky)\n gkxs= cos(kx) + cos(ky)\n #G0inv = (1j*wn+self.mu-self.U1*(self.dens[-1]/4.-0.5))*np.identity(nOrb) - ek\n #G0 = linalg.inv(G0inv)\n Qx = self.qchannel[0]; Qy = self.qchannel[1]\n if (self.vertex_channel == \"PARTICLE_PARTICLE_UP_DOWN\"):\n emkpq = self.dispersion(-kx+Qx, -ky+Qy)\n iKQ = self.iKSum[self.iKDiff[0,iK],self.iQ]\n #emkpq = self.dispersion(Kset[iKQ,0], Kset[iKQ,1])\n #print(\"iK=\",iK,\"iKQ=\",iKQ,\"iKQx=\",Kset[iKQ,0],\"iKQy=\",Kset[iKQ,1])\n minusiwPlusiwm = min(max(NwG-iwG-1 + self.iwm,0),NwG-1) # -iwn + iwm\n #minusiwPlusiwm = int(min(max(NwG-iw1-1 + self.iwm,0),NwG-1))\n G1inv = (1j*wn+self.mu) * np.identity(nOrb)-ek-self.sigma[iwG,iK,:,:]\n G2inv = (-1j*wn+self.mu)* np.identity(nOrb)-emkpq-self.sigmanegk[minusiwPlusiwm,iK,:,:]\n G1 = linalg.inv(G1inv); G2 = linalg.inv(G2inv)\n\n else:\n ekpq = self.dispersion(kx+Qx, ky+Qy)\n iKQ = 
int(self.iKSum[iK,self.iQ])\n iwPlusiwm = int(min(max(iwG + self.iwm,0),NwG-1)) # iwn+iwm\n\n G1inv = (1j*wn+self.mu)*np.identity(nOrb)-ek-self.sigma[iwG,iK,:,:]\n G2inv = (1j*wn+self.mu)*np.identity(nOrb)-ekpq-self.sigmanegk[iwPlusiwm,iKQ,:,:]\n G1 = linalg.inv(G1inv); G2 = -linalg.inv(G2inv)\n\n for l1 in range(nOrb):\n for l2 in range(nOrb):\n for l3 in range(nOrb):\n for l4 in range(nOrb):\n c0[l1,l2,l3,l4] = G1[l3,l1]*G2[l4,l2]\n \n c1[:,:,:,:] += c0[:,:,:,:]\n c2[:,:,:,:] += c0[:,:,:,:] * gkd\n c3[:,:,:,:] += c0[:,:,:,:] * gkd**2\n c4[:,:,:,:] += c0[:,:,:,:] * gkxs\n c5[:,:,:,:] += c0[:,:,:,:] * gkxs**2\n if (iwn==0): self.gkdNorm += gkd**2\n\n self.chi0[iwn,iK,:,:,iwn,iK,:,:] = c1[:,:,:,:]/kPatch.shape[0]\n self.chi0D[iwn,iK,:,:,iwn,iK,:,:] = c2[:,:,:,:]/kPatch.shape[0]\n self.chi0D2[iwn,iK,:,:,iwn,iK,:,:] = c3[:,:,:,:]/kPatch.shape[0]\n self.chi0XS[iwn,iK,:,:,iwn,iK,:,:] = c4[:,:,:,:]/kPatch.shape[0]\n self.chi0XS2[iwn,iK,:,:,iwn,iK,:,:] = c5[:,:,:,:]/kPatch.shape[0]\n #self.cG[iwn,iK,:,:] = cG[:,:]/kPatch.shape[0]\n #self.cG0[iwn,iK,:,:] = cG0[:,:]/kPatch.shape[0]\n\n \n #sym=symmetrize_Nc4x4.symmetrize()\n #nwn = self.chi0.shape[0]\n #type=dtype(self.chi0[0,0,0,0,0,0,0,0])\n #for iK1 in range(0,Nc):\n # for iK2 in range(0,Nc):\n # tmp = zeros((nwn,nwn),dtype=type)\n # for iSym in range(8): # Apply every point-group symmetry operation\n # iK1Trans = sym.symmTrans_of_iK(iK1,iSym)\n # iK2Trans = sym.symmTrans_of_iK(iK2,iSym)\n # tmp += self.chi0[:,iK1Trans,0,0,:,iK2Trans,0,0]\n # print(\"isym=\",iSym,\"iK1=\",iK1,\"iK1Trans=\",iK1Trans,\"iK2=\",iK2,\"iK2Trans=\",iK2Trans)\n # print(\"chi0[15,iK1Trans,0,0,15,iK2Trans,0,0]=\",self.chi0[15,iK1Trans,0,0,15,iK2Trans,0,0])\n\n # for iSym in range(8):\n # iK1Trans = sym.symmTrans_of_iK(iK1,iSym)\n # iK2Trans = sym.symmTrans_of_iK(iK2,iSym)\n # self.chi0[:,iK1Trans,0,0,:,iK2Trans,0,0] = tmp/8.\n \n \n self.chi0M = self.chi0.reshape(self.nt,self.nt)\n self.gkdNorm /= kPatch.shape[0]\n\n\n if self.vertex_channel==\"PARTICLE_HOLE_MAGNETIC\":\n chi0Loc = sum(sum(sum(sum(self.chi0,axis=0),axis=0),axis=2),axis=2)\n chi00 = 0.0; chi0Pi = 0.0\n for l1 in range(0,nOrb):\n for l3 in range(0,nOrb):\n chi00 += chi0Loc[l1,l1,l3,l3]\n chi0Pi += chi0Loc[l1,l1,l3,l3] * exp(1j*np.pi*(l1-l3))\n\n print(\"Lattice Chi0(q,qz=0) :\", chi00 /(self.invT*self.Nc*2.0))\n print(\"Lattice Chi0(q,qz=pi):\", chi0Pi/(self.invT*self.Nc*2.0))\n\n \n \n def calcSCSus(self):\n # Calculate from gd*G4*gd = gd*GG*gd + gd*GG*GammaRed*GG*gd\n #GammaRed = self.GammaRed.reshape(self.NwG4*self.Nc,self.NwG4*self.Nc)\n print(\"\")\n gkd = cos(self.Kvecs[:,0]) - cos(self.Kvecs[:,1])\n nOrb = self.nOrb;Nc=self.Nc;NwG4=self.NwG4;\n csum = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n csumxs = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n ccsum = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n csum3 = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n csum1 = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n csum1xs = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n ccsum1 = np.zeros((nOrb,nOrb,nOrb,nOrb),dtype='complex')\n tempchi = np.zeros((NwG4,Nc),dtype='complex')\n\n\n\n for iw1 in range(NwG4):\n for iK1 in range(Nc):\n for iw2 in range(NwG4):\n for iK2 in range(Nc):\n for l1 in range(nOrb):\n for l2 in range(nOrb):\n for l3 in range(nOrb):\n for l4 in range(nOrb):\n for l5 in range(nOrb):\n for l6 in range(nOrb):\n for l7 in range(nOrb):\n for l8 in range(nOrb):\n csum[l1,l2,l3,l4] += self.chi0D[iw1,iK1,l1,l2,iw1,iK1,l5,l6] * self.GammaRed[iw1,iK1,l5,l6,iw2,iK2,l7,l8] * 
self.chi0D[iw2,iK2,l7,l8,iw2,iK2,l3,l4]\n csumxs[l1,l2,l3,l4] += self.chi0XS[iw1,iK1,l1,l2,iw1,iK1,l5,l6] * self.GammaRed[iw1,iK1,l5,l6,iw2,iK2,l7,l8] * self.chi0XS[iw2,iK2,l7,l8,iw2,iK2,l3,l4]\n ccsum[l1,l2,l3,l4] += gkd[iK1] * self.chi0[iw1,iK1,l1,l2,iw1,iK1,l5,l6] * self.GammaRed[iw1,iK1,l5,l6,iw2,iK2,l7,l8] * self.chi0[iw2,iK2,l7,l8,iw2,iK2,l3,l4] * gkd[iK2]\n csum[:,:,:,:] /= (self.Nc*self.invT)**2\n csumxs[:,:,:,:] /= (self.Nc*self.invT)**2\n ccsum[:,:,:,:] /= (self.Nc*self.invT)**2\n self.chi0D2sim = np.zeros((NwG4,Nc,nOrb,nOrb,nOrb,nOrb),dtype='complex')\n self.chi0XS2sim = np.zeros((NwG4,Nc,nOrb,nOrb,nOrb,nOrb),dtype='complex')\n self.chi0sim = np.zeros((NwG4,Nc,nOrb,nOrb,nOrb,nOrb),dtype='complex')\n for iw in range(NwG4):\n for iK in range(Nc):\n self.chi0D2sim[iw,iK,:,:,:,:] = self.chi0D2[iw,iK,:,:,iw,iK,:,:]\n self.chi0XS2sim[iw,iK,:,:,:,:] = self.chi0XS2[iw,iK,:,:,iw,iK,:,:]\n self.chi0sim[iw,iK,:,:,:,:] = self.chi0[iw,iK,:,:,iw,iK,:,:]\n for l1 in range(nOrb):\n for l2 in range(nOrb):\n for l3 in range(nOrb):\n for l4 in range(nOrb):\n csum1[l1,l2,l3,l4] = sum(self.chi0D2sim[:,:,l1,l2,l3,l4])/(self.Nc*self.invT)\n csum1xs[l1,l2,l3,l4] = sum(self.chi0XS2sim[:,:,l1,l2,l3,l4])/(self.Nc*self.invT)\n ccsum1[l1,l2,l3,l4] = sum(np.dot(self.chi0sim[:,:,l1,l2,l3,l4],gkd**2))/(self.Nc*self.invT)\n csum[:,:,:,:] += csum1[:,:,:,:]\n csumxs[:,:,:,:] += csum1xs[:,:,:,:]\n ccsum[:,:,:,:] += ccsum1[:,:,:,:]\n self.Pd = real(csum)\n self.csum = csum\n self.ccsum = ccsum\n self.Pxs = real(csumxs)\n self.Pdgkc = real(ccsum)\n #csum3 = sum(real(self.chi0D2[abs(self.wnSet) <= 2.*4*self.t**2/self.U,:]))/(self.Nc*self.invT)\n print (\"Calculations from GammaRed:\")\n print (\"d-wave SC susceptibility: \",csum[0,0,0,0],csum[1,1,1,1],csum[2,2,2,2])\n print (\"xs-wave SC susceptibility: \",csumxs[0,0,0,0],csumxs[1,1,1,1],csumxs[2,2,2,2])\n print(\"\")\n print (\"bare d-wave SC susceptibility: \",csum1[0,0,0,0],csum1[1,1,1,1],csum1[2,2,2,2])\n print (\"bare xs-wave SC susceptibility: \",csum1xs[0,0,0,0],csum1xs[1,1,1,1],csum1xs[2,2,2,2])\n #print (\"bare d-wave SC susceptibility with cutoff wc = J: \",csum3)\n print (\"bare d-wave SC lattice (with cluster form-factors) susceptibility: \",ccsum1[0,0,0,0],ccsum1[1,1,1,1],ccsum1[2,2,2,2])\n print (\"d-wave SC lattice (with cluster form-factors) susceptibility: \",ccsum[0,0,0,0],ccsum[1,1,1,1],ccsum[2,2,2,2])\n print(\"\")\n\n \n\n def calcGammaIrr(self):\n # Calculate the irr. 
GammaIrr\n Nc=self.Nc; NwG4=self.NwG4; NwG=self.NwG; nt = self.nt; nOrb = self.nOrb\n \n #print(\"chic0M(105,106)=\",self.chic0M[105,106],\"chic0(2,3,2,0,2,3,2,1)=\",self.chic0[2,3,2,0,2,3,2,1])\n #print(\"chic0M(106,105)=\",self.chic0M[106,105],\"chic0(2,3,2,1,2,3,2,0)=\",self.chic0[2,3,2,1,2,3,2,0])\n #for i in range(0,nt):\n # for j in range(i,nt):\n # c1 = 0.5*(self.GammaM[i,j]+self.GammaM[j,i])\n # self.GammaM[i,j] = c1\n # self.GammaM[j,i] = c1\n\n G4M = linalg.inv(self.G4M)\n chic0M = linalg.inv(self.chic0M)\n \n \n self.GammaM = chic0M - G4M \n \n #self.GammaM *= float(Nc)*self.invT*float(self.nOrb)\n self.GammaM *= float(Nc)*self.invT\n\n \n self.Gamma = self.GammaM.reshape(NwG4,Nc,nOrb,nOrb,NwG4,Nc,nOrb,nOrb)\n \n Gamma1 = self.Gamma.copy()\n for iw2 in range(NwG4):\n self.Gamma[:,:,:,:,iw2,:,:,:]=(Gamma1[:,:,:,:,iw2,:,:,:]+Gamma1[:,:,:,:,NwG4-iw2-1,:,:,:])/2\n Gamma1 = self.Gamma.copy()\n for iw1 in range(NwG4):\n self.Gamma[iw1,:,:,:,:,:,:,:]=(Gamma1[iw1,:,:,:,:,:,:,:]+Gamma1[NwG4-iw1-1,:,:,:,:,:,:,:])/2\n \n \n def buildKernelMatrix(self):\n Nc=self.Nc; NwG4=self.NwG4; NwG=self.NwG; nt = self.nt; nOrb = self.nOrb\n # Build kernel matrix Gamma*chi0\n \n if (self.calcCluster):\n self.chiM = self.chic0M\n else:\n self.chiM = self.chi0M\n \n self.pm = np.dot(self.GammaM, self.chiM)\n #self.pm *= 1.0/(self.invT*float(self.Nc)*float(self.nOrb))\n self.pm *= 1.0/(self.invT*float(self.Nc))\n\n \n wtemp,vtemp = linalg.eig(self.chiM)\n wttemp = abs(wtemp-1)\n ileadtemp = argsort(wttemp)\n self.lambdastemp = wtemp[ileadtemp]\n self.evecstemp = vtemp[:,ileadtemp]\n \n self.Lambdatemp = sqrt(np.diag(self.lambdastemp))\n self.chiMasqrt = np.dot(self.evecstemp,np.dot(self.Lambdatemp,linalg.inv(self.evecstemp)))\n\n self.pm2 = np.dot(self.chiMasqrt,np.dot(self.GammaM, self.chiMasqrt))\n self.pm2 *= 1.0/(self.invT*float(self.Nc))\n \n #self.pm2m = self.pm2.reshape(NwG4,Nc,nOrb,nOrb,NwG4,Nc,nOrb,nOrb)\n \n #pm2m1 = self.pm2m.copy()\n #for iw2 in range(NwG4):\n # self.pm2m[:,:,:,:,iw2,:,:,:]=(pm2m1[:,:,:,:,iw2,:,:,:]+pm2m1[:,:,:,:,NwG4-iw2-1,:,:,:])/2\n #pm2m1 = self.pm2m.copy()\n #for iw1 in range(NwG4):\n # self.pm2m[iw1,:,:,:,:,:,:,:]=(pm2m1[iw1,:,:,:,:,:,:,:]+pm2m1[NwG4-iw1-1,:,:,:,:,:,:,:])/2\n \n \n \n #self.Gamma = self.GammaM.reshape(NwG4,Nc,nOrb,nOrb,NwG4,Nc,nOrb,nOrb)\n \n #Gamma1 = self.Gamma.copy()\n #for iw2 in range(NwG4):\n # self.Gamma[:,:,:,:,iw2,:,:,:]=(Gamma1[:,:,:,:,iw2,:,:,:]+Gamma1[:,:,:,:,NwG4-iw2-1,:,:,:])/2\n #Gamma1 = self.Gamma.copy()\n #for iw1 in range(NwG4):\n # self.Gamma[iw1,:,:,:,:,:,:,:]=(Gamma1[iw1,:,:,:,:,:,:,:]+Gamma1[NwG4-iw1-1,:,:,:,:,:,:,:])/2\n\n def calcKernelEigenValuesnew(self):\n nt = self.nt; Nc = self.Nc; NwG4=self.NwG4; nOrb = self.nOrb\n w,v = linalg.eig(self.pm2)\n wt = abs(w-1)\n ilead = argsort(wt)\n self.lambdas = w[ilead]\n self.evecs = v[:,ilead]\n self.evecs = self.evecs.reshape(NwG4,Nc,nOrb,nOrb,nt)\n \n \n iw0=int(NwG4/2)\n for inr in range(16):\n imax = argmax(self.evecs[iw0,:,0,0,inr])\n if (abs(self.evecs[iw0-1,imax,0,0,inr]-self.evecs[iw0,imax,0,0,inr]) <= 1.0e-2):\n print(\"Eigenvalue is \", self.lambdas[inr], \"even frequency\")\n print(\"Eigenvector=\",self.evecs[iw0-1,imax,0,0,inr],self.evecs[iw0,imax,0,0,inr])\n else:\n print(\"Eigenvalue is \", self.lambdas[inr], \"odd frequency\")\n print(\"Eigenvector=\",self.evecs[iw0-1,imax,0,0,inr],self.evecs[iw0,imax,0,0,inr])\n\n print (\"Leading 16 eigenvalues of lattice Bethe-salpeter equation\",self.lambdas[0:16])\n if self.vertex_channel in 
(\"PARTICLE_PARTICLE_SUPERCONDUCTING\",\"PARTICLE_PARTICLE_UP_DOWN\"):\n #Now find d-wave eigenvalue\n gk = cos(self.Kvecs[:,0]) - cos(self.Kvecs[:,1]) # dwave form factor\n self.found_d=False\n self.ind_d=0\n for ia in range(nt):\n r1 = dot(gk,self.evecs[int(self.NwG4/2),:,0,0,ia]) * sum(self.evecs[:,1,0,0,ia])\n if abs(r1) >= 2.0e-1:\n self.lambdad = self.lambdas[ia]\n self.ind_d = ia\n self.found_d=True\n break\n if self.found_d:\n print(\"Cu-Cu d-wave eigenvalue\",self.lambdad)\n #self.calcPdFromEigenFull(self.ind_d)\n #self.calcPdFromEigenFull2(self.ind_d)\n #Now find sx-wave eigenvalue\n gk = cos(self.Kvecs[:,0]) + cos(self.Kvecs[:,1]) # sxwave form factor\n self.found_d=False\n self.ind_d=0\n for ia in range(nt):\n r1 = dot(gk,self.evecs[int(self.NwG4/2),:,0,0,ia]) * sum(self.evecs[:,self.iKPi0,0,0,ia])\n if abs(r1) >= 2.0e-1:\n self.lambdad = self.lambdas[ia]\n self.ind_d = ia\n self.found_d=True\n break\n if self.found_d:\n print(\"Cu-Cu sx-wave eigenvalue\",self.lambdad)\n\n \n def calcKernelEigenValues(self):\n nt = self.nt; Nc = self.Nc; NwG4=self.NwG4; nOrb = self.nOrb\n w,v = linalg.eig(self.pm2)\n wt = abs(w-1)\n ilead = argsort(wt)\n self.lambdas = w[ilead]\n self.evecs = v[:,ilead]\n self.evecs = self.evecs.reshape(NwG4,Nc,nOrb,nOrb,nt)\n\n w2,v2 = linalg.eig(self.pm2)\n wt2 = abs(w2-1)\n ilead2 = argsort(wt2)\n self.lambdas2 = w2[ilead2]\n self.evecs2 = v2[:,ilead2]\n self.evecs2 = self.evecs2.reshape(NwG4,Nc,nOrb,nOrb,nt)\n \n iw0=int(NwG4/2)\n for inr in range(16):\n imax = argmax(self.evecs[iw0,:,0,0,inr])\n if (abs(self.evecs[iw0-1,imax,0,0,inr]-self.evecs[iw0,imax,0,0,inr]) <= 1.0e-2):\n print(\"Eigenvalue is \", self.lambdas[inr], \"even frequency\")\n print(\"Eigenvector=\",self.evecs[iw0-1,imax,0,0,inr],self.evecs[iw0,imax,0,0,inr])\n else:\n print(\"Eigenvalue is \", self.lambdas[inr], \"odd frequency\")\n print(\"Eigenvector=\",self.evecs[iw0-1,imax,0,0,inr],self.evecs[iw0,imax,0,0,inr])\n\n print (\"Leading 16 eigenvalues of lattice Bethe-salpeter equation\",self.lambdas[0:16])\n if self.vertex_channel in (\"PARTICLE_PARTICLE_SUPERCONDUCTING\",\"PARTICLE_PARTICLE_UP_DOWN\"):\n #Now find d-wave eigenvalue\n gk = cos(self.Kvecs[:,0]) - cos(self.Kvecs[:,1]) # dwave form factor\n self.found_d=False\n self.ind_d=0\n for ia in range(nt):\n r1 = dot(gk,self.evecs[int(self.NwG4/2),:,0,0,ia]) * sum(self.evecs[:,1,0,0,ia])\n if abs(r1) >= 2.0e-1: \n self.lambdad = self.lambdas[ia]\n self.ind_d = ia\n self.found_d=True\n break\n if self.found_d: \n print(\"Cu-Cu d-wave eigenvalue\",self.lambdad)\n #self.calcPdFromEigenFull(self.ind_d)\n #self.calcPdFromEigenFull2(self.ind_d)\n #self.calcPdFromEigenFull(self.ind_d)\n #Now find sx-wave eigenvalue\n gk = cos(self.Kvecs[:,0]) + cos(self.Kvecs[:,1]) # sxwave form factor\n self.found_d=False\n self.ind_d=0\n for ia in range(nt):\n r1 = dot(gk,self.evecs[int(self.NwG4/2),:,0,0,ia]) * sum(self.evecs[:,self.iKPi0,0,0,ia])\n if abs(r1) >= 2.0e-1: \n self.lambdad = self.lambdas[ia]\n self.ind_d = ia\n self.found_d=True\n break\n if self.found_d: \n print(\"Cu-Cu sx-wave eigenvalue\",self.lambdad)\n \n def calcReducibleLatticeVertex(self):\n pm = self.pm; Gamma=self.GammaM\n nt = self.nt; Nc = self.Nc; NwG4=self.NwG4; nOrb=self.nOrb\n self.pminv = np.linalg.inv(np.identity(nt)-pm)\n # self.pminv = np.linalg.inv(np.identity(nt)+pm)\n self.GammaRed = dot(self.pminv, Gamma)\n self.GammaRed = self.GammaRed.reshape(NwG4,Nc,nOrb,nOrb,NwG4,Nc,nOrb,nOrb)\n\n def determine_specialK(self):\n self.iKPiPi = 0\n self.iKPi0 = 0\n Nc=self.Nc\n for 
iK in range(Nc):\n kx = abs(self.Kvecs[iK,0] - np.pi)\n ky = abs(self.Kvecs[iK,1] - np.pi)\n ky2 = abs(self.Kvecs[iK,1])\n if kx >= 2*np.pi: kx-=2.*pi\n if ky >= 2*np.pi: ky-=2.*pi\n if ky2 >= 2*np.pi: ky2-=2.*pi\n if kx**2+ky**2 <= 1.0e-5:\n self.iKPiPi = iK\n if kx**2+ky2**2 <= 1.0e-5:\n self.iKPi0 = iK\n \n def dwave(self,kx,ky):\n return cos(kx)-cos(ky)\n\n #if self.vertex_channel in (\"PARTICLE_PARTICLE_SUPERCONDUCTING\",\"PARTICLE_PARTICLE_UP_DOWN\"):\n # w2,v2 = linalg.eigh(self.pm2)\n\n # wt2 = abs(w2-1)\n # ilead2 = argsort(wt2)\n # self.lambdas2 = w2[ilead2]\n # self.evecs2 = v2[:,ilead2]\n # self.evecs2 = self.evecs2.reshape(NwG4,Nc,nOrb,nOrb,nt)\n # print (\"10 leading eigenvalues of symmetrized Bethe-salpeter equation\",self.lambdas2[0:10])\n\n #Now find d-wave eigenvalue\n # gk = cos(self.Kvecs[:,0]) - cos(self.Kvecs[:,1]) # dwave form factor\n # self.found_d=False\n # for ia in range(0,10):\n # r1 = dot(gk,self.evecs[int(self.NwG4/2),:,0,0,ia]) * sum(self.evecs[:,2,0,0,ia])\n # if abs(r1) >= 2.0e-1: \n # self.lambdad = self.lambdas[ia]\n # self.ind_d = ia\n # self.found_d=True\n # break\n # if self.found_d: print(\"d-wave eigenvalue\",self.lambdad)\n\n \n def calcPdFromEigenFull(self,ia=0):\n gk = self.dwave(self.Kvecs[:,0],self.Kvecs[:,1])\n nt = self.nt; nc=self.Nc; nw=self.NwG4; nOrb=self.nOrb;\n \n eval = self.lambdas\n Lambda = np.diag(1./(1-eval))\n # Dkk = zeros((nt,nt), dtype=real)\n phit =zeros((nw,nc,nOrb,nOrb,nt), dtype=complex)\n phit2=zeros((nw,nc,nOrb,nOrb,nt), dtype=complex)\n Dkk =zeros((nw,nc,nOrb,nOrb,nw,nc,nOrb,nOrb), dtype=complex)\n Dkk2=zeros((nw,nc,nOrb,nOrb,nw,nc,nOrb,nOrb), dtype=complex)\n Pd =zeros((nOrb,nOrb,nOrb,nOrb), dtype=complex)\n Pdk =zeros((nc,nOrb,nOrb,nc,nOrb,nOrb), dtype=complex)\n PdIa =zeros((nOrb,nOrb,nOrb,nOrb), dtype=complex)\n \n if self.calcCluster: \n chi0 = self.chic0\n else:\n chi0 = self.chi0\n for ialpha in range(nt):\n phit[:,:,:,:,ialpha] = self.evecs[:,:,:,:,ialpha]\n phit2[:,:,:,:,ia] = self.evecs[:,:,:,:,ia]\n #phi = phit.reshape(nt,nt)\n #phi2 = phit2.reshape(nt,nt)\n evecscom = self.evecs.reshape(nt,nt)\n Dkktemp = dot(phit,dot(Lambda,linalg.inv(evecscom)))\n Dkktemp2 = Dkktemp.reshape(nt,nt)\n Dkk = dot(self.chic0M,Dkktemp2)\n #Dkk = dot(phi,dot(Lambda,phi.T))\n Dkk = Dkk.reshape(nw,nc,nOrb,nOrb,nw,nc,nOrb,nOrb)\n #Dkk2 = dot(phi2,dot(Lambda,phi2.T))\n Dkk2temp = dot(phit2,dot(Lambda,linalg.inv(evecscom)))\n Dkk2temp2 = Dkk2temp.reshape(nt,nt)\n Dkk2 = dot(self.chic0M,Dkk2temp2)\n Dkk2 = Dkk2.reshape(nw,nc,nOrb,nOrb,nw,nc,nOrb,nOrb)\n Lkk = sum(sum(Dkk,axis=0),axis=3)\n Lkk2 = sum(sum(Dkk2,axis=0),axis=3)\n \n for ki in range(self.Nc):\n for kj in range(self.Nc):\n Pdk[ki,:,:,kj,:,:] = gk[ki]*Lkk[ki,:,:,kj,:,:]*gk[kj]\n \n Pd[0,0,0,0] = dot(gk,dot(Lkk[:,0,0,:,0,0],gk)) * self.temp/self.Nc\n PdIa[0,0,0,0] = dot(gk,dot(Lkk2[:,0,0,:,0,0],gk)) * self.temp/self.Nc\n Pd[1,1,1,1] = dot(gk,dot(Lkk[:,1,1,:,1,1],gk)) * self.temp/self.Nc\n PdIa[1,1,1,1] = dot(gk,dot(Lkk2[:,1,1,:,1,1],gk)) * self.temp/self.Nc\n Pd[2,2,2,2] = dot(gk,dot(Lkk[:,2,2,:,2,2],gk)) * self.temp/self.Nc\n PdIa[2,2,2,2] = dot(gk,dot(Lkk2[:,2,2,:,2,2],gk)) * self.temp/self.Nc\n Pd[0,0,1,1] = dot(gk,dot(Lkk[:,0,0,:,1,1],gk)) * self.temp/self.Nc\n PdIa[0,0,1,1] = dot(gk,dot(Lkk2[:,0,0,:,1,1],gk)) * self.temp/self.Nc\n Pd[1,1,0,0] = dot(gk,dot(Lkk[:,1,1,:,0,0],gk)) * self.temp/self.Nc\n PdIa[1,1,0,0] = dot(gk,dot(Lkk2[:,1,1,:,0,0],gk)) * self.temp/self.Nc\n Pd[0,0,2,2] = dot(gk,dot(Lkk[:,0,0,:,2,2],gk)) * self.temp/self.Nc\n PdIa[0,0,2,2] = 
dot(gk,dot(Lkk2[:,0,0,:,2,2],gk)) * self.temp/self.Nc\n Pd[2,2,0,0] = dot(gk,dot(Lkk[:,2,2,:,0,0],gk)) * self.temp/self.Nc\n PdIa[2,2,0,0] = dot(gk,dot(Lkk2[:,2,2,:,0,0],gk)) * self.temp/self.Nc\n Pd[1,1,2,2] = dot(gk,dot(Lkk[:,1,1,:,2,2],gk)) * self.temp/self.Nc\n PdIa[1,1,2,2] = dot(gk,dot(Lkk2[:,1,1,:,2,2],gk)) * self.temp/self.Nc\n Pd[2,2,1,1] = dot(gk,dot(Lkk[:,2,2,:,1,1],gk)) * self.temp/self.Nc\n PdIa[2,2,1,1] = dot(gk,dot(Lkk2[:,2,2,:,1,1],gk)) * self.temp/self.Nc\n \n\n self.PdEigen = Pd\n self.PdIa = PdIa\n self.Pdk = Pdk\n print (\"Calculations from BSE eigenvalues and eigenvectors:\")\n print(\"Cu-Cu Pd from eigensystem (all eigenvalues): \",Pd[0,0,0,0])\n print(\"Ox-Ox Pd from eigensystem (all eigenvalues): \",Pd[1,1,1,1])\n print(\"Oy-Oy Pd from eigensystem (all eigenvalues): \",Pd[2,2,2,2])\n print(\"Cu-Ox Pd from eigensystem (all eigenvalues): \",Pd[0,0,1,1]+Pd[1,1,0,0])\n print(\"Cu-Oy Pd from eigensystem (all eigenvalues): \",Pd[0,0,2,2]+Pd[2,2,0,0])\n print(\"Ox-Oy Pd from eigensystem (all eigenvalues): \",Pd[1,1,2,2]+Pd[2,2,1,1])\n \n def calcPdFromEigenFull2(self,ia=0):\n gk = self.dwave(self.Kvecs[:,0],self.Kvecs[:,1])\n nt = self.nt; nc=self.Nc; nw=self.NwG4; nOrb=self.nOrb;\n \n eval = self.lambdas2\n Lambda = np.diag(1./(1-eval))\n # Dkk = zeros((nt,nt), dtype=real)\n phit =zeros((nw,nc,nOrb,nOrb,nt), dtype=complex)\n phit2=zeros((nw,nc,nOrb,nOrb,nt), dtype=complex)\n Dkk =zeros((nw,nc,nOrb,nOrb,nw,nc,nOrb,nOrb), dtype=complex)\n Dkk2=zeros((nw,nc,nOrb,nOrb,nw,nc,nOrb,nOrb), dtype=complex)\n Pd =zeros((nOrb,nOrb,nOrb,nOrb), dtype=complex)\n PdIa =zeros((nOrb,nOrb,nOrb,nOrb), dtype=complex)\n \n if self.calcCluster: \n chi0 = self.chic0\n else:\n chi0 = self.chi0\n for ialpha in range(nt):\n phit[:,:,:,:,ialpha] = self.evecs2[:,:,:,:,ialpha]\n phit2[:,:,:,:,ia] = self.evecs2[:,:,:,:,ia]\n #phi = phit.reshape(nt,nt)\n #phi2 = phit2.reshape(nt,nt)\n evecscom = self.evecs2.reshape(nt,nt)\n Dkktemp = dot(phit,dot(Lambda,linalg.inv(evecscom)))\n Dkktemp2 = Dkktemp.reshape(nt,nt)\n Dkk = dot(self.chi0M,Dkktemp2)\n #Dkk = dot(phi,dot(Lambda,phi.T))\n Dkk = Dkk.reshape(nw,nc,nOrb,nOrb,nw,nc,nOrb,nOrb)\n #Dkk2 = dot(phi2,dot(Lambda,phi2.T))\n Dkk2temp = dot(phit2,dot(Lambda,linalg.inv(evecscom)))\n Dkk2temp2 = Dkk2temp.reshape(nt,nt)\n Dkk2 = dot(self.chiMasqrt,dot(Dkk2temp2,self.chiMasqrt))\n Dkk2 = Dkk2.reshape(nw,nc,nOrb,nOrb,nw,nc,nOrb,nOrb)\n Lkk = sum(sum(Dkk,axis=0),axis=3)\n Lkk2 = sum(sum(Dkk2,axis=0),axis=3)\n Pd[0,0,0,0] = dot(gk,dot(Lkk[:,0,0,:,0,0],gk)) * self.temp/self.Nc\n PdIa[0,0,0,0] = dot(gk,dot(Lkk2[:,0,0,:,0,0],gk)) * self.temp/self.Nc\n Pd[1,1,1,1] = dot(gk,dot(Lkk[:,1,1,:,1,1],gk)) * self.temp/self.Nc\n PdIa[1,1,1,1] = dot(gk,dot(Lkk2[:,1,1,:,1,1],gk)) * self.temp/self.Nc\n Pd[2,2,2,2] = dot(gk,dot(Lkk[:,2,2,:,2,2],gk)) * self.temp/self.Nc\n PdIa[2,2,2,2] = dot(gk,dot(Lkk2[:,2,2,:,2,2],gk)) * self.temp/self.Nc\n Pd[0,0,1,1] = dot(gk,dot(Lkk[:,0,0,:,1,1],gk)) * self.temp/self.Nc\n PdIa[0,0,1,1] = dot(gk,dot(Lkk2[:,0,0,:,1,1],gk)) * self.temp/self.Nc\n Pd[1,1,0,0] = dot(gk,dot(Lkk[:,1,1,:,0,0],gk)) * self.temp/self.Nc\n PdIa[1,1,0,0] = dot(gk,dot(Lkk2[:,1,1,:,0,0],gk)) * self.temp/self.Nc\n Pd[0,0,2,2] = dot(gk,dot(Lkk[:,0,0,:,2,2],gk)) * self.temp/self.Nc\n PdIa[0,0,2,2] = dot(gk,dot(Lkk2[:,0,0,:,2,2],gk)) * self.temp/self.Nc\n Pd[2,2,0,0] = dot(gk,dot(Lkk[:,2,2,:,0,0],gk)) * self.temp/self.Nc\n PdIa[2,2,0,0] = dot(gk,dot(Lkk2[:,2,2,:,0,0],gk)) * self.temp/self.Nc\n Pd[1,1,2,2] = dot(gk,dot(Lkk[:,1,1,:,2,2],gk)) * self.temp/self.Nc\n PdIa[1,1,2,2] = 
dot(gk,dot(Lkk2[:,1,1,:,2,2],gk)) * self.temp/self.Nc\n Pd[2,2,1,1] = dot(gk,dot(Lkk[:,2,2,:,1,1],gk)) * self.temp/self.Nc\n PdIa[2,2,1,1] = dot(gk,dot(Lkk2[:,2,2,:,1,1],gk)) * self.temp/self.Nc\n\n self.PdEigen = Pd\n self.PdIa = PdIa\n print (\"Calculations from BSE2 eigenvalues and eigenvectors:\")\n print(\"Cu-Cu Pd from eigensystem (all eigenvalues): \",Pd[0,0,0,0])\n print(\"Ox-Ox Pd from eigensystem (all eigenvalues): \",Pd[1,1,1,1])\n print(\"Oy-Oy Pd from eigensystem (all eigenvalues): \",Pd[2,2,2,2])\n print(\"Cu-Ox Pd from eigensystem (all eigenvalues): \",Pd[0,0,1,1]+Pd[1,1,0,0])\n print(\"Cu-Oy Pd from eigensystem (all eigenvalues): \",Pd[0,0,2,2]+Pd[2,2,0,0])\n print(\"Ox-Oy Pd from eigensystem (all eigenvalues): \",Pd[1,1,2,2]+Pd[2,2,1,1])\n\n \n\n def transformEvecsToKz(self):\n self.phi0 = 1./sqrt(2.)*(self.evecs[:,:,0,0,:] + self.evecs[:,:,1,0,:])\n self.phipi = 1./sqrt(2.)*(self.evecs[:,:,0,0,:] - self.evecs[:,:,1,0,:])\n\n def projectOnDwave(self,Ks,matrix):\n gk = self.dwave(Ks[:,0], Ks[:,1])\n c1 = dot(gk, dot(matrix,gk) ) / dot(gk,gk)\n return c1\n\n\n def dispersion(self,kx,ky):\n ek = np.zeros((self.nOrb,self.nOrb),dtype='complex')\n r1 = -2.* 1j *self.tpd*sin(kx/2.)\n r2 = 2.* 1j *self.tpd*sin(ky/2.)\n r3 = 4.*self.tpp*sin(kx/2.)*sin(ky/2.)\n ek[0,0] = self.epd\n ek[1,1] = self.epp\n ek[2,2] = self.epp\n ek[0,1] = r1\n ek[1,0] = -r1\n ek[0,2] = r2\n ek[2,0] = -r2\n ek[1,2] = r3\n ek[2,1] = r3\n return ek\n\n def selectFS(self,G4,FSpoints):\n NFs=FSpoints.shape[0]\n NwG4 = self.NwG4\n GammaFS = zeros((NFs,NFs),dtype='complex')\n for i1,iK1 in enumerate(FSpoints):\n for i2,iK2 in enumerate(FSpoints):\n GammaFS[i1,i2] = sum(G4[NwG4//2-1:NwG4//2+1,iK1,NwG4//2-1:NwG4//2+1,iK2])/float(4.*NFs)\n return GammaFS\n\n \n######### Plotting functions\n\n def plotLeadingSolutions(self,Kvecs,lambdas,evecs,title=None):\n mpl.style.use([\"ggplot\"])\n\n Nc = Kvecs.shape[0]\n for ic in range(Nc):\n if Kvecs[ic,0] > pi: Kvecs[ic,0] -=2.*pi\n if Kvecs[ic,1] > pi: Kvecs[ic,1] -=2.*pi\n\n fig, axes = mpl.subplots(nrows=4,ncols=4, sharex=True,sharey=True,figsize=(16,16))\n inr=0\n for ax in axes.flat:\n self.plotEV(ax,Kvecs,lambdas,evecs,inr)\n inr += 1\n ax.set(adjustable='box', aspect='equal')\n if title is None:\n title = r\"Leading eigensolutions of BSE for $Upp=$\" + str(self.Upp) + r\", $t\\prime=$\" + str(self.tp1) + r\", $\\langle n\\rangle=$\" + str(self.fill) + r\", $T=$\" + str(self.temp)\n fig.suptitle(title, fontsize=10)\n mpl.show()\n\n def plotEV(self,ax,Kvecs,lambdas,evecs,inr):\n prop_cycle = rcParams['axes.prop_cycle']\n colors = prop_cycle.by_key()['color']\n\n Nc = evecs.shape[1]; Nw = self.evecs.shape[0]\n iw0=int(Nw/2)\n imax = argmax(evecs[iw0,:,inr])\n if (abs(evecs[iw0-1,imax,inr]-evecs[iw0,imax,inr]) <= 1.0e-2):\n freqString = \"; even frequency\"\n else:\n freqString = \"; odd frequency\"\n\n colVec = Nc*[colors[0]]\n for ic in range(Nc):\n #if real(evecs[iw0,ic,inr])*real(evecs[iw0,imax,inr]) < 0.0: colVec[ic] = colors[1]\n if real(evecs[iw0,ic,inr])*10 < 0.0: colVec[ic] = colors[1]\n # print \"colVec=\",colVec\n ax.scatter(Kvecs[:,0]/pi,Kvecs[:,1]/pi,s=abs(real(evecs[iw0,:,inr]))*2500,c=colVec)\n ax.set(aspect=1)\n ax.set_xlim(-0.75,1.25); ax.set_ylim(-0.75,1.25)\n ax.set_title(r\"$\\lambda=$\"+str(round(lambdas[inr].real,4))+freqString)\n # ax.get_xaxis().set_visible(False)\n # ax.get_yaxis().set_visible(False)\n ax.grid(True)\n for tic in ax.xaxis.get_major_ticks():\n tic.tick1On = 
tic.tick2On = False\n\n\n def apply_symmetry_in_wn(self,G4):\n # for G4[w1,w2,K1,K2]\n # apply symmetry G4(wn,wn',K,K') = G4*(-wn,-wn',K,K')\n Nc = G4.shape[2]\n nwn = G4.shape[0]\n for iw1 in range(nwn):\n for iw2 in range(nwn):\n for iK1 in range(Nc):\n for iK2 in range(Nc):\n imw1 = nwn-1-iw1\n imw2 = nwn-1-iw2\n tmp1 = G4[iw1,iw2,iK1,iK2]\n tmp2 = G4[imw1,imw2,iK1,iK2]\n G4[iw1,iw2,iK1,iK2] = 0.5*(tmp1+conj(tmp2))\n G4[imw1,imw2,iK1,iK2] = 0.5*(conj(tmp1)+tmp2)\n\n def apply_transpose_symmetry(self,G4):\n # Apply symmetry Gamma(K,K') = Gamma(K',K)\n Nc = G4.shape[2]; nwn = G4.shape[0]; nt =Nc*nwn\n GP = np.swapaxes(G4,1,2).reshape(nt,nt)\n GP = 0.5*(GP + GP.transpose())\n # undo the axis swap and write back in place so the caller's array is actually modified\n G4[:,:,:,:] = np.swapaxes(GP.reshape(nwn,Nc,nwn,Nc),1,2)\n\n def apply_ph_symmetry_pp(self,G4):\n # G4pp(k,wn,k',wn') = G4pp(k+Q,wn,k'+Q,wn')\n Nc = G4.shape[2]\n nwn = G4.shape[0]\n for iw1 in range(nwn):\n for iw2 in range(nwn):\n for iK1 in range(Nc):\n iK1q = self.iKSum[iK1,self.iKPiPi]\n for iK2 in range(Nc):\n iK2q = self.iKSum[iK2,self.iKPiPi]\n tmp1 = G4[iw1,iw2,iK1,iK2]\n tmp2 = G4[iw1,iw2,iK1q,iK2q]\n G4[iw1,iw2,iK1,iK2] = 0.5*(tmp1+tmp2)\n G4[iw1,iw2,iK1q,iK2q] = 0.5*(tmp1+tmp2)\n\n","sub_path":"python_script/solveBSE_fromG4_threeband5new.py","file_name":"solveBSE_fromG4_threeband5new.py","file_ext":"py","file_size_in_byte":63063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"303244967","text":"# https://github.com/manashmandal/DeadSimpleSpeechRecognizer/blob/master/preprocess.py\n\nimport librosa\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import to_categorical\nimport numpy as np\nfrom tqdm import tqdm\nimport sys\n\nDATA_PATH = \"/Users/hyung.lee/cs229fall2019/speech_commands_tenlabels/\"\n\n\n# Input: Folder Path\n# Output: Tuple (Label, Indices of the labels, one-hot encoded labels)\ndef get_labels(path=DATA_PATH):\n alldir = os.listdir(path)\n filesToRemove = [\n 'testing_list.txt',\n 'validation_list.txt',\n 'LICENSE','README.md',\n '.DS_Store',\n 'valid_audio',\n 'test_audio',\n 'train_audio',\n '_background_noise_',\n 'cnnmodel.h5',\n 'cnnmodel2.h5',\n 'cnnmodel3.h5']\n for f in filesToRemove:\n if f in alldir:\n alldir.remove(f)\n labels = alldir\n label_indices = np.arange(0, len(labels))\n return labels, label_indices, to_categorical(label_indices)\n\n# Handy function to convert wav2mfcc\ndef wav2mfcc(file_path, max_len=11):\n wave, sr = librosa.load(file_path, mono=True, sr=None)\n wave = np.array(wave[::3])\n mfcc = librosa.feature.mfcc(wave, sr=sr, n_mfcc=20)\n\n # If maximum length exceeds mfcc lengths then pad the remaining ones\n if (max_len > mfcc.shape[1]):\n pad_width = max_len - mfcc.shape[1]\n mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')\n\n # Else cutoff the remaining parts\n else:\n mfcc = mfcc[:, :max_len]\n\n return np.sum(mfcc, axis=1)/max_len\n\ndef save_data_to_array(path=DATA_PATH, max_len=11):\n labels, _, _ = get_labels(path)\n\n for label in labels:\n # Init mfcc vectors\n mfcc_vectors = []\n\n wavfiles = [path + label + '/' + wavfile for wavfile in os.listdir(path + '/' + label)]\n for wavfile in tqdm(wavfiles, \"Saving vectors of label - '{}'\".format(label)):\n mfcc = wav2mfcc(wavfile, max_len=max_len)\n mfcc_vectors.append(mfcc)\n print(mfcc_vectors[0].shape)\n np.save(label + '.npy', mfcc_vectors)\n\n\ndef get_train_test_valid(split_ratio=0.6, random_state=42, path=DATA_PATH):\n # Get available labels\n labels, indices, _ = get_labels(path)\n\n # Getting first 
arrays\n X = np.load('models/' + labels[0] + '.npy')\n y = np.zeros(X.shape[0])\n\n # Append all of the dataset into one single array, same goes for y\n for i, label in enumerate(labels[1:]):\n x = np.load('models/' + label + '.npy')\n X = np.vstack((X, x))\n y = np.append(y, np.full(x.shape[0], fill_value=(i + 1)))\n\n assert X.shape[0] == len(y)\n X_train_validate, X_test, y_train_validate, y_test = train_test_split(X, y, test_size=(0.2), random_state=random_state, shuffle=True)\n X_train, X_validate, y_train, y_validate = train_test_split(X_train_validate, y_train_validate, test_size=(0.2), random_state=random_state, shuffle=True)\n return (X_train, X_validate, X_test, y_train, y_validate, y_test)\n\n\ndef prepare_dataset(path=DATA_PATH):\n labels, _, _ = get_labels(path)\n data = {}\n for label in labels:\n data[label] = {}\n data[label]['path'] = [path + label + '/' + wavfile for wavfile in os.listdir(path + '/' + label)]\n\n vectors = []\n\n for wavfile in data[label]['path']:\n wave, sr = librosa.load(wavfile, mono=True, sr=None)\n # Downsampling\n wave = wave[::3]\n mfcc = librosa.feature.mfcc(wave, sr=16000)\n vectors.append(mfcc)\n\n data[label]['mfcc'] = vectors\n\n return data\n\ndef load_dataset(path=DATA_PATH):\n data = prepare_dataset(path)\n\n dataset = []\n\n for key in data:\n for mfcc in data[key]['mfcc']:\n dataset.append((key, mfcc))\n\n return dataset[:100]\n\ndef main(path):\n save_data_to_array(path, max_len=11)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n\n","sub_path":"Project/Multinomial/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"255189073","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport pytest\nimport produtil\nimport config_metplus\nfrom tc_stat_wrapper import TcStatWrapper\n\n\n#\n# -----------Mandatory-----------\n# configuration and fixture to support METplus configuration files beyond\n# the metplus_data, metplus_system, and metplus_runtime conf files.\n#\n\n\n# Add a test configuration\ndef pytest_addoption(parser):\n \"\"\"! For supporting config files from the command line\"\"\"\n parser.addoption(\"-c\", action=\"store\", help=\" -c <test config file>\")\n\n\n# @pytest.fixture\ndef cmdopt(request):\n \"\"\"! For supporting the additional config files used by METplus\"\"\"\n return request.config.getoption(\"-c\")\n\n\n#\n# ------------Pytest fixtures that can be used for all tests ---------------\n#\n@pytest.fixture\ndef tc_stat_wrapper():\n \"\"\"! Returns a default TCStatWrapper with /path/to entries in the\n metplus_system.conf and metplus_runtime.conf configuration\n files. Subsequent tests can customize the final METplus configuration\n to over-ride these /path/to values.\"\"\"\n\n # Default, empty TcStatWrapper with some configuration values set\n # to /path/to:\n conf = metplus_config()\n return TcStatWrapper(conf, None)\n\n\n\n@pytest.fixture\ndef metplus_config():\n \"\"\"! 
Generate the METplus config object\"\"\"\n try:\n if 'JLOGFILE' in os.environ:\n produtil.setup.setup(send_dbn=False, jobname='TcStatWrapper ',\n jlogfile=os.environ['JLOGFILE'])\n else:\n produtil.setup.setup(send_dbn=False, jobname='TcStatWrapper ')\n produtil.log.postmsg('tc_stat_wrapper is starting')\n\n # Read in the configuration object CONFIG\n config = config_metplus.setup()\n return config\n\n except Exception as e:\n produtil.log.jlogger.critical(\n 'tc_stat_wrapper failed: %s' % (str(e),), exc_info=True)\n sys.exit(2)\n\n\n@pytest.mark.parametrize(\n 'key, value', [\n ('APP_PATH', '/usr/local/met-8.0/bin/tc_stat'),\n ('APP_NAME', 'tc_stat'),\n ('INIT_BEG', '20170705'),\n ('INIT_END', '20170901'),\n ('INIT_HOUR', ['00'])\n ]\n)\ndef test_tc_stat_dict(key, value):\n \"\"\"! Test that the expected values set in the tc_stat_filter.conf\n file are correctly read/captured in the tc_stat_dict dictionary\n \"\"\"\n tcsw = tc_stat_wrapper()\n actual_value = tcsw.tc_stat_dict[key]\n assert actual_value == value\n\n\ndef test_config_lists():\n \"\"\"! Test that when the COLUMN_THRESH_NAME and COLUMN_THRESH_VAL lists\n are of different length, the appropriate value is returned\n from config_lists_ok()\n \"\"\"\n tcsw = tc_stat_wrapper()\n\n # Uneven lengths, expect False to be returned\n column_thresh_name = \"A, B, C\"\n column_thresh_val = \"1,2\"\n tcsw.tc_stat_dict['COLUMN_THRESH_NAME'] = column_thresh_name\n tcsw.tc_stat_dict['COLUMN_THRESH_VAL'] = column_thresh_val\n assert tcsw.config_lists_ok() is False\n\n\ndef test_filter_by_al_basin():\n \"\"\"! Test that for a given time window of SBU GFS data, the expected number\n of results is returned when additional filtering by basin=[\"AL\"].\n \"\"\"\n\n tcsw = tc_stat_wrapper()\n tcsw.tc_stat_dict['INIT_BEG'] = \"20170705\"\n tcsw.tc_stat_dict['INIT_END'] = \"20170901\"\n tcsw.tc_stat_dict['BASIN'] = [\"AL\"]\n # expect only 13 lines of output (including the header) for SBU data\n expected_num_lines = 13\n tcsw.run_all_times()\n output_file = \\\n tcsw.tc_stat_dict['OUTPUT_BASE'] + \"/tc_stat/tc_stat_summary.tcst\"\n with open(output_file, 'r') as out_file:\n lines = len(out_file.readlines())\n print(\"Num lines: \", str(lines))\n\n assert lines == expected_num_lines\n\n\ndef test_filter_by_cyclone():\n \"\"\"! Test that for a given time window of SBU GFS data, the expected number\n of results is returned when additional filtering by cyclone.\n \"\"\"\n\n tcsw = tc_stat_wrapper()\n tcsw.tc_stat_dict['INIT_BEG'] = \"20170705\"\n tcsw.tc_stat_dict['INIT_END'] = \"20170901\"\n tcsw.tc_stat_dict['CYCLONE'] = [\"10\"]\n\n # expect only 13 lines of output (including the header) for SBU data\n expected_num_lines = 13\n tcsw.run_all_times()\n output_file = \\\n tcsw.tc_stat_dict['OUTPUT_BASE'] + \"/tc_stat/tc_stat_summary.tcst\"\n with open(output_file, 'r') as out_file:\n lines = len(out_file.readlines())\n # print(\"Num lines: \", str(lines))\n\n assert lines == expected_num_lines\n\n\ndef test_filter_by_storm_name():\n \"\"\"! 
Test that for a given time window of SBU GFS data, the expected number\n of results is returned when additional filtering by storm_name.\n \"\"\"\n\n tcsw = tc_stat_wrapper()\n tcsw.tc_stat_dict['INIT_BEG'] = \"20170705\"\n tcsw.tc_stat_dict['INIT_END'] = \"20170901\"\n tcsw.tc_stat_dict['STORM_NAME'] = [\"TEN\"]\n # expect only 13 lines of output (including the header) for SBU data\n expected_num_lines = 13\n tcsw.run_all_times()\n output_file = \\\n tcsw.tc_stat_dict['OUTPUT_BASE'] + \"/tc_stat/tc_stat_summary.tcst\"\n with open(output_file, 'r') as out_file:\n lines = len(out_file.readlines())\n print(\"Num lines: \", str(lines))\n\n assert lines == expected_num_lines\n\n\ndef test_filter_by_storm_id():\n \"\"\"! Test that for a given time window of SBU GFS data, the expected number\n of results is returned when additional filtering by storm_id. For\n this data and the indicated storm_id, tc_stat does not return any\n data\n \"\"\"\n\n tcsw = tc_stat_wrapper()\n tcsw.tc_stat_dict['INIT_BEG'] = \"20170105\"\n tcsw.tc_stat_dict['INIT_END'] = \"20170901\"\n tcsw.tc_stat_dict['STORM_ID'] = [\"AL102017\"]\n # expect only 13 lines of output (including the header) for SBU data\n expected_num_lines = 13\n tcsw.run_all_times()\n output_file = \\\n tcsw.tc_stat_dict['OUTPUT_BASE'] + \"/tc_stat/tc_stat_summary.tcst\"\n with open(output_file, 'r') as out_file:\n lines = len(out_file.readlines())\n print(\"Num lines: \", str(lines))\n\n assert lines == expected_num_lines\n\n\ndef test_filter_by_basin_cyclone():\n \"\"\"! Test that for a given time window of SBU GFS data, the expected number\n of results is returned when additional filtering by basin and cyclone\n to get the same results as if filtering by storm_id (which doesn't\n work, perhaps because the storm_id is greater than 2-digits?).\n \"\"\"\n\n tcsw = tc_stat_wrapper()\n tcsw.tc_stat_dict['INIT_BEG'] = \"20170705\"\n tcsw.tc_stat_dict['INIT_END'] = \"20170901\"\n tcsw.tc_stat_dict['CYCLONE'] = [\"10\"]\n tcsw.tc_stat_dict['BASIN'] = [\"AL\"]\n\n # expect only 13 lines of output (including the header) for SBU data\n expected_num_lines = 13\n tcsw.run_all_times()\n output_file = \\\n tcsw.tc_stat_dict['OUTPUT_BASE'] + \"/tc_stat/tc_stat_summary.tcst\"\n with open(output_file, 'r') as out_file:\n lines = len(out_file.readlines())\n print(\"Num lines: \", str(lines))\n\n assert lines == expected_num_lines\n\n#\n\n\n\n","sub_path":"internal_tests/pytests/tc_stat/via_config/test_tc_stat_wrapper.py","file_name":"test_tc_stat_wrapper.py","file_ext":"py","file_size_in_byte":7022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"313126367","text":"import traceback\nimport discord\nimport inspect\nfrom discord.ext import commands\n\n\ndef is_owner():\n \"\"\"Decorator to allow a command to run only if it is called by the owner.\"\"\"\n return commands.check(lambda ctx: ctx.message.author.id == ctx.bot.owner.id)\n\n\nclass Tools:\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(pass_context=True)\n @is_owner()\n async def debug(self, ctx, *, code: str):\n \"\"\"Evaluates an expression to see what is happening internally.\"\"\"\n code = code.strip('` ')\n python = '```py\\n{}\\n```'\n\n env = {\n 'bot': self.bot,\n 'ctx': ctx,\n 'message': ctx.message,\n 'server': ctx.message.server,\n 'channel': ctx.message.channel,\n 'author': ctx.message.author\n }\n\n env.update(globals())\n\n try:\n result = eval(code, env)\n if inspect.isawaitable(result):\n result = await result\n except 
Exception:\n await self.bot.say(python.format(traceback.format_exc()))\n return\n\n await self.bot.say(python.format(result))\n\n @commands.command(pass_context=True, aliases=('exec',))\n @is_owner()\n async def execute(self, ctx, *, code: str):\n \"\"\"Evaluates an expression to see what is happening internally.\"\"\"\n code = code.strip('` ')\n python = '```py\\n{}\\n```'\n\n env = {\n 'bot': self.bot,\n 'ctx': ctx,\n 'message': ctx.message,\n 'server': ctx.message.server,\n 'channel': ctx.message.channel,\n 'author': ctx.message.author\n }\n\n env.update(globals())\n\n # noinspection PyBroadException\n try:\n exec(code, env)\n await self.bot.say('\\N{OK HAND SIGN}')\n except Exception:\n await self.bot.say(python.format(traceback.format_exc()))\n\n\ndef setup(bot):\n bot.add_cog(Tools(bot))\n","sub_path":"cogs/Tools.py","file_name":"Tools.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"646299534","text":"import argparse\nimport json\nimport os\nfrom random import choice\n\ncommand_file = \"command.txt\"\nplace_ship_file = \"place.txt\"\ngame_state_file = \"state.json\"\noutput_path = '.'\nmap_size = 0\ndata_file = \"data.txt\"\nstack_file = \"stack.txt\"\n\n\ndef main(player_key):\n #create initial external file\n global map_size\n # Retrieve current game state\n with open(os.path.join(output_path, game_state_file), 'r') as f_in:\n state = json.load(f_in)\n map_size = state['MapDimension']\n if state['Phase'] == 1:\n place_ships()\n else:\n fire_shot(state['OpponentMap']['Cells'])\n\n\ndef fire_shot(opponent_map):\n # To send through a command please pass through the following <code>,<x>,<y>\n # Possible codes: 1 - Fireshot, 0 - Do Nothing (please pass through coordinates if\n # code 1 is your choice)\n with open(os.path.join(\"../..\",data_file), 'r') as f:\n last_cell_x, last_cell_y, last_state = f.read().split(',')\n last_cell_x = int(last_cell_x)\n last_cell_y = int(last_cell_y)\n print (\"data file\", last_cell_x, last_cell_y, last_state)\n\n #get last cell\n for cell in opponent_map:\n if cell['X']==last_cell_x and cell['Y']==last_cell_y:\n last_cell = cell\n break\n if last_cell['Damaged']:\n print(\"damaged\")\n else:\n print(\"missed\")\n #load stack\n stack = []\n with open(os.path.join(\"../..\", stack_file), 'r') as f:\n print(\"isi stack\")\n for line in f:\n x,y = line[:-1].split(',')\n x = int(x)\n y = int(y)\n stack.append((x,y))\n print(x,y)\n\n print(\"current mode :\", last_state)\n #handling if last state hit\n print(\"last st\", last_state, \"damaged :\", last_cell['Damaged'])\n if last_state==\"hunt\" and last_cell['Damaged']:\n print (\"adding aronund\", last_cell['X'], last_cell['Y'])\n for cell in opponent_map:\n if not cell['Damaged'] and not cell['Missed']:\n if cell['X']==last_cell_x+1 and cell['Y']==last_cell_y:\n check = True\n for s in stack:\n if s[0]==cell['X'] and s[1]==cell['Y']:\n check = False\n if check:\n print(\"added\", cell['X'], cell['Y'])\n stack.append((cell['X'],cell['Y']))\n last_state=\"target\"\n if cell['X']==last_cell_x-1 and cell['Y']==last_cell_y:\n check = True\n for s in stack:\n if s[0]==cell['X'] and s[1]==cell['Y']:\n check = False\n if check:\n print(\"added\", cell['X'], cell['Y']) \n stack.append((cell['X'],cell['Y']))\n last_state=\"target\"\n if cell['X']==last_cell_x and cell['Y']==last_cell_y+1:\n check = True\n for s in stack:\n if s[0]==cell['X'] and s[1]==cell['Y']:\n check = False\n if check:\n print(\"added\", 
cell['X'], cell['Y']) \n stack.append((cell['X'],cell['Y']))\n last_state=\"target\"\n if cell['X']==last_cell_x and cell['Y']==last_cell_y-1:\n check = True\n for s in stack:\n if s[0]==cell['X'] and s[1]==cell['Y']:\n check = False\n if check:\n print(\"added\", cell['X'], cell['Y']) \n stack.append((cell['X'],cell['Y'])) \n last_state=\"target\" \n elif last_state==\"target\" and last_cell['Damaged']:\n print (\"adding aronund\", last_cell['X'], last_cell['Y']) \n for cell in opponent_map:\n if not cell['Damaged'] and not cell['Missed']:\n if cell['X']==last_cell_x+1 and cell['Y']==last_cell_y:\n check = True\n for s in stack:\n if s[0]==cell['X'] and s[1]==cell['Y']:\n check = False\n if check:\n stack.append((cell['X'],cell['Y']))\n if cell['X']==last_cell_x-1 and cell['Y']==last_cell_y:\n check = True\n for s in stack:\n if s[0]==cell['X'] and s[1]==cell['Y']:\n check = False\n if check:\n stack.append((cell['X'],cell['Y']))\n if cell['X']==last_cell_x and cell['Y']==last_cell_y+1:\n check = True\n for s in stack:\n if s[0]==cell['X'] and s[1]==cell['Y']:\n check = False\n if check:\n stack.append((cell['X'],cell['Y']))\n if cell['X']==last_cell_x and cell['Y']==last_cell_y-1:\n check = True\n for s in stack:\n if s[0]==cell['X'] and s[1]==cell['Y']:\n check = False\n if check:\n stack.append((cell['X'],cell['Y']))\n\n if stack==[]:\n last_state = \"hunt\"\n\n #hunt mode\n if last_state==\"hunt\":\n targets = []\n for cell in opponent_map:\n if not cell['Damaged'] and not cell['Missed'] and ((cell['X']+cell['Y'])%2==1):\n valid_cell = cell['X'], cell['Y']\n targets.append(valid_cell)\n target = choice(targets)\n with open(os.path.join(\"../..\",data_file), 'w') as f:\n f.write(\"{},{},{}\".format(target[0],target[1],\"hunt\"))\n output_shot(*target)\n #target mode\n elif last_state==\"target\":\n #print(\"target\")\n target = stack[0]\n stack = stack[1:]\n with open(os.path.join(\"../..\",data_file), 'w') as f:\n f.write(\"{},{},{}\".format(target[0],target[1],\"target\"))\n output_shot(*target) \n \n #rewrite stack\n with open(os.path.join(\"../..\",stack_file), 'w') as f:\n for s in stack:\n f.write(\"{},{}\".format(s[0],s[1]))\n f.write(\"\\n\")\n\n\ndef output_shot(x, y):\n move = 1 # 1=fire shot command code\n with open(os.path.join(output_path, command_file), 'w') as f_out:\n f_out.write('{},{},{}'.format(move, x, y))\n f_out.write('\\n')\n pass\n\ndef place_ships():\n # Please place your ships in the following format <Shipname> <x> <y> <direction>\n # Ship names: Battleship, Cruiser, Carrier, Destroyer, Submarine\n # Directions: north east south west\n\n ships = ['Battleship 1 0 north',\n 'Carrier 3 1 East',\n 'Cruiser 4 2 north',\n 'Destroyer 7 3 north',\n 'Submarine 1 8 East'\n ]\n with open(os.path.join(\"../..\", data_file), 'w') as f_out:\n f_out.write(\"0,0,hunt\");\n with open(os.path.join(\"../..\", stack_file), 'w') as f_out:\n f_out.write(\"\")\n with open(os.path.join(output_path, place_ship_file), 'w') as f_out:\n for ship in ships:\n f_out.write(ship)\n f_out.write('\\n')\n \n return\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('PlayerKey', nargs='?', help='Player key registered in the game')\n parser.add_argument('WorkingDirectory', nargs='?', default=os.getcwd(), help='Directory for the current game files')\n args = parser.parse_args()\n assert (os.path.isdir(args.WorkingDirectory))\n output_path = args.WorkingDirectory\n main(args.PlayerKey)\n","sub_path":"Reference 
Bots/botBaru/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"536672110","text":"import numpy as np\nimport toolss.it as it\nimport matplotlib.pyplot as plt\n\ndef act_fct(x, type_fct):\n # type_fct=identity\n # type_fct=sigmoid\n # type_fct=tanh\n # type_fct=relu\n x=np.asarray(x,dtype=float)\n if type_fct == 'identity':\n y = x\n elif type_fct == \"sigmoid\":\n y = 1/(1 + np.exp(-x))\n elif type_fct == \"tanh\":\n y = np.tanh(x)\n elif type_fct == \"relu\":\n y = np.max(np.vstack((x, np.zeros(x.shape))), axis=0)\n else:\n raise ValueError(\"wrong option\");\n\n return y\n\n\n'''\ndef poly_extend_data1D(x):\n \"\"\"\n Extend the provided input vector x, with subsequent powers of the input.\n x = np.array of size 1xN\n Output:\n x_e = np.array of size (p+1)xN such that 1st row = x^0, 2nd row = x^1, ...\n \"\"\"\n ### YOUR CODE HERE ###\n x = np.asarray(x);\n ones_vec=np.ones(np.size(x,axis=1)); #number of columns in x ( number of examples), and dimensions will be number of rows\n x_ext = np.vstack((ones_vec,x));\n return x_ext\n'''\n\n\ndef poly_extend_data1D(x, p):\n \"\"\"\n Extend the provided input vector x, with subsequent powers of the input.\n x = np.array of size 1xN\n Output:\n x_e = np.array of size (p+1)xN such that 1st row = x^0, 2nd row = x^1, ...\n \"\"\"\n ### YOUR CODE HERE ###\n x = np.asarray(x);\n ones_vec = np.ones(\n np.size(x, axis=1)); # number of columns in x ( number of examples), and dimensions will be number of rows\n x_e = np.vstack((ones_vec, x));\n for i in range(2, p + 1):\n x_e = np.vstack((x_e, x ** i))\n\n return x_e\n\n\ndef lir_cost(w, y, x):\n \"\"\"\n Computes cost for linear regression with parameters w and data set x,y\n y = np.array of size 1xN\n x = np.array of size MxN\n w = np array of size Mx1\n Output:\n cost = scalar\n \"\"\"\n y_estimated = np.dot(w.T, x);\n l_cost = np.sum(0.5 * (np.square(y_estimated - y)));\n\n return l_cost\n\n\ndef lir_grad(w, y, x):\n \"\"\"\n Returns gradient for linear regression with quadratic cost for parameter w and data set y, x.\n y = np.array of size 1xN\n x = np.array of size MxN\n w = np array of size Mx1\n Output:\n gradT = np array of size Mx1\n \"\"\"\n\n y_estimated = np.asarray(np.dot(w.T, x));\n # error gradient\n err = y_estimated - y\n err = np.array(err)\n gradT = np.dot(x, err.T)\n return gradT\n\n\ndef gradient_descent(iter_num, l_rate, w_0, gradient_func,cost=None):\n \"\"\"\n Performs gradient descent for iter_num iterations with learning rate l_rate from initial\n position w_0.\n w_0 = np array of size Mx1\n gradient_func(w) is a function which returns gradient for parameter w\n cost(w) is an optional function returning the cost for parameter w; when given, the cost history is plotted\n Output:\n w_opt = optimal parameters\n \"\"\"\n c=np.zeros(iter_num);\n for x in range(0, iter_num):\n w_0 = w_0 - l_rate * gradient_func(w_0);\n\n # cost is optional; only track it when a cost function is provided\n if cost is not None:\n c[x] = np.float32(cost(w_0))\n\n if cost is not None:\n plt.plot(c)\n plt.show()\n\n w_opt = w_0\n return w_opt\n\n\ndef normalize_data(x):\n \"\"\"\n Normalizes data. Should not normalize the first row (we assume it is the row of ones).\n x = np.array of size MxN\n Output:\n x_norm = normalized np.array of size MxN\n norm_param = dictionary with two keys \"mean\" and \"var\". 
Each key contains\n a np.array of size Mx1 with the mean and variance of each row of data array.\n For the first row, set mean=0 and var=1\n \"\"\"\n ### YOUR CODE HERE ###\n x = np.asarray(x);\n m = np.mean(x, axis=1).reshape((-1,1));\n v = np.var(x, axis=1).reshape((-1,1));\n m[0,] = 0;\n v[0,] = 1;\n x_norm = (x - m) / np.sqrt(v);\n dic = {'mean': m, 'var': v};\n return x_norm, dic\n\n\ndef sin_extend_data1D(x, p):\n \"\"\"\n Extend the provided input vector x, wtih P subsequent sin harmonics of the input.\n x = np.array of size 1xN\n Output:\n x_e = np.array of size (p+1)xN\n \"\"\"\n ### YOUR CODE HERE ###\n x = np.asarray(x);\n ones_vec = np.ones(\n np.size(x, axis=1)); # number of colums in x ( number of examples), and dimensions will be number of rows\n har = np.sin(2 * np.pi * x / x.max())\n x_e = np.vstack((ones_vec, har));\n for i in range(2, p + 1):\n har = np.sin(2 * np.pi * i * x / x.max())\n x_e = np.vstack((x_e, har))\n\n return x_e\n\n\ndef poly_extend_data2D_(x, p=1):\n \"\"\"\n Extend the provided input matrix x wtih all subsequent powers of terms of the input.\n x = np.array of size 2xN\n Output:\n x_e = np.array\n Eg. for p=3 and x of dimensions 2xN. x_e should be a matrix such that\n the 1st row is [1 1 .. 1], 2nd X[0,:], 3rd X[1,:], 4th X[0,:]**2,\n 5th X[0,:]*X[1,:], 6th X[1,:]*2, 7th X[0,:]**3, 8th X[0,:]**2*X[1,:],\n and so on... till 10th row equal X[1,:]**3\n \"\"\"\n ### YOUR CODE HERE ###\n x_tmp = [[x[0, :] ** (k - i) * x[1, :] ** i for i in range(k + 1)] for k in range(p + 1)]\n x_e = np.vstack(x_tmp)\n\n ### ######### ###\n return x_e\n\n\ndef poly_extend_data2D(x, p):\n \"\"\"\n Extend the provided input matrix x wtih all subsequent powers of terms of the input.\n x = np.array of size 2xN\n Output:\n x_e = np.array\n Eg. for p=3 and x of dimensions 2xN. x_e should be a matrix such that\n the 1st row is [1 1 .. 1], 2nd X[0,:], 3rd X[1,:], 4th X[0,:]**2,\n 5th X[0,:]*X[1,:], 6th X[1,:]*2, 7th X[0,:]**3, 8th X[0,:]**2*X[1,:],\n and so on... till 10th row equal X[1,:]**3\n \"\"\"\n ### YOUR CODE HERE ###\n x_tmp = [[x[0, :] ** (k - i) * x[1, :] ** i for i in range(k + 1)] for k in range(p + 1)]\n x_e = np.vstack(x_tmp)\n #\n\n ### ######### ###\n return x_e\n\n\ndef lor_grad(w, y, x,lbd=0):\n \"\"\"\n Returs gradient for logistic regression with the cross entropy cost function\n for parameter w and data set y, x.\n y = np.array of size 1xN\n x = np.array of size MxN\n w = np array of size Mx1\n Output:\n gradT = np array of size Mx1\n \"\"\"\n ## YOUR CODE HERE ###\n a = act_fct(np.dot(w.T, x), \"sigmoid\");\n err = a - y;\n err = np.array(err)\n gradT = np.dot(x, err.T)+lbd*w\n\n #####################\n return gradT\n\n\ndef lor_cost(w, y, x,lbd=0):\n \"\"\"\n Computes cost for logistic regression with parameters w and data set x,y\n y = np.array of size 1xN\n x = np.array of size MxN\n w = np array of size Mx1\n Output: y_eq_0 = (y==0).nonzero()[1]\n\n cost = scalar\n \"\"\"\n ## YOUR CODE HERE ###\n sigmoid_f = lambda x: 1 / (1 + np.exp(-x))\n y_eq_0 = (y == 0).nonzero()[1]\n y_eq_1 = (y == 1).nonzero()[1]\n cost = np.sum(-np.log(sigmoid_f(np.dot(w.T, x[:, y_eq_1])))) + np.sum(\n -np.log(1 - sigmoid_f(np.dot(w.T, x[:, y_eq_0]))))\n cost += lbd * np.sum(w ** 2)\n\n #####################\n return cost\n\ndef dact_fct(x, type_fct):\n \"\"\"\n Implements derivatives of activation functions to be used in Neural Networks. 
The\n Inputs:\n x = np.array of input values\n type_act =\n 'identity' : for activation y = f(x) = x\n 'sigmoid': for activation y = f(x) = 1/(1+exp(-x))\n 'tanh': for activation y = f(x) = tanh(x)\n 'rect_lin_unit': for activation y = f(x) = max(x,0)\n Output:\n y = np.array containing f'(x)\n \"\"\"\n # type_fct=identity\n # type_fct=sigmoid\n # type_fct=tanh\n # type_fct=rlu\n x = np.asarray(x, dtype=float)\n if type_fct == \"identity\":\n y = np.ones((x.size))\n elif type_fct == \"sigmoid\":\n y = np.divide(np.exp(-x), ((1 + np.exp(-x))**2))\n #der of sigmoid is =(sig)*(1-sig)\n elif type_fct == \"tanh\":\n y = np.tanh(x)\n y = 1-(y**2);\n elif type_fct == \"relu\":\n #derivative of relu is 0 for x<=0 and 1 x>0\n x_z=(x<=0).nonzero()[0]\n x_nz=(x>0).nonzero()[0]\n x[x_z]=0; x[x_nz]=1;\n y=x;\n\n\n else:\n raise ValueError(\"wrong option\");\n\n ##################\n return y\n\n","sub_path":"nn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"373377121","text":"import opencc\n\nfound_start = False\nword_freqs = {}\nfor line in open('pinyin_simp.dict.yaml'):\n line = line.strip()\n if not found_start:\n if line == '...':\n found_start = True\n continue\n parts = line.split('\\t')\n if len(parts) != 3:\n continue\n word, pinyin, weight = parts\n word_freqs[word] = weight\n\noutput_simp = open('../jyutping_simp.dict.yaml', 'wt')\noutput_tradsimp = open('../jyutping_tradsimp.dict.yaml', 'wt')\nprint('''---\nname: jyutping_simp\nversion: \"2018.04.10\"\nsort: by_weight\nuse_preset_vocabulary: false\nmax_phrase_length: 7\nmin_phrase_weight: 100\n...\n''', file=output_simp)\nprint('''---\nname: jyutping_tradsimp\nversion: \"2018.04.10\"\nsort: by_weight\nuse_preset_vocabulary: false\nmax_phrase_length: 7\nmin_phrase_weight: 100\n...\n''', file=output_tradsimp)\nfound_start = False\nfor line in open('leimaau_jyutping.dict.yaml'):\n line = line.strip()\n if not found_start:\n if line == '...':\n found_start = True\n continue\n parts = line.split('\\t')\n word = parts[0]\n word_simp = opencc.convert(word, config='t2s.json')\n if word_simp in word_freqs:\n if len(parts) == 2:\n parts.append(word_freqs[word_simp])\n elif len(parts) == 3:\n parts[2] = word_freqs[word_simp]\n if word != word_simp:\n newparts = parts[:]\n newparts[0] = word_simp\n print('\\t'.join(newparts), file=output_simp)\n print('\\t'.join(newparts), file=output_tradsimp)\n print('\\t'.join(parts), file=output_tradsimp)\n else:\n print('\\t'.join(parts), file=output_simp)\n print('\\t'.join(parts), file=output_tradsimp)\noutput_tradsimp.close()\noutput_simp.close()\n","sub_path":"data/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"50369849","text":"from lib.util import randomString\nfrom locust import HttpLocust, TaskSet, task, between\nimport random\nimport faker\n\n\nclass UserBehavior(TaskSet):\n def on_start(self):\n self.login()\n self.makeLobbies(20)\n self.joinALobby()\n\n def joinALobby(self):\n # join some random lobby :P\n lobby_list = self.client.get(\"/api/lobbies/\").json()\n if len(lobby_list) > 20:\n lobby_list = lobby_list[0:20]\n \n self.lobby_id = str(random.choice(lobby_list)[\"id\"])\n\n # get info for the lobby we just joined\n self.client.get(\"/api/lobbies/\" + self.lobby_id,\n name=\"/api/lobbies/:lobby_id\")\n\n # create num lobbies\n def 
makeLobbies(self, num):\n # check if there are enough lobbies\n lobby_list = self.client.get(\"/api/lobbies/\").json()\n if (len(lobby_list) == 0):\n id = 1\n else:\n id = lobby_list[len(lobby_list)-1][\"id\"]\n\n while (id <= num):\n new_lobby = self.client.post(\"/api/lobbies\", json={\n \"title\": \"title\",\n \"desc\": \"a desc\",\n \"currentVideoId\": \"LDQcgkDn0yU\"\n }).json()\n id = new_lobby[\"id\"]\n\n def login(self):\n username = randomString()\n password = randomString()\n\n print(\"new user username: \" + username + \" password: \" + password)\n\n self.client.post(\"/api/signup\", json={\n \"user\": {\n \"name\": username,\n \"password\": password\n }\n })\n\n self.client.post(\"/api/login\", json={\n \"name\": username,\n \"password\": password\n })\n\n @task(1)\n def create_lobby(self):\n new_lobby = self.client.post(\"/api/lobbies\", json={\n \"title\": \"title\",\n \"desc\": \"a desc\",\n \"currentVideoId\": \"LDQcgkDn0yU\"\n }).json()\n\n\n @task(80)\n class JoinLobby(TaskSet):\n \"\"\"\n actions\n - list lobbies\n - pick a lobby and join it\n - request new messages from the lobby\n - possibly post a new message to the lobby\n \"\"\"\n wait_time = between(1, 2)\n latest_message_id = 0\n\n @task(80)\n def post_message_and_get_new_messages(self):\n # get new messages for the lobby\n new_messages = self.client.get(\"/api/lobbies/%s/lobby_messages/new_messages/%s\" % (self.parent.lobby_id, self.latest_message_id),\n name=\"/api/lobbies/:lobby_id/lobby_messages/new_messages/:latest_msg_seqno\").json()\n print(new_messages)\n if len(new_messages) > 0:\n self.latest_message_id = max(\n self.latest_message_id,\n max(msg[\"id\"] for msg in new_messages)\n )\n # post a test message to the other users in the lobby!\n if random.randint(0, 5) >= 3:\n self.parent.client.post(\"/api/lobbies/%s/lobby_messages/\" % self.parent.lobby_id, json={\n \"lobby_message\": {\n \"message\": randomString()\n }\n }, name=\"/api/lobbies/:lobby_id/lobby_messages/\")\n\n latest_seq_no = 0\n @task(15)\n def add_video_and_get_video_queue(self):\n new_videos = self.client.get(\"/api/lobbies/%s/queued_videos/new_videos/%s\" % (self.parent.lobby_id, self.latest_seq_no),\n name=\"/api/lobbies/:lobby_id/queued_videos/new_videos/:latest_seqno\").json()\n if len(new_videos) > 0:\n self.latest_seq_no = max(\n self.latest_seq_no,\n max(video[\"id\"] for video in new_videos)\n )\n self.parent.client.post(\"/api/lobbies/%s/queued_videos\" % (self.parent.lobby_id), json={\n \"queued_video\": {\n \"lobby_id\": self.parent.lobby_id,\n \"video\": \"https://www.youtube.com/watch?v=Zt8wH_yD8AY\"\n }\n }, name=\"/api/lobbies/:lobby_id/queued_videos/\")\n\n @task(5)\n def logout(self):\n self.parent.client.get(\"/api/logout\")\n self.parent.login()\n self.parent.joinALobby()\n\n\n# on set up action\n# set up 5-10 lobbies\n# run a function before the tests start running\n\nclass WebsiteUser(HttpLocust):\n task_set = UserBehavior\n wait_time = between(1, 2) # wait 5 to 15 seconds between requests\n","sub_path":"benchmarking/locust/locust.py","file_name":"locust.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"297543292","text":"#!/usr/bin/env python\n\n\"\"\"Creating chess puzzles for lichess.org\"\"\"\n\nimport argparse\nimport io\nimport logging\nimport os\n\nimport chess.engine\nimport chess.pgn\n\nimport pymongo\n\nfrom modules.api.api import post_puzzle\nfrom modules.bcolors.bcolors import bcolors\nfrom 
modules.investigate.investigate import investigate\nfrom modules.puzzle.puzzle import puzzle\nfrom modules.utils.helpers import str2bool, get_stockfish_command, configure_logging, prepare_terminal\n\n\ndef prepare_settings():\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\"--max\", metavar=\"MAX\", nargs=\"?\", type=int, default=20,\n help=\"number of games to retrieve\")\n parser.add_argument(\"--user\", metavar=\"USER\", nargs=\"?\", type=str,\n help=\"user to retrieve games\")\n parser.add_argument(\"--threads\", metavar=\"THREADS\", nargs=\"?\", type=int, default=4,\n help=\"number of engine threads\")\n parser.add_argument(\"--memory\", metavar=\"MEMORY\", nargs=\"?\", type=int, default=2048,\n help=\"memory in MB to use for engine hashtables\")\n parser.add_argument(\"--depth\", metavar=\"DEPTH\", nargs=\"?\", type=int, default=8,\n help=\"depth for stockfish analysis\")\n parser.add_argument(\"--quiet\", dest=\"loglevel\",\n default=logging.DEBUG, action=\"store_const\", const=logging.INFO,\n help=\"substantially reduce the number of logged messages\")\n parser.add_argument(\"--games\", metavar=\"GAMES\", default=\"games.pgn\",\n help=\"A specific pgn with games\")\n parser.add_argument(\"--strict\", metavar=\"STRICT\", default=True,\n help=\"If False then it will be generate more tactics but maybe a little ambiguous\")\n parser.add_argument(\"--includeBlunder\", metavar=\"INCLUDE_BLUNDER\", default=True,\n type=str2bool, const=True, dest=\"include_blunder\", nargs=\"?\",\n help=\"If False then generated puzzles won't include initial blunder move\")\n parser.add_argument(\"--stockfish\", metavar=\"STOCKFISH\", default=None, help=\"Path to Stockfish binary\")\n\n return parser.parse_args()\n\n\nsettings = prepare_settings()\n\nprepare_terminal()\n\nconfigure_logging(settings.loglevel)\n\nstockfish_command = get_stockfish_command(settings.stockfish)\nlogging.debug(f'Using {stockfish_command} to run Stockfish.')\nengine = chess.engine.SimpleEngine.popen_uci(stockfish_command)\nengine.configure({'Threads': settings.threads, 'Hash': settings.memory})\n\nclient = pymongo.MongoClient('url')\ndatabase = client[\"chesspecker-db\"]\ncollection = database[\"users\"]\nuserObject = collection.find_one({\"id\": settings.user})\n\ndef createSet() -> bool:\n newSet = {\n 'user': userObject[\"_id\"],\n 'puzzles': [],\n 'length': 0,\n 'bestTime': 0,\n }\n try:\n collection = database[\"puzzlesets\"]\n set_id = collection.insert_one(newSet).inserted_id\n return set_id\n except Exception as err:\n print(err)\n return False\n\ndef getSet() -> bool:\n try:\n collection = database[\"puzzlesets\"]\n numberOfSets = collection.count_documents({\"user\": userObject[\"_id\"]})\n if numberOfSets == 0:\n set_id = createSet()\n else:\n for current_set in collection.find({\"user\": userObject[\"_id\"]}):\n if current_set[\"length\"] < 30:\n set_id = current_set[\"_id\"]\n break\n\n return set_id\n except Exception as err:\n print(err)\n return False\n\ndef updateGame(gameID: str) -> bool:\n try:\n setAsAnalyzed = { \"$set\" : { \"analyzed\": True }}\n collection = database[\"games\"]\n collection.update_one({\"game_id\": gameID}, setAsAnalyzed)\n return True\n except Exception as err:\n print(err)\n return False\n\ndef insertPuzzle(puzzle) -> bool:\n try:\n collection = database[\"puzzles\"]\n puzzle_id = collection.insert_one(puzzle).inserted_id\n set_id = getSet()\n collection = database[\"puzzlesets\"]\n pushPuzzleToSet = { \"$push\" : { \"puzzles\": puzzle_id }}\n 
collection.update_one({\"_id\": set_id}, pushPuzzleToSet)\n incrementSetLength = {'$inc': {\"length\": 1}}\n collection.update_one({\"_id\": set_id}, incrementSetLength)\n collection = database[\"puzzlesets\"]\n incrementPuzzleNumber = {'$inc': {\"puzzlesInDb\": 1}}\n collection.update_one({\"_id\": userObject[\"_id\"]}, incrementPuzzleNumber)\n return True\n except Exception as err:\n print(err)\n return False\n\ntry:\n collection = database[\"games\"]\n for currentGame in collection.find({\"user\": userObject[\"_id\"]}).limit(settings.max):\n if currentGame[\"analyzed\"] == True:\n print(currentGame[\"game_id\"])\n print(\"Already analyzed\")\n else:\n print(currentGame[\"game_id\"])\n print(\"Not analyzed yet\")\n pgn = io.StringIO(currentGame[\"pgn\"])\n game = chess.pgn.read_game(pgn)\n if game is None:\n break\n node = game\n\n game_id = currentGame[\"game_id\"]\n logging.debug(bcolors.WARNING + \"Game ID: \" + str(game_id) + bcolors.ENDC)\n logging.debug(bcolors.WARNING + \"Game headers: \" + str(game) + bcolors.ENDC)\n\n prev_score = chess.engine.Cp(0)\n\n logging.debug(bcolors.OKGREEN + \"Game Length: \" + str(game.end().board().fullmove_number))\n logging.debug(\"Analysing Game...\" + bcolors.ENDC)\n\n while not node.is_end():\n next_node = node.variation(0)\n info = engine.analyse(next_node.board(), chess.engine.Limit(depth=settings.depth))\n cur_score = info[\"score\"].relative\n logging.debug(bcolors.OKGREEN + node.board().san(next_node.move) + bcolors.ENDC)\n logging.debug(bcolors.OKBLUE + \" CP: \" + str(cur_score.score()) + bcolors.ENDC)\n logging.debug(bcolors.OKBLUE + \" Mate: \" + str(cur_score.mate()) + bcolors.ENDC)\n\n if investigate(prev_score, cur_score, node.board()):\n logging.debug(bcolors.WARNING + \" Investigate!\" + bcolors.ENDC)\n logging.debug(bcolors.WARNING + \"Generating new puzzle...\" + bcolors.ENDC)\n currentPuzzle = puzzle(node.board(), next_node.move, str(game_id), engine, info, game, settings.strict)\n currentPuzzle.generate(settings.depth)\n\n if currentPuzzle.is_complete():\n puzzle_pgn = post_puzzle(currentPuzzle, settings.include_blunder)\n puzzle_json = currentPuzzle.to_json(settings.user, puzzle_pgn)\n insertPuzzle(puzzle_json)\n\n prev_score = cur_score\n node = next_node\n updateGame(game_id) \nexcept Exception as err:\n print(err)\nfinally:\n print(\"all done\")\n os._exit(1)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"581584288","text":"#!/usr/bin/env python3\n\nimport json\nimport speech_recognition as sr\n\nr = sr.Recognizer()\nwith sr.Microphone() as source:\n print(\"Say something!\")\n audio = r.listen(source)\n\nWIT_AI_KEY = \"YCRQBWVE2TVZS3XNZ7PCQKJAP2L7V4TR\" # Wit.ai keys are 32-character uppercase alphanumeric strings\ntry:\n print(\"Wit.ai thinks you said \" + r.recognize_wit(audio, key=WIT_AI_KEY))\nexcept sr.UnknownValueError:\n print(\"Wit.ai could not understand audio\")\nexcept sr.RequestError as e:\n print(\"Could not request results from Wit.ai service; {0}\".format(e))","sub_path":"witRec.py","file_name":"witRec.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"202679370","text":"#!/usr/bin/python\n# -*- coding: utf8 -*-\nimport logging\nimport string\n\nimport hal.box\nfrom utilities.comparison import SimpleComparison\n\n\nclass BoxTable(hal.box.BoxTableInterface):\n \n def 
__init__(self):\n self.sensorsTable = []\n self.bccTable = ['Version','-------------------------']\n self.temperatureList = ['disable','0','45']\n \n self.ClearSensor()\n \n \n def SetBCC(self, sw = '0', hw = '0', bcc = '0'):\n self.bccTable.append('SW Version : ' + sw)\n self.bccTable.append('HW Version : ' + hw)\n self.bccTable.append('BCC Version : ' + bcc)\n \n def SetSensors(self, sensor = '0', name = 'input0', severity = '6 (info)', snmp = 'disable', currentS = '0 (open)', activeS = '1 (close)', active = 'No'):\n self.sensorsTable[int(sensor) + 3] = ' ' + sensor + '{0:>24}{1:>16}{2:>11}{3:>15}{4:>14}{5:>8}'.format('input' + sensor, severity, snmp, currentS, activeS, active) \n \n def ClearSensor(self):\n self.sensorsTable = ['Sensors',\n '-------------------------',\n 'Sensor Sensor configured name Severity SNMP Trap Current State Active State Active',\n '------ ---------------------- -------------- --------- ------------- ------------ ------',\n '',\n '',\n '',\n ''\n ]\n for i in range(1,5):\n self.SetSensors(str(i))\n \n def GetBCC(self):\n return self.bccTable\n \n def GetSensors(self):\n return self.sensorsTable\n \n def CompareBattery(self, real, virtual):\n #print real\n voltage = real[2].split(' ')\n \n if float(voltage[13]) < float(virtual[0]) or float(voltage[13]) > float(virtual[1]):\n logging.error('COMPARE ERROR: Comparison failed, voltage out of range\\n\\nReceived:\\n' \\\n + '|' + str(voltage[13]) + '|' + '\\n\\nExpected:\\n' + '|' + str(virtual) + '|')\n return False\n return True\n #print voltage\n \n def GetTemperature(self):\n return self.temperatureList\n \n def SetTableTemperature(self, SNMPtrap = 'disable', minThreshold = '0', maxThreshold = '45'): \n self.temperatureList = [SNMPtrap, minThreshold, maxThreshold]\n \n def TemperatureTableCompare(self, real, virtual):\n #If cli comand\n if (len(real[1]) > 2) and (real[1].split()[3] == 'disable' or real[1].split()[3] == 'enable'):\n realList = []\n \n realList.append(real[1].split()[3]) #save value SNMP Trap\n realList.append(real[2].split()[3]) #save lover threshold temperature\n realList.append(real[3].split()[3]) #save upper threshold temperature\n \n currentTemperature = real[0].split()[3] #save current temperature \n \n if (int(currentTemperature) < 25 or int(currentTemperature) > 45):\n return False\n return SimpleComparison(realList, self.temperatureList)\n #If snmp-request\n else:\n currentTemperature = real[0]\n if (int(currentTemperature) < int(virtual[0][0]) or int(currentTemperature) > int(virtual[0][1])):\n return False\n elif real[1] == virtual[1] and real[2] == virtual[2] and real[3] == virtual[3]:\n return True\n return False\n \nclass BoxCli(hal.box.BoxCliInterface):\n \n def ShowBox(self, error = '1', access = 'cli'):\n if error == '1':\n if access == 'cli':\n return ['show box\\n']\n if access == 'snmp':\n return ''\n \n if error == '0':\n if access == 'cli':\n return []\n if access == 'snmp':\n return []\n \n def ShowBoxBattery(self, error = '1', access = 'cli'):\n if error == '1':\n if access == 'cli':\n return ['show box battery\\n']\n if access == 'snmp':\n return ''\n \n if error == '0':\n if access == 'cli':\n return []\n if access == 'snmp':\n return []\n \n def ShowBoxTemperature(self, error = '1', access = 'cli'):\n if error == '1':\n if access == 'cli':\n return ['show box temperature\\n']\n elif access == 'snmp':\n return [['get', 'enterprises', '27142.1.1.43.1.8.1.4.0'],\n ['get', 'enterprises', '27142.1.1.43.1.3.0'],\n ['get', 'enterprises', '27142.1.1.43.1.1.0'], \n ['get', 
'enterprises', '27142.1.1.43.1.2.0']]\n \n if error == '0':\n if access == 'cli':\n return []\n elif access == 'snmp':\n return ['1']\n \n def SetLowerThresholdDefault(self, error = '1', access = 'cli'):\n if error == '1':\n if access == 'cli':\n return ['configure\\nbox\\nno temperature threshold min\\n\\x1a']\n elif access == 'snmp':\n return []\n \n if error == '0':\n if access == 'cli':\n return []\n elif access == 'snmp':\n return []\n \n\n def SetLowerThreshold(self, temp = '0', error = '1' , access = 'cli'):\n if error == '1':\n if access == 'cli':\n return ['configure\\nbox\\ntemperature threshold min ' + temp + '\\n\\x1a']\n elif access == 'snmp':\n return [['set', 'enterprises', '27142.1.1.43.1.1.0', temp, 'INTEGER']]\n \n if error == '0':\n if access == 'cli':\n return []\n elif access == 'snmp':\n return ['1']\n \n if error == '-1':\n if access == 'cli':\n return ['ERROR: Can\\'t set temperature value, temperature value is out of range -100..100']\n elif access == 'snmp':\n return []\n \n if error == '-2':\n if access == 'cli':\n return ['ERROR: Wrong or out of range parameters']\n elif access == 'snmp':\n return []\n \n if error == '-3':\n if access == 'cli':\n return ['ERROR: Can\\'t change temperature threshold, lower limit is more than upper limit']\n elif access == 'snmp':\n return []\n \n def SetUpperThreshold(self, temp = '45', error = '1', access = 'cli'):\n if error == '1':\n if access == 'cli':\n return ['configure\\nbox\\ntemperature threshold max ' + temp + '\\n\\x1a']\n elif access == 'snmp':\n return [['set', 'enterprises', '27142.1.1.43.1.2.0', temp, 'INTEGER']]\n \n if error == '0':\n if access == 'cli':\n return []\n elif access == 'snmp':\n return ['1']\n \n if error == '-1':\n if access == 'cli':\n return ['ERROR: Can\\'t set temperature value, temperature value is out of range -100..100']\n elif access == 'snmp':\n return []\n \n if error == '-2':\n if access == 'cli':\n return ['ERROR: Wrong or out of range parameters']\n elif access == 'snmp':\n return []\n \n if error == '-3':\n if access == 'cli':\n return ['ERROR: Can\\'t change temperature threshold, upper limit is less than lower limit']\n elif access == 'snmp':\n return []\n \n def SetUpperThresholdDefault(self, error = '1', access = 'cli'):\n if error == '1':\n if access == 'cli':\n return ['configure\\nbox\\nno temperature threshold max\\n\\x1a']\n elif access == 'snmp':\n return []\n \n if error == '0':\n if access == 'cli':\n return []\n elif access == 'snmp':\n return []\n\n\n def EnableTempSNMPTrap(self, snmpEnable = '', error = '1', access = 'cli'):\n if error == '1':\n if access == 'cli':\n return ['configure\\nbox\\ntemperature snmp-trap\\n\\x1a']\n elif access == 'snmp':\n return [['set', 'enterprises', '27142.1.1.43.1.3.0', snmpEnable, 'INTEGER']]\n \n if error == '0':\n if access == 'cli':\n return []\n elif access == 'snmp':\n return ['1']\n\n def DisableTempSNMPTrap(self, snmpDisable = '', error = '1', access = 'cli'):\n if error == '1':\n if access == 'cli':\n return ['configure\\nbox\\nno temperature snmp-trap\\n\\x1a']\n elif access == 'snmp':\n return [['set', 'enterprises', '27142.1.1.43.1.3.0', snmpDisable, 'INTEGER']]\n \n if error == '0':\n if access == 'cli':\n return []\n elif access == 'snmp':\n return ['1']\n \n def GetPowerLinePresent(self, error = '1', access = 'snmp'):\n if error == '1':\n if access == 'cli':\n pass\n elif access == 'snmp':\n return [['get', 'enterprises', '27142.1.1.43.1.11.1.0']]\n \n if error == '0':\n if access == 'cli':\n pass\n elif access == 
'snmp':\n return ['1']\n\n if error == '-1':\n if access == 'cli':\n pass\n elif access == 'snmp':\n return ['0']\n \n def GetChargingCurrent(self, error = '1', access = 'snmp'):\n if error == '1':\n if access == 'cli':\n pass\n elif access == 'snmp':\n return [['get', 'enterprises', '27142.1.1.43.1.11.2.0']]\n \n if error == '0':\n if access == 'cli':\n pass\n elif access == 'snmp':\n return ['1']\n \n def GetBatteryVoltage(self, numBattery = '1', error = '1', access = 'snmp'):\n if error == '1':\n if access == 'cli':\n pass\n elif access == 'snmp':\n return [['get', 'enterprises', '27142.1.1.43.1.10.1.2.' + numBattery]]\n \n if error == '0':\n if access == 'cli':\n pass\n elif access == 'snmp':\n return ['1']\n \nclass SFPCli(hal.box.SFPCliInterface):\n \n def ShowSFPInformation(self, interface = '0/0', error = '1', access = 'cli'):\n if error == '1':\n if access == 'cli':\n return ['show sfp information ' + interface + '\\n']\n elif access == 'snmp':\n return []\n \n if error == '0':\n if access == 'cli':\n return []\n elif access == 'snmp':\n return ['']\n \n if error == 'wrongCommand':\n if access == 'cli':\n return ['ERROR: Wrong or incomplete command, use ? for help']\n elif access == 'snmp':\n return []\n \n if error == '-2':\n if access == 'cli':\n return ['ERROR: Wrong or out of range parameters']\n elif access == 'snmp':\n return []\n \n if error == '-3':\n if access == 'cli':\n return ['ERROR: Can\\'t read SFP module information']\n elif access == 'snmp':\n return []\n \n if error == '-4':\n if access == 'cli':\n return ['ERROR: Can\\'t show SFP module information on the copper interface']\n elif access == 'snmp':\n return []\n\nclass SFPTable(hal.box.SFPTableInterface): \n \n def CompareTableSFPModuleInformation(self, realInf, DDM):\n #DDM Support\n if realInf[7].split()[3] in DDM:\n for i in range(len(realInf)):\n for j in range(len(realInf[i])):\n if realInf[i][j] in string.printable:\n continue\n else:\n return False\n return True\n else: return False","sub_path":"PythonTestProject/hal/bcm_phoenix/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":12581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"177067149","text":"from django.conf.urls import url\nfrom . 
import br_dal_views\nfrom entities.models import *\n\napp_name = 'browsing'\n\nurlpatterns = [\n url(\n r'^person/$', br_dal_views.PersonAC.as_view(\n model=Person),\n name='person-autocomplete',\n ),\n url(\n r'^place/$', br_dal_views.PlaceAC.as_view(\n model=Place),\n name='place-autocomplete',\n ),\n url(\n r'^institution/$', br_dal_views.InstitutionAC.as_view(\n model=Institution),\n name='institution-autocomplete',\n ),\n url(\n r'^work/$', br_dal_views.WorkAC.as_view(\n model=Work),\n name='work-autocomplete',\n ),\n url(\n r'^event/$', br_dal_views.EventAC.as_view(\n model=Event),\n name='event-autocomplete',\n ),\n]\n","sub_path":"browsing/br_dal_urls.py","file_name":"br_dal_urls.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"113907481","text":"#!/usr/bin/env python\nimport os\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nsetup_args = {}\n\nsetup_args.update(dict(\n name='paramnb',\n version=\"0.0.1\",\n description='Generate ipywidgets from Parameterized objects in the notebook',\n long_description=open('README.rst').read() if os.path.isfile('README.rst') else 'Consult README.rst',\n author= \"philippjfr\",\n author_email= \"philippjfr@continuum.io\",\n maintainer= \"philippjfr\",\n maintainer_email= \"philippjfr@continuum.io\",\n platforms=['Windows', 'Mac OS X', 'Linux'],\n packages = [\"paramnb\"],\n provides = [\"paramnb\"],\n))\n\n\nif __name__==\"__main__\":\n setup(**setup_args)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"405318034","text":"\n# Quadratic equation: ax**2 + bx + c = 0\n# a, b, c are provided by the user\n\nimport cmath\n\na = float(input('input a:'))\nb = float(input('input b:'))\nc = float(input('input c:'))\n\nd = b ** 2 - 4*a*c\n\nprint ('d=%d'%(d))\n\nsol1 = (-b-cmath.sqrt(d))/(2*a) #-3+0j\nsol2 = (-b+cmath.sqrt(d))/(2*a) #-2+0j\n\nprint ('result {0} and {1}'.format(sol1,sol2))\n\n\na,b,c = input('please input 3 numbers:').split()\na = float(a)\nb = float(b)\nc = float(c)\n\nd = (b**2) - (4*a*c)\n\nif a==0 and b==0 and c==0 :\n print ('infinitely many solutions!
')\nelif d >= 0:\n # real roots: (-b +/- sqrt(d)) / (2a)\n x1 = (-b - d**0.5)/(2*a)\n x2 = (-b + d**0.5)/(2*a)\n print ('result: %.2f,%.2f'%(x1,x2))\nelse:\n print ('no real roots!')","sub_path":"bee/二次方程.py","file_name":"二次方程.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"312554536","text":"#!/usr/bin/python3\n\nimport os, re, argparse, sys\n\n\ndef check_arg(args=None):\n parser = argparse.ArgumentParser(description='Script to open and manage OpenVPN sessions')\n parser.add_argument('-c', '--config', required = False, help='Config file to use to open a new VPN session', default=\"Madrid\")\n parser.add_argument('-k', '--kill', required = False, help='Terminate all current VPN sessions', action=\"store_true\")\n \n results = parser.parse_args(args)\n return (results.config,results.kill)\n\ndef main():\n streamed_out = os.popen(\"openvpn3 sessions-list\")\n output = streamed_out.read()\n\n if arg_kill or \"No sessions available\" not in output:\n sessions_str = output.replace('\\n', ' NEW_L').split(\" NEW_L NEW_L\")\n sessions = []\n print(f\"There are {len(sessions_str)} open sessions.\" if len(sessions_str) > 1 else \n f\"There is {len(sessions_str)} open session.\")\n\n for index, session_str in enumerate(sessions_str):\n session = {}\n\n session['path'] = re.search(\"Path: (\\S+) \", session_str).group(1).strip()\n session['config'] = re.search(\"Config name: (\\S+) \", session_str).group(1).strip()\n session['conn_status'] = re.search(\"Status: (.*).*$\", session_str).group(1).split('NEW_L')[0].strip()\n\n print(f\" Session {index + 1}: \")\n print(f\" - Path: {session['path']}\")\n print(f\" - Config: {session['config']}\")\n print(f\" - Status: {session['conn_status']}\")\n\n sessions.append(session)\n\n terminate = input(\"\\nDo you want to terminate those sessions to open a new one? [y/N]: \" if len(sessions_str) > 1 else \n \"\\nDo you want to terminate this session to open a new one? [y/N]: \")\n\n if \"y\" not in terminate.lower():\n return\n\n print(\"\")\n \n for index, session in enumerate(sessions):\n print(f\"Terminating session {index+1} using {session['config']}...\")\n os.system(f\"openvpn3 session-manage --path {session['path']} --disconnect\")\n print(\"\")\n\n\n if not arg_kill:\n create = input(f\"Do you want to create a new session using {arg_config}? 
[Y/n]: \")\n\n if \"n\" in create.lower():\n return\n\n print(f\"Openning new session using {arg_config}...\\n\")\n os.system(f\"openvpn3 session-start --config {arg_config}\")\n\nif __name__ == '__main__':\n arg_config,arg_kill = check_arg(sys.argv[1:])\n arg_config = f\"PS-{arg_config}\"\n main()\n","sub_path":"vpn.py","file_name":"vpn.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"396310117","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport argparse\nimport json\nimport subprocess\nimport hashlib\n\nclass JenkinJobConfig():\n\t'''Define variable'''\n\t###Change version name\n\tWORKSPACE = os.environ['WORKSPACE']\n\t\n\t#NEED REMOVE\n\t# WORKSPACE = '/Users/sgs/workspace/AppTeamS'\n\tfile_path = '{}/ProjectSettings/ProjectSettings.asset'.format(WORKSPACE)\n\told_value = os.popen('cat {} | grep bundleVersion'.format(file_path)).read()\n\tVERSION_NAME = os.environ['VERSION_NAME']\n\t#NEED REMOVE\n\t\n\tnew_value = ' bundleVersion: {}\\n'.format(VERSION_NAME)\n\told_content = os.popen('cat {}'.format(file_path)).read()\n\t# RELEASE_MODE = os.environ['RELEASE_MODE']\n\t\n\t\n\t\n\tVERSION_CODE = os.environ['VERSION_CODE']\n\tPLATFORM = os.environ['PLATFORM']\n\tRELEASE_MODE = os.environ['RELEASE_MODE']\n\tSERVER = os.environ['SERVER']\n\tBRANCH = os.environ['BRANCH']\n\tKEYCHAIN = os.environ['KEYCHAIN']\n\n\tdef __init__(self):\n\t\tself.workspace = 'WP_Unity'\n\t\tself.filepath = '/Assets'\n\t\tif JenkinJobConfig.PLATFORM == 'iOS':\n\t\t\tself.OUTPUT_FOLDER='iOS_Staging'\n\t\telif JenkinJobConfig.PLATFORM == 'Android':\n\t\t\tself.OUTPUT_FOLDER='Android_Staging'\n\t\t# self.OUTPUT_FOLDER = ''\n\t\t# self.option = sys.argv[1]\n\tdef VersionName(self):\n\t\t'''Modify version name'''\n\t\tprint('Start function VersionName')\n\t\tprint('Workspace: {} \\n File path: {}'.format(JenkinJobConfig.WORKSPACE, JenkinJobConfig.file_path))\n\t\tos.system('cat {} | grep bundleVersion'.format(JenkinJobConfig.file_path))\n\t\twith open(JenkinJobConfig.file_path,'w') as f:\n\t\n\t\t\tnew_content = JenkinJobConfig.old_content.replace(JenkinJobConfig.old_value,JenkinJobConfig.new_value)\n\t\t\tf.write(new_content)\n\t\tos.system('cat {} | grep bundleVersion'.format(JenkinJobConfig.file_path))\n\n\tdef EXTRACT_HOCKEY(self):\n\t\tprint('Start function EXTRACT_HOCKEY')\n\t\tif os.path.exists('hockey_output'):\n\t\t\tprint('Hockey output file exists')\n\t\t\twith open('hockey_output') as f:\n\t\t\t\ttext = f.read()\n\t\t\t\tcontent = json.loads(text)\n\t\t\t\tHockey_URL = content['public_url'] + '/app_versions/' + content['config_url'].split('/')[-1]\n\t\t\t\tprint('You can get hockey app link here: {}'.format(Hockey_URL))\n\t\t\tf = open('DOWNLOAD_LINK.txt','a')\n\t\t\tf.write('\\n')\n\t\t\tf.write('You can get hockey app link here: {}'.format(Hockey_URL))\n\t\t\tf.close()\n\t\telse:\n\t\t\tprint('Hockey output file does not exist') \n\n\tdef Prepare(self):\n\t\tNDK_VERSION = ''\n\t\tfilename='{}/testVar.properties'.format(JenkinJobConfig.WORKSPACE)\n\t\tprint('Start function Prepare')\n\t\t'''This commands need to be run before each platform build'''\n\t\tcommand_list = [\n\t\t#Grant permission to workspace folder\n\t\t'chmod -R 777 {}'.format(JenkinJobConfig.WORKSPACE),'chmod -R 777 /Users/sgs/Library/Android', \\\n\t\t#Reduce size\n\t\t'rm -rf {}/Assets/_hirez/ODB' .format(JenkinJobConfig.WORKSPACE), \\\n\t\t'rm -rf {}/Assets/_lowrez/ODB'.format(JenkinJobConfig.WORKSPACE), \\\n\t\t'rm -rf 
{}/Assets/_mhirez/ODB'.format(JenkinJobConfig.WORKSPACE), \\\n\t\t#Clean variable store file\n\t\t'rm -rf {}/testVar.properties'.format(JenkinJobConfig.WORKSPACE)]\n\n\t\tfor command in command_list:\n\t\t\tos.system('echo {}'.format(command))\n\t\t\tos.system('{}'.format(command))\n\t\tJOB_NAME = os.environ['JOB_NAME']\n\t\tif 'Unity2017' in JOB_NAME:\n\t\t\tNDK_VERSION = 'r13b'\n\t\telse:\n\t\t\tNDK_VERSION = 'r13b'\n\t\t# with open(filename,'w') as f:\n\t\t# \tf.write('NDK_VERSION={}'.format(NDK_VERSION) '\\n')\n\t\tos.system('/Users/sgs/GearInc-DevOps/DevOps/ndk {}'.format(NDK_VERSION))\n\tdef DisableBuildVersion(self):\n\t\tprint('Start function DisableBuildVersion')\n\t\told_content = os.popen('cat {}/Assets/Scenes/AssetLoaderScene.unity'.format(JenkinJobConfig.WORKSPACE)).read()\n\t\told_value = os.popen('cat {}/Assets/Scenes/AssetLoaderScene.unity | grep DisableBuildVersion'.format(JenkinJobConfig.WORKSPACE)).read()\n\t\t\n\t\tDisableBuildVersion = os.environ['DISABLE_BUILD_VERSION']\n\t\t\n\t\tif DisableBuildVersion == 'true':\n\t\t\tnew_value = ' DisableBuildVersion: 1\\n'\n\t\t\t\n\t\t\tnew_content = old_content.replace(old_value,new_value)\t\n\t\t\twith open('{}/Assets/Scenes/AssetLoaderScene.unity'.format(JenkinJobConfig.WORKSPACE),'w') as f:\n\t\t\t\tf.write(new_content)\t\n\t\t\n\n\t\telif DisableBuildVersion == 'false':\n\t\t\t\n\n\t\t\tnew_value = ' DisableBuildVersion: 0\\n'\n\t\t\tnew_content = old_content.replace(old_value,new_value)\t\n\t\t\twith open('{}/Assets/Scenes/AssetLoaderScene.unity'.format(JenkinJobConfig.WORKSPACE),'w') as f:\n\t\t\t\tf.write(new_content)\t\n\t\t\n\t\t\n\tdef VersionCode(self):\n\t\tprint('Start function VersionCode')\n\t\tfilename = '{}/BuildSettings/AndroidSettings.xml'.format(JenkinJobConfig.WORKSPACE)\n\t\told_content = os.popen('cat {}'.format(filename)).readlines()\n\t\ttrace_index = old_content[-2].find('value')\n\t\told_value = old_content[-2][trace_index:].strip().replace(' />','')\n\t\tnew_value = 'value=\"{}\"'.format(JenkinJobConfig.VERSION_CODE)\n\t\told_content_str = os.popen('cat {}'.format(filename)).read()\n\t\tnew_content = old_content_str.replace(old_value,new_value)\n\t\twith open(filename,'w') as f:\n\t\t\tf.write(new_content)\n\t\tos.system('cat {} | grep value'.format(filename))\n\n\tdef SaveVar(self):\n\t\tprint('Start function SaveVar')\n\t\tfilename='{}/testVar.properties'.format(JenkinJobConfig.WORKSPACE)\n\t\tif JenkinJobConfig.PLATFORM == 'iOS':\n\t\t\t# self.OUTPUT_FOLDER = 'iOS_Staging'\n\t\t\twith open(filename,'w') as f:\n\t\t\t\tf.write('APP_ID=bf7fabd50f2d9360c395f43b805c1ea0' '\\n' \\\n\t\t\t\t\t'BUILDSCRIPT=iOSBuildWhiplashScopelyAssetBundles' '\\n' \\\n\t\t\t\t\t'OUTPUT_FOLDER=iOS_Staging' '\\n' \\\n\t\t\t\t)\n\t\telif JenkinJobConfig.PLATFORM == 'Android':\n\t\t\t# self.OUTPUT_FOLDER = 'Android_Staging'\n\t\t\twith open(filename,'w') as f:\n\t\t\t\tf.write('APP_ID=36e0ccc5421dba3ddfd71d948e187871' '\\n' \\\n\t\t\t\t\t'BUILDSCRIPT=AndroidScopelySplitAssetBundles' '\\n' \\\n\t\t\t\t\t'OUTPUT_FOLDER=Android_Staging' '\\n' \\\n\t\t\t\t)\n\n\t\t# if JenkinJobConfig.RELEASE_MODE:\n\t\t\t# FILENAME='FILENAME={}-{}-{}-{}-release'.format(JenkinJobConfig.SERVER, JenkinJobConfig.VERSION_NAME, JenkinJobConfig.VERSION_CODE,\\\n\t\t\t # JenkinJobConfig.BRANCH)\n\t\t# else:\n\t\t\t# FILENAME='FILENAME={}-{}-{}-{}-debug'.format(JenkinJobConfig.SERVER, JenkinJobConfig.VERSION_NAME, JenkinJobConfig.VERSION_CODE,\\\n\t\t\t # JenkinJobConfig.BRANCH)\n\n\t\twith open(filename,'a') as 
f:\n\t\t\tf.write(\n\t\t\t\t'EXPORT_METHOD=development' '\\n' \\\n\t\t\t\t'TEAM_ID=C8JJ626MJR' '\\n' \\\n\t\t\t\t'PROFILE_NAME=DEV whiplash' '\\n' \\\n\t\t\t\t'CODESIGN_ID=iPhone Developer: Bao Phung (HKCD3D84D5)' '\\n' \\\n\t\t\t\t'PROVISION_FILE=\"DEV_whiplash.mobileprovision\"' '\\n')\n\t\t\t# f.write(FILENAME)\n\t\t\t# f.write('\\n')\n\t\t\t#f.write(PLATFORM_FILE)\n\n\tdef Packingipa(self):\n\t\tprint('Start function Packingipa')\n\t\t# OUTPUT_FOLDER=os.environ['OUTPUT_FOLDER']\n\t\tkeychain_path = '\"/Users/sgs/Library/Keychains/login.keychain\"'\n\t\tKEYCHAIN = JenkinJobConfig.KEYCHAIN\n\t\t#Unlock keychain\n\t\tos.system('security unlock-keychain -p {} {}'.format(KEYCHAIN, keychain_path))\n\t\t#packing ipa\n\t\tARCHIVE_PATH='{}/{}/build/Release-iphoneos/Unity-iPhone.xcarchive'.format(JenkinJobConfig.WORKSPACE,self.OUTPUT_FOLDER)\n\t\tEXPORT_PATH='{}/{}/build/Release-iphoneos/'.format(JenkinJobConfig.WORKSPACE,self.OUTPUT_FOLDER)\n\t\tPLIST='/Users/sgs/GearInc-DevOps/DevOps/plist/ExportOptions.plist'\n\t\tprint(ARCHIVE_PATH + '\\n' + EXPORT_PATH + '\\n' + PLIST)\n\n\t\tos.system('xcodebuild -exportArchive -archivePath {} -exportOptionsPlist {} -exportPath {}'.format(ARCHIVE_PATH,PLIST, EXPORT_PATH))\n\t\t#Resign ipa\n\n\t\tfastlane_command = 'fastlane sigh resign $WORKSPACE/{}/build/Release-iphoneos/Unity-iPhone.ipa --signing_identity \"iPhone Distribution: Scopely Inc\" -p \"/Users/sgs/Library/MobileDevice/Provisioning Profiles/Scopely_Enterprise_GearInc_new.mobileprovision\"'.format(self.OUTPUT_FOLDER)\n\t\tos.system(fastlane_command)\n\n\tdef RenameFile(self):\n\n\t\tdef BuildFileValues():\n\n\t\t\tBuild_File_Values = {'file location': '', 'file': ''}\n\t\t\tAPP_FILES = []\n\t\t\tAPP_FILE = ''\n\t\t\tif os.path.exists('{}/Android_Staging'.format(JenkinJobConfig.WORKSPACE)) == True:\n\n\t\t\t\tAPP_FILES = os.popen('find {}/Android_Staging -name \"*.apk\"'.format(JenkinJobConfig.WORKSPACE)).readlines()\n\t\t\t\n\t\t\telif os.path.exists('{}/iOS_Staging'.format(JenkinJobConfig.WORKSPACE)) == True:\n\t\t\t\t\n\t\t\t\tAPP_FILES = os.popen('find {}/iOS_Staging -name \"*.ipa\"'.format(JenkinJobConfig.WORKSPACE)).readlines()\n\n\t\t\tprint(APP_FILES)\n\t\t\tAPP_FILES = [i.strip() for i in APP_FILES]\n\t\t\t\n\t\t\tprint(APP_FILES)\n\t\t\tfor file in APP_FILES:\n\t\t\t\tif os.path.isfile(file) == True:\n\t\t\t\t\tAPP_FILE = file\n\t\t\t\n\t\t\tBuild_File_Values['file'] = APP_FILE\n\t\t\t\n\t\t\tAPP_FILE_LOCATION = '/'.join(APP_FILE.split('/')[0:-1])\n\t\t\t\n\t\t\tBuild_File_Values['file location'] = APP_FILE_LOCATION\n\t\t\t\t\n\t\t\tprint(Build_File_Values)\n\t\t\t\n\t\t\treturn Build_File_Values\n\n\t\tdef Rename():\n\t\t\tfilename='{}/testVar.properties'.format(JenkinJobConfig.WORKSPACE)\n\t\t\tFILENAME = ''\n\t\t\t#check apk or ipa to define Filename\n\t\t\tif JenkinJobConfig.PLATFORM == 'Android':\n\t\t\t\tif JenkinJobConfig.RELEASE_MODE == 'true':\n\t\t\t\t\tFILENAME='{}-{}-{}-{}-release.apk'.format(JenkinJobConfig.SERVER,JenkinJobConfig.VERSION_NAME,JenkinJobConfig.VERSION_CODE,JenkinJobConfig.BRANCH)\n\t\t\t\telse:\n\t\t\t\t\tFILENAME='{}-{}-{}-{}-debug.apk'.format(JenkinJobConfig.SERVER,JenkinJobConfig.VERSION_NAME,JenkinJobConfig.VERSION_CODE,JenkinJobConfig.BRANCH)\n\t\t\t\n\t\t\telif JenkinJobConfig.PLATFORM == 'iOS':\n\t\t\t\tif JenkinJobConfig.RELEASE_MODE == 
'true':\n\t\t\t\t\tFILENAME='{}-{}-{}-{}-release.ipa'.format(JenkinJobConfig.SERVER,JenkinJobConfig.VERSION_NAME,JenkinJobConfig.VERSION_CODE,JenkinJobConfig.BRANCH)\n\t\t\t\telse:\n\t\t\t\t\tFILENAME='{}-{}-{}-{}-debug.ipa'.format(JenkinJobConfig.SERVER,JenkinJobConfig.VERSION_NAME,JenkinJobConfig.VERSION_CODE,JenkinJobConfig.BRANCH)\n\t\t\t\n\t\t\tBuild_File_Values = BuildFileValues()\n\t\t\t#rename\n\t\t\tos.rename('{}'.format(Build_File_Values['file']),'{}/{}'.format(Build_File_Values['file location'],FILENAME))\n\t\t\t#write PLATFORM_FILE to var\n\t\t\tPLATFORM_FILE='{}/{}'.format(Build_File_Values['file location'],FILENAME)\n\t\t\twith open(filename,'a') as f:\n\t\t\t\tf.write('PLATFORM_FILE={}'.format(PLATFORM_FILE)) \n\t\t\t\tf.write('\\n')\n\n\t\tRename()\n\n\tdef UploadtoHockey(self):\n\t\tAPI_TOKEN = \"ba4cdb1cdc98425292ef9c1f61469565\"\n\t\tdef main():\n\t\t\t# parser = argparse.ArgumentParser(description='Upload a build to HockeyApp')\n\t\t\t# parser.add_argument(\"--file\", dest=\"file\", help=\"The binary file to upload (for uploading a single binary)\")\n\t\t\t# parser.add_argument(\"--notifyFlag\", dest=\"notifyFlag\", default=\"0\", help=\"Notify hockeyapp users by email that a new build is ready. 0 - Don't notify, 1 - Notify allowed only, 2 - Notify all\")\n\t\t\t# parser.add_argument(\"--releaseNote\", dest=\"releaseNote\", help=\"A release note to accompany the build\")\n\t\t\t# parser.add_argument(\"--status\", dest=\"releaseStatus\", default=\"1\", help=\"Release status - will the version be downloadable to testers. 1-Don't allow install, 2-Allow install\")\n\t\t\t# parser.add_argument(\"--appId\", dest=\"appId\", help=\"Application Id - if provided will use alternate /api/2/apps/APP_ID/app_versions/upload api for upload\")\n\t\t\t# parser.add_argument(\"--dryRun\", dest=\"dryRun\", default=\"0\", help=\"--dryrun 1 will perform all actions, but not upload to HockeyApp\")\n\t\t\t# args = parser.parse_args()\n\t\t\tbuildFile = os.environ['PLATFORM_FILE']\n\n\t\t\tupload_build(buildFile)\n\t\t\n\t\tdef get_file_hash(buildFile):\n\t\n\t\t\tblock_size=256*128\n\t\t\tsha = hashlib.sha1()\n\t\t\twith open(buildFile,'rb') as f:\n\t\t\t\tfor chunk in iter(lambda: f.read(block_size), b''):\n\t\t\t\t\tsha.update(chunk)\n\t\t\treturn sha.hexdigest()\n\n\n\t\tdef upload_build(buildFile):\n\t\n\t\t\tsha1digest = get_file_hash(buildFile)\n\t\t\t\n\n\t\t\tAPP_ID=os.environ['APP_ID']\n\n\t\t\tapiEndpoint = \"https://rink.hockeyapp.net/api/2/apps/\" + APP_ID + \"/app_versions/upload\"\n\t\t\t\n\t\t\tJOB_NAME = os.environ['JOB_NAME']\n\t\t\tUPSTREAM_CHANGELIST = os.environ['UPSTREAM_CHANGELIST']\n\t\t\tSCOPELY_MODE = os.environ['SCOPELY_MODE']\n\t\t\tdefault_releasenote = \"### SGS Release ###\" +'\\n\\n' + \\\n\t\t\t\t\t\t\t\t\t\"**SGS Jenkins Job:** {}\".format(JOB_NAME) + '\\n\\n' + \\\n\t\t\t\t\t\t\t\t\t\"**SGS AssetBundle:** {}\".format(UPSTREAM_CHANGELIST) + '\\n\\n' + \\\n\t\t\t\t\t\t\t\t\t\"**SGS Server:** {}\".format(JenkinJobConfig.SERVER) + '\\n\\n' + \\\n\t\t\t\t\t\t\t\t\t\"**SGS Branch:** {}\".format(JenkinJobConfig.BRANCH) + '\\n\\n' + \\\n\t\t\t\t\t\t\t\t\t\"**Release Mode:** {}\".format(JenkinJobConfig.RELEASE_MODE) + '\\n\\n' + \\\n\t\t\t\t\t\t\t\t\t\"**ScopelyPlatform Mode:** {}\".format(SCOPELY_MODE) + '\\n\\n\\n'\n\n\t\t\treleaseNotes = default_releasenote + \"\\n\\nSHA1 Digest: \" + sha1digest\n\n\t\t\tmappingType = 'dsym'\n\n\t\t\t\n\t\t\tprint('Uploading {}'.format(buildFile))\n\t\t\tresponse = 
subprocess.check_output([\n\t\t\t\t'curl',\n\t\t\t\tapiEndpoint,\n\t\t\t\t'-F', 'notes_type=1',\n\t\t\t\t'-F', 'status=2',\n\t\t\t\t'-F', 'notify=0',\n\t\t\t\t'-F', 'notes={}'.format(releaseNotes),\n\t\t\t\t'-F', 'ipa=@{}'.format(buildFile),\n\t\t\t\t'-H', 'X-HockeyAppToken:{}'.format(API_TOKEN)\n\t\t\t\t])\n\t\t\twith open('{}/hockey_output'.format(JenkinJobConfig.WORKSPACE),'wb') as f:\n\t\t\t\tf.write(response)\n\n\t\tmain()\n\n'''Connect to JenkinJobConfig Object'''\nJenkinConfig = JenkinJobConfig()\n\n'''Prepare workspace before build'''\nif 'PREPARE' == sys.argv[1]:\n\tJenkinConfig.Prepare()\n\n'''Modify version name'''\nif 'VersionName' == sys.argv[1]:\n\t\n\tJenkinConfig.VersionName()\n\n'''Extract hockey_app link'''\nif 'EXTRACT_HOCKEY' == sys.argv[1]:\n\t\n\tJenkinConfig.EXTRACT_HOCKEY()\n\n'''Disable Build Version'''\nif 'DisableBuildVersion' == sys.argv[1]:\n\n\tJenkinConfig.DisableBuildVersion()\n\n'''Modify version code'''\nif 'VersionCode' == sys.argv[1]:\n\tJenkinConfig.VersionCode()\n\nif 'SaveVar' == sys.argv[1]:\n\tJenkinConfig.SaveVar()\n\nif 'Packingipa' == sys.argv[1]:\n\tJenkinConfig.Packingipa()\n\nif 'RenameFile' == sys.argv[1]:\n\tJenkinConfig.RenameFile()\n\nif 'UploadHockey' == sys.argv[1]:\n\tJenkinConfig.UploadtoHockey()\n","sub_path":"DevOps/jenkinjob.py","file_name":"jenkinjob.py","file_ext":"py","file_size_in_byte":13382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"616870828","text":"class Autocomp:\n def __init__(self, options):\n self.options = sorted(options)\n self.matched = []\n def complete(self, text, state):\n if state == 0:\n if text:\n self.matches = [s for s in self.options if s and s.startswith(text)]\n else:\n self.matches = self.options[:]\n try:\n return self.matches[state]\n except IndexError:\n return None\n","sub_path":"src/core/completer.py","file_name":"completer.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"220746914","text":"# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance\n# with the License. A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"LICENSE.txt\" file accompanying this file. This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n# OR CONDITIONS OF ANY KIND, express or implied. 
See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom datetime import datetime, timedelta, timezone\nfrom types import SimpleNamespace\n\nimport boto3\nimport botocore\nimport clusterstatusmgtd\nimport pytest\nfrom assertpy import assert_that\nfrom clusterstatusmgtd import (\n ClusterStatusManager,\n ClusterstatusmgtdConfig,\n ComputeFleetStatus,\n ComputeFleetStatusManager,\n _sleep_remaining_loop_time,\n)\n\n\n@pytest.mark.parametrize(\n \"loop_start_time, loop_end_time, loop_total_time, expected_sleep_time\",\n [\n (\n datetime(2020, 1, 1, 0, 0, 30, tzinfo=timezone.utc),\n datetime(2020, 1, 1, 0, 0, 30, tzinfo=timezone.utc),\n 60,\n 60,\n ),\n (\n datetime(2020, 1, 1, 0, 0, 30, tzinfo=timezone.utc),\n datetime(2020, 1, 1, 0, 1, 00, tzinfo=timezone.utc),\n 60,\n 30,\n ),\n (\n datetime(2020, 1, 1, 0, 0, 30, tzinfo=timezone.utc),\n datetime(2020, 1, 1, 0, 1, 30, tzinfo=timezone.utc),\n 60,\n 0,\n ),\n (\n datetime(2020, 1, 1, 0, 0, 30, tzinfo=timezone.utc),\n datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc),\n 60,\n 0,\n ),\n (\n datetime(2020, 1, 1, 1, 0, 0, tzinfo=timezone(timedelta(hours=1))),\n datetime(2020, 1, 1, 0, 0, 30, tzinfo=timezone.utc),\n 60,\n 30,\n ),\n (\n datetime(2020, 1, 1, 1, 0, 0),\n datetime(2020, 1, 1, 0, 0, 30, tzinfo=timezone.utc),\n 60,\n None, # can't assert this with naive timezone since the value depends on the system timezone\n ),\n ],\n)\ndef test_sleep_remaining_loop_time(mocker, loop_start_time, loop_end_time, loop_total_time, expected_sleep_time):\n sleep_mock = mocker.patch(\"time.sleep\")\n datetime_now_mock = mocker.MagicMock()\n datetime_now_mock.now = mocker.MagicMock(return_value=loop_end_time, spec=datetime.now)\n mocker.patch(\"clusterstatusmgtd.datetime\", datetime_now_mock)\n\n _sleep_remaining_loop_time(loop_total_time, loop_start_time)\n\n if expected_sleep_time:\n sleep_mock.assert_called_with(expected_sleep_time)\n elif expected_sleep_time == 0:\n sleep_mock.assert_not_called()\n datetime_now_mock.now.assert_called_with(tz=timezone.utc)\n\n\ndef test_run_command():\n pass\n\n\ndef test_write_json_to_file():\n # tested in test_call_update_event\n pass\n\n\nclass TestComputeFleetStatus:\n \"\"\"Class to test ComputeFleetStatus.\"\"\"\n\n def test_transform_compute_fleet_data(self):\n \"\"\"Tested in test_call_update_event.\"\"\"\n pass\n\n def test_is_start_in_progress(self):\n \"\"\"Tested in test_manage_cluster_status.\"\"\"\n pass\n\n def test_is_stop_in_progress(self):\n \"\"\"Tested in test_manage_cluster_status.\"\"\"\n pass\n\n def test_is_protected_status(self):\n \"\"\"Not tested, not used.\"\"\"\n pass\n\n\nclass TestComputeFleetStatusManager:\n \"\"\"Class to test TestComputeFleetStatusManager.\"\"\"\n\n @pytest.fixture\n def compute_fleet_status_manager(self, mocker):\n \"\"\"Fixture for ComputeFleetStatusManager.\"\"\"\n status_manager = ComputeFleetStatusManager(\"table\", botocore.config.Config(), \"us-east-1\")\n mocker.patch.object(status_manager, \"_table\")\n\n return status_manager\n\n @pytest.mark.parametrize(\n \"get_item_response, expected_exception, expected_status\",\n [\n ({\"Item\": {\"Id\": \"COMPUTE_FLEET\", \"Data\": {\"status\": \"RUNNING\"}}}, None, {\"status\": \"RUNNING\"}),\n ({\"NoData\": \"NoValue\"}, ComputeFleetStatusManager.FleetDataNotFoundError, Exception()),\n ],\n ids=[\"success\", \"exception\"],\n )\n def test_get_status(self, compute_fleet_status_manager, get_item_response, expected_exception, expected_status):\n \"\"\"Test 
get_status method.\"\"\"\n if isinstance(expected_status, Exception):\n compute_fleet_status_manager._table.get_item.side_effect = get_item_response\n with pytest.raises(expected_exception):\n compute_fleet_status_manager.get_status()\n else:\n compute_fleet_status_manager._table.get_item.return_value = get_item_response\n status = compute_fleet_status_manager.get_status()\n assert_that(status).is_equal_to(expected_status)\n\n compute_fleet_status_manager._table.get_item.assert_called_with(\n ConsistentRead=True, Key={\"Id\": ComputeFleetStatusManager.DB_KEY}\n )\n\n @pytest.mark.parametrize(\n \"expected_status, expected_update_item, expected_exception\",\n [\n (\n {\"status\": \"RUNNING\"},\n {\"Attributes\": {\"Data\": {\"status\": \"RUNNING\"}}},\n None,\n ),\n (\n boto3.client(\"dynamodb\", region_name=\"us-east-1\").exceptions.ConditionalCheckFailedException(\n {\"Error\": {}}, {}\n ),\n {},\n ComputeFleetStatusManager.ConditionalStatusUpdateFailedError,\n ),\n (Exception(), {}, Exception),\n ],\n ids=[\"success\", \"conditional_check_failed\", \"exception\"],\n )\n def test_update_status(\n self, compute_fleet_status_manager, expected_status, expected_update_item, expected_exception\n ):\n \"\"\"Test update_status method.\"\"\"\n if isinstance(expected_status, Exception):\n compute_fleet_status_manager._table.update_item.side_effect = expected_status\n with pytest.raises(expected_exception):\n compute_fleet_status_manager.update_status(ComputeFleetStatus.STARTING, ComputeFleetStatus.RUNNING)\n else:\n compute_fleet_status_manager._table.update_item.return_value = expected_update_item\n actual_status = compute_fleet_status_manager.update_status(\n ComputeFleetStatus.STARTING, ComputeFleetStatus.RUNNING\n )\n assert_that(actual_status).is_equal_to(expected_status)\n\n\nclass TestClusterstatusmgtdConfig:\n \"\"\"Class to test ClusterstatusmgtdConfig.\"\"\"\n\n @pytest.mark.parametrize(\n (\"config_file\", \"expected_attributes\"),\n [\n (\n \"default.conf\",\n {\n \"cluster_name\": \"test\",\n \"region\": \"us-east-2\",\n \"_boto3_config\": {\"retries\": {\"max_attempts\": 5, \"mode\": \"standard\"}},\n \"loop_time\": 60,\n \"logging_config\": os.path.join(\n os.path.dirname(clusterstatusmgtd.__file__), \"clusterstatusmgtd_logging.conf\"\n ),\n \"dynamodb_table\": \"table-name\",\n \"computefleet_status_path\": \"/opt/parallelcluster/shared/computefleet-status.json\",\n \"update_event_timeout_minutes\": 15,\n },\n ),\n (\n \"all_options.conf\",\n {\n \"cluster_name\": \"test-2\",\n \"region\": \"us-east-1\",\n \"_boto3_config\": {\n \"retries\": {\"max_attempts\": 10, \"mode\": \"standard\"},\n \"proxies\": {\"https\": \"https://fake.proxy\"},\n },\n \"loop_time\": 30,\n \"logging_config\": \"/my/logging/config\",\n \"dynamodb_table\": \"another-table\",\n \"computefleet_status_path\": \"/alternative/status.json\",\n \"update_event_timeout_minutes\": 5,\n },\n ),\n ],\n ids=[\"default\", \"all_options\"],\n )\n def test_config_parsing(self, config_file, expected_attributes, test_datadir):\n \"\"\"Test config_parsing method.\"\"\"\n sync_config = ClusterstatusmgtdConfig(test_datadir / config_file)\n for key in expected_attributes:\n assert_that(sync_config.__dict__.get(key)).is_equal_to(expected_attributes.get(key))\n\n def test_config_comparison(self, test_datadir):\n \"\"\"Test configs comparison.\"\"\"\n config = test_datadir / \"config.conf\"\n config_modified = test_datadir / \"config_modified.conf\"\n\n 
assert_that(ClusterstatusmgtdConfig(config)).is_equal_to(ClusterstatusmgtdConfig(config))\n assert_that(ClusterstatusmgtdConfig(config)).is_not_equal_to(ClusterstatusmgtdConfig(config_modified))\n\n\n@pytest.fixture(name=\"initialize_compute_fleet_status_manager_mock\")\ndef fixture_initialize_compute_fleet_status_manager_mock(mocker):\n compute_fleet_status_manager_mock = mocker.Mock(spec=ComputeFleetStatusManager)\n compute_fleet_status_manager_mock.get_status.return_value = ComputeFleetStatus.RUNNING\n compute_fleet_status_manager_mock.COMPUTE_FLEET_STATUS_ATTRIBUTE = (\n ComputeFleetStatusManager.COMPUTE_FLEET_STATUS_ATTRIBUTE\n )\n return mocker.patch.object(\n ClusterStatusManager,\n \"_initialize_compute_fleet_status_manager\",\n spec=ClusterStatusManager._initialize_compute_fleet_status_manager,\n return_value=compute_fleet_status_manager_mock,\n )\n\n\nclass TestClusterStatusManager:\n \"\"\"Class to test ClusterStatusManager.\"\"\"\n\n def test_set_config(self, initialize_compute_fleet_status_manager_mock):\n \"\"\"Test set_config method.\"\"\"\n initial_config = SimpleNamespace(some_key_1=\"some_value_1\", some_key_2=\"some_value_2\")\n updated_config = SimpleNamespace(some_key_1=\"some_value_1\", some_key_2=\"some_value_2_changed\")\n\n clusterstatus_manager = ClusterStatusManager(initial_config)\n assert_that(clusterstatus_manager._config).is_equal_to(initial_config)\n clusterstatus_manager.set_config(initial_config)\n assert_that(clusterstatus_manager._config).is_equal_to(initial_config)\n clusterstatus_manager.set_config(updated_config)\n assert_that(clusterstatus_manager._config).is_equal_to(updated_config)\n\n assert_that(initialize_compute_fleet_status_manager_mock.call_count).is_equal_to(2)\n\n @pytest.mark.parametrize(\n \"get_status_response, fallback, expected_fleet_status\",\n [\n ({\"status\": \"RUNNING\"}, None, ComputeFleetStatus.RUNNING),\n (\n {},\n ComputeFleetStatus.STOPPED,\n ComputeFleetStatus.STOPPED,\n ),\n (\n Exception,\n ComputeFleetStatus.STOPPED,\n ComputeFleetStatus.STOPPED,\n ),\n ],\n ids=[\"success\", \"empty_response\", \"exception\"],\n )\n def test_get_compute_fleet_status(\n self, initialize_compute_fleet_status_manager_mock, get_status_response, fallback, expected_fleet_status\n ):\n \"\"\"Test get_compute_fleet_status method.\"\"\"\n config = SimpleNamespace(some_key_1=\"some_value_1\", some_key_2=\"some_value_2\")\n clusterstatus_manager = ClusterStatusManager(config)\n\n if get_status_response is Exception:\n initialize_compute_fleet_status_manager_mock().get_status.side_effect = get_status_response\n else:\n initialize_compute_fleet_status_manager_mock().get_status.return_value = get_status_response\n\n actual_fleet_status = clusterstatus_manager._get_compute_fleet_status(fallback)\n assert_that(actual_fleet_status).is_equal_to(expected_fleet_status)\n\n @pytest.mark.parametrize(\n \"new_status, new_fleet_data, expected_exception\",\n [\n (\n ComputeFleetStatus.RUNNING,\n {\"status\": \"RUNNING\"},\n None,\n ),\n (\n boto3.client(\"dynamodb\", region_name=\"us-east-1\").exceptions.ConditionalCheckFailedException(\n {\"Error\": {}}, {}\n ),\n None,\n ComputeFleetStatusManager.ConditionalStatusUpdateFailedError,\n ),\n (Exception(), None, Exception),\n ],\n ids=[\"success\", \"conditional_check_failed\", \"exception\"],\n )\n def test_update_compute_fleet_status(\n self, initialize_compute_fleet_status_manager_mock, new_status, new_fleet_data, expected_exception\n ):\n \"\"\"Test update_compute_fleet_status method.\"\"\"\n config = 
SimpleNamespace(some_key_1=\"some_value_1\", some_key_2=\"some_value_2\")\n clusterstatus_manager = ClusterStatusManager(config)\n\n if isinstance(new_status, Exception):\n initialize_compute_fleet_status_manager_mock().update_status.side_effect = expected_exception\n with pytest.raises(expected_exception):\n clusterstatus_manager._update_compute_fleet_status(new_status)\n else:\n initialize_compute_fleet_status_manager_mock().update_status.return_value = new_fleet_data\n clusterstatus_manager._update_compute_fleet_status(new_status)\n assert_that(clusterstatus_manager._compute_fleet_data).is_equal_to(new_fleet_data)\n assert_that(clusterstatus_manager._compute_fleet_status).is_equal_to(new_status)\n\n @pytest.mark.parametrize(\n \"status, translated_status, exception\",\n [\n (\n {\"status\": \"STOPPING\"},\n '{\"status\": \"STOP_REQUESTED\"}',\n None,\n ),\n (\n {\"status\": \"STARTING\"},\n '{\"status\": \"START_REQUESTED\"}',\n None,\n ),\n (\n {\"status\": \"WRONG\"},\n '{\"status\": \"UNKNOWN\"}',\n None,\n ),\n (\n {},\n '{\"status\": \"UNKNOWN\"}',\n None,\n ),\n (\n None,\n '{\"status\": \"UNKNOWN\"}',\n Exception(),\n ),\n (\n {\"status\": \"STOPPING\"},\n '{\"status\": \"STOP_REQUESTED\"}',\n Exception(),\n ),\n ],\n ids=[\"stopping\", \"starting\", \"unknown_status\", \"empty_status\", \"no_status\", \"run_command_exception\"],\n )\n @pytest.mark.usefixtures(\"initialize_compute_fleet_status_manager_mock\")\n def test_call_update_event(self, mocker, status, translated_status, exception):\n \"\"\"Test call_update_event method.\"\"\"\n computeflee_json_path = \"/path/to/compute_fleet.json\"\n cinc_log_file = \"/var/log/chef-client.log\"\n cmd = (\n \"sudo cinc-client \"\n \"--local-mode \"\n \"--config /etc/chef/client.rb \"\n \"--log_level auto \"\n f\"--logfile {cinc_log_file} \"\n \"--force-formatter \"\n \"--no-color \"\n \"--chef-zero-port 8889 \"\n \"--json-attributes /etc/chef/dna.json \"\n \"--override-runlist aws-parallelcluster-entrypoints::update_computefleet_status\"\n )\n\n config = SimpleNamespace(computefleet_status_path=computeflee_json_path, update_event_timeout_minutes=1)\n clusterstatus_manager = ClusterStatusManager(config)\n clusterstatus_manager._compute_fleet_data = status\n run_command_mock = mocker.patch(\"clusterstatusmgtd._run_command\")\n if isinstance(exception, Exception):\n run_command_mock.side_effect = exception\n with pytest.raises(ClusterStatusManager.ClusterStatusUpdateEventError):\n clusterstatus_manager._call_update_event()\n else:\n file_writer_mock = mocker.mock_open()\n mocker.patch(\"clusterstatusmgtd.open\", file_writer_mock)\n\n clusterstatus_manager._call_update_event()\n\n file_writer_mock.assert_called_once_with(computeflee_json_path, \"w\", encoding=\"utf-8\")\n file_writer_mock().write.assert_called_once_with(translated_status)\n run_command_mock.assert_called_once_with(cmd, 1)\n\n def test_update_status(self):\n \"\"\"Tested in test_manage_cluster_status.\"\"\"\n pass\n\n @pytest.mark.parametrize(\n \"compute_fleet_initial_status, compute_fleet_transitions\",\n [\n (ComputeFleetStatus.RUNNING, []),\n (ComputeFleetStatus.STOP_REQUESTED, [ComputeFleetStatus.STOPPING, ComputeFleetStatus.STOPPED]),\n (ComputeFleetStatus.STOPPING, [ComputeFleetStatus.STOPPED]),\n (ComputeFleetStatus.STOPPED, []),\n (ComputeFleetStatus.START_REQUESTED, [ComputeFleetStatus.STARTING, ComputeFleetStatus.RUNNING]),\n (ComputeFleetStatus.STARTING, [ComputeFleetStatus.RUNNING]),\n ],\n )\n def test_manage_cluster_status(\n self,\n mocker,\n 
initialize_compute_fleet_status_manager_mock,\n compute_fleet_initial_status,\n compute_fleet_transitions,\n ):\n \"\"\"Test manage_cluster_status method.\"\"\"\n config = SimpleNamespace(computefleet_status_path=\"/path/to/fleet.json\", update_event_timeout_minutes=1)\n clusterstatus_manager = ClusterStatusManager(config)\n update_compute_fleet_status_mocked = initialize_compute_fleet_status_manager_mock().update_status\n get_compute_fleet_status_mocked = mocker.patch.object(\n clusterstatus_manager, \"_get_compute_fleet_status\", return_value=compute_fleet_initial_status\n )\n call_update_event_mocked = mocker.patch.object(clusterstatus_manager, \"_call_update_event\")\n\n clusterstatus_manager.manage_cluster_status()\n\n get_compute_fleet_status_mocked.assert_called_once()\n if compute_fleet_transitions:\n call_update_event_mocked.assert_called_once()\n assert_that(update_compute_fleet_status_mocked.call_count).is_equal_to(len(compute_fleet_transitions))\n else:\n call_update_event_mocked.assert_not_called()\n update_compute_fleet_status_mocked.assert_not_called()\n\n @pytest.mark.usefixtures(\"initialize_compute_fleet_status_manager_mock\")\n def test_manage_cluster_status_concurrency(self, mocker, caplog):\n \"\"\"Test manage_cluster_status method, in case of concurrency.\"\"\"\n config = SimpleNamespace(computefleet_status_path=\"/path/to/fleet.json\", update_event_timeout_minutes=1)\n clusterstatus_manager = ClusterStatusManager(config)\n mocker.patch.object(\n clusterstatus_manager, \"_get_compute_fleet_status\", return_value=ComputeFleetStatus.STOP_REQUESTED\n )\n mocker.patch.object(\n clusterstatus_manager,\n \"_update_compute_fleet_status\",\n side_effect=ComputeFleetStatusManager.ConditionalStatusUpdateFailedError,\n )\n\n clusterstatus_manager.manage_cluster_status()\n\n assert_that(caplog.text).contains(\"Cluster status was updated while handling a transition\")\n assert_that(clusterstatus_manager._get_compute_fleet_status.call_count).is_equal_to(1)\n assert_that(clusterstatus_manager._update_compute_fleet_status.call_count).is_equal_to(1)\n","sub_path":"test/unit/clusterstatusmgtd/test_clusterstatusmgtd.py","file_name":"test_clusterstatusmgtd.py","file_ext":"py","file_size_in_byte":19335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"425817629","text":"from flask import Flask, render_template, request\nfrom flask import jsonify\nimport json\nimport file_handler\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef show_form():\n return render_template('index.html')\n\n@app.route(\"/write\", methods=['POST'])\ndef submit():\n each_question_detail = dict()\n\n question_id = request.form['question_id']\n md_test = request.form['md_test']\n points = request.form['points']\n difficulty = request.form['difficulty']\n type_ = request.form['type_']\n time = request.form['time']\n\n each_question_detail[\"question_id\"] = question_id\n each_question_detail[\"md_test\"] = md_test\n each_question_detail[\"points\"] = points\n each_question_detail[\"difficulty\"] = difficulty\n each_question_detail[\"type_\"] = type_\n each_question_detail[\"time\"] = time\n\n file_handler.write_to_file(each_question_detail)\n \n return render_template('index.html')\n\n@app.route(\"/read_q\", methods=['GET'])\ndef read_questions():\n return render_template('write.html', message=file_handler.read_from_file())\n\n@app.route(\"/read_a/<user>\",methods=[\"GET\"])\ndef read_answers(user):\n return render_template('read_answers.html', 
user_id=file_handler.read_answers_from_csv_file(), question_detail=file_handler.read_from_file(), user=user)\n \n\n \n\n\nif __name__ == '__main__':\n app.run(debug = True)\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"595460483","text":"# Check whether a file exists\n\nfrom pathlib import Path\nimport os\n\n# 1. Use a try/except block to check whether the file exists (Python 2+)\n\n\ndef readFileWithTryExcept(filePath: str):\n try:\n with open(filePath, 'r') as file:\n print('file', file)\n except FileNotFoundError:\n print('%s not exists' % (filePath))\n\n\n# readFileWithTryExcept('src/list-element-add.py')\n\n# 2. Use os.path to check whether the file exists (Python 2+)\n\n\ndef readFileWithOS(filePath: str):\n exists = os.path.isfile(filePath)\n if exists:\n with open(filePath) as file:\n print('file', file)\n else:\n print('%s not exists' % (filePath))\n\n\n# readFileWithOS('src/list-element-add.py')\n\n# 3. Use a Path object to check whether the file exists (Python 3.4+)\n\ndef readFileWithPath(filePath: str):\n file = Path(filePath)\n if file.is_file():\n # print the Path itself; readlink() is only valid for symlinks\n print('file', file)\n else:\n print('%s not exists' % (filePath))\n\n\nreadFileWithPath('src/list-element-add.py')\n","sub_path":"src/ckeck-file-exists.py","file_name":"ckeck-file-exists.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"570928181","text":"from sympy import to_dnf\nfrom copy import copy\nimport pickle\n\ndef dot(x,y):\n x*= 1000\n y*=1000\n return (x*y)/1000000\n\ndef rest(x,y):\n x*=1000\n y*=1000\n return (x-y)/1000\n\nclass fuzzy_logic:\n def __init__(self):\n self.CIJ = {}\n# between 0 and 1\n def c_ij(self,i,j,documents):\n try:\n return self.CIJ[(i,j)]\n except:\n return self.cij(i,j,documents)\n \n def cij(self,i,j,documents):\n nij = 0\n ni = 0\n nj = 0\n for doc in documents:\n if doc[i] == 1:\n ni += 1\n if doc[j] == 1:\n nj+= 1\n nij+= 1\n elif doc[j] == 1:\n nj+= 1\n value = nij/(ni+nj-nij)\n value = round(value,3)\n self.CIJ[(i,j)] = value\n self.CIJ[(j,i)] = value\n if value > 1: raise Exception(\"i: {} j:{}, ni : {},nj: {},nij: {}\".format(i,j,ni, nj, nij))\n return value\n\n # between 0 and 1\n def term_fuzzy(self,term,doc,documents):\n product = 1\n for word in doc.keys():\n if doc[word] == 1:\n product = dot(rest(1, self.c_ij(term,word,documents)),product)\n product = round(product,3)\n return rest(1,product)\n\n\n def get_terms(self,Query,neg=True):\n splits = [\"|\",\" \",\"&\",\"(\",\")\"]\n if neg: splits.append(\"~\")\n for i in splits:\n Query = Query.replace(i,\" \")\n return Query.split()\n # between 0 and 1\n def rank_cc(self,cc,documents,Q_t,d_index):\n query_product = 1\n x =self.get_fdnf(self.get_terms(cc,False),Q_t,0)\n for component in x:\n fdnf_product = 1\n for term in component:\n if term[0] == \"~\":\n fdnf_product= dot(rest(1,self.term_fuzzy(term[1:],documents[d_index],documents)),fdnf_product)\n \n else:\n fdnf_product = dot(self.term_fuzzy(term,documents[d_index],documents),fdnf_product)\n fdnf_product = round(fdnf_product,3)\n query_product = dot(rest(1, fdnf_product),query_product)\n query_product = round(query_product,3)\n return query_product\n\n\n def get_fdnf(self,cc,Q_t,index):\n if index == len(Q_t): return [copy(cc)]\n term = Q_t[index]\n neg_t = \"~\"+term\n if neg_t in cc or term in cc:\n return self.get_fdnf(cc,Q_t,index+1)\n else:\n cc.append(neg_t)\n l = self.get_fdnf(cc,Q_t,index+1)\n cc.pop()\n cc.append(term)\n r= l + 
self.get_fdnf(cc,Q_t,index+1)\n cc.pop()\n return r\n \n\n # between 0 and 1\n def rank_Cdnf(self,Query,documents,d_index):\n Q_t = self.get_terms(Query)\n Q = str(to_dnf(Query,simplify=True))\n Q = Q.split(\"|\")\n product = 1\n \n for cc in Q:\n product = dot(self.rank_cc(cc,documents,Q_t,d_index),product)\n product = round(product,3)\n return rest(1,product)\n\n def rank(self,Query,documents):\n ranks = []\n for i,_ in enumerate(documents):\n ranks.append(self.rank_Cdnf(Query,documents,i))\n return sorted([(ranks[i],d.name) for i,d in enumerate(documents)],key=lambda x: x[0],reverse=True)\n\ndef rank(Query,documents,path):\n try:\n # pickle files must be opened in binary mode\n with open(path+\"/\"+\"fuzzy.pickle\",\"rb\") as fd:\n ranker = pickle.load(fd)\n except:\n ranker = fuzzy_logic()\n rnk = ranker.rank(Query,documents)\n with open(path+\"/\"+\"fuzzy.pickle\",\"wb+\") as fd:\n pickle.dump(ranker,fd)\n return rnk\ndef term_fuzzy(term,doc,documents,path):\n try:\n with open(path+\"/\"+\"fuzzy.pickle\",\"rb\") as fd:\n ranker = pickle.load(fd)\n except:\n ranker = fuzzy_logic()\n return ranker.term_fuzzy(term,doc,documents)\n\n# a =\"A & (C | ~D)\"\n# documents = [{\"A\": 0, \"B\":0, \"C\":1, \"D\": 0},\n# {\"A\": 1, \"B\":1, \"C\":1, \"D\": 0},\n# {\"A\": 1, \"B\":0, \"C\":0, \"D\": 1} \n# ]\n# b = rank_Cdnf(a,documents,0)\n# print(b)\n# print(get_fdnf(get_terms(\"A\",False),[\"A\",\"D\",\"C\"], 0))","sub_path":"FlaskApp/fuzzy_model/Fuzzy.py","file_name":"Fuzzy.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"209533962","text":"from setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\nimport re\n\nhere = path.abspath(path.dirname(__file__))\n\nversion = re.search(\n '^__version__\\\s*=\\\s*\"(.*)\"',\n open('edgar/__init__.py').read(),\n re.M\n ).group(1)\n\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name = 'edgar',\n packages = find_packages(), # this must be the same as the name above\n version = version,\n description = 'Scrape data from SEC\\'s EDGAR',\n long_description = long_description,\n author = 'Joey Sham',\n author_email = 'sham.joey@gmail.com',\n url = 'https://github.com/joeyism/py-edgar', # use the URL to the github repo\n download_url = 'https://github.com/joeyism/py-edgar/archive/{}.tar.gz'.format(version),\n keywords = ['edgar', 'sec'], \n install_requires = ['requests', 'lxml'],\n classifiers = [],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"595253839","text":"#!/usr/bin/env python\n# _*_ coding:utf8 _*_\n\nimport subprocess\nimport socket\nimport struct\nimport json\nssh_cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nssh_cli.connect((\"127.0.0.1\", 8001))\n# ssh_cli.connect((\"192.168.1.66\", 9001))\nwhile True:\n command = input(\">>input command:\").strip()\n if not command:\n continue\n if command == 'q':\n break\n ssh_cli.send(command.encode('utf-8'))\n # 1. Get the length of the header\n head_len = struct.unpack('i', ssh_cli.recv(4))[0]\n print('head length', head_len)\n # 2. Get the header dict\n head_bytes = ssh_cli.recv(head_len)\n head_json = head_bytes.decode('utf-8')\n head_dic = json.loads(head_json)\n print('head_dic', head_dic)\n # 3. 
Get the data length from the header dict\n data_size = head_dic['data_size']\n print('data size:', data_size)\n recv_size = 0\n recv_data = b''\n while recv_size < data_size:\n data = ssh_cli.recv(1024)\n recv_size += len(data)\n recv_data += data\n\n print(len(recv_data))\n # print(recv_data.decode(\"utf-8\"))\n print(recv_data.decode(\"gbk\"))\n\nssh_cli.close()\n\n","sub_path":"python_full_statck/socket_study/多个客户端同时访问服务器/client3.py","file_name":"client3.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"276436396","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy import special\nfrom matplotlib.colors import LogNorm\nimport matplotlib as mpl\nlabel_size = 7\nmpl.rcParams['ytick.labelsize'] = label_size\n\n#Problem 1\ndef anscombe():\n ansarr = np.load('anscombe.npy')\n one_x = ansarr[:, 0]\n one_y = ansarr[:, 1]\n two_x = ansarr[:, 2]\n two_y = ansarr[:, 3]\n three_x = ansarr[:, 4]\n three_y = ansarr[:, 5]\n four_x = ansarr[:, 6]\n four_y = ansarr[:, 7]\n x = np.linspace(0,20,100000)\n y = 1/2*x*1.0 + 3\n\n plt.subplot(221)\n plt.title(\"I\")\n plt.plot(one_x, one_y, \"ko\", markersize = 5)\n plt.plot(x, y)\n\n plt.subplot(222)\n plt.title(\"II\")\n plt.plot(two_x, two_y, \"ko\", markersize = 5)\n plt.plot(x, y)\n\n plt.subplot(223)\n plt.title(\"III\")\n plt.plot(three_x, three_y, \"ko\", markersize = 5)\n plt.plot(x, y)\n\n plt.subplot(224)\n plt.title(\"IV\")\n plt.plot(four_x, four_y, \"ko\", markersize = 5)\n plt.plot(x, y)\n\n plt.tight_layout()\n plt.show()\n\n return\n\nanscombefindings = '''\nI is mostly linear and increasing in y but has some dispersion.\nII is parabolic and concave.\nIII is very linear with one outlier with a high y-value.\nIV is vertical as there is only one x value for multiple y-values.\n'''\n\nprint(anscombefindings)\n\n#Problem 2\ndef getT(n,v,x):\n T = special.binom(n,v)*(x**v)*((1-x)**(n-v))\n return T\n\ndef bernstein():\n x = np.linspace(0, 1, 1000)\n for n in range(0, 4):\n for v in range(0, n+1):\n plt.subplot(4, 4, 1 + v + n*4)\n y = getT(n, v, x)\n plt.plot(x, y, lw = 2)\n plt.axis([0, 1, 0, 1])\n plt.tick_params(which = \"both\", top = \"off\", right = \"off\")\n if n < 2:\n plt.tick_params(labelbottom = \"off\")\n if n % 5:\n plt.tick_params(labelleft = \"off\")\n plt.title(\"n =\" + str(n))\n\n plt.tight_layout()\n plt.show()\n\n#Problem 3\n# HEIGHT WEIGHT AGE\ndef MLB():\n mlbarr = np.load('MLB.npy')\n height = mlbarr[:,0]\n weight = mlbarr[:,1]\n age = mlbarr[:,2]\n plt.subplot(131)\n plt.plot(height, weight, 'o', markersize = 1)\n plt.plot(np.unique(height), np.poly1d(np.polyfit(height, weight, 1))(np.unique(height)))\n plt.xlabel('height')\n plt.ylabel('weight')\n plt.axis(\"equal\")\n\n plt.subplot(132)\n plt.plot(height, age, 'o', markersize = 1)\n plt.plot(np.unique(height), np.poly1d(np.polyfit(height, age, 1))(np.unique(height)))\n plt.xlabel('height')\n plt.ylabel('age')\n plt.axis(\"equal\")\n\n plt.subplot(133)\n plt.plot(age, weight, 'o', markersize = 1)\n plt.plot(np.unique(age), np.poly1d(np.polyfit(age, weight, 1))(np.unique(age)))\n plt.xlabel('age')\n plt.ylabel('weight')\n plt.axis(\"equal\")\n plt.suptitle(\"Correlations of MLB players\")\n plt.tight_layout()\n plt.subplots_adjust(top=0.85)\n\n plt.show()\n\n return\n\n#DATE MAGNITUDE LONGITUDE LATITUDE\n#Problem 4\ndef earthquakes():\n year, magnitude, longitude, latitude = np.load('earthquakes.npy').T\n\n plt.subplot(131)\n plt.hist(year, bins = 11, edgecolor='black', 
linewidth=1.2)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Number of Earthquakes\")\n plt.title(\"Qn 1\")\n\n plt.subplot(132)\n plt.hist(magnitude, bins = 10, edgecolor='black', linewidth=1.2)\n plt.title(\"Qn 2\")\n\n plt.subplot(133)\n plt.plot(longitude, latitude, \"o\", markersize = 0.05)\n plt.xlabel('latitude')\n plt.ylabel('longitude')\n plt.axis(\"equal\")\n plt.title(\"Qn 3\")\n\n plt.suptitle(\"Distribution of Earthquake Data\")\n plt.tight_layout()\n plt.subplots_adjust(top=0.85)\n plt.show()\n return\n\n#Problem 5\ndef rosenbrock():\n x = np.linspace(-10,10,1000)\n y = x.copy()\n X, Y = np.meshgrid(x, y)\n Z = (1-X)**2 + 100*((Y-X**2)**2)\n plt.contourf(X, Y, Z, 1000, cmap = \"plasma\", norm=LogNorm())\n plt.plot(1, 1, 'bx')\n plt.axis([-2, 2, -2, 2])\n plt.colorbar()\n plt.title(\"Rosenbrock Function\")\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.show()\n\n#POPN GDP MALEHEIGHT FEMALEHEIGHT\n#ROWS ARE COUNTRIES\n#Problem 6\ndef countries():\n df = np.load('countries.npy')\n population, gdp, maleheight, femaleheight = np.load('countries.npy').T\n labels = [\"Austria\", \"Bolivia\", \"Brazil\", \"China\",\n \"Finland\", \"Germany\", \"Hungary\", \"India\",\n \"Japan\", \"North Korea\", \"Montenegro\", \"Norway\",\n \"Peru\", \"South Korea\", \"Sri Lanka\", \"Switzerland\",\n \"Turkey\", \"United Kingdom\", \"United States\", \"Vietnam\"]\n\n plt.subplot(221)\n plt.scatter(gdp, maleheight)\n plt.plot(np.unique(gdp), np.poly1d(np.polyfit(gdp, maleheight, 1))(np.unique(gdp)))\n plt.title(\"Male Height vs. GDP\")\n plt.subplot(222)\n plt.scatter(gdp, femaleheight)\n plt.title(\"Female Height vs. GDP\")\n plt.plot(np.unique(gdp), np.poly1d(np.polyfit(gdp, femaleheight, 1))(np.unique(gdp)))\n plt.subplot(223)\n positions = np.arange(len(labels))\n plt.barh(positions, gdp, align = \"center\")\n plt.yticks(positions, labels)\n plt.title(\"GDP by country\")\n plt.subplot(224)\n plt.hist(gdp/population, bins = 20, edgecolor = \"black\")\n plt.title(\"Distribution of GDP per capita\")\n plt.tight_layout()\n\n plt.show()\n\ncountries()\ncountriesfindings = '''\nThere is a slight positive linear relationship between height and GDP.\nCountries with higher GDP's tend to have greater average height for both males and females.\nThe United States has by far the highest GDP, followed by China who has about 2/3 of US GDP.\nThe poorest countries by GDP are Montenegro, Bolivia, and North Korea.\nFrom the histogram, one can see that there is huge per capita income inequality between countries.\nThe distribution of per capita GDP is thick in the left tail and thin in the right tail.\n'''\nprint(countriesfindings)\n","sub_path":"Labs/Computation/Week 2/Python Code/Lab2_dataviz.py","file_name":"Lab2_dataviz.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"551210975","text":"from AnyQt import QtGui, QtCore\n\nfrom Orange.widgets.widget import OWWidget, Input, Output\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting\n\nfrom pycns import CNS\n\nclass OffsetCNS(OWWidget):\n name = \"Offset Node/Timepoint\"\n description = \"Offset Node numbers or timepoints by a fixed value\"\n icon = \"icons/move-up.svg\"\n want_main_area = False\n\n class Inputs:\n in_cns = Input(\"Input CNS\", CNS)\n\n class Outputs:\n out_cns = Output(\"CNS object\", CNS)\n\n noff = Setting(0, schema_only=True)\n tpoff = Setting(0, schema_only=True)\n\n def __init__(self):\n super().__init__()\n 
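\n        # noff and tpoff are Orange Setting fields (declared with schema_only=True\n        # above), so the line edits built in generate_gui() are presumably pre-filled\n        # with the values saved in the workflow schema rather than the defaults.\n        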
self.generate_gui()\n\n def generate_gui(self):\n self.box = gui.widgetBox(self.controlArea, \"Enter the offsets for nodes and timepoints\")\n self.noffinput = gui.lineEdit(self.box, self, \"noff\", label=\"Node Number offset\")\n self.tpoffinput = gui.lineEdit(self.box, self, \"tpoff\", label=\"Timepoint offset\")\n self.button = gui.button(self.box, self, \"OK\", self.set_output)\n\n @Inputs.in_cns\n def input_change(self, cns):\n if cns is None:\n return\n self.in_cns = cns\n self.set_output()\n \n def set_output(self): \n self.hide()\n if not hasattr(self, \"in_cns\"):\n return\n self.noff = int(self.noff) if self.noff is not None else 0\n self.tpoff = float(self.tpoff) if self.tpoff is not None else 0\n self.out_cns = self.in_cns.set_index([\n self.in_cns.index.get_level_values(0) + self.noff,\n self.in_cns.index.get_level_values(1) + self.tpoff\n ])\n self.Outputs.out_cns.send(self.out_cns)\n","sub_path":"pycnsorange/cns/widgets/offset.py","file_name":"offset.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"66538015","text":"import numpy as np\n\nfrom cogdl.datasets import build_dataset\n\nfrom .. import BaseModel, register_model\nfrom .prone import ProNE\n\n\n@register_model(\"struc2vec\")\nclass Struc2vec(BaseModel):\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--emb-path', type=str,\n help='Load self.emb from npy file')\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(args)\n\n def __init__(self, args):\n super(Struc2vec, self).__init__()\n\n node2id = build_dataset(args).node2id\n with open(args.emb_path) as f:\n num_nodes, dim = list(map(int, f.readline().strip().split()))\n assert len(node2id) == num_nodes and dim == args.hidden_size, \"Dataset and emb dimension doesn't match\"\n self.emb = np.zeros((num_nodes, dim))\n for line in f:\n line = line.strip().split()\n x = node2id[int(line[0])]\n embedding = np.array(list(map(float, line[1:])))\n self.emb[x] = embedding\n # if args.task == \"unsupervised_node_classification\":\n # self.emb = (self.emb - self.emb.mean(axis=0)) / (self.emb.std(axis=0) + 1e-8)\n\n def train(self, G):\n id2node = dict([(vid, node) for vid, node in enumerate(G.nodes())])\n self.emb = np.asarray([self.emb[id2node[i]] for i in range(len(id2node))])\n return self.emb\n\n\n@register_model(\"struc2vec_cat_prone\")\nclass Struc2vecCatProne(Struc2vec):\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n Struc2vec.add_args(parser)\n ProNE.add_args(parser)\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(args)\n\n def __init__(self, args):\n args.hidden_size //= 2\n super(Struc2vecCatProne, self).__init__(args)\n self.prone = ProNE.build_model_from_args(args)\n args.hidden_size *= 2\n\n def train(self, G):\n id2node = dict([(vid, node) for vid, node in enumerate(G.nodes())])\n self.emb = np.asarray([self.emb[id2node[i]] for i in range(len(id2node))])\n prone_embeddings = self.prone.train(G)\n return np.concatenate([self.emb, prone_embeddings], axis=1)\n\n@register_model(\"struc2vec_align\")\nclass Struc2vecAlign(BaseModel):\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--emb-path-1', type=str,\n help='Load self.emb from npy file')\n 
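\n        # Assumption: both embedding files use the word2vec text layout that\n        # _load_emb() below expects, i.e. a \"num_nodes dim\" header line followed\n        # by one \"node_id v1 v2 ...\" line per node.\n        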
parser.add_argument('--emb-path-2', type=str,\n help='Load self.emb from npy file')\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(args)\n\n def __init__(self, args):\n super(Struc2vecAlign, self).__init__()\n dataset = build_dataset(args)\n self.emb_1 = self._load_emb(args.emb_path_1, dataset.node2id_1, args.hidden_size)\n self.emb_2 = self._load_emb(args.emb_path_2, dataset.node2id_2, args.hidden_size)\n self.t1, self.t2 = False, False\n\n def _load_emb(self, emb_path, node2id, hidden_size):\n with open(emb_path) as f:\n num_nodes, dim = list(map(int, f.readline().strip().split()))\n assert len(node2id) == num_nodes and dim == hidden_size, \"Dataset and emb dimension doesn't match\"\n emb = np.zeros((num_nodes, dim))\n for line in f:\n line = line.strip().split()\n x = node2id[int(line[0])]\n embedding = np.array(list(map(float, line[1:])))\n emb[x] = embedding\n return emb\n\n def train(self, G):\n if G.number_of_nodes() == self.emb_1.shape[0] and not self.t1:\n emb = self.emb_1\n self.t1 = True\n elif G.number_of_nodes() == self.emb_2.shape[0] and not self.t2:\n emb = self.emb_2\n self.t2 = True\n else:\n raise NotImplementedError\n id2node = dict([(vid, node) for vid, node in enumerate(G.nodes())])\n # index the embedding selected above; self.emb is not defined until this line\n self.emb = np.asarray([emb[id2node[i]] for i in range(len(id2node))])\n return self.emb","sub_path":"cogdl/models/emb/struc2vec.py","file_name":"struc2vec.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"55669369","text":"# coding=utf-8\nimport pdb\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nfrom tqdm import tqdm\nfrom PIL import Image\nimport numpy as np\nfrom datasets import VOC, Saliency\nfrom datasets import palette as palette_voc\nfrom evaluate_seg import evaluate_iou\nfrom evaluate_sal import fm_and_mae\nimport json\nimport os\nfrom jls_fcn import JLSFCN\nfrom logger import Logger\n\nimage_size = 256\nbatch_size = 8\nc_output = 21\nexperiment_name = \"debug5\"\npath_save_checkpoints = \"./stage1.pth\"\n\npath_save_valid_voc = \"output/validation/{}_voc\".format(experiment_name)\nif not os.path.exists(path_save_valid_voc): os.mkdir(path_save_valid_voc)\n\npath_save_valid_sal = \"output/validation/{}_sal\".format(experiment_name)\nif not os.path.exists(path_save_valid_sal): os.mkdir(path_save_valid_sal)\n\nnet = JLSFCN(c_output).cuda()\nnet.load_state_dict(torch.load(path_save_checkpoints))\n\nmean = torch.Tensor([0.485, 0.456, 0.406])[None, ..., None, None].cuda()\nstd = torch.Tensor([0.229, 0.224, 0.225])[None, ..., None, None].cuda()\n\nvoc_val_img_dir = '/home/zeng/data/datasets/segmentation/VOCdevkit/VOC2012/JPEGImages'\nvoc_val_gt_dir = '/home/zeng/data/datasets/segmentation/VOCdevkit/VOC2012/SegmentationClass'\nvoc_val_split = '/home/zeng/data/datasets/segmentation/VOCdevkit/VOC2012/ImageSets/Segmentation/val.txt'\n\nsal_val_img_dir = '/home/zeng/data/datasets/saliency/ECSSD/images'\nsal_val_gt_dir = '/home/zeng/data/datasets/saliency/ECSSD/masks'\n\nsal_val_loader = torch.utils.data.DataLoader(\n Saliency(sal_val_img_dir, sal_val_gt_dir,\n crop=None, flip=False, rotate=None, size=image_size, training=False), \n batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)\n\nvoc_val_loader = torch.utils.data.DataLoader(\n VOC(voc_val_img_dir, voc_val_gt_dir, voc_val_split,\n crop=None, flip=False, rotate=None, size=image_size, training=False),\n batch_size=batch_size, shuffle=True, num_workers=4, 
pin_memory=True)\n\ndef val_sal():\n net.eval()\n with torch.no_grad():\n for it, (img, gt, batch_name, WW, HH) in tqdm(enumerate(sal_val_loader), desc='train'):\n img = (img.cuda()-mean)/std\n pred_seg, v_sal, _ = net(img)\n pred_seg = torch.softmax(pred_seg, 1)\n bg = pred_seg[:, :1]\n fg = (pred_seg[:, 1:]*v_sal[:, 1:]).sum(1, keepdim=True)\n fg = fg.squeeze(1)\n fg = fg*255\n for n, name in enumerate(batch_name):\n msk =fg[n]\n msk = msk.detach().cpu().numpy()\n w = WW[n]\n h = HH[n]\n msk = Image.fromarray(msk.astype(np.uint8))\n msk = msk.resize((w, h))\n msk.save('{}/{}.png'.format(path_save_valid_sal, name), 'PNG')\n fm, mae, _, _ = fm_and_mae(path_save_valid_sal, sal_val_gt_dir)\n net.train()\n return fm, mae\n\n\ndef val_voc():\n net.eval()\n with torch.no_grad():\n for it, (img, gt, batch_name, WW, HH) in tqdm(enumerate(voc_val_loader), desc='train'):\n img = (img.cuda()-mean)/std\n outputs = net(img)\n batch_seg = outputs[0]\n _, batch_seg = batch_seg.detach().max(1)\n for n, name in enumerate(batch_name):\n msk =batch_seg[n]\n msk = msk.detach().cpu().numpy()\n w = WW[n]\n h = HH[n]\n msk = Image.fromarray(msk.astype(np.uint8))\n msk = msk.convert('P')\n msk.putpalette(palette_voc)\n msk = msk.resize((w, h))\n msk.save('{}/{}.png'.format(path_save_valid_voc, name), 'PNG')\n miou = evaluate_iou(path_save_valid_voc, voc_val_gt_dir, c_output)\n net.train()\n return miou\n\nif __name__ == \"__main__\":\n fm, mae = val_sal()\n print(fm, mae)\n #net.load_state_dict(torch.load(\"output/checkpoints/debug/500.pth\"))\n #miou = val()\n #print(miou)\n","sub_path":"test_stage1.py","file_name":"test_stage1.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"107985125","text":"import torch \nimport torch.nn as tnn\nimport torch.optim as toptim\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nimport numpy as np\nimport pandas\nimport matplotlib.pyplot as plt\nimport time\nimport json\nfrom collections import OrderedDict\nfrom torchvision import datasets, models, transforms, utils\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport copy\nimport argparse\n\nuse_gpu = torch.cuda.is_available()\n\ndef initial_data(args):\n data_dir = args.data_dir\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n data_transforms = {\n 'train': transforms.Compose([\n transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ]),\n 'valid': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ]),\n 'test': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n }\n \n image_datasets = dict()\n image_datasets['train'] = datasets.ImageFolder(train_dir, transform=data_transforms['train'])\n image_datasets['valid'] = datasets.ImageFolder(valid_dir, transform=data_transforms['valid'])\n image_datasets['test'] = datasets.ImageFolder(test_dir, transform=data_transforms['test'])\n batch_size = 8\n dataloaders = dict()\n dataloaders['train'] = torch.utils.data.DataLoader(image_datasets['train'], 
batch_size=batch_size, shuffle=True)\n dataloaders['valid'] = torch.utils.data.DataLoader(image_datasets['valid'], batch_size=batch_size)\n dataloaders['test'] = torch.utils.data.DataLoader(image_datasets['test'], batch_size=batch_size)\n \n return dataloaders, image_datasets\n\ndef train_model_manager(args, model, criterion, optimizer, scheduler, num_epochs=10):\n \n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_accuracy = 0.0\n\n dataloaders, image_datasets = initial_data(args)\n dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid', 'test']}\n \n for epoch in range(num_epochs):\n print('Epoch {} / {}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n for phase in ['train', 'valid']:\n if phase == 'train':\n scheduler.step()\n model.train(True)\n else:\n model.train(False)\n\n running_loss = 0.0\n running_corrects = 0\n\n for data in dataloaders[phase]:\n inputs, labels = data\n if use_gpu:\n inputs = Variable(inputs.cuda())\n labels = Variable(labels.cuda())\n else:\n inputs, labels = Variable(inputs), Variable(labels)\n\n optimizer.zero_grad()\n\n outputs = model(inputs)\n _, preds = torch.max(outputs.data, 1)\n loss = criterion(outputs, labels)\n\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_accuracy = running_corrects.double() / dataset_sizes[phase]\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_accuracy))\n\n if phase == 'valid' and epoch_accuracy > best_accuracy:\n best_accuracy = epoch_accuracy\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n\n time_elapsed = time.time() - since\n print('Training has completed in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best Accuracy : {:4f}'.format(best_accuracy))\n\n model.load_state_dict(best_model_wts)\n return model\n\ndef save_check_point(checkpoint):\n torch.save(checkpoint, 'current_checkpoint.pth')\n\ndef setup_train_model(args):\n dataloaders, image_datasets = initial_data(args)\n \n if args.arch == 'vgg': \n model = models.vgg16(pretrained=True)\n num_features = model.classifier[0].in_features\n elif args.arch == 'densenet':\n model = models.densenet121(pretrained=True)\n # densenet121's classifier is a single Linear layer, not a Sequential\n num_features = model.classifier.in_features\n \n for param in model.parameters():\n param.requires_grad = False\n \n classifier = tnn.Sequential(OrderedDict([\n ('fc1', tnn.Linear(num_features, 512)),\n ('relu', tnn.ReLU()),\n ('drpot', tnn.Dropout(p=0.5)),\n ('hidden', tnn.Linear(512, args.hidden_units)), \n ('fc2', tnn.Linear(args.hidden_units, 102)),\n ('output', tnn.LogSoftmax(dim=1)),\n ]))\n\n model.classifier = classifier\n \n if args.gpu:\n if use_gpu:\n model = model.cuda()\n print (\"Using GPU : \"+ str(use_gpu))\n else:\n print(\"GPU is not available\")\n \n \n num_epochs = 10\n\n criterion = tnn.CrossEntropyLoss()\n optimizer = toptim.Adam(model.classifier.parameters(), lr=args.learning_rate)\n\n exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)\n model = train_model_manager(args, model, criterion, optimizer, exp_lr_scheduler, num_epochs=args.epochs)\n model.class_to_idx = dataloaders['train'].dataset.class_to_idx\n model.epochs = num_epochs\n checkpoint = {'input_size': [3, 224, 224],\n 'batch_size': dataloaders['train'].batch_size,\n 'output_size': 102,\n 'state_dict': model.state_dict(),\n 'optimizer_dict':optimizer.state_dict(),\n 'class_to_idx': model.class_to_idx,\n 
'epoch': model.epochs}\n save_check_point(checkpoint)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', default=False, help='Use GPU if available')\n parser.add_argument('--data_dir', type=str, help='Path to dataset ')\n parser.add_argument('--epochs', type=int, help='Number of epochs')\n parser.add_argument('--arch', type=str, help='Model architecture')\n parser.add_argument('--learning_rate', type=float, help='Learning rate')\n parser.add_argument('--checkpoint', type=str, help='Save trained model checkpoint to file')\n parser.add_argument('--hidden_units', type=int, help='Number of hidden units')\n args = parser.parse_args()\n \n with open('cat_to_name.json', 'r') as file:\n cat_to_name = json.load(file)\n\n setup_train_model(args)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"147835796","text":"#! /usr/bin/env python\n\nfrom __future__ import print_function\ntry: from . import *\nexcept: from bacula_tools import * #pragma: no cover\n\nclass Device(DbDict):\n SETUP_KEYS = [\n ARCHIVEDEVICE, DEVICETYPE, MEDIATYPE, CHANGERDEVICE, CHANGERCOMMAND, ALERTCOMMAND,\n DRIVEINDEX, MAXIMUMCONCURRENTJOBS, MAXIMUMCHANGERWAIT, MAXIMUMREWINDWAIT,\n MAXIMUMOPENWAIT, VOLUMEPOLLINTERVAL, MOUNTPOINT, MOUNTCOMMAND, UNMOUNTCOMMAND,\n MINIMUMBLOCKSIZE, MAXIMUMBLOCKSIZE, MAXIMUMVOLUMESIZE, MAXIMUMFILESIZE,\n MAXIMUMNETWORKBUFFERSIZE, MAXIMUMSPOOLSIZE, MAXIMUMJOBSPOOLSIZE, SPOOLDIRECTORY,\n MAXIMUMPARTSIZE, CLIENTCONNECTWAIT,\n ]\n BOOL_KEYS = [\n AUTOSELECT, REMOVABLEMEDIA, BLOCKCHECKSUM, HARDWAREENDOFMEDIUM,\n FASTFORWARDSPACEFILE, USEMTIOCGET, AUTOMATICMOUNT,\n BACKWARDSPACERECORD, BACKWARDSPACEFILE, FORWARDSPACERECORD,\n FORWARDSPACEFILE, BLOCKPOSITIONING, AUTOCHANGER, ALWAYSOPEN, CLOSEONPOLL,\n RANDOMACCESS, BSFATEOM, TWOEOF, OFFLINEONUNMOUNT, LABELMEDIA,\n ]\n table = DEVICE\n _insert = 'INSERT INTO device_link (device_id, storage_id) values (%s, %s)'\n _delete = 'DELETE FROM device_link where device_id = %s and storage_id = %s'\n _select = 'SELECT storage_id FROM device_link where device_id = %s'\n # {{{ parse_string(string, obj=None): Entry point for a recursive descent parser\n\n def parse_string(self, string, obj=None):\n # {{{ boilerplate. Sigh\n\n '''Populate a new object from a string.\n \n Parsing is hard, so we're going to call out to the pyparsing\n library here. 
I hope you installed it!\n '''\n from pyparsing import Suppress, Regex, quotedString, restOfLine, Keyword, nestedExpr, Group, OneOrMore, Word, Literal, alphanums, removeQuotes, replaceWith, nums\n gr_eq = Literal('=')\n gr_stripped_string = quotedString.copy().setParseAction( removeQuotes )\n gr_opt_quoted_string = gr_stripped_string | restOfLine\n gr_number = Word(nums)\n gr_yn = Keyword('yes', caseless=True).setParseAction(replaceWith('1')) | Keyword('no', caseless=True).setParseAction(replaceWith('0'))\n\n def np(words, fn = gr_opt_quoted_string, action=None):\n p = Keyword(words[0], caseless=True).setDebug(bacula_tools.DEBUG)\n for w in words[1:]:\n p = p | Keyword(w, caseless=True).setDebug(bacula_tools.DEBUG)\n p = p + gr_eq + fn\n p.setParseAction(action)\n return p\n\n # }}}\n\n gr_line = np(PList(NAME), action=lambda x: self._set_name(x[2]))\n gr_line = gr_line | np(PList('alert command'), action=self._parse_setter(ALERTCOMMAND))\n gr_line = gr_line | np(PList('archive device'), action=self._parse_setter(ARCHIVEDEVICE))\n gr_line = gr_line | np(PList('changer command'), action=self._parse_setter(CHANGERCOMMAND))\n gr_line = gr_line | np(PList('changer device'), action=self._parse_setter(CHANGERDEVICE))\n gr_line = gr_line | np(PList('client connect wait'), action=self._parse_setter(CLIENTCONNECTWAIT))\n gr_line = gr_line | np(PList('device type'), action=self._parse_setter(DEVICETYPE))\n gr_line = gr_line | np(PList('drive index'), gr_number, action=self._parse_setter(DRIVEINDEX, c_int=True))\n gr_line = gr_line | np(PList('maximum block size'), action=self._parse_setter(MAXIMUMBLOCKSIZE))\n gr_line = gr_line | np(PList('maximum changer wait'), action=self._parse_setter(MAXIMUMCHANGERWAIT))\n gr_line = gr_line | np(PList('maximum concurrent jobs'), gr_number, action=self._parse_setter(MAXIMUMCONCURRENTJOBS, c_int=True))\n gr_line = gr_line | np(PList('maximum file size'), action=self._parse_setter(MAXIMUMFILESIZE))\n gr_line = gr_line | np(PList('maximum job spool size'), action=self._parse_setter(MAXIMUMJOBSPOOLSIZE))\n gr_line = gr_line | np(PList('maximum network buffer size'), action=self._parse_setter(MAXIMUMNETWORKBUFFERSIZE))\n gr_line = gr_line | np(PList('maximum open wait'), action=self._parse_setter(MAXIMUMOPENWAIT))\n gr_line = gr_line | np(PList('maximum part size'), action=self._parse_setter(MAXIMUMPARTSIZE))\n gr_line = gr_line | np(PList('maximum rewind wait'), action=self._parse_setter(MAXIMUMREWINDWAIT))\n gr_line = gr_line | np(PList('maximum spool size'), action=self._parse_setter(MAXIMUMSPOOLSIZE))\n gr_line = gr_line | np(PList('maximum volume size'), action=self._parse_setter(MAXIMUMVOLUMESIZE))\n gr_line = gr_line | np(PList('media type'), action=self._parse_setter(MEDIATYPE))\n gr_line = gr_line | np(PList('minimum block size'), action=self._parse_setter(MINIMUMBLOCKSIZE))\n gr_line = gr_line | np(PList('mount command'), action=self._parse_setter(MOUNTCOMMAND))\n gr_line = gr_line | np(PList('mount point'), action=self._parse_setter(MOUNTPOINT))\n gr_line = gr_line | np(PList('spool directory'), action=self._parse_setter(SPOOLDIRECTORY))\n gr_line = gr_line | np(PList('unmount command'), action=self._parse_setter(UNMOUNTCOMMAND))\n gr_line = gr_line | np(PList('volume poll interval'), action=self._parse_setter(VOLUMEPOLLINTERVAL))\n\n gr_line = gr_line | np(PList('always open'), gr_yn, action=self._parse_setter(ALWAYSOPEN))\n gr_line = gr_line | np(PList('auto changer'), gr_yn, action=self._parse_setter(AUTOCHANGER))\n gr_line = gr_line | np(PList('auto 
select'), gr_yn, action=self._parse_setter(AUTOSELECT))\n gr_line = gr_line | np(PList('automatic mount'), gr_yn, action=self._parse_setter(AUTOMATICMOUNT))\n gr_line = gr_line | np(PList('backward space file'), gr_yn, action=self._parse_setter(BACKWARDSPACEFILE))\n gr_line = gr_line | np(PList('backward space record'), gr_yn, action=self._parse_setter(BACKWARDSPACERECORD))\n gr_line = gr_line | np(PList('block check sum'), gr_yn, action=self._parse_setter(BLOCKCHECKSUM))\n gr_line = gr_line | np(PList('block positioning'), gr_yn, action=self._parse_setter(BLOCKPOSITIONING))\n gr_line = gr_line | np(PList('bsf at eom'), gr_yn, action=self._parse_setter(BSFATEOM))\n gr_line = gr_line | np(PList('close on poll'), gr_yn, action=self._parse_setter(CLOSEONPOLL))\n gr_line = gr_line | np(PList('fast forward space file'), gr_yn, action=self._parse_setter(FASTFORWARDSPACEFILE))\n gr_line = gr_line | np(PList('forward space file'), gr_yn, action=self._parse_setter(FORWARDSPACEFILE))\n gr_line = gr_line | np(PList('forward space record'), gr_yn, action=self._parse_setter(FORWARDSPACERECORD))\n gr_line = gr_line | np(PList('hardware end of medium'), gr_yn, action=self._parse_setter(HARDWAREENDOFMEDIUM))\n gr_line = gr_line | np(PList('label media'), gr_yn, action=self._parse_setter(LABELMEDIA))\n gr_line = gr_line | np(PList('offline on unmount'), gr_yn, action=self._parse_setter(OFFLINEONUNMOUNT))\n gr_line = gr_line | np(PList('random access'), gr_yn, action=self._parse_setter(RANDOMACCESS))\n gr_line = gr_line | np(PList('removable media'), gr_yn, action=self._parse_setter(REMOVABLEMEDIA))\n gr_line = gr_line | np(PList('two eof'), gr_yn, action=self._parse_setter(TWOEOF))\n gr_line = gr_line | np(PList('use mtiocget'), gr_yn, action=self._parse_setter(USEMTIOCGET))\n\n gr_res = OneOrMore(gr_line)\n\n result = gr_res.parseString(string, parseAll=True)\n if obj: self.link(obj)\n return 'Device: ' + self[NAME]\n\n # }}}\n # {{{ __str__(): \n\n def __str__(self):\n '''String representation of a Device suitable for inclusion in a\n configuration file.\n\n '''\n self.output = ['Device {\\n Name = \"%(name)s\"' % self,'}']\n \n for key in self.SETUP_KEYS: self._simple_phrase(key)\n for key in self.BOOL_KEYS: self._yesno_phrase(key)\n\n return '\\n'.join(self.output)\n\n# }}}\n # {{{ link(obj): link the device to a storage daemon\n\n def link(self, obj):\n '''Devices belong to Storage Daemons, but there's no intrinsic way to know\n that a device belongs to a daemon, so we have a table that provides\n links between the two. 
This member, given a Storage object, will\n link the Device to the Storage.\n '''\n try:\n self.bc.do_sql(self._insert, (self[ID], obj[ID]))\n except Exception as e:\n if e.args[0] == 1062: pass # 1062 is what happens when you try to insert a duplicate row\n else:\n print(e)\n raise\n\n # }}}\n # {{{ unlink(obj): unlink the device from a storage daemon\n\n def unlink(self, obj):\n '''Remove the linkage between a Device and a Storage'''\n self.bc.do_sql(self._delete, (self[ID], obj[ID]))\n return\n\n # }}}\n # {{{ _cli_special_setup(): (un)link Storage\n\n def _cli_special_setup(self):\n '''Enable the CLI to (un)link devices to/from Sotrage Daemons.'''\n group = optparse.OptionGroup(self.parser, \"Storage daemon links\",\n \"A device is associated with one or more storage daemons.\")\n group.add_option('--add-link', metavar='STORAGE_DAEMON')\n group.add_option('--remove-link', metavar='STORAGE_DAEMON')\n self.parser.add_option_group(group)\n return\n\n # }}}\n # {{{ _cli_special_do_parse(args): (un)link Storage\n\n def _cli_special_do_parse(self, args):\n '''Handle any attempts by the CLI to (un)link the Device to/from Storage'''\n if args.add_link:\n s = bacula_tools.Storage().search(args.add_link)\n if not s[ID]: s.search(args.add_link)\n if not s[ID]:\n print('\\n***WARNING***: Unable to find a Storage Daemon identified by \"%s\". Not linked.\\n' % args.add_link)\n return\n self.link(s)\n\n if args.remove_link:\n s = bacula_tools.Storage().search(args.remove_link)\n if not s[ID]: s.search(args.remove_link)\n if not s[ID]:\n print('\\n***WARNING***: Unable to find a Storage Daemon identified by \"%s\". Not unlinked.\\n' % args.remove_link)\n return\n self.unlink(s)\n\n return\n\n# }}}\n # {{{ _cli_special_print(): Display any linked Storage\n\n def _cli_special_print(self):\n '''Print any linked Storage'''\n for row in self.bc.do_sql(self._select, self[ID]):\n s = bacula_tools.Storage().search(row[0])\n print(('%'+ str(self._maxlen) + 's: %s') % ('Storage Daemon', s[NAME]))\n return\n\n # }}}\n # {{{ _cli_special_clone(oid): Handle cloning requirements.\n\n def _cli_special_clone(self, oid):\n '''Any clones of this device will be linked to the same Storage.'''\n for row in self.bc.do_sql(self._select, oid): self.bc.do_sql(self._insert, (self[ID], row[0]))\n return\n\n # }}}\n\ndef main():\n s = Device()\n s.cli()\n\nif __name__ == \"__main__\": main()\n","sub_path":"bacula_tools/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":11008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"93069048","text":"#\n# wireless sensor routines\n\n\nimport config\n\nimport json\nimport random\n\nimport sys\nfrom subprocess import PIPE, Popen, STDOUT\nfrom threading import Thread\n#import json\nimport datetime\nimport buildJSON\n\nimport state\nimport indoorTH\nimport pclogging\n\nimport time\nimport os\nimport signal\n\n\n# ---------------------------------------------------------------------------------------------------------------------------------------------------------------\n\ncmd = [ '/usr/local/bin/rtl_433', '-q', '-F', 'json', '-R', '146', '-R', '147']\n\n# ---------------------------------------------------------------------------------------------------------------------------------------------------------------\n# A few helper functions...\n\nThreadStop = False;\n\ndef nowStr():\n return( datetime.datetime.now().strftime( '%Y-%m-%d %H:%M:%S'))\n\n#stripped = lambda s: \"\".join(i for i in s if 31 < ord(i) < 
127)\n\n\n# We're using a queue to capture output as it occurs\ntry:\n from Queue import Queue, Empty\nexcept ImportError:\n from queue import Queue, Empty # python 3.x\nON_POSIX = 'posix' in sys.builtin_module_names\n\ndef enqueue_output(src, out, queue):\n try:\n for line in iter(out.readline, b''):\n queue.put(( src, line))\n out.close()\n except:\n pass \n\ndef randomadd(value, spread):\n\n return round(value+random.uniform(-spread, spread),2)\n\n\n# process functions\n\ndef processF020(sLine):\n\n if (config.SWDEBUG):\n sys.stdout.write(\"processing FT020T Data\\n\")\n sys.stdout.write('This is the raw data: ' + sLine + '\\n')\n\n var = json.loads(sLine)\n\n # outside temperature and Humidity\n\n state.mainID = var[\"id\"] \n state.lastMainReading = nowStr()\n\n\n if (state.previousMainReading == \"Never\"):\n pclogging.systemlog(config.INFO,\"Main Weather Sensors Found\")\n print(\"Main Weather Sensors Found\")\n pclogging.systemlog(config.INFO,\"Blynk Updates Started\")\n state.previousMainReading = state.lastMainReading\n\n\n\n wTemp = var[\"temperature\"]\n\n ucHumi = var[\"humidity\"]\n\n\n wTemp = (wTemp - 400)/10.0\n # deal with error conditions\n if (wTemp > 140.0):\n # error condition from sensor\n if (config.SWDEBUG):\n sys.stdout.write(\"error--->>> Temperature reading from FT020T\\n\")\n sys.stdout.write('This is the raw temperature: ' + str(wTemp) + '\\n')\n # put in previous temperature \n wTemp = state.OutdoorTemperature \n #print(\"wTemp=%s %s\", (str(wTemp),nowStr() ));\n if (ucHumi > 100.0):\n # bad humidity\n # put in previous humidity\n ucHumi = state.OutdoorHumidity\n \n state.OutdoorTemperature = round(((wTemp - 32.0)/(9.0/5.0)),2)\n state.OutdoorHumidity = ucHumi \n\n \n \n state.WindSpeed = round(var[\"avewindspeed\"]/10.0, 1)\n state.WindGust = round(var[\"gustwindspeed\"]/10.0, 1)\n state.WindDirection = var[\"winddirection\"]\n \n\n\n state.TotalRain = round(var[\"cumulativerain\"]/10.0,1)\n state.Rain60Minutes = 0.0\n\n wLight = var[\"light\"]\n if (wLight >= 0x1fffa):\n wLight = wLight | 0x7fff0000\n\n wUVI =var[\"uv\"]\n if (wUVI >= 0xfa):\n wUVI = wUVI | 0x7f00\n\n state.SunlightVisible = wLight \n state.SunlightUVIndex = round(wUVI/10.0, 1 )\n\n if (var['batterylow'] == 0):\n state.BatteryOK = \"OK\"\n else:\n state.BatteryOK = \"LOW\"\n\n #print(\"looking for buildJSONSemaphore acquire\")\n state.buildJSONSemaphore.acquire()\n #print(\"buildJSONSemaphore acquired\")\n state.StateJSON = buildJSON.getStateJSON()\n #if (config.SWDEBUG):\n # print(\"currentJSON = \", state.StateJSON)\n state.buildJSONSemaphore.release()\n #print(\"buildJSONSemaphore released\")\n\n\n\n# processes Inside Temperature and Humidity\ndef processF016TH(sLine):\n if (config.SWDEBUG):\n sys.stdout.write('Processing F016TH data'+'\\n')\n sys.stdout.write('This is the raw data: ' + sLine + '\\n')\n \n var = json.loads(sLine)\n\n state.mainID = var[\"device\"] + var[\"channel\"]\n state.lastIndoorReading = nowStr()\n\n if (state.previousIndoorReading == \"Never\"):\n pclogging.systemlog(config.INFO,\"Indoor Weather Sensor Found\")\n print(\"Indoor Weather Sensors Found\")\n state.previousIndoorReading = state.lastIndoorReading\n\n state.IndoorTemperature = round(((var[\"temperature_F\"] - 32.0)/(9.0/5.0)),2)\n state.IndoorHumidity = var[\"humidity\"]\n state.lastIndoorReading = var[\"time\"]\n state.insideID = var[\"channel\"]\n\n\n\n indoorTH.addITReading(var[\"device\"], var[\"channel\"], state.IndoorTemperature, var[\"humidity\"], var[\"battery\"], var[\"time\"])\n\n 
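\n    # Rebuild the shared JSON snapshot under buildJSONSemaphore, mirroring\n    # processF020() above; the lock appears to keep readers of state.StateJSON\n    # from ever seeing a half-built snapshot.\n    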
#print(\"looking for buildJSONSemaphore acquire\")\n state.buildJSONSemaphore.acquire()\n #print(\"buildJSONSemaphore acquired\")\n state.StateJSON = buildJSON.getStateJSON()\n #if (config.SWDEBUG):\n # print(\"currentJSON = \", state.StateJSON)\n state.buildJSONSemaphore.release()\n #print(\"buildJSONSemaphore released\")\n\n# main read 433HMz Sensor Loop\ndef readSensors():\n\n\n print(\"\")\n print(\"######\")\n # Create our sub-process...\n # Note that we need to either ignore output from STDERR or merge it with STDOUT due to a limitation/bug somewhere under the covers of \"subprocess\"\n # > this took awhile to figure out a reliable approach for handling it...\n\n p = Popen( cmd, stdout=PIPE, stderr=STDOUT, bufsize=1, close_fds=ON_POSIX)\n q = Queue()\n\n t = Thread(target=enqueue_output, args=('stdout', p.stdout, q))\n \n t.daemon = True # thread dies with the program\n t.start()\n\n# ---------------------------------------------------------------------------------------------------------------------------------------------------------------\n pulse = 0\n print(\"starting 433MHz scanning\")\n print(\"######\")\n lastTimeSensorReceived = time.time()\n while True:\n # Other processing can occur here as needed...\n #sys.stdout.write('Made it to processing step. \\n')\n timeSinceLastSample = time.time() - lastTimeSensorReceived\n \n if (timeSinceLastSample > 720.0): # restart if no reads in 12 minutes\n \n if (config.SWDEBUG):\n print(\">>>>>>>>>>>>>>restarting SDR thread.....\")\n lastTimeSensorReceived = time.time()\n if (config.SWDEBUG):\n print( \"Killing SDR Thread\")\n p.kill()\n t.join()\n pclogging.systemlog(config.INFO,\"SDR Restarted\")\n if (config.SWDEBUG):\n print(\"starting SDR Thread again\")\n\n print(\"\")\n print(\"######\")\n print(\"Read Wireless Sensors\")\n print(\"######\")\n p = Popen( cmd, stdout=PIPE, stderr=STDOUT, bufsize=1, close_fds=ON_POSIX)\n q = Queue()\n\n t = Thread(target=enqueue_output, args=('stdout', p.stdout, q))\n \n t.daemon = True # thread dies with the program\n t.start()\n\n\n try:\n src, line = q.get(timeout = 1)\n #print(line.decode())\n except Empty:\n pulse += 1\n else: # got line\n pulse -= 1\n sLine = line.decode()\n #if ( sLine.find('F007TH') != -1) or ( sLine.find('FT0300') != -1) or ( sLine.find('F016TH') != -1) or ( sLine.find('FT020T') != -1):\n # pclogging.systemlog(config.INFO,\"SDR Received data in =%6.2f seconds\"%(timeSinceLastSample))\n lastTimeSensorReceived = time.time()\n \n # See if the data is something we need to act on...\n\n if ( sLine.find('F007TH') != -1) or ( sLine.find('FT0300') != -1) or ( sLine.find('F016TH') != -1) or ( sLine.find('FT020T') != -1):\n \n if (( sLine.find('F007TH') != -1) or ( sLine.find('F016TH') != -1)): \n processF016TH(sLine)\n if (( sLine.find('FT0300') != -1) or ( sLine.find('FT020T') != -1)): \n processF020(sLine)\n\n sys.stdout.flush()\n\n","sub_path":"wirelessSensors.py","file_name":"wirelessSensors.py","file_ext":"py","file_size_in_byte":7976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"449657809","text":"from django.conf.urls import url\n\nfrom . 
import views\n\napp_name = 'shfc'\nurlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^about/$', views.about, name='about'),\n\n # Range\n\n url(r'^range/$', views.range, name='range'),\n url(r'^range/shooting/$', views.rangeShooting, name='rangeShooting'),\n url(r'^range/archery/$', views.rangeArchery, name='rangeArchery'),\n url(r'^range/images/$', views.rangeImages, name='rangeImages'),\n\n # Marine\n url(r'^marine/$', views.marine, name='marine'),\n url(r'^marine/boats_rentals_information/$', views.marineBoatInformation, name='marineBoatInformation'),\n url(r'^marine/trips/$', views.marineBoatTrips, name='marineBoatTrips'),\n\n url(r'^membership/$', views.membership, name='membership'),\n url(r'^save/$', views.save, name='save'),\n\n # Purchasing\n url(r'^purchase/$', views.purchase, name='purchase'),\n url(r'^purchase/powder_valley/$', views.purchasePowderValley, name='purchasePowderValley'),\n\n # Events and classes\n url(r'^events/$', views.events, name='events'),\n url(r'^events/classes/$', views.eventsClasses, name='eventsClasses'),\n url(r'^documents/$', views.documents, name='documents'),\n\n\n url(r'^updateAnnouncement/$', views.updateAnnouncement, name='updateAnnouncement'),\n url(r'^workBond/$', views.workBond, name='workBond'),\n # Calendars\n url(r'^management/$', views.calendarManagement, name='calendarManagement'),\n url(r'^management/rso/$', views.calendarManagementRSO, name='calendarManagementRSO'),\n url(r'^management/events/$', views.calendarManagementEVENTS, name='calendarManagementEVENTS'),\n url(r'^management/marine/$', views.calendarManagementMARINE, name='calendarManagementMARINE'),\n \n url(r'^management/announcement/$', views.calendarManagementANNOUNCEMENTS, name='calendarManagementANNOUNCEMENTS'),\n url(r'^createRSOCalendarEvent/$', views.createRSOCalendarEvent, name='createRSOCalendarEvent'),\n url(r'^updateRSOCalendar/$', views.updateRSOCalendar, name='updateRSOCalendar'),\n url(r'^updateEventCalendar/$', views.updateEventCalendar, name='updateEventCalendar'),\n\n]","sub_path":"Site/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"303683790","text":"#!/usr/bin/env python3\nimport time\nimport bisect\nfrom collections import deque, defaultdict\nimport asyncio\nfrom concurrent import futures\nfrom phasortoolbox import Parser\nimport uvloop\nasyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n\n\nclass PDC(object):\n \"\"\"docstring for PDC\n\n\n Example:\n my_pdc = PDC()\n\n\n \"\"\"\n\n def __init__(\n self,\n CALLBACK=None,\n BUF_SIZE=1,\n FILTER={'data'},\n WAIT_TIME=0.1,\n loop: asyncio.AbstractEventLoop()=None,\n executor: futures.Executor()=None,\n step_time=0.01,\n returnNone=False,\n count=0\n # Partially Timeout time stamps will be discarded on False\n # None will be returned for timeout data on True.\n ):\n self.WAIT_TIME = WAIT_TIME\n self.FILTER = FILTER\n self.BUF_SIZE = BUF_SIZE\n self.step_time = step_time\n self.buf_time_out = self.BUF_SIZE * self.WAIT_TIME * 2\n self._input_list = []\n self._output_list = []\n self._input_queue = asyncio.Queue()\n if loop:\n self.loop = loop\n else:\n self.loop = asyncio.get_event_loop()\n self.executor = executor\n if CALLBACK:\n self.CALLBACK = CALLBACK\n self.returnNone = returnNone\n self.count = count\n\n async def run(self, count=None):\n if not self._input_list:\n print('No input defined.')\n return\n self._ordered_idcode_list = []\n for _input in 
self._input_list:\n self._ordered_idcode_list.append(_input.IDCODE)\n self._ordered_idcode_list.sort()\n if not callable(self.CALLBACK):\n raise TypeError(\"Input must be a function, \"\n \"not {!r}\".format(type(self.CALLBACK)))\n self._buf = {}\n self._buf_index = deque()\n if not count:\n count = self.count\n while True:\n try:\n ###############################################################\n \"\"\"Check which data can be send \n All data are kept in an dictionary. A valid record also\n contains the earlest arrive time and a flag indicated if the\n record has been sent previously. The newest record returned to\n the user CALLBACK must be a new record that has never been\n sent before. Thus, first check if the newest data has beend\n sent beforefrom the newest arrived data.\n \"\"\"\n _temp_send_list = []\n _time_out_by = time.time() - self.WAIT_TIME\n for time_tag in reversed(self._buf_index):\n if len(_temp_send_list) == self.BUF_SIZE:\n break\n if (\n (len(_temp_send_list) > 0) and\n (\n self._buf[time_tag]['sent'] or\n (\n len(self._buf[time_tag]) - 2 ==\n len(self._input_list)\n ) or\n (\n self.returnNone and\n (\n self._buf[time_tag]['_arrtime'] <\n _time_out_by\n )\n )\n )\n ):\n _temp_send_list.append(time_tag)\n # Valid to send, also the first one already found.\n continue\n elif (\n (\n len(_temp_send_list) == 0\n ) and\n (\n not self._buf[time_tag]['sent']\n ) and\n (\n (\n (len(self._buf[time_tag]) - 2) ==\n len(self._input_list)\n ) or\n (\n self.returnNone and\n (\n self._buf[time_tag]['_arrtime'] <\n _time_out_by\n )\n )\n )\n ):\n # The fist item in the list must be the newest recored\n # valid to send and has never been sent before.\n _temp_send_list.append(time_tag)\n continue\n elif (\n (len(_temp_send_list) == 0) and\n (self._buf[time_tag]['sent'])\n ):\n # The newest recored valid to send has been sent\n # before, no need to do anything.\n break\n if len(_temp_send_list) == self.BUF_SIZE:\n # Will not do anything if not enough data to send\n # Prepare send msgs\n buffer_msgs = [\n [\n self._buf[time_tag][idcode] for idcode in\n self._ordered_idcode_list\n ]\n for time_tag in reversed(_temp_send_list)\n ]\n # self.loop.run_in_executor(\n # self.executor, self.CALLBACK, buffer_msgs)\n _usr_buffer_msgs = self.CALLBACK(\n buffer_msgs) # Call user's function\n if _usr_buffer_msgs:\n for _devices in self._output_list:\n await _devices._input_queue.put(_usr_buffer_msgs)\n if count == 0:\n pass\n elif count > 1:\n count -= 1\n elif count == 1:\n break\n for time_tag in _temp_send_list:\n self._buf[time_tag]['sent'] = True\n ###############################################################\n # Remove time out data from _buf\n _del_list = []\n if len(_temp_send_list) == self.BUF_SIZE:\n # Remove all data until the last one sent\n for time_tag in self._buf_index:\n _del_list.append(time_tag)\n if time_tag < _temp_send_list[-1]:\n continue\n elif time_tag == _temp_send_list[-1]:\n break\n else:\n # Remove all data until buffer time out\n _time_out_by = time.time() - self.buf_time_out\n for time_tag in self._buf_index:\n if self._buf[time_tag]['_arrtime'] < _time_out_by:\n _del_list.append(time_tag)\n continue\n else:\n break\n for time_tag in _del_list:\n del self._buf[time_tag]\n self._buf_index.remove(time_tag)\n ###############################################################\n \"\"\"Get all data from queue\n If user's CALLBACK function is too slow, queue size will keep\n increase. Get all data from queue if queue have pendding\n data. 
If user's CALLBACK function is fast enough, then wait\n until item available in queue.\n \"\"\"\n if self._input_queue.qsize() >= 1:\n msgs = []\n for i in range(self._input_queue.qsize()):\n msgs.append(self._input_queue.get_nowait())\n else:\n msgs = [None]\n msgs[0] = await asyncio.wait_for(\n self._input_queue.get(), self.step_time)\n for msg in msgs:\n if msg.sync.frame_type.name not in self.FILTER:\n continue\n try:\n self._buf[msg.time][msg.idcode] = msg\n except KeyError: # New time tag\n self._buf[msg.time] = defaultdict(lambda: None)\n self._buf[msg.time][msg.idcode] = msg\n self._buf[msg.time]['_arrtime'] = msg._arrtime\n bisect.insort(self._buf_index, msg.time)\n ###############################################################\n except asyncio.TimeoutError:\n continue\n except asyncio.CancelledError:\n break\n self.loop.stop()\n\n async def clean_up(self):\n pass\n","sub_path":"phasortoolbox/pdc.py","file_name":"pdc.py","file_ext":"py","file_size_in_byte":8965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"628784440","text":"from Tkconstants import N, S, W, E, LEFT, BOTH, BOTTOM, RIGHT\nimport time\n\nimport Helper\nimport Menu\nfrom RPi import GPIO\nimport Tkinter as tk\nfrom Tkinter import Scale\n\n\nclass Motor(tk.Frame):\n def __init__(self, parent, controller):\n self.frame = tk.Frame\n self.frame.__init__(self, parent, background='red')\n \n self.parent = parent\n self.controller = controller\n \n self.motorFrame = tk.Frame(self, background='black')\n self.motorFrame.pack(fill=BOTH, expand=True, side=BOTTOM)\n\n self.helper = Helper.ButtonHelper()\n \n \n GPIO.setmode(GPIO.BOARD)\n self.gpios = [29, 31, 33, 35]\n GPIO.setup(self.gpios, GPIO.OUT)\n \n# self.leftMotor = GPIO.PWM(self.gpios[0], 75)\n# self.leftMotor.start(0)\n# self.rightMotor = GPIO.PWM(self.gpios[2], 75)\n# self.rightMotor.start(0)\n \n self.initUI()\n \n \n def initUI(self):\n\n menuButton = self.helper.menuButton(self, 'Back', lambda: self.controller.show_frame(Menu.MainMenu)) \n \n menuButton.pack(side=LEFT)\n \n columns = 3\n rows = 3\n \n for i in range(columns):\n self.motorFrame.columnconfigure(i, weight=1)\n for i in range(rows):\n self.motorFrame.rowconfigure(i, weight=1)\n \n self.forwardButton = self.helper.motorButton(self.motorFrame, \"Forward\")\n self.backButton = self.helper.motorButton(self.motorFrame, \"Back\")\n self.leftButton = self.helper.motorButton(self.motorFrame, \"Left\")\n self.rightButton = self.helper.motorButton(self.motorFrame, \"Right\")\n \n# self.scale = self.helper.scale(self.motorFrame, self.changeSpeed)\n \n self.forwardButton.grid(row=0, column=1, sticky=N+S+W+E)\n self.backButton.grid(row=2, column=1, sticky=N+S+W+E)\n self.leftButton.grid(row=1, column=0, sticky=N+S+W+E)\n self.rightButton.grid(row=1, column=2, sticky=N+S+W+E)\n# self.scale.grid(row=1, column=1, sticky=N+S+W+E, padx=10, pady=10)\n\n self.isStopped = True\n \n #Perform action when the button is pressed down\n self.forwardButton.bind('<Button-1>', self.goForward)\n self.backButton.bind('<Button-1>', self.goBackwards)\n self.leftButton.bind('<Button-1>', self.goLeft)\n self.rightButton.bind('<Button-1>', self.goRight)\n \n #Bind to arrow keys\n self.motorFrame.bind_all('<KeyPress-Up>', self.goForward)\n self.motorFrame.bind_all('<Down>', self.goBackwards)\n self.motorFrame.bind_all('<Left>', self.goLeft)\n self.motorFrame.bind_all('<Right>', self.goRight)\n self.motorFrame.bind_all('<KeyRelease>', self.allStop)\n \n #Stop everything when any 
button is released\n controlButtons = [self.forwardButton, self.backButton, self.leftButton, self.rightButton]\n for button in controlButtons:\n button.bind('<ButtonRelease-1>', self.allStop)\n\n def goForward(self, event):\n if self.isStopped:\n print(\"Going Forwards\")\n self.leftMotorForward()\n self.rightMotorForward()\n self.isStopped = False\n \n def goBackwards(self, event):\n if self.isStopped:\n print(\"Going Backwards\")\n self.leftMotorBack()\n self.rightMotorBack()\n self.isStopped = False\n \n def goLeft(self, event):\n if self.isStopped:\n print(\"Going Left\")\n self.leftMotorBack()\n self.rightMotorForward()\n self.isStopped = False\n \n def goRight(self, event):\n if self.isStopped:\n print(\"Going Right\")\n self.leftMotorForward()\n self.rightMotorBack()\n self.isStopped = False\n \n def allStop(self, event):\n print(\"All Stop\")\n GPIO.output(self.gpios[0], False)\n GPIO.output(self.gpios[1], False)\n GPIO.output(self.gpios[2], False)\n GPIO.output(self.gpios[3], False)\n self.isStopped = True\n\n def leftMotorForward(self):\n GPIO.output(self.gpios[0], True)\n GPIO.output(self.gpios[1], False)\n \n def leftMotorBack(self):\n GPIO.output(self.gpios[0], False)\n GPIO.output(self.gpios[1], True)\n\n def rightMotorForward(self):\n GPIO.output(self.gpios[2], True)\n GPIO.output(self.gpios[3], False)\n\n def rightMotorBack(self):\n GPIO.output(self.gpios[2], False)\n GPIO.output(self.gpios[3], True)\n \n# def changeSpeed(self, value):\n# self.leftMotor.ChangeDutyCycle(float(value))\n# self.rightMotor.ChangeDutyCycle(float(value))\n","sub_path":"src/Motor.py","file_name":"Motor.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"528105484","text":"import collections\nimport time\n\nfrom pyspark import StorageLevel\nfrom pyspark.sql import Row\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\nfrom pyspark.sql import functions\nfrom pyspark.sql.types import *\nfrom pyspark.sql.window import Window\n\n\nspark = SparkSession.builder.appName(\"project\").getOrCreate()\ntrain_data = spark.read.csv(\"s3://ybigta-spark-180602/data/train.csv\")\ntest_data = spark.read.csv(\"s3://ybigta-spark-180602/data/test.csv\")\n\n\ndef making_dataframe(df) :\n col_name = df.columns\n real_name = df.take(1)\n \n new_col_name = []\n for idx in range(len(col_name)) :\n new_col_name.append(real_name[0][idx])\n \n command = []\n for idx in range(len(col_name)) :\n command.append(col(col_name[idx]).alias(new_col_name[idx]))\n \n df = df.select(command)\n df = df.filter(df[real_name[0][0]] != real_name[0][0])\n \n return df\n\ndef is_booking(df) :\n new_df = df.where(df.is_booking == 1)\n new_df = new_df.drop(\"is_booking\").select(\"*\")\n \n return new_df\n\ndef drop_na(df) :\n new_df = df.na.drop()\n \n return new_df\n\ndef fill_na(df, ctg) :\n if ctg == \"train\" :\n new_df = df.na.fill({\"date_time\":\"2013.5-08-00 00:00:00\", \"srch_ci\":\"2013.5-09-00\",\"srch_co\":\"2013.5-09-00\"})\n else :\n new_df = df.na.fill({\"date_time\":\"2015-08-00 00:00:00\", \"srch_ci\":\"2015-09-00\",\"srch_co\":\"2015-09-00\"})\n \n return new_df\n\ndef string_to_double(df) :\n col_name = df.columns\n for name in col_name :\n df = df.withColumn(name, df[name].cast(DoubleType()))\n \n return df\n\ndef string_to_double2(df) :\n col_name = [\"reserv_year\", \"reserv_month\", \"check_in_year\", \"check_in_month\", \"check_out_year\", \"check_out_month\"]\n for name in col_name :\n df = 
df.withColumn(name, df[name].cast(DoubleType()))\n \n return df\n\ndef string_to_date(df, stringlist=[\"date_time\", \"srch_ci\", \"srch_co\"]) :\n col1 = functions.udf(lambda date_time: date_time.split()[0].split(\"-\")[0])\n col2 = functions.udf(lambda date_time: date_time.split()[0].split(\"-\")[1])\n ##col3 = functions.udf(lambda date_time: date_time.split()[0].split(\"-\")[2]) \n ##col4 = functions.udf(lambda date_time: date_time.split()[1].split(\":\")[0])\n \n new_df = df.select(\"*\", col1(df.date_time).alias(\"reserv_year\"), col2(df.date_time).alias(\"reserv_month\"))\n \n col5 = functions.udf(lambda srch_ci: srch_ci.split(\"-\")[0])\n col6 = functions.udf(lambda srch_ci: srch_ci.split(\"-\")[1])\n #col7 = functions.udf(lambda srch_ci: srch_ci.split(\"-\")[2]) \n \n new_df = new_df.select(\"*\", col5(df.srch_ci).alias(\"check_in_year\"), col6(df.srch_ci).alias(\"check_in_month\"))\n \n col8 = functions.udf(lambda srch_co: srch_co.split(\"-\")[0])\n col9 = functions.udf(lambda srch_co: srch_co.split(\"-\")[1])\n #col10 = functions.udf(lambda srch_co: srch_co.split(\"-\")[2]) \n \n new_df = new_df.select(\"*\", col8(df.srch_co).alias(\"check_out_year\"), col9(df.srch_co).alias(\"check_out_month\"))\n new_df = new_df.drop(stringlist[0]).drop(stringlist[1]).drop(stringlist[2])\n \n return new_df\n\ndef fill_na_as_mean_or_most(df, ctg) :\n col_name = df.columns\n new_df = df.select(\"*\")\n \n for col in col_name :\n print(col)\n try :\n col_avg = new_df.agg({col : \"mean\"}).collect()[0][0]\n new_df = new_df.na.fill({col :col_avg})\n except :\n if col == \"check_in_year\" and ctg==\"test\": \n new_df = new_df.na.fill({col : 2015})\n elif col == \"check_out_year\" and ctg==\"test\" : \n new_df = new_df.na.fill({col : 2015})\n elif col == \"check_in_year\" and ctg==\"train\": \n new_df = new_df.na.fill({col : 2013.5})\n elif col == \"check_out_year\" and ctg==\"train\" : \n new_df = new_df.na.fill({col : 2013.5})\n elif col == \"check_in_month\" : \n new_df = new_df.na.fill({col : 9})\n elif col == \"check_out_month\" : \n new_df = new_df.na.fill({col : 9})\n else :\n new_df = new_df.na.fill({col : 0})\n print(\"error : \", col)\n \n return new_df\n\ndef interval(df) :\n print(\"check\")\n term1 = (df.check_in_year-df.reserv_year)*365 + (df.check_in_month-df.reserv_month)*30\n new_df = df.select(\"*\", term1.alias(\"reserv_check_in_interval\"))\n \n term2 = (df.check_out_year-df.check_in_year)*365 + (df.check_out_month-df.check_in_month)*30\n new_df2 = new_df.select(\"*\", term2.alias(\"check_in_out_interval\"))\n \n new_df3 = new_df2.drop(\"reserv_year\")\n new_df3 = new_df3.drop(\"check_in_year\")\n new_df3 = new_df3.drop(\"check_out_year\")\n \n return new_df3\n\ndef drop_df(df, lst = [\"posa_continent\", \"user_id\", \"cnt\"]) :\n print(\"check\")\n if len(lst) == 3 :\n new_df = df.drop(lst[0], lst[1], lst[2]) \n else :\n new_df = df.drop(lst[0], lst[1])\n \n return new_df\n\ndef total_people(df) :\n print(\"check\")\n total = df.srch_adults_cnt + df.srch_children_cnt\n avg_room_cnt = total / df.srch_rm_cnt\n new_df = df.select(\"*\", total.alias(\"total_cnt\"), avg_room_cnt.alias(\"avg_room_cnt\"))\n \n return new_df\n\ndef normalize(df, col_lst) :\n new_df = df.select(\"*\")\n \n for col in col_lst :\n print(col)\n described = new_df.describe(col).select(\"*\")\n mean = float(described.take(3)[1][1])\n stddev = float(described.take(3)[2][1])\n \n if stddev == 0 :\n pass\n \n else :\n new_df = new_df.select(\"*\", ((new_df[col]-mean)/stddev).alias(\"normed_\"+col))\n new_df = new_df.drop(col)\n \n return new_df\n\n\ntrain_df = making_dataframe(train_data)\ntest_df = making_dataframe(test_data)\n\n\ntrain_df = is_booking(train_df)\n\n\ntrain_df1_1 = drop_na(train_df)\ntrain_df1_2 = fill_na(train_df, \"train\")\ntest_df1 = fill_na(test_df, \"test\")\n\n\ntrain_df2_1 = string_to_double(string_to_date(train_df1_1))\ntrain_df2_2 = string_to_double(string_to_date(train_df1_2))\ntest_df2 = string_to_double(string_to_date(test_df1))\n\n\ntrain_df3_1 = train_df2_1.select(\"*\")\n\ntrain_df3_2 = fill_na_as_mean_or_most(train_df2_2, \"train\")\ntrain_df3_2 = train_df3_2.na.drop()\n\ntest_df3 = fill_na_as_mean_or_most(test_df2, \"test\")\ntest_df3 = test_df3.na.fill(0)\n\n\ntrain_df4_1 = interval(total_people(drop_df(train_df3_1)))\ntrain_df4_2 = interval(total_people(drop_df(train_df3_2)))\ntest_df4 = interval(total_people(drop_df(test_df3, lst = [\"posa_continent\", \"user_id\"])))\n\n\ncol_lst = [\"orig_destination_distance\",\"srch_adults_cnt\",\"srch_children_cnt\",\"srch_rm_cnt\",\"total_cnt\",\"avg_room_cnt\"]\n\ntrain_df5_1 = normalize(train_df4_1, col_lst)\ntrain_df5_2 = normalize(train_df4_2, col_lst)\ntest_df5 = normalize(test_df4, col_lst)\n\n\n## modeling\n\nfrom pyspark.ml.classification import LogisticRegression\nfrom pyspark.ml.classification import LogisticRegressionModel\n\nfrom pyspark.ml.classification import DecisionTreeClassifier\nfrom pyspark.ml.feature import StringIndexer, VectorIndexer\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\n\nfrom pyspark.ml.classification import RandomForestClassifier\nfrom pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\n\nfrom pyspark.ml.classification import GBTClassifier\nfrom pyspark.ml.feature import StringIndexer, VectorIndexer\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\n\nfrom pyspark.ml.classification import MultilayerPerceptronClassifier\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\n\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.pipeline import Pipeline\nfrom pyspark.ml.pipeline import PipelineModel\n\ntrain1 = train_df3_1.drop('cnt', 'user_id')\ntrain2 = train_df3_2.drop('cnt', 'user_id')\ntest1 = test_df3.select(\"*\")\n\ntrain3 = train_df5_1.select(\"*\")\ntrain4 = train_df5_2.select(\"*\")\ntest2 = test_df5.select(\"*\")\n\ntrain_features1 = [x for x in train_df3_1.columns if x not in ['hotel_cluster', 'cnt', 'user_id']]\ntrain_features1 = [x for x in train_features1 if x in test1.columns]\ntrain_features2 = [x for x in train_df5_2.columns if x != 'hotel_cluster']\ntrain_features2 = [x for x in train_features2 if x in test2.columns]\n\ntarget = \"hotel_cluster\"\n\n\n# assembler\n\nassembler1 = VectorAssembler(inputCols=train_features1, outputCol=\"features\")\nassembler2 = VectorAssembler(inputCols=train_features2, outputCol=\"features\")\n\n\n# multinomial logistic\n\nlr1 = LogisticRegression(maxIter=120, regParam=0.01, labelCol=target)\nlr2 = LogisticRegression(maxIter=120, regParam=0.01, labelCol=target)\nlr3 = LogisticRegression(maxIter=130, regParam=0.01, labelCol=target)\nlr4 = LogisticRegression(maxIter=130, regParam=0.01, labelCol=target)\n\npipeline1 = Pipeline(stages=[assembler1, lr1])\npipeline2 = Pipeline(stages=[assembler1, lr2])\npipeline3 = Pipeline(stages=[assembler2, lr3])\npipeline4 = Pipeline(stages=[assembler2, lr4])\n\npipelineModel1_1 = pipeline1.fit(train1)\npipelineModel1_2 = 
pipeline2.fit(train2)\npipelineModel1_3 = pipeline3.fit(train3)\npipelineModel1_4 = pipeline4.fit(train4)\n\npipelineModel1_1 = pipelineModel1_1.transform(test1).select(\"id\", \"prediction\")\npipelineModel1_2 = pipelineModel1_2.transform(test1).select(\"id\", \"prediction\")\npipelineModel1_3 = pipelineModel1_3.transform(test2).select(\"id\", \"prediction\")\npipelineModel1_4 = pipelineModel1_4.transform(test2).select(\"id\", \"prediction\")\n\npipelineModel1_1.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model1_result1\")\npipelineModel1_2.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model1_result2\")\npipelineModel1_3.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model1_result3\")\npipelineModel1_4.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model1_result4\")\n\n\n# decision tree\n\ndt1 = DecisionTreeClassifier(labelCol=target)\ndt2 = DecisionTreeClassifier(labelCol=target)\ndt3 = DecisionTreeClassifier(labelCol=target)\ndt4 = DecisionTreeClassifier(labelCol=target)\n\npipeline1 = Pipeline(stages=[assembler1, dt1])\npipeline2 = Pipeline(stages=[assembler1, dt2])\npipeline3 = Pipeline(stages=[assembler2, dt3])\npipeline4 = Pipeline(stages=[assembler2, dt4])\n\npipelineModel2_1 = pipeline1.fit(train1)\npipelineModel2_2 = pipeline2.fit(train2)\npipelineModel2_3 = pipeline3.fit(train3)\npipelineModel2_4 = pipeline4.fit(train4)\n\npipelineModel2_1 = pipelineModel2_1.transform(test1).select(\"id\", \"prediction\")\npipelineModel2_2 = pipelineModel2_2.transform(test1).select(\"id\", \"prediction\")\npipelineModel2_3 = pipelineModel2_3.transform(test2).select(\"id\", \"prediction\")\npipelineModel2_4 = pipelineModel2_4.transform(test2).select(\"id\", \"prediction\")\n\npipelineModel2_1.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model2_result1\")\npipelineModel2_2.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model2_result2\")\npipelineModel2_3.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model2_result3\")\npipelineModel2_4.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model2_result4\")\n\n\n# random forest\n\nrf1 = RandomForestClassifier(labelCol=target, numTrees=30)\nrf2 = RandomForestClassifier(labelCol=target, numTrees=30)\nrf3 = RandomForestClassifier(labelCol=target, numTrees=40)\nrf4 = RandomForestClassifier(labelCol=target, numTrees=40)\n\npipeline1 = Pipeline(stages=[assembler1, rf1])\npipeline2 = Pipeline(stages=[assembler1, rf2])\npipeline3 = Pipeline(stages=[assembler2, rf3])\npipeline4 = Pipeline(stages=[assembler2, rf4])\n\npipelineModel3_1 = pipeline1.fit(train1)\npipelineModel3_2 = pipeline2.fit(train2)\npipelineModel3_3 = pipeline3.fit(train3)\npipelineModel3_4 = pipeline4.fit(train4)\n\npipelineModel3_1 = pipelineModel3_1.transform(test1).select(\"id\", \"prediction\")\npipelineModel3_2 = pipelineModel3_2.transform(test1).select(\"id\", \"prediction\")\npipelineModel3_3 = pipelineModel3_3.transform(test2).select(\"id\", \"prediction\")\npipelineModel3_4 = pipelineModel3_4.transform(test2).select(\"id\", \"prediction\")\n\npipelineModel3_1.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model3_result1\")\npipelineModel3_2.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model3_result2\")\npipelineModel3_3.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model3_result3\")\npipelineModel3_4.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model3_result4\")\n\n\n# mlp\n\nlayers1 = [len(train_features1), 64, 128, 100]\nlayers2 = [len(train_features2), 64, 128, 100]\n\nmlp1 = 
MultilayerPerceptronClassifier(layers = layers1, labelCol=target, maxIter=150, blockSize=128)\nmlp2 = MultilayerPerceptronClassifier(layers = layers1, labelCol=target, maxIter=150, blockSize=128)\nmlp3 = MultilayerPerceptronClassifier(layers = layers2, labelCol=target, maxIter=170, blockSize=128)\nmlp4 = MultilayerPerceptronClassifier(layers = layers2, labelCol=target, maxIter=170, blockSize=128)\n\npipeline1 = Pipeline(stages=[assembler1, mlp1])\npipeline2 = Pipeline(stages=[assembler1, mlp2])\npipeline3 = Pipeline(stages=[assembler2, mlp3])\npipeline4 = Pipeline(stages=[assembler2, mlp4])\n\npipelineModel5_1 = pipeline1.fit(train1)\npipelineModel5_2 = pipeline2.fit(train2)\npipelineModel5_3 = pipeline3.fit(train3)\npipelineModel5_4 = pipeline4.fit(train4)\n\npipelineModel5_1 = pipelineModel5_1.transform(test1).select(\"id\", \"prediction\")\npipelineModel5_2 = pipelineModel5_2.transform(test1).select(\"id\", \"prediction\")\npipelineModel5_3 = pipelineModel5_3.transform(test2).select(\"id\", \"prediction\")\npipelineModel5_4 = pipelineModel5_4.transform(test2).select(\"id\", \"prediction\")\n\npipelineModel5_1.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model5_result1\")\npipelineModel5_2.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model5_result2\")\npipelineModel5_3.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model5_result3\")\npipelineModel5_4.write.format(\"csv\").save(\"s3://ybigta-spark-180602/model5_result4\")\n","sub_path":"Spark-Python/project_2018-1/project_submit.py","file_name":"project_submit.py","file_ext":"py","file_size_in_byte":13848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"524956873","text":"from Modules.MLPGAN import MLPGAN\nfrom Utilities import data_utilities as d_u\n\n# mnist\ndataset = d_u.MNIST_loader(root='dataset/mnist', image_size=32)\nn_chan = 1 #number of channels\n\ngan = MLPGAN(image_size=32, n_z=64, n_chan=n_chan,\n hiddens={'gen': [256, 512, 1024], 'dis': [1024, 512, 256]},\n depths={'gen': 4, 'dis': 4},\n ngpu=1)\n\n# Optional arguments\nbatch_size = 100\nn_iters = 1e05\nopt_dets = {'gen': {'name': 'adam',\n 'learn_rate': 1e-04},\n 'dis': {'name': 'adam',\n 'learn_rate': 1e-04}\n }\n\n# Call training\ngan.train(dataset=dataset, batch_size=batch_size,\n n_iters=n_iters, optimizer_details=opt_dets)\n","sub_path":"main_scripts/main_MLPGAN.py","file_name":"main_MLPGAN.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"235659901","text":"import os\nfrom os.path import join\nimport numpy as np\nimport matplotlib\n# matplotlib.use(\"AGG\")\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PolyCollection\nfrom matplotlib.patches import Ellipse\nimport LFPy\nimport neuron\nfrom neuron import h\nimport ECSbook_simcode.neural_simulations as ns\nfrom ECSbook_simcode.plotting_convention import mark_subplots, simplify_axes\nfrom ECSbook_simcode.neural_simulations import return_hay_cell\n\nneuron.load_mechanisms(ns.cell_models_folder)\n\nnp.random.seed(12345)\n\ntstop = 15\ndt = 2**-4\nsigma = 0.3\n\ndef insert_synaptic_input(cell, synidx):\n\n synapse_parameters = dict(\n idx = synidx,\n e = 0., # reversal potential\n weight = 0.01, # synapse weight\n record_current = True, # record synapse current\n syntype = 'Exp2Syn',\n tau1 = 0.1, #Time constant, rise\n tau2 = 1.0, #Time constant, decay\n )\n synapse = LFPy.Synapse(cell, **synapse_parameters)\n 
synapse.set_spike_times(np.array([3.]))\n return synapse, cell\n\n\ndef return_electrode_grid():\n xmin, xmax = [-250, 250]\n zmin, zmax = [-150, 1100]\n\n dx = 100\n dz = 100\n x_grid, z_grid = np.mgrid[xmin:xmax+dx:dx, zmin:zmax+dz:dz]\n num_elecs = len(x_grid.flatten())\n elec_grid_params = dict(\n sigma = sigma, # extracellular conductivity\n x = x_grid.flatten(),\n y = np.zeros(num_elecs),\n z = z_grid.flatten(),\n method = 'pointsource',\n )\n return elec_grid_params\n\n\ndef hay_LFP():\n\n cell = return_hay_cell(tstop=tstop, dt=dt, make_passive=False)\n\n synidx = cell.get_closest_idx(x=0, y=0, z=500)\n\n syn, cell = insert_synaptic_input(cell, synidx)\n remove_list = [\"Nap_Et2\", \"NaTa_t\", \"NaTs2_t\", \"SKv3_1\",\n \"SK_E2\", \"K_Tst\", \"K_Pst\",\n \"Im\", \"Ih\", \"CaDynamics_E2\", \"Ca_LVAst\", \"Ca\", \"Ca_HVA\"]\n cell = ns.remove_active_mechanisms(remove_list, cell)\n h.dt = dt\n for sec in neuron.h.allsec():\n if \"soma\" in sec.name():\n print(\"g_pas: {}, e_pas: {}, cm: {}, \"\n \"Ra: {}, soma_diam: {}, soma_L: {}\".format(sec.g_pas,\n sec.e_pas, sec.cm,\n sec.Ra, sec.diam,\n sec.L))\n\n cell.simulate(rec_imem=True, rec_vmem=True)\n\n plot_results(cell, \"fig_hay_LFP\", \"reconstructed neuron\", \"B\")\n cell.__del__()\n\n\ndef ball_and_stick_LFP(somatic_vmem):\n\n cell = ns.return_ball_and_stick_cell(tstop, dt)\n for sec in neuron.h.allsec():\n # Insert same passive params as Hay model\n sec.g_pas = 3.38e-05\n sec.e_pas = -90\n sec.cm = 1.0\n sec.Ra = 100\n h.dt = dt\n\n for sec in neuron.h.allsec():\n if \"soma\" in sec.name():\n print(\"Inserting vclamp\")\n vclamp = h.SEClamp_i(sec(0.5))\n vclamp.dur1 = 1e9\n vclamp.rs = 1e-9\n vmem_to_insert = h.Vector(somatic_vmem)\n vmem_to_insert.play(vclamp._ref_amp1, h.dt)\n\n cell.simulate(rec_imem=True, rec_vmem=True)\n plot_results(cell, \"fig_ball_and_stick_LFP\", \"ball and stick\", \"C\")\n cell.__del__()\n\n\ndef two_comp_LFP():\n\n cell = ns.return_two_comp_cell(tstop, dt)\n\n synidx = cell.get_closest_idx(x=0, y=0, z=800)\n syn, cell = insert_synaptic_input(cell, synidx)\n\n h.dt = dt\n for sec in neuron.h.allsec():\n # Insert same passive params as Hay model\n sec.g_pas = 3.38e-05\n sec.e_pas = -90\n sec.cm = 1.0\n sec.Ra = 100\n\n cell.simulate(rec_imem=True, rec_vmem=True)\n cell.z = np.array([[-10., 10.],\n [790., 810.]])\n\n # np.save(\"two_comp_imem.npy\", cell.imem)\n # np.save(\"two_comp_xyz.npy\", [cell.x, cell.y, cell.z])\n plot_results(cell, \"fig_two_comp_LFP\", \"two-compartment\", \"D\")\n # plot_dipole_decay(cell)\n cell.__del__()\n\ndef two_comp_dipole_decay():\n\n cell = ns.return_two_comp_cell(tstop, dt)\n\n synidx = cell.get_closest_idx(x=0, y=0, z=800)\n syn, cell = insert_synaptic_input(cell, synidx)\n\n h.dt = dt\n for sec in neuron.h.allsec():\n # Insert same passive params as Hay model\n sec.g_pas = 3.38e-05\n sec.e_pas = -90\n sec.cm = 1.0\n sec.Ra = 100\n\n cell.simulate(rec_imem=True, rec_vmem=True)\n cell.z = np.array([[-10., 10.],\n [790., 810.]])\n\n plot_two_monopole_versus_dipole(cell)\n # plot_two_monopole_decay_directions(cell)\n cell.__del__()\n\n\n\n\ndef plot_grid_LFP(cell, grid_elec_params, grid_x, grid_z,\n ax, synapses, scale_max=None):\n\n # Create a grid of measurement locations, in (um)\n grid_x, grid_z = np.mgrid[-450:451:20, -370:1200:20]\n grid_y = np.ones(grid_x.shape) * 0\n\n # Define electrode parameters\n grid_elec_params = {\n 'sigma': sigma, # extracellular conductivity\n 'x': grid_x.flatten(), # electrode positions\n 'y': grid_y.flatten(),\n 'z': 
grid_z.flatten(),\n 'method': 'linesource'\n }\n\n\n ax_lfp_dict = dict(aspect=1, frameon=False, xticks=[], yticks=[],\n ylim=[np.min(grid_z), np.max(grid_z)],\n xlim=[np.min(grid_x), np.max(grid_x)])\n\n grid_electrode = LFPy.RecExtElectrode(cell, **grid_elec_params)\n M_elec_ps = grid_electrode.get_transformation_matrix()\n lfp_ = M_elec_ps @ cell.imem * 1000\n\n\n max_amp_elec_idx = np.argmax(np.max(np.abs(lfp_), axis=1))\n max_amp_t_idx = np.argmax(np.abs(lfp_[max_amp_elec_idx, :]))\n\n max_amp_LFP = np.max(np.abs(lfp_))\n if not max_amp_LFP == np.abs(lfp_[max_amp_elec_idx, max_amp_t_idx]):\n raise RuntimeError(\"Wrong with chosen max value\")\n\n LFP = lfp_[:, max_amp_t_idx].reshape(grid_x.shape)\n\n num = 15\n levels = np.linspace(0.01, 1, num=num)\n\n print(np.max(np.abs(LFP)))\n scale_max = np.max(np.abs(LFP)) if scale_max is None else scale_max\n print(scale_max)\n\n levels_norm = scale_max * np.concatenate((-levels[::-1], levels))\n rainbow_cmap = plt.cm.get_cmap('PRGn') # rainbow, spectral, RdYlBu\n\n colors_from_map = [rainbow_cmap(i*np.int(255/(len(levels_norm) - 2)))\n for i in range(len(levels_norm) -1)]\n colors_from_map[num - 1] = (1.0, 1.0, 1.0, 1.0)\n\n [ax.plot([cell.x[idx, 0], cell.x[idx, 1]],\n [cell.z[idx, 0], cell.z[idx, 1]], lw=1, c='gray')\n for idx in range(cell.totnsegs)]\n\n [ax.plot(cell.x[syn.idx].mean(), cell.z[syn.idx].mean(),\n marker='o', c='cyan', ms=5, mec='k')\n for syn in synapses]\n\n ep_intervals = ax.contourf(grid_x, grid_z, LFP,\n zorder=2, colors=colors_from_map,\n levels=levels_norm, extend='both')\n\n ax.contour(grid_x, grid_z, LFP, colors='k', linewidths=(1), zorder=2,\n levels=levels_norm)\n return ep_intervals\n\n\ndef plot_two_monopole_versus_dipole(cell):\n\n max_t_idx = np.argmax(np.abs(cell.imem[0, :]))\n\n i = cell.imem[:, max_t_idx]\n ia = np.abs(i[0])\n l_d = cell.z.mean(axis=1)[1] - cell.z.mean(axis=1)[0]\n print(\"Ia: \", ia)\n print(\"l_d: \", l_d)\n cell.z -= l_d / 2\n\n x, y, z = cell.x.mean(axis=1), cell.y.mean(axis=1), cell.z.mean(axis=1)\n\n # Unit vector pointing from negative to positive current\n e_p_vec = np.array([0, 0, -1])\n p = ia * l_d * e_p_vec\n\n error_radius = 800\n\n num_elecs = 100\n\n elec_params_0deg = dict(\n sigma = sigma, # extracellular conductivity\n x = np.zeros(num_elecs),\n y = np.zeros(num_elecs),\n z = np.max(z) + 100 + np.linspace(0, 3000, num_elecs),\n method = 'pointsource',\n )\n\n elec_params_60deg = dict(\n sigma = sigma, # extracellular conductivity\n x = np.sin(np.deg2rad(60)) * np.linspace(0, 3000, num_elecs),\n y = np.zeros(num_elecs),\n z = np.cos(np.deg2rad(60)) * np.linspace(0, 3000, num_elecs),\n method = 'pointsource',\n )\n\n dist_0deg = np.sqrt(elec_params_0deg['x'] ** 2 + elec_params_0deg['z'] ** 2)\n dist_60deg = np.sqrt(elec_params_60deg['x'] ** 2 + elec_params_60deg['z'] ** 2)\n\n idxs_0deg = np.where(dist_0deg > error_radius)\n idxs_60deg = np.where(dist_60deg > error_radius)\n\n elec_0deg = LFPy.RecExtElectrode(cell, **elec_params_0deg)\n M_elec_0deg = elec_0deg.get_transformation_matrix()\n lfp_0deg_2m = M_elec_0deg @ i * 1000\n\n elec_60deg = LFPy.RecExtElectrode(cell, **elec_params_60deg)\n M_elec_60deg = elec_60deg.get_transformation_matrix()\n lfp_60deg_2m = M_elec_60deg @ i * 1000\n\n electrode_locs_0deg = np.array([elec_params_0deg[\"x\"],\n elec_params_0deg[\"y\"],\n elec_params_0deg[\"z\"]]).T\n electrode_locs_60deg = np.array([elec_params_60deg[\"x\"],\n elec_params_60deg[\"y\"],\n elec_params_60deg[\"z\"]]).T\n\n r_mean_0deg = electrode_locs_0deg - 
np.array([x.mean(),\n y.mean(),\n z.mean()])\n r_mean_60deg = electrode_locs_60deg - np.array([x.mean(),\n y.mean(),\n z.mean()])\n\n lfp_0deg_dp = 1000 * 1. / (4 * np.pi * sigma) * (np.dot(r_mean_0deg, p.T)\n / np.linalg.norm(r_mean_0deg, axis=1) ** 3)\n\n lfp_60deg_dp = 1000 * 1. / (4 * np.pi * sigma) * (np.dot(r_mean_60deg, p.T)\n / np.linalg.norm(r_mean_60deg, axis=1) ** 3)\n\n\n grid_x, grid_z = np.mgrid[-2000:2001:27, -3000:3002:27]\n grid_y = np.zeros(grid_x.shape)\n\n # Define electrode parameters\n grid_electrode_parameters = {\n 'sigma' : sigma, # extracellular conductivity\n 'x' : grid_x.flatten(), # electrode requires 1d vector of positions\n 'y' : grid_y.flatten(),\n 'z' : grid_z.flatten(),\n 'method': 'pointsource'\n }\n elec_grid = LFPy.RecExtElectrode(cell, **grid_electrode_parameters)\n M_elec_grid = elec_grid.get_transformation_matrix()\n lfp_2m = M_elec_grid @ i * 1000\n lfp_2m = lfp_2m.reshape(grid_x.shape)\n\n rvec_0deg = np.dot(np.linspace(0, 3000, 100)[:, None],\n np.array([np.sin(0), 0, np.cos(0)])[:, None].T)\n\n rvec_60deg = np.dot(np.linspace(0, 3000, 100)[:, None],\n np.array([np.sin(np.deg2rad(60)),\n 0,\n np.cos(np.deg2rad(60))])[:, None].T)\n\n rvec_0deg += np.array([0, 0, np.max(z) + 100])\n rvec_60deg += np.array([0, 0, l_d/2])\n\n # dipole grid\n electrode_locs = np.array([grid_x.flatten(),\n grid_y.flatten(),\n grid_z.flatten()]).T\n r_mean = electrode_locs - np.array([x.mean(), y.mean(), z.mean()])\n\n lfp_dp_grid = 1000 * 1. / (4 * np.pi * sigma) * (np.dot(r_mean, p.T)\n / np.linalg.norm(r_mean, axis=1) ** 3).reshape(grid_x.shape)\n\n\n plt.close(\"all\")\n fig = plt.figure(figsize=[10, 4])\n fig.subplots_adjust(left=0.03, wspace=0.5, right=0.98, bottom=0.17)\n\n ax_2m = fig.add_subplot(141, aspect=1, title=\"two-monopole\",\n frameon=False, xticks=[], yticks=[],\n xlim=[-2000, 2000], ylim=[-3100, 3100])\n ax_dp = fig.add_subplot(142, aspect=1, title=\"dipole\", frameon=False,\n xticks=[], yticks=[],\n xlim=[-2000, 2000], ylim=[-3100, 3100])\n ax_diff = fig.add_subplot(143, aspect=1, title=\"difference\", frameon=False,\n xticks=[], yticks=[],\n xlim=[-800, 800], ylim=[-1200, 1200])\n ax2 = fig.add_subplot(144, xlabel=\"distance (µm)\", ylabel=\"|$\\phi$| (µV)\")\n\n num = 15\n levels = np.logspace(-3, 0, num=num)\n\n print(np.max(np.abs(lfp_2m)))\n scale_max = 10 #np.max(np.abs(lfp_2m))\n print(scale_max)\n\n levels_norm = scale_max * np.concatenate((-levels[::-1], levels))\n rainbow_cmap = plt.cm.get_cmap('PRGn') # rainbow, spectral, RdYlBu\n\n colors_from_map = [rainbow_cmap(i*np.int(255/(len(levels_norm) - 2)))\n for i in range(len(levels_norm) -1)]\n colors_from_map[num - 1] = (1.0, 1.0, 1.0, 1.0)\n\n\n ep_2m = ax_2m.contourf(grid_x, grid_z, lfp_2m,\n zorder=2, colors=colors_from_map,\n levels=levels_norm, extend='both')\n\n ax_2m.contour(grid_x, grid_z, lfp_2m, colors='k',\n linewidths=(1), zorder=2, levels=levels_norm)\n\n ep_dp = ax_dp.contourf(grid_x, grid_z, lfp_dp_grid,\n zorder=2, colors=colors_from_map,\n levels=levels_norm, extend='both')\n\n ax_dp.contour(grid_x, grid_z, lfp_dp_grid, colors='k',\n linewidths=(1), zorder=2, levels=levels_norm)\n\n ep_diff = ax_diff.contourf(grid_x, grid_z, lfp_dp_grid - lfp_2m,\n zorder=2, colors=colors_from_map,\n levels=levels_norm, extend='both')\n\n ax_diff.contour(grid_x, grid_z, lfp_dp_grid - lfp_2m, colors='k',\n linewidths=(1), zorder=2,\n levels=levels_norm)\n\n imgs = [ep_2m, ep_dp, ep_diff]\n\n for i, ax in enumerate([ax_2m, ax_dp, ax_diff]):\n ax.plot(x, z, 'o', c='k')\n [ax.plot(x[i], z[i], 
'+_'[i], c='w') for i in range(2)]\n\n if i < 2:\n ax.plot(elec_params_0deg['x'][idxs_0deg],\n elec_params_0deg['z'][idxs_0deg], ['-', '--'][i], c='b')\n ax.plot(elec_params_60deg['x'][idxs_60deg],\n elec_params_60deg['z'][idxs_60deg], ['-', '--'][i], c='r')\n\n ax.add_patch(plt.Circle((0, 0), radius=error_radius,\n color='none', zorder=50, ls='--',\n fill=True, ec='cyan', lw=3))\n ax_x1, ax_y1, ax_w, ax_h = ax.get_position().bounds\n\n cax = fig.add_axes([ax_x1, 0.19, ax_w, 0.01], frameon=False)\n cbar = fig.colorbar(imgs[i], cax=cax, orientation=\"horizontal\")\n cbar.set_label('$\\phi$ (µV)', labelpad=0)\n cbar.set_ticks(scale_max * np.array([-1, -0.1, -0.01, 0, 0.01, 0.1, 1]))\n\n cax.set_xticklabels(cax.get_xticklabels(), rotation=40)\n\n ax2.axvline(error_radius, lw=2, c='cyan', ls='--')\n l1, = ax2.plot(dist_0deg[idxs_0deg], np.abs(lfp_0deg_2m[idxs_0deg]), 'b')\n l2, = ax2.plot(dist_0deg[idxs_0deg], np.abs(lfp_0deg_dp[idxs_0deg]), 'b--')\n l3, = ax2.plot(dist_60deg[idxs_60deg], np.abs(lfp_60deg_2m[idxs_60deg]), 'r')\n l4, = ax2.plot(dist_60deg[idxs_60deg], np.abs(lfp_60deg_dp[idxs_60deg]), 'r--')\n\n ax2.legend([l1, l2, l3, l4], [r\"2-monopole $\\theta=0^{\\circ}$\",\n r\"dipole $\\theta=0^{\\circ}$\",\n r\"2-monopole $\\theta=60^{\\circ}$\",\n r\"dipole $\\theta=60^{\\circ}$\",\n ], frameon=False, fontsize=9.5, loc=(0.1, 0.7))\n\n ax_2m.plot([1800, 1800], [-2000, -1000], lw=2, c='k', clip_on=False)\n ax_2m.text(1900, -1500, \"1000 µm\", va='center')\n\n ax_dp.plot([1800, 1800], [-2000, -1000], lw=2, c='k', clip_on=False)\n ax_dp.text(1900, -1500, \"1000 µm\", va='center')\n\n ax_diff.plot([430, 430], [-750, -1150], lw=2, c='k', clip_on=False)\n ax_diff.text(460, -950, \"400 µm\", va='center')\n\n simplify_axes(ax2)\n mark_subplots([ax_2m, ax_dp, ax_diff, ax2], ypos=1.05, xpos=0.)\n plt.savefig(join(os.path.dirname(__file__), \"dipole_decay.png\"))\n plt.close(\"all\")\n\n # Also make other simpler figure\n fig = plt.figure(figsize=[4, 4])\n fig.subplots_adjust(left=0.2, right=0.98, bottom=0.17, top=0.98)\n\n ax2 = fig.add_subplot(111, xlabel=\"distance (µm)\", ylabel=\"|$\\phi$| (µV)\")\n\n # ax2.axvline(error_radius, lw=2, c='cyan', ls='--')\n l1, = ax2.plot(dist_0deg[idxs_0deg], np.abs(lfp_0deg_2m[idxs_0deg]), 'b')\n l2, = ax2.plot(dist_0deg[idxs_0deg], np.abs(lfp_0deg_dp[idxs_0deg]), 'b--')\n l3, = ax2.plot(dist_60deg[idxs_60deg], np.abs(lfp_60deg_2m[idxs_60deg]), 'r')\n l4, = ax2.plot(dist_60deg[idxs_60deg], np.abs(lfp_60deg_dp[idxs_60deg]), 'r--')\n\n ax2.legend([l1, l2, l3, l4], [r\"2-monopole $\\theta=0^{\\circ}$\",\n r\"dipole $\\theta=0^{\\circ}$\",\n r\"2-monopole $\\theta=60^{\\circ}$\",\n r\"dipole $\\theta=60^{\\circ}$\",\n ], frameon=False, loc=\"upper right\")\n\n simplify_axes(ax2)\n plt.savefig(join(os.path.dirname(__file__), \"dipole_decay_simpler.pdf\"), dpi=300)\n plt.close(\"all\")\n\n\ndef plot_two_monopole_decay_directions(cell):\n\n max_t_idx = np.argmax(np.abs(cell.imem[0, :]))\n\n i = cell.imem[:, max_t_idx]\n\n l_d = cell.z.mean(axis=1)[1] - cell.z.mean(axis=1)[0]\n cell.z -= l_d / 2\n\n x, y, z = cell.x.mean(axis=1), cell.y.mean(axis=1), cell.z.mean(axis=1)\n\n print(x, y, z)\n\n error_radius = 600\n\n num_elecs = 200\n\n elec_params_0deg = dict(\n sigma = sigma, # extracellular conductivity\n x = np.zeros(num_elecs),\n y = np.zeros(num_elecs),\n z = np.linspace(0, 10000, num_elecs),\n method = 'pointsource',\n )\n\n elec_params_60deg = dict(\n sigma = sigma, # extracellular conductivity\n x = np.sin(np.deg2rad(60)) * np.linspace(0, 10000, num_elecs),\n 
y = np.zeros(num_elecs),\n z = np.cos(np.deg2rad(60)) * np.linspace(0, 10000, num_elecs),\n method = 'pointsource',\n )\n\n elec_params_perp = dict(\n sigma = sigma, # extracellular conductivity\n x = np.linspace(0, 10000, num_elecs),\n y = np.zeros(num_elecs),\n z = np.ones(num_elecs) * l_d / 2,\n method = 'pointsource',\n )\n\n dist_0deg = np.sqrt(elec_params_0deg['x'] ** 2 + elec_params_0deg['z'] ** 2)\n dist_60deg = np.sqrt(elec_params_60deg['x'] ** 2 + elec_params_60deg['z'] ** 2)\n dist_perp = np.sqrt(elec_params_perp['x'] ** 2 + elec_params_perp['z'] ** 2)\n\n idxs_0deg = np.where(dist_0deg > error_radius)\n idxs_60deg = np.where(dist_60deg > error_radius)\n idxs_perp = np.where(dist_perp > error_radius)\n\n elec_0deg = LFPy.RecExtElectrode(cell, **elec_params_0deg)\n M_elec_0deg = elec_0deg.get_transformation_matrix()\n lfp_0deg_2m = M_elec_0deg @ i * 1000\n\n elec_60deg = LFPy.RecExtElectrode(cell, **elec_params_60deg)\n M_elec_60deg = elec_60deg.get_transformation_matrix()\n lfp_60deg_2m = M_elec_60deg @ i * 1000\n\n elec_perp = LFPy.RecExtElectrode(cell, **elec_params_perp)\n M_elec_perp = elec_perp.get_transformation_matrix()\n lfp_perp_2m = M_elec_perp @ i * 1000\n\n grid_x, grid_z = np.mgrid[-2000:2001:27, -3000:3002:27]\n grid_y = np.zeros(grid_x.shape)\n\n # Define electrode parameters\n grid_electrode_parameters = {\n 'sigma' : sigma, # extracellular conductivity\n 'x' : grid_x.flatten(), # electrode requires 1d vector of positions\n 'y' : grid_y.flatten(),\n 'z' : grid_z.flatten(),\n 'method': 'pointsource'\n }\n elec_grid = LFPy.RecExtElectrode(cell, **grid_electrode_parameters)\n M_elec_grid = elec_grid.get_transformation_matrix()\n lfp_2m = M_elec_grid @ i * 1000\n lfp_2m = lfp_2m.reshape(grid_x.shape)\n\n plt.close(\"all\")\n fig = plt.figure(figsize=[5, 4])\n fig.subplots_adjust(left=0.03, wspace=0.6, right=0.98, bottom=0.17)\n\n ax_2m = fig.add_subplot(121, aspect=1,\n frameon=False, xticks=[], yticks=[],\n xlim=[-2000, 2000], ylim=[-3100, 3100])\n\n ax2 = fig.add_subplot(122, xlabel=\"distance (mm)\",\n xscale=\"log\", yscale=\"log\",\n ylabel=\"|$\\phi$| (µV)\")\n\n num = 15\n levels = np.logspace(-3, 0, num=num)\n\n print(np.max(np.abs(lfp_2m)))\n scale_max = 10 #np.max(np.abs(lfp_2m))\n print(scale_max)\n\n levels_norm = scale_max * np.concatenate((-levels[::-1], levels))\n rainbow_cmap = plt.cm.get_cmap('PRGn') # rainbow, spectral, RdYlBu\n\n colors_from_map = [rainbow_cmap(i*np.int(255/(len(levels_norm) - 2)))\n for i in range(len(levels_norm) -1)]\n colors_from_map[num - 1] = (1.0, 1.0, 1.0, 1.0)\n\n\n ep_2m = ax_2m.contourf(grid_x, grid_z, lfp_2m,\n zorder=2, colors=colors_from_map,\n levels=levels_norm, extend='both')\n\n ax_2m.contour(grid_x, grid_z, lfp_2m, colors='k',\n linewidths=(1), zorder=2, levels=levels_norm)\n\n\n ax_2m.plot(x, z, 'o', c='k')\n [ax_2m.plot(x[i], z[i], '+_'[i], c='w') for i in range(2)]\n\n ax_2m.plot(elec_params_0deg['x'][idxs_0deg],\n elec_params_0deg['z'][idxs_0deg], '--', c='b')\n ax_2m.plot(elec_params_60deg['x'][idxs_60deg],\n elec_params_60deg['z'][idxs_60deg], '--', c='r')\n\n ax_2m.plot(elec_params_perp['x'][idxs_perp],\n elec_params_perp['z'][idxs_perp], '--', c='orange')\n\n\n ax_2m.add_patch(plt.Circle((0, 0), radius=error_radius,\n color='none', zorder=50, ls='--',\n fill=True, ec='cyan', lw=3))\n ax_2m.plot(0, 0, 'o', c='cyan')\n ax_x1, ax_y1, ax_w, ax_h = ax_2m.get_position().bounds\n\n cax = fig.add_axes([ax_x1, 0.19, ax_w, 0.01], frameon=False)\n cbar = fig.colorbar(ep_2m, cax=cax, orientation=\"horizontal\")\n 
cbar.set_label('$\\phi$ (µV)', labelpad=0)\n cbar.set_ticks(scale_max * np.array([-1, -0.1, -0.01, 0, 0.01, 0.1, 1]))\n\n cax.set_xticklabels(cax.get_xticklabels(), rotation=40)\n\n ax2.axvline(error_radius / 1000, lw=2, c='cyan', ls='--')\n l1, = ax2.loglog(dist_0deg[idxs_0deg] / 1000, np.abs(lfp_0deg_2m[idxs_0deg]), 'b')\n # l2, = ax2.plot(dist_0deg[idxs_0deg], np.abs(lfp_0deg_dp[idxs_0deg]), 'b--')\n l3, = ax2.loglog(dist_60deg[idxs_60deg] / 1000, np.abs(lfp_60deg_2m[idxs_60deg]), 'r')\n l4, = ax2.loglog(dist_perp[idxs_perp] / 1000, np.abs(lfp_perp_2m[idxs_perp]), 'orange')\n # l4, = ax2.plot(dist_60deg[idxs_60deg], np.abs(lfp_60deg_dp[idxs_60deg]), 'r--')\n\n ax2.legend([l1, l3, l4], [r\"$\\theta=0^{\\circ}$\",\n r\"$\\theta=60^{\\circ}$\",\n r\"perpendicular\",\n ],\n frameon=False, fontsize=9.5, loc=(0.25, 0.75))\n\n # Making 1/r**3 markers\n r1 = dist_perp[idxs_perp][-1] / 1000\n r0 = r1 * 0.5\n r = np.linspace(r0, r1, 10)\n slope_factor = np.abs(lfp_perp_2m[idxs_perp])[-1] * r1**3\n y = slope_factor / r ** 3\n ax2.plot(r, y, lw=3, c='k')\n ax2.text(r0*0.85, y[-1], \"1/r$^3$\", ha=\"left\")\n\n # Making 1/r**2 markers\n r1 = dist_0deg[idxs_0deg][-1] / 1000\n r0 = r1 * 0.5\n r = np.linspace(r0, r1, 10)\n slope_factor = np.abs(lfp_0deg_2m[idxs_0deg])[-1] * r1**2\n y = slope_factor / r ** 2\n ax2.plot(r, y, lw=3, c='k')\n ax2.text(r0*1.1, y[0], \"1/r$^2$\", ha=\"left\")\n\n ax_2m.plot([1800, 1800], [-2000, -1000], lw=2, c='k', clip_on=False)\n ax_2m.text(1900, -1500, \"1 mm\", va='center')\n\n ax2.grid(True)\n\n simplify_axes(ax2)\n mark_subplots([ax_2m, ax2], ypos=1.05, xpos=0.)\n plt.savefig(join(os.path.dirname(__file__), \"two_monopole_decay_direction.png\"))\n plt.close(\"all\")\n\n\n\n\n\ndef plot_results(cell, figname, figtitle, subplot_marker):\n\n elec_grid_params = return_electrode_grid()\n\n elec = LFPy.RecExtElectrode(cell, **elec_grid_params)\n M_elec = elec.get_transformation_matrix()\n eaps = M_elec @ cell.imem * 1000\n\n xmin = np.min(elec_grid_params[\"x\"])\n xmax = np.max(elec_grid_params[\"x\"])\n zmin = np.min(elec_grid_params[\"z\"])\n zmax = np.max(elec_grid_params[\"z\"])\n\n eap_idxs = np.where((np.abs(elec_grid_params[\"z\"] - 850) < 1e-9) &\n (elec_grid_params[\"x\"] > 0))[0]\n\n\n eap_clrs = {idx: plt.cm.Reds_r(num / (len(eap_idxs)))\n for num, idx in enumerate(eap_idxs)}\n\n fig = plt.figure(figsize=[5, 6.5])\n\n # fig.suptitle(figtitle)\n ax_morph = fig.add_axes([0.01, 0.01, 0.65, 0.93], frameon=False, aspect=1,\n xticks=[], yticks=[], xlim=[xmin - 25, xmax + 100],\n ylim=[zmin-10, zmax + 5])\n\n ax_imem = fig.add_axes([0.67, 0.55, 0.3, 0.25],\n title=\"normalized\\ntransmembrane\\ncurrents\",\n xticks=[], yticks=[], frameon=False,)\n\n ax_eap = fig.add_axes([0.67, 0.15, 0.3, 0.25],\n title=\"normalized\\nLFP traces\",\n xticks=[], yticks=[], frameon=False,\n ylim=[-1.05, 0.1])\n\n ax_imem.plot(cell.tvec, cell.imem[0, :] /\n np.max(np.abs(cell.imem[0, :])), 'blue', lw=2)\n ax_imem.plot(cell.tvec, cell.imem[cell.synidx[0], :] /\n np.max(np.abs(cell.imem[cell.synidx[0], :])), 'green', lw=2)\n\n\n ax_imem.text(5, 0.5, \"bottom\\ncomp.\", ha=\"left\", c=\"b\")\n ax_imem.text(5, -0.7, \"top\\ncomp.\", ha=\"left\", c=\"g\")\n\n\n for n, elec_idx in enumerate(eap_idxs[::-1]):\n c = eap_clrs[elec_idx]\n eap_norm = eaps[elec_idx] / np.max(np.abs(eaps[elec_idx]))\n ls = '-'# if n == (len(eap_idxs) - 1) else '-'\n ax_eap.plot(cell.tvec, eap_norm, c=c, lw=2, ls=ls)\n x = int(elec_grid_params[\"x\"][elec_idx])\n fig.text(0.80, 0.2 + n * 0.04, \"x={:d} µm\".format(x), 
c=c)\n\n if \"two_comp\" in figname:\n ax_morph.plot(cell.x[0].mean(), cell.z[0].mean(), 'bo', ms=12)\n ax_morph.plot(cell.x[0].mean(), cell.z[0].mean(), 'w+', ms=12)\n ax_morph.plot(cell.x[1].mean(), cell.z[1].mean(), 'go', ms=12)\n ax_morph.plot(cell.x[1].mean(), cell.z[1].mean(), 'w_', ms=12)\n\n\n else:\n zips = []\n for x, z in cell.get_pt3d_polygons():\n zips.append(list(zip(x, z)))\n polycol = PolyCollection(zips, edgecolors='none',\n facecolors='0.4', zorder=-1, rasterized=False)\n ax_morph.add_collection(polycol)\n\n l_syn, = ax_morph.plot(cell.x[cell.synidx].mean(axis=1),\n cell.z[cell.synidx].mean(axis=1), c='y',\n marker='*', ls='none')\n fig.legend([l_syn], [\"synapse\"], loc=(0.63, 0.92),\n frameon=False, handlelength=0.5)\n t1 = 10\n t1_idx = np.argmin(np.abs(cell.tvec - t1))\n dz = np.abs(np.diff(elec.z))[0]\n num_elecs = len(elec.x)\n eap_norm = dz * 0.7 / np.max(np.abs(eaps))\n t_norm = cell.tvec[:t1_idx] / t1 * dz * 0.7\n for elec_idx in range(num_elecs):\n c = eap_clrs[elec_idx] if elec_idx in eap_idxs else 'k'\n x, z = elec.x[elec_idx], elec.z[elec_idx]\n ax_morph.plot(x, z, '.', c='k', ms=3)\n eap = eaps[elec_idx, :t1_idx] * eap_norm\n ax_morph.plot(x + t_norm, z + eap, c=c, lw=2)\n\n ax_morph.plot([150, 250], [-15, -15], c='gray', lw=2)\n ax_morph.text(200, 0, \"100 µm\", ha=\"center\", c='gray')\n\n ax_morph.plot([150, 150 + t_norm[-1]], [775, 775], c='k', lw=2)\n ax_morph.text(157, 777, \"{:d} ms\".format(int(t1)), ha=\"center\",\n va=\"bottom\", c='k')\n\n ax_morph.plot([130, 130], [720 - 1 * eap_norm, 720], c='k', lw=2,\n clip_on=False)\n ax_morph.text(135, 720 - 0.5 * eap_norm, \"1 µV\", ha=\"left\",\n c='k', va=\"center\")\n\n\n ax_imem.plot([0, 15], [-1.1, -1.1], c='k', lw=2, clip_on=False)\n ax_imem.text(7, -1.15, \"15 ms\", va='top', ha='center')\n\n ax_eap.plot([0, 15], [-1.1, -1.1], c='k', lw=2, clip_on=False)\n ax_eap.text(7, -1.15, \"15 ms\", va='top', ha='center')\n\n mark_subplots(ax_morph, \"A\", xpos=0.05, ypos=1.03)\n mark_subplots(ax_imem, \"B\", xpos=0.05, ypos=1.38)\n mark_subplots(ax_eap, \"C\", xpos=0.05, ypos=1.2)\n\n fig.savefig(join(os.path.dirname(__file__), \"{}.png\".format(figname)),\n dpi=300)\n\n\nif __name__ == '__main__':\n # save_somatic_spike_vmem()\n # soma_t, soma_vmem = np.load(\"somatic_vmem.npy\")\n # hay_LFP()\n # ball_and_stick_spike_replay(soma_vmem)\n # two_comp_LFP()\n two_comp_dipole_decay()\n\n\n","sub_path":"ECSbook_simcode/hay_LFPs/compare_LFP_2comp_bns_hay.py","file_name":"compare_LFP_2comp_bns_hay.py","file_ext":"py","file_size_in_byte":28404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"326214018","text":"\n\nfrom xai.brain.wordbase.nouns._decimal import _DECIMAL\n\n#class header\nclass _DECIMALS(_DECIMAL, ):\n\tdef __init__(self,): \n\t\t_DECIMAL.__init__(self)\n\t\tself.name = \"DECIMALS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"decimal\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_decimals.py","file_name":"_decimals.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"48379894","text":"# Given that the user has not input 'exit'\n# Write a program that takes a user's input and rolls dice\n# input will be similar to the discord sidekick bot\n# https://github.com/ArtemGr/Sidekick\n# examples: \"1d8 + 4d6\", \"4d6 == 5\", \"4d6 - 5\", \"1d20 >= 10\"\n# hint: use split() and eval()\n# https://docs.python.org/3/library/functions.html\n\nfrom random import randint\n\n# single die roll\n\ndieRoll = input('Enter your roll: ')\ndieRoll = dieRoll.lower()\nrollResult = dieRoll.split('d',-1)\nnumDie = int(rollResult[0])\nmaxResult = int(rollResult[1])\n\ndef rollDie():\n for roll in range(numDie):\n print(randint(1, maxResult))\n\nrollDie()\n","sub_path":"week2/W02E05S01.py","file_name":"W02E05S01.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"464686277","text":"# _*_ coding: utf_8 _*_\n\"\"\"\nThe Replace Space With Newline problem is an interesting software synthesis\nproblem. The RSWN problem is specified as:\n\nGiven a string input, print the string, replacing spaces with newlines.\nThe input string will not have tabs or newlines, but may have multiple spaces\nin a row. It will have maximum length of 20 characters. Also, the program\nshould return the integer count of the non-whitespace characters.\n\nThis problem requires PushGP to evolve a program that manipulates more than\none data type. This problem also requires the printing of a value on top of\nproducing another value.\n\"\"\"\nimport random\n\nfrom pyshgp.push.interpreter import PushInterpreter\nfrom pyshgp.push.registered_instructions import get_instructions_by_pysh_type\nfrom pyshgp.gp.evolvers import SimplePushGPEvolver\nfrom pyshgp.utils import Character, merge_sets, levenshtein_distance\n\n\ndef rswn(s: str) -> (str, int):\n new_s = s.replace(' ', '\\n')\n i = len(''.join(new_s.split()))\n return (new_s, i)\n\n\nrequired_training_cases = [(s, rswn(s)) for s in [\n \"\", \"A\", \"*\", \" \", \"s\", \"B \", \" \", \" D\", \"ef\", \"!!\", \" F \", \"T L\", \"4ps\",\n \"q \", \" \", \" e\", \"hi \", \" $ \", \" 9\", \"i !i !i !i !i\",\n \"88888888888888888888\", \" \", \"ssssssssssssssssssss\",\n \"1 1 1 1 1 1 1 1 1 1 \", \" v v v v v v v v v v\", \"Ha Ha Ha Ha Ha Ha Ha\",\n \"x y!x y!x y!x y!x y!\", \"G5G5G5G5G5G5G5G5G5G5\", \">_=]>_=]>_=]>_=]>_=]\",\n \"^_^ ^_^ ^_^ ^_^ ^_^ \"\n ]]\n\n\ndef random_str(str_length):\n s = \"\"\n for i in range(str_length):\n if random.random() < 0.2:\n s += \" \"\n else:\n s += random.choice(\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\")\n return s\n\n\ndef generate_cases(n_cases: int) -> list:\n cases = []\n for i in range(n_cases):\n l = random.randint(1, 20)\n inpt = random_str(l)\n targets = rswn(inpt)\n cases.append((inpt, targets))\n return cases\n\n\ntraining_set = required_training_cases + generate_cases(50)\ntesting_set = generate_cases(50)\n\n\ndef error_function(program, debug=False):\n errors = []\n for io_pair in training_set:\n interpreter = PushInterpreter()\n int_result = interpreter.run(program, [io_pair[0]], ['_integer'], debug)[0]\n str_result = interpreter.state.stdout\n\n int_error = None\n str_error = levenshtein_distance(io_pair[1][0], str_result)\n if int_result is None:\n # If response is un-evaluatable, add a bad error.\n int_error = 1e5\n else:\n int_error = abs(int_result - io_pair[1][1])\n errors += [str_error, int_error]\n return errors\n\n\natom_generators = list(merge_sets(\n get_instructions_by_pysh_type(\"_integer\"),\n get_instructions_by_pysh_type(\"_boolean\"),\n get_instructions_by_pysh_type(\"_string\"),\n get_instructions_by_pysh_type(\"_char\"),\n get_instructions_by_pysh_type(\"_exec\"),\n get_instructions_by_pysh_type(\"_print\"),\n [lambda: Character(\" \"),\n lambda: Character(\"\\n\"),\n # ERCs\n lambda: Character(random.choice(\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\\n\\t\")),\n lambda: random_str(random.randint(0, 21))]))\n\nif __name__ == \"__main__\":\n evo = SimplePushGPEvolver(n_jobs=-1, verbose=1,\n atom_generators=atom_generators,\n initial_max_genome_size=400,\n selection_method='lexicase')\n evo.fit(error_function, 1, ['_integer'])\n","sub_path":"examples/software/replace_space_with_newline.py","file_name":"replace_space_with_newline.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"520838621","text":"import numpy as np\r\nimport matplotlib.ticker as ticker\r\n\r\n\r\ndef label_offset(ax, axis=\"y\"):\r\n \"\"\"\r\n\r\n Removes axis ticklabel offsets (e.g. exponents) and moves them to the axis \r\n label. Label is dynamically updated when axis range changes.\r\n\r\n \"\"\"\r\n\r\n if axis == \"y\":\r\n fmt = ax.yaxis.get_major_formatter()\r\n ax.yaxis.offsetText.set_visible(False)\r\n labelfunc = ax.set_ylabel\r\n label = ax.get_ylabel()\r\n\r\n elif axis == \"x\":\r\n fmt = ax.xaxis.get_major_formatter()\r\n ax.xaxis.offsetText.set_visible(False)\r\n labelfunc = ax.set_xlabel\r\n label = ax.get_xlabel()\r\n\r\n def update_label(_):\r\n offset = fmt.get_offset()\r\n if offset == '':\r\n labelfunc(\"{}\".format(label))\r\n else:\r\n labelfunc(\"{} ({})\".format(label, offset))\r\n return\r\n\r\n ax.callbacks.connect(\"ylim_changed\", update_label)\r\n ax.callbacks.connect(\"xlim_changed\", update_label)\r\n ax.figure.canvas.draw()\r\n update_label(None)\r\n return\r\n\r\n\r\ndef corner_plot(fig, samples, bins=100, ranges=None, labels=None, cmap='viridis', plot_type='hist',\r\n facecolor='C0', edgecolor=None, density=True):\r\n \"\"\"Generate a corner plot.\r\n \r\n Using MCMC samples, generate a corner plot - a set of 2D histograms\r\n showing the bivariate distributions for each pair of model parameters.\r\n \r\n Parameters:\r\n ----------\r\n fig : {matplotlib.figure.Figure}\r\n Matplotlib figure in which to draw the corner plot. Should be empty.\r\n samples : {numpy.ndarray}\r\n MCMC samples of shape (nwalkers, nsamples, ndim).\r\n bins : {int}, optional\r\n Number of bins along each axis of each histogram.\r\n ranges : {sequence}, optional\r\n A list of bounds (min, max) for each histogram plot. (the default \r\n is None, which automatically chooses 3*sigma bounds about the mean.)\r\n labels : {list}, optional\r\n List of names of model parameters. Must be of length *ndim* (the \r\n default is None, which makes blank labels).\r\n cmap : {str}, optional\r\n Name of the matplotlib colormap used for the 2D histograms (the\r\n default is 'viridis').\r\n plot_type : {str}, optional\r\n Specify the plot type. Should be one of\r\n * 'hex'\r\n * 'hist'\r\n\r\n \"\"\"\r\n\r\n # Handling the arguments\r\n if len(np.shape(samples)) != 3:\r\n raise ValueError(\"Samples must be of shape (nwalkers, nsamples, ndim), not {}\".format(np.shape(samples)))\r\n else:\r\n _, nsamples, ndim = np.shape(samples)\r\n samples = samples.reshape((-1, ndim))\r\n\r\n if nsamples <= ndim:\r\n raise ValueError(\"Number of samples <= number of dimensions. Is this intended for this dataset?\")\r\n\r\n if isinstance(bins, int):\r\n bins = np.array([bins for _ in range(ndim)])\r\n elif len(np.shape(bins)) != 1:\r\n raise ValueError(\"Bins should be a 1D array or an integer.\")\r\n elif np.shape(bins)[0] != ndim:\r\n raise ValueError(\"Dimension mismatch between bins and number of parameters in samples.\")\r\n else:\r\n bins = np.asarray(bins)\r\n\r\n if ranges is None:\r\n ranges = [nice_bounds(samples[:, i]) for i in range(ndim)]\r\n elif len(ranges) != ndim:\r\n raise ValueError(\"Dimension mismatch between ranges and number of columns in samples.\")\r\n else:\r\n ranges = [nice_bounds(samples[:, i]) if ranges[i] is None else ranges[i] for i in range(ndim)]\r\n\r\n if labels is None:\r\n labels = [\"\" for _ in range(ndim)]\r\n\r\n # Divide the figure into a bunch of subplots, and remove whitespace\r\n # between plots\r\n axes = fig.subplots(ndim, ndim, sharex='col')\r\n fig.subplots_adjust(left=0.1, bottom=0.1, right=0.98, top=0.98, wspace=0.05, hspace=0.05)\r\n\r\n if ndim == 1:\r\n hist_1d(ax=axes,\r\n samples=samples[:, 0],\r\n bins=bins[0],\r\n bounds=ranges[0],\r\n label=labels[0],\r\n show_xlabels=True,\r\n density=density,\r\n facecolor=facecolor,\r\n edgecolor=edgecolor)\r\n\r\n else:\r\n for i in range(ndim):\r\n\r\n # Plot the 1D histograms along the diagonal. If i == ndim-1,\r\n # make xticklabels. Otherwise, omit them.\r\n hist_1d(ax=axes[i, i],\r\n samples=samples[:, i],\r\n bins=bins[i],\r\n bounds=ranges[i],\r\n label=labels[i],\r\n show_xlabels=(i == ndim - 1),\r\n facecolor=facecolor,\r\n edgecolor=edgecolor)\r\n\r\n # Plot the 2D histograms in the lower left corner\r\n for j in range(ndim):\r\n\r\n if j > i:\r\n axes[i, j].axis('off')\r\n elif j < i:\r\n\r\n hist_2d(ax=axes[i, j],\r\n xsamples=samples[:, j],\r\n ysamples=samples[:, i],\r\n xbins=bins[j],\r\n ybins=bins[i],\r\n xbounds=ranges[j],\r\n ybounds=ranges[i],\r\n xlabel=labels[j],\r\n ylabel=labels[i],\r\n cmap=cmap,\r\n plot_type=plot_type,\r\n show_ylabels=(j == 0),\r\n show_xlabels=(i == ndim - 1),\r\n density=density)\r\n\r\n for tick in axes[i, j].get_xticklabels():\r\n tick.set_rotation(45)\r\n\r\n return\r\n\r\n\r\ndef hist_1d(ax, samples, bins, bounds, label, show_xlabels, density=True, facecolor='C0', edgecolor=None):\r\n # ax.hist(samples, bins=bins, range=bounds)\r\n\r\n pdf, xedges = np.histogram(samples, bins=bins, range=bounds, density=density)\r\n pdf = np.append(pdf, 0)\r\n ax.fill_between(xedges, pdf, step='post', facecolor=facecolor, edgecolor=edgecolor)\r\n\r\n ax.set_yticklabels([])\r\n ax.set_xlim(nice_bounds(samples))\r\n\r\n if show_xlabels:\r\n ax.set_xlabel(label)\r\n ax.get_xaxis().set_major_locator(ticker.MaxNLocator(nbins=5, prune='upper'))\r\n label_offset(ax, \"x\")\r\n\r\n return\r\n\r\n\r\ndef hist_2d(ax, xsamples, ysamples, xbins, ybins, xbounds, ybounds, xlabel, ylabel, cmap, plot_type, show_ylabels,\r\n show_xlabels, density=True):\r\n if plot_type in [\"hist\", \"histogram\"]:\r\n\r\n # matplotlib's ax.hist2d makes a patch for each bin (bug?). Instead, use imshow to make a cleaner, faster plot.\r\n\r\n # By default, np.histogram2d histograms x-values along the first dimension of the pdf, and y-values along\r\n # the second dimension. This is opposite to how we want to display the data, which is why the x and y values\r\n # are swapped here. 
\r\n pdf, yedges, xedges = np.histogram2d(ysamples, \r\n xsamples, \r\n bins=[ybins, xbins], \r\n range=[ybounds, xbounds], \r\n density=density)\r\n\r\n ax.imshow(pdf,\r\n extent=[xbounds[0], xbounds[1], ybounds[0], ybounds[1]],\r\n cmap=cmap,\r\n interpolation='nearest',\r\n origin='lower')\r\n elif plot_type in [\"hex\", \"hexbin\"]:\r\n ax.hexbin(xsamples,\r\n ysamples,\r\n gridsize=[int(0.5 * xbins), int(0.5 * ybins)],\r\n extent=[*xbounds, *ybounds],\r\n cmap=cmap)\r\n else:\r\n raise ValueError(\"Invalid plot_type: {}\".format(plot_type))\r\n\r\n\r\n ax.set_aspect('auto')\r\n\r\n if show_ylabels:\r\n ax.set_ylabel(ylabel)\r\n label_offset(ax, \"y\")\r\n else:\r\n ax.set_yticklabels([])\r\n\r\n if show_xlabels:\r\n ax.set_xlabel(xlabel)\r\n label_offset(ax, \"x\")\r\n\r\n ax.get_xaxis().set_major_locator(ticker.MaxNLocator(nbins=5, prune='upper'))\r\n ax.get_yaxis().set_major_locator(ticker.MaxNLocator(nbins=5, prune='upper'))\r\n ax.set_xlim(nice_bounds(xsamples))\r\n ax.set_ylim(nice_bounds(ysamples))\r\n\r\n return\r\n\r\n\r\ndef nice_bounds(samplesx, factor=3):\r\n \"\"\"Generate sensible limits for distribution plots.\r\n \r\n Finds the mean+factor*std_dev and mean-factor*std_dev of a set of samples.\r\n \r\n Parameters:\r\n ----------\r\n samplesx : {ndarray}\r\n Samples from a distribution.\r\n factor : {int}, optional\r\n Number of standard deviations to includde. (the default is 3, which\r\n usually gives nice looking plots without being too zoomed out)\r\n \r\n Returns\r\n -------\r\n tuple\r\n (lower limit, upper limit) of plot.\r\n \"\"\"\r\n\r\n sx = factor * np.std(samplesx)\r\n avgx = np.mean(samplesx)\r\n return avgx - sx, avgx + sx\r\n\r\n\r\ndef walker_trace(fig, samples, labels=None, **kwargs):\r\n \"\"\"Generate a walker trace figure from MCMC samples.\r\n \r\n Given some input MCMC samples, generate a figure with ndim subplots, one\r\n for each model parameter, showing the traces of each walker through the\r\n parameter subspace.\r\n \r\n Parameters:\r\n ----------\r\n fig : {figure}\r\n Empty Matplotlib figure in which to draw the walker trace subplots.\r\n samples : {ndarray}\r\n Output of MCMC sampler, must be of shape (nwalkers, nsamples, ndim).\r\n labels : {list}, optional\r\n List of length *ndim* containing variable names for each parameter.\r\n (the default is None, which means your parameters are unlabeled.)\r\n \r\n \"\"\"\r\n\r\n nwalkers, nsteps, ndim = np.shape(samples)\r\n\r\n if labels is None:\r\n labels = [None for _ in range(ndim)]\r\n\r\n if \"color\" not in kwargs:\r\n kwargs[\"color\"] = 'k'\r\n if \"alpha\" not in kwargs:\r\n kwargs[\"alpha\"] = 0.3\r\n\r\n axes = fig.subplots(ndim, 1)\r\n fig.subplots_adjust(left=0.1, bottom=0.1, right=0.98, top=0.98, wspace=0.05, hspace=0.05)\r\n\r\n if ndim == 1:\r\n axes.plot(samples[:, :, 0].T, **kwargs)\r\n \r\n axes.set_xlim(0, nsteps)\r\n if labels[0] is not None:\r\n axes.set_ylabel(labels[0])\r\n label_offset(axes, \"y\")\r\n axes.set_xlabel(\"Step\")\r\n\r\n else:\r\n for i in range(ndim):\r\n axes[i].plot(samples[:, :, i].T, **kwargs)\r\n\r\n if i < ndim - 1:\r\n axes[i].set_xticklabels([])\r\n\r\n axes[i].set_xlim(0, nsteps)\r\n if labels[i] is not None:\r\n axes[i].set_ylabel(labels[i])\r\n\r\n label_offset(axes[i], \"y\")\r\n\r\n axes[ndim - 1].set_xlabel(\"Step\")\r\n\r\n return\r\n","sub_path":"tarmac/tarmac.py","file_name":"tarmac.py","file_ext":"py","file_size_in_byte":10658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
+{"seq_id":"642672926","text":"class Solution:\r\n def productExceptSelf(self, nums: List[int]) -> List[int]:\r\n p=1\r\n l,r,o=[0]*len(nums),[0]*len(nums),[0]*len(nums)\r\n l[0],r[-1]=1,1\r\n for i in range(1,len(nums)):\r\n l[i]=l[i-1]*nums[i-1]\r\n for i in range(len(nums)-2,-1,-1):\r\n r[i]=r[i+1]*nums[i+1]\r\n for i in range(len(o)):\r\n o[i]=l[i]*r[i]\r\n return o","sub_path":"product_of_array_except_self.py","file_name":"product_of_array_except_self.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"128932877","text":"import numpy\nimport math\nimport matplotlib.pyplot as plt\n\nt = numpy.linspace(-2, 2*math.pi, 400)\na = numpy.sin(t)\nb = numpy.cos(t)\nc = a + b\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nax.plot(t, a, 'r') # plotting t, a separately \nax.plot(b, t, 'b') # plotting b, t separately \nax.plot(t, c, 'g') # plotting t, c separately \n\nax.set_xlim([-4,4])\nax.set_xticklabels([str(abs(x)) for x in ax.get_xticks()])\nlabel = ax.set_xlabel('Xlabel', fontsize = 9)\nax.xaxis.set_label_coords(1.05, -0.025)\n\n\nax.set_ylim([-4,4])\nax.set_yticklabels([str(abs(y)) for y in ax.get_yticks()])\nlabel = ax.set_ylabel('YLABEL', fontsize = 9)\nax.xaxis.set_label_coords(1.05, -0.025)\n\nax.spines['left'].set_position('center')\nax.spines['right'].set_color('none')\nax.spines['bottom'].set_position('center')\nax.spines['top'].set_color('none')\n\nplt.show()","sub_path":"generic_scripts/multiaxis.py","file_name":"multiaxis.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"521313809","text":"__author__ = 'T'\nfrom getpass import getuser\n\nimport consoleMethods\nfrom mario import mario\n\n\ndef marioMenu(uTeam):\n return consoleMethods.getChar(\n \"*************************************************************\\n\"\n \"Welcome to the Mario Bros System,\" + getuser() + \"\\n\"\n \"Team: \" + str(uTeam.getName()) + \" with \" + str(\n uTeam.getNumMarios()) + \" members\\n\"\n\n \"Please choose from our menu\\n\"\n\n \"a. Display the Team\\n\"\n \"b. Add a character to the Team\\n\"\n \"c. Delete a member from the Team\\n\"\n \"d. Exit saving the Team\\n\"\n \"e. Exit without saving changes to Team\\n\"\n \"Please enter an option:\\n\")\n\n\ndef displayTeam(tList, uTeam):\n print(\"Team \" + uTeam.getName())\n print(\"Name Weight Gender Species Good?\")\n for i in range(0, len(tList)):\n print(tList[i].getName() + \" \" + str(tList[i].getWeight()) + \" \" + tList[i].getGender() + \" \" + tList[\n i].getSpecies() + \" \" + str(tList[i].getGoodMario()))\n\n\ndef addCharacter(tList, uTeam):\n name = consoleMethods.getString(\"Please enter a Name for your character: \") + \"\\n\"\n gender = consoleMethods.getString(\"Please enter a Gender: \") + \"\\n\"\n species = consoleMethods.getString(\"Please enter a Species: \") + \"\\n\"\n weight = str(consoleMethods.getDouble(\"Please enter a Weight: \")) + \"\\n\"\n if consoleMethods.getString(\"Is the character good? \") == \"good\":\n goodMario = True\n else:\n goodMario = False\n if name is not None and gender is not None and species is not None and weight is not None:\n tList.append(mario(name, weight, gender, species, goodMario))\n uTeam.setNumMarios(uTeam.getNumMarios() + 1)\n\n\ndef deleteCharacter(tList):\n if len(tList) is not 0:\n while True:\n todelete = consoleMethods.getString(\"Which character would you like to delete? 
\").lower()\n print(\"Trying to delete \" + todelete.lower())\n valid = False\n for i in range(0, len(tList)):\n if tList[i] is None:\n break\n if todelete == tList[i].getName().lower():\n valid = True\n del (tList[i])\n print(\"Deleted\")\n break\n if valid:\n break\n else:\n print(\"Invalid character!\")\n else:\n print(\"Team empty!\")\n\n\ndef getTeam(tList, uTeam):\n while True:\n fileName = consoleMethods.getString(\"Please enter a file name: \")\n try:\n with open(fileName, 'r') as file:\n buffer = file.readlines()\n uTeam.setName(buffer[0])\n break\n except FileNotFoundError:\n print(\"File Invalid\")\n try:\n i = 1\n while i < int((len(buffer) - 1)):\n name = buffer[i]\n weight = buffer[i + 1]\n gender = buffer[i + 2]\n species = buffer[i + 3]\n good = buffer[i + 4]\n tList.append(mario(name, weight, gender, species, good))\n uTeam.setNumMarios(uTeam.getNumMarios() + 1)\n i += 5\n\n except ValueError:\n print(\"Read\")\n\n\ndef save(tList, uTeam):\n fileName = consoleMethods.getString(\"Please enter a File Name: \")\n with open(fileName, 'w') as file:\n file.write(uTeam.getName())\n for i in range(0, len(tList)):\n file.write(tList[i].getName())\n file.write(tList[i].getWeight())\n file.write(tList[i].getGender())\n file.write(tList[i].getSpecies())\n file.write(str(tList[i].getGoodMario()))","sub_path":"project1123py/marioMethods.py","file_name":"marioMethods.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"107208481","text":"import numpy as np\nimport sys\nimport re\n\nest = []\nstd = []\npath = sys.argv[1]\n\nwith open(path, 'r') as file:\n for line in file:\n toks = line.split()\n est.append(float(toks[0]))\n m = re.match(r'\\((.+)\\)', toks[1])\n std.append(float(m.group(1)))\n\nprint(f'est mean: {np.mean(est):.5f} std: {np.std(est):.5f}')\nprint(f'SE est mean: {np.mean(std):.5f}')\n\n","sub_path":"python/get_std.py","file_name":"get_std.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"299113195","text":"import logging.config\nimport time\nimport os.path, sys\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\nfrom RPi import GPIO\nfrom src.switch.position import SwitchPosition\n\n# Logging\nlogging.config.fileConfig('../config/logging.config')\nlogger = logging.getLogger(__name__)\n\n# GPIO\nGPIO.setmode()\n\n# Create new SwitchPosition() instance and define update interval\nnew_switch = SwitchPosition(gpio_clk=23, gpio_dt=24)\n\nupdate_interval_seconds = 5\n\n# Check switch position every update_interval_seconds\nwhile True:\n\n if new_switch.power_state == \"ON\":\n power_state_text=\"\\033[92mON\\033[0m\"\n else:\n power_state_text=\"\\033[91mOFF\\033[0m\"\n\n logger.debug(\"SWITCH STATE is %s\" % power_state_text)\n logger.debug(\"Checking state again in %s seconds...\" % update_interval_seconds)\n time.sleep(update_interval_seconds)\n\n","sub_path":"test/test_position.py","file_name":"test_position.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"38601859","text":"\"\"\"\nEECS 445 - Introduction to Machine Learning\nWinter 2019 - Project 2\nUtility functions\n\"\"\"\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef config(attr):\n \"\"\"\n Retrieves the queried attribute value from the config file. 
Loads the\n config file on first call.\n \"\"\"\n if not hasattr(config, 'config'):\n with open('config.json') as f:\n config.config = eval(f.read())\n node = config.config\n for part in attr.split('.'):\n node = node[part]\n return node\n\ndef denormalize_image(image):\n \"\"\" Rescale the image's color space from (min, max) to (0, 1) \"\"\"\n ptp = np.max(image, axis=(0,1)) - np.min(image, axis=(0,1))\n return (image - np.min(image, axis=(0,1))) / ptp\n\ndef hold_training_plot():\n \"\"\"\n Keep the program alive to display the training plot\n \"\"\"\n plt.ioff()\n plt.show()\n\ndef log_cnn_training(epoch, stats):\n \"\"\"\n Logs the validation accuracy and loss to the terminal\n \"\"\"\n valid_acc, valid_loss, train_acc, train_loss = stats[-1]\n print('Epoch {}'.format(epoch))\n print('\\tValidation Loss: {}'.format(valid_loss))\n print('\\tValidation Accuracy: {}'.format(valid_acc))\n print('\\tTrain Loss: {}'.format(train_loss))\n print('\\tTrain Accuracy: {}'.format(train_acc))\n\ndef make_cnn_training_plot(name='CNN'):\n \"\"\"\n Runs the setup for an interactive matplotlib graph that logs the loss and\n accuracy\n \"\"\"\n plt.ion()\n fig, axes = plt.subplots(1,2, figsize=(10,5))\n plt.suptitle(name + ' Training')\n axes[0].set_xlabel('Epoch')\n axes[0].set_ylabel('Accuracy')\n axes[1].set_xlabel('Epoch')\n axes[1].set_ylabel('Loss')\n\n return axes\n\ndef update_cnn_training_plot(axes, epoch, stats):\n \"\"\"\n Updates the training plot with a new data point for loss and accuracy\n \"\"\"\n valid_acc = [s[0] for s in stats]\n valid_loss = [s[1] for s in stats]\n train_acc = [s[2] for s in stats]\n train_loss = [s[3] for s in stats]\n axes[0].plot(range(epoch - len(stats) + 1, epoch + 1), valid_acc,\n linestyle='--', marker='o', color='b')\n axes[0].plot(range(epoch - len(stats) + 1, epoch + 1), train_acc,\n linestyle='--', marker='o', color='r')\n axes[0].legend(['Validation', 'Train'])\n axes[1].plot(range(epoch - len(stats) + 1, epoch + 1), valid_loss,\n linestyle='--', marker='o', color='b')\n axes[1].plot(range(epoch - len(stats) + 1, epoch + 1), train_loss,\n linestyle='--', marker='o', color='r')\n axes[1].legend(['Validation', 'Train'])\n plt.pause(0.00001)\n\ndef save_cnn_training_plot(name='cnn'):\n \"\"\"\n Saves the training plot to a file\n \"\"\"\n plt.savefig(name + '_training_plot.png', dpi=200)\n\ndef log_ae_training(epoch, stats):\n \"\"\"\n Logs the validation loss to the terminal\n \"\"\"\n valid_loss, train_loss = stats[-1]\n print('Epoch {}'.format(epoch))\n print('\\tValidation Mean squared error loss: {}'.format(valid_loss))\n print('\\tTrain Mean squared error loss: {}'.format(train_loss))\n\ndef make_ae_training_plot():\n \"\"\"\n Runs the setup for an interactive matplotlib graph that logs the loss\n \"\"\"\n plt.ion()\n fig, axes = plt.subplots(1,1, figsize=(5,5))\n plt.suptitle('Autoencoder Training')\n axes.set_xlabel('Epoch')\n axes.set_ylabel('MSE')\n\n return axes\n\ndef update_ae_training_plot(axes, epoch, stats):\n \"\"\"\n Updates the training plot with a new data point for loss\n \"\"\"\n valid_loss = [s[0] for s in stats]\n train_loss = [s[1] for s in stats]\n axes.plot(range(epoch - len(stats) + 1, epoch + 1), valid_loss,\n linestyle='--', marker='o', color='b')\n axes.plot(range(epoch - len(stats) + 1, epoch + 1), train_loss,\n linestyle='--', marker='o', color='r')\n axes.legend(['Validation', 'Train'])\n plt.pause(0.00001)\n\ndef save_ae_training_plot():\n \"\"\"\n Saves the training plot to a file\n \"\"\"\n 
","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"117813655","text":"\"\"\"\nTest the various utilities in serpentTools/utils.py\n\"\"\"\n\nimport unittest\n\nfrom numpy import arange, ndarray, array\nfrom numpy.testing import assert_array_equal\nfrom six import iteritems\n\nfrom serpentTools.utils import convertVariableName, splitValsUncs, str2vec\n\n\nclass VariableConverterTester(unittest.TestCase):\n    \"\"\"Class for testing our variable name conversion function.\"\"\"\n\n    def test_variableConversion(self):\n        \"\"\"Verify the variable name conversion function.\"\"\"\n        testCases = {\n            \"VERSION\": \"version\",\n            \"INF_KINF\": \"infKinf\",\n            \"ADJ_PERT_KEFF_SENS\": \"adjPertKeffSens\",\n        }\n        for serpentStyle, expected in iteritems(testCases):\n            actual = convertVariableName(serpentStyle)\n            self.assertEqual(expected, actual, msg=serpentStyle)\n\n\nclass VectorConverterTester(unittest.TestCase):\n    \"\"\"Class for testing the str2vec function\"\"\"\n\n    def setUp(self):\n        self.testCases = (\"0 1 2 3\", [0, 1, 2, 3], (0, 1, 2, 3), arange(4))\n\n    def test_str2Arrays(self):\n        \"\"\"Verify that str2vec converts to arrays.\"\"\"\n        expected = arange(4)\n        for case in self.testCases:\n            actual = str2vec(case)\n            assert_array_equal(expected, actual, err_msg=case)\n\n    def test_listOfInts(self):\n        \"\"\"Verify that a list of ints can be produced with str2vec.\"\"\"\n        expected = [0, 1, 2, 3]\n        self._runConversionTest(int, expected, list)\n\n    def _runConversionTest(self, valType, expected, outType=None):\n        if outType is None:\n            outType = array\n            compareType = ndarray\n        else:\n            compareType = outType\n        for case in self.testCases:\n            actual = str2vec(case, of=valType, out=outType)\n            self.assertIsInstance(actual, compareType, msg=case)\n            ofRightType = [isinstance(xx, valType) for xx in actual]\n            self.assertTrue(all(ofRightType),\n                            msg=\"{} -> {}, {}\".format(case, actual,\n                                                      type(actual)))\n            self.assertEqual(expected, actual, msg=case)\n\n\nclass SplitValsTester(unittest.TestCase):\n    \"\"\"Class that tests splitValsUncs.\"\"\"\n\n    def setUp(self):\n        self.input = arange(4)\n\n    def test_splitVals(self):\n        \"\"\"Verify the basic functionality.\"\"\"\n        expectedV = array([0, 2])\n        expectedU = array([1, 3])\n        actualV, actualU = splitValsUncs(self.input)\n        assert_array_equal(expectedV, actualV, err_msg=\"Values\")\n        assert_array_equal(expectedU, actualU, err_msg=\"Uncertainties\")\n\n    def test_splitCopy(self):\n        \"\"\"Verify that a copy, not a view, is returned when copy=True\"\"\"\n        viewV, viewU = splitValsUncs(self.input)\n        copyV, copyU = splitValsUncs(self.input, copy=True)\n        for view, copy, msg in zip(\n                (viewV, viewU), (copyV, copyU), ('value', 'uncertainty')):\n            assert_array_equal(view, copy, err_msg=msg)\n            self.assertFalse(view is copy, msg=msg)\n\nif __name__ == '__main__':\n    unittest.main()\n\n","sub_path":"serpentTools/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"69047878","text":"# See COPYRIGHT.md for copyright information\n\nfrom tkinter import Toplevel, N, E, S, W, EW, StringVar, BooleanVar\ntry:\n    from tkinter.ttk import Frame, Button, Label, Entry, Checkbutton\nexcept ImportError:\n    from ttk import Frame, Button, Label, Entry, Checkbutton\n\nimport os\n\n\nclass SaveViewerDialog(Toplevel):\n    def __init__(self, cntlr):\n\n        super(SaveViewerDialog, self).__init__(cntlr.parent)\n\n        self.cntlr = cntlr\n        self.parent = cntlr.parent\n        self.accepted = False\n\n        self.title(\"Save iXBRL Viewer\")\n\n        frame = Frame(self)\n        self._scriptUrl = StringVar()\n        self._scriptUrl.set(self.cntlr.config.setdefault('iXBRLViewerScriptURL', os.path.join(os.path.dirname(__file__), 'viewer', 'dist', 'ixbrlviewer.js')))\n        self._filename = StringVar()\n        self._filename.set(self.cntlr.config.setdefault(\"iXBRLViewerOutputFile\",\"\"))\n        self._zipViewerOutput = BooleanVar()\n        self._zipViewerOutput.set(self.cntlr.config.setdefault(\"iXBRLViewerZipOutput\", False))\n\n        y = 1\n\n        scriptUrlLabel = Label(frame, text=\"Script URL\")\n        scriptUrlEntry = Entry(frame, textvariable = self._scriptUrl, width=80)\n\n        scriptUrlLabel.grid(row=y, column=0, sticky=W, pady=3, padx=3)\n        scriptUrlEntry.grid(row=y, column=1, columnspan=2, sticky=EW, pady=3, padx=3)\n\n        y += 1\n\n        filenameLabel = Label(frame, text=\"iXBRL file\")\n        filenameEntry = Entry(frame, textvariable = self._filename, width=80)\n        filenameBrowse = Button(frame, text=_(\"Browse...\"), command=self.browseForFile)\n\n        filenameLabel.grid(row=y, column=0, sticky=W, pady=3, padx=3)\n        filenameEntry.grid(row=y, column=1, sticky=EW, pady=3, padx=3)\n        filenameBrowse.grid(row=y, column=2, sticky=EW, pady=3, padx=3)\n\n        y += 1\n\n        zipViewerOutputCheckbutton = Checkbutton(frame, text=\"Zip Viewer Output\", variable=self._zipViewerOutput, onvalue=True, offvalue=False)\n        zipViewerOutputCheckbutton.grid(row=y, column=0, pady=3, padx=3)\n\n        y += 1\n\n        okButton = Button(frame, text=_(\"OK\"), command=self.ok)\n        cancelButton = Button(frame, text=_(\"Cancel\"), command=self.close)\n        okButton.grid(row=y, column=1, sticky=E, pady=3)\n        cancelButton.grid(row=y, column=2, columnspan=1, sticky=E, pady=3, padx=3)\n\n        frame.grid(row=0, column=0, sticky=(N,E,S,W))\n        frame.columnconfigure(1, weight=1)\n\n        window = self.winfo_toplevel()\n        window.columnconfigure(0, weight=1)\n        #self.geometry(\"+{0}+{1}\".format(dialogX+50,dialogY+100))\n\n        self.bind(\"<Return>\", self.ok)\n        self.bind(\"<Escape>\", self.close)\n\n        self.protocol(\"WM_DELETE_WINDOW\", self.close)\n        self.grab_set()\n        self.wait_window(self)\n\n\n    def close(self, event=None):\n        self.parent.focus_set()\n        self.destroy()\n\n\n    def ok(self, event=None):\n        self.cntlr.config['iXBRLViewerOutputFile'] = self._filename.get()\n        self.cntlr.config['iXBRLViewerScriptURL'] = self._scriptUrl.get()\n        self.cntlr.config[\"iXBRLViewerFileDir\"] = os.path.dirname(self._filename.get())\n        self.cntlr.saveConfig()\n        self.accepted = True\n        self.close()\n\n    def cancel(self, event=None):\n        self.close()\n\n    def browseForFile(self, event=None):\n        instanceFile = self.cntlr.uiFileDialog(\"save\",\n                parent=self,\n                title=_(\"arelle - Save iXBRL Viewer Instance\"),\n                initialdir=self.cntlr.config.setdefault(\"iXBRLViewerFileDir\",\".\"),\n                filetypes=[(_(\"iXBRL report .html\"), \"*.html\")],\n                defaultextension=\".html\")\n        if instanceFile:\n            self._filename.set(instanceFile)\n\n    def scriptUrl(self):\n        return self._scriptUrl.get()\n\n    def filename(self):\n        return self._filename.get()\n\n    def zipViewerOutput(self):\n        return self._zipViewerOutput.get()\n","sub_path":"iXBRLViewerPlugin/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"303246732","text":"import matplotlib as mpl\nimport seaborn as sns\nimport
 brewer2mpl\n\nsns.set_palette('deep')\ndeep = sns.color_palette('deep')\n\nset1 = list(map(mpl.colors.rgb2hex, brewer2mpl.get_map('Set1', 'qualitative', 9).mpl_colors))\nred = set1[0]\nblue = set1[1]\ngreen = set1[2]\npurple = set1[3]\norange = set1[4]\nyellow = set1[5]\nbrown = set1[6]\npink = set1[7]\ngrey = set1[8]\n\nalmost_black = '#262626'\n\npurples = sns.color_palette('Purples', 9)\n\nstr_to_color = {'red': red, 'blue': blue, 'green': green, 'purple': purple,\n                'orange': orange, 'brown': brown, 'pink': pink, 'grey': grey}\n","sub_path":"flotilla/visualize/color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"170639","text":"__author__ = 'Administrator'\r\n'''\r\nModule-specific settings\r\n'''\r\n# CKEditor module\r\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\r\nCKEDITOR_IMAGE_BACKEND = \"pillow\"\r\nCKEDITOR_CONFIGS = {\r\n    'default': {\r\n        'toolbar': 'basic',\r\n    },\r\n    'small_ckeditor': {\r\n        'toolbar': 'basic',\r\n        'width': 300,\r\n    },\r\n    'media_ckeditor': {\r\n        'toolbar': 'standard',\r\n        'width': 600,\r\n    },\r\n    'large_ckeditor': {\r\n        'toolbar': 'full',\r\n        'width': 1024,\r\n    },\r\n}\r\n# django-bootstrap3 module\r\nBOOTSTRAP3 = {\r\n    # The URL to the jQuery JavaScript file\r\n    'jquery_url': 'http://cdn.bootcss.com/jquery/2.1.3/jquery.min.js',\r\n    # The Bootstrap base URL\r\n    'base_url': 'http://cdn.bootcss.com/bootstrap/3.3.4',\r\n    # The complete URL to the Bootstrap CSS file (None means derive it from base_url)\r\n    'css_url': 'http://cdn.bootcss.com/bootstrap/3.3.4/css/bootstrap.min.css',\r\n    # The complete URL to the Bootstrap CSS file (None means no theme)\r\n    'theme_url': 'http://cdn.bootcss.com/bootstrap/3.3.4/css/bootstrap-theme.min.css',\r\n    # The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)\r\n    'javascript_url': \"http://cdn.bootcss.com/bootstrap/3.3.4/js/bootstrap.min.js\",\r\n    # Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)\r\n    'javascript_in_head': False,\r\n    # Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)\r\n    'include_jquery': True,\r\n    # Label class to use in horizontal forms\r\n    'horizontal_label_class': 'col-md-2',\r\n    # Field class to use in horizontal forms\r\n    'horizontal_field_class': 'col-md-4',\r\n    # Set HTML required attribute on required fields\r\n    'set_required': True,\r\n    # Set placeholder attributes to label if no placeholder is provided\r\n    'set_placeholder': True,\r\n    # Class to indicate required (better to set this in your Django form)\r\n    'form_required_class': '',\r\n    # Class to indicate error (better to set this in your Django form)\r\n    'form_error_class': '',\r\n    # Renderers (only set these if you have studied the source and understand the inner workings)\r\n    'formset_renderers':{\r\n        'default': 'bootstrap3.renderers.FormsetRenderer',\r\n    },\r\n    'form_renderers': {\r\n        'default': 'bootstrap3.renderers.FormRenderer',\r\n    },\r\n    'field_renderers': {\r\n        'default': 'bootstrap3.renderers.FieldRenderer',\r\n        'inline': 'bootstrap3.renderers.InlineFieldRenderer',\r\n    },\r\n}","sub_path":"settings/section.py","file_name":"section.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"281875964","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\n\nimport io\nimport sys\nimport time\nimport codecs\n\nfrom young_tools.pedestal import Constant\nfrom 
young_tools.pedestal import ANSIFormatter\nfrom young_tools.pedestal import InstancesChecker\n\n\nHEADS = Constant()\nHEADS.TYPE = {\n 'info',\n 'warn',\n 'error',\n 'exit',\n 'start',\n 'finish',\n}\n\n\nclass RedirectStream(object):\n @InstancesChecker(setting_out_stream={io.TextIOWrapper, codecs.StreamReaderWriter}, setting_err_stream={io.TextIOWrapper, codecs.StreamReaderWriter})\n def __init__(self, setting_out_stream=None, setting_err_stream=None):\n self._current_out_stream = setting_out_stream or sys.stdout\n self._current_err_stream = setting_err_stream or sys.stderr\n self._previous_out_stream = sys.stdout\n self._previous_err_stream = sys.stderr\n\n def __enter__(self):\n sys.stdout.flush()\n sys.stderr.flush()\n\n sys.stdout = self._current_out_stream\n sys.stderr = self._current_err_stream\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.stdout.flush()\n sys.stderr.flush()\n\n sys.stdout = self._previous_out_stream\n sys.stderr = self._previous_err_stream\n\n if exc_type:\n return False\n return True\n\n\nclass Logger(object):\n @InstancesChecker(log_file_path=str)\n def __init__(self, log_file_path='/tmp/temp_log_file'):\n self._log_file_path = log_file_path+'-{}'.format(time.time())\n\n def _write_to_console(self, message, log_type):\n message = getattr(self, '_'+log_type.lower()+'_head')() + message\n with RedirectStream():\n sys.stderr.write(message)\n sys.stderr.flush()\n\n def _write_to_log_file(self, message, log_type):\n message = ' [{}]: '.format(log_type.upper()) + message\n with open(self._log_file_path, 'a', encoding='utf-8') as log_file:\n with RedirectStream(setting_err_stream=log_file):\n sys.stderr.write(message)\n sys.stderr.flush()\n\n def _write_to_both(self, message, log_type):\n self._write_to_console(message, log_type)\n self._write_to_log_file(message, log_type)\n\n @classmethod\n def _start_head(cls):\n return ANSIFormatter.color_string(' [START]: ', 'White')\n\n @classmethod\n def _finish_head(cls):\n return ANSIFormatter.color_string(' [FINISH]: ', 'Cyan')\n\n @classmethod\n def _info_head(cls):\n return ANSIFormatter.color_string(' [INFO]: ', 'Green')\n\n @classmethod\n def _warn_head(cls):\n return ANSIFormatter.color_string(' [WARN]: ', 'Yellow')\n\n @classmethod\n def _error_head(cls):\n return ANSIFormatter.color_string(' [ERROR]: ', 'Red')\n\n @classmethod\n def _exit_head(cls):\n return ANSIFormatter.color_string(' [EXIT]: ', 'Magenta')\n\n #where={'both'|'console'|'log_file'}\n @InstancesChecker(message=str, log_type=str, where=str)\n def write_log(self, message, log_type, where='both'):\n if where not in {'both', 'console', 'log_file'}:\n self.write('Do not support this kind of \\'Destination\\': [{}] yet!\\nNow writing to console and log file both.\\n'.format(where), 'warn')\n where = 'both'\n\n if log_type not in HEADS.TYPE:\n self.write('Do not support this kind of \\'Log Type\\': [{}] yet!\\nNow writing the [INFO] type log.\\n'.format(log_type), 'warn')\n log_type = 'info'\n\n getattr(self, '_write_to_'+where)(message, log_type)\n if log_type == 'error' or log_type == 'exit':\n sys.exit()\n\n @classmethod\n @InstancesChecker(message=str, log_type=str)\n def write(cls, message, log_type):\n if log_type not in HEADS.TYPE:\n sys.stderr.write(cls._warn_head()+'Do not support this kind of \\'Log Type\\': [{}] yet!\\n Now writing the [INFO] type log.\\n'.format(log_type))\n sys.stderr.flush()\n log_type = 'info'\n\n head_padding = '\\n' + ' ' * (len(log_type) + 5)\n message_list = message.strip().split('\\n')\n message = 
message_list[0]\n        for i in range(1, len(message_list)):\n            message += head_padding + message_list[i]\n        message = getattr(cls, '_'+log_type.lower()+'_head')() + message + '\\n'\n        with RedirectStream():\n            sys.stderr.write(message)\n            sys.stderr.flush()\n        if log_type == 'error' or log_type == 'exit':\n            sys.exit()\n","sub_path":"young_tools/pedestal/interactor.py","file_name":"interactor.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"374621972","text":"#!/usr/bin/env python\n\n\"\"\"update Perlbrew and check for updated Perl\"\"\"\n\n# File: ratom/perlbrew.py\n# Version: 1.0.0\n# Date: 2016-05-25\n# Author: qtfkwk <qtfkwk+ratom@gmail.com>\n# Copyright: (C) 2016 by qtfkwk\n# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)\n\nfrom common import *\n\ndef check():\n    \"\"\"check if can update Perlbrew\"\"\"\n    return runp('which perlbrew', True)[0] == 0\n\ndef main(argv=None, cfg=None):\n    \"\"\"update Perlbrew and check for updated Perl\"\"\"\n    if cfg is None:\n        cfg = args(argv)\n    log = logging.getLogger('ratom')\n    log.info('perlbrew: started')\n    if not check():\n        log.info('perlbrew: failed check')\n        return\n    section('Perlbrew', [\n        'perlbrew self-upgrade',\n        'perlbrew list',\n        'perlbrew available',\n    ], dryrun=cfg['dryrun'])\n    log.info('perlbrew: finished')\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"ratom/perlbrew.py","file_name":"perlbrew.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"432516488","text":"import abc\nfrom connection import DBConnection\n\nclass QuerysetMetaMixin(type):\n\n    def __new__(meta, name, bases, dct):\n\n        cls = type.__new__(meta, name, bases, dct)\n        methods = []\n        for k, v in dct.items():\n            if not k.startswith('__') and hasattr(v, '__call__'):\n                methods.append(k)\n        setattr(cls, 'methods', methods)\n        return cls\n\nclass QuerysetMeta(abc.ABCMeta, QuerysetMetaMixin):\n\n    pass\n\n# This object represents a base custom queryset from which the queryset\n# may have specialized derivatives formed.\nclass BaseQueryset(metaclass=QuerysetMeta):\n\n    collates = {'iexact': 'NO CASE', 'base': 'LOCALIZED'}\n    collate_extensions = tuple('__%s' % collate for collate in collates)\n\n    def __getattribute__(self, name):\n\n        altget = lambda x: object.__getattribute__(self, x)\n        if name in altget('methods'):\n            if (name in ('delete', 'get') and len(altget('calls'))):\n                raise AttributeError(\"You may not chain methods with the \" +\n                    \"delete or get method for Queryset instances.\\n\" +\n                    \"These methods must be called as a singleton chain.\")\n        return altget(name)\n\n    @classmethod\n    def parse_filter_kwargs(cls, filter_kwargs):\n\n        no_collate = {}\n        for arg, value in filter_kwargs.items():\n            if '__' in arg:\n                column, collate, *other = arg.split('__')\n                if other:\n                    raise ValueError(\"You may not chain multiple COLLATE's\")\n                elif collate not in cls.collates:\n                    raise KeyError(\"Invalid COLLATE argument passed to Queryset \" +\n                        \"method 'filter':\\n %s\\nValid COLLATE arguments are %s\" %\n                        (collate, \", \".join(cls.collate_extensions))\n                    )\n            else:\n                no_collate[arg] = value\n        return no_collate\n\n    # @verify_singleton_collates\n    # @verify_table_columns\n    def filter(self, **kwargs):\n\n        pass\n\n    # This method returns all data values for all rows including\n    # abstract foreign key relations as the base \"all\" filter\n    # which may be proxied and customized in derivatives if \n    # necessary.\n    def all(self):\n\n        for k,v in self.cls.__dict__.items():\n            if isinstance(v, ForeignKey):\n                self._result = (x for x in v.relations)\n\n    # This method returns the next object in the query.\n    def __next__(self):\n\n        try:\n            row = next(self._result)\n            kwargs = {'id': row[0]}\n            row = row[1:]\n            kwargs.update(dict(zip(self.cls.fields.keys(), row)))\n            return self.cls(**kwargs)\n\n        except StopIteration:\n            raise\n\n    # This \"magic\" method (in combination with __next__) results in a\n    # queryset object being iterable.\n    def __iter__(self):\n\n        return self\n\n# This object represents the basest queryset possible.\nclass Queryset(BaseQueryset):\n\n    def __init__(self, cls):\n\n        self._filters = {}\n        self.calls = {}\n        self.cls, self.table = cls, cls.__name__\n        self._result = None\n\n    # This object returns whether or not the queryset currently\n    # contains at least one result in which case the query\n    # must necessarily exist by definition.\n    def exists(self):\n\n        if self._result is not None:\n            try:\n                next(self._result)\n                return True\n            except StopIteration:\n                return False\n        return False\n\n    # This object returns all rows in a table / model that specified \n    # the filter within \"**kwargs\"\n    def filter(self, **kwargs):\n\n        if not kwargs:\n            \"\"\" We could raise an exception here... but a blank filter\n                is the equivalent of all table rows... so ... \"\"\"\n            return self.all()\n        else:\n            fkwargs = self.parse_filter_kwargs(kwargs)\n            diff = [x for x in kwargs if x.split('__')[0] not in self.cls.fields]\n            intersection = set(kwargs.keys()) & set(self._filters.keys())\n            if diff:\n                raise AttributeError(\"%s has no column %s\" %\n                    (self.table, \" or, \".join(diff)))\n            elif intersection:\n                noun = \"arguments\" if len(intersection) > 1 else \"argument\"\n                cnoun = noun.capitalize()\n                raise ValueError(\"You may not repeat multiple filters with the \" +\n                    \"same %s.\\n Repeated %s: %s.\" % (noun, cnoun, str(intersection)))\n            else:\n                self._filters.update(kwargs)\n                where_query = \" AND \".join(\"%s='%s'\" % (k.split('__')[0], v) +\n                    (\" COLLATE NOCASE\" if k.endswith('__iexact') else '')\n                    for k, v in kwargs.items())\n                query = \"SELECT * FROM %s WHERE %s\" % (self.table, where_query)\n                with DBConnection() as conn:\n                    cursor = conn.execute(query)\n                    self._result = (row for row in cursor.fetchall())\n                return self\n\n    # This method returns all objects of a model / table.\n    def all(self, fk=False):\n\n        if self._result is None:\n            with DBConnection() as conn:\n                if not fk:\n                    cursor = conn.execute(\"SELECT * FROM %s\" % self.table)\n                else:\n                    cursor = conn.execute(\"SELECT * FROM %s WHERE %s.id=%s.%s\" %\n                        (table, rel_table, table, field.get_name()))\n                self._result = (row for row in cursor.fetchall())\n        return self","sub_path":"sqlite_wrapper/querying.py","file_name":"querying.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"411342850","text":"\"\"\"main\"\"\"\n#!/usr/bin/env python3\n\nfrom darts import models\nimport pandas as pd\nfrom soam.forecaster import Forecaster\n\n\ndef main():\n    url = \"https://raw.githubusercontent.com/facebook/prophet/master/examples/example_retail_sales.csv\"\n    df = pd.read_csv(url)\n    my_model = models.Prophet(weekly_seasonality=False, daily_seasonality=False)\n    forecaster = Forecaster(my_model)\n    predictions = forecaster.run(raw_series=df, output_length=7)\n    print(predictions)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"soam/templates/init/{{ cookiecutter.package_name }}/{{ cookiecutter.project_name
 }}/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"215688906","text":"import sys\n\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.QtCore import QVariant, Qt\nfrom PyQt4.phonon import Phonon\n\nfrom MainWindowAuto import Ui_MainWindow\n\nclass Track(object):\n    def __init__(self, title=u'', artist=u'', album=u'', _hash=''):\n        self.__hash = _hash\n        self.__artist = artist\n        self.__title = title\n        self.__album = album\n\n    def get_hash(self):\n        return self.__hash\n\n    def get_artist(self):\n        return self.__artist\n\n    def get_title(self):\n        return self.__title\n\n    def get_album(self):\n        return self.__album\n\n    def set_hash(self, value):\n        self.__hash = value\n\n    def set_artist(self, value):\n        self.__artist = value\n\n    def set_title(self, value):\n        self.__title = value\n\n    def set_album(self, value):\n        self.__album = value\n    \n    hash_ = property(get_hash, set_hash, None, None)\n    artist = property(get_artist, set_artist, None, None)\n    title = property(get_title, set_title, None, None)\n    album = property(get_album, set_album, None, None)\n\nclass TrackModel(QtCore.QAbstractListModel):\n    __track_list = list()\n    \n    def __init__(self, parent=None):\n        super(TrackModel, self).__init__(parent)\n    \n    def setTrackList(self, tracks):\n        self.__track_list = list(tracks)\n    \n    def rowCount(self, index):\n        return len(self.__track_list)\n    \n    def data(self, index, role=Qt.DisplayRole):\n        tr = self.__track_list[index.row()]\n        if role == Qt.DisplayRole: \n            return QVariant(\"%s - %s (%s)\" % (tr.artist, tr.title, tr.album))\n        elif role == Qt.UserRole:\n            return tr\n    \nclass MainWindow(QtGui.QMainWindow):\n    def __init__(self, peer, stream_adapter, parent=None):\n        super(MainWindow, self).__init__(parent)\n        \n        self.__track_list = TrackModel()\n        \n        self.ui = Ui_MainWindow()\n        self.ui.setupUi(self)\n        self.ui.trackListView.setModel(self.__track_list)\n        self.ui.trackListView.doubleClicked.connect(self.list_double_clicked)\n        self.ui.trackListView.clicked.connect(self.list_clicked)\n        self.ui.searchBtn.clicked.connect(self.search)\n        self.ui.playBtn.clicked.connect(self.play)\n        self.ui.stopBtn.clicked.connect(self.stop)\n        self.ui.pauseBtn.clicked.connect(self.pause)\n        self.setWindowTitle('Zalgo')\n        \n        self.__peer = peer\n        self.__recv_contr = None\n        self.__stream_adapter = stream_adapter\n        self.__hashes = []\n        self.__pid = ''\n        self.__playing_now = None\n        \n        self.__audio_output = Phonon.AudioOutput(Phonon.MusicCategory, self)\n        self.__media_object = Phonon.MediaObject(self)\n        \n        self.__media_object.setTickInterval(1000)\n        self.__media_object.stateChanged.connect(self.state_changed)\n        self.__media_object.tick.connect(self.tick)\n        \n        self.ui.seekSlider.setMediaObject(self.__media_object)\n        self.ui.volumeSlider.setAudioOutput(self.__audio_output)\n        \n        Phonon.createPath(self.__media_object, self.__audio_output)\n    \n    def tick(self, time):\n        displayTime = QtCore.QTime(0, (time // 60000) % 60, (time // 1000) % 60)\n        self.ui.timeLbl.setText(displayTime.toString('mm:ss'))\n    \n    def state_changed(self, newState, oldState):\n        self.ui.pauseBtn.setEnabled(False)\n        self.ui.playBtn.setEnabled(False)\n        self.ui.stopBtn.setEnabled(False)\n        \n        if newState == Phonon.PlayingState:\n            self.ui.pauseBtn.setEnabled(True)\n            self.ui.stopBtn.setEnabled(True)\n        elif newState == Phonon.StoppedState:\n            self.ui.playBtn.setEnabled(True)\n            self.ui.timeLbl.setText(\"00:00\")\n        elif newState == Phonon.PausedState:\n            self.ui.playBtn.setEnabled(True)\n            self.ui.stopBtn.setEnabled(True)\n    \n    def search(self):\n        search_text = self.ui.searchEdit.text()\n        self.__peer.lookup(search_text)\n        self.ui.playBtn.setEnabled(True)\n\n    def play(self):\n        if self.__media_object.state() != Phonon.PausedState:\n            self.__playing_now = self.ui.trackListView.currentIndex()\n            track = self.__track_list.data(self.__playing_now, Qt.UserRole)\n            self.__peer.start_stream(self.__pid, track.hash_)\n        self.__media_object.play()\n    \n    def stop(self):\n        self.__media_object.stop()\n    \n    def pause(self):\n        self.__media_object.pause()\n\n    def streamCreated(self, size):\n        self.__media_object.setCurrentSource(Phonon.MediaSource(self.__stream_adapter))\n        self.__media_object.play()\n\n    def musicFound(self, pid, music_list):\n        self.__track_list.reset()\n        self.__pid = str(pid)\n        self.__track_list.setTrackList([Track(t['title'], t['artist'], t['album'], t['hash']) for t in music_list])\n    \n    def list_double_clicked(self, item):\n        if self.__playing_now != self.ui.trackListView.currentIndex():\n            self.__media_object.stop()\n        self.play()\n    \n    def list_clicked(self, item):\n        self.ui.playBtn.setEnabled(True)\n\nif __name__ == \"__main__\":\n    app = QtGui.QApplication(sys.argv)\n    main_window = MainWindow(None, None)\n    main_window.show()\n    sys.exit(app.exec_())\n","sub_path":"zalgo/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"140159041","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\nimport xml.etree.cElementTree as ET\nfrom xml.etree import ElementTree\nfrom xml.dom import minidom\nimport html\n\n\ndef prettify(elem):\n    \"\"\"Return a pretty-printed XML string for the Element.\n    \"\"\"\n    rough_string = ElementTree.tostring(elem, 'utf-8')\n    rep = minidom.parseString(rough_string)\n    return html.unescape(rep.toprettyxml(indent=\"  \"))\n\n\ndef generator_xml(lines, filename):\n    if not os.path.isdir('output'):\n        os.mkdir('output')\n    filename = 'output/' + filename\n    root = ET.Element(\"jobs\")\n\n    for line in lines:\n        ET.SubElement(root, \"job\", {\n            'Employer': line[0],\n            'Title': line[1],\n            'Sector': line[2],\n            'Location': line[3],\n            'Provider': line[4],\n            'Link': line[5]\n        })\n\n    with open(filename, 'w') as output_file:\n        output_file.write(prettify(root))\n\n\ndef graduate():\n    initial_url = 'https://www.graduate-jobs.com/jobs/?page='\n    page = 0\n    urls = []\n    lines = []\n    provider = 'Graduate Jobs'\n    while True:\n        page += 1\n        url = initial_url + str(page)\n        soup = BeautifulSoup(requests.get(url=url).content, 'html5lib')\n        items = soup.find_all('li', {'class': 'job-list__item'})\n        if not items:\n            break\n        if url in urls:\n            continue\n        urls.append(url)\n        print(url)\n        for item in items:\n            title = item.find('span', {'class': 'job-list__title'}).text.replace(' – ', ' - ').replace('€', '').replace('£', '').strip()\n            emp = item.find('p', {'class': 'job-list__company'})\n            if emp.find('span', {'class': 'job-list__rank'}):\n                emp.find('span', {'class': 'job-list__rank'}).decompose()\n            employer = emp.text.strip()\n            link = 'https://www.graduate-jobs.com' + item.find('a', {'class': 'job-list__link'})['href']\n            link_soup = BeautifulSoup(requests.get(url=link).content, 'html5lib')\n            loc = link_soup.find('dt', text=r'Location:').parent\n            loc.find(class_='job-page-overview__title').decompose()\n            location = loc.dd.text.strip()\n            sec = link_soup.find('dt', text=r'Sectors:').parent\n            sec.find(class_='job-page-overview__title').decompose()\n            sector = sec.dd.text.strip()\n            line = [employer, title, sector, location, provider, link + '||View']\n            if line not in lines:\n                print(line)\n                lines.append(line)\n    generator_xml(lines=lines, filename='Graduate_Jobs.xml')\n\n\nif __name__ == '__main__':\n    print('=============================Start===============================')\n    graduate()\n    print('============================= The End ===========================')\n","sub_path":"scripts/graduate.py","file_name":"graduate.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
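A minimal usage sketch for the generator_xml helper in scripts/graduate.py above; the job record values and output file name are illustrative placeholders, not scraped data:

lines = [
    ['Acme Ltd', 'Graduate Engineer', 'Engineering', 'London', 'Graduate Jobs', 'https://example.com/job/1||View'],  # hypothetical record
]
# generator_xml prefixes 'output/' to the filename, so this writes
# output/Example_Jobs.xml with one <job> element per record.
generator_xml(lines=lines, filename='Example_Jobs.xml')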