diff --git "a/2014.jsonl" "b/2014.jsonl" new file mode 100644--- /dev/null +++ "b/2014.jsonl" @@ -0,0 +1,633 @@ +{"seq_id":"326066117","text":"import unittest\n\nclim = {\n \"danceability\" : 0.366,\n \"energy\" : 0.963,\n \"key\" : 11,\n \"loudness\" : -5.301,\n \"mode\" : 0,\n \"speechiness\" : 0.142,\n \"acousticness\" : 0.000273,\n \"instrumentalness\" : 0.0122,\n \"liveness\" : 0.115,\n \"valence\" : 0.212,\n \"tempo\" : 137.114,\n \"type\" : \"audio_features\",\n \"id\" : \"7ouMYWpwJ422jRcDASZB7P\",\n \"uri\" : \"spotify:track:7ouMYWpwJ422jRcDASZB7P\",\n \"track_href\" : \"https://api.spotify.com/v1/tracks/7ouMYWpwJ422jRcDASZB7P\",\n \"analysis_url\" : \"http://echonest-analysis.s3.amazonaws.com/TR/EKRQaQmj3oVgxmhikP2Kx2cRkxwVujI8d_aIe4q3fC--3HVSRY8cDTGJUAnNxoAEgGjv3yK_KUHBqV644=/3/full.json?AWSAccessKeyId=AKIAJRDFEY23UEVW42BQ&Expires=1459622447&Signature=St0mLXTL/oy35kZXDUgIjypuyxo%3D\",\n \"duration_ms\" : 366213,\n \"time_signature\" : 4\n}\nsong1={\n \"danceability\" : 0.602,\n \"energy\" : 0.905,\n \"key\" : 2,\n \"loudness\" : -4.046,\n \"mode\" : 1,\n \"speechiness\" : 0.0775,\n \"acousticness\" : 0.000202,\n \"instrumentalness\" : 0.0640,\n \"liveness\" : 0.117,\n \"valence\" : 0.436,\n \"tempo\" : 128.019,\n \"type\" : \"audio_features\",\n \"id\" : \"4VqPOruhp5EdPBeR92t6lQ\",\n \"uri\" : \"spotify:track:4VqPOruhp5EdPBeR92t6lQ\",\n \"track_href\" : \"https://api.spotify.com/v1/tracks/4VqPOruhp5EdPBeR92t6lQ\",\n \"analysis_url\" : \"http://echonest-analysis.s3.amazonaws.com/TR/u7X79QAoA7BbONklFpvFCOTtaPbWCeWakqZxiyU4za75wHddFWLMJZacQRplMUGc4ofuDEGgV91PRYh20=/3/full.json?AWSAccessKeyId=AKIAJRDFEY23UEVW42BQ&Expires=1459622447&Signature=yHBljwTOAAJVKthY/1kxsIvJIvM%3D\",\n \"duration_ms\" : 304840,\n \"time_signature\" : 4\n }\nsong2={\n \"danceability\" : 0.585,\n \"energy\" : 0.842,\n \"key\" : 9,\n \"loudness\" : -5.883,\n \"mode\" : 0,\n \"speechiness\" : 0.0556,\n \"acousticness\" : 0.00242,\n \"instrumentalness\" : 0.00686,\n \"liveness\" : 0.0866,\n \"valence\" : 0.437,\n \"tempo\" : 118.211,\n \"type\" : \"audio_features\",\n \"id\" : \"2takcwOaAZWiXQijPHIx7B\",\n \"uri\" : \"spotify:track:2takcwOaAZWiXQijPHIx7B\",\n \"track_href\" : \"https://api.spotify.com/v1/tracks/2takcwOaAZWiXQijPHIx7B\",\n \"analysis_url\" : \"http://echonest-analysis.s3.amazonaws.com/TR/ksgbYAlncW_bVSXjI4AQrzOBtuvH1odjck8NTEGyeK67ADIa_4KAfB2S5VDeXNqDV_xWt7KSWJw2KIyiI=/3/full.json?AWSAccessKeyId=AKIAJRDFEY23UEVW42BQ&Expires=1459622447&Signature=W6VRgXUADsefpiuowqslKmoGYnA%3D\",\n \"duration_ms\" : 237040,\n \"time_signature\" : 4\n }\natts = { \"audio_features\" : [ clim, song1, song2 ]}\n\n\n\ndef energies(alist):\n \"\"\"create a list of the energies of the songs\"\"\"\n list_of_tracks = alist\n lenergies=[]\n for i in list_of_tracks:\n lenergies.append(i[\"energy\"])\n return lenergies\n\ndef dances(alist):\n \"\"\"create a list of the danceabilities of the songs\"\"\"\n list_of_tracks = alist\n ldances=[]\n for i in list_of_tracks:\n ldances.append(i[\"danceability\"])\n return ldances\n\ndef tempos(alist):\n \"\"\"create a list of the tempos of the songs\"\"\"\n list_of_tracks = alist\n ltempos=[]\n for i in list_of_tracks:\n ltempos.append(i[\"tempo\"])\n return ltempos\n\nclass AttribsTests(unittest.TestCase):\n \"\"\"tests the attribute list funcitons\"\"\"\n def test_energies(self):\n self.assertEqual(energies(atts[\"audio_features\"]),[0.963, 0.905, 0.842])\n def test_dances(self):\n self.assertEqual(dances(atts[\"audio_features\"]),[.366,.602,.585])\n def test_tempos(self):\n 
self.assertEqual(tempos(atts[\"audio_features\"]),[137.114,128.019,118.211])\n\ndef climax(alist):\n \"\"\"find which track is the 'climax' (dance and energy, tempo)\"\"\"\n list_of_tracks = alist\n for i in range(0,len(list_of_tracks)):\n if max(dances(alist))==(dances(alist)[i]):\n dancei=i\n if (max(energies(alist))==(energies(alist)[i])):\n energyi=i\n if (dancei == energyi):\n lclimax=list_of_tracks[dancei]\n else:\n if ((list_of_tracks[dancei])[\"tempo\"] >\n (list_of_tracks[energyi])[\"tempo\"]):\n lclimax=list_of_tracks[dancei]\n elif ((list_of_tracks[dancei])[\"tempo\"] <\n (list_of_tracks[energyi])[\"tempo\"]):\n lclimax=list_of_tracks[energyi]\n else: lclimax=list_of_tracks[dancei]\n return lclimax\n\ndef climaxorder(alist):\n \"\"\"orders the list in terms of climax-ness\"\"\"\n list_of_tracks = alist\n climorder = []\n while len(list_of_tracks) > 0:\n climorder.append(climax(list_of_tracks))\n list_of_tracks.remove(climax(list_of_tracks))\n return climorder\n\ndef playlistmaker(alist):\n blist = climaxorder(alist)\n clist = blist.copy()\n climaxs = clist[0]\n climl = blist[1:]\n playrising = []\n playfalling = []\n for i in range(0,len(climl)):\n if i % 3 == 0:\n playfalling.append(climl[i])\n else:\n playrising.append(climl[i])\n playrising.reverse()\n playrising.append(climaxs)\n playlist = playrising + playfalling\n return playlist\n\ndef idlist(alist):\n \"\"\"create a list of the ids of the songs\"\"\"\n list_of_tracks = playlistmaker(alist)\n lids=[]\n for i in list_of_tracks:\n lids.append(i[\"id\"])\n return lids \n\n\nclass ClimaxTests(unittest.TestCase):\n \"\"\"tests the functions involving climax\"\"\"\n def test_climaxorder(self):\n self.assertEqual(climaxorder(atts[\"audio_features\"]),[clim,song1,song2])\n def test_climax(self):\n self.assertEqual(climax(atts[\"audio_features\"]),clim)\n def test_playlistmaker(self):\n self.assertEqual(playlistmaker([song1,song2,clim]),[song2, clim, song1])\n def test_idlist(self):\n self.assertEqual(idlist([song1,song2,clim]), [\"2takcwOaAZWiXQijPHIx7B\",\"7ouMYWpwJ422jRcDASZB7P\",\"4VqPOruhp5EdPBeR92t6lQ\"])\n\n\nimport spotipy\n\ndef plistTotracks(uid,pid):\n user_playlist_tracks(uid, playlist_id=pid, fields=id)\n\ndef tracksToplist(uid,pid,trackids):\n user_playlist_replace_tracks(uid, pid, trackids)\n\ndef main(uid,pid):\n tracksToplist(uid, pid, idlist(audio_features(plistTotracks(uid,pid)))[\"audio_features\"])\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"spotifychooser.py","file_name":"spotifychooser.py","file_ext":"py","file_size_in_byte":6195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"7616323","text":"#!/usr/bin/env python\n\n# -*- encoding: utf-8 -*-\n\n'''\n@Author : {lif54334}\n\n@Software: PyCharm\n\n@File : sentiment.py\n\n@Time : 2019/7/3 8:39\n\n@Desc :\n\n'''\nimport csv\nimport json\nimport jieba\nimport pandas as pd\nimport numpy as np\n\ndef read():\n big_dict={}\n with open(\"result.txt\",\"r\",encoding=\"utf8\")as fr:\n lines=fr.readlines()\n for line in lines:\n num_list=[]\n line2=eval(line)\n word=(line2[\"Word\"])\n fre=line2[\"Frequency\"]\n Valence_Mean=line2[\"Valence_Mean\"]\n Valence_SD=line2[\"Valence_SD\"]\n Arousal_Mean=line2[\"Arousal_Mean\"]\n Arousal_SD=line2[\"Arousal_SD\"]\n num_list=[fre,Valence_Mean,Valence_SD,Arousal_Mean,Arousal_SD]\n big_dict[word]=num_list\n big_dict[\"不错\"]=['686', '6.3', '0.447', '4.4', '0.894']\n return big_dict\ndef txt_emotions(big_dict):\n with open('test1.csv', 'a', 
newline='',encoding=\"utf_8_sig\")as csv_file:\n        # get a csv writer object to write the rows\n        writer = csv.writer(csv_file)\n        writer.writerow(['text', 'sentiment word count', 'Frequency', 'Valence_Mean','Valence_SD','Arousal_Mean','Arousal_SD'])\n        df=pd.read_excel(\"review.xls\", sheet_name='Sheet1',)\n        items=df.iloc[:,[0]].values\n        for item in items:\n            txt=item[0]\n            seg_list = jieba.cut(txt)\n            num_lists=[]\n            for seg in seg_list:\n                try:\n                    if big_dict[seg]:\n                        num_list=big_dict[seg]\n                        num_lists.append(num_list)\n                except KeyError:\n                    pass\n            if len(num_lists)>0:\n                num=np.zeros(5,dtype=np.float64)\n                for i in num_lists:\n                    nums=np.array(i,dtype=np.float64)\n                    num=num+nums\n                numlist_result=list((num/len(num_lists)))\n                numlist_num=len(num_lists)\n            else:\n                numlist_result=list((np.zeros(5,dtype=np.float64)))\n                numlist_num=0\n            csv_text=[txt,numlist_num,numlist_result[0],numlist_result[1],numlist_result[2],numlist_result[3],numlist_result[4]]\n            writer.writerow(csv_text)\ndef main():\n    # big_dict={\"不错\":[1,1,1,1,1]}\n    big_dict=read()\n    print(big_dict)\n    txt_emotions(big_dict)\nif __name__ == '__main__':\n    main()","sub_path":"data_mine/excel_nums/sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"213702553","text":"from ..typecheck import *\n\nimport sublime\nimport sublime_plugin\n\nfrom ..libs import asyncio\n\nfrom .core import call_soon_threadsafe, create_future, coroutine, awaitable\nfrom .event import Handle\n\n@coroutine\ndef sublime_open_file_async(window: sublime.Window, file: str, line: Optional[int] = None) -> awaitable[sublime.View]:\n\tview = window.open_file(file)\n\tyield from wait_for_view_to_load(view)\n\tif line is None:\n\t\treturn view\n\tview.show(view.text_point(line, 0), True)\n\treturn view\n\n\n@coroutine\ndef wait_for_view_to_load(view: sublime.View):\n\tfrom .. 
import ui\n\tif view.is_loading():\n\t\tfuture_view = create_future()\n\n\t\tdef loaded_view(v: sublime.View) -> None:\n\t\t\tif view.id() == v.id():\n\t\t\t\tfuture_view.set_result(view)\n\n\t\thandle = ui.view_loaded.add(loaded_view)\n\t\tyield from future_view\n\t\thandle.dispose()\n","sub_path":"modules/core/sublime.py","file_name":"sublime.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"143811319","text":"# The environment on Centos 7 is:\n# source /cvmfs/sft.cern.ch/lcg/views/SetupViews.sh LCG_99 x86_64-centos7-gcc10-opt\nimport ROOT as R\nimport numpy as np\nimport time\nimport config_parse\nimport os\nimport yaml\nfrom glob import glob\n\nR.gROOT.ProcessLine(\".include ../../..\")\n\nprint(\"Compiling Setup classes...\")\n\nwith open(os.path.abspath( \"../configs/training_v1.yaml\")) as f:\n config = yaml.safe_load(f)\nR.gInterpreter.Declare(config_parse.create_scaling_input(\"../configs/scaling_params_v1.json\", config, verbose=False))\nR.gInterpreter.Declare(config_parse.create_settings(config, verbose=False))\n\nprint(\"Compiling DataLoader_main...\")\nR.gInterpreter.Declare('#include \"../interface/DataLoader_main.h\"')\nR.gInterpreter.Declare('#include \"TauMLTools/Core/interface/exception.h\"')\n\nn_tau = R.Setup.n_tau\nn_inner_cells = R.Setup.n_inner_cells\nn_outer_cells = R.Setup.n_outer_cells\nn_fe_tau = R.Setup.n_TauFlat\nn_gridglob = R.Setup.n_GridGlobal\nn_pf_el = R.Setup.n_PfCand_electron\nn_pf_mu = R.Setup.n_PfCand_muon\nn_pf_chHad = R.Setup.n_PfCand_chHad\nn_pf_nHad = R.Setup.n_PfCand_nHad\nn_pf_gamma = R.Setup.n_PfCand_gamma\nn_ele = R.Setup.n_Electron\nn_muon = R.Setup.n_Muon\ntau_types = R.Setup.tau_types_names.size()\ninput_files = glob(f'{R.Setup.input_dir}*.root')\n\nn_grid_features = {\n \"GridGlobal\" : n_gridglob,\n \"PfCand_electron\" : n_pf_el,\n \"PfCand_muon\" : n_pf_mu,\n \"PfCand_chHad\" : n_pf_chHad,\n \"PfCand_nHad\" : n_pf_nHad,\n \"PfCand_gamma\" : n_pf_gamma,\n \"Electron\" : n_ele,\n \"Muon\" : n_muon\n}\n\ninput_grids =[ [ \"GridGlobal\", \"PfCand_electron\", \"PfCand_gamma\", \"Electron\" ],\n [ \"GridGlobal\", \"PfCand_muon\", \"Muon\" ],\n [ \"GridGlobal\", \"PfCand_chHad\", \"PfCand_nHad\" ] ]\n\ninput_files = []\nfor root, dirs, files in os.walk(os.path.abspath(R.Setup.input_dir)):\n for file in files:\n input_files.append(os.path.join(root, file))\n\ndata_loader = R.DataLoader()\n\nn_batches = 1000\nn_batches_store = 5\n\nfrom queue import Queue\ndata_que = Queue(maxsize = n_batches_store)\n\ntimes = []\n\ndef getdata(_obj_f, _reshape, _dtype=np.float32):\n x = np.copy(np.frombuffer(_obj_f.data(), dtype=_dtype, count=_obj_f.size()))\n return x if _reshape==-1 else x.reshape(_reshape)\n\ndef getgrid(_obj_grid, _inner):\n _n_cells = n_inner_cells if _inner else n_outer_cells\n _X = []\n for group in input_grids:\n _X.append(\n np.concatenate(\n [ getdata(_obj_grid[ getattr(R.CellObjectType,fname) ][_inner],\n (n_tau, _n_cells, _n_cells, n_grid_features[fname])) for fname in group ],\n axis=-1\n )\n )\n return _X\n\nc = 0\ndata_loader.ReadFile(R.std.string(input_files[c]), 0, -1)\nc+=1\n\nfor i in range(n_batches):\n\n start = time.time()\n\n if(data_que.full()):\n _ = data_que.get()\n\n checker = data_loader.MoveNext()\n\n if checker==False:\n data_loader.ReadFile(R.std.string(input_files[c]), 0, -1)\n c+=1\n continue\n\n data = data_loader.LoadData()\n\n\n # Flat Tau features\n X = [getdata(data.x_tau, (n_tau, n_fe_tau))]\n # Inner grid\n X 
+= getgrid(data.x_grid, 1) # 500 11 11 176\n # Outer grid\n X += getgrid(data.x_grid, 0) # 500 21 21 176\n\n X = tuple(X)\n\n weights = getdata(data.weight, -1)\n Y = getdata(data.y_onehot, (n_tau, tau_types))\n\n\n data_que.put(X)\n\n end = time.time()\n print(i, \" end: \",end-start, ' s.')\n times.append(end-start)\n\n for x in X:\n if np.isnan(x).any():\n print(\"Nan detected! element=\",x.shape) \n print(np.argwhere(x))\n if np.isinf(x).any():\n print(\"Inf detected! element=\",x.shape)\n if np.amax(x)==0:\n print(\"Empty tuple detected! element=\",x.shape)\n\nfrom statistics import mean\nprint(\"Mean time: \", mean(times))\n\ntime_arr = np.asarray(times)\nnp.savetxt(\"dataloader.csv\", time_arr, delimiter=\",\")\n","sub_path":"Training/python/DataLoader_test.py","file_name":"DataLoader_test.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"398573404","text":"''' the configure of soft attention'''\n''' soft attention 的配置文件'''\nfrom .nic import NIC_cfg\n\nclass SoftAtt_cfg(NIC_cfg):\n\n def __init__(self):\n\n super(SoftAtt_cfg, self).__init__()\n self.model = 'att'\n self.fea_dim = 512\n self.att_dim = 100\n self.batch_size = 64\n self.lam = 1 # attention regular loss rate (attention正则损失比重)\n self.en_lr = 1e-4\n self.vis_dir = '../../eval_log/{}/{}/vis_img/'\n\n\n","sub_path":"config/softAtt.py","file_name":"softAtt.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649374842","text":"import warnings\n\n\nclass Decorators(object):\n\n __warning_header = 'Decorator Warning: '\n __warning_messages = {\n 'not_valid_column': 'Some columns names are not in the related '\n 'dataframe.',\n 'invalid_types': 'Not valid types.'\n }\n\n @classmethod\n def __launch_warning(cls, warning_type):\n warnings.warn(\n cls.__warning_header +\n cls.__warning_messages[warning_type]\n )\n\n @classmethod\n def __validate_columns_list(cls, df, columns_list):\n for c in columns_list:\n if c not in df.columns.values:\n return False\n return True\n\n @classmethod\n def __get_param_from_arguments(cls, i, args, kwargs):\n if i < len(args):\n return args[i]\n else:\n kwargs_as_list = list(kwargs.items())\n return kwargs_as_list[i-len(args)][1]\n\n @classmethod\n def validate_columns(cls, df_arg_pos, column_arg_pos):\n def validate_column_generator(function_to_decorate):\n def inner_function(*args, **kwargs):\n df_to_validate = \\\n cls.__get_param_from_arguments(df_arg_pos, args, kwargs)\n columns_to_validate = \\\n cls.__get_param_from_arguments(column_arg_pos, args, kwargs)\n\n if isinstance(columns_to_validate, str):\n if columns_to_validate in df_to_validate.columns.values:\n return function_to_decorate(*args, **kwargs)\n else:\n cls.__launch_warning('not_valid_column')\n return df_to_validate\n elif isinstance(columns_to_validate, list):\n if cls.__validate_columns_list(df_to_validate,\n columns_to_validate):\n return function_to_decorate(*args, **kwargs)\n else:\n cls.__launch_warning('not_valid_column')\n return df_to_validate\n else:\n cls.__launch_warning('invalid_types')\n return function_to_decorate(*args, **kwargs)\n\n return inner_function\n return 
validate_column_generator\n","sub_path":"src/rimac_analytics_api/Old/rimac_analytics/decorators/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"139920666","text":"import requests\nimport json\nimport itertools\nimport GetLookUpTable\n #This Code Works - returns Dictionary with each key being a skelID and each value being a list of their annotation IDs\nimport config\n\n\n\ntoken = config.token\nauth = config.CatmaidApiTokenAuth(token)\n\nproject_id = config.project_id\n\n\ndef getVolumeIDs():\n response = requests.get(\n 'https://neuropil.janelia.org/tracing/fafb/v14/1/volumes/'.format(project_id), \n auth = auth\n )\n\n myData = json.loads(response.content)\n\n volumeDict = {}\n \n for i in myData:\n currentName = i['name']\n currentID = i['id']\n volumeDict[currentID] = currentName\n return volumeDict\n\ndef getCondensedVolumeDict():\n volumeDict = getVolumeIDs()\n condensedVolumeDict={}\n \n for x in volumeDict:\n if 'v14' not in volumeDict[x]:\n if '_R' in volumeDict[x] or '_L' in volumeDict[x]:\n condensedVolumeDict[volumeDict[x]] = x\n \n \n return condensedVolumeDict\n\n\n#returns list of strings of VOLume ids\ndef getVolumeIDintList():\n myVols = getVolumeIDs()\n volList = list(myVols.values())\n return volList\n\ndef getVolumeStringList():\n myVols = getVolumeIDs()\n volList = list(myVols.keys())\n return volList\n\n\n#return dictionary containing only ITO nomenclature neuropils (with 1 of each neuropil)\ndef filterVolumes():\n myVols = getVolumeIDs()\n myVols = neuropilLookUp(myVols)\n filteredVols = {}\n for i in myVols:\n #if ('_R' in i or '_L' in i) and (len(i) <= 10) and ('v14' not in i):\n if ('_R' in i or '_L' in i or len(i)<5) and (len(i)<=10) and not('v14' in i):\n filteredVols[i] = myVols[i]\n return filteredVols\n\n#reverse above dictionary, so as to have a means to look up neuropil name based upon volume id\ndef neuropilLookUp(myDict = None):\n if myDict is None:\n myDict = filterVolumes()\n myLookUp = {v: k for k, v in myDict.items()}\n return myLookUp\n\n\n#returns list of int of VOLume ids\ndef getFilteredVolumeIDList():\n myVols = filterVolumes()\n volList = list(myVols.values())\n return volList\n\n#returns dictionary with volume IDs as keys and min/max x,y,z as values ({'max': {'x': 349868.054, 'y': 286977.5893, 'z': 162232.8238}, 'min': {'x': 323114.8215, 'y': 255270.7114, 'z': 142607.9776}}\ndef getAllVolumeBoundaries():\n myVolumes = getFilteredVolumeIDList()\n myBounds = {}\n for i in myVolumes:\n\n response = requests.get(\n 'https://neuropil.janelia.org/tracing/fafb/v14/{}/volumes/{}'.format(project_id, i),\n auth=auth\n )\n myData = json.loads(response.content)\n tempBound = myData['bbox']\n myBounds[i] = tempBound\n return myBounds\n\n\n\n\n\n#input should be a volume name with either _R or _L to specify hemisphere \ndef getVolumeBoundary(volume):\n volList = getVolumeIDs()\n if '_R' not in volume and '_L' not in volume:\n volume += '_R' #defaults to right hemisphere volume if not specified\n myVolID = volList[volume]\n response = requests.get(\n 'https://neuropil.janelia.org/tracing/fafb/v14/1/volumes/{}/'.format(myVolID), \n auth = auth\n )\n\n myData = json.loads(response.content)\n return myData\n \n \n \n#def getVolumeBoundaries():\n # myVolumes = getVolumeIDs()\n \n # myVolumeIDs = myVolumes.keys()\n # myBoundDict = {}\n \n \n #for i in myVolumeIDs:\n # r = requests.get(\n # 
'https://neuropil.janelia.org/tracing/fafb/v14/{}/volumes/{}/'.format(project_id, i), \n # auth = auth\n #) \n \n \n \n #newData = json.loads(r.content)\n #thisBound = response['bbox']\n \n #myBoundDict[i] = thisBound\n \n \n #return myBoundDict\n \n \ndef getVolumePoints(volume_id):\n response = requests.get(\n 'https://neuropil.janelia.org/tracing/fafb/v14/{}/volume_detail'.format(project_id),\n auth=auth,\n data = (volume_id)\n )\n myData = json.loads(response.content)\n return myData\n\nimport numpy as np\ndef intersectPointVolume(volume_id, skeletonPoints):\n x = np.double(skeletonPoints[0])\n y = np.double(skeletonPoints[1])\n z = np.double(skeletonPoints[2])\n response = requests.get(\n 'https://neuropil.janelia.org/tracing/fafb/v14/{}/volumes/{}/intersect'.format(project_id, volume_id),\n auth=auth,\n data = { 'x': x, 'y': y, 'z': z}\n )\n myData = json.loads(response.content)\n return myData\n\n","sub_path":"getAllVolumeIDs.py","file_name":"getAllVolumeIDs.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"149862752","text":"import pytest\nfrom fixture.Application import Application\n\n\n@pytest.fixture()\ndef app(request):\n fixture = Application()\n request.addfinalizer(fixture.Stop)\n return fixture\n\ndef test_IPV(app):\n\n app.Options_Forms()\n app.Add_cam_user()\n app.Check_Add_cam_user()\n app.Delete_cam()\n app.Add_cam_admin()\n app.Check_Add_cam_admin()\n app.OSD()\n app.NetworkConfig()\n app.Profils()\n app.ImageConfig()\n app.Open_IE()\n app.Events()\n app.PlaySound()\n app.BlinkBorder()\n app.Poupup()\n\n","sub_path":"testIPV/test/test_IPV_in_code.py","file_name":"test_IPV_in_code.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"631001482","text":"\n# Tanto las Tumpla como las listas son conjuntos ordenados de elementos, no asi los diccionarios.\n\n# Una tupla es una variable que permite almeasenar datos inmutables (no pueden ser modificados una ves creados) de tipos\n# diferentes. 
Las tuplas se encierran entre parentesis().\n # Tiene longitud fija\n # Solo tine una dimencion\n\n# Que son las tuplas:\n # Las tuplas son listas inmutables, es decir, no se puede modificar despues de su creacion.\n # No permite anadir, eliminar, mover elementos etc(no append, extend, remove)\n # Si permiten extraer porciones, pero el resultado de la extraccion es una tupla nueva.\n # No permiten busquedas (no incex), en los versiones de puthon antiguos 2,5 2,4 2,3...\n # Si permiten comprobar si un elemento se encuentra en la tupla.\n # Que utulidad o ventajas tinen respecto a las listas?\n # Mas rapidas\n # Menos espacio (mayor optimizacion)\n # Formatean Strings\n # Pueden utilizarse como claves en un diccionario.(las listas no)\n#-----------------------------------------------------------------------------------------------------------------------\ntupla_A = (4, 'Hola', 6.0, 99)\n# print('tupla_A = ', tupla_A)\n# print('Tupe de tupla_A = ', type(tupla_A))\n# print('tupla_A[0] = ', tupla_A[0])\n# Da una error, este indice no existe\n# print('tupla_A[7] = ', tupla_A[7]) # IndexError: tuple index out of range\n# print(tupla_A[0:10]) # Asi no le da IndexError\n# print('---------------------------------------')\n#-------------------------------------------- Operaciones sobre tuplas -------------------------------------------------\nmyTupla = (12, 'Roman', 12, 2000, 12)\n# print(myTupla[2])\n\n# Crear una tupla a partir de uan lista\nmyLista = [1,3,4]\nmyTupla2 = tuple(myLista)\n# print(myTupla2)\n\n# Comprobar si unelemento se encuentra en la tupla\n# print('Roman' in myTupla)\n\n# Metodo count() permite buscar la cantidad de mizmos elementos, es decir cuantos elementos(que se pasan como carametro)\n# iguales existen en la tupla\n# print(myTupla.count(12))\n\n# Metodo len() le dise la longitud de la tupla\n# print(len(myTupla))\n\n# Tupla unitaria, o tupla con un unico elemento\ntuplaUnElemento = ('Roman',) # para crear tupla con un elemento treba stavutu komy v kinci. A esli ne postavit komy v kinci\n# to eto yze ne bydet tupla. Nyzno yznat chto budet esli ne postavit komy\n# print(tuplaUnElemento)\n# print(len(tuplaUnElemento))\n\n# Una tupla pude ser creada sin parantesis, o se puede dicir empaquetado de tupla\ntuplaSinP = \"Roman\", 12, 45, \"Hello\"\n# print(tuplaSinP)\n\n# Desempaquetar tuplas, a veces interesa asignar un nombre a los elementos de la tuplas para, posteriormente trabajar\n# con esas variables:\nlaborales = (1,2,3,4,5)\nlunes, martes, miercoles, jueves, viernes = laborales\n# print(martes)\n# print(viernes)\n\n# Methodo indice() hace que nosotros podemos aceder al elemento de tupla por un indice, en las verisiones de puthon\n# 2,5 2,4 ... 
no estaba permetido\nprint(tuplaSinP[2])\n#-----------------------------------------------------------------------------------------------------------------------\n# ERROR tupla ne vozmozno meniat\n# print('Antes ',tupla_A)\n# tupla_A[0] = 0.0 # TypeError: 'tuple' object does not support item assignment\n# print('Despues ', tupla_A)\n\n# Los elementos de una lista o tupla pueden ser de cualquier tipo, incluyendo otra lista o tupla\ndatosT = (('1', '2', '3'), 'Hola', 4)\n# print(datosT)\nsT = datosT[0][1]\n# print(sT)\n# print(type(sT))\n\n# En algunos cassos nos puede interesar definir tupla de tuplas:\ntupla_1 = (1, 2, 3, 4), 'A', 'B', 'C', 'D', (5, 6, 7, 8, 9) # mozna neykazyvatu zovniwni dywku\n# print(tupla_1)\ntupla_2 = (1, 2, 3, 4), 'A', 'B', 'C', 'D', [5, 6, 7, 8, 9] # mozna i tak robutu\n# print(tupla_2)\n# print('---------------------------------------------------')\nlista_1 = [1, 2, 3, 4], 'A', 'B', 'C', 'D', [5, 6, 7, 8, 9] # mozna neykazyvatu zovniwni dywku\n# print(lista_1)\nlista_2 = (1, 2, 3, 4), 'A', 'B', 'C', 'D', [5, 6, 7, 8, 9] # mozna i tak robutu\n# print(lista_2)\n\n#----------------------------------- Concatenacion tuplas con + --------------------------------------------\n# Estas operaciones no modifican las secuencias originales.\ntupla_1 = (1,2,3,4,5,6)\ntupla_2 = (11,22,33,44,55,66)\ntupla_result = tupla_1 + tupla_2\n# print(tupla_result)\ntupla_result1 = (1,2,3,4) + (5,6,7)\n# print(tupla_result1)\n\n#--------------------------------------------- Replication (*) de secuencias -------------------------------------------\n# Estas operaciones no modifican las secuencias originales.\nt = tupla_1 * 4\n# print(t)\n\n\n","sub_path":"src/main/python/Tupla.py","file_name":"Tupla.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"151475718","text":"from tensorflow.keras.applications import MobileNetV2\nimport tensorflow.keras.layers as layers\nimport tensorflow as tf\nimport numpy as np\n\ndef get_mobilenet_SSD(image_size, num_classes):\n mobilenet = MobileNetV2(input_shape=image_size, include_top=False, weights=\"imagenet\")\n for layer in mobilenet.layers:\n layer._name = layer.name + '_base'\n\n x = layers.BatchNormalization(beta_initializer='glorot_uniform', gamma_initializer='glorot_uniform')(mobilenet.get_layer(name='block_8_add_base').output)\n conf1 = layers.Conv2D(4*4*num_classes, kernel_size=3, padding='same')(x)\n conf1 = layers.Reshape((conf1.shape[1]*conf1.shape[2]*conf1.shape[3]//num_classes, num_classes))(conf1)\n loc1 = layers.Conv2D(4*4*4, kernel_size=3, padding='same')(x)\n loc1 = layers.Reshape((loc1.shape[1]*loc1.shape[2]*loc1.shape[3]//4, 4))(loc1)\n\n x = layers.MaxPool2D(3, 1, padding='same')(mobilenet.get_layer(name='block_12_add_base').output)\n x = layers.Conv2D(1024, 3, padding='same', dilation_rate=6, activation='relu')(x)\n x = layers.Conv2D(1024, 1, padding='same', activation='relu')(x)\n conf2 = layers.Conv2D(6 * num_classes, kernel_size=3, padding='same')(x)\n conf2 = layers.Reshape((conf2.shape[1] * conf2.shape[2] * conf2.shape[3] // num_classes, num_classes))(conf2)\n loc2 = layers.Conv2D(6 * 4, kernel_size=3, padding='same')(x)\n loc2 = layers.Reshape((loc2.shape[1]*loc2.shape[2]*loc2.shape[3]//4, 4))(loc2)\n\n x = layers.Conv2D(256, 1, activation='relu')(x)\n x = layers.Conv2D(512, 3, strides=2, padding='same', activation='relu')(x)\n conf3 = layers.Conv2D(6 * num_classes, kernel_size=3, padding='same')(x)\n conf3 = 
layers.Reshape((conf3.shape[1] * conf3.shape[2] * conf3.shape[3] // num_classes, num_classes))(conf3)\n loc3 = layers.Conv2D(6 * 4, kernel_size=3, padding='same')(x)\n loc3 = layers.Reshape((loc3.shape[1] * loc3.shape[2] * loc3.shape[3] // 4, 4))(loc3)\n\n x = layers.Conv2D(128, 1, activation='relu')(x)\n x = layers.Conv2D(256, 3, strides=2, padding='same', activation='relu')(x)\n conf4 = layers.Conv2D(6 * num_classes, kernel_size=3, padding='same')(x)\n conf4 = layers.Reshape((conf4.shape[1] * conf4.shape[2] * conf4.shape[3] // num_classes, num_classes))(conf4)\n loc4 = layers.Conv2D(6 * 4, kernel_size=3, padding='same')(x)\n loc4 = layers.Reshape((loc4.shape[1] * loc4.shape[2] * loc4.shape[3] // 4, 4))(loc4)\n\n x = layers.Conv2D(128, 1, activation='relu')(x)\n x = layers.Conv2D(256, 3, activation='relu')(x)\n conf5 = layers.Conv2D(4 * num_classes, kernel_size=3, padding='same')(x)\n conf5 = layers.Reshape((conf5.shape[1] * conf5.shape[2] * conf5.shape[3] // num_classes, num_classes))(conf5)\n loc5 = layers.Conv2D(4 * 4, kernel_size=3, padding='same')(x)\n loc5 = layers.Reshape((loc5.shape[1] * loc5.shape[2] * loc5.shape[3] // 4, 4))(loc5)\n\n x = layers.Conv2D(128, 1, activation='relu')(x)\n x = layers.Conv2D(256, 3, activation='relu')(x)\n conf6 = layers.Conv2D(4 * num_classes, kernel_size=3, padding='same')(x)\n conf6 = layers.Reshape((conf6.shape[1] * conf6.shape[2] * conf6.shape[3] // num_classes, num_classes))(conf6)\n loc6 = layers.Conv2D(4 * 4, kernel_size=3, padding='same')(x)\n loc6 = layers.Reshape((loc6.shape[1] * loc6.shape[2] * loc6.shape[3] // 4, 4))(loc6)\n\n confs = layers.concatenate([conf1, conf2, conf3, conf4, conf5, conf6], axis=1)\n locs = layers.concatenate([loc1, loc2, loc3, loc4, loc5, loc6], axis=1)\n model = tf.keras.Model(inputs=mobilenet.layers[0].output, outputs=[confs, locs])\n\n return model\n\nif __name__ == '__main__':\n num_classes = 10\n model = get_mobilenet_SSD(image_size=(300, 300, 3), num_classes=num_classes)\n\n print(model.summary())\n\n image = np.random.rand(1, 300, 300, 3)\n confs, locs = model.predict(image)\n print('confs shape =', np.shape(confs))\n print('locs shape =', np.shape(locs))\n","sub_path":"get_model.py","file_name":"get_model.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"74473683","text":"\r\nfile = open(\"archivo.txt\", \"rb\")\r\n'''\r\nclass token:\r\n\t_id, _num = range(2)\r\n\r\n\r\ndef siguiente():\r\n\tdato = \"\"\r\n'''\r\nn = 0\r\nlectura = \"\"\r\n\r\ntamano = len(file.read())\r\nfile.seek(0)\r\n\r\nwhile n < tamano:\r\n\tlectura = file.read(1)\r\n\tn+=1\r\n\r\nfile.close()\r\n","sub_path":"Cerpas/lexico.py","file_name":"lexico.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"634494757","text":"\n\nfrom xai.brain.wordbase.verbs._quaver import _QUAVER\n\n#calss header\nclass _QUAVERS(_QUAVER, ):\n\tdef __init__(self,): \n\t\t_QUAVER.__init__(self)\n\t\tself.name = \"QUAVERS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"quaver\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_quavers.py","file_name":"_quavers.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"172970656","text":"from planet5521.Processes import HumanViewProcess, ViewUpdateProcess, WorldUpdateProcess, EntityFadeAndRemovalProcess, 
BuildingDestructionProcess, CinematicCamProcess\nfrom planet5521.Planet5521World import Planet5521World\nfrom planet5521.graphics.Planet5521GameView import Planet5521GameView\nfrom planet5521.Controllers import CameraController, TestGroundTroopController, SelectionController, SelectedUnitsController\nfrom planet5521.ParticleSystemProcesses import BuildingDestructionParticleSystem\nfrom nEngine.Game import GameState\nfrom nEngine.Events import Event\nfrom nEngine.Entities import EntityFactory\nfrom nEngine.graphics.ResourceManager import ResourceManager\nfrom nEngine.graphics.nGUI import NGUIImage, NGUIBasicButton\nfrom nEngine.graphics.EntityViews import EntityViewFactory\nfrom nEngine.common.Components import VelocityComponent, FacingComponent\nfrom nEngine.Rand import Rand\nfrom nEngine.Input import Input\nfrom sfml import Sprite, Color, Vector2\n\n#TODO: HumanView states should be refactored into a class.\n# class should create relevant processes, add them to the PM, and terminate\n\nclass MainMenuState(GameState):\n def __init__(self, humanView):\n GameState.__init__(self)\n self._humanView = humanView\n \n \n def initialise(self):\n \n # Background\n self._background = NGUIImage(0, 0, Sprite(ResourceManager.getTexture(\"planet5521/data/screen.png\")))\n self._background.name = \"background\"\n self._humanView.getPane().addChild(self._background)\n \n # Buttons\n self._startButton = NGUIBasicButton(500, 350, 200, 75, \"Start\")\n self._exitButton = NGUIBasicButton(500, 450, 200, 75, \"Exit\")\n \n self._startButton.style = self._exitButton.style = \"default_title\"\n self._startButton.styleFocus = self._exitButton.styleFocus = \"default_title_focus\"\n self._startButton.stylePrimed = self._exitButton.stylePrimed = \"default_title_primed\"\n self._startButton.backgroundColour = self._exitButton.backgroundColour = Color(80, 25, 25, 100)\n self._startButton.backgroundColourFocus = self._exitButton.backgroundColourFocus = Color(80, 25, 25, 200)\n self._startButton.backgroundColourPrimed = self._exitButton.backgroundColourPrimed = Color(80, 25, 25, 255)\n \n self._humanView.getPane().addChild(self._startButton)\n self._humanView.getPane().addChild(self._exitButton)\n \n self._startButton.addListener(self)\n self._exitButton.addListener(self)\n # Make humanview visible\n self._pm.processList.append(HumanViewProcess(self._humanView)) \n \n def terminate(self):\n \"\"\"Does final computation on this game state before it is removed.\"\"\"\n self._humanView.getPane().clear()\n \n \n\n def onMouseDownEvent(self, dispatcher, event):\n if dispatcher == self._exitButton:\n self.done = True\n elif dispatcher == self._startButton:\n self._nextState = PlayState(self._humanView)\n self.done = True\n \n \n \n\nclass PlayState(GameState):\n def __init__(self, humanView):\n GameState.__init__(self)\n self._humanView = humanView\n \n \n def initialise(self):\n \n # Register event types\n Event.registerEventType(\"ENTITY_ADDED\")\n Event.registerEventType(\"ENTITY_REMOVED\")\n Event.registerEventType(\"ENTITY_MOVING\")\n Event.registerEventType(\"ENTITY_STOPPED\")\n Event.registerEventType(\"ENTITY_MOVED\")\n Event.registerEventType(\"ENTITY_FACING_UPDATED\")\n Event.registerEventType(\"ENTITY_ATTACKING\")\n Event.registerEventType(\"ENTITY_DIED\")\n Event.registerEventType(\"RESEARCH_DEPLETED\")\n Event.registerEventType(\"ENTITY_HARVEST_START\")\n Event.registerEventType(\"ENTITY_HARVEST_STOP\")\n Event.registerEventType(\"ENTITY_RESEARCH_DUMPED\")\n \n \n \n # Load stuff\n 
ResourceManager.loadAnimations(\"planet5521/data/animations.xml\")\n EntityFactory.getSingleton().readFile(\"planet5521/data/entities.xml\")\n EntityViewFactory.readFile(\"planet5521/data/views.xml\")\n \n self._world = Planet5521World(200, 80)\n \n self._gameView = Planet5521GameView(0, 0, 1280, 720, self._world)\n self._gameView.name = \"Game View\"\n \n \n \n \n self._world._em.registerListener(Event.ENTITY_ADDED, self._gameView.entityCreated)\n # Entity destruction is handled at the entity EventManager level\n \n \n # Make environment\n self._world.produce(\"Sky\", (0, 0))\n self._world.produce(\"Ground\", (0, self._world.height - 10))\n self._world.produce(\"Ground Surface\", (0, self._world.height - 10))\n \n for _ in range(0):\n e = self._world.produce(\"Cloud\", Vector2(Rand.r.random()*self._world.width, Rand.r.random()*(self._world.height - 20)))\n e.getComponent(VelocityComponent).v.x = Rand.r.random()\n \n # Make some units\n insectFaction = self._world.factions[\"Insects\"]\n for _ in range(15):\n #for _ in range(0):\n insectFaction.produce(\"Insect Warrior\", Vector2(10 + Rand.r.random()*20, self._world.height - 10-1.4))\n for _ in range(15):\n #for _ in range(0):\n insectFaction.produce(\"Insect Warrior\", Vector2(170 + Rand.r.random()*20, self._world.height - 10-1.4))\n \n terrans = self._world.factions[\"Terrans\"]\n for _ in range(15):\n #for _ in range(0):\n entity = terrans.produce(\"Terran Grunt\", Vector2(80 + (Rand.r.random())*10, self._world.height - 10-1))\n entity.getComponent(FacingComponent).facingLeft = Rand.r.random() < 0.5\n for _ in range(15):\n #for _ in range(0):\n entity = terrans.produce(\"Terran Grunt\", Vector2(110 + (Rand.r.random())*10, self._world.height - 10-1))\n entity.getComponent(FacingComponent).facingLeft = Rand.r.random() < 0.5\n for _ in range(15):\n #for _ in range(0):\n entity = terrans.produce(\"Terran Engineer\", Vector2(90 + (Rand.r.random())*20, self._world.height - 10-1))\n entity.getComponent(FacingComponent).facingLeft = Rand.r.random() < 0.5\n \n #insectFaction.produce(\"Insect Warrior\", Vector2(100, self._world.height - 10-1.4))\n terrans.produce(\"Terran Barracks\", Vector2(100,self._world.height - 10))\n \n # Controllers\n self._gameView.addListener(TestGroundTroopController(self._world, self._gameView))\n selectionController = SelectedUnitsController(self._world, self._gameView)\n self._gameView.addListener(selectionController)\n self._gameView.addListener(SelectionController(self._world, self._gameView, terrans, selectionController))\n self._gameView.addListener(CameraController(self._world, self._gameView))\n \n # Close by interactions\n #terrans.produce(\"Terran Grunt\", Vector2(80, self._world.height - 10-1))\n #terrans.produce(\"Terran Grunt\", Vector2(80.5, self._world.height - 10-1))\n #entity = terrans.produce(\"Terran Engineer\", Vector2(81, self._world.height - 10-1))\n #insectFaction.produce(\"Insect Warrior\", Vector2(65, self._world.height - 10-1.4))\n \n # Dropship is going to have something different.\n #dp = self._world.produce(\"Dropship\", Vector2(10, 10))\n #self._gameView.addListener(DropshipController(dp))\n \n self._humanView.getPane().addChild(self._gameView)\n \n # Make humanview visible\n self._pm.processList.append(ViewUpdateProcess(self._gameView))\n self._pm.processList.append(WorldUpdateProcess(self._world))\n self._pm.processList.append(EntityFadeAndRemovalProcess(self._world, self._gameView))\n self._pm.processList.append(HumanViewProcess(self._humanView))\n 
#self._pm.processList.append(CinematicCamProcess(self._gameView.camera))\n \n # TODO: This should perhaps be in Planet5521GameView?\n # TODO: And instead of changing the position, it should change view's dy \n self.bdP = BuildingDestructionProcess(self._world, self._gameView)\n self._pm.processList.append(self.bdP)\n self._world._em.registerListener(Event.ENTITY_DIED, self.entityDied)\n \n \n # For checking window resize events\n Input.addListener(self)\n \n def entityDied(self, event):\n entity = event.entity\n if entity.name in self._world._buildings:\n self.buildingDestroyed(event)\n self.bdP.addDestroyedBuildingEntity(event)\n \n def buildingDestroyed(self, event):\n bdpS = BuildingDestructionParticleSystem(event.entity, self._gameView, 30, 6)\n self._pm.processList.append(bdpS)\n event.entity._em.registerListener(Event.ENTITY_REMOVED, bdpS.entityRemoved, [])\n \n \n def onWindowResizeEvent(self, event):\n self._gameView.resizeTo(event.size)\n \n def terminate(self):\n \"\"\"Does final computation on this game state before it is removed.\"\"\"\n self._humanView.getPane().clear()\n\n \n \n ","sub_path":"7drl/planet5521/States.py","file_name":"States.py","file_ext":"py","file_size_in_byte":8511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"123718529","text":"# source: https://github.com/data-science-on-aws/workshop\nimport os\nimport socket\nimport json\nimport psutil\nimport subprocess\nimport sys\nimport time\nfrom shutil import copyfile\n\nHADOOP_CONFIG_PATH = '/opt/hadoop-config/'\nHADOOP_PATH = '/usr/hadoop-3.2.1'\nSPARK_PATH = '/usr/spark-2.4.6'\n\n# HACK\nEXTRA_JARS_PATH = '/usr/jars'\n\ndef copy_cluster_config():\n src =os.path.join(HADOOP_CONFIG_PATH, \"hdfs-site.xml\")\n dst = HADOOP_PATH + '/etc/hadoop/hdfs-site.xml'\n copyfile(src, dst)\n\n src = os.path.join(HADOOP_CONFIG_PATH, \"core-site.xml\")\n dst= HADOOP_PATH + '/etc/hadoop/core-site.xml'\n copyfile(src, dst)\n\n src = os.path.join(HADOOP_CONFIG_PATH, \"yarn-site.xml\")\n dst= HADOOP_PATH + '/etc/hadoop/yarn-site.xml'\n copyfile(src, dst)\n\n src = os.path.join(HADOOP_CONFIG_PATH, \"spark-defaults.conf\")\n dst= SPARK_PATH + '/conf/spark-defaults.conf'\n copyfile(src, dst)\n\n\ndef copy_aws_jars():\n src = HADOOP_PATH + \"/share/hadoop/tools/lib/aws-java-sdk-bundle-1.11.375.jar\"\n dst = HADOOP_PATH + \"/share/hadoop/common/lib/aws-java-sdk-bundle-1.11.375.jar\"\n copyfile(src, dst)\n\n src = HADOOP_PATH + \"/share/hadoop/tools/lib/hadoop-aws-3.2.1.jar\"\n dst = HADOOP_PATH + \"/share/hadoop/common/lib/hadoop-aws-3.2.1.jar\"\n copyfile(src, dst)\n\n # Add Deequ Jars\n # https://github.com/awslabs/deequ\n src = EXTRA_JARS_PATH + \"/deequ-1.0.1.jar\"\n dst = SPARK_PATH + \"/jars/deequ-1.0.1.jar\"\n copyfile(src, dst)\n\n src = EXTRA_JARS_PATH + \"/preprocess-deequ.jar\"\n dst = SPARK_PATH + \"/jars/preprocess-deequ.jar\"\n copyfile(src, dst)\n\n # Add Spark-TFRecord Jars\n # https://github.com/tensorflow/ecosystem/tree/master/spark/spark-tensorflow-connector#usage-examples \n src = EXTRA_JARS_PATH + \"/spark-tensorflow-connector_2.11-1.15.0.jar\"\n dst = SPARK_PATH + \"/jars/spark-tensorflow-connector_2.11-1.15.0.jar\"\n copyfile(src, dst)\n\n\ndef get_resource_config():\n resource_config_path = '/opt/ml/config/resourceconfig.json'\n with open(resource_config_path, 'r') as f:\n return json.load(f)\n\n\ndef write_runtime_cluster_config():\n resource_config = get_resource_config()\n master_host = resource_config['hosts'][0]\n master_ip = 
get_ip_from_host(master_host)\n current_host = resource_config['current_host']\n\n core_site_file_path = HADOOP_PATH + \"/etc/hadoop/core-site.xml\"\n yarn_site_file_path = HADOOP_PATH + \"/etc/hadoop/yarn-site.xml\"\n\n hadoop_env_file_path = HADOOP_PATH + \"/etc/hadoop/hadoop-env.sh\"\n yarn_env_file_path = HADOOP_PATH + \"/etc/hadoop/yarn-env.sh\"\n spark_conf_file_path = SPARK_PATH + \"/conf/spark-defaults.conf\"\n\n # Pass through environment variables to hadoop env\n with open(hadoop_env_file_path, 'a') as hadoop_env_file:\n hadoop_env_file.write(\"export JAVA_HOME=\" + os.environ['JAVA_HOME'] + \"\\n\")\n hadoop_env_file.write(\"export SPARK_MASTER_HOST=\" + master_ip + \"\\n\")\n hadoop_env_file.write(\"export AWS_CONTAINER_CREDENTIALS_RELATIVE_URI=\" + os.environ.get('AWS_CONTAINER_CREDENTIALS_RELATIVE_URI', '') + \"\\n\")\n\n # Add YARN log directory\n with open(yarn_env_file_path, 'a') as yarn_env_file:\n yarn_env_file.write(\"export YARN_LOG_DIR=/var/log/yarn/\")\n\n # Configure ip address for name node\n with open(core_site_file_path, 'r') as core_file:\n file_data = core_file.read()\n file_data = file_data.replace('nn_uri', master_ip)\n with open(core_site_file_path, 'w') as core_file:\n core_file.write(file_data)\n\n # Configure hostname for resource manager and node manager\n with open(yarn_site_file_path, 'r') as yarn_file:\n file_data = yarn_file.read()\n file_data = file_data.replace('rm_hostname', master_ip)\n file_data = file_data.replace('nm_hostname', current_host)\n with open(yarn_site_file_path, 'w') as yarn_file:\n yarn_file.write(file_data)\n\n # Configure yarn resource limitation\n mem = int(psutil.virtual_memory().total/(1024*1024)) # total physical memory in mb\n cores = psutil.cpu_count(logical=True) # vCPUs\n\n minimum_allocation_mb = '1'\n maximum_allocation_mb = str(mem)\n minimum_allocation_vcores = '1'\n maximum_allocation_vcores = str(cores)\n # Add some residual in memory due to rounding in memory allocation\n memory_mb_total = str(mem+2048)\n # Ensure core allocations\n cpu_vcores_total = str(cores*16)\n\n with open(yarn_site_file_path, 'r') as yarn_file:\n file_data = yarn_file.read()\n file_data = file_data.replace('minimum_allocation_mb', minimum_allocation_mb)\n file_data = file_data.replace('maximum_allocation_mb', maximum_allocation_mb)\n file_data = file_data.replace('minimum_allocation_vcores', minimum_allocation_vcores)\n file_data = file_data.replace('maximum_allocation_vcores', maximum_allocation_vcores)\n file_data = file_data.replace('memory_mb_total', memory_mb_total)\n file_data = file_data.replace('cpu_vcores_total', cpu_vcores_total)\n with open(yarn_site_file_path, 'w') as yarn_file:\n yarn_file.write(file_data)\n\n # Configure Spark defaults\n with open(spark_conf_file_path, 'r') as spark_file:\n file_data = spark_file.read()\n file_data = file_data.replace('sd_host', master_ip)\n file_data = file_data.replace('exec_mem', str(int((mem / 3)*2.2))+'m')\n file_data = file_data.replace('exec_cores', str(min(5, cores-1)))\n with open(spark_conf_file_path, 'w') as spark_file:\n spark_file.write(file_data)\n print(\"Finished Yarn configuration files setup.\\n\") \n\n\ndef start_daemons():\n resource_config = get_resource_config()\n current_host = resource_config['current_host']\n master_host = resource_config['hosts'][0]\n\n cmd_namenode_format = HADOOP_PATH + '/bin/hdfs namenode -format -force'\n cmd_start_dfs = HADOOP_PATH + '/sbin/start-dfs.sh'\n cmd_start_namenode = HADOOP_PATH + '/sbin/hadoop-daemon.sh start namenode'\n 
cmd_start_datanode = HADOOP_PATH + '/sbin/hadoop-daemon.sh start datanode'\n cmd_start_nodemanager = HADOOP_PATH + '/sbin/yarn-daemon.sh start nodemanager'\n cmd_start_yarn = HADOOP_PATH + '/sbin/start-yarn.sh'\n\n if current_host == master_host:\n subprocess.call(cmd_namenode_format, shell=True)\n subprocess.call(cmd_start_dfs, shell=True)\n subprocess.call(cmd_start_namenode, shell=True)\n subprocess.call(cmd_start_datanode, shell=True)\n subprocess.call(cmd_start_yarn, shell=True)\n else:\n subprocess.call(cmd_start_datanode, shell=True)\n subprocess.call(cmd_start_nodemanager, shell=True)\n\n\ndef get_ip_from_host(host_name):\n IP_WAIT_TIME = 300\n counter = 0\n ip = ''\n\n while counter < IP_WAIT_TIME and ip == '':\n try:\n ip = socket.gethostbyname(host_name)\n break\n except:\n counter += 1\n time.sleep(1)\n\n if counter == IP_WAIT_TIME and ip == '':\n raise Exception(\"Exceeded max wait time of 300s for hostname resolution\")\n\n return ip\n","sub_path":"container/program/bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":6972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"323437639","text":"'''\nAlex Code\n\n'''\n\n#Import the necessary packages to run the code\nimport rpmClass_Stable as rpm \t\t#Most recent version of the RPM artificial spin ice code\n\nimport importlib\t\t\t#Package to update the version of rpmClass_Stable\nimport numpy as np\t\t\t#Mathematics package\n\nimportlib.reload(rpm)\t\t#Reloads RPM file\n\nimport importlib\t\t\t#Package to update the \nimport numpy as np\n\nimportlib.reload(rpm)\t\t#\nimport os\n\n\n\nDisorder = [0.0, 0.05, 0.2]\n\nHc = 0.062\nbar_length = 400e-9\nvertex_gap = 1e-7\nbar_thickness = 20e-9\nbar_width = 80e-9\nmagnetisation = 800e3\n\nangle = 30\n\n\nLattice = rpm.ASI_RPM(25, 25, bar_length = bar_length,\\\n vertex_gap = vertex_gap, bar_thickness = bar_thickness,\\\n bar_width = bar_width, magnetisation = magnetisation)\ni = 0\nfor Hc_std in np.array([0.0, 0.05, 0.2]):\n\tLattice.kagome(Hc_mean=Hc, Hc_std=Hc_std)\n\t#squareLattice.randomMag()\n\t#Lattice.relax()\n\tHamp = 2*Hc\n\ti = i+1\n\tprint(Hamp)\n\tfolder = r'C:\\Users\\av2813\\Box\\Writing\\ESA\\Images\\MacroSpinSim\\Hysteresis\\Kagome'\n\tdirectory = folder+str(Hc_std).replace('.', 'p')\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory) \n\tLattice.fieldSweep(Hamp,50,angle, n = 10, loops = 2, folder = directory)\n\n\n\nfor d in Disorder:\n\tdata = np.load('HysteresisQuenchedDisorderKagome'+str(d).replace('.','p')+ '.npz')\n\tfield = data['arr_0']\n\tcorrelation = data['arr_1']\n\tmag = data['arr_2']\n\tmonopole = data['arr_3']\n\n\tcorr = plt.figure('Correlation'+str(d))\n\tax_c = corr.add_subplot(111)\n\tax_c.plot(field[:,1],correlation,'o-', label = d)\n\n\tmag_plot = plt.figure('Magnetisation (y-direction)'+str(d))\n\tax_m = mag_plot.add_subplot(111)\n\tax_m.plot(field[:,1]*1000,2*mag[:,1],'o-', label = str(d*100)+'%')\n\tplt.xlabel('Magnetic field (mT)')\n\tplt.ylabel('Magnetisation (y-direction)')\n\tplt.legend()\n\tplt.title('Hysteresis Quenched Disorder (My)')\n\tmono = plt.figure('Monopole Density'+str(d))\n\tax_mono = mono.add_subplot(111)\n\tax_mono.plot(field[20:,1]*1000,monopole[20:],'o-', label = str(d*100)+'%')\n\tplt.xlabel('Magnetic field (mT)')\n\tplt.ylabel('Monopole Density')\n\tplt.legend()\n\tplt.title('Hysteresis Quenched Disorder 
(Monopole)')\n\nplt.show()\n","sub_path":"HysteresisTest.py","file_name":"HysteresisTest.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"547545243","text":"#!/usr/bin/python\r\n\r\n\"\"\"\r\nLevel\t Numeric value\r\nCRITICAL\t50\r\nERROR\t 40\r\nWARNING \t30\r\nINFO\t 20\r\nDEBUG\t 10\r\nNOTSET\t 0\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\nimport logging\r\nimport os\r\n\r\n\r\n\r\nclass MyLogger():\r\n\r\n def __init__(self, name):\r\n self.logger = logging.getLogger(name)\r\n self._set_handlers()\r\n\r\n def _set_handlers(self):\r\n self.logger.addHandler(self.sta_handler)\r\n self.logger.addHandler(self.file_handler)\r\n\r\n @property\r\n def sta_handler(self):\r\n formater = logging.Formatter(\r\n '%(asctime)s %(levelname)s %(message)s in %(filename)s %(levelno)s'\r\n )\r\n com_handler = logging.StreamHandler()\r\n com_handler.setLevel(logging.ERROR)\r\n com_handler.setFormatter(formater)\r\n return com_handler\r\n\r\n @property\r\n def file_handler(self):\r\n formater = logging.Formatter(\r\n '%(asctime)s %(levelname)s %(message)s in %(filename)s %(levelno)s'\r\n )\r\n com_handler = logging.FileHandler('s.log')\r\n com_handler.setLevel(logging.ERROR)\r\n com_handler.setFormatter(formater)\r\n return com_handler\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n l = MyLogger('haibo').logger\r\n l.error('test error')\r\n l.warn('test warn')\r\n\r\n sl = logging.getLogger('haibo.c')\r\n sl.warn('test inherit')\r\n\r\n","sub_path":"build/lib/jobspider/baseclass/utils/joblog.py","file_name":"joblog.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"424975921","text":"#!/usr/bin/env python3\nimport sys\n \ndef div(a, b):\n try:\n print(c)\n a, b = int(a), int(b)\n return a / b\n except ValueError:\n return \"You can only divide integers and floats.\"\n except ZeroDivisionError:\n return \"You cannot divide a number by 0\"\n except Exception as e:\n print('Hit an unknown bug')\n return e\n \nif __name__ == '__main__':\n print(div(sys.argv[1], sys.argv[2]))\n","sub_path":"exception/pythonexp5.py","file_name":"pythonexp5.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"432193996","text":"from __future__ import print_function\nfrom lib_rosalind import *\n#\ndef ddeg(dataset):\n n,e = dataset[0]\n del dataset[0]\n #print ('n',n)\n #print ('e',e)\n #print ('dataset',dataset)\n res = []\n for i in range(1,n+1):\n c = 0\n for j in dataset:\n if i in j:\n if i == j[0]:\n t = j[1]\n else: \n t = j[0]\n c += sum([1 for k in dataset if t in k])\n res += [c]\n return res\n#\nif __name__ == '__main__':\n print ('{0} {1} {0}'.format('-'*20,'start'))\n dataset = [[int(i) for i in line.strip().split(' ')] for line in Read()]\n print ('{0} {1} {0}'.format('-'*20,'answer'))\n res = ddeg(dataset)\n print (' '.join([str (i) for i in res]))\n ","sub_path":"ddeg.py","file_name":"ddeg.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"316374678","text":"import ROOT, os, array, re, math\n\n\nclass s:\n def __init__(self, inf):\n self.inf = inf\n self.setValues()\n\n def setValues(self):\n tmp_f = open(self.inf, 'r')\n tmp_l = tmp_f.readlines()\n for l in tmp_l:\n if 'Signal processes scaled' in l:\n tmp_scale = 
float(l.split()[-1])\n if 'BACKGROUND' in l:\n tmp_b = float(l.split()[1])\n tmp_be = float(l.split()[3])\n if 'SIGNAL' in l:\n tmp_s = float(l.split()[1])\n tmp_se = float(l.split()[3])\n if 'DATA' in l:\n tmp_d = float(l.split()[1])\n\n self.sig = tmp_s\n self.sig_e = tmp_se\n self.bkg = tmp_b\n self.bkg_e = tmp_be\n\n self.data = tmp_d\n\n self.scale = tmp_scale\n self.origsig = self.sig/self.scale\n self.newscale = (self.data-self.bkg-self.bkg_e)/self.origsig\n\n self.scale_e = self.scale-self.newscale\n \n \n\nROOT.TColor.CreateGradientColorTable(3, array.array (\"d\", [0.00, 0.50, 1.00]),\n ##array (\"d\", [1.00, 1.00, 0.00]),\n ##array (\"d\", [0.70, 1.00, 0.34]),\n ##array (\"d\", [0.00, 1.00, 0.82]),\n array.array (\"d\", [0.00, 1.00, 1.00]),\n array.array (\"d\", [0.34, 1.00, 0.65]),\n array.array (\"d\", [0.82, 1.00, 0.00]),\n 255, 0.95)\n\ndef getnumbers(s):\n a = re.findall(r'\\d+', s)\n a = [float(i) for i in a]\n return a\n\n\nindir = '/afs/cern.ch/user/m/mdunser/www/private/w-helicity-13TeV/fakeClosureKinematic/'\n\nscales = {}\n\nfor sd in os.listdir(indir):\n if not os.path.isdir(indir+'/'+sd): continue\n if not '2019-03-05' in sd: continue\n tmp_fn = indir+'/'+sd+'/etal1.txt'\n vals = s(tmp_fn)\n ## tmp_f = open(indir+'/'+sd+'/etal1.txt','r')\n ## tmp_l = tmp_f.readlines()\n ## for l in tmp_l:\n ## if 'Signal processes scaled' in l:\n ## tmp_s = float(l.split()[-1])\n \n scales[sd.split('-')[-1]] = vals\n\narr_eta = array.array('d', [0., 0.5, 1.5, 2.4])\narr_pt = array.array('d', [26, 29, 32, 35, 38, 41, 45])\n \nth2_p = ROOT.TH2F('p','scale factors for +', len(arr_eta)-1, arr_eta, len(arr_pt)-1, arr_pt)\nth2_m = ROOT.TH2F('m','scale factors for -', len(arr_eta)-1, arr_eta, len(arr_pt)-1, arr_pt)\nth2_p .GetXaxis().SetTitle('|#eta|')\nth2_m .GetXaxis().SetTitle('|#eta|')\nth2_p .GetYaxis().SetTitle('p_{T}')\nth2_m .GetYaxis().SetTitle('p_{T}')\n\nfor k,v in scales.items():\n pt = sum(getnumbers(k.split('_')[0]))/2.\n eta = sum(getnumbers(k.split('_')[1].replace('p','')))/20.\n\n b = th2_p.FindBin(eta, pt)\n if k.split('_')[-1] == 'plus':\n th2_p.SetBinContent(b, v.scale)\n th2_p.SetBinError (b, v.scale_e)\n else:\n th2_m.SetBinContent(b, v.scale)\n th2_m.SetBinError (b, v.scale_e)\n\nth2_p.GetZaxis().SetRangeUser(0.65, 1.35)\nth2_m.GetZaxis().SetRangeUser(0.65, 1.35)\n\nROOT.gStyle.SetOptStat(0)\nROOT.gStyle.SetPaintTextFormat('5.3g')\n\nc = ROOT.TCanvas('c','', 1200, 600)\nc.Divide(2,1)\nc.cd(1)\n\nth2_p.Draw('colz text e')\nc.cd(2)\nth2_m.Draw('colz text e')\n","sub_path":"WMass/python/plotter/w-mass-13TeV/wmass_mu/checkFakesKinematic.py","file_name":"checkFakesKinematic.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"338871110","text":"from datetime import timedelta\n\nfrom fastapi import Depends, APIRouter, HTTPException\nfrom fastapi.security import HTTPBasic, HTTPBasicCredentials\nfrom sqlalchemy.orm import Session\n\nfrom app.api.utils.db import get_db\nfrom app.api.utils.security import get_current_user\nfrom app.core import config\nfrom app.core.jwt import create_access_token\nfrom app.models.token import Token\nfrom app import crud\n\nrouter = APIRouter()\n\nsecurity = HTTPBasic()\n\n\n@router.post(\"/token\", response_model=Token, tags=[\"login\"])\ndef login_access_token(\n db: Session = Depends(get_db),\n credentials: HTTPBasicCredentials = Depends(security)\n):\n user = crud.user.authenticate(\n db, email=credentials.username, 
password=credentials.password\n )\n if not user:\n raise HTTPException(\n status_code=400, detail=\"Incorrect email or password\"\n )\n elif not crud.user.is_active(user):\n raise HTTPException(status_code=400, detail=\"Inactive user\")\n access_token_expires = timedelta(\n minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES\n )\n token = create_access_token(\n data={\"user_id\": user.id, \"user_email\": user.email},\n expires_delta=access_token_expires\n )\n return {\n \"access_token\": token,\n \"token_type\": \"bearer\",\n \"expires_in\": config.ACCESS_TOKEN_EXPIRE_MINUTES * 60\n }\n\n\n# @router.post(\"/login/test-token\", tags=[\"login\"], response_model=User)\n@router.post(\"/test-token\", tags=[\"login\"])\n# def test_token(current_user: DBUser = Depends(get_current_user)):\ndef test_token(current_user=Depends(get_current_user)):\n \"\"\"\n Test access token\n \"\"\"\n return current_user\n","sub_path":"web/backend/machine/app/api/api_v1/endpoints/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"624367617","text":"import math\r\nfrom sklearn.externals import joblib\r\nfrom sklearn import svm\r\nimport sys\r\n\r\n#####################Parse sequences to predict#########################################\r\n\r\n\r\npath = input('Input the path to your fasta file: ')\r\n\r\n#to_predict = open ('C:\\\\Users\\\\Leonie\\\\Git\\\\scilife-project\\\\scripts\\\\Sequence_based_prediction\\\\to_predict.txt', 'r')\r\nto_predict = open(path, 'r')\r\n\r\nlist_all = list()\r\nfor line in to_predict:\r\n\tnewline = line.replace('\\n', '')\r\n\tlist_all.append(newline)\r\n\r\nlist_title=[]\r\nlist_seq = []\r\nlist_seq2 = []\r\nword_list = []\r\nmfeature_list = []\r\ncword_list = []\r\nsepn_list = [] \r\n\r\n#define window size\r\nwindows = 45\r\n\r\n#creates lists separating titles and sequences\r\nfor i in range (0, len(list_all), 2):\r\n\tlist_title.append(list_all[i])\r\n\tlist_seq2.append(list_all[i+1])\r\n\tlist_seq.append((math.floor(windows/2))*'X'+list_all[i+1]+(math.floor(windows/2))*'X')\r\n\r\n\t\r\n#creates sliding windows of sequence\r\nfor seq in list_seq:\r\n\taa_list = list(seq)\r\n\tfor aa in range(0, len(aa_list)-(windows-1)):\r\n\t\tword_list.append(aa_list[aa:aa+windows])\r\n\r\n#translates sequence windows into numerical code \r\nfrom aa_dictionary import aa_dict\r\nfor a in range (0, len(word_list)):\r\n\tfor b in range(0, windows):\r\n\t\tfor key in aa_dict:\r\n\t\t\tif key == word_list[a][b]:\t\r\n\t\t\t\tword_list[a][b] = aa_dict[key]\r\n\r\n\t\t\t\t\r\n#Joines numerical to one code per word\r\ncword = str()\r\nfor word in word_list:\r\n for aa in range(0, len(word)):\r\n cword = cword + str(word[aa])\r\n cword_list.append(cword) \r\n cword = str()\r\n\r\n#Bringing words into the right format\r\nfor word in cword_list:\r\n\tposition_list = list(word)\t\r\n\tsepn_list.append(position_list)\r\nfor element in range(0, len(sepn_list)):\r\n\tfor i in range(0, len(sepn_list[element])):\r\n\t\tsepn_list[element][i] = int(sepn_list[element][i])\r\n\t\r\n\r\n#######################Import model############################################################\r\n\r\nclf = joblib.load('Transmembrane_globular_predictor')\r\n\r\n\r\n\r\n#####################Prediction################################################################\r\n\r\npred_val = clf.predict(sepn_list)\r\npred_val_l = list(pred_val)\r\n\r\n###############Create file with 
prediction###################################################\r\n\r\n#backtranslate numbers into structure features\r\nfor featnum in range(0, len(pred_val_l)):\r\n\tif pred_val_l[featnum] == 1:\r\n\t\tpred_val_l[featnum] = 'G'\r\n\telif pred_val_l[featnum] == 2:\r\n\t\tpred_val_l[featnum] = 'O'\r\n\telif pred_val_l[featnum] == 3:\r\n\t\tpred_val_l[featnum] = 'M'\r\n\r\n#bring predicted structure windows in sequence format\r\npredicted_list = list()\r\nfor i in range (1, len(list_all), 2):\r\n\tpredicted_stru = str()\r\n\tfor j in range(0, len(list_all[i])):\r\n\t\tpredicted_stru = predicted_stru + pred_val_l[0]\r\n\t\tpred_val_l.remove(pred_val_l[0])\r\n\tpredicted_list.append(predicted_stru)\r\n\r\nzip_list = list(zip(list_title, list_seq2, predicted_list))\r\n\r\n\r\n#saves the prediction in a file\r\nwith open('C:\\\\Users\\\\Leonie\\\\Git\\\\scilife-project\\\\output\\\\structure_prediction.txt', 'w+') as sp:\r\n\tfor i in zip_list:\r\n\t\tfor j in i:\r\n\t\t\tsp.write(j + '\\n')\r\nprint('finished' + '\\n' + 'globular residues will be assigned with \"G\", transmembrane residues with \"M\" and outside membrane residues with \"O\"')\r\n","sub_path":"scripts/Sequence_based_prediction/runall_predictor.py","file_name":"runall_predictor.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"411929328","text":"n, m = map(int, input().split())\na = [list(map(int, input().split())) for i in range(m)]\nflag = [0 for i in range(n+1)]  # nodes are 1-indexed, so size n+1 avoids an IndexError on flag[n]\ndic = {}\nl = [0 for i in range(n)]\nfor i in range(m):\n    if a[i][0] not in dic:\n        dic[a[i][0]] = [a[i][1]]\n    else:\n        dic[a[i][0]].append(a[i][1])\n    l[a[i][0]-1] = 1\n    l[a[i][1]-1] = 1\nstack = [a[0][0]]\ncnt = n-sum(l)\nwhile stack:\n    tmp = stack.pop()\n    if tmp in dic:\n        for i in dic[tmp]:\n            stack.append(i)\n    if flag[tmp] == 0:\n        flag[tmp] = 1\n    else:\n        cnt += 1\nprint(cnt-1)","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637274039","text":"import socket\nfrom datetime import datetime\n\nfrom logging.handlers import SocketHandler\n\nfrom .serializer import serialize_message\n\n\ndef format_timestamp(time):\n    tstamp = datetime.utcfromtimestamp(time)\n    return tstamp.strftime(\"%Y-%m-%dT%H:%M:%S\") + \".%03d\" % (tstamp.microsecond / 1000) + \"Z\"\n\n\nclass TCPLogstashHandler(SocketHandler):\n    \"\"\"\n    Python logging handler for Logstash. 
Sends events over TCP.\n :param host: The host of the logstash server.\n :param port: The port of the logstash server (default 5959).\n \"\"\"\n\n def __init__(self, host, port=5959):\n super().__init__(host, port)\n self._host = socket.gethostname()\n\n def makePickle(self, record):\n return serialize_message(\n self.formatter.format(record),\n {\n 'logger': {\n 'host': self._host,\n 'path': record.pathname,\n 'timestamp': format_timestamp(record.created),\n 'level': record.levelname,\n 'logger_name': record.name,\n },\n **getattr(record, 'metadata', {}),\n }\n )\n","sub_path":"security/backends/elasticsearch/logstash/handler_tcp.py","file_name":"handler_tcp.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"332014419","text":"from django.db.models.fields import DecimalField\nfrom Restaurant_app.models import Breads, Chinese, Curries, Manageorders, Order, Orderhistory, Rice, Rolereq, Starters, Thickshake, User\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.http import HttpResponse\nfrom Restaurant_app.forms import Breadsform, Chineseform, Curriesform, Thickshakeform, Pfupd, Riceform, Rlupd, Startersform, usgform, Rltype,Chgepwd\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import send_mail\nfrom Restaurant import settings\nfrom decimal import Decimal\n# Create your views here.\ndef home(request):\n return render(request,'app/home.html')\n\ndef about(request):\n return render(request,'app/about.html')\n\ndef contact(request):\n return render(request,'app/contact.html')\n\ndef login(request):\n return render(request,'app/login.html')\n\ndef usrreg(request):\n if request.method == \"POST\":\n d = usgform(request.POST)\n if d.is_valid():\n d.save()\n return redirect('/login')\n d=usgform()\n return render(request,'app/userregister.html',{'t':d})\n\n\n@login_required\ndef additems(request): \n return render(request,'app/additems.html')\n\n@login_required\ndef addstarter(request):\n if request.method == \"POST\":\n k = Startersform(request.POST,request.FILES)\n if k.is_valid():\n k.save()\n messages.success(request,\"Item added Successfully\")\n return redirect('/additems')\n k=Startersform(request.FILES) \n return render(request,'app/addstarter.html',{'r':k})\n\n@login_required\ndef addbreads(request):\n if request.method == \"POST\":\n k = Breadsform(request.POST,request.FILES)\n if k.is_valid():\n k.save()\n messages.success(request,\"Item added Successfully\")\n return redirect('/additems')\n k=Breadsform(request.FILES)\n return render(request,'app/addbreads.html',{'r':k})\n\n@login_required\ndef addrab(request):\n if request.method == \"POST\":\n k = Riceform(request.POST,request.FILES)\n if k.is_valid():\n k.save()\n messages.success(request,\"Item added Successfully\")\n return redirect('/additems')\n k=Riceform(request.FILES)\n return render(request,'app/addrab.html',{'r':k})\n\n@login_required\ndef addts(request):\n if request.method == \"POST\":\n k = Thickshakeform(request.POST,request.FILES)\n if k.is_valid():\n k.save()\n messages.success(request,\"Item added Successfully\")\n return redirect('/additems')\n k=Thickshakeform(request.FILES)\n return render(request,'app/addts.html',{'r':k})\n\n@login_required\ndef addchinese(request):\n if request.method == \"POST\":\n k = Chineseform(request.POST,request.FILES)\n if k.is_valid():\n k.save()\n 
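# item saved: flash a confirmation and send the staff back to the add-items menu\n            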
messages.success(request,\"Item added Successfully\")\n return redirect('/additems')\n k=Chineseform(request.FILES)\n return render(request,'app/addchinese.html',{'r':k})\n\n@login_required\ndef addcurries(request):\n if request.method == \"POST\":\n k = Curriesform(request.POST,request.FILES)\n if k.is_valid():\n k.save()\n messages.success(request,\"Item added Successfully\")\n return redirect('/additems')\n k=Curriesform(request.FILES)\n return render(request,'app/addcurries.html',{'r':k})\n\n@login_required\ndef orders(request):\n a = Starters.objects.all()\n b = Breads.objects.all()\n c = Curries.objects.all()\n d = Rice.objects.all()\n e = Thickshake.objects.all()\n f = Chinese.objects.all()\n if request.method == \"POST\":\n iname = request.POST['itemname']\n iprice = request.POST['itemprice']\n iquant = request.POST['qt']\n q = Order(name = iname, price =iprice, quantity =iquant,uid_id=request.user.id)\n q.save()\n return redirect('/orders')\n return render(request,'app/orders.html',{'starters':a,'breads':b,'curries':c,'rice':d,'thickshake':e,'chinese':f})\n\n@login_required\ndef accept(request):\n a = Order.objects.filter(uid_id=request.user.id)\n strn=\"\"\n tp=0\n tax=float(0.18)\n for j in a:\n tp = tp + (j.price*j.quantity)\n gt = float(tp)+(float(tp)*tax)\n for i in a:\n strn = strn + i.name + \"(\" +str(i.quantity) + \")\" + \",\"\n return render(request,'app/orderaccept.html',{'orders':a})\n\n@login_required\ndef bill(request):\n a = Order.objects.filter(uid_id=request.user.id)\n tp=0\n tot=0\n tax=float(0.18)\n taxes = 0\n for i in a:\n tp = tp + (i.price*i.quantity)\n gt = float(tp)+(float(tp)*tax)\n taxes = gt-float(tp)\n tot = gt-taxes\n strn = \"\"\n for i in a:\n strn = strn + i.name + \"(\" +str(i.quantity) + \")\" + \",\"\n if request.method == \"POST\": \n w = Orderhistory(items=strn,billamoubt=gt, cid_id=request.user.id)\n w.save()\n name=request.POST['Cname']\n tablenum = request.POST['tn']\n s = Manageorders(uname=name,tname=tablenum,items=strn)\n s.save()\n a.delete()\n return redirect('/orders')\n return render(request,'app/bill.html',{'orders':a,'gt':gt,'taxes':taxes,'tot':tot})\n\n@login_required\ndef od(request,n):\n r=Manageorders.objects.get(id=n)\n if request.method==\"POST\":\n r.delete()\n return redirect('/po')\n return render(request,'app/od.html')\n\n@login_required\ndef oh(request):\n a = Orderhistory.objects.filter(cid_id=request.user.id)\n return render(request,'app/oh.html',{'oh':a})\n\n@login_required\ndef id(request,n):\n r=Order.objects.get(id=n)\n if request.method==\"POST\":\n r.delete()\n return redirect('/accept')\n return render(request,'app/id.html')\n\n@login_required\ndef iup(request,n):\n t = Order.objects.get(id=n)\n ct = 0\n ct = (t.price*t.quantity)\n print(ct)\n if request.method == \"POST\":\n newquantity = request.POST['n']\n t.quantity = newquantity\n t.save()\n return redirect('/accept')\n return render(request,'app/iup.html',{'p':t,'ct':ct})\n\n@login_required\ndef po(request):\n a = Manageorders.objects.all()\n return render(request,'app/po.html',{'orders':a})\n\n@login_required\ndef rolereq(request):\n p = Rolereq.objects.filter(ud_id=request.user.id).count()\n if request.method == \"POST\":\n k = Rltype(request.POST,request.FILES)\n if k.is_valid():\n y = k.save(commit=False)\n y.ud_id = request.user.id\n y.uname = request.user.username\n y.is_checked = 0\n print(y)\n y.save()\n # return redirect('/')\n k = Rltype()\n return render(request,'app/rolereq.html',{'d':k, 'c':p})\n\n@login_required\ndef gveperm(request):\n u = 
User.objects.all()\n r = Rolereq.objects.all()\n d={}\n for n in u:\n for m in r:\n if n.is_superuser == 1 or n.id != m.ud_id :\n continue\n else:\n d[m.id] = m.Uname, m.rltype, n.role,n.id,m.id\n return render(request,'app/gvpl.html',{'h':d.values()})\n\n@login_required\ndef gvupd(request,t):\n y = Rolereq.objects.get(ud_id=t)\n d = User.objects.get(id=t)\n if request.method == \"POST\":\n n = Rlupd(request.POST,instance=d)\n if n.is_valid():\n n.save()\n y.is_checked = 1\n y.save()\n return redirect('/gvper')\n n = Rlupd(instance=d)\n return render(request,'app/gvepermission.html',{'c':n})\n\n@login_required\ndef gvdel(request,m):\n r = Rolereq.objects.get(id=m)\n a = User.objects.get(id = r.ud_id)\n if request.method == \"POST\":\n a.role=1\n r.delete()\n a.save()\n messages.success(request,\"{} Request Deleted Successfully\".format(a.username))\n return redirect('/gvper')\n n = Rlupd(instance=r)\n return render(request,'app/gvdel.html',{'a':n})\n\n@login_required\ndef pfle(request):\n y = User.objects.get(id=request.user.id)\n return render(request,'app/profile.html',{'q':y})\n\n@login_required\ndef pfupd(request):\n\tt = User.objects.get(id=request.user.id)\n\tif request.method == \"POST\":\n\t\tpfl = Pfupd(request.POST,request.FILES,instance=t)\n\t\tif pfl.is_valid():\n\t\t\tpfl.save()\n\t\t\treturn redirect('/pfle')\n\tpfl = Pfupd(instance=t)\n\treturn render(request,'app/profileupdate.html',{'u':pfl})\n\n\n@login_required\ndef feedback(request):\n if request.method == \"POST\":\n sd = request.POST['snmail'].split(',')\n sm = request.POST['sub']\n mg = request.POST['msg']\n rt = settings.EMAIL_HOST_USER\n dt = send_mail(sm,mg,rt,sd)\n if dt == 1:\n return redirect('/')\n return render(request,'app/feedback.html')\n\n@login_required\ndef changepwd(request):\n\tif request.method == \"POST\":\n\t\tk = Chgepwd(user=request.user,data=request.POST)\n\t\tif k.is_valid():\n\t\t\tk.save()\n\t\t\treturn redirect('/login')\n\tk = Chgepwd(user=request)\n\treturn render(request,'app/changepwd.html',{'t':k})\n\n","sub_path":"Restaurant_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"48036959","text":"from nltk.stem import PorterStemmer \nfrom nltk.tokenize import word_tokenize \nfrom nltk.corpus import stopwords\nfrom fileToStructure import *\nimport json\nimport os\nimport csv\n\n#este file se corre una vez para obtener el preprocessing de las json\n#podrían haber algunos problemas con la lista terminos. Parece que es una lista dentro de otra lista de manera redundante.\n# ¿terminos = terminos[0]? redundancia\n\n\n#Se podría dividir el programa en más funciones. No se hace esto porque:\n#1. Van a haber múltiples llamada (por tweet por word) a múltiples funciones.\n#Esto es costoso\n#2. Es legible de todas formas.\n\n#Preprocessing es para crear los csv necesarios para la primera parte del proyecto\n\n#funcion que quita stopwords, saca tokens y raíces. 
Devuelve un array de los terminos en un tweet\ndef preprocessing(tweet):\n sw = set(stopwords.words('spanish')) \n tokens = word_tokenize(tweet) \n \n words = [] \n stemmer = PorterStemmer()\n\n for token in tokens:\n if token not in sw: \n token = stemmer.stem(token)\n words.append(token) \n return words\n \n\n#inicializar estructuras para almacenar informacion\nentries = os.listdir('parse/')\nallData = []\nterminos = []\nids = []\nidTerminos = {}\nidText = {}\n\n#buscar en todos los json de tweets\nfor entrie in entries:\n with open('parse/' + entrie) as json_file:\n data = json.load(json_file)\n allData.append(data)\n\n#preprocessing a cada tweet.\n#se almacena el vector de terminos no repetidos como un csv\n#se almacena el id del tweet con una lista de terminos como un json\n#se almacena el id con el texto completo para la muestra de resultados final\n#esto agiliza la lectura\nfor data in allData:\n for tweet in data:\n idText[tweet['id']] = tweet['text']\n i = tweet['id']\n text = tweet['text'] \n terms = preprocessing(text)\n idTerminos[i] = terms\n terms = set(terms)\n for term in terms:\n terminos.append(term)\n\n\ntermsFile = open(\"terminos.csv\", \"w\") \nterminos = set(terminos)\n\n#escribe el csv separado por ;\nfor term in terminos:\n termsFile.write(term)\n termsFile.write(\";\")\ntermsFile.close() \n\n#escribe el json\nwith open('idTerminos.json', 'w') as outfile:\n json.dump(idTerminos, outfile)\n\n#sacar la frecuencia de documentos por cada termino\ndf = {}\n\nfor term in terminos: \n df[term] = 0\n\nfor tweetKey in idTerminos.keys():\n for t in idTerminos[tweetKey]:\n if t in df.keys():\n df[t] = df[t] + 1\n\nwith open('documentFrequency.json', 'w') as outfile:\n json.dump(df, outfile)\n\nwith open('idText.json', 'w') as outfile:\n json.dump(idText, outfile)\n\n\n#obtener el indice \n\n#id del texto con cada token (igual verifica si el token está)\ntweetIds = jsonToDict(\"idTerminos.json\")\n#frecuencia de cada token\ndf = jsonToDict(\"documentFrequency.json\")\n#id y tweet\nidText = jsonToDict(\"idText.json\")\n#terminos (esta en lista[0])\ntokens = csvToArray(\"terminos.csv\")\n\n\nindex = {}\nnIds = len(tweetIds.keys())\ncounter = 0\nfor i in tweetIds:\n for j in tweetIds[i]:\n if j in tokens[0]:\n print(str(counter) + \"out of \" + str(nIds) + \"\\n\")\n if j in index:\n index[j]['docs'][i] = 1\n else:\n base_dict = {'token': j, 'frequency': df[j], 'docs': dict()}\n index[j] = base_dict\n counter = counter + 1\n\nwith open('index.json', 'w') as outfile:\n json.dump(index, outfile)","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"625774163","text":"from django.shortcuts import render, redirect\nfrom time import gmtime, strftime\nimport re\nimport random\nfrom django.contrib import messages\n\nfrom . 
import models\nfrom .models import User\n\n\ndef index(request):\n    return render(request,'sr_users/index.html')\n\ndef new(request):\n    return render(request, 'sr_users/new.html')\n\ndef create(request):\n    #validate data\n    errors = User.objects.validate(request.POST)\n    if (errors):\n        print (\"==== error \")\n        for error in errors:\n            messages.error(request, errors[error])\n        print (messages)\n        return redirect(\"/users/new\")\n    print (\"===== no errors ===\")\n    newU = User.objects.create(first_name = request.POST['first_name'], last_name = request.POST['last_name'], email = request.POST['email'])\n\n    return redirect(\"/users\")\n\ndef update(request):\n    print (\"------update\")\n    # validate data\n    errors = User.objects.validate(request.POST)\n    if (errors):\n        print (\"==== error \")\n        for error in errors:\n            messages.error(request, errors[error])\n        print (messages)\n        reURL = \"/users/\"+ str(request.POST['id']) + \"/edit\"\n        return redirect(reURL)\n    print (\"===== no errors ===\")\n\n    usr = User.objects.get(id=request.POST['id'])\n    print (usr)\n    usr.first_name = request.POST['first_name']\n    usr.last_name = request.POST['last_name']\n    usr.email = request.POST['email']\n    usr.save()\n    \n    reURL = \"/users/\" + str(usr.id)\n    return redirect(reURL)\n\ndef destroy(request, uid):\n\n    usr = User.objects.get(id=uid)\n    usr.delete()\n    return redirect('/users')\n\ndef show(request, uid):\n    context = {\n        'user': User.objects.get(id=uid),\n    }\n\n    return render(request,'sr_users/view.html', context)\n\ndef edit(request, uid):\n    context = {\n        'user': User.objects.get(id = uid),\n    }\n\n    return render(request, 'sr_users/edit.html', context)","sub_path":"django/semi_restful/apps/sr_users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"44279201","text":"N = int(input())\nlista = [0] * N\nlistb = [0] * N\nindex = 0\nfor n in range(N):\n    tasktime = list(map(int, input().split()))\n    lista[n] = tasktime[0]\n    listb[n] = tasktime[1]\n\na = min(lista)\nb = min(listb)\nans = max(a, b)\nindexa = lista.index(a)\nindexb = listb.index(b)\nif(indexa == indexb):\n    ans = a + b\n    newans = 100000\n    if(a > b):\n        del lista[indexa]\n        newa = min(lista)\n        newans = max(newa, b)\n    else:\n        del listb[indexb]\n        newb = min(listb)\n        newans = max(newb,a)\n    \n    if(newans < ans):\n        ans = newans\n\nprint(ans)","sub_path":"ABC/194/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"644693400","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os \n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn import metrics\nfrom sklearn.linear_model import LinearRegression \n\nfrom sklearn.preprocessing import StandardScaler\n\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\n\n\"\"\"\nDescription:\n    Plot classification figure with three kinds of data. \n    True values, BEP predictions and Structural predictions. \nAuthor:\n    Jiayan XU, CCC, ECUST, 2018-2019. 
\nNotes: \n paper figure 8.\n\"\"\"\n\n\ndef ols_with_chosen_features(y, X):\n \"\"\"Ordinary linear regression with input y and X, \n return the function and r2.\"\"\" \n # regression setting \n ols = LinearRegression(fit_intercept=True, normalize=False)\n\n # fit and predict \n ols.fit(X, y)\n y_p = ols.predict(X)\n\n # save model \n coefs = ols.coef_ \n intercept = ols.intercept_ \n r2 = metrics.r2_score(y, y_p)\n mse = metrics.mean_squared_error(y, y_p)\n\n return ols, r2 \n\n\ndef plt_ts_classification(ax, subtitle, Etype, Ets, Etsra):\n \"\"\"Plot ts-ss energy v.s. ts-ra energy.\"\"\" \n # init dict \n en_ts = {'ts':[], 'tsra':[]}\n en_tsra = {'ts':[], 'tsra':[]}\n\n for i in range(len(Etype)):\n if Etype[i] == 'ts':\n en_ts['ts'].append(Ets[i])\n en_ts['tsra'].append(Etsra[i])\n elif Etype[i] == 'tsra':\n en_tsra['ts'].append(Ets[i])\n en_tsra['tsra'].append(Etsra[i])\n for i, e1 in enumerate(en_tsra['tsra']):\n for j, e2 in enumerate(en_tsra['tsra']):\n if e1 != e2:\n if np.fabs(e1 - e2) < 0.01:\n if e1 < e2:\n en_tsra['tsra'][i] -= 0.01 \n en_tsra['tsra'][j] += 0.012\n else: \n en_tsra['tsra'][i] += 0.012 \n en_tsra['tsra'][j] -= 0.01 \n\n # subfigure general setting \n ax.set_title(subtitle, fontsize=20)\n ax.set_xlabel('$E_{ra}\\ /\\ eV$', fontsize=16)\n ax.set_xlim(0.0, 1.6)\n ax.set_ylabel('$E_{ss}\\ /\\ eV$', fontsize=16)\n ax.set_ylim(0.0, 1.6)\n\n # plot the line y=x \n #ax.plot([Etsra.min(), Etsra.max()], [Etsra.min(), Etsra.max()], 'k--', lw=2)\n ax.plot([0.2, 1.4], [0.2, 1.4], 'k--', lw=2)\n\n # scatter points \n ss = ax.scatter(en_ts['tsra'], en_ts['ts'], \\\n color='royalblue', marker='o', alpha=0.8, label='Surface-stabilized Preference')\n ra = ax.scatter(en_tsra['tsra'], en_tsra['ts'], \\\n color='salmon', marker='v', alpha=0.8, label='Radical-like Preference')\n\n ax.legend(handles=[ss, ra], loc=2)\n\n return ss, ra\n\n\ndef plot_linear_classification(csv_path):\n \"\"\"Regression with different methods and plot the classification.\"\"\"\n # prepare data \n df = pd.read_csv(csv_path, index_col=0)\n df = df.loc[df.loc[:, 'E_ts'] != 'np.nan', :]\n df = df.loc[df.loc[:, 'E_tsra'] != 'np.nan', :]\n\n # specific features \n Etype = df.loc[:, 'mtype'].values \n\n Ets = df.loc[:, 'E_ts'].values.astype(np.float64)\n Etsra = df.loc[:, 'E_tsra'].values.astype(np.float64) \n \n # BEP relations\n Eh = df.loc[:, 'E_Hab3'].values.reshape(-1,1).astype(np.float64) \n Ech3 = df.loc[:, 'E_CH3ab'].values.reshape(-1,1).astype(np.float64) \n\n ols_ts_bep, dummy = ols_with_chosen_features(Ets, Eh+Ech3)\n Ets_bep = ols_ts_bep.predict(Eh+Ech3)\n\n ols_tsra_bep, dummy = ols_with_chosen_features(Etsra, Eh)\n Etsra_bep = ols_tsra_bep.predict(Eh)\n\n # Geo relations selected \n feanames = ['E_CH3ab', 'E_Hab3', \\\n 'a_O2-M4-C_CH3ab', 'h_O2-C-M4-H1_CH3ab', 'd_H1-C_CH3ab', \\\n 'h_O2-H1-M4-C_CH3ab', 'a_O2-M4-H1_Hab3'] \n X = df.loc[:, feanames].values.astype(np.float64) \n\n s4y, s4X = StandardScaler().fit(Ets.reshape(-1,1)), StandardScaler().fit(X)\n y_s, X_s = s4y.transform(Ets.reshape(-1,1)), s4X.transform(X)\n\n ols_ts_geo, r2 = ols_with_chosen_features(y_s, X_s)\n Ets_geo_s = ols_ts_geo.predict(X_s)\n Ets_geo = s4y.inverse_transform(Ets_geo_s)\n\n # general figure setting \n fig, ax = plt.subplots(1, 2, figsize=(16,6))\n plt.suptitle('Methane Activation Classification using Revised Linear Relation', \\\n fontsize=24, fontweight='bold')\n plt.tight_layout(pad=2.0, w_pad=2.0, h_pad=2.0)\n plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.85, \\\n wspace=0.2, hspace=0.1)\n\n # bep 
values \n ss, ra = plt_ts_classification(ax[0], '(a) Regular Linear Relations', Etype, Ets_bep, Etsra_bep)\n\n # geo values \n ss, ra = plt_ts_classification(ax[1], '(b) Structural Descriptors Amended Relations', \\\n Etype, Ets_geo, Etsra_bep)\n\n #plt.legend(handles=[ss, ra])\n\n plt.savefig('./figures/fig8.png')\n #plt.show()\n \n\nif __name__ == '__main__':\n plot_linear_classification('../CH4_10.csv')\n","sub_path":"LasAndClf-dev/processing_methods/plot_paper_figrues/plot_linear_classification_2.py","file_name":"plot_linear_classification_2.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"52807027","text":"import random\n\ntext = open(\"mesasges.txt\",\"r\").read().split(\"\\n\")\n\nsenders = [\"artiumdominus\", \"ripley\", \"kylereese\", \"connorsarah\", \"connorjohn\", \"oldbladerunner\", \"iamthebusiness\", \"snake\", \"mirrorshades\", \"major\"]\n\naddresses = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"a\", \"b\", \"c\"]\n\nfor i in range(0,len(text)-1,2):\n\tprint(\"INSERT INTO Mensagem (conteudo, envio, status, emissor, receptor) VALUES (\\\"\" + text[i] +\n\t \" \" + text[i+1] + \"\\\", \\\"2019-04-27 13:06:00\\\", \\\"enviado\\\", \\\"\" + random.choice(senders) +\n\t \"\\\", \\\"\" + random.choice(addresses) + \"\\\");\")\n","sub_path":"AS02/1.Documentos/talkgen.py","file_name":"talkgen.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"71338753","text":"#Contador de numeros\ncont_num = 0\n\n#Contador de vueltas\ncont_vueltas = 0\n\n#Acumulador de numeros\nacu_num = 0\n\n\n\nlimite =int(input(\"¿Cuantos numeros quiere ingresar?\"))\n\nwhile(True):\n num = int(input(\"Ingrese numero : \"))\n\n #Suma\n acu_num += num\n\n cont_vueltas = cont_vueltas + 1\n prom=acu_num/cont_vueltas\n if(cont_vueltas == limite):\n break\n\n\n\n\n\n\n\n\nprint(\"La suma de los numeros es :\", acu_num)\nprint(\"Cantidad de numeros ingresados es \", limite)","sub_path":"ejercicio_6.py","file_name":"ejercicio_6.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"369392117","text":"# Create your views here.\nimport json\n\nfrom django.core import serializers\nfrom django.http import HttpResponse, HttpRequest\nfrom django.template import Context, loader, RequestContext\nfrom django.shortcuts import render_to_response\nfrom django.core import serializers\nfrom django.conf import settings\nfrom django import forms\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom cloudchoice.models import Vendor, Product, Plan\n\ndef index(request):\n\n if settings.DEBUG:\n javascripts = [\"underscore.js\", \"backbone.js\"]\n else:\n javascripts = [\"underscore-min.js\", \"backbone-min.js\"]\n javascripts.append(\"cloudchoice/index.js\")\n\n stylesheets = [\"cloudchoice/style.css\"]\n\n dataToTemplate = {\n 'title' : 'Choose A Cloud Vendor',\n 'vendors' : Vendor.objects.all(),\n 'javascripts' : javascripts,\n 'stylesheets' : stylesheets,\n }\n\n return render_to_response('cloudchoice/index.html', dataToTemplate, context_instance = RequestContext(request))\n\ndef list_vendors(request):\n body = \"Here is the list\"\n vendorSets = Vendor.objects.all()\n vendorS = dict()\n for vendor in vendorSets:\n vendorS[vendor.id] = vendor.name\n body = json.dumps(vendorS)\n return 
HttpResponse(body)\n\ndef list_products(request):\n productList = list()\n\n for plan in Plan.objects.all():\n node = dict()\n node['vendor'] = plan.product.vendor.name\n node['product_name'] = plan.product.product_name\n node['plan_name'] = plan.name\n node['offer'] = plan.product.offer\n node['unit'] = plan.unit.unit\n node['unit_amount'] = plan.unit_amount\n node['unit_price'] = '$%.2f' % plan.unit_price\n productList.append(node)\n \n# body = json.dumps(serializers.serialize(\"json\", productList))\n body = json.dumps(productList)\n return HttpResponse(body)\n","sub_path":"cloudchoice/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"222360913","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nimport lib.dist as dist\nimport torch\nfrom torch.autograd import Variable\n\nclass VAE_ladder(object):\n def __init__(self,\n z_dim=10,\n beta=1.0,\n learning_rate=5e-4,\n fade_in_duration=5000,\n flags=None,\n chn_num=1,\n train_seq=1,\n image_size=64):\n self.flags=flags\n self.activation=tf.nn.leaky_relu\n self.z_dim = z_dim\n self.layer_num=4\n self.learning_rate = learning_rate\n self.beta=beta\n self.chn_num=chn_num\n self.fade_in_duration = fade_in_duration\n self.train_seq=train_seq\n self.image_size=image_size\n self.pre_KL=flags.KL\n self.fadein=flags.fadein\n\n self.q_dist = dist.Normal()\n self.x_dist = dist.Bernoulli()\n self.prior_dist = dist.Normal()\n self.prior_params = torch.zeros(self.z_dim, 2)\n\n self._create_network()\n self._create_loss_optimizer()\n\n def _get_prior_params(self, batch_size=1):\n expanded_size = (batch_size,) + self.prior_params.size()\n prior_params = Variable(self.prior_params.expand(expanded_size))\n return prior_params\n\n def _sample_z(self, z_mean, z_log_sigma_sq):\n eps_shape = tf.shape(z_mean)\n eps = tf.random_normal(eps_shape, 0, 1, dtype=tf.float32)\n # z = mu + sigma * epsilon\n z = tf.add(z_mean,\n tf.multiply(tf.sqrt(tf.exp(z_log_sigma_sq)), eps))\n return z\n\n def _KL(self,z_mean,z_log_sigma_sq):\n latent_loss = -0.5 * tf.reduce_sum(1 + z_log_sigma_sq\n - tf.square(z_mean)\n - tf.exp(z_log_sigma_sq), 1)\n latent_loss = tf.reduce_mean(latent_loss)\n return latent_loss\n\n def fade_in_alpha(self, step):\n if step > self.fade_in_duration:\n a = 1.\n else:\n a = 1. 
* (step / self.fade_in_duration)\n return a\n\n def inference_h1(self, x, reuse=False):\n with tf.variable_scope(\"qh1\", reuse=reuse) as scope:\n conv1 = tf.layers.batch_normalization(tf.layers.conv2d(x, 64, 4, strides=(2, 2), padding='same', activation=self.activation))\n return conv1\n\n def inference_h2(self, h1, reuse=False):\n with tf.variable_scope(\"qh2\", reuse=reuse) as scope:\n conv1 = tf.layers.batch_normalization(tf.layers.conv2d(h1, 128, 4, strides=(2, 2), padding='same', activation=self.activation))\n return conv1\n\n def inference_h3(self, h2, reuse=False):\n with tf.variable_scope(\"qh3\", reuse=reuse) as scope:\n conv1 = tf.layers.batch_normalization(tf.layers.conv2d(h2, 256, 4, strides=(2, 2), padding='same', activation=self.activation))\n return conv1\n\n def inference_h4(self, h3, reuse=False):\n with tf.variable_scope(\"qh4\", reuse=reuse) as scope:\n conv1 = tf.layers.batch_normalization(tf.layers.conv2d(h3, 512, 4, strides=(2, 2), padding='same', activation=self.activation))\n return conv1\n\n def ladder1(self,h1,reuse=False):\n with tf.variable_scope(\"qladder1\", reuse=reuse) as scope:\n if self.train_seq == 3 and self.fadein:\n h1 = h1 * self.fade_in\n conv1 = tf.layers.batch_normalization(tf.layers.conv2d(h1, 64, 4, strides=(2, 2), padding='same', activation=self.activation))\n conv1 = tf.layers.batch_normalization(tf.layers.conv2d(conv1, 64, 4, strides=(1, 1), padding='same', activation=self.activation))\n print('ladder1',conv1.shape)\n fc1 = tf.layers.flatten(conv1)\n z_mean = tf.layers.dense(fc1, self.z_dim)\n z_log_sigma_sq = tf.layers.dense(fc1, self.z_dim)\n return z_mean,z_log_sigma_sq\n\n def ladder2(self,h2,reuse=False):\n with tf.variable_scope(\"qladder2\", reuse=reuse) as scope:\n if self.train_seq == 2 and self.fadein:\n h2=h2*self.fade_in\n conv1 = tf.layers.batch_normalization(tf.layers.conv2d(h2, 128, 4, strides=(2, 2), padding='same', activation=self.activation))\n conv1 = tf.layers.batch_normalization(tf.layers.conv2d(conv1, 256, 4, strides=(2, 2), padding='same', activation=self.activation))\n print('ladder2', conv1.shape)\n fc1 = tf.layers.flatten(conv1)\n z_mean = tf.layers.dense(fc1, self.z_dim)\n z_log_sigma_sq = tf.layers.dense(fc1, self.z_dim)\n return z_mean,z_log_sigma_sq\n\n def ladder3(self,h3,reuse=False):\n with tf.variable_scope(\"qladder3\", reuse=reuse) as scope:\n conv1 = tf.layers.batch_normalization(tf.layers.conv2d(h3, 256, 4, strides=(2, 2), padding='same', activation=self.activation))\n conv1 = tf.layers.batch_normalization(tf.layers.conv2d(conv1, 512, 4, strides=(2, 2), padding='same', activation=self.activation))\n print('ladder3', conv1.shape)\n fc1 = tf.layers.flatten(conv1)\n z_mean = tf.layers.dense(fc1, self.z_dim)\n z_log_sigma_sq = tf.layers.dense(fc1, self.z_dim)\n return z_mean,z_log_sigma_sq\n\n def ladder4(self,h4,reuse=False):\n with tf.variable_scope(\"qladder4\", reuse=reuse) as scope:\n print('ladder4', h4.shape)\n fc1 = tf.layers.batch_normalization(tf.layers.dense(tf.layers.flatten(h4), 1024, activation=self.activation))\n fc2 = tf.layers.batch_normalization(tf.layers.dense(fc1, 1024, activation=self.activation))\n z_mean = tf.layers.dense(fc2, self.z_dim)\n z_log_sigma_sq = tf.layers.dense(fc2, self.z_dim)\n return z_mean,z_log_sigma_sq\n\n def generative4(self,z4_sample,reuse=False):\n with tf.variable_scope(\"gen4\", reuse=reuse) as scope:\n fc1 = tf.layers.batch_normalization(tf.layers.dense(z4_sample, 1024, activation=self.activation))\n fc1 = tf.layers.batch_normalization(tf.layers.dense(fc1, 1024, 
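# second 1024-unit layer: generative4 mirrors ladder4's two-layer MLP before projecting up to the 4x4x512 feature map\n                                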
activation=self.activation))\n fc2 = tf.layers.batch_normalization(tf.layers.dense(fc1, 4 * 4 * 512, activation=self.activation))\n fc2_reshaped = tf.reshape(fc2, [-1, 4, 4, 512])\n return fc2_reshaped\n\n def generative3(self,z3_sample,g4,reuse=False):\n with tf.variable_scope(\"gen3\", reuse=reuse) as scope:\n fc2 = tf.layers.batch_normalization(tf.layers.dense(z3_sample, 4 * 4 * 512, activation=self.activation))\n fc2_reshaped = tf.reshape(fc2, [-1, 4, 4, 512])\n if self.train_seq==2 and self.fadein:\n fc2_reshaped=fc2_reshaped*self.fade_in\n if self.train_seq<2:\n fc2_reshaped = fc2_reshaped * 0.\n fc2_reshaped = tf.concat(values=[fc2_reshaped, g4], axis=len(fc2_reshaped.get_shape()) - 1)\n deconv1 = tf.layers.batch_normalization(tf.layers.conv2d_transpose(fc2_reshaped, 512, 4, strides=(1, 1), padding='same',\n activation=self.activation))\n deconv1 = tf.layers.batch_normalization(\n tf.layers.conv2d_transpose(deconv1, 256, 4, strides=(2, 2), padding='same',activation=self.activation))\n return deconv1\n\n def generative2(self,z2_sample,g3,block_z=False,reuse=False):\n with tf.variable_scope(\"gen2\", reuse=reuse) as scope:\n fc2 = tf.layers.batch_normalization(tf.layers.dense(z2_sample, 8 * 8 * 256, activation=self.activation))\n fc2_reshaped = tf.reshape(fc2, [-1, 8, 8, 256])\n if self.train_seq==3 and self.fadein:\n fc2_reshaped=fc2_reshaped*self.fade_in\n if self.train_seq<3 or block_z:\n fc2_reshaped = fc2_reshaped * 0.\n fc2_reshaped = tf.concat(values=[fc2_reshaped, g3], axis=len(fc2_reshaped.get_shape()) - 1)\n deconv1 = tf.layers.batch_normalization(tf.layers.conv2d_transpose(fc2_reshaped, 128, 4, strides=(2, 2), padding='same',\n activation=self.activation))\n deconv1 = tf.layers.batch_normalization(\n tf.layers.conv2d_transpose(deconv1, 64, 4, strides=(1, 1), padding='same',activation=self.activation))\n return deconv1\n\n def generative1(self,z1_sample,g2,block_z=False,reuse=False):\n with tf.variable_scope(\"gen1\", reuse=reuse) as scope:\n fc2 = tf.layers.dense(z1_sample, 16 * 16 * 64, activation=self.activation)\n fc2_reshaped = tf.reshape(fc2, [-1, 16, 16, 64])\n if self.train_seq==4 and self.fadein:\n fc2_reshaped = fc2_reshaped * self.fade_in\n if self.train_seq<4 or block_z:\n fc2_reshaped = fc2_reshaped*0.\n fc2_reshaped = tf.concat(values=[fc2_reshaped, g2], axis=len(fc2_reshaped.get_shape()) - 1)\n deconv1 = tf.layers.conv2d_transpose(fc2_reshaped, 64, 4, strides=(2, 2), padding='same',\n activation=self.activation) # 32*32,64\n return deconv1\n\n def generative0(self,g1,reuse=False):\n with tf.variable_scope(\"gen0\", reuse=reuse) as scope:\n deconv5 = tf.layers.conv2d_transpose(g1, self.chn_num, 4, strides=(2, 2), padding='same', activation=None)\n return deconv5\n\n def _create_network(self):\n self.x = tf.placeholder(tf.float32, shape=[None, self.image_size,self.image_size,self.chn_num]) #input image\n self.fade_in = tf.placeholder(tf.float32,shape=[]) #fade-in coefficient\n with tf.variable_scope(\"vae_ladder\"):\n #inference\n self.h1 = self.inference_h1(self.x)\n print('h1',self.h1.shape)\n self.h2 = self.inference_h2(self.h1)\n print('h2', self.h2.shape)\n self.h3 = self.inference_h3(self.h2)\n print('h3', self.h3.shape)\n self.h4 = self.inference_h4(self.h3)\n print('h4', self.h4.shape)\n\n self.z_mean4, self.z_log_sigma_sq4 = self.ladder4(self.h4)\n self.z_sample4 = self._sample_z(self.z_mean4, self.z_log_sigma_sq4)\n self.z_log_sigma_sq4 = tf.clip_by_value(self.z_log_sigma_sq4, -1e2, 3)\n\n self.z_mean3, self.z_log_sigma_sq3 = self.ladder3(self.h3)\n 
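# note: as with z4 above, the sample is drawn from the unclipped log-variance; the clip below only bounds the values used downstream (e.g. in the KL loss)\n            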
self.z_sample3 = self._sample_z(self.z_mean3, self.z_log_sigma_sq3)\n self.z_log_sigma_sq3 = tf.clip_by_value(self.z_log_sigma_sq3, -1e2, 3)\n\n self.z_mean2, self.z_log_sigma_sq2 = self.ladder2(self.h2)\n self.z_sample2 = self._sample_z(self.z_mean2, self.z_log_sigma_sq2)\n self.z_log_sigma_sq2= tf.clip_by_value(self.z_log_sigma_sq2, -1e2, 3)\n\n\n self.z_mean1, self.z_log_sigma_sq1 = self.ladder1(self.h1)\n self.z_sample1 = self._sample_z(self.z_mean1, self.z_log_sigma_sq1)\n self.z_log_sigma_sq1 = tf.clip_by_value(self.z_log_sigma_sq1, -1e2, 3)\n\n #gneration\n self.g4 = self.generative4(self.z_sample4)\n print('g4', self.g4.shape)\n\n self.g3 = self.generative3(self.z_sample3,self.g4)\n print('g3',self.g3.shape)\n\n self.g2 = self.generative2(self.z_sample2,self.g3)\n print('g2', self.g2.shape)\n\n self.g1 = self.generative1(self.z_sample1, self.g2)\n print('g1', self.g1.shape)\n\n self.x_out_logit = self.generative0(self.g1)\n print('x_out_logit', self.x_out_logit.shape)\n\n self.x_out = tf.nn.sigmoid(self.x_out_logit)\n\n #store latent variables\n self.ladders = {}\n self.ladders['ladder1'] = [self.z_sample1, self.z_mean1, self.z_log_sigma_sq1]\n self.ladders['ladder2'] = [self.z_sample2, self.z_mean2, self.z_log_sigma_sq2]\n self.ladders['ladder3'] = [self.z_sample3, self.z_mean3, self.z_log_sigma_sq3]\n self.ladders['ladder4'] = [self.z_sample4, self.z_mean4, self.z_log_sigma_sq4]\n\n def _create_loss_optimizer(self):\n # Reconstruction loss\n self.x_recon = self.x\n reconstr_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.x_recon,\n logits=self.x_out_logit)\n reconstr_loss = tf.reduce_sum(reconstr_loss, [1,2,3],)\n self.reconstr_loss = tf.reduce_mean(reconstr_loss)\n\n # Latent loss\n self.latent_loss1 = self._KL(self.z_mean1, self.z_log_sigma_sq1)\n self.latent_loss2 = self._KL(self.z_mean2, self.z_log_sigma_sq2)\n self.latent_loss3 = self._KL(self.z_mean3, self.z_log_sigma_sq3)\n self.latent_loss4 = self._KL(self.z_mean4, self.z_log_sigma_sq4)\n\n self.latent_loss=self.latent_loss1+self.latent_loss2+self.latent_loss3+self.latent_loss4\n\n # summary\n reconstr_loss_summary_op = tf.summary.scalar('reconstr_loss', self.reconstr_loss)\n latent_loss_summary_op = tf.summary.scalar('latent_loss', self.latent_loss)\n self.summary_op = tf.summary.merge([reconstr_loss_summary_op, latent_loss_summary_op])\n\n self.loss = self.reconstr_loss\n coff=self.flags.coff\n\n if self.train_seq==1:\n self.loss += self.beta * self.latent_loss4\n if self.pre_KL:\n self.loss+=(self.latent_loss3+self.latent_loss2+self.latent_loss1)*coff\n elif self.train_seq==2:\n self.loss+= self.beta * (self.latent_loss4+self.latent_loss3)\n if self.pre_KL:\n self.loss += (self.latent_loss2+ self.latent_loss1)*coff\n elif self.train_seq == 3:\n self.loss+= self.beta * (self.latent_loss4+self.latent_loss3+self.latent_loss2)\n if self.pre_KL:\n self.loss+= self.latent_loss1*coff\n elif self.train_seq == 4:\n self.loss+= self.beta * (self.latent_loss4+self.latent_loss3+self.latent_loss2+self.latent_loss1)\n\n self.KL_list = [self.latent_loss1, self.latent_loss2, self.latent_loss3,self.latent_loss4,self.latent_loss]\n\n t_vars = tf.trainable_variables()\n\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n self.train_op = self.optimizer.minimize(self.loss)\n\n\n def partial_fit(self, sess, xs, iter):\n a = self.fade_in_alpha(iter)\n\n _, reconstr_loss, latent_loss, summary_str, KL_list = sess.run((self.train_op,\n self.reconstr_loss,\n self.latent_loss,\n self.summary_op,\n 
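# the per-layer KL terms are fetched alongside the total so each ladder level can be monitored separately\n                                                            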
self.KL_list,\n ),\n feed_dict={\n self.x: xs,\n self.fade_in:a,\n })\n\n return reconstr_loss, latent_loss, summary_str, KL_list,\n\n def reconstruct(self, sess, xs):\n return sess.run(self.x_out,\n feed_dict={self.x: xs,self.fade_in:1.})\n\n def transform(self, sess, xs, zlayer=0):\n m1,log_var1,m2,log_var2,m3,log_var3,m4,log_var4=sess.run([self.z_mean1, self.z_log_sigma_sq1,\n self.z_mean2, self.z_log_sigma_sq2,\n self.z_mean3, self.z_log_sigma_sq3,\n self.z_mean4, self.z_log_sigma_sq4],\n feed_dict={self.x: xs,self.fade_in:1.})\n if zlayer==0:\n return np.concatenate([m1,m2,m3,m4],axis=1),np.concatenate([log_var1,log_var2,log_var3,log_var4],axis=1)\n elif zlayer==4:\n return m4, log_var4\n elif zlayer==3:\n return m3, log_var3\n elif zlayer==2:\n return m2, log_var2\n else:\n return m1, log_var1\n\n def inference(self, sess, xs):\n tensor_handle = [self.ladders[key][1:] for key in self.ladders]\n tensor_value = sess.run(tensor_handle,\n feed_dict={self.x: xs,self.fade_in:1.})\n return {name: value for name, value in zip(self.ladders, tensor_value)}\n\n def inference_z(self, sess, xs):\n tensor_handle = [self.ladders[key][0] for key in self.ladders]\n tensor_value = sess.run(tensor_handle,\n feed_dict={self.x: xs,self.fade_in:1.})\n return {name: value for name, value in zip(self.ladders, tensor_value)}\n\n def generate(self, sess, codes):\n return sess.run(self.x_out,\n feed_dict={self.z_sample1: codes['ladder1'],\n self.z_sample2: codes['ladder2'],\n self.z_sample3: codes['ladder3'],\n self.z_sample4: codes['ladder4'],\n self.fade_in: 1.\n })\n\n def get_recons_loss(self, sess, xs):\n reconstr_loss, latent_loss = sess.run((self.reconstr_loss, self.latent_loss,),\n feed_dict={\n self.x: xs,\n self.fade_in: 1.\n })\n return reconstr_loss, latent_loss","sub_path":"model_ladder_pro_celbA.py","file_name":"model_ladder_pro_celbA.py","file_ext":"py","file_size_in_byte":17549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"323802763","text":"#!/usr/bin/env python3\n\nfrom . 
import render\n\nclass ParseException(Exception):\n\tpass\n\nclass Parser:\n\t\"Main parsing class.\"\n\tdef __init__(self, tokens):\n\t\tself.tokens = tokens\n\t\tself.pos = 0\n\t\tself.length = len(tokens)\n\n\tdef end(self):\n\t\treturn self.pos == self.length\n\n\tdef peek(self):\n\t\tif not self.end():\n\t\t\treturn self.tokens[self.pos]\n\n\tdef next(self, num=1):\n\t\tif not self.end():\n\t\t\tself.pos += num\n\n\tdef _parse_token(self):\n\t\tif self.end():\n\t\t\treturn None\n\n\t\t# expr\n\t\tif self.peek() == \"{{\":\n\t\t\tself.next()\n\n\t\t\texpr = render.ExprNode(self.peek())\n\t\t\tself.next()\n\n\t\t\tif not expr or self.peek() != \"}}\":\n\t\t\t\traise ParseException(\"error parsing expression\")\n\n\t\t\tself.next()\n\t\t\treturn expr\n\t\tif self.peek() == \"{%\":\n\t\t\tself.next()\n\n\t\t\tif self.end():\n\t\t\t\traise ParseException(\"missing closing {% tag %}\")\n\n\t\t\ttag = self.peek().strip().split()\n\n\t\t\t# only {% %} -- no information\n\t\t\tif not tag:\n\t\t\t\traise ParseException(\"no type information for {% tag %}\")\n\n\t\t\tself.next()\n\t\t\tif self.peek() != \"%}\":\n\t\t\t\traise ParseException(\"too many tokens in {% tag %}\")\n\n\t\t\tself.next()\n\n\t\t\ttp = tag[0]\n\t\t\targs = \" \".join(tag[1:])\n\n\t\t\tif tp == \"include\":\n\t\t\t\targs = args.split()\n\n\t\t\t\tif len(args) == 1:\n\t\t\t\t\tret = render.IncludeNode(args[0])\n\t\t\t\t\treturn ret\n\n\t\t\t\traise ParseException(\"wrong number of arguments to {% include %}\")\n\n\t\t\telif tp == \"for\":\n\t\t\t\t# {% for <var> in <expr> %}\n\t\t\t\tslen = len('in')\n\t\t\t\tsep = args.find(\"in\")\n\n\t\t\t\tif sep < 0:\n\t\t\t\t\traise ParseException(\"missing 'in' in {% for <var> in <expr> %}\")\n\n\t\t\t\tvar, expr = args[:sep].strip(), args[sep + slen:].strip()\n\n\t\t\t\tblock = self._parse_group([[\"end\", \"for\"]])\n\t\t\t\tnode = render.ForNode(var, expr, block)\n\n\t\t\t\tif not self._check_end([[\"end\", \"for\"]]):\n\t\t\t\t\traise ParseException(\"missing {% end for %}\")\n\n\t\t\t\tself.next(3)\n\t\t\t\treturn node\n\n\t\t\telif tp == \"if\":\n\t\t\t\t# {% if <predicate> %}\n\t\t\t\tif not args:\n\t\t\t\t\traise ParseException(\"no predicate for 'if' condition\")\n\n\t\t\t\tpredicate = args\n\n\t\t\t\tifblock = self._parse_group([[\"elif\", ...], [\"else\"], [\"end\", \"if\"]])\n\t\t\t\tblocks = [(predicate, ifblock)]\n\n\t\t\t\twhile self._check_end([[\"elif\", ...], [\"else\"]]):\n\t\t\t\t\ttokens = self.tokens[self.pos:]\n\n\t\t\t\t\tstart, tag, close = tokens[:3]\n\t\t\t\t\ttag, *predicate = tag.split()\n\t\t\t\t\tpredicate = \" \".join(predicate)\n\n\t\t\t\t\tself.next(3)\n\t\t\t\t\tif self._check_end([[\"else\"]]):\n\t\t\t\t\t\tblock = self._parse_group([[\"end\", \"if\"]])\n\t\t\t\t\t\tblocks += [(None, block)]\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tblock = self._parse_group([[\"elif\", ...], [\"else\"], [\"end\", \"if\"]])\n\t\t\t\t\t\tblocks += [(predicate, block)]\n\n\n\t\t\t\tnode = render.IfNode(blocks)\n\n\t\t\t\tif not self._check_end([[\"end\", \"if\"]]):\n\t\t\t\t\traise ParseException(\"missing {% end if %}\")\n\n\t\t\t\tself.next(3)\n\t\t\t\treturn node\n\n\t\t\telif tp == \"let\":\n\t\t\t\t# {% let <var> = <expr> %}\n\n\t\t\t\tif \"=\" not in args:\n\t\t\t\t\traise ParseException(\"no '=' in {% let <var> = <expr> %}\")\n\n\t\t\t\tvar, *sep, expr = args.split(\"=\")\n\n\t\t\t\tif sep:\n\t\t\t\t\traise ParseException(\"too many '=' in {% let <var> = <expr> %}\")\n\n\t\t\t\treturn render.LetNode(var.strip(), expr.strip())\n\t\t# text\n\t\telse:\n\t\t\ttext = self.peek()\n\t\t\tself.next()\n\t\t\treturn render.TextNode(text)\n\n\tdef _check_end(self, ends):\n\t\tif not ends:\n\t\t\treturn False\n\n\t\tpos = self.pos\n\t\ttokens = self.tokens[pos:]\n\n\t\t# There are no {% ... %} tags left\n\t\tif len(tokens) < 3:\n\t\t\treturn False\n\n\t\tstart, tag, end = tokens[:3]\n\n\t\tif start != \"{%\" or end != \"%}\":\n\t\t\treturn False\n\n\t\t# Get tag information.\n\t\ttag = tag.split()\n\n\t\t# '...' means that only the part UP TO the '...' is matched against the tag information,\n\t\t# as opposed to matching everything in the {% ... %} tags; the rest will be ignored.\n\n\t\tvalids = []\n\t\tfor end in ends:\n\t\t\tfull = True\n\t\t\tif ... in end:\n\t\t\t\twhere = end.index(...)\n\t\t\t\tend = end[:where]\n\t\t\t\tfull = False\n\n\t\t\tvalids.append((end, full))\n\n\t\tfor valid, full in valids:\n\t\t\tif valid == tag:\n\t\t\t\treturn True\n\n\t\t\tif not full:\n\t\t\t\tsection = tag[:len(valid)]\n\t\t\t\tif valid == section:\n\t\t\t\t\treturn True\n\n\t\treturn False\n\n\tdef _parse_group(self, ends=None):\n\t\tgroups = []\n\n\t\twhile not self.end() and not self._check_end(ends):\n\t\t\tgroups.append(self._parse_token())\n\n\t\tgroups = [group for group in groups if group]\n\t\treturn render.GroupNode(groups)\n\n\tdef parse(self):\n\t\t# EBNF for epyc:\n\t\t# group = (token)+\n\t\t# token = expr\n\t\t# token = text\n\t\t# expr\t= {{ <expr> }}\n\t\t# text\t= <text>\n\n\t\treturn self._parse_group()\n","sub_path":"epyc/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"255423544","text":"from question_model import Question\nfrom data import question_data\nfrom quiz_brain import QuizBrain\n\nquestion_bank = []\nfor entry in range(len(question_data)):\n    question = Question(question_data[entry][\"text\"], question_data[entry][\"answer\"])\n    question_bank.append(question)\n\nQuiz = QuizBrain(question_bank)\n\nwhile Quiz.still_has_questions():\n    Quiz.next_question()\n\nprint(\"\\n\\nYou have completed the quiz\")\nprint(f\"Your final score is: {Quiz.score}/{Quiz.question_number} \")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"504066376","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nTC : O(L) where L is length of the word \r\nSC : O(L) where L is length of the word\r\n\"\"\"\r\n\r\n# Time: O(N)\r\n# Space: O(N)\r\n\r\nclass TrieNode:\r\n    def __init__(self):\r\n        self.children = [None] * 26\r\n        self.isEnd = False\r\nclass Trie:\r\n    def __init__(self):\r\n        \"\"\"\r\n        Initialize your data structure here.\r\n        \"\"\"\r\n        self.root = TrieNode()\r\n    \r\n    def insert(self, word: str) -> None:\r\n        \"\"\"\r\n        Inserts a word into the trie.\r\n        \"\"\"\r\n        # insert each char at its correct position and then advance current\r\n        current = self.root\r\n        for i in range(len(word)):\r\n            char = word[i]\r\n            if current.children[ord(char) - ord('a')] is None:\r\n                current.children[ord(char) - ord('a')] = TrieNode()\r\n            current = current.children[ord(char) - ord('a')] # move current to that character\r\n        \r\n        current.isEnd = True # mark the end of the word: it is now in the trie\r\n        \r\n\r\n    # Search char by char: if one is missing return False, otherwise advance current\r\n    def search(self, word: str) -> bool:\r\n        \"\"\"\r\n        Returns if the word is in the trie.\r\n        \"\"\"\r\n        current = self.root\r\n        for i in range(len(word)):\r\n            char = word[i]\r\n            if current.children[ord(char) - ord('a')] is None:\r\n                return False # that character is not present among the children\r\n            current = current.children[ord(char) - ord('a')]\r\n        \r\n        return current.isEnd # whether a complete word ends at this node\r\n        \r\n    # Walk the prefix: if a char is missing return False, otherwise advance current until the prefix ends\r\n    def startsWith(self, prefix: str) -> bool:\r\n        \"\"\"\r\n        Returns if there is any word in the trie that starts with the given prefix.\r\n        \"\"\"\r\n        current = self.root\r\n        for i in range(len(prefix)):\r\n            char = prefix[i]\r\n            if current.children[ord(char) - ord('a')] is None:\r\n                return False # that character is not present among the children\r\n            current = current.children[ord(char) - ord('a')]\r\n        \r\n        return True\r\n    \r\n\r\n\r\n# Your Trie object will be instantiated and called as such:\r\n# obj = Trie()\r\n# obj.insert(word)\r\n# param_2 = obj.search(word)\r\n# param_3 = obj.startsWith(prefix)","sub_path":"Implement_Tries.py","file_name":"Implement_Tries.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"380958102","text":"def pig_latin(word):\n    vowels = ['a', 'e', 'i', 'o', 'u']\n    punctuations = [',', '.', ';', ':']\n    p = ''\n    all_chars = [ ch for ch in word if ch in vowels]\n\n    if word[-1] in punctuations:\n        p = word[-1]\n        word = word.rstrip(word[-1])\n    \n    if len(set(all_chars)) >= 2:\n        return word + 'way' + p\n\n    return word[1:] + word[0] + 'ay' + p\n\n\nprint(pig_latin('wine,'))","sub_path":"ex05-beyond.py","file_name":"ex05-beyond.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"605988831","text":"# -*- coding: utf-8 -*-\nimport sys, os\nimport tornado.ioloop\nfrom tornado.httpclient import AsyncHTTPClient\nfrom pyquery import PyQuery as pq\nfrom pymongo import MongoClient\nimport pymongo\nimport datetime, time\n\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../util'))\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../support'))\nimport loghelper\nimport config\nimport util\n\n#logger\nloghelper.init_logger(\"gen_package\", stream=True)\nlogger = loghelper.get_logger(\"gen_package\")\n\n#mongo\n(mongodb_host, mongodb_port) = config.get_mongodb_config()\nmongo = MongoClient(mongodb_host, mongodb_port)\n\nbaidu_collection = mongo.crawler_v2.market_baidu\nm360_collection = mongo.crawler_v2.market_360\nother_collection = mongo.crawler_v2.market_other\n\ndef gen(app):\n    if app.get(\"html_parsed\") is None:\n        return\n    package = app.get(\"html_parsed\")[\"package\"].strip()\n    if other_collection.find_one({\"package\":package}) is None:\n        other_collection.insert_one({\"package\":package})\n        logger.info(package)\n\nif __name__ == \"__main__\":\n    logger.info(\"Start...\")\n\n    apps = baidu_collection.find({})\n    for app in apps:\n        gen(app)\n        #break\n\n    apps = m360_collection.find({})\n    for app in apps:\n        gen(app)\n        #break\n","sub_path":"data/spider/crawler/market/gen_package.py","file_name":"gen_package.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"568133736","text":"import sidekit\nimport h5py\n\nfile = h5py.File('mfcc_feat.h5', 'r+')\ndataset = file['/dset']\ndata_read = dataset[...]\n\n\nubm = 
sidekit.Mixture()\n\nubm.EM_split(features_server=data_read,\n feature_list=None,\n distrib_nb=1024,\n iterations=(1, 2, 2, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8),\n num_thread=10,\n save_partial=False,\n ceil_cov=10,\n floor_cov=1e-2\n )","sub_path":"h5_features/test33.py","file_name":"test33.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"506506125","text":"# Libraries\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.request import urlopen\r\nimport concurrent.futures\r\nimport numpy as np\r\nfrom typing import Tuple, List, Callable, DefaultDict, Union\r\nimport pandas as pd\r\n\r\nclass Scraper:\r\n \"\"\"\r\n General class from which classes to scrape specific offer pages inherit\r\n\r\n Methods\r\n -------\r\n enterPage_parser(link: str) -> BeautifulSoup:\r\n Read website, encode and create HTML parser\r\n\r\n extract_links_idClass(isId: bool, to_find: str, soup: BeautifulSoup, replace: bool,\r\n replace_to: List[str] = []) -> Tuple[List[str], List[str]]:\r\n Extract links with id or class tag\r\n\r\n prepare_range(pages_names: List[str]) -> range:\r\n Prepare pages range\r\n\r\n create_split(links: List[str], split_size: int) -> List[range]:\r\n Create splits to relieve RAM memory\r\n\r\n flatten(result_to_flatt: List[List[str]]) -> Union[List[List[str]],List[str]]:\r\n Flatten a list\r\n\r\n scraping_all_links(func: Callable, all_links: List[str]) -> List[DefaultDict[str, str]]:\r\n General function to scrape links that activates ThreadPoolExecutor\r\n\r\n missed_offers_pages(links: List[str], offers: bool, func: Callable) -> Tuple[List[DefaultDict[str, str]],List[str]]:\r\n Scrape missed offers and pages links\r\n\r\n missed_links_all(missed_offers: List[str], func: Callable, details: bool, restriction: int = 5, offers: bool = None,\r\n func_pages_or_offers: Callable = None) -> List:\r\n Scrape omitted data until you have scraped all\r\n\r\n join_missed_with_scraped(missed: List[str], scraped: List[str]) -> List:\r\n Join missed information with already scraped\r\n\r\n scraping_offers_details_exceptions(link: str) -> Union[DefaultDict[str, str], str]:\r\n Try to connect with offer link, if it is not possible save link to global list\r\n\r\n soup_find_information(soup: BeautifulSoup, find_attr: List[str]) -> List[str]:\r\n Find in soup with 3 args\r\n\r\n extract_information(find_in: BeautifulSoup, find_with_obj: bool = False, obj: str = None) -> Union[List[str], str]:\r\n Extract strings from infos founded in soup\r\n \"\"\"\r\n\r\n # Read website, encode and create HTML parser\r\n def enterPage_parser(self, link: str) -> BeautifulSoup:\r\n \"\"\"Read website, encode and create HTML parser\r\n\r\n try to encode with \"utf-8\" if it creates error then use \"laitn-1\"\r\n\r\n Parameters\r\n ----------\r\n link : str\r\n link to web page which you want to parse\r\n\r\n Returns\r\n ------\r\n BeautifulSoup\r\n a beautifulsoup object used to extract useful information\r\n \"\"\"\r\n\r\n # Get website\r\n URL = link\r\n page = urlopen(URL)\r\n\r\n # Read website, encode and create HTML parser\r\n html_bytes = page.read()\r\n try:\r\n html = html_bytes.decode(\"utf-8\")\r\n except:\r\n html = html_bytes.decode(\"latin-1\")\r\n\r\n return BeautifulSoup(html, \"html.parser\")\r\n\r\n # Extract links with id or class tag\r\n def extract_links_idClass(self, isId: bool, to_find: str, soup: BeautifulSoup, replace: bool,\r\n replace_to: List[str] = []) -> Tuple[List[str], List[str]]:\r\n \"\"\"Extract 
links with id or class tag\r\n\r\n extracting links with id or class tag\r\n\r\n Parameters\r\n ----------\r\n isId: boolean\r\n determines whether to look for an id or a class\r\n to_find: str\r\n name of class or id\r\n soup: BeautifulSoup\r\n object used to extract information\r\n replace: boolean\r\n determines whether part of the link is to be replaced\r\n replace_to: list, optional\r\n two elements list containing what [0] has to be replaces with what [1]\r\n\r\n Returns\r\n ------\r\n list, list\r\n 1. list containing names of extracted links e.g. districts, cities.\r\n 2. list containing extrated links e.g. districts, pages\r\n \"\"\"\r\n\r\n # Find by id or class\r\n if (isId):\r\n extracted = soup.find(id=to_find)\r\n else:\r\n extracted = soup.find(class_=to_find)\r\n\r\n # If there is only one page assign empty arrays to variables\r\n try:\r\n # Find all a tag's\r\n extracted_names = [name.string for name in extracted.findAll('a') if (name.string != None)]\r\n # Extract links and replace part of string to create link with newest observations\r\n extracted_links = [link.get(\"href\") for link in extracted.findAll('a') if (link.get(\"href\") != None)]\r\n if (replace):\r\n extracted_links = [link.replace(replace_to[0], replace_to[1]) for link in extracted_links]\r\n except:\r\n extracted_names = []\r\n extracted_links = []\r\n\r\n return extracted_names, extracted_links\r\n\r\n # Prepare pages range\r\n def prepare_range(self, pages_names: List[str]) -> range:\r\n \"\"\"Preparing the range of pages to create links\r\n\r\n Parameters\r\n ----------\r\n pages_names: list\r\n links to individual city districts\r\n Returns\r\n ------\r\n range\r\n range of pages at morizon for specific page_name\r\n \"\"\"\r\n\r\n # if length is 0 then there is only 1 page\r\n if len(pages_names) != 0:\r\n last_page = int(pages_names[len(pages_names) - 1])\r\n else:\r\n last_page = 1\r\n\r\n return range(1, last_page + 1)\r\n\r\n # Create splits to relieve RAM memory\r\n def create_split(self, links: object, split_size: object) -> object:\r\n \"\"\"Create splits to relieve RAM memory\r\n\r\n Parameters\r\n ----------\r\n links: list\r\n list with list based on which length of splits is created\r\n split_size: int\r\n value divided by total number of links it is used to create splits to relieve RAM memory\r\n Returns\r\n ------\r\n list\r\n list with ranges\r\n \"\"\"\r\n\r\n if (len(links) < split_size):\r\n splitted = [[0, len(links)]]\r\n else:\r\n splitted = np.array_split(list(range(0, len(links))), len(links) / split_size)\r\n splitted = [[elements[0] - 1, elements[-1]] if elements[0] != 0 else [elements[0], elements[-1]] for\r\n elements in splitted]\r\n splitted[len(splitted) - 1][1] += 1\r\n\r\n\r\n return splitted\r\n\r\n # Flatten a list\r\n def flatten(self, result_to_flatt: List[List[str]]) -> Union[List[List[str]],List[str]]:\r\n \"\"\"Flatten a list\r\n\r\n Parameters\r\n ----------\r\n result_to_flatt: list\r\n which has to be flatten\r\n\r\n Returns\r\n ------\r\n list\r\n flatten list\r\n \"\"\"\r\n\r\n rt = []\r\n for i in result_to_flatt:\r\n if isinstance(i, list):\r\n rt.extend(self.flatten(i))\r\n else:\r\n rt.append(i)\r\n return rt\r\n\r\n # General function to scrape links that activates ThreadPoolExecutor\r\n def scraping_all_links(self, func: Callable, all_links: List[str]) -> List[DefaultDict[str, str]]:\r\n \"\"\"General function to scrape links that activates ThreadPoolExecutor\r\n\r\n Parameters\r\n ----------\r\n func: function\r\n function which will be activated in 
ThreadPoolExecutor\r\n all_links: list\r\n list with links to scrape\r\n Returns\r\n ------\r\n list\r\n scraped elements: details, and links e.g. pages\r\n \"\"\"\r\n\r\n threads = min(self.max_threads, len(all_links))\r\n\r\n with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:\r\n results = list(executor.map(func, all_links))\r\n\r\n return results\r\n\r\n # Scrape missed offers and pages links\r\n def missed_offers_pages(self, links: List[str], offers: bool,\r\n func: Callable) -> Tuple[List[DefaultDict[str, str]], List[str]]:\r\n \"\"\"Scrape missed offers and pages links\r\n\r\n Parameters\r\n ----------\r\n links: list\r\n missing links\r\n offers: boolean\r\n determines whether the missing links relate to properties\r\n func: function\r\n function which will be activated in ThreadPoolExecutor\r\n\r\n Returns\r\n ------\r\n defaultdict, list\r\n 1. scraped missed links\r\n 2. links that are still missing\r\n \"\"\"\r\n\r\n links = self.scraping_all_links(func, links)\r\n\r\n # Assign missed links to variable\r\n if offers:\r\n missed_links = [offers for offers in links if \"page\" in offers]\r\n else:\r\n missed_links = [offers for offers in links if \"page\" not in offers]\r\n\r\n return links, missed_links\r\n\r\n # Scrape omitted data until you have scraped all\r\n def missed_links_all(self, missed_offers: List[str], func: Callable, details: bool, restriction: int = 5,\r\n offers: bool = None, func_pages_or_offers: Callable = None) -> List:\r\n \"\"\"General function to scrape missing links that activates ThreadPoolExecutor until all are scraped\r\n\r\n Parameters\r\n ----------\r\n missed_offers: list\r\n missing links\r\n func: function\r\n function which will be activated in ThreadPoolExecutor\r\n details: boolean\r\n determines whether the missing links relate to details\r\n restriction: int\r\n restriction for while loop\r\n offers: boolean, default(None)\r\n determines whether the missing links relate to properties\r\n func_pages_or_offers: function, default(None)\r\n function to scrape pages or offers\r\n\r\n Returns\r\n ------\r\n list\r\n scraped elements: details, and links e.g. pages\r\n \"\"\"\r\n\r\n missed_offers_list = []\r\n n_times = 0\r\n\r\n # If there are some missed links left scrape them\r\n while (len(missed_offers) != 0) & (n_times <= restriction):\r\n if details:\r\n missed_scraped, missed_offers = func(missed_offers)\r\n else:\r\n missed_scraped, missed_offers = func(missed_offers, offers, func_pages_or_offers)\r\n missed_offers_list.append(missed_scraped)\r\n n_times += 1\r\n\r\n\r\n return missed_offers_list\r\n\r\n # Join missed information with already scraped\r\n def join_missed_with_scraped(self, missed: List[str], scraped: List[str]) -> List:\r\n \"\"\"Join missed information with already scraped\r\n\r\n Parameters\r\n ----------\r\n missed: list\r\n scraped missed links\r\n scraped: list\r\n links scraped without problems\r\n\r\n Returns\r\n ------\r\n list\r\n scraped elements: details, and links e.g. 
pages\r\n        \"\"\"\r\n\r\n        if len(missed) > 1:\r\n            missed = [properties for properties in self.flatten(missed) if properties != None]\r\n            scraped = np.concatenate([self.flatten(scraped), missed], axis=0)\r\n        elif len(missed) == 1:\r\n            scraped = np.concatenate([self.flatten(scraped), self.flatten(missed[0])], axis=0)\r\n        elif len(missed) == 0:\r\n            scraped = self.flatten(scraped)\r\n\r\n        return scraped\r\n\r\n    # Try to connect with offer link, if it is not possible save link to global list\r\n    def scraping_offers_details_exceptions(self, link: str) -> Union[DefaultDict[str, str], str]:\r\n        \"\"\"Try to connect with offer link, if it is not possible save link to global list\r\n\r\n        Parameters\r\n        ----------\r\n        link: str\r\n            offer link\r\n\r\n        Returns\r\n        ------\r\n        defaultdict or str\r\n            If scraping succeeds, it is the details of the flat and otherwise a link to the offer\r\n        \"\"\"\r\n\r\n        try:\r\n            offer_infos = self.scraping_offers_details(link)\r\n        except:\r\n            offer_infos = link\r\n\r\n        return offer_infos\r\n\r\n    # Find in Beautifulsoup with 3 args\r\n    def soup_find_information(self, soup: BeautifulSoup, find_attr: List[str]) -> List[str]:\r\n        \"\"\"Find in soup with 3 args\r\n\r\n        Parameters\r\n        ----------\r\n        soup: BeautifulSoup\r\n            parsed page to search in\r\n        find_attr: list\r\n            attributes of tag\r\n\r\n        Returns\r\n        ------\r\n        list\r\n            elements with specific attributes\r\n        \"\"\"\r\n\r\n        return soup.find(find_attr[0], attrs={find_attr[1]: find_attr[2]})\r\n\r\n    # Extract strings from information found in soup\r\n    def extract_information(self, find_in: BeautifulSoup, find_with_obj: bool = False,\r\n                            obj: str = None) -> Union[List[str], str]:\r\n        \"\"\"Extract strings from information found in soup\r\n\r\n        Parameters\r\n        ----------\r\n        find_in: BeautifulSoup\r\n            object used to find information\r\n        find_with_obj: boolean, (default False)\r\n            determines whether user wants to find elements by \"obj\"\r\n        obj: str, (default None)\r\n            find all elements with that object\r\n\r\n        Returns\r\n        ------\r\n        list or str\r\n            1. elements with specific attributes\r\n            2. 
\"None\" informs that information is not available\r\n \"\"\"\r\n\r\n try:\r\n if find_with_obj:\r\n return [info_part.string.strip() for info_part in find_in.find_all(obj) if (info_part.string != None)]\r\n else:\r\n return [info_part.string.strip() for info_part in find_in if (info_part.string != None)]\r\n except:\r\n return None\r\n","sub_path":"Scraping/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":13672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"108536242","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import reverse\nfrom django.views.generic import RedirectView\n\n\nclass IndexView(RedirectView):\n def get_redirect_url(self, *args, **kwargs):\n user = self.request.user\n\n if user.is_manager:\n return reverse(\"hcap:pending_authorizations\")\n elif user.is_notifier:\n return reverse(\"hcap:notify\")\n elif user.has_pending_authorization:\n return reverse(\"hcap:my_authorizations\")\n else:\n return reverse(\"hcap:request_authorization\")\n\n\nindex_view = login_required(IndexView.as_view())\n","sub_path":"hcap/views/index_view.py","file_name":"index_view.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"598389361","text":"import os\nimport codecs\nimport itertools\n\nclean_lines = []\nwith open('/users/nannanliu/Downloads/Parallel Corpora/2013-ST.xml','r',encoding='utf-8') as f:\n lines = f.readlines()\n clean_lines = [l.strip() for l in lines if l.strip()]\n\nwith open ('/users/nannanliu/Downloads/Parallel Corpora/2013-ST.xml','w',encoding='utf-8') as f:\n f.writelines('\\n'.join(clean_lines))\n\n# remove the blank lines of all text files in a folder\nimport glob\nimport shutil\n\npath='users/nannanliu/Downloads/Parallel Corpora'\nclean_lines=[]\n\nfor infile in glob.glob (os.path.join(path,'*.txt')):\n output=infile+ 'out.txt'\n with open (infile,'r',encoding='utf-8') as oldfile:\n lines=oldfile.readlines()\n clean_lines=[l.strip() for l in lines if l.strip()]\n\n with open (output,'w',encoding='utf-8') as newfile:\n newfile.writelines('\\n',join(clean_lines))\n \n \n\n\n \n","sub_path":"removeblanklines.py","file_name":"removeblanklines.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"209444037","text":"class Solution:\n def numDecodings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n prev_d, way, prev_way = \"\", int(s>\"\"), 0\n for d in s:\n prev_way, way = way, way*(d>\"0\") + prev_way*(9',\n }\n if use_relative_paths:\n group_object['path'] = group_name\n else:\n group_object['name'] = group_name\n parent_group_name = parent_group.get('name', '')\n group_object_key = project.AddObject(parent_group_name, group_object)\n parent_group['children'].append(group_object_key)\n return group_object\n\n\nclass ObjectKey(object):\n\n \"\"\"Wrapper around PBXFileReference and PBXGroup for sorting.\n\n A PBXGroup represents a \"directory\" containing a list of files in an\n Xcode project; it can contain references to a list of directories or\n files.\n\n A PBXFileReference represents a \"file\".\n\n The type is stored in the object \"isa\" property as a string. 
Since we\n want to sort all directories before all files, the < and > operators\n are defined so that if \"isa\" is different, they are sorted in the\n reverse of alphabetic ordering, otherwise the name (or path) property\n is checked and compared in alphabetic order.\n \"\"\"\n\n def __init__(self, obj, last):\n self.isa = obj['isa']\n if 'name' in obj:\n self.name = obj['name']\n else:\n self.name = obj['path']\n self.last = last\n\n def __lt__(self, other):\n if self.last != other.last:\n return other.last\n if self.isa != other.isa:\n return self.isa > other.isa\n return self.name < other.name\n\n def __gt__(self, other):\n if self.last != other.last:\n return self.last\n if self.isa != other.isa:\n return self.isa < other.isa\n return self.name > other.name\n\n def __eq__(self, other):\n return self.isa == other.isa and self.name == other.name\n\n\ndef SortFileReferencesByName(project, group_object, products_group_ref):\n SortFileReferencesByNameWithSortKey(\n project, group_object,\n lambda ref: ObjectKey(project.objects[ref], ref == products_group_ref))\n\n\ndef SortFileReferencesByNameWithSortKey(project, group_object, sort_key):\n group_object['children'].sort(key=sort_key)\n for key in group_object['children']:\n child = project.objects[key]\n if child['isa'] == 'PBXGroup':\n SortFileReferencesByNameWithSortKey(project, child, sort_key)\n\n\ndef AddMarkdownToProject(project, root_dir, group_object, use_relative_paths):\n list_files_cmd = ['git', '-C', root_dir, 'ls-files', '*.md']\n paths = check_output(list_files_cmd).splitlines()\n ios_internal_dir = os.path.join(root_dir, 'ios_internal')\n if os.path.exists(ios_internal_dir):\n list_files_cmd = ['git', '-C', ios_internal_dir, 'ls-files', '*.md']\n ios_paths = check_output(list_files_cmd).splitlines()\n paths.extend([os.path.join(\"ios_internal\", path) for path in ios_paths])\n for path in paths:\n new_markdown_entry = {\n \"fileEncoding\": \"4\",\n \"isa\": \"PBXFileReference\",\n \"lastKnownFileType\": \"net.daringfireball.markdown\",\n \"sourceTree\": \"\"\n }\n if use_relative_paths:\n new_markdown_entry['path'] = os.path.basename(path)\n else:\n new_markdown_entry['name'] = os.path.basename(path)\n new_markdown_entry['path'] = path\n folder = GetFolderForPath(\n project, group_object, os.path.dirname(path),\n use_relative_paths)\n folder_name = folder.get('name', None)\n if folder_name is None:\n folder_name = folder.get('path', 'sources')\n new_markdown_entry_id = project.AddObject(folder_name, new_markdown_entry)\n folder['children'].append(new_markdown_entry_id)\n\n\ndef GetFolderForPath(project, group_object, path, use_relative_paths):\n objects = project.objects\n if not path:\n return group_object\n for folder in path.split('/'):\n children = group_object['children']\n new_root = None\n for child_key in children:\n child = objects[child_key]\n if child['isa'] == 'PBXGroup':\n child_name = child.get('name', None)\n if child_name is None:\n child_name = child.get('path')\n if child_name == folder:\n new_root = child\n break\n if not new_root:\n # If the folder isn't found we could just cram it into the leaf existing\n # folder, but that leads to folders with tons of README.md inside.\n new_root = CreateGroup(project, group_object, folder, use_relative_paths)\n group_object = new_root\n return group_object\n\n\ndef ConvertGnXcodeProject(root_dir, proj_name, input_dir, output_dir, configs):\n '''Tweak the Xcode project generated by gn to support multiple configurations.\n\n The Xcode projects generated by \"gn gen 
--ide\" only supports a single\n platform and configuration (as the platform and configuration are set\n per output directory). This method takes as input such projects and\n add support for multiple configurations and platforms (to allow devs\n to select them in Xcode).\n\n Args:\n root_dir: directory that is the root of the project\n proj_name: name of the Xcode project \"file\" (usually `all.xcodeproj`)\n input_dir: directory containing the XCode projects created by \"gn gen --ide\"\n output_dir: directory where the tweaked Xcode projects will be saved\n configs: list of string corresponding to the configurations that need to be\n supported by the tweaked Xcode projects, must contains at least one\n value.\n '''\n\n UpdateXcodeProject(\n os.path.join(input_dir, proj_name),\n os.path.join(output_dir, proj_name),\n configs, root_dir)\n\n CopyTreeIfChanged(os.path.join(input_dir, proj_name),\n os.path.join(output_dir, proj_name))\n\n\ndef Main(args):\n parser = argparse.ArgumentParser(\n description='Convert GN Xcode projects for iOS.')\n parser.add_argument(\n 'input',\n help='directory containing [product|all] Xcode projects.')\n parser.add_argument(\n 'output',\n help='directory where to generate the iOS configuration.')\n parser.add_argument(\n '--add-config', dest='configurations', default=[], action='append',\n help='configuration to add to the Xcode project')\n parser.add_argument(\n '--root', type=os.path.abspath, required=True,\n help='root directory of the project')\n parser.add_argument(\n '--project-name', default='all.xcodeproj', dest='proj_name',\n help='name of the Xcode project (default: %(default)s)')\n args = parser.parse_args(args)\n\n if not os.path.isdir(args.input):\n sys.stderr.write('Input directory does not exists.\\n')\n return 1\n\n if args.proj_name not in os.listdir(args.input):\n sys.stderr.write(\n 'Input directory does not contain the Xcode project.\\n')\n return 1\n\n if not args.configurations:\n sys.stderr.write('At least one configuration required, see --add-config.\\n')\n return 1\n\n ConvertGnXcodeProject(\n args.root,\n args.proj_name,\n args.input,\n args.output,\n args.configurations)\n\nif __name__ == '__main__':\n sys.exit(Main(sys.argv[1:]))\n","sub_path":"ios/build/tools/convert_gn_xcodeproj.py","file_name":"convert_gn_xcodeproj.py","file_ext":"py","file_size_in_byte":19132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"13078023","text":"'''\nrun = python solving3.py\ninput name,age,height\n*just separate with comma\nenter for end or closing input and this program will be show output\n '''\nTuples = []\nwhile True:\n input_tupl = raw_input('name,age,height = ')\n if input_tupl != '':\n tupl = input_tupl.split(',')\n tupl [1], tupl [2] = int(tupl[1]), int(tupl[2])\n Tuples.append(tuple(tupl))\n elif input_tupl == '':\n print('\\n',sorted(Tuples))\n break","sub_path":"lulokal_solving/solving3.py","file_name":"solving3.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"492983058","text":"\"\"\"minor adjustments in models\n\nRevision ID: ac21d9a9a35c\nRevises: 465902c5347e\nCreate Date: 2020-02-12 19:24:38.024674\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ac21d9a9a35c'\ndown_revision = '465902c5347e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    with op.batch_alter_table('transactions') as batch_op:\n        batch_op.alter_column('stock_id',\n               existing_type=sa.INTEGER(),\n               nullable=False)\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.alter_column('transactions', 'stock_id',\n               existing_type=sa.INTEGER(),\n               nullable=True)\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/ac21d9a9a35c_minor_adjustments_in_models.py","file_name":"ac21d9a9a35c_minor_adjustments_in_models.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"14211477","text":"import pickle\nimport numpy as np\nfrom keras import layers, models\n\n\ndef make_upvote_model(score_data_shape, text_data_shape, neurons, vocab_size):\n    score_input = layers.Input(shape=score_data_shape,\n                               dtype='float32', name=\"score_input\")\n    # the Embedding layer looks up integer token ids, so the text input is int-typed\n    text_input = layers.Input(shape=text_data_shape,\n                              dtype='int32', name=\"text_input\")\n    embedding = layers.Embedding(vocab_size, neurons[0])(text_input)\n    text_lstm = layers.LSTM(neurons[1])(embedding)\n    concate = layers.concatenate([score_input, text_lstm])\n    dense = layers.Dense(neurons[2], activation=\"relu\")(concate)\n    main_output = layers.Dense(1, activation=\"relu\",\n                               name=\"main_output\")(dense)\n    model = models.Model(inputs=[score_input, text_input],\n                         outputs=[main_output])\n    return model\n\n\ndef compile_model(model, opt, loss, metrics):\n    model.compile(\n        optimizer=opt,\n        loss=loss,\n        metrics=metrics\n    )\n    return model\n\n\ndef main():\n    texts = np.load(\"cleaned/comment_texts.npy\")\n    scores = np.load(\"cleaned/comment_sentiment_scores.npy\")\n    upvotes = np.load(\"cleaned/comment_upvotes.npy\")\n\n    epochs = 3\n    neurons = (16, 32, 64)\n    model = make_upvote_model(scores.shape[1:],\n                              texts.shape[1:], neurons, 10000)\n    model = compile_model(model, \"adam\", \"mse\", [\"mse\"])\n    hst = model.fit([scores, texts], [upvotes],\n                    epochs=epochs, batch_size=32, validation_split=0.1,\n                    shuffle=True, verbose=True).history\n\n    model.save(\"models/predict_upvotes.h5\")\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"scripts/train/predict_upvotes_model.py","file_name":"predict_upvotes_model.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"108088475","text":"import apache_beam as beam\n\nfile_in = 'car_ad.csv'\nskip_head = \"car,price,body,mileage,engV,engType,registration,year,model,drive\"\n\nclass Head(beam.DoFn):\n    def process(self, element):\n\n        if (element!= skip_head):\n            yield element\n\n\n\nclass Split(beam.DoFn):\n    def process(self, element):\n        x = element.split(\",\")\n        y = x[2]\n        if(y == \"sedan\") or (y == \"hatch\"):\n            yield element\n\nclass Filter(beam.DoFn):\n    def process(self, element):\n        x = element.split(\",\")\n        y= float(x[1])/100\n\n        if y < 200.0:\n\n            yield element\n\nclass Print_Row(beam.DoFn):\n    def process(self, element):\n        print(element)\n# Running locally in the DirectRunner.\nwith beam.Pipeline() as pipeline:\n    (\n        pipeline\n        | 'Read lines' >> beam.io.ReadFromText(file_in)\n        | 'Par Do' >> beam.ParDo(Head())\n        | 'Par D1' >> beam.ParDo(Split())\n        | 'Par D2' >> beam.ParDo(Filter())\n        | 'Par D3' >> beam.Map(print)\n    )","sub_path":"SaiStudy - Apache Beam JR-6.py","file_name":"SaiStudy - Apache Beam JR-6.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"378764776","text":"'''\nTCSS 435 - PA 3. Trigram Language Model\n\n@author: Jieun Lee (jieun212@uw.edu)\n@version: June 01, 2017\n\n** Instruction:\n - The output text files are saved in the same folder of this project.\n - It will not display new story on the console. Please see the generated output text file.\n - Only the process of running this program will be displayed on the console.\n (please wait until you see 'Done! Please see the output text files' on the console)\n'''\n\nimport random\n\ndef main():\n \n \n # Open and read input text file, convert all to lower cases, and create word list\n doyle_txt = open(\"doyle-27.txt\", 'r').read().lower().split()\n doyle_case_txt = open(\"doyle-case-27.txt\", 'r').read().lower().split()\n alice_txt = open(\"alice-27.txt\", 'r').read().lower().split()\n london_call_txt = open(\"london-call-27.txt\", 'r').read().lower().split()\n melville_billy_txt = open(\"melville-billy-27.txt\", 'r').read().lower().split()\n twain_adventures_txt = open(\"twain-adventures-27.txt\", 'r').read().lower().split()\n \n print (\"Read all the input text files successfully\")\n \n # ---- For 2 books -----\n \n # Open a file to write a new story\n output_2books = open(\"2books.txt\", 'w')\n \n # Combines 2 books\n words2 = doyle_txt + doyle_case_txt\n \n print (\"Creating tri-gram model for 2 books ...\")\n # Train a tri-gram language model with given word list\n word_dict2 = trainTrigramLanguageModel(words2)\n \n print (\"Generating a new story of 2 books ... \")\n \n # Choose a word from word_dict(hash table) at random to generate a new story of 1000 words\n generateStroy(output_2books, word_dict2, 1000)\n \n # Close opened output file\n output_2books.close()\n \n print (\"A new story of 2 books is generated successfully!\")\n \n \n \n \n # ----- For 6 books -----\n \n # Open a file to write a new story\n output_6books = open(\"6books.txt\", 'w')\n \n # Combines 6 books\n words6 = alice_txt + doyle_txt + doyle_case_txt + london_call_txt + melville_billy_txt + twain_adventures_txt\n \n print (\"Creating tri-gram model for 6 books ...\")\n # Train a tri-gram language model with given word list\n word_dict6 = trainTrigramLanguageModel(words6)\n \n print (\"Generating a new story of 6 books ... \")\n # Choose a word from word_dict(hash table) at random to generate a new story of 1000 words\n generateStroy(output_6books, word_dict6, 1000)\n \n # Close opened output file\n output_6books.close()\n print (\"A new story of 6 books is generated successfully!\")\n \n \n print (\"Done! 
Please see the output text files\")\n    \n    \n    \n# Generates a new story and writes it to the given output file\ndef generateStroy(outputFile, word_dict, lenOfWord):\n    \n    # Choose 1st word from word_dict at random\n    key1 = random.choice(list(word_dict.keys()))\n    count = 1\n    \n    #print (key1.word)\n    outputFile.write(str(key1.word))\n    \n    # add words until the total number of words reaches the given lenOfWord\n    while True:\n\n        # Extend with the next word, chosen at random\n        key2 = random.choice(list(key1.nextDict.keys()))\n        #print(key2.word)\n        outputFile.write(' ' + str(key2.word))\n        \n        # increases the number of words for the new story\n        count += 1\n        \n        if (count >= lenOfWord):\n            break\n        \n        # Get the next key with the highest probability\n        \n        maxProbability = 0\n        key3 = None\n        for k in key2.nextDict:\n            if (key3 == None):\n                key3 = k\n            currentP = calculateProbability(key2.nextDict[k], key1.nextDict[key2])\n            #print('\\t', currentP, k.word)\n            if (currentP > maxProbability):\n                maxProbability = currentP\n                key3 = k\n        #print(maxProbability, key3.word)\n        \n        #print(key3.word)\n        outputFile.write(' ' + str(key3.word))\n        # print(key2.word, key3.word, '[' , key1.nextDict[key2],'/', key2.nextDict[key3], '=',calculateProbability(key2.nextDict[key3], key1.nextDict[key2]), ']')\n        \n        # increases the number of words for the new story\n        count += 1\n        \n        # get the 1st word from word_dict\n        hasWord = False\n        for key in word_dict:\n            if (key.word == key3.word):\n                hasWord = True\n                key1 = key\n                break\n        \n        # If there is no entry for the previous last word,\n        # get the first word at random from word_dict\n        if (not hasWord):\n            key1 = random.choice(list(word_dict.keys()))\n\n    \n    \n\n# Construct a tri-gram language model from the given list of words\ndef trainTrigramLanguageModel(words):\n    \n    word_dict = dict()    \n    \n    for i in range (0, len(words) - 2):\n        \n        # check if the 1st word already exists in word_dict\n        w1_has_key = False\n        for key1 in word_dict:\n            if (words[i] == key1.word):\n                w1_has_key = True\n                w1_key = key1\n                break\n        \n        if (w1_has_key):    # if the key exists in the word_dict\n            \n            # increase value of w1[key]\n            word_dict[w1_key] += 1\n            \n            # check if there exists 2nd word for 1st word\n            w2_has_key = False\n            for key2 in w1_key.nextDict:\n                if (words[i + 1] == key2.word):\n                    w2_has_key = True\n                    w2_key = key2    \n                    break\n            \n            if (w2_has_key):    # if the key exists in the w2_dict\n                \n                # increase value of w2[key]\n                w1_key.nextDict[w2_key] += 1\n                \n                # check if there exists 3rd word for 1st and 2nd word\n                w3_has_key = False\n                for key3 in w2_key.nextDict:\n                    if (words[i + 2] == key3.word):\n                        w3_has_key = True\n                        w3_key = key3    \n                        break\n                \n                if (w3_has_key):    # if the key exists in the w3_dict\n                    \n                    # increase value of w3[key]\n                    w2_key.nextDict[w3_key] += 1\n\n                else:\n                    \n                    # Add new key with its value with 1 for w3_dict\n                    w3_key = Node(words[i + 2])\n                    w2_key.nextDict[w3_key] = 1\n                \n            else:\n                # Add new key with value 1 for w2_dict & w3_dict\n                w2_key = Node(words[i + 1])\n                w3_key = Node(words[i + 2])\n                \n                w2_key.nextDict[w3_key] = 1\n                w1_key.nextDict[w2_key] = 1\n                \n\n        else:\n            # Add new key with value 1 for w1_dict & w2_dict & w3_dict\n            w1_key = Node(words[i])\n            w2_key = Node(words[i + 1])\n            w3_key = Node(words[i + 2])\n            w2_key.nextDict[w3_key] = 1\n            w1_key.nextDict[w2_key] = 1\n            word_dict[w1_key] = 1\n        \n    return word_dict    \n\n\n    \n# Calculates the probability that the word is used\n# P (word | given1, given2)\ndef calculateProbability(w3, w2):\n    return round((w3 / w2), 5)\n\n# Tests with 2 short text files, generating a 10-word story.\ndef 
testProgram():\n doyle_txt = open(\"test.txt\", 'r').read().lower().split()\n doyle_case_txt = open(\"test2.txt\", 'r').read().lower().split()\n \n output_test = open(\"testOut.txt\", 'w')\n \n words = doyle_txt + doyle_case_txt\n word_dict = trainTrigramLanguageModel(words)\n \n printThreeWordsWithValue(word_dict)\n \n generateStroy(output_test, word_dict, 10)\n \n output_test.close() \n \n# Tests for calculating probability\ndef testCalculateProbability(word_dict):\n for key1 in word_dict:\n if (key1.word == 'it'):\n for key2 in key1.nextDict:\n if (key2.word == 'finger'):\n for key3 in key2.nextDict:\n if (key3.word == 'on'):\n print(key1.nextDict[key2], key2.nextDict[key3], calculateProbability(key2.nextDict[key3], key1.nextDict[key2]))\n \n\n# Prints all 3 consecutive words with its values and the probability\ndef printThreeWordsWithValue(word_dict):\n print (\"---- (Word Dictionary (Hash Table)) -------------------\")\n for key1 in word_dict:\n for key2 in key1.nextDict:\n for key3 in key2.nextDict:\n print(key1.word, word_dict[key1], key2.word, key1.nextDict[key2], key3.word, key2.nextDict[key3], '\\t\\t P(w3/w2) = ', calculateProbability(key2.nextDict[key3], key1.nextDict[key2]))\n print (\"-------------------------------------------------------\")\n \n\n# Node class: Each node has its word and the its own child dictionary\nclass Node:\n def __init__(self, word):\n self.word = word\n self.nextDict = dict()\n \n def __str__(self):\n return self.word\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"TrigramModel.py","file_name":"TrigramModel.py","file_ext":"py","file_size_in_byte":9099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"534446474","text":"import logging\n\nfrom lib.mysql import mysql\nimport tushare as ts\n\n\nclass TuShare:\n def download_data(self, svc_name, primary_keys, params=None, appendix={}, table_name=None):\n logging.info(f'Downloading data from service<{svc_name}> with keys<{primary_keys}>')\n args = [] if params is None else params['args']\n kwargs = {} if params is None else params['kwargs']\n data = getattr(ts, svc_name)(*args, **kwargs)\n if data is None or len(data) == 0:\n logging.warning(f'No data found by calling service<{svc_name}>.')\n return\n\n # Add constant columns if has any\n for key in appendix:\n data[key] = appendix[key]\n\n # remove duplicated items when drop_duplicates is True\n if self.should_drop_duplicates(data.index.name, primary_keys):\n data = data.drop_duplicates(subset=primary_keys, keep=\"last\")\n\n if table_name is None:\n table_name = tushare.get_table_name(svc_name)\n\n # 为了便于显示,浮点数均保留4位有效数字\n mysql.insert_db(data, table_name, primary_keys)\n logging.info(f'Finish downloading data from service[{svc_name}]')\n\n @staticmethod\n def should_drop_duplicates(index_name, primary_keys):\n if index_name is None:\n return True\n\n if isinstance(primary_keys, str):\n return primary_keys != index_name\n\n return index_name not in primary_keys\n\n @staticmethod\n def get_table_name(svc_name):\n table_name = svc_name\n if table_name.startswith('get_'):\n table_name = table_name.replace('get_', '')\n\n return 'ts_' + table_name\n\n\ntushare = TuShare()\n","sub_path":"lib/tushare.py","file_name":"tushare.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"175295560","text":"import cmath\nimport math\nimport os\n\nimport pygame\nfrom pygame.sprite import Sprite\n\nfrom constants 
import *\n\n\nclass Altimeter(Sprite):\n def __init__(self, skydiver):\n Sprite.__init__(self)\n self.skydiver = skydiver\n self.image = pygame.image.load(os.path.join(DATA_PATH, 'altimeter.5x5x32.png')).convert()\n self.rect = self.image.get_rect()\n self.max_altitude = MAX_ALTITUDE\n\n def update(self, *args):\n color = pygame.Color(0, 255, 0)\n rect = self.rect.copy()\n rect.top = 0\n rect.left = 0\n\n center = complex(rect.center[0], rect.center[1])\n phi = (self.skydiver.altitude / self.max_altitude) * 2 * math.pi\n phi -= math.pi / 2\n\n line_start = rect.center\n line_end = center + cmath.rect(rect.width // 4, phi)\n line_end = line_end.real, line_end.imag\n\n pygame.draw.line(self.image, color, line_start, line_end, 5)\n","sub_path":"altimeter.py","file_name":"altimeter.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"334342163","text":"import nltk\r\nimport random\r\nfrom nltk.corpus import movie_reviews\r\n\r\ndocuments = [(list(movie_reviews.words(fileid)), categoryid)\r\n for categoryid in movie_reviews.categories()\r\n for fileid in movie_reviews.fileids(categoryid)]\r\n\r\nrandom.shuffle(documents)\r\n\r\n\r\nall_words = []\r\n\r\nfor w in movie_reviews.words():\r\n all_words.append(w.lower())\r\n \r\nall_words = nltk.FreqDist(all_words)\r\n\r\nprint(all_words.most_common(15)) \r\n\r\n","sub_path":"NLP/Tutorial_hands_on/sentdex/11_Text_Classification.py","file_name":"11_Text_Classification.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"566394094","text":"from decimal import Decimal\n\nfrom django.core.urlresolvers import get_callable\nfrom django.utils.text import capfirst\nfrom django.utils.translation import ugettext as _\n\nfrom pdfdocument.document import PDFDocument, cm, mm\nfrom pdfdocument.elements import create_stationery_fn, ExampleStationery\nfrom pdfdocument.utils import pdf_response\n\nimport plata\n\n\ndef invoice_pdf(pdf, order):\n pdf.init_letter(page_fn=create_stationery_fn(\n get_callable(plata.settings.PLATA_REPORTING_STATIONERY)()))\n\n if plata.settings.PLATA_REPORTING_ADDRESSLINE:\n pdf.address_head(plata.settings.PLATA_REPORTING_ADDRESSLINE)\n\n pdf.address(order, 'billing_')\n pdf.next_frame()\n\n pdf.p(u'%s: %s' % (\n capfirst(_('order date')),\n order.confirmed and order.confirmed.strftime('%d.%m.%Y') or _('Not confirmed yet'),\n ))\n pdf.spacer(3*mm)\n\n pdf.h1(_('Order %09d') % order.id)\n pdf.hr()\n\n pdf.table([(\n _('SKU'),\n capfirst(_('product')),\n capfirst(_('quantity')),\n capfirst(_('unit price')),\n capfirst(_('line item price')),\n )]+[\n (\n item.variation.sku,\n unicode(item.variation),\n item.quantity,\n u'%.2f' % item.unit_price,\n u'%.2f' % item.discounted_subtotal,\n ) for item in order.items.all()],\n (2*cm, 6*cm, 1*cm, 3*cm, 4.4*cm), pdf.style.tableHead+(\n ('ALIGN', (1, 0), (1, -1), 'LEFT'),\n ))\n\n summary_table = [\n ('', ''),\n (capfirst(_('subtotal')), u'%.2f' % order.subtotal),\n ]\n\n if order.discount:\n summary_table.append((capfirst(_('discount')), u'%.2f' % order.discount))\n\n if order.shipping:\n summary_table.append((capfirst(_('shipping')), u'%.2f' % order.shipping))\n\n pdf.table(summary_table, (12*cm, 4.4*cm), pdf.style.table)\n\n pdf.spacer(1*mm)\n\n total_title = u'%s %s' % (capfirst(_('total')), order.currency)\n\n if order.tax:\n if 'tax_details' in order.data:\n zero = Decimal('0.00')\n\n pdf.table([(\n u'',\n 
u'%s %s' % (\n _('Incl. tax'),\n u'%.1f%%' % row['tax_rate'],\n ),\n row['total'].quantize(zero),\n row['tax_amount'].quantize(zero),\n u'',\n ) for rate, row in order.data['tax_details']],\n (2*cm, 4*cm, 3*cm, 3*cm, 4.4*cm), pdf.style.table)\n\n pdf.table([\n (total_title, u'%.2f' % order.total),\n ], (12*cm, 4.4*cm), pdf.style.tableHead)\n\n pdf.spacer()\n if order.is_paid:\n try:\n payment = order.payments.authorized()[0]\n except IndexError:\n payment = None\n\n if payment:\n pdf.p(_('Already paid for with %(payment_method)s (Transaction %(transaction)s).') % {\n 'payment_method': payment.payment_method,\n 'transaction': payment.transaction_id,\n })\n else:\n pdf.p(_('Already paid for.'))\n else:\n pdf.p(_('Not paid yet.'))\n\n pdf.generate()\n\n\ndef packing_slip_pdf(pdf, order):\n pdf.init_letter(page_fn=create_stationery_fn(\n get_callable(plata.settings.PLATA_REPORTING_STATIONERY)()))\n\n if plata.settings.PLATA_REPORTING_ADDRESSLINE:\n pdf.address_head(plata.settings.PLATA_REPORTING_ADDRESSLINE)\n\n pdf.address(order.addresses()['shipping'])\n pdf.next_frame()\n\n pdf.p(u'%s: %s' % (\n capfirst(_('order date')),\n order.confirmed and order.confirmed.strftime('%d.%m.%Y') or _('Not confirmed yet'),\n ))\n pdf.spacer(3*mm)\n\n pdf.h1(_('Order %09d') % order.id)\n pdf.hr()\n\n pdf.table([(\n _('SKU'),\n capfirst(_('product')),\n capfirst(_('quantity')),\n )]+[\n (\n item.variation.sku,\n unicode(item.variation),\n item.quantity,\n ) for item in order.items.all()],\n (2*cm, 13.4*cm, 1*cm), pdf.style.tableHead+(\n ('ALIGN', (1, 0), (1, -1), 'LEFT'),\n ))\n\n if order.notes:\n pdf.spacer(10*mm)\n pdf.p(capfirst(_('notes')), style=pdf.style.bold)\n pdf.spacer(1*mm)\n pdf.p(order.notes)\n\n pdf.generate()\n\n","sub_path":"plata/reporting/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"605650746","text":"def quickSort(L, low, high):\n l = low\n h = high\n if(l >= h):\n return L\n key = L[l]\n while l < h:\n while l < h and L[h][1] >= key[1]:\n h -= 1\n L[l] = L[h]\n while l < h and L[l][1] <= key[1]:\n l += 1\n L[h] = L[l]\n L[l] = key\n quickSort(L, low, l - 1)\n quickSort(L, h + 1, high)\n return L\n","sub_path":"Python/Lab/T2a/quickSort.py","file_name":"quickSort.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"619144265","text":"from redsolutioncms.make import BaseMake\nfrom redsolutioncms.models import CMSSettings\n\nclass Make(BaseMake):\n def make(self):\n super(Make, self).make()\n cms_settings = CMSSettings.objects.get_settings()\n cms_settings.render_to('settings.py', 'chunks/redsolutioncms/settings.pyt')\n cms_settings.render_to(['..', 'templates', 'base_chunks.html'],\n 'chunks/redsolutioncms/base_chunks.html', {\n }, 'w')\n cms_settings.render_to('urls.py', 'chunks/redsolutioncms/urls.pyt')\n cms_settings.render_to(['..', 'templates', 'robots.txt'],\n 'chunks/redsolutioncms/robots.txt', {}, 'w')\n cms_settings.base_template = 'base_chunks.html'\n cms_settings.save()\n\nmake = Make()\n\n","sub_path":"chunks/redsolution_setup/make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"183945154","text":"from tweepy import API\nfrom tweepy import Cursor\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom 
tweepy import Stream\n\nimport twitter_credential\nimport numpy as np\nimport pandas as pd\n\n\n### TWITTER CLIENT ###\nclass TwitterClient():\n\n\tdef __init__(self, twitter_user=None):\n\n\t\tself.auth = TwitterAuthenticator().authenticate_twitter_app()\n\t\tself.twitter_client = API(self.auth, wait_on_rate_limit=True)\n\n\t\tself.twitter_user = twitter_user\n\n\tdef get_twitter_client_api(self):\n\n\t\treturn self.twitter_client\n\n\tdef get_user_timeline_tweets(self, num_tweets):\n\t\ttweets =[]\n\n\t\tfor tweet in Cursor(self.twitter_client.user_timeline, id=self.twitter_user).items(num_tweets):\n\t\t\ttweets.append(tweet)\n\t\treturn tweets\n\n\tdef get_home_timeline_tweets(self, num_tweets):\n\t\thome_timeline_tweets = []\n\n\t\tfor tweet in Cursor(self.twitter_client.home_timeline, id=self.twitter_user).items(num_tweets):\n\t\t\thome_timeline_tweets.append(tweet)\n\n\t\treturn home_timeline_tweets\n\n\n### TWITTER AUTHENTICATOR###\nclass TwitterAuthenticator():\n\n\tdef authenticate_twitter_app(self):\n\t\tauth = OAuthHandler(twitter_credential.CONSUMER_KEY, twitter_credential.CONSUMER_SECRET)\n\t\tauth.set_access_token(twitter_credential.ACCESS_TOKEN, twitter_credential.ACCESS_TOKEN_SECRET)\n\t\treturn auth\n### TWITTER STREAMER ###\nclass TwitterStreamer():\n\n\tdef __init__(self):\n\t\tself.twitter_authenticator = TwitterAuthenticator()\n\n\tdef stream_tweets(self, fetched_filename, hash_tag_list):\n\t\tlistener = TwitterListener(fetched_filename)\n\t\tauth = self.twitter_authenticator.authenticate_twitter_app()\n\t\tstream = Stream(auth, listener)\n\n\t\t# To filter twitter stream using keywords\n\t\tstream.filter(track=hash_tag_list)\n\n\nclass TwitterListener(StreamListener):\n\n\tdef __init__(self, fetched_filename):\n\t\tself.fetched_filename = fetched_filename\n\n\tdef on_data(self, data):\n\n\t\ttry:\n\t\t\tprint(data)\n\t\t\twith open(self.fetched_filename, 'a') as tf:\n\t\t\t\ttf.write(data)\n\t\t\treturn True\n\n\t\texcept BaseException as e:\n\t\t\tprint(\"Error on data: %s\" % str(e))\n\n\tdef on_error(self, status):\n\t\tif status == 420:\n\t\t\t# Returning False in the on_data method in case rate limit occurs.\n\t\t\treturn False\n\t\tprint(status)\n\nclass TweetAnalyzer():\n\t\"\"\"\n\tFunctionality for analyzing and categorizing tweets\n\t\"\"\"\n\tdef tweets_to_data_frame(self,tweets):\n\n\t\tdf = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])\n\t\tdf['date'] = np.array([tweet.created_at for tweet in tweets])\n\t\t\n\t\treturn df\n\nif __name__ == \"__main__\":\n\n\tusername = input(\"Enter username: \")\n\n\toptions = int(input(\"Choose timeline: [1] Home [2] User \"))\n\n\ttwitter_client = TwitterClient()\n\ttweet_analyzer = TweetAnalyzer()\n\tapi = twitter_client.get_twitter_client_api()\n\n\tif options == 1:\n\t\ttweets = api.home_timeline(screen_name=username, count=20)\n\telif options == 2:\n\t\ttweets = api.user_timeline(screen_name=username, count=20)\n\telse:\n\t\tprint(\"Input Invalid.\")\n\n\tdf = tweet_analyzer.tweets_to_data_frame(tweets)\n\n\tprint(df)\n\t","sub_path":"tweet_streamer.py","file_name":"tweet_streamer.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"63532112","text":"from Link.framework.serializer import Serializer\n\n\nclass ChatSessionSerializer(Serializer):\n\n    def _post_processing(self, obj, result_dict):\n        # make custom changes to result_dict:\n        members = []\n        for member in obj.members:\n            temp = {\n                
\"id\": str(member.id),\n \"name\": member.name\n }\n members.append(temp)\n\n result_dict[\"members\"] = members\n","sub_path":"Link/Link/chat_session/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"276823591","text":"import zmq\nimport random\nimport msgpack\nimport numpy as np\nimport msgpack_numpy\nfrom collections import deque\nfrom lib.util import Transition\nfrom zmq.eventloop.ioloop import IOLoop\nfrom zmq.eventloop.zmqstream import ZMQStream\nmsgpack_numpy.patch()\n\n\nclass Master(object):\n \"\"\"\n Broker for asynchronous interaction,\n But I would like to call it Master!!!\n \"\"\"\n\n def __init__(self, url_worker, url_client, batch_size,\n estimator_update_callable):\n\n context = zmq.Context()\n frontend = context.socket(zmq.ROUTER)\n frontend.bind(url_client)\n backend = context.socket(zmq.ROUTER)\n backend.bind(url_worker)\n\n self.available_workers = 0\n self.workers = []\n\n self.batch_size = batch_size\n self.estimator_update = estimator_update_callable\n\n self.backend = ZMQStream(backend)\n self.frontend = ZMQStream(frontend)\n self.backend.on_recv(self.handle_backend)\n\n self.loop = IOLoop.instance()\n\n def handle_backend(self, msg):\n # Queue worker address for LRU routing\n worker_addr, empty, client_addr = msg[:3]\n\n # add worker back to the list of workers\n self.available_workers += 1\n self.workers.append(worker_addr)\n\n # Third frame is READY or else a client reply address\n # If client reply, send rest back to frontend\n if client_addr != b\"READY\":\n empty, reply = msg[3:]\n self.frontend.send_multipart([client_addr, b'', reply])\n\n if self.available_workers == 1:\n # on first recv, start accepting frontend messages\n self.frontend.on_recv(self.handle_frontend)\n\n def handle_frontend(self, msg):\n # Now get next client request, route to LRU worker\n # Client request is [address][empty][request]\n client_addr, empty, request = msg\n request = msgpack.loads(request)\n if request[0] == 'reset':\n state = request[1]\n msg = [b'', client_addr, b'', msgpack.dumps([state])]\n self.worker_send(msg)\n elif request[0] == 'step':\n t = Transition(*request[1:])\n self.update(t)\n\n if t.done:\n self.frontend.send_multipart([client_addr, b'', b'reset'])\n else:\n msg = [b'', client_addr, b'', msgpack.dumps([t.next_state])]\n self.worker_send(msg)\n\n def worker_send(self, msg):\n # Dequeue and drop the next worker address\n self.available_workers -= 1\n worker_id = self.workers.pop(0)\n\n self.backend.send_multipart([worker_id] + msg)\n if self.available_workers == 0:\n # stop receiving until workers become available again\n self.frontend.stop_on_recv()\n\n\nclass OnMaster(Master):\n \"\"\"For online learning.\"\"\"\n\n def __init__(self, **kwargs):\n super(OffMaster, self).__init__(**kwargs)\n self.memory = []\n\n def update(self, transition):\n self.memory.append(transition)\n if len(self.memory) == self.batch_size:\n samples = map(np.array, zip(*self.memory))\n self.estimator_update(*samples)\n self.memory = []\n\n\nclass OffMaster(Master):\n \"\"\"For memory buffer learning.\"\"\"\n\n def __init__(self, init_memory_size, memory_size, update_estimator_every,\n **kwargs):\n super(OffMaster, self).__init__(**kwargs)\n self.init_memory_size = init_memory_size\n self.memory = deque(maxlen=memory_size)\n self.update_estimator_every = update_estimator_every\n self.tot = 0\n\n def update(self, transition):\n 
self.memory.append(transition)\n        self.tot += 1\n        if len(self.memory) > self.init_memory_size and \\\n                self.tot % self.update_estimator_every == 0:\n            samples = random.sample(self.memory, self.batch_size)\n            samples = map(np.array, zip(*samples))\n            self.estimator_update(*samples)\n\n\ndef estimator_worker(url, i, sess, q_estimator, policy):\n    context = zmq.Context()\n    socket = context.socket(zmq.REQ)\n    identity = ('Worker-%d' % i).encode('utf-8')\n    socket.identity = identity\n    socket.connect(url)\n\n    socket.send(b'READY')\n    while True:\n        address, empty, request = socket.recv_multipart()\n        q_values = q_estimator.predict(sess, msgpack.loads(request))\n        action = policy(q_values[0])\n        socket.send_multipart([address, b'', msgpack.dumps(action)])\n","sub_path":"qlearning/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"98262491","text":"#!/usr/bin/python\n# Filename: appConfig.py\n\ndef singleton(cls):\n    obj = cls()\n    cls.__new__ = lambda cls: obj  # Always return the same object\n    return cls\n\n@singleton\nclass Const(object):\n    # Holding type(s)\n    LONG = 'Long'\n    SHORT = 'Short'\n    HOLDING_TYPE_CHOICES = (\n        (LONG, LONG),\n        (SHORT, SHORT),\n    )\n    # Account type(s)\n    CHECKING = 'Checking'\n    SAVINGS = 'Savings'\n    BROKERAGE = 'Brokerage'\n    RETIREMENT = 'Retirement'\n    ACCOUNT_TYPE_CHOICES = (\n        (BROKERAGE, BROKERAGE),\n        (RETIREMENT, RETIREMENT),\n        (CHECKING, CHECKING),\n        (SAVINGS, SAVINGS),\n    )\n    \n\n    ","sub_path":"trailstop_app/appcfg.py","file_name":"appcfg.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"436997697","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom collections import defaultdict\r\nimport math\r\nfrom datetime import datetime\r\n\r\n# Compute the smallest distance between the positions of two days\r\ndef min_dis_of_day(day1, day2):\r\n    dis = 1000000\r\n    for data1 in day1:\r\n        for data2 in day2:\r\n            # each position of a day is considered as a point\r\n            disTemp = float(math.sqrt( (float(data2[0]) - float(data1[0]))**2 + (float(data2[1]) - float(data1[1]))**2 ) )\r\n            if(disTemp < dis): dis = disTemp\r\n    return dis\r\n\r\n# find the two days with the minimum distance\r\ndef min_dis_of_horse(horse):\r\n    final_distance = 1000000000\r\n    day1, day2 = 0, 0\r\n    key = list(horse.keys())\r\n    for i in range(len(key)):\r\n        day1 = key[i]\r\n        for j in range(i): \r\n            day2 = key[j]\r\n            disTemp = min_dis_of_day(horse[day1], horse[day2])\r\n            if(disTemp < final_distance): \r\n                final_distance = disTemp\r\n    return final_distance, day1, day2 \r\n\r\n\r\nf1 = open ('output4.txt', 'w')\r\n\r\n\r\n# read data and divide it into a dictionary keyed by horse id\r\n\r\nhorse_by_id = defaultdict(list)\r\n#f = open('ZebraBotswana.txt' , 'r')\r\nf = open('input.txt' , 'r')\r\nfor x in f:\r\n    horse = x.split(\",\")\r\n    horse[0] = datetime.fromtimestamp(float(horse[0])).strftime('%Y-%m-%d') \r\n    id = horse[3] \r\n    horse_by_id[id].append(horse) \r\n\r\n\r\n# for each horse id, group its records by day; each record holds a position (long, lat)\r\nfor key in horse_by_id:\r\n    horse_by_time = defaultdict(list)\r\n    for horse in horse_by_id[key]: \r\n        viTri = [horse[1], horse[2]]\r\n        time = horse[0]\r\n        horse_by_time[time].append(viTri)\r\n    # calculate the pair of days with the minimum distance\r\n    lengh, day1, day2 = min_dis_of_horse(horse_by_time)\r\n    result = str(key) + \"//\" +str(day1) + \"/\" +str(day2) + \"/\" 
+ str(lengh) + \"\\n\" +\"=============================\" + \"\\n\"\r\n f1.write(result)\r\n\r\n\r\nprint(\"Done\")","sub_path":"task3/bai 4/timkc.py","file_name":"timkc.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"62914964","text":"# Copyright 2020-2021 Huawei Technologies Co., Ltd.All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Graph based scripts converter workflow.\"\"\"\nimport multiprocessing as mp\nimport os\nimport re\nimport sys\nfrom typing import List\nfrom importlib import import_module\nfrom importlib.util import find_spec\nfrom functools import partial\nfrom google.protobuf.internal import api_implementation\nfrom mindinsight.mindconverter.graph_based_converter.common.global_context import GlobalContext\nfrom mindinsight.mindconverter.graph_based_converter.common.utils import lib_version_satisfied, onnx_satisfied, \\\n save_code_file_and_report, get_framework_type, check_dependency_integrity, get_third_part_lib_validation_error_info\nfrom mindinsight.mindconverter.graph_based_converter.constant import FrameworkType, \\\n ONNX_MIN_VER, TF2ONNX_MIN_VER, ONNXRUNTIME_MIN_VER, ONNXOPTIMIZER_MIN_VER, TORCH_MIN_VER\nfrom mindinsight.mindconverter.graph_based_converter.generator import batch_add_nodes\nfrom mindinsight.mindconverter.graph_based_converter.mapper import ONNXToMindSporeMapper\nfrom mindinsight.mindconverter.common.log import logger as log, logger_console as log_console\nfrom mindinsight.mindconverter.common.exceptions import GraphInitError, TreeCreationError, SourceFilesSaveError, \\\n BaseConverterError, UnknownModelError, GeneratorError, TfRuntimeError, RuntimeIntegrityError, ParamMissingError, \\\n BadParamError\nfrom mindinsight.mindconverter.graph_based_converter.third_party_graph import GraphFactory\n\ncheck_common_dependency_integrity = partial(check_dependency_integrity,\n \"onnx\", \"onnxruntime\", \"onnxoptimizer\")\n\n\ndef onnx_lib_version_satisfied():\n \"\"\"Check onnx libs version whether is satisfied.\"\"\"\n onnx = import_module(\"onnx\")\n ort = import_module(\"onnxruntime\")\n optimizer = import_module(\"onnxoptimizer.version\")\n if not lib_version_satisfied(getattr(ort, \"__version__\"), ONNXRUNTIME_MIN_VER):\n log_console.warning(\"onnxruntime's version should be greater than %s, however current version is %s.\",\n ONNXRUNTIME_MIN_VER, ort.__version__)\n\n if not lib_version_satisfied(getattr(onnx, \"__version__\"), ONNX_MIN_VER) \\\n or not lib_version_satisfied(getattr(optimizer, \"version\"), ONNXOPTIMIZER_MIN_VER):\n return False\n return True\n\n\ndef _print_error(err):\n \"\"\"Print error to stdout and record it.\"\"\"\n log.error(err)\n log_console.error(\"\\n\")\n log_console.error(str(err))\n log_console.error(\"\\n\")\n\n\ndef torch_version_satisfied(output_queue):\n \"\"\"Check Torch version whether is satisfied.\"\"\"\n 
satisfied = False\n pattern = r\"\\d+\\.\\d+\\.\\d+\"\n torch_version = re.findall(pattern, getattr(import_module('torch'), \"__version__\"))\n if torch_version:\n satisfied = lib_version_satisfied(torch_version[0], TORCH_MIN_VER)\n output_queue.put(satisfied)\n\n\ndef torch_installation_validation(func):\n \"\"\"\n Validate args of func.\n\n Args:\n func (type): Function.\n\n Returns:\n type, inner function.\n \"\"\"\n\n def _f(graph_path: str, input_nodes: dict, output_nodes: List[str],\n output_folder: str, report_folder: str = None):\n # Check whether pytorch is installed.\n error_info = None\n torch_version_validation = False\n if graph_path.endswith('.onnx'):\n if not onnx_satisfied() or not check_common_dependency_integrity():\n error_info = f\"{get_third_part_lib_validation_error_info(['onnx', 'onnxruntime', 'onnxoptimizer'])} \" \\\n f\"are required when using graph based scripts converter.\"\n else:\n if not find_spec(\"torch\") or not onnx_satisfied() or not check_common_dependency_integrity():\n error_info = \\\n f\"{get_third_part_lib_validation_error_info(['torch', 'onnx', 'onnxruntime', 'onnxoptimizer'])} \" \\\n f\"are required when using graph based scripts converter, and PyTorch version must \" \\\n f\"be consisted with model generation runtime.\"\n\n if not error_info:\n output_queue = mp.Queue()\n process = mp.Process(target=torch_version_satisfied, args=(output_queue,))\n process.start()\n torch_version_validation = output_queue.get()\n process.join()\n\n if error_info:\n _print_error(RuntimeIntegrityError(error_info))\n sys.exit(0)\n\n if (not torch_version_validation and not graph_path.endswith('.onnx')) or not onnx_lib_version_satisfied():\n lib_check_list = ['onnx', 'onnxruntime', 'onnxoptimizer']\n if not graph_path.endswith('.onnx'):\n lib_check_list.insert(0, 'torch')\n error = RuntimeIntegrityError(\n f\"{get_third_part_lib_validation_error_info(lib_check_list)} \"\n f\"are required when using graph based scripts converter.\"\n )\n _print_error(error)\n sys.exit(0)\n\n func(graph_path=graph_path,\n input_nodes=input_nodes, output_nodes=output_nodes,\n output_folder=output_folder, report_folder=report_folder)\n\n return _f\n\n\ndef _check_tf_installation():\n \"\"\"\n Check whether TensorFlow was installed.\n\n Returns:\n bool, true or false.\n \"\"\"\n return find_spec(\"tensorflow\") or find_spec(\"tensorflow-gpu\")\n\n\ndef tf_installation_validation(func):\n \"\"\"\n Validate args of func.\n\n Args:\n func (type): Function.\n\n Returns:\n type, inner function.\n \"\"\"\n\n def _f(graph_path: str, input_nodes: dict, output_nodes: List[str],\n output_folder: str, report_folder: str):\n not_integral_error = RuntimeIntegrityError(\n f\"TensorFlow, \"\n f\"{get_third_part_lib_validation_error_info(['tf2onnx', 'onnx', 'onnxruntime', 'onnxoptimizer'])} \"\n f\"are required when using graph based scripts converter for TensorFlow conversion.\"\n )\n # Check whether tensorflow is installed.\n if not _check_tf_installation() or not onnx_satisfied():\n _print_error(not_integral_error)\n sys.exit(0)\n\n if not any([check_common_dependency_integrity(\"tensorflow\"),\n check_common_dependency_integrity(\"tensorflow-gpu\")]):\n _print_error(not_integral_error)\n sys.exit(0)\n\n tf2onnx = import_module(\"tf2onnx\")\n\n if not lib_version_satisfied(getattr(tf2onnx, \"__version__\"), TF2ONNX_MIN_VER) \\\n or not onnx_lib_version_satisfied():\n _print_error(not_integral_error)\n sys.exit(0)\n\n func(graph_path=graph_path,\n input_nodes=input_nodes, output_nodes=output_nodes,\n 
output_folder=output_folder, report_folder=report_folder)\n\n return _f\n\n\ndef _extract_model_name(model_path):\n \"\"\"\n Extract model name from model path.\n\n Args:\n model_path (str): Path of Converted model.\n\n Returns:\n str, name of Converted model.\n \"\"\"\n\n base_path = os.path.basename(model_path)\n model_name = '.'.join(base_path.split('.')[:-1])\n return model_name\n\n\n@torch_installation_validation\n@GraphInitError.uniform_catcher()\n@TreeCreationError.uniform_catcher()\n@SourceFilesSaveError.uniform_catcher()\n@GeneratorError.uniform_catcher()\ndef graph_based_converter_pytorch_to_ms(graph_path: str,\n input_nodes: dict, output_nodes: List[str],\n output_folder: str, report_folder: str = None):\n \"\"\"\n PyTorch to MindSpore based on Graph.\n\n Args:\n graph_path (str): Graph file path.\n input_nodes (dict): Input node(s) of the model.\n output_nodes (list[str]): Output node(s) of the model.\n output_folder (str): Output folder.\n report_folder (str): Report output folder path.\n \"\"\"\n graph_obj = GraphFactory.init(graph_path, input_nodes=input_nodes, output_nodes=output_nodes)\n generator_inst = batch_add_nodes(graph_obj, ONNXToMindSporeMapper)\n model_name = _extract_model_name(graph_path)\n code_fragments = generator_inst.generate()\n save_code_file_and_report(model_name, code_fragments, output_folder, report_folder)\n # Release global context.\n GlobalContext.release()\n\n\n@tf_installation_validation\n@GraphInitError.uniform_catcher()\n@TfRuntimeError.uniform_catcher()\n@TreeCreationError.uniform_catcher()\n@SourceFilesSaveError.uniform_catcher()\n@GeneratorError.uniform_catcher()\ndef graph_based_converter_tf_to_ms(graph_path: str,\n input_nodes: dict, output_nodes: List[str],\n output_folder: str, report_folder: str = None):\n \"\"\"\n Tensorflow to MindSpore based on Graph.\n\n Args:\n graph_path (str): Graph file path.\n input_nodes (dict): Input node(s) of the model.\n output_nodes (list[str]): Output node(s) of the model.\n output_folder (str): Output folder.\n report_folder (str): Report output folder path.\n \"\"\"\n # Close unnecessary log.\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n graph_obj = GraphFactory.init(graph_path, input_nodes=input_nodes, output_nodes=output_nodes)\n generator_inst = batch_add_nodes(graph_obj, ONNXToMindSporeMapper)\n model_name = _extract_model_name(graph_path)\n code_fragments = generator_inst.generate()\n save_code_file_and_report(model_name, code_fragments, output_folder, report_folder)\n # Release global context.\n GlobalContext.release()\n\n\n@BaseConverterError.uniform_catcher()\ndef main_graph_base_converter(file_config):\n \"\"\"\n The entrance for converter, script files will be converted.\n\n Args:\n file_config (dict): The config of file which to convert.\n \"\"\"\n\n if api_implementation.Type() != 'cpp' or os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION') != 'cpp':\n log_console.warning(\"Protobuf is currently implemented in \\\"Python\\\". \"\n \"The conversion process may take a long time. 
\"\n \"Please use `export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp` to enable cpp backend.\")\n\n graph_path = file_config['model_file']\n frame_type = get_framework_type(graph_path)\n if not file_config.get(\"shape\"):\n raise ParamMissingError(\"Param missing, `--shape` is required when using graph mode.\")\n\n if graph_path.endswith(\"pth\") and not file_config.get(\"input_nodes\", []) and \\\n file_config.get(\"shape\") and len(file_config.get(\"shape\", ())) == 1:\n file_config['input_nodes'] = [\"input.1\"]\n else:\n check_params = ['input_nodes', 'output_nodes']\n check_params_exist(check_params, file_config)\n\n if len(file_config['shape']) != len(file_config.get(\"input_nodes\", [])):\n raise BadParamError(\"`--shape` and `--input_nodes` must have the same length, \"\n \"and no redundant node in `--input_nodes`.\")\n\n input_nodes = dict()\n for shape, node in zip(file_config['shape'], file_config['input_nodes']):\n input_nodes[node] = shape\n\n if frame_type == FrameworkType.PYTORCH.value:\n if graph_path.endswith('.onnx'):\n graph_based_converter_pytorch_to_ms(graph_path=graph_path,\n input_nodes=input_nodes,\n output_nodes=file_config['output_nodes'],\n output_folder=file_config['outfile_dir'],\n report_folder=file_config['report_dir'])\n else:\n graph_based_converter_pytorch_to_ms(graph_path=graph_path,\n input_nodes=input_nodes,\n output_nodes=[],\n output_folder=file_config['outfile_dir'],\n report_folder=file_config['report_dir'])\n elif frame_type == FrameworkType.TENSORFLOW.value:\n graph_based_converter_tf_to_ms(graph_path=graph_path,\n input_nodes=input_nodes,\n output_nodes=file_config['output_nodes'],\n output_folder=file_config['outfile_dir'],\n report_folder=file_config['report_dir'])\n else:\n error_msg = \"Get UNSUPPORTED model.\"\n error = UnknownModelError(error_msg)\n raise error\n\n\ndef check_params_exist(params: list, config):\n \"\"\"Check params exist.\"\"\"\n miss_param_list = ''\n for param in params:\n if not config.get(param) or not config[param]:\n miss_param_list = ', '.join((miss_param_list, param)) if miss_param_list else param\n\n if miss_param_list:\n raise ParamMissingError(f\"Param(s) missing, {miss_param_list} is(are) required when using graph mode.\")\n","sub_path":"mindinsight/mindconverter/graph_based_converter/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":13607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"575483877","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing(xuming624@qq.com)\n@description: \n\"\"\"\nfrom textgen.augmentation.text_augment import augment\n\nif __name__ == '__main__':\n a = ['晚上一个人好孤单,想:找附近的人陪陪我.',\n '晚上肚子好难受',\n '你会武功吗,我不会',\n '组装标题质量受限于广告主自提物料的片段质量,且表达丰富度有限',\n '主要研究机器学习、深度学习、计算机视觉、智能对话系统相关内容',\n ]\n b = augment(a, aug_ops='tfidf-1.0', aug_type='word')\n print(a)\n for i in b:\n print(i)\n\n b = augment(a, aug_ops='bt-0.9', aug_type='sentence')\n print(a)\n for i in b:\n print(i)\n","sub_path":"examples/base_demo.py","file_name":"base_demo.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"511049741","text":"from setuptools import setup, find_packages\n\ndef readme():\n with open(\"README.md\") as f:\n README = f.read()\n return README\n\n\nwith open(\"requirements.txt\") as f:\n required = f.read().splitlines()\n\n\nsetup(\n name=\"timetk\",\n version=\"0.0.0.9000\",\n description=\"TimeTK - The time series toolkit for 
Python.\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/business-science/pytimetk\",\n author=\"Matt Dancho\",\n author_email=\"mdancho@business-science.io\",\n copyright=\"Business Science\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n packages=find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests.*\", \"tests\"]),\n include_package_data=True,\n install_requires=required,\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"353272526","text":"from powerline.lib.vcs import guess, tree_status\nfrom powerline.segments import Segment, with_docstring\nfrom powerline.theme import requires_segment_info, requires_filesystem_watcher\n\n\n@requires_filesystem_watcher\n@requires_segment_info\nclass VCSInfoSegment(Segment):\n divider_highlight_group = None\n\n @staticmethod\n def get_directory(segment_info):\n return segment_info['getcwd']()\n\n @staticmethod\n def is_dirty(pl, repo, ignore_statuses):\n try:\n status = tree_status(repo, pl)\n except Exception as e:\n pl.exception('Failed to compute tree status: {0}', str(e))\n status = '?'\n else:\n status = status and status.strip()\n if status in ignore_statuses:\n return False\n return bool(status)\n\n def get_highlight_group(self, pl, repo, name, status_colors=False, ignore_statuses=(), **kwargs):\n scol = ['vcsinfo:' + name, 'vcsinfo']\n if status_colors:\n scol.insert(0, 'vcsinfo:clean' if not self.is_dirty(pl, repo, ignore_statuses) else 'vcsinfo:dirty')\n return scol\n\n def get_property(self, pl, repo, name, **kwargs):\n cont = getattr(repo, name)\n # I apologize in advance for the ugliness ahead.\n if isinstance(cont, str):\n return [{\n 'contents': cont,\n 'highlight_groups': self.get_highlight_group(pl, repo, name, **kwargs),\n 'divider_highlight_group': self.divider_highlight_group\n }]\n else:\n return [{\n 'contents': c,\n 'highlight_groups': self.get_highlight_group(pl, repo, name, **kwargs),\n 'divider_highlight_group': self.divider_highlight_group\n } for c in cont]\n\n def __call__(self, pl, segment_info, create_watcher, **kwargs):\n directory = self.get_directory(segment_info)\n if directory:\n repo = guess(path=directory, create_watcher=create_watcher)\n if repo is not None:\n return self.get_property(pl, repo, **kwargs)\n return None\n\n def argspecobjs(self):\n yield '__call__', self.__call__\n yield 'get_property', self.get_property\n yield 'get_highlight_group', self.get_highlight_group\n\n omitted_method_args = {\n '__call__': (0,),\n 'get_property': (0, 1, 2,),\n 'get_highlight_group': (0, 1, 2,),\n }\n\n def omitted_args(self, name, method):\n return self.omitted_method_args[name]\n\n\nvcsinfo = with_docstring(VCSInfoSegment(),\n'''Return the current revision info\n\n:param str name:\n Determines what property should be used. 
Valid values:\n\n    ========  ===================================================\n    Name      Description\n    ========  ===================================================\n    branch    Current branch name.\n    short     Current commit revision abbreviated hex or revno.\n    summary   Current commit summary.\n    name      Human-readable name of the current revision.\n    bookmark  Current bookmark (mercurial) or branch (otherwise).\n    status    Current repository status.\n    ========  ===================================================\n:param bool status_colors:\n    Determines whether repository status will be used to determine highlighting.\n    Default: False.\n:param list ignore_statuses:\n    List of statuses which will not result in repo being marked as dirty. Most\n    useful is setting this option to ``[\"U\"]``: this will ignore repository\n    which has just untracked files (i.e. repository with modified, deleted or\n    removed files will be marked as dirty, while just untracked files will make\n    segment show clean repository). Only applicable if ``status_colors`` option\n    is True.\n\nHighlight groups used: ``vcsinfo:clean``, ``vcsinfo:dirty``, ``vcsinfo``.\n\nAdditionally ``vcsinfo:{name}`` is used.\n''')\n","sub_path":"powerline/segments/common/vcs.py","file_name":"vcs.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"491421520","text":"from math import sqrt\n\n\ndef is_prime(num):\n    if num < 2:  # primes are defined only for integers >= 2; this also rejects negatives\n        return False\n    if num in (2, 3):\n        return True\n    if int(str(num)[-1]) % 2 == 0:\n        return False\n    if sum(map(int, str(num))) % 3 == 0 or sum(map(int, str(num))) % 9 == 0:\n        return False\n    if len(str(num)) >= 2 and int(str(num)[-2:]) % 4 == 0:\n        return False\n    for i in range(2, int(sqrt(num)) + 1):\n        if num % i == 0:\n            return False\n    return True\n\n\ndef primes():\n    \"\"\"\n    A generator which yields primes using a naive algorithm.\n    Increments numbers and checks if the number is prime\n    using the is_prime method.\n    \"\"\"\n    current = 2\n    while True:\n        if is_prime(current):\n            yield current\n        current += 1\n\n\ndef generate_primes(max_num):\n    \"\"\"\n    Generates a list of primes using the\n    `Sieve of Eratosthenes `_ method.\n    :param max_num: Max value to use in the sieve\n    :return: list of primes up to max_num\n    \"\"\"\n    rng = list(range(2, max_num+1))\n    if max_num <= 3:\n        return list(range(2, max_num+1))\n\n    p = 2\n    while p <= max_num:\n        # cross out every multiple of p, from 2*p up to and including max_num\n        p_increments = list(range(p * 2, max_num + 1, p))\n        if not p_increments:  # no multiples left to cross out\n            break\n        for i in p_increments:\n            rng[i-2] = 0\n        p += 1\n\n    return [v for v in rng if v != 0]","sub_path":"Numbers/common/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"482808995","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n'''\r\nEXERCISE 1: Build a form that asks for the\r\nname, CPF, address, age, height and phone number,\r\nand print it all as an organized report\r\n'''\r\n\r\nprint('FORM\\n')\r\n\r\nnome = input('Enter your name: ')\r\ncpf = input('Enter your CPF: ')\r\nendereco = input('Enter your full address: ')\r\nidade = input('Enter your age: ')\r\naltura = input('Enter your height [use the format 1.70]: ')\r\ntelefone = input('Enter your phone number [include the area code + 9 at the start]: ')\r\n\r\nprint('\\nDATA SUCCESSFULLY REGISTERED!\\nCHECK YOUR DATA...')\r\nprint('\\nYour name is', nome, ', with CPF number:', cpf, '\\nAddress:', endereco,\r\n      
'\\nAge:', idade, 'years\\nHeight:', altura, '\\nContact phone:', telefone)\r\n","sub_path":"src/exercicios/1-exercicio1.py","file_name":"1-exercicio1.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"348780752","text":"import re\nfrom WebArchiveParser import WebArchiveParser\nimport json\n\nclass LinkHandler:\n    def __init__(self, domain, sitemap):\n        # domain without a trailing /\n\n        self._domain = domain\n        self._sitemap = sitemap\n        self.categories_map = {}\n\n    def create_url_pattern(self, datas): # site/view/12, site/view/124\n        \"\"\"Takes a list with two links to records; returns the record URL template\"\"\"\n        i = 0\n        pattern = r'http://web.archive.org/web/\\d*/%s' % self._domain\n        while i < len(datas):\n            datas[i] = re.sub(pattern, '', datas[i])\n            i += 1\n        datas = list(map(lambda data: data.split('/'), datas))\n        checked_datas = []\n\n        for data in datas:\n            tmp = []\n            for url in data:\n                if url:\n                    tmp.append(url)\n            checked_datas.append(tmp)\n\n        url0, url1 = checked_datas\n        # for data in datas:\n        #     if len(data) != len(url0):\n        #         raise IncorrectInputData('Invalid links were entered. Try to fix the error or use another way of detecting the structure')\n        i = -1\n        k = 0\n        while i < len(url0)-1:\n            i += 1\n            if url0[i] == url1[i]:\n                continue\n            url0[i] = ''\n            url1[i] = ''\n            k = k + 1\n\n        # if k > 1:\n        #     raise StuctureNotDeterminated('Could not determine the site structure. Try another option')\n\n        self._url_pattern = '/'.join(url0)\n        return self._url_pattern.replace(self._domain, '')\n\n    def create_site_structure(self, example_categories, example_records):\n        \"\"\"Input: the sitemap; output: the site structure grouped by URL templates\"\"\"\n        self.lk = LinkHandler(self._domain, self._sitemap)\n        template_cat = self.lk.create_url_pattern(example_categories)\n        template_rec = self.lk.create_url_pattern(example_records)\n\n\n        finded_categories = {} # category pages that match the template\n        for sitemap in self._sitemap: # find category pages matching the template\n            for page, urls in sitemap.items():\n                if page == template_cat:\n                    finded_categories[page] = []\n\n        for sitemap in self._sitemap: # find record URLs in the sitemap\n            for page, urls in sitemap.items():\n                tmp = set()\n                for url in urls:\n                    if url and url.find(template_rec) != -1:\n                        tmp.add(url)\n                finded_categories[page] = tmp\n\n        #print(finded_categories)\n        for key, value in finded_categories.items():\n            print(key)\n            for val in value:\n                print('   %s' %val)\n\n        print(template_cat, template_rec)\n\n    def create_site_structure_sets(self):\n        with open('sitemap.txt', 'r') as f:\n            self._sitemap = json.loads(f.read())\n        cleaned_sitemap = {}\n        set_all_links = set()\n\n        for sitemap in self._sitemap: # strip duplicates from the sitemap and build a set of all links\n            for key, value in sitemap.items():\n                name = re.sub('.*{}'.format(self._domain), '', key)\n                cleaned_sitemap[name] = set(value)\n                links_for_cat = set()\n                for url in value:\n                    if url:\n                        ulr_cleaned = re.sub(r'.*{}'.format(self._domain), '', url)\n                        set_all_links.add(ulr_cleaned)\n                        links_for_cat.add(ulr_cleaned)\n                cleaned_sitemap[name] = links_for_cat\n\n        #print(cleaned_sitemap)\n        site_tree = {}\n        for key, value in cleaned_sitemap.items(): # subtract every other set from each set, keeping only its unique links\n            for keydiff, valdiff in cleaned_sitemap.items():\n                if key == keydiff:\n                    continue\n                site_tree[key] = value.difference(valdiff)\n\n        clean_site_tree = {}\n        for page, urls in site_tree.items(): # move empty catalogs into a separate 
list\n            if page != '':\n                clean_site_tree[page] = urls\n            if not urls:\n                clean_site_tree['NO CATEGORY'] = page\n        return clean_site_tree\n\n    def create_site_structure_cat_in_url(self):\n        urls_set = set()\n\n        for sitemap in self._sitemap:\n            for page, urls in sitemap.items():\n                name = re.sub('.*{}'.format(self._domain), '', page)\n                urls_set.add(name)\n                for url in urls:\n                    if url:\n                        href = re.sub('.*{}'.format(self._domain), '', url)\n                        urls_set.add(href)\n\n        splited_url_list = []\n\n        for url in urls_set: # split into a list of lists\n            parts = url.split('/')\n            cleaned_parts = []\n            for part in parts:\n                if part and part != '/':\n                    cleaned_parts.append(part)\n            if cleaned_parts:\n                splited_url_list.append(cleaned_parts)\n\n        # build the structure\n        site_tree = {}\n        for urls in splited_url_list: # create dict with key url[0]\n            site_tree[urls[0]] = []\n\n        for page, value in site_tree.items():\n            for url in splited_url_list:\n                if page in url:\n                    href = '/'.join(url)\n                    site_tree[page].append(href)\n        return site_tree\n\n\n#\n# import json\n# with open('sitemap.txt', 'r') as f:\n#     json_sitemap = json.loads(f.read())\n#\n#\n# lk = LinkHandler('http://sibdst.ru', json_sitemap)\n# print()\n# for key, value in lk.create_site_structure_sets().items():\n#     print(key)\n#     for val in value:\n#         print('    ', val)\n\n# print(lk.create_url_pattern(example_categories))\n# print(lk.create_url_pattern(example_records))\n# lk.create_site_structure(example_categories, example_records)\n# print(lk._datas)\n# lk.non_indefied_url(['site/view/12', 'site/view/15', 'site/view/13'])","sub_path":"LinkHandler.py","file_name":"LinkHandler.py","file_ext":"py","file_size_in_byte":6457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"224138678","text":"\"\"\"\nAuthors:\n    Rodrigo Martin Sziller\n    Lior Mahfoda\n\"\"\"\n\nimport socket\nimport time\nfrom random import randint\nimport math\n\n\ndef sxor(s1, s2):\n    # an empty value acts as the identity element for the running XOR checksum\n    if not s1:\n        return s2\n    if not s2:\n        return s1\n    return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(s1, s2))\n\n\nudpIp = \"127.0.0.1\"\nudpPort = 12321\npacketSize = 100\nsequenceLen = 4\nmsg = \"Make it totally clear that this gun has a right end and a wrong end.\\n\" \\\n      \"Make it totally clear to anyone standing at the wrong end that things are going badly for them.\\n\" \\\n      \"If that means sticking all sort of spikes and prongs and blackened bits all over it then so be it.\\n\" \\\n      \"This is not a gun for hanging over the fireplace or sticking in the umbrella stand,\\n\" \\\n      \"it is a gun for going out and making people miserable with.\"\n\nprint (\"UDP target IP:\", udpIp)\nprint (\"UDP target port:\", udpPort)\n\nsock = socket.socket(socket.AF_INET, # Internet\n                     socket.SOCK_DGRAM) # UDP\ndata_size = (packetSize - sequenceLen - 1)\npackets_len = int(math.ceil(len(msg) / float(data_size)))\nwhile True:\n    d = randint(2, packets_len)\n    sock.sendto(str(d), (udpIp, udpPort))\n    print (\"\\nclient sent random number d to server\")\n    print (\"d = \", d)\n    e_count = int(math.ceil(packets_len / float(d)))\n    e = ''\n    len1 = packets_len + e_count\n    e_i = 1\n    msg_i = 0\n    print (\"The client begins to send the message to the server\")\n    for i in reversed(range(len1)):\n        if (i == 0 or e_i == d + 1):\n            sock.sendto((\"{0:0\" + str(sequenceLen) + \"d}\").format(i) + \"#\" + e, (udpIp, udpPort))\n            e = ''\n            e_i = 1\n        else:\n            _msg = msg[msg_i:msg_i + (data_size)]\n            if (e_i != d and i != 1):\n                sock.sendto((\"{0:0\" + str(sequenceLen) + \"d}\").format(i) + \"#\" + _msg, (udpIp, udpPort))\n                
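# advance the payload offset; e accumulates the XOR checksum for the current packet group\n                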
msg_i += data_size\n                e = sxor(e, _msg)\n                e_i += 1\n\n            print (\"\\nThe client sent packet #\", i)\n            time.sleep(1)\n    print (\"\\nThe client has finished sending the message to the server.\")\n    print (\"\\nThe client waits for the server to send back the message lines.\")\n    for i in range(len(msg.split('\\n'))):\n        data, addr = sock.recvfrom(10000)\n        print (\"\\nThe client received line:\", data)\n    print (\"\\nThe client received all the data lines from the server\")\n    print (\"\\nThe client waits 3 seconds...\")\n    time.sleep(3)\n","sub_path":"task_2/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"120536626","text":"from django.shortcuts import render\nfrom search.models import Fund\nfrom portfolio.models import Holding, FundInfo, Security, Transaction\nfrom search.forms import forms\nimport locale, os\n\n# Create your views here.\ndef index(request):\n\n    dataInjection = {}\n\n    if(request.method == 'POST'):\n        doesFundExist = True\n        queryString = request.POST['searchQuery']\n        try:\n            targetedFund = Fund.objects.get(fund_name=queryString)\n        except Fund.DoesNotExist:\n            print(\"I went inside exception\")\n            doesFundExist = False\n\n        if(doesFundExist):\n            currQ = targetedFund.last_update\n            lastQ = getLastQuarter(currQ)\n            local_cik = targetedFund.cik_num\n            locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' )\n            \n            local_info = FundInfo.objects.get(cik=local_cik, year_q=currQ) \n            local_aum = locale.currency(local_info.aum, symbol=True, grouping=True)[:-3]\n            thisQuarterHoldings = Holding.objects.filter(cik=local_cik, year_q=currQ)\n            lastQuarterHoldings = Holding.objects.filter(cik=local_cik, year_q=lastQ)\n\n            dataInjection = {\n                'fundExists': doesFundExist,\n                'numHoldings': local_info.num_holdings,\n                'aum': local_info.aum,\n                'fundName': targetedFund.fund_name,\n                'transactions': twoQuarters(thisQuarterHoldings, lastQuarterHoldings, local_info.aum)\n            }\n        else:\n            dataInjection = {\n                'fundExists': doesFundExist,\n                'fundName': queryString,\n            }\n        \n\n    return render(request, 'portfolio/fundDisplay.html', context=dataInjection)\n\ndef display(request, cik=102):\n    doesFundExist = True\n    doTransactionsExist = False\n    transactionStatusChar = None\n    try:\n        targetedFund = Fund.objects.get(cik_num=cik)\n        transactionStatusChar = targetedFund.transactions\n        if transactionStatusChar == 't':\n            doTransactionsExist = True\n    except Fund.DoesNotExist:\n        doesFundExist = False\n\n    if doesFundExist and doTransactionsExist:\n        currQ = targetedFund.last_update\n        freshFund = newerQuarter(currQ, os.environ.get('curr_Q'))\n\n        local_cik = targetedFund.cik_num\n        locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' )\n        try:\n            local_info = FundInfo.objects.get(cik=local_cik, year_q=currQ)\n            local_aum = locale.currency(local_info.aum, symbol=True, grouping=True)[:-3]\n            aum = local_info.aum\n            numHoldings = local_info.num_holdings \n        except FundInfo.DoesNotExist:\n            aum = -1\n            numHoldings = \"??\"\n        \n        buy_list = Transaction.objects.filter(cik=local_cik, year_q=currQ, action_type='buy')\n        sell_list = Transaction.objects.filter(cik=local_cik, year_q=currQ, action_type='sell')\n        hold_list = Transaction.objects.filter(cik=local_cik, year_q=currQ, action_type='hold')\n\n        dataInjection = {\n            'fundExists': doesFundExist,\n            'numHoldings': numHoldings,\n            'aum': aum,\n            'fundName': targetedFund.fund_name,\n            'updateDate': str(currQ[4:6] + \" \" + currQ[0:4]),\n            'freshAlert': freshFund,\n            'buy_list': buy_list if len(buy_list) else 
None,\n            'sell_list': sell_list if len(sell_list) else None,\n            'hold_list': hold_list if len(hold_list) else None,\n        }\n    else:\n        if transactionStatusChar == 'x' or transactionStatusChar == 'n':\n            currQ = os.environ.get('curr_Q')\n            \n            dataInjection = {\n                'fundExists': True,\n                'numHoldings': 0,\n                'aum': 0,\n                'updateDate': str(currQ[4:6] + \" \" + currQ[0:4]),\n                'fundName': targetedFund.fund_name,\n                'newAlert': True\n            }\n\n        elif transactionStatusChar == 'd':\n            dataInjection = {\n                'fundExists': True,\n                'fundName': targetedFund.fund_name,\n                'oldAlert': True\n            }\n        \n        elif transactionStatusChar == 'e':\n            currQ = os.environ.get('curr_Q')\n\n            dataInjection = {\n                'fundExists': True,\n                'fundName': targetedFund.fund_name,\n                'emptyAlert': True,\n                'updateDate': str(currQ[4:6] + \" \" + currQ[0:4]),\n                'numHoldings': 0,\n                'aum': 0\n            }\n\n        else:\n            dataInjection = {\n                'fundExists': doesFundExist,\n                'failedCIK': cik\n            }\n        \n\n    return render(request, 'portfolio/fundDisplay.html', context=dataInjection)\n\ndef twoQuarters(newQ, oldQ, aum):\n    transactionList = []\n    \n\n    for newHold in newQ:\n        local_cusip = newHold.cusip\n        newHoldingFlag = False\n\n        try:\n            oldHolding = oldQ.get(cusip=local_cusip)\n        except Holding.DoesNotExist:\n            newHoldingFlag = True\n\n        if not newHoldingFlag:\n            transactionList.append(transactionFromTwoHoldings(newHold, oldHolding, aum))\n        else:\n            transactionList.append(transactionFromTwoHoldings(newHold, None, aum))\n\n    for oldHold in oldQ:\n        local_cusip = oldHold.cusip\n\n        try:\n            newQ.get(cusip=local_cusip)\n        except Holding.DoesNotExist:\n            transactionList.append(transactionFromTwoHoldings(None, oldHold, aum))\n\n    return transactionList\n\n\n\ndef transactionFromTwoHoldings(newHold, oldHold, aum):\n    \n    if newHold is not None:\n        weight = (newHold.share_value / aum) * 100\n        companyData = getCompanyInfo(newHold.cusip)\n        ticker = companyData[0]\n        company_name = companyData[1]\n        total = newHold.num_shares\n        totalVal = newHold.share_value\n\n        if oldHold is not None:\n            # Add/Decrease/Unchanged\n            delta = total - oldHold.num_shares\n            deltaVal = totalVal - oldHold.share_value\n\n            if delta > 0:\n                # Add\n                actionType = \"buy\"\n                movement = \"add\"\n                percent = round((delta/oldHold.num_shares) * 100, 1)\n\n            elif delta < 0:\n                # Decrease\n                delta = abs(delta)\n                actionType = \"sell\"\n                movement = \"sold\"\n                percent = round((delta/oldHold.num_shares) * 100, 1)\n\n            else:\n                # Unchanged\n                actionType = \"hold\"\n                movement = \"none\"\n                percent = 0\n        else:\n            # New\n            delta = total\n            deltaVal = totalVal\n            actionType = \"buy\"\n            movement = \"new\"\n            percent = 0 \n\n    elif newHold is None and oldHold is not None:\n        # Drop\n        actionType = \"sell\"\n        movement = \"drop\"\n        delta = oldHold.num_shares\n        weight = 0\n        companyData = getCompanyInfo(oldHold.cusip)\n        ticker = companyData[0]\n        company_name = companyData[1]\n        deltaVal = oldHold.share_value\n        total = 0\n        totalVal = 0\n        percent = 100\n    \n    transaction = {\n        'weight': str(round(weight, 1)), # Weight of holding in portfolio, less than 100\n        'action_type': actionType, # \"buy\", \"sell\", or \"hold\"\n        'movement': movement, # \"new\", \"add\", \"drop\", \"sold\", \"none\"\n        'ticker': ticker, # Ticker Symbol of Holding\n        'name': company_name, # Company Name of Holding\n        'delta': delta, # Shares bought or sold\n        'total': total, # Total shares in Holding\n        'val_delta': locale.currency(deltaVal, symbol=True, grouping=True)[:-3], # Change in valuation\n        'total_val': locale.currency(totalVal, symbol=True, grouping=True)[:-3], # Valuation of Holding\n        'percent': percent\n    }\n\n    
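# debug trace of each assembled holdings row\n    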
print(transaction)\n\n    return transaction\n\ndef getCompanyInfo(try_cusip):\n    try:\n        companyInfo = Security.objects.get(cusip=try_cusip)\n    except Security.DoesNotExist:\n        companyInfo = None\n    \n    if companyInfo is not None:\n        companyData = (companyInfo.ticker, companyInfo.name)\n    else:\n        companyData = (\"N/A\", \"Unknown\")\n\n    return companyData\n\n\n# Given a year quarter string (YYYYQQ), returns prior year quarter\ndef getLastQuarter(currentQuarter):\n    q = int(currentQuarter[5])\n    if q == 1:\n        yr = int(currentQuarter[0:4]) - 1\n        return (str(yr) + \"Q4\")\n    else:\n        return(currentQuarter[0:4] + \"Q\" + str(q-1))\n\n\n# Between two quarter+year strings, check which one is more recent\n# \"Is q1 newer than q2?\" \ndef newerQuarter(q1, q2):\n    if q1 == q2:\n        return False\n\n    y1 = int(q1[0:4])\n    y2 = int(q2[0:4])\n\n    if y2 > y1:\n        return False\n    elif y2 < y1:\n        return True\n\n    if int(q1[5]) > int(q2[5]):\n        return True\n    \n    return False","sub_path":"portfolio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"527173154","text":"import smart_thruster as thrusters\nimport socket,threading,os,selectors,time,math,sys\n\nHOST = '127.0.0.2'\nPORT = 50008\nHEADERSIZE = 5 #in bytes\nMESSAGESIZE = 8*2 #in bytes\nBUFSIZE = HEADERSIZE+MESSAGESIZE\nRECV = 125\nSEND = 255\n\nN_motors = 8\nport = '/dev/ttyS3'\n\n#print(\"Starting Thrusters...\")\n#m = thrusters.start(N_motors,port)\n\ndef iToC(inp):# unused\n    size = len(inp)  # number of 16-bit values to split into byte pairs\n    out = [0 for x in range(size*2)]\n    for i in range(size):\n        out[2*i]=inp[i]>>8\n        out[2*i+1]=inp[i]&0xff\n    return out\n\ndef cToI(inp):# unused\n    size = len(inp)//2  # number of byte pairs to merge back into 16-bit values\n    out = [0 for x in range(size)]\n    for i in range(size):\n        out[i]=(inp[2*i]<<8)+(inp[2*i+1])\n    return out\n\nclass Data:\n    def __init__(self):\n        self.target_rpm=[0 for x in range(N_motors+1)]\n        self.current_rpm=[0 for x in range(N_motors+1)]\n        self.data_string=\"\"\n        self.str_length=0\n\n    def get_tRPMs(self):\n        return self.target_rpm\n\n    def get_cRPMs(self):\n        return self.current_rpm\n\n    def set_tRPMs(self,id,val):\n        self.target_rpm[id]=val\n\n    def set_all_tRPMs(self,val):\n        for id in range(N_motors+1):\n            self.target_rpm[id]=val\n\n    def set_cRPMS(self,id,val):\n        self.current_rpm[id]=val\n\n    def set_str(self,val):\n        self.data_string=val\n        self.str_length=len(val)\n\ndef head(d,header): #looks at header\n    if(header[0]==1 and header[2]==1):\n        return 0\n    if(header[4]!=MESSAGESIZE):\n        print(\"ERROR: Data Message Size Mismatch\")\n        print(header)\n        return -1 #-1 error code\n    if(header[0]==SEND):\n        return 1 #1 command received, read data\n    elif(header[0]==RECV):\n        return 2 #2 send received, send data ( header[1] is return message size )\n    if(header[2]==RECV):\n        d.set_all_tRPMs(0)\n        return 0\n\ndef msg(d,arr): #looks at data array\n    for i in range(int(MESSAGESIZE/2)):\n        temp=(arr[2*i]<<8)+(arr[2*i+1])-5000\n        d.set_tRPMs(i,temp)\n\ndef main_thread():\n    d = Data()\n    while True: #replace with while(m.running):\n        with socket.socket(socket.AF_INET,socket.SOCK_STREAM) as s:\n            s.bind((HOST,PORT))\n            s.listen(1)\n            conn, addr = s.accept()\n            with conn:\n                print('Connected by',addr)\n                while True:\n                    data = conn.recv(BUFSIZE)\n                    if not data:\n                        break\n                    header = list(data)[:HEADERSIZE]\n                    print(list(data))\n                    hr = head(d,header)\n                    print(hr)\n                    if(hr==1): #recv instructions\n                        message = list(data)[HEADERSIZE:]\n                        msg(d,message)\n                        print(message)\n                        print(d.target_rpm)\n                    if(hr==2): #reply\n                        reply = \"Hi, server says hi after receiving from client\"  # named 'reply' so it does not shadow the socket 's'\n                        print(\"sending: %s\" % reply)\n                        
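# send the textual reply back over the accepted connection\n                        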
conn.sendall(reply.encode())\n                    elif(hr==-1 or hr==0):\n                        sys.exit()\n\nif(__name__==\"__main__\"):\n    thread = threading.Thread(target=main_thread)\n    thread.start()\n","sub_path":"transferserver.py","file_name":"transferserver.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"615230533","text":"#!/usr/bin/env python3\nimport math\n\na, b = map(int,input().split())\ndiff = a - b\nif diff < 0:\n    print(0)\nelif diff == 0:\n    print(\"infinity\")\nelse:\n    cnt = 0\n    for i in range(1, 1 + int(math.sqrt(diff))):\n        if diff % i == 0 and i > b:\n            cnt += 1\n        if diff % i == 0 and diff // i > b and i * i != diff:\n            cnt += 1\n    print(cnt)\n","sub_path":"495b.py","file_name":"495b.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"366310937","text":"import sqlite3 as lite\nimport os\n\npath = os.path.join(os.path.dirname(__file__), \"test.db\")\ncon = lite.connect(path)\n\nwith con:\n# To get the rows back as dictionaries, set the row_factory\n# attribute of the Connection object to sqlite3.Row.\n    con.row_factory = lite.Row\n\n    cur = con.cursor()\n    cur.execute(\"SELECT * FROM Cars\")\n\n    rows = cur.fetchall()\n# The returned rows behave like dictionaries, so cell values can be read by column name.\n    for row in rows:\n        print (\"%s %s %s\" % (row[\"Id\"], row[\"Name\"], row[\"Price\"]))\n\n\n# Here we fetch the data in dictionary form.\n","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"502216275","text":"import boto3\nimport math\nimport dateutil.parser\nimport datetime\nimport time\nimport os\nimport logging\nimport re\nimport json\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n\n\"\"\" --- Helpers to build responses which match the structure of the necessary dialog actions --- \"\"\"\n\n\ndef get_slots(intent_request):\n    return intent_request['currentIntent']['slots']\n\n\ndef elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):\n    return {\n        'sessionAttributes': session_attributes,\n        'dialogAction': {\n            'type': 'ElicitSlot',\n            'intentName': intent_name,\n            'slots': slots,\n            'slotToElicit': slot_to_elicit,\n            'message': message\n        }\n    }\n\n\ndef close(session_attributes, fulfillment_state, message):\n    response = {\n        'sessionAttributes': session_attributes,\n        'dialogAction': {\n            'type': 'Close',\n            'fulfillmentState': fulfillment_state,\n            'message': message\n        }\n    }\n\n    return response\n\n\ndef delegate(session_attributes, slots):\n    return {\n        'sessionAttributes': session_attributes,\n        'dialogAction': {\n            'type': 'Delegate',\n            'slots': slots\n        }\n    }\n\n\n\"\"\" --- Helper Functions --- \"\"\"\n\n\ndef parse_int(n):\n    try:\n        return int(n)\n    except ValueError:\n        return -1\n\n\ndef build_validation_result(is_valid, violated_slot, message_content):\n    if message_content is None:\n        return {\n            \"isValid\": is_valid,\n            \"violatedSlot\": violated_slot,\n        }\n\n    return {\n        'isValid': is_valid,\n        'violatedSlot': violated_slot,\n        'message': {'contentType': 'PlainText', 'content': message_content}\n    }\n\n\ndef isvalid_date(date):\n    try:\n        dateutil.parser.parse(date)\n        return True\n    except ValueError:\n        return False\n\n\ndef isvalid_city(city):\n    valid_cities = ['new york', 'brooklyn', 'manhattan', 'queens', 'bronx', 'staten island']\n    
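# only New York City locations are supported; the check is case-insensitive\n    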
return city.lower() in valid_cities\n\n\ndef validate_dining_info(cuisine_type, date, dining_time, num_people, email, city):\n cuisine_types = ['chinese', 'american', 'italian', 'indian', 'korean', 'japanese', 'french']\n if cuisine_type is not None and cuisine_type.lower() not in cuisine_types:\n return build_validation_result(False,\n 'Cuisine',\n 'We do not have {}, would you like a different type of cuisine? (Chinese, Italian, etc...)'\n .format(cuisine_type))\n istoday = False\n if date is not None:\n if not isvalid_date(date):\n return build_validation_result(False, 'Date', 'I did not understand that, what date would you like to eat?')\n elif datetime.datetime.strptime(date, '%Y-%m-%d').date() < datetime.date.today():\n return build_validation_result(False, 'Date', 'You can find information from today onwards. What day?')\n elif datetime.datetime.strptime(date, '%Y-%m-%d').date() == datetime.date.today():\n istoday = True\n\n if dining_time is not None:\n if len(dining_time) != 5:\n return build_validation_result(False, 'Time', None)\n hour, minute = dining_time.split(':')\n hour = parse_int(hour)\n minute = parse_int(minute)\n timenow = datetime.datetime.now().time()\n timedining = timenow.replace(hour, minute, 0, 0)\n if math.isnan(hour) or math.isnan(minute):\n return build_validation_result(False, 'Time', 'Sorry, what time?')\n\n if istoday and timedining <= timenow:\n return build_validation_result(False, 'Time', 'Invalid time. What time would you prefer?')\n\n if hour < 10 or hour > 22:\n # Outside of business hours\n return build_validation_result(False, 'Time', 'Restaurants may not open at this time. Can you specify another time?')\n\n if email is not None:\n if not re.match(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\", email):\n return build_validation_result(False, 'Email', 'Sorry, could you please provide a valid email address?')\n\n if num_people is not None:\n if parse_int(num_people) < 1:\n return build_validation_result(False, 'NumberOfPeople', 'Sorry, how many?')\n\n if city is not None:\n if not isvalid_city(city):\n return build_validation_result(False, 'Location',\n 'Sorry, location {} is not found, where would you like to dine in?'.format(city))\n\n return build_validation_result(True, None, None)\n\n\n\"\"\" --- Functions that control the bot's behavior --- \"\"\"\n\n\ndef suggest_dining(intent_request):\n\n cuisine_type = get_slots(intent_request)[\"Cuisine\"]\n location = get_slots(intent_request)[\"Location\"]\n date = get_slots(intent_request)[\"Date\"]\n dining_time = get_slots(intent_request)[\"Time\"]\n num_of_people = get_slots(intent_request)[\"NumberOfPeople\"]\n email = get_slots(intent_request)[\"Email\"]\n source = intent_request['invocationSource']\n\n if source == 'DialogCodeHook':\n slots = get_slots(intent_request)\n\n validation_result = validate_dining_info(cuisine_type, date, dining_time, num_of_people, email, location)\n if not validation_result['isValid']:\n slots[validation_result['violatedSlot']] = None\n return elicit_slot(intent_request['sessionAttributes'],\n intent_request['currentIntent']['name'],\n slots,\n validation_result['violatedSlot'],\n validation_result['message'])\n\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\n output_session_attributes['location'] = location\n output_session_attributes['cuisine'] = cuisine_type\n output_session_attributes['date'] = date\n output_session_attributes['time'] = dining_time\n 
output_session_attributes['numberOfPeople'] = num_of_people\n output_session_attributes['email'] = email\n return delegate(output_session_attributes, get_slots(intent_request))\n\n if source == 'FulfillmentCodeHook':\n sqs = boto3.resource('sqs')\n queue = sqs.Queue('https://sqs.us-east-1.amazonaws.com/795907756437/dining_queue')\n msgs = json.dumps(intent_request['sessionAttributes'])\n response = queue.send_message(MessageBody=msgs)\n\n return close(intent_request['sessionAttributes'],\n 'Fulfilled',\n {'contentType': 'PlainText',\n 'content': 'You\\'re all set. Expect my recommendations shortly! Have a good day.'})\n\n\n\"\"\" --- Intents --- \"\"\"\n\n\ndef dispatch(intent_request):\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'DiningSuggestionsIntent':\n return suggest_dining(intent_request)\n\n raise Exception('Intent with name ' + intent_name + ' not supported')\n\n\n\"\"\" --- Main handler --- \"\"\"\n\n\ndef lambda_handler(event, context):\n os.environ['TZ'] = 'America/New_York'\n time.tzset()\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)\n","sub_path":"assignment3/backend/dining_suggestion.py","file_name":"dining_suggestion.py","file_ext":"py","file_size_in_byte":7455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"340818350","text":"# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\nimport time\nfrom typing import TYPE_CHECKING\n\nfrom azure.core.configuration import Configuration\nfrom azure.core.pipeline import Pipeline\nfrom azure.core.pipeline.policies import (\n NetworkTraceLoggingPolicy,\n RetryPolicy,\n ProxyPolicy,\n UserAgentPolicy,\n DistributedTracingPolicy,\n HttpLoggingPolicy,\n)\n\nfrom .aad_client_base import AadClientBase\nfrom .user_agent import USER_AGENT\n\nif TYPE_CHECKING:\n # pylint:disable=unused-import,ungrouped-imports\n from typing import Any, Iterable, List, Optional, Union\n from azure.core.credentials import AccessToken\n from azure.core.pipeline.policies import HTTPPolicy, SansIOHTTPPolicy\n from azure.core.pipeline.transport import HttpTransport\n from .._internal import AadClientCertificate\n\n Policy = Union[HTTPPolicy, SansIOHTTPPolicy]\n\n\nclass AadClient(AadClientBase):\n def obtain_token_by_authorization_code(self, scopes, code, redirect_uri, client_secret=None, **kwargs):\n # type: (Iterable[str], str, str, Optional[str], **Any) -> AccessToken\n request = self._get_auth_code_request(\n scopes=scopes, code=code, redirect_uri=redirect_uri, client_secret=client_secret\n )\n now = int(time.time())\n response = self._pipeline.run(request, stream=False, retry_on_methods=self._POST, **kwargs)\n return self._process_response(response, now)\n\n def obtain_token_by_client_certificate(self, scopes, certificate, **kwargs):\n # type: (Iterable[str], AadClientCertificate, **Any) -> AccessToken\n request = self._get_client_certificate_request(scopes, certificate)\n now = int(time.time())\n response = self._pipeline.run(request, stream=False, retry_on_methods=self._POST, **kwargs)\n return self._process_response(response, now)\n\n def obtain_token_by_client_secret(self, scopes, secret, **kwargs):\n # type: (Iterable[str], str, **Any) -> AccessToken\n 
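# build the request, record the send time, then run it through the pipeline (POST retries enabled)\n        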
request = self._get_client_secret_request(scopes, secret)\n now = int(time.time())\n response = self._pipeline.run(request, stream=False, retry_on_methods=self._POST, **kwargs)\n return self._process_response(response, now)\n\n def obtain_token_by_refresh_token(self, scopes, refresh_token, **kwargs):\n # type: (Iterable[str], str, **Any) -> AccessToken\n request = self._get_refresh_token_request(scopes, refresh_token)\n now = int(time.time())\n response = self._pipeline.run(request, stream=False, retry_on_methods=self._POST, **kwargs)\n return self._process_response(response, now)\n\n # pylint:disable=no-self-use\n def _build_pipeline(self, config=None, policies=None, transport=None, **kwargs):\n # type: (Optional[Configuration], Optional[List[Policy]], Optional[HttpTransport], **Any) -> Pipeline\n config = config or _create_config(**kwargs)\n policies = policies or [\n config.user_agent_policy,\n config.proxy_policy,\n config.retry_policy,\n config.logging_policy,\n DistributedTracingPolicy(**kwargs),\n HttpLoggingPolicy(**kwargs),\n ]\n if not transport:\n from azure.core.pipeline.transport import RequestsTransport\n\n transport = RequestsTransport(**kwargs)\n\n return Pipeline(transport=transport, policies=policies)\n\n\ndef _create_config(**kwargs):\n # type: (**Any) -> Configuration\n config = Configuration(**kwargs)\n config.logging_policy = NetworkTraceLoggingPolicy(**kwargs)\n config.retry_policy = RetryPolicy(**kwargs)\n config.proxy_policy = ProxyPolicy(**kwargs)\n config.user_agent_policy = UserAgentPolicy(base_user_agent=USER_AGENT, **kwargs)\n return config\n","sub_path":"sdk/identity/azure-identity/azure/identity/_internal/aad_client.py","file_name":"aad_client.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"128816542","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDisplay images in a rosbag on a specific image topic\n\"\"\"\nimport os\nimport argparse\n\nimport cv2\n\nimport rosbag\nfrom cv_bridge import CvBridge\n\n\ndef print_image_info(cv_img):\n h, w = cv_img.shape[:2]\n dtype = cv_img.dtype\n lenshape = len(cv_img.shape)\n if lenshape == 2:\n channels = 1\n else:\n channels = cv_img.shape[2]\n print('Image h {} w {} data type {} channels {}'.format(h, w, dtype, channels))\n\n\ndef main():\n \"\"\"Extract a folder of images from a rosbag.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Display images in a ROS bag.\")\n parser.add_argument(\"bag_file\", help=\"Input ROS bag.\")\n parser.add_argument(\"--image_topic\", help=\"Display images from which topic.\",\n default='/cam0/image_raw')\n\n args = parser.parse_args()\n\n in_bag = rosbag.Bag(args.bag_file, \"r\")\n topic_list = in_bag.get_type_and_topic_info()[1].keys()\n print('Topics {} are in {}'.format(topic_list, args.bag_file))\n\n print('Press q key to quit.')\n bridge = CvBridge()\n count = 0\n first_image_time = None\n last_image_time = None\n for _, msg, t in in_bag.read_messages(topics=[args.image_topic]):\n cv_img = bridge.imgmsg_to_cv2(msg, desired_encoding=\"passthrough\")\n if count == 0:\n print('Video frame info:')\n print_image_info(cv_img)\n first_image_time = t\n last_image_time = t\n cv2.imshow('Frame', cv_img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n count += 1\n cv2.destroyAllWindows()\n in_bag.close()\n print(\"Displayed {} images of {} on topic {} first image time {} last image time {}\".\n format(count, args.bag_file, args.image_topic, first_image_time, 
last_image_time))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"python/play_images_in_rosbag.py","file_name":"play_images_in_rosbag.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"394519625","text":"# -*- coding: utf-8 -*-\n\nfrom discord.ext import commands\nimport discord\nfrom ..tools import Cog\nimport random\nimport logging\nimport asyncio\n\nfrom ..tools import get_wallet\nfrom ..tools import get_guild_config\nfrom ..tools import update_wallet\nfrom ..tools import TransactionError\nfrom ..tools import BotError\nfrom ..tools import CoinConverter\n\nlog = logging.getLogger(__name__)\n\n\nclass Money(Cog):\n    \"\"\"The description for Money goes here.\"\"\"\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        self.lock = False\n        self.money_cooldown = []\n\n    @commands.command()\n    @commands.is_owner()\n    async def write(self, ctx, target: discord.Member, amount:CoinConverter):\n        \"\"\"sets the amount that a user has. owner only.\"\"\"\n        old = await get_wallet(self.bot, target.id)\n        await update_wallet(self.bot, target.id, amount)\n        await ctx.send(f\"set {target}'s wallet to {amount}, was {old}\")\n\n    @commands.command()\n    async def wallet(self, ctx:commands.Context, target:discord.Member=None):\n        if target is None:\n            target = ctx.author\n        wallet = await get_wallet(self.bot,target.id)\n        await ctx.send(f\"wallet for {target} is {wallet:.2f}\")\n\n\n    @commands.command()\n    async def transfer(self, ctx, target:discord.Member, amount: CoinConverter):\n        \"\"\"transfers an amount of money from your wallet to the target's wallet.\"\"\"\n        author_wallet = await get_wallet(self.bot, ctx.author.id)\n        target_wallet = await get_wallet(self.bot, target.id)\n        try:\n            assert author_wallet > amount\n        except AssertionError:\n            raise TransactionError(\"you don't have enough funds for that\")\n        await update_wallet(self.bot, ctx.author.id, author_wallet-amount)\n        await update_wallet(self.bot, target.id, target_wallet+amount)\n        # TODO: make a modify_wallet func that takes a float and applies that value to the target_wallet when you do that also use it on line #67\n        await ctx.send(f\"sent {amount} to {target.display_name}\")\n\n\n    async def on_message(self, message:discord.Message):\n        try:\n            gcfg = await get_guild_config(self.bot, message.guild.id)\n            assert gcfg['money'] == True\n        except (AssertionError, AttributeError):\n            # not in guild\n            return\n        if message.author.id in self.money_cooldown:\n            # user recently got money\n            return\n        chance = random.random() # get this message's percent chance\n        if chance < self.bot.config.money['CHANCE']:\n            try:\n                old = await get_wallet(self.bot, message.author.id)\n            except BotError:\n                return\n            log.info(f'gave {message.author} money')\n            if not gcfg['hide_coins']:\n                await message.add_reaction(self.bot.config.money['REACTION'])\n            self.money_cooldown.append(message.author.id)\n            await update_wallet(self.bot, message.author.id, old + self.bot.config.money['PER_MESSAGE'])\n            await asyncio.sleep(5)\n            if not gcfg['hide_coins']:\n                await message.remove_reaction(self.bot.config.money['REACTION'], self.bot.user)\n            await asyncio.sleep(300)\n            self.money_cooldown.remove(message.author.id)\n        return\n\n\n    @commands.command()\n    async def top(self, ctx):\n        \"\"\"returns a set of members and their wallets, based on who has the most\"\"\"\n        ret = \"\"\n        members = await self.bot.pool.fetch(\"\"\"SELECT * FROM bank ORDER BY amount DESC LIMIT 5\"\"\")\n        # list 
constructor\n members = [(self.bot.get_user(r['owner']).mention, r['amount']) for r in members]\n c = 1\n for i in members:\n ret += f'{c}: {i[0]} has {i[1]}\\n'\n c += 1\n emb = discord.Embed()\n emb.description = ret\n await ctx.send(embed=emb)\n\n\ndef setup(bot):\n bot.add_cog(Money(bot))\n","sub_path":"alexBot/cogs/money.py","file_name":"money.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"83861076","text":"# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration\n\nfrom AthenaConfiguration.ComponentAccumulator import ComponentAccumulator\nfrom AthenaConfiguration.ComponentFactory import CompFactory\nfrom JetTagTools.NewLikelihoodToolConfig import NewLikelihoodToolCfg\n\n# import the SVTag configurable\nAnalysis__SVTag=CompFactory.Analysis.SVTag\n\ndef SV1TagCfg( flags, name = 'SV1Tag', scheme = '', useBTagFlagsDefaults = True, **options ):\n \"\"\"Sets up a SV1Tag tool and returns it.\n\n The following options have BTaggingFlags defaults:\n\n Runmodus default: BTagging.RunModus\n referenceType default: BTagging.ReferenceType\n SVAlgType default: \"SV1\"\n jetCollectionList default: BTaggingFlags.Jets\n LikelihoodTool default: None\n SecVxFinderName default: \"SV1\"\n UseCHypo default: True\n\n input: name: The name of the tool (should be unique).\n useBTagFlagsDefaults : Whether to use BTaggingFlags defaults for options that are not specified.\n **options: Python dictionary with options for the tool.\n output: The actual tool.\"\"\"\n acc = ComponentAccumulator()\n options['name'] = name\n options['xAODBaseName'] = 'SV1'\n if useBTagFlagsDefaults:\n likelihood = acc.popToolsAndMerge(NewLikelihoodToolCfg(flags, 'SV1NewLikelihoodTool', 'SV1', scheme))\n defaults = { 'Runmodus' : flags.BTagging.RunModus,\n 'referenceType' : flags.BTagging.ReferenceType,\n 'jetPtMinRef' : flags.BTagging.JetPtMinRef,\n 'SVAlgType' : 'SV1',\n 'jetCollectionList' : [], #used only in reference mode\n 'SecVxFinderName' : 'SV1',\n 'UseCHypo' : True,\n 'LikelihoodTool' : likelihood }\n for option in defaults:\n options.setdefault(option, defaults[option])\n acc.setPrivateTools(Analysis__SVTag(**options))\n\n return acc\n\n","sub_path":"PhysicsAnalysis/JetTagging/JetTagTools/python/SV1TagConfig.py","file_name":"SV1TagConfig.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"395405857","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n################################################################################\n#\n# Copyright (c) 2017 Baidu.com, Inc. 
All Rights Reserved\n#\n################################################################################\n\"\"\"\nAuthors: Jiahui Liu(2505774110@qq.com)\nDate: 2017/11/17 17:27:06\n\nImplements the deep neural network cat-recognition example with Python and numpy; the key steps are:\n1. Load and preprocess the data: load_data()\n2. Initialize the model parameters (Parameters)\n3. Loop:\n    a)\tcompute the cost (Cost)\n    b)\tcompute the gradients (Gradient)\n    c)\tupdate the parameters (Gradient Descent)\n4. Use the model to make predictions\n5. Analyze the prediction results\n6. Define a model function that runs the steps above in order\n\"\"\"\n\nimport matplotlib\n\nmatplotlib.use('Agg')\nimport dnn_app_utils_v2\n\n\ndef main():\n    \"\"\"\n    Define the network structure, train, predict, check the accuracy and print the learning curve\n    Args:\n    Return:\n    \"\"\"\n    # load the data\n    train_x_orig, train_y, test_x, test_y, classes = dnn_app_utils_v2.load_data()\n\n    # preprocess the data\n    train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T\n    test_x_flatten = test_x.reshape(test_x.shape[0], -1).T\n    # normalize\n    train_x = train_x_flatten / 255.\n    test_x = test_x_flatten / 255.\n\n    # define the network structure\n    layers_dims = [12288, 20, 7, 5, 1]\n\n    # compute the parameters\n    parameters = dnn_app_utils_v2.L_layer_model(train_x, train_y, layers_dims,\n                                                num_iterations=2500, print_cost=True)\n\n    # report the accuracy\n    print('Train accuracy:')\n    pred_train = dnn_app_utils_v2.predict(train_x, train_y, parameters)\n    print('Test accuracy:')\n    pred_test = dnn_app_utils_v2.predict(test_x, test_y, parameters)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"lesson5/train_with_numpy.py","file_name":"train_with_numpy.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"287043576","text":"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2023 Ramon van der Winkel.\n# All rights reserved.\n# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    \"\"\" Migration class for this part of the application \"\"\"\n\n    # enforce ordering\n    dependencies = [\n        ('Geo', 'm0001_initial_copy'),\n        ('Competitie', 'm0106_geo_1'),\n    ]\n\n    # migration functions\n    operations = [\n        migrations.RemoveField(\n            model_name='kampioenschap',\n            name='rayon',\n        ),\n        migrations.RenameField(\n            model_name='kampioenschap',\n            old_name='geo_rayon',\n            new_name='rayon',\n        ),\n        migrations.AlterField(\n            model_name='kampioenschap',\n            name='rayon',\n            field=models.ForeignKey(blank=True, null=True, on_delete=models.deletion.PROTECT, to='Geo.rayon'),\n        ),\n\n        migrations.RemoveField(\n            model_name='regiocompetitie',\n            name='regio',\n        ),\n        migrations.RenameField(\n            model_name='regiocompetitie',\n            old_name='geo_regio',\n            new_name='regio',\n        ),\n        migrations.AlterField(\n            model_name='regiocompetitie',\n            name='regio',\n            field=models.ForeignKey(on_delete=models.deletion.PROTECT, to='Geo.regio'),\n        ),\n\n        migrations.RemoveField(\n            model_name='regiocompetitieronde',\n            name='cluster',\n        ),\n        migrations.RenameField(\n            model_name='regiocompetitieronde',\n            old_name='geo_cluster',\n            new_name='cluster',\n        ),\n        migrations.AlterField(\n            model_name='regiocompetitieronde',\n            name='cluster',\n            field=models.ForeignKey(blank=True, null=True, on_delete=models.deletion.PROTECT, to='Geo.cluster'),\n        ),\n    ]\n\n# end of file\n","sub_path":"Competitie/migrations/m0107_geo_2.py","file_name":"m0107_geo_2.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"48470098","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n    Sort an Array\n\"\"\"\nfrom typing import List\n\n\n# counting sort, 208 ms\nclass Solution:\n    def sortArray(self, nums: List[int]) -> List[int]:\n        min_v = 
min(nums)\n max_v = max(nums)\n key = [0 for i in range(max_v - min_v + 1)]\n for n in nums:\n key[n - min_v] += 1\n\n result = []\n for i, k in enumerate(key):\n if k:\n result += [i + min_v] * k\n return result","sub_path":"src/leetcode/python3/leetcode912.py","file_name":"leetcode912.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"379919932","text":"from django.db import models\n\nfrom django_backblaze_b2 import LoggedInStorage, PublicStorage, StaffStorage\n\n\nclass Files(models.Model):\n b2StorageFile = models.FileField(name=\"b2StorageFile\", upload_to=\"uploads\", verbose_name=\"B2 Storage File\")\n publicFile = models.FileField(\n name=\"publicFile\", upload_to=\"uploads\", verbose_name=\"Public File\", storage=PublicStorage, # type: ignore\n )\n loggedInFile = models.FileField(\n name=\"loggedInFile\",\n upload_to=\"uploads\",\n verbose_name=\"Logged-In File\",\n storage=LoggedInStorage, # type: ignore\n )\n staffFile = models.FileField(\n name=\"staffFile\", upload_to=\"uploads\", verbose_name=\"Staff-Only File\", storage=StaffStorage, # type: ignore\n )\n\n def __str__(self) -> str:\n return f\"f={self.b2StorageFile}, p={self.publicFile.url}, li={self.loggedInFile.url}, s={self.staffFile.url}\"\n","sub_path":"tests/test_project/files/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"30600619","text":"\r\n\"\"\" File is not fully documented\"\"\"\r\nimport sys\r\nimport nuke\r\nimport getpass\r\nimport uuid\r\nimport pymongo\r\nimport datetime\r\nfrom PySide.QtGui import *\r\nfrom PySide.QtCore import *\r\nfrom copyboardUi import CopyboardUi\r\n\r\n# pymongo db connection example\r\nSERVER = pymongo.Connection()\r\nDB = SERVER['exampleServer']\r\nUSER_COLLECTION = DB['usersDB']\r\nCLIPBOARD_COLLECTION = DB['collection']\r\nSCRIPT_LOCATION = \":/copyboard\"\r\nCURRENT_USER = getpass.getuser()\r\n\r\n\r\nclass CopyboardCore(CopyboardUi):\r\n def __init__(self):\r\n super(CopyboardCore, self).__init__()\r\n\r\n self.all_users = [user for user in USER_COLLECTION.find()]\r\n self.build_user_list_widget()\r\n\r\n self.users_search_line_edit.textChanged.connect(self.build_user_list_widget)\r\n self.send_close_push_button.clicked.connect(self.close)\r\n self.send_push_button.clicked.connect(self.send_copyboard)\r\n self.received_close_push_button.clicked.connect(self.close)\r\n self.paste_push_button.clicked.connect(self.paste_copyboard)\r\n self.history_table_widget.currentCellChanged.connect(self.set_note)\r\n\r\n self.build_history()\r\n\r\n def set_note(self, index):\r\n\r\n item = self.history_table_widget.item(index, 0)\r\n obj = item.data(32)\r\n note = obj['note']\r\n self.received_notes_text_edit.setPlainText(note)\r\n\r\n def paste_copyboard(self):\r\n\r\n row = self.history_table_widget.currentRow()\r\n item = self.history_table_widget.item(row, 0)\r\n doc = item.data(32)\r\n script = doc['nuke_file']\r\n nuke.nodePaste(\"%s/%s\" % (SCRIPT_LOCATION, script))\r\n\r\n def send_copyboard(self):\r\n\r\n row_count = self.stack_list_widget.count()\r\n if not row_count:\r\n QMessageBox.information(self, \"Warning\", \"No user selected\")\r\n return\r\n\r\n now = datetime.datetime.now()\r\n script = \"%s.nk\" % uuid.uuid1()\r\n nuke.nodeCopy(\"%s/%s\" % (SCRIPT_LOCATION, script))\r\n for i in range(row_count):\r\n obj = 
self.stack_list_widget.item(i).data(32)\r\n doc = dict()\r\n doc['sender'] = CURRENT_USER\r\n doc['submitted_at'] = now\r\n doc['destination_user'] = obj['login']\r\n doc['nuke_file'] = script\r\n doc['note'] = self.text_note_text_edit.toPlainText()\r\n CLIPBOARD_COLLECTION.save(doc)\r\n self.close()\r\n\r\n def build_history(self):\r\n\r\n query = CLIPBOARD_COLLECTION.find({\"destination_user\": CURRENT_USER}).sort(\"submitted_at\", -1)\r\n self.history_table_widget.setRowCount(query.count())\r\n for x,i in enumerate(query):\r\n sender_query = USER_COLLECTION.find_one({\"login\": i['sender']})\r\n item1 = QTableWidgetItem(sender_query['name'])\r\n item1.setData(32, i)\r\n item2 = QTableWidgetItem(self.get_time_difference_as_string(i['submitted_at']))\r\n self.history_table_widget.setItem(x, 0, item1)\r\n self.history_table_widget.setItem(x, 1, item2)\r\n\r\n def get_time_difference_as_string(self, date):\r\n\r\n delta = datetime.datetime.today() - date\r\n if delta.days:\r\n return \"%s day\" % delta.days\r\n seconds = delta.seconds\r\n if seconds < 60:\r\n return \"A few seconds ago\"\r\n elif seconds < 3600:\r\n return \"%s minutes ago\" % (seconds/60)\r\n elif seconds < 86400:\r\n return \"%s hours ago\" % (seconds/3600)\r\n\r\n def build_user_list_widget(self):\r\n self.users_list_widget.clear()\r\n search_pattern = self.users_search_line_edit.text().lower()\r\n for user in self.all_users:\r\n name = user['name']\r\n if search_pattern in name.lower():\r\n item = QListWidgetItem(name)\r\n item.setData(32, user)\r\n item.setToolTip(self.get_user_tooltip(user))\r\n self.users_list_widget.addItem(item)\r\n self.users_list_widget.sortItems()\r\n\r\n def get_user_tooltip(self, user):\r\n return \"Email: %s\\nLogin: %s\\nAge: %s\" % (user['email'], user['login'], user['age'])\r\n\r\n# sho UI\r\ndef start():\r\n start.panel = CopyboardCore()\r\n start.panel.show()\r\n","sub_path":"copyboard/copyboardCore.py","file_name":"copyboardCore.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"581727637","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 30 11:42:10 2017\r\n\r\n@author: Visharg Shah\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\ndef faces_load_data():\r\n \r\n skip_rows = 1\r\n train_size = 28709\r\n test_size = 3589\r\n dim = 48\r\n X_train = np.empty([train_size,dim, dim])\r\n X_test = np.empty([test_size, dim, dim])\r\n y_train = np.empty(train_size)\r\n y_test = np.empty(test_size)\r\n \r\n f = open('fer2013.csv', 'r')\r\n \r\n train_index = test_index = 0\r\n for i, line in enumerate(f):\r\n if i >= skip_rows:\r\n split_line = line.split(\",\")\r\n usage = split_line[2].rstrip()\r\n if usage == 'Training':\r\n X_train[train_index, :,:] = np.fromstring(split_line[1], dtype = 'int', sep = ' ').reshape(dim, dim)\r\n y_train[train_index] = int(split_line[0])\r\n train_index += 1\r\n elif usage == 'PublicTest':\r\n X_test[test_index, :,:] = np.fromstring(split_line[1], dtype = 'int', sep = ' ').reshape(dim, dim)\r\n y_test[test_index] = int(split_line[0])\r\n test_index += 1\r\n \r\n return (X_train, y_train) , (X_test, y_test)\r\n\r\n\r\n\r\nnp.random.seed(1337) \r\n \r\n(X_train, y_train), (X_test, y_test) = faces_load_data()\r\n\r\nprint (X_train.shape)\r\n\r\nX_train = X_train.reshape(X_train.shape[0], 48,48,1)\r\nX_test = X_test.reshape(X_test.shape[0], 48,48,1)\r\nX_train = X_train.astype(\"float32\")\r\nX_test = 
X_test.astype(\"float32\")\r\nX_train /= 255\r\nX_test /= 255\r\nprint('X_train shape:', X_train.shape)\r\nprint(X_train.shape[0], 'train samples')\r\nprint(X_test.shape[0], 'test samples')\r\n\r\n\r\nfrom keras.utils import np_utils\r\n\r\nY_train = np_utils.to_categorical(y_train, 7)\r\nY_test = np_utils.to_categorical(y_test, 7)\r\n\r\n\r\nY_train_1 = pd.DataFrame(Y_train)\r\nY_train_1[0] = Y_train_1[0] + Y_train_1[1]\r\nY_train_1 = Y_train_1.drop(1, 1)\r\nY_train_1 = Y_train_1.as_matrix(columns = Y_train_1.columns[:])\r\n\r\n\r\nY_test_1 = pd.DataFrame(Y_test)\r\nY_test_1[0] = Y_test_1[0] + Y_test_1[1]\r\nY_test_1 = Y_test_1.drop(1, 1)\r\nY_test_1 = Y_test_1.as_matrix(columns = Y_test_1.columns[:])\r\n\r\n\r\n#########\r\n\r\n###########\r\n############\r\n\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Dropout\r\nfrom keras.layers import Activation\r\nfrom keras.layers import Flatten\r\nfrom keras.layers import Convolution2D\r\nfrom keras.layers import MaxPooling2D\r\nfrom keras.optimizers import Adadelta, Adam\r\n\r\n#####\r\n\r\nfacial = Sequential()\r\n\r\n#Input Layer\r\nfacial.add(Convolution2D(32, (5,5), input_shape = (48,48,1), padding='same', activation='relu'))\r\nfacial.add(Convolution2D(32, (5,5), padding = 'same',activation='relu'))\r\nfacial.add(Convolution2D(32, (5,5), padding = 'same',activation='relu'))\r\n\r\n \r\nfacial.add(MaxPooling2D(pool_size=(2,2)))\r\n\r\nfacial.add(Convolution2D(64, (5,5), input_shape = (48,48,1), padding='same', activation='relu'))\r\nfacial.add(Convolution2D(64, (5,5), padding='same', activation='relu'))\r\nfacial.add(Convolution2D(64, (5,5), padding = 'same',activation='relu'))\r\n\r\n\r\n \r\nfacial.add(MaxPooling2D(pool_size=(2,2)))\r\n\r\nfacial.add(Convolution2D(128, (5,5), input_shape = (48,48,1), padding='same', activation='relu'))\r\nfacial.add(Convolution2D(128, (5,5), padding='same', activation='relu'))\r\nfacial.add(Convolution2D(128, (5,5), padding = 'same',activation='relu'))\r\n\r\n\r\n \r\nfacial.add(MaxPooling2D(pool_size=(2,2)))\r\n\r\nfacial.add(Flatten())\r\nfacial.add(Dense(128, activation = 'relu'))\r\nfacial.add(Dense(64, activation = 'relu'))\r\nfacial.add(Dropout(0.5))\r\nfacial.add(Dense(7, activation='softmax'))\r\nfacial.add(Dropout(0.5))\r\n\r\nada = Adadelta(lr=0.1, rho=0.95, epsilon=1e-08)\r\n#adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\r\nfacial.compile(loss='categorical_crossentropy',optimizer=ada,metrics=['accuracy'])\r\n#facial.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\nfacial.fit(X_train, Y_train, batch_size=128, epochs=30, verbose=1, validation_data=(X_test, Y_test))\r\nscore = facial.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)\r\nprint('Test score:', score[0])\r\nprint('Test accuracy:', score[1])","sub_path":"fer2013raw.py","file_name":"fer2013raw.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"271005481","text":"# file: train_cnn.py\r\n#\r\n# A CNN implementation in TensorFlow\r\n#\r\n# First set the variable \r\n# It tells the Python script where your training and\r\n# validation data is.\r\n#\r\n# Store your training images in a sub-folder for each\r\n# class in /train/\r\n# e.g.\r\n# /train/bikes\r\n# /train/cars\r\n#\r\n# Store your test images in a sub-folder for each\r\n# class in /test/\r\n# e.g.\r\n# /test/bikes\r\n# /test/cars\r\n#\r\n# Then 
{"seq_id":"271005481","text":"# file: train_cnn.py\r\n#\r\n# A CNN implementation in TensorFlow\r\n#\r\n# First set the variable dataset_root below.\r\n# It tells the Python script where your training and\r\n# validation data is.\r\n#\r\n# Store your training images in a sub-folder for each\r\n# class in /train/\r\n# e.g.\r\n#   /train/bikes\r\n#   /train/cars\r\n#\r\n# Store your test images in a sub-folder for each\r\n# class in /test/\r\n# e.g.\r\n#   /test/bikes\r\n#   /test/cars\r\n#\r\n# Then let the script run. The final model will be saved.\r\n#\r\n# ---\r\n# Prof. Dr. Juergen Brauer, www.juergenbrauer.org\r\n\r\nfrom dataset_reader import dataset\r\nimport tensorflow as tf\r\n\r\n# Experiments\r\n# Exp-Nr   Comment\r\n# 01       10 hidden layers: INPUT->C1->P1->C2->P2->C3->C4->C5->P3->FC1->FC2->OUT\r\n#          trained 100 mini-batches of size 32\r\n#          feature maps: 10-15-20-25-30\r\n#          --> 59.93% accuracy evaluated with test_cnn.py\r\n#\r\n# 02       10 hidden layers: INPUT->C1->P1->C2->P2->C3->C4->C5->P3->FC1->FC2->OUT\r\n#          trained 1000 mini-batches of size 32\r\n#          feature maps: 10-15-20-25-30\r\n#          --> 72.50% accuracy evaluated with test_cnn.py\r\n#\r\n# 03       4 hidden layers: INPUT->C1->P1->FC1->FC2->OUT\r\n#          trained 1000 mini-batches of size 32\r\n#          feature maps: 10\r\n#          --> 47.59% (!!!) accuracy evaluated with test_cnn.py\r\n#\r\n#\r\n\r\nexp_nr = 3\r\n\r\n# helper function to build 1st conv layer with filter size 11x11\r\n# and stride 4 (in both directions) and no padding\r\ndef conv1st(name, l_input, w, b):\r\n    cov = tf.nn.conv2d(l_input, w, strides=[1, 4, 4, 1], padding='VALID')\r\n    return tf.nn.relu(tf.nn.bias_add(cov, b), name=name)\r\n\r\n\r\n# in all other layers we use a stride of 1 (in both directions)\r\n# and a padding such that the spatial dimension (width,height)\r\n# of the output volume is the same as the spatial dimension\r\n# of the input volume\r\ndef conv2d(name, l_input, w, b):\r\n    cov = tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME')\r\n    return tf.nn.relu(tf.nn.bias_add(cov, b), name=name)\r\n\r\n# generates a max pooling layer\r\ndef max_pool(name, l_input, k, s):\r\n    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding='VALID', name=name)\r\n\r\n\r\n# helper function to generate a CNN\r\ndef build_cnn_model(_X, keep_prob, n_classes, imagesize, img_channel):\r\n    # prepare matrices for weights\r\n    # spatial sizes for a 227x227 input: conv1 (11x11, stride 4, VALID) gives (227-11)/4+1 = 55,\r\n    # pool1 (k=3, s=2, VALID) gives (55-3)/2+1 = 27, hence 27*27*10 inputs for exp3_wd1;\r\n    # two further k=3/s=2 poolings shrink 27 to 13 and then 6, hence 6*6*30 for wd1\r\n    _weights = {\r\n        'wc1': tf.Variable(tf.random_normal([11, 11, img_channel, 10])),\r\n        'wc2': tf.Variable(tf.random_normal([5, 5, 10, 15])),\r\n        'wc3': tf.Variable(tf.random_normal([3, 3, 15, 20])),\r\n        'wc4': tf.Variable(tf.random_normal([3, 3, 20, 25])),\r\n        'wc5': tf.Variable(tf.random_normal([3, 3, 25, 30])),\r\n        'wd1': tf.Variable(tf.random_normal([6 * 6 * 30, 40])),\r\n        'wd2': tf.Variable(tf.random_normal([40, 40])),\r\n        'out': tf.Variable(tf.random_normal([40, n_classes])),\r\n        'exp3_wd1': tf.Variable(tf.random_normal([27 * 27 * 10, 40]))\r\n    }\r\n\r\n    # prepare vectors for biases\r\n    _biases = {\r\n        'bc1': tf.Variable(tf.random_normal([10])),\r\n        'bc2': tf.Variable(tf.random_normal([15])),\r\n        'bc3': tf.Variable(tf.random_normal([20])),\r\n        'bc4': tf.Variable(tf.random_normal([25])),\r\n        'bc5': tf.Variable(tf.random_normal([30])),\r\n        'bd1': tf.Variable(tf.random_normal([40])),\r\n        'bd2': tf.Variable(tf.random_normal([40])),\r\n        'out': tf.Variable(tf.random_normal([n_classes]))\r\n    }\r\n\r\n    # reshape input picture\r\n    _X = tf.reshape(_X, shape=[-1, imagesize, imagesize, img_channel])\r\n\r\n    if (exp_nr == 1 or exp_nr==2):\r\n        # topology: INPUT->C1->P1->C2->P2->C3->C4->C5->P3->FC1->FC2->OUT\r\n        conv1 = conv1st('conv1', _X, _weights['wc1'], _biases['bc1'])\r\n        pool1 = max_pool('pool1', conv1, k=3, s=2)\r\n        conv2 = conv2d('conv2', pool1, _weights['wc2'], _biases['bc2'])\r\n        pool2 = max_pool('pool2', conv2, k=3, s=2)\r\n        conv3 = conv2d('conv3', pool2, _weights['wc3'], _biases['bc3'])\r\n        conv4 = conv2d('conv4', conv3, _weights['wc4'], _biases['bc4'])\r\n        conv5 = conv2d('conv5', conv4, _weights['wc5'], _biases['bc5'])\r\n        pool3 = 
max_pool('pool3', conv5, k=3, s=2)\r\n # fully connected layer\r\n dense1 = tf.reshape(pool3, [-1, _weights['wd1'].get_shape().as_list()[0]])\r\n dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1')\r\n dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2')\r\n # dense2 = tf.nn.dropout(dense1, keep_prob)\r\n out = tf.matmul(dense2, _weights['out']) + _biases['out']\r\n\r\n elif exp_nr == 3:\r\n\r\n # topology: INPUT->C1->P1->FC1->FC2->OUT\r\n conv1 = conv1st('conv1', _X, _weights['wc1'], _biases['bc1'])\r\n print(\"conv1 shape: \", conv1.get_shape())\r\n pool1 = max_pool('pool1', conv1, k=3, s=2)\r\n print(\"pool1 shape: \", pool1.get_shape())\r\n\r\n # fully connected layer\r\n dense1 = tf.reshape(pool1, [-1, 27*27*10])\r\n dense1 = tf.nn.relu(tf.matmul(dense1, _weights['exp3_wd1']) + _biases['bd1'], name='fc1')\r\n print(\"dense1 shape: \", dense1.get_shape())\r\n dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2')\r\n print(\"dense2 shape: \", dense2.get_shape())\r\n\r\n out = tf.matmul(dense2, _weights['out']) + _biases['out']\r\n print(\"out shape: \", out.get_shape())\r\n\r\n return [out, _weights['wc1']]\r\n\r\n\r\n# 1. create a training and testing Dataset object that stores\r\n# the training / testing images\r\ndataset_root = \"V:/01_job/12_datasets/imagenet/cars_vs_bikes_prepared/\"\r\ntraining = dataset(dataset_root + \"train\", \".jpeg\")\r\ntesting = dataset(dataset_root + \"validation\", \".jpeg\")\r\n\r\n# 2. set training parameters\r\nlearn_rate = 0.001\r\nbatch_size = 32\r\ndisplay_step = 1\r\nif exp_nr==1:\r\n nr_mini_batches_to_train = 100\r\nelif exp_nr==2:\r\n nr_mini_batches_to_train = 1000\r\nelif exp_nr==3:\r\n nr_mini_batches_to_train = 1000\r\n\r\nsave_filename = 'save/model.ckpt'\r\nlogs_path = './logfiles'\r\n\r\nn_classes = training.num_labels\r\ndropout = 0.8 # dropout (probability to keep units)\r\nimagesize = 227\r\nimg_channel = 3\r\n\r\nx = tf.placeholder(tf.float32, [None, imagesize, imagesize, img_channel])\r\ny = tf.placeholder(tf.float32, [None, n_classes])\r\nkeep_prob = tf.placeholder(tf.float32) # dropout (keep probability)\r\n\r\n[pred, filter1st] = build_cnn_model(x, keep_prob, n_classes, imagesize, img_channel)\r\n\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\r\n# cost = tf.reduce_mean(tf.squared_difference(pred, y))\r\n\r\nglobal_step = tf.Variable(0, trainable=False)\r\n# lr = tf.train.exponential_decay(learn_rate, global_step, 1000, decay_rate, staircase=True)\r\n\r\noptimizer = tf.train.AdamOptimizer(learning_rate=learn_rate).minimize(cost, global_step=global_step)\r\n# optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost, global_step=global_step)\r\n\r\ncorrect_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\r\n\r\nsaver = tf.train.Saver()\r\ntf.add_to_collection(\"x\", x)\r\ntf.add_to_collection(\"y\", y)\r\ntf.add_to_collection(\"keep_prob\", keep_prob)\r\ntf.add_to_collection(\"pred\", pred)\r\ntf.add_to_collection(\"accuracy\", accuracy)\r\n\r\nprint(\"\\n\\n\")\r\nprint(\"----------------------------------------\")\r\nprint(\"I am ready to start the training...\")\r\nprint(\"So I will train a CNN, starting with a learn rate of\", learn_rate)\r\nprint(\"I will train \", nr_mini_batches_to_train, \"mini batches of \", batch_size, \"images\")\r\nprint(\"Your input images will be resized to \", imagesize, \"x\", imagesize, 
\"pixels\")\r\nprint(\"----------------------------------------\")\r\n\r\nwith tf.Session() as my_session:\r\n my_session.run(tf.global_variables_initializer())\r\n\r\n step = 1\r\n while step < nr_mini_batches_to_train:\r\n\r\n batch_ys, batch_xs = training.nextBatch(batch_size)\r\n # note: batch_ys and batch_xs are tuples each\r\n # batch_ys a tuple of e.g. 32 one-hot NumPy arrays\r\n # batch_xs a tuple of e.g. 32 NumPy arrays of shape\r\n # (width, height, 3)\r\n\r\n\r\n _ = my_session.run([optimizer],\r\n feed_dict={x: batch_xs,\r\n y: batch_ys,\r\n keep_prob: dropout})\r\n\r\n if step % display_step == 0:\r\n acc = my_session.run(accuracy,\r\n feed_dict={x: batch_xs,\r\n y: batch_ys,\r\n keep_prob: 1.})\r\n loss = my_session.run(cost, feed_dict={x: batch_xs,\r\n y: batch_ys,\r\n keep_prob: 1.})\r\n print(\"learn rate:\" + str(learn_rate) +\r\n \" mini batch:\" + str(step) +\r\n \", minibatch loss= \" + \"{:.5f}\".format(loss) +\r\n \", batch accuracy= \" + \"{:.5f}\".format(acc))\r\n step += 1\r\n\r\n print(\"\\n\")\r\n print(\"Training of CNN model finished.\")\r\n\r\n save_filename = \"saved_model_exp0\" + str(exp_nr) + \"/final_model.ckpt\"\r\n saver.save(my_session, save_filename, global_step=step)\r\n print(\"Saved CNN model to file '\",save_filename,\"'\")\r\n","sub_path":"A first Convolutional Neural Network/train_cnn.py","file_name":"train_cnn.py","file_ext":"py","file_size_in_byte":9370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"517228733","text":"from peewee import *\n\n\ndb = SqliteDatabase('apples.db')\n\n\nclass Apple(Model): # always call a model in singular as they represent object instances, so not plural\n variety = CharField(max_length=50, unique=True)\n uses = CharField(max_length=50, unique=True)\n origin = CharField(max_length=50, unique=True)\n description = TextField(default='')\n\n class Meta:\n database = db\n\n \napples = [\n {'variety':'Egremont Russet',\n 'uses':'Eating, Cooking, Drying',\n 'origin':'Sussex, 1872',\n 'description':'Non-juicy firm flesh, characteristic sweet/dry nutty flavour'},\n {'variety':\"Lemon Pippin\",\n 'uses':'Eating, Cooking',\n 'origin':'Ancient, possible Norman origin',\n 'description':'firm, coarse texture, dry, slightly acid flesh, faintly aromatic fruity lemony flavour'}]\n\n\ndef add_apples(): # dir(\"instance of Apple\") for a list of available methods, e.g. create() and sql commands\n for worm in apples:\n try:\n Apple.create(variety=worm['variety'],\n uses=worm['uses'],\n origin=worm['origin'],\n description=worm['description'])\n except IntegrityError:\n apple_entry = Apple.get(variety=worm['variety'])\n apple_entry.uses = worm['uses']\n apple_entry.origin = worm['origin']\n apple_entry.description = worm['description']\n apple_entry.save()\n\n\"\"\"\nnb. Try: try something where it is anticipated that an error might occur\n Except: to handle anticipated errors. 
Never use a \"bare\" except: if you handle errors that you do not expect then\n your code may do the wrong thing and hide bugs.\n Else/Finally: optional clauses, finally always runs, else only runs if there is no exception from the try block.\n Often overlooked but can be very useful in writing cleaner code\n\"\"\"\n\n\ndef use_choice():\n while True:\n try:\n use = int(input('Enter 1 for Eating, 2 for Cooking or 3 for Drying'))\n except ValueError:\n print('Not a valid entry, please re-enter')\n continue\n else:\n if use == 1:\n choice = 'Eating'\n elif use == 2:\n choice = 'Cooking'\n elif use == 3:\n choice = 'Drying'\n else:\n continue\n break\n for fruit in Apple.select().where(Apple.uses.contains(choice)).order_by(Apple.variety):\n print(fruit.variety, ': ',fruit.description)\n \"\"\"For a single instance, use get() instead of select, or call first(); in this case, would no longer need to use\n the for the loop which is needed for the queryset otherwise it prints 'SelectQuery object has no attribute ....\"\"\"\n\n \nif __name__ == '__main__': #use in case you want to be able to import the file as a module\n db.connect()\n db.create_tables([Apple], safe=True)\n add_apples()\n use_choice()\n\n\"\"\" \nhttp://docs.peewee-orm.com/en/latest/peewee/querying.html\nBasic CRUD operations:\nModel.create(), for executing INSERT queries.\nModel.save() and Model.update(), for executing UPDATE queries.\nModel.delete_instance() and Model.delete(), for executing DELETE queries.\nModel.select(), for executing SELECT queries.\n\ne.g. Creation on new model instance/row in table:\n >>> user = User(username='Charlie')\n >>> user.save() # save() returns the number of rows modified.\n\n assignment of new model instance which has an FK field:\n >>> tweet = Tweet.create(user=huey, message='Hello!')\n\n insertion of data without creating a new instance:\n >>> User.insert(username='Mickey').execute()\n (returns value representing no. 
rows modified)\n\n    Bulk inserts:\n    data_source = [\n        {'field1': 'val1-1', 'field2': 'val1-2'},\n        {'field1': 'val2-1', 'field2': 'val2-2'},\n        # ...\n    ]\n    with db.atomic():\n        Model.insert_many(data_source).execute()\n\nOnce a model instance has a primary key, any subsequent call to save() results in an UPDATE rather than another INSERT.\n \"\"\"\n","sub_path":"heritage_apples.py","file_name":"heritage_apples.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
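The try/except IntegrityError dance in add_apples above can also be expressed with peewee's get_or_create. A sketch, assuming the Apple model from the record (the extra unique columns could still raise IntegrityError on conflicting data):

def upsert_apple(entry):
    # create the row keyed on variety, or fetch the existing one
    apple, created = Apple.get_or_create(
        variety=entry['variety'],
        defaults={'uses': entry['uses'],
                  'origin': entry['origin'],
                  'description': entry['description']})
    if not created:
        # row already existed: refresh the remaining fields
        apple.uses = entry['uses']
        apple.origin = entry['origin']
        apple.description = entry['description']
        apple.save()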
{"seq_id":"336511100","text":"# from functools import partialmethod\nfrom kivy.uix.screenmanager import Screen\nfrom kivymd.uix.filemanager import MDFileManager\nfrom database import show_alert_dialog\nimport flags\nimport os\nimport pandas as pd\nfrom mysql.connector.errors import InterfaceError\n\n\nclass FinalizeOffer(Screen):\n    def __init__(self, **kw):\n        super(FinalizeOffer, self).__init__(**kw)\n        # file manager part\n        self.manager_open = False\n        self.file_manager = MDFileManager(\n            exit_manager = self.exit_manager,\n            select_path = self.select_path,\n            # preview = True\n            ext=[\".xlsx\",\".csv\",\".txt\",\".xls\"]\n        )\n\n    def file_manager_open(self):\n        # create the path if it doesn't exist\n        try:\n            os.makedirs(f\"C:\\\\Users\\\\{os.getlogin()}\\\\Downloads\\\\tnp\")\n        except FileExistsError:\n            pass\n        # open path\n        self.file_manager.show(f\"C:\\\\Users\\\\{os.getlogin()}\\\\Downloads\\\\tnp\")\n        self.manager_open = True\n\n    def select_path(self, path):\n        # retrieve the spreadsheet file path\n        # my_db, my_cursor = db_connector()\n        my_db, my_cursor = self.manager.my_db, self.manager.my_cursor\n        df=pd.read_excel(path)\n        # preparing dataframe\n        enroll=list(df['enrollment id'])\n        comp=list(df['company name'])\n        role=list(df['role'])\n        # getting officer branch\n        for k,v in flags.branch.items():\n            if v==flags.app.officer_branch:\n                branch=k\n                break \n        # pinging database to check for network connection\n        try:\n            my_db.ping(reconnect=True,attempts=1)\n        except InterfaceError:\n            show_alert_dialog(self,\"Unable to connect to remote database, due to weak network. Try reconnecting after some time\")\n            return\n        for i in range(len(enroll)):\n            # updating database\n            qu=\"update offer_letters set finalised='' where enrollment_id=%s and company_id=(select company_id from company where name=%s and role=%s and branch = %s) ;\"\n            va=(enroll[i],comp[i],role[i],branch)\n            # print(enroll[i],comp[i],role[i],branch)\n            my_cursor.execute(qu,va)\n            my_db.commit()\n        show_alert_dialog(self,'Database Updated!!!')\n        # close file manager\n        self.exit_manager()\n\n    def exit_manager(self, *args):\n        # close file manager\n        self.manager_open = False\n        self.file_manager.close()\n","sub_path":"finalize_offer.py","file_name":"finalize_offer.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"551073001","text":"import json,logging\nlogger = logging.getLogger(__name__)\n\ndef serialize(msg,pretty_print=False):\n    try:\n        if pretty_print:\n            return json.dumps(msg,indent=2,sort_keys=True)\n        else:\n            return json.dumps(msg)\n    except Exception:\n        logger.exception('failed to serialize the message: %s',msg)\n        raise\n\n\ndef deserialize(msg):\n    try:\n        return json.loads(msg)\n    except Exception:\n        logger.exception('failed to deserialize the message: %s',msg)\n        raise\n\n","sub_path":"pandayoda/common/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"443261210","text":"class Solution:\n    \"\"\"\n    @param nums: A list of integer which is 0, 1 or 2 \n    @return: nothing\n    \"\"\"\n\n    def sortColors(self, nums):\n        # write your code here\n        left, right, index = 0, len(nums)-1, 0\n        while index <= right:  # <= so the element swapped in from the right is examined too\n            if nums[index] == 0:\n                nums[index], nums[left] = nums[left], nums[index]\n                left += 1\n                index += 1\n            elif nums[index] == 1:\n                index += 1\n            else:\n                nums[index], nums[right] = nums[right], nums[index]\n                right -= 1\n    \ndef main():\n    nums = [1, 0, 1, 2]\n    sol = Solution()\n    sol.sortColors(nums)\n    print(nums)\n\n\nmain()\n","sub_path":"lint-148-sort-colors/solution3.py","file_name":"solution3.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
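The loop bound in the record above matters: with index < right the last unexamined slot can keep a 2 out of place (try [1, 2, 0]). A standalone sketch of the same Dutch-national-flag sweep with a quick check:

def sort_colors(nums):
    # invariant: nums[:left] are 0s, nums[left:index] are 1s, nums[right+1:] are 2s
    left, index, right = 0, 0, len(nums) - 1
    while index <= right:
        if nums[index] == 0:
            nums[index], nums[left] = nums[left], nums[index]
            left += 1
            index += 1
        elif nums[index] == 1:
            index += 1
        else:
            nums[index], nums[right] = nums[right], nums[index]
            right -= 1

for case in ([1, 2, 0], [2, 0, 1], [1, 0, 1, 2], []):
    sort_colors(case)
    assert case == sorted(case)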
{"seq_id":"648245474","text":"# URI 1133\n# CLEVERSON MENDES FARIA\n\na = int(input())\nb = int(input())\n\nif a>b:\n    for i in range(b+1,a,1):\n        if (i % 5 == 2) or (i % 5 == 3):\n            print(i)\n    \nelif b>a:\n    for j in range(a+1,b,1):\n        if (j % 5 == 2) or (j % 5 == 3):\n            print(j)\n","sub_path":"PYTHON-URI/1133 - Resto da Divisão.py","file_name":"1133 - Resto da Divisão.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"336250067","text":"#!/usr/bin/env python\r\n\"\"\"Contains all sqls for menu program.\"\"\"\r\n\r\nfrom sqlalchemy import create_engine, inspect, func\r\nfrom sqlalchemy.orm import sessionmaker\r\n\r\nfrom database_setup import Restaurant, Base, MenuItem\r\n\r\nengine = create_engine('sqlite:///restaurantmenu.db')\r\n# Bind the engine to the metadata of the Base class so that the\r\n# declaratives can be accessed through a DBSession instance\r\nBase.metadata.bind = engine\r\n\r\nDBSession = sessionmaker(bind=engine)\r\n# A DBSession() instance establishes all conversations with the database\r\n# and represents a \"staging zone\" for all the objects loaded into the\r\n# database session object. Any change made against the objects in the\r\n# session won't be persisted into the database until you call\r\n# session.commit(). If you're not happy about the changes, you can\r\n# revert all of them back to the last commit by calling\r\n# session.rollback()\r\nsession = DBSession()\r\n\r\n\r\ndef get_restaurants():\r\n    \"\"\"Query to list all restaurant ids and names.\"\"\"\r\n    return session.query(Restaurant.id, Restaurant.name).all()\r\n\r\n\r\ndef get_restaurant_name_from_id(idval):\r\n    \"\"\"Query to get restaurant name from its id.\"\"\"\r\n    return session.query(Restaurant).filter_by(id=idval).one()\r\n\r\n\r\ndef add_restaurant(newname):\r\n    \"\"\"Add a new restaurant to the table.\"\"\"\r\n    newrec = Restaurant(name=newname)\r\n    session.add(newrec)\r\n    session.commit()\r\n\r\ndef del_restaurant(idval):\r\n    \"\"\"Delete a restaurant from the table.\"\"\"\r\n    delrec = session.query(Restaurant).filter_by(id=idval).one_or_none()\r\n    if delrec is not None:\r\n        session.delete(delrec)\r\n        session.commit()\r\n\r\ndef upd_restaurant(idval, updname):\r\n    \"\"\"Update the restaurant name.\"\"\"\r\n    print(idval, updname)\r\n    updrec = session.query(Restaurant).filter_by(id=idval).one_or_none()\r\n    if updrec is not None:\r\n        updrec.name = updname\r\n        session.add(updrec)\r\n        session.commit()\r\n    # print(updrec.name)\r\n\r\n\r\ndef main():\r\n    \"\"\"Main.\"\"\"\r\n    #print('start')\r\n    # add_restaurant('AABS')\r\n    rest_name = get_restaurant_name_from_id(3)\r\n    # for rest_name in rest_names:\r\n    print(rest_name.name)\r\n    # session.rollback()\r\n\r\n\r\n# use filterby to isolate a specific column in the table.\r\n\"\"\"veggieBurgers = session.query(MenuItem).filter_by(name = 'Veggie Burger')\r\nfor vburger in veggieBurgers:\r\n    print(vburger.id, vburger.price, vburger.restaurant.name)\r\n\"\"\"\r\n","sub_path":"vagrant/menu/webserver_sql.py","file_name":"webserver_sql.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"403645736","text":"# WRITE YOUR CODE SOLUTION HERE\nimport datetime\nimport time\nfrom datetime import date\n\ntoday=date.today()\nprint(\"Date:\",today)\nprint(\"Day:\",datetime.date.today().strftime(\"%a\"))\n\nday=time.strftime(\"%a\")\nweek = set([\"Mon\",\"Tue\",\"Wed\",\"Thu\",\"Fri\"])  # strftime(\"%a\") abbreviates Thursday as \"Thu\"\nSat = set([\"Sat\"])\nSun = set([\"Sun\"])\n\nif day in week:\n    print(\"Fare: 100\")\nelif day in Sat :\n    print(\"Fare: 60\")\n\nelif day in Sun:\n    print(\"Fare: 80\")\n","sub_path":"challenges/week_1/bus_fare_challenge.py","file_name":"bus_fare_challenge.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"496680153","text":"# -*- coding:utf-8 -*-\r\n# Author: washing\r\n# DateTime: 2021/2/3 11:46\r\n# File: 0043.py\r\n# Desc: \r\n\r\nclass Solution:\r\n    def multiply(self, num1: str, num2: str) -> str:\r\n        dic1 = {}\r\n        dic2 = {}\r\n        l1 = len(num1)\r\n        l2 = len(num2)\r\n        for idx,num in enumerate(num1):\r\n            k = l1-idx-1\r\n            dic1[k] = num\r\n        for idx,num in enumerate(num2):\r\n            k = l2-idx-1\r\n            dic2[k] = num\r\n        ans = 0\r\n        for k1, v1 in dic1.items():\r\n            for k2, v2 in dic2.items():\r\n                ans += int(v1) * int(v2) * 10**k1 * 10**k2\r\n        return str(ans)","sub_path":"Solutions/0043/0043.py","file_name":"0043.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"344923145","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n#@Time : 5/31/21 
6:45 PM\n#@Author: Yiyang Huo\n#@File : convert_csv.py\n\nimport csv\n\ncsv_columns = [\n \"Rejection_Rate\",\n \"Date\",\n \"Season_Year\",\n \"County\",\n \"#loads\",\n \"Tomato_variety\",\n \"Average_worm/insect_damage\",\n \"Average_mold\",\n \"Average_green\",\n \"Average_material_other_than_tomatoes\",\n \"Average_hue_loads\",\n \"Average_hue\",\n \"Average_color\",\n \"Average_solids\",\n \"Average_pH\"\n]\n\n\nimport pandas as pd\nimport numpy as np\n\nrandom = True\ndf = pd.read_csv('data/ptab.csv')\ndf = df[df[\"#loads\"] != \"0.0\"]\ndf[\"Date\"] = pd.to_datetime(df[\"Date\"])\n\ndf['Average_hue_loads'] = pd.to_numeric(df['Average_hue_loads'],errors='coerce')\ndf['Average_hue'] = pd.to_numeric(df['Average_hue'],errors='coerce')\n\nhue_loads_mean_value=df['Average_hue_loads'].mean()\nhue_mean_value = df['Average_hue'].mean()\ndf['Average_hue_loads'].fillna(value=hue_loads_mean_value, inplace=True)\ndf['Average_hue'].fillna(value=hue_mean_value, inplace=True)\ndf = df.sort_values([\"Date\", \"Tomato_variety\"])\noutput_unique = np.array2string(df.Tomato_variety.unique(), precision=2, separator=',',\n suppress_small=True)\n\nf=open('unique varieties.txt','w')\nf.write(output_unique)\nf.close()\n\nif not random:\n df.to_csv('data/ptab_conv.csv', index=False)\nelse:\n df = df.sample(frac=1)\n df.to_csv('data/ptab_conv_random.csv', index=False)","sub_path":"convert_csv.py","file_name":"convert_csv.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"185589827","text":"from typing import List\n\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n l = len(nums)\n if l == 0:\n return 0\n\n pos, last = 1, nums[0]\n for x in range(1, l):\n if nums[x] != last:\n nums[pos] = nums[x]\n pos, last = pos + 1, nums[x]\n\n return pos\n\n\ndef get_input():\n items = [1, 2, 2, 3, 3, 3, 4]\n return items\n\n\ndef main():\n print(\"Hello Ahmed\")\n items = get_input()\n print(items)\n\n sol = Solution()\n value = sol.removeDuplicates(items)\n print(\"value is \", value)\n\n print(\"Printing result\")\n # print(items)\n for x in range(0, value):\n print(items[x])\n\n\nmain()\n","sub_path":"dump/py/26.Remove Duplicates from Sorted Array.py","file_name":"26.Remove Duplicates from Sorted Array.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"597351130","text":"__author__ = 'sibs-a-lot'\nimport datetime\nimport Creation\n# Set console size\n#os.system(\"mode con: cols=124 lines=55\")\n# Define some whitespace functions\ntab = '\\t'\npounds = '#'*4\nspace = ' '\nnew_line = '\\n'\nside = '|'\n# Display a program initialization splash screen.\n\n# Define a cross-platform 'clear screen'\n'''\ndef cls():\n os.system(['clear','cls'][os.name == 'nt'])\n'''\ndef ProgramHeader():\n '''\n This is the program header that clears the console and then prints the current status.\n '''\n # Clear the console.\n\n #cls()\n\n # Print the header\n print(pounds*31)\n print((tab*6), 'Current Brewery Status')\n print((tab*5),tab,' ', datetime.date.today().strftime(\"%m-%d-%Y\"))\n print(new_line)\n\n\n\nProgramHeader()\n","sub_path":"Dashboard.py","file_name":"Dashboard.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"327626037","text":"# program that converts all digits in a string to underline\n\n#Write a function 
that converts all digits in a string to underscore.\ndef convertDigitsUnderline (sentence):\n    \"\"\"Convert all numbers in a string to underscore.\"\"\"\n    for i in sentence:\n        # check whether the character is a digit using the isdigit() method\n        if i.isdigit():\n            # replace the digit with an underscore using the string replace() method\n            sentence = sentence.replace(i, \"_\")\n    return sentence\n\n# ask the user for a string\nsentence = input(\"Input a string: \")\nprint(convertDigitsUnderline(sentence))","sub_path":"2127930NT/05/numberUnderscore.py","file_name":"numberUnderscore.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
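The same character-by-character replacement can be done in one pass with the re module; a sketch, separate from the record above:

import re

def digits_to_underscores(sentence):
    # \d matches any decimal digit; each match becomes "_"
    return re.sub(r"\d", "_", sentence)

assert digits_to_underscores("a1b22c") == "a_b__c"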
{"seq_id":"633283720","text":"import argparse\nimport logging\n\nimport imageio\nimport numpy as np\nimport yaml\nfrom IPython import embed\n\nfrom common import get_image_array, get_probability_for_class, get_perturbed_images\nfrom differential_evolution import init_population, gen_children\nfrom models.base import get_model_from_name\n\nCONFIG = None\nlogging.basicConfig(\n    format='%(asctime)s %(levelname)-8s %(message)s',\n    level=logging.INFO,\n    datefmt='%Y-%m-%d %H:%M:%S')\n\n\ndef fitness_function(prediction, true_class=None):\n    \"\"\"\n    For non-targeted attacks, the fitness function is the negative probability of true class\n    \"\"\"\n    return -get_probability_for_class(prediction, true_class)\n\n\ndef get_fit_population(fathers, children, fathers_predictions, children_predictions, true_class):\n    final_population = list()\n    for i in range(len(fathers_predictions)):\n        father_fitness = fitness_function(fathers_predictions[i], true_class)\n        child_fitness = fitness_function(children_predictions[i], true_class)\n        if father_fitness < child_fitness:\n            final_population.append(children[i])\n        else:\n            final_population.append(fathers[i])\n    return np.array(final_population)\n\n\ndef find_adversary_image(image, model):\n    original_predictions = model.predict(np.copy(image))\n    true_label = original_predictions[0][0][1]\n    true_label_probability = original_predictions[0][0][2]\n    logging.info(\"True label: {}, Probability: {}\".format(true_label, true_label_probability))\n    imageio.imwrite('output/original.jpg', image[0])\n\n    population = init_population(CONFIG)\n    for i in range(CONFIG[\"num_iterations\"]):\n        logging.info(\"Iteration: {}\".format(i))\n        perturbed_images = get_perturbed_images(image, population)\n        perturbed_predictions = model.predict(np.copy(perturbed_images), top=model.num_classes)\n\n        # materialise as a list: it is consumed by min(), max() and list.index() below\n        true_class_probabilities = [get_probability_for_class(p, true_label) for p in perturbed_predictions]\n        logging.info(\"Probabilities for true class: Min={}, Max={}\".format(min(true_class_probabilities),\n                                                                           max(true_class_probabilities)))\n        if i % 10 == 0:\n            imageio.imwrite('output/{}.jpg'.format(i),\n                            perturbed_images[true_class_probabilities.index(min(true_class_probabilities))])\n\n        population_children = gen_children(population, CONFIG)\n        perturbed_images_children = get_perturbed_images(image, population_children)\n        perturbed_predictions_children = model.predict(np.copy(perturbed_images_children), top=model.num_classes)\n\n        population = get_fit_population(population, population_children,\n                                        perturbed_predictions,\n                                        perturbed_predictions_children,\n                                        true_class=true_label)\n    embed()\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--config', '-c', dest='config_file', help='config file')\n    parser.add_argument('--input', '-i', dest='input_image', help='input image file')\n    args = parser.parse_args()\n\n    CONFIG = yaml.safe_load(open(args.config_file))\n    model = get_model_from_name(CONFIG[\"model\"])\n    CONFIG[\"img_x\"], CONFIG[\"img_y\"], CONFIG[\"img_channels\"] = model.input_size\n    image_arr = get_image_array(args.input_image, config=CONFIG)\n    find_adversary_image(image_arr, model)\n","sub_path":"non_targeted.py","file_name":"non_targeted.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"250650914","text":"from mycroft.util.log import LOG\nimport requests\nimport json\n\n\ndef play_yt(kodi_path, video_id):\n    LOG.info('play youtube ID: ' + str(video_id))\n    # short video ids and longer (e.g. playlist-style) ids are passed to the plugin the same way\n    yt_link = \"plugin://plugin.video.youtube/play/\" + video_id\n    json_header = {'content-type': 'application/json'}\n    method = \"Player.Open\"\n    kodi_payload = {\n        \"jsonrpc\": \"2.0\",\n        \"params\": {\n            \"item\": {\n                \"file\": yt_link\n            }\n        },\n        \"method\": method,\n        \"id\": \"libPlayer\"\n    }\n    try:\n        kodi_response = requests.post(kodi_path, data=json.dumps(kodi_payload), headers=json_header)\n        return kodi_response\n    except Exception as e:\n        LOG.info(e)\n        return None\n","sub_path":"kodi_tools/PlayYT.py","file_name":"PlayYT.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"97408514","text":"# -*- coding: utf-8 -*-\r\n\r\nimport logging\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn.cluster import spectral_clustering\r\nfrom sklearn.metrics import euclidean_distances\r\n\r\nlogging.basicConfig(format='%(asctime)s\\t%(name)s\\t%(levelname)s : %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\r\nlogger = logging.getLogger()\r\nlogger.setLevel(logging.INFO)\r\n\r\n\r\ndef expand(_min, _max):\r\n    _dis = (_max - _min) * 0.1\r\n    return _min - _dis, _max + _dis\r\n\r\n\r\nx = np.arange(0, 2 * np.pi, 0.1)\r\ndata1 = np.vstack((1 * np.cos(x), 1 * np.sin(x))).T\r\ndata2 = np.vstack((2 * np.cos(x), 2 * np.sin(x))).T\r\ndata3 = np.vstack((3 * np.cos(x), 3 * np.sin(x))).T\r\ndata = np.vstack((data1, data2, data3))\r\n\r\nx1_min, x2_min = np.min(data, axis=0)\r\nx1_max, x2_max = np.max(data, axis=0)\r\nleft, right = expand(x1_min, x1_max)\r\nbottom, top = expand(x2_min, x2_max)\r\n\r\nn_clusters = 3\r\n\r\ndistances = euclidean_distances(data, squared=True)\r\n_sigma = np.median(distances)\r\n\r\ncolors = plt.cm.Spectral(np.linspace(0, 0.8, n_clusters))\r\n\r\nplt.figure(figsize=(12, 8))\r\nplt.suptitle('Spectral cluster')\r\n\r\nfor i, s in enumerate(np.logspace(-2, 0, 6)):\r\n    # distances already holds squared euclidean distances (squared=True),\r\n    # so the Gaussian affinity is exp(-d^2 / s^2) without squaring again\r\n    affinity = np.exp(-distances / (s ** 2)) + 1e-6\r\n    y_pred = spectral_clustering(affinity, n_clusters)\r\n\r\n    plt.subplot(2, 3, i + 1)\r\n    for k, c in enumerate(colors):\r\n        k_indices = (y_pred == k)\r\n        plt.scatter(data[k_indices, 0], data[k_indices, 1], c=c, edgecolors='k')\r\n    plt.xlim((left, right))\r\n    plt.ylim((bottom, top))\r\n    plt.grid(True)\r\n    plt.title('sigma = {}'.format(s))\r\n\r\nplt.tight_layout()\r\nplt.subplots_adjust(top=0.9)\r\nplt.show()\r\n","sub_path":"battle/cluster_spectral.py","file_name":"cluster_spectral.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"24269800","text":"from sklearn import neighbors\r\nfrom sklearn import tree as treeClassifier\r\nfrom sklearn import svm as svmClassifier\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.metrics import 
accuracy_score, confusion_matrix, roc_curve, auc\nfrom metrics import false_rejection_rate, false_acceptance_rate, equal_error_rate_with_verification, equal_error_rate\nimport numpy as np\nimport sklearn.metrics as sk_metrics\nfrom typing import List, Tuple, Dict\n\ndef knn(k, weight, algorithm=\"auto\", n_jobs=-1):\n return neighbors.KNeighborsClassifier(k, weights=weight, algorithm=algorithm, n_jobs=n_jobs)\n\n\n\ndef tree(weights, max_features, criterion, min_impurity_decrease):\n return treeClassifier.DecisionTreeClassifier(class_weight=weights, max_features=max_features, criterion=criterion, min_impurity_decrease=min_impurity_decrease)\n \n\ndef svm(gamma='auto', weights = None, kernel=\"linear\"):\n return svmClassifier.SVC(probability=True, class_weight = weights, kernel = kernel, gamma = gamma)\n \n\ndef mlp(alpha, hidden_layer_sizes, solver='adam', random_state=1, max_iter=1000):\n return MLPClassifier(solver=solver, alpha=alpha, hidden_layer_sizes=hidden_layer_sizes, random_state=random_state, max_iter=max_iter, learning_rate=\"adaptive\")\n \n\ndef test(clf, test_sets, test_classes, number_of_genuine, number_of_skilled, number_of_random, global_threshold=0.5):\n scores = {\n \"genuine\": [],\n \"skilled\": [],\n \"random\": []\n } \n for test_set in test_sets:\n prediction_probability = clf.predict_proba(test_set)\n scores_prob = prediction_probability[:,1]\n genuine_scores = scores_prob[:number_of_genuine]\n forgery_scores = scores_prob[number_of_genuine:]\n skilled_scores = forgery_scores[:number_of_skilled]\n random_scores = forgery_scores[number_of_skilled:]\n scores[\"genuine\"].append(genuine_scores)\n scores[\"skilled\"].append(skilled_scores)\n scores[\"random\"].append(random_scores)\n results = [[], [], [], [], [], [], []]\n result = compute_metrics(scores[\"genuine\"], scores[\"random\"], scores[\"skilled\"], global_threshold)\n results[0].append(result['FRR'])\n results[1].append(result['FAR_skilled'])\n results[2].append(result['FAR_random'])\n results[3].append(result['EER'])\n results[4].append(result['EER_userthresholds'])\n results[5] += (result['auc_list'])\n results[6].append(result['global_threshold'])\n return results\n\ndef __prediction_list(threshold, prediction_probability):\n prediction = []\n for pred in prediction_probability:\n if pred <= threshold:\n prediction.append(0)\n else:\n prediction.append(1)\n return prediction\n\ndef __classification_metrics(prediction, number_of_genuine, number_of_skilled, number_of_random):\n fn = 0\n only_genuine_prediction = prediction[:number_of_genuine]\n for pred in only_genuine_prediction:\n if pred == 0:\n fn += 1 \n frr = false_rejection_rate(number_of_genuine, fn)\n only_forgery_prediction = prediction[number_of_genuine:]\n skilled_prediction = only_forgery_prediction[:number_of_skilled]\n random_prediction = only_forgery_prediction[number_of_skilled:]\n fp_skilled = skilled_prediction.count(1)\n fp_random = random_prediction.count(1)\n far_skilled = false_acceptance_rate(number_of_skilled, fp_skilled)\n far_random = false_acceptance_rate(number_of_random, fp_random)\n return [frr, far_skilled, far_random]\n\n\ndef compute_metrics(genuine_preds, random_preds, skilled_preds, global_threshold):\n \"\"\" Compute metrics given the predictions (scores) of genuine signatures,\n random forgeries and skilled forgeries.\n\n Parameters\n ----------\n genuine_preds: list of np.ndarray\n A list of predictions of genuine signatures (each element on the list is the\n prediction for one user)\n random_preds: list of 
np.ndarray\n A list of predictions of random forgeries (each element on the list is the\n prediction for one user)\n skilled_preds: list of np.ndarray\n A list of predictions of skilled forgeries (each element on the list is the\n prediction for one user)\n global_threshold: float\n The global threshold used to compute false acceptance and false rejection rates\n\n Returns\n -------\n dict\n A dictionary containing:\n 'FRR': false rejection rate\n 'FAR_random': false acceptance rate for random forgeries\n 'FAR_skilled': false acceptance rate for skilled forgeries\n 'mean_AUC': mean Area Under the Curve (average of AUC for each user)\n 'EER': Equal Error Rate using a global threshold\n 'EER_userthresholds': Equal Error Rate using user-specific thresholds\n 'auc_list': the list of AUCs (one per user)\n 'global_threshold': the optimum global threshold (used in EER)\n \"\"\"\n all_genuine_preds = np.concatenate(genuine_preds)\n all_random_preds = np.concatenate(random_preds)\n all_skilled_preds = np.concatenate(skilled_preds)\n\n FRR = 1 - np.mean(all_genuine_preds >= global_threshold)\n FAR_random = 1 - np.mean(all_random_preds < global_threshold)\n FAR_skilled = 1 - np.mean(all_skilled_preds < global_threshold)\n\n aucs, meanAUC = compute_AUCs(genuine_preds, skilled_preds)\n\n EER, global_threshold = compute_EER(all_genuine_preds, all_skilled_preds)\n EER_userthresholds = calculate_EER_user_thresholds(genuine_preds, skilled_preds)\n\n all_metrics = {'FRR': FRR,\n 'FAR_random': FAR_random,\n 'FAR_skilled': FAR_skilled,\n 'mean_AUC': meanAUC,\n 'EER': EER,\n 'EER_userthresholds': EER_userthresholds,\n 'auc_list': aucs,\n 'global_threshold': global_threshold}\n\n return all_metrics\n\n\ndef compute_AUCs(genuine_preds, skilled_preds):\n \"\"\" Compute the area under the curve for the classifiers\n\n Parameters\n ----------\n genuine_preds: list of np.ndarray\n A list of predictions of genuine signatures (each element on the list is the\n prediction for one user)\n skilled_preds: list of np.ndarray\n A list of predictions of skilled forgeries (each element on the list is the\n prediction for one user)\n\n Returns\n -------\n list\n The list of AUCs (one per user)\n float\n The mean AUC\n\n \"\"\"\n aucs = []\n for thisRealPreds, thisSkilledPreds in zip(genuine_preds, skilled_preds):\n y_true = np.ones(len(thisRealPreds) + len(thisSkilledPreds))\n y_true[len(thisRealPreds):] = -1\n y_scores = np.concatenate([thisRealPreds, thisSkilledPreds])\n aucs.append(sk_metrics.roc_auc_score(y_true, y_scores))\n meanAUC = np.mean(aucs)\n return aucs, meanAUC.item()\n\n\ndef compute_EER(all_genuine_preds, all_skilled_preds):\n \"\"\" Calculate Equal Error Rate with a global decision threshold.\n\n Parameters\n ----------\n all_genuine_preds: np.ndarray\n Scores for genuine predictions of all users\n all_skilled_preds: np.ndarray\n Scores for skilled forgery predictions of all users\n\n Returns\n -------\n float:\n The Equal Error Rate\n float:\n The optimum global threshold (a posteriori)\n\n \"\"\"\n\n all_preds = np.concatenate([all_genuine_preds, all_skilled_preds])\n all_ys = np.concatenate([np.ones_like(all_genuine_preds), np.ones_like(all_skilled_preds) * -1])\n fpr, tpr, thresholds = sk_metrics.roc_curve(all_ys, all_preds)\n\n # Select the threshold closest to (FPR = 1 - TPR)\n t = thresholds[sorted(enumerate(abs(fpr - (1 - tpr))), key=lambda x: x[1])[0][0]]\n genuineErrors = 1 - np.mean(all_genuine_preds >= t).item()\n skilledErrors = 1 - np.mean(all_skilled_preds < t).item()\n EER = (genuineErrors + 
skilledErrors) / 2.0\r\n    return EER, t\r\n\r\n\r\ndef calculate_EER_user_thresholds(genuine_preds, skilled_preds):\r\n    \"\"\" Calculate Equal Error Rate with a decision threshold specific for each user\r\n\r\n    Parameters\r\n    ----------\r\n    genuine_preds: list of np.ndarray\r\n        A list of predictions of genuine signatures (each element on the list is the\r\n        prediction for one user)\r\n    skilled_preds: list of np.ndarray\r\n        A list of predictions of skilled forgeries (each element on the list is the\r\n        prediction for one user)\r\n\r\n    Returns\r\n    -------\r\n    float\r\n        The Equal Error Rate when using user-specific thresholds\r\n\r\n    \"\"\"\r\n    all_genuine_errors = []\r\n    all_skilled_errors = []\r\n\r\n    nRealPreds = 0\r\n    nSkilledPreds = 0\r\n\r\n    for this_real_preds, this_skilled_preds in zip(genuine_preds, skilled_preds):\r\n        # Build the labels and scores for this user's ROC curve\r\n        y_true = np.ones(len(this_real_preds) + len(this_skilled_preds))\r\n        y_true[len(this_real_preds):] = -1\r\n        y_scores = np.concatenate([this_real_preds, this_skilled_preds])\r\n\r\n        # Calculate user threshold\r\n        fpr, tpr, thresholds = sk_metrics.roc_curve(y_true, y_scores)\r\n        # Select the threshold closest to (FPR = 1 - TPR).\r\n        t = thresholds[sorted(enumerate(abs(fpr - (1 - tpr))), key=lambda x: x[1])[0][0]]\r\n\r\n        genuineErrors = np.sum(this_real_preds < t)\r\n        skilledErrors = np.sum(this_skilled_preds >= t)\r\n\r\n        all_genuine_errors.append(genuineErrors)\r\n        all_skilled_errors.append(skilledErrors)\r\n\r\n        nRealPreds += len(this_real_preds)\r\n        nSkilledPreds += len(this_skilled_preds)\r\n\r\n    genuineErrors = float(np.sum(all_genuine_errors)) / nRealPreds\r\n    skilledErrors = float(np.sum(all_skilled_errors)) / nSkilledPreds\r\n\r\n    # Errors should be nearly equal, up to a small rounding error since we have few examples per user.\r\n    EER = (genuineErrors + skilledErrors) / 2.0\r\n    return EER","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":9701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
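A tiny worked example of the a-posteriori threshold search used in compute_EER above (assuming higher scores mean "genuine"): the threshold is taken where FPR is closest to 1 - TPR, i.e. where the two error rates meet.

import numpy as np
from sklearn.metrics import roc_curve

genuine = np.array([0.9, 0.8, 0.75, 0.3])
forgery = np.array([0.7, 0.4, 0.2, 0.1])
y_true = np.concatenate([np.ones_like(genuine), -np.ones_like(forgery)])
scores = np.concatenate([genuine, forgery])
fpr, tpr, thresholds = roc_curve(y_true, scores)
t = thresholds[np.argmin(np.abs(fpr - (1 - tpr)))]
frr = np.mean(genuine < t)    # falsely rejected genuines
far = np.mean(forgery >= t)   # falsely accepted forgeries
print(t, (frr + far) / 2)     # -> 0.7 0.25: one error on each side of the threshold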
{"seq_id":"456600017","text":"import Tkinter\nimport svm\nwindow = Tkinter.Tk()\nwindow.configure(background=\"#a1dbcd\")\nwindow.title(\"Sentiment Predictor\")\nwindow.geometry(\"400x400\")\n\n\ndef OnButtonClick():\n    def numbers_to_strings(argument):\n        switcher = {\n            4: \"Positive\",\n            2: \"Neutral\",\n            0: \"Negative\",\n        }\n        return switcher.get(argument, \"nothing\")\n    text = entryVariable.get()\n    if not text:\n        labelVariable.set(\"Cannot Predict Sentiment Of Blank Spaces. Please Enter Text\")\n    else:\n        labelVariable.set(numbers_to_strings(int(svm.predict(text, svm.MODEL))))\n    entryLabel.focus_set()\n    entryLabel.selection_range(0, Tkinter.END)\n\ndef OnPressEnter(event):\n    # identical behaviour to the button, so simply delegate\n    OnButtonClick()\n\n\nlabel = Tkinter.Label(window, text = \"Enter Tweet to predict its Sentiment: \", fg = \"#383a39\", bg = \"#a1dbcd\")\nlabel.pack(pady = 10)\n\nentryVariable = Tkinter.StringVar()\nentryLabel = Tkinter.Entry(window, textvariable= entryVariable)\nentryLabel.bind(\"<Return>\", OnPressEnter)\nentryLabel.pack(pady = 10, ipadx= 100, ipady =20)\n\nbtn = Tkinter.Button(window, text=\"Predict\", fg = \"#383a39\", bg = \"green\", command= OnButtonClick)\nbtn.pack(pady = 10)\n\nlabelVariable = Tkinter.StringVar()\nlabel2 = Tkinter.Label(window, text = \"Sentiment : \", fg = \"#383a39\", bg = \"#a1dbcd\", textvariable=labelVariable)\nlabel2.pack(pady = 10)\n\nwindow.mainloop()","sub_path":"Twitter/src/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"604889949","text":"#\n# Jasy - Web Tooling Framework\n# Copyright 2010-2012 Sebastian Werner\n#\n\nimport logging, re\n\n\n__all__ = [\"markdown\", \"markdown2html\", \"code2highlight\"]\n\n\ndef markdown(text, code=True):\n    html = markdown2html(text)\n    if code and html is not None:\n        html = code2highlight(html)\n    \n    return html\n\n\nimport misaka\n\nmisakaExt = misaka.EXT_AUTOLINK | misaka.EXT_NO_INTRA_EMPHASIS | misaka.EXT_FENCED_CODE\nmisakaRender = misaka.HTML_SKIP_STYLE | misaka.HTML_SMARTYPANTS\n\ndef markdown2html(markdownStr):\n    return misaka.html(markdownStr, misakaExt, misakaRender)\n\n\n# By http://misaka.61924.nl/#toc_3\n\nfrom pygments import highlight\nfrom pygments.formatters import HtmlFormatter\nfrom pygments.lexers import get_lexer_by_name\n\ncodeblock = re.compile(r'<pre(?: lang=\"([a-z0-9]+)\")?><code(?: class=\"([a-z0-9]+)\")?>(.*?)</code></pre>', re.IGNORECASE | re.DOTALL)\n\ndef code2highlight(html):\n    def unescape(html):\n        html = html.replace('&lt;', '<')\n        html = html.replace('&gt;', '>')\n        html = html.replace('&amp;', '&')\n        return html.replace('&#39;', \"'\")\n    \n    def replace(match):\n        language, classname, code = match.groups()\n        if language is None:\n            language = classname if classname else \"javascript\"\n        \n        lexer = get_lexer_by_name(language, tabsize=2)\n        formatter = HtmlFormatter(linenos=\"table\")\n        \n        return highlight(unescape(code), lexer, formatter)\n    \n    return codeblock.sub(replace, html)\n\n\n","sub_path":"jasy/core/Markdown.py","file_name":"Markdown.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"187051966","text":"\r\n\r\ndef instructions():\r\n    print(\"Change Counter\\nPlease enter the count of each coin type.\\n\")\r\ndef request_inputs():\r\n    while True:\r\n        quarters = int(input(\"Quarters: \"))\r\n        dimes = int(input(\"Dimes: \"))\r\n        nickels = int(input(\"Nickels: \"))\r\n        pennies = int(input(\"Pennies: \"))\r\n        if quarters < 0 or dimes < 0 or nickels < 0 or pennies < 0:\r\n            print(\"Please enter positive amounts.\")\r\n        else:\r\n            break\r\n    return quarters, dimes, nickels, pennies\r\n\r\ndef sum_change(quarters, dimes, nickels, pennies):\r\n    total = quarters * 25 + dimes * 10 + nickels * 5 + pennies\r\n    return total\r\n\r\ndef print_total(t):\r\n    print(f\"\\nThe total value of your change is ${t/100:,.2f}\")\r\n\r\ndef test():\r\n    assert sum_change(0,0,0,0) == 0\r\n    assert sum_change(1,1,1,1) == 41\r\n    assert sum_change(3,3,4,5) == 130\r\n    assert sum_change(2,4,3,37) == 142\r\n    assert 
sum_change(4,5,-2,65) == 205\r\n\r\ndef main():\r\n    instructions()\r\n    q,d,n,p = request_inputs()\r\n    total = sum_change(q,d,n,p)\r\n    print_total(total)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    test()\r\n    main()\r\n","sub_path":".idea/inspectionProfiles/change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"568972229","text":"from django.core.management.base import BaseCommand\n\nfrom app.models import Player, PlayerRole\n\n\nclass Command(BaseCommand):\n    help = 'Prints a list of all zombie emails'\n\n    def handle(self, *args, **options):\n        players = Player.objects.exclude(role=PlayerRole.HUMAN).all()\n        player_emails = [p.user.email for p in players]\n        print(\", \".join(player_emails))\n","sub_path":"app/management/commands/zombie_emails.py","file_name":"zombie_emails.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"425159135","text":"from ahk import AHK\nimport time\nimport logging\nfrom window_interaction import WindowInteraction\n\nclass WingEdit:\n    \"\"\" Interface to wing edit box \"\"\"\n    def __init__(self, window_interaction : WindowInteraction):\n        self.wi = window_interaction\n        self.current_index = 0\n        self.section_gap = 10\n        self.interface_map = {\n            'y' : 7,\n            'chord' : 8,\n            'offset' : 9,\n            'dihedral' : 10,\n            'twist' : 11,\n            'foil' : 12,\n            'x_panels' : 13,\n            'x_dist' : 14,\n            'y_panels' : 15,\n            'y_dist' : 16\n        }\n    \n    def start(self):\n        \"\"\" Starts the window; must be done at the beginning of every new interaction with the window \"\"\"\n        self.wi.ctrl_press(\"W\")\n        self.wi.get_window(\"Wing Edit\")\n        self.current_index = 0\n    \n    def end(self):\n        \"\"\" Save and close the window \"\"\"\n        self.wi.press(\"{Esc}\")\n        # Check we have the save box\n        \n        self.wi.press(\"{Enter}\")\n    \n    def select_field(self, name : str, section : int):\n        \"\"\" Select a field based on name and section number. See interface_map \"\"\"\n        self.wi.get_window(\"Wing Edit\")\n        self.wi.field_selector(self.section_index(name, section))\n    \n    def section_index(self, name : str, section : int):\n        \"\"\" Get the index for a given name and section, taking into account the current index \"\"\"\n        if name not in self.interface_map.keys():\n            raise FieldNotFound\n        \n        key_presses = self.interface_map[name] + section*self.section_gap - self.current_index\n        \n        if key_presses < 0:\n            # The 'cursor' has gone past the field. 
We gonna have to save and reopen..\n self.end()\n self.start()\n return self.section_index(name, section)\n else:\n return key_presses \n\n def enter_number(self, value : float):\n \"\"\" Just wraps WI function for pressing a key \"\"\"\n self.wi.press(f\"{value}\")\n\n def edit_field(self, value : float, name : str, section : int):\n \"\"\" change the value of a field given: value, name, section \"\"\"\n self.select_field(name, section)\n self.enter_number(value)\n\n\n# define Python user-defined exceptions\nclass Error(Exception):\n \"\"\"Base class for other exceptions\"\"\"\n pass\nclass FieldNotFound(Error):\n \"\"\"Raised when the input value is too small\"\"\"\n pass","sub_path":"wingplane_design.py","file_name":"wingplane_design.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"495582065","text":"import pickle\nfrom UtilGp import UtilGp\n\nclass CardData:\n def __init__(self, name, mobileNo, dob, netSalary, adharNo):\n self.cardId=0;\n self.name=name;self.mobileNo=mobileNo;self.dob=dob;self.netSalary=netSalary;\n self.cardNo='8000';self.cardType=\"\";self.creditLimit=0;\n self.creditUsed=0;self.passCode='1234';self.adharNo=adharNo;\n self.rewardPoint=0;self.rewardUsed=0;\n def __repr__(self):\n return \"%s- %s-%s-%s-%s- %s-%s-%s -%s-%s -%s-%s %s-%s\"%(str(self.cardId),\n self.name,self.mobileNo,self.dob,str(self.netSalary),\n str(self.cardNo),self.cardType,self.creditLimit,\n self.creditUsed,self.creditBal(),\n self.passCode,self.adharNo,str(self.rewardPoint),\n str(self.rewardUsed))\n def creditBal(self):\n return self.creditLimit - self.creditUsed\n def preCardRegister(self):\n from dBase import ndb\n dbCard = ndb.dbCard\n cardNo = DbCardData.genCardNo(dbCard);\n creditLimit = self.netSalary * 20 / 100;\n cardType = 'Gold' if creditLimit<100000 else 'Platinum'\n self.cardNo=cardNo\n self.creditLimit=creditLimit\n self.cardType=cardType\n self.cardId = len(dbCard)+1\n\nDbCardDataFile=\"data/Card.dat\"\nclass DbCardData:\n @staticmethod\n def addToCard(valueCard):\n with open(DbCardDataFile, 'ab') as output:\n pickle.dump(valueCard, output, pickle.HIGHEST_PROTOCOL)\n @staticmethod\n def initDbCards(): #list Of Cards\n with open(DbCardDataFile, 'wb') as output:\n pass\n @staticmethod\n def updateCards(dbCard,argCard): #list Of Cards\n with open(DbCardDataFile, 'wb') as output:\n for valueCard in dbCard:\n if valueCard.cardNo==argCard.cardNo:\n pickle.dump(argCard, output, pickle.HIGHEST_PROTOCOL)\n else:\n pickle.dump(valueCard, output, pickle.HIGHEST_PROTOCOL)\n @staticmethod\n def readCards():\n dbCard = []\n with open(DbCardDataFile, \"rb\") as f:\n while True:\n try:\n dbCard.append(pickle.load(f))\n except EOFError:\n break\n return dbCard\n @staticmethod\n def checkCardNoExist(cardNo, dbCard):\n for valueCard in dbCard:\n if valueCard.cardNo == cardNo:\n return True\n return False\n @staticmethod\n def countCardByAdhaar(dbCard,adhaarNo):\n count = 0\n for valueCard in dbCard:\n if valueCard.adharNo == adhaarNo:\n count += 1\n return count\n @staticmethod\n def genCardNo(dbCard):\n noStart = 8000;noEnd = 9000;stepValue = 1;\n cardNo = UtilGp.randNo(noStart, noEnd, stepValue,True)\n while DbCardData.checkCardNoExist(cardNo,dbCard):\n cardNo = UtilGp.randNo(noStart, noEnd, stepValue,True)\n cardNo = str(cardNo)\n return cardNo\n @staticmethod\n def getCardObject(dbCard, cardNo):\n for valueCard in dbCard:\n if valueCard.cardNo==cardNo:\n return (True, valueCard)\n return (False, 
None)\n\n","sub_path":"expenseManager/app/CardData.py","file_name":"CardData.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"3426892","text":"from django.test import TestCase\nfrom pool.models import PickSet\nfrom pool.views import round_1_expiration_time\nfrom unittest import skip\nimport datetime\n\n\n# Create your tests here.\nclass HomePageTest(TestCase):\n\n def test_uses_home_template(self):\n response = self.client.get('/')\n self.assertTemplateUsed(response, 'home.html')\n\n def test_only_saves_when_necessary(self):\n self.client.get('/')\n self.assertEqual(PickSet.objects.count(), 0)\n\n\nclass PicksViewTest(TestCase):\n\n def test_uses_picks_template(self):\n pick_set = PickSet.objects.create()\n response = self.client.get(f'/picks/{pick_set.id}/')\n self.assertTemplateUsed(response, 'picks.html')\n\n def test_displays_home_team_picks_as_default(self):\n pick_set = PickSet.objects.create(\n round_1_game_1=3,\n round_1_game_2=3,\n round_1_game_3=3,\n round_1_game_4=3,\n )\n\n response = self.client.get(f'/picks/{pick_set.id}/')\n self.assertContains(response, 'Texans')\n self.assertContains(response, 'Cowboys')\n self.assertContains(response, 'Ravens')\n self.assertContains(response, 'Bears')\n\n def test_displays_visiting_team_picks(self):\n pick_set = PickSet.objects.create(\n round_1_game_1_team=0,\n round_1_game_1=3,\n round_1_game_2_team=0,\n round_1_game_2=3,\n round_1_game_3_team=0,\n round_1_game_3=3,\n round_1_game_4_team=0,\n round_1_game_4=3,\n )\n\n response = self.client.get(f'/picks/{pick_set.id}/')\n self.assertContains(response, 'Colts')\n self.assertContains(response, 'Seahawks')\n self.assertContains(response, 'Chargers')\n self.assertContains(response, 'Eagles')\n\n def test_displays_negative_picks(self):\n other_set = PickSet.objects.create(\n round_1_game_1=-7,\n round_1_game_2=-10,\n round_1_game_3=-3,\n round_1_game_4=-14,\n )\n\n response = self.client.get(f'/picks/{other_set.id}/')\n self.assertContains(response, '-7')\n self.assertContains(response, '-10')\n self.assertContains(response, '-3')\n self.assertContains(response, '-14')\n\n def test_passses_correct_pick_set_to_template(self):\n correct_set = PickSet.objects.create(\n round_1_game_1=3,\n round_1_game_2=3,\n round_1_game_3=3,\n round_1_game_4=3,\n )\n other_set = PickSet.objects.create(\n round_1_game_1=-7,\n round_1_game_2=-7,\n round_1_game_3=-7,\n round_1_game_4=-7,\n )\n response = self.client.get(f'/picks/{correct_set.id}/')\n self.assertEqual(response.context['pick_set'], correct_set)\n\n @skip(\"Time based testing\")\n def test_cannot_edit_after_games_start(self):\n # Need to set the expiration time as start_time + 5 seconds\n pick_set = PickSet.objects.create(\n round_1_game_1=-7,\n round_1_game_2=-10,\n round_1_game_3=-3,\n round_1_game_4=-14,\n )\n\n response = self.client.get(f'/picks/{pick_set.id}/')\n self.assertContains(response, 'edit_picks')\n # time.sleep(3)\n response = self.client.get(f'/picks/{pick_set.id}/')\n self.assertNotContains(response, 'edit_picks')\n\n\nclass PicksEditTest(TestCase):\n\n def test_uses_edit_template(self):\n pick_set = PickSet.objects.create()\n response = self.client.get(f'/picks/{pick_set.id}/edit/')\n self.assertTemplateUsed(response, 'edit.html')\n\n def test_passes_correct_pick_set_template(self):\n pick_set = PickSet.objects.create(\n round_1_game_1=3,\n round_1_game_2=3,\n round_1_game_3=3,\n round_1_game_4=3,\n )\n response = 
self.client.get(f'/picks/{pick_set.id}/edit/')\n        self.assertContains(response, 'Texans')\n        self.assertContains(response, '\"selected\">Cowboys')\n        self.assertContains(response, '\"selected\">Ravens')\n        self.assertContains(response, '\"selected\">Bears')\n\n    def test_shows_selected_visiting_teams(self):\n        pick_set = PickSet.objects.create(\n            round_1_game_1_team=0,\n            round_1_game_1=3,\n            round_1_game_2_team=0,\n            round_1_game_2=3,\n            round_1_game_3_team=0,\n            round_1_game_3=3,\n            round_1_game_4_team=0,\n            round_1_game_4=3,\n        )\n        response = self.client.get(f'/picks/{pick_set.id}/edit/')\n        self.assertContains(response, '\"selected\">Colts')\n        self.assertContains(response, '\"selected\">Seahawks')\n        self.assertContains(response, '\"selected\">Chargers')\n        self.assertContains(response, '\"selected\">Eagles')\n\n    @skip(\"Time based testing\")\n    def test_cannot_edit_after_games_start(self):\n        # Need to set the expiration time as start_time + 5 seconds\n        other_set = PickSet.objects.create(\n            round_1_game_1=-7,\n            round_1_game_2=-10,\n            round_1_game_3=-3,\n            round_1_game_4=-14,\n        )\n\n        response = self.client.get(f'/picks/{other_set.id}/edit/')\n        print(response.content.decode())\n        self.assertContains(response, '',views.detail, name='detail'),\n    path('submit/', views.submit, name=\"submit\"),\n    path('finish/', views.finish, name=\"finish\"),\n    path('edit/', views.edit, name=\"edit\"),\n    path('apply_modify/', views.apply_modify, name=\"apply_modify\"),\n    path('apply_modify//', views.apply_modify, name=\"apply_modify\"),\n]\n ","sub_path":"application/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"625332864","text":"# Function that finds common values between lists\nlist_a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nlist_b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n\ndef common():\n    for i in range(len(list_a)):\n        both = list_a[i]\n        if both in list_b:\n            print(both, end=\" \")\ncommon()\n","sub_path":"pratice-python.org/005.py","file_name":"005.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"299139329","text":"# constants block\ncombin = pow(2,10)\nvol = 3000 \n\n\n\n# functions block\n\ndef proporcion(val,peso):\n    return val/float(peso)\n\n# main program\n\nelementos = [] # items array\npropor = []\norden = []\n\nelementos.append([1800,72])\nelementos.append([600,36])\nelementos.append([1200,60])\n\n\nfor i in range(3):\n\n    p = proporcion(elementos[i][1],elementos[i][0])\n\n    propor.append([p,i])\n\npropor.sort(reverse=True)\n\n\nacum = 0\n\nfor i in range(3):\n\n    acum = acum + elementos[propor[i][1]][0] # weight of item i\n\n    if(acum <= vol):\n\n        orden.append(propor[i][1])\n    else:\n        acum = acum - elementos[propor[i][1]][0]\n\nsumValor = 0\nsumPeso = 0\nprint('Greedy method (3 items)')\nprint('Knapsack contents')\nfor i in range(len(orden)):\n\n    print(orden[i]+1,elementos[orden[i]])\n\n    sumValor = sumValor+ elementos[orden[i]][1]\n    sumPeso = sumPeso + elementos[orden[i]][0]\n\nprint('Knapsack value: ',sumValor)\nprint('Knapsack weight: ',sumPeso)\n","sub_path":"Mochila/3elemGreedy.py","file_name":"3elemGreedy.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
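A compact restatement of the ratio-greedy in the record above (items as (weight, value) pairs, capacity 3000), with the expected selection checked:

def greedy_knapsack(items, capacity):
    # sort item indices by value/weight ratio, best first
    order = sorted(range(len(items)), key=lambda i: items[i][1] / items[i][0], reverse=True)
    chosen, weight = [], 0
    for i in order:
        if weight + items[i][0] <= capacity:
            chosen.append(i)
            weight += items[i][0]
    return chosen

# ratios are 0.04, 0.06 and 0.05, so items 2 and 3 fit and item 1 (weight 1800) does not
assert greedy_knapsack([(1800, 72), (600, 36), (1200, 60)], 3000) == [1, 2]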
numpy\nimport xml.etree.ElementTree as xml\n#import _bpy\nfrom PIL import Image\nfrom utils_boxes import bbox_transform_inv\nimport random\n\nin_img = '/home/user/Документы/hard_hat/1_horizontal'\nout_img = '/home/user/Документы/cropped_objects'\nannotations = '/home/user/Документы/hard_hat/1_annotations'\n\ndef resize(xmin, ymin, xmax, ymax) :\n deltay = int(ymax) - int(ymin)\n deltax = int(xmax) - int(xmin)\n if deltay > deltax:\n xmin = xmin - (deltay - deltax) / 2\n xmax = xmax + (deltay - deltax) / 2\n else:\n ymin = ymin - (deltax - deltay) / 2\n ymax = ymax + (deltax - deltay) / 2\n return xmin, ymin, xmax, ymax\n\ndef cut (xmin, ymin, xmax, ymax) :\n deltay = int(ymax) - int(ymin)\n deltax = int(xmax) - int(xmin)\n if deltay < deltax:\n xmin = xmin + (deltax - deltay) / 2\n xmax = xmax - (deltax - deltay) / 2\n else:\n ymin = ymin + (deltay - deltax) / 2\n ymax = ymax - (deltay - deltax) / 2\n return xmin, ymin, xmax, ymax\n\ndef create_background(name, width, height):\n boxes=[None]*4\n for items in range(2):\n value = random.randint(0, width)\n boxes[items] = int(value)\n # boxes.sort()\n if boxes[0] > boxes[1]:\n temp = boxes[0]\n boxes[0] = boxes[1]\n boxes[1] = temp\n items = 2\n while items < 4:\n value = random.randint(0, height)\n boxes[items] = int(value)\n items = items + 1\n if boxes[2] > boxes[3]:\n temp = boxes[2]\n boxes[2] = boxes[3]\n boxes[3] = temp\n boxes = resize(boxes[0], boxes[1], boxes[2], boxes[3])\n xmin, ymin, xmax, ymax = boxes\n if xmin < 0:\n xmin = 0\n if xmax > width:\n xmax = width\n if ymin < 0:\n ymin = 0\n if ymax > height:\n ymax = height\n boxes = cut(xmin, ymin, xmax, ymax)\n\n img = in_img + '/' + name.replace('xml', 'jpg')\n try:\n cropped = imageObject.crop(boxes)\n # Display the cropped portion\n save_location = out_img + '/' + 'background' + '/' + name.replace('xml', 'jpg')\n cropped.save(save_location)\n except: return\n\n\n\nimglist = os.listdir(in_img)\nimglist.sort()\n\nannlist = os.listdir(annotations)\nannlist.sort()\n\nfor ann in annlist:\n\n tree = xml.parse(annotations + '/' + ann)\n root = tree.getroot()\n for elem in root:\n for subelem in elem:\n if subelem.tag == 'name':\n classname = subelem.text\n if subelem.tag == 'bndbox':\n imageObject = Image.open(in_img + '/' + ann.replace('xml', 'jpg'))\n width, height = imageObject.size\n '''box= [None]*4\n i = 0\n for values in subelem:\n box[i] = int(values.text)\n i=i+1\n xmin, ymin, xmax, ymax = box\n\n if (classname == 'helmet' or classname == 'head' or classname == 'helmet_off' or classname == 'hood' or classname == 'hat'):\n ymin = ymin - 0.1 * ymin\n ymax = ymax + 0.2 * ymax\n\n \n \n xmin, ymin, xmax, ymax = resize(xmin, ymin, xmax, ymax)\n if xmin < 0 :\n xmin = 0\n if xmax > width :\n xmax = width\n if ymin < 0 :\n ymin =0\n if ymax > height :\n ymax = height\n box = cut(xmin, ymin, xmax, ymax)\n #box = xmin, ymin, xmax, ymax\n img = in_img + '/' + ann.replace('xml', 'jpg')\n #box = bbox_transform_inv(box)\n # Create an Image object from an Image\n\n # Crop the image\n cropped = imageObject.crop(box)\n # Display the cropped portion\n save_location = out_img + '/' + classname + '/'+ ann.replace('xml', 'jpg')\n cropped.save(save_location)\n #cropped.show()'''\n create_background(ann, width, height)\n","sub_path":"crop_object_background.py","file_name":"crop_object_background.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"330667747","text":"# -*- coding: utf-8 -*-\n__author__ 
= 'Akinava'\n__author_email__ = 'akinava@gmail.com'\n__copyright__ = \"Copyright © 2019\"\n__license__ = \"MIT License\"\n__version__ = [0, 0]\n\n\nimport sys\nimport os\nfrom pathlib import Path\nimport asyncio\npath = Path(os.path.dirname(os.path.realpath(__file__))).parent\nsys.path.append(os.path.join(path, 'src'))\n\n\nfrom client_host import Client\nfrom settings import logger\nfrom utilit import now\n\n\nPROTOCOL = {\n 'protocol_version': __version__,\n 'packages': {\n 'test_peer_hello': {\n 'name': 'test_peer_hello',\n 'package_id_marker': 128,\n 'define': [\n 'verify_test_peer_hello_package_len',\n 'verify_package_id_marker'],\n 'response': 'test_peer_time',\n 'structure': [\n {'name': 'package_id_marker', 'length': 1}]\n },\n 'test_peer_time': {\n 'name': 'test_peer_time',\n 'package_id_marker': 129,\n 'define': 'verify_package_id_marker',\n 'structure': [\n {'name': 'package_id_marker', 'length': 1},\n {'name': 'peer_time', 'length': len(now())}]\n }\n }\n}\n\n\nclass Handler:\n def init(self):\n logger.info('')\n self.do_test_peer_hello()\n\n def verify_test_peer_hello_package_len(self, package_protocol):\n logger.info('')\n request_length = len(self.connection.get_request())\n required_length = self.parser.calc_requared_length()\n return required_length == request_length\n\n def verify_package_id_marker(self, package_protocol):\n request_id_marker = self.parser.get_part('package_id_marker')\n required_id_marker = package_protocol['package_id_marker']\n return request_id_marker == required_id_marker\n\n def test_peer_time(self):\n logger.info('')\n return self.make_message(package_name='test_peer_time')\n\n def get_peer_time(self, **kwarg):\n return now()\n\n # def get_package_id_marker(self, **kwarg):\n # marker = self.parser.find_protocol_package(kwargs['package_name'])['package_id_marker']\n # return self.parser.pack_int(marker, 1)\n\n\nif __name__ == '__main__':\n logger.info('test client start')\n test_client = Client(handler=Handler, protocol=PROTOCOL)\n asyncio.run(test_client.run())\n logger.info('test client shutdown')\n","sub_path":"test/test_peer.py","file_name":"test_peer.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"420945310","text":"#! /usr/bin/python3 \n\nimport os\nimport yaml\nfrom colorama import Fore\nart = \"\"\" \n ___ ____ ____ __ __ ____ ___ _ _ ____ ____ ____ \n/ __)( ___)(_ _)( )( )( _ \\ / __)( )_( )(_ _)( ___)(_ _)\n\\__ \\ )__) )( )(__)( )___/ \\__ \\ ) _ ( _)(_ )__) )( \n(___/(____) (__) (______)(__) (___/(_) (_)(____)(__) (__) \n\"\"\"\ndef installer(packages):\n print(Fore.YELLOW + \"\\nType 'all' to install all packages or choose a single package\\n\")\n i = 1\n for p in packages:\n print(Fore.RED + '[',i,']',p[0],'\\n')\n i = i+1\n option = input(\":$ \")\n if not option == \"all\":\n option = int(option)\n if (option in range(0,len(packages)+1) ):\n os.system(packages[option-1][1])\n else:\n for i in range(len(packages)):\n cmd = packages[i][1]\n os.system(cmd) \n \ndef add():\n print(Fore.YELLOW + \"What's the name of the automated command: \")\n pack_name = str(input(':$ '))\n print(Fore.YELLOW + \"What's the command to run the command: \")\n pack_com = str(input(':$ '))\n pack = [pack_name,pack_com]\n return pack\n\n\ndef main():\n print(art)\n print(Fore.CYAN + \"[!] 
The idea by: @elfalehdev and shoutout to @cryptolake for the update.\\n\")\n print(Fore.RED + \"[1] Install packages \\n[2] Add a package to the config\\n\") \n option = int(input(\":$ \"))\n with open('config.yaml','r') as file:\n config = yaml.safe_load(file)\n packages = config['packages']\n if (option == 1):\n installer(packages)\n elif (option == 2):\n with open('config.yaml','w') as file:\n config['packages'].append(add())\n yaml.dump(config,file)\n \nif __name__=='__main__':main()\n","sub_path":"SS.py","file_name":"SS.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"46642055","text":"vehicle_type = 'mpv'\n\ncl = open('real-first-'+vehicle_type+'s-list', 'r')\nallcl = cl.readlines()\ncl.close()\n\nvehicles = dict()\n\nfor item in allcl:\n\tme, mlyr = item.split('\",\"', 1)\n\tmake = me[1:].strip()\n\tmodel, year = mlyr.strip().split('\",', 1)\n\tif make not in vehicles:\n\t\tvehicles[make] = dict()\n\tif model not in vehicles[make]:\n\t\tvehicles[make][model] = []\n\tvehicles[make][model].append(int(year))\n\ncy = open('combined-years-'+vehicle_type+'s-list', 'w')\n\nfor make in vehicles:\n\tfor model in vehicles[make]:\n\t\tmodel_years = vehicles[make][model]\n\t\tnum_years = len(model_years)\n\t\tif num_years == 1:\n\t\t\twrite_str = '\"' + make + '\",\"' + model + '\",' + str(model_years[0]) + '\\n'\n\t\t\tcy.write(write_str)\n\t\telse:\n\t\t\tstart_year = model_years[0]\n\t\t\tcurr_years = [start_year]\n\t\t\tfor i in range(1,num_years):\n\t\t\t\tend_year = model_years[i]\n\t\t\t\tif (end_year - start_year) < 4:\n\t\t\t\t\tcurr_years.append(end_year)\n\t\t\t\telse:\n\t\t\t\t\tif len(curr_years) == 1:\n\t\t\t\t\t\twrite_str = '\"' + make + '\",\"' + model + '\",' + str(curr_years[0]) + '\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\twrite_str = '\"' + make + '\",\"' + model + '\",' + str(curr_years[0]) + ',' + str(curr_years[-1]) + '\\n'\n\t\t\t\t\tstart_year = end_year\n\t\t\t\t\tcurr_years = [start_year]\n\t\t\t\t\tcy.write(write_str)\n\t\t\tif len(curr_years) == 1:\n\t\t\t\twrite_str = '\"' + make + '\",\"' + model + '\",' + str(curr_years[0]) + '\\n'\n\t\t\telse:\n\t\t\t\twrite_str = '\"' + make + '\",\"' + model + '\",' + str(curr_years[0]) + ',' + str(curr_years[-1]) + '\\n'\n\t\t\tcy.write(write_str)\ncy.close()","sub_path":"scripts/year_combine.py","file_name":"year_combine.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"592623582","text":"#!/usr/bin/env python3\n# encoding=utf-8\nimport pyglet\n\nwindow = pyglet.window.Window()\n\n\n@window.event\ndef on_key_press(symbol, modifiers):\n print(symbol, modifiers)\n\n if symbol == pyglet.window.key.A:\n print('The \"A\" key was pressed.')\n elif symbol == pyglet.window.key.LEFT:\n print('The left arrow key was pressed.')\n elif symbol == pyglet.window.key.ENTER:\n print('The enter key was pressed.')\n\n\n@window.event\ndef on_mouse_press(x, y, button, modifiers):\n print(x, y, button, modifiers)\n\n if button == pyglet.window.mouse.LEFT:\n print('The left mouse button was pressed.')\n\n\n@window.event\ndef on_draw():\n window.clear()\n\n\n# There are more than 20 event types that you can handle on a window. 
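# --- Added sketch (not part of the original example file). Besides
# decorating functions on the window, pyglet can also dispatch events to any
# object whose method names match the event names, which scales better once
# many of those 20+ event types are handled; `window` is assumed to be the
# Window created above.
class EventPrinter:
    def on_key_press(self, symbol, modifiers):
        print('key press:', symbol, modifiers)

    def on_mouse_press(self, x, y, button, modifiers):
        print('mouse press:', x, y, button, modifiers)

window.push_handlers(EventPrinter())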
An\n# easy way to find the event names and parameters you need is to add the\n# following lines to your program:\nevent_logger = pyglet.window.event.WindowEventLogger()\nwindow.push_handlers(event_logger)\n\npyglet.app.run()\n","sub_path":"pyglet-app/03-handle-mouse-and-keyboard-events.py","file_name":"03-handle-mouse-and-keyboard-events.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"29308402","text":"\ntitle = \"Knight's Tale Creator 3000\"\nprint(\"*\" * 80)\nprint(title)\nprint(\"*\" * 80)\n\n# Gather input from the player\nplayer_name = input(\"Enter your name: \")\nadjective = input(\"Enter an adjective: \")\nfamous_person = input(\"Enter the name of a famous person: \")\nanimal = input(\"Enter the name of an animal: \")\nvacation_place = input(\"Enter a place you would go on vacation: \")\nsharp_thing = input(\"Enter the name of something sharp: \")\nexclamation = input(\"Enter something you might exclaim aloud: \")\n\n# Create the sentences by joining the input words\nsentence1 = \"There was a brave knight, \" + player_name + \", who was sent on a quest to vanquish the \" + adjective + \" evildoer, \" + famous_person + \". \" \nsentence2 = \"Riding on his/her trusty \" + animal + \", the brave \" + player_name + \" traveled to the faraway land of \" + vacation_place + \". \" \nsentence3 = player_name + \" battled valiantly against \" + famous_person + \"’s army using his \" + sharp_thing + \" until he defeated them. \"\nsentence4 = \"Emerging victorious, \" + player_name + \" exclaimed, '\" + exclamation + \"!!!' I claim the land of \" + vacation_place + \" in the name of Python.\"\n\n# Join together the sentences and display it to the screen\ntale = sentence1 + sentence2 + sentence3 + sentence4\nprint(tale)\n","sub_path":"appendixC/CH3_KnightsTale.py","file_name":"CH3_KnightsTale.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"157286785","text":"from cs50 import get_float\nimport math\n\nowed = get_float(\"Change: \")\n\nwhile owed < 0:\n    owed = get_float(\"Change: \")\n\nowed = math.floor(owed * 100)\n\ncounter = 0\ndimecounter = 0\nnickelcounter = 0\npennycounter = 0\n\n#Control flow: Check if quarters go in then divide and assign variables. Rinse and repeat. 
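# --- Added sketch (not part of the original solution): the cascade of
# if-blocks below can be written compactly with divmod over the coin
# denominations; `cents` plays the role of the floored `owed` computed above.
def count_coins(cents, denominations=(25, 10, 5, 1)):
    total = 0
    for coin in denominations:
        used, cents = divmod(cents, coin)
        total += used
    return total

# count_coins(141) == 8   (5 quarters + 1 dime + 1 nickel + 1 penny)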
Update counter as you go.\nif owed >= 25:\n counter = math.floor(owed / 25)\n owed = owed - (counter * 25)\n\nif owed >= 10:\n dimecounter = math.floor(owed / 10)\n counter = counter + dimecounter\n owed = owed - (dimecounter * 10)\n\nif owed >= 5:\n nickelcounter = math.floor(owed / 5)\n counter = counter + nickelcounter\n owed = owed - (nickelcounter * 5)\n\nif owed >= 1:\n pennycounter = math.floor(owed / 1)\n counter = counter + pennycounter\n owed = owed - (pennycounter * 1)\n\n#print the final counter.\nprint(counter)","sub_path":"pset6/cash/cash.py","file_name":"cash.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"111738328","text":"import os\nimport argparse\n\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nfrom tensorboardX import SummaryWriter\n\nfrom training import train\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=64,\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=512,\n help='input batch size for testing (default: 512)')\n parser.add_argument('--epochs', type=int, default=1,\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.01,\n help='learning rate (default: 0.01)')\n parser.add_argument('--folds', type=int, default=5,\n help='number of folds for KFold validation (default: 5)')\n parser.add_argument('--image-size', type=int, default=224,\n help='the size of the image (default: 224)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training (default False)')\n parser.add_argument('--seed', type=int, default=1,\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=5,\n help='how many batches to wait before logging training status (default 5)')\n parser.add_argument('--save-model', type=bool, default=True,\n help='for saving the current model (default false)')\n parser.add_argument('--dir', default='logs',\n help='directory where summary logs are stored')\n if dist.is_available():\n parser.add_argument('--nodes', type=int, default=1,\n help='number of nodes (default: 1)')\n parser.add_argument('--nr', type=int, default=0,\n help='ranking within the nodes')\n parser.add_argument('--gpus', type=int, default=1,\n help='number of gpus per node (default: 1)')\n parser.add_argument('--backend', type=str, help='distributed backend',\n choices=[dist.Backend.GLOO, dist.Backend.NCCL],\n default=dist.Backend.GLOO)\n parser.add_argument('--host', type=str, default=\"localhost\",\n help='master address')\n parser.add_argument('--port', type=str, default=\"5000\",\n help='master port')\n args = parser.parse_args()\n\n torch.manual_seed(args.seed)\n\n if not args.no_cuda and torch.cuda.is_available():\n print(\"Using CUDA\")\n\n if dist.is_available():\n args.world_size = args.gpus * args.nodes\n mp.spawn(train, nprocs=args.gpus, args=(args,))\n\nif __name__ == \"__main__\":\n main()","sub_path":"distributed/ranzcr/ranzrc.py","file_name":"ranzrc.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"224535244","text":"\"\"\"\r\nThis example demonstrates using Mayavi as a component of a large Qt\r\napplication.\r\nFor this use, Mayavi is 
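# --- Added outline (assumed, not this repository's actual code): train() is
# imported from training.py and is not shown here. With world_size set to
# gpus * nodes, each process spawned by mp.spawn typically derives its global
# rank from the node rank (--nr) and its local GPU index before joining the
# process group; host/port mirror the --host/--port arguments above.
import torch.distributed as dist

def train(gpu, args):
    rank = args.nr * args.gpus + gpu          # global rank of this process
    dist.init_process_group(backend=args.backend,
                            init_method='tcp://{}:{}'.format(args.host, args.port),
                            world_size=args.world_size,
                            rank=rank)
    # ... build the model, wrap it in DistributedDataParallel, run the epochs ...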
embedded in a QWidget. To understand this\r\nexample, please read section :ref:`building-applications`.\r\n\"\"\"\r\n\r\n# First, and before importing any Enthought packages, set the ETS_TOOLKIT\r\n# environment variable to qt4, to tell Traits that we will use Qt.\r\nimport os\r\nos.environ['ETS_TOOLKIT'] = 'qt4'\r\n# By default, the PySide binding will be used. If you want the PyQt bindings\r\n# to be used, you need to set the QT_API environment variable to 'pyqt'\r\nos.environ['QT_API'] = 'pyqt'\r\n\r\n# To be able to use PySide or PyQt4 and not run in conflicts with traits,\r\n# we need to import QtGui and QtCore from pyface.qt\r\n#from pyface.qt import QtGui, QtCore\r\n# Alternatively, you can bypass this line, but you need to make sure that\r\n# the following lines are executed before the import of PyQT:\r\nimport sip\r\nsip.setapi('QString', 2)\r\n\r\nfrom traits.api import HasTraits, Instance, Any, on_trait_change\r\nfrom traitsui.api import View, Item, HSplit, VSplit, InstanceEditor\r\nfrom mayavi.core.ui.api import MayaviScene, MlabSceneModel, \\\r\n        SceneEditor\r\n\r\nfrom PyQt4 import QtCore, QtGui\r\n\r\n################################################################################\r\n#The actual visualization\r\nclass Visualization(HasTraits):\r\n\r\n    # The scene model.\r\n    scene = Instance(MlabSceneModel, ())\r\n\r\n    # added: referenced by the View below but never defined in the original file\r\n    current_selection = Any()\r\n\r\n    @on_trait_change('scene.activated')\r\n    def update_plot(self):\r\n        # This function is called when the view is opened. We don't\r\n        # populate the scene when the view is not yet open, as some\r\n        # VTK features require a GLContext.\r\n\r\n        # We can do normal mlab calls on the embedded scene.\r\n        self.scene.mlab.test_points3d()\r\n\r\n    # the layout of the dialog created\r\n    ######################\r\n    view = View(HSplit(VSplit(\r\n                  Item(name='current_selection',\r\n                       editor=InstanceEditor(),\r\n                       enabled_when='current_selection is not None',\r\n                       style='custom',\r\n                       springy=True,\r\n                       show_label=False),\r\n                       ),\r\n                Item(name='scene',\r\n                     editor=SceneEditor(scene_class=MayaviScene),\r\n                     show_label=False,\r\n                     resizable=True,\r\n                     height=500,\r\n                     width=500),\r\n                ),\r\n                resizable=True,\r\n                scrollable=True\r\n                )\r\n\r\n\r\n################################################################################\r\n# The QWidget containing the visualization, this is pure PyQt4 code.\r\nclass MayaviQWidget(QtGui.QWidget):\r\n    def __init__(self, parent=None):\r\n        QtGui.QWidget.__init__(self, parent)\r\n        layout = QtGui.QVBoxLayout(self)\r\n        layout.setContentsMargins(0,0,0,0)\r\n        layout.setSpacing(0)\r\n        self.visualization = Visualization()\r\n\r\n        # If you want to debug, beware that you need to remove the Qt\r\n        # input hook.\r\n        #QtCore.pyqtRemoveInputHook()\r\n        #import pdb ; pdb.set_trace()\r\n        #QtCore.pyqtRestoreInputHook()\r\n\r\n        # The edit_traits call will generate the widget to embed.\r\n        self.ui = self.visualization.edit_traits(parent=self,\r\n                                                 kind='subpanel').control\r\n        layout.addWidget(self.ui)\r\n        self.ui.setParent(self)\r\n\r\n","sub_path":"Versiones anteriores/Version 2.2/QT/prueba_ir3d.py","file_name":"prueba_ir3d.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"307159484","text":"from typing import List\n\n\nclass Solution:\n    \"\"\"\n    Given an integer array nums, reorder it such that nums[0] <= nums[1] >= nums[2] <= nums[3]....\n\n    You may assume the input array always has a valid answer.\n\n    Example 1:\n\n    Input: nums = [3,5,2,1,6,4]\n    Output: [3,5,1,6,2,4]\n    Explanation: [1,6,2,5,3,4] is also accepted.\n 
Example 2:\n\n Input: nums = [6,6,5,6,3,8]\n Output: [6,6,5,6,3,8]\n\n Constraints:\n\n 1 <= nums.length <= 5 * 104\n 0 <= nums[i] <= 104\n It is guaranteed that there will be an answer for the given input nums.\n \"\"\"\n def wiggleSort1(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n nums.sort()\n for i in range(1, len(nums) - 1, 2):\n nums[i + 1], nums[i] = nums[i], nums[i + 1]\n\n def wiggleSort2(self, nums: List[int]) -> None:\n less = True\n for i in range(len(nums) - 1):\n if less:\n if nums[i] > nums[i + 1]:\n nums[i], nums[i + 1] = nums[i + 1], nums[i]\n else:\n if nums[i] < nums[i + 1]:\n nums[i], nums[i + 1] = nums[i + 1], nums[i]\n less = not less\n\n def wiggleSort(self, nums: List[int]) -> None:\n for i in range(len(nums) - 1):\n if (i % 2 == 0 and nums[i] > nums[i + 1]) or (i % 2 == 1 and nums[i] < nums[i + 1]):\n nums[i], nums[i + 1] = nums[i + 1], nums[i]\n\nif __name__ == \"__main__\":\n solution = Solution()\n nums = [3,5,2,1,6,4]\n ret = solution.wiggleSort2(nums)\n print(ret)\n","sub_path":"python_module/examples/280_Wiggle_Sort.py","file_name":"280_Wiggle_Sort.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"294612815","text":"# Copyright 2017 Catalyst IT Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom concurrent import futures\n\nimport futurist\nfrom oslo_serialization import jsonutils\nfrom tempest.lib import decorators\nfrom tempest.lib import exceptions\n\nfrom qinling_tempest_plugin.tests import base\n\n\nclass ExecutionsTest(base.BaseQinlingTest):\n name_prefix = 'ExecutionsTest'\n\n def setUp(self):\n super(ExecutionsTest, self).setUp()\n self.wait_runtime_available(self.runtime_id)\n\n @decorators.idempotent_id('2a93fab0-2dae-4748-b0d4-f06b735ff451')\n def test_crud_execution(self):\n function_id = self.create_function()\n resp, body = self.client.create_execution(function_id,\n input='{\"name\": \"Qinling\"}')\n self.assertEqual(201, resp.status)\n execution_id_1 = body['id']\n self.addCleanup(self.client.delete_resource, 'executions',\n execution_id_1, ignore_notfound=True)\n self.assertEqual('success', body['status'])\n\n # Create another execution without input\n resp, body = self.client.create_execution(function_id)\n self.assertEqual(201, resp.status)\n execution_id_2 = body['id']\n self.addCleanup(self.client.delete_resource, 'executions',\n execution_id_2, ignore_notfound=True)\n self.assertEqual('success', body['status'])\n\n # Get executions\n resp, body = self.client.get_resources('executions')\n self.assertEqual(200, resp.status)\n expected = {execution_id_1, execution_id_2}\n actual = set([execution['id'] for execution in body['executions']])\n self.assertTrue(expected.issubset(actual))\n\n # Delete executions\n resp = self.client.delete_resource('executions', execution_id_1)\n self.assertEqual(204, resp.status)\n resp = self.client.delete_resource('executions', 
execution_id_2)\n self.assertEqual(204, resp.status)\n\n @decorators.idempotent_id('2199d1e6-de7d-4345-8745-a8184d6022b1')\n def test_get_all_admin(self):\n \"\"\"Admin user can get executions of other projects\"\"\"\n function_id = self.create_function()\n resp, body = self.client.create_execution(\n function_id, input='{\"name\": \"Qinling\"}'\n )\n self.assertEqual(201, resp.status)\n\n execution_id = body['id']\n self.addCleanup(self.client.delete_resource, 'executions',\n execution_id, ignore_notfound=True)\n\n resp, body = self.admin_client.get_resources(\n 'executions?all_projects=true'\n )\n self.assertEqual(200, resp.status)\n self.assertIn(\n execution_id,\n [execution['id'] for execution in body['executions']]\n )\n\n @decorators.idempotent_id('009fba47-957e-4de5-82e8-a032386d3ac0')\n def test_get_all_not_allowed(self):\n # Get other projects functions by normal user\n context = self.assertRaises(\n exceptions.Forbidden,\n self.client.get_resources,\n 'executions?all_projects=true'\n )\n self.assertIn(\n 'Operation not allowed',\n context.resp_body.get('faultstring')\n )\n\n @decorators.idempotent_id('794cdfb2-0a27-4e56-86e8-be18eee9400f')\n def test_create_with_function_version(self):\n function_id = self.create_function()\n execution_id = self.create_execution(function_id)\n resp, body = self.client.get_execution_log(execution_id)\n self.assertEqual(200, resp.status)\n self.assertIn('Hello, World', body)\n\n version_1 = self.create_function_version(function_id)\n execution_id = self.create_execution(function_id, version=version_1)\n resp, body = self.client.get_execution_log(execution_id)\n self.assertEqual(200, resp.status)\n self.assertIn('Hello, World', body)\n\n self.update_function_package(function_id,\n \"python/test_python_sleep.py\")\n version_2 = self.create_function_version(function_id)\n execution_id = self.create_execution(function_id, version=version_2)\n resp, body = self.client.get_execution_log(execution_id)\n self.assertEqual(200, resp.status)\n self.assertNotIn('Hello, World', body)\n\n @decorators.idempotent_id('8096cc52-64d2-4660-a657-9ac0bdd743ae')\n def test_execution_async(self):\n function_id = self.create_function()\n resp, body = self.client.create_execution(function_id, sync=False)\n self.assertEqual(201, resp.status)\n\n execution_id = body['id']\n self.addCleanup(self.client.delete_resource, 'executions',\n execution_id, ignore_notfound=True)\n\n self.assertEqual('running', body['status'])\n self.wait_execution_success(execution_id)\n\n @decorators.idempotent_id('6cb47b1d-a8c6-48f2-a92f-c4f613c33d1c')\n def test_execution_log(self):\n function_id = self.create_function()\n resp, body = self.client.create_execution(\n function_id, input='{\"name\": \"OpenStack\"}'\n )\n\n self.assertEqual(201, resp.status)\n self.addCleanup(self.client.delete_resource, 'executions',\n body['id'], ignore_notfound=True)\n self.assertEqual('success', body['status'])\n\n execution_id = body['id']\n\n # Get execution log\n resp, body = self.client.get_execution_log(execution_id)\n\n self.assertEqual(200, resp.status)\n self.assertIn('Hello, OpenStack', body)\n\n @decorators.idempotent_id('f22097dc-37db-484d-83d3-3a97e72ec576')\n def test_execution_concurrency_no_scale(self):\n package = self.create_package(name='python/test_python_sleep.py')\n function_id = self.create_function(package_path=package)\n\n def _create_execution():\n resp, body = self.client.create_execution(function_id)\n return resp, body\n\n futs = []\n with futurist.ThreadPoolExecutor(max_workers=10) as 
executor:\n for _ in range(3):\n fut = executor.submit(_create_execution)\n futs.append(fut)\n for f in futures.as_completed(futs):\n # Wait until we get the response\n resp, body = f.result()\n\n self.assertEqual(201, resp.status)\n self.addCleanup(self.client.delete_resource, 'executions',\n body['id'], ignore_notfound=True)\n self.assertEqual('success', body['status'])\n\n resp, body = self.admin_client.get_function_workers(function_id)\n\n self.assertEqual(200, resp.status)\n self.assertEqual(1, len(body['workers']))\n\n @decorators.idempotent_id('a5ed173a-19b7-4c92-ac78-c8862ad1d1d2')\n def test_execution_concurrency_scale_up(self):\n package = self.create_package(name='python/test_python_sleep.py')\n function_id = self.create_function(package_path=package)\n\n def _create_execution():\n resp, body = self.client.create_execution(function_id)\n return resp, body\n\n futs = []\n with futurist.ThreadPoolExecutor(max_workers=10) as executor:\n for _ in range(6):\n fut = executor.submit(_create_execution)\n futs.append(fut)\n for f in futures.as_completed(futs):\n # Wait until we get the response\n resp, body = f.result()\n\n self.assertEqual(201, resp.status)\n self.addCleanup(self.client.delete_resource, 'executions',\n body['id'], ignore_notfound=True)\n self.assertEqual('success', body['status'])\n\n resp, body = self.admin_client.get_function_workers(function_id)\n self.assertEqual(200, resp.status)\n self.assertEqual(2, len(body['workers']))\n\n @decorators.idempotent_id('ccfe67ce-e467-11e7-916c-00224d6b7bc1')\n def test_python_execution_positional_args(self):\n package = self.create_package(\n name='python/test_python_positional_args.py'\n )\n function_id = self.create_function(package_path=package)\n\n resp, body = self.client.create_execution(function_id,\n input='Qinling')\n\n self.assertEqual(201, resp.status)\n self.addCleanup(self.client.delete_resource, 'executions',\n body['id'], ignore_notfound=True)\n self.assertEqual('success', body['status'])\n\n result = jsonutils.loads(body['result'])\n self.assertIn('Qinling', result['output'])\n\n @decorators.idempotent_id('a948382a-84af-4f0e-ad08-4297345e302c')\n def test_python_execution_file_limit(self):\n package = self.create_package(name='python/test_python_file_limit.py')\n function_id = self.create_function(package_path=package)\n\n resp, body = self.client.create_execution(function_id)\n\n self.assertEqual(201, resp.status)\n self.addCleanup(self.client.delete_resource, 'executions',\n body['id'], ignore_notfound=True)\n self.assertEqual('failed', body['status'])\n\n result = jsonutils.loads(body['result'])\n self.assertNotIn('error', result)\n self.assertIn(\n 'Too many open files', result['output']\n )\n\n @decorators.idempotent_id('bf6f8f35-fa88-469b-8878-7aa85a8ce5ab')\n def test_python_execution_process_number(self):\n package = self.create_package(\n name='python/test_python_process_limit.py'\n )\n function_id = self.create_function(package_path=package)\n\n resp, body = self.client.create_execution(function_id)\n\n self.assertEqual(201, resp.status)\n self.addCleanup(self.client.delete_resource, 'executions',\n body['id'], ignore_notfound=True)\n self.assertEqual('failed', body['status'])\n\n result = jsonutils.loads(body['result'])\n self.assertNotIn('error', result)\n self.assertIn(\n 'too much resource consumption', result['output']\n )\n\n @decorators.idempotent_id('d0598868-e45d-11e7-9125-00224d6b7bc1')\n def test_execution_image_function(self):\n function_id = self.create_function(image=True)\n resp, body = 
self.client.create_execution(function_id,\n                                                   input='Qinling')\n\n        self.assertEqual(201, resp.status)\n        execution_id = body['id']\n        self.addCleanup(self.client.delete_resource, 'executions',\n                        execution_id, ignore_notfound=True)\n        self.assertEqual('success', body['status'])\n        self.assertIn('Qinling', jsonutils.loads(body['result'])['output'])\n","sub_path":"qinling_tempest_plugin/tests/api/test_executions.py","file_name":"test_executions.py","file_ext":"py","file_size_in_byte":11275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"400120916","text":"import redis\nimport datetime\nimport time\nclient = redis.Redis();\n\n# ----- store a string\n# now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\");\n# client.set(\"jianan\", now);\n# print(type(client.get(\"jianan\"))); # <class 'bytes'>\n# print(client.get(\"jianan\"));\n# print(client.get(\"jianan\").decode());\n\n# ----- set a string with an expiry time and read it back\n# client.set(\"jianan\", now, ex=10);\n# value = client.get(\"jianan\").decode();\n# print(\"readable right after setting, value =\", value);\n# time.sleep(11);\n# print(\"readable right after setting, value2 =\", client.get(\"jianan\"));\n\n# ----- increment\n# client.set(\"num_value\", 10);\n# value = client.get(\"num_value\").decode();\n# print(\"type value =\", type(value)); # <class 'str'>\n#\n# client.incr(\"num_value\", 5);\n# print( client.get(\"num_value\").decode());\n\n# ----- lists\n# client.lpush(\"users\", \"jianan\", \"xx\", \"yy\");\n# print(client.lrange(\"users\", 0, 100));\n# client.lpop(\"users\");\n# print(client.lrange(\"users\", 0, 100));\n# 1) \"yy\"\n# 2) \"xx\"\n# 3) \"jianan\"\n# 4) \"yy\"\n# 5) \"xx\"\n# 6) \"jianan\"\n\n# print(client.llen(\"users\"));\n# print(client.lrange(\"users\", 0, -4));\n# [b'xx', b'jianan', b'yy', b'xx']\n\n# client.flushdb();\n\n# ----- sets\naccount = [\"Bob\", \"Alice\", \"Dilen\"];\nresult = client.sadd(\"account\", *account);\nprint(result);\n\nclient.sadd(\"account\", \"jianan\");\nclient.sadd(\"account\", \"jianan\");\n\nall_data = client.smembers(\"account\");\n\nfor data in all_data:\n    print (data.decode())\n\n\nnot_exists = client.sismember(\"account\", \"abc\");\nprint(not_exists);\n\n# intersection, union and difference\ndata_1 = [1,2,3,4,5];\ndata_2 = [4,5,6,7,8];\nclient.sadd(\"data_1\", *data_1);\nclient.sadd(\"data_2\", *data_2);\n\n# intersection\njiaoji = client.sinter(\"data_1\", \"data_2\");\nprint(\"jiaoji =\", jiaoji);\n\n# union\nbingji = client.sunion(\"data_1\", \"data_2\");\nprint(\"bingji =\", bingji);\n\n# difference\nchaji = client.sdiff(\"data_1\", \"data_2\");\nprint(\"chaji =\", chaji);","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"21669574","text":"\ndef indexable_allclose(dct1, dct2, rel_tol=1e-9, abs_tol=0.0, return_info=False):\n    \"\"\"\n    PORT FROM UBELT WITH SUPPORT FOR NDARRAYS\n\n    Walks through two nested data structures and ensures that everything is\n    roughly the same.\n    \"\"\"\n    import ubelt as ub\n    import numpy as np\n    from math import isclose\n    from functools import partial\n    try:\n        import torch\n    except ImportError:\n        torch = None\n\n    isclose_ = partial(isclose, rel_tol=rel_tol, abs_tol=abs_tol)\n    np_isclose_ = partial(np.isclose, rtol=rel_tol, atol=abs_tol)\n\n    walker1 = ub.IndexableWalker(dct1)\n    walker2 = ub.IndexableWalker(dct2)\n    flat_items1 = [\n        (path, value) for path, value in walker1\n        if not isinstance(value, walker1.indexable_cls) or len(value) == 0]\n    flat_items2 = [\n        (path, value) for path, value in walker2\n        if not isinstance(value, 
walker1.indexable_cls) or len(value) == 0]\n\n flat_items1 = sorted(flat_items1)\n flat_items2 = sorted(flat_items2)\n\n if len(flat_items1) != len(flat_items2):\n info = {\n 'faillist': ['length mismatch']\n }\n final_flag = False\n else:\n passlist = []\n faillist = []\n\n for t1, t2 in zip(flat_items1, flat_items2):\n p1, v1 = t1\n p2, v2 = t2\n assert p1 == p2\n if torch is not None:\n if torch.is_tensor(v1):\n v1 = v1.numpy()\n if torch.is_tensor(v2):\n v2 = v2.numpy()\n\n if isinstance(v1, np.ndarray) or isinstance(v2, np.ndarray):\n flag = np.all(np_isclose_(v1, v2))\n else:\n flag = (v1 == v2)\n if not flag:\n if isinstance(v1, float) and isinstance(v2, float) and isclose_(v1, v2):\n flag = True\n if flag:\n passlist.append(p1)\n else:\n faillist.append((p1, v1, v2))\n\n final_flag = len(faillist) == 0\n info = {\n 'passlist': passlist,\n 'faillist': faillist,\n }\n\n if return_info:\n info.update({\n 'walker1': walker1,\n 'walker2': walker2,\n })\n return final_flag, info\n else:\n return final_flag\n\n\ndef test_numpy_torch_compat():\n import pytest\n import ubelt as ub\n import numpy as np\n import kwarray\n from kwarray import arrayapi\n try:\n import torch\n except ImportError:\n torch = None\n\n ArrayAPI = arrayapi.ArrayAPI\n\n if torch is None:\n pytest.skip('no torch')\n\n # arrayapi._REGISTERY.registered['numpy']\n rows = list(arrayapi._REGISTERY.registered['api'].values())\n groups = ub.group_items(rows, lambda item: item['func_type'])\n\n rng = kwarray.ensure_rng()\n\n basis = {\n 'shape': [(3, 5)],\n 'dtype': ['float32', 'uint8'],\n }\n\n for item in ub.named_product(basis):\n np_data1 = rng.rand(3, 5)\n pt_data1 = ArrayAPI.tensor(np_data1)\n\n blocklist = {\n 'take', 'compress', 'repeat', 'tile', 'reshape', 'view',\n 'numel', 'atleast_nd', 'full_like',\n #\n 'maximum', 'minimum', 'matmul',\n 'astype', 'ensure',\n 'transpose',\n 'pad',\n 'dtype_kind',\n 'clip',\n\n 'array_equal',\n }\n\n if arrayapi._TORCH_LT_1_7_0:\n # Hack for old torch, works on new torch\n blocklist.update({'all', 'any'})\n\n errors = []\n for func_type, group in groups.items():\n if func_type == 'data_func':\n for row in group:\n # TODO: better signature registration so we know how we\n # need to call the data. 
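# --- Added usage sketch for indexable_allclose defined above (standalone,
# assuming only numpy and ubelt, exactly as the function itself does):
#
#     import numpy as np
#     d1 = {'a': np.ones(3), 'b': {'c': 1.0}}
#     d2 = {'a': np.ones(3), 'b': {'c': 1.0 + 1e-12}}
#     flag, info = indexable_allclose(d1, d2, return_info=True)
#     assert flag, info['faillist']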
For now blocklist non-easy cases\n\n func_name = row['func_name']\n if func_name in blocklist:\n continue\n\n print(f'func_name={func_name}')\n func = getattr(ArrayAPI, func_name)\n np_func = getattr(arrayapi.ArrayAPI._numpy, func_name)\n pt_func = getattr(arrayapi.ArrayAPI._torch, func_name)\n\n np_result1 = func(np_data1)\n np_result2 = np_func(np_data1)\n pt_result1 = func(pt_data1)\n pt_result2 = pt_func(pt_data1)\n\n results = [np_result1, np_result2, pt_result1, pt_result2]\n\n flag = True\n if isinstance(np_result1, tuple):\n for a, b in ub.iter_window(results, 2):\n flag &= indexable_allclose(a, b)\n else:\n results = [ArrayAPI.numpy(r) for r in results]\n for a, b in ub.iter_window(results, 2):\n flag &= np.all(np.isclose(a, b))\n\n if not flag:\n errors.append(row)\n","sub_path":"tests/test_arrayapi.py","file_name":"test_arrayapi.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"166066800","text":"#!/usr/bin/python3\nfrom fabric.api import *\n\nenv.hosts = ['35.196.60.116', '54.221.176.56']\nenv.user = 'ubuntu'\n\n\ndef do_clean(number=0):\n \"\"\" deletes out of data archives \"\"\"\n\n number = int(number)\n if number == 1 or number == 0:\n local('cd versions; ls | head -n -1 | xargs rm -rf')\n run('cd /data/web_static/releases; ls | head -n -1 | xargs rm -rf')\n else:\n local('cd versions; ls | head -n -{} | xargs rm -rf'.format(number))\n run('cd /data/web_static/releases; ls | head -n -{} | xargs rm -rf'.\n format(number))\n","sub_path":"100-clean_web_static.py","file_name":"100-clean_web_static.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"400285681","text":"\"\"\" Two factor authentication views\"\"\"\nimport os\nimport base64\nimport io\nimport logging\nimport qrcode\nfrom user.authentication import TOTPValidityToken, ExpiringTokenAuthentication, create_random_user_id, find_user_id_from_nonce\nfrom rest_framework import permissions\nfrom rest_framework.views import APIView\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom django_otp.plugins.otp_totp.models import TOTPDevice\nfrom django_otp import devices_for_user, user_has_device\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.utils.encoding import force_text\nfrom django.contrib.auth import get_user_model\nfrom django.utils import timezone\n\nIMAGE_PATH = \"/app/media/QR/token_qr.png\"\nlogger = logging.getLogger(__name__)\n\ndef get_user_totp_device(user, confirmed=False):\n \"\"\"\n Find an existing user totp device and returning it\n \"\"\"\n\n devices = devices_for_user(user, confirmed=confirmed)\n for device in devices:\n if isinstance(device, TOTPDevice):\n return device\n\nclass TOTPCreateAPIView(APIView):\n \"\"\"\n Creation of a time based one time password for a user\n \"\"\"\n\n authentication_classes = (ExpiringTokenAuthentication,)\n permission_classes = (permissions.IsAuthenticated,)\n\n def __str__(self):\n return \"TOTP create endpoint\"\n\n def get(self, request):\n \"\"\"TOPT generaton\"\"\"\n\n user = request.user\n if user_has_device(user, confirmed=True):\n return Response(dict(errors=[\"2FA is already activated on this account\"]),\n status=status.HTTP_400_BAD_REQUEST)\n\n device = get_user_totp_device(user, False)\n if not device:\n device = user.totpdevice_set.create(confirmed=False)\n 
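            # Note (added): the device is deliberately created with
            # confirmed=False -- django-otp only treats confirmed devices as
            # usable for verification, so 2FA is not enforced until the user
            # proves possession of the authenticator via VerifyTOTPView below.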
user.save()\n\n try:\n img = qrcode.make(device.config_url)\n bytes_image = io.BytesIO()\n img.save(bytes_image, format='PNG')\n bytes_image.seek(0)\n img = base64.b64encode(bytes_image.read()).decode('utf-8')\n token = TOTPValidityToken().make_token(user)\n return Response({\"qrImg\": img, \"token\": token}, status=status.HTTP_201_CREATED)\n \n except Exception as e:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n\nclass TOTPAuthenticateView(APIView):\n \"\"\"\n Use this endpoint to verify a token produced by a TOTP device\n in order to authenticate user\n \"\"\"\n\n def __str__(self):\n return \"Verification endpoint\"\n\n\n def post(self, request, uidb64, token):\n \"\"\"Verify user one-time password\"\"\"\n\n try:\n nonce = force_text(urlsafe_base64_decode(uidb64))\n uid = find_user_id_from_nonce(nonce)\n user = get_user_model().objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist) as e_ex:\n logger.warning('Unsuccessfull login with id %s. Exception: %s', uid, e_ex)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n if TOTPValidityToken().check_token(user, token) and (\"token_totp\" in request.data):\n\n device = get_user_totp_device(user, True)\n token_totp = request.data[\"token_totp\"]\n\n if not device or not device.verify_token(token_totp):\n return Response(dict(errors=['This user has not setup two' \\\n + 'factor authentication or has not enter a valid code']),\n status=status.HTTP_400_BAD_REQUEST\n )\n\n token, created = Token.objects.get_or_create(user=user)\n if not created:\n # update the created time of the token to keep it valid\n token.created = timezone.now()\n token.save()\n\n logger.info(\"User with email %s logs at %s\", user.email, timezone.now())\n return Response({'token': token.key}, status=status.HTTP_200_OK)\n\n\n return Response(dict(errors=['Expired token']), status=status.HTTP_400_BAD_REQUEST)\n\n\nclass VerifyTOTPView(APIView):\n \"\"\"Endpoint for validation of TOTP service\"\"\"\n\n authentication_classes = (ExpiringTokenAuthentication,)\n permission_classes = (permissions.IsAuthenticated,)\n\n def post(self, request, token):\n \"\"\"Activate the device of authenticated user given a totp token in the post request\n which validation period is defined by token\n \"\"\"\n\n user = request.user\n if not TOTPValidityToken().check_token(user, token):\n return Response(dict(\n errors=['Validation token expired']),\n status=status.HTTP_401_UNAUTHORIZED\n )\n\n device = get_user_totp_device(user, False)\n if not device:\n return Response(dict(\n errors=['This user has not setup two factor authentication']),\n status=status.HTTP_400_BAD_REQUEST\n )\n\n if not \"token_totp\" in request.data:\n return Response(dict(errors=[\"Need authentication app code\"]),\n status=status.HTTP_400_BAD_REQUEST)\n\n token_totp = request.data[\"token_totp\"]\n if device.verify_token(token_totp):\n device.confirmed = True\n user.profile.two_factor_enabled = True\n device.save()\n user.profile.save()\n user.save()\n return Response(status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n\nclass DisableTOTP(APIView):\n \"\"\"Endpoing for disabling 2fa service\"\"\"\n\n authentication_classes = (ExpiringTokenAuthentication,)\n permission_classes = (permissions.IsAuthenticated,)\n\n def post(self, request):\n \"\"\"Disable device\"\"\"\n user = request.user\n device = get_user_totp_device(user, True)\n\n if not device or not device.confirmed:\n logger.warning(\"Suspicious 2FA desactivation - 
no 2FA or 2FA not activated for user %s\",\n user)\n return Response(dict(errors=[\"Ivalid operation\"]), status=status.HTTP_400_BAD_REQUEST)\n\n if not \"token_totp\" in request.data:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n token_totp = request.data[\"token_totp\"]\n if not device.verify_token(token_totp):\n return Response(dict(errors=[\"Invalid code\"]), status=status.HTTP_400_BAD_REQUEST)\n\n TOTPDevice.objects.filter(user=user).delete()\n user.profile.two_factor_enabled = False\n user.profile.save()\n user.save()\n return Response(status=status.HTTP_200_OK)\n","sub_path":"app/backend/app/user/totpviews.py","file_name":"totpviews.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"487317687","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n self.length = 0\n self.depth = 0\n\nclass BinarySearchTree:\n \n def __init__(self, root = None):\n self.depth = 0\n self.length = 0\n if root != None:\n self.root = Node(root)\n self.length = 0\n else:\n self.root = root\n self.length = 1\n self.depth = 1\n \n def insert(self, value):\n # Insert a value into the binary tree\n node = Node(value)\n \n if self.root == None:\n self.root = node\n self.length = 1\n self.depth = 1\n print(\"depth = 1\")\n return\n elif self.root:\n temp = self.root\n depth_count = 1\n print(\"for inserting \", value, \" --->\", end=\" \")\n while True:\n depth_count += 1\n if value > temp.value:\n print(\"Right\", end = \" \")\n if temp.right:\n temp = temp.right\n continue\n else:\n temp.right = node\n break\n else:\n print(\"Left\", end = \" \")\n if temp.left:\n temp = temp.left\n continue\n else:\n temp.left = node\n break\n print(\"Depth = \", self.depth)\n print(\"Number of nodes = \", self.length)\n print(\" \")\n \n if(depth_count > self.depth):\n self.depth = depth_count\n self.length += 1\n return self\n def check(self, value):\n # check if the given value is present in the tree\n if self.root == None:\n print(\" the binary search tree is empty\")\n return False\n if value == self.root.value:\n return True\n else:\n temp = self.root\n while True:\n if temp == None:\n print(\"value \", value, \" not found in the Binary Search Tree\")\n return False\n if value == temp.value:\n return True\n if value > temp.value:\n temp = temp.right\n else:\n temp = temp.left\n return False\n def BFS(self):\n # 10\n # 6 15\n # 3 8 20\n # ---> [10, 6, 15, 3, 8, 20]\n\n if self.root == None:\n return []\n else:\n queue = []\n visited = []\n \n queue.append(self.root)\n \n while len(queue) > 0:\n current = queue[0]\n queue = queue[1:]\n visited.append(current.value)\n if current.left:\n queue.append(current.left)\n if current.right:\n queue.append(current.right)\n return visited\n def DFS_PreOrder(self):\n \n # 10\n # 6 15\n # 3 8 20\n # ---> [10, 6, 3, 8, 15, 20]\n \n \n data = []\n current = self.root\n \n def treverse(node):\n data.append(node.value)\n if node.left:\n treverse(node.left)\n if node.right:\n treverse(node.right)\n treverse(current)\n \n return data\n \n def DFS_PostOrder(self):\n # 10\n # 6 15\n # 3 8 20\n # ---> [3, 8, 6, 20, 15, 10]\n # we visit a node after we have vidited all of its left and then all the right children\n data = []\n current = self.root\n \n def treverse(node):\n if node.left:\n treverse(node.left)\n if node.right:\n treverse(node.right)\n data.append(node.value)\n treverse(current)\n \n return data\n def 
DFS_InOrder(self):\n        # traverse the entire left subtree, then visit the node, then its right side\n        # 10\n        #  6      15\n        # 3 8        20\n        # ---> [3, 6, 8, 10, 15, 20]\n        # we visit a node after we have visited all of its left and then all the right children\n        data = []\n        current = self.root\n        \n        def treverse(node):\n            if node.left:\n                treverse(node.left)\n            data.append(node.value)\n            if node.right:\n                treverse(node.right)\n        \n        treverse(current)\n        \n        return data\n    \n# Big O analysis\n# insertion ---> ordinary case O(Log(n)) worst case O(n) if the BST is skewed height = n\n# Searching ---> ordinary case O(Log(n)) worst case O(n) if the BST is skewed height = n ","sub_path":"DataStructures/BinarySearchTree/BinarySearchTree.py","file_name":"BinarySearchTree.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"585642870","text":"#!/hpcf/apps/python/install/2.7.13/bin/python\n\nimport sys\nimport os\n\nbed = sys.argv[1]\nlines = open(bed).readlines()\noutfile = \"%s.bedjs\"%(bed)\nout = open(outfile,\"wb\")\nfor line in lines:\n\tline = line.strip().split(\"\\t\")\n\tif line == [\"\"]:\n\t\tcontinue\n\tif line[3] == \"gain\":\n\t\t# name = \"\"\"{\"strand\":\"%s\",\"color\":\"rgba(255,0,0,0.8)\"}\"\"\"%(line[5])\n\t\tname = \"\"\"{\"strand\":\"%s\",\"color\":\"rgba(255,0,0,0.8)\"}\"\"\"%(\"+\")\n\telse:\n\t\t# name = \"\"\"{\"strand\":\"%s\",\"color\":\"rgba(0,0,255,0.8)\"}\"\"\"%(line[5])\n\t\tname = \"\"\"{\"strand\":\"%s\",\"color\":\"rgba(0,0,255,0.8)\"}\"\"\"%(\"+\")\n\tprint >>out,\"\\t\".join([line[0],line[1],line[2],name])\n\nout.close()\nos.system(\"module load htslib;sort -k1,1 -k2,2n %s > %s.sorted;bgzip %s.sorted;tabix -p bed %s.sorted.gz\"%(outfile,outfile,outfile,outfile))","sub_path":"bin/bed_to_bedjs_diffpeak.py","file_name":"bed_to_bedjs_diffpeak.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"183685681","text":"import werkzeug.routing as routing\nimport bemtevi.controllers.posts_controller as posts_controller\nimport bemtevi.controllers.authors_controller as authors_controller\n\ndef routes():\n    return [\n        routing.Rule('/posts', endpoint = posts_controller.list, methods = ['GET']),\n        routing.Rule('/posts', endpoint = posts_controller.create, methods = ['POST']),\n        routing.Rule('/authors', endpoint = authors_controller.list, methods = ['GET']),\n        routing.Rule('/authors', endpoint = authors_controller.create, methods = ['POST']),\n    ]\n\ndef match(request):\n    route_map = routing.Map(routes())\n    adapter = route_map.bind_to_environ(request.environ)\n    return adapter.match()","sub_path":"bemtevi/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"440282448","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport subprocess\nimport os\n\nfrom pcraster.framework import *\nimport pcraster as pcr\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport waterBodies_for_modflow as waterBodies\n\nimport virtualOS as vos\nfrom ncConverter import *\n\nclass GroundwaterModflow(object):\n    \n    def getState(self):\n        result = {}\n        result['groundwaterHead'] = self.groundwaterHead # unit: m\n        return result\n\n\n    def __init__(self, iniItems, landmask):\n        object.__init__(self)\n        \n        # cloneMap, temporary directory, absolute path for input directory, landmask\n        self.cloneMap = 
iniItems.cloneMap\n self.tmpDir = iniItems.tmpDir\n self.inputDir = iniItems.globalOptions['inputDir']\n self.landmask = landmask\n \n # configuration from the ini file\n self.iniItems = iniItems\n \n # topography properties: read several variables from the netcdf file\n for var in ['dem_minimum','dem_maximum','dem_average','dem_standard_deviation',\\\n 'slopeLength','orographyBeta','tanslope',\\\n 'dzRel0000','dzRel0001','dzRel0005',\\\n 'dzRel0010','dzRel0020','dzRel0030','dzRel0040','dzRel0050',\\\n 'dzRel0060','dzRel0070','dzRel0080','dzRel0090','dzRel0100']:\n vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['topographyNC'], \\\n var, self.cloneMap)\n vars(self)[var] = pcr.cover(vars(self)[var], 0.0)\n\n # channel properties: read several variables from the netcdf file\n for var in ['lddMap','cellAreaMap','gradient','bankfull_width',\n 'bankfull_depth','dem_floodplain','dem_riverbed']:\n vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['channelNC'], \\\n var, self.cloneMap)\n vars(self)[var] = pcr.cover(vars(self)[var], 0.0)\n \n # minimum channel width\n minimum_channel_width = 1.0\n self.bankfull_width = pcr.max(minimum_channel_width, self.bankfull_width)\n \n #~ # cell fraction if channel water reaching the flood plan # NOT USED \n #~ self.flood_plain_fraction = self.return_innundation_fraction(pcr.max(0.0, self.dem_floodplain - self.dem_minimum))\n \n # coefficient of Manning\n self.manningsN = vos.readPCRmapClone(self.iniItems.modflowParameterOptions['manningsN'],\\\n self.cloneMap,self.tmpDir,self.inputDir)\n \n # minimum channel gradient\n minGradient = 0.00005\n self.gradient = pcr.max(minGradient, pcr.cover(self.gradient, minGradient))\n\n # correcting lddMap\n self.lddMap = pcr.ifthen(pcr.scalar(self.lddMap) > 0.0, self.lddMap)\n self.lddMap = pcr.lddrepair(pcr.ldd(self.lddMap))\n \n # channelLength = approximation of channel length (unit: m) # This is approximated by cell diagonal. \n cellSizeInArcMin = np.round(pcr.clone().cellSize()*60.)\n verticalSizeInMeter = cellSizeInArcMin*1852. \n self.channelLength = ((self.cellAreaMap/verticalSizeInMeter)**(2)+\\\n (verticalSizeInMeter)**(2))**(0.5)\n \n # option for lakes and reservoir\n self.onlyNaturalWaterBodies = False\n if self.iniItems.modflowParameterOptions['onlyNaturalWaterBodies'] == \"True\": self.onlyNaturalWaterBodies = True\n\n # groundwater linear recession coefficient (day-1) ; the linear reservoir concept is still being used to represent fast response flow \n # particularly from karstic aquifer in mountainous regions \n self.recessionCoeff = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['groundwaterPropertiesNC'],\\\n 'recessionCoeff', self.cloneMap)\n self.recessionCoeff = pcr.cover(self.recessionCoeff,0.00) \n self.recessionCoeff = pcr.min(1.0000,self.recessionCoeff) \n #\n if 'minRecessionCoeff' in iniItems.modflowParameterOptions.keys():\n minRecessionCoeff = float(iniItems.modflowParameterOptions['minRecessionCoeff'])\n else:\n minRecessionCoeff = 1.0e-4 # This is the minimum value used in Van Beek et al. (2011). 
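        # Added illustration (not used by the MODFLOW coupling itself): a
        # linear reservoir drains storage S [m] as dS/dt = -J*S, with J the
        # recession coefficient [day-1], so over a time step of dt days:
        #   baseflow = S * (1.0 - math.exp(-J * dt))   # water released [m]
        #   S        = S * math.exp(-J * dt)           # storage remaining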
\n self.recessionCoeff = pcr.max(minRecessionCoeff,self.recessionCoeff) \n \n # aquifer saturated conductivity (m/day)\n self.kSatAquifer = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['groundwaterPropertiesNC'],\\\n 'kSatAquifer', self.cloneMap)\n self.kSatAquifer = pcr.cover(self.kSatAquifer,pcr.mapmaximum(self.kSatAquifer)) \n self.kSatAquifer = pcr.max(0.010,self.kSatAquifer)\n \n # aquifer specific yield (dimensionless)\n self.specificYield = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['groundwaterPropertiesNC'],\\\n 'specificYield', self.cloneMap)\n self.specificYield = pcr.cover(self.specificYield,pcr.mapmaximum(self.specificYield)) \n self.specificYield = pcr.max(0.010,self.specificYield) # TODO: TO BE CHECKED: The resample process of specificYield \n self.specificYield = pcr.min(1.000,self.specificYield) \n\n # estimate of thickness (unit: m) of accesible groundwater \n totalGroundwaterThickness = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['estimateOfTotalGroundwaterThicknessNC'],\\\n 'thickness', self.cloneMap)\n # extrapolation \n totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness,\\\n pcr.windowaverage(totalGroundwaterThickness, 1.0))\n totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness,\\\n pcr.windowaverage(totalGroundwaterThickness, 1.5))\n totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, 0.0)\n #\n # set minimum thickness\n minimumThickness = pcr.scalar(float(\\\n self.iniItems.modflowParameterOptions['minimumTotalGroundwaterThickness']))\n totalGroundwaterThickness = pcr.max(minimumThickness, totalGroundwaterThickness)\n #\n # set maximum thickness: 500 m.\n maximumThickness = 500.\n self.totalGroundwaterThickness = pcr.min(maximumThickness, totalGroundwaterThickness)\n\n # river bed resistance (unit: day)\n self.bed_resistance = 1.0\n \n # initiate old style reporting # TODO: remove this!\n self.initiate_old_style_groundwater_reporting(iniItems)\n\n def initiate_modflow(self):\n\n logger.info(\"Initializing pcraster modflow.\")\n \n # initialise \n self.pcr_modflow = None\n self.pcr_modflow = pcr.initialise(pcr.clone())\n \n # grid specification - one layer model\n top = self.dem_average\n bottom = top - self.totalGroundwaterThickness\n self.pcr_modflow.createBottomLayer(bottom, top) \n \n # specification for the boundary condition (IBOUND, BAS package)\n # - active cells only in landmask\n # - constant head for outside the landmask\n ibound = pcr.ifthen(self.landmask, pcr.nominal(1))\n ibound = pcr.cover(ibound, pcr.nominal(-1))\n self.pcr_modflow.setBoundary(ibound, 1)\n \n # specification for conductivities (BCF package)\n horizontal_conductivity = self.kSatAquifer # unit: m/day\n # set the minimum value for transmissivity; (Deltares's default value: 10 m2/day)\n minimimumTransmissivity = 10.\n horizontal_conductivity = pcr.max(minimimumTransmissivity, \\\n horizontal_conductivity * self.totalGroundwaterThickness) / self.totalGroundwaterThickness\n vertical_conductivity = horizontal_conductivity # dummy values, as one layer model is used\n self.pcr_modflow.setConductivity(00, horizontal_conductivity, \\\n vertical_conductivity, 1) \n \n # specification for storage coefficient\n # - correction due to the usage of lat/lon coordinates\n primary = pcr.cover(self.specificYield * self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize()), 0.0)\n primary = pcr.max(1e-20, primary)\n secondary = primary # dummy values as we used layer type 00\n 
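        # Note (added): pcraster-modflow assumes square cells of
        # clone().cellSize() on a side, while the true area of a lat/lon cell
        # shrinks with latitude; scaling the specific yield by
        # cellAreaMap / cellSize()**2 above keeps the stored volumes correct
        # on the geographic grid.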
self.pcr_modflow.setStorage(primary, secondary, 1)\n \n # set drain package\n self.set_drain_package()\n \n # TODO: defining/incorporating anisotrophy values\n\n def get_initial_heads(self):\n\t\t\n if self.iniItems.modflowTransientInputOptions['groundwaterHeadIni'] != \"None\": \n \n # using a pre-defined groundwater head described in the ini/configuration file\n self.groundwaterHead = vos.readPCRmapClone(self.modflowTransientInputOptions['groundwaterHeadIni'],\\\n self.cloneMap, self.tmpDir, self.inputDir)\n else: \n\n # calculate/simulate a steady state condition and obtain its calculated head values\n self.modflow_simulation(\"steady-state\", self.dem_average, None)\n\n def estimate_bottom_of_bank_storage(self):\n\n # influence zone depth (m)\n influence_zone_depth = 0.50\n \n # bottom_elevation > flood_plain elevation - influence zone\n bottom_of_bank_storage = self.dem_floodplain - influence_zone_depth\n\n #~ # bottom_elevation > river bed\n #~ bottom_of_bank_storage = pcr.max(self.dem_riverbed, bottom_of_bank_storage)\n \n # bottom_elevation > its downstream value\n bottom_of_bank_storage = pcr.max(bottom_of_bank_storage, \\\n pcr.cover(pcr.downstream(self.lddMap, bottom_of_bank_storage), bottom_of_bank_storage))\n\n # bottom_elevation >= 0.0 (must be higher than sea level)\n bottom_of_bank_storage = pcr.max(0.0, bottom_of_bank_storage)\n \n # reducing noise\n bottom_of_bank_storage = pcr.max(bottom_of_bank_storage,\\\n pcr.windowaverage(bottom_of_bank_storage, 3.0 * pcr.clone().cellSize()))\n\n # bottom_elevation < dem_average\n bottom_of_bank_storage = pcr.min(bottom_of_bank_storage, self.dem_average)\n bottom_of_bank_storage = pcr.cover(bottom_of_bank_storage, self.dem_average)\n\n # TODO: Check again this concept. \n \n # TODO: We may want to improve this concept - by incorporating the following \n # - smooth bottom_elevation\n # - upstream areas in the mountainous regions and above perrenial stream starting points may also be drained (otherwise water will accumulate) \n # - bottom_elevation > minimum elevation that is estimated from the maximum of S3 from the PCR-GLOBWB simulation\n \n return bottom_of_bank_storage\n\n def initiate_old_style_groundwater_reporting(self,iniItems):\n\n self.report = True\n try:\n self.outDailyTotNC = iniItems.groundwaterOptions['outDailyTotNC'].split(\",\")\n self.outMonthTotNC = iniItems.groundwaterOptions['outMonthTotNC'].split(\",\")\n self.outMonthAvgNC = iniItems.groundwaterOptions['outMonthAvgNC'].split(\",\")\n self.outMonthEndNC = iniItems.groundwaterOptions['outMonthEndNC'].split(\",\")\n self.outAnnuaTotNC = iniItems.groundwaterOptions['outAnnuaTotNC'].split(\",\")\n self.outAnnuaAvgNC = iniItems.groundwaterOptions['outAnnuaAvgNC'].split(\",\")\n self.outAnnuaEndNC = iniItems.groundwaterOptions['outAnnuaEndNC'].split(\",\")\n except:\n self.report = False\n if self.report == True:\n self.outNCDir = iniItems.outNCDir\n self.netcdfObj = PCR2netCDF(iniItems)\n #\n # daily output in netCDF files:\n if self.outDailyTotNC[0] != \"None\":\n for var in self.outDailyTotNC:\n # creating the netCDF files:\n self.netcdfObj.createNetCDF(str(self.outNCDir)+\"/\"+ \\\n str(var)+\"_dailyTot.nc\",\\\n var,\"undefined\")\n # MONTHly output in netCDF files:\n # - cummulative\n if self.outMonthTotNC[0] != \"None\":\n for var in self.outMonthTotNC:\n # initiating monthlyVarTot (accumulator variable):\n vars(self)[var+'MonthTot'] = None\n # creating the netCDF files:\n self.netcdfObj.createNetCDF(str(self.outNCDir)+\"/\"+ \\\n str(var)+\"_monthTot.nc\",\\\n 
var,\"undefined\")\n # - average\n if self.outMonthAvgNC[0] != \"None\":\n for var in self.outMonthAvgNC:\n # initiating monthlyTotAvg (accumulator variable)\n vars(self)[var+'MonthTot'] = None\n # initiating monthlyVarAvg:\n vars(self)[var+'MonthAvg'] = None\n # creating the netCDF files:\n self.netcdfObj.createNetCDF(str(self.outNCDir)+\"/\"+ \\\n str(var)+\"_monthAvg.nc\",\\\n var,\"undefined\")\n # - last day of the month\n if self.outMonthEndNC[0] != \"None\":\n for var in self.outMonthEndNC:\n # creating the netCDF files:\n self.netcdfObj.createNetCDF(str(self.outNCDir)+\"/\"+ \\\n str(var)+\"_monthEnd.nc\",\\\n var,\"undefined\")\n # YEARly output in netCDF files:\n # - cummulative\n if self.outAnnuaTotNC[0] != \"None\":\n for var in self.outAnnuaTotNC:\n # initiating yearly accumulator variable:\n vars(self)[var+'AnnuaTot'] = None\n # creating the netCDF files:\n self.netcdfObj.createNetCDF(str(self.outNCDir)+\"/\"+ \\\n str(var)+\"_annuaTot.nc\",\\\n var,\"undefined\")\n # - average\n if self.outAnnuaAvgNC[0] != \"None\":\n for var in self.outAnnuaAvgNC:\n # initiating annualyVarAvg:\n vars(self)[var+'AnnuaAvg'] = None\n # initiating annualyTotAvg (accumulator variable)\n vars(self)[var+'AnnuaTot'] = None\n # creating the netCDF files:\n self.netcdfObj.createNetCDF(str(self.outNCDir)+\"/\"+ \\\n str(var)+\"_annuaAvg.nc\",\\\n var,\"undefined\")\n # - last day of the year\n if self.outAnnuaEndNC[0] != \"None\":\n for var in self.outAnnuaEndNC:\n # creating the netCDF files:\n self.netcdfObj.createNetCDF(str(self.outNCDir)+\"/\"+ \\\n str(var)+\"_annuaEnd.nc\",\\\n var,\"undefined\")\n\n\n def update(self,currTimeStep):\n\n # at the end of the month, calculate/simulate a steady state condition and obtain its calculated head values\n if currTimeStep.isLastDayOfMonth(): self.modflow_simulation(\"transient\",self.groundwaterHead,currTimeStep,4,0.001, 10.)\n\n def modflow_simulation(self,\\\n simulation_type,\\\n initial_head,\\\n currTimeStep = None,\\\n NSTP = 1, \\\n HCLOSE = 0.05,\\\n RCLOSE = 100.* 400.*400.,\\\n MXITER = 300,\\\n ITERI = 100,\\\n NPCOND = 1,\\\n RELAX = 1.00,\\\n NBPOL = 2,\\\n DAMP = 1,\\\n ITMUNI = 4, LENUNI = 2, PERLEN = 1.0, TSMULT = 1.0):\n # initiate pcraster modflow object\n self.initiate_modflow()\n\n if simulation_type == \"transient\":\n logger.info(\"Preparing MODFLOW input for a transient simulation.\")\n SSTR = 0\n if simulation_type == \"steady-state\":\n logger.info(\"Preparing MODFLOW input for a steady-state simulation.\")\n SSTR = 1\n\n # waterBody class to define the extent of lakes and reservoirs\n #\n if simulation_type == \"steady-state\":\n self.WaterBodies = waterBodies.WaterBodies(self.iniItems,\\\n self.landmask,\\\n self.onlyNaturalWaterBodies)\n self.WaterBodies.getParameterFiles(date_given = self.iniItems.globalOptions['startTime'],\\\n cellArea = self.cellAreaMap, \\\n ldd = self.lddMap) \n #\n if simulation_type == \"transient\":\n if currTimeStep.timeStepPCR == 1:\n self.WaterBodies = waterBodies.WaterBodies(self.iniItems,\\\n self.landmask,\\\n self.onlyNaturalWaterBodies)\n if currTimeStep.timeStepPCR == 1 or currTimeStep.doy == 1:\n self.WaterBodies.getParameterFiles(date_given = str(currTimeStep.fulldate),\\\n cellArea = self.cellAreaMap, \\\n ldd = self.lddMap) \n\n # using dem_average as the initial groundwater head value \n self.pcr_modflow.setInitialHead(initial_head, 1)\n \n # set parameter values for the DIS package and PCG solver\n self.pcr_modflow.setDISParameter(ITMUNI, LENUNI, PERLEN, NSTP, TSMULT, SSTR)\n 
self.pcr_modflow.setPCG(MXITER, ITERI, NPCOND, HCLOSE, RCLOSE, RELAX, NBPOL, DAMP)\n #\n # Some notes about the values \n #\n # ITMUNI = 4 # indicates the time unit (0: undefined, 1: seconds, 2: minutes, 3: hours, 4: days, 5: years)\n # LENUNI = 2 # indicates the length unit (0: undefined, 1: feet, 2: meters, 3: centimeters)\n # PERLEN = 1.0 # duration of a stress period\n # NSTP = 1 # number of time steps in a stress period\n # TSMULT = 1.0 # multiplier for the length of the successive iterations\n # SSTR = 1 # 0 - transient, 1 - steady state\n #\n # MXITER = 100 # maximum number of outer iterations\n # ITERI = 30 # number of inner iterations\n # NPCOND = 1 # 1 - Modified Incomplete Cholesky, 2 - Polynomial matrix conditioning method;\n # HCLOSE = 0.01 # HCLOSE (unit: m) # 0.05 is working\n # RCLOSE = 10.* 400.*400. # RCLOSE (unit: m3) ; Deltares people uses 100 m3 for their 25 m resolution modflow model \n # RELAX = 1.00 # relaxation parameter used with NPCOND = 1\n # NBPOL = 2 # indicates whether the estimate of the upper bound on the maximum eigenvalue is 2.0 (but we don ot use it, since NPCOND = 1) \n # DAMP = 1 # no damping (DAMP introduced in MODFLOW 2000)\n \n # read input files (for the steady-state condition, we use pcraster maps):\n if simulation_type == \"steady-state\":\n # - discharge (m3/s) from PCR-GLOBWB\n discharge = vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions['avgDischargeInputMap'],\\\n self.cloneMap, self.tmpDir, self.inputDir)\n # - recharge/capillary rise (unit: m/day) from PCR-GLOBWB \n gwRecharge = vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions['avgGroundwaterRechargeInputMap'],\\\n self.cloneMap, self.tmpDir, self.inputDir)\n #\n # - for a steady state condition that will be used as the initial condition \n # ignore any withdrawal from groundwater\n gwRecharge = pcr.max(0.0, gwRecharge) \n gwAbstraction = pcr.spatial(pcr.scalar(0.0))\n\n # read input files (for the transient, input files are given in netcdf files):\n if simulation_type == \"transient\":\n # - discharge (m3/s) from PCR-GLOBWB\n discharge = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['dischargeInputNC'],\n \"discharge\",str(currTimeStep.fulldate),None,self.cloneMap)\n # - recharge/capillary rise (unit: m/day) from PCR-GLOBWB \n gwRecharge = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['groundwaterRechargeInputNC'],\\\n \"groundwater_recharge\",str(currTimeStep.fulldate),None,self.cloneMap)\n # - groundwater abstraction (unit: m/day) from PCR-GLOBWB \n gwAbstraction = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['groundwaterAbstractionInputNC'],\\\n \"total_groundwater_abstraction\",str(currTimeStep.fulldate),None,self.cloneMap)\n\n # set recharge and river packages\n self.set_river_package(discharge)\n self.set_recharge_package(gwRecharge, gwAbstraction)\n \n # execute MODFLOW \n logger.info(\"Executing MODFLOW.\")\n self.pcr_modflow.run()\n \n # TODO: Add the mechanism to check whether a run has converged or not.\n\n # obtaining the results from modflow simulation\n self.groundwaterHead = None\n self.groundwaterHead = self.pcr_modflow.getHeads(1) \n\n # calculate groundwater depth only in the landmask region\n self.groundwaterDepth = pcr.ifthen(self.landmask, self.dem_average - self.groundwaterHead)\n \n # for debuging only\n pcr.report(self.groundwaterHead , \"gw_head.map\")\n pcr.report(self.groundwaterDepth, \"gw_depth.map\")\n pcr.report(self.surface_water_elevation, 
\"surface_water_elevation.map\")\n\n \n def set_river_package(self, discharge):\n\n logger.info(\"Set the river package based on the given discharge.\")\n \n # specify the river package\n #\n # - surface water river bed/bottom elevation\n #\n # - for lakes and resevoirs, make the bottom elevation deep --- Shall we do this? \n #~ additional_depth = 500.\n #~ surface_water_bed_elevation = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, \\\n #~ self.dem_riverbed - additional_depth)\n #~ surface_water_bed_elevation = pcr.cover(surface_water_bed_elevation, self.dem_riverbed)\n #\n surface_water_bed_elevation = self.dem_riverbed # This is an alternative, if we do not want to introduce very deep bottom elevations of lakes and/or reservoirs. \n #\n # rounding values for surface_water_bed_elevation\n self.surface_water_bed_elevation = pcr.roundup(surface_water_bed_elevation * 1000.)/1000.\n #\n # - river bed condutance (unit: m2/day)\n bed_surface_area = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, \\\n self.WaterBodies.fracWat * self.cellAreaMap) # TODO: Incorporate the concept of dynamicFracWat\n bed_surface_area = pcr.cover(bed_surface_area, \\\n self.bankfull_width * self.channelLength)\n bed_surface_area = self.bankfull_width * self.channelLength\n bed_conductance = (1.0/self.bed_resistance) * bed_surface_area\n bed_conductance = pcr.ifthenelse(bed_conductance < 1e-20, 0.0, \\\n bed_conductance) \n self.bed_conductance = pcr.cover(bed_conductance, 0.0)\n # \n # - 'channel width' for lakes and reservoirs \n channel_width = pcr.areamaximum(self.bankfull_width, self.WaterBodies.waterBodyIds)\n channel_width = pcr.cover(channel_width, self.bankfull_width)\n #\n # - convert discharge value to surface water elevation (m)\n river_water_height = (channel_width**(-3/5)) * (discharge**(3/5)) * ((self.gradient)**(-3/10)) *(self.manningsN**(3/5))\n surface_water_elevation = self.dem_riverbed + \\\n river_water_height\n #\n # - calculating water level (unit: m) above the flood plain # TODO: Improve this concept (using Rens's latest innundation scheme) \n #----------------------------------------------------------\n water_above_fpl = pcr.max(0.0, surface_water_elevation - self.dem_floodplain) # unit: m, water level above the floodplain (not distributed)\n water_above_fpl *= self.bankfull_depth * self.bankfull_width / self.cellAreaMap # unit: m, water level above the floodplain (distributed within the cell)\n # TODO: Improve this concept using Rens's latest scheme\n #\n # - corrected surface water elevation\n surface_water_elevation = pcr.ifthenelse(surface_water_elevation > self.dem_floodplain, \\\n self.dem_floodplain + water_above_fpl, \\\n surface_water_elevation)\n # - surface water elevation for lakes and reservoirs:\n lake_reservoir_water_elevation = pcr.ifthen(self.WaterBodies.waterBodyOut, surface_water_elevation)\n lake_reservoir_water_elevation = pcr.areamaximum(lake_reservoir_water_elevation, self.WaterBodies.waterBodyIds)\n lake_reservoir_water_elevation = pcr.cover(lake_reservoir_water_elevation, \\\n pcr.areaaverage(surface_water_elevation, self.WaterBodies.waterBodyIds))\n # - maximum and minimum values for lake_reservoir_water_elevation\n lake_reservoir_water_elevation = pcr.min(self.dem_floodplain, lake_reservoir_water_elevation)\n lake_reservoir_water_elevation = pcr.max(surface_water_bed_elevation, lake_reservoir_water_elevation)\n # - smoothing\n lake_reservoir_water_elevation = pcr.areaaverage(surface_water_elevation, self.WaterBodies.waterBodyIds)\n # \n # - 
merge lake and reservoir water elevation\n
        surface_water_elevation = pcr.cover(lake_reservoir_water_elevation, surface_water_elevation)\n
        #\n
        # - pass values to the river package\n
        surface_water_elevation = pcr.cover(surface_water_elevation, self.surface_water_bed_elevation)\n
        surface_water_elevation = pcr.rounddown(surface_water_elevation * 1000.)/1000.\n
        #\n
        # - make sure that HRIV >= RBOT ; no infiltration if HRIV = RBOT (and h < RBOT) \n
        self.surface_water_elevation = pcr.max(surface_water_elevation, self.surface_water_bed_elevation)\n
        #\n
        # - pass the values to the RIV package \n
        self.pcr_modflow.setRiver(self.surface_water_elevation, self.surface_water_bed_elevation, self.bed_conductance, 1)\n
        \n
        # TODO: Improve this concept, particularly while calculating surface water elevation in lakes and reservoirs\n
        \n
    def set_recharge_package(self, \\\n
                             gwRecharge, gwAbstraction, \n
                             gwAbstractionReturnFlow = 0.0): # Note: We ignored the latter as MODFLOW should capture this part as well. \n
\n
        logger.info(\"Set the recharge package based on the given recharge, abstraction and abstraction return flow fields.\")\n
\n
        # specify the recharge package\n
        # + recharge/capillary rise (unit: m/day) from PCR-GLOBWB \n
        # - groundwater abstraction (unit: m/day) from PCR-GLOBWB \n
        # + return flow of groundwater abstraction (unit: m/day) from PCR-GLOBWB \n
        net_recharge = gwRecharge - gwAbstraction + \\\n
                       gwAbstractionReturnFlow\n
\n
        # - correcting values (considering MODFLOW lat/lon cell properties)\n
        #   and pass them to the RCH package \n
        net_RCH = pcr.cover(net_recharge * self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize()), 0.0)\n
        net_RCH = pcr.cover(pcr.ifthenelse(pcr.abs(net_RCH) < 1e-20, 0.0, net_RCH), 0.0)\n
        \n
        self.pcr_modflow.setRecharge(net_RCH, 1)\n
\n
    def set_drain_package(self):\n
\n
        logger.info(\"Set the drain package (for the release of overbank storage).\")\n
\n
        # specify the drain package \n
        # - the drain package is used to simulate the drainage of bank storage \n
        drain_elevation   = self.estimate_bottom_of_bank_storage() # unit: m\n
        drain_conductance = self.recessionCoeff * self.specificYield * self.cellAreaMap # unit: m2/day\n
        self.pcr_modflow.setDrain(drain_elevation, drain_conductance, 1)\n
\n
    def return_innundation_fraction(self,relative_water_height):\n
\n
        # - fraction (0 to 1) of flooded area, based on the relative_water_height (above the minimum dem)\n
        DZRIV = relative_water_height\n
        \n
        CRFRAC_RIV = pcr.min(1.0,1.00-(self.dzRel0100-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0100-self.dzRel0090) \t )\n
        CRFRAC_RIV = pcr.ifthenelse(DZRIV list:\n
    \"\"\" Read a Sudoku puzzle from the given file \"\"\"\n
    digits = [c for c in open(filename).read() if c in '123456789.']\n
    grid = group(digits, 9)\n
    return grid\n
\n
\n
def display(values: list) -> None:\n
    \"\"\"Display the Sudoku grid \"\"\"\n
    width = 2\n
    line = '+'.join(['-' * (width * 3)] * 3)\n
    for row in range(9):\n
        print(''.join(values[row][col].center(width) + ('|' if str(col) in '25' else '') for col in range(9)))\n
        if str(row) in '25':\n
            print(line)\n
    print()\n
\n
\n
def group(values: list, n: int) -> list:\n
    \"\"\"\n
    Group the values into a list made up of lists of n elements each\n
    >>> group([1,2,3,4], 2)\n
    [[1, 2], [3, 4]]\n
    >>> group([1,2,3,4,5,6,7,8,9], 3)\n
    [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n
    \"\"\"\n
    return [values[i:i + n] for i in range(0, len(values), n)]\n
\n
\n
def get_row(values: list, pos: tuple) -> list:\n
    \"\"\" Return all values of the row given by pos\n
    >>> get_row([['1', '2', '.'], ['4', '5', '6'], ['7', '8', '9']], (0, 0))\n
    ['1', '2', '.']\n
    >>> get_row([['1', '2', '3'], ['4', '.', '6'], ['7', '8', '9']], (1, 0))\n
    ['4', '.', '6']\n
    >>> get_row([['1', '2', '3'], ['4', '5', '6'], ['.', '8', '9']], (2, 0))\n
    ['.', '8', '9']\n
    \"\"\"\n
    a, _ = pos\n
    return values[a]\n
\n
\n
def get_col(values: list, pos: tuple) -> list:\n
    \"\"\" Return all values of the column given by pos\n
    >>> get_col([['1', '2', '.'], ['4', '5', '6'], ['7', '8', '9']], (0, 0))\n
    ['1', '4', '7']\n
    >>> get_col([['1', '2', '3'], ['4', '.', '6'], ['7', '8', '9']], (0, 1))\n
    ['2', '.', '8']\n
    >>> get_col([['1', '2', '3'], ['4', '5', '6'], ['.', '8', '9']], (0, 2))\n
    ['3', '6', '9']\n
    \"\"\"\n
\n
    _, b = pos\n
    return [values[a][b] for a in range(len(values))]\n
\n
\n
def get_block(values: list, pos: tuple) -> list:\n
    \"\"\" Return all values of the 3x3 block that contains position pos\n
    >>> grid = read_sudoku('puzzle1.txt')\n
    >>> get_block(grid, (0, 1))\n
    ['5', '3', '.', '6', '.', '.', '.', '9', '8']\n
    >>> get_block(grid, (4, 7))\n
    ['.', '.', '3', '.', '.', '1', '.', '.', '6']\n
    >>> get_block(grid, (8, 8))\n
    ['2', '8', '.', '.', '.', '5', '.', '7', '9']\n
    \"\"\"\n
    b, d = pos\n
    a: list = []\n
    if b < 3:\n
        c = 0\n
    elif 3 <= b < 6:\n
        c = 3\n
    else:\n
        c = 6\n
    a += values[c] + values[c + 1] + values[c + 2]\n
    if d < 3:\n
        block = a[0:3] + a[9:12] + a[18:21]\n
    elif 3 <= d < 6:\n
        block = a[3:6] + a[12:15] + a[21:24]\n
    else:\n
        block = a[6:9] + a[15:18] + a[24:27]\n
    return block\n
\n
\n
def find_empty_positions(grid: list) -> Optional[tuple]:\n
    \"\"\" Find the first empty position in the puzzle\n
    >>> find_empty_positions([['1', '2', '.'], ['4', '5', '6'], ['7', '8', '9']])\n
    (0, 2)\n
    >>> find_empty_positions([['1', '2', '3'], ['4', '.', '6'], ['7', '8', '9']])\n
    (1, 1)\n
    >>> find_empty_positions([['1', '2', '3'], ['4', '5', '6'], ['.', '8', '9']])\n
    (2, 0)\n
    \"\"\"\n
    for a in range(len(grid)):\n
        for b in range(len(grid)):\n
            if grid[a][b] == '.':\n
                return a, b\n
    return None\n
\n
\n
def find_possible_values(grid: list, pos: tuple) -> set:\n
    \"\"\" Return the set of possible values for the given position\n
    >>> grid = read_sudoku('puzzle1.txt')\n
    >>> values = find_possible_values(grid, (0,2))\n
    >>> values == {'1', '2', '4'}\n
    True\n
    >>> values = find_possible_values(grid, (4,7))\n
    >>> values == {'2', '5', '9'}\n
    True\n
    \"\"\"\n
    a = set('123456789')\n
    b = a.difference(set(get_col(grid, pos)))\n
    c = a.difference(set(get_row(grid, pos)))\n
    d = a.difference(set(get_block(grid, pos)))\n
\n
    return b & c & d\n
\n
\n
def solve(grid: list) -> Optional[list]:\n
    \"\"\" Solve the puzzle given in grid\n
    How to solve a Sudoku?\n
    1. Find an empty position\n
    2. Find all values that could go into this position\n
    3. For each possible value:\n
        3.1. Put the value into the position\n
        3.2. Continue solving the rest of the puzzle\n
    >>> grid = read_sudoku('puzzle1.txt')\n
    >>> solve(grid)\n
    [['5', '3', '4', '6', '7', '8', '9', '1', '2'], ['6', '7', '2', '1', '9', '5', '3', '4', '8'], ['1', '9', '8', '3', '4', '2', '5', '6', '7'], ['8', '5', '9', '7', '6', '1', '4', '2', '3'], ['4', '2', '6', '8', '5', '3', '7', '9', '1'], ['7', '1', '3', '9', '2', '4', '8', '5', '6'], ['9', '6', '1', '5', '3', '7', '2', '8', '4'], ['2', '8', '7', '4', '1', '9', '6', '3', '5'], ['3', '4', '5', '2', '8', '6', '1', '7', '9']]\n
    \"\"\"\n
    poz = find_empty_positions(grid)\n
    if not poz:\n
        return grid\n
    a, b = poz\n
    for var in find_possible_values(grid, poz):\n
        grid[a][b] = var\n
        resh = solve(grid)\n
        if resh:\n
            return resh\n
    grid[a][b] = '.'\n
    return None\n
\n
\n
def check_solution(solution: list) -> bool:\n
    \"\"\" Return True if the solution is correct, otherwise False \"\"\"\n
    # TODO: Add doctests with bad puzzles\n
    # every 3x3 block must hold the digits 1-9; blocks are anchored at rows/cols 0, 3 and 6\n
    for a in (0, 3, 6):\n
        for b in (0, 3, 6):\n
            var = set(get_block(solution, (a, b)))\n
            if var != set('123456789'):\n
                return False\n
\n
    for a in range(len(solution)):\n
        var = set(get_col(solution, (1, a)))\n
        if var != set('123456789'):\n
            return False\n
\n
    for b in range(len(solution)):\n
        var = set(get_row(solution, (b, 1)))\n
        if var != set('123456789'):\n
            return False\n
    return True\n
\n
\n
def generate_sudoku(n: int) -> list:\n
    \"\"\" Generate a Sudoku filled with n elements\n
    >>> grid = generate_sudoku(40)\n
    >>> sum(1 for row in grid for e in row if e == '.')\n
    41\n
    >>> solution = solve(grid)\n
    >>> check_solution(solution)\n
    True\n
    >>> grid = generate_sudoku(1000)\n
    >>> sum(1 for row in grid for e in row if e == '.')\n
    0\n
    >>> solution = solve(grid)\n
    >>> check_solution(solution)\n
    True\n
    >>> grid = generate_sudoku(0)\n
    >>> sum(1 for row in grid for e in row if e == '.')\n
    81\n
    >>> solution = solve(grid)\n
    >>> check_solution(solution)\n
    True\n
    \"\"\"\n
\n
    generate = solve([['.', '.', '.', '.', '.', '.', '.', '.', '.'] for _ in range(9)])\n
    n = 81 - min(81, n)\n
    while n:\n
        a = random.randint(0, 8)\n
        b = random.randint(0, 8)\n
        if generate[a][b] != '.':\n
            generate[a][b] = '.'\n
            n -= 1\n
    return generate\n
\n
\n
if __name__ == '__main__':\n
    for fname in ['sudoku1.txt', 'sudoku2.txt', 'sudoku3.txt']:\n
        grid = read_sudoku(fname)\n
        display(grid)\n
        solution = solve(grid)\n
        display(solution)\n
","sub_path":"Судоку/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":7268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"271404511","text":"from rest_framework.test import APITestCase\n
from rest_framework import status\n
\n
from ... 
import VERSION\nfrom ..factories import LibraryFactory\n\n\nclass TestCreateLibraryItem(APITestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.library = LibraryFactory.create(data={\"tag1\": {\"name\": \"foo\"}})\n\n def test_create_library_item(self):\n item_data = {\n \"tag\": \"tag2\",\n \"item\": {\n \"name\": \"bar\",\n }\n }\n\n self.client.force_authenticate(self.library.owner)\n response = self.client.post(\n f\"/{VERSION}/api/libraries/{self.library.id}/items/\",\n item_data,\n format=\"json\"\n )\n\n assert response.status_code == status.HTTP_204_NO_CONTENT\n\n def test_create_library_item_fails_if_tag_already_exists(self):\n item_data = {\n \"tag\": \"tag1\",\n \"item\": {\n \"name\": \"bar\",\n }\n }\n\n self.client.force_authenticate(self.library.owner)\n response = self.client.post(\n f\"/{VERSION}/api/libraries/{self.library.id}/items/\",\n item_data,\n format=\"json\"\n )\n\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert response.data == {\n \"detail\": f\"tag `tag1` already exists in library {self.library.id}\"\n }\n","sub_path":"mhep/mhep/v1/tests/views/test_create_library_item.py","file_name":"test_create_library_item.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"43170768","text":"class Bicycle(object):\n def __init__(self, model_name, wheels, frame, manufacturer):\n self.model_name = model_name\n self.weight = 2*wheels.weight + frame.weight\n self.cost = (2*wheels.cost + frame.cost) + ((2*wheels.cost + frame.cost) * manufacturer.margin)\n self.wheels = wheels #expected to be a list of wheel objects\n self.frame = frame\n self.manufacturer = manufacturer\n \nclass Bike_Shop(object):\n def __init__(self, shop_name, inventory, margin, profit):\n self.shop_name = shop_name\n self.inventory = inventory\n self.margin = margin\n self.profit = profit\n \nclass Customer(object):\n def __init__(self, customer_name, bike_funds, bike_owned):\n self.customer_name = customer_name\n self.bike_funds = bike_funds\n self.bike_owned = bike_owned\n\nclass Wheel(object):\n def __init__(self, model_name, weight, cost):\n self.model_name = model_name\n self.weight = weight\n self.cost = cost\n \nclass Frame(object):\n def __init__(self, material, weight, cost):\n self.material = material\n self.weight = weight\n self.cost = cost\n \nclass Manufacturer(object):\n def __init__(self, name, margin):\n self.name = name\n self.models = []\n self.margin = margin\n \n \n","sub_path":"bicycles.py","file_name":"bicycles.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"560591796","text":"from os.path import dirname\nfrom shared.utils import load\nfrom preprocessing.cleaning import as_float, as_list\nimport re\n\n\ndirectory = dirname(__file__) + '/'\n\n\ndef get_attributes():\n with open(directory + 'datasets/Attributes.txt', 'r') as attributes_file:\n attributes = [attribute.strip() for attribute in attributes_file.read().split(\"\\n\")]\n\n for n, attribute in enumerate(attributes):\n if attribute[-1] == \",\":\n attribute = attribute[0:-1]\n\n if attribute[0] == attribute[-1] == '\"':\n attribute = attribute[1:-1]\n\n attributes[n] = attribute\n\n return attributes\n\n\ndef get_data():\n with open(directory + 'datasets/DataMining2015Responses.csv', 'r') as csv_file:\n return load(csv_file)\n\n\ndef clean(row):\n \"\"\"\n Cleans selected attributes from 
the survey data.\n
    \"\"\"\n
    # conversion to float\n
    row['Height'] = as_float(row['Height'])\n
    row['Shoe size'] = as_float(row['Shoe size'])\n
    row['Age'] = as_float(row['Age'])\n
\n
    # programming languages\n
    languages = list(set(as_list(row['Which programming languages do you know?'])))\n
    languages = [language.lower() for language in languages]\n
\n
    corrections = {\n
        'phyton': 'python',\n
        'obj-c': 'objective-c',\n
        'nodejs': 'javascript',\n
        't-sql': 'sql',\n
        'sql ...': 'sql'\n
    }\n
\n
    for incorrect, correct in corrections.items():\n
        if incorrect in languages:\n
            languages.remove(incorrect)\n
            languages.append(correct)\n
\n
    row['Which programming languages do you know?'] = languages\n
\n
    # video games\n
    row['Which of these games have you played?'] = [\n
        game\n
        for game\n
        in as_list(row['Which of these games have you played?'])\n
        if game != 'I have not played any of these games'\n
    ]\n
\n
    # interesting topics\n
    very_interesting = []\n
    sounds_interesting = []\n
    meh = []\n
    not_interested = []\n
\n
    topic_pattern = re.compile('Which topics would you prefer to learn in this course\\? \\[(.+)\\]')\n
\n
    for key, value in row.items():\n
        match = topic_pattern.match(key)\n
        if match:\n
            if value == 'Very interested':\n
                very_interesting.append(match.group(1))\n
            elif value == 'Sounds interesting':\n
                sounds_interesting.append(match.group(1))\n
            elif value == 'Meh':\n
                meh.append(match.group(1))\n
            else:\n
                not_interested.append(match.group(1))\n
\n
    # only the two extreme categories are attached to the row;\n
    # sounds_interesting and meh are collected but never stored\n
    row['very_interesting'] = ['very_interesting: ' + topic for topic in very_interesting]\n
    row['not_interested'] = ['not_interested: ' + topic for topic in not_interested]\n
\n
    return row\n
","sub_path":"datamining/survey.py","file_name":"survey.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"530782347","text":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n # Run as:\n# $ grep CERT: node-1.log > xx.txt\n# $ python blockexplore.py xx.txt > g.dot\n# $ dot -Kneato -n -Tsvg -o sample.svg g.dot\n\nimport re\nimport sys\n\ndata = open(sys.argv[1], 'r').read()\ndata = re.findall(\n \"\\[([0-9]{4})-([0-9]{2})-([0-9]{2})T([0-9]{2}):([0-9]{2}):([0-9]{2})\\.([0-9]{3})Z INFO dag_core::primary] CERT: \\(([^,]*),([^\\)]*)\\) Deps: \\{([^}]*)\\} Txs: \\{([^}]*)\\}\",\n data)\n\nclass Seq:\n def __init__(self):\n self.cnt = 0\n self.d = {}\n\n def lookup(self, item):\n if item not in self.d:\n self.d[item] = self.cnt\n self.cnt += 1\n return self.d[item]\n\n\nseq = Seq()\ninversemapx = {}\nmapx = {}\nlistx = []\nxmin = None\nfor (_, _, _, hx, mx, sx, ms, senderid, xround, deps, txs) in data:\n mins = int(hx) * 60 + int(mx)\n secs = 60 * mins + int(sx)\n millis = 1000 * secs + int(ms)\n\n if xmin is None:\n xmin = millis\n\n xtime = millis - xmin\n sender = seq.lookup(senderid)\n xround = int(xround)\n others = list(map(seq.lookup, re.findall(\"\\(([^,]+), [^\\)]+\\)\", deps)))\n volume = re.findall(\"\\(([^,]+), [^\\)]+\\)\", txs)\n # print(millis)\n #if (sender, xround) in mapx:\n # print((sender, xround))\n\n mapx[(sender, xround)] = (xtime, sender, xround, others, volume)\n listx += [(xtime, sender, xround, others, volume)]\n\n for parent in others:\n inversemapx[(parent, xround-1)] = None\n\nprint(\"digraph \\\"blocks\\\" {\")\nprint(\" graph [outputorder=edgesfirst];\")\nstored = {}\ntxs = {}\nfor (xtime, sender, xround, others, volume) in listx:\n # (xtime, sender, xround, others, volume) = mapx[(sender, xround)]\n if sender not in txs:\n txs[sender] = set()\n\n prev_txs = txs[sender]\n tx_add = set(volume) - prev_txs\n tx_commit = prev_txs - set(volume)\n label = f'+{len(tx_add)}-{len(tx_commit)}'\n txs[sender] = set(volume)\n\n shape=\"circle\"\n add = \"\"\n col = 'white'\n if xround % 2 == 1:\n col = \"gainsboro\"\n if (sender, xround) not in inversemapx:\n col = 'red'\n if len(volume) > 0:\n shape=\"box\"\n add = f' ({len(volume)})'\n if (sender, xround) not in stored:\n print(f' n{sender}r{xround} [label=\"{str(xround) + add}\" pos=\"{100*sender},{xtime}!\" shape={shape} style=filled fillcolor=\"{col}\"];')\n stored[(sender, xround)] = 0\n else:\n stored[(sender, xround)] += 1\n print(f' n{sender}r{xround}v{stored[(sender, xround)]} [label=\"{str(xround) + add}\" pos=\"{100*sender},{xtime}!\" shape={shape} style=filled fillcolor=\"mistyrose\"];')\n\n\ndone = {}\nfor (xtime, sender, xround, others, volume) in listx:\n if (sender, xround) in done:\n continue\n done[(sender, xround)] = 1\n # (xtime, sender, xround, others, volume) = mapx[(sender, xround)]\n for parent in others:\n if (parent, xround-1) in mapx:\n print(f' n{parent}r{xround-1} -> n{sender}r{xround};')\n\n\n\n\nprint(\"}\")\n","sub_path":"scripts/blockexplore.py","file_name":"blockexplore.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"247334557","text":"# Regular string\nprint(\"Mary had a little lamb.\")\n# String with an argument passed which is then filled by format function passing in a string\nprint(\"Its fleece was white as {}.\".format('snow'))\n# Another string\nprint(\"And everywhere that Mary went.\")\n# print a period 10 times\nprint(\".\" * 10) #what'd that do?\n\nend1 = \"C\"\nend2 = \"h\"\nend3 = \"e\"\nend4 = \"e\"\nend5 = \"s\"\nend6 = \"e\"\nend7 = \"B\"\nend8 = \"u\"\nend9 = \"r\"\nend10 = \"g\"\nend11 = \"e\"\nend12 = \"r\"\n\n# watch that 
comma at the end. try removing it to see what happens\n
\n
# String concatenation, and what that trailing comma comment means:\n
\n
# In Python 2, a print statement with commas separated items with spaces,\n
# and a trailing comma kept the next print on the same line; without it,\n
# a newline was appended.\n
# In Python 3, print() is a function that ends with a newline by default;\n
# passing end=' ' replaces that newline with a space, so the next print()\n
# continues on the same line.\n
\n
# Print each letter of \"Cheese\" and concatenate them.\n
# use end=' ' to separate the two words with a space instead of a newline\n
print(end1 + end2 + end3 + end4 + end5 + end6, end=' ')\n
# Print each letter of Burger\n
print(end7 + end8 + end9 + end10 + end11 + end12)\n
","sub_path":"theHardWay/ex07_Printing.py","file_name":"ex07_Printing.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"430526024","text":"import random\n
cust_bag = []\n
response_gift = []\n
def purchaseitems():\n
    items_purch = input(str(\"What do you want to buy? \"))\n
    cust_bag.append(items_purch)\n
def giftcard():\n
    buy_gift = input(str(\"Do you want to buy a giftcard? (yes/no) \"))\n
    if buy_gift == \"yes\":\n
        cust_bag.append(\"GIFT CARD\")\n
        response_gift.append(buy_gift)\n
\n
print(\"Hello shopper!\")\n
buy_items = input(str(\"Do you want to order any items? (yes/no) \"))\n
\n
if buy_items == \"yes\":\n
    print(\"Wohoo! Let's shop :)\")\n
    cust_name = input(str(\"Welcome customer: \"))\n
    purchaseitems()\n
    giftcard()\n
    continue_shop = input(str(\"Do you want to continue shopping \"))\n
    while continue_shop == \"yes\":\n
        purchaseitems()\n
        giftcard()\n
        continue_shop = input(str(\"Do you want to continue shopping \"))\n
    # the while loop's else clause runs once the loop condition turns false\n
    else:\n
        if \"yes\" in response_gift:\n
            print(\"Yay, you bought our GIFT CARD! 
You get a 10% discount on all of your items!\")\n
            for item in cust_bag:\n
                print(\"You have: \",str(item))\n
        print(\"Come again \", cust_name)\n
else:\n
    print(\"Ok, bye :(\")\n
\n
\n
","sub_path":"Introduction to Python Projects/ShoppingCart.py","file_name":"ShoppingCart.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"353000825","text":"from ..components import _BaseComponent\n
from ..config import ConfigLoader\n
\n
import asyncio\n
\n
# Sub components imports :\n
# -Speech To Text engine\n
from .STT_EngineWebsocket import STT_Engine_WebSocket as STT_Engine\n
\n
class SpeechToText(_BaseComponent):\n
\n
    def __init__(self, queues):\n
        super().__init__(queues)\n
        self.queue_command = None\n
        self.queue_input = None\n
        self.queue_tts = None\n
        self.stt = STT_Engine()\n
        self.config = ConfigLoader(None, None)\n
        self.config.subscribe('SpeechToTextConfig', 'engine')\n
\n
    def setup(self):\n
        self.queue_command = self._queues['QueueDispatcher']\n
        self.queue_input = self._queues['QueueInput']\n
        self.queue_tts = self._queues['QueueTextToSpeech']\n
\n
    # Function calling STT_Engine Recognize with async call\n
    def run(self):\n
        while self._is_init:\n
            audio_stream = self.queue_input.get()\n
            STTConfig = self.config.get('SpeechToTextConfig')\n
            if STTConfig is not None:\n
                self.stt.switchServer(STTConfig)\n
            if audio_stream is None:\n
                break\n
            self.queue_tts.put(\"Wait ...\")\n
            print (\"Sending information to be translated...\")\n
            loop = asyncio.new_event_loop()\n
            asyncio.set_event_loop(loop)\n
            loop.run_until_complete(self.stt.recognize(audio_stream, self))\n
\n
    def stop(self):\n
        print('Stopping {0}...'.format(self.__class__.__name__))\n
        self._is_init = False\n
        self.queue_input.put(None)\n
","sub_path":"ava/speech_to_text/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"61913207","text":"# -*- coding: utf-8 -*-\n
\"\"\"\n
Spyder Editor\n
\n
This is a temporary script file.\n
\"\"\"\n
\n
\n
import json\n
import pandas as pd\n
\n
file_name='data.csv'\n
with open('data.json') as data_file:\n
    j = json.load(data_file)\n
    df = pd.DataFrame(j)\n
    cols = ['TIMESTAMP','AVG( IrrPOA_Avg )']\n
    # keep the two columns, in the order stored in the list 'cols'\n
    df = df.loc[:, cols]\n
    # then give them cleaner names; assigning to df.cols is a no-op,\n
    # pandas renames columns through df.columns\n
    df.columns = ['TIMESTAMP', 'IrrPOA_Avg']\n
    \n
    df.to_csv(file_name,sep=',',encoding='utf-8',index=False) \n
    \n
#with open('data.tsv.', 'w') as output_file:\n
#    dw = csv.DictWriter(output_file, sorted(j[0].keys()), delimiter='\\t')\n
#    dw.writeheader()\n
#    dw.writerows(j)\n
    \n
    \n
    \n
    \n
\n
","sub_path":"app/graph/swh_irr/jsontotsv.py","file_name":"jsontotsv.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"1131883","text":"import asyncio\n
import re\n
import time\n
from random import shuffle\n
\n
from musicbot.cleverbot import CleverWrap\n
from musicbot.config import ConfigDefaults\n
from musicbot.games.game_2048 import Game2048\n
from musicbot.games.game_cah import GameCAH\n
from musicbot.games.game_connect_four import GameConnectFour\n
from musicbot.games.game_hangman import GameHangman\n
from musicbot.utils import (Response, block_user, escape_dis, command_info, owner_only,\n
                            random_line)\n
\n
\n
class FunCommands:\n
\n
    @command_info(\"1.9.5\", 1477781586, {\n
        \"4.8.7\": (1504288917, \"Improved command\")\n
    })\n
    async def cmd_c(self, author, channel, leftover_args):\n
        \"\"\"\n
        
///|Usage\n `{command_prefix}c `\n ///|Explanation\n Chat with Giesela\n \"\"\"\n if len(leftover_args) < 1:\n return Response(\"You need to actually say something...\")\n\n cb, nick = self.chatters.get(author.id, (None, None))\n if cb is None:\n cb = CleverWrap(\"CCC8n_IXK43aOV38rcWUILmYUBQ\")\n nick = random_line(ConfigDefaults.name_list).strip().title()\n self.chatters[author.id] = (cb, nick)\n\n await self.send_typing(channel)\n msgContent = \" \".join(leftover_args)\n\n start_time = time.time()\n\n answer = await self.loop.run_in_executor(None, cb.say, msgContent)\n\n typing_time = len(answer) / 5.5\n\n left_to_wait = typing_time - (time.time() - start_time)\n\n if left_to_wait > 0:\n await asyncio.sleep(left_to_wait)\n\n return Response(answer)\n\n @block_user\n async def cmd_cah(self, message, channel, author, leftover_args):\n \"\"\"\n Usage:\n {command_prefix}cah create\n {command_prefix}cah join \n {command_prefix}cah leave \n\n {command_prefix}cah start \n {command_prefix}cah stop \n\n Play a cards against humanity game\n\n References:\n {command_prefix}help cards\n -learn how to create/edit cards\n {command_prefix}help qcards\n -learn about how to create/edit question cards\n \"\"\"\n\n argument = leftover_args[0].lower() if len(leftover_args) > 0 else None\n\n if argument == \"create\":\n if self.cah.is_user_in_game(author.id):\n g = self.cah.get_game(author.id)\n return Response(\n \"You can't host a game if you're already in one\\nUse `{}cah leave {}` to leave your current game\".\n format(self.config.command_prefix, g.token),\n delete_after=15)\n\n token = self.cah.new_game(author.id)\n return Response(\n \"Created a new game.\\nUse `{0}cah join {1}` to join this game and\\nwhen everyone's in use `{0}cah start {1}`\".\n format(self.config.command_prefix, token),\n delete_after=1000)\n elif argument == \"join\":\n token = leftover_args[\n 1].lower() if len(leftover_args) > 1 else None\n if token is None:\n return Response(\"You need to provide a token\")\n\n if self.cah.is_user_in_game(author.id):\n g = self.cah.get_game_from_user_id(author.id)\n return Response(\n \"You can only be part of one game at a time!\\nUse `{}cah leave {}` to leave your current game\".\n format(self.config.command_prefix, g.token),\n delete_after=15)\n\n g = self.cah.get_game(token)\n\n if g is None:\n return Response(\n \"This game does not exist *shrugs*\")\n\n if g.in_game(author.id):\n return Response(\n \"You're already in this game!\")\n\n if self.cah.user_join_game(author.id, token):\n return Response(\"Successfully joined the game **{}**\".format(\n token.upper()))\n else:\n return Response(\n \"Failed to join game **{}**\".format(token.upper()))\n elif argument == \"leave\":\n token = leftover_args[\n 1].lower() if len(leftover_args) > 1 else None\n if token is None:\n return Response(\"You need to provide a token\")\n\n g = self.cah.get_game(token)\n\n if g is None:\n return Response(\n \"This game does not exist *shrugs*\")\n\n if not g.in_game(author.id):\n return Response(\n \"You're not part of this game!\")\n\n if self.cah.player_leave_game(author.id, token):\n return Response(\n \"Successfully left the game **{}**\".format(token.upper()))\n else:\n return Response(\n \"Failed to leave game **{}**\".format(token.upper()))\n elif argument == \"start\":\n token = leftover_args[\n 1].lower() if len(leftover_args) > 1 else None\n if token is None:\n return Response(\"You need to provide a token\")\n\n g = self.cah.get_game(token)\n if g is None:\n return Response(\"This game does not 
exist!\")\n\n if not g.is_owner(author.id):\n return Response(\n \"Only the owner may start a game!\")\n\n if not g.enough_players():\n return Response(\n \"There are not enough players to start this game.\\nUse `{}cah join {}` to join a game\".\n format(self.config.command_prefix, g.token),\n delete_after=15)\n\n if not g.start_game():\n return Response(\n \"This game has already started!\")\n elif argument == \"stop\":\n token = leftover_args[\n 1].lower() if len(leftover_args) > 1 else None\n g = self.cah.get_game(token)\n if g is None:\n return Response(\"This game does not exist!\")\n\n if not g.is_owner(author.id):\n return Response(\n \"Only the owner may stop a game!\")\n\n self.cah.stop_game(g.token)\n return Response(\n \"Stopped the game **{}**\".format(token))\n\n @block_user\n async def cmd_cards(self, server, channel, author, message, leftover_args):\n \"\"\"\n Usage:\n {command_prefix}cards list [@mention] [text | likes | occurences | date | random | id | author | none]\n -list all the available cards\n {command_prefix}cards create \n -create a new card with text\n {command_prefix}cards edit \n -edit a card by its id\n {command_prefix}cards info \n -Get more detailed information about a card\n {command_prefix}cards search \n -Search for a card\n {command_prefix}cards delete \n -Delete a question card\n\n Here you manage the non question cards\n \"\"\"\n\n argument = leftover_args[0].lower() if len(leftover_args) > 0 else None\n\n if argument == \"list\":\n sort_modes = {\"text\": (lambda entry: entry.text, False, lambda entry: None), \"random\": None, \"occurences\": (lambda entry: entry.occurences, True, lambda entry: entry.occurences), \"date\": (\n lambda entry: entry.creation_date, True, lambda entry: prettydate(entry.creation_date)), \"author\": (lambda entry: entry.creator_id, False, lambda entry: self.get_global_user(entry.creator_id).name), \"id\": (lambda entry: entry.id, False, lambda entry: None), \"likes\": (lambda entry: entry.like_dislike_ratio, True, lambda entry: \"{}%\".format(int(entry.like_dislike_ratio * 100)))}\n\n cards = self.cah.cards.cards.copy(\n ) if message.mentions is None or len(message.mentions) < 1 else [\n x for x in self.cah.cards.cards.copy()\n if x.creator_id in [u.id for u in message.mentions]\n ]\n sort_mode = leftover_args[1].lower(\n ) if len(leftover_args) > 1 and leftover_args[1].lower(\n ) in sort_modes.keys() else \"none\"\n\n display_info = None\n\n if sort_mode == \"random\":\n shuffle(cards)\n elif sort_mode != \"none\":\n cards = sorted(\n cards,\n key=sort_modes[sort_mode][0],\n reverse=sort_modes[sort_mode][1])\n display_info = sort_modes[sort_mode][2]\n\n await self.card_viewer(channel, author, cards, display_info)\n elif argument == \"search\":\n search_query = \" \".join(\n leftover_args[1:]) if len(leftover_args) > 1 else None\n\n if search_query is None:\n return Response(\n \"You need to provide a query to search for!\",\n delete_after=15)\n\n results = self.cah.cards.search_card(search_query, 3)\n\n if len(results) < 1:\n return Response(\"**Didn't find any cards!**\")\n\n card_string = \"{0.id}. 
\\\"{1}\\\"\"\n cards = []\n for card in results:\n cards.append(\n card_string.format(card, card.text.replace(\"$\", \"_____\")))\n\n return Response(\n \"**I found the following cards:**\\n\\n\" + \"\\n\".join(cards),\n delete_after=40)\n elif argument == \"info\":\n card_id = leftover_args[\n 1].lower().strip() if len(leftover_args) > 1 else None\n\n card = self.cah.cards.get_card(card_id)\n if card is not None:\n info = \"Card **{0.id}** by {1}\\n```\\n\\\"{0.text}\\\"\\nused {0.occurences} time{2}\\ndrawn {0.picked_up_count} time{5}\\nliked by {6}% of players\\ncreated {3}```\\nUse `{4}cards edit {0.id}` to edit this card\"\n return Response(\n info.format(card,\n self.get_global_user(card.creator_id).mention,\n \"s\" if card.occurences != 1 else \"\",\n prettydate(card.creation_date), self.config.\n command_prefix, \"s\" if card.picked_up_count !=\n 1 else \"\", int(card.like_dislike_ratio * 100)))\n\n return Response(\n \"There's no card with that id. Use `{}cards list` to list all the possible cards\".\n format(self.config.command_prefix))\n elif argument == \"create\":\n text = \" \".join(\n leftover_args[1:]) if len(leftover_args) > 1 else None\n if text is None:\n return Response(\n \"You might want to actually add some text to your card\",\n delete_after=20)\n if len(text) < 3:\n return Response(\n \"I think that's a bit too short...\")\n if len(text) > 140:\n return Response(\"Maybe a bit too long?\")\n\n already_has_card, card = self.cah.cards.card_with_text(text)\n if already_has_card:\n return Response(\n \"There's already a card with a fairly similar content. <{0}>\\nUse `{1}cards info {0}` to find out more about this card\".\n format(card.id, self.config.command_prefix))\n\n card_id = self.cah.cards.add_card(text, author.id)\n return Response(\"Successfully created card **{}**\".format(card_id))\n elif argument == \"edit\":\n card_id = leftover_args[\n 1].lower().strip() if len(leftover_args) > 1 else None\n\n try:\n card_id_value = int(card_id)\n except:\n return Response(\"An id must be a number\")\n\n if card_id is None:\n return Response(\n \"You need to provide the card's id!\")\n\n text = \" \".join(\n leftover_args[2:]) if len(leftover_args) > 1 else None\n if text is None:\n return Response(\n \"You might want to actually add some text to your card\",\n delete_after=20)\n if len(text) < 3:\n return Response(\n \"I think that's a bit too short...\")\n if len(text) > 140:\n return Response(\"Maybe a bit too long?\")\n\n already_has_card, card = self.cah.cards.card_with_text(text)\n if already_has_card and card.id != card_id_value:\n return Response(\n \"There's already a card with a fairly similar content. 
<{0}>\\nUse `{1}cards info {0}` to find out more about this card\".\n format(card.id, self.config.command_prefix))\n\n if self.cah.cards.edit_card(card_id, text):\n return Response(\n \"Edited card <**{}**>\".format(card_id))\n else:\n return Response(\n \"There's no card with that id\")\n elif argument == \"delete\":\n card_id = leftover_args[\n 1].lower().strip() if len(leftover_args) > 1 else None\n\n if card_id is None:\n return Response(\n \"You must specify the card id\")\n\n if self.cah.cards.remove_card(card_id):\n return Response(\n \"Deleted card <**{}**>\".format(card_id))\n else:\n return Response(\n \"Could not remove card <**{}**>\".format(card_id),\n delete_after=15)\n else:\n return await self.cmd_help(channel, [\"cards\"])\n\n async def card_viewer(self,\n channel,\n author,\n cards,\n display_additional=None):\n cmds = (\"n\", \"p\", \"exit\")\n site_interface = \"**Cards | Page {0} of {1}**\\n```\\n{2}\\n```\\nShit you can do:\\n`n`: Switch to the next page\\n`p`: Switch to the previous page\\n`exit`: Exit the viewer\"\n card_string = \"<{}> [{}]{}\"\n\n items_per_page = 20\n timeout = 60\n current_page = 0\n\n total_pages, items_on_last_page = divmod(\n len(cards) - 1, items_per_page)\n\n def msg_check(msg):\n return msg.content.lower().strip().startswith(cmds)\n\n while True:\n start_index = current_page * items_per_page\n end_index = start_index + \\\n (items_per_page - 1 if current_page <\n total_pages else items_on_last_page)\n page_cards = cards[start_index:end_index]\n\n page_cards_texts = []\n for p_c in page_cards:\n page_cards_texts.append(\n card_string.format(\n p_c.id, p_c.text, \"\" if display_additional is None or\n display_additional(p_c) is None else \" | {}\".format(\n display_additional(p_c))))\n\n interface_msg = await self.safe_send_message(\n channel,\n site_interface.format(current_page + 1, total_pages + 1,\n \"\\n\".join(page_cards_texts)))\n user_msg = await self.wait_for_message(\n timeout, author=author, channel=channel, check=msg_check)\n\n if not user_msg:\n await self.safe_delete_message(interface_msg)\n break\n\n content = user_msg.content.lower().strip()\n\n if content.startswith(\"n\"):\n await self.safe_delete_message(interface_msg)\n await self.safe_delete_message(user_msg)\n current_page = (current_page + 1) % (total_pages + 1)\n elif content.startswith(\"p\"):\n await self.safe_delete_message(interface_msg)\n await self.safe_delete_message(user_msg)\n current_page = (current_page - 1) % (total_pages + 1)\n elif content.startswith(\"exit\"):\n await self.safe_delete_message(interface_msg)\n await self.safe_delete_message(user_msg)\n break\n\n await self.safe_send_message(\n channel, \"Closed the card viewer!\", expire_in=20)\n\n @block_user\n async def cmd_qcards(self, server, channel, author, message,\n leftover_args):\n \"\"\"\n Usage:\n {command_prefix}qcards list [@mention] [text | likes | occurences | date | author | id | blanks | random | none]\n -list all the available question cards\n {command_prefix}qcards create \n -create a new question card with text and if you want the number of cards to draw\n {command_prefix}qcards edit \n -edit a question card by its id\n {command_prefix}qcards info \n -Get more detailed information about a question card\n {command_prefix}qcards search \n -Search for a question card\n {command_prefix}qcards delete \n -Delete a question card\n\n Here you manage the question cards\n \"\"\"\n\n argument = leftover_args[0].lower() if len(leftover_args) > 0 else None\n\n if argument == \"list\":\n 
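# The table below maps each sort mode to (key function, reverse flag,\n            # display function), keeping the sorting data-driven.  A tiny sketch of\n            # the same pattern with hypothetical data (added for illustration only):\n            #\n            #     modes = {\"length\": (len, True, lambda t: \"{} chars\".format(len(t)))}\n            #     key_fn, rev, show = modes[\"length\"]\n            #     sorted([\"a\", \"abc\"], key=key_fn, reverse=rev)   # [\"abc\", \"a\"]\n            #\n            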
sort_modes = {\"text\": (lambda entry: entry.text, False, lambda entry: None), \"random\": None, \"occurences\": (lambda entry: entry.occurences, True, lambda entry: entry.occurences), \"date\": (lambda entry: entry.creation_date, True, lambda entry: prettydate(entry.creation_date)), \"author\": (lambda entry: entry.creator_id, False, lambda entry: self.get_global_user(\n entry.creator_id).name), \"id\": (lambda entry: entry.id, False, lambda entry: None), \"blanks\": (lambda entry: entry.number_of_blanks, True, lambda entry: entry.number_of_blanks), \"likes\": (lambda entry: entry.like_dislike_ratio, True, lambda entry: \"{}%\".format(int(entry.like_dislike_ratio * 100)))}\n\n cards = self.cah.cards.question_cards.copy(\n ) if message.mentions is None or len(message.mentions) < 1 else [\n x for x in self.cah.cards.question_cards.copy()\n if x.creator_id in [u.id for u in message.mentions]\n ]\n sort_mode = leftover_args[1].lower(\n ) if len(leftover_args) > 1 and leftover_args[1].lower(\n ) in sort_modes.keys() else \"none\"\n\n display_info = None\n\n if sort_mode == \"random\":\n shuffle(cards)\n elif sort_mode != \"none\":\n cards = sorted(\n cards,\n key=sort_modes[sort_mode][0],\n reverse=sort_modes[sort_mode][1])\n display_info = sort_modes[sort_mode][2]\n\n await self.qcard_viewer(channel, author, cards, display_info)\n elif argument == \"search\":\n search_query = \" \".join(\n leftover_args[1:]) if len(leftover_args) > 1 else None\n\n if search_query is None:\n return Response(\n \"You need to provide a query to search for!\",\n delete_after=15)\n\n results = self.cah.cards.search_question_card(search_query, 3)\n\n if len(results) < 1:\n return Response(\n \"**Didn't find any question cards!**\")\n\n card_string = \"{0.id}. \\\"{1}\\\"\"\n cards = []\n for card in results:\n cards.append(\n card_string.format(card,\n card.text.replace(\"$\", \"\\_\\_\\_\\_\\_\")))\n\n return Response(\n \"**I found the following question cards:**\\n\\n\" +\n \"\\n\".join(cards),\n delete_after=40)\n elif argument == \"info\":\n card_id = leftover_args[\n 1].lower().strip() if len(leftover_args) > 1 else None\n\n card = self.cah.cards.get_question_card(card_id)\n if card is not None:\n info = \"Question Card **{0.id}** by {1}\\n```\\n\\\"{0.text}\\\"\\nused {0.occurences} time{2}\\ncreated {3}```\\nUse `{4}cards edit {0.id}` to edit this card`\"\n return Response(\n info.format(card,\n self.get_global_user(card.creator_id).mention,\n \"s\" if card.occurences != 1 else \"\",\n prettydate(card.creation_date),\n self.config.command_prefix))\n elif argument == \"create\":\n text = \" \".join(\n leftover_args[1:]) if len(leftover_args) > 1 else None\n if text is None:\n return Response(\n \"You might want to actually add some text to your card\",\n delete_after=20)\n if len(text) < 3:\n return Response(\n \"I think that's a bit too short...\")\n if len(text) > 500:\n return Response(\"Maybe a bit too long?\")\n\n if text.count(\"$\") < 1:\n return Response(\n \"You need to have at least one blank ($) space\",\n delete_after=20)\n\n already_has_card, card = self.cah.cards.question_card_with_text(\n text)\n if already_has_card:\n return Response(\n \"There's already a question card with a fairly similar content. 
<{0}>\\nUse `{1}qcards info {0}` to find out more about this card\".\n format(card.id, self.config.command_prefix))\n\n card_id = self.cah.cards.add_question_card(text, author.id)\n return Response(\n \"Successfully created question card **{}**\".format(card_id))\n elif argument == \"edit\":\n card_id = leftover_args[\n 1].lower().strip() if len(leftover_args) > 1 else None\n\n try:\n card_id_value = int(card_id)\n except:\n return Response(\"An id must be a number\")\n\n if card_id is None:\n return Response(\n \"You need to provide the question card's id!\",\n delete_after=20)\n\n text = \" \".join(\n leftover_args[2:]) if len(leftover_args) > 2 else None\n if text is None:\n return Response(\n \"You might want to actually add some text to your question card\",\n delete_after=20)\n if len(text) < 3:\n return Response(\n \"I think that's a bit too short...\")\n if len(text) > 500:\n return Response(\"Maybe a bit too long?\")\n\n if text.count(\"$\") < 1:\n return Response(\n \"You need to have at least one blank ($) space\",\n delete_after=20)\n\n already_has_card, card = self.cah.cards.question_card_with_text(\n text)\n if already_has_card and card.id != card_id_value:\n return Response(\n \"There's already a question card with a fairly similar content. <{0}>\\nUse `{1}qcards info {0}` to find out more about this question card\".\n format(card.id, self.config.command_prefix))\n\n if self.cah.cards.edit_question_card(card_id, text):\n return Response(\n \"Edited question card <**{}**>\".format(card_id),\n delete_after=15)\n else:\n return Response(\n \"There's no question card with that id\")\n elif argument == \"delete\":\n card_id = leftover_args[\n 1].lower().strip() if len(leftover_args) > 1 else None\n\n if card_id is None:\n return Response(\n \"You must specify the question card id\")\n\n if self.cah.cards.remove_question_card(card_id):\n return Response(\n \"Deleted question card <**{}**>\".format(card_id),\n delete_after=15)\n else:\n return Response(\n \"Could not remove question card <**{}**>\".format(card_id),\n delete_after=15)\n else:\n return await self.cmd_help(channel, [\"qcards\"])\n\n async def qcard_viewer(self,\n channel,\n author,\n cards,\n display_additional=None):\n cmds = (\"n\", \"p\", \"exit\")\n site_interface = \"**Question Cards | Page {0} of {1}**\\n```\\n{2}\\n```\\nShit you can do:\\n`n`: Switch to the next page\\n`p`: Switch to the previous page\\n`exit`: Exit the viewer\"\n card_string = \"<{}> \\\"{}\\\"{}\"\n\n items_per_page = 20\n timeout = 60\n current_page = 0\n\n total_pages, items_on_last_page = divmod(\n len(cards) - 1, items_per_page)\n\n def msg_check(msg):\n return msg.content.lower().strip().startswith(cmds)\n\n while True:\n start_index = current_page * items_per_page\n end_index = start_index + \\\n (items_per_page - 1 if current_page <\n total_pages else items_on_last_page)\n page_cards = cards[start_index:end_index]\n\n page_cards_texts = []\n for p_c in page_cards:\n page_cards_texts.append(\n card_string.format(\n p_c.id,\n p_c.text.replace(\"$\", \"_____\"), \"\" if\n display_additional is None or display_additional(p_c)\n is None else \" | {}\".format(display_additional(p_c))))\n\n interface_msg = await self.safe_send_message(\n channel,\n site_interface.format(current_page + 1, total_pages + 1,\n \"\\n\".join(page_cards_texts)))\n user_msg = await self.wait_for_message(\n timeout, author=author, channel=channel, check=msg_check)\n\n if not user_msg:\n await self.safe_delete_message(interface_msg)\n break\n\n content = 
user_msg.content.lower().strip()\n\n if content.startswith(\"n\"):\n await self.safe_delete_message(interface_msg)\n await self.safe_delete_message(user_msg)\n current_page = (current_page + 1) % (total_pages + 1)\n elif content.startswith(\"p\"):\n await self.safe_delete_message(interface_msg)\n await self.safe_delete_message(user_msg)\n current_page = (current_page - 1) % (total_pages + 1)\n elif content.startswith(\"exit\"):\n await self.safe_delete_message(interface_msg)\n await self.safe_delete_message(user_msg)\n break\n\n await self.safe_send_message(\n channel, \"Closed the question card viewer!\", expire_in=20)\n\n @block_user\n @command_info(\"1.9.5\", 1478998740, {\n \"2.0.2\": (1481387640, \"Added Hangman game and generalised game hub command\"),\n \"3.5.2\": (1497712233, \"Updated documentaion for this command\"),\n \"4.6.3\": (1503158773, \"Added Connect Four\")\n })\n async def cmd_game(self, message, channel, author, leftover_args, game=None):\n \"\"\"\n ///|Usage\n `{command_prefix}game [name]`\n ///|Explanation\n Play a game\n ///|References\n Cards against humanity can be played with the `cah` command.\n Use `{command_prefix}help cah` to learn more\n \"\"\"\n\n all_funcs = dir(self)\n all_games = list(filter(lambda x: re.search(\"^g_\\w+\", x), all_funcs))\n all_game_names = [x[2:] for x in all_games]\n game_list = [{\n \"name\": x[2:],\n \"handler\": getattr(self, x, None),\n \"description\": getattr(self, x, None).__doc__.strip(\" \\t\\n\\r\")\n } for x in all_games]\n\n if message.mentions is not None and len(message.mentions) > 0:\n author = message.mentions[0]\n\n if game is None:\n shuffle(game_list)\n\n def check(m):\n return (m.content.lower() in [\"y\", \"n\", \"exit\"])\n\n for current_game in game_list:\n msg = await self.safe_send_message(\n channel,\n \"How about this game:\\n\\n**{}**\\n{}\\n\\nType `y`, `n` or `exit`\".\n format(current_game[\"name\"], current_game[\"description\"]))\n response = await self.wait_for_message(\n 100, author=author, channel=channel, check=check)\n\n if not response or response.content.startswith(\n self.config.command_prefix) or response.content.lower(\n ).startswith(\"exit\"):\n await self.safe_delete_message(msg)\n await self.safe_delete_message(response)\n await self.safe_send_message(channel, \"Nevermind then.\")\n return\n\n if response.content.lower() == \"y\":\n await self.safe_delete_message(msg)\n await self.safe_delete_message(response)\n game = current_game[\"name\"]\n break\n\n await self.safe_delete_message(msg)\n await self.safe_delete_message(response)\n\n if game is None:\n await self.safe_send_message(\n channel, \"That was all of them.\", expire_in=20)\n return\n\n # game = game.lower().replace(\" \", \"_\")\n handler = getattr(self, \"g_\" + game, None)\n if handler is None:\n return Response(\"There's no game like that...\")\n\n await handler(author, channel, leftover_args)\n\n async def g_2048(self, author, channel, additional_args):\n \"\"\"\n Join the same numbers and get to the 2048 tile!\n \"\"\"\n\n save_code = additional_args[0] if len(additional_args) > 0 else None\n size = additional_args[1] if len(additional_args) > 1 else 5\n\n game = Game2048(size, save_code)\n game_running = True\n turn_index = 1\n cache_location = \"cache/pictures/g2048_img\" + str(author.id)\n\n def check(reaction, user):\n if reaction.custom_emoji:\n # self.log (str (reaction.emoji) + \" is a custom emoji\")\n # print(\"Ignoring my own reaction\")\n return False\n\n if (str(reaction.emoji) in (\"⬇\", \"➡\", \"⬆\", \"⬅\") 
or\n str(reaction.emoji).startswith(\"📽\") or\n str(reaction.emoji).startswith(\"💾\")\n ) and reaction.count > 1 and user == author:\n return True\n\n # self.log (str (reaction.emoji) + \" was the wrong type of\n # emoji\")\n return False\n\n while game_running:\n direction = None\n turn_information = \"\"\n # self.log (str (game))\n\n await self.send_typing(channel)\n\n while direction is None:\n msg = await self.send_file(\n channel,\n game.getImage(cache_location) + \".png\",\n content=\"**2048**\\n{} turn {}\".format(\n str(turn_index) +\n (\"th\" if 4 <= turn_index % 100 <= 20 else {\n 1: \"st\",\n 2: \"nd\",\n 3: \"rd\"\n }.get(turn_index % 10, \"th\")), turn_information))\n turn_information = \"\"\n await self.add_reaction(msg, \"⬅\")\n await self.add_reaction(msg, \"⬆\")\n await self.add_reaction(msg, \"➡\")\n await self.add_reaction(msg, \"⬇\")\n await self.add_reaction(msg, \"📽\")\n await self.add_reaction(msg, \"💾\")\n\n reaction, user = await self.wait_for_reaction(\n check=check, message=msg)\n msg = reaction.message # for some reason this has to be like this\n # self.log (\"User accepted. There are \" + str (len\n # (msg.reactions)) + \" reactions. [\" + \", \".join ([str\n # (r.count) for r in msg.reactions]) + \"]\")\n\n for reaction in msg.reactions:\n if str(reaction.emoji) == \"📽\" and reaction.count > 1:\n await self.send_file(\n user,\n game.getImage(cache_location) + \".gif\",\n content=\"**2048**\\nYour replay:\")\n turn_information = \"| *replay has been sent*\"\n\n if str(reaction.emoji) == \"💾\" and reaction.count > 1:\n await self.safe_send_message(\n user,\n \"The save code is: **{0}**\\nUse `{1}game 2048 {2}` to continue your current game\".\n format(\n escape_dis(game.get_save()),\n self.config.command_prefix, game.get_save()))\n turn_information = \"| *save code has been sent*\"\n\n if str(reaction.emoji) in (\"⬇\", \"➡\", \"⬆\",\n \"⬅\") and reaction.count > 1:\n direction = (\"⬇\", \"➡\", \"⬆\",\n \"⬅\").index(str(reaction.emoji))\n\n # self.log (\"This did not match a direction: \" + str\n # (reaction.emoji))\n\n if direction is None:\n await self.safe_delete_message(msg)\n turn_information = \"| You didn't specifiy the direction\" if turn_information is not \"\" else turn_information\n\n # self.log (\"Chose the direction \" + str (direction))\n game.move(direction)\n turn_index += 1\n await self.safe_delete_message(msg)\n\n if game.won():\n await self.safe_send_message(\n channel,\n \"**2048**\\nCongratulations, you won after {} turns\".format(\n str(turn_index)))\n game_running = False\n\n if game.lost():\n await self.safe_send_message(\n channel, \"**2048**\\nYou lost after {} turns\".format(\n str(turn_index)))\n game_running = False\n\n await self.send_file(\n channel,\n game.getImage(cache_location) + \".gif\",\n content=\"**2048**\\nYour replay:\")\n await self.safe_delete_message(msg)\n\n async def g_Hangman(self, author, channel, additional_args):\n \"\"\"\n Guess a word by guessing each and every letter\n \"\"\"\n\n tries = additional_args[0] if len(additional_args) > 0 else 10\n\n word = additional_args[1] if len(additional_args) > 1 else re.sub(\n \"[^a-zA-Z]\", \"\", random_line(ConfigDefaults.hangman_wordlist))\n\n alphabet = list(\"abcdefghijklmnopqrstuvwxyz\")\n print(\"Started a Hangman game with \\\"\" + word + \"\\\"\")\n\n game = GameHangman(word, tries)\n running = True\n\n def check(m):\n return (m.content.lower() in alphabet or\n m.content.lower() == word or m.content.lower() == \"exit\")\n\n while running:\n current_status = 
game.get_beautified_string()\n msg = await self.safe_send_message(\n channel,\n \"**Hangman**\\n{} tr{} left\\n\\n{}\\n\\n`Send the letter you want to guess or type \\\"exit\\\" to exit.`\".\n format(game.tries_left, \"ies\"\n if game.tries_left != 1 else \"y\", current_status))\n response = await self.wait_for_message(\n 300, author=author, channel=channel, check=check)\n\n if not response or response.content.lower().startswith(\n self.config.command_prefix) or response.content.lower(\n ).startswith(\"exit\"):\n await self.safe_delete_message(msg)\n await self.safe_send_message(\n channel, \"Aborting this Hangman game. Thanks for playing!\")\n running = False\n # response may be None on timeout, so leave the loop before it is dereferenced below\n if response:\n await self.safe_delete_message(response)\n break\n\n if response.content.lower() == word:\n await self.safe_send_message(\n channel,\n \"Congratulations, you got it!\\nThe word is: *{}*\".format(\n word))\n return\n\n letter = response.content[0]\n game.guess(letter)\n\n if game.won:\n await self.safe_send_message(\n channel,\n \"Congratulations, you got it!\\nThe word is: *{}*\".format(\n word))\n running = False\n\n if game.lost:\n await self.safe_send_message(channel, \"You lost!\")\n running = False\n\n await self.safe_delete_message(msg)\n await self.safe_delete_message(response)\n\n async def g_ConnectFour(self, author, channel, additional_args):\n \"\"\"\n I hope you already know how this one works...\n \"\"\"\n\n to_delete = []\n\n to_delete.append(await self.safe_send_message(channel, \"Whom would you like to play against? You can **@mention** someone to challenge them or you can play against Giesela by sending \\\"ai\\\" or **@mention**ing her\"))\n\n players = None\n\n while True:\n msg = await self.wait_for_message(timeout=None, author=author, channel=channel)\n to_delete.append(msg)\n\n if msg.mentions:\n challanged_user = msg.mentions[0]\n\n if challanged_user == self.user:\n players = author\n break\n\n if challanged_user.bot:\n to_delete.append(await self.safe_send_message(channel, \"You can't challenge a bot\"))\n continue\n\n await self.safe_send_message(challanged_user, \"**{}** challenged you to a game of **Connect 4**. 
Do you accept?\".format(author.display_name))\n resp = await self.wait_for_message(timeout=60, author=challanged_user)\n\n if resp:\n to_delete.append(resp)\n\n if resp and resp.content.lower().strip() in (\"yes\", \"sure\", \"of course\", \"bring it\", \"y\", \"ye\", \"yeah\", \"yea\", \"yup\", \"k\", \"okay\", \"let's go\"):\n players = [author, challanged_user]\n break\n else:\n to_delete.append(await self.safe_send_message(channel, \"**{}** declined!\".format(author.display_name)))\n\n elif msg.content.lower().strip() in (\"ai\", \"computer\", \"giesela\", \"you\"):\n players = author\n break\n\n for msg in to_delete:\n asyncio.ensure_future(self.safe_delete_message(msg))\n\n game_done = asyncio.Future()\n\n game = GameConnectFour.start(self, channel, game_done, players, ai_level=3)\n\n await game_done\n","sub_path":"musicbot/commands/fun_commands.py","file_name":"fun_commands.py","file_ext":"py","file_size_in_byte":39361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"308409756","text":"#!/usr/bin/python\n# ex:set fileencoding=utf-8:\n\nfrom fabric.api import *\nfrom fabric.contrib import files\n\nimport os\n\nBASEDIR = os.path.dirname(env.real_fabfile)\n\nPYTHON = BASEDIR + \"/virtenv/bin/python\"\nDEVELOP = BASEDIR + \"/develop.py\"\n\nfrom djangobmf.demo import FIXTURES\n\nAPPS = [\n 'accounting',\n 'address',\n 'customer',\n 'employee',\n 'invoice',\n 'position',\n 'product',\n 'project',\n 'quotation',\n 'task',\n 'taxing',\n 'team',\n 'timesheet',\n]\n\n\n@task\ndef static():\n \"\"\"\n update static files\n \"\"\"\n js()\n css()\n with lcd(BASEDIR):\n local('cp bower_components/bootstrap/fonts/glyphicons* djangobmf/static/djangobmf/fonts/')\n\n\n@task\ndef css():\n \"\"\"\n recreate css files - with lessc and yui-compressor\n \"\"\"\n with lcd(BASEDIR):\n local('lessc less/djangobmf.less > bootstrap.css')\n local('yui-compressor --type css -o djangobmf/static/djangobmf/css/djangobmf.min.css bootstrap.css')\n local('rm bootstrap.css')\n\n\n@task\ndef js():\n \"\"\"\n recreate js files for development and production\n \"\"\"\n with lcd(BASEDIR):\n js_ext = (\n 'bower_components/jquery-cookie/jquery.cookie.js',\n 'submodules/jquery-treegrid/js/jquery.treegrid.js',\n 'bower_components/bootstrap/dist/js/bootstrap.js',\n )\n js_own = (\n 'js/variables.js',\n 'js/bmf-autocomplete.js',\n 'js/bmf-calendar.js',\n 'js/bmf-editform.js',\n 'js/bmf-buildform.js',\n 'js/menu.js',\n )\n\n local('cp bower_components/jquery/dist/jquery.min.js djangobmf/static/djangobmf/js/')\n local('cp bower_components/jquery/dist/jquery.min.map djangobmf/static/djangobmf/js/')\n local('cp bower_components/angular/angular.min.js djangobmf/static/djangobmf/js/')\n local('cp bower_components/angular/angular.min.js.map djangobmf/static/djangobmf/js/')\n\n local('cp bower_components/bootstrap/dist/js/bootstrap.min.js djangobmf/static/djangobmf/js/')\n\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.cookie.min.js bower_components/jquery-cookie/jquery.cookie.js')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.treegrid.min.js submodules/jquery-treegrid/js/jquery.treegrid.js')\n\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_ext + js_own))\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/djangobmf.min.js djangobmf/static/djangobmf/js/djangobmf.js')\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_own))\n\n\n@task\ndef test(fast=False):\n 
\"\"\"\n Tests code with django unittests\n \"\"\"\n with lcd(BASEDIR):\n if fast:\n local('virtenv/bin/coverage run runtests.py -v2 --failfast')\n else:\n local('virtenv/bin/coverage run runtests.py -v2')\n local('virtenv/bin/coverage report -m')\n\n\n@task\ndef test_mod(app):\n with lcd(BASEDIR):\n local('virtenv/bin/coverage run runtests.py -v2 --contrib %(app)s' % {'app': app})\n local('virtenv/bin/coverage report -m --include=\"djangobmf/contrib/%(app)s/*\"' % {'app': app})\n\n\n@task\ndef test_core(module=\"\"):\n with lcd(BASEDIR):\n local('virtenv/bin/coverage run runtests.py %s -v2 --nocontrib' % module)\n local('virtenv/bin/coverage report -m --omit=\"djangobmf/contrib/*\"')\n\n\n@task\ndef locale():\n with lcd(BASEDIR + '/djangobmf'):\n local('%s %s makemessages -l %s --domain django' % (PYTHON, DEVELOP, 'en'))\n local('%s %s makemessages -l %s --domain djangojs' % (PYTHON, DEVELOP, 'en'))\n check_locale()\n\n for app in APPS:\n with lcd(BASEDIR + '/djangobmf/contrib/' + app):\n local('%s %s makemessages -l %s --domain django' % (PYTHON, DEVELOP, 'en'))\n check_locale()\n\n with lcd(BASEDIR):\n local('tx pull')\n\n with lcd(BASEDIR + '/djangobmf'):\n local('%s %s compilemessages' % (PYTHON, DEVELOP))\n\n for app in APPS:\n with lcd(BASEDIR + '/djangobmf/contrib/' + app):\n local('%s %s compilemessages' % (PYTHON, DEVELOP))\n\n puts(\"Dont forget to run 'tx push -s' to push new source files\")\n\ndef check_locale():\n numstat = local('git diff --numstat locale', quiet)\n for stats in numstat.split('\\n'):\n insertions, deletions, file = stats.split('\\t')\n if insertions == '1' and deletions == '1' and file[-3:] == '.po':\n with lcd(BASEDIR):\n local('git checkout -- %s' % file)\n\n@task\ndef make(data=''):\n \"\"\"\n \"\"\"\n with lcd(BASEDIR):\n local('rm -f sandbox/database.sqlite')\n local('%s %s migrate --noinput' % (PYTHON, DEVELOP))\n if not data:\n local('%s %s loaddata %s' % (PYTHON, DEVELOP, ' '.join(FIXTURES)))\n else:\n local('%s %s loaddata fixtures/users.json' % (PYTHON, DEVELOP))\n\n\n@task\ndef start():\n \"\"\"\n \"\"\"\n with lcd(BASEDIR):\n local('%s %s runserver 8000' % (PYTHON, DEVELOP))\n\n\n@task\ndef shell():\n \"\"\"\n \"\"\"\n local('%s %s shell' % (PYTHON, DEVELOP))\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"387328607","text":"import numpy as np\r\nimport pandas as pd \r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import datasets\r\n\r\n\r\niris = datasets.load_iris()\r\ncluster_df = iris.data\r\n\r\n# Gap Statistic for K means\r\ndef optimalK(data, nrefs=3, maxClusters=15):\r\n \"\"\"\r\n Calculates KMeans optimal K using Gap Statistic \r\n Params:\r\n data: ndarry of shape (n_samples, n_features)\r\n nrefs: number of sample reference datasets to create\r\n maxClusters: Maximum number of clusters to test for\r\n Returns: (gaps, optimalK)\r\n \"\"\"\r\n gaps = np.zeros((len(range(1, maxClusters)),))\r\n resultsdf = pd.DataFrame({'clusterCount':[], 'gap':[]})\r\n for gap_index, k in enumerate(range(1, maxClusters)):\r\n # Holder for reference dispersion results\r\n refDisps = np.zeros(nrefs)\r\n # For n references, generate random sample and perform kmeans getting resulting dispersion of each loop\r\n for i in range(nrefs):\r\n \r\n # Create new random reference set\r\n randomReference = np.random.random_sample(size=data.shape)\r\n 
\r\n # Fit to it\r\n km = KMeans(k)\r\n km.fit(randomReference)\r\n \r\n refDisp = km.inertia_\r\n refDisps[i] = refDisp\r\n # Fit cluster to original data and create dispersion\r\n km = KMeans(k)\r\n km.fit(data)\r\n \r\n origDisp = km.inertia_\r\n # Calculate gap statistic\r\n gap = np.log(np.mean(refDisps)) - np.log(origDisp)\r\n # Assign this loop's gap statistic to gaps\r\n gaps[gap_index] = gap\r\n \r\n resultsdf = resultsdf.append({'clusterCount':k, 'gap':gap}, ignore_index=True)\r\n return (gaps.argmax() + 1, resultsdf)\r\n\r\nscore_g, df = optimalK(cluster_df, nrefs=5, maxClusters=30)\r\nplt.plot(df['clusterCount'], df['gap'], linestyle='--', marker='o', color='b');\r\nplt.xlabel('K');\r\nplt.ylabel('Gap Statistic');\r\nplt.title('Gap Statistic vs. K');\r\nplt.show()","sub_path":"Gap_Statistic.py","file_name":"Gap_Statistic.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"300947825","text":"import argparse\nimport matplotlib\nimport logging\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Button\n\nfrom word_counter.count import count_files_in_directory\n\n\ndef view_histogram(data):\n labels, values = zip(*data.items())\n matplotlib.rcParams['toolbar'] = 'None' # disabling toolbar\n # creating figure.\n fig, ax = plt.subplots(1, 1)\n plt.subplots_adjust(bottom=0.2)\n fig.canvas.set_window_title('Word count.')\n\n step_size = 10\n current = 0\n size = len(data)\n\n # update figure.\n def update():\n ax.clear()\n sub_label = labels[current: current + step_size]\n sub_values = values[current: current + step_size]\n indices = [i for i in range(len(sub_values))]\n\n ax.bar(indices, sub_values)\n\n ax.set_xticks(indices)\n ax.set_xticklabels(sub_label)\n showed_size = current + step_size\n if showed_size >= size:\n showed_size = current + (size - current)\n ax.set_title(\"{}-{} of {}\".format(current, showed_size, size))\n plt.draw()\n\n def to_next_page(_):\n nonlocal current\n if current + step_size >= size:\n return\n current += step_size\n update()\n\n def to_prev_page(_):\n nonlocal current\n if current == 0:\n return\n current -= step_size\n update()\n\n # buttons to navigate through histogram.\n next_button = Button(plt.axes([0.81, 0.05, 0.1, 0.075]), 'Next')\n next_button.on_clicked(to_next_page)\n previous_file = Button(plt.axes([0.7, 0.05, 0.1, 0.075]), 'Previous')\n previous_file.on_clicked(to_prev_page)\n\n update()\n plt.show()\n\n\ndef main():\n \"\"\"\n Entry point for console command - count_words_in_directory\n \"\"\"\n logging.basicConfig(format='%(levelname)s: %(message)s')\n parser = argparse.ArgumentParser()\n parser.add_argument('path')\n args = parser.parse_args()\n counter = count_files_in_directory(args.path)\n view_histogram(counter)\n","sub_path":"word_counter/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"273562664","text":"from tkinter import *\r\nimport cv2\r\nimport face_recognition\r\nimport numpy as np\r\nimport os\r\nfrom datetime import datetime,date\r\n\r\n\r\n\r\nroot=Tk();\r\n\r\nroot.title(\"Welcome To F-BASE\")\r\nroot.geometry(\"670x300\")\r\nroot.minsize(670,300)\r\nroot.maxsize(700,300)\r\n\r\np1 = PhotoImage(file='download.png')\r\nroot.iconphoto(False, p1)\r\n\r\n\r\nf1=Frame(root)\r\nf1.pack(side=RIGHT,anchor=\"ne\",pady=53,padx=45)\r\n\r\ntext_label=Label(f1,text=\"WELCOME TO 
FBASE\",fg=\"green\",bg=\"white\",font=\"TimesNewRoman 15 bold\")\r\ntext_label.pack()\r\n\r\n\r\ntext_label2=Label(f1,text=\"This software is developed and tested by team F-BASE\\n All Rights Reserved\",font=\"TimesNewRoman 9\")\r\ntext_label2.pack(pady=30)\r\n\r\ntext_label=Label(f1,text=\"For any Query contact us on : f-base23@gmail.com\",fg=\"green\",font=\"TimesNewRoman 9 italic\")\r\ntext_label.pack()\r\n\r\nb2=Button(f1,text=\"Exit\",borderwidth=2,font=\"TimesNewRoman 11 bold\")\r\nb2.pack(side=RIGHT,padx=20,pady=10)\r\nb2.bind('',quit)\r\n\r\ndef createnewadmin():\r\n os.system('python newadmingui.py')\r\n\r\nb3=Button(f1,text=\"Create New Admin\",borderwidth=2,font=\"TimesNewRoman 11 bold\",command=createnewadmin).pack(side=RIGHT,padx=(30,10))\r\n\r\nphoto=PhotoImage(file=\"imageBasic/logo1.png\")\r\nphoto_label=Label(image=photo,borderwidth=7,relief=SUNKEN)\r\nphoto_label.pack(anchor=\"nw\",pady=47)\r\n\r\ndef func():\r\n path = 'imageAttendance'\r\n images = []\r\n classNames = []\r\n myList = os.listdir(path)\r\n print(myList)\r\n\r\n\r\n for cl in myList:\r\n curImg = cv2.imread(f'{path}/{cl}')\r\n images.append(curImg)\r\n classNames.append(os.path.splitext(cl)[0])\r\n print(classNames)\r\n\r\n def findEncodings(images):\r\n encodeList = []\r\n for img in images:\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n encode = face_recognition.face_encodings(img)[0]\r\n encodeList.append(encode)\r\n return encodeList\r\n\r\n def markAttendance(name):\r\n with open(\"Attendance.csv\", \"r+\") as f,open(\"Exit.csv\", \"r+\") as g:\r\n myDataList = f.readlines()\r\n nameList = []\r\n now = datetime.now()\r\n dtString = now.strftime('%H:%M:%S')\r\n dt = int(dtString[0:2])\r\n\r\n for line in myDataList:\r\n entry = line.split(',')\r\n nameList.append(entry[0])\r\n\r\n if name not in nameList:\r\n now = datetime.now()\r\n dtString = now.strftime('%H:%M:%S')\r\n dt=int(dtString[0:2])\r\n dtm=int(dtString[3:5])\r\n print(dt,dtm)\r\n\r\n if (dt>12 and dtm>30):\r\n entry=\"LATE\"\r\n else :\r\n entry=\"ON TIME\"\r\n dt2=date.today()\r\n f.writelines(f'\\n{name},{dtString},{dt2},{entry}')\r\n\r\n elif name in nameList :\r\n if dt>15 and dt<16:\r\n now = datetime.now()\r\n dtString = now.strftime('%H:%M:%S')\r\n dt2=date.today()\r\n exit=\"HALF DAY\"\r\n g.writelines(f'\\n{name},{dtString},{dt2}.{exit}')\r\n elif dt>17:\r\n now = datetime.now()\r\n dtString = now.strftime('%H:%M:%S')\r\n dt2 = date.today()\r\n exit = \"FULL DAY\"\r\n g.writelines(f'\\n{name},{dtString},{dt2}.{exit}')\r\n\r\n encodeListKnown = findEncodings(images)\r\n print('ENCODING COMPLETE')\r\n\r\n cap = cv2.VideoCapture(0)\r\n\r\n while True:\r\n success, img = cap.read()\r\n imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)\r\n imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)\r\n\r\n facesCurFrame = face_recognition.face_locations(imgS)\r\n encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)\r\n\r\n for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):\r\n matches = face_recognition.compare_faces(encodeListKnown, encodeFace)\r\n faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)\r\n # print(faceDis)\r\n matchIndex = np.argmin(faceDis)\r\n\r\n # if matches[matchIndex]:\r\n # name = classNames[matchIndex].upper()\r\n # # print(name)\r\n # y1, x2, y2, x1 = faceLoc\r\n # y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\r\n # cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 1)\r\n # cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)\r\n # cv2.putText(img, name, 
(x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 1)\r\n # markAttendance(name)\r\n if faceDis[matchIndex] < 0.50:\r\n name = classNames[matchIndex].upper()\r\n markAttendance(name)\r\n else:\r\n name = 'Unknown'\r\n\r\n # print(name)\r\n y1, x2, y2, x1 = faceLoc\r\n y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\r\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\r\n cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)\r\n cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)\r\n\r\n\r\n cv2.imshow('webcam', img)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\nb1=Button(f1,text=\"Let's Start\",command=func,borderwidth=2,font=\"TimesNewRoman 11 bold\")\r\nb1.pack(side=BOTTOM,pady=10)\r\n\r\nroot.mainloop()\r\n\r\n\r\n","sub_path":"GUIFbase.py","file_name":"GUIFbase.py","file_ext":"py","file_size_in_byte":6198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"626022952","text":"# Complete the function below.\nimport math\n\nclass HashTable:\n\n def __init__(self):\n self.hash_table = {}\n\n def add(self, key, val=True):\n x = self.get(key)\n if x == False:\n self.hash_table[key] = val\n\n def get(self, key):\n try:\n return self.hash_table[key]\n except KeyError:\n return False\n\n def keys(self):\n return self.hash_table.keys()\n \ndef Circles( distance, radius, cost):\n op = \"\"\n n = len(radius)\n htn = HashTable()\n max = 0\n min = int(math.pow(10, 10))\n for i in xrange(n):\n htn.add(radius[i], {\"cost\": cost[i], \"index\": i+1})\n if radius[i] > max:\n max = radius[i]\n if cost[i] < min:\n min = cost[i]\n\n b = htn.keys()\n\n for i in xrange(n):\n minCost = int(math.pow(10, 9))\n minGearOpt = []\n # opt =\n for y in [k for k in b if k >= (distance - radius[i])]:\n data = htn.get(y)\n if data == False:\n continue\n #print data, y\n optCost = data['cost']\n if optCost == min:\n minGearOpt = [data['index']]\n break\n if optCost <= minCost:\n if optCost == minCost:\n minGearOpt.append(data['index'])\n # minGearOpt.append(hti.get(y) + 1)\n else:\n minGearOpt = [data['index']]\n # minGearOpt = [hti.get(y) + 1]\n minCost = optCost\n if len(minGearOpt) == 0:\n op += \"0\"\n elif len(minGearOpt) == 1:\n op += str(minGearOpt[0])\n # op += \" \"\n else:\n op += str(minGearOpt[-1])\n # op += \" \"\n return op\n","sub_path":"practice/python/newradius.py","file_name":"newradius.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"78361238","text":"\"\"\"Utility file to seed tracker database from data in seed_data/\"\"\"\n\nfrom DiamondCaseWeb.model.product import ProductCategory as ProductCategoryModel\nfrom DiamondCaseWeb.model.product import Product as ProductModel\nfrom DiamondCaseWeb.model.product import Location as LocationModel\nfrom DiamondCaseWeb.model.product import LocationProduct as LocationProductModel\nfrom DiamondCaseWeb.model.static import HomepageFeature as HomepageFeatureModel\nfrom DiamondCaseWeb.model.static import HelpArticle as HelpArticleModel\nfrom DiamondCaseWeb.model.user import Role as RoleModel\nfrom DiamondCaseWeb.model.user import User as UserModel \n# from DiamondCaseWeb.model.user import Order as OrderModel \nfrom DiamondCaseWeb import create_app, db\n\n\ndef load_products():\n \"\"\"Load users from u.user into database.\"\"\"\n\n for i, row in enumerate(open(\"seed_data/category.product\")):\n row = row.rstrip()\n name = 
row.split(\"|\")\n product_category = ProductCategoryModel(name=name)\n db.session.add(product_category)\n\n for i, row in enumerate(open(\"seed_data/product.product\")):\n row = row.rstrip()\n name, short_description, long_description, product_category_id, img_path_xs, img_path_sm, img_path_md, img_path_lg = row.split(\"|\")\n product = ProductModel(name=name,\n short_description=short_description,\n long_description=long_description,\n product_category_id=product_category_id,\n img_path_xs=img_path_xs,\n img_path_sm=img_path_sm,\n img_path_md=img_path_md,\n img_path_lg=img_path_lg)\n db.session.add(product)\n\n for i, row in enumerate(open(\"seed_data/location.product\")):\n row = row.rstrip()\n name, description, address1, address2, city, state, zip_code, country, latitude, longitude, direction_url = row.split(\"|\")\n location = LocationModel(name=name,\n description=description,\n address1=address1,\n address2=address2,\n city=city,\n state=state,\n zip_code=zip_code,\n country=country,\n latitude=latitude,\n longitude=longitude,\n direction_url=direction_url)\n db.session.add(location)\n\n for i, row in enumerate(open(\"seed_data/location_product.product\")):\n row = row.rstrip()\n location_id, product_id, price, num_available = row.split(\"|\")\n location_product = LocationProductModel(location_id=location_id,\n product_id=product_id,\n price=price,\n num_available=num_available)\n db.session.add(location_product)\n\n db.session.commit()\n\n\ndef load_static():\n \"\"\"Load users from u.user into database.\"\"\"\n\n for i, row in enumerate(open(\"seed_data/homepage_feature.static\")):\n row = row.rstrip()\n title, body, img_path_xs, img_path_sm, img_path_md, img_path_lg, is_active = row.split(\"|\")\n homepage_feature = HomepageFeatureModel(title=title,\n body=body,\n img_path_xs=img_path_xs,\n img_path_sm=img_path_sm,\n img_path_md=img_path_md,\n img_path_lg=img_path_lg,\n is_active=is_active)\n db.session.add(homepage_feature)\n\n for i, row in enumerate(open(\"seed_data/help_article.static\")):\n row = row.rstrip()\n title, description, body = row.split(\"|\")\n help_article = HelpArticleModel(title=title, \n description=description, \n body=body)\n db.session.add(help_article)\n\n db.session.commit()\n\n\ndef load_user():\n \"\"\"Load users from u.user into database.\"\"\"\n\n for i, row in enumerate(open(\"seed_data/role.user\")):\n row = row.rstrip()\n name, description = row.split(\"|\")\n role = RoleModel(name=name, description=description)\n db.session.add(role)\n\n for i, row in enumerate(open(\"seed_data/user.user\")):\n row = row.rstrip()\n name, phone, email, password, confirmed_at, role_id = row.split(\"|\")\n user = UserModel(name=name,\n phone=phone,\n email=email,\n password=password,\n confirmed_at=confirmed_at,\n role_id=role_id)\n db.session.add(user)\n\n # for i, row in enumerate(open(\"seed_data/order.user\")):\n # row = row.rstrip()\n # active, user_id, product_location_id = row.split(\"|\")\n # order = OrderrModel(\n # active=active, \n # user_id=user_id, \n # product_location_id=product_location_id)\n # db.session.add(order)\n\n db.session.commit()\n\n\nif __name__ == \"__main__\":\n app = create_app()\n app.config.from_object('DiamondCaseWeb.config.DevelopmentConfig')\n db.init_app(app)\n with app.app_context():\n db.drop_all()\n db.create_all()\n\n load_products()\n load_static()\n 
load_user()","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"387533075","text":"from pythreejs import *\nimport colorsys\nfrom math import atan\nfrom IPython.display import display\n\ndef obj_read(filename):\n with open(filename,'r') as obj:\n lines = [ [f for f in s.split(' ') if len(f)>0] for s in obj.read().split('\\n') ]\n vertices = [[float(coord) for coord in l[1:4]] for l in lines if len(l)>3 and l[0]=='v']\n faces = [[int(coord.split('/')[0])-1 for coord in l[1:4]] for l in lines if len(l)>3 and l[0]=='f']\n return faces, vertices\n\ndef heatmap(value):\n return '#{:02x}{:02x}{:02x}'.format(*[int(i*255) for i in colorsys.hls_to_rgb(.3-atan(value)/15, .5, 1)])\n\ndef draw(faces, vertices, colorfunc=None):\n # Create the geometry:\n faces = [f + [None, [vertexcolors[i] for i in f], None] for f in faces]\n geometry = Geometry(faces=faces,vertices=vertices,colors='ff0000')\n # Calculate normals per face, for nice crisp edges:\n #geometry.exec_three_obj_method('computeFaceNormals')\n\n object1 = Mesh(\n geometry=geometry,\n material=MeshLambertMaterial( side=\"FrontSide\", vertexColors='VertexColors'),\n )\n\n object2 = Mesh(\n geometry=geometry,\n material=MeshLambertMaterial(color= \"gray\", side=\"BackSide\"),\n )\n\n# Set up a scene and render it:\n camera = PerspectiveCamera(position=[2*max(v[0] for v in vertices), 2*max(v[1] for v in vertices), 2*max(v[2] for v in vertices)], fov=40,\n children=[DirectionalLight(color='#cccccc', position=[-3, 5, 1], intensity=0.5)])\n scene = Scene(children=[object1, object2, camera, AmbientLight(color='#dddddd')])\n\n renderer = Renderer(camera=camera, background='black', background_opacity=1,\n scene=scene, controls=[OrbitControls(controlling=camera)])\n display(renderer)","sub_path":"pyjs/drawwh.py","file_name":"drawwh.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"497419","text":"__author__ = 'Administrator'\nfrom testCase.BaseTestCase import *\nfrom testData.BaseOperateElement import *\nfrom testData.Global import operateType,findElemtType\nimport testRunner.runner\nfrom testDriver.testLog import *\nfrom testDriver.yamHome import *\nclass chatHome(testRunner.runner.TestInterfaceCase):\n\n\n def test_click_home(self):\n gh = getHome('d:\\home.yaml')[0]\n ch = getBaseTestCase(gh['findElemtType'], gh['element_info'], gh['operate_type'])\n # ch = getBaseTestCase(findElemtType.XPATH, \"/android.widget.ListView/android.widget.LinearLayout[@index='7']\",operateType.CLICK)\n fg = getOperateElement(driver=testRunner.runner.driver, element_type=ch.get_element_types(), element_info=ch.get_element_info()).findElement()\n logTest = myLog.getLog()\n logger = logTest.getMyLogger()\n logger.debug(\"1111\")\n if fg:\n getOperateElement(driver=testRunner.runner.driver, element_type=ch.get_element_types(), operate_type=ch.get_operate_type(), element_info=ch.get_element_info()).operateElement()\n # raise selenium.common.exceptions.TimeoutException\n logTest.buildStartLine(\"执行test_click_home案例成功\")\n else:\n logTest.buildStartLine(\"执行test_click_home案例失败\")\n\n def test_001(self):\n logTest = myLog.getLog()\n logger = logTest.getMyLogger()\n logger.debug(\"1111\")\n 
logTest.buildStartLine(\"执行案例成功\")\n\n\n","sub_path":"testDriver/weChatHome.py","file_name":"weChatHome.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"294921299","text":"\"\"\" puzzle \"\"\"\nimport itertools\n\n\ndef main():\n perm = itertools.permutations(range(10))\n for p in perm:\n letters = {'e': p[0],\n 'f': p[1],\n 'g': p[2],\n 'h': p[3],\n 'i': p[4],\n 'n': p[5],\n 'o': p[6],\n 't': p[7],\n 'w': p[8],\n 'y': p[9]\n }\n\n one = 100 * letters['o'] + \\\n 10 * letters['n'] + \\\n letters['e']\n nine = 1000 * letters['n'] + \\\n 100 * letters['i'] + \\\n 10 * letters['n'] + \\\n letters['e']\n twenty = 100000 * letters['t'] + \\\n 10000 * letters['w'] + \\\n 1000 * letters['e'] + \\\n 100 * letters['n'] + \\\n 10 * letters['t'] + \\\n letters['y']\n fifty = 10000 * letters['f'] + \\\n 1000 * letters['i'] + \\\n 100 * letters['f'] + \\\n 10 * letters['t'] + \\\n letters['y']\n eighty = 100000 * letters['e'] + \\\n 10000 * letters['i'] + \\\n 1000 * letters['g'] + \\\n 100 * letters['h'] + \\\n 10 * letters['t'] + \\\n letters['y']\n summe = one + nine + twenty + fifty\n\n if summe == eighty:\n print(sorted(letters.items()))\n\n print('done')\n\n\n# Standard boilerplate to call the main() function to begin\n# the program.\nif __name__ == '__main__':\n main()\n","sub_path":"puzzles/puzzle/puzzle2.py","file_name":"puzzle2.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"578293682","text":"import numpy as np\nimport scipy.stats\n\n\ndef knn(x, x_train, y_train, k):\n '''\n KNN k-Nearest Neighbors Algorithm.\n\n INPUT: x: testing sample features, (N_test, P) matrix.\n x_train: training sample features, (N, P) matrix.\n y_train: training sample labels, (N, ) column vector.\n k: the k in k-Nearest Neighbors\n\n OUTPUT: y : predicted labels, (N_test, ) column vector.\n '''\n\n # Warning: uint8 matrix multiply uint8 matrix may cause overflow, take care\n # Hint: You may find numpy.argsort & scipy.stats.mode helpful\n N_test, P = x.shape\n y = np.zeros(N_test)\n for i in range(N_test):\n d = np.sum((x[i] - x_train) ** 2, axis=1)\n idx = np.argsort(d)[:k]\n y[i] = scipy.stats.mode(y_train[idx])[0][0]\n return y\n","sub_path":"3/knn/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"34056581","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 1 20:06:10 2020\n\n@author: andyjiang\n\"\"\"\n\npdbFile = open(\"6qlt.pdb\", \"r\")\n\ntxtFile = open(\"6qlt_coord_protein_no_comm.xyz\", \"w\")\n\npdb_lines = pdbFile.readlines()\n\natom_lines = []\nhetatm_lines = []\n\nfor i in range(len(pdb_lines)):\n if pdb_lines[i][0:6] == \"ATOM \":\n atom_lines.append(pdb_lines[i])\n\n elif pdb_lines[i][0:6] == \"HETATM\":\n hetatm_lines.append(pdb_lines[i])\n\nfor j in range(len(atom_lines)):\n l = atom_lines[j]\n txtFile.write(l[76:78] + \" \" + l[30:38] + \" \" + l[38:46] + \" \" + l[46:54] + \"\\n\")\n\nligand_files = []\n\ncount = 0\n\ntemp = open(\"ligand0_no_comm.xyz\", \"w\")\n\nligand_files.append(temp)\n\nfor k in range(len(hetatm_lines)):\n het = hetatm_lines[k]\n if k >= 1 and het[22:26] != hetatm_lines[k-1][22:26]:\n count = count + 1\n temp = open(\"ligand\" + str(count) + \"_no_comm.xyz\", \"w\")\n ligand_files.append(temp)\n\n 
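# added note: PDB is a fixed-width format — in each HETATM record, columns 77-78 hold the element symbol, columns 31-38/39-46/47-54 the x/y/z coordinates, and columns 23-26 the residue sequence number used above to start a new ligand file.\n 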
ligand_files[count].write(het[76:78] + \" \" + het[30:38] + \" \" + het[38:46] + \" \" + het[46:54] + \"\\n\")\n \n\npdbFile.close()\ntxtFile.close()\n\nfor q in range(len(ligand_files)):\n ligand_files[q].close()\n\n\n","sub_path":"pdb_to_txt_no_comm.py","file_name":"pdb_to_txt_no_comm.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"171523011","text":"#Fridge of Your Dreams\n'''\n그냥 이진수 변환\n'''\nN= int(input())\n\nfor _ in range(N):\n num = input()\n answer = 0\n for i in range(len(num)):\n answer += int(num[i]) * 2**(23-i)\n print(answer)\n\n\n","sub_path":"백준/Python/알고파/11.24/Fridge of Your Dreams(11104).py","file_name":"Fridge of Your Dreams(11104).py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"246688636","text":"import os\nimport numpy as np\nfrom collections import OrderedDict\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\n\nfrom hailo_model_zoo.core.eval.eval_base_class import Eval\n\nGT_LABELS_FILE = 'person_keypoints_val2017.json'\n\nMETRIC_NAMES = ['Average-Precision-IoU-0.50:0.95',\n 'Average-Precision-IoU-0.50',\n 'Average-Precision-IoU-0.75',\n 'Average-Precision-IoU-0.50:0.95-medium',\n 'Average-Precision-0.50:0.95-large',\n 'Average-Recall-IoU-0.50:0.95',\n 'Average-Recall-IoU-0.50',\n 'Average-Recall-IoU-0.75',\n 'Average-Recall-IoU-0.50:0.95-medium',\n 'Average-Recall-0.50:0.95-large'\n ]\n\n\nclass PoseEstimationEval(Eval):\n\n def __init__(self, **kwargs):\n self._metrics_vals = OrderedDict()\n self._local_det_file = 'my_pose_detections.json'\n self._gt_labels_path = kwargs['gt_labels_path']\n self._coco_result = []\n\n def _parse_net_output(self, net_output):\n return net_output['predictions']\n\n def update_op(self, net_output, img_info):\n if \"center\" not in img_info:\n # OpenPose Evaluation\n net_output = self._parse_net_output(net_output)\n self._coco_result += net_output\n return\n\n # CenterPose evaluation\n bboxes, scores, keypoints, joint_scores = (\n net_output['bboxes'], net_output['scores'], net_output['keypoints'], net_output['joint_scores'])\n\n batch_size = bboxes.shape[0]\n for batch_index in range(batch_size):\n box, score, keypoint, keypoint_score = (\n bboxes[batch_index], scores[batch_index], keypoints[batch_index], joint_scores[batch_index])\n ground_truth = {k: v[batch_index] for k, v in img_info.items()}\n\n # change boxes to coco format\n box[:, 2] -= box[:, 0]\n box[:, 3] -= box[:, 1]\n for b, s, kps, kps_score in zip(box, score, keypoint, keypoint_score):\n detection_keypoints = np.concatenate([kps.reshape(17, 2), kps_score.reshape(17, 1)], axis=1)\n\n detection = {\n \"image_id\": int(ground_truth['image_id']),\n \"category_id\": 1, # Always person\n \"bbox\": b.tolist(),\n \"score\": s[0],\n \"keypoints\": detection_keypoints.reshape(-1).tolist(),\n }\n\n self._coco_result.append(detection)\n\n # undo modifications\n box[:, 2] += box[:, 0]\n box[:, 3] += box[:, 1]\n\n def evaluate(self):\n coco_gt = COCO(os.path.join(self._gt_labels_path, GT_LABELS_FILE))\n coco_dt = coco_gt.loadRes(self._coco_result)\n for annotation_type in ['keypoints', 'bbox']:\n if annotation_type not in self._coco_result[0]:\n continue\n result = COCOeval(coco_gt, coco_dt, annotation_type)\n result.evaluate()\n result.accumulate()\n result.summarize()\n self._metrics_vals['{}_{}'.format(annotation_type, METRIC_NAMES[0])] = 
result.stats[0]\n self._metrics_vals['{}_{}'.format(annotation_type, METRIC_NAMES[1])] = result.stats[1]\n\n def _get_accuracy(self):\n return self._metrics_vals\n\n def reset(self):\n pass\n","sub_path":"hailo_model_zoo/core/eval/pose_estimation_evaluation.py","file_name":"pose_estimation_evaluation.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"27674462","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nScript to convert list of REFCODEs into CIFs.\n\nAuthor: Andrew Tarzia\n\nDate Created: 4 Mar 2019\n\n\"\"\"\nimport ccdc.io\nimport CSD_f\n\n# read in CSD\nentry_reader = CSD_f.get_entryreader()\n\nRCODE_file = 'DB_final_040319.gcd'\nprint('reading', RCODE_file, 'is that correct??')\nREFCODEs = []\nfor line in open(RCODE_file, 'r'):\n REFCODEs.append(line.rstrip())\n\ncount = 0\ncount_no = 0\nRC_list = []\nfor i, RC in enumerate(sorted(REFCODEs)):\n # if RC.lower() != 'dovmuw':\n # continue\n count_no += 1\n entry = entry_reader.entry(RC)\n # print('%s %s' % (RC, entry.ccdc_number))\n # print(entry.chemical_name)\n # print(entry.is_polymeric)\n # skip polymeric structures\n if entry.chemical_name is not None:\n if 'catena' in entry.chemical_name:\n continue\n if entry.is_polymeric is True:\n continue\n # skip if structure is powder study\n if entry.is_powder_study is True:\n continue\n # skip structures that are purely organic\n if entry.is_organometallic is False:\n continue\n # print('passed!')\n # note structures with solvent\n solvent = 'n'\n if entry.chemical_name is not None:\n if len(entry.chemical_name.split(' ')) > 1:\n solvent = 'y'\n # note structures with disorder\n disorder = 'n'\n if entry.has_disorder is True:\n disorder = 'y'\n # print(solvent, disorder)\n # break\n crystal = entry.crystal\n if entry.has_3d_structure is False:\n print(RC, entry.ccdc_number)\n RC_list.append(RC)\n else:\n # write to CIF\n ccdc.io.CrystalWriter(RC+'_extracted.cif').write(crystal)\n count += 1\n\nprint(count, 'cifs found from', count_no, 'RCs')\nprint(RC_list)\n","sub_path":"CSD_API_python3/to_CIF.py","file_name":"to_CIF.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87994425","text":"# coding=utf-8\n\n# Connection settings\nCACHES = {\n 'connection': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': '127.0.0.1:11202',\n 'TIMEOUT': 604800,\n },\n}\n\n\n# S3-related settings\nS3_url = 's3.amazonaws.com'\nS3_region = 'us-east-1'\nS3_access_key = 'AKIAJTQBGHIMXHDSGNZQ'\nS3_secret_access_key = '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'\n\n# S3 user images; this path must be granted public read-only access\nS3_bucket = 'c_log_test'\nS3_image_path = 'images/country'\n\n# redshift\nREDSHIFT = {\n \"host\": \"shinezone.chlwjqdovwm4.us-east-1.redshift.amazonaws.com\",\n \"port\": 5439,\n \"user\": \"arch_sdk\",\n \"password\": \"Shinezone2015\",\n \"database\": \"szdw\"\n}\n# TB_GAME_ID = \"general.business_gamename_dim\"\nTB_GAME_ID = \"public.business_gamename_dim\"\nSQL_INSERT_GAME_ID = \"INSERT INTO \" + TB_GAME_ID + \\\n \" (game_id, gamedb, game_name, versions, timezone_diff, is_enable, platform, orderid) \" \\\n \"VALUES (%s, '%s', '%s', '%s', %s, %s, '%s', %s)\"\nGAME_ID_START = 1000\n\n\n# Outgoing mail settings\nMAIL_SERVER = 'smtp.exmail.qq.com'\nMAIL_PORT = 25\nMAIL_SEND = 'no-reply@shinezone.com'\nMAIL_SEND_PWD = 'Shinezone2016'\nMAIL_TOKEN = 'ZDt6jMwdgnGYbpS8'\nMAIL_TIMEOUT = 600\n\n\n# Random character pool\nRANDOM_STR = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789!@#$%^&*'\n# Maximum number of games allowed per CP\nMAX_GAME_NUM = 30\n\n","sub_path":"shinezone/helpdesk/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"640542040","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Nicolas Bessi. Copyright Camptocamp SA\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nfrom openerp.osv.orm import Model, fields\nfrom openerp.tools.translate import _\n\n\nclass ResPartner(Model):\n \"\"\"Adds lastname and firstname; name becomes a stored function field\"\"\"\n\n _inherit = 'res.partner'\n\n def init(self, cursor):\n cursor.execute('SELECT id FROM res_partner WHERE lastname IS NOT NULL Limit 1')\n if not cursor.fetchone():\n cursor.execute('UPDATE res_partner set lastname = name WHERE name IS NOT NULL')\n # Create Sql constraint if table is not empty\n cursor.execute('SELECT id FROM res_partner Limit 1')\n if cursor.fetchone():\n cursor.execute('ALTER TABLE res_partner ALTER COLUMN lastname SET NOT NULL')\n\n def _prepare_name_custom(self, cursor, uid, partner, context=None):\n \"\"\"\n This function is designed to be inherited in a custom module\n \"\"\"\n names = (partner.lastname, partner.firstname)\n fullname = \" \".join([s for s in names if s])\n return fullname\n\n def _compute_name_custom(self, cursor, uid, ids, fname, arg, context=None):\n res = {}\n for partner in self.browse(cursor, uid, ids, context=context):\n res[partner.id] = self._prepare_name_custom(\n cursor, uid, partner, context=context)\n return res\n\n def _write_name(self, cursor, uid, partner_id, field_name, field_value, arg, context=None):\n \"\"\"\n Try to reverse the effect of _compute_name_custom:\n * if the partner is not a company and the firstname does not change in the new name\n then firstname remains untouched and lastname is updated accordingly\n * otherwise lastname=new name and firstname=False\n In addition, a heuristic avoids keeping a firstname without a non-blank lastname\n \"\"\"\n field_value = field_value and not field_value.isspace() and field_value or False\n vals = {'lastname': field_value, 'firstname': False}\n if field_value:\n flds = self.read(cursor, uid, [partner_id], ['firstname', 'is_company'], context=context)[0]\n if not flds['is_company']:\n to_check = ' %s' % flds['firstname']\n if field_value.endswith(to_check):\n ln = field_value[:-len(to_check)].strip()\n if ln:\n vals['lastname'] = ln\n del(vals['firstname'])\n else:\n # If the lastname is deleted from the new name\n # then the firstname becomes the lastname\n vals['lastname'] = flds['firstname']\n\n return self.write(cursor, uid, 
partner_id, vals, context=context)\n\n def copy_data(self, cr, uid, _id, default=None, context=None):\n \"\"\"\n Avoid to replicate the firstname into the name when duplicating a partner\n \"\"\"\n default = default or {}\n if not default.get('lastname'):\n default = default.copy()\n default['lastname'] = _('%s (copy)') % self.read(cr, uid, [_id], ['lastname'], context=context)[0]['lastname']\n if default.get('name'):\n del(default['name'])\n return super(ResPartner, self).copy_data(cr, uid, _id, default, context=context)\n\n def create(self, cursor, uid, vals, context=None):\n \"\"\"\n To support data backward compatibility we have to keep this overwrite even if we\n use fnct_inv: otherwise we can't create entry because lastname is mandatory and module\n will not install if there is demo data\n \"\"\"\n to_use = vals\n if 'name' in vals:\n corr_vals = vals.copy()\n corr_vals['lastname'] = corr_vals['name']\n del(corr_vals['name'])\n to_use = corr_vals\n return super(ResPartner, self).create(cursor, uid, to_use, context=context)\n\n _columns = {'name': fields.function(_compute_name_custom, string=\"Name\",\n type=\"char\", store=True,\n select=True, readonly=True,\n fnct_inv=_write_name),\n\n 'firstname': fields.char(\"Firstname\"),\n 'lastname': fields.char(\"Lastname\", required=True),\n }\n","sub_path":"partner_firstname/partner.py","file_name":"partner.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"493445513","text":"#!/usr/bin/python\nimport os\nimport string\nimport re\n\ndef visit(arg, dir, files):\n global f\n dir = string.replace(dir, '\\\\','/')\n if 'svn' in dir : return #skip svn folder\n if '.svn' in dir : return #skip svn folder\n if 'bak' in dir : return #skip bak folder\n if 'tmp' in dir : return #skip tmp folder\n\n headers = []\n sources = []\n\n for file in files:\n if file.endswith('.cpp'):\n sources.append(dir+'/'+file)\n if file.endswith('.c'):\n sources.append(dir+'/'+file)\n\n\n f.write('#####################\\n')\n f.write('# %s\\n\\n' % dir)\n f.write('include_directories( %s )\\n\\n' % dir)\n\n if sources:\n f.write('set( SOURCES ${SOURCES} ' +\n string.join(sources, ' \\n ') )\n f.write(')\\n\\n')\n\nf = file('source_file.txt', 'wt')\nos.path.walk('../source', visit, None)\nf.close()\n","sub_path":"cpp/template/project/source_file_list.py","file_name":"source_file_list.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"168445758","text":"num_list=list(input().split(\" \",1))\nset_list=[1]\nk=int(num_list[0])\nif num_list[1]:\n m=2\nelse:\n m=int(num_list[1])\ntime=1\nt=0\ntime_1=0\nfor i in range(k):\n set_list.append(set_list[i]*2+1)\n set_list.append(set_list[i]*4+5)\n time+=2\n if time>=2*k:\n break\nset_list.sort()\nneed_list=set_list[0:k]\noutput1=\"\"\noutput2=\"\"\nfor j in range(k):\n output1+=str(set_list[j])\nprint(output1)\nlist2=list(map(int,output1))\nlength=len(list2)\nwhile t>> \\03{lightpurple}%s\\03{default} <<<\" % page.aslink(True, True))\n old = page.get()\n new = rev['*']\n pywikibot.showDiff(old, new)\n page.put(new, comment)\n return comment\n\n def log(self, msg):\n pywikibot.output(msg)\n\nimport re\n\nclass myRevertBot(BaseRevertBot):\n\n def callback(self, item):\n if 'top' in item:\n page = pywikibot.Page(self.site, item['title'])\n text=page.get()\n pattern = re.compile('\\[\\[.+?:.+?\\..+?\\]\\]', re.UNICODE)\n return 
pattern.search(text) >= 0\n return False\n\ndef main():\n item = None\n for arg in pywikibot.handleArgs():\n continue\n bot = myRevertBot(site = pywikibot.Site())\n bot.revert_contribs()\n\nif __name__ == \"__main__\":\n try:\n main()\n finally:\n pywikibot.stopme()\n","sub_path":"revert.py","file_name":"revert.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"41987051","text":"'''preview.py'''\n\n# Included Libraries\nfrom usefulFunctions import getNewLength\n\n# Internal Libraries\nimport os\nfrom datetime import timedelta\n\ndef printTimeFrame(title: str, frames, fps: float):\n inSec = round(frames / fps, 1)\n fps = round(fps)\n if(inSec < 1):\n minutes = f'{int(frames)}/{fps} frames'\n else:\n minutes = timedelta(seconds=round(inSec))\n print(f'{title}: {inSec} secs ({minutes})')\n\n\ndef preview(myInput, chunks: list, speeds: list, fps: float, audioFile, log):\n if(not os.path.isfile(myInput)):\n log.error('preview.py: Could not find file ' + myInput)\n\n oldTime = chunks[len(chunks)-1][1]\n print('')\n printTimeFrame('Old length', oldTime, fps)\n\n newL = getNewLength(chunks, speeds, fps)\n printTimeFrame('New length', newL * fps, fps)\n print('')\n\n clips = 0\n cuts = 0\n cutL = []\n clipLengths = []\n for chunk in chunks:\n state = chunk[2]\n if(speeds[state] != 99999):\n clips += 1\n leng = (chunk[1] - chunk[0]) / speeds[state]\n clipLengths.append(leng)\n else:\n cuts += 1\n leng = chunk[1] - chunk[0]\n cutL.append(leng)\n\n print('Number of clips:', clips)\n printTimeFrame('Smallest clip length', min(clipLengths), fps)\n printTimeFrame('Largest clip length', max(clipLengths), fps)\n printTimeFrame('Average clip length', sum(clipLengths) / len(clipLengths), fps)\n print('\\nNumber of cuts:', cuts)\n\n if(cutL != []):\n printTimeFrame('Smallest cut length', min(cutL), fps)\n printTimeFrame('Largest cut length', max(cutL), fps)\n printTimeFrame('Average cut length', sum(cutL) / len(cutL), fps)\n print('')\n\n if(not audioFile):\n print('Video framerate:', fps)\n log.debug(f'Chunks:\\n{chunks}')\n","sub_path":"auto_editor/preview.py","file_name":"preview.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"134704335","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _, tools\nfrom datetime import datetime\nimport re\n\nDTFORMAT = tools.DEFAULT_SERVER_DATETIME_FORMAT\n\nclass SaleOrderLine(models.Model):\n \"\"\" Question Formulier id add \"\"\"\n\n _inherit = \"sale.order.line\"\n\n question_frm_id = fields.Many2one(related='order_id.question_frm_id', string='Project Formulier')\n\nclass SaleOrder(models.Model):\n \"\"\" Question Formulier Tab \"\"\"\n\n _inherit = \"sale.order\"\n\n soort = fields.Selection(string='Soort', selection=[('aanbouw', 'aanbouw'),\n ('hoek', 'hoek'), ('gevel', 'kopgevel'), ('groot deel', 'groot deel')],\n default='aanbouw')\n\n def _get_default_formulier(self):\n opportunity_id = self.env.context.get('default_opportunity_id') or False\n if opportunity_id:\n CrmLead = self.env['crm.lead'].browse(opportunity_id)\n return CrmLead.question_frm_id.id\n\n question_frm_id = fields.Many2one('question.formulier', default=_get_default_formulier, string='Project Formulier')\n\n @api.model\n def create(self,vals):\n res = super(SaleOrder, self).create(vals)\n if res.sale_order_template_id:\n res.fill_drawing_images()\n oppo = res.opportunity_id\n if 
oppo:\n res.user_id = oppo.user_id.id or self.env.user.id\n if oppo.question_frm_id:\n res.question_frm_id = oppo.question_frm_id.id\n res.question_frm_id.date_report = res.create_date.date()\n res.question_frm_id.state = 'task'\n if oppo.soort:\n res.soort = oppo.soort\n return res\n\n def write(self,vals):\n res = super(SaleOrder, self).write(vals)\n if vals.get('sale_order_template_id'):\n self.fill_drawing_images()\n if vals.get('opportunity_id'):\n oppo = self.opportunity_id\n if oppo:\n self.user_id = oppo.user_id.id or self.env.user.id\n if oppo.question_frm_id:\n self.question_frm_id = oppo.question_frm_id.id\n self.question_frm_id.state = 'task'\n if oppo.soort:\n self.soort = oppo.soort\n return res\n\n @api.model\n def fill_drawing_images(self):\n footer = \"\"\n description = \"\"\n imgDict = {}\n f_variables = \"\"\n if self.website_description:\n description = self.website_description.encode('utf-8')\n if self.website_desc_footer:\n footer = self.website_desc_footer.encode('utf-8')\n\n if description:# or footer:\n # custom object replacement for sale order fields value \n c_variables = re.findall(\n b'\\${custom:.*?}', description)\n if footer:\n c_variables.extend(re.findall(\n b'\\${custom:.*?}', footer))\n if c_variables:\n for custom in list(set(c_variables)):\n custom_object = custom.decode('utf-8')\n field = custom_object.split('}')[0][16:]\n if field in self._fields:\n value = self.read([field])\n if value[0].get(field):\n if field == 'amount_total':\n imgDict.update(\n {custom_object: str(\"{:.2f}\".format(value[0].get(field)))})\n elif type(value[0].get(field)) is tuple:\n imgDict.update(\n {custom_object: str(value[0].get(field)[1])})\n else:\n imgDict.update(\n {custom_object: str(value[0].get(field))})\n else:\n imgDict.update({custom_object: ''})\n\n if self.question_frm_id:\n # dynamic value for custom and project formulier object\n f_variables = re.findall(\n b'\\${formulier:.*?}', description)\n if footer:\n f_variables.extend(re.findall(\n b'\\${formulier:.*?}', footer))\n if f_variables:\n for custom in list(set(f_variables)):\n custom_object = custom.decode('utf-8')\n field = custom_object.split('}')[0][19:]\n if field in self.question_frm_id._fields:\n value = self.question_frm_id.read([field])\n if value[0].get(field):\n if type(value[0].get(field)) is tuple:\n imgDict.update(\n {custom_object: str(value[0].get(field)[1])})\n else:\n imgDict.update(\n {custom_object: str(value[0].get(field))})\n else:\n imgDict.update({custom_object: ''})\n if self.opportunity_id:\n # dynamic value for opportunity\n o_variables = re.findall(\n b'\\${opportunity:.*?}', description)\n if footer:\n o_variables.extend(re.findall(\n b'\\${opportunity:.*?}', footer))\n if o_variables:\n for oppo in list(set(o_variables)):\n oppo_object = oppo.decode('utf-8')\n field = oppo_object.split('}')[0][21:]\n if field == 'salutation':\n salutation = 'Geachte '\n if self.opportunity_id.title:\n salutation = salutation + self.opportunity_id.title.name\n else:\n salutation = salutation + 'heer/mevrouw'\n imgDict.update(\n {oppo_object: salutation})\n if field in self.opportunity_id._fields:\n value = self.opportunity_id.read([field])\n if value[0].get(field):\n if type(value[0].get(field)) is tuple:\n imgDict.update(\n {oppo_object: str(value[0].get(field)[1])})\n else:\n imgDict.update(\n {oppo_object: str(value[0].get(field))})\n else:\n imgDict.update({oppo_object: ''})\n\n # dynamic value of 3 field in quote template\n dutch_date = {'January': 'januari', 'February': 'februari', 
'March': 'maart', 'May': 'mei', 'June': 'juni', 'July': 'juli', 'August': 'augustus', 'October': 'oktober', 'Monday': 'maandag', 'Tuesday': 'dinsdag', 'Wednesday': 'woensdag', 'Thursday': 'donderdag', 'Friday': 'vrijdag', 'Saturday': 'zaterdag', 'Sunday': 'zondag'}\n if f_variables:\n if self.question_frm_id.lead_id and self.question_frm_id.lead_id.soort:\n imgDict.update({'${formulier:object.soort}': str(\n self.question_frm_id.lead_id.soort)})\n else:\n imgDict.update({'${formulier:object.soort}': ''})\n if self.question_frm_id.lead_id and self.question_frm_id.lead_id.user_id:\n imgDict.update({'${formulier:object.salesman}': str(\n self.question_frm_id.lead_id.user_id.name)})\n else:\n imgDict.update({'${formulier:object.salesman}': ''})\n if self.question_frm_id.date_opportunity:\n date_string = self.question_frm_id.date_opportunity.strftime('%A %d %B')\n if self._context.get('lang') == 'nl_NL':\n for i, j in dutch_date.items():\n date_string = date_string.replace(i,j)\n imgDict.update({'${formulier:object.date_opportunity}': date_string})\n else:\n imgDict.update({'${formulier:object.date_opportunity}': ''})\n\n for key, val in imgDict.items():\n description = description.replace(\n key.encode('utf-8'), val.encode('utf-8'))\n if footer:\n for key, val in imgDict.items():\n footer = footer.replace(\n key.encode('utf-8'), val.encode('utf-8'))\n self.website_desc_footer = footer\n self.website_description = description\n return self.website_description\n\nclass OrderTemplate(models.Model):\n \"\"\" Sale Order Template \"\"\"\n\n _inherit = \"sale.order.template\"\n\n template_video_ids = fields.One2many('order.video', 'order_template_id', track_visibility='always', string='Video')\n\nclass OrderTemplateVideo(models.Model):\n \"\"\" new model for add videos in sale order template\"\"\"\n\n _inherit = 'order.video'\n\n order_template_id = fields.Many2one('sale.order.template', 'Related Template', copy=True, readonly=True)\n","sub_path":"quotation_images_feedback/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":9311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"646996506","text":"import pygame\nimport json, math\n\n# -- Global Constants\n# -- Colours\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nBLUE = (50,50,255)\nYELLOW = (255,255,0)\nBROWN = (100,100,0)\nRED = (255,0,0)\n\ngame_over = False\n\npygame.init()\n\nsize = (500,500)\nscreen = pygame.display.set_mode(size)\n\npygame.display.set_caption(\"Djikstras Algorithm\")\n\nclock = pygame.time.Clock()\n\nfile = open(\"Pygame/Shortest paths/map.JSON\",\"r\")\ntheMazeArray = json.load(file)\nfile.close()\n\nfile = open(\"Pygame/Shortest paths/graph.JSON\", \"r\")\ngraph = json.load(file)\nfile.close()\n\ngraph = graph\n\n\ndef getNodes(Maze):\n Node_List = []\n for y in range(len(Maze)):\n for x in range(len(Maze[y])):\n if Maze[y][x] == 0:\n neighbours = [0,0,0,0] #left,right,above,below\n if y > 0 and y < len(Maze)-1:\n if Maze[y-1][x]== 0: neighbours[2] = 1\n if Maze[y+1][x]== 0: neighbours[3] = 1\n if x > 0 and x < len(Maze[y])-1:\n if Maze[y][x-1]== 0: neighbours[0] = 1\n if Maze[y][x+1] == 0: neighbours[1] = 1\n if neighbours.count(1) > 2:\n Node_List.append([x,y])\n elif neighbours[0] ^ neighbours[1] and neighbours[2] ^ neighbours[3]:\n Node_List.append([x,y])\n return Node_List\n\ndef getConnections(Maze,Nodes):\n Adjacency_Vector = [[] for i in range(len(Nodes))] #Adjacency_Vector is a list of lists of coordinates and weights\n for i in 
range(len(Nodes)):\n x = Nodes[i][0]\n y = Nodes[i][1]\n if Maze[y-1][x]== 0:\n Found = False\n count = 0\n while Maze[y][x] != 1 and Found == False:\n y -= 1\n count += 1\n if [x,y] in Nodes:\n Adjacency_Vector[i].append([(Nodes.index([x,y])),count])\n Found = True\n y = Nodes[i][1]\n x = Nodes[i][0]\n if Maze[y+1][x]== 0:\n Found = False\n count = 0\n while Maze[y][x] != 1 and Found == False:\n y += 1\n count += 1\n if [x,y] in Nodes:\n Adjacency_Vector[i].append([(Nodes.index([x,y])),count])\n Found = True\n y = Nodes[i][1]\n x = Nodes[i][0]\n if Maze[y][x-1]== 0:\n Found = False\n count = 0\n while Maze[y][x] != 1 and Found == False:\n x -= 1\n count += 1\n if [x,y] in Nodes:\n Adjacency_Vector[i].append([(Nodes.index([x,y])),count])\n Found = True\n x = Nodes[i][0]\n y = Nodes[i][1]\n if Maze[y][x+1] == 0:\n Found = False\n count = 0\n while Maze[y][x] != 1 and Found == False:\n x += 1\n count += 1\n if [x,y] in Nodes:\n Adjacency_Vector[i].append([(Nodes.index([x,y])),count])\n Found = True\n x = Nodes[i][0]\n y = Nodes[i][1]\n Connection_Dict = {i:{} for i in range(len(Nodes))}\n for i in range(len(Adjacency_Vector)):\n for j in Adjacency_Vector[i]:\n Connection_Dict[i][j[0]] = j[1]\n return Connection_Dict #list for each node containing lists of index of connecting node and distance\n\ndef Dijkstra(graph,start_node,end_node):\n shortest_distance = {}\n Path = []\n previous = {}\n unseen_nodes = graph\n for node in unseen_nodes:\n shortest_distance[node] = math.inf\n shortest_distance[start_node] = 0\n while unseen_nodes:\n min_node = None\n for node in unseen_nodes:\n if min_node is None:\n min_node = node\n elif shortest_distance[node] < shortest_distance[min_node]:\n min_node = node\n for Edge, Weight in graph[min_node].items():\n if Weight + shortest_distance[min_node] < shortest_distance[Edge]:\n shortest_distance[Edge] = Weight + shortest_distance[min_node]\n previous[Edge] = min_node\n unseen_nodes.pop(min_node)\n current_node = end_node\n while current_node != start_node:\n Path.insert(0,current_node)\n current_node = previous[current_node] \n Path.insert(0,start_node)\n if shortest_distance[end_node] != math.inf:\n return Path\n\nclass Game(pygame.sprite.Sprite):\n def __init__(self):\n super().__init__()\n self.wall_group = pygame.sprite.Group()\n self.all_sprites_group = pygame.sprite.Group()\n self.player_group = pygame.sprite.Group()\n\n self.player = Player(30,30,8,8)\n self.player_group.add(self.player)\n self.all_sprites_group.add(self.player)\n \n for i in range (len(theMazeArray)):\n for j in range (len(theMazeArray[i])):\n if theMazeArray[i][j] == 1:\n self.newwall = Wall(j*10,i*10)\n self.wall_group.add(self.newwall)\n self.all_sprites_group.add(self.newwall)\n #end if\n #next j\n #next i\n #end procedure\n \n def update(self):\n self.all_sprites_group.update()\n self.all_sprites_group.draw(screen)\n\n #player movement\n keys = pygame.key.get_pressed()\n if keys[pygame.K_UP]:\n self.player.move_up()\n elif keys[pygame.K_DOWN]:\n self.player.move_down()\n elif keys[pygame.K_RIGHT]:\n self.player.move_right()\n elif keys[pygame.K_LEFT]:\n self.player.move_left()\n #end if\n\n self.player_hit_wall_group = pygame.sprite.spritecollide(self.player, self.wall_group, False)\n if len(self.player_hit_wall_group) > 0:\n self.player.set_speed(0, 0)\n self.player.rect.x = self.player_old_x\n self.player.rect.y = self.player_old_y\n #end if\n\n self.player_old_x = self.player.rect.x\n self.player_old_y = self.player.rect.y\n #end procedure\n#end class\n\n\nclass 
Player(pygame.sprite.Sprite):\n    def __init__(self, x_coord, y_coord, width, height):\n        super().__init__()\n        self.width = width\n        self.height = height\n        self.image = pygame.Surface([self.width, self.height])\n        self.image.fill(RED)\n        self.rect = self.image.get_rect()\n        self.rect.x = x_coord\n        self.rect.y = y_coord\n        self.speed = 2\n    #end procedure\n\n    def move_up(self):\n        self.rect.y -= self.speed\n    #end procedure\n\n    def move_down(self):\n        self.rect.y += self.speed\n    #end procedure\n    \n    def move_right(self):\n        self.rect.x += self.speed\n    #end procedure\n\n    def move_left(self):\n        self.rect.x -= self.speed\n    #end procedure\n\n    def set_speed(self, x_val, y_val):\n        self.rect.x += x_val\n        self.rect.y += y_val\n    #end procedure\n#end class\n\n    \nclass Wall(pygame.sprite.Sprite):\n    def __init__(self, x, y):\n        super().__init__()\n        width = 10\n        height = 10\n        self.image = pygame.Surface([width, height])\n        self.image.fill(BLUE)\n        self.rect = self.image.get_rect()\n        self.rect.y = y\n        self.rect.x = x\n    #end procedure\n#end class\n\ngame = Game()\nwhile not game_over:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            game_over = True \n        #end if\n    #next event\n    nodes = getNodes(theMazeArray)\n    connections = getConnections(theMazeArray, nodes)\n    screen.fill(BLACK)\n    \n    game.update()\n\n    pygame.display.flip()\n    clock.tick(60)\n#end while\npygame.quit()\n\n","sub_path":"Pygame/Shortest paths/Djikstras.py","file_name":"Djikstras.py","file_ext":"py","file_size_in_byte":7693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"519502850","text":"import csv\nimport glob\nimport os\n\nfrom django.db import transaction\n\nfrom .models import (\n    Chapter,\n    ChemicalSubstance,\n    Paragraph,\n    Presentation,\n    Product,\n    Section,\n    Subparagraph,\n)\n\n\ndef import_data(release_dir):\n    paths = glob.glob(os.path.join(release_dir, \"*.csv\"))\n    assert len(paths) == 1\n    path = paths[0]\n\n    chapters = set()\n    sections = set()\n    paragraphs = set()\n    subparagraphs = set()\n    chemical_substances = set()\n    products = set()\n    presentations = set()\n\n    with open(path) as f:\n        for r in csv.DictReader(f):\n            chapters.add((r[\"BNF Chapter Code\"], r[\"BNF Chapter\"]))\n            sections.add((r[\"BNF Section Code\"], r[\"BNF Section\"]))\n            paragraphs.add((r[\"BNF Paragraph Code\"], r[\"BNF Paragraph\"]))\n            subparagraphs.add((r[\"BNF Subparagraph Code\"], r[\"BNF Subparagraph\"]))\n            chemical_substances.add(\n                (r[\"BNF Chemical Substance Code\"], r[\"BNF Chemical Substance\"])\n            )\n            products.add((r[\"BNF Product Code\"], r[\"BNF Product\"]))\n            presentations.add((r[\"BNF Presentation Code\"], r[\"BNF Presentation\"]))\n\n    with transaction.atomic():\n        Chapter.objects.all().delete()\n        Chapter.objects.bulk_create(\n            Chapter(code, name) for code, name in sorted(chapters)\n        )\n\n        Section.objects.all().delete()\n        Section.objects.bulk_create(\n            Section(code, name) for code, name in sorted(sections)\n        )\n\n        Paragraph.objects.all().delete()\n        Paragraph.objects.bulk_create(\n            Paragraph(code, name)\n            for code, name in sorted(paragraphs)\n            if \"DUMMY\" not in name\n        )\n\n        Subparagraph.objects.all().delete()\n        Subparagraph.objects.bulk_create(\n            Subparagraph(code, name)\n            for code, name in sorted(subparagraphs)\n            if \"DUMMY\" not in name\n        )\n\n        ChemicalSubstance.objects.all().delete()\n        ChemicalSubstance.objects.bulk_create(\n            ChemicalSubstance(code, name)\n            for code, name in sorted(chemical_substances)\n            if \"DUMMY\" not in name\n        )\n\n        Product.objects.all().delete()\n        
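# Same delete-then-recreate pattern as the models above; rows whose name\n        # contains \"DUMMY\" are placeholders in the source data and are skipped.\n        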
Product.objects.bulk_create(\n Product(code, name)\n for code, name in sorted(products)\n if \"DUMMY\" not in name\n )\n\n Presentation.objects.all().delete()\n Presentation.objects.bulk_create(\n Presentation(code, name)\n for code, name in sorted(presentations)\n if \"DUMMY\" not in name\n )\n","sub_path":"coding_systems/bnf/import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"291560429","text":"#!/usr/local/bin/python3\n\nimport threading\nimport queue\nimport sys\n\nimport sqlite3\n\nimport reader\nimport db\n\n\ndef main():\n \"\"\"\n Reads a tag and opens a door if the UID received exists in a database.\n \"\"\"\n dbo = db.Database(\"/var/www/data/database.db\")\n print(dbo)\n try:\n dbo.con()\n except sqlite3.Error as e:\n print(e)\n sys.exit(0)\n else:\n # Creates a Reader object\n rdr = reader.Reader()\n rdr.init()\n\n # Creates an event in order to blink the RGB LED.\n evt = threading.Event()\n evt.clear()\n\n # Creates a queue in order to receive the LED modes.\n q = queue.Queue()\n\n # Creates a thread in order to obtain asynchronous behaviour.\n t = threading.Thread(target=reader.led, args=(q, evt,))\n evt.set()\n t.start()\n\n # READ mode\n q.put(0)\n\n run = True\n while run:\n try:\n uid = rdr.read()\n if uid is not None:\n try:\n name = dbo.chck(uid) # Checks UID into the database.\n except ValueError:\n q.put(2) # BAN mode.\n rdr.ban()\n q.put(0)\n else:\n q.put(1) # OPEN mode.\n rdr.open()\n try:\n dbo.add_r(name, \"Principal\") # Appends UID to the register.\n except sqlite3.Error:\n run = False\n else:\n q.put(0) # READ mode.\n except KeyboardInterrupt:\n run = False\n\n # Termination of different tasks properly.\n try:\n dbo.dis()\n except sqlite3.Error:\n print(\"ERROR -> Could not close the database properly.\")\n finally:\n evt.clear()\n t.join()\n rdr.init()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/web/nfc.py","file_name":"nfc.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"538575487","text":"from __future__ import print_function\r\n\r\nimport numpy\r\nfrom imutils.object_detection import non_max_suppression\r\nfrom imutils import paths\r\nimport numpy as np\r\nimport argparse\r\nimport imutils\r\nimport cv2\r\nimport random\r\nfrom time import sleep\r\n\r\n\r\n# fgbg = cv2.createBackgroundSubtractorMOG2()\r\n# frame = fgbg.apply(frame)\r\ndef sameRect(rect1, rect2):\r\n # if(rect1 == None or rect2 == None):\r\n # return False\r\n # return False\r\n if (rect1[0] > rect2[2] or rect2[0] > rect1[2]):\r\n print(\"if 1: return false\")\r\n return False\r\n elif (rect1[3] < rect2[1] or rect2[3] < rect1[1]):\r\n print(\"if 2: return false\")\r\n return False\r\n else:\r\n print(\"return true\")\r\n return True\r\n pass\r\n\r\n\r\nrandom.seed(a=None, version=2)\r\nrearm_clock = 20\r\ncritic_frames_array = []\r\ncritic_data_array = []\r\n# cap = cv2.VideoCapture(r'C:\\Users\\Riccardo\\Downloads\\PETS09-S2L1.mp4')\r\n# cap = cv2.VideoCapture(r'C:\\Users\\Riccardo\\Downloads\\192.168.101.19_ch29_20190704053054_20190704080049.mp4')\r\n# cap = cv2.VideoCapture(r'C:\\Users\\Riccardo\\Downloads\\0-40_1-05.mp4')\r\n# cap=cv2.VideoCapture(r'C:\\Users\\Riccardo\\Downloads\\0-40_1-05__3_13.mp4')\r\ncap = cv2.VideoCapture(r'C:\\Users\\Riccardo\\Downloads\\0-40_1-05__11_13__0-25.mp4')\r\nfor i in 
range(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))):\r\n if (i % rearm_clock == 0):\r\n print(\"reading at frame\", i)\r\n cap.set(1, i - 1)\r\n res, frame = cap.read()\r\n frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\r\n critic_frames_array.append(frame)\r\ncap.release()\r\nhog = cv2.HOGDescriptor()\r\nhog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\r\nfor j in range(len(critic_frames_array)):\r\n print(\"analyzing critic frame\", j)\r\n current_frame = critic_frames_array[j]\r\n # (rects, weights) = hog.detectMultiScale(current_frame, winStride=(4, 4), padding=(8, 8), scale=1.1)\r\n (rects, weights) = hog.detectMultiScale(current_frame, winStride=(4, 4), scale=1.3)\r\n rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])\r\n critic_data_array.append(rects)\r\n\r\ntracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']\r\ntracker_type = tracker_types[2] # KCF (2) good out, no resize MEDIANFLOW (4) pretty good resize, bad out TLD (3) bad\r\n# speed, bad tracking, pretty good resize\r\n\r\n# video = cv2.VideoCapture(r'C:\\Users\\Riccardo\\Downloads\\PETS09-S2L1.mp4')\r\n# video = cv2.VideoCapture(r'C:\\Users\\Riccardo\\Downloads\\0-40_1-05.mp4')\r\n# video = cv2.VideoCapture(r'C:\\Users\\Riccardo\\Downloads\\0-40_1-05__3_13.mp4')\r\nvideo = cv2.VideoCapture(r'C:\\Users\\Riccardo\\Downloads\\0-40_1-05__11_13__0-25.mp4')\r\nok, frame = video.read()\r\nframe = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\r\ncv2.imshow(\"frame1\", frame)\r\ncv2.waitKey(0)\r\ncv2.destroyWindow(\"frame1\")\r\nii = 0\r\ncritic_counter = 0\r\ntrackers = []\r\ncolors = []\r\nbboxes = []\r\nwhile video.isOpened():\r\n print(ii)\r\n frame_after = frame.copy()\r\n if (ii % rearm_clock == 0):\r\n print(\"multiplo\")\r\n print(\"critic counter \" + str(critic_counter))\r\n if not ok:\r\n break\r\n # trackers = []\r\n\r\n rects = critic_data_array[critic_counter]\r\n # detect people in the image\r\n prev_length = len(rects)\r\n pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)\r\n if not (len(rects) == prev_length):\r\n print(\"non maxima suppression\")\r\n # draw the final bounding boxes\r\n bboxes = [[x, y, x + w, y + h] for (x, y, w, h) in bboxes]\r\n print(\"pick\", pick)\r\n for i in range(len(pick)):\r\n print(\"i=\", i)\r\n found = False\r\n for j in range(len(bboxes)):\r\n print(\"j=\", j)\r\n print(\"before if bboxes\", bboxes)\r\n if (sameRect(pick[i], bboxes[j])):\r\n found = True\r\n # bboxes[j]=pick[i]\r\n bboxes.remove(bboxes[j])\r\n bboxes.insert(j, pick[i])\r\n print(\"bboxes\", bboxes)\r\n break\r\n if not found:\r\n bboxes.append(pick[i])\r\n color = (random.randrange(255), random.randrange(255), random.randrange(255))\r\n colors.append(color)\r\n print(\"after append bboxes\", bboxes)\r\n\r\n trackers = []\r\n for (xA, yA, xB, yB) in bboxes:\r\n # cv2.rectangle(frame_after, (xA, yA), (xB, yB), (0, 255, 0), 2)\r\n # print(xA, yA, xB, yB)\r\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n if tracker_type == 'BOOSTING':\r\n trackers.append(cv2.TrackerBoosting_create())\r\n trackers[-1].init(gray_frame, (xA, yA, xB - xA, yB - yA))\r\n if tracker_type == 'MIL':\r\n trackers.append(cv2.TrackerMIL_create())\r\n trackers[-1].init(gray_frame, (xA, yA, xB - xA, yB - yA))\r\n if tracker_type == 'KCF':\r\n trackers.append(cv2.TrackerKCF_create())\r\n trackers[-1].init(gray_frame, (xA, yA, xB - xA, yB - yA))\r\n if tracker_type == 'TLD':\r\n trackers.append(cv2.TrackerTLD_create())\r\n trackers[-1].init(gray_frame, (xA, yA, xB - 
xA, yB - yA))\r\n if tracker_type == 'MEDIANFLOW':\r\n trackers.append(cv2.TrackerMedianFlow_create())\r\n trackers[-1].init(gray_frame, (xA, yA, xB - xA, yB - yA))\r\n if tracker_type == 'GOTURN':\r\n trackers.append(cv2.TrackerGOTURN_create())\r\n trackers[-1].init(gray_frame, (xA, yA, xB - xA, yB - yA))\r\n if tracker_type == 'MOSSE':\r\n trackers.append(cv2.TrackerMOSSE_create())\r\n trackers[-1].init(gray_frame, (xA, yA, xB - xA, yB - yA))\r\n if tracker_type == \"CSRT\":\r\n trackers.append(cv2.TrackerCSRT_create())\r\n trackers[-1].init(gray_frame, (xA, yA, xB - xA, yB - yA))\r\n rects = non_max_suppression(rects, probs=None, overlapThresh=0.65) # // A COSA SERVE?!?\r\n correction_factor = 0\r\n for i in range(len(trackers)):\r\n ok, bbox = trackers[i - correction_factor].update(frame)\r\n # print(ok, bbox)\r\n # Draw bounding box\r\n if ok:\r\n # Tracking success\r\n p1 = (int(bbox[0]), int(bbox[1]))\r\n p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))\r\n # cv2.rectangle(frame_after, p1, p2, (255, 0, 0), 2, 1)\r\n cv2.rectangle(frame_after, p1, p2, colors[i - correction_factor], 2, 1)\r\n else:\r\n trackers.remove(trackers[i - correction_factor])\r\n colors.remove(colors[i - correction_factor])\r\n correction_factor += 1\r\n # Tracking failure\r\n # cv2.putText(frame_after, \"Tracking failure detected\", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,\r\n # (0, 0, 255), 2)\r\n # pass\r\n critic_counter = critic_counter + 1\r\n else:\r\n print(\"non multiplo\")\r\n correction_factor = 0\r\n bboxes = []\r\n for i in range(len(trackers)):\r\n ok, bbox = trackers[i - correction_factor].update(frame)\r\n # print(ok, bbox)\r\n # Draw bounding box\r\n if ok:\r\n # Tracking success\r\n bboxes.append(bbox)\r\n p1 = (int(bbox[0]), int(bbox[1]))\r\n p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))\r\n # cv2.rectangle(frame_after, p1, p2, (255, 0, 0), 2, 1)\r\n cv2.rectangle(frame_after, p1, p2, colors[i - correction_factor], 2, 1)\r\n else:\r\n trackers.remove(trackers[i - correction_factor])\r\n colors.remove(colors[i - correction_factor])\r\n correction_factor += 1\r\n # Tracking failure\r\n # cv2.putText(frame_after, \"Tracking failure detected\", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,\r\n # (0, 0, 255), 2)\r\n # pass\r\n cv2.imshow(\"pedestrian tracking\", frame_after)\r\n ok, frame = video.read()\r\n if ok:\r\n pass\r\n frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\r\n print(ok)\r\n if not ok:\r\n break\r\n cv2.waitKey(20) # refresh delay\r\n ii = ii + 1\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","sub_path":"tracking4.py","file_name":"tracking4.py","file_ext":"py","file_size_in_byte":8443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"40521899","text":"from parser import Parser\nfrom nfa import Nfa, State\n\n\nclass Regex:\n def __init__(self, regex, verbose=False):\n if verbose:\n print('Regex:', regex)\n postfix = Parser.parse_regex(regex, verbose)\n if verbose:\n print('Regex in reversed polish notation:', postfix)\n self.nfa = Nfa.from_postfix(postfix)\n\n def match(self, text: str):\n current_states = self.nfa.start.walk_eps()\n for char in text:\n next_states = []\n for state in current_states:\n if char in state.transitions:\n next_states.extend(state.transitions[char].walk_eps())\n if Parser.MATCH_ALL_OP in state.transitions:\n next_states.extend(state.transitions[Parser.MATCH_ALL_OP].walk_eps())\n current_states = next_states\n\n for state in current_states:\n if state.final:\n return True\n return 
False\n","sub_path":"lab7/regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"633590546","text":"import sys\nimport string\n\nfilename = sys.argv[1] \n\nfor letter in string.ascii_lowercase:\n    count = 0\n    for line in open(filename, \"r\"):\n        for ch in line:\n            if ch == letter:\n                count = count + 1\n    if count != 0:\n        print(\"The letter\", letter, \"occurs\", count, \"times!\")\n","sub_path":"Schulung/Count_Characters.py","file_name":"Count_Characters.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"356008265","text":"'''\n2018-4-4\nStrip the irrelevant parts ('###') from the txt files provided with the original data\n\n'''\n\nimport tensorflow as tf\n\nfiles=tf.train.match_filenames_once('E:/ICPR_text_train_part2_20180313/txt_10000/T*')\nfilename_queue=tf.train.string_input_producer(files)\nreader=tf.TextLineReader()\nkey,value=reader.read(filename_queue)\n\nwith tf.Session() as sess:\n    # Start populating the filename queue.\n    tf.global_variables_initializer().run()\n    coord = tf.train.Coordinator()\n    threads = tf.train.start_queue_runners(coord=coord)\n\n\n\n    coord.request_stop()\n    coord.join(threads)","sub_path":"filefilter.py","file_name":"filefilter.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"465617011","text":"from common.context import Feature\nfrom services import space_settings\nfrom common import utils\nimport os\n\n\nclass ExploreFiles(Feature):\n    def __init__(self):\n        super(ExploreFiles, self).__init__(ExploreFiles)\n\n    def execute(self, parent_path=None):\n        if not os.path.exists(parent_path):\n            return None\n        if os.path.isfile(parent_path):\n            return []\n        else:\n            child_list = os.listdir(parent_path)\n            answer = []\n            for child in child_list:\n                child_object = utils.Object()\n                child_object.path = os.path.join(parent_path, child)\n                child_object.is_file = os.path.isfile(child_object.path)\n                child_object.size = os.path.getsize(child_object.path)\n                answer.append(child_object)\n            return answer\n\n\nclass GetAvailablePlaces(Feature):\n\n    def __init__(self):\n        super(GetAvailablePlaces, self).__init__(GetAvailablePlaces)\n\n    def execute(self, args=None):\n        sm = self.service(space_settings.SettingManager)\n        assert isinstance(sm, space_settings.SettingManager)\n        places = sm.specified_places()\n        answer_list = []\n        for place in places:\n            place_result = utils.Object()\n            place_result.name = place.id\n            place_result.folder = place.folder\n            answer_list.append(place_result)\n        return answer_list","sub_path":"server/uc/user_cases_fs.py","file_name":"user_cases_fs.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"4184994","text":"import utils.tools as tools\nimport Shuju_ruku as shuju\nimport pandas as pd\n\n\ndef jisuan_huolipanbili(date, n):\n    datas_now = shuju.get_huolipanbili(date)\n    data_result = pd.DataFrame()\n    times = 0\n    for index, row in datas_now.iterrows():\n        for day in tools.getDateIterator(n, date):\n\n            try:\n                datas = shuju.get_huolipanbili_with_code(day, row['ts_code'])\n                datas.set_index(['ts_code'], inplace = True, drop=False)\n                if float(datas['profit'][0]) <= 0.5:\n                    times = 0\n                    break\n                times += 1\n                if n == times: \n                    data_result = data_result.append(row, ignore_index=False)\n                    print('data_result: ', data_result)\n            
except (TypeError, KeyError) as e:\n times = 0\n break\n data_result = data_result[['ts_code', 'amount', 'average', 'float_share', 'turnover_rate', 'vol', 'profit']] \n print('data_result: ', data_result)\n data_result.to_excel('Huolipanbili.xlsx')\n \n \n\njisuan_huolipanbili('20191024', 1)\n# jisuan_choumafenbu('20190801', '20191023')\n","sub_path":"Huolipanbili.py","file_name":"Huolipanbili.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"581014310","text":"from restfulie.parser import Parser\nfrom restfulie.processor import ExecuteRequestProcessor\n\nfrom mockito import *\n\ndef test_parser_follow():\n\n request = mock()\n processor = mock()\n resource = mock()\n\n parser = Parser([processor])\n when(processor).execute(parser, request, {}).thenReturn(resource)\n\n assert parser.follow(request, {}) == resource\n","sub_path":"test/parser_test.py","file_name":"parser_test.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"586708873","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('news', '0004_auto_20160424_2108'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='article',\n name='subcolumn',\n field=models.ForeignKey(default=1, verbose_name='\\u5f52\\u5c5e\\u5b50\\u680f\\u76ee', to='news.SubColumn'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='article',\n name='column',\n field=models.ForeignKey(verbose_name='\\u5f52\\u5c5e\\u680f\\u76ee', to='news.Column'),\n ),\n ]\n","sub_path":"news/migrations/0005_auto_20160425_1922.py","file_name":"0005_auto_20160425_1922.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"566838760","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 29 10:30:44 2019\n\n@author: ckielasjensen\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport pickle\nfrom sklearn import preprocessing, model_selection\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation\n\nDATA_DIR = 'data'\nDATA_FNAME = '2d_5deg_5veh_2obs_euclidean'\n\nHIDDEN_SIZE = 1000\nEPOCHS = 5\nOPTIMIZER = 'rmsprop'\nLOSS = 'mean_squared_error'\nMODEL_NAME = f'{OPTIMIZER}_{LOSS}_{EPOCHS}epochs_{str(HIDDEN_SIZE)}'\n\nX_HEADERS = ['x0_i',\n 'y0_i',\n 'x1_i',\n 'y1_i',\n 'x2_i',\n 'y2_i',\n 'x3_i',\n 'y3_i',\n 'x4_i',\n 'y4_i',\n 'x0_f',\n 'y0_f',\n 'x1_f',\n 'y1_f',\n 'x2_f',\n 'y2_f',\n 'x3_f',\n 'y3_f',\n 'x4_f',\n 'y4_f',\n 'obs0x',\n 'obs0y',\n 'obs1x',\n 'obs1y']\n\n\n\n\ndef import_data(directory='.', data_fname='2d_5deg_5veh_2obs_euclidean'):\n # Import the data\n fnames = [i for i in os.listdir(directory) if data_fname in i]\n print('[+] Importing Data...')\n temp = []\n for name in fnames:\n print(' [-] importing ' + name)\n df = pd.read_csv(os.path.join(directory, name))\n temp.append(df)\n\n data = pd.concat(temp, ignore_index=True)\n print('[+] Imported Data:')\n print(data.head())\n\n # Properly organize the data\n X = data[X_HEADERS]\n y = data.drop(X_HEADERS, axis=1)\n print('[+] X Data:')\n print(X.head())\n print('[+] Y Data:')\n print(y.head())\n\n X = np.array(X)\n 
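# Hand scikit-learn plain numpy arrays; train_test_split below returns the\n    # (X_train, X_test, y_train, y_test) tuple that the caller unpacks.\n    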
y = np.array(y)\n\n return model_selection.train_test_split(X, y, test_size=0.2)\n\n\ndef build_model():\n model = Sequential()\n model.add(Dense(HIDDEN_SIZE, input_dim=24))\n model.add(Activation('relu'))\n model.add(Dense(40, input_dim=HIDDEN_SIZE))\n \n model.compile(optimizer=OPTIMIZER,\n loss=LOSS,\n metrics=['accuracy'])\n \n return model\n\n\ndef save_model(model):\n # Increment the number of the file name and save the regressor as a pickle\n print('[+] Saving regressor object...')\n i = 0\n while os.path.exists(MODEL_NAME + '_' + str(i) + '.pickle'):\n i += 1\n \n model.save(MODEL_NAME + '_' + str(i) + '.model')\n\nif __name__ == '__main__':\n # Import data\n# var_names = ['X_train', 'y_train', 'X_test', 'y_test']\n if 'X_train' not in locals():\n X_train, X_test, y_train, y_test = import_data(DATA_DIR, DATA_FNAME)\n X_train = tf.keras.utils.normalize(X_train, axis=1)\n X_test = tf.keras.utils.normalize(X_test, axis=1)\n# # Create the model\n# model = build_model()\n# # Train the model\n# model.fit(X_train, y_train, epochs=EPOCHS)\n# \n# val_loss, val_acc = model.evaluate(X_test, y_test)\n# print(val_loss, val_acc)\n# \n# save_model(model)\n\n print('[!] Done')","sub_path":"BuildDNNRegressor.py","file_name":"BuildDNNRegressor.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"619487691","text":"import os \r\nimport json \r\nimport pymongo\r\nimport logging\r\n\r\ndef connectToMongo():\r\n #get the VCAP_SERVICES env. variable\r\n logging.info(\"Connecting to Mongo\")\r\n services = os.getenv('VCAP_SERVICES') \r\n #convert to json:\r\n services_json = json.loads(services) \r\n #get the mongoDB url:\r\n mongodb_url = services_json['mongodb-2.4'][0]['credentials']['url']\r\n #connect:\r\n client = pymongo.MongoClient(mongodb_url) \r\n #get the default database:\r\n db = client.get_default_database() \r\n logging.info ('connected to mongodb!') \r\n return db\r\n\r\ndef connectToLocalMongo():\r\n client = pymongo.MongoClient()\r\n db = client[\"otc\"]\r\n return db","sub_path":"mongoHelpers.py","file_name":"mongoHelpers.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"393979857","text":"\"\"\"Methods to cache remote files.\"\"\"\nimport hashlib\nimport logging\nimport os\n\nimport requests\n\nfrom timer import timed\n\nlogger = logging.getLogger(__name__)\n\nCACHE_DIRECTORY = '/cache/'\n\n\n@timed\ndef cache_file(url, prefix):\n \"\"\"Locally cache files fetched from a url.\"\"\"\n cache_filepath = _get_cached_filepath(\n prefix=prefix,\n url=url,\n )\n # If the file exists, return path.\n if os.path.isfile(cache_filepath):\n logger.info('Returning cached file for {}.'.format(url))\n return cache_filepath\n # If the file does not exist, download and return path.\n else:\n r = requests.get(url, verify=False)\n\n with open(cache_filepath, 'wb') as f:\n f.write(r.content)\n\n logger.info('Caching file for {}.'.format(url))\n return cache_filepath\n\n\ndef _get_cached_filepath(prefix, url):\n \"\"\"Return the filepath where a cached response would live for the given inputs.\"\"\"\n filename = '{prefix}_{hash_string}.cache'.format(\n prefix=prefix,\n hash_string=_hash_value(url),\n )\n logger.debug('Cached filepath: ' + os.path.join(CACHE_DIRECTORY, filename))\n return os.path.join(CACHE_DIRECTORY, filename)\n\n\ndef _hash_value(value):\n \"\"\"Hash value to help identify what cached file to 
use.\"\"\"\n return hashlib.md5(value.encode('utf-8')).hexdigest()[:9]\n","sub_path":"api-flask/app/caching.py","file_name":"caching.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"614484803","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\n\n#================================#\n# 新华网视频 youtube-dl 下载扩展 #\n#================================#\n\n\"\"\"\n#\n# 使用前须知:\n# 该脚本需要预装 youtube-dl 下载器,推荐用 pip3 install youtube-dl 安装\n# 1.使用该脚本时请将该脚本放入脚本库 youtube_dl/extractor 文件夹下\n# 2.请找到 youtube_dl/extractor 文件夹下的 extractors.py 文件\n# 在最后添加一行内容 from .xinhuanet import XinHuaNetIE,XinHuaNet2IE\n# *另外请使用 python3,避免出现一些比较低级的麻烦。\n#\n# 解析网页说明:\n# 该脚本解析 http://www.xinhuanet.com 的两种视频页\n# 第一种:http://www.xinhuanet.com/video/2018-08/28/c_129941452.htm(新闻页)\n# 第二种:http://vod.xinhuanet.com/v/vod.html?vid=536626(通用地址)\n# 其中第二种为通用视频地址,第一种页面内嵌套第二种地址\n#\n# *不重要的说明:\n# 工具主要用于新华网视频下载,主要是视频下载,所以,就对一般爬虫而言,\n# 即便出现第一种网页也会在第一种网页内搜集到第二种网页地址作为该网页的视频地址进行统一,也方便去重\n# 所以这里即便有对第一种网页实现下载功能,这也只是一种并不是很有必要的扩展,不过说不定以后别人什么时候也能用上\n# 毕竟新闻的比重很多,而且解析也比第二种要方便\n#\n# 指定存放文件地址方法:\n# youtube-dl 如果想要指定文件存放地址的话,请使用它的命令行语法:%()s\n# eg. > youtube-dl -o \"./video/%(id)s.%(ext)s\" http://vod.xinhuanet.com/v/vod.html?vid=536626\n# 例如上面这行命令会将视频下载到当前路径的video目录下面并且用 532276.mp4 文件名存放\n# 目前我知道的 %()s 语法内必然存在的有 id title ext。 id title是在函数内算出的,详细看函数即可,\n# 文件扩展名(ext)是工具自动算出来的\n#\n\"\"\"\n\n\n\nclass XinHuaNetIE(InfoExtractor):\n IE_NAME = 'xinhuanet'\n IE_DESC = 'xinhuanet video downloader.'\n _VALID_URL = 'http://vod\\.xinhuanet\\.com/v/vod\\.html\\?vid=\\d+'\n\n #==================#\n # 通用地址视频下载 #\n #==================#\n # eg. http://vod.xinhuanet.com/v/vod.html?vid=536626\n def _real_extract(self, url):\n def get_real_url_by_simple_url(self, url):\n def _mk_gcid_url(url):\n u = 'http://vod.xinhuanet.com/vod_video_js/%d/%d.js'\n v = int(url.rsplit('=')[1])\n uid = str(v)\n v = u % (int(v/1000),v) \n # 混肴后的js源代码里面的函数方法:\n # 源代码:\"http://vod.xinhuanet.com/vod_video_js/\"+Math.floor(parseInt(a)/1E3)+\"/\"+a+\".js\";\n # 源代码里面的 a 代表了该视频页的数字vid。 通过这个地址进一步获取 gcid 信息后续再继续处理。\n return v,uid\n\n gurl, uid = _mk_gcid_url(url)\n page = self._download_webpage(gurl, uid)\n gcid = re.findall('http://pubnet.xinhuanet.net:8080/40/([^/]{40})',page)\n\n if gcid: gcid = gcid[0].upper()\n else: return 'gcid failed,'+gurl,None\n\n # 通过 gcid 拼接下面的地址可以获取到一些类似js脚本的HTML文本,里面有真实的视频url地址信息\n url = 'http://p2s.xinhuanet.com/getCdnresource_flv?gcid=' + gcid\n page = self._download_webpage(url, uid)\n vurl = re.findall('{ip:\"([^\"]+)\",port:(\\d+),path:\"([^\"]+)\"}',page)\n\n if vurl: vurl = vurl[0]\n else: return 'get realurl failed,'+url,None\n\n def _get_real_url(vurl):\n # 对通过 gcid 获取的文本找到的信息拼接视频真实 url.\n # vurl 数据样例: ('vodfile12.news.cn', '8080', '/data/cdn_transfer/5C/FB/5cf3d08723ca6a481eb81a572505af7dcca381fb.mp4')\n # 这里没有使用port参数原因是测试得出的结论(8080端口无法使用,用默认80端口就能获取视频,所以不需要使用该port)\n url = 'http://' + vurl[0] + '/' + vurl[2]\n return url\n return uid, _get_real_url(vurl)\n uid, url = get_real_url_by_simple_url(self, url)\n\n # 对于 youtube-dl 工具而言,该函数返回的参数是一个字典,至少要包括 id,title,url 这三个key的字典。工具会通过这个视频真实url进行下载。\n # 另外,通过给与的url: http://.../vod.html?vid=532276 这个视频地址无法获取更多视频详细信息,甚至连标题信息都没有\n # 所以我把 title和 id都赋值为uid了,下载文件的名字默认为 \"%s-%s\" % (id, title)\n # eg. 
http://vod.xinhuanet.com/v/vod.html?vid=532276 这个地址会下载到文件名字为 \"532276-532276.mp4\" 的视频文件\n        if url:\n            return {\n                'id': uid,\n                'title': uid,\n                'url': url,\n            }\n        else:\n            # 如果获取真实url失败,则我写的代码里,uid里面回传了简单的错误信息\n            # 主要可能是视频不存在了,如果有其他异常可能是正则匹配问题,会打印一下问题出现的地址\n            print(uid)\n\n\nclass XinHuaNet2IE(InfoExtractor):\n    IE_NAME = 'xinhuanet'\n    IE_DESC = 'xinhuanet video downloader.'\n    _VALID_URL = 'https?://www\\.xinhuanet\\.com/video/[^/]+/[^/]+/(?P<id>[^\\.]+)\\.htm'\n\n    #================#\n    # 新闻页视频下载 #\n    #================#\n    # eg. http://www.xinhuanet.com/video/2018-08/28/c_129941452.htm\n    def _real_extract(self, url):\n        # 原本计划是从该新闻页面找到嵌套在里面的视频页,然后通过视频页解析方法进一步获取真实地址的\n        # 结果发现该页面上直接包含了真实的视频url地址,这设计很真实。真实~ 实在是太真实了~,连跳板都省了。\n        m = re.match(self._VALID_URL, url)\n        uid = m.group('id')\n        page = self._download_webpage(url,uid)\n        r_uid = re.findall('http://vod\\.xinhuanet\\.com/v/vod\\.html\\?vid=(\\d+)',page)\n        r_url = re.findall('(?:vodfile)[^<]+(?:mp4)',page)\n        if r_uid and r_url:\n            uid,url = r_uid[0],'http://' + r_url[0]\n            return {\n                'id': uid,\n                'title': uid,\n                'url': url,\n            }\n        else:\n            print(\"failed download.\", url)\n","sub_path":"youtube_dl/extractor/xinhuanet.py","file_name":"xinhuanet.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"387366767","text":"\"\"\"empty message\n\nRevision ID: 7d680d9205fd\nRevises: 24828d6ba633\nCreate Date: 2017-07-04 00:49:47.074064\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '7d680d9205fd'\ndown_revision = '24828d6ba633'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('players', sa.Column('pitcher_bs', sa.Float(), nullable=True))\n    op.add_column('players', sa.Column('pitcher_cg_per_gs', sa.Float(), nullable=True))\n    op.add_column('players', sa.Column('pitcher_hold', sa.Float(), nullable=True))\n    op.add_column('players', sa.Column('pitcher_shutout_per_gs', sa.Float(), nullable=True))\n    op.add_column('score', sa.Column('pitcher_cg', sa.Float(), nullable=True))\n    op.add_column('score', sa.Column('pitcher_shutout', sa.Float(), nullable=True))\n    op.drop_column('score', 'pitcher_cg_per_gs')\n    op.drop_column('score', 'pitcher_shutout_per_gs')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('score', sa.Column('pitcher_shutout_per_gs', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))\n op.add_column('score', sa.Column('pitcher_cg_per_gs', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))\n op.drop_column('score', 'pitcher_shutout')\n op.drop_column('score', 'pitcher_cg')\n op.drop_column('players', 'pitcher_shutout_per_gs')\n op.drop_column('players', 'pitcher_hold')\n op.drop_column('players', 'pitcher_cg_per_gs')\n op.drop_column('players', 'pitcher_bs')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/7d680d9205fd_.py","file_name":"7d680d9205fd_.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"448631346","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 20 11:02:40 2020\n\n@author: ebaccourepbesaid\n\"\"\"\n\nimport random\nimport math\nimport cmath\nimport json\nimport gym\nfrom gym import spaces\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\nimport csv\nfrom utility import * \nfrom random import seed\nfrom random import randint\nimport control\n\nclass NetworkEnv(gym.Env):\n metadata = {'render.modes': ['human']}\n\n def __init__(self):\n super(NetworkEnv, self).__init__()\n print(\"init\")\n self.M=5\n self.L=300000\n self.SFs=6\n self.K=self.SFs*self.M\n self.sp=10\n self.dmax=500\n self.N0=1.6*10**(-12)\n self.siDB=0 #-10 \n fc=2.5*10**9\n d0=1\n alphaa=3.7\n g0=(3*10**8/(4*np.pi*d0*fc))**2\n self.Tout=10**-3\n self.SI0=control.db2mag(self.siDB)\n self.H=math.sqrt(1/2)*(np.random.randn(self.K,self.M,self.L)+np.random.randn(self.K,self.M,self.L)*1j)\n #math.sqrt(1/2)*complex(np.random.randn(self.K,self.M,self.L),np.random.randn(self.K,self.M,self.L))\n self.d=np.random.randint(self.dmax/10,self.dmax,size=self.K)\n self.d=g0*self.d**(-alphaa)\n self.d=self.d**(1/2)\n self.D=np.diag(self.d) \n self.action_space = spaces.Discrete(self.SFs)#MultiDiscrete([self.M,self.SFs]) \n self.observation_space = spaces.Box(\n low=0, high=1.0, shape=(1,30), dtype=np.float32) \n self.current_step = 0 #steps in the episode\n self.episode_reward=0 # episode reward\n self.episode=0 # epised\n self.totalEnergy=0\n self.resultsCsv=[] \n #self.data = getDataList() # Get the created data\n self.rewards=0\n self.assignedLD=np.zeros(self.M)\n self.assignedSF=np.ones((self.M,self.SFs))\n self.maxLdPerChannel=6 # should be equal to SFs => check\n #self.states=\n def _next_observation(self): \n self.cur_state=np.expand_dims(np.concatenate(self.assignedSF, axis=None),0) \n #print(self.assignedSF)\n return self.cur_state \n\n def _take_action(self, action):\n phy=1 # reset the reward in the time step\n self.assignedLD[action[0]]=self.assignedLD[action[0]]+1 \n if self.assignedLD[action[0]]>self.maxLdPerChannel:\n phy=phy*0\n if self.assignedSF[action[0]][action[1]]==0: \n phy=phy*0\n else:\n self.assignedSF[action[0]][action[1]]=0\n self.stepRewards= phy \n self.stepEnergy=-getEnergy(self.current_step,action[0],action[1],self.episode,self.H,self.D,self.N0,self.SI0,self.Tout)*0.1\n\n\n def step(self, predictedAction):\n self._take_action(predictedAction)\n print(str(self.episode)+': '+str(self.current_step)+': '+str(predictedAction)+ ':'+ str(self.stepRewards))\n self.totalEnergy=self.totalEnergy+(self.stepEnergy) #penalty of the episode\n reward=self.stepEnergy+self.stepRewards #Reward of the episode: rewards + penalty\n 
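# Accumulate the per-step terms: energy acts as the penalty here, the\n        # constraint-respect signal as the reward proper.\n        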
self.episode_reward=self.episode_reward+self.stepRewards #only rewards are counted\n self.current_step=self.current_step+1 #increment for the next step\n done=False\n self.render(\"human\")\n if(self.current_step % self.K ==0):\n self.current_step=0\n obs=self.reset()\n # if(self.episode>=len(self.data)-1):\n # self.reset()\n # done=True\n obs = self._next_observation()\n\n return obs, reward, done, {}\n\n def reset(self):\n self.resultsCsv.append([self.episode,self.episode_reward,self.totalEnergy,self.episode_reward+self.totalEnergy]) \n with open(\"PPO_sigma_0.csv\", \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(self.resultsCsv)\n self.episode_reward=0 # reset reawrds\n self.episode=self.episode+1 # pass to the nex episode\n self.current_step=0 # reset step\n self.totalEnergy=0\n self.assignedLD=np.zeros(self.M)\n self.assignedSF=np.ones((self.M,self.SFs))\n return self._next_observation()\n\n def render(self, mode='human'):\n if(self.current_step % self.K ==0):\n print(\"Episode: \"+ str(self.episode)+ \", constraints respect: \"+ str(self.episode_reward)+\", Total energy (penalties): \"+str(self.totalEnergy) + \", Total Rewards: \"+str(self.episode_reward+self.totalEnergy))\n print(\"------------\")\n\n\n","sub_path":"Env.py","file_name":"Env.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"157211158","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 23 12:58:01 2021\n\n@author: Alfonso S.A\n\"\"\"\n\nimport os #Module to import data\nos.chdir(\"//home//usuario//Desktop//Simbolos\")\nos.getcwd()\nimport matplotlib.pyplot as plt\nimport numpy \nimport pandas as pd \nimport random\nimport math\nfrom scipy.stats import skew\nimport time\n\nt1=time.time()\nprint(t1)\n#This function calculates epsilon\\dotx\ndef VecP(M,x,p):\n r=M.shape\n n=r[0]\n k=r[1]\n #print(r)\n \n # H1=numpy.zeros((n,k-1))\n # print(H1)\n HM=numpy.ones((n,k))\n # H1=M[:,1:]-M[:,0:k-1]\n #y=numpy.zeros(n)\n #print(M[:,1:],M[:,0:k-1])\n #print(HM,H1)\n for i in range(0,n):\n #print(M[i,:].mean(),H1.mean()) \n HM[i,:]=p[i]*HM[i,:]\n #print(HM) \n M1=(HM-M[:,:])/M[:,:] \n #print(M1)\n y=x[0]*M1[0,:] \n for i in range(1,n):\n #y=y+M1[i,:]\n y=y+x[i]*M1[i,:]\n return y\n\n#This function calculates the close price matrix\ndef ddtt(n):\n \n r=numpy.zeros(n)\n \n for i in range(0,n):\n \n fi='Data'+str(i)+'.csv'\n \n datai=pd.read_csv(fi,header=0)\n yi=datai['close']\n #ki=yi[:5]\n #print(ki)\n r[i]=len(yi)\n\n k=int(numpy.min(r))\n k=252\n \n #print(k)\n#k=5\n h=numpy.zeros((n,k))\n #print(h(1,:))\n for i in range(0,n):\n \n fi='Data'+str(i)+'.csv'\n filenamei=fi\n datai=pd.read_csv(filenamei,header=0)\n yi=datai['close']\n h[i,:]=yi[:k]\n #h(i,:)=yi[:5]\n #print(h[i,:])\n #r[i]=len(yi)\n return h\n\n#P1=ddtt(2)\n#print(P1) \n#x=numpy.array([2.0,4.0])\n#p=numpy.array([50.0,25.0])\n#P2=VecP(P1,x,p)\n#print(P2)\n\ndef GWO(lb, ub, dim, SearchAgents_no,Max_iter,eta,ups,pA):\n \n MP=ddtt(dim)#Cada fila representa los precios de cierre de un simbolo\n \n \n # Numero de activos\n # Max_iter=1000\n # lb=-100\n # ub=100\n # dim=30\n # SearchAgents_no=5\n\n # initialize alpha, beta, and delta_pos\n Alpha_pos = numpy.zeros(dim)\n Alpha_score = float(\"inf\")\n\n Beta_pos = numpy.zeros(dim)\n Beta_score = float(\"inf\")\n\n Delta_pos = numpy.zeros(dim)\n Delta_score = float(\"inf\")\n\n if not isinstance(lb, list):\n lb = [lb] * dim\n if not isinstance(ub, list):\n ub = [ub] * 
dim\n\n # Initialize the positions of search agents\n Positions = numpy.zeros((SearchAgents_no, dim))\n for i in range(dim):\n Positions[:, i] = (\n numpy.random.uniform(0, 1, SearchAgents_no) * (ub[i] - lb[i]) + lb[i]\n )\n\n Convergence_curve = numpy.zeros(Max_iter)\n class solution:\n def __init__(self):\n self.best = 0\n self.bestIndividual = []\n self.convergence = []\n self.optimizer = \"\"\n self.objfname = \"\"\n self.startTime = 0\n self.endTime = 0\n self.executionTime = 0\n self.lb = 0\n self.ub = 0\n self.dim = 0\n self.popnum = 0\n self.maxiers = 0\n\n s = solution()\n #print(s)\n # Loop counter\n #print('GWO is optimizing \"' + objf.__name__ + '\"')\n\n timerStart = time.time()\n s.startTime = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n #print(s.best,'best')\n #print(s.bestIndividual,'bestIndividual')\n #print(s.convergence,'convergence')\n #print(s.optimizer,'optimizer')\n #print(s.objfname,'objfname')\n #print(s.startTime,'startTime')\n #print(s.endTime,'endTime')\n #print(s.executionTime,'executionTime')\n #print(s.lb,'lb')\n #print(s.ub,'ub')\n #print(s.dim,'dim')\n #print(s.popnum,'popnum')\n #print(s.maxiers,'maxiers')\n # Main loop\n for l in range(0, Max_iter):\n for i in range(0, SearchAgents_no):\n\n # Return back the search agents that go beyond the boundaries of the search space\n for j in range(dim):\n Positions[i, j] = numpy.clip(Positions[i, j], lb[j], ub[j])\n \n #print(pA)\n #print(MP)\n # Calculate objective function for each search agent\n Positions[i,:]=Positions[i,:]/sum(Positions[i,:])\n eps=VecP(MP,Positions[i, :],pA) \n #print(eps) \n fitness = eps.var()\n \n #print('fitness',fitness)\n sk=skew(eps)-eta\n \n mean=eps.mean()-ups\n #print(mean,'media')\n \n #print(buni)\n if sk<0:\n fitness=fitness+10**10.0\n if mean<0:\n fitness=fitness+10**10.0 \n \n \n \n \n\n # Update Alpha, Beta, and Delta\n if fitness < Alpha_score:\n Delta_score = Beta_score # Update delte\n Delta_pos = Beta_pos.copy()\n Beta_score = Alpha_score # Update beta\n Beta_pos = Alpha_pos.copy()\n Alpha_score = fitness\n # Update alpha\n Alpha_pos = Positions[i, :].copy()\n\n if fitness > Alpha_score and fitness < Beta_score:\n Delta_score = Beta_score # Update delte\n Delta_pos = Beta_pos.copy()\n Beta_score = fitness # Update beta\n Beta_pos = Positions[i, :].copy()\n\n if fitness > Alpha_score and fitness > Beta_score and fitness < Delta_score:\n Delta_score = fitness # Update delta\n Delta_pos = Positions[i, :].copy()\n\n a = 2 - l * ((2) / Max_iter)\n # a decreases linearly fron 2 to 0\n\n # Update the Position of search agents including omegas\n for i in range(0, SearchAgents_no):\n for j in range(0, dim):\n\n r1 = random.random() # r1 is a random number in [0,1]\n r2 = random.random() # r2 is a random number in [0,1]\n\n A1 = 2 * a * r1 - a\n # Equation (3.3)\n C1 = 2 * r2\n # Equation (3.4)\n\n D_alpha = abs(C1 * Alpha_pos[j] - Positions[i, j])\n # Equation (3.5)-part 1\n X1 = Alpha_pos[j] - A1 * D_alpha\n # Equation (3.6)-part 1\n\n r1 = random.random()\n r2 = random.random()\n\n A2 = 2 * a * r1 - a\n # Equation (3.3)\n C2 = 2 * r2\n # Equation (3.4)\n\n D_beta = abs(C2 * Beta_pos[j] - Positions[i, j])\n # Equation (3.5)-part 2\n X2 = Beta_pos[j] - A2 * D_beta\n # Equation (3.6)-part 2\n\n r1 = random.random()\n r2 = random.random()\n\n A3 = 2 * a * r1 - a\n # Equation (3.3)\n C3 = 2 * r2\n # Equation (3.4)\n\n D_delta = abs(C3 * Delta_pos[j] - Positions[i, j])\n # Equation (3.5)-part 3\n X3 = Delta_pos[j] - A3 * D_delta\n # Equation (3.5)-part 3\n\n Positions[i, j] = (X1 + X2 + X3) 
/ 3 # Equation (3.7)\n\n Convergence_curve[l] = Alpha_score\n\n if l % 1 == 0:\n print(\n [\"At iteration \" + str(l) + \" the best fitness is \" + str(Alpha_score)]\n )\n\n timerEnd = time.time()\n s.endTime = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n s.executionTime = timerEnd - timerStart\n s.convergence = Convergence_curve\n s.optimizer = \"GWO\"\n #s.objfname = objf.__name__ \n s.bestIndividual=Alpha_pos\n # print(s.best,'best')\n # print(s.bestIndividual,'bestIndividual')\n # print(s.convergence,'convergence')\n #print(s.optimizer,'optimizer')\n #print(s.objfname,'objfname')\n #print(s.startTime,'startTime')\n #print(s.endTime,'endTime')\n #print(s.executionTime,'executionTime')\n #print(s.lb,'lb')\n #print(s.ub,'ub')\n #print(s.dim,'dim')\n #print(s.popnum,'popnum')\n #print(s.maxiers,'maxiers')\n \n return Alpha_pos\ndim=100\nhh=ddtt(dim)\ny=hh[:,251]\n#print(y)\n#GWO(lb, ub, dim, SearchAgents_no,Max_iter,eta,ups,pA): \nsol=GWO(0.01, 1.0, dim, 100, 1000,0.1,0.1,y)\nprint(sol)\n\ndef ddtt2(n): \n r=numpy.zeros(n) \n for i in range(0,n): \n fi='Data'+str(i)+'.csv'\n #filenamei=fi\n datai=pd.read_csv(fi,header=0)\n yi=datai['close']\n r[i]=len(yi)\n\n k=int(numpy.min(r))\n \n h=numpy.zeros((n,k))\n for i in range(0,n):\n \n fi='Data'+str(i)+'.csv'\n #filenamei=fi\n datai=pd.read_csv(fi,header=0)\n yi=datai['close']\n h[i,:]=yi[:k]\n \n return h\n\n\n\n\nBT=ddtt2(dim)\nBT0=BT[:,:252]\nyBT0=BT[:,253]\n\nepBT0=VecP(BT0,sol,yBT0)\n\n\nBT1=BT[:,253:283]\nyBT=BT[:,283]\n\nepBT=VecP(BT1,sol,yBT)\n#Esperanza, riesgo y oblicuidad calculados con la estrategia empleada\nprint(epBT0.mean())\nprint(epBT0.var())\nprint(skew(epBT0))\n#Esperanza, riesgo y oblicuidad calculados con la estrategia empleada\nprint(epBT.mean())\nprint(epBT.var())\nprint(skew(epBT))\n\nt2=time.time()\n\nprint((t2-t1)/60)\n\n","sub_path":"OptPort-EVS-VES-SEV/PortGWOA-VSE.py","file_name":"PortGWOA-VSE.py","file_ext":"py","file_size_in_byte":8727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"300193810","text":"#!/usr/bin/env python3\n\nimport json\nimport copy\nimport argparse\nimport re\nimport codecs\nimport copy\nimport readline\nimport getpass\nfrom helper_functions import *\nfrom logging.handlers import RotatingFileHandler\n\n\nlog_formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\nlogFile = 'poolinfo.log'\nfile_handler = RotatingFileHandler(logFile, mode='a', maxBytes=1024 * 1024,\n backupCount=1, encoding=None, delay=0)\nfile_handler.setFormatter(log_formatter)\nfile_handler.setLevel(logging.INFO)\nlog = logging.getLogger('root')\nlog.setLevel(logging.INFO)\nlog.addHandler(file_handler)\nconsole_handler=logging.StreamHandler()\nconsole_handler.setLevel(logging.ERROR)\nlog.addHandler(console_handler)\n\npattern1 = re.compile(r'\\\\u251c[^ ]*')\npattern2 = re.compile(r'\\\\u25cf[^ ]*')\npattern3 = re.compile(r'\\\\u2514[^ ]*')\npool_handle = None\nwallet_handle = None\n\ndef remove_json_cruft(line):\n line = codecs.escape_decode(line)[0].decode('ascii', 'ignore')\n line = pattern1.sub(r\"\", line)\n line = pattern2.sub(r\"\", line)\n line = pattern3.sub(r\"\", line)\n line = line.replace(r'\\/', '/') # removes \\ on \\/\n line = line.replace('\\n', ' ') # json parser chokes on \\n, \\t\n line = line.replace('\\t', ' ')\n return line\n\n\ndef get_validator_info(pool, wallet, walletKey, did, genesisFile = None, didSeed = None):\n\n global pool_handle, wallet_handle\n looper = asyncio.get_event_loop()\n looper.run = looper.run_until_complete\n\n 
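# The pool and wallet handles are cached in module-level globals so the\n    # interactive \"reload\" command reuses the connections opened here.\n    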
if pool_handle is None:\n        pool_handle = looper.run(open_pool(pool, genesisFile))\n    if wallet_handle is None:\n        wallet_handle = looper.run(open_wallet(wallet, walletKey))\n    if looper.run(get_did_from_wallet(wallet_handle, did)) is None:\n        if didSeed is None:\n            log.error(\"DID '{}' does not exist in wallet '{}'. A seed must be provided.\".format(did, wallet))\n            sys.exit(1)\n        else:\n            looper.run(store_did(wallet_handle, didSeed))\n\n    v_i_request = looper.run(ledger.build_get_validator_info_request(did))\n    vi = looper.run(ledger.sign_and_submit_request(pool_handle,wallet_handle,did,v_i_request))\n    jsonStrings = json.loads(vi)\n    parsedJson = {}\n    for key,value in jsonStrings.items():\n        value = remove_json_cruft(value)\n        if value == 'timeout':\n            print(\"Warning: Node '{}' is unreachable and will be excluded.\".format(key))\n        else:\n            parsedValue = json.loads(value)\n            if 'result' in parsedValue:\n                parsedJson[key] = parsedValue['result']['data']\n            elif 'reason' in parsedValue and 'UnauthorizedClientRequest' in parsedValue['reason']:\n                print(\"Error: You must use steward keys to execute this script\")\n                sys.exit(1)\n            else:\n                print(\"Warning: Status for {} will be excluded due to unexpected result: {}\".format(key, value))\n    log.info(json.dumps(parsedJson))\n    return parsedJson\n\ndef parse_inputs():\n    parser = argparse.ArgumentParser(\n        description='Get validator-info on a pool, and make an interpretive interactive shell.')\n    parser.add_argument('pool', help='The name of the pool to connect to')\n    parser.add_argument('wallet', help='The name of the wallet to use')\n    parser.add_argument('did', help='DID of steward')\n    parser.add_argument('--genesisFile', help='Needed if the pool has not been previously initialized.')\n    parser.add_argument('--didSeed', action='store_true', help='Prompt for seed. Use if keys are not yet in your wallet')\n    parser.add_argument('--infoFile', help='Get validator info from this JSON file, instead of querying pool. 
Use of this option means that all other arguments are ignored')\n args = parser.parse_args()\n\n return args\n\ndef add_branch(path, full_branch, destination):\n source_branch = full_branch\n dest_branch = destination\n for step in path:\n try:\n source_branch = source_branch[step]\n except:\n print(\"Invalid field requested: {}\".format(':'.join(path)))\n return\n if step == path[-1]:\n dest_branch[step] = copy.copy(source_branch)\n elif step not in dest_branch.keys():\n dest_branch[step] = {}\n dest_branch = dest_branch[step]\n\ndef make_pruned_tree(field_array, node, full_branch):\n destination = {}\n for field in field_array:\n if field == 'transCount':\n add_branch(['Node_info','Metrics','transaction-count'], full_branch, destination)\n elif field == 'reachable':\n add_branch(['Pool_info','Reachable_nodes_count'], full_branch, destination)\n add_branch(['Pool_info','Unreachable_nodes_count'], full_branch, destination)\n add_branch(['Pool_info','Unreachable_nodes'], full_branch, destination)\n elif field == 'version':\n add_branch(['Software','indy-node'], full_branch, destination)\n add_branch(['Software','sovrin'], full_branch, destination)\n elif field == 'primary':\n add_branch(['Node_info','Replicas_status','{}:0'.format(node),'Primary'], full_branch, destination)\n else:\n add_branch(field.split('/'), full_branch, destination)\n return destination\n\ndef find_and_print(info, fields, nodes):\n field_array = fields.split(',')\n node_array = nodes.split(',')\n if 'all' in node_array:\n node_array =info.keys()\n\n pruned = {}\n for node in node_array:\n if 'all' in field_array: # no need to look further if 'all' fields are requested\n pruned[node] = info[node]\n else:\n if info[node] == 'timeout':\n pruned[node] = info[node]\n else:\n pruned[node] = make_pruned_tree(field_array, node, info[node])\n print(json.dumps(pruned, sort_keys=True, indent=4))\n\n\ndef print_help():\n print('The available commands are nodes, show, save, reload, help, and quit.')\n print('First use the nodes command to set for which nodes the stats should be displayed.')\n print(' Example: \"> nodes validator01,validator02\"')\n print(' \"nodes all\" will display info for all nodes in the pool. (This is the default)')\n print('Then use show to generate the output. A parameter is required to select which fields to display for each node selected.')\n print(' Options for show include \"all\", \"transCount\", \"reachable\", version and \"primary\". 
You can also give an arbitrary field using slashes.')\n print(' Example: \"> show transCount\"')\n print(' Example: \"> show Pool_info/Total_nodes_count\"')\n print(' A comma can be used to delimit multiple fields to display')\n print(' Example: \"> show transCount,primary\"')\n print('If at some time you want to query the pool to update the status information, use the \"reload\" command.')\n print('To save the data retrieved from the ledger for later offline analysis with this or other tools, use save.')\n print(' Example: > save myfile.json')\n print(\"\")\n\ncommands = ['nodes', 'show', 'save', 'reload', 'help', 'quit']\ninfo = {}\nnothing = []\n\ndef completer(text, state):\n line = readline.get_line_buffer()\n parts = line.split(' ')\n part_count = len(parts)\n log.debug('Line: \"{}\", parts: {}'.format(line, part_count))\n if part_count == 1:\n log.debug('part0=\"{}'.format(parts[0]))\n options = [x for x in commands if x.startswith(text)]\n elif part_count == 2:\n log.debug('part0=\"{}, part1=\"{}\"'.format(parts[0], parts[1]))\n nodes = list(info.keys())\n if parts[0] == 'show':\n log.debug('c6')\n field = parts[1].split(',')[-1]\n log.debug('Checking for field {}'.format(field))\n subparts = field.split('/')\n treepointer = info[nodes[0]]\n for subpart in subparts[0:-1]:\n log.debug('Looking for completions to {} in {}'.format(subpart, json.dumps(list(treepointer.keys()))))\n try:\n treepointer = treepointer[subpart]\n except:\n options = [x for x in nothing if x.startswith(text)]\n if len(subparts) == 1:\n mylist = list(treepointer.keys())\n mylist.append('all')\n mylist.append('transCount')\n mylist.append('reachable')\n mylist.append('version')\n mylist.append('primary')\n options = [x for x in mylist if x.startswith(text)]\n else:\n options = [x for x in treepointer.keys() if x.startswith(text)]\n elif parts[0] == 'nodes':\n log.debug('c7')\n options = [x for x in nodes if x.startswith(text)]\n else:\n log.debug('c8')\n options = [x for x in nothing if x.startswith(text)]\n else:\n log.debug('c9')\n options = [x for x in nothing if x.startswith(text)]\n try:\n log.debug('returning {}'.format(options[state]))\n return options[state]\n except IndexError:\n log.debug('IndexError thrown')\n return None\n log.debug('Exiting normally')\n\n\nif __name__ == '__main__':\n args=parse_inputs()\n if (args.infoFile):\n print('An input file has been provided. All other arguments will be ignored. 
Loading file...')\n with open(args.infoFile) as infoStream:\n info = json.load(infoStream)\n else:\n if args.didSeed:\n didSeed = getpass.getpass(\"DID seed: \")\n else:\n didSeed = None\n walletKey = getpass.getpass(\"Wallet key: \")\n print('Please be patient while I contact all the nodes in the pool for their status...')\n info = get_validator_info(args.pool, args.wallet, walletKey, args.did, args.genesisFile, didSeed)\n nodes='all'\n action=''\n readline.set_completer(completer)\n readline.parse_and_bind(\"tab: complete\")\n while action != 'quit':\n prompt = '[{}]> '.format(nodes)\n result = input(prompt)\n command = result.split()\n if len(command) == 0:\n continue\n action = command[0]\n if action == 'nodes':\n if len(command) == 2:\n valid_nodes = True\n if command[1] == 'all':\n nodes = 'all'\n else:\n for node in command[1].split(','):\n if node not in info.keys():\n print(\"Unrecognized node '{}'\".format(node))\n valid_nodes = False\n break\n if valid_nodes:\n nodes = command[1]\n else:\n print('Input error: \"nodes\" command requires one argument')\n elif action == 'show':\n if len(command) == 2:\n find_and_print(info, command[1], nodes)\n else:\n print('Input error: \"show\" command requires one argument')\n elif action == 'save':\n if len(command) == 2:\n with open(command[1], 'w') as infoStream:\n json.dump(info, infoStream)\n else:\n print('Input error: \"save\" command requires one argument')\n elif action == 'reload':\n print('Please be patient while I contact all the nodes in the pool for their status...')\n info = get_validator_info(args.pool, args.wallet, walletKey, args.did, args.genesisFile, didSeed)\n elif action == 'help':\n print_help()\n elif action == 'quit':\n pass\n else:\n print(\"Invalid instruction: {}\".format(result))\n print_help()\n","sub_path":"pool_status/pool_status.py","file_name":"pool_status.py","file_ext":"py","file_size_in_byte":11689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"113739575","text":"import tensorflow as tf\n\ntf.compat.v1.disable_v2_behavior()\n\nW = tf.Variable(tf.zeros([2,1]),name=\"weights\")\nb = tf.Variable(0.,name=\"bias\")\n\ndef inference(X): #假设,模型的关键\n return tf.compat.v1.matmul(X,W) + b\n\ndef loss(X,Y):\n y_predicted = inference(X)\n return tf.compat.v1.reduce_sum(tf.compat.v1.squared_difference(Y,y_predicted))\n\ndef train(total_loss):\n learning_rate = 0.00000001\n return tf.compat.v1.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n\ndef evaluate(sess):\n print(\"value:\",sess.run(inference([[80.,25.]])))\n\ndef inputs():\n weight_age = [[84,46],[73,20],[65,52],[70,30],[76,57],[69,25],[63,28],\n [72,36],[63,28],[72,36],[79,57],[75,44],\n [27,24],[89,31],[65,52],[57,23],[59,60],\n [69,48],[60,34],[79,51],[75,50],[82,34],[59,46],[67,23],\n [85,37],[55,40],[63,30]]\n blood_fat_content = [354,190,405,263,451,302,288,\n 385,402,365,209,290,346,254,395,434,220,\n 374,308,220,311,181,274,303,244]\n return tf.compat.v1.to_float(weight_age),tf.compat.v1.to_float(blood_fat_content)\n\n\nwith tf.compat.v1.Session() as sess:\n tf.compat.v1.initialize_all_variables().run()\n X,Y = inputs()\n total_loss = loss(X,Y)\n train_op = train(total_loss)\n coord = tf.compat.v1.train.Coordinator\n training_steps = 100000\n for step in range(training_steps):\n sess.run([train_op])\n if step %1000 == 0:\n print(\"loss:\",sess.run([total_loss]))\n print(W.eval())\n # coord.request_stop()\n # coord.join(threads=threads)\n 
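# --- Illustrative aside ---
# The TF1 loop in linearregression.py above fits blood_fat ~ X.W + b by gradient
# descent; NumPy's least squares gives a closed-form cross-check (hypothetical
# helper, reusing three of the file's data points):
import numpy as np

def fit_linear(X, y):
    Xb = np.hstack([X, np.ones((X.shape[0], 1))])   # append a bias column
    coef, *_ = np.linalg.lstsq(Xb, y, rcond=None)
    return coef[:-1], coef[-1]                      # (weights, bias)

X = np.array([[84., 46.], [73., 20.], [65., 52.]])
y = np.array([354., 190., 405.])
W, b = fit_linear(X, y)
print(W, b)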
sess.close()\n\n\n\n","sub_path":"tfbasic/linearregression.py","file_name":"linearregression.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"25125841","text":"# You have a list of words and a pattern, and you want to know which words in wo\n# rds matches the pattern. \n# \n# A word matches the pattern if there exists a permutation of letters p so that\n# after replacing every letter x in the pattern with p(x), we get the desired wor\n# d. \n# \n# (Recall that a permutation of letters is a bijection from letters to letters:\n# every letter maps to another letter, and no two letters map to the same letter.\n# ) \n# \n# Return a list of the words in words that match the given pattern. \n# \n# You may return the answer in any order. \n# \n# \n# \n# \n# Example 1: \n# \n# \n# Input: words = [\"abc\",\"deq\",\"mee\",\"aqq\",\"dkd\",\"ccc\"], pattern = \"abb\"\n# Output: [\"mee\",\"aqq\"]\n# Explanation: \"mee\" matches the pattern because there is a permutation {a -> m,\n# b -> e, ...}. \n# \"ccc\" does not match the pattern because {a -> c, b -> c, ...} is not a permut\n# ation,\n# since a and b map to the same letter. \n# \n# \n# \n# Note: \n# \n# \n# 1 <= words.length <= 50 \n# 1 <= pattern.length = words[i].length <= 20 \n# \n# \n# Related Topics String\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution(object):\n def findAndReplacePattern(self, words, pattern):\n \"\"\"\n :type words: List[str]\n :type pattern: str\n :rtype: List[str]\n \"\"\"\n n = len(pattern)\n ans = []\n for word in words:\n w2p = {}\n p2w = {}\n i = 0\n while i= abs(i[5] - i[4]) and abs(i[2] - i[0]) + abs(i[3] - i[1]) > i[4]:\n color.append('r')\n else:\n color.append('b')\n\n# MAIN LOOP\nsns.jointplot(possibleRides['xend'], possibleRides['yend'], kind='hex', xlim=[-0.1*rows, rows*1.1], ylim=[-0.1*rows, columns*1.1])\nplt.suptitle(FILE+' destinations')\n\nsns.jointplot(possibleRides['xstart'], possibleRides['ystart'], kind='hex', xlim=[-0.1*rows, rows*1.1], ylim=[-0.1*rows, columns*1.1])\nplt.suptitle(FILE+' origins')\n\nplt.figure()\nplt.subplot(211)\nplt.scatter(possibleRides['xstart'], possibleRides['ystart'], alpha=0.2, c=color)\nplt.title('Origins')\nplt.xlim([-0.1*rows, rows*1.1])\nplt.ylim([-0.1*rows, columns*1.1])\nplt.subplot(212)\nplt.scatter(possibleRides['xend'], possibleRides['yend'], alpha=0.2, c=color)\nplt.title('Destinations')\nplt.xlim([-0.1*rows, rows*1.1])\nplt.ylim([-0.1*rows, columns*1.1])\nplt.tight_layout()\n\nplt.figure()\nplt.plot([possibleRides['xstart'], possibleRides['xend']], [possibleRides['ystart'], possibleRides['yend']], alpha=0.1)\nplt.scatter(possibleRides['xstart'], possibleRides['ystart'], marker='o', s=.5)\nplt.title(FILE+' routes')\n\nplt.figure()\nplt.subplot(211)\nsns.kdeplot(abs(possibleRides['xend']-possibleRides['xstart']) + abs(possibleRides['yend'] - possibleRides['ystart']))\nplt.title('Route distance distribution')\nplt.subplot(212)\nsns.kdeplot(possibleRides['stop']-possibleRides['start'])\nplt.title('Route maxtime distribution')\nplt.tight_layout()\n\nplt.show()","sub_path":"Google Hash/vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"1990823","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport parameters as PA\nimport mvt_gene.class_genetics as CG\nimport config as cfg\n#import communication as 
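# --- Illustrative aside ---
# An equivalent, more compact way to test the word/pattern bijection described in
# the findAndReplacePattern record above (an alternative formulation, not the
# record's own code): map each string to its sequence of first-occurrence indices;
# two strings match exactly when their canonical forms agree.
def canonical(s):
    first_seen = {}
    return [first_seen.setdefault(ch, len(first_seen)) for ch in s]

def find_and_replace_pattern(words, pattern):
    target = canonical(pattern)
    return [w for w in words if canonical(w) == target]

assert find_and_replace_pattern(["abc", "deq", "mee", "aqq"], "abb") == ["mee", "aqq"]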
co\n\ndef main() :\n\n gen = CG.generation(PA.SIZE_G, PA.SIZE_I)\n gen.ran_gen()\n for i in range(PA.NB_RUN):\n run_gene(gen)\n\n\ndef run_gene(gen) :\n\n scalar=0\n for ind in gen.liste :\n\n # on cree le fichier correspondant à l'individu, il sera envoye par le module de communication ( par main_ind )\n cfg.configure.ind_file(ind) #\n\n\n # Ici on est cense recuperer la distance parcouru par le robot, elle est renvoyee par le main_ind du module communication\n # scalar = co.main_ind('ind.bin')\n\n PA.FCT_EVAL(ind,scalar) # On evalue l'individu avec la fonction d'evaluation indiquee par le module parameters.py\n\n gen.next_gene(PA.FCT_ACC,PA.FCT_MUT) # on genere la prochaine generation une fois que tous les individus ont ete evalues\n\n\n","sub_path":"mvt_gene/main_genetics.py","file_name":"main_genetics.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"77204439","text":"#! /user/bin/python\n# -*- coding:utf-8 -*-\nimport math\n\n\ndef draw_pyramid(inv=False, degree=10):\n width = (degree - 1) * 2 + 1\n pyramid = [[0 for col in range(width)] for row in range(degree)]\n\n index = 0\n for h in range(degree):\n index = 0\n if inv is True:\n blank = h * 2 + 1\n star = int((width - blank) / 2)\n else:\n star = h * 2 + 1\n blank = int((width - star) / 2)\n\n # left blank\n for i in range(blank):\n pyramid[h][index] = ' '\n index += 1\n\n # center star\n for j in range(width - (blank * 2)):\n pyramid[h][index] = \"*\"\n index += 1\n\n # right blank\n for k in range(blank):\n pyramid[h][index] = ' '\n index += 1\n\n for h in range(degree):\n for w in range(width):\n print(pyramid[h][w], end=' ')\n print(' ')\n\n\ndef draw_halfpyramid(inv=False, degree=10):\n print('half pyramid')\n\n\ndef draw_whelk(w=10, h=10):\n whelk = [[0 for col in range(w)] for row in range(h)]\n\n num = 1\n lshell = 0\n rshell = 0\n tshell = 0\n bshell = 0\n\n while num <= (w*h):\n # T -> R\n for i in range(lshell, w-rshell):\n whelk[i][tshell] = num\n num += 1\n tshell += 1\n\n # T -> B\n for j in range(tshell, h-bshell):\n whelk[i][j] = num\n num += 1\n rshell += 1\n\n # R -> L\n for k in reversed(range(lshell, w-rshell)):\n whelk[k][j] = num\n num += 1\n bshell += 1\n\n # B -> T\n for l in reversed(range(tshell, h-bshell)):\n whelk[k][l] = num\n num += 1\n lshell += 1\n\n maxnum = 100\n\n for y in range(w):\n for x in range(h):\n if not whelk[x][y] == maxnum:\n print('%3d' % whelk[x][y], end='\\t')\n else:\n print('%d' % whelk[x][y], end='\\t')\n print('')\n\n\nif __name__ == '__main__':\n # draw_whelk(10, 10)\n draw_pyramid(True, 10)\n","sub_path":"quiz/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"105029130","text":"import os\nimport sys\nimport logging\n\nfrom sylvia import log\n\nWORK_DIR = os.path.normpath(os.path.dirname(__file__))\nWEB_DIR = os.path.normpath(os.path.join(WORK_DIR, \"web\"))\nLOG_DIR = os.path.normpath(os.path.join(WORK_DIR, \"log\"))\nTMP_DIR = os.path.normpath(os.path.join(WORK_DIR, \"tmp\"))\nSCRIPT_DIR = os.path.normpath(os.path.join(WORK_DIR, \"script\"))\nREPORT_DIR = os.path.normpath(os.path.join(WORK_DIR, \"report\"))\n\nTMP_REFERENCE_DIR = os.path.join(os.path.join(TMP_DIR, \"reference\"))\nTMP_EVIDENCE_DIR = os.path.normpath(os.path.join(TMP_DIR, \"evidence\"))\nTMP_VIDEO_DIR = os.path.normpath(os.path.join(TMP_DIR, \"video\"))\n\nDEFAULT_FORMAT = 
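# --- Illustrative aside ---
# draw_pyramid() in draw.py above computes blank/star counts by hand; str.center
# reproduces the same centered rows in a couple of lines (a sketch, without the
# original's per-character spacing):
def pyramid(degree=10, inverted=False):
    width = (degree - 1) * 2 + 1
    rows = range(degree - 1, -1, -1) if inverted else range(degree)
    for h in rows:
        print(('*' * (2 * h + 1)).center(width))

pyramid(5)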
'[%(levelname)1.1s %(asctime)s (PID: %(process)d) %(module)s] %(message)s'\nDEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'\n\nLOG = log.Log(\"Grog.Project.SylViA\", format=DEFAULT_FORMAT)\nif not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\nlogfile = os.path.join(LOG_DIR, \"system.log\")\nif not os.path.exists(logfile):\n with open(logfile, 'a') as f:\n os.utime(logfile, None)\n\nLOG.addHandler(log.Log.fileHandler(logfile, DEFAULT_FORMAT, logging.INFO))\n\ndef description(string, cr=True):\n if cr: print()\n LOG.info(string);\n\nDEBUG = True\nTIMEOUT_COUNT=10\nTIMEOUT_LOOP=10\nTAP_THRESHOLD=0.2\n\nclass POINT(object):\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n\n def __repr__(self):\n return \"POINT()\"\n\n def __str__(self):\n return \"(X, Y) = (%s, %s), Width = %s, Height = %s\" \\\n % (str(self.x), str(self.y), str(self.width), str(self.height))\n","sub_path":"project/elizabeth/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"537643611","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = 'SaltyRTC'\nSITENAME = 'SaltyRTC'\nSITEURL = ''\n\nPATH = 'content'\n\nTIMEZONE = 'Europe/Paris'\n\nDEFAULT_LANG = 'en'\n\nTHEME = 'themes/notmyidea'\n\nSTATIC_PATHS = ['static']\nEXTRA_PATH_METADATA = {\n 'static/robots.txt': {'path': 'robots.txt'},\n 'static/favicon-64.png': {'path': 'favicon.ico'},\n}\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Additional menu items\nMENUITEMS = (('Home', '/'),)\n\n# Blogroll\nLINKS = (\n ('Specification', 'https://github.com/saltyrtc/saltyrtc-meta/blob/master/Protocol.md'),\n ('WebRTC', 'https://webrtc.org/'),\n ('ORTC', 'https://ortc.org/'),\n)\nLINKS_WIDGET_NAME = 'Links'\n\n# Social widget\nSOCIAL = (\n ('Twitter', 'https://twitter.com/saltyrtc'),\n ('Github', 'https://github.com/saltyrtc/'),\n)\nSOCIAL_WIDGET_NAME = 'Social'\n\nDEFAULT_PAGINATION = 10\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"491653577","text":"import turtle\nfrom random import randint\n\ndef randPoly(iterations, pen):\n print(\"Hello World\")\n pen.down()\n angle = randint(10,170)\n for i in range(iterations):\n col = (randint(0,128),randint(0,128),randint(0,128))\n pen.pencolor(col)\n fd((iterations // 100)*100 + 100)\n left(angle)\n\npen = turtle.Turtle()\nscreen = turtle.Screen()\n#screen.bgcolor(\"black\")\n\nrandPoly(1000, pen)\n","sub_path":"CS100-RoadMap to CS Python/Crazy.py","file_name":"Crazy.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"489726718","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\ndef getRotation(img, printCircle):\n\n # LIGHT BLUE\n lower = np.array([210, 210, 140], dtype = \"uint8\")\n upper = np.array([255, 255, 200], dtype = \"uint8\")\n mask1 = cv2.inRange(img, lower, upper)\n\n\n edges = cv2.Canny(mask1,100,200)\n cv2.imwrite(\"1.jpg\",mask1)\n 
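# --- Illustrative aside ---
# The same log-to-file setup as utility.py here, expressed with only the standard
# library (swapping out the project's sylvia.log wrapper is an assumption; the
# format strings are the record's own):
import logging

FMT = '[%(levelname)1.1s %(asctime)s (PID: %(process)d) %(module)s] %(message)s'
log = logging.getLogger('Grog.Project.SylViA')
handler = logging.FileHandler('system.log')
handler.setFormatter(logging.Formatter(FMT, datefmt='%y%m%d %H:%M:%S'))
handler.setLevel(logging.INFO)
log.addHandler(handler)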
cv2.imwrite(\"2.jpg\",edges)\n\n # plt.show()\n\n # plt.imshow(mask1)\n # plt.show()\n\n # # PINK\n # lower = np.array([110, 70, 215], dtype = \"uint8\")\n # upper = np.array([210, 180, 255], dtype = \"uint8\")\n # mask2 = cv2.inRange(img, lower, upper)\n\n\n # # YELLOW\n # lower = np.array([0, 120, 110], dtype = \"uint8\")\n # upper = np.array([100, 230, 210], dtype = \"uint8\")\n # mask3 = cv2.inRange(img, lower, upper)\n\n circle1 = cv2.HoughCircles(mask1, cv2.HOUGH_GRADIENT, 1.6, 50, param1=50,param2=30,minRadius=60,maxRadius=100)\n # circle2 = cv2.HoughCircles(mask2, cv2.HOUGH_GRADIENT, 1.6, 50, param1=50,param2=30,minRadius=60,maxRadius=100)\n # circle3 = cv2.HoughCircles(mask3, cv2.HOUGH_GRADIENT, 1.6, 50, param1=50,param2=30,minRadius=60,maxRadius=100)\n\n # circles = np.hstack((circle1,circle2,circle3))\n\n\n if circle1 is not None and printCircle == 1:\n # circles = np.uint16(np.around(circle1))\n\n for i in circle1[0,:]:\n # draw the outer circle\n cv2.circle(img,(i[0],i[1]),i[2],(0,255,0),2)\n # draw the center of the circle\n cv2.circle(img,(i[0],i[1]),2,(0,0,255),3)\n\n\n circle1 = circle1[0][0] # LIGHT BLUE\n # circle2 = circle2[0][0] # PINK\n # circle3 = circle3[0][0] # YELLOW\n\n halfx = mask1.shape[1]/2\n halfy = mask1.shape[0]/2\n\n if(circle1[1] > halfy and circle1[0] < halfx):\n return 0, circle1[0], circle1[1]\n\n elif(circle1[1] < halfy and circle1[0] < halfx):\n return 1, circle1[0], circle1[1] # 90\n\n elif(circle1[1] < halfy and circle1[0] > halfx):\n return 2, circle1[0], circle1[1] # 180\n\n elif(circle1[1] > halfy and circle1[0] > halfx):\n return 3, circle1[0], circle1[1] # 270\n\n return -1, circle1[0], circle1[1]\n\n\n# img = cv2.imread('untitled.png')\n# getRotation(img)\n","sub_path":"rotation.py","file_name":"rotation.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"199388290","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom importlib import import_module\nimport misc.transforms as own_transforms\nimport torchvision.transforms as standard_transforms\nfrom . import basedataset\nfrom . 
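# --- Illustrative aside ---
# The quadrant test at the end of rotation.py maps the detected circle centre
# (x, y) to a rotation index by comparing against the image midpoint. The same
# decision, isolated into a hypothetical helper (the original also returns the
# centre coordinates):
def quadrant_to_rotation(x, y, half_x, half_y):
    if y > half_y and x < half_x:
        return 0            # bottom-left: no rotation
    if y < half_y and x < half_x:
        return 1            # top-left: 90 degrees
    if y < half_y and x > half_x:
        return 2            # top-right: 180 degrees
    if y > half_y and x > half_x:
        return 3            # bottom-right: 270 degrees
    return -1               # centre lies exactly on an axis: ambiguous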
import setting\nfrom torch.utils.data import DataLoader\nimport pdb\nfrom config import cfg\ndef createTrainData(datasetname, Dataset, cfg_data):\n\n folder, list_file = None, None\n\n if datasetname in ['SHHA', 'SHHB' , 'QNRF', 'JHU', 'NWPU']:\n list_file=[]\n list_file.append({'data_path':cfg_data.DATA_PATH,\n 'imgId_txt': cfg_data.TRAIN_LST,\n 'box_gt_txt': []})\n else:\n print('dataset is not exist')\n\n main_transform = own_transforms.Compose([\n own_transforms.ScaleByRateWithMin([0.8, 1.2], cfg_data.TRAIN_SIZE[1], cfg_data.TRAIN_SIZE[0]),\n own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),\n own_transforms.RandomHorizontallyFlip(),\n ])\n\n img_transform = standard_transforms.Compose([\n standard_transforms.ToTensor(),\n standard_transforms.Normalize(*cfg_data.MEAN_STD)\n ])\n mask_transform = standard_transforms.Compose([\n standard_transforms.ToTensor()\n ])\n\n train_set = Dataset(datasetname, 'train',\n main_transform = main_transform,\n img_transform = img_transform,\n mask_transform = mask_transform,\n list_file = list_file\n )\n return DataLoader(train_set, batch_size=cfg_data.TRAIN_BATCH_SIZE, num_workers=6, shuffle=True, drop_last=True)\n\ndef createValData(datasetname, Dataset, cfg_data):\n\n if datasetname in ['SHHA', 'SHHB' , 'QNRF', 'JHU', 'NWPU']:\n list_file=[]\n list_file.append({'data_path':cfg_data.DATA_PATH,\n 'imgId_txt': cfg_data.VAL_LST,\n 'box_gt_txt': cfg_data.VAL4EVAL})\n else:\n print('dataset is not exist')\n\n img_transform = standard_transforms.Compose([\n standard_transforms.ToTensor(),\n standard_transforms.Normalize(*cfg_data.MEAN_STD)\n ])\n mask_transform = standard_transforms.Compose([\n standard_transforms.ToTensor()\n\n ])\n\n test_set = Dataset(datasetname, 'val',\n img_transform = img_transform,\n mask_transform = mask_transform,\n list_file = list_file\n\n )\n train_loader = DataLoader(test_set, batch_size=cfg_data.VAL_BATCH_SIZE, num_workers=6, shuffle=True, drop_last=False)\n return train_loader\n\n\ndef createRestore(mean_std):\n return standard_transforms.Compose([\n own_transforms.DeNormalize(*mean_std),\n standard_transforms.ToPILImage()\n ])\n\ndef loading_data(datasetname):\n datasetname = datasetname.upper()\n cfg_data = getattr(setting, datasetname).cfg_data\n\n Dataset = basedataset.Dataset \n \n train_loader = createTrainData(datasetname, Dataset, cfg_data)\n val_loader = createValData(datasetname, Dataset, cfg_data)\n\n restore_transform = createRestore(cfg_data.MEAN_STD)\n return train_loader, val_loader, restore_transform\n\n","sub_path":"datasets/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"38665803","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n\ndriver = webdriver.Firefox()\ndriver.get(\"file:///Users/shubhra/Desktop/Coding/remote_python/Selenium/test.html\")\nassert \"LOCAL\" in driver.title\n\nlst = driver.find_elements_by_xpath(\"//div[@class='foo']/h3/a\")\nprint(len(lst))\nprint([l.text for l in lst])","sub_path":"Selenium/selenium-local.py","file_name":"selenium-local.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"469628821","text":"import os, sys, pot, expense, config, summary\n\ndebug = False\n#________________________________________________________________________________________________\nif __name__==\"__main__\":\n\n fileName = 
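# --- Illustrative aside ---
# createTrainData above chains torchvision transforms via Compose; the same
# pattern in miniature (the mean/std values below are placeholders, not the
# project's cfg_data.MEAN_STD):
import torchvision.transforms as standard_transforms

img_transform = standard_transforms.Compose([
    standard_transforms.ToTensor(),
    standard_transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
])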
sys.argv[1]\n if len(sys.argv)>2:\n for i in range(1,3):\n if sys.argv[i]==\"-d\":\n debug = True\n else:\n fileName = sys.argv[i]\n\n c = config.config(fileName)\n c.process()\n\n # Adding pot\n Pot = pot.pot()\n\n # Adding everyone (loop since initial amount is 0 for everyone)\n for person in c.pot.keys():\n Pot.addContribution(person=person, amount=c.pot[person])\n\n # Adding expenses\n expenseDict = {}\n expenseList = c.expenses.keys()\n expenseList.sort()\n for e in expenseList:\n expenseDict[e] = expense.expense()\n expenseDict[e].setExpenseName(e)\n for p in c.expenses[e][\"PAYER\"]:\n expenseDict[e].addPayerAndAmount(payerName=p[0], amount=p[1])\n for p in c.expenses[e][\"BENEFICIARY\"]:\n expenseDict[e].addBeneficiary(beneficiaryName=p)\n expenseDict[e].computePricePerParticipant()\n\n # Create expense manager\n expensemanager = expense.expenseManager(expensesDict=expenseDict)\n expensemanager.compute()\n\n # Compute and print Totals\n summary = summary.summary(pot=Pot, expensemanager=expensemanager, expenseDict=expenseDict, debug=debug)\n summary.printTotals()\n summary.printCorrespondences()\n","sub_path":"compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"76969072","text":"#回数是指从左向右读和从右向左读都是一样的数,例如12321,909。请利用filter()筛选出回数:\n\ndef f1(s):\n n=0\n while n 1:\n # frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)\n\n text_base_position = 140\n i = 0\n while not self.feedback_queue.empty():\n obj = self.feedback_queue.get()\n obj_type = type(obj)\n if obj_type == np.ndarray:\n self.bin = cv2.cvtColor(obj, cv2.COLOR_GRAY2BGR)\n elif obj_type == tuple:\n if obj[0] == 0:\n cv2.line(self.bin, (160, obj[1]), (obj[2], obj[1]), (0, 0, 255), 3)\n elif obj[0] == 1:\n cv2.line(self.bin, (obj[1], 120), (obj[1], obj[2]), (0, 0, 255), 3)\n\n elif obj_type == str:\n cv2.putText(self.bin, obj, (0, text_base_position + i * 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 255), 1)\n i += 1\n \n self.read_count += 1\n while self.output_queue.full():\n if self.replay_path:\n sleep(0.1)\n else:\n try:\n self.output_queue.get(timeout=0.00001)\n except:\n pass\n self.output_queue.put(frame)\n\n if ret == True and not self.replay_path:\n if self.save:\n try:\n self.out.write(cv2.hconcat([frame, self.bin]))\n except:\n pass\n # if self.feedback_queue:\n # try:\n # processed_frame = self.feedback_queue.get(timeout=0.001)\n # except:\n # processed_frame = MASK_ALL = np.zeros([240, 320, 3],dtype=np.uint8)\n\n # # print('frame shape:', frame.shape, 'feedback shape:', processed_frame.shape)\n # self.out.write(cv2.hconcat([frame, processed_frame]))\n # else:\n # processed_frame = MASK_ALL = np.zeros([240, 320, 3],dtype=np.uint8)\n # # print('frame shape:', frame.shape, 'feedback shape:', processed_frame.shape)\n # self.out.write(cv2.hconcat([frame, processed_frame]))\n\n self.write_count += 1\n # th0, th1, th2, th3 = self.threshold(frame)\n # self.out0.write(cv2.cvtColor(th0, cv2.COLOR_GRAY2BGR))\n # self.out1.write(cv2.cvtColor(th1, cv2.COLOR_GRAY2BGR))\n # self.out2.write(cv2.cvtColor(th2, cv2.COLOR_GRAY2BGR))\n # self.out3.write(cv2.cvtColor(th3, cv2.COLOR_GRAY2BGR))\n # cv2.imshow('VideoWriter test', frame)\n\n if self.rec_stop:\n self.cap.release()\n if self.save:\n self.out.release()\n # self.out0.release()\n # self.out1.release()\n # self.out2.release()\n # self.out3.release()\n break\n\n def stop_rec(self):\n # print(\"=OxO=\")\n sleep(3)\n print('=' * 20 + 'Record stopped' + 
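# --- Illustrative aside ---
# The palindrome (回数) exercise a little above walks the digits from both ends;
# string slicing gives the usual one-liner, and filter() applies it exactly as
# the exercise asks (a sketch of the intended behaviour):
def is_palindrome(n):
    s = str(n)
    return s == s[::-1]

print(list(filter(is_palindrome, range(1, 200))))   # 1..9, 11, 22, ..., 191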
'=' * 20)\n self.rec_stop = True\n","sub_path":"ver2/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":6017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"516939618","text":"import os\nimport Tkinter as tk\nfrom Tkinter import Label, Frame\nimport ViewModel\nfrom FrameConnect import FrameConnect\nfrom FrameTable import FrameTable\nfrom FrameSet import FrameSet\nfrom FrameQuery import FrameQuery\nfrom ttk import *\n\nclass View(tk.Frame):\n \"\"\"\n Primary container\n \"\"\"\n\n def __init__(self, parent, ctx, *args, **kwargs):\n tk.Frame.__init__(self, parent, *args, **kwargs)\n self.parent = parent\n self.parent.title(\"CSIRO Energy Analyzer\")\n\n #Appearance\n self.parent.Style = Style()\n self.parent.Style.theme_use(\"xpnative\")\n self.parent.Style.configure('.', font=('Helvetica', 9))\n self.parent.Style.map('TButton',\n background=[('disabled','#d9d9d9'), ('active','#ececec')],\n foreground=[('disabled','#a3a3a3')],\n relief=[('pressed', '!disabled', 'sunken')])\n self.parent.Style.configure('Title.TLabel',\n font=\"Arial 9 bold\")\n\n\n #Context container to pass to children\n self.ctx = ctx\n self.ctx.status = tk.StringVar(value=\"Ready.\")\n self.ctx.global_widget_conf = {'padx': 5, 'pady': 2}\n self.ctx.const = {'win_width': 760,#Window width\n 'font_title': \"Arial 9 bold\"}#Font of titles\n self.ctx.opts = {'dir': {'initialdir':os.path.expanduser('~')}}\n\n #Frames (containers for UIs)\n ViewModel.mk_frames_in(parent, ['main', 'status'], {'fill': tk.BOTH})\n\n f = ViewModel.get_frame(parent, 'main')\n self.initUI_main(f)\n\n f = ViewModel.get_frame(parent, 'status')\n self.initUI_status(f)\n\n def initUI_main(self, parent):\n #UI Layout\n self.frames = {'connect': FrameConnect(parent, self.ctx, bd=2, relief=tk.GROOVE),\n 'table': FrameTable(parent, self.ctx, bd=2, relief=tk.GROOVE),\n 'setting': FrameSet(parent, self.ctx, bd=2, relief=tk.GROOVE),\n 'query': FrameQuery(parent, self.ctx, bd=2, relief=tk.GROOVE)}\n\n self.frames['connect'].pack(fill=tk.BOTH)\n self.frames['table'].pack(fill=tk.BOTH)\n self.frames['setting'].pack(fill=tk.BOTH)\n self.frames['query'].pack(fill=tk.BOTH)\n\n def initUI_status(self, parent):\n #Status bar\n self.widgets = {'stat': Label(parent, textvariable=self.ctx.status, font=\"Default 8\")}\n self.widgets['stat'].pack(anchor=tk.W)\n","sub_path":"sqlenergy/app/View.py","file_name":"View.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"617229515","text":"class MinStack:\r\n\r\n def __init__(self): #Intializing main stack and min stack to store incoming and minimum elements.\r\n self.mainst = []\r\n self.minst = []\r\n self.min = float(\"inf\") #Intializing infinity to Minimum\r\n \r\n def push(self, x):\r\n if x < self.min: #Checks if the incoming element is less than min.If it is less than min that update min\r\n self.min = x\r\n \r\n self.mainst.append(x) #Append incoming element to main stack\r\n self.minst.append(self.min) ##Append min element to min stack\r\n \r\n def pop(self):##Checks if the main and min stacks are empty if not pop the element from both the stacks and update min\r\n \r\n if len(self.mainst) == 0:\r\n return None\r\n \r\n else:\r\n self.mainst.pop()\r\n self.minst.pop()\r\n \r\n if len(self.minst) == 0:\r\n return None\r\n else:\r\n self.min = self.minst[-1]\r\n \r\n def top(self):##retur the top element of main stack\r\n if len(self.mainst) == 
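# --- Illustrative aside ---
# The MinStack class around this point keeps a parallel stack of running minima
# so getMin() is O(1); the invariant is minst[i] == min(mainst[:i+1]). A compact
# check of that invariant:
stack, mins = [], []
for value in [3, 5, 2, 7, 1]:
    stack.append(value)
    mins.append(value if not mins else min(value, mins[-1]))
assert mins[-1] == min(stack) == 1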
0:\r\n return None\r\n return self.mainst[-1]\r\n \r\n def getMin(self):#returns the top element in min stack\r\n if len(self.minst) == 0:\r\n return None \r\n \r\n else:\r\n return self.minst[-1]\r\n \r\n def isEmpty(self):\r\n return self.mainst == []\r\n \r\n\r\n\r\n# Your MinStack object will be instantiated and called as such:\r\nobj = MinStack()\r\nobj.push(3)\r\n\r\nobj.pop()\r\nparam_3 = obj.top()\r\nparam_4 = obj.getMin()\r\n\r\n\r\n","sub_path":"minstack.py","file_name":"minstack.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"254935073","text":"# encoding: utf-8\n\n\nfrom __future__ import unicode_literals\nfrom django.conf.urls import url\nfrom filemanagement import views as fileview\n\n\nurlpatterns = [\n url(r'^$', fileview.FMIndex.as_view(), name='file_index'),\n url(r'(?P\\d+)/$', fileview.FMList.as_view(), name='file_list'),\n url(r'create/$', fileview.FMNewFolder.as_view(), name='new_folder'),\n url(r'(?P\\d+)/upload/$', fileview.FMUpload.as_view(), name='file_upload'),\n url(r'(?P\\d+)/deletefile/$', fileview.FMFileDelete.as_view(), name='file_delete'),\n url(r'bulkdelete/$', fileview.FMFileBulkDelete.as_view(), name='bulk_delete'),\n url(r'bulkdownload/$', fileview.FMFileBulkDownload.as_view(), name='bulk_download'),\n url(r'bulkmove/$', fileview.FMBulkMove.as_view(), name='bulk_move'),\n url(r'search/$', fileview.FMSearch.as_view(), name='file_search'),\n url(r'edit/$', fileview.FMEdit.as_view(), name='file_edit'),\n]\n","sub_path":"filemanagement/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"603558737","text":"import random\r\ni=input(\"digite numero do codigo (1,2,3,..)\")\r\nfile=open(\"randomico\"+i+\".txt\",\"w\")\r\na=0\r\nl=[\"A\",\"C\",\"G\",\"T\"]\r\nwhile a<4639675:\r\n a+=1\r\n r=random.randint(0,3)\r\n file.write(l[r])\r\nfile.close()\r\n","sub_path":"randomico.py","file_name":"randomico.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"65058593","text":"\nimport sqlite3\nimport random\n\nconnection = sqlite3.connect(\"queueDatabase.db\")\n\nconn = connection.cursor()\n\nclass Channel:\n def __init__(self, channelNumber, token, service):\n self.channelNumber = int(channelNumber)\n self.token = token\n self.service = service\n self.songs = []\n self.playing = None\n self.device_id = None\n\n def requestSong(self, searchString):\n results = self.service.search(searchString, type = \"track\")[\"tracks\"][\"items\"]\n song = Song(results[0])\n self.queueSong(song)\n\n def getDevices(self):\n return self.service.devices()\n\n def prepareActiveDevice(self):\n device = spotify.get_active_device(self.service)\n if device == None:\n print(\"Error\")\n self.device_id = device[\"id\"]\n\n def playNextSong(self):\n if len(self.songs) == 0:\n self.playing = None\n else:\n song = self.songs.pop(0)\n self.playing = song\n self.prepareActiveDevice()\n playSong(service, self.device_id, [song.spotifyUri])\n print(\"Song duration!!: \", song.duration)\n t = Timer(song.duration, self.playNextSong)\n t.start()\n \n def queueSong(self, song):\n if len(self.songs) == 0 and self.playing == None:\n self.songs.append(song)\n self.playNextSong()\n else:\n self.songs.append(song)\n \n def getSize(self):\n return len(self.songs)\n\n def getSongs(self):\n return 
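# --- Illustrative aside ---
# createChannel() just below builds its INSERT with str.format; sqlite3's "?"
# parameter binding is the safer equivalent (a sketch reusing the record's
# table layout):
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE channels (channelNumber real, channelHostId real, channelName text)')
conn.execute('INSERT INTO channels VALUES (?, ?, ?)', (42, 224, 'Channel Name'))
print(conn.execute('SELECT * FROM channels ORDER BY channelNumber').fetchall())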
self.songs\n\n\n\ndef createDatabase(conn): \n conn.execute('''CREATE TABLE channels\n (channelNumber real, channelHostId real, channelName text)''')\n connection.comit()\n connection.close()\n\ndef createChannel():\n newChannelNumber = None\n existingChannelNumbers = getChannelNumbers()\n while newChannelNumber in existingChannelNumbers or newChannelNumber == None:\n newChannelNumber = random.randint(0, 1000)\n \n conn.execute(\"INSERT INTO channels VALUES ({0}, 224, 'Channel Name')\".format(newChannelNumber))\n print(\"Created Channel #\" + str(newChannelNumber))\n\ndef getChannelNumbers():\n return [channelData[0] for channelData in conn.execute('SELECT * FROM channels ORDER BY channelNumber')]\n\ndef printChannels():\n for row in conn.execute('SELECT * FROM channels ORDER BY channelNumber'):\n print(row)\n\n\ndef loadChannels():\n return {}\n\n\n\n\ncreateChannel()\ncreateChannel()\ncreateChannel()\n\nprintChannels()\n\nprint(getChannelNumbers())\n","sub_path":"databaseMangagerMock.py","file_name":"databaseMangagerMock.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"334456894","text":"class Person:\n '''Represents a person.'''\n population = 0\n\n def __init__(self, name): # be called when a object is created\n self.name = name\n print('(Initialization %s)' % self.name)\n Person.population += 1\n\n def __del__(self): # be called when a object is destroyed and there is no guarantee when that method will be run.\n print('%s says goodbye.' % self.name)\n Person.population -= 1\n if Person.population == 0:\n print('I am the last one.')\n else:\n print('There are still %d people left.' % Person.population)\n\n def sayhi(self):\n print('Hi, my name is %s.' % self.name)\n\n def howmany(self):\n if Person.population == 1:\n print('There is only one person left.')\n else:\n print('We have %d persons here.' 
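# --- Illustrative aside ---
# Person.population above is a class attribute shared by every instance, which is
# why __init__ and __del__ can maintain a live head-count. Minimal illustration:
class Counted:
    population = 0                      # shared across instances
    def __init__(self):
        Counted.population += 1         # update the class, not the instance

a, b = Counted(), Counted()
assert Counted.population == 2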
% Person.population)\n\n\np1 = Person('Tang long 001')\np1.sayhi()\np1.howmany()\n\np2 = Person('Tang long 002')\np2.sayhi()\np2.howmany()","sub_path":"python_basic/python_class/python_class3.py","file_name":"python_class3.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"604105864","text":"import os\nimport mnist_net\nimport torch\nimport torch.nn as nn\nimport config\nfrom torchvision import datasets, transforms\nfrom visdom import Visdom\nimport numpy as np\n\n\nvis = Visdom()\nline = vis.line(np.arange(1))\ncurrentCount = []\ncurrentLoss = []\n\ndef train(net, device):\n\n if not os.path.exists(\"./data\"):\n print(\"data not exist\")\n os.mkdir(\"./data\")\n if not os.path.exists(\"./model\"):\n print(\"model not exist\")\n os.mkdir(\"./model\")\n data_train = datasets.MNIST(\"./data/\", train=True, transform=transforms.ToTensor(), download=True)\n data_loader_train = torch.utils.data.DataLoader(dataset=data_train, batch_size = config.BATCH_SIZE, shuffle = True)\n\n\n crossEntropy = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(net.parameters(), lr=config.learning_rate)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=config.LR_decay_steps_size,\n\n gamma=config.LR_decay_gamma, last_epoch=-1)\n\n for epoch in range(config.EPOCHS):\n\n scheduler.step()\n for i, (images, labels) in enumerate(data_loader_train):\n if config.USE_GPU:\n images = images.to(device)\n labels = labels.to(device)\n\n _labels = net(images)\n loss = crossEntropy(_labels, labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if i % 100 ==0:\n print(epoch, i, loss.data)\n currentCount.append(epoch*60000+i*64)\n currentLoss.append(loss.data.tolist())\n vis.line(X=currentCount, Y=currentLoss, win=line, opts=dict(legend=[\"loss\"]))\n if epoch % 500 == 0:\n torch.save(net, \"./model/model_decay_{}.pth\".format(epoch))\n torch.save(net.state_dict(), \"./model/model_decay_data_{}.pth\".format(epoch))\n torch.save(net, \"./model/model_decay.pth\")\n torch.save(net.state_dict(), \"./model/model_decay_data.pth\")\n\nif __name__==\"__main__\":\n device = 0\n config.USE_GPU = 1 if torch.cuda.is_available() else 0\n print(\"config.USE_GPU = {}\".format(config.USE_GPU))\n if config.USE_GPU:\n device = torch.device('cuda')\n model = mnist_net.MNIST().to(device)\n else:\n model = mnist_net.MNIST()\n train(model, device)\n\n\n","sub_path":"mnist/mnist_train.py","file_name":"mnist_train.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"507683162","text":"from django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.test import APITestCase\n\nfrom api.models import User, Issue\n\nOK = 200\nCREATED = 201\nNOT_FOUND = 404\nBAD_REQUEST = 400\n\n\nclass UserRegistrationAPIViewTestCase(APITestCase):\n fixtures = ['testUser.json']\n url = reverse(\"user-list\")\n\n def setUp(self):\n token = Token.objects.get(user__username='jtcasper')\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n\n def test_user_registration(self):\n \"\"\"\n Test to verify a post call valid information\n \"\"\"\n user_data = {\n \"first_name\": \"Test\",\n \"last_name\": \"Dude\",\n \"username\": \"tdude\",\n \"password\": \"password\",\n \"email\": \"tdude@email.com\",\n \"official\": False\n }\n\n response = 
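# --- Illustrative aside ---
# mnist_train.py above pairs SGD with StepLR: the learning rate is multiplied by
# `gamma` every `step_size` scheduler steps. Tiny demonstration (PyTorch):
import torch

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([param], lr=0.1)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.5)
for _ in range(4):
    opt.step()
    sched.step()
print(opt.param_groups[0]['lr'])   # 0.1 -> 0.05 -> 0.025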
self.client.post(self.url, user_data)\n self.assertEqual(CREATED, response.status_code)\n\n def test_unique_username_validation(self):\n \"\"\"\n Test to verify that a post call with already exists username\n \"\"\"\n user_data_1 = {\n \"first_name\": \"Test\",\n \"last_name\": \"Dude\",\n \"username\": \"tdude\",\n \"password\": \"password\",\n \"email\": \"tdude@email.com\",\n \"official\": False\n }\n response = self.client.post(self.url, user_data_1)\n self.assertEqual(CREATED, response.status_code)\n\n user_data_2 = {\n \"first_name\": \"Test2\",\n \"last_name\": \"Dude2\",\n \"username\": \"tdude\",\n \"password\": \"password2\",\n \"email\": \"tdude2@email.com\",\n \"official\": False\n }\n response = self.client.post(self.url, user_data_2)\n self.assertEqual(BAD_REQUEST, response.status_code)\n\n\nclass IssueAPIPostTestCase(APITestCase):\n fixtures = ['testUser.json']\n url = reverse(\"issue-list\")\n\n def setUp(self):\n token = Token.objects.get(user__username='jtcasper')\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n\n def test_post_issue_creation(self):\n \"\"\"\n Test to verify that an authorized Issue post will create an issue successfully.\n \"\"\"\n\n issue_data = {\n \"name\": \"The roads are all broken.\",\n \"desc\": \"Somehow, every road needs to be repaired?\",\n \"lat\": '78.6382',\n \"lng\": '35.7796',\n \"author\": \"jtcasper\"\n }\n\n response = self.client.post(self.url, issue_data)\n self.assertEqual(CREATED, response.status_code)\n\n\nclass IssueAPIGetTestCase(APITestCase):\n fixtures = ['testUser.json', 'testIssue.json']\n url = reverse(\"issue-list\")\n\n def setUp(self):\n token = Token.objects.get(user__username='jtcasper')\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n\n def test_get_issue_by_id(self):\n \"\"\"\n Test that an issue can be returned by id\n \"\"\"\n response = self.client.get(self.url + '1/')\n self.assertEqual(OK, response.status_code)\n self.assertEqual(\"TestIssue\", response.data.get('name'))\n self.assertEqual(\"TestDesc\", response.data.get('desc'))\n self.assertEqual(\"jtcasper\", response.data.get('author'))\n\n def test_get_issue_dne(self):\n \"\"\"\n Test the response for an issue that does not exist\n \"\"\"\n response = self.client.get(self.url + '999/')\n self.assertEqual(NOT_FOUND, response.status_code)\n\n def test_get_issue_by_dist(self):\n \"\"\"\n Test getting the nearest issues to the user.\n \"\"\"\n response = self.client.get(self.url + 'near/')\n self.assertEqual(OK, response.status_code)\n self.assertEqual(b'[]', response.content) # Empty byte array\n\n def test_get_issue_comments(self):\n \"\"\"\n Test getting the comments for an issue.\n \"\"\"\n response = self.client.get(self.url + '1/comments/')\n self.assertEqual(OK, response.status_code)\n self.assertEqual(b'[]', response.content) # Empty byte array\n\n def test_get_issue_votes(self):\n \"\"\"\n Test getting the votes for an issue.\n \"\"\"\n response = self.client.get(self.url + '1/votes/')\n self.assertEqual(OK, response.status_code)\n self.assertEqual(b'[]', response.content) # Empty byte array\n\n def test_get_issue_reports(self):\n \"\"\"\n Test getting the reports for an issue.\n \"\"\"\n response = self.client.get(self.url + '1/reports/')\n self.assertEqual(OK, response.status_code)\n self.assertEqual(b'[]', response.content) # Empty byte array\n\n\n\"\"\"\nTEST MODELS\n\"\"\"\n\n\nclass IssueModelTestCase(TestCase):\n fixtures = ['testUser.json']\n\n def test_string_representation(self):\n issue = 
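# --- Illustrative aside ---
# The hand-rolled OK/CREATED/... constants in the test module above duplicate
# named codes that Django REST Framework already ships:
from rest_framework import status

assert status.HTTP_200_OK == 200
assert status.HTTP_201_CREATED == 201
assert status.HTTP_400_BAD_REQUEST == 400
assert status.HTTP_404_NOT_FOUND == 404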
Issue(name=\"TestIssue\", desc=\"TestDesc\", lng=1337, lat=1337)\n self.assertEqual(str(issue), issue.name)\n\n\nclass UserModelTestCase(TestCase):\n def test_string_representation(self):\n user = User(username=\"lwkerr\", email=\"lwkerr@ncsu.edu\", first_name=\"Len\", last_name=\"Kerr\")\n self.assertEqual(str(user), user.username)\n","sub_path":"backend/api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"493049127","text":"#!/usr/bin/env python3\n#\n# Populate the list\nfruit = [\"Apples\", \"Pears\", \"Oranges\", \"Peaches\"]\nprint(fruit)\n\n# Add another fruit\nnew_fruit = input(\"pick another fruit: \")\nfruit.append(new_fruit)\nprint(fruit)\n\n# Pick a number corresponding to a fruit\nindex = int(input(\"Pick a number: \"))\nif index > 0 and index <= len(fruit):\n print(fruit[index-1])\n \n#Add another fruit to beginning of list\nfruit = [\"Plums\"] + fruit\nprint(fruit)\n\n#Add another to the beginning of the list\nfruit.insert(0,\"Pineapples\")\nprint(fruit)\n\n#Display all fruits beginning with a p\nfor each_fruit in fruit:\n if each_fruit[0] == 'P':\n print(each_fruit)\n\n#Remove last from list\nprint(fruit)\nfruit.pop()\nprint(fruit)\n\n#Delete selected fruit\ndeleted_fruit = input(\"Enter a fruit to delete: \")\nfruit.remove(deleted_fruit)\nprint(fruit)\n\n#Delete all occurences \nfruit = fruit*2\nprint(fruit)\ndeleted_fruit = input(\"Enter a fruit to delete: \")\nfor i in range(fruit.count(deleted_fruit)):\n fruit.remove(deleted_fruit)\nprint(fruit)\n\n#Keep the ones you like\nfor each in fruit:\n answer = \"\"\n while answer != \"yes\" or answer !=\"no\":\n answer = input(\"Do you like {}? \".format(each))\n if answer == \"no\":\n fruit.remove(each)\n print(fruit)\n\n\n","sub_path":"students/cheryl/session03/list_lab.py","file_name":"list_lab.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"455978452","text":"import uuid\nimport logging\nimport time\nfrom boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType\nfrom fabric.api import run, env, sudo\nfrom . 
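# --- Illustrative aside ---
# The "keep the ones you like" loop in list_lab.py above calls fruit.remove()
# while iterating over fruit, which makes the iterator skip elements. Building a
# new list (or iterating over a copy) avoids that pitfall:
fruit = ['Apples', 'Pears', 'Oranges', 'Peaches']
disliked = {'Pears', 'Peaches'}
fruit = [f for f in fruit if f not in disliked]
print(fruit)   # ['Apples', 'Oranges']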
import wait_for_status\n\nlog = logging.getLogger(__name__)\n\n\ndef run_instance(connection, instance_name, config, key_name, user='root',\n subnet_id=None):\n bdm = None\n if 'device_map' in config:\n bdm = BlockDeviceMapping()\n for device, device_info in config['device_map'].items():\n bdm[device] = BlockDeviceType(size=device_info['size'],\n delete_on_termination=True)\n\n reservation = connection.run_instances(\n image_id=config['ami'],\n key_name=key_name,\n instance_type=config['instance_type'],\n block_device_map=bdm,\n client_token=str(uuid.uuid4())[:16],\n subnet_id=subnet_id,\n )\n\n instance = reservation.instances[0]\n log.info(\"instance %s created, waiting to come up\", instance)\n # Wait for the instance to come up\n wait_for_status(instance, \"state\", \"running\", \"update\")\n if subnet_id:\n env.host_string = instance.private_ip_address\n else:\n env.host_string = instance.public_dns_name\n env.user = user\n env.abort_on_prompts = True\n env.disable_known_hosts = True\n\n # wait until the instance is responsive\n while True:\n try:\n if run('date').succeeded:\n break\n except:\n log.debug('hit error waiting for instance to come up')\n time.sleep(10)\n\n instance.add_tag('Name', instance_name)\n # Overwrite root's limited authorized_keys\n if user != 'root':\n sudo(\"cp -f ~%s/.ssh/authorized_keys \"\n \"/root/.ssh/authorized_keys\" % user)\n sudo(\"sed -i -e '/PermitRootLogin/d' \"\n \"-e '$ a PermitRootLogin without-password' /etc/ssh/sshd_config\")\n sudo(\"service sshd restart || service ssh restart\")\n sudo(\"sleep 20\")\n return instance\n","sub_path":"cloudtools/aws/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"479286610","text":"## This is a recommender system that the user can choose to use\n## content based recommendation or \nimport numpy as np\nimport scipy.io as sio\nimport numpy.linalg as LA\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom scipy.sparse import csr_matrix\n\nclass recommender_system(object):\n def __init__(self):\n self.prediction = None\n def predict_content(self, R, uid = None, iid = None, type='user'):\n # User pairwise_distances function from sklearn to calculate the cosine\n # similarity between users and items respectively\n if type == 'user':\n similarity = pairwise_distances(csr_matrix(R), metric='cosine')\n mean_user_rating = R.mean(axis=1) \n #You use np.newaxis so that mean_user_rating has same format as ratings\n ratings_diff = (R - mean_user_rating[:, np.newaxis]) \n# pred = mean_user_rating[:, np.newaxis] + similarity.dot(ratings_diff) / np.array([np.abs(similarity).sum(axis=1)]).T\n pred = mean_user_rating[uid] + similarity[uid, :].dot(ratings_diff) / np.sum(np.abs(similarity[uid, :]))\n elif type == 'item':\n similarity = pairwise_distances(csr_matrix(R).T, metric='cosine')\n pred = R[uid,:].dot(similarity) / np.abs(similarity).sum(axis=0, keepdims=True)\n return pred\n def predict_model(self, R, d, type='MSE'):\n return None\n def MSE(self,R,d):\n # Replace the nan values by zero\n index=np.isnan(R)\n Rnew = R.copy()\n row, col = Rnew.shape\n Rnew[index]=0\n U,s,V = LA.svd(Rnew, full_matrices=False)\n s[d:]=0\n out = np.dot(U, np.dot(np.diag(s), V))\n Rnew[index]=out[index]\n prediction = out\n return prediction\n def MLF(self, R, d, Lambda):\n row, col= R.shape\n U = np.random.rand(row, d)\n V = 
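# --- Illustrative aside ---
# The MSE() method of recommender_system above imputes missing ratings by
# truncating the SVD to rank d; the core step, isolated (NumPy):
import numpy as np

def rank_d_approx(M, d):
    U, s, Vt = np.linalg.svd(M, full_matrices=False)
    s[d:] = 0                                  # keep only the top-d singular values
    return U @ np.diag(s) @ Vt

M = np.arange(12, dtype=float).reshape(3, 4)
print(np.allclose(rank_d_approx(M, 2), M))     # rank(M) = 2, so True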
np.random.rand(d, col)\n Rnew = R.copy()\n for i in range(100):\n current = np.dot(U, V)\n U=self.updateU(Rnew,V,Lambda)\n V=self.updateV(Rnew,U,Lambda)\n out = np.dot(U,V)\n prediction = out\n return prediction\n def updateU(self, R,V,Lambda):\n U = np.linalg.solve(np.dot(V, V.T)+Lambda*np.eye(len(V)), np.dot(V, R.T)) \n return U.T\n def updateV(self, R,U,Lambda):\n V = np.linalg.solve(np.dot(U.T,U)+Lambda*np.eye(len(U[0])), np.dot(U.T, R))\n return V\n def rmse(self, prediction, ground_truth):\n prediction = prediction[ground_truth.nonzero()].flatten() \n ground_truth = ground_truth[ground_truth.nonzero()].flatten()\n return sqrt(mean_squared_error(prediction, ground_truth))\n\n\n\n","sub_path":"recommender_system.py","file_name":"recommender_system.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"88022082","text":"import tensorflow as tf\nimport numpy as np\nimport cv2 as cv \nimport params\nimport utils\nimport pdb\nimport re\nimport os \n\nparams.show_params()\n\nconfig = tf.ConfigProto(\n device_count = {'GPU': 1}\n ) \n \ndef upscale(downscaled_image, checkpoint):\n\n scale_factor = params.scale \n \n # cnn resize \n input = tf.placeholder(tf.float32, (1, downscaled_image.shape[1], downscaled_image.shape[2], params.num_channels), name='input') \n _, output = params.network_architecture(input) \n \n\n with tf.Session(config=config) as sess: \n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n print('restoring from ' + checkpoint)\n saver.restore(sess, checkpoint)\n \n # step 1 - apply cnn on each resized image, maybe as a batch \n cnn_output = []\n for image in downscaled_image: \n cnn_output.append(sess.run(output, feed_dict={input: [image]})[0])\n \n cnn_output = np.array(cnn_output) \n cnn_output = np.round(cnn_output) \n cnn_output[cnn_output > 255] = 255 \n \n return cnn_output\n \n \ndef predict(downscaled_image, original_image, checkpoint):\n \n downscaled_image = upscale(downscaled_image, checkpoint)\n tf.reset_default_graph()\n \n ssim_cnn, psnr_cnn = utils.compute_ssim_psnr_batch(downscaled_image, original_image) \n\n return ssim_cnn, psnr_cnn\n \n \ndef read_images(test_path):\n\n test_images_gt = utils.read_all_directory_images_from_directory_test(test_path, add_to_path='original')\n test_images = utils.read_all_directory_images_from_directory_test(test_path, add_to_path='input_x%d' % scale) \n return test_images_gt, test_images\n \ndef compute_performance_indeces(test_path, test_images_gt, test_images, checkpoint, write_to_summary=True):\n\n num_images = 0 \n ssim_cnn_sum = 0; psnr_cnn_sum = 0; ssim_standard_sum = 0; psnr_standard_sum = 0; \n \n for index in range(len(test_images)):\n # pdb.set_trace()\n ssim_cnn, psnr_cnn = predict(test_images[index], test_images_gt[index], checkpoint)\n tf.reset_default_graph()\n ssim_cnn_sum += ssim_cnn; psnr_cnn_sum += psnr_cnn \n num_images += test_images[index].shape[0]\n \n print('cnn {} --- psnr = {} ssim = {}'.format(test_path, psnr_cnn_sum/num_images, ssim_cnn_sum/num_images))\n \n if test_path.find('test') != -1 and write_to_summary == True:\n \n tf.summary.scalar('psnr_cnn', psnr_cnn_sum/num_images) \n tf.summary.scalar('ssim_cnn', ssim_cnn_sum/num_images) \n merged = tf.summary.merge_all() \n writer = tf.summary.FileWriter('test.log') \n epoch = re.findall(r'\\d+', checkpoint)\n epoch = int(epoch[0]) \n with tf.Session(config=config) as sess:\n merged_ = sess.run(merged)\n writer.add_summary(merged_, 
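# --- Illustrative aside ---
# updateU/updateV above are ridge-regression solves: each alternating step is one
# call to np.linalg.solve on the regularized normal equations. For U with V held
# fixed (mirroring the record's updateU):
import numpy as np

def update_U(R, V, lam):
    # minimize ||R - U V||^2 + lam ||U||^2  =>  (V V^T + lam I) U^T = V R^T
    d = V.shape[0]
    return np.linalg.solve(V @ V.T + lam * np.eye(d), V @ R.T).T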
epoch)\n \ntest_path = './../../../images-testing/t2w' \neval_path = './data/train'\nscale = 2\n\ntest_images_gt, test_images = read_images(test_path) \n# checkpoint = tf.train.latest_checkpoint(params.folder_data) \ncheckpoint = os.path.join(params.folder_data, 'model.ckpt%d' % 28)\ncompute_performance_indeces(test_path, test_images_gt, test_images, checkpoint, write_to_summary=False) \nexit()\n\nfor i in range(29, 35):\n checkpoint = os.path.join(params.folder_data, 'model.ckpt%d' % i)\n \n compute_performance_indeces(test_path, test_images_gt, test_images, checkpoint)\n # compute_performance_indeces(eval_path, eval_images_gt, eval_images, checkpoint) \n ","sub_path":"training/t2w/h and w/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"69014484","text":"import sys\nimport re\nimport operator\n\nfrom .vm import COMMANDS\nfrom .sexpr import *\n\ndef index(name, namelist):\n for x, level1 in enumerate(namelist):\n for y, element in enumerate(level1):\n if element == name:\n return (x, y)\n raise SyntaxError(\"Unknown variable '\"+str(name)+\"' in \"+str(namelist))\n\ndef genatom(expr, names):\n if expr is None:\n return ['NIL']\n elif isnum(expr) or isstr(expr):\n return ['LDC', expr]\n elif issym(expr):\n return ['LD', index(expr.name, names)]\n raise SyntaxError(\"Cannot codegen atom: \" + str(expr))\n\ndef peekargs(n, expr):\n out = [x for x in itercons(expr)][:n]\n if n != len(out) and expr is not None:\n raise SyntaxError(\"Trailing arguments, expected %d, remaining %d: %s\" % (n, n - len(out), unparse(expr)))\n return out\n\nEXPR = lambda n, f: lambda names, x: f(*[names] + peekargs(n, x))\n\nflatten = lambda x: reduce(operator.add, x) if x else []\n\ncomplis = lambda n, v: flatten([codegen(X, n) + ['CONS'] for X in itercons(v)])\n\ngenap = lambda n, v, b: ['NIL'] + complis(n, v) + b + ['AP']\n\nMACROS = {\n 'QUOTE': EXPR(1, lambda _, a: ['LDC', a]),\n\n # (IF x then else)\n 'IF': EXPR(3, lambda n, x, a, b: codegen(x, n) + ['SEL', codegen(a, n) + ['JOIN'], codegen(b, n) + ['JOIN']]),\n\n # (LAMBDA args body)\n 'LAMBDA': EXPR(2, lambda n, a, b: ['LDF', codegen(b, [a] + n) + ['RTN']]),\n\n # (LET args vals body)\n 'LET': EXPR(3, lambda n, a, v, b: genap(n, v, ['LDF', codegen(b, [[x.name if x else None for x in a]] + n) + ['RTN']]))\n}\nCOMMAND_ALIASES = {\n '+': 'ADD', '-': 'SUB', '/': 'DIV', '*': 'MUL',\n '^': 'XOR', '&': 'AND', '|': 'OR', 'EQ?': 'EQ',\n 'ATOM?': 'ATOM', 'NIL?': 'NIL', '#T': 'T', '#F': 'F',\n 'NULL?': 'NIL', 'NULL': 'NIL', '<=': 'LE', \">=\": 'GE',\n '<': 'LT', '>': 'GT',\n}\n\ndef codegen(expr, names=None):\n if names is None:\n names = []\n if isatom(expr):\n return genatom(expr, names)\n assert iscons(expr) or islist(expr)\n op = car(expr)\n if issym(op):\n name = op.name\n if name in MACROS:\n return MACROS[name](names, cdr(expr))\n # Otherwise, passthru to builtin command\n name = COMMAND_ALIASES.get(name, name)\n if name in COMMANDS:\n nargs, _ = COMMANDS[name]\n if nargs:\n args = peekargs(nargs, cdr(expr))\n return flatten([codegen(arg, names) for arg in args[::-1]]) + [name]\n return [name]\n raise SyntaxError(\"Unknown operation: '\"+str(op)+\"'\")\n raise SyntaxError(\"Unknown token: \"+type(op).__name__+' = '+str(op))\n\ndef main():\n code = sys.stdin.read()\n print(codegen(parse(code)))\n\nif __name__ == '__main__':\n 
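# --- Illustrative aside ---
# codegen.py's index() above resolves a variable to a (frame, slot) pair for the
# SECD LD instruction by scanning the compile-time environment outward:
def index(name, frames):
    for depth, frame in enumerate(frames):
        if name in frame:
            return (depth, frame.index(name))
    raise SyntaxError('unknown variable ' + name)

assert index('y', [['x'], ['y', 'z']]) == (1, 0)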
main()\n\n","sub_path":"secd/codegen.py","file_name":"codegen.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"647818326","text":"'''\nЛабораторная работа 5\n\n1. Загрузить данные (ex1data.csv) из csv-файла в скрипт с помощью метода read_csv модуля pandas.\n\n2. Используя метод plot или scatter, визуализируем данные, считанные из файла.\n\n3. Разберемся с объектом subplot и нарисуем на этом же графике линию, «соответствующую» данным,\nсчитанным из файла. Пусть эта будет линия, соответствующая графику функции y = 2*x-10.\n\n4. Используя метод polyfit из библиотеки scipy получите значения коэффициентов (аналогично было\nсделано в конспекте лекции) для модели линейной регрессии (степень полинома = 1)\n\n5. Предскажите значения целевого параметра при значениях x = [2.225, 17.5, 25].\n\n6. Еще раз визуализируйте графики из пункта 2, но дополнительно нанесите график с параметрами\nмодели, полученными с помощью polyfit.\n\n'''\n\nimport numpy\nimport matplotlib.pyplot as plt\nimport pandas\nimport scipy as sp\n\n\ndef error(f, x, y):\n\n \"\"\"\n Векторное вычисление суммы квадратов отклонений значений функции\n от известных значений целевого параметра (y).\n \"\"\"\n\n return numpy.sum((f(x) - y) ** 2)\n\n\ndef line(x):\n return (2*x - 10)\n\n\ndef draw_graph(x, y):\n\n \"\"\"\n Рисование графика функции.\n \"\"\"\n\n plt.scatter(x, y, s=5)\n\n y_line = [line(x_i) for x_i in x]\n plt.plot(x, y_line, linewidth=2.0, color='r')\n\n # polyfit подбирает коэффициенты модели\n f1p, residuals, rank, sv, rcond = numpy.polyfit(x, y, 1, full=True)\n f1 = sp.poly1d(f1p)\n x_pred = [2.225, 17.5, 25]\n y_pred = [f1(x_i) for x_i in x_pred]\n print('Предсказанные значения от x = [2.225, 17.5, 25]: ', y_pred)\n print(f\"{error(f1, x, y):.5}\")\n fx = numpy.linspace(min(x), max(x), 500)\n plt.plot(fx, f1(fx), linewidth=2.0, color='g')\n\n plt.scatter(x_pred, y_pred, s=40, color='orange')\n\n # plt.autoscale(tight=True)\n # plt.grid(True, color='0.75')\n\n plt.show()\n plt.savefig(\"LR5.png\")\n\ndef transform_data(data):\n\n \"\"\"\n Распределение данных по двум векторам (одномерным массивам)\n \"\"\"\n\n x = data['first'].to_list()\n y = data['second'].to_list()\n\n return x, y\n\n\nif __name__ == '__main__':\n\n data = pandas.read_csv('ex1data1.csv', names=['first', 'second'])\n x, y = transform_data(data)\n draw_graph(x, y)\n\n","sub_path":"LR/LR5.py","file_name":"LR5.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"586999345","text":"#!/usr/bin/python3\n# Project: performance tools\n# Author: syx10\n# Time 2021/1/30:12:05\nimport time\nimport os\nimport pickle\n\n\nclass TimerPerformance:\n def __init__(self):\n super(TimerPerformance).__init__()\n self.timer_perf = {}\n self.save_dir = os.getcwd() + '/performance/'\n\n def __set_tag(self, tag):\n if not self.timer_perf.get(tag):\n self.timer_perf[tag] = []\n\n def set_timer(self, tag, timer):\n if not self.timer_perf.get(tag):\n self.__set_tag(tag)\n self.timer_perf[tag].append(timer)\n\n def timer_wrapper(self, tag):\n def wrapper(func):\n def deco(*args, **kwargs):\n start = time.clock()\n res = func(*args, **kwargs)\n end = round(time.clock() - start, 6)\n self.set_timer(tag, end)\n return res\n return deco\n return wrapper\n\n\n def show_perf(self):\n import numpy as np\n max_mean = {}\n the_max_max = 0, ''\n the_max_mean = 0, ''\n for ele in 
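# --- Illustrative aside ---
# LR5.py above fits a degree-1 polynomial with numpy.polyfit and predicts at
# x = [2.225, 17.5, 25]; the same two steps in isolation, on synthetic points
# sampled around the lab's y = 2x - 10 line:
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
y = 2 * x - 10 + np.random.default_rng(0).normal(0, 0.1, x.size)
coeffs = np.polyfit(x, y, 1)   # degree-1 fit: [slope, intercept]
f = np.poly1d(coeffs)
print(f([2.225, 17.5, 25]))    # predictions at the requested points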
self.timer_perf.keys():\n max_mean[ele] = [max(self.timer_perf[ele]), np.mean(self.timer_perf[ele])]\n print(\"Length of \" + ele + \" is \" + str(len(self.timer_perf[ele])))\n print(\"Max \" + ele + \" timer: \" + str(max_mean[ele][0]))\n print(\"Mean \" + ele + \" timer: \" + str(max_mean[ele][1]))\n\n for ele in max_mean:\n if max_mean[ele][0] > the_max_max[0]:\n the_max_max = max_mean[ele][0], ele\n\n if max_mean[ele][1] > the_max_mean[0]:\n the_max_mean = max_mean[ele][1], ele\n print(\"The max time conso ====>\" + str(the_max_max))\n print(\"The max mean time conso =====>\" + str(the_max_mean))\n\n def set_save_dir(self, directory):\n if os.path.exists(directory):\n self.save_dir = directory\n else:\n return 'Directory: ' + str(directory) + ' not exist!'\n\n def save_perf(self):\n if not os.path.exists(self.save_dir):\n os.mkdir(self.save_dir)\n if len(self.timer_perf) > 0:\n try:\n filename = time.strftime(\"%Y_%m_%d_%H_%M_%S\", time.localtime()) + '_perf.pkl'\n perf_file = open(self.save_dir + filename, 'wb')\n pickle.dump(self.timer_perf, perf_file)\n return perf_file.name\n except Exception as e:\n raise e\n else:\n return 'Performance object has no data to save!'\n\n\ndef performance_viewer(perf_obj=None, filepath=''):\n if perf_obj is None and filepath == '':\n raise ValueError + ' arguments exception.'\n if type(perf_obj) == TimerPerformance:\n if len(perf_obj.timer_perf) == 0:\n raise Exception('Performance object is empty')\n else:\n timer_perf = perf_obj.timer_perf\n elif os.path.exists(filepath):\n try:\n f = open(filepath, 'rb')\n timer_perf = pickle.load(f)\n except Exception as e:\n raise e\n finally:\n if f:\n f.close()\n else:\n raise ValueError + ' arguments exception.'\n\n import matplotlib.pyplot as plt\n for ele in timer_perf:\n plt.figure()\n plt.plot(timer_perf[ele])\n plt.title(ele + \" timer\")\n plt.show()","sub_path":"performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"424574963","text":"lambd = 0.9\r\nkappa = 5 / 5\r\nrho = 0.2\r\n\r\nimport numeric as np\r\nimport math\r\n\r\nimport nef\r\nsys.path.append('/home/bjkomer/nengo-inverted-pendulum')\r\nimport Learn\r\n\r\nclass NonlinearControl(nef.Node):\r\n def __init__(self, name, lambd=lambd, kappa=kappa, rho=rho, D=1):\r\n nef.Node.__init__(self, name)\r\n \r\n self.lambd = lambd\r\n self.kappa = kappa\r\n self.rho = rho\r\n self.x = self.make_input('x', dimensions=D)\r\n self.dx = self.make_input('dx', dimensions=D)\r\n self.ddx = self.make_input('ddx', dimensions=D)\r\n self.x_desired = self.make_input('desired', dimensions=D)\r\n\r\n self.a = np.array([0.0, 0.0, 0.0])\r\n \r\n self.u = self.make_output('u', dimensions=D)\r\n self.s = self.make_output('s', dimensions=D)\r\n self.a_val = self.make_output('a', dimensions=3)\r\n \r\n def tick(self): \r\n s = np.array(self.dx.get()) + self.lambd*(np.array(self.x.get()) - np.array(self.x_desired.get()))\r\n \r\n # shuld be ddx_r, which in this case is -lambda*dx\r\n Y = np.array([self.ddx.get()[0], self.dx.get()[0]*abs(self.dx.get()[0]), math.sin(self.x.get()[0])])\r\n # ddx_r = -lambd * dx\r\n self.u.set(sum(self.a * Y) - self.kappa * s)\r\n \r\n self.s.set(s)\r\n self.a_val.set(self.a)\r\n \r\n dt = 0.001\r\n self.a -= self.rho*(s*Y)*dt\r\n \r\n\r\nclass Physics(nef.Node):\r\n def __init__(self, name):\r\n nef.Node.__init__(self, name)\r\n \r\n \r\n self.x = self.make_output('x', dimensions=1)\r\n self.dx = 
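# --- Illustrative aside ---
# timer_wrapper in performance.py above is a parameterized decorator: calling it
# with a tag returns a decorator that times each call and records the duration
# under that tag. A standalone skeleton (note the original's time.clock() was
# removed in Python 3.8; perf_counter is the modern replacement):
import time

timings = {}

def timed(tag):
    def decorator(func):
        def inner(*args, **kwargs):
            start = time.perf_counter()
            result = func(*args, **kwargs)
            timings.setdefault(tag, []).append(time.perf_counter() - start)
            return result
        return inner
    return decorator

@timed('sleepy')
def nap():
    time.sleep(0.01)

nap()
print(timings['sleepy'])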
self.make_output('dx', dimensions=1)\r\n self.ddx = self.make_output('ddx', dimensions=1)\r\n \r\n self.u = self.make_input('u', dimensions=1)\r\n \r\n def tick(self):\r\n dt = 0.001\r\n x = self.x._value[0]\r\n dx = self.dx._value[0]\r\n u = np.array(self.u.get())[0]\r\n\r\n J = 0.1\r\n b = 0\r\n mgl = 0.1\r\n\r\n ddx = u/J + dx*abs(dx)*b/J + mgl*math.sin(x)/J\r\n dx += ddx*dt\r\n x += dx*dt \r\n \r\n while x>math.pi:\r\n x -= math.pi*2\r\n while x<-math.pi:\r\n x += math.pi*2\r\n \r\n self.x.set([x])\r\n self.dx.set([dx])\r\n self.ddx.set([ddx])\r\n \r\n \r\n#net = nef.Network('Nonlinear Control', seed=1)\r\nnet = nef.Network('Nonlinear Control', seed=13)\r\n\r\nplant = net.add(Physics('plant'))\r\n\r\nnet.make_input('target', [1])\r\n\r\n#state = net.make('state', 300, 3, radius=2)\r\nstate = net.make('state', 300, 3, radius=2)\r\nnet.connect(plant.getOrigin('x'), 'state', index_post=0)\r\nnet.connect(plant.getOrigin('dx'), 'state', index_post=1)\r\nnet.connect(plant.getOrigin('ddx'), 'state', index_post=2)\r\n\r\n#s = net.make('s', 100, 1)\r\ns = net.make('s', 100, 1)\r\n#net.connect('state', 's', transform=[lambd, 1, 0])\r\nnet.connect('state', 's', transform=[lambd, 1, 0])\r\nnet.connect('target', 's', weight=-lambd)\r\n\r\n#net.make('u', 100, 1, radius=1)\r\nnet.make('u', 100, 1, radius=1)\r\nnet.connect('s', 'u', weight=-kappa)\r\n\r\ndef learn(x):\r\n return [0]\r\nnet.connect('state', 'u', func=learn)\r\n\"\"\"\r\nclass Learn(nef.Node):\r\n def __init__(self, name, origin):\r\n nef.Node.__init__(self, name)\r\n self.s = self.make_input('s', dimensions=1, pstc=0.01)\r\n self.Y = self.make_input('Y', dimensions=300, pstc=0.01)\r\n self.origin = origin\r\n self.counter = 0\r\n def tick(self):\r\n self.counter += 1\r\n if self.counter%10 == 0:\r\n delta = -rho * np.array(self.s.get())*0.00001\r\n Y = np.array(list(self.Y.get()))\r\n Y.shape = 300,1\r\n da = np.dot(Y, delta)\r\n decoder = np.array(self.origin.decoders)\r\n self.origin.decoders = decoder + da\r\n print( delta.shape )\r\n print ( Y.shape )\r\n print ( da.shape )\r\n print ( np.array(self.origin.decoders).shape )\r\n\"\"\"\r\n\r\nlearn=net.add(Learn('learn', net.get('state').getOrigin('learn'), s, state))\r\n#net.connect('s', learn.getTermination('s'))\r\n#net.connect(net.get('state').getOrigin('AXON'), learn.getTermination('Y')) \r\n \r\nnet.connect('u', plant.getTermination('u'))\r\n \r\nnet.view()\r\nnet.add_to_nengo() \r\n","sub_path":"adaptivependulum_java.py","file_name":"adaptivependulum_java.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"203243365","text":"import unittest\nimport random\n\nfrom quick import quick_pivot, quick_sort\n\n\nclass QuickSortTests(unittest.TestCase):\n\n def test_quick_pivot(self):\n \"\"\"\n Tests the quick_pivot(list) array\n \"\"\"\n data = [10, 5, 3, 15, 20]\n expected_after_pivot = [3, 5, 10, 15, 20]\n result = quick_pivot(data)\n self.assertEqual(data, expected_after_pivot)\n self.assertEqual(result, 2)\n\n # test single index list\n data = [1]\n expected_after_pivot = [1]\n result = quick_pivot(data)\n self.assertEqual(data, expected_after_pivot)\n self.assertEqual(result, 0)\n\n # test empty list\n data = []\n expected_after_pivot = []\n result = quick_pivot(data)\n self.assertEqual(data, expected_after_pivot)\n self.assertEqual(result, 0)\n\n def test_quick_sort(self):\n \"\"\"\n Tests the quick_sort(list) method\n \"\"\"\n data = [3, 1, 10, 9]\n results = quick_sort(data)\n 
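# the asserts below only inspect the returned value: quick_sort should hand back a list sorted in ascending order\n        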
self.assertIsInstance(results, list)\n self.assertEqual(results, [1, 3, 9, 10])\n data = random.sample(range(0, 100), 10)\n results = quick_sort(data)\n self.assertEqual(results, sorted(data))\n\n # test empty list\n data = []\n results = quick_sort(data)\n self.assertIsInstance(results, list)\n self.assertEqual(results, [])\n\n # test single index list\n data = [1]\n results = quick_sort(data)\n self.assertIsInstance(results, list)\n self.assertEqual(results, [1])\n\n # test two index list\n data = [1, -1]\n results = quick_sort(data)\n self.assertIsInstance(results, list)\n self.assertEqual(results, [-1, 1])\n\n # test three index list\n data = [1, -1, -5]\n results = quick_sort(data)\n self.assertIsInstance(results, list)\n self.assertEqual(results, [-5, -1, 1])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"sorting/Quick_Sort/python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"598910867","text":"# Copyright (c) 2017 CICESE\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Flask dependencies\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import make_response \nfrom flask import request, Response\n\n# Internal dependencies\nfrom lib.NGSIData import NGSIData\n\n# Format dependencies\nimport pprint\n\n# Start listening through port 5000\napp = Flask(__name__);\n\n\"\"\" Null route (/)\nThis route might be used for testing purposes.\n\"\"\"\n@app.route(\"/\")\ndef hello():\n return \"Hello FIWARE\";\n\n\"\"\" queryContext route (/v1/queryContext)\nThis method allows to retrieve NGSI v1 entities by triggering a POST call from which a contextResponses structure.\nFor more reference, please visit NGSI v1 official documentation: http://telefonicaid.github.io/fiware-orion/api/v1/\n\"\"\"\n@app.route(\"/v1/queryContext\", methods = ['POST'])\ndef getData():\n\n\t# Data handler.\n\t_contextResponses = NGSIData();\n\n\t# Remote/Cosmos access data.\n\tusername \t= str(request.headers[\"Referer\"]).split(\"/\")[3];\n\tservice \t= str(request.headers[\"Fiware-Service\"]);\n\tservicePath = str(request.headers[\"Fiware-ServicePath\"]);\n\tq\t\t= str(request.headers[\"Fiware-Queue\"]);\n\tt\t\t= str(request.headers[\"Fiware-Type\"]);\n\ttoken \t\t= str(request.headers[\"X-Auth-Token\"]);\n\n\t# Header settled.\n\tresponse = Response(response = _contextResponses.post(username, service, servicePath, q, t, request.args, token));\n\tresponse.headers[\"Accept\"] \t\t= \"application/json\";\n\tresponse.headers[\"Fiware-Service\"] \t= service;\n\tresponse.headers[\"Fiware-ServicePath\"] \t= servicePath;\n\n\treturn response;\n\nif __name__ == \"__main__\":\n app.run(host = \"0.0.0.0\", debug = False);\n","sub_path":"Bifrost/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"334139222","text":"# -*- coding: utf-8 
-*-\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic.edit import CreateView\nfrom django.core.mail import mail_admins \nfrom feedbacks.models import Feedback\n\nclass FeedbackView(CreateView):\n\tmodel = Feedback\n\ttemplate_name = \"feedback.html\"\n\tsuccess_url = reverse_lazy('feedback')\n\tdef form_valid(self, form):\n\t\tmessage = super(FeedbackView, self).form_valid(form)\n\t\tmes = \"Thank you for your feedback! We will keep in touch with you very soon!\"\n\t\tmessages.success(self.request, mes)\n\t\tmail_admins(self.object.subject, self.object.message)\n\t\treturn message\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(FeedbackView, self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"Feedback\"\n\t\treturn context\n\n\n","sub_path":"feedbacks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"245138442","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\guizero\\TextBox.py\n# Compiled at: 2019-10-24 09:39:32\n# Size of source mod 2**32: 3729 bytes\nfrom tkinter import Entry, StringVar, Text, END\nfrom tkinter.scrolledtext import ScrolledText\nfrom . import utilities as utils\nfrom .base import TextWidget\n\nclass TextBox(TextWidget):\n\n def __init__(self, master, text='', width=10, height=1, grid=None, align=None, visible=True, enabled=None, multiline=False, scrollbar=False, command=None, hide_text=False):\n description = '[TextBox] object with text \"' + str(text) + '\"'\n self._multiline = multiline\n self._text = StringVar()\n self._text.set(str(text))\n if multiline:\n if scrollbar:\n tk = ScrolledText((master.tk), wrap='word')\n else:\n tk = Text(master.tk)\n tk.insert(END, self._text.get())\n else:\n tk = Entry((master.tk), textvariable=(self._text))\n super(TextBox, self).__init__(master, tk, description, grid, align, visible, enabled, width, height)\n self.hide_text = hide_text\n self.update_command(command)\n self.events.set_event('', '', self._key_released)\n\n @property\n def value(self):\n if self._multiline:\n return self.tk.get(1.0, END)\n return self._text.get()\n\n @value.setter\n def value(self, value):\n self._text.set(str(value))\n if self._multiline:\n self.tk.delete(1.0, END)\n self.tk.insert(END, self._text.get())\n self.description = '[TextBox] object with text \"' + str(value) + '\"'\n\n def resize(self, width, height):\n self._width = width\n if width != 'fill':\n self._set_tk_config('width', width)\n elif height is not None:\n if self._multiline:\n self._height = height\n if height != 'fill':\n self.tk.config(height=height)\n elif isinstance(height, int):\n if height > 1:\n utils.error_format('Cannot change the height of a single line TextBox{}'.format(self.description))\n\n @property\n def hide_text(self):\n return self._hide_text\n\n @hide_text.setter\n def hide_text(self, value):\n self._hide_text = value\n if value == True:\n show_value = '*'\n else:\n if value == False:\n show_value = ''\n else:\n show_value = value\n self._set_tk_config('show', show_value)\n\n def _key_released(self, event):\n if self._command:\n args_expected = utils.no_args_expected(self._command)\n if args_expected == 0:\n self._command()\n else:\n if 
args_expected == 1:\n self._command(event.key)\n else:\n utils.error_format('TextBox command function must accept either 0 or 1 arguments.\\nThe current command has {} arguments.'.format(args_expected))\n\n def update_command(self, command):\n if command is None:\n self._command = lambda : None\n else:\n self._command = command\n\n def clear(self):\n self.value = ''\n\n def append(self, text):\n self.value = self.value + str(text)","sub_path":"pycfiles/guizero-1.1.0-py3.7/TextBox.cpython-37.py","file_name":"TextBox.cpython-37.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"55190451","text":"import pygame\nfrom random import randint\nimport numpy as np\n\nfrom game import Game\nfrom settings import *\nfrom .drawing import draw_background, draw_snake, draw_apple, write_text\nfrom .directions import Directions\nfrom .actions import Actions\n\nclass GameConsole(Game):\n\tdef run(self):\n\t\trunning = True\n\t\tself.actor.initialize_game(self)\n\t\twhile running:\n\t\t\taction = Actions.STAY\n\t\t\taction = self.actor.get_action(self)\n\t\t\tif self.gameover:\n\t\t\t\tif self.points>self.record:\n\t\t\t\t\tself.record=self.points\n\t\t\t\tprint(\"Score: \"+str(self.points))\n\t\t\t\tself.sum+=self.points\n\t\t\t\tif self.actor.want_restart():\n\t\t\t\t\tself.reset()\t\t\t\t\t\t\n\t\t\t\t\tif self.actor.is_ai():\n\t\t\t\t\t\tself.actor.replay_new(self.actor.memory)\n\t\t\t\t\t\tself.actor.initialize_game(self)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Record: \"+str(self.record))\n\t\t\t\t\tprint(\"Average: \"+str(self.sum/self.actor.max_retries))\n\t\t\t\t\trunning = False\n\n\t\t\tif running and not self.gameover:\n\t\t\t\tself.moveSnake(action)\n\n\t\tpygame.quit()\n\n\n\n\t\t\t\t\n\n","sub_path":"game/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"230672007","text":"import re\nimport sys\nimport math\nimport multiprocessing as mp\nimport json\n\ntens_path = \"/Volumes/tensusers/timzee/other/\" if sys.platform == \"darwin\" else \"/vol/tensusers/timzee/other/\"\n\nprint(\"Loading CHN1 data\")\n# keep differences in upper/lower case\nchn_uni = {}\nwith open(tens_path + \"CHN-ngrams1.0/chn1n_sorted.tab\", \"r\") as f:\n for l in f:\n freq, form = l[:-1].split(\"\\t\")\n if len(form) > 0:\n chn_uni[form] = int(freq)\n\nkeys = list(chn_uni.keys())\n\nnum_cores = 20\nnum_lex_lines = len(keys)\n# num_lex_lines = 1000\ncore_dict = {}\nfor i in range(num_cores):\n core_dict[str(i + 1)] = {}\n core_dict[str(i + 1)][\"start\"] = int(num_lex_lines / num_cores) * i + 1\n if i + 1 != num_cores:\n core_dict[str(i + 1)][\"end\"] = int(num_lex_lines / num_cores) * (i + 1)\n else:\n core_dict[str(i + 1)][\"end\"] = num_lex_lines\n\nverbal_stem_infs = {}\nwith open(tens_path + \"suitable_verbal_stems2.txt\", \"r\") as f:\n for l in f:\n verbal_stem_infs[l[:-1].split(\",\")[0]] = {\"subtlex_l_freq\": l[:-1].split(\",\")[-1], \"subtlex_f_freq\": l[:-1].split(\",\")[2]}\n\nprint(\"Loading CELEX data\")\ncelex_vs = {}\ncelex = {}\nwith open(tens_path + \"DML.CD\", \"r\") as f:\n for l in f:\n l_id, wrd, lem_freq = l.split(\"\\\\\")[:3]\n if re.search(r'^[0-9]+\\\\[a-z]+[^(en)io]en\\\\.*[a-z]\\\\.*\\)\\[V\\]\\\\', l):\n celex[l_id] = {\"inf_ort\": wrd, \"celex_l_freq\": lem_freq}\n elif wrd in verbal_stem_infs:\n celex_vs[l_id] = {\"inf_ort\": wrd, \"celex_l_freq\": 
lem_freq}\n\nf_id_list_vs = []\nf_id_list = []\nwith open(tens_path + \"DMW.CD\", \"r\") as f:\n for l in f:\n l_list = l[:-1].split(\"\\\\\")\n f_id = l_list[0]\n l_id = l_list[3]\n m_type = l_list[4]\n if l_id in celex and m_type == \"tm\":\n f_id_list.append(f_id)\n elif l_id in celex_vs and m_type == \"te1\":\n f_id_list_vs.append(f_id)\n\ncelex2_vs = {}\ncelex2 = {}\nwith open(tens_path + \"DPW.CD\", \"r\") as f:\n for l in f:\n f_id, wrd, f_freq, l_id, phon, syl_struc = l.split(\"\\\\\")[:6]\n if f_id in f_id_list and l_id in celex:\n syls = [i for i in phon.split(\"-\") if i != \"\"]\n num_syl = len(syls)\n counter = 0\n for syl in syls:\n counter += 1\n if \"'\" in syl:\n break\n stress = counter\n celex2[wrd] = {\"subtlex_l_freq\": 0, \"subtlex_f_freq\": 0, \"celex_l_freq\": celex[l_id][\"celex_l_freq\"], \"celex_f_freq\": f_freq, \"n_syl\": num_syl, \"stress\": stress, \"phon\": phon, \"syl_struc\": syl_struc}\n elif f_id in f_id_list_vs and l_id in celex_vs:\n syls = [i for i in phon.split(\"-\") if i != \"\"]\n num_syl = len(syls)\n counter = 0\n for syl in syls:\n counter += 1\n if \"'\" in syl:\n break\n stress = counter\n celex2_vs[wrd] = {\"subtlex_l_freq\": verbal_stem_infs[wrd + \"en\"][\"subtlex_l_freq\"], \"subtlex_f_freq\": verbal_stem_infs[wrd + \"en\"][\"subtlex_f_freq\"], \"celex_l_freq\": celex_vs[l_id][\"celex_l_freq\"], \"celex_f_freq\": f_freq, \"n_syl\": num_syl, \"stress\": stress, \"phon\": phon, \"syl_struc\": syl_struc}\n\nprint(\"Loading SUBTLEX data\")\nwith open(tens_path + \"SUBTLEX-NL.master.txt\", \"r\") as f:\n for l in f:\n l_list = l[:-1].split(\"\\t\")\n if l_list[0] in celex2 and l_list[1] == \"WW\":\n celex2[l_list[0]][\"subtlex_l_freq\"] += int(l_list[4])\n elif l_list[2] in celex2 and l_list[3].split(\",\")[0] in [\"inf\", \"pv\"]:\n celex2[l_list[2]][\"subtlex_f_freq\"] += int(l_list[4])\n\n# get measures for conditional probabilities\n# probability of verb given previous context\n# probability of following context given verb\n#\n# first get frequency of sentence starts (and thereby automatically number of sentence ends)\n# find all words that occur more frequently with first letter lowercase than uppercase\n# chn_lower_upper = sum of the frequency of all occurences of lowercase > uppercase words beginning with uppercase\n\n# now we still need to account for words that start more frequently with uppercase (e.g. 
names) used at the start of sentence\n# we approximate this number as follows:\n# chn_lower_lower = sum of the frequency of all occurences of lowercase > uppercase words beginning with lowercase\n# calculate average proportion of sentence starts: chn_upper_lower_prop = chn_lower_upper / (chn_lower_upper + chn_lower_lower)\n# chn_upper_upper = sum of the frequency of all occurences of lowercase <= uppercase words beginning with uppercase\n# chn_upper_upper_extra = chn_upper_lower_prop * chn_upper_upper\n# chn_total_sentences = chn_lower_upper + chn_upper_upper_extra\n\n\ndef getNumSentences(from_w, to_w, core):\n prev_words = []\n# upper_dom = []\n chn_upper_upper = 0\n# lower_dom = []\n chn_lower_upper = 0\n chn_lower_lower = 0\n counter = 0\n for w in keys[from_w:to_w + 1]:\n counter += 1\n if counter % 1000 == 0:\n print(core, counter)\n if w.lower() in prev_words:\n continue\n prev_words.append(w.lower())\n if w[0].isupper():\n upper_f = chn_uni[w]\n lower_w = w.lower()\n lower_f = chn_uni[lower_w] if lower_w in chn_uni else 0\n elif w.islower():\n lower_f = chn_uni[w]\n upper_w = w[0].upper() + w[1:]\n upper_f = chn_uni[upper_w] if upper_w in chn_uni else 0\n upper_w2 = w.upper()\n upper_f += chn_uni[upper_w2] if upper_w2 in chn_uni else 0\n else:\n continue\n if lower_f > upper_f:\n# lower_dom.append(w.lower())\n chn_lower_upper += upper_f\n chn_lower_lower += lower_f\n else:\n# upper_dom.append(w.lower())\n chn_upper_upper += upper_f\n q.put([chn_upper_upper, chn_lower_upper, chn_lower_lower])\n\n\nprint(\"Multiprocessing\")\n\nq = mp.Queue()\n\njobs = []\nfor core in range(num_cores):\n core_n = str(core + 1)\n s_word = core_dict[core_n][\"start\"]\n e_word = core_dict[core_n][\"end\"]\n p = mp.Process(target=getNumSentences, args=[s_word, e_word, core_n])\n jobs.append(p)\n p.start()\n\nresults = []\nwhile len(results) < num_cores:\n results.append(q.get())\n\nfor job in jobs:\n job.join()\n\nchn_upper_upper = 0\nchn_lower_upper = 0\nchn_lower_lower = 0\nfor r in results:\n chn_upper_upper += r[0]\n chn_lower_upper += r[1]\n chn_lower_lower += r[2]\n\nchn_upper_lower_prop = chn_lower_upper / (chn_lower_upper + chn_lower_lower)\nchn_upper_upper_extra = chn_upper_lower_prop * chn_upper_upper\nchn_total_sentences = chn_lower_upper + chn_upper_upper_extra\n\nprint(\"Loading CHN2 data\")\nchn_bi = {}\nwith open(tens_path + \"CHN-ngrams1.0/chn2n_sorted.tab\", \"r\") as f:\n for l in f:\n freq, form = l[:-1].split(\"\\t\")\n if len(form) > 0:\n chn_bi[form] = int(freq)\n\nkeys_bi = list(chn_bi.keys())\na_A_index = keys_bi.index(\"a A\")\n\n\ndef getBiFreqs(verb, next_w, prev_w):\n verb_upper = verb[0].upper() + verb[1:]\n chn_upper = chn_uni[verb_upper] if verb_upper in chn_uni else 0\n chn_total = chn_upper + chn_uni[verb] if verb in chn_uni else chn_upper\n chn_before_pron = chn_bi[verb + \" \" + next_w] if verb + \" \" + next_w in chn_bi else 0\n chn_before_pron += chn_bi[verb_upper + \" \" + next_w] if verb_upper + \" \" + next_w in chn_bi else 0\n chn_after_pron = chn_bi[prev_w + \" \" + verb] if prev_w + \" \" + verb in chn_bi else 0\n chn_after_pron += chn_bi[prev_w[0].upper() + prev_w[1:] + \" \" + verb] if prev_w[0].upper() + prev_w[1:] + \" \" + verb in chn_bi else 0\n chn_before_end = 0\n for bi in keys_bi[a_A_index:]:\n word1, word2 = bi.split(\" \")\n if word1 == verb and len(word2) > 0:\n if re.search(r'[A-Z]', word2[0]):\n chn_before_end += chn_bi[bi]\n else:\n break\n return chn_upper, chn_total, chn_before_pron, chn_after_pron, chn_before_end\n\n\nverb_keys = 
list(celex2.keys())\n\n\ndef getVUP(verb, phon): # we're taking lexical stress into account, so UP of be're.ken is 4 because be,re.de'ne.ren has different main stress on [e]\n phon_l = [s.strip(\"'\") for s in phon.split(\"-\")]\n n_phon = len(\"\".join(phon_l))\n if n_phon == 0:\n return None\n found_letter = False\n wrong_letter = False\n max_vup = 0\n v_i = -1\n while not (found_letter and wrong_letter):\n v_i += 1\n v = verb_keys[v_i]\n if verb == v:\n continue\n wrong_letter = verb[0] != v[0]\n if not wrong_letter:\n if not found_letter:\n found_letter = True\n vup = 0\n v_phon = celex2[v][\"phon\"]\n p_mismatch = False\n for p_i, p in enumerate(phon, 0):\n if p_i >= len(v_phon):\n break\n if p != v_phon[p_i]:\n p_mismatch = True\n break\n else:\n if p not in [\"'\", \"-\"]:\n vup += 1\n if vup >= n_phon: # we found a verb that contains all phones of the target verb, i.e., target verb does not have a UP\n max_vup = 0\n break\n if p_mismatch and vup + 1 > max_vup and int(celex2[v][\"celex_l_freq\"]) > 0: # we only count words with celex_freq > 0\n max_vup = vup + 1\n if v_i + 1 == len(verb_keys):\n wrong_letter = True\n return max_vup\n\n\nprint(\"Get probability data for verbal stems\")\n\nchn_total_jij = chn_uni[\"jij\"]\nchn_total_jij += chn_uni[\"Jij\"]\nchn_total_ik = chn_uni[\"ik\"]\nchn_total_ik += chn_uni[\"Ik\"]\n\nfor vs in celex2_vs:\n celex2_vs[vs][\"chn_upper\"], celex2_vs[vs][\"chn_total\"], celex2_vs[vs][\"chn_before_jij\"], celex2_vs[vs][\"chn_after_ik\"], celex2_vs[vs][\"chn_before_end\"] = getBiFreqs(vs, \"jij\", \"ik\")\n celex2_vs[vs][\"pr_verb_given_start\"] = celex2_vs[vs][\"chn_upper\"] / chn_total_sentences\n celex2_vs[vs][\"pr_verb_given_jij\"] = celex2_vs[vs][\"chn_before_jij\"] / chn_total_jij\n celex2_vs[vs][\"pr_jij_given_verb\"] = celex2_vs[vs][\"chn_before_jij\"] / celex2_vs[vs][\"chn_total\"] if celex2_vs[vs][\"chn_total\"] > 0 else None\n celex2_vs[vs][\"pr_verb_given_ik\"] = celex2_vs[vs][\"chn_after_ik\"] / chn_total_ik\n celex2_vs[vs][\"pr_verb_given_end\"] = celex2_vs[vs][\"chn_before_end\"] / chn_total_sentences\n celex2_vs[vs][\"pr_end_given_verb\"] = celex2_vs[vs][\"chn_before_end\"] / celex2_vs[vs][\"chn_total\"] if celex2_vs[vs][\"chn_total\"] > 0 else None\n celex2_vs[vs][\"verb_UP\"] = getVUP(vs, celex2_vs[vs][\"phon\"])\n\nprint(\"Get probability data for plural verbs\")\n\nchn_total_jullie = chn_uni[\"jullie\"]\nchn_total_jullie += chn_uni[\"Jullie\"]\nchn_total_we = chn_uni[\"we\"]\nchn_total_we += chn_uni[\"We\"]\n\n# note that celex_f_freq in celex2_vs represents different inflections than celex_f_freq in celex2\n# find matches in terms of n_syl, same length of vowels, stress, frequencies\nsyl_matches = {}\nfor i in celex2_vs:\n l_freq = float(celex2_vs[i]['subtlex_l_freq'])\n log_l_freq = math.log10(l_freq)\n# f_freq = float(celex2_vs[i]['subtlex_f_freq'])\n# f_prop = f_freq / l_freq\n syl_matches[i] = {}\n for j in celex2:\n if celex2_vs[i][\"stress\"] == celex2[j][\"stress\"] and \"\".join(celex2_vs[i][\"syl_struc\"].split(\"C\")) == \"\".join(celex2[j][\"syl_struc\"].split(\"C\")):\n m_l_freq = float(celex2[j]['subtlex_l_freq'])\n if m_l_freq > 0:\n m_log_l_freq = math.log10(m_l_freq)\n m_f_freq = float(celex2[j]['subtlex_f_freq'])\n# m_f_prop = m_f_freq / m_l_freq\n if m_log_l_freq > (log_l_freq - 0.4) and m_log_l_freq < (log_l_freq + 0.4): # and m_f_prop > (f_prop - 0.1) and m_f_prop < (f_prop + 0.1):\n syl_matches[i][j] = {'subtlex_l_freq': m_l_freq, 'subtlex_f_freq': m_f_freq}\n syl_matches[i][j][\"chn_upper\"], 
syl_matches[i][j][\"chn_total\"], syl_matches[i][j][\"chn_before_jullie\"], syl_matches[i][j][\"chn_after_we\"], syl_matches[i][j][\"chn_before_end\"] = getBiFreqs(j, \"jullie\", \"we\")\n syl_matches[i][j][\"pr_verb_given_start\"] = syl_matches[i][j][\"chn_upper\"] / chn_total_sentences\n syl_matches[i][j][\"pr_verb_given_jullie\"] = syl_matches[i][j][\"chn_before_jullie\"] / chn_total_jullie\n syl_matches[i][j][\"pr_jullie_given_verb\"] = syl_matches[i][j][\"chn_before_jullie\"] / syl_matches[i][j][\"chn_total\"] if syl_matches[i][j][\"chn_total\"] > 0 else None\n syl_matches[i][j][\"pr_verb_given_we\"] = syl_matches[i][j][\"chn_after_we\"] / chn_total_we\n syl_matches[i][j][\"pr_verb_given_end\"] = syl_matches[i][j][\"chn_before_end\"] / chn_total_sentences\n syl_matches[i][j][\"pr_end_given_verb\"] = syl_matches[i][j][\"chn_before_end\"] / syl_matches[i][j][\"chn_total\"] if syl_matches[i][j][\"chn_total\"] > 0 else None\n syl_matches[i][j][\"verb_UP\"] = getVUP(j, celex2[j][\"phon\"])\n\n\nprint(\"Writing data\")\n\nwith open(tens_path + \"verbal_stems.json\", \"w\") as f:\n json.dump(celex2_vs, f)\n\nwith open(tens_path + \"suitable_verbs.json\", \"w\") as f:\n json.dump(syl_matches, f)\n\n# ms = []\n# for i in syl_matches:\n# ms.extend(list(syl_matches[i].keys()))\n\n# msu = list(set(ms))\n","sub_path":"findSuitableWords_multi.py","file_name":"findSuitableWords_multi.py","file_ext":"py","file_size_in_byte":13266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"201200263","text":"import serial\nimport time\n\ndef wait_for_ready():\n while True:\n serial.write(\"\\n\")\n time.sleep(0.75)\n data = serial.read()\n if data == ':':\n break\n else:\n time.sleep(0.1)\n\n\nserial = serial.Serial(\"/dev/ttyAMA0\", baudrate=9600)\n#serial.open()\nserial.write(\"\\n\")\ntime.sleep(1)\nserial.write(\"V15\\n\") # Adjust volume\n\nwait_for_ready()\ninput_text = input(\"Input text: \")\nbuffer = \"S%s\" % (input_text)\nserial.write(buffer)\ntime.sleep(0.5)\nserial.close()\n","sub_path":"emic_text.py","file_name":"emic_text.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"210945730","text":"import functools\n\nfrom django.contrib.admin import helpers\nfrom django.template.response import TemplateResponse\n\n\ndef action_form(form_class=None):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, request, queryset):\n form = form_class()\n\n if 'confirm' in request.POST and request.POST:\n form = form_class(request.POST)\n if form.is_valid():\n obj_count = func(self, request, queryset, form)\n self.message_user(request, '%s objects updated' % obj_count)\n return None\n\n context = dict(\n self.admin_site.each_context(request),\n title=form_class.title,\n action=func.__name__,\n opts=self.model._meta,\n queryset=queryset, form=form,\n action_checkbox_name=helpers.ACTION_CHECKBOX_NAME)\n\n return TemplateResponse(request, 'form_action_confirmation.html', context)\n\n wrapper.short_description = form_class.title\n\n return wrapper\n\n return decorator","sub_path":"risks/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"337664771","text":"from flask import Flask\nfrom flask import request\nfrom flask import jsonify\nfrom PIL import Image\nimport numpy as np\nimport cv2\nimport keras\nfrom keras.models 
import Model\nimport matplotlib.pyplot as plt\nfrom preprocessing import *\nfrom flask import jsonify\nimport shutil\n\n\napp = Flask(__name__)\n\n\n\ndef predictByModel(target,compare,model):\n target = convert_document_to_patches(target)\n preparePatchesToModel(target)\n print('target ',np.array(target).shape)\n for i in range(len(compare)):\n compare[i] = convert_document_to_patches(compare[i])\n preparePatchesToModel(compare[i])\n print('compare ',np.array(compare[0]).shape)\n results=[]\n for i in range(len(compare)):\n predictions = model.predict([np.array(target),np.array(compare[i])]) > 0.70\n finalResult = np.sum(predictions) / 15 \n results.append(finalResult)\n print(results)\n return jsonify(results)\n \n\n@app.route('/flask', methods=['POST'])\ndef verifyWriter():\n modelName = request.form['model']\n userID = request.form['id']\n targetDoc = request.files['targetDoc'].read()\n targetDoc = np.fromstring(targetDoc, np.uint8)\n targetDoc = cv2.imdecode(targetDoc,cv2.IMREAD_COLOR)\n targetDoc = cv2.resize(targetDoc,(targetDoc.shape[1],targetDoc.shape[0]))\n points = request.form['targetPoints'].split(',')\n targetDoc = targetDoc[int(points[1]):int(points[3]),int(points[0]):int(points[2])]\n compareDocs = []\n cmpr_points_array = request.form.getlist('comparePoints')\n i=0\n for file in request.files.getlist('compareDocs'):\n img = file.read()\n img = np.fromstring(img, np.uint8)\n img = cv2.imdecode(img,cv2.IMREAD_COLOR)\n img = cv2.resize(img,(img.shape[1],img.shape[0]))\n points = cmpr_points_array[i].split(',')\n i+=1\n img = img[int(points[1]):int(points[3]),int(points[0]):int(points[2])]\n compareDocs.append(img)\n if(modelName == 'Default-model'):\n mdl_uploads_dir = os.path.join(app.instance_path[:len(app.instance_path)-8],'default-model')\n model = keras.models.load_model(mdl_uploads_dir + '/' + os.listdir(mdl_uploads_dir)[0])\n else:\n model = keras.models.load_model(os.path.join(app.instance_path, 'models' + '/' + userID + '/' + modelName ))\n results = predictByModel(targetDoc,compareDocs,model)\n return results\n\n\n@app.route('/upload', methods=['POST'])\ndef uploadModel():\n uploads_dir = os.path.join(app.instance_path, 'models' + '/' + request.form['id'])\n if(not os.path.isdir(uploads_dir)):\n os.makedirs(uploads_dir)\n modelFile = request.files['model']\n modelFile.save(os.path.join( uploads_dir , modelFile.filename))\n return \"Uploaded to flask succesfully\"\n\n\n@app.route('/get-user-models', methods=['POST'])\ndef getModels():\n usr_uploads_dir = os.path.join(app.instance_path, 'models' + '/' + request.form['id'])\n if(not os.path.isdir(usr_uploads_dir)):\n os.makedirs(usr_uploads_dir)\n return jsonify(os.listdir(usr_uploads_dir))\n\n@app.route('/delete-model', methods=['POST'])\ndef deleteModel():\n usr_uploads_dir = os.path.join(app.instance_path, 'models' + '/' + request.form['id'])\n if(not os.path.isdir(usr_uploads_dir)):\n return \"Error\"\n os.remove(usr_uploads_dir + \"/\" + request.form['modelName'])\n return jsonify(os.listdir(usr_uploads_dir))\n\n\n@app.route('/upload-default', methods=['POST'])\ndef uploadDefualtModel():\n modelFile = request.files['model'] \n mdl_uploads_dir = os.path.join(app.instance_path[:len(app.instance_path)-8],'default-model')\n os.remove(mdl_uploads_dir + \"/\" + os.listdir(mdl_uploads_dir)[0])\n modelFile.save(os.path.join(mdl_uploads_dir , 'default-model.h5'))\n return \"success\"\n\n@app.route('/delete-user-models', methods=['POST'])\ndef deleteAllModels():\n usr_uploads_dir = os.path.join(app.instance_path, 'models' + 
'/' + request.form['id'])\n    if(not os.path.isdir(usr_uploads_dir)):\n        return \"Error\"\n    shutil.rmtree(usr_uploads_dir)\n    return \"Successfully deleted\"\n\n\nif __name__ == \"__main__\":\n    app.run(port=5000, debug=True)\n","sub_path":"flask-server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"252977759","text":"import os\nimport datetime\nimport time\n\ndef set_time(year,month,day,hour,min,sec):\n\n    # parse the date\n    _date = datetime.datetime.strptime(\"{}/{}/{}\".format(year,month,day),\"%Y/%m/%d\")\n    # build the time string (HH:MM:SS)\n    _time = '{}:{}:{}'.format(hour,min,sec)\n    # apply the time and date via the Windows time/date commands\n    # (the date format accepted by 'date' depends on the system locale)\n    os.system('time {}'.format(_time))\n    os.system('date {}'.format(_date.strftime('%Y-%m-%d')))\n\n\ndef get():\n\n    while True:\n\n        year=input('input year:')\n        month=input('input month:')\n        day=input('input day:')\n        hour=input('input hour:')\n        min=input('input min:')\n        if year=='q':\n            exit()\n        print('set',year,month,day,hour,min)\n\n        set_time(year,month,day,hour,min,sec=15)\n\n\nget()\n","sub_path":"job/邮件改附件 msg/com_time.py","file_name":"com_time.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"260130271","text":"import numpy as np\nimport nibabel as nib\nfrom data_loaders import Dataset\nfrom models import Modified3DUNet\nimport os\nimport torch\nfrom collections import OrderedDict\n\npreprocessed_valid_data_path = r'C:/users/artur/Desktop/UCL/Brats2019/Data/Preprocessed_Validation'\nmodel_saves_path = r'C:/users/artur/Desktop/UCL/Brats2019/Model_Saves/V3_Prepro_Aug_CrossEn' # Folder where the model saves should be stored in the format Fold__Epoch_.tar\nepoch_nr = 180 # Epoch at which to take the model saves (determined from loss plots)\nparallel_training = 0 # Specify if the model was trained with multiple GPUs (slightly more code needed to load model save)\nsave_results_path = r'C:/users/artur/Desktop/UCL/Brats2019/Seg_Results'\n\n# Get paths and names (IDS) of folders that store the multimodal training data\nfolder_paths = []\nfolder_ids = []\nfor subdir in os.listdir(preprocessed_valid_data_path):\n    folder_paths.append(os.path.join(preprocessed_valid_data_path, subdir))\n    folder_ids.append(subdir)\n\nvalid_set = Dataset(folder_paths, folder_ids, False)\n\n# Use GPU\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\ntorch.backends.cudnn.benchmark = True\n\nmodel = Modified3DUNet(4, 4, 16)\ncheckpoint = torch.load(\"{}/Fold_{}_Epoch_{}.tar\".format(model_saves_path, 1, epoch_nr))\n\nif parallel_training: # If training was done on multiple GPUs have to rename keys in saved model_state_dict:\n    new_state_dict = OrderedDict()\n    for k, v in checkpoint['model_state_dict'].items():\n        name = k[7:] # remove module.\n        new_state_dict[name] = v\n    model.load_state_dict(new_state_dict)\nelse:\n    model.load_state_dict(checkpoint['model_state_dict'])\nmodel.to(device)\nmodel.eval()\n\nfor idx in range(0, len(folder_ids)):\n    with torch.no_grad():\n        scans = valid_set[idx]\n        scans = np.expand_dims(scans, 0)\n        scans = torch.from_numpy(scans).to(device)\n        output, seg_layer = model(scans)\n        seg_layer = seg_layer.squeeze()\n        _, indices = seg_layer.max(0)\n        indices = indices.cpu().detach().numpy()\n        indices[indices == 3] = 4\n        img = nib.Nifti1Image(indices, np.eye(4))\n        nib.save(img, \"{}/{}.nii.gz\".format(save_results_path, folder_ids[idx]))\n        print('Saved example {}/{}'.format(idx + 1, 
len(folder_ids)))","sub_path":"segment_valid.py","file_name":"segment_valid.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"651159194","text":"\"\"\"\nScript to present DATIM MER metadata\n\nSupported Formats: html, xml, csv, json\nSupported Collections: Refer to DatimConstants.MER_OCL_EXPORT_DEFS (there are more than 60 options)\nOpenHIM Endpoint Request Format: /datim-mer?collection=____&format=____\n\nThis script fetches an export from OCL for the latest released version of the specified collection.\nIf it seems like you're looking at old data, check the collection version first.\n\"\"\"\nimport sys\nimport settings\nimport datim.datimshow\nimport datim.datimshowmer\n\n\n# Default Script Settings\nverbosity = 0 # 0=none, 1=some, 2=all\nrun_ocl_offline = False # Set to true to use local copies of ocl exports\nexport_format = datim.datimshow.DatimShow.DATIM_FORMAT_CSV\nrepo_id = 'MER-R-MOH-Facility-FY18' # e.g. MER-R-Operating-Unit-Level-IM-FY17Q2\n\n# OCL Settings - JetStream Staging user=datim-admin\noclenv = settings.oclenv\noclapitoken = settings.oclapitoken\n\n# Optionally set arguments from the command line\nif sys.argv and len(sys.argv) > 2:\n export_format = datim.datimshow.DatimShow.get_format_from_string(sys.argv[1])\n repo_id = sys.argv[2]\n\n# Create Show object and run\ndatim_show = datim.datimshowmer.DatimShowMer(\n oclenv=oclenv, oclapitoken=oclapitoken, run_ocl_offline=run_ocl_offline, verbosity=verbosity)\ndatim_show.get(repo_id=repo_id, export_format=export_format)\n","sub_path":"showmer.py","file_name":"showmer.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"242426712","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom multimedia_test_case import TestMultimedia\n\n\nclass TestLocalVideoPlaylistSelection(TestMultimedia):\n def __init__(self, doc, level, owner):\n super(TestLocalVideoPlaylistSelection, self).__init__(doc, level, owner)\n self.package_name = \"cn.whaley.cases.Helios.media.video.local.LocalTestSets\"\n self.test_content = \"testSelectionForPlayList\"\n\n def execute(self):\n super(TestLocalVideoPlaylistSelection, self).execute()\n\n\nif __name__ == \"__main__\":\n TestLocalVideoPlaylistSelection(\n \"Play shark tale first then select and check the super live then re-select and check shark tale\", 'p1',\n \"wangdd\").run()\n","sub_path":"Python_Java_UIautomator/case/platform/multiMedia/test_local_videoPlaylist_selection.py","file_name":"test_local_videoPlaylist_selection.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"72954381","text":"import datetime\nfrom google.appengine.ext import ndb\n\n__author__ = 'simonhutton'\n\n\nclass Configuration(ndb.Model):\n CACHE_TIME = datetime.timedelta(minutes=5)\n\n _INSTANCE = None\n _INSTANCE_AGE = None\n\n public_stripe_key = ndb.StringProperty()\n private_stripe_key = ndb.StringProperty()\n web_debug = ndb.BooleanProperty()\n\n\n @classmethod\n def get_instance(cls):\n now = datetime.datetime.now()\n if not cls._INSTANCE or cls._INSTANCE_AGE + cls.CACHE_TIME < now:\n cls._INSTANCE = cls.get_or_insert('config')\n\n if not cls._INSTANCE.public_stripe_key:\n cls._INSTANCE.public_stripe_key = 'pk_SPjp98jtIuyUn3t6DJtvZSc5lBeqr'\n cls._INSTANCE.private_stripe_key = 'c1r52uIFDTaniTJc29tMbGGo0CGJeDtu'\n 
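# web debug is the seeded default for a brand-new config entity; values stored in the datastore win on later reads\n                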
cls._INSTANCE.web_debug = True\n\n cls._INSTANCE.put()\n\n cls._INSTANCE_AGE = now\n return cls._INSTANCE\n\n","sub_path":"configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"371467859","text":"# Copyright SDElements Inc\n# Extensible two way integration with TFS\n\nfrom sdetools.alm_integration.alm_plugin_base import AlmConnector, AlmException, AlmTask, STATUS\nfrom sdetools.extlib import markdown\nfrom sdetools.extlib.iso8601.iso8601 import parse\nfrom sdetools.sdelib.commons import json, urlquote_str\nfrom sdetools.sdelib.restclient import APIError, APIHTTPError, RESTBase, URLRequest\n\nfrom sdetools.sdelib import log_mgr\nlogger = log_mgr.mods.add_mod(__name__)\n\nTFS_DEFAULT_PRIORITY_MAP = {\n '9-10': '1',\n '7-8': '2',\n '3-6': '3',\n '1-2': '4',\n}\n\nTFS_DEFAULT_ISSUE_TEMPLATE = {\n 'Title': '${task_alm_title}',\n 'Description': '${task_richcontent}',\n 'Priority': '${task_priority_translated}'\n}\n\n\nclass TFSAPIBase(RESTBase):\n def __init__(self, config):\n super(TFSAPIBase, self).__init__('alm', 'TFS', config, '')\n self.set_auth_mode('ntlm')\n\n def parse_error(self, result):\n # Try to parse the response as JSON\n try:\n error_response = json.loads(result)\n except ValueError:\n return result\n\n # Send back all the error messages in one go\n if 'message' in error_response and error_response['message']:\n return error_response['message']\n else:\n return result\n\n\nclass TFSTask(AlmTask):\n \"\"\" Representation of a task in TFS\"\"\"\n\n def __init__(self, task_id, work_item, done_statuses, link, reference, name):\n self.task_id = task_id\n self.work_item = work_item\n self.done_statuses = done_statuses # comma-separated list\n self.priority = None\n self.status = None\n self.timestamp = None\n self.revision = work_item['rev']\n\n # task reference data\n self.link = link\n self.reference = reference\n self.name = name\n\n for item in work_item['fields']:\n if item == 'System.State':\n self.status = work_item['fields']['System.State']\n elif item == 'System.ChangedDate':\n self.timestamp = work_item['fields']['System.ChangedDate']\n\n def get_revision(self):\n return self.revision\n\n def get_task_id(self):\n return self.task_id\n\n def get_alm_id(self):\n return self.work_item['id']\n\n def get_status(self):\n \"\"\" Translates Mingle status into SDE status \"\"\"\n if self.status.lower() in [status.lower() for status in self.done_statuses]:\n return STATUS.DONE\n else:\n return STATUS.TODO\n\n def get_timestamp(self):\n \"\"\" Returns a datetime object \"\"\"\n return parse(self.timestamp)\n\n\nclass TFSConnector(AlmConnector):\n alm_name = 'TFS'\n default_issue_template = TFS_DEFAULT_ISSUE_TEMPLATE\n default_priority_map = TFS_DEFAULT_PRIORITY_MAP\n\n supports_references = True\n\n def __init__(self, config, alm_plugin):\n \"\"\" Initializes connection to TFS \"\"\"\n super(TFSConnector, self).__init__(config, alm_plugin)\n\n config.opts.add('tfs_iterationpath', 'Iteration Path in TFS',\n default='')\n config.opts.add('tfs_issue_type', 'Issue Type to create in TFS',\n default='User Story')\n config.opts.add('tfs_new_status', 'Status to set for new tasks in TFS',\n default='New')\n config.opts.add('tfs_reopen_status', 'Status to reopen tasks in TFS',\n default='New')\n config.opts.add('tfs_done_statuses', 'Statuses that signify a task is Done in TFS',\n default='Done')\n config.opts.add('tfs_collection', 'Name of collection in 
TFS',\n default='DefaultCollection')\n config.opts.add('alm_issue_label', 'Tag applied to issue in TFS',\n default='SD-Elements')\n\n self.project_collections = None\n self.projects = None\n\n def initialize(self):\n\n super(TFSConnector, self).initialize()\n\n for item in ['tfs_done_statuses',\n 'tfs_iterationpath',\n 'tfs_issue_type',\n 'tfs_new_status',\n 'alm_issue_label']:\n\n if not self.config[item]:\n raise AlmException('Missing %s in configuration' % item)\n\n self.config.process_list_config('tfs_done_statuses')\n\n self.mark_down_converter = markdown.Markdown(safe_mode=\"escape\")\n\n def alm_connect_server(self):\n try:\n self.project_collections = self.alm_plugin.call_api('_apis/projectcollections')\n except APIError as err:\n raise AlmException('Unable to connect to TFS. Please check server URL, ID, password. Reason: %s' % err)\n\n def alm_connect_project(self):\n\n # Check if project name is valid\n try:\n self.projects = self.alm_plugin.call_api('%s/_apis/projects' % self.config['tfs_collection'])\n except APIError as err:\n raise AlmException('Unable to validate project. Check that %s is a valid collection. Reason: %s' % (self.config['tfs_collection'], err))\n\n found_project = False\n for project in self.projects['value']:\n if project['name'] == self.config['alm_project']:\n found_project = True\n break\n\n if not found_project:\n raise AlmException('Project %s could not be found' % self.config['alm_project'])\n\n self._get_tfs_wit_fields()\n\n def alm_validate_configurations(self):\n invalid_fields = []\n for field_name in self.config['alm_custom_fields'].keys():\n if not self.user_friendly_name_map.get(field_name, None):\n invalid_fields.append(field_name)\n\n if invalid_fields:\n raise AlmException('Invalid field(s) specified: {0}'.format(', '.join(invalid_fields)))\n\n def get_work_item_url(self, work_item_id):\n return '%s/_apis/wit/workItems/%s' % (self.config['tfs_collection'], work_item_id)\n\n def get_task_object(self, task, work_item):\n return TFSTask(task['task_id'],\n work_item,\n self.config['tfs_done_statuses'],\n work_item['_links']['html'],\n work_item['id'],\n work_item['fields']['System.Title'])\n\n def alm_get_task_by_reference(self, task, reference):\n work_item_url = self.get_work_item_url(reference['reference'])\n try:\n work_item = self.alm_plugin.call_api(work_item_url)\n return self.get_task_object(task, work_item)\n\n except APIHTTPError as e:\n if e.code == 404:\n return None\n raise\n\n def alm_get_task_legacy(self, task):\n wiql_query = ('Select [Id] From WorkItems Where [System.IterationPath]=\\'%s\\' and [System.AreaPath]=\\'%s\\' '\n 'and [Title] Contains \\'%s\\'' % (self.config['tfs_iterationpath'], self.config['alm_project'],\n task['alm_fixed_title']))\n data = {\n \"query\": wiql_query\n }\n\n target_url = '%s/%s/_apis/wit/wiql?api-version=1.0' % \\\n (self.config['tfs_collection'], urlquote_str(self.config['alm_project']))\n try:\n response = self.alm_plugin.call_api(target_url, method=URLRequest.POST, args=data)\n except APIError as err:\n raise AlmException('Encountered an error searching for task: %s' % err)\n\n if 'workItems' not in response or not response['workItems']:\n return None\n\n work_item_id = response['workItems'][0]['id']\n work_item_url = self.get_work_item_url(work_item_id)\n\n try:\n work_item = self.alm_plugin.call_api(work_item_url)\n except APIError as err:\n raise AlmException('Unable to retrieve work item for %s: %s' % (task['task_id'], err))\n\n return self.get_task_object(task, work_item)\n\n def 
alm_add_task(self, task):\n if self.config['alm_issue_label']:\n tags_string = self.config['alm_issue_label']\n\n for tag in task['tags']:\n tags_string += '; ' + tag\n\n default_args = {\n 'State': self.config['tfs_new_status'],\n 'Iteration Path': self.config['tfs_iterationpath'],\n 'Area Path': self.config['alm_project'],\n 'Work Item Type': self.config['tfs_issue_type'],\n 'Tags': tags_string\n }\n\n create_args = self.get_new_issue(task, defaults=default_args)\n\n target_url = '%s/%s/_apis/wit/workitems/$Task?api-version=1.0' % \\\n (self.config['tfs_collection'], urlquote_str(self.config['alm_project']))\n\n try:\n work_item = self.alm_plugin.call_api(target_url, method=URLRequest.PATCH,\n args=self.convert_create_args_to_api_args(create_args),\n call_headers={\"content-type\": \"application/json-patch+json\"})\n except APIError as err:\n raise AlmException('Encountered an error creating a new work item: %s' % err)\n\n tfs_task = self.get_task_object(task, work_item)\n\n if self.config['alm_standard_workflow'] and (task['status']['id'] in STATUS.DONE_SET or task['status']['id'] in STATUS.NA_SET):\n self.alm_update_task_status(tfs_task, task['status']['id'])\n\n return tfs_task\n\n def get_note_message(self, alm_task):\n return '%s: %s, URL: %s' % (self.config['tfs_issue_type'],\n alm_task.work_item['id'],\n alm_task.work_item['_links']['html']['href'])\n\n def alm_update_task_status(self, alm_task, status):\n\n if status in STATUS.DONE_SET or status in STATUS.NA_SET:\n new_state = self.config['tfs_done_statuses'][0]\n elif status in STATUS.TODO_SET:\n new_state = self.config['tfs_reopen_status']\n\n data = [\n {\n 'op': 'add',\n 'path': '/fields/System.State',\n 'value': new_state\n }\n ]\n\n work_item_url = '%s/_apis/wit/workitems/%s?api-version=1.0' % (self.config['tfs_collection'], alm_task.get_alm_id())\n try:\n self.alm_plugin.call_api(work_item_url, method=URLRequest.PATCH, args=data,\n call_headers={\"content-type\": \"application/json-patch+json\"})\n except APIError as err:\n raise AlmException('Encountered an error updating work item: %s' % err)\n\n logger.debug('Status changed to %s for %s %s in TFS' %\n (status, self.config['tfs_issue_type'], alm_task.get_alm_id()))\n\n def alm_disconnect(self):\n pass\n\n def convert_markdown_to_alm(self, content, ref):\n return self.mark_down_converter.convert(content)\n\n def convert_create_args_to_api_args(self, create_args):\n return [\n {\n 'op': 'add',\n 'path': '/fields/{0}'.format(field_name),\n 'value': value\n }\n for field_name, value in create_args.items()\n ]\n\n def _get_tfs_wit_fields(self):\n api_response = self.alm_plugin.call_api('{0}/_apis/wit/fields'.format(self.config['tfs_collection']))\n self.user_friendly_name_map = {}\n\n for field_type in api_response['value']:\n self.user_friendly_name_map[field_type['name']] = field_type['referenceName']\n self.user_friendly_name_map[field_type['referenceName']] = field_type['referenceName']\n","sub_path":"sdetools/modules/sync_tfs/tfs_plugin.py","file_name":"tfs_plugin.py","file_ext":"py","file_size_in_byte":11578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"256264079","text":"from . 
SLSLStructs import * \r\nimport numpy as np\r\nNO=0\r\nYES=1\r\nMIGHT_BE=2\r\nclass Steepest_LS_List:\r\n def __init__(self,cycs_lists,dist_m):\r\n self.dist_m=dist_m\r\n self.cycs=[]\r\n for cyc_no,cyc in enumerate(cycs_lists):\r\n self.cycs.append(Cycle(cyc, cyc_no))\r\n self.move_list=None\r\n self.init_move_list()\r\n self.prev_move=None\r\n def get_poss_imp_moves(self,v,move_type=None):\r\n moves=[]\r\n if move_type==IN or move_type==None:\r\n for sec_v in self.cycs[v.cyc_no].vertecies:\r\n if sec_v is v.prev or sec_v is v.next: continue\r\n move=self.get_move(v,sec_v)\r\n if move.delta<0:\r\n moves.append(move)\r\n if move_type==OUT or move_type==None:\r\n for cyc in self.cycs:\r\n if cyc.cyc_no!=v.cyc_no:\r\n for sec_v in cyc.vertecies:\r\n move = self.get_move(v,sec_v)\r\n if move.delta<0:\r\n moves.append(move)\r\n return moves \r\n def dist_to_neigh(self,v1,v2=None):\r\n d_m=self.dist_m\r\n if v2==None:\r\n return d_m[v1.get_abs(),v1.next.get_abs()]+d_m[v1.get_abs(),v1.prev.get_abs()]\r\n else:\r\n return d_m[v1.get_abs(),v2.next.get_abs()]+d_m[v1.get_abs(),v2.prev.get_abs()]\r\n def dist_to_prev(self,v):\r\n d_m=self.dist_m\r\n return d_m[v.get_abs(),v.prev.get_abs()] \r\n def get_delta(self,v1,v2):\r\n d_m=self.dist_m\r\n if v1.cyc_no!=v2.cyc_no:\r\n prev_dist=self.dist_to_neigh(v1)+self.dist_to_neigh(v2)\r\n new_dist=self.dist_to_neigh(v1,v2)+self.dist_to_neigh(v2,v1)\r\n else:\r\n if v1==v2 or (v1.cyc_idx+1)%len(self.cycs[v1.cyc_no])==v2.cyc_idx or (v2.cyc_idx+1)%len(self.cycs[v2.cyc_no])==v1.cyc_idx:\r\n return 0\r\n prev_dist=self.dist_to_prev(v1)+self.dist_to_prev(v2)\r\n new_dist=d_m[v1.get_abs(),v2.get_abs()]+d_m[v1.prev.get_abs(),v2.prev.get_abs()]\r\n return new_dist-prev_dist\r\n def get_move(self,v1,v2):\r\n delta=self.get_delta(v1,v2)\r\n return Move(v1,v2,delta)\r\n def init_move_list(self):\r\n self.move_list=[]\r\n for cyc in self.cycs:\r\n for v in cyc.vertecies:\r\n moves=self.get_poss_imp_moves(v)\r\n self.move_list+=moves\r\n def update_moves_list(self): \r\n if self.prev_move !=None:\r\n for v in self.prev_move.mod_vertecies:\r\n moves=self.get_poss_imp_moves(v)\r\n self.move_list+=moves\r\n return\r\n def is_valid(self,move):\r\n valid=None\r\n if move.type==IN:\r\n state=move.edges_state()\r\n if state==CORRECT:\r\n valid=YES\r\n elif state==BOTH_REVERSED:\r\n move.validate()\r\n valid=YES\r\n elif state==ONE_REVERSED:\r\n valid=MIGHT_BE\r\n elif state==NOT_EXIST:\r\n valid=NO\r\n elif move.type==OUT:\r\n state=move.exact_same_neigh()\r\n if state==CORRECT:\r\n valid=YES\r\n elif state==BOTH_REVERSED or state==ONE_REVERSED:\r\n move.validate()\r\n valid=YES\r\n elif state==NOT_EXIST:\r\n valid=NO\r\n assert valid!=None\r\n return valid\r\n def remove_moves(self,increasing_idxs):\r\n while increasing_idxs:\r\n idx=increasing_idxs.pop()\r\n #print(\"Dluigosc, idx: \",len(self.move_list),\",\",idx)\r\n self.move_list.pop(idx)\r\n def make_best_move(self):\r\n imp=False\r\n move_list=self.move_list\r\n move_list.sort(key=lambda x:x.delta)#the smaller the better\r\n idxs_to_remove=[]\r\n for idx,move in enumerate(move_list):\r\n valid=self.is_valid(move)\r\n if valid==YES:\r\n move.apply(len(self.cycs[move.v1.cyc_no]))\r\n self.prev_move=move\r\n idxs_to_remove.append(idx)\r\n imp=True\r\n elif valid==MIGHT_BE:\r\n continue\r\n elif valid==NO:\r\n idxs_to_remove.append(idx)\r\n if imp==True:\r\n break\r\n self.remove_moves(idxs_to_remove)\r\n return imp\r\n \r\n def get_cycs(self):\r\n cycs=[]\r\n found=[False,False]\r\n break_=False\r\n for cyc in 
self.cycs:\r\n            for v in cyc.vertecies:\r\n                if found[v.cyc_no]==False:\r\n                    found[v.cyc_no]=True\r\n                    cycs.append(v.get_cyc_next(v))\r\n                    if np.all(found):\r\n                        break_=True\r\n                        break\r\n            if break_:break\r\n        return cycs\r\n    def get_cycs_idxs(self):\r\n        cycs=[]\r\n        found=[False,False]\r\n        break_=False\r\n        for cyc in self.cycs:\r\n            for v in cyc.vertecies:\r\n                if found[v.cyc_no]==False:\r\n                    found[v.cyc_no]=True\r\n                    cycs.append(v.get_cyc_idxs(v))\r\n                    if np.all(found):\r\n                        break_=True\r\n                        break\r\n            if break_:break\r\n        return cycs\r\n    def try_to_improve(self):\r\n        #cycs=self.get_cycs()\r\n        #cycs_idxs=self.get_cycs_idxs()\r\n        #print(\"CYCLES\",cycs[0],cycs[1])\r\n        #print(\"INDICES:\",cycs_idxs[0],cycs_idxs[1],len(np.unique(cycs_idxs[0])),len(np.unique(cycs_idxs[1])))\r\n        improved=False\r\n        self.update_moves_list()\r\n        improved=self.make_best_move()\r\n        l1=len([a for a in self.cycs[0].vertecies if a.cyc_no==0])+len([a for a in self.cycs[1].vertecies if a.cyc_no==0])\r\n        l2=len([a for a in self.cycs[0].vertecies if a.cyc_no==1])+len([a for a in self.cycs[1].vertecies if a.cyc_no==1])\r\n        assert l1==l2\r\n        #print(\"Number of vertices in cycle 0:\",wi)\r\n        return improved\r\ndef ls_steepest_list(cyc1,cyc2,dist_m):\r\n    lsl=Steepest_LS_List([cyc1,cyc2], dist_m)\r\n    improved=True\r\n    i=0\r\n    while improved:\r\n        improved=lsl.try_to_improve()\r\n        i+=1\r\n    return lsl.get_cycs()","sub_path":"ImprovedLocalSearch/SteepestList.py","file_name":"SteepestList.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"59538036","text":"# This script reads and prints the meta data\n# from a DICOM file, without reading the pixel data.\n#\n# Matthew Giarra\n# 2016-01-13\n# Virginia Tech\n\nimport dicom\n\n# Read the metadata without loading the pixel data.\ndcm_data = dicom.read_file(\"test_data.dcm\", stop_before_pixels=True);\n\n# Convert metadata to string\ndcm_string = str(dcm_data);\n\n# Open a file for writing\nf1 = open('/Users/matthewgiarra/Desktop/dicom_output.txt', 'w');\n\n# Write the string to file\nf1.write(dcm_string);\n\n# Close the file\nf1.close();\n\n\n","sub_path":"write_dicom_metadata.py","file_name":"write_dicom_metadata.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"445977932","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 4 18:45:52 2020\n\n@author: doguk\n\"\"\" \n\n# FILE READING AND WRITING\n\n\n# open file to read and print the whole file\nf = open(\"file.txt\",\"r\")\nprint(f.read())\nf.close()\n\n# open file to read and print 1 character\nf = open(\"file.txt\",\"r\")\nprint(f.read(1))\nf.close()\n\n# open file to read and print it line by line\nf = open(\"file.txt\",\"r\")\nfor line in f:\n    print(line)\nf.close()\n\n# open file in append mode and write \"How are you\" to it\nf = open(\"file.txt\",\"a\")\nf.write(\"How are you\")\nf.close()\n\n\n","sub_path":"File reading and writing/File reading and writing.py","file_name":"File reading and writing.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"213060696","text":"# coding: utf-8\n'''\nThe number 1406357289 is a 0-to-9 pandigital number (each of the digits 0 to 9 appears exactly once).\nThis number has an interesting substring property.\nLet d1 be the first (most significant) digit and d2 the second digit, and define dn in the same way. Using this notation, the following holds:\nd2d3d4=406 is divisible by 2\nd3d4d5=063 is divisible by 3\nd4d5d6=635 is divisible by 5\nd5d6d7=357 is divisible by 7\nd6d7d8=572 is divisible by 11\nd7d8d9=728 is divisible by 13\nd8d9d10=289 is divisible by 17\nFind the sum of all 0-to-9 pandigital numbers with this property.\n'''\nimport itertools\n\n\ndef pandegital_special(s):\n    if int(s[1:4]) % 2 != 0:\n        return False\n    if int(s[2:5]) % 3 != 0:\n        return False\n    if int(s[3:6]) % 5 != 0:\n        return False\n    if int(s[4:7]) % 7 != 0:\n        return False\n    if int(s[5:8]) % 11 != 0:\n        return False\n    if int(s[6:9]) % 13 != 0:\n        return False\n    if int(s[7:10]) % 17 != 0:\n        return False\n    return True\n\n\ndef main():\n    numlist = []\n    for n in range(10):\n        numlist.append(n)\n    panlist = itertools.permutations(numlist)\n    ans = 0\n    for pan in panlist:\n        if pan[0] != 0:\n            strings = ''\n            for i in range(10):\n                strings += str(pan[i])\n            if pandegital_special(strings) is True:\n                ans += int(strings)\n    print(ans)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Practice/ProjectEuler/Problem43.py","file_name":"Problem43.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"481101525","text":"# Project Euler Problem 55 / 6-30-18\nMAX_LYCHREL_NUMBER = 10000\nITERATIONS = 50 # Maximum number of iterations to check before stopping\n\n\ndef main():\n    num_of_lychrel_nums = 0\n\n    for num in range(1, MAX_LYCHREL_NUMBER):\n        if is_lychrel_number(num, ITERATIONS):\n            num_of_lychrel_nums += 1\n\n    print(num_of_lychrel_nums)\n\n\n# A number is a Lychrel number if after 50, in this case, iterations there is\n# no palindrome created after first adding the mirror number\ndef is_lychrel_number(number, iterations):\n    if iterations == 0: # Reached maxed iterations\n        return not is_palindrome(number)\n    elif is_palindrome(number) and iterations != ITERATIONS:\n        return False\n    else:\n        reverse_num = int(str(number)[::-1]) # 1232 reverse number is 2321\n        new_num = number + reverse_num\n        return is_lychrel_number(new_num, iterations - 1)\n\n\ndef is_palindrome(num):\n    str_num = str(num)\n    if num == int(str_num[::-1]): # A number that is equal to mirror number\n        return True\n    return False\n\n\nmain()\n","sub_path":"python/Problems 51-100/Problem_55.py","file_name":"Problem_55.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"198668241","text":"# memo table keyed by (n, c) so each subproblem is solved only once\r\nmemo = {}\r\n\r\ndef knap(n,c):\r\n    if (n,c) in memo:\r\n        return memo[(n,c)]\r\n    if n == 0 or c == 0:\r\n        result = 0\r\n    elif weight[n] > c:\r\n        result = knap(n-1,c)\r\n    else:\r\n        temp1 = knap(n-1,c)\r\n        temp2 = value[n] + knap(n-1,c-weight[n])\r\n        result = max(temp1,temp2)\r\n    memo[(n,c)] = result\r\n    return result\r\n\r\nweight = [1,2,4,2,5]\r\nvalue = [5,3,5,3,2]\r\nn = len(weight)\r\nc = 10\r\nprint(knap(4,10))\r\n \r\n\r\n","sub_path":"knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"149948916","text":"import pandas as pd\nfrom flask import Flask, jsonify, request, render_template\nimport joblib # To unpack .gz\n# import pickle # To use .pkl model\nimport json # To load in 'features.json'\n\n# Load the model from 'berlin_model.pkl' file or 'berlin_model.gz'\n# model = pickle.load(open('berlin_model.pkl', 'rb'))\nmodel = joblib.load(open('berlin_model.gz', 'rb'))\n\n# App\napp = Flask(__name__)\n\n\n\n# Could Remove this line of code: vvvv\n# Setting this to True makes the returned JSON look not so 
jumbled up\napp.config['JSONIFY_PRETTYPRINT_REGULAR'] = True\n\n\n# Routes\n@app.route('/')\ndef index():\n return render_template('feature_input.html')\n\n\n@app.route('/price', methods=['POST'])\ndef predict_price():\n # Takes data entered at index route via the 'feature_input.html' template\n data = dict(request.form)\n # Parse the file 'features.json' containing a skeleton JSON of feature\n # variables with default values set to '0'. This block of code compares\n # the data from the request form to the default values from the JSON file\n # and makes changes depending on what the input was. For features that\n # have multiple permutations (neighbourhood_group_cleansed, etc), whichever\n # was chosen in the drop-down list will be reassigned as an integer(1)\n # while the others remain an integer(0) which denotes that the listing is\n # not a particular feature permutation and thus, can be ran in the model.\n with open('features.json', 'r') as f:\n features_dict = json.load(f)\n for key, value in data.items():\n if key in features_dict:\n try:\n features_dict[key] = float(value)\n except:\n features_dict[key] = 0\n elif value in features_dict:\n features_dict[value] = 1\n # print(features_dict)\n data = features_dict\n\n # This code will take the variable 'data' and convert it from JSON into a\n # Pandas Dataframe which can be used with the next line of code\n df = pd.json_normalize(data)\n\n # Prediction based on the Dataframe containing feature values.\n # model.predict(df) returns the prediction as an n-dimensional array which\n # is not able to be 'jsonified' so you must grab the index at [0] to take\n # it out of the array and convert to JSON. The prediction is rounded to\n # the nearest integer.\n results = int(model.predict(df)[0])\n\n # Return the features and predicted price as JSON\n # return jsonify(features=data, price=results)\n return jsonify(price=results)\n\nif __name__ == \"__main__\":\n app.run(port=5000, debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"229917899","text":"import FWCore.ParameterSet.Config as cms\nfrom Configuration.AlCa.autoCond import autoCond\n\nprocess = 
cms.Process(\"Demo\")\n\n\nprocess.load(\"Configuration.StandardSequences.GeometryRecoDB_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.EventContent.EventContent_cff')\nprocess.load('SimGeneral.MixingModule.mixNoPU_cfi')\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_cff')\nprocess.load('Configuration.StandardSequences.Digi_cff')\nprocess.load('Configuration.StandardSequences.SimL1Emulator_cff')\nprocess.load('Configuration.StandardSequences.DigiToRaw_cff')\nprocess.load('HLTrigger.Configuration.HLT_GRun_cff')\nprocess.load('Configuration.StandardSequences.RawToDigi_cff')\nprocess.load('Configuration.StandardSequences.L1Reco_cff')\nprocess.load('Configuration.StandardSequences.Reconstruction_cff')\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\nprocess.GlobalTag.globaltag=autoCond['run2_mc']\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )\n\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:startup', '')\n\n#process.GlobalTag.globaltag=autoCond['run2_mc']\n\n\nprocess.RECOSIMoutput = cms.OutputModule(\"PoolOutputModule\",\n dataset = cms.untracked.PSet(\n dataTier = cms.untracked.string('GEN-SIM-RECO'),\n filterName = cms.untracked.string('')\n ),\n eventAutoFlushCompressedSize = cms.untracked.int32(5242880),\n fileName = cms.untracked.string('file:step3.root'),\n outputCommands = process.RECOSIMEventContent.outputCommands,\n splitLevel = cms.untracked.int32(0)\n)\n\n\nprocess.source = cms.Source(\"PoolSource\",\n # replace 'myfile.root' with the source file you want to use\n fileNames = cms.untracked.vstring(\n # '/store/relval/CMSSW_8_0_1/RelValMinBias_13/GEN-SIM-RECO/80X_mcRun2_asymptotic_v6-v1/10000/1E55A63F-58E4-E511-8BF4-0025905A60B8.root'\n#\t'/store/relval/CMSSW_8_0_1/RelValProdMinBias/GEN-SIM-RAW/80X_mcRun1_realistic_v4-v1/10000/3CFF9832-04E5-E511-9EC6-0025905A608C.root'\n\t'/store/relval/CMSSW_8_0_0/RelValProdMinBias/GEN-SIM/80X_mcRun1_realistic_v4-v1/10000/18D40681-70D9-E511-A835-0CC47A4C8F30.root'\n )\n)\nprocess.load('Configuration.StandardSequences.Services_cff') #loads the TFileService \nprocess.TFileService = cms.Service( #creates one\n \"TFileService\",\n fileName = cms.string( 'test.root' ), #here goes the file name you want to create\n closeFileFast = cms.untracked.bool(True) \n )\n\n\nprocess.load('CalibTracker.SiStripCommon.ShallowClustersProducer_cfi')\nprocess.load('CalibTracker.SiStripCommon.ShallowRechitClustersProducer_cfi')\nprocess.load('CalibTracker.SiStripCommon.ShallowSimhitClustersProducer_cfi')\nprocess.load('CalibTracker.SiStripCommon.ShallowTrackClustersProducer_cfi')\nprocess.load('CalibTracker.SiStripCommon.ShallowTracksProducer_cfi')\nprocess.load('CalibTracker.SiStripCommon.ShallowGainCalibration_cfi')\nprocess.load('CalibTracker.SiStripCommon.ShallowEventDataProducer_cfi')\nprocess.testTree = cms.EDAnalyzer( #This module fills the TTree\n \"ShallowTree\",\n outputCommands = cms.untracked.vstring(\n 'drop *', 
#always include it!\n        'keep *_shallow*_*_*'#,\n#        'keep *_shallowRechitClusters_*_*',\n#        'keep *_shallowSimhitClusters_*_*',\n#        'keep *_shallowClusters_*_*', #specify what you want to store, in this case ALL the cluster products\n#        'keep *_shallowTracks_*_*',\n#        'keep *_shallowGainCalibration_*_*',\n#        'keep *_shallowEventRun_*_*'\n        )\n    )\n\n\n\n# Path and EndPath definitions\nprocess.digitisation_step = cms.Path(process.pdigi)\nprocess.L1simulation_step = cms.Path(process.SimL1Emulator)\nprocess.digi2raw_step = cms.Path(process.DigiToRaw)\nprocess.raw2digi_step = cms.Path(process.RawToDigi)\nprocess.L1Reco_step = cms.Path(process.L1Reco)\nprocess.reconstruction_step = cms.Path(process.reconstruction)\nprocess.tree_step=cms.Path(\n\t\t(process.shallowEventRun +\n        process.shallowRechitClusters +\n\t\tprocess.shallowSimhitClusters +\n\t\tprocess.shallowClusters +\n        process.shallowGainCalibration +\n\t\tprocess.shallowTrackClusters +\n\t\tprocess.shallowTracks \n        ) *\n\tprocess.testTree\n)\nprocess.endjob_step = cms.EndPath(process.endOfProcess)\nprocess.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)\n# Schedule definition\nprocess.schedule = cms.Schedule(process.digitisation_step,process.L1simulation_step,process.digi2raw_step,process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.tree_step)\n\n","sub_path":"RunTheMacros/TreeProducer_cfg.py","file_name":"TreeProducer_cfg.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"86121978","text":"# -*- coding: utf-8 -*-\n\nimport check\nfrom telegram.ext import Updater,CommandHandler,MessageHandler, Filters, Job\nimport logging\nimport os\n\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef callback_hour(bot, job):\n\n    bot.sendMessage(chat_id='need an id', text= 'WRITE CODE') \n\n\ndef callback_alarm(bot, job):\n\n    bot.sendMessage(chat_id=job.context, text='beep')\n\n\ndef callback_timer(bot, update, job_queue):\n\n    bot.sendMessage(chat_id=update.message.chat_id, text='Setting a time for 1 minute!')\n    job_alarm = Job(callback_alarm,60.0, repeat=False, context = update.message.chat_id)\n    job_queue.put(job_alarm)\n\n\ndef start(bot,update):\n\n    bot.sendMessage(chat_id=update.message.chat_id, text=\"Bot is running.\")\n\n\ndef hello(bot,update):\n\n    bot.sendMessage(chat_id=update.message.chat_id,text=\"Hello \"+update.message.from_user.first_name)\n\n\ndef echo(bot,update):\n    bot.sendMessage(chat_id=update.message.chat_id,text=\"Hello \"+update.message.from_user.first_name)\n    bot.sendMessage(chat_id=update.message.chat_id, text=\"To submit a resource, use /kaynak \")\n    bot.sendMessage(chat_id=update.message.chat_id,text=\"To make the bot say Hello, use /hello\")\n\n\ndef kaynak(bot,update):\n\n    msg = update.message.text\n    x = str(msg ).replace(\"/kaynak\",\" \")\n    k = x.split(\" \")\n    a=check.url(k[2])\n\n    if (a == True):\n\n        bot.sendMessage(chat_id=update.message.chat_id, text=update.message.from_user.first_name +\n                        \"'s resource has been saved to the database\")\n        readme = open('README.md', 'a')\n        x = str(update.message.text).replace(\"/kaynak\", \" \")\n        readme.write(\"{}\".format(update.message.from_user.first_name+\"
  • \" + x + \"
  • \"))\n readme.close()\n os.system(\"git add -A\")\n os.system(\"git commit -m '\" + update.message.from_user.first_name + \" Link '\")\n os.system(\"git push\")\n bot.sendMessage(chat_id=update.message.chat_id, text=\"Url github'a eklendi.\")\n\n\n else:\n\n bot.sendMessage(chat_id=update.message.chat_id,text=\"URL HATALI\")\n\n\n\ndef main():\n\n Token=\"340726941:AAFggBH8pDqS7hidKtOmuXilWAybu8F5kUw\"\n updater = Updater(token=Token)\n dispatcher = updater.dispatcher\n job_j = updater.job_queue\n\n #---------------HANDLER IS HERE--------------------\n\n start_handler = CommandHandler('start', start)\n hello_handler=CommandHandler('hello',hello)\n echo_handler = MessageHandler(Filters.text, echo)\n kaynak_handler=CommandHandler('kaynak',kaynak)\n\n #--------------------------------------------------\n #----------------DISPATCHER IS HERE----------------\n\n dispatcher.add_handler(echo_handler)\n dispatcher.add_handler(start_handler)\n dispatcher.add_handler(hello_handler)\n dispatcher.add_handler(kaynak_handler)\n\n #--------------------------------------------------\n\n job_hour = Job(callback_hour, 7200.0)\n job_j.put(job_hour, next_t=0.0)\n\n updater.start_polling()\n\n\n\nif __name__ == '__main__':\n\n main()\n","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"257406752","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ==================================================\n# @Time : 2019-04-02 10:46 \n# @Author : ryuchen\n# @Site : \n# @File : views.py \n# @Desc : \n# ==================================================\nimport logging\n\nfrom django.contrib.sessions.models import Session\nfrom django.http import JsonResponse\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom contrib.colleges.models import Academy\nfrom core.decorators.excepts import excepts\nfrom core.exceptions.errors import *\n\nlog = logging.getLogger('default')\n\n\n@excepts\n@csrf_exempt\ndef login_view(request):\n res = {\n \"code\": \"00000000\",\n \"data\": {\n \"status\": 200,\n \"type\": \"account\",\n }\n }\n if request.method != \"POST\":\n raise NotImplementedError\n\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n user = User.objects.filter(username=username).first()\n if user.is_superuser:\n res[\"data\"][\"authority\"] = list(request.user.groups.values_list('name', flat=True))\n res[\"data\"][\"permission\"] = []\n for group in request.user.groups.all():\n res[\"data\"][\"permission\"].extend(list(group.permissions.values_list('codename', flat=True)))\n res[\"data\"][\"permission\"].extend(list(user.user_permissions.values_list('codename', flat=True)))\n elif user.is_staff:\n res[\"data\"][\"authority\"] = list(user.groups.values_list('name', flat=True))\n res[\"data\"][\"permission\"] = []\n for group in request.user.groups.all():\n res[\"data\"][\"permission\"].extend(list(group.permissions.values_list('codename', flat=True)))\n res[\"data\"][\"permission\"].extend(list(user.user_permissions.values_list('codename', flat=True)))\n else:\n res[\"data\"][\"authority\"] = list(user.groups.values_list('name', flat=True))\n res[\"data\"][\"permission\"] = []\n for group in request.user.groups.all():\n 
res[\"data\"][\"permission\"].extend(list(group.permissions.values_list('codename', flat=True)))\n res[\"data\"][\"permission\"].extend(list(user.user_permissions.values_list('codename', flat=True)))\n return JsonResponse(res)\n else:\n raise AuthenticateError(\"Username or Password is incorrect!\")\n\n\n@excepts\ndef logout_view(request):\n res = {\n \"code\": \"00000000\",\n \"data\": {\n \"status\": 200,\n }\n }\n logout(request)\n return JsonResponse(res)\n\n\n@excepts\ndef current_user_view(request):\n res = {\n \"code\": \"00000000\",\n \"data\": {\n \"status\": 200,\n }\n }\n if request.method != \"GET\":\n raise NotImplementedError\n\n if request.user.is_authenticated:\n user = User.objects.get(id=request.user.id)\n if user.is_superuser:\n res[\"data\"][\"profile\"] = {\n \"name\": '{0}{1}'.format(user.first_name, user.last_name),\n \"email\": user.email,\n \"avatar\": 'https://gw.alipayobjects.com/zos/antfincdn/XAosXuNZyF/BiazfanxmamNRoxxVxka.png',\n \"title\": \"你是当前系统的最高管理员\",\n \"group\": \"超级管理员\",\n \"academy\": \"\"\n }\n else:\n if user.is_staff:\n academy = Academy.objects.filter(aca_user_id=user.id).first()\n res[\"data\"][\"profile\"] = {\n \"name\": '{0}{1}'.format(user.first_name, user.last_name),\n \"email\": user.email,\n \"avatar\": 'https://gw.alipayobjects.com/zos/antfincdn/XAosXuNZyF/BiazfanxmamNRoxxVxka.png',\n \"title\": \"你是{0}学院的管理员\".format(academy.aca_cname),\n \"group\": \"学院管理人员\",\n \"academy\": academy.uuid\n }\n else:\n res[\"data\"][\"profile\"] = {\n \"name\": '{0}{1}'.format(user.first_name, user.last_name),\n \"email\": user.email,\n \"avatar\": 'https://gw.alipayobjects.com/zos/antfincdn/XAosXuNZyF/BiazfanxmamNRoxxVxka.png',\n \"title\": \"研究生导师\",\n \"group\": \"教师\",\n \"academy\": \"\"\n }\n return JsonResponse(res)\n else:\n raise AuthenticateError(\"You need login first!\")\n","sub_path":"apps/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"353551446","text":"#!/usr/bin/env python3\n\nimport numpy as np\n\ndef readFile(filename: str):\n xyz_file = np.genfromtxt(fname=filename, skip_header=2, dtype='unicode')\n symbols = xyz_file[:,0]\n coordinates = (xyz_file[:,1:])\n coordinates = coordinates.astype(float)\n return (symbols, coordinates)","sub_path":"rgpytools/xyz.py","file_name":"xyz.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"651720499","text":"import time\nimport typing\n\nimport dataclasses\nfrom django.core.cache import cache as default_cache\nfrom rest_framework.throttling import BaseThrottle\n\nfrom ..core import annotate_method\n\nif typing.TYPE_CHECKING:\n from ..routing import Route # noqa: F401\n\n\n@dataclasses.dataclass\nclass ThrottlingAnnotation:\n rate: typing.Optional[str]\n scope: typing.Optional[str]\n\n\n@dataclasses.dataclass\nclass Throttling:\n num_requests: int\n duration: int\n scope: str\n\n\ndef throttling(rate: typing.Optional[str], scope: typing.Optional[str] = None):\n return annotate_method(ThrottlingAnnotation(rate, scope), single=True)\n\n\nclass BaseRateThrottle(BaseThrottle):\n throttling_by_http_method: typing.Dict[str, Throttling] = {}\n cache = default_cache\n cache_format = 'throttle_{scope}_{ident}'\n\n def allow_request(self, request, view) -> bool:\n throttling_ = self._get_throttling(request)\n\n if throttling_ is None:\n return True\n\n ident = 
self.get_ident(request)\n key = self._get_cache_key(throttling_.scope, ident)\n\n history = self.cache.get(key, [])\n now = time.time()\n\n while history and history[-1] <= now - throttling_.duration:\n history.pop()\n\n if len(history) >= throttling_.num_requests:\n return False\n\n history.insert(0, now)\n self.cache.set(key, history, throttling_.duration)\n return True\n\n def _get_cache_key(self, scope: str, ident: str) -> str:\n return self.cache_format.format(scope=scope, ident=ident)\n\n def get_ident(self, request) -> str:\n user_pk = request.user.pk if request.user.is_authenticated else None\n\n if user_pk is not None:\n return str(user_pk)\n return super().get_ident(request)\n\n def _get_throttling(self, request) -> typing.Optional[Throttling]:\n return self.throttling_by_http_method.get(request.method.lower())\n\n\ndef _parse_rate(rate: str) -> typing.Tuple[int, int]:\n \"\"\"\n Given the request rate string, return a two tuple of:\n , \n \"\"\"\n num, period = rate.split('/')\n num_requests = int(num)\n duration = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[period[0]]\n return num_requests, duration\n\n\ndef create_throttle_classes(routes: typing.List['Route']) -> typing.Tuple[typing.Type[BaseRateThrottle], ...]:\n throttling_by_http_method_: typing.Dict[str, Throttling] = {}\n\n for route in routes:\n throttling_annotation = route.method.annotations.get_one_or_none(ThrottlingAnnotation)\n\n if getattr(throttling_annotation, 'rate', None) is not None:\n num_requests, duration = _parse_rate(throttling_annotation.rate)\n throttling_scope = throttling_annotation.scope or route.method.full_name\n throttling_ = Throttling(num_requests, duration, throttling_scope)\n throttling_by_http_method_[route.http_method.lower()] = throttling_\n\n if not throttling_by_http_method_:\n return ()\n\n class RateThrottle(BaseRateThrottle):\n throttling_by_http_method = throttling_by_http_method_\n\n return (RateThrottle,)\n","sub_path":"winter/http/throttling.py","file_name":"throttling.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"396496018","text":"import sys\nimport argparse\nimport numpy as np\nfrom PIL import Image\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom torch.utils.tensorboard import SummaryWriter\n\nsys.path.append(\"../\") # ../../GAN-SDPC/\n\nfrom utils import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-p\", \"--model_path\", type=str, default='models/last_G.pt',\n help=\"Chemin vers le générateur à charger\")\nparser.add_argument(\"-s\", \"--seed_path\", type=str, default='seeds.txt',\n help=\"Chemin vers le fichier contenant les seeds à générer.\")\nparser.add_argument(\"-r\", \"--results_path\", type=str, default='results',\n help=\"Dossier contenant les résultats\")\nparser.add_argument(\"-t\", \"--tag\", type=str, default='image',\n help=\"Nom du fichier contenant les résultats\")\nparser.add_argument(\"--eps\", type=float, default=0.5, help=\"batchnorm: espilon for numerical stability\")\nparser.add_argument(\"--lrelu\", type=float, default=1e-06, help=\"LeakyReLU : alpha\")\nparser.add_argument(\"--latent_dim\", type=int, default=32, help=\"dimensionality of the latent space\")\nparser.add_argument(\"--kernels_size\", type=int, default=9, help=\"Taille des kernels\")\nparser.add_argument(\"--padding\", type=int, default=4, help=\"Taille du padding\")\nparser.add_argument(\"-i\", 
\"--img_size\", type=int, default=128, help=\"size of each image dimension\")\nparser.add_argument(\"--channels\", type=int, default=3, help=\"number of image channels\")\nparser.add_argument(\"--points\", type=int, default=5, help=\"number of inter points between interpolation\")\nparser.add_argument(\"--sample\", type=int, default=3, help=\"number of interpolation\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n help=\"Afficher des informations complémentaire.\")\nparser.add_argument(\"--runs_path\", type=str, default='Interpol_SF/inter/',\n help=\"Dossier de stockage des résultats sous la forme : Experience_names/parameters/\")\nparser.add_argument(\"--GPU\", type=int, default=0, help=\"Identifiant du GPU à utiliser.\")\nopt = parser.parse_args()\nprint(opt)\n\n# Dossier de sauvegarde\nos.makedirs(opt.results_path, exist_ok=True)\n\n# Gestion du time tag\ntry:\n time_tag = datetime.datetime.now().isoformat(sep='_', timespec='seconds')\nexcept TypeError:\n # Python 3.5 and below\n # 'timespec' is an invalid keyword argument for this function\n time_tag = datetime.datetime.now().replace(microsecond=0).isoformat(sep='_')\ntime_tag = time_tag.replace(':','.')\n\n# ----------\n# Initialize generator \n# ----------\nNL = nn.LeakyReLU(opt.lrelu, inplace=True)\nopts_conv = dict(kernel_size=opt.kernels_size, stride=2, padding=opt.padding, padding_mode='zeros')\nchannels = [64, 128, 256, 512]\nclass Generator(nn.Module):\n def __init__(self, verbose=opt.verbose):\n super(Generator, self).__init__()\n \n def generator_block(in_filters, out_filters):\n block = [nn.UpsamplingNearest2d(scale_factor=opts_conv['stride']), nn.Conv2d(in_filters, out_filters, kernel_size=opts_conv['kernel_size'], stride=1, padding=opts_conv['padding'], padding_mode=opts_conv['padding_mode']), nn.BatchNorm2d(out_filters, opt.eps), NL]\n\n return block\n\n self.verbose = verbose\n self.init_size = opt.img_size // opts_conv['stride']**3\n self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, channels[3] * self.init_size ** 2), NL)\n\n\n self.conv1 = nn.Sequential(*generator_block(channels[3], channels[2]),)\n self.conv2 = nn.Sequential(*generator_block(channels[2], channels[1]),)\n self.conv3 = nn.Sequential(*generator_block(channels[1], channels[0]),)\n self.conv_blocks = nn.Sequential(\n nn.Conv2d(channels[0], opt.channels, 3, stride=1, padding=1),\n nn.Tanh(),\n )\n \n def forward(self, z):\n if self.verbose: print(\"G\")\n # Dim : opt.latent_dim\n out = self.l1(z)\n if self.verbose: print(\"l1 out : \",out.shape)\n out = out.view(out.shape[0], channels[3], self.init_size, self.init_size)\n # Dim : (channels[3], opt.img_size/8, opt.img_size/8)\n if self.verbose: print(\"View out : \",out.shape)\n\n out = self.conv1(out)\n # Dim : (channels[3]/2, opt.img_size/4, opt.img_size/4)\n if self.verbose: print(\"Conv1 out : \",out.shape)\n out = self.conv2(out)\n # Dim : (channels[3]/4, opt.img_size/2, opt.img_size/2)\n if self.verbose: print(\"Conv2 out : \",out.shape)\n out = self.conv3(out)\n # Dim : (channels[3]/8, opt.img_size, opt.img_size)\n if self.verbose: print(\"Conv3 out : \",out.shape)\n \n img = self.conv_blocks(out)\n # Dim : (opt.chanels, opt.img_size, opt.img_size)\n if self.verbose: print(\"img out : \", img.shape)\n\n return img\n\n def _name(self):\n return \"Generator\"\n\nclass Discriminator(nn.Module):\n def __init__(self,verbose=opt.verbose):\n super(Discriminator, self).__init__()\n\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [nn.Conv2d(in_filters, out_filters, 
**opts_conv), NL]#, nn.Dropout2d(0.25)\n if bn:\n block.append(nn.BatchNorm2d(out_filters, opt.eps))\n return block\n\n self.verbose = verbose\n\n self.conv1 = nn.Sequential(*discriminator_block(opt.channels, channels[0], bn=False),)\n self.conv2 = nn.Sequential(*discriminator_block(channels[0], channels[1]),)\n self.conv3 = nn.Sequential(*discriminator_block(channels[1], channels[2]),)\n self.conv4 = nn.Sequential(*discriminator_block(channels[2], channels[3]),)\n\n # The height and width of downsampled image\n self.init_size = opt.img_size // opts_conv['stride']**4\n self.adv_layer = nn.Sequential(nn.Linear(channels[3] * self.init_size ** 2, 1))#, nn.Sigmoid()\n\n def forward(self, img):\n if self.verbose:\n print(\"D\")\n print(\"Image shape : \",img.shape)\n out = self.conv1(img)\n print(\"Conv1 out : \",out.shape)\n out = self.conv2(out)\n print(\"Conv2 out : \",out.shape)\n out = self.conv3(out)\n print(\"Conv3 out : \",out.shape)\n out = self.conv4(out)\n print(\"Conv4 out : \",out.shape)\n\n out = out.view(out.shape[0], -1)\n print(\"View out : \",out.shape)\n validity = self.adv_layer(out)\n print(\"Val out : \",validity.shape)\n else:\n # Dim : (opt.chanels, opt.img_size, opt.img_size)\n out = self.conv1(img)\n # Dim : (channels[3]/8, opt.img_size/2, opt.img_size/2)\n out = self.conv2(out)\n # Dim : (channels[3]/4, opt.img_size/4, opt.img_size/4)\n out = self.conv3(out)\n # Dim : (channels[3]/2, opt.img_size/4, opt.img_size/4)\n out = self.conv4(out)\n # Dim : (channels[3], opt.img_size/8, opt.img_size/8)\n\n out = out.view(out.shape[0], -1)\n validity = self.adv_layer(out)\n # Dim : (1)\n\n return validity\n\n def _name(self):\n return \"Discriminator\"\n\ngenerator = Generator()\nprint_network(generator)\nload_model(generator, None, opt.model_path)\n\n# ----------\n# GPU paramétrisation\n# ----------\ncuda = True if torch.cuda.is_available() else False\nif cuda:\n if torch.cuda.device_count() > opt.GPU: \n torch.cuda.set_device(opt.GPU)\n generator.cuda()\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\n# ----------\n# Tensorboard\n# ----------\npath_data1 = \"../runs/\" + opt.runs_path\npath_data2 = \"../runs/\" + opt.runs_path + time_tag[:-1] + \"/\"\n\n# Les runs sont sauvegarder dans un dossiers \"runs\" à la racine du projet, dans un sous dossiers opt.runs_path.\nos.makedirs(path_data1, exist_ok=True)\nos.makedirs(path_data2, exist_ok=True)\n\nwriter = SummaryWriter(log_dir=path_data2)\n\n# ----------\n# Interpolation noise\n# ----------\nN = opt.sample\npoints = opt.points\n\n# Select 2 points\nprint(\"Choix du point A\")\nans = \"n\"\nwhile ans != 'y':\n a = np.random.normal(0, 1, (N, opt.latent_dim))\n print(a)\n tensorboard_sampling(Variable(Tensor(a)), generator, writer, 0, image_type=\"Point A\")\n print(\"Le point tiré convient-il ? (y/n)\")\n ans = input()\nprint(\"Choix du point B\")\nans = \"n\"\nwhile ans != 'y':\n b = np.random.normal(0, 1, (N, opt.latent_dim))\n print(b)\n tensorboard_sampling(Variable(Tensor(b)), generator, writer, 0, image_type=\"Point B\")\n print(\"Le point tiré convient-il ? 
(y/n)\")\n    ans = input()\n\ndiff = np.abs(a-b)\n\n# spherical linear interpolation (slerp) Source : https://machinelearningmastery.com/how-to-interpolate-and-perform-vector-arithmetic-with-faces-using-a-generative-adversarial-network/\ndef slerp(val, low, high):\n    omega = np.arccos(np.clip(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)), -1, 1))\n    so = np.sin(omega)\n    if so == 0:\n        # L'Hopital's rule/LERP\n        return (1.0-val) * low + val * high\n    return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega) / so * high\n    \n# spherical interpolation between two points in latent space\ndef interpolate_points(p1, p2, n_steps=10):\n    # interpolate ratios between the points\n    ratios = np.linspace(0, 1, num=n_steps)\n    # interpolate vectors with slerp\n    vectors = list()\n    for ratio in ratios:\n        v = slerp(ratio, p1, p2)\n        vectors.append(v)\n    return np.asarray(vectors)\n\n# Compute the intermediate points with SLERP\nc = list()\nfor i in range(N):\n    c.append(interpolate_points(a[i],b[i],points))\nc = np.asarray(c).reshape((N*points,opt.latent_dim)) \nprint(c.shape)\nprint(c)\n\n# Generation\nnoise = Variable(Tensor(c))\nsampling(noise, generator, opt.results_path, 0, tag=opt.tag, nrow=points)\ntensorboard_sampling(noise, generator, writer, 0, image_type=\"Interpolation A to B\", nrow=points)\nfor i,line in enumerate(c):\n    line = line.reshape((1,opt.latent_dim))\n    #print(line.shape)\n    sampling(Variable(Tensor(line)), generator, opt.results_path, 0, tag=str(i), nrow=1)\n\n# Analysis of the latent space\nprint(diff)\nprint(\"Mean diff :\\n\",diff.mean(axis=1))\nprint(\"Std diff :\\n\",diff.std(axis=1))\n","sub_path":"W24_SLERP_SF/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":10223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"191447518","text":"#import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pickle\r\nimport cv2\r\nimport glob\r\nimport random\r\nimport pandas as pd\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.models import model_from_json, Model\r\nfrom keras.layers import Dense, Activation, Conv2D, Input\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.layers.convolutional import MaxPooling2D\r\nfrom keras.layers.core import Flatten\r\nfrom keras.utils import np_utils\r\nfrom keras import optimizers\r\nfrom keras.callbacks import ModelCheckpoint, CSVLogger\r\nfrom keras.initializers import glorot_normal\r\n\r\n# some declared variables\r\nrandomSeed = 42\r\nnetworkInitialize = glorot_normal()\r\ninputShape = (224, 224, 3)\r\nepoch=200\r\nBS = 32\r\nnum_classes = 2\r\nrandom.seed(randomSeed)\r\nINIT_LR=0.01\r\n\r\ndata = []\r\nlabels = []\r\n\r\ndef classification_report_csv(report,directory):\r\n    report_data = [] # array to hold the data\r\n    lines = report.split('\\n')\r\n    for line in lines[2:-3]: # the report always has two header lines at the start and three summary lines at the end, so cut the first two and last three lines to avoid an exception\r\n        try:\r\n            row = {}\r\n            row_data_ = line.split(' ')\r\n            row_data=[x for x in row_data_ if x]\r\n            row['class'] = row_data[0]\r\n            row['precision'] = float(row_data[1])\r\n            row['recall'] = float(row_data[2])\r\n            row['f1_score'] = float(row_data[3])\r\n            row['support'] = float(row_data[4])\r\n            report_data.append(row)\r\n        except:\r\n            pass\r\n    dataframe = 
pd.DataFrame.from_dict(report_data) # build a DataFrame from the parsed rows\r\n    dataframe.to_csv(directory+'_classification_report.csv', index = False) # index=False so the row index is not written to the CSV\r\n\r\n# convert the one-hot encoded classes back to integer class indices\r\ndef decode(data):\r\n    decoded_data = []\r\n    for i in range(data.shape[0]):\r\n        decoded_data.append(np.argmax(data[i]))\r\n    return np.array(decoded_data)\r\n\r\n\r\n\r\n# read from the saved pickle file\r\ndata_fid = open('Nour_pre.pkl', 'rb')\r\n[dataX, labels] = pickle.load(data_fid)\r\ndata_fid.close()\r\n\r\nencoder = LabelEncoder()\r\nencoder.fit(labels)\r\nencoded_Y = encoder.transform(labels)\r\n\r\n\r\ndataY = np_utils.to_categorical(encoded_Y)\r\n# split data into train and test with ratio 90,10 respectively\r\n(trainX, testX, trainY, testY) = train_test_split(dataX, dataY, test_size=0.10, random_state=42)\r\n\r\n# augmentation process\r\naug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,\r\n                         height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,\r\n                         horizontal_flip=True, fill_mode=\"nearest\")\r\n\r\n# Convolutional neural network structure\r\ntraining = False # training is False to test instead of train\r\nif training: # if training == True, start training\r\n    \r\n    images = Input(shape=(224, 224, 3))\r\n    x1 = Conv2D(32, (3, 3), strides=(1, 1), kernel_initializer=networkInitialize, padding='same')(images)\r\n    x1 = BatchNormalization()(x1)\r\n    x1 = Activation('relu')(x1)\r\n    x1 = MaxPooling2D(pool_size = (2, 2))(x1)\r\n    x2 = Conv2D(64, (3, 3), strides=(1, 1), kernel_initializer=networkInitialize, padding='same')(x1)\r\n    x2 = BatchNormalization()(x2)\r\n    x2 = Activation('relu')(x2)\r\n    x2 = MaxPooling2D(pool_size=(2, 2))(x2)\r\n    x3 = Conv2D(64, (3, 3), strides=(1, 1), kernel_initializer=networkInitialize, padding='same')(x2)\r\n    x3 = BatchNormalization()(x3)\r\n    x3 = Activation('relu')(x3)\r\n    x3 = MaxPooling2D(pool_size=(2, 2))(x3)\r\n    x4 = Conv2D(128, (3, 3), strides=(1, 1), kernel_initializer=networkInitialize, padding='same')(x3)\r\n    x4 = BatchNormalization()(x4)\r\n    x4 = Activation('relu')(x4)\r\n    x4 = MaxPooling2D(pool_size=(2, 2))(x4)\r\n    x5 = Conv2D(128, (3, 3), strides=(1, 1), kernel_initializer=networkInitialize, padding='same')(x4)\r\n    x5 = BatchNormalization()(x5)\r\n    x5 = Activation('relu')(x5)\r\n    x5 = MaxPooling2D(pool_size=(2, 2))(x5)\r\n    x6 = Conv2D(256, (3, 3), strides=(1, 1), kernel_initializer=networkInitialize, padding='same')(x5)\r\n    x6 = BatchNormalization()(x6)\r\n    x6 = Activation('relu')(x6)\r\n    x6 = MaxPooling2D(pool_size=(2, 2))(x6)\r\n    x7 = Conv2D(256, (3, 3), strides=(1, 1), kernel_initializer=networkInitialize, padding='same')(x6)\r\n    x7 = BatchNormalization()(x7)\r\n    x7 = Activation('relu')(x7)\r\n    x7 = MaxPooling2D(pool_size=(2, 2))(x7)\r\n    x8 = Flatten()(x7)\r\n    x9 = Dense(500, activation='relu')(x8)\r\n    outputs = Dense(2, activation='sigmoid')(x9)\r\n    \r\n    model = Model(inputs=images, outputs=outputs)\r\n    checkpoint = ModelCheckpoint('model-{epoch:03d}-{acc:03f}-{val_acc:03f}.h5', verbose=1, monitor='val_acc', save_best_only=True, mode='auto') # monitor the validation accuracy to save the best model\r\n    csv_logger = CSVLogger('report\\\\log_'+str(INIT_LR)+'.csv', append=False, separator=';')\r\n\r\n    sgd = optimizers.SGD(lr=INIT_LR, decay=1e-6, momentum=0.9, nesterov=True)\r\n    model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])\r\n    print(model.summary())\r\n\r\n\t# serialize model to JSON\r\n    model_json = 
model.to_json()\r\n with open('model.json', \"w\") as json_file:\r\n json_file.write(model_json)\r\n\r\n # serialize weights to HDF5\r\n model.save_weights('model_' + str(epoch) + '.h5')\r\n\r\n H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),\r\n validation_data=(testX, testY), steps_per_epoch=len(trainX),\r\n epochs=epoch, callbacks=[csv_logger, checkpoint])\r\n '''\r\n # plot the training loss and accuracy\r\n N = np.arange(0, epoch)\r\n plt.style.use(\"ggplot\")\r\n plt.figure()\r\n plt.plot(N, H.history[\"loss\"], label=\"train_loss\")\r\n plt.plot(N, H.history[\"val_loss\"], label=\"val_loss\")\r\n plt.plot(N, H.history[\"acc\"], label=\"train_acc\")\r\n plt.plot(N, H.history[\"val_acc\"], label=\"val_acc\")\r\n plt.title(\"Training Loss and Accuracy \" + str(INIT_LR))\r\n plt.xlabel(\"Epoch #\")\r\n plt.ylabel(\"Loss\\\\Accuracy\")\r\n plt.legend()\r\n plt.savefig('Plots\\\\history_fig')\r\n '''\r\nelse:\r\n\r\n # load json model\r\n json_file = open('model.json', 'r')\r\n loaded_model_json = json_file.read()\r\n json_file.close()\r\n\r\n model = model_from_json(loaded_model_json)\r\n # load weights into new model\r\n model.load_weights('model-100-0.998283-0.746269.h5')\r\n\r\n\r\n predictions = model.predict(testX)\r\n report = classification_report(testY.argmax(axis=1), predictions.argmax(axis=1))\r\n print(report)\r\n classification_report_csv(report, 'test\\\\' + str(INIT_LR))\r\n\r\n\r\nresult = decode(model.predict(testX))\r\nprint(result)\r\nref_result = decode(testY)\r\nprint(ref_result)\r\nacc = 100 * (1 - float(np.count_nonzero(result - ref_result))/float(len(result)))\r\nprint('Acc = ' + str(acc))\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"421061860","text":"from django.urls import path, include\nfrom . 
import views\nfrom .views import default_redirect_login_page\n\nurlpatterns = [\n    path('', default_redirect_login_page),\n    path('home', views.index, name='index'),\n    path('categories/details//', views.category_details, name='category_details'),\n    path('groups/details/', views.group_details, name='group_details'),\n    path('accounts/', include('django.contrib.auth.urls')),\n    path('accounts/register/', views.register, name='register'),\n    path('groups', views.groups, name='groups'),\n    path('accounts/profile', views.profile, name='profile'),\n    path('accounts/profile/edit', views.edit_profile, name='edit_profile'),\n    path('groups//delete', views.group_delete, name = 'group_delete'),\n    path('groups//edit', views.edit_group, name='edit_group'),\n    path('groups//leave', views.leave_group, name='leave_group'),\n\n]","sub_path":"categories/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"400285493","text":"import sys\narg = sys.argv[1]\n\npersons = {}\n\nclass Student:\n    def __init__(self):\n        self.fullname = \"\"\n        self.classroom = \"\"\n\n    def data_input(self):\n        self.fullname = input(\"Enter the student's details: \")\n        self.classroom = input(\"Enter the class name: \")\n\n    def data_output(self):\n        v = persons.values()\n        for z in v:\n            if isinstance (z, Teacher):\n                if self.classroom in z.classroom:\n                    print(\"Teacher: \", z.fullname)\n                    print(\"Subjects: \", z.subject)\n\n\nclass Teacher:\n    def __init__(self):\n        self.fullname = \"\"\n        self.classroom = []\n        self.subject = []\n\n    def data_input(self):\n        self.fullname = input(\"Enter the teacher's details: \")\n        while True:\n            classrooms = input(\"Enter the classes taught: \")\n            if not classrooms:\n                break\n            self.classroom.append(classrooms)\n        while True:\n            subjects = input(\"Enter the subject name: \")\n            if not subjects:\n                break\n            self.subject.append(subjects)\n\n    def data_output(self):\n        v = persons.values()\n        for z in v:\n            if isinstance (z, Mentor):\n                for c in self.classroom:\n                    if c in z.classroom:\n                        print(\"Mentor: \", z.fullname)\n\nclass Mentor:\n    def __init__(self):\n        self.fullname = \"\"\n        self.classroom = []\n\n    def data_input(self):\n        self.fullname = input(\"Enter the mentor's details: \")\n        while True:\n            classrooms = input(\"Enter the classes taught: \")\n            if not classrooms:\n                break\n            self.classroom.append(classrooms)\n\n    def data_output(self):\n        v = persons.values()\n        for z in v:\n            if isinstance (z, Student):\n                if z.classroom in self.classroom:\n                    print(z.fullname)\n\n\nwhile True:\n    fhand = input(\"Enter the user type: \")\n    if fhand == \"student\":\n        person = Student()\n    elif fhand == \"teacher\":\n        person = Teacher()\n    elif fhand == \"mentor\":\n        person = Mentor()\n    else:\n        break\n    person.data_input()\n    persons[person.fullname] = person\n\nif arg in persons:\n    v = persons.get(arg)\n    v.data_output()\n\nelse:\n    v = persons.values()\n    for x in v:\n        if isinstance (x, Student):\n            if x.classroom == arg:\n                print(\"Student: \", x.fullname)\n    n = persons.values()\n    for y in n:\n        if isinstance (y, Mentor):\n            if arg in y.classroom:\n                print(\"Mentor: \", y.fullname)\nquit()\n","sub_path":"obiekty.py","file_name":"obiekty.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"486156194","text":"lines = input()\nlinesList = lines.split('.')\nlineWords = []\ncounter = 0 # Counts the index of the current word being checked\nfor i in range(len(linesList)):\n    linesList[i] = linesList[i].strip() # Deletes leading and trailing spaces\n    lineWords = linesList[i].split(' ')\n    lineWords.pop(0) # Deletes the first word\n    counter+=1\n    for j in range(len(lineWords)):\n        counter+=1\n        if lineWords[j] == lineWords[j].capitalize() and lineWords[j].isdecimal()!=True: # checks if the word is of the desired type\n            print(\"%i:%s\" %(counter, lineWords[j]))\n    lineWords = [] # removes the previously printed words so they are not printed again\n","sub_path":"FirstChapter/CapitalWords.py","file_name":"CapitalWords.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"352087362","text":"# submission gets 0.78468 public score\n# coding: utf-8\n\n# import modules\nimport pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt \nimport seaborn as sns\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, \\\n    ExtraTreesClassifier, VotingClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve\n\n# Can be used directly inside the IPython interpreter; it enables inline plotting and lets you skip the plt.show() step.\n# %matplotlib inline\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n# counter\nfrom collections import Counter\n\n# set default seaborn config\nsns.set(style='white', context='notebook', palette='deep')\n\n# Load data\ntrain = pd.read_csv(\"/Users/yangxinchen/Documents/Python/python_wheel/\\\n    machine_learning/kaggle_titanic/train.csv\")\ntest = pd.read_csv(\"/Users/yangxinchen/Documents/Python/python_wheel/\\\n    machine_learning/kaggle_titanic/test.csv\")\nIDtest = test[\"PassengerId\"]\n\n\n# Outlier detection\ndef detect_outliers(df, n, features):\n    outlier_indices = []\n\n    for col in features:\n        Q1 = np.percentile(df[col], 25)\n        Q3 = np.percentile(df[col], 75)\n        # Interquartile range\n        IQR = Q3 - Q1\n\n        outlier_step = 1.5 * IQR\n        print(outlier_step)\n        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + \\\n            outlier_step)].index\n        \n        outlier_indices.extend(outlier_list_col)\n\n    outlier_indices = Counter(outlier_indices)\n    multiple_outliers = list(k for k, v in outlier_indices.items() if v > n)\n\n    return multiple_outliers\n\n\n# drop outliers\nOutliers_to_drop = detect_outliers(train, 2, [\"SibSp\",\"Parch\",\"Fare\"])\n\n# print dropped ones\ntrain.loc[Outliers_to_drop]\ntrain = train.drop(Outliers_to_drop, axis = 0).reset_index(drop=True)\n\n\n# joining train and test set\ntrain_len = len(train)\ndataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True)\n\n\n# check for null and missing values\ndataset = dataset.fillna(np.nan)\ndataset.isnull().sum()\n\n# print train set info\ntrain.info()\ntrain.isnull().sum()\n\n# print train head rows\ntrain.head()\n\n# print train set data types\ntrain.dtypes\n\n# print train set description\ntrain.describe()\n\n\n# Feature analysis\n# Numerical values, plot correlation heatmap\ng = sns.heatmap(train[[\"Survived\",\"SibSp\",\"Parch\",\"Age\",\"Fare\"]].corr(),\n               annot=True, fmt=\".2f\", cmap=\"coolwarm\")\n\n\n# Explore SibSp feature vs Survived\ng = sns.catplot(x=\"SibSp\", y=\"Survived\", data=train, \n                kind=\"bar\", height=6, 
palette=\"muted\")\ng.despine(left=True)\ng = g.set_ylabels(\"survial probability\")\n\n\n# Explore Parch feature vs Survived \ng = sns.catplot(x=\"Parch\", y=\"Survived\", data=train, \n kind=\"bar\", height=6, palette=\"muted\")\ng.despine(left=True)\ng = g.set_ylabels(\"survival probability\")\n\n\n# Explore Age vs Survived\ng = sns.FacetGrid(train, col='Survived')\ng = g.map(sns.distplot, \"Age\")\n\n## test segment ##\n# g = sns.catplot(x=\"Age\", y=\"Survived\", data=train, kind=\"bar\", height=6, palette=\"muted\")\n# g.despine(left=True)\n# g = g.set_ylabels(\"survival probability\")\n\n# Explore Age distribution\ng = sns.kdeplot(train[\"Age\"][(train[\"Survived\"] == 0) & (train[\"Age\"].notnull())], \n color=\"Red\", shade=True)\ng = sns.kdeplot(train[\"Age\"][(train[\"Survived\"] == 1) & (train[\"Age\"].notnull())], \n color=\"Blue\", shade=True)\ng.set_xlabel(\"Age\")\ng.set_ylabel(\"Frequency\")\ng = g.legend([\"Not Survived\", \"Survived\"])\n\n# check null value in \"Fare\", and fill them with median number\ndataset[\"Fare\"].isnull().sum()\ndataset[\"Fare\"] = dataset[\"Fare\"].fillna(dataset[\"Fare\"].median())\n\n# Explore Fare distribution\ng = sns.distplot(dataset[\"Fare\"], color=\"m\", label=\"Skewness: %.2f\"%(dataset[\"Fare\"].skew()))\ng = g.legend(loc=\"best\")\n\n\n# Apply log to Fare to reduce skewness distribution\ndataset[\"Fare\"] = dataset[\"Fare\"].map(lambda i: np.log(i) if i > 0 else 0)\ng = sns.distplot(dataset[\"Fare\"], color=\"m\", label=\"Skewness: %.2f\"%(dataset[\"Fare\"].skew()))\ng = g.legend(loc=\"best\")\n\n\n# Categorical values processing\n# Sex\ng = sns.barplot(x=\"Sex\", y=\"Survived\", data=train)\ng = g.set_ylabel(\"Survival Probability\")\n\ntrain[[\"Sex\", \"Survived\"]].groupby('Sex').mean()\n\n\n# Explore Pclass vs Survived\ng = sns.catplot(x=\"Pclass\", y=\"Survived\", data=train, kind=\"bar\", \n height=6, palette=\"muted\")\ng.despine(left=True)\ng = g.set_ylabels(\"Survival probability\")\n\n# Explore Pclass vs Survived by Sex\ng = sns.catplot(x=\"Pclass\", y=\"Survived\", hue=\"Sex\", data=train, \n height=6, kind=\"bar\", palette=\"muted\")\ng.despine(left=True)\ng = g.set_ylabels(\"Survival probability\")\n\n# Embarked\ndataset[\"Embarked\"].isnull().sum()\ndataset[\"Embarked\"] = dataset[\"Embarked\"].fillna(\"S\")\n\n# Explore Embarked vs Survived\ng = sns.catplot(x=\"Embarked\", y=\"Survived\", data=train, \n height=6, kind=\"bar\", palette=\"muted\")\ng.despine(left=True)\ng = g.set_ylabels(\"survival probability\")\n\n# Explore Pclass vs Embarked\ng = sns.catplot(\"Pclass\", col=\"Embarked\", data=train, \n height=6, kind=\"count\", palette=\"muted\")\ng.despine(left=True)\ng = g.set_ylabels(\"Count\")\n\n\n# Filling missing values\n# Age \n# Explore Age vs Sex, Parch, Pclass and SibSp\ng = sns.catplot(y=\"Age\", x=\"Sex\", data=dataset, kind=\"box\")\ng = sns.catplot(y=\"Age\", x=\"Sex\", hue=\"Pclass\", data=dataset, kind=\"box\")\ng = sns.catplot(y=\"Age\", x=\"Parch\", data=dataset, kind=\"box\")\ng = sns.catplot(y=\"Age\", x=\"SibSp\", data=dataset, kind=\"box\")\n\n# convert Sex into categorical value 0 for male and 1 for female\ndataset[\"Sex\"] = dataset[\"Sex\"].map({\"male\":0, \"female\":1})\n\n# plot correlation heatmap\ng = sns.heatmap(dataset[[\"Age\",\"Sex\",\"SibSp\",\"Parch\",\"Pclass\"]].corr(), cmap=\"BrBG\", annot=True)\n\n\n# Filling missing value of Age\n# Fill Age with the median age of similar rows according to Pclass, Parch and SibSp\n# Index of NaN age rows\nindex_NaN_age = 
list(dataset[\"Age\"][dataset[\"Age\"].isnull()].index)\n\nfor i in index_NaN_age: \n age_med = dataset[\"Age\"].median()\n age_pred = dataset[\"Age\"][((dataset['SibSp'] == dataset.iloc[i][\"SibSp\"]) & \\\n (dataset['Parch'] == dataset.iloc[i][\"Parch\"]) & \\\n (dataset['Pclass'] == dataset.iloc[i][\"Pclass\"]))].median()\n if not np.isnan(age_pred):\n dataset['Age'].iloc[i] = age_pred\n else:\n dataset['Age'].iloc[i] = age_med\n \n# plot relations between Survived and Age \ng = sns.catplot(x=\"Survived\", y=\"Age\", data=train, kind=\"box\")\ng = sns.catplot(x=\"Survived\", y=\"Age\", data=train, kind=\"violin\")\n\n\n# Feature engineering \n# Name/Title, check data type for \"Name\" feature\ndataset[\"Name\"].head()\n\n# Get Title from Name\ndataset_title = [i.split(\",\")[1].split(\".\")[0].strip() for i in dataset[\"Name\"]]\ndataset[\"Title\"] = pd.Series(dataset_title)\ndataset[\"Title\"].head()\n\n# plot numbers of each Title\ng = sns.countplot(x=\"Title\", data=dataset)\ng = plt.setp(g.get_xticklabels(), rotation=45)\n\n# handling with special Title and turn it into numberic feature\ndataset[\"Title\"] = dataset[\"Title\"].replace(['Lady','the Countess','Countess','Capt',\n 'Col','Don','Dr','Major','Rev','Sir','Jonkheer','Dona'], 'Rare')\ndataset[\"Title\"] = dataset[\"Title\"].map({\"Master\":0, \"Miss\":1, \"Ms\":1, \"Mme\":1, \n \"Mlle\":1, \"Mrs\":1, \"Mr\":2, \"Rare\":3})\n\n# set data type as int \ndataset[\"Title\"] = dataset[\"Title\"].astype(int)\n\n# print \"Title\" head rows\ndataset[\"Title\"].head()\n\n# plot number counts for updated \"Title\"\ng = sns.countplot(dataset[\"Title\"])\ng = g.set_xticklabels([\"Master\",\"Miss/Ms/Mme/Mlle/Mrs\",\"Mr\",\"Rare\"])\n\n# plot relations between updated \"Title\" and \"Survived\" col\ng = sns.catplot(x=\"Title\", y=\"Survived\", data=dataset, kind=\"bar\")\ng = g.set_xticklabels([\"Master\",\"Miss-Mrs\",\"Mr\",\"Rare\"])\ng = g.set_ylabels(\"survival probability\")\n\n\n# Drop Name variable\ndataset.drop(labels=[\"Name\"], axis=1, inplace=True)\n\n\n# Family size (merge features)\ndataset[\"Fsize\"] = dataset[\"SibSp\"] + dataset[\"Parch\"] + 1\n# plot Survival Probability with Family size\ng = sns.factorplot(x=\"Fsize\", y=\"Survived\", data=dataset)\ng = g.set_ylabels(\"Survival Probability\")\n\n# Create new feature of family size\ndataset['Single'] = dataset['Fsize'].map(lambda s: 1 if s == 1 else 0)\ndataset['SmallF'] = dataset['Fsize'].map(lambda s: 1 if s == 2 else 0)\ndataset['MedF'] = dataset['Fsize'].map(lambda s: 1 if 3 <= s <= 4 else 0)\ndataset['LargeF'] = dataset['Fsize'].map(lambda s: 1 if s>=5 else 0)\n\n# plot Survival Probability with different family size\ng = sns.catplot(x=\"Single\", y=\"Survived\", data=dataset, kind=\"bar\")\ng = g.set_ylabels(\"Survival Probability\")\ng = sns.catplot(x=\"SmallF\", y=\"Survived\", data=dataset, kind=\"bar\")\ng = g.set_ylabels(\"Survival Probability\")\ng = sns.catplot(x=\"MedF\", y=\"Survived\", data=dataset, kind=\"bar\")\ng = g.set_ylabels(\"Survival Probability\")\ng = sns.catplot(x=\"LargeF\", y=\"Survived\", data=dataset, kind=\"bar\")\ng = g.set_ylabels(\"Survival Probability\")\n\n\n# convert to indicator values Title and Embarked\ndataset = pd.get_dummies(dataset, columns = [\"Title\"])\ndataset = pd.get_dummies(dataset, columns = [\"Embarked\"], prefix=\"Em\")\n\n# print head rows for check\ndataset.head()\n\n\n# handling with 
Cabin\ndataset[\"Cabin\"].head()\ndataset[\"Cabin\"].describe()\ndataset[\"Cabin\"].isnull().sum()\ndataset[\"Cabin\"][dataset[\"Cabin\"].notnull()].head()\n\n# Replace the Cabin number by the type of cabin 'X' if not \ndataset[\"Cabin\"] = pd.Series([i[0] if not pd.isnull(i) else 'X' for i in dataset['Cabin']])\n\n# plot the number counts in \"Cabin\"\ng = sns.countplot(dataset[\"Cabin\"], order=['A','B','C','D','E','F','G','T','X'])\n\n# plot the Survival Probability for different Cabin\ng = sns.catplot(y=\"Survived\", x=\"Cabin\", data=dataset, \n kind=\"bar\", order=['A','B','C','D','E','F','G','T','X'])\ng = g.set_xlabels(\"survival probability\")\n\n# set one-hot encode for \"Cabin\"\ndataset = pd.get_dummies(dataset, columns=[\"Cabin\"], prefix=\"Cabin\")\n\n\n# handling with Ticket\ndataset[\"Ticket\"].head()\n# Treat Ticket by extracting the tikcet prefix. When there is no prefix it returns X.\nTicket = []\nfor i in list(dataset.Ticket):\n if not i.isdigit():\n Ticket.append(i.replace(\".\",\"\").replace(\"/\",\"\").strip().split(' ')[0])\n else:\n Ticket.append('X')\n\ndataset[\"Ticket\"] = Ticket\ndataset[\"Ticket\"].head()\n\n# set dummies for \"Ticket\" columns\ndataset = pd.get_dummies(dataset, columns=[\"Ticket\"], prefix=\"T\")\n\n\n# Create categorical values for Pclass\ndataset[\"Pclass\"] = dataset[\"Pclass\"].astype(\"category\")\ndataset = pd.get_dummies(dataset, columns=[\"Pclass\"], prefix=\"Pc\")\n\n\n# Drop PassengerId\ndataset.drop(labels=[\"PassengerId\"], axis=1, inplace=True)\n\n\n# check dataset head\ndataset.head()\n\n\n\n## MODELING ##\n## Seperate train dataset and test dataset\ntrain = dataset[:train_len]\ntest = dataset[train_len:]\ntest.drop(labels=[\"Survived\"],axis=1, inplace=True)\n\n# Separate train features and label\ntrain[\"Survived\"] = train[\"Survived\"].astype(int)\nY_train = train[\"Survived\"]\nX_train = train.drop(labels = [\"Survived\"], axis=1)\n\n\n# Simple modeling\n# Cross validate model with Kfold stratified cross val\nkfold = StratifiedKFold(n_splits=10)\n\n# Modeling step Test differents algorithms\nrandom_state = 2\nclassifiers = []\nclassifiers.append(SVC(random_state=random_state))\nclassifiers.append(DecisionTreeClassifier(random_state=random_state))\nclassifiers.append(AdaBoostClassifier(DecisionTreeClassifier(random_state=random_state),\n random_state=random_state, learning_rate=0.1))\nclassifiers.append(RandomForestClassifier(random_state=random_state))\nclassifiers.append(ExtraTreesClassifier(random_state=random_state))\nclassifiers.append(GradientBoostingClassifier(random_state=random_state))\n#classifiers.append(MLPClassifier(random_state=random_state))\nclassifiers.append(KNeighborsClassifier())\nclassifiers.append(LogisticRegression(random_state=random_state))\n#classifiers.append(LinearDiscriminantAnalysis())\n\ncv_results = []\nfor classifier in classifiers:\n cv_results.append(cross_val_score(classifier, X_train, y=Y_train, \n scoring=\"accuracy\", cv=kfold, n_jobs=4))\n\ncv_means = []\ncv_std = []\nfor cv_result in cv_results:\n cv_means.append(cv_result.mean())\n cv_std.append(cv_result.std())\n\n# cv_res = pd.DataFrame({\"CrossValMeans\":cv_means, \"CrossValerrors\":cv_std, \"Algorithm\":\n# [\"SVC\",\"DecistionTree\",\"AdaBoost\",\"RandomForest\", \"ExtraTrees\",\"GradientBoosting\", \n# \"MultipleLayerPerceptron\",\"KNeighboors\",\"LogisticRegression\",\"LinerDiscriminantAnalysis\"]})\ncv_res = pd.DataFrame({\"CrossValMeans\":cv_means, \"CrossValerrors\":cv_std, \"Algorithm\":\\\n 
[\"SVC\",\"DecistionTree\",\"AdaBoost\",\"RandomForest\", \"ExtraTrees\",\"GradientBoosting\", \\\n \"KNeighboors\",\"LogisticRegression\"]})\n\n\n# plot mean Accuracy for each classifier\ng = sns.barplot(\"CrossValMeans\", \"Algorithm\", data=cv_res, palette=\"Set3\", \n orient=\"h\",**{'xerr':cv_std})\ng.set_xlabel(\"Mean Accuracy\")\ng = g.set_title(\"Cross validation scores\")\n\n\n\n# Hyperparameter tunning for best models\n# META MODELING WITH ADABOOST, RF, EXTRATREES AND GRADIENTBOOSTING\n\n# Adaboost\nDTC = DecisionTreeClassifier()\nadaDTC = AdaBoostClassifier(DTC, random_state=7)\nada_param_grid = {\"base_estimator__criterion\":[\"gini\", \"entropy\"],\n \"base_estimator__splitter\":[\"best\", \"random\"],\n \"algorithm\":[\"SAMME\", \"SAMME.R\"],\n \"n_estimators\": [1, 2],\n \"learning_rate\": [0.0001, 0.001, 0.1, 0.2, 0.3, 1.5]}\n\ngsadaDTC = GridSearchCV(adaDTC, param_grid=ada_param_grid, cv=kfold, \n scoring=\"accuracy\", n_jobs=4, verbose=1)\ngsadaDTC.fit(X_train, Y_train)\nada_best = gsadaDTC.best_estimator_\ngsadaDTC.best_score_\n\n\n#ExtraTrees\nExtC = ExtraTreesClassifier()\nex_param_grid = {\"max_depth\": [None],\n \"max_features\":[1, 3, 10],\n \"min_samples_split\":[2, 3, 10],\n \"min_samples_leaf\":[1, 3, 10],\n \"bootstrap\":[False],\n \"n_estimators\":[100, 300],\n \"criterion\":[\"gini\"]}\ngsExtC = GridSearchCV(ExtC, param_grid=ex_param_grid, cv=kfold, \n scoring=\"accuracy\", n_jobs=4, verbose=1)\ngsExtC.fit(X_train, Y_train)\nExtC_best = gsExtC.best_estimator_\n\ngsExtC.best_score_\n\n\n# RFC parameters tunning\nRFC = RandomForestClassifier()\nrf_param_grid = {\"max_depth\":[None],\n \"max_features\":[1, 3, 10],\n \"min_samples_split\":[2, 3, 10],\n \"min_samples_leaf\":[1, 3, 10],\n \"bootstrap\":[False],\n \"n_estimators\":[100, 300],\n \"criterion\":[\"gini\"]}\n\ngsRFC = GridSearchCV(RFC, param_grid=rf_param_grid, cv=kfold, \n scoring=\"accuracy\", n_jobs=4, verbose=1)\ngsRFC.fit(X_train, Y_train)\nRFC_best = gsRFC.best_estimator_\n\ngsRFC.best_score_\n\n\n\n# GradientBoosting\nGBC = GradientBoostingClassifier()\ngb_param_grid = {\"loss\": [\"deviance\"],\n \"n_estimators\":[100, 200, 300],\n \"learning_rate\":[0.1, 0.05, 0.01],\n \"max_depth\":[4, 8],\n \"min_samples_leaf\":[100, 150],\n 'max_features':[0.3, 0.1]}\n\ngsGBC = GridSearchCV(GBC, param_grid=gb_param_grid, cv=kfold, \n scoring=\"accuracy\", n_jobs=4, verbose=1)\ngsGBC.fit(X_train, Y_train)\nGBC_best = gsGBC.best_estimator_\n\ngsGBC.best_score_\n\n\n# SVC classifier\nSVMC = SVC(probability=True)\nsvc_param_grid = {'kernel':['rbf'],\n 'gamma':[0.001, 0.01, 0.1, 1],\n 'C':[1, 10, 50, 100, 200, 300, 1000]}\ngsSVMC = GridSearchCV(SVMC, param_grid=svc_param_grid, cv=kfold, \n scoring=\"accuracy\", n_jobs=4, verbose=1)\ngsSVMC.fit(X_train, Y_train)\nSVMC_best = gsSVMC.best_estimator_\n\ngsSVMC.best_score_\n\n\n# Plot learning curves\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, \n cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n \n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n \n plt.grid()\n \n plt.fill_between(train_sizes, train_scores_mean - train_scores_std, \n 
train_scores_mean + train_scores_std, alpha=0.1, color=\"r\")\n \n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color='g')\n plt.plot(train_sizes, train_scores_mean, 'o-', color='r', \n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color='g',\n label=\"Cross-validation score\")\n \n plt.legend(loc=\"best\")\n return plt\n\ng = plot_learning_curve(gsRFC.best_estimator_, \n \"RF learning curves\", X_train, Y_train, cv=kfold)\ng = plot_learning_curve(gsExtC.best_estimator_, \n \"ExtraTrees learning curves\", X_train, Y_train, cv=kfold)\ng = plot_learning_curve(gsSVMC.best_estimator_, \n \"SVM learning curves\", X_train, Y_train, cv=kfold)\ng = plot_learning_curve(gsadaDTC.best_estimator_, \n \"AdaBoost learning curves\", X_train, Y_train, cv=kfold)\ng = plot_learning_curve(gsGBC.best_estimator_, \n \"GradientBoosting learning curves\", X_train, Y_train, cv=kfold)\n\n\n# Feature importance of tree based classifiers\nnrows = ncols = 2\nfig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=\"all\", figsize=(15,15))\nnames_classifiers =[(\"AdaBoosting\",ada_best),(\"ExtraTrees\",ExtC_best),\\\n (\"RandomForest\",RFC_best),(\"GradientBoosting\",GBC_best)]\n\nnclassifier = 0\nfor row in range(nrows):\n for col in range(ncols):\n name = names_classifiers[nclassifier][0]\n classifier = names_classifiers[nclassifier][1]\n indices = np.argsort(classifier.feature_importances_)[::-1][:40]\n g = sns.barplot(y=X_train.columns[indices][:40], \n x=classifier.feature_importances_[indices][:40], orient='h',ax=axes[row][col])\n g.set_xlabel(\"Relative importance\", fontsize=12)\n g.set_ylabel(\"Features\", fontsize=12)\n g.tick_params(labelsize=9)\n g.set_title(name + \"feature importance\")\n nclassifier += 1\n\n\ntest_Survived_RFC = pd.Series(RFC_best.predict(test), name=\"RFC\")\ntest_Survived_ExtC = pd.Series(ExtC_best.predict(test), name=\"ExtC\")\ntest_Survived_SVMC = pd.Series(SVMC_best.predict(test), name=\"SVMC\")\ntest_Survived_AdaC = pd.Series(ada_best.predict(test), name=\"Ada\")\ntest_Survived_GBC = pd.Series(GBC_best.predict(test), name=\"GBC\")\n\nensemble_results = pd.concat([test_Survived_RFC, test_Survived_ExtC, \n test_Survived_SVMC, test_Survived_AdaC, test_Survived_GBC], axis=1)\n\ng = sns.heatmap(ensemble_results.corr(), annot=True)\n\n\n# voting classifier\nvotingC = VotingClassifier(estimators=[('rfc', RFC_best), ('extc', ExtC_best), \n ('svc', SVMC_best), ('adac', ada_best), ('gbc', GBC_best)], voting='soft', n_jobs=4)\nvotingC = votingC.fit(X_train, Y_train)\n\n\n# generate submission\ntest_Survived = pd.Series(votingC.predict(test), name=\"Survived\")\nresults = pd.concat([IDtest, test_Survived], axis=1)\nresults.to_csv(\"/Users/yangxinchen/Documents/Python/python_wheel/machine_learning/\\\n kaggle_titanic_top4per/ensemble_python_voting.csv\", index=False)\n","sub_path":"Titanic/ensembling_model.py","file_name":"ensembling_model.py","file_ext":"py","file_size_in_byte":20114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"588872884","text":"import pyaudio\nimport time\nimport numpy as np\nimport scipy\n\nRATE = 20000\nDELAY = 900\n\n# hl is a unit impulse function\nhl = []\nhl.extend([1])\nhl.extend([0]*DELAY)\n\n# hr is a delay impulse function\nhr = []\nhr.extend([0]*DELAY)\nhr.extend([1])\n\ne=[]\ne.extend([0]*len(hl))\n\ndef callback(in_data, frame_count, time_info, status, hl=hl,hr=hr,d=e[:],c=e[:]):\n\n # Transform byte 
format in_data to float format out_data\n out_data = np.fromstring(in_data,dtype=np.short)\n out_data = out_data/(2.0**15)\n\n # divide to left&right\n out_data_left=[]\n out_data_right=[]\n \n for i in range(int(len(out_data)/2)):\n out_data_left.extend(out_data[i*2:i*2+1])\n out_data_right.extend(out_data[i*2+1:i*2+2])\n\n FREQ = -12\n fft_left = scipy.fft(out_data_left)\n fft_right = scipy.fft(out_data_right)\n\n if FREQ>0:\n fft_left = np.concatenate(([0]*FREQ, fft_left[:1024-FREQ]))\n fft_right = np.concatenate(([0]*FREQ, fft_right[:1024-FREQ]))\n else:\n fft_left = np.concatenate((fft_left[-FREQ:1024], [0]*(-FREQ)))\n fft_right = np.concatenate((fft_right[-FREQ:1024], [0]*(-FREQ)))\n\n out_data_left = scipy.ifft(fft_left)\n out_data_right = scipy.ifft(fft_right)\n\n # overlap\n n = len(out_data)\n m = len(hl)\n l = len(out_data_left)\n\n # put left & right together\n out_data2 = out_data[0:n]\n for i in range(int(len(out_data)/2)):\n out_data2[i*2] = out_data_left[i]\n out_data2[i*2+1] = out_data_right[i]\n \n # Transform float format out_data2 to byte format out\n out_data2 *= 2.0**15\n out = np.int16(out_data2)\n out = out.tostring()\n \n return (out, pyaudio.paContinue)\n\np = pyaudio.PyAudio()\nstream = p.open(format=pyaudio.paInt16,\n channels=2,\n rate=RATE,\n input=True,\n output=True,\n stream_callback=callback)\n\nstream.start_stream()\nwhile stream.is_active():\n time.sleep(0.2)\n\np.terminate()\n","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"476086956","text":"from yahoo_app.share import Share\nfrom yahoo_app.file_io import read_shares_list, write_csv\n\n\ndef shares_io():\n\t\"\"\"Busca as informações e grava em um arquivo csv\"\"\"\n\n\tprint(\"Lendo lista de ações\")\n\tshares_list = read_shares_list(\"lista_acoes.txt\")\n\tshares = []\n\n\tprint(\"Obtendo informações\")\n\tfor s in shares_list:\n\t\tshare = Share(s)\n\t\tshare.get_share_information()\n\t\tshares.append(share)\n\n\tprint(\"Iniciando gravação do arquivo\")\n\twrite_csv(\"shares_table.csv\", shares)\n\tprint(\"Gravação completa\")\n\n\treturn shares","sub_path":"yahoo_app/yahoo_finance.py","file_name":"yahoo_finance.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"406492475","text":"import json\n\nfrom django.test import TestCase, Client\nfrom django.urls import reverse\n\nfrom boggle.games.models import Game\nfrom boggle.games.utils import (\n get_token,\n load_test_board,\n)\n\n\nclass GameTest(TestCase):\n def setUp(self):\n self.c = Client()\n\n def test_create_game(self):\n url = reverse('games_api:games')\n duration = 100\n # test invalid if random is missing\n resp = self.c.post(\n url,\n json.dumps({\n \"duration\": duration,\n }),\n content_type=\"application/json\"\n )\n self.assertEquals(400, resp.status_code)\n\n resp = self.c.post(\n url,\n json.dumps({\n \"duration\": duration,\n \"random\": False,\n \"board\": \"A,B,C,D\"\n }),\n content_type=\"application/json\"\n )\n self.assertEquals(400, resp.status_code)\n\n resp = self.c.post(\n url,\n json.dumps({\n \"duration\": duration,\n \"random\": False,\n \"board\": \"TT, A, P, *, E, A, K, S, O, B, R, S, S, *, X, D\"\n }),\n content_type=\"application/json\"\n )\n self.assertEquals(400, resp.status_code)\n\n resp = self.c.post(\n url,\n json.dumps({\n \"duration\": duration,\n \"random\": False,\n 
\"board\": \"T, A, P, *, E, A, K, S, O, B, R, S, S, *, X, D\"\n }),\n content_type=\"application/json\"\n )\n self.assertEquals(201, resp.status_code)\n\n resp = self.c.post(\n url,\n json.dumps({\n \"duration\": duration,\n \"random\": False\n }),\n content_type=\"application/json\"\n )\n self.assertEquals(201, resp.status_code)\n data = resp.json()\n self.assertEquals(data['duration'], duration)\n created_game = Game.objects.get(id=data['id'])\n\n self.assertEquals(created_game.id, data['id'])\n self.assertEquals(created_game.duration, duration)\n\n\n\n\nclass GameDetailTest(TestCase):\n def setUp(self):\n self.c = Client()\n self.duration = 100\n self.game = Game.objects.create(\n token=get_token(),\n duration=self.duration,\n board=load_test_board()\n )\n self.url = reverse(\n 'games_api:get_game',\n kwargs={'game_id': self.game.id})\n\n def test_get_game(self):\n resp = self.c.get(self.url)\n\n self.assertEquals(200, resp.status_code)\n data = resp.json()\n self.assertEquals(data['duration'], self.duration)\n self.assertEquals(data['id'], self.game.id)\n self.assertEquals(data['token'], self.game.token)\n self.assertEquals(data['board'], self.game.board)\n self.assertEquals(data['points'], 0)\n\n def test_play_game(self):\n # test invalid if token is invalid\n resp = self.c.put(\n self.url,\n json.dumps({\n \"token\": 'ABCASD',\n \"word\": \"tap\"\n })\n )\n self.assertEquals(401, resp.status_code)\n\n # test invalid if word is missing\n resp = self.c.put(\n self.url,\n json.dumps({\n \"token\": self.game.token\n })\n )\n self.assertEquals(400, resp.status_code)\n\n resp = self.c.put(\n self.url,\n json.dumps({\n \"random\": False,\n \"token\": self.game.token,\n \"word\": \"tap\"\n })\n )\n self.assertEquals(200, resp.status_code)\n","sub_path":"boggle/games/tests/tests_game.py","file_name":"tests_game.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"70090661","text":"#!/usr/local/bin python\n# -*- coding: utf-8 -*-\n\n# Created on 202106121937\n# Author: zhuoyin94 \n# Github: https://github.com/MichaelYin1994\n\n'''\n本模块(input_pipeline.py)构建数据读取与预处理的pipline,并训练神经网络模型。\n'''\n\nimport multiprocessing as mp\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder, LabelBinarizer\nfrom tensorflow import keras\nfrom tensorflow.keras import Model, layers\nfrom tensorflow.keras.optimizers import Adam\nfrom tqdm import tqdm\n\nfrom dingtalk_remote_monitor import RemoteMonitorDingTalk\nfrom models import build_model_resnet50_v2, build_model_resnet101_v2\n\nGLOBAL_RANDOM_SEED = 65535\n# np.random.seed(GLOBAL_RANDOM_SEED)\n# tf.random.set_seed(GLOBAL_RANDOM_SEED)\n\nTASK_NAME = 'iccv_meituan_2021'\nGPU_ID = 0\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n try:\n # 限制Tensorflow只使用GPU ID编号的GPU\n tf.config.experimental.set_visible_devices(gpus[GPU_ID], 'GPU')\n\n # 限制Tensorflow不占用所有显存\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n # print(len(gpus), 'Physical GPUs,', len(logical_gpus), 'Logical GPUs')\n except RuntimeError as e:\n print(e)\n# ----------------------------------------------------------------------------\n\ndef build_efficentnet_model(verbose=False, is_compile=True, **kwargs):\n 
'''构造基于imagenet预训练的ResNetV2的模型,并返回编译过的模型。'''\n\n # 解析preprocessing与model的参数\n # ---------------------\n input_shape = kwargs.pop('input_shape', (None, 224, 224))\n n_classes = kwargs.pop('n_classes', 1000)\n\n model_name = kwargs.pop('model_name', 'EfficentNetB0')\n model_lr = kwargs.pop('model_lr', 0.01)\n model_label_smoothing = kwargs.pop('model_label_smoothing', 0.1)\n\n # 依据关键字,构建模型\n # ---------------------\n model = tf.keras.Sequential()\n\n if 'B0' in model_name:\n model_tmp = tf.keras.applications.EfficientNetB0\n elif 'B1' in model_name:\n model_tmp = tf.keras.applications.EfficientNetB1\n elif 'B2' in model_name:\n model_tmp = tf.keras.applications.EfficientNetB2\n elif 'B3' in model_name:\n model_tmp = tf.keras.applications.EfficientNetB3\n elif 'B4' in model_name:\n model_tmp = tf.keras.applications.EfficientNetB4\n elif 'B5' in model_name:\n model_tmp = tf.keras.applications.EfficientNetB5\n elif 'B6' in model_name:\n model_tmp = tf.keras.applications.EfficientNetB6\n elif 'B7' in model_name:\n model_tmp = tf.keras.applications.EfficientNetB7\n\n model.add(\n model_tmp(\n input_shape=input_shape, \n include_top=False,\n weights='imagenet',\n drop_connect_rate=0.4,\n )\n )\n model.add(tf.keras.layers.GlobalAveragePooling2D())\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(\n 256, activation='relu',\n ))\n model.add(tf.keras.layers.Dropout(0.5))\n model.add(tf.keras.layers.Dense(n_classes, activation='softmax'))\n\n # 编译模型\n # ---------------------\n if verbose:\n model.summary()\n\n if is_compile:\n model.compile(\n loss=tf.keras.losses.CategoricalCrossentropy(\n label_smoothing=model_label_smoothing),\n optimizer=Adam(model_lr),\n metrics=['acc'])\n\n return model\n\n\ndef load_preprocessing_img(image_size, stage):\n '''通过闭包实现参数化的Image Loading与TTA数据增强。'''\n if stage not in ['train', 'valid', 'test']:\n raise ValueError('stage must be either train, valid or test !')\n\n if stage is 'train' or stage is 'test':\n def load_img(path=None):\n image = tf.io.read_file(path)\n image = tf.cond(\n tf.image.is_jpeg(image),\n lambda: tf.image.decode_jpeg(image, channels=3),\n lambda: tf.image.decode_gif(image)[0])\n\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_brightness(image, 0.3)\n\n image = tf.image.random_flip_left_right(image)\n image = tf.image.random_flip_up_down(image)\n\n image = tf.image.resize(image, image_size)\n return image\n else:\n def load_img(path=None):\n image = tf.io.read_file(path)\n image = tf.cond(\n tf.image.is_jpeg(image),\n lambda: tf.image.decode_jpeg(image, channels=3),\n lambda: tf.image.decode_gif(image)[0])\n\n image = tf.image.resize(image, image_size)\n return image\n\n return load_img\n\n\nif __name__ == '__main__':\n # 全局化的参数列表\n # ---------------------\n IMAGE_SIZE = (128, 128)\n BATCH_SIZE = 32\n NUM_EPOCHS = 128\n EARLY_STOP_ROUNDS = 5\n TTA_ROUNDS = 20\n MIN_CLASS_ID, MAX_CLASS_ID = 0, 1000\n\n MODEL_NAME = 'EfficentNetB0_rtx3090'\n MODEL_LR = 0.00003\n MODEL_LABEL_SMOOTHING = 0\n\n CKPT_DIR = './ckpt/'\n CKPT_FOLD_NAME = '{}_GPU_{}_{}'.format(TASK_NAME, GPU_ID, MODEL_NAME)\n\n IS_DEBUG = False\n IS_TRAIN_FROM_CKPT = False\n IS_SEND_MSG_TO_DINGTALK = False\n IS_RANDOM_VISUALIZING_PLOTS = False\n\n if IS_DEBUG:\n TRAIN_PATH = './data/Train_debug/'\n VALID_PATH = './data/Val_debug/'\n TEST_PATH = './data/Test_debug/Public_test_new/'\n else:\n TRAIN_PATH = 
'./data/Train/'\n VALID_PATH = './data/Val/'\n TEST_PATH = './data/Test/Public_test_new/'\n\n # 利用tensorflow的preprocessing方法读取数据集\n # ---------------------\n train_file_full_name_list = []\n train_label_list = []\n\n train_fold_names = sorted(list(map(lambda x: int(x), os.listdir(TRAIN_PATH))))\n train_fold_names = train_fold_names[MIN_CLASS_ID:MAX_CLASS_ID]\n train_fold_names = [str(item) for item in train_fold_names]\n N_CLASSES = len(train_fold_names)\n\n for dir_name in train_fold_names:\n full_path_name = os.path.join(TRAIN_PATH, dir_name)\n for file_name in os.listdir(full_path_name):\n train_file_full_name_list.append(\n os.path.join(full_path_name, file_name)\n )\n train_label_list.append(int(dir_name))\n train_label_array = np.array(train_label_list)\n\n val_file_full_name_list = []\n val_label_list = []\n\n val_fold_names = sorted(list(map(lambda x: int(x), os.listdir(VALID_PATH))))\n val_fold_names = val_fold_names[MIN_CLASS_ID:MAX_CLASS_ID]\n val_fold_names = [str(item) for item in val_fold_names]\n\n for dir_name in val_fold_names:\n full_path_name = os.path.join(VALID_PATH, dir_name)\n for file_name in os.listdir(full_path_name):\n val_file_full_name_list.append(\n os.path.join(full_path_name, file_name)\n )\n val_label_list.append(int(dir_name))\n val_label_array = np.array(val_label_list)\n\n # 进行标签编码\n # ---------------------\n train_label_oht_array = np.zeros(\n (len(train_file_full_name_list), N_CLASSES)\n )\n for row, col in enumerate(train_label_array):\n train_label_oht_array[row, col] = 1\n\n val_label_oht_array = np.zeros(\n (len(val_file_full_name_list), N_CLASSES)\n )\n for row, col in enumerate(val_label_array):\n val_label_oht_array[row, col] = 1\n\n '''\n encoder = OneHotEncoder(sparse=False)\n encoder.fit(train_label_oht_array.reshape(-1, 1))\n\n train_label_oht_array = encoder.transform(\n train_label_oht_array.reshape(-1, 1)\n )\n val_label_oht_array = encoder.transform(\n val_label_oht_array.reshape(-1, 1)\n )\n '''\n\n processor_train_image = load_preprocessing_img(\n image_size=IMAGE_SIZE,\n stage='train')\n processor_valid_image = load_preprocessing_img(\n image_size=IMAGE_SIZE,\n stage='valid')\n\n # 构造训练集数据\n train_path_ds = tf.data.Dataset.from_tensor_slices(train_file_full_name_list)\n train_img_ds = train_path_ds.map(\n processor_train_image, num_parallel_calls=mp.cpu_count()\n )\n train_label_ds = tf.data.Dataset.from_tensor_slices(train_label_oht_array)\n\n train_ds = tf.data.Dataset.zip((train_img_ds, train_label_ds))\n\n # 构造验证集数据\n val_path_ds = tf.data.Dataset.from_tensor_slices(val_file_full_name_list)\n val_img_ds = val_path_ds.map(\n processor_valid_image, num_parallel_calls=mp.cpu_count()\n )\n val_label_ds = tf.data.Dataset.from_tensor_slices(val_label_oht_array)\n\n val_ds = tf.data.Dataset.zip((val_img_ds, val_label_ds))\n\n # 性能设定\n train_ds = train_ds.shuffle(buffer_size=int(32 * BATCH_SIZE))\n train_ds = train_ds.batch(BATCH_SIZE).prefetch(2 * BATCH_SIZE)\n val_ds = val_ds.batch(BATCH_SIZE).prefetch(2 * BATCH_SIZE)\n\n # 随机可视化几张图片\n IS_RANDOM_VISUALIZING_PLOTS = False\n\n if IS_RANDOM_VISUALIZING_PLOTS:\n plt.figure(figsize=(10, 10))\n for images, labels in train_ds.take(1):\n for i in range(9):\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow(images[i].numpy().astype('uint8'))\n plt.title(int(labels[i]))\n plt.axis('off')\n plt.tight_layout()\n\n # 构造与编译Model,并添加各种callback\n # ---------------------\n\n # 各种Callbacks\n # ckpt, lr schule, early stop, warm up, remote moniter\n callbacks = [\n tf.keras.callbacks.EarlyStopping(\n 
monitor='val_acc', mode=\"max\",\n verbose=1, patience=EARLY_STOP_ROUNDS,\n restore_best_weights=True),\n tf.keras.callbacks.ModelCheckpoint(\n filepath=os.path.join(\n CKPT_DIR + CKPT_FOLD_NAME,\n MODEL_NAME + '_epoch_{epoch:02d}_valacc_{val_acc:.3f}.ckpt'),\n monitor='val_acc',\n mode='max',\n save_weights_only=True,\n save_best_only=True),\n tf.keras.callbacks.ReduceLROnPlateau(\n monitor='val_acc',\n factor=0.7,\n patience=2,\n min_lr=0.0000003),\n ]\n\n # 训练模型\n model = build_efficentnet_model(\n n_classes=N_CLASSES,\n input_shape=IMAGE_SIZE + (3,),\n network_type=MODEL_NAME,\n model_name=MODEL_NAME,\n model_lr=MODEL_LR,\n model_label_smoothing=MODEL_LABEL_SMOOTHING,\n )\n\n # 如果模型名的ckpt文件夹不存在,创建该文件夹\n if CKPT_FOLD_NAME not in os.listdir(CKPT_DIR):\n os.mkdir(CKPT_DIR + CKPT_FOLD_NAME)\n\n # 如果指定ckpt weights文件名,则从ckpt位置开始训练\n if IS_TRAIN_FROM_CKPT:\n latest_ckpt = tf.train.latest_checkpoint(CKPT_DIR + CKPT_FOLD_NAME)\n model.load_weights(latest_ckpt)\n else:\n ckpt_file_name_list = os.listdir(CKPT_DIR + CKPT_FOLD_NAME)\n\n # https://www.geeksforgeeks.org/python-os-remove-method/\n try:\n for file_name in ckpt_file_name_list:\n os.remove(os.path.join(CKPT_DIR + CKPT_FOLD_NAME, file_name))\n except OSError:\n print('File {} can not be deleted !'.format(file_name))\n\n history = model.fit(\n train_ds,\n epochs=NUM_EPOCHS,\n validation_data=val_ds,\n callbacks=callbacks\n )\n\n # 生成Test预测结果,并进行Top-1 Accuracy评估\n # ---------------------\n test_file_name_list = os.listdir(TEST_PATH)\n test_file_name_list = \\\n sorted(test_file_name_list, key=lambda x: int(x.split('.')[0][1:]))\n test_file_fullname_list = [TEST_PATH + item for item in test_file_name_list]\n\n test_path_ds = tf.data.Dataset.from_tensor_slices(test_file_fullname_list)\n processor_test_image = load_preprocessing_img(\n image_size=IMAGE_SIZE, stage='test')\n test_ds = test_path_ds.map(\n processor_test_image,\n num_parallel_calls=mp.cpu_count()\n )\n test_ds = test_ds.batch(BATCH_SIZE)\n test_ds = test_ds.prefetch(buffer_size=int(BATCH_SIZE * 2))\n\n # TTA强化\n test_pred_proba_list = []\n for i in tqdm(range(TTA_ROUNDS)):\n test_pred_proba_list.append(model.predict(test_ds))\n test_pred_proba = np.mean(test_pred_proba_list, axis=0)\n test_pred_label_list = np.argmax(test_pred_proba, axis=1)\n\n test_pred_df = pd.DataFrame(\n test_file_name_list,\n columns=['Id']\n )\n test_pred_df['Predicted'] = test_pred_label_list\n\n sub_file_name = str(len(os.listdir('./submissions')) + 1) + \\\n '_{}_sub.csv'.format(MODEL_NAME)\n test_pred_df.to_csv('./submissions/{}'.format(sub_file_name), index=False)\n","sub_path":"input_pipeline.py","file_name":"input_pipeline.py","file_ext":"py","file_size_in_byte":12991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"295219689","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 8 20:24:44 2019\n\n@author: Aaron\n\"\"\"\n\nfrom time import sleep\nimport datetime,turtle\n\ndef writenum(n):\n t.penup()\n t.forward(200)\n t.write(n)\n t.backward(200)\n t.pendown()\n\nt=turtle.Turtle()\nt.speed(0)\nt.seth(90)\nfor i in range(12):\n t.right(30)\n writenum(i+1)\n\nupdate = True\nupdateSecond = True\nwhile True:\n now = datetime.datetime.now()\n h = now.hour%12\n m = now.minute\n s = now.second\n if update:\n hour=turtle.Turtle()\n hour.color(1, 0, 0)\n hour.seth(90)\n hour.right(h*30+m*0.5)\n hour.forward(100)\n min=turtle.Turtle()\n min.color(0, 0, 1)\n min.seth(90)\n min.right(m*6)\n min.forward(150)\n update=False\n if updateSecond:\n 
sec=turtle.Turtle()\n sec.seth(90)\n sec.right(s*6)\n sec.forward(200)\n updateSecond=False\n\n sleep(1)\n\n now = datetime.datetime.now()\n mNew = now.minute\n sNew = now.second\n if mNew != m:\n update=True\n hour.clear()\n hour.reset()\n min.clear()\n min.reset()\n if sNew != s:\n updateSecond=True\n sec.clear()\n sec.reset()\n\nturtle.done()\nturtle.exitonclick()","sub_path":"201908 時鐘.py","file_name":"201908 時鐘.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"652562943","text":"import pyaudio\r\nfrom aip import AipSpeech\r\nfrom xpinyin import Pinyin\r\nimport cv2\r\nimport time as t\r\n\r\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\r\n\r\npd = cv2.imread('C1.jpg')\r\npd1 = cv2.imread('C2.jpg')\r\npd2 = cv2.imread('C3.jpg')\r\n\r\ndef RecodeSound():\r\n import wave\r\n CHUNK = 1024\r\n FORMAT = pyaudio.paInt16\r\n CHANNELS = 2\r\n RATE = 8000\r\n RECORD_SECONDS = 3\r\n WAVE_OUTPUT_FILENAME = \"audio.wav\"\r\n\r\n APP_ID='19165306'\r\n API_KEY='F0NWZzLVAnModNc6OG820Gu7'\r\n SECRET_KEY='M8enxlGmxLqSeFxpV9XHgwI50sHk6486'\r\n client =AipSpeech(APP_ID,API_KEY,SECRET_KEY)\r\n\r\n p = pyaudio.PyAudio()\r\n stream = p.open(format=FORMAT,\r\n channels=CHANNELS,\r\n rate=RATE,\r\n input=True,\r\n frames_per_buffer=CHUNK)\r\n\r\n stream.start_stream()\r\n print(\"* 开始录音......\")\r\n frames = []\r\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\r\n data = stream.read(CHUNK)\r\n frames.append(data)\r\n stream.stop_stream()\r\n #录音结束\r\n\r\n wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\r\n wf.setnchannels(CHANNELS)\r\n wf.setsampwidth(p.get_sample_size(FORMAT))\r\n wf.setframerate(RATE)\r\n wf.writeframes(b''.join(frames))\r\n wf.close()\r\n\r\n with open('audio.wav', 'rb') as fp:\r\n wave=fp.read()\r\n\r\n print(\"*正在识别......\",len(wave))\r\n result=client.asr(wave,'wav',16000,{'dev_pid':1537})\r\n #print(result['result'])\r\n if '。' in result['result'][0]:\r\n result['result'][0]=result['result'][0].replace('。','')\r\n \r\n if result[\"err_no\"]==0:\r\n print(result['result'][0])\r\n return result['result'][0]\r\n else:\r\n print(\"没有识别到语音\\n\",result[\"err_no\"])\r\n return 'Err'\r\n\r\ndef click(event,x,y,flags,param):\r\n p = Pinyin()\r\n #print('mouse coords:',x,y)\r\n txt=RecodeSound()\r\n ptxt = p.get_pinyin(txt)\r\n if p.get_pinyin('黑色') in ptxt:\r\n img=pd.copy()\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\r\n for (x, y, w, h) in faces:\r\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n cv2.imshow('result',img)\r\n\r\n elif p.get_pinyin('白色') in ptxt:\r\n img = pd1.copy()\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\r\n for (x, y, w, h) in faces:\r\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n cv2.imshow('result', img)\r\n\r\n elif p.get_pinyin('黄色') in ptxt:\r\n img = pd2.copy()\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\r\n for (x, y, w, h) in faces:\r\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n cv2.imshow('result', img)\r\n \r\ncv2.namedWindow('result')\r\ncv2.setMouseCallback('result',click)\r\nwhile True:\r\n if cv2.waitKey(10)&0xFF==27:\r\n break\r\ncv2.destroyAllWindows()\r\n","sub_path":"students/Zulin 
Li/2020.06.01HomeWork/Light.py","file_name":"Light.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"72183541","text":"import json\nimport MySQLdb\nimport requests\nimport unicodedata\nimport datetime\n\n\nfrom .models import Category, Product, Store\n\n\ndef fillelement(link):\n resp = requests.get(link)\n for i in range(0, 19):\n element = resp.json()['products'][i]\n test = Db_product()\n test.jsonread(element)\n if test.name != '':\n test.add_aliment()\n\n\n\nclass Db_product:\n\n def __init__(self):\n self.name = ''\n self.picture = ''\n self.link = ''\n self.nutri_score = 6\n self.created_at = ''\n self.categories = []\n self.stores = []\n\n\n\n def jsonread(self, json_file):\n if 'product_name_fr' in json_file:\n self.name = json_file['product_name_fr'][:199]\n\n if 'code' in json_file:\n self.link = 'https://fr.openfoodfacts.org/produit/' + \\\n json_file['code']\n if 'nutrition_grade_fr' in json_file:\n if json_file['nutrition_grade_fr'] == 'a':\n self.nutri_score = 1\n elif json_file['nutrition_grade_fr'] == 'b':\n self.nutri_score = 2\n elif json_file['nutrition_grade_fr'] == 'c':\n self.nutri_score = 3\n elif json_file['nutrition_grade_fr'] == 'd':\n self.nutri_score = 4\n elif json_file['nutrition_grade_fr'] == 'e':\n self.nutri_score = 5\n if 'entry_dates_tags' in json_file:\n self.created_at = json_file['entry_dates_tags'][0]\n if 'stores' in json_file:\n self.stores = json_file['stores'].split(',')\n self.stores = list(map(str.lstrip, self.stores))\n tmp = []\n for store in self.stores:\n tmp.append(store[:39])\n self.stores = tmp\n if 'categories' in json_file:\n # Separate the categories into a list and nromalise the name\n self.categories = json_file['categories'].split(',')\n self.categories = list(map(str.lstrip, self.categories))\n tmp = []\n for cat in self.categories:\n tmp.append(cat[:39])\n self.categories = tmp\n if 'image_url' in json_file:\n self.picture = json_file['image_url']\n\n def add_aliment(self):\n if len(self.categories) != 0:\n if len(Product.objects.filter(name=self.name)) == 0:\n\n query = Product(name=self.name, picture=self.picture,\n link=self.link, nutri_score=self.nutri_score,\n created_at=self.created_at)\n query.save()\n\n self.add_categories(query)\n if len(self.stores) != 0 :\n self.add_stores(query)\n\n\n def add_categories(self, product):\n for cat in self.categories:\n if len(Category.objects.filter(name = cat)) != 0 :\n c = Category.objects.get(name = cat)\n c.products.add(product.pk)\n\n else:\n query = Category(name = cat)\n query.save()\n c = Category.objects.get(name=cat)\n c.products.add(product.pk)\n\n def add_stores(self, product):\n for store in self.stores:\n if len(Store.objects.filter(name = store)) != 0 :\n c = Store.objects.get(name = store)\n c.products.add(product.pk)\n\n else:\n query = Store(name = store)\n query.save()\n c = Store.objects.get(name=store)\n c.products.add(product.pk)\n\ndef right_ns(product):\n score = product.nutri_score\n return ('catalog/img/icons/Score/nutriscore-'+score+'.svg')\n\n\n\n","sub_path":"catalog/apiload.py","file_name":"apiload.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"113857699","text":"from sanic.app import Sanic\nfrom sanic.response import html, text\n\n\napp = Sanic()\n\n@app.route('/', methods=['GET'])\nasync def index(request):\n return html(\"
<body><h1>This is index</h1></body>\")\n\n\n@app.route('/router', methods=['GET', 'POST'])\nasync def new_router(request):\n return html(\"<body><h1>New router</h1></body>
    \")\n\n\n@app.route('/my/', methods=['GET'])\nasync def name(request, name):\n return text('My name is {}'.format(name))\n\nif __name__ == '__main__':\n app.run()","sub_path":"example/router_example.py","file_name":"router_example.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"90492022","text":"# This tutorial is based on PyTorch's tutorials: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html and https://pytorch.org/tutorials/intermediate/tensorboard_tutorial.html.\n# This file contains the code snippets from them:\n# - imshow()\n# - accuracy()\n# - accuracy_batch()\n# - accuracy_of_classes()\n# - images_to_probs()\n# - plot_classes_preds()\n#\n# The license of the original tutorial is the 3-Clause BSD License.\n# See LICENSE for detail.\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom IPython.display import HTML, display\nimport tabulate\nimport torch\n\n# functions to show an image\n\n\ndef imshow(img):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show()\n\n \ndef accuracy(loader, model, device=None):\n total = 0\n correct = 0\n with torch.no_grad():\n for data in loader:\n if device is None:\n images, labels = data\n else:\n images, labels = data[0].to(device), data[1].to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n return 100.0 * correct / total\n\n\ndef accuracy_batch(outputs, labels):\n total = 0\n correct = 0\n total = labels.size(0)\n _, predicted = torch.max(outputs.data, 1)\n correct = (predicted == labels).sum().item()\n return 100.0 * correct / total\n\n\ndef accuracy_of_classes(num_classes, loader, model, device=None):\n class_correct = list(0. for i in range(num_classes))\n class_total = list(0. 
for i in range(num_classes))\n with torch.no_grad():\n for data in loader:\n if device is None:\n images, labels = data\n else:\n images, labels = data[0].to(device), data[1].to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs, 1)\n c = (predicted == labels).squeeze()\n for i in range(len(labels)):\n label = labels[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n return [100.0 * correct / total for correct, total in zip(class_correct, class_total)], (100.0 * sum(class_correct) / sum(class_total))\n\n\n\ndef images_to_probs(net, images, output):\n '''\n Generates predictions and corresponding probabilities from a trained\n network and a list of images\n '''\n# output = net(images.to(device))\n # convert output probabilities to predicted class\n _, preds_tensor = torch.max(output, 1)\n preds = np.squeeze(preds_tensor.cpu().numpy())\n return preds, [F.softmax(el, dim=0)[i].item() for i, el in zip(preds, output)]\n\n\ndef plot_classes_preds(net, images, outputs, labels):\n '''\n Generates matplotlib Figure using a trained network, along with images\n and labels from a batch, that shows the network's top prediction along\n with its probability, alongside the actual label, coloring this\n information based on whether the prediction was correct or not.\n Uses the \"images_to_probs\" function.\n '''\n preds, probs = images_to_probs(net, images, outputs)\n # plot the images in the batch, along with predicted and true labels\n fig = plt.figure(figsize=(12, 48))\n for idx in np.arange(4):\n ax = fig.add_subplot(1, 4, idx+1, xticks=[], yticks=[])\n# matplotlib_imshow(images[idx], one_channel=True)\n ax.set_title(\"{0}, {1:.1f}%\\n(label: {2})\".format(\n classes[preds[idx]],\n probs[idx] * 100.0,\n classes[labels[idx]]),\n color=(\"green\" if preds[idx]==labels[idx].item() else \"red\"))\n return fig\n\n\ndef show_table(data):\n # https://stackoverflow.com/questions/35160256/how-do-i-output-lists-as-a-table-in-jupyter-notebook\n display(HTML(tabulate.tabulate(data, tablefmt='html')))\n\n \ndef add_param(writer, net, step):\n for name, value in zip(net.state_dict(),net.parameters()):\n writer.add_histogram(name, value, step)\n\n\n \nclass IntermediateOutputWriter(object):\n def __init__(self, writer, net, step):\n super(IntermediateOutputWriter, self).__init__()\n self.writer = writer\n self.net = net\n self.hooks = []\n self.step = step\n \n def __enter__(self):\n class _f:\n def __init__(self, writer, name, step):\n self.wrote = False\n self.name = name\n self.step = step\n self.writer = writer\n def __call__(self, m, i, o):\n if not self.wrote:\n self.writer.add_histogram(self.name, o, self.step)\n self.wrote = True\n for name, module in self.net.named_modules():\n self.hooks.append(module.register_forward_hook(\n _f(self.writer, \"%s.output\" % name, self.step)\n ))\n return self\n\n def __exit__(self, ex_type, ex_value, trace):\n while len(self.hooks) > 0:\n self.hooks.pop().remove()\n return False\n ","sub_path":"Exercise03/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"588470719","text":"\"\"\"\nsentry.plugins.sentry_ipaddresses\n\n:copyright: (c) 2012 by OpenApp.\n:license: BSD.\n\"\"\"\nfrom django.utils.translation import ugettext_lazy as _\n\nimport sentry\nfrom sentry.plugins.bases.tag import TagPlugin\nfrom sentry.app import env\n\nclass IpAddressPlugin(TagPlugin):\n version = \"0.1\"\n description = \"To store/filter the 
IP addresses which invoked the event(for JS)\"\n author = \"Nik Skripko@OpenApp\"\n author_url = \"http://openapp.ie/\"\n resource_links = [\n ('Source', 'https://github.com/Nisk/sentry-plugin-ipaddresses'),\n ('Bug Tracker', 'https://github.com/Nisk/sentry-plugin-ipaddresses/issues')\n ]\n\n tag = 'ipaddress'\n tag_label = _('IP Address')\n slug = 'ipaddresses'\n title = _('IP Addresses')\n\n def get_tag_values(self, event):\n header = 'HTTP_X_FORWARDED_FOR'\n request = env.request\n x_forwd = request.META.get(header)\n if x_forwd:\n return [x_forwd]\n return []\n","sub_path":"sentry_ipaddresses/sentry_ipaddresses/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"535588418","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\nclass PoseLSTM(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_layers=1, dropout=0.5):\n super(PoseLSTM, self).__init__()\n\n self.lstm = nn.LSTM(input_size=input_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n batch_first=True,\n dropout=dropout)\n\n self.fc = nn.Linear(hidden_size, 6)\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.dropout = dropout\n self.init_weights()\n\n def init_weights(self):\n self.fc.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.zero_()\n\n def forward(self, input):\n # Set initial states\n states = self.init_states(input.size(0))\n\n output, hidden = self.lstm(input, states)\n\n # Only works with batch size 1\n # Using batchmode of fc layer to transform entire sequence\n pose_sequence = self.fc(output.squeeze()).unsqueeze(0)\n\n return pose_sequence, hidden\n\n def init_states(self, batch_size):\n h0 = Variable(torch.zeros(self.num_layers, batch_size, self.hidden_size))\n c0 = Variable(torch.zeros(self.num_layers, batch_size, self.hidden_size))\n if next(self.parameters()).is_cuda:\n h0 = h0.cuda()\n c0 = c0.cuda()\n\n return h0, c0\n\n def get_parameters(self):\n return list(self.lstm.parameters()) + list(self.fc.parameters())","sub_path":"binaryposeconvlstm/poseLSTM.py","file_name":"poseLSTM.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"631296019","text":"import numpy as np\n\n\ndef read_rare(fname):\n f = open(fname, \"r\")\n f_arr = f.read().split('\\n')\n f_size = int(f_arr[0])\n f_arr.pop(0)\n while '' in f_arr:\n f_arr.remove('')\n f.close()\n return f_arr, f_size\n\n\nA_arr, A_size = read_rare(\"a.txt\")\nB_arr, B_size = read_rare(\"b.txt\")\nsum_test, sum_test_size = read_rare('aplusb.txt')\nprod_test, prod_test_size = read_rare('aorib.txt')\n\n\ndef empty_rare_matrix(n):\n result = []\n for _ in range(n):\n row = []\n result.append(row)\n return result\n\n\ndef rare_matrix(data, n):\n result = []\n for i in range(n):\n result.append([])\n for i in data:\n params = list(map(lambda x: float(x), i.split(',')))\n found = False\n for j in result[int(params[1])]:\n if j[1] == int(params[2]):\n found = True\n j[0] += params[0]\n if not found:\n result[int(params[1])].append([params[0], int(params[2])])\n return result\n\n\ndef transpose(m, n):\n result = empty_rare_matrix(n)\n for i in range(n):\n for elem in m[i]:\n result[elem[1]].append([elem[0], i])\n return result\n\n\ndef rare_sum(m1, m2, n):\n result = m1.copy()\n for i in range(n):\n for j in range(len(m2[i])):\n found = False\n 
for k in range(len(result[i])):\n if result[i][k][1] == m2[i][j][1]:\n result[i][k][0] += m2[i][j][0]\n found = True\n if not found:\n result[i].append(m2[i][j])\n return result\n\n\ndef rare_product(m1, m2, n):\n result = empty_rare_matrix(n)\n m2_t = transpose(m2, n)\n for r_i in range(n):\n for r_j in range(n):\n s = 0\n for j in m1[r_i]:\n for k in m2_t[r_j]:\n if j[1] == k[1]:\n s += j[0] * k[0]\n break\n if s != 0:\n result[r_i].append([s, r_j])\n return result\n\n\ndef sums(m, n):\n for i in range(n):\n new_el = []\n for el in m[i]:\n found = False\n for k in new_el:\n if k[1] == el[1]:\n found = True\n k[0] += el[0]\n break\n if not found:\n new_el.append(el)\n m[i] = new_el\n\n\n'''\ntest_arr, test_size = read_rare(\"small_test.txt\")\ntest = rare_matrix(test_arr, test_size)\ntest2_arr, test2_size = read_rare(\"small_test2.txt\")\ntest2 = rare_matrix(test2_arr, test2_size)\nprint(test)\nprint(rare_product(test, test2, test2_size))\n'''\n\nA = rare_matrix(A_arr, A_size)\nB = rare_matrix(B_arr, B_size)\nsums(A, A_size)\nsums(B, B_size)\n\nA_plus_B = rare_sum(A, B, A_size)\nA_plus_B_verify = rare_matrix(sum_test, sum_test_size)\nprint(\"A + B:\")\nprint(A_plus_B)\nprint(\"A + B verificare:\")\nprint(A_plus_B_verify)\n\nA = rare_matrix(A_arr, A_size)\nB = rare_matrix(B_arr, B_size)\nsums(A, A_size)\nsums(B, B_size)\n\nA_ori_B = rare_product(A, B, A_size)\nA_ori_B_verify = rare_matrix(prod_test, prod_test_size)\nprint(\"A * B:\")\nprint(sorted(A_ori_B[0], key=lambda x: x[1]))\nprint(\"A * B verificare:\")\nprint(sorted(A_ori_B_verify[0], key=lambda x: x[1]))\n","sub_path":"T3/T3.py","file_name":"T3.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"69595043","text":"import requests\nimport os\nimport argparse\nfrom github import Github\n\n## organization name and token on env\norganization = 'fuchicorp'\n\n\n\ntoken = os.environ.get(\"GIT_ADMIN_TOKEN\")\n\n## Reading all members from file \nwith open('github-management/manage-users/users-to-add.txt') as file:\n users = file.read().splitlines()\n\n## Getting github user and organization \ng = Github(token, base_url='https://api.github.com')\norg = g.get_organization(organization)\n\n\n## Getting all teams \nteams = org.get_teams()\n\n## Function takes list and gettes users and returns as list \ndef get_users(users):\n \n ## Empty list which will be returned \n result = []\n for user in users:\n try:\n ## Trying to get user and append to result\n result.append(g.get_user(user))\n except:\n print(f\"User not found <{user}>\")\n return result\n\n## Users class using script to be able to get github users \nuser_clases = get_users(users)\n\n## Lopping to each teams \nfor team in teams:\n\n ## If team is part of members\n if team.name.lower() == \"members\":\n\n ## looping to users class to be able to onboard to memebers team \n for user in user_clases:\n try:\n ## Trying to invite user to organization \n org.invite_user(user=user, teams=[team])\n print(f\"User <{user.login}> invited to <{team.name}>\")\n except:\n print(f\"User <{user.login}> is already part of <{team.name}>\")","sub_path":"github-management/manage-users/add-user-org.py","file_name":"add-user-org.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"485392526","text":"from greensim.random import normal, constant\n\nfrom itsim.network.link import Link\nfrom itsim.network.internet 
import Internet\nfrom itsim.network.service import DHCP, NAT, PortForwarding\nfrom itsim.network.service.firewall import Firewall, Allow, Deny, Protocol\nfrom itsim.machine.endpoint import Endpoint\nfrom itsim.machine.router import Router\nfrom itsim.simulator import Simulator\nfrom itsim.types import as_address\nfrom itsim.units import MS, GbPS\n\n\nsim = Simulator()\ninternet = Internet()\n\nPORTS_DNS = [53]\nPORTS_WWW = [80, 443]\nPORTS_IT = [22] + list(range(135, 140)) + [445]\n\nFARM = \"10.1.128.0/18\"\nCORP = \"10.1.64.0/18\"\nDC = \"10.1.192.0/18\"\nLOBBY = \"10.1.0.0/18\"\n\nADDRESS_LOAD_BALANCER = \"10.1.128.10\"\n\nlobby = Link(LOBBY, normal(5.0 * MS, 1.5 * MS), constant(1 * GbPS))\nrouter_main = Router(\n internet.connected_as(\"24.192.132.23\").setup(\n NAT(), # Router will operate network address translation when forwarding on WAN.\n PortForwarding({port: (ADDRESS_LOAD_BALANCER, port) for port in PORTS_WWW})\n ),\n lobby.connected_as(1).setup(\n Firewall(\n inbound=[\n Allow(internet.cidr, Protocol.TCP, PORTS_WWW),\n Allow(internet.cidr, Protocol.BOTH, PORTS_DNS)\n ]\n # Default outbound rules: let everything through.\n )\n )\n)\n\nfarm = Link(FARM, normal(5.0 * MS, 1.5 * MS), constant(1 * GbPS))\nrouter_farm = Router(\n lobby.connected_as(2).setup(),\n farm.connected_as(1).setup(\n DHCP(),\n Firewall(\n inbound=[\n Allow(internet.cidr, Protocol.TCP, PORTS_WWW),\n Allow(internet.cidr, Protocol.BOTH, PORTS_DNS),\n Allow(CORP, Protocol.TCP, PORTS_IT)\n ],\n outbound=[\n Allow(internet.cidr, Protocol.TCP, PORTS_WWW),\n Allow(internet.cidr, Protocol.BOTH, PORTS_DNS),\n Deny.all()\n ]\n )\n )\n)\n\ncorp = Link(CORP, normal(5.0 * MS, 1.5 * MS), constant(1 * GbPS))\nrouter_corp = Router(\n lobby.connected_as(3).setup(),\n corp.connected_as(1).setup(DHCP(), Firewall())\n)\n\ndc = Link(DC, normal(5.0 * MS, 1.5 * MS), constant(1 * GbPS))\nrouter_dc = Router(\n lobby.connected_as(4).setup(),\n dc.connected_as(1).setup(\n DHCP(),\n Firewall(\n inbound=[\n Allow(internet.cidr, Protocol.BOTH, PORTS_DNS),\n Allow(CORP, Protocol.TCP, PORTS_IT)\n ],\n outbound=[Deny.all()]\n )\n )\n)\n\nassert {router_main, router_farm, router_corp, router_dc} == set(lobby.iter_nodes())\n\nNUM_ENDPOINTS_PER_SUBNET = 30\nendpoints = [Endpoint().connected_to(net) for _ in range(NUM_ENDPOINTS_PER_SUBNET) for net in [farm, corp, dc]]\nassert all(ept.address_default == as_address(0) for ept in endpoints)\n\nsim.run()\n\nNUM_ADDRESSES_INTERNET = 1\nNUM_ADDRESSES_LOBBY = 4\nall_addresses = set()\nfor net in [lobby, farm, corp, dc]:\n for node in net.iter_nodes():\n for addr in node.iter_addresses():\n all_addresses.add(addr)\nassert len(all_addresses) == NUM_ADDRESSES_INTERNET + NUM_ADDRESSES_LOBBY + 3 * NUM_ENDPOINTS_PER_SUBNET\n","sub_path":"examples/design/network-segmented-multirouter.py","file_name":"network-segmented-multirouter.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"470839331","text":"import pickle\nimport csv\nimport pandas as pd\nfrom collections import Counter, defaultdict\nfrom pathlib import Path\nfrom tqdm import tqdm\n\ndef is_similar_len(l1, l2, ratio=0.1):\n return abs(l1-l2) < max(l1,l2)*ratio\n\ndef connected_components(neighbors):\n seen = set()\n def component(node):\n nodes = set([node])\n while nodes:\n node = nodes.pop()\n seen.add(node)\n nodes |= neighbors[node] - seen\n yield node\n for node in neighbors:\n if node not in seen:\n yield component(node)\n\n\nanthologies = 
set(pd.read_csv('anthologies.csv')['hathi_id'])\nbook_path = Path('/home/allekim/stonybook-data/hathi/ocr_detection')\nparsed_ids = [tuple(str(p).split('/')[-2:]) for p in book_path.glob('*/*')]\n\nadj= defaultdict(set)\nfor lab, i in tqdm(parsed_ids):\n overlap_path = book_path / lab / i / 'overlap_scores.txt'\n hid = \"{}.{}\".format(lab, i)\n if hid in anthologies:\n continue\n with open(overlap_path, 'r') as f:\n for line in f:\n stats = line.strip().split()\n other_id, l1, l2, overlap = stats\n if other_id in anthologies:\n continue\n l1 = int(l1)\n l2 = int(l2)\n overlap = int(overlap)\n if l1 > 0 and l2 > 0 and overlap / min(l1,l2) > 0.25:\n adj[(l1, hid)].add((l2, other_id))\n adj[(l2, other_id)].add((l1, hid))\n\n\nwith open('dedup_hathi.csv', 'w') as csvfile:\n headers = ['hathi_id', 'hathi_id_set']\n writer = csv.DictWriter(csvfile, headers)\n writer.writeheader()\n for component in connected_components(adj):\n c = sorted(component, reverse=True)\n row = {\n 'hathi_id': c[0][1],\n 'hathi_id_set': [x[1] for x in c]\n }\n writer.writerow(row)\n\n","sub_path":"deduplication/cluster_hathi_books.py","file_name":"cluster_hathi_books.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87343068","text":"import asyncio\nimport urllib.parse\nimport sys\nfrom aiohttp import ClientSession\nfrom lxml import html\nfrom fake_useragent import UserAgent\nimport openpyxl\nfrom openpyxl.styles import fonts, alignment, Side, Border\nfrom openpyxl.styles.colors import COLOR_INDEX\nfrom openpyxl.comments import comments\nfrom tqdm import tqdm\nfrom attr import attrs\n\n\n@attrs(slots=True, auto_attribs=True)\nclass Event:\n name: str = ''\n date: str = ''\n time: str = ''\n place: str = ''\n url: str = ''\n buy: str = ''\n detail: str = ''\n\n\naddr = 'http://koncertsamara.ru/afisha/'\n\n\nasync def get_source(session, number, url, headers, bar):\n async with session.get(url, headers=headers) as response:\n if bar is not None:\n bar.update(1)\n return number, await response.read()\n\n\nasync def get_last_page(session, pages, headers)-> int:\n while True:\n url = f\"{addr}?a-page={pages}\"\n source = (await asyncio.gather(asyncio.ensure_future(get_source(session, 0, url, headers, None))))[0]\n http = html.fromstring(source[1])\n last_page = http.xpath('//div[@class=\"pagination\"]/ul/li/a/text()')\n if last_page[-1] == 'Следующая':\n pages = int(last_page[-2]) - 1\n else:\n return int(last_page[-1])\n\n\ndef changequotes(mytext: str) -> str:\n if mytext and mytext[0] == '\"':\n mytext = '«' + mytext[1:]\n return mytext.replace(' \"', ' «').replace('\"', '»').strip()\n\n\ndef get_element(source: html, path: str)-> str:\n result = source.xpath(path)\n return result[0] if result else ''\n\n\nasync def main():\n ua = UserAgent()\n headers = {'User-Agent': ua.ie}\n result = []\n index = 0\n async with ClientSession() as session:\n pages = await get_last_page(session, 0, headers)\n bar = tqdm(total=pages, desc='Обработка страниц')\n tasks = [asyncio.ensure_future(\n get_source(session, page, f\"{addr}?a-page={page}\", headers, bar)) for page in range(pages)]\n responses = await asyncio.gather(*tasks)\n bar.close()\n responses.sort()\n tasks = []\n bar = tqdm(desc='Загрузка мероприятий')\n for response in responses:\n source = html.fromstring(response[1])\n for base_event in source.xpath('//ul[@class=\"list\"]/li'):\n event = Event(\n name=changequotes(get_element(base_event, 'div/div[2]/h3/text()')),\n 
date=get_element(base_event, 'div/div[1]/span[1]/text()'),\n time=get_element(base_event, 'div/div[1]/span[3]/text()'),\n place=changequotes(get_element(base_event, 'h4/a/text()')),\n url=get_element(base_event, 'div/div[4]/div/a[1]/@href'),\n buy=get_element(base_event, 'div/div[4]/div/a[2]/@href')\n )\n if not event.url or event.url == '/newslist/novinka-elektronnyj-bilet/':\n event.url = get_element(base_event, 'div/div[4]/div/a[2]/@href')\n event.buy = get_element(base_event, 'div/div[4]/div/a[3]/@href')\n event.url = urllib.parse.urljoin(addr, event.url)\n event.buy = urllib.parse.urljoin(addr, event.buy)\n tasks.append(asyncio.ensure_future(\n get_source(session, index, event.url, headers, bar)))\n index += 1\n result.append(event)\n bar.total = len(result)\n resp = await asyncio.gather(*tasks)\n bar.close()\n\n resp.sort()\n for i, item in enumerate(result):\n temp_resp = html.fromstring(resp[i][1])\n temp_detail = temp_resp.xpath('//*[@id=\"current-description\"]/p/text()')\n item.detail = max(temp_detail, key=len) if temp_detail else ''\n return result\n\n\ndef savetofile(afisha_, file='koncert.xlsx'):\n\n wb = openpyxl.Workbook()\n ws = wb.active\n ws.append(['Дата', 'Время', 'Событие (клик – подробно)', 'Место проведения (клик – бронирование)'])\n side = Side(style='thin', color=COLOR_INDEX[0])\n dside = Side(style='double', color=COLOR_INDEX[0])\n border = Border(left=side, right=side, top=side, bottom=side)\n hborder = Border(left=side, right=side, top=side, bottom=dside)\n for i in range(len(afisha_)):\n ws.append([afisha_[i].date, afisha_[i].time,\n '=HYPERLINK(\"%s\",\"%s\")' % (afisha_[i].url, afisha_[i].name),\n '=HYPERLINK(\"%s\",\"%s\")' % (afisha_[i].buy, afisha_[i].place)])\n if len(afisha_[i].detail) > 10:\n ws['C' + str(i + 2)].comment = comments.Comment(afisha_[i].detail, '')\n for r in ('A', 'B', 'C', 'D'):\n ws[r + str(i + 2)].border = border\n if r in ('A', 'B'):\n ws[r + str(i + 2)].alignment = alignment.Alignment(horizontal='center')\n for sym in ('A1', 'B1', 'C1', 'D1'):\n ws[sym].font = fonts.Font(size=12, bold=True)\n ws[sym].alignment = alignment.Alignment(horizontal='center')\n ws[sym].border = hborder\n ws.column_dimensions['A'].width = 18\n ws.column_dimensions['B'].width = 12\n ws.column_dimensions['C'].width = 60\n ws.column_dimensions['D'].width = 60\n wb.save(file)\n\n\nif __name__ == '__main__':\n afisha = asyncio.run(main())\n if len(sys.argv) > 1:\n if not sys.argv[1].endswith('.xlsx'):\n sys.argv[1] += '.xlsx'\n savetofile(afisha, sys.argv[1])\n else:\n print()\n print(*[f\"{event.date}{' – ' if event.time else ''}{event.time} : {event.name} ({event.place})\"\n for event in afisha],\n sep='\\n')\n","sub_path":"aioparsing.py","file_name":"aioparsing.py","file_ext":"py","file_size_in_byte":5709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637269678","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom modules.router import router\nfrom modules.db import MongodbConnector\nfrom credentials import mongodb_address, database\nimport uvicorn\n\napp = FastAPI ()\n\napp.add_middleware (CORSMiddleware, allow_origins = ['*'], allow_credentials = True, allow_methods = ['*'], allow_headers = ['*'])\napp.include_router (router)\n\naddress = mongodb_address\ndatabase = database\nconnector = MongodbConnector ().connect (address, database)\n\n@app.get ('/')\nasync def root ():\n return {\n 'year': 2021,\n 'location': 'IBTI'\n }\n\nif __name__ == '__main__':\n 
uvicorn.run ('server:app', host = '0.0.0.0', port = 8000, reload = True)","sub_path":"API/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"217724843","text":"import matplotlib.pyplot as plt\nimport networkx as nx\n\nfrom ESN import *\n\n\ndef nx_show(G):\n nx.draw(G, pos=nx.spring_layout(G), with_labels=True)\n plt.show()\n\n\nif __name__ == '__main__':\n # Fetch an adjacency matrix.\n esn = ESN(hidden_nodes=20, w_res_density=0.1)\n A = esn.w_res.numpy()\n\n # Create a network graph from the ESN network.\n G = nx.from_numpy_matrix(A)\n nx.draw(G, pos=nx.spring_layout(G), with_labels=True)\n plt.show()\n","sub_path":"py/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"115753571","text":"import pyaudio\nimport wave\nimport time\nRESPEAKER_RATE = 16000\nRESPEAKER_CHANNELS = 6 # change base on firmwares, 1_channel_firmware.bin as 1 or 6_channels_firmware.bin as 6\nRESPEAKER_WIDTH = 2\n# run getDeviceInfo.py to get index\nRESPEAKER_INDEX = 5 # refer to input device id\nCHUNK = 1024\nRECORD_SECONDS = 5\nWAVE_OUTPUT_FILENAME = \"outputme.wav\"\n\np = pyaudio.PyAudio()\n# print(p.get_device_info_by_index(9))\nstream = p.open(\n rate=RESPEAKER_RATE,\n format=p.get_format_from_width(RESPEAKER_WIDTH),\n channels=RESPEAKER_CHANNELS,\n input=True,\n input_device_index=RESPEAKER_INDEX,)\n\nprint(\"* recording\")\ntime.sleep(5)\nprint(\"started\")\nframes = []\n\nfor i in range(0, int(RESPEAKER_RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK) #,exception_on_overflow = False\n frames.append(data)\n\nprint(\"* done recording\")\n\nstream.stop_stream()\nstream.close()\np.terminate()\n\nwf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\nwf.setnchannels(RESPEAKER_CHANNELS)\nwf.setsampwidth(p.get_sample_size(p.get_format_from_width(RESPEAKER_WIDTH)))\nwf.setframerate(RESPEAKER_RATE)\nwf.writeframes(b''.join(frames))\nwf.close()\n\n","sub_path":"pocketsphinx/misc/record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"266092044","text":"\"\"\"bootstrap implementation of cwproperty view\n\n:organization: Logilab\n:copyright: 2013 LOGILAB S.A. (Paris, FRANCE), license is LGPL.\n:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr\n\"\"\"\n\n__docformat__ = \"restructuredtext en\"\n\nfrom logilab.common.decorators import monkeypatch\nfrom cubicweb.web.views.cwproperties import SystemCWPropertiesForm, \\\n make_togglable_link\n\n@monkeypatch(SystemCWPropertiesForm)\ndef wrap_main_form(self, group, label, form):\n label += u' '\n status = self._group_status(group)\n cssclass = 'panel-body %s' % status if status else 'panel-body'\n self.w(u'
<div class="panel panel-default">'\n u'<div class="panel-heading">%s</div>\n' %\n (make_togglable_link('fieldset_' + group, label)))\n self.w(u'<div class="%s" id="fieldset_%s">' % (cssclass, group))\n self.w(form)\n self.w(u'</div>')\n self.w(u'</div>')\n\n@monkeypatch(SystemCWPropertiesForm)\ndef wrap_grouped_form(self, group, label, objects):\n label += u' '\n status = self._group_status(group)\n cssclass = 'panel-body %s' % status if status else 'panel-body'\n self.w(u'<div class="panel panel-default">'\n u'<div class="panel-heading">%s</div>\n' %\n (make_togglable_link('fieldset_' + group, label)))\n self.w(u'<div class="%s" id="fieldset_%s">' % (cssclass, group))\n\n sorted_objects = sorted((self._cw.__('%s_%s' % (group, o)), o, f)\n for o, f in objects.iteritems())\n for label, oid, form in sorted_objects:\n self.wrap_object_form(group, oid, label, form)\n self.w(u'</div>')\n self.w(u'</div>
    ')\n","sub_path":"bootstrap/views_original/cwproperties.py","file_name":"cwproperties.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"112971923","text":"m=10**9+7\ndef power(n,a):\n res=1\n while a!=0:\n if a&1:\n res=(res*n)%m\n a=a>>1\n n=(n*n)%m\n return res\nt=int(input())\nn=int(input())\nprint(t*power(2,n))\n","sub_path":"power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"160210469","text":"def solution(answers):\n pattern1 = [1, 2, 3, 4, 5]\n pattern2 = [2, 1, 2, 3, 2, 4, 2, 5]\n pattern3 = [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]\n score_dict = dict()\n for i in range(1, 4):\n score_dict[i] = 0\n for i, ans in enumerate(answers):\n if pattern1[i % len(pattern1)] == ans:\n score_dict[1] += 1\n if pattern2[i % len(pattern2)] == ans:\n score_dict[2] += 1\n if pattern3[i % len(pattern3)] == ans:\n score_dict[3] += 1\n max_value = max(score_dict.values())\n\n return sorted([key for key, value in score_dict.items() if value == max_value])\n\n\nprint(solution([1, 2, 3, 4, 5]))","sub_path":"programmers/lessons_42840/mock_exam.py","file_name":"mock_exam.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"5038404","text":"import json\nimport os\nimport requests\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import make_response\n\n# Flask app should start in global layout\napp = Flask(__name__)\n\n@app.route('/stock', methods=['POST'])\ndef stock():\n req = request.get_json(silent=True, force=True)\n print(json.dumps(req, indent=4))\n \n res = makeResponse(req)\n \n res = json.dumps(res, indent=4)\n print(res)\n r = make_response(res)\n r.headers['Content-Type'] = 'application/json'\n return r\n\t\ndef makeResponse(req):\n\t\n\tresult = req.get(\"result\")\n\taction = result.get(\"action\")\n\t\n\tif action == 'stock.news':\n\t\treturn getNewsDetails(req)\n\telif action == 'stock.research':\n\t\treturn getResearch(req)\n\tif action in 'action.Login':\n\t\treturn getLoginService(req)\n\tif action in 'input.welcome':\n\t\treturn getUsername(req)\n\t\t# Fall-through by not using elif, but now the default case includes case 'a'!\n\t#elif action in 'xyz':\n\t\t# Do yet another thing\n\telse:\n\t\treturn commonResponse(\"Sorry, I did't get you ?\")\n\t\n\t\ndef getNewsDetails(req):\n\n\t# Request to get current news\n\tr=requests.get('')\n \n\t# Output response of news API\n\tjson_object = r.json()\n\t\n\tdataSet=json_object['articles']\n\ttemp = \"\"\n\tspeech = \"Today top new is : \"\n\t\n\tfor data in dataSet:\n\t\ttitle = data[\"title\"]\n\t\tdesc=data['description']\n\t\tspeech = title + \" \" + desc\n\t\tbreak\n\t\t#temp = temp + speech\n\t\t\n\treturn commonResponse(speech)\n\t\ndef getUsername(req):\n\t\n\taccessToken = \"Bearer \" + req['originalRequest']['data']['user']['accessToken']\n\t\n\tprint(accessToken)\n\t\n\t# Add your url\n\turl = \"\"\n\n\theaders = {\n\t\t'authorization': accessToken,\n\t\t'cache-control': \"no-cache\",\n\t\t'postman-token': \"83689df6-f3e3-30bb-a2de-ed30d1211721\"\n\t\t}\n\n\tresponse = requests.request(\"GET\", url, headers=headers)\n\tprint(response.text)\n\tobj = response.json()\n\tmsg = \"Hi \" + obj['name'] + \", Welcome to demo!\"\n\treturn commonResponse(msg)\n\t\n\t\ndef getLoginService(req):\n\n\tusername = 
req['result']['parameters']['username']\n\tpassword = req['result']['parameters']['password']\n\tid = 0\n\n\t# Add your url\n\turl = \"\"\n\n\tpayload = {\n\t\t\t\t\"emailId\": username,\n\t\t\t\t\"password\": password\n\t\t\t}\n\t\t\t\n\tpayload = json.dumps(payload, indent=4)\n\t\t\t\t\n\theaders = {\n\t\t'content-type': \"application/json\"\n\t\t}\n\n\tresponse = requests.request(\"POST\", url, data=payload, headers=headers)\n\n\tprint(response.text)\n\n\tobj = response.json()\n\t\n\tif obj['responseCode'] == 1:\n\t\tid = obj['id']\n\t\ttemp = 'Thank you user : ' + str(obj['id'])\n\telse:\n\t\ttemp = obj['responseMessage']\n\n\treturn commonSessionResponse(temp, id, 'test-context')\n\n\ndef getResearch(req):\n\t\tmsg=\"Today's fundamental research call. Buy Grasim Industries Limited. C M P rupees 1089. Target price Rupees 1300 with potential upside of 18 percent.\"\n\t\treturn commonResponse(msg)\n\t\t\ndef commonResponse(msg):\n\treturn {\n\t\t\t\"speech\":msg,\n\t\t\t\"displayText\":msg,\n\t\t\t\"source\": \"apiai-demostock-webhook\"\n\t}\n\t\ndef commonSessionResponse(msg, id, contextName):\n\treturn {\n\t\t\t\"speech\":msg,\n\t\t\t\"displayText\":msg,\n\t\t\t\"source\": \"apiai-demostock-webhook\",\n\t\t\t\"contextOut\":[\n\t\t\t\t{\n\t\t\t\t\t\"name\" : contextName,\n\t\t\t\t\t\"lifespan\": 99,\n\t\t\t\t\t\"parameters\" : {\n\t\t\t\t\t\t\"id\" : id\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t]\n\t}\n\t\nif __name__ == '__main__':\n port = int(os.getenv('PORT', 5000))\n print(\"Starting app on port %d\" % port)\n app.run(debug=False, port=port, host='0.0.0.0')\n \n \n \n \n ","sub_path":"webhook.py","file_name":"webhook.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"628250418","text":"import functools\nimport itertools\nimport operator\n\ndef same(iterable):\n iterable = iter(iterable)\n cmp = functools.partial(operator.eq, next(iterable))\n return all(map(cmp, iterable))\n\n\nif __name__ == \"__main__\":\n print(same(range(5)))\n print(same(\"abc\"))\n print(same([1, 1, 1]))\n print(same([None]))\n","sub_path":"same.py","file_name":"same.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"141575521","text":"\"\"\"\n@author: Mozilla\n Edited by Peter Kim\n\nreference:\n https://github.com/mozilla/DeepSpeech-examples/blob/r0.9/mic_vad_streaming/mic_vad_streaming.py\n\"\"\"\n\nfrom matrix_lite import gpio\nimport os\nimport time, logging\nimport threading\nimport collections\nimport queue\nimport deepspeech\nimport numpy as np\nimport chars2vec\n\nfrom ctypes import *\nfrom scipy import signal\nfrom trigger.trigger_thread import *\n\npin = 4\nmin_pulse_ms = 0.5\n\ndef deepSpeech_thread(frames, tods_deq, angle_deq, motor_ent):\n # Matrix Voice GPIO Setup\n gpio.setFunction(4, 'PWM')\n gpio.setMode(4, 'output')\n\n model = deepspeech.Model('deepSpeech/deepspeech-0.9.3-models.tflite')\n c2v_model = chars2vec.load_model('eng_50')\n trigger_model = get_updated_model('friend', c2v_model, 'trigger/data/others.txt')\n #model.enableExternalScorer('deepSpeech/deepspeech-0.9.3-models.scorer')\n angle = 0\n pre_angle = 0\n stream_context = model.createStream()\n for frame in frames:\n if tods_deq:\n print(\"New Trigger Detected\")\n new_trigger = tods_deq.popleft()\n new_trigger = new_trigger.replace(\" \", \"\").lower()\n trigger_model = get_updated_model(new_trigger, c2v_model, 'trigger/data/others.txt')\n 
print(\"Finish Train\")\n\n if frame is not None:\n logging.debug(\"streaming frame\")\n stream_context.feedAudioContent(np.frombuffer(frame, np.int16))\n angle = angle_deq[-1]\n else:\n logging.debug(\"end utterence\")\n text = stream_context.finishStream()\n print(\"Recognized: %s, Angle: %f\" %(text, angle))\n\n preds = []\n for t in text.split(' '):\n if \"\" == t:\n continue\n if \"'\" in t:\n continue\n #in_t = indexing(t)\n in_t = list([t])\n print(in_t)\n in_t = c2v_model.vectorize_words(in_t)\n print(2)\n pred = trigger_model.predict(in_t)\n print(3)\n preds.append(pred)\n\n if 1 in preds:\n print(\"Trigger!!\", pin)\n motor_ent.clear()\n turn_motor(pin, angle, pre_angle, min_pulse_ms, 2)\n motor_ent.set()\n pre_angle = angle\n\n stream_context = model.createStream()\n\ndef turn_motor(pin, angle, pre_angle, min_pulse_ms, step):\n step = step if angle > pre_angle else step * (-1)\n for i in range(pre_angle, angle, step):\n gpio.setServoAngle({\n \"pin\": pin,\n \"angle\": i,\n \"min_pulse_ms\": min_pulse_ms,\n })\n time.sleep(0.015)\n\n gpio.setServoAngle({\n \"pin\": pin,\n \"angle\": angle,\n \"min_pulse_ms\": min_pulse_ms,\n })\n","sub_path":"20210324/deepSpeech/ds_thread.py","file_name":"ds_thread.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"141536971","text":"#Author-tattaka\n#Description-\n\nimport adsk.core, adsk.fusion, adsk.cam, traceback\nimport math\n\n# Globals\n_app = adsk.core.Application.cast(None)\n_ui = adsk.core.UserInterface.cast(None)\n_units = ''\n# Command inputs\n_standard = adsk.core.DropDownCommandInput.cast(None)\n_width = adsk.core.ValueCommandInput.cast(None)\n_height = adsk.core.ValueCommandInput.cast(None)\n_depth = adsk.core.ValueCommandInput.cast(None)\n_errMessage = adsk.core.TextBoxCommandInput.cast(None)\n\n_field_width = adsk.core.ValueCommandInput.cast(None)\n_field_height = adsk.core.ValueCommandInput.cast(None)\n_object_number = adsk.core.ValueCommandInput.cast(None)\n_object_width_min = adsk.core.ValueCommandInput.cast(None)\n_object_width_max = adsk.core.ValueCommandInput.cast(None)\n_object_height_min = adsk.core.ValueCommandInput.cast(None)\n_object_height_max = adsk.core.ValueCommandInput.cast(None)\n\n_handlers = []\ndef run(context):\n try:\n global _app, _ui\n _app = adsk.core.Application.get()\n _ui = _app.userInterface\n\n cmdDef = _ui.commandDefinitions.itemById('adskCuboidPythonScript')\n if not cmdDef:\n # Create a command definition.\n cmdDef = _ui.commandDefinitions.addButtonDefinition('adskCuboidPythonScript', 'Cuboid', 'Creates a cuboid component', '')\n\n # Connect to the command created event.\n onCommandCreated = CuboidCommandCreatedHandler()\n cmdDef.commandCreated.add(onCommandCreated)\n _handlers.append(onCommandCreated)\n\n # Execute the command.\n cmdDef.execute()\n\n # prevent this module from being terminate when the script returns, because we are waiting for event handlers to fire\n adsk.autoTerminate(False)\n except:\n if _ui:\n _ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\nclass CuboidCommandDestroyHandler(adsk.core.CommandEventHandler):\n def __init__(self):\n super().__init__()\n def notify(self, args):\n try:\n eventArgs = adsk.core.CommandEventArgs.cast(args)\n\n # when the command is done, terminate the script\n # this will release all globals which will remove all event handlers\n adsk.terminate()\n except:\n if _ui:\n _ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\n\n# 
Verifies that a value command input has a valid expression and returns the\n# value if it does.  Otherwise it returns False.  This works around a\n# problem where when you get the value from a ValueCommandInput it causes the\n# current expression to be evaluated and updates the display.  Some new functionality\n# is being added in the future to the ValueCommandInput object that will make\n# this easier and should make this function obsolete.\ndef getCommandInputValue(commandInput, unitType):\n    try:\n        valCommandInput = adsk.core.ValueCommandInput.cast(commandInput)\n        if not valCommandInput:\n            return (False, 0)\n\n        # Verify that the expression is valid.\n        des = adsk.fusion.Design.cast(_app.activeProduct)\n        unitsMgr = des.unitsManager\n\n        if unitsMgr.isValidExpression(valCommandInput.expression, unitType):\n            value = unitsMgr.evaluateExpression(valCommandInput.expression, unitType)\n            return (True, value)\n        else:\n            return (False, 0)\n    except:\n        if _ui:\n            _ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\n# Event handler for the commandCreated event.\nclass CuboidCommandCreatedHandler(adsk.core.CommandCreatedEventHandler):\n    def __init__(self):\n        super().__init__()\n    def notify(self, args):\n        try:\n            eventArgs = adsk.core.CommandCreatedEventArgs.cast(args)\n\n            # Verify that a Fusion design is active.\n            des = adsk.fusion.Design.cast(_app.activeProduct)\n            if not des:\n                _ui.messageBox('A Fusion design must be active when invoking this command.')\n                return\n\n            defaultUnits = des.unitsManager.defaultLengthUnits\n\n            # Determine whether to use inches or millimeters as the initial default.\n            global _units\n            if defaultUnits == 'in' or defaultUnits == 'ft':\n                _units = 'in'\n            else:\n                _units = 'mm'\n\n            # Define the default values and get the previous values from the attributes.\n            if _units == 'in':\n                standard = 'English'\n            else:\n                standard = 'Metric'\n            standardAttrib = des.attributes.itemByName('Cuboid', 'standard')\n            if standardAttrib:\n                standard = standardAttrib.value\n\n            if standard == 'English':\n                _units = 'in'\n            else:\n                _units = 'mm'\n\n            width = str(1)\n            widthAttrib = des.attributes.itemByName('Cuboid', 'width')\n            if widthAttrib:\n                width = widthAttrib.value\n\n            height = str(1)\n            heightAttrib = des.attributes.itemByName('Cuboid', 'height')\n            if heightAttrib:\n                height = heightAttrib.value\n\n            depth = str(1)\n            depthAttrib = des.attributes.itemByName('Cuboid', 'depth')\n            if depthAttrib:\n                depth = depthAttrib.value\n\n            cmd = eventArgs.command\n            cmd.isExecutedWhenPreEmpted = False\n            inputs = cmd.commandInputs\n\n            global _standard, _width, _height, _depth, _errMessage\n\n            _standard = inputs.addDropDownCommandInput('standard', 'Standard', adsk.core.DropDownStyles.TextListDropDownStyle)\n            if standard == \"English\":\n                _standard.listItems.add('English', True)\n                _standard.listItems.add('Metric', False)\n            else:\n                _standard.listItems.add('English', False)\n                _standard.listItems.add('Metric', True)\n\n            _width = inputs.addValueInput('width', 'Cuboid Width', _units, adsk.core.ValueInput.createByReal(float(width)))\n\n            _height = inputs.addValueInput('height', 'Cuboid Height', _units, adsk.core.ValueInput.createByReal(float(height)))\n\n            _depth = inputs.addValueInput('depth', 'Cuboid Depth', _units, adsk.core.ValueInput.createByReal(float(depth)))\n\n            _errMessage = inputs.addTextBoxCommandInput('errMessage', '', '', 2, True)\n            _errMessage.isFullWidth = True\n\n            # Connect to the command related events.\n            onExecute = CuboidCommandExecuteHandler()\n            cmd.execute.add(onExecute)\n            _handlers.append(onExecute)\n\n            
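# Appending to the global _handlers list keeps a Python reference to each handler; without it the handler can be garbage collected and its event silently stops firing.\n            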
onInputChanged = CuboidCommandInputChangedHandler()\n            cmd.inputChanged.add(onInputChanged)\n            _handlers.append(onInputChanged)\n\n            onValidateInputs = CuboidCommandValidateInputsHandler()\n            cmd.validateInputs.add(onValidateInputs)\n            _handlers.append(onValidateInputs)\n\n            onDestroy = CuboidCommandDestroyHandler()\n            cmd.destroy.add(onDestroy)\n            _handlers.append(onDestroy)\n        except:\n            if _ui:\n                _ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\n# Event handler for the execute event.\nclass CuboidCommandExecuteHandler(adsk.core.CommandEventHandler):\n    def __init__(self):\n        super().__init__()\n    def notify(self, args):\n        try:\n            eventArgs = adsk.core.CommandEventArgs.cast(args)\n\n            # Save the current values as attributes.\n            des = adsk.fusion.Design.cast(_app.activeProduct)\n            attribs = des.attributes\n            attribs.add('Cuboid', 'standard', _standard.selectedItem.name)\n            attribs.add('Cuboid', 'width', str(_width.value))\n            attribs.add('Cuboid', 'height', str(_height.value))\n            attribs.add('Cuboid', 'depth', str(_depth.value))\n\n            width = _width.value\n            height = _height.value\n            depth = _depth.value\n\n            # Create the cuboid.\n            cuboidComp = drawCuboid(des, width, height, depth)\n\n        except:\n            if _ui:\n                _ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\n# Event handler for the inputChanged event.\nclass CuboidCommandInputChangedHandler(adsk.core.InputChangedEventHandler):\n    def __init__(self):\n        super().__init__()\n    def notify(self, args):\n        try:\n            eventArgs = adsk.core.InputChangedEventArgs.cast(args)\n            changedInput = eventArgs.input\n\n            global _units\n            if changedInput.id == 'standard':\n                if _standard.selectedItem.name == 'English':\n\n                    _units = 'in'\n                elif _standard.selectedItem.name == 'Metric':\n\n                    _units = 'mm'\n\n                # Set each one to its current value because otherwise, if the user\n                # has edited it, the value won't update in the dialog because\n                # apparently it remembers the units when the value was edited.\n                # Setting the value using the API resets this.\n                _width.value = _width.value\n                _width.unitType = _units\n                _height.value = _height.value\n                _height.unitType = _units\n                _depth.value = _depth.value\n                _depth.unitType = _units\n\n        except:\n            if _ui:\n                _ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\n# Event handler for the validateInputs event.\nclass CuboidCommandValidateInputsHandler(adsk.core.ValidateInputsEventHandler):\n    def __init__(self):\n        super().__init__()\n    def notify(self, args):\n        try:\n            eventArgs = adsk.core.ValidateInputsEventArgs.cast(args)\n\n            _errMessage.text = ''\n\n            des = adsk.fusion.Design.cast(_app.activeProduct)\n\n            result = getCommandInputValue(_width, _units)\n            if result[0] == False:\n                eventArgs.areInputsValid = False\n                return\n            else:\n                width = result[1]\n\n            if width <= 0:\n                _errMessage.text = 'The width value should be greater than 0. It must be more than ' + des.unitsManager.formatInternalValue(0.1, _units, True)\n                eventArgs.areInputsValid = False\n                return\n\n            result = getCommandInputValue(_height, _units)\n            if result[0] == False:\n                eventArgs.areInputsValid = False\n                return\n            else:\n                height = result[1]\n\n            if height <= 0:\n                _errMessage.text = 'The height value should be greater than 0. It must be more than ' + des.unitsManager.formatInternalValue(0.1, _units, True)\n                eventArgs.areInputsValid = False\n                return\n\n            result = getCommandInputValue(_depth, _units)\n            if result[0] == False:\n                eventArgs.areInputsValid = False\n                return\n            else:\n                depth = result[1]\n\n            if depth <= 0:\n                _errMessage.text = 'The depth value should be greater than 0. 
It must be more than ' + des.unitsManager.formatInternalValue(0.1, _units, True)\n                eventArgs.areInputsValid = False\n                return\n\n        except:\n            if _ui:\n                _ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\n# Builds a cuboid component.\ndef drawCuboid(design, width, height, depth):\n    try:\n        # Create a new component by creating an occurrence.\n        occs = design.rootComponent.occurrences\n        mat = adsk.core.Matrix3D.create()\n        newOcc = occs.addNewComponent(mat)\n        newComp = adsk.fusion.Component.cast(newOcc.component)\n\n        # Create a new sketch.\n        sketches = newComp.sketches\n        xyPlane = newComp.xYConstructionPlane\n        baseSketch = sketches.add(xyPlane)\n\n        # Draw a rectangle for the base.\n        baseSketch.sketchCurves.sketchLines.addCenterPointRectangle(adsk.core.Point3D.create(0,0,0), adsk.core.Point3D.create(width/2,height/2,0))\n\n        #### Extrude the rectangle to create the base of the cuboid.\n        prof = adsk.fusion.Profile.cast(None)\n        prof = baseSketch.profiles.item(0)\n\n        # Create an extrusion input to be able to define the input needed for an extrusion\n        # while specifying the profile and that a new component is to be created\n        extrudes = newComp.features.extrudeFeatures\n        extInput = extrudes.createInput(prof, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)\n\n        # Define that the extent is a distance extent equal to the requested depth.\n        distance = adsk.core.ValueInput.createByReal(depth)\n        extInput.setDistanceExtent(False, distance)\n\n        # Create the extrusion.\n        baseExtrude = extrudes.add(extInput)\n        # Create an extrusion input to be able to define the input needed for an extrusion\n        # while specifying the profile and that a new component is to be created\n        extInput = extrudes.createInput(prof, adsk.fusion.FeatureOperations.JoinFeatureOperation)\n\n        # Define that the extent is a distance extent equal to the requested depth.\n        distance = adsk.core.ValueInput.createByReal(depth)\n        extInput.setDistanceExtent(False, distance)\n\n        baseFillet = None\n\n        # Group everything used to create the cuboid in the timeline.\n        timelineGroups = design.timeline.timelineGroups\n        newOccIndex = newOcc.timelineObject.index\n        diametralPitchSketch = sketches.add(xyPlane)\n        pitchSketchIndex = diametralPitchSketch.timelineObject.index\n        # ui.messageBox(\"Indices: \" + str(newOccIndex) + \", \" + str(pitchSketchIndex))\n        timelineGroup = timelineGroups.add(newOccIndex, pitchSketchIndex)\n        timelineGroup.name = 'Cuboid'\n\n        # Add an attribute to the component with all of the input values. 
This might\n        # be used in the future to be able to edit the cuboid.\n        cuboidValues = {}\n        cuboidValues['width'] = str(width)\n        cuboidValues['height'] = str(height)\n        cuboidValues['depth'] = str(depth)\n        attrib = newComp.attributes.add('Cuboid', 'Values',str(cuboidValues))\n\n        newComp.name = 'Cuboid (' + str(width*10) + \"mm\" + ' x ' + str(height*10) + \"mm\" + ' x ' + str(depth*10) + \"mm\" + ')'\n        return newComp\n    except Exception as error:\n        _ui.messageBox(\"drawCuboid Failed : \" + str(error))\n        return None\n","sub_path":"RandomSceneCreate/RandomSceneCreate.py","file_name":"RandomSceneCreate.py","file_ext":"py","file_size_in_byte":14394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"459529366","text":"from django.test import TestCase, tag\nfrom django.utils import timezone\n\nfrom bptl.camunda.tests.factories import ExternalTaskFactory\n\nfrom ..api import NoCallback, TaskExpired, execute\nfrom ..registry import WorkUnitRegistry\nfrom .factories import TaskMappingFactory\n\nregister = WorkUnitRegistry()\n\n\n@register\ndef task_1(task):\n    return {\"task_run\": \"task_1\"}\n\n\n@register\ndef task_2(task):\n    return {\"task_run\": \"task_2\"}\n\n\n@tag(\"public-api\")\nclass RouteTaskTests(TestCase):\n    def test_route_to_correct_task(self):\n        # set up the routing decisions\n        TaskMappingFactory.create(\n            topic_name=\"task-1\", callback=register.get_for(task_1)\n        )\n        TaskMappingFactory.create(\n            topic_name=\"task-2\", callback=register.get_for(task_2)\n        )\n        # set up fetched tasks\n        task1 = ExternalTaskFactory.create(topic_name=\"task-1\")\n        task2 = ExternalTaskFactory.create(topic_name=\"task-2\")\n\n        execute(task1, registry=register)\n        execute(task2, registry=register)\n\n        task1.refresh_from_db()\n        task2.refresh_from_db()\n\n        self.assertEqual(task1.result_variables, {\"task_run\": \"task_1\"})\n        self.assertEqual(task2.result_variables, {\"task_run\": \"task_2\"})\n\n    def test_no_mapping_configured(self):\n        task = ExternalTaskFactory.create(topic_name=\"task-1\")\n\n        with self.assertRaises(NoCallback):\n            execute(task, registry=register)\n\n    def test_mapping_configured_invalid_callback(self):\n        TaskMappingFactory.create(topic_name=\"task-1\", callback=\"foo.bar\")\n        task = ExternalTaskFactory.create(topic_name=\"task-1\")\n\n        with self.assertRaises(NoCallback):\n            execute(task, registry=register)\n\n    def test_expired_task(self):\n        TaskMappingFactory.create(\n            topic_name=\"task-1\", callback=register.get_for(task_1)\n        )\n        task1 = ExternalTaskFactory.create(\n            topic_name=\"task-1\", lock_expires_at=timezone.now()\n        )\n\n        with self.assertRaises(TaskExpired):\n            execute(task1, registry=register)\n","sub_path":"src/bptl/tasks/tests/test_public_api.py","file_name":"test_public_api.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"225280777","text":"# 230\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n# O(n) O(n)\ndef kthSmallest(root, k):\n    stack = [root]\n    arr = []\n    while stack:\n        temp = stack.pop()\n        if temp:\n            arr.append(temp.val)\n            stack.append(temp.left)\n            stack.append(temp.right)\n    arr.sort()\n    return arr[k-1]\n\n\n# DFS inorder traversal (left, root, right) O(n) O(n)\ndef kthSmallest(root, k):\n    def inorder(root):\n        return inorder(root.left) + [root.val] + inorder(root.right) if root else []\n    return inorder(root)[k-1]\n\n# DFS iterative inorder traversal (left, root, right) O(n) O(1)\n\n\ndef kthSmallest(root, 
k):\n    stack = []\n    while True:\n        while root:\n            stack.append(root)\n            root = root.left\n        root = stack.pop()\n        k -= 1\n        if k == 0:\n            return root.val\n        root = root.right\n\n","sub_path":"algorithm/leetcode/树/二叉树中第k小的元素.py","file_name":"二叉树中第k小的元素.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"192776622","text":"# modules\nimport numpy as np\nfrom cube_utils import *\nfrom scipy.signal import fftconvolve, gaussian\n\n# Effects\n# speed_decorator, blurs\n\n\ndef speed_decorator(function, frame, speed):\n    '''\n    Effect: Speed\n\n    Use this to decorate other functions to gain speed control.\n    Example:\n        for i in range(20):\n            world *= 0.5\n            blub = speed_decorator(random, 3, i)\n            world = blub(world, 3)\n            print(i, world)\n    '''\n    # converting input\n    speed = int((speed/127.0)*40+1)\n\n    def simple_return(world, *args):\n        return world\n\n    if frame % speed == 0:\n        return function\n    else:\n        return simple_return\n\ndef move(world, frame, achse, direction):\n\n    achse = int(round((achse/127.0)*2))\n    direction = int(round((direction/127.0)))\n\n    if direction == 0: direction = -1\n\n    newworld = np.roll(world, direction, axis=achse)\n\n    return newworld\n\n\n\ndef blur(world, frame, x=1, y=1, z=1, amount=1, fade=0.9):\n    '''\n    Effect: Blur\n\n    x        x blur on/off\n    y        y blur on/off\n    z        z blur on/off\n\n    blurs the cube by spreading the values of each led\n    onto the others\n    '''\n    # converting input\n    x = int(round(x/127.0))\n    y = int(round(y/127.0))\n    z = int(round(z/127.0))\n    amount = amount/127.0\n    fade = fade/127.0\n\n    # save previous max value for scaling\n    old_max = world.max()\n\n    # create gaussian window\n    a = gaussian(5, amount+0.001, sym=True)\n\n    dim = 7\n    gauss = world_init(dim)\n    center = dim // 2\n\n    '''\n    if x != 1 or y != 1 or z != 1:\n        if x == 1:\n            gauss[center-2:center+3, center, center] = a[0]\n            gauss[center-1:center+2, center, center] = a[1]\n        if y == 1:\n            gauss[center, center-2:center+3, center] = a[0]\n            gauss[center, center-1:center+2, center] = a[1]\n        if z == 1:\n            gauss[center, center, center-2:center+3] = a[0]\n            gauss[center, center, center-1:center+2] = a[1]\n    else:\n    '''\n\n    gauss[center-2:center+3, center-2:center+3, center-2:center+3] = a[0]\n    gauss[center-1:center+2, center-1:center+2, center-1:center+2] = a[1]\n\n    gauss[center, center, center] = a[2]\n\n    # convolve with gaussian\n    world = fftconvolve(world, gauss, mode='same')\n\n    # scale to 1\n    #world /= world.max()+0.001\n\n    # scale to old_max\n    #world *= old_max\n\n    return np.round(np.clip(world*fade, 0, 1), 2)\n\ndef color_translate(value):\n    #translates values from 0 to 127 to rgb values\n    if (value>126):\n        value=126\n\n    r_out = 0.0\n    g_out = 0.0\n    b_out = 0.0\n\n    if(value<=21):\n        r_out=1\n        g_out=value/21.0\n        b_out=0\n    elif(value>21 and value<=42):\n        r_out=1-((value-21.0)/21.0)\n        g_out=1\n        b_out=0\n    elif(value>42 and value<=63):\n        r_out=0\n        g_out=1\n        b_out=(value-42.0)/21.0\n    elif(value>63 and value<=84):\n        r_out=0\n        g_out=1-((value-63.0)/21.0)\n        b_out=1\n    elif(value>84 and value<=105):\n        r_out=(value-84.0)/21.0\n        g_out=0\n        b_out=1\n    elif(value>105):\n        r_out=1\n        g_out=0\n        b_out=1-((value-105.0)/21.0)\n\n    return {'r':r_out, 'g':g_out, 'b':b_out}\n\n\ndef gradient(world, rgb1, rgb2, balance):\n    if(balance<=0):\n        balance=50.0\n    else:\n        balance = balance/127.0\n\n    pixels_on = 1\n    pixel_count = 0\n    new_world_r = world_init(10)\n    new_world_g = world_init(10)\n    new_world_b = world_init(10)\n\n    rgbResult1 = color_translate(rgb1)\n    rgbResult2 = 
color_translate(rgb2)\n\n for x in range(len(world)):\n for y in range (len(world)):\n for z in range (len(world)):\n if(world[x,y,z]!=0):\n pixels_on+=1\n\n\n for x in range(len(world)):\n for y in range (len(world)):\n for z in range (len(world)):\n if(world[x,y,z]!=0):\n if (pixel_count <= (pixels_on * balance)):\n new_world_r[x,y,z]= world[x,y,z]*(rgbResult1['r']+(pixel_count/(pixels_on * balance))*rgbResult2['r'])\n new_world_g[x,y,z]= world[x,y,z]*(rgbResult1['g']+(pixel_count/(pixels_on * balance))*rgbResult2['g'])\n new_world_b[x,y,z]= world[x,y,z]*(rgbResult1['b']+(pixel_count/(pixels_on * balance))*rgbResult2['b'])\n pixel_count+=1\n elif (pixel_count > (pixels_on * balance)):\n new_world_r[x,y,z]= world[x,y,z]*((1-((pixels_on * balance)/pixel_count))*rgbResult1['r']+rgbResult2['r'])\n new_world_g[x,y,z]= world[x,y,z]*((1-((pixels_on * balance)/pixel_count))*rgbResult1['g']+rgbResult2['g'])\n new_world_b[x,y,z]= world[x,y,z]*((1-((pixels_on * balance)/pixel_count))*rgbResult1['b']+rgbResult2['b'])\n pixel_count+=1\n\n return {'r':new_world_r, 'g':new_world_g, 'b':new_world_b}\n\n\ndef gradient_sigmoidal(world, rgb1, rgb2, balance):\n\n if(balance<=0):\n balance=0.0001\n else:\n balance = balance/127.0\n\n pixels_on = 1\n pixel_count = 0\n new_world_r = world_init(10)\n new_world_g = world_init(10)\n new_world_b = world_init(10)\n\n rgbResult1 = color_translate(rgb1)\n rgbResult2 = color_translate(rgb2)\n\n for x in range(len(world)):\n for y in range (len(world)):\n for z in range (len(world)):\n if(world[x,y,z]!=0):\n pixels_on+=1\n\n for x in range(len(world)):\n for y in range (len(world)):\n for z in range (len(world)):\n if(world[x,y,z]!=0):\n if (pixel_count <= (pixels_on * balance)):\n new_world_r[x,y,z]= world[x,y,z]*((1-(1/(1+np.exp(1)**(5*((pixel_count/(balance * pixels_on))-1)))))*rgbResult1['r']+(1/(1+np.exp(1)**(5*((pixel_count/(balance * pixels_on))-1))))*rgbResult2['r'])\n new_world_g[x,y,z]= world[x,y,z]*((1-(1/(1+np.exp(1)**(5*((pixel_count/(balance * pixels_on))-1)))))*rgbResult1['g']+(1/(1+np.exp(1)**(5*((pixel_count/(balance * pixels_on))-1))))*rgbResult2['g'])\n new_world_b[x,y,z]= world[x,y,z]*((1-(1/(1+np.exp(1)**(5*((pixel_count/(balance * pixels_on))-1)))))*rgbResult1['b']+(1/(1+np.exp(1)**(5*((pixel_count/(balance * pixels_on))-1))))*rgbResult2['b'])\n pixel_count+=1\n elif (pixel_count > (pixels_on * balance)):\n new_world_r[x,y,z]= world[x,y,z]*((1-(1/(1+np.exp(1)**(5*((pixel_count-(balance * pixels_on))/(pixels_on-(balance * pixels_on)))))))*rgbResult1['r']+(1/(1+np.exp(1)**(5*((pixel_count-(balance * pixels_on))/(pixels_on-(balance * pixels_on))))))*rgbResult2['r'])\n new_world_g[x,y,z]= world[x,y,z]*((1-(1/(1+np.exp(1)**(5*((pixel_count-(balance * pixels_on))/(pixels_on-(balance * pixels_on)))))))*rgbResult1['g']+(1/(1+np.exp(1)**(5*((pixel_count-(balance * pixels_on))/(pixels_on-(balance * pixels_on))))))*rgbResult2['g'])\n new_world_b[x,y,z]= world[x,y,z]*((1-(1/(1+np.exp(1)**(5*((pixel_count-(balance * pixels_on))/(pixels_on-(balance * pixels_on)))))))*rgbResult1['b']+(1/(1+np.exp(1)**(5*((pixel_count-(balance * pixels_on))/(pixels_on-(balance * pixels_on))))))*rgbResult2['b'])\n pixel_count+=1\n return {'r':new_world_r, 'g':new_world_g, 'b':new_world_b}\n","sub_path":"cube_effects.py","file_name":"cube_effects.py","file_ext":"py","file_size_in_byte":7469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"341246785","text":"# coding=utf-8\nfrom mock import patch\nfrom django.test import 
TestCase\nfrom pxl.weather_api import weather_api_call, weather_api_dict\nfrom weather_test_data import TEST_OUTPUT\nimport requests\nimport json\n\nRESP_DATA = {\"location\": \"Seattle, WA\", \\\n \"weather\": \"partlycloudy\", \\\n \"temperature\": \"76.5 F (24.7 C)\"}\n\n\nclass TestWeather(TestCase):\n \"\"\"Test Class for the Weather API call.\"\"\"\n @patch('pxl.weather_api.requests')\n def test_weather_api_call(self, requests):\n \"\"\"Test expected output of API call.\"\"\"\n mocked_method = requests.get().json()\n mocked_method.return_value = TEST_OUTPUT\n response = weather_api_call()\n self.assertEqual(response.return_value, TEST_OUTPUT)\n\n @patch('pxl.weather_api.requests')\n def test_weather_api_call_repsonse_type(self, requests):\n \"\"\"Test expected output is a dictionary.\"\"\"\n mocked_method = requests.get().json()\n mocked_method.return_value = TEST_OUTPUT\n response = weather_api_call()\n self.assertEqual(type(response.return_value), type(TEST_OUTPUT))\n\n @patch('pxl.weather_api.requests')\n def test_weather_api_expected_location(self, requests):\n \"\"\"Test expected location is Seattle, WA.\"\"\"\n mocked_method = requests.get().json\n mocked_method.return_value = TEST_OUTPUT\n response = weather_api_call()\n location = response['current_observation']['display_location']['full']\n self.assertEqual(location, 'Seattle, WA')\n\n @patch('pxl.weather_api.requests')\n def test_weather_api_response_type(self, requests):\n \"\"\"Test if the response is a Dictionary.\"\"\"\n mocked_method = requests.get().json\n mocked_method.return_value = TEST_OUTPUT\n response = weather_api_dict()\n self.assertEqual(type(response), type(RESP_DATA))\n\n @patch('pxl.weather_api.requests')\n def test_weather_api_response_right_keys(self, requests):\n \"\"\"Ensure keys in dict are the expected data fields.\"\"\"\n mocked_method = requests.get().json\n mocked_method.return_value = TEST_OUTPUT\n response = weather_api_dict()\n self.assertTrue(\"location\" in response)\n self.assertTrue(\"temperature\" in response)\n self.assertTrue(\"weather\" in response)\n\n @patch('pxl.weather_api.requests')\n def test_weather_api_response_wrong_keys(self, requests):\n \"\"\"Test improper key is not in response.\"\"\"\n mocked_method = requests.get().json\n mocked_method.return_value = TEST_OUTPUT\n response = weather_api_dict()\n self.assertFalse(\"slurpee\" in response)\n","sub_path":"pxl/tests/test_weather_api.py","file_name":"test_weather_api.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87687553","text":"f_test = ['test.in', 'test.out']\r\nf_small = ['A-small-attempt0.in', 'small.out']\r\nf_large = ['A-large.in', 'large.out']\r\nf_practice_small = ['A-small-practice.in', 'practice_small.out']\r\nf_practice_large = ['A-large-practice.in', 'practice_large.out']\r\n\r\n#FILE_NAME = f_test\r\nFILE_NAME = f_small\r\n#FILE_NAME = f_large\r\n#FILE_NAME = f_practice_small\r\n#FILE_NAME = f_practice_large\r\n\r\nf = open(FILE_NAME[0], 'r')\r\no = open(FILE_NAME[1], 'w')\r\n\r\ndef solve1(N, L) :\r\n ret = 0\r\n for i in range(1, N) :\r\n ret += max(0, L[i-1] - L[i])\r\n return ret\r\n\r\ndef solve2(N, L) :\r\n v = 0\r\n ret = 0\r\n for i in range(1, N) :\r\n v = max(v, L[i-1] - L[i])\r\n\r\n for i in range(N-1) :\r\n ret += min(v, L[i])\r\n return ret\r\n\r\ndef case_result(case) :\r\n N = int(f.readline())\r\n L = list(map(int, f.readline().split()))\r\n return str(solve1(N, L)) + ' ' + str(solve2(N, 
L))\r\n\r\nT = int(f.readline())\r\nfor case in range(1, T+1) :\r\n o.write('Case #'+str(case)+': '+case_result(case)+'\\n')\r\n\r\nf.close()\r\no.close()\r\n","sub_path":"solutions_6404600001200128_0/Python/InhaKorean/inhakoreanA.py","file_name":"inhakoreanA.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"603465984","text":"from requests_html import HTMLSession\nimport unittest\n#pip3 install requests-html\n\n\nclass LinksFinder:\n def __init__(self, words, sources):\n self.words = set(words)\n self.sources = set(sources)\n\n def printSources(self):\n print(self.sources)\n\n def printWords(self):\n print(self.words)\n\n def addSource(self, source):\n if source not in self.sources:\n self.sources.add(source)\n\n def removeSource(self, source):\n if source in self.sources:\n self.sources.remove(source)\n\n def addWord(self, word):\n if word not in self.words:\n self.words.add(word)\n\n def removeWord(self, word):\n if word in self.words:\n self.words.remove(word)\n\n def findLinks(self):\n session = HTMLSession()\n returnLinks = {}\n for source in self.sources:\n print('#####' + source + '####')\n for link in session.get(source).html.absolute_links:\n if any(word.lower() in link.lower() for word in self.words):\n returnLinks[link] = source\n session.close()\n return returnLinks\n\n\nwebsites = [\n 'https://www.magnapolonia.org',\n 'https://www.magnapolonia.org',\n 'https://wpolityce.pl',\n 'https://www.tvn24.pl',\n 'https://www.polsatnews.pl',\n 'https://www.rt.com',\n 'https://www.jpost.com',\n 'https://www.washingtonpost.com',\n 'https://www.aljazeera.com',\n 'https://www.spiegel.de'\n ]\n\nwords = ['polska', 'polska', 'polski', 'polskie', 'poland', 'polish', 'polnish', 'polen']\n\n\nclass NewsModelTestCases(unittest.TestCase):\n def test_find_some_links_containing_words(self):\n # Setup\n linksFinder = LinksFinder(words, websites)\n # Run\n linksFinder.printSources()\n linksFinder.printWords()\n print(linksFinder.findLinks())\n # Check\n self.assertNotEqual({}, linksFinder.findLinks())\n\n def test_find_links_empty_words(self):\n # Setup\n linksFinder = LinksFinder([], websites)\n # Run\n linksFinder.printSources()\n linksFinder.printWords()\n print(linksFinder.findLinks())\n # Check\n self.assertEqual({}, linksFinder.findLinks())\n # Run\n linksFinder.addWord('pol')\n print(linksFinder.findLinks())\n # Check\n self.assertNotEqual({}, linksFinder.findLinks())\n # Run\n linksFinder.removeWord('pol')\n print(linksFinder.findLinks())\n # Check\n self.assertEqual({}, linksFinder.findLinks())\n\n def test_find_links_empty_source(self):\n # Setup\n linksFinder = LinksFinder(words, [])\n # Run\n linksFinder.printSources()\n linksFinder.printWords()\n print(linksFinder.findLinks())\n # Check\n self.assertEqual({}, linksFinder.findLinks())\n # Run\n linksFinder.addSource('https://wpolityce.pl')\n # Check\n self.assertNotEqual({}, linksFinder.findLinks())\n # Run\n linksFinder.removeSource('https://wpolityce.pl')\n # Check\n self.assertEqual({}, linksFinder.findLinks())\n\n def test_find_empty_words_empty_source(self):\n # Setup\n linksFinder = LinksFinder([], [])\n # Run\n linksFinder.printSources()\n linksFinder.printWords()\n print(linksFinder.findLinks())\n # Check\n self.assertEqual({}, linksFinder.findLinks())\n\n\nif __name__ == \"__main__\":\n 
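# NOTE: running this module executes the LinksFinder tests above, which fetch the listed sites live and therefore need network access.\n    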
unittest.main()\n","sub_path":"django_blog/news/myparser.py","file_name":"myparser.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"186052986","text":"from datetime import datetime, timedelta\nfrom django.shortcuts import (\n render, reverse, redirect,\n Http404, HttpResponse, HttpResponseRedirect\n)\nfrom django.utils import timezone\nfrom django.shortcuts import get_object_or_404\nfrom django.forms.models import model_to_dict\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\nfrom halls.utils import render_form_errors\nfrom users.models import Profile\nfrom halls.models import Hall, RoomType\nfrom reviews.models import Review, ReviewPhotos, ReviewRating, Report\n\n\nfrom .forms import ReviewEditForm, ReviewPhotosEditForm, ReportForm\nfrom django.forms import modelformset_factory\nfrom django.http import JsonResponse\n\nfrom .forms import ReviewEditForm, ReviewPhotosEditFormSet, ReportForm\n\n# Passed to the template to specify\n# whether an existing review is being edited or a new one is being created.\nREVIEW_WRITE_NEW = 'WRITE_NEW'\nREVIEW_CHANGE_EXISTING = 'CHANGE_EXISTING'\n\n\n@login_required\ndef write(request, hall_id):\n \"\"\" Allows the user to write a new review for a room. \"\"\"\n conflicting_reviews = list(Review.objects.filter(\n user=request.user,\n date_created__gte=datetime.today() - timedelta(days=365),\n )[:1])\n\n if len(conflicting_reviews) > 0:\n cr = conflicting_reviews[0]\n cr_hall = cr.roomtype.hall.name\n cr_age = max(0, (datetime.today() - cr.date_created.replace(tzinfo=None)).days)\n messages.error(request, f\"\"\"\n Posting a second review in less than 365 days is disallowed.\n Your previous review of \\\"{cr_hall}\\\" was\n posted {cr_age} days ago.\n \"\"\")\n return redirect(f\"{reverse('profile')}#review-{cr.id}\")\n\n # We'll pass the hall to the view, so get the data needed for that\n hall = get_object_or_404(Hall, pk=hall_id)\n hall_data = hall.get_card_data()\n if request.method == 'POST':\n form = ReviewEditForm(request.POST)\n if form.is_valid():\n # Add the needed references to the other models.\n form.instance.roomtype_id = int(form.cleaned_data.get('roomtype'))\n form.instance.user = request.user\n # Save to DB and print message.\n form.save()\n messages.success(\n request,\n \"Your review was saved successfully. \"\n \"Now its time to add some photos!\"\n )\n return redirect(reverse('review-photos', kwargs={'review_id': form.instance.id}))\n else:\n messages.error(\n request, \"Please, correct any errors in the review form!\")\n\n if request.method == 'GET':\n form = ReviewEditForm()\n users_reviews = Review.objects.filter(user=request.user)\n if len(users_reviews) > 4:\n messages.error(\n request, \"You have exceeded the number of reviews an account can make. Please delete one of your reviews in order to make a new one.\")\n return HttpResponseRedirect(reverse('hallpage', kwargs={'id': hall_id}))\n roomtypes = hall.roomtype_set.all()\n return render(request, 'reviews/review-edit.html', {\n 'form': form,\n 'hall': hall_data,\n 'roomtypes': roomtypes,\n 'profile': Profile.objects.get(user=request.user),\n 'mode': REVIEW_WRITE_NEW\n })\n\n\n@login_required\ndef edit(request, review_id):\n \"\"\" Allows the user to edit an existing review. 
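Only the review's author may edit it; any other user is redirected with an error message. 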
\"\"\"\n review = get_object_or_404(Review, pk=review_id)\n if review.user_id != request.user.id:\n messages.error(\n request, \"You can only edit reviews posted by yourself!\")\n return HttpResponseRedirect(reverse('index'))\n\n # We'll pass the hall to the view, so get the data needed for that\n # get_card_data mainly adds photo and thumbnail\n hall = review.roomtype.hall.get_card_data()\n if request.method == 'POST':\n form = ReviewEditForm(request.POST, instance=review)\n if form.is_valid():\n # TODO: Check if valid\n review.roomtype_id = form.cleaned_data.get('roomtype')\n # Save to DB and print message.\n form.save()\n messages.success(\n request, \"Your review was updated successfully!\")\n\n if 'goto-photos' in request.POST:\n # The user pressed the save & edit photos button\n return redirect(reverse('review-photos', kwargs={'review_id': form.instance.id}))\n else:\n messages.error(\n request, \"Please, correct any errors in the review form!\")\n\n if request.method == 'GET':\n form = ReviewEditForm(model_to_dict(review))\n\n # Get the room types from the DB\n room_types = RoomType.objects.filter(hall_id=hall.get('id'))\n review_photos = review.reviewphotos_set.all()\n return render(request, 'reviews/review-edit.html', {\n 'review': review,\n 'review_photos': review.reviewphotos_set.all(),\n 'form': form,\n 'hall': hall,\n 'roomtypes': room_types,\n 'profile': Profile.objects.get(user=request.user),\n 'date_created': review.date_created,\n 'date_modified': review.date_modified,\n 'mode': REVIEW_CHANGE_EXISTING\n })\n\n\n@login_required\ndef delete(request, review_id):\n \"\"\" Allows the user to delete an existing review. \"\"\"\n review = get_object_or_404(Review, pk=review_id)\n if review.user_id != request.user.id:\n # The review is by a different user. Return to profile page.\n messages.error(\n request, \"You can only delete reviews posted by yourself!\")\n return HttpResponseRedirect(reverse('profile'))\n\n if request.method == 'POST':\n # Delete the review from the DB and return to profile page.\n review.delete()\n messages.success(\n request, \"Review deleted successfully!\")\n return HttpResponseRedirect(reverse('profile'))\n else:\n # Send bad request for GET requests.\n return HttpResponse(status=400)\n\n\n@login_required\ndef review_photos(request, review_id):\n review = get_object_or_404(Review, pk=review_id)\n if review.user_id != request.user.id:\n # The review is by a different user. 
Return to profile page.\n messages.error(\n request, \"You can only delete reviews posted by yourself!\")\n return HttpResponseRedirect(reverse('profile'))\n\n if request.method == 'POST':\n formset = ReviewPhotosEditFormSet(\n request.POST,\n request.FILES,\n queryset=review.reviewphotos_set.none()\n )\n context = {\n 'formset': formset,\n 'review': review,\n 'user': request.user,\n }\n if not formset.is_valid():\n for form in formset.forms:\n render_form_errors(request, form)\n return render(request, 'reviews/review-photos.html', context)\n else:\n for form in formset.forms:\n if form.cleaned_data:\n # Call the overriden save() method to execute the\n # action requested by the user.\n form.save(user=request.user, review=review)\n form.instance.refresh_from_db()\n form['photo_path'].value = form.instance.photo_path\n # form.fields['photo_path'].value = form.instance.photo_path\n\n messages.success(\n request, 'Your changes to review photos have been made.')\n return render(request, 'reviews/review-photos.html', context)\n\n else:\n formset = ReviewPhotosEditFormSet(\n queryset=review.reviewphotos_set.all())\n context = {\n 'formset': formset,\n 'review': review,\n 'user': request.user,\n }\n\n return render(request, 'reviews/review-photos.html', context)\n\n\n# function takes a list of reviews and a list of ratings for those reviews\n# and returns a dictionary of review ids and their associated ratings\ndef display_ratings(reviews, ratings):\n reviews_dic = {review.id: 0 for review in reviews} # set initial rating\n for rating in ratings: # for all reviews to 0\n if (rating.vote): # if rating is an upvote\n reviews_dic[rating.review.id] += 1 # add 1 to rating of that review\n else:\n reviews_dic[rating.review.id] -= 1 # sub 1 from rating of that review\n return reviews_dic\n\n# function takes a list of reviews and a dictionary of review ids and their\n# associated ratings and returns a list of sorted reviews based off of ratings\ndef sort_reviews(reviews, reviews_dic):\n # sort the dictionary of review ids based off of their ranking, highest to lowest\n sorted_review_ids = sorted(reviews_dic, key=reviews_dic.get, reverse=True)\n # create a dictionary so reviews can be obtained from their id\n reviews_with_ids = {review.id: review for review in reviews}\n sorted_reviews = []\n for review_id in sorted_review_ids: # iterate through sorted review ids\n sorted_reviews.append(reviews_with_ids[review_id]) # add the related review\n return sorted_reviews # to sorted list\n\n# function takes a request object and a list of review ratings and\n# returns a dictionary of review ids and the users ratings\ndef user_ratings(request, ratings):\n if request.user.is_authenticated: # check if a user is logged on\n reviews_rated = ratings.filter(user=request.user) # get users reviews\n # create a dictionary of review ids and how the user voted\n users_ratings = {rating.review.id: rating.vote for rating in reviews_rated}\n return users_ratings\n return {}\n\ndef up_vote(request, hall_id, review_id):\n highlight = False\n review = Review.objects.filter(id=review_id)\n ratings = ReviewRating.objects.filter(review__id=review_id)\n if request.user.is_authenticated:\n user_rating = ratings.filter(user=request.user)\n if len(user_rating) == 0:\n new_rating = ReviewRating(review=review[0], user=request.user, vote=True)\n new_rating.save()\n highlight = True\n elif user_rating[0].vote == True:\n user_rating[0].delete()\n else:\n user_rating[0].vote = True\n user_rating[0].save()\n highlight = True\n ratings = 
ReviewRating.objects.filter(review__id=review_id)\n    final_ratings = display_ratings(review, ratings)\n    context = {\n        'value': review_id,\n        'logged_in': request.user.is_authenticated,\n        'url': reverse(\"login\"),\n        'rating': final_ratings[review_id],\n        'highlight_up': highlight,\n        'highlight_down': False\n    }\n    return JsonResponse(context)\n\ndef down_vote(request, hall_id, review_id):\n    highlight = False\n    review = Review.objects.filter(id=review_id)\n    ratings = ReviewRating.objects.filter(review__id=review_id)\n    if request.user.is_authenticated:\n        user_rating = ratings.filter(user=request.user)\n        if len(user_rating) == 0:\n            new_rating = ReviewRating(review=review[0], user=request.user, vote=False)\n            new_rating.save()\n            highlight = True\n        elif user_rating[0].vote == False:\n            user_rating[0].delete()\n        else:\n            user_rating[0].vote = False\n            user_rating[0].save()\n            highlight = True\n    ratings = ReviewRating.objects.filter(review__id=review_id)\n    final_ratings = display_ratings(review, ratings)\n    context = {\n        'value': review_id,\n        'logged_in': request.user.is_authenticated,\n        'url': reverse(\"login\"),\n        'rating': final_ratings[review_id],\n        'highlight_up': False,\n        'highlight_down': highlight\n    }\n    return JsonResponse(context)\n\n@login_required\ndef report(request, review_id):\n    review = get_object_or_404(Review, pk=review_id)\n\n    if request.method == 'POST':\n        form = ReportForm(request.POST)\n\n        if form.is_valid():\n            # Save to DB and print message.\n            form.instance.review = review\n\n            form.instance.user = request.user\n            form.save()\n            review.reported = True\n            review.save()\n            messages.success(\n                request, \"Your report was saved successfully!\")\n            return HttpResponseRedirect(reverse('report', kwargs={'review_id': review.id}))\n\n        else:\n            messages.error(\n                request, \"Please, correct any errors in the report form.\")\n            return render(request, 'reviews/report.html', {\n                \"form\": form\n            })\n    else:\n        form = ReportForm()\n        return render(request, 'reviews/report.html', {\n            \"form\": form\n        })\n","sub_path":"honesthalls/reviews/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"292178036","text":"class Matrix:\n    def __init__(self):\n        self.lst = []  # per-instance list (a class-level list would be shared by every Matrix)\n        self.len = int(input(\"Enter number of elements:\"))\n        for i in range(self.len):\n            self.lst.append(int(input(\"Enter element at position \" + str(i + 1) + \":\")))\n\n    def BubbleSort(self):\n        for i in range(self.len - 1):  # go from 0 up to one before the end\n            for j in range(self.len - 1 - i):  # move j the same way, bounded so it never runs past the end\n                if self.lst[j] > self.lst[j + 1]:  # swap if the pair is out of order\n                    self.lst[j], self.lst[j + 1] = self.lst[j + 1], self.lst[j]\n        print(self.lst)\n\n    def InsertionSort(self):\n        for i in range(1, self.len):  # go from the first element to the end\n            current = self.lst[i]  # store the ith value in a variable\n            j = i - 1  # point j at the element just before it\n            while j >= 0 and current <= self.lst[j]:  # walk backwards, hence the bound on j; and if the key\n                self.lst[j + 1] = self.lst[j]  # is smaller, shift this element one step back\n                j -= 1  # keep decreasing j to inspect the element before it\n            self.lst[j + 1] = current  # once j is done, place the key in its spot\n        print(self.lst)\n\n    def SelectionSort(self):\n        for i in range(self.len):  # go from 0 to the end\n            minIndex = i  # treat the current element as the minimum\n            for j in range(i + 1, self.len):  # second loop goes from one past i to the 
end\n                if self.lst[minIndex] > self.lst[j]:  # if the element at minIndex is bigger than the jth element\n                    minIndex = j  # then update the minimum, since we want the smallest (obviously)\n            self.lst[i], self.lst[minIndex] = self.lst[minIndex], self.lst[i]  # once the smallest is found, swap it into place\n        print(self.lst)\n\n    def ShellSort(self):\n        gap = self.len // 2\n        while gap >= 1:\n            for i in range(0, self.len - gap):\n                if self.lst[i] > self.lst[i + gap]:\n                    self.lst[i], self.lst[i + gap] = self.lst[i + gap], self.lst[i]\n            gap //= 2\n        print(self.lst)\n\n    def Partition(self, start, end):\n        pivot = self.lst[start]\n        low = start + 1\n        high = end\n\n        while True:\n\n            while low <= high and self.lst[high] >= pivot:\n                high -= 1\n\n            while low <= high and self.lst[low] <= pivot:\n                low += 1\n\n            if low <= high:\n                self.lst[low], self.lst[high] = self.lst[high], self.lst[low]\n\n            else:\n                break\n\n        self.lst[start], self.lst[high] = self.lst[high], self.lst[start]\n\n        return high\n\n    def QuickSort(self, start, end):\n        if start >= end:\n            return\n\n        part = self.Partition(start, end)\n        self.QuickSort(start, part - 1)\n        self.QuickSort(part + 1, end)\n\n\ntestMat = Matrix()\n# testMat.BubbleSort()\n# testMat.InsertionSort()\n# testMat.SelectionSort()\n# testMat.ShellSort()\ntestMat.QuickSort(0, testMat.len-1)\nprint(testMat.lst)\n","sub_path":"DSLgit/Assignment5.py","file_name":"Assignment5.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"450782195","text":"import tkinter as tk\nfrom tkinter.font import Font\nfrom PIL import ImageTk, Image\nimport random\nimport math\n\n# fix blurry font\nfrom ctypes import windll\nwindll.shcore.SetProcessDpiAwareness(1)\n\n\nclass MainProgram(tk.Tk):\n    \"\"\"This class is the program skeleton - the Tk object\"\"\"\n    def __init__(self):\n        # initialise Tk object\n        tk.Tk.__init__(self)\n\n        # program window variables\n        self.GAME_WINDOW_CAPTION = '2048 - Python Version'\n        self.GAME_WIDTH = 700\n        self.GAME_HEIGHT = 700\n\n        # system wide controls\n        self.slide_up_control = 'Up'\n        self.slide_down_control = 'Down'\n        self.slide_left_control = 'Left'\n        self.slide_right_control = 'Right'\n\n        # initialise window frame and spawn window\n        self.title(self.GAME_WINDOW_CAPTION)\n        self.minsize(self.GAME_WIDTH, self.GAME_HEIGHT)\n        self.geometry('{}x{}'.format(self.GAME_WIDTH, self.GAME_HEIGHT))\n        self.center_screen(self.GAME_WIDTH, self.GAME_HEIGHT)\n\n        # initialise all program wide fonts\n        self.DESCRIPTION_FONT = Font(family=\"Helvetica\", size=12)\n        self.BUTTON_FONT = Font(family=\"Helvetica\", size=16)\n        self.TITLE_FONT = Font(family=\"Helvetica\", size=28, weight='bold')\n\n        # create the master frame\n        self.GAME_MARGIN_X = 0\n        self.GAME_MARGIN_Y = 0\n        self.container = tk.Frame(self)\n        self.container.pack(padx=self.GAME_MARGIN_X, pady=self.GAME_MARGIN_Y)\n\n        # memory to hold the pointer to the current frame rendered\n        self.current_frame = None\n\n        # initialise by showing the main menu page\n        self.show_frame(MainMenu)\n\n    def center_screen(self, window_width, window_height):\n        \"\"\"centers the spawned screen\"\"\"\n        offset_right = int(self.winfo_screenwidth()/2 - window_width/2)\n        offset_down = int((self.winfo_screenheight()-40)/2 - window_height / 2)\n\n        self.geometry('+{}+{}'.format(offset_right, offset_down))\n\n    def show_frame(self, frame_class):\n        \"\"\"displays given frame class in parameter: e.g. 
show_frame(MainMenu)\"\"\"\n if self.current_frame is not None:\n self.current_frame.destroy()\n\n frame = frame_class(self.container, self)\n frame.grid(row=0, column=0, sticky='news')\n self.current_frame = frame\n\n\nclass MainMenu(tk.Frame):\n \"\"\"Main menu frame object\"\"\"\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent, width=controller.GAME_WIDTH, height=controller.GAME_HEIGHT)\n self.controller = controller\n\n # create canvas\n self.create_canvas()\n\n # display background onto the canvas\n self.display_background('background.png')\n\n # create button frame which holds menu buttons\n button_frame = tk.Frame(self, width=250, height=250)\n button_frame.grid_columnconfigure(0, weight=1)\n button_frame.grid_propagate(False)\n\n # create menu buttons\n menu_button1 = tk.Button(button_frame, text='New Game', command=lambda: controller.show_frame(MainGame))\n menu_button2 = tk.Button(button_frame, text='Load Game', command=lambda: controller.show_frame(MainGame))\n menu_button3 = tk.Button(button_frame, text='Instructions', command=lambda: controller.show_frame(Instructions))\n menu_button4 = tk.Button(button_frame, text='Controls', command=lambda: controller.show_frame(Controls))\n buttons = [menu_button1, menu_button2, menu_button3, menu_button4]\n\n # grid buttons into the button frame\n for index, button in enumerate(buttons):\n button_frame.grid_rowconfigure(index, weight=1)\n button.config(font=controller.BUTTON_FONT)\n button.config(borderwidth=3)\n button.grid(row=index, column=0, sticky='news')\n\n # display button frame on canvas\n # increase pixels_below_center: push the menu buttons lower on the GUI\n pixels_below_center = 130\n self.display_object_on_canvas(\n button_frame,\n controller.GAME_WIDTH // 2 - 0.5*button_frame['width'],\n controller.GAME_HEIGHT // 2 - 0.5*button_frame['height'] + pixels_below_center)\n\n def create_canvas(self):\n \"\"\"Renders a canvas onto the frame - covering the whole frame\"\"\"\n self.canvas = tk.Canvas(\n self,\n bd=-2,\n height=self.controller.GAME_HEIGHT,\n width=self.controller.GAME_WIDTH)\n self.canvas.pack(expand=tk.YES, fill=tk.BOTH)\n\n def display_background(self, image_path):\n \"\"\"Given a path to an image - it draws this image onto the frame canvas\"\"\"\n # draws and paints the background with image of given path\n background_image = Image.open(image_path)\n self.canvas.image = ImageTk.PhotoImage(background_image)\n self.canvas.create_image((0, 0), image=self.canvas.image, anchor='nw')\n\n def display_object_on_canvas(self, tk_object, x, y):\n \"\"\"Given object and X,Y coordinates, it draws it onto the frame canvas\"\"\"\n button1_window = self.canvas.create_window(\n x,\n y,\n anchor='nw',\n window=tk_object)\n\n\nclass Instructions(tk.Frame):\n \"\"\"This class is the instructions frame\"\"\"\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent, bg='white', width=controller.GAME_WIDTH, height=controller.GAME_HEIGHT)\n self.pack_propagate(False)\n self.controller = controller\n\n # create canvas\n self.create_canvas()\n\n # display background\n self.display_background('instructionsbackground.png')\n\n # display the main instructions\n instructions_label = tk.Label(\n self,\n bg='white',\n wrap=600,\n anchor=tk.W,\n justify=tk.LEFT,\n text=\"\"\"2048 is a game where you combine numbered tiles in order to gain a higher numbered tile. In this game you start with two tiles, the lowest possible number available is two. 
Then you will play by combining the tiles with the same number to have a tile with the sum of the number on the two tiles.\n\nThe default controls for the game are the arrow keys, however this can be changed in the controls section of the game.\n \"\"\",\n font=controller.DESCRIPTION_FONT)\n\n self.display_object_on_canvas(\n instructions_label,\n controller.GAME_WIDTH // 2 - 0.5 * instructions_label.winfo_reqwidth(),\n controller.GAME_HEIGHT // 2 - 0.5 * instructions_label.winfo_reqheight() + 120)\n\n # display the back button\n back_button_instructions = tk.Button(\n self,\n text='Back to menu',\n font=controller.BUTTON_FONT,\n command=lambda: controller.show_frame(MainMenu)\n )\n\n self.display_object_on_canvas(\n back_button_instructions,\n controller.GAME_WIDTH - back_button_instructions.winfo_reqwidth() - 30,\n controller.GAME_HEIGHT - back_button_instructions.winfo_reqheight() - 30\n )\n\n def create_canvas(self):\n \"\"\"Renders a canvas onto the frame - covering the whole frame\"\"\"\n self.canvas = tk.Canvas(\n self,\n bd=-2,\n height=self.controller.GAME_HEIGHT,\n width=self.controller.GAME_WIDTH)\n self.canvas.pack(expand=tk.YES, fill=tk.BOTH)\n\n def display_background(self, imagepath):\n \"\"\"Given a path to an image - it draws this image onto the frame canvas\"\"\"\n background_image = Image.open(imagepath)\n self.canvas.image = ImageTk.PhotoImage(background_image)\n self.canvas.create_image((0, 0), image=self.canvas.image, anchor='nw')\n\n def display_object_on_canvas(self, tk_object, x, y):\n \"\"\"Given object and X,Y coordinates, it draws it onto the frame canvas\"\"\"\n button1_window = self.canvas.create_window(\n x,\n y,\n anchor='nw',\n window=tk_object)\n\n\nclass Controls(tk.Frame):\n \"\"\"This class is the controls frame\"\"\"\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent, width=controller.GAME_WIDTH, height=controller.GAME_HEIGHT)\n self.controller = controller\n self.pack_propagate(False)\n\n # boolean showing if the user in the process of binding?\n self.is_binding = False\n\n # create canvas\n self.create_canvas()\n\n # display background\n self.display_background('controlsbackground.png')\n\n # display the main instructions\n instructions_label = tk.Label(\n self,\n bg='white',\n wrap=600,\n anchor=tk.W,\n justify=tk.LEFT,\n text=\"\"\"To change a control, click on the current value and input a key. 
To cancel the process, re-click it.\"\"\",\n font=controller.DESCRIPTION_FONT)\n\n self.display_object_on_canvas(\n instructions_label,\n controller.GAME_WIDTH // 2 - 0.5 * instructions_label.winfo_reqwidth(),\n instructions_label.winfo_reqheight() + 220)\n\n # display the input field grid 2x4 table (columns, rows)\n table_border_width = 2\n table_border_color = 'black'\n table_2x4 = tk.Frame(self, width=550, height=220, bg=table_border_color)\n table_2x4.grid_columnconfigure(0, weight=1)\n table_2x4.grid_columnconfigure(1, weight=1)\n table_2x4.grid_propagate(0)\n\n # create the LEFT HAND COLUMN labels: which show which column the user is changing\n control_up_label = tk.Label(table_2x4, text='Slide tiles up')\n control_down_label = tk.Label(table_2x4, text='Slide tiles down')\n control_left_label = tk.Label(table_2x4, text='Slide tiles left')\n control_right_label = tk.Label(table_2x4, text='Slide tiles right')\n\n # grid the LEFT HAND COLUMN\n control_labels = [control_up_label, control_down_label, control_left_label, control_right_label]\n for index, label in enumerate(control_labels):\n table_2x4.grid_rowconfigure(index, weight=1)\n label.config(\n bg='white',\n anchor=tk.W,\n justify=tk.LEFT,\n font=controller.DESCRIPTION_FONT\n )\n\n if index == 0:\n label.grid(row=index, column=0, sticky='news', pady=(table_border_width, table_border_width), padx=(table_border_width, 0))\n\n else:\n label.grid(row=index, column=0, sticky='news', pady=(0, table_border_width), padx=(table_border_width, 0))\n\n def initialise_control_button(button, button_save_string):\n \"\"\"\n Given a button object, and its purpose (left, right, up, down),\n Change the text on the button back to normal and,\n Exit the binding state\n \"\"\"\n\n # change the text back to black (from red)\n button['fg'] = 'black'\n\n # for each case of control, change its text back to what was stored.\n if button_save_string == 'left':\n if controller.slide_left_control in ('Left', 'Right', 'Up', 'Down'):\n button['text'] = controller.slide_left_control + ' arrow key'\n\n else:\n button['text'] = controller.slide_left_control\n\n elif button_save_string == 'right':\n if controller.slide_right_control in ('Left', 'Right', 'Up', 'Down'):\n button['text'] = controller.slide_right_control + ' arrow key'\n\n else:\n button['text'] = controller.slide_right_control\n\n elif button_save_string == 'up':\n if controller.slide_up_control in ('Left', 'Right', 'Up', 'Down'):\n button['text'] = controller.slide_up_control + ' arrow key'\n\n else:\n button['text'] = controller.slide_up_control\n\n elif button_save_string == 'down':\n if controller.slide_down_control in ('Left', 'Right', 'Up', 'Down'):\n button['text'] = controller.slide_down_control + ' arrow key'\n\n else:\n button['text'] = controller.slide_down_control\n\n # given button and which KEY it controls, it changes self variable storage\n def switch_keys(button, button_save_string):\n \"\"\"\n Given a button,\n and which command it controls\n \"\"\"\n\n def unbind_keys(button):\n \"\"\"Stop collecting keystrokes for this button by unbinding arrow keys + letter keys\"\"\"\n button.unbind('')\n button.unbind('')\n button.unbind('')\n button.unbind('')\n button.unbind('')\n\n def set_command_for(string, command):\n \"\"\"\n Given a command direction (left, right, up, down),\n and a keystroke symbol (a, b, return, up, down ... 
etc)\n It saves this command to the MainProgram variables\n \"\"\"\n\n button['fg'] = 'black'\n if string == 'left':\n controller.slide_left_control = command\n\n elif string == 'right':\n controller.slide_right_control = command\n\n elif string == 'up':\n controller.slide_up_control = command\n\n elif string == 'down':\n controller.slide_down_control = command\n\n # given a key event, it sets the commands to it and changes button text\n def key(event):\n \"\"\"\n Given a key event (collecting from tk.bind method)\n THAT IS NOT AN ARROW KEY\n and this key is not SPACE\n change text on BUTTON\n save EVENT\n stop COLLECTING, and exit BINDING\n \"\"\"\n nonlocal button_save_string\n self.is_binding = False\n try:\n if event.keysym == 'space':\n initialise_control_button(button, button_save_string)\n return\n button['text'] = str(event.keysym)\n set_command_for(button_save_string, event.keysym)\n\n except Exception:\n if event.char == 'space':\n initialise_control_button(button, button_save_string)\n return\n button['text'] = str(event.char)\n set_command_for(button_save_string, event)\n\n unbind_keys(button)\n\n\n def arrow_key(event):\n \"\"\"\n Given a key event (collecting from tk.bind method)\n that is an ARROW KEY\n and this key is not SPACE\n change text on BUTTON\n save EVENT\n stop COLLECTING, and exit BINDING\n \"\"\"\n set_command_for(button_save_string, event.keysym)\n button['text'] = event.keysym + ' arrow key'\n unbind_keys(button)\n\n\n def set_to_current_value():\n \"\"\"\n Exits binding and resets the text on the button to the original key\n \"\"\"\n self.is_binding = False\n button['fg'] = 'black'\n nonlocal button_save_string\n if button_save_string == 'left':\n if controller.slide_left_control in ('Left', 'Right', 'Up', 'Down'):\n button['text'] = controller.slide_left_control + ' arrow key'\n\n else:\n button['text'] = controller.slide_left_control\n\n elif button_save_string == 'right':\n if controller.slide_right_control in ('Left', 'Right', 'Up', 'Down'):\n button['text'] = controller.slide_right_control + ' arrow key'\n\n else:\n button['text'] = controller.slide_right_control\n\n elif button_save_string == 'up':\n if controller.slide_up_control in ('Left', 'Right', 'Up', 'Down'):\n button['text'] = controller.slide_up_control + ' arrow key'\n\n else:\n button['text'] = controller.slide_up_control\n\n elif button_save_string == 'down':\n if controller.slide_down_control in ('Left', 'Right', 'Up', 'Down'):\n button['text'] = controller.slide_down_control + ' arrow key'\n\n else:\n button['text'] = controller.slide_down_control\n\n \"\"\"\n BEGIN MAIN LOGIC FLOW FOR THIS FUNCTION - after all nested functions declared\n \"\"\"\n\n # If clicked and already in binding state, untoggle the button and exit binding.\n if button['text'] == 'Enter new key':\n set_to_current_value()\n unbind_keys(button)\n self.is_binding = False\n return\n\n # If clicked and another command is already in binding state, don't do anything\n elif self.is_binding:\n return\n\n # Otherwise, display the 'enter new key' option in red and begin binding\n else:\n button['text'] = 'Enter new key'\n button['fg'] = 'red'\n self.is_binding = True\n\n # Move the focus of the program to the button and start collecting button clicks\n button.focus_set()\n button.bind('', key)\n button.bind('', arrow_key)\n button.bind('', arrow_key)\n button.bind('', arrow_key)\n button.bind('', arrow_key)\n\n def useless_function(event):\n pass\n\n # Ignore any space key presses.\n button.bind('', useless_function)\n\n # Allow 
all the inputs to have this switch_key function on click\n # Initialise buttons with any saved buttons.\n control_up_input = tk.Button(table_2x4, text='', command=lambda: switch_keys(control_up_input, 'up'))\n initialise_control_button(control_up_input, 'up')\n\n control_down_input = tk.Button(table_2x4, text='', command=lambda: switch_keys(control_down_input, 'down'))\n initialise_control_button(control_down_input, 'down')\n\n control_left_input = tk.Button(table_2x4, text='', command=lambda: switch_keys(control_left_input, 'left'))\n initialise_control_button(control_left_input, 'left')\n\n control_right_input = tk.Button(table_2x4, text='', command=lambda: switch_keys(control_right_input, 'right'))\n initialise_control_button(control_right_input, 'right')\n\n # Grid the RIGHT HAND COLUMN of the 2x4 grid\n control_inputs = [control_up_input, control_down_input, control_left_input, control_right_input]\n for index, item in enumerate(control_inputs):\n table_2x4.grid_rowconfigure(index, weight=1)\n\n item.config(\n bg='white',\n borderwidth=0,\n font=controller.DESCRIPTION_FONT,\n justify='center'\n )\n\n if index == 0:\n item.grid(row=index, column=1, sticky='news', pady=(table_border_width, table_border_width), padx=(table_border_width, table_border_width))\n\n else:\n item.grid(row=index, column=1, sticky='news', pady=(0, table_border_width), padx=(table_border_width, table_border_width))\n\n # display the packed canvas\n self.display_object_on_canvas(\n table_2x4,\n controller.GAME_WIDTH // 2 - 0.5*table_2x4.winfo_reqwidth(),\n table_2x4.winfo_reqheight() + 140)\n\n # display the back button\n back_button_instructions = tk.Button(\n self,\n text='Save and go back to menu',\n font=controller.BUTTON_FONT,\n command=lambda: self.preliminary_check_controls()\n )\n\n self.display_object_on_canvas(\n back_button_instructions,\n controller.GAME_WIDTH - back_button_instructions.winfo_reqwidth() - 30,\n controller.GAME_HEIGHT - back_button_instructions.winfo_reqheight() - 30\n )\n\n # declare the error msg empty\n self.error_msg = tk.Label(\n self,\n text='',\n wraplength=200,\n width=20,\n height=3,\n justify=tk.LEFT,\n bg='white',\n fg='red'\n )\n\n def preliminary_check_controls(self):\n \"\"\"\n Perform checks on the validity of keys entered,\n and if all checks passed -\n exit and show_frame(MainMenu)\n \"\"\"\n\n # is the program still in a binding state?\n if self.is_binding:\n self.error_msg['text'] = 'You are still binding'\n self.display_object_on_canvas(\n self.error_msg,\n 50,\n self.controller.GAME_HEIGHT - self.error_msg.winfo_reqheight() - 15\n )\n\n # are the controls set all unique?\n elif len({\n self.controller.slide_up_control,\n self.controller.slide_down_control,\n self.controller.slide_left_control,\n self.controller.slide_right_control\n }) != 4:\n self.error_msg['text'] = 'All controls must be unique'\n self.display_object_on_canvas(\n self.error_msg,\n 50,\n self.controller.GAME_HEIGHT - self.error_msg.winfo_reqheight() - 15\n )\n\n # all tests passed?\n else:\n # save to file - do this\n\n # move to main menu frame\n self.controller.show_frame(MainMenu)\n\n def create_canvas(self):\n \"\"\"Renders a canvas onto the frame - covering the whole frame\"\"\"\n self.canvas = tk.Canvas(\n self,\n bd=-2,\n height=self.controller.GAME_HEIGHT,\n width=self.controller.GAME_WIDTH)\n self.canvas.pack(expand=tk.YES, fill=tk.BOTH)\n\n def display_background(self, imagepath):\n \"\"\"Given a path to an image - it draws this image onto the frame canvas\"\"\"\n background_image = 
Image.open(imagepath)\n self.canvas.image = ImageTk.PhotoImage(background_image)\n self.canvas.create_image((0, 0), image=self.canvas.image, anchor='nw')\n\n def display_object_on_canvas(self, tk_object, x, y):\n \"\"\"Given object and X,Y coordinates, it draws it onto the frame canvas\"\"\"\n button1_window = self.canvas.create_window(\n x,\n y,\n anchor='nw',\n window=tk_object)\n\n def remove_object_from_canvas(self, tk_object):\n \"\"\"Given object memory pointer, it DELETES it from the frame canvas\"\"\"\n self.canvas.delete(tk_object)\n\n\nclass MainGame(tk.Frame):\n \"\"\"This class is the 2048 game frame\"\"\"\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent, width=controller.GAME_WIDTH, height=controller.GAME_HEIGHT)\n self.controller = controller\n\n # create the grid frame\n self.GRID_WIDTH = 600\n self.TILES_PER_ROW = 4\n\n self.main_grid = tk.Frame(self, width=self.GRID_WIDTH, height=self.GRID_WIDTH, bg='black')\n self.main_grid.grid_propagate(0)\n\n # set the width of each row and column\n for i in range(self.TILES_PER_ROW):\n # document bug weight 1: do this\n self.main_grid.grid_columnconfigure(i, minsize=self.GRID_WIDTH // 4)\n self.main_grid.grid_rowconfigure(i, minsize=self.GRID_WIDTH // 4)\n\n # place it on screen\n self.main_grid.place(\n x=controller.GAME_WIDTH // 2 - self.main_grid.winfo_reqwidth() * 0.5,\n y=0)\n\n # generate the grid values (matrix of values)\n self.main_grid_values = [\n [0]*self.TILES_PER_ROW for _ in range(self.TILES_PER_ROW)\n ]\n\n # initiate by spawning 2 two's and rendering the frame\n self.add_two()\n self.add_two()\n self.update_grid()\n\n # set the focus onto the game frame, and bind the controls\n self.focus_set()\n self.bind('<{}>'.format(controller.slide_left_control), self.push_left)\n self.bind('<{}>'.format(controller.slide_right_control), self.push_right)\n self.bind('<{}>'.format(controller.slide_up_control), self.push_up)\n self.bind('<{}>'.format(controller.slide_down_control), self.push_down)\n\n # add the score text\n self.score_text = tk.Label(self, text='SCORE:', font=controller.BUTTON_FONT)\n self.score_text.place(\n x=30,\n y=controller.GAME_HEIGHT - self.score_text.winfo_reqheight() - 30)\n\n # add score value\n self.score_value = tk.StringVar(value='0')\n self.score_value_label = tk.Label(self, textvariable=self.score_value, font=controller.BUTTON_FONT)\n self.score_value_label.place(\n x=self.score_text.winfo_reqwidth() + 30,\n y=controller.GAME_HEIGHT - self.score_value_label.winfo_reqheight() - 30)\n\n # add the back button\n self.back_button_game = tk.Button(self, text='Back to main menu', font=controller.DESCRIPTION_FONT, command=lambda: controller.show_frame(MainMenu))\n self.back_button_game.place(x=controller.GAME_WIDTH - self.back_button_game.winfo_reqwidth() - 20, y=controller.GAME_HEIGHT - self.back_button_game.winfo_reqheight() - 20)\n\n\n # add the restart button\n self.restart_button = tk.Button(self, text='Restart game', font=controller.DESCRIPTION_FONT, command=self.restart)\n self.restart_button.place(\n x=controller.GAME_WIDTH - self.restart_button.winfo_reqwidth() - 30 - self.back_button_game.winfo_reqwidth(),\n y=controller.GAME_HEIGHT - self.back_button_game.winfo_reqheight() - 20)\n\n def update_grid(self):\n \"\"\"\n Redraws the game grid, from the matrix of main_grid_values\n \"\"\"\n def rgb_color(rgb):\n \"\"\"RGB to HEX\"\"\"\n return '#%02x%02x%02x' % rgb\n\n # clear on widgets preexisting on frame\n for widget in self.main_grid.winfo_children():\n 
widget.destroy()\n\n BORDER_WIDTH = 8\n\n # draw tiles\n for i in range(len(self.main_grid_values)):\n for j in range(len(self.main_grid_values[i])):\n if self.main_grid_values[i][j] == 0:\n # ignore 0 tiles for text\n tile = tk.Label(self.main_grid, bg='red', text='', font=('Arial', 18))\n else:\n # give tiles a different color\n exponent = int(math.log(int(self.main_grid_values[i][j]), 2))\n tile = tk.Label(\n self.main_grid,\n bg=rgb_color((255, 255 - exponent*31, 0)),\n text=self.main_grid_values[i][j],\n font=('Arial', 20)\n )\n\n # color the 0 tiles with grey\n if tile['text'] == '':\n tile.config(bg=rgb_color((210, 210, 210)))\n\n # border configuration - so that all borders are not overlapping\n if j == len(self.main_grid_values) - 1 and i == len(self.main_grid_values) - 1:\n tile.grid(row=i, column=j, padx=BORDER_WIDTH, pady=BORDER_WIDTH, sticky='news')\n\n elif j == len(self.main_grid_values) - 1:\n tile.grid(row=i, column=j, padx=BORDER_WIDTH, pady=(BORDER_WIDTH, 0), sticky='news')\n\n elif i == len(self.main_grid_values) - 1:\n tile.grid(row=i, column=j, padx=(BORDER_WIDTH, 0), pady=BORDER_WIDTH, sticky='news')\n\n else:\n tile.grid(row=i, column=j, padx=(BORDER_WIDTH, 0), pady=(BORDER_WIDTH, 0), sticky='news')\n\n def add_two(self):\n \"\"\"Adds a randomly placed two onto the game matrix\"\"\"\n i = random.randint(0, self.TILES_PER_ROW - 1)\n j = random.randint(0, self.TILES_PER_ROW - 1)\n\n # regenerate positions until empty one is found\n while self.main_grid_values[i][j] != 0:\n i = random.randint(0, self.TILES_PER_ROW - 1)\n j = random.randint(0, self.TILES_PER_ROW - 1)\n\n self.main_grid_values[i][j] = 2\n\n def stack(self):\n \"\"\"Stacks all tiles to the left (removes 0 tiles in between\"\"\"\n\n # create temporary matrix\n temp_matrix = [[0] * self.TILES_PER_ROW for _ in range(self.TILES_PER_ROW)]\n\n # for each row in actual matrix\n for i in range(len(self.main_grid_values)):\n empty_spot = 0\n # for each value in actual matrix\n for j in range(len(self.main_grid_values)):\n # if non zero, copy into optimal position into temporary matrix\n if self.main_grid_values[i][j] != 0:\n temp_matrix[i][empty_spot] = self.main_grid_values[i][j]\n empty_spot += 1\n\n # copy temp matrix to actual\n self.main_grid_values = temp_matrix\n\n def merge(self):\n \"\"\"Merges all adjacent congruent tiles in a leftwards direction\"\"\"\n for i in range(len(self.main_grid_values)):\n for j in range(len(self.main_grid_values) - 1):\n if self.main_grid_values[i][j] == self.main_grid_values[i][j+1]:\n self.score_value.set(str(int(self.score_value.get()) + self.main_grid_values[i][j]*2))\n self.main_grid_values[i][j] *= 2\n self.main_grid_values[i][j+1] = 0\n\n def transpose(self):\n \"\"\"Transposes the game matrix\"\"\"\n temp_matrix = [[0] * self.TILES_PER_ROW for _ in range(self.TILES_PER_ROW)]\n for i in range(len(self.main_grid_values)):\n for j in range(len(self.main_grid_values)):\n temp_matrix[j][i] = self.main_grid_values[i][j]\n\n self.main_grid_values = temp_matrix\n\n def reverse(self):\n \"\"\"Reverses the game matrix in horizontal direction\"\"\"\n temp_matrix = [[0] * self.TILES_PER_ROW for _ in range(self.TILES_PER_ROW)]\n for i in range(len(self.main_grid_values)):\n for j in range(len(self.main_grid_values)):\n temp_matrix[i][self.TILES_PER_ROW - 1 - j] = self.main_grid_values[i][j]\n\n self.main_grid_values = temp_matrix\n\n def any_empty_tiles(self):\n \"\"\"\n False if NO tiles remaining\n True if empty tiles exist\n \"\"\"\n for i in range(self.TILES_PER_ROW):\n for j 
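# The slide above is expressed as in-place stack() and merge() methods; note
# that merging leaves a zero gap behind unless the row is stacked again. A
# pure-function sketch of one row sliding left, assuming the same rules
# (equal neighbours merge once per move, zeros are empty):
def slide_row_left(row):
    tiles = [v for v in row if v != 0]          # stack: drop the gaps
    merged, i = [], 0
    while i < len(tiles):
        if i + 1 < len(tiles) and tiles[i] == tiles[i + 1]:
            merged.append(tiles[i] * 2)         # merge one pair, consume both
            i += 2
        else:
            merged.append(tiles[i])
            i += 1
    return merged + [0] * (len(row) - len(merged))

assert slide_row_left([2, 2, 2, 0]) == [4, 2, 0, 0]
assert slide_row_left([0, 4, 0, 4]) == [8, 0, 0, 0]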
in range(self.TILES_PER_ROW):\n if self.main_grid_values[i][j] == 0:\n return True\n\n return False\n\n def any_possible_moves_horizontal(self):\n \"\"\"\n False if NO moves possible left to right\n True if merges possible on the horizontal\n \"\"\"\n for i in range(self.TILES_PER_ROW):\n for j in range(self.TILES_PER_ROW - 1):\n if self.main_grid_values[i][j] == self.main_grid_values[i][j+1]:\n return True\n\n return False\n\n def any_possible_moves_vertical(self):\n \"\"\"\n False if NO moves possible up and down\n True if merges possible on the vertical\n \"\"\"\n for i in range(self.TILES_PER_ROW - 1):\n for j in range(self.TILES_PER_ROW):\n if self.main_grid_values[i][j] == self.main_grid_values[i+1][j]:\n return True\n\n return False\n\n def is_game_finished(self):\n \"\"\"\n Checks if game finished:\n if not, do nothing\n otherwise, it is, and UNBIND controls, and DISPLAY GAME OVER\n \"\"\"\n if not self.any_possible_moves_horizontal() and not self.any_possible_moves_vertical() and not self.any_empty_tiles():\n self.game_over_button = tk.Label(self, text='Game Over', padx=20, pady=20, font=self.controller.TITLE_FONT)\n self.game_over_button.place(\n x=self.controller.GAME_WIDTH // 2 - 0.5 * self.game_over_button.winfo_reqwidth() ,\n y=self.controller.GAME_HEIGHT // 2 - 0.5 * self.game_over_button.winfo_reqheight()- 40)\n\n self.unbind('<{}>'.format(self.controller.slide_left_control))\n self.unbind('<{}>'.format(self.controller.slide_right_control))\n self.unbind('<{}>'.format(self.controller.slide_up_control))\n self.unbind('<{}>'.format(self.controller.slide_down_control))\n\n def restart(self):\n \"\"\"Clear game matrix, score, and rebind the controls, and recreate new board\"\"\"\n self.main_grid_values = [\n [0] * self.TILES_PER_ROW for _ in range(self.TILES_PER_ROW)\n ]\n\n self.score_value.set('0')\n self.add_two()\n self.add_two()\n self.update_grid()\n\n self.bind('<{}>'.format(self.controller.slide_left_control), self.push_left)\n self.bind('<{}>'.format(self.controller.slide_right_control), self.push_right)\n self.bind('<{}>'.format(self.controller.slide_up_control), self.push_up)\n self.bind('<{}>'.format(self.controller.slide_down_control), self.push_down)\n\n self.game_over_button.destroy()\n\n def push_left(self, event):\n \"\"\"Control to swipe tiles left\"\"\"\n self.stack()\n self.merge()\n\n if self.any_empty_tiles():\n self.add_two()\n\n self.update_grid()\n self.is_game_finished()\n\n def push_up(self, event):\n \"\"\"Control to swipe tiles up\"\"\"\n self.transpose()\n self.stack()\n self.merge()\n self.transpose()\n\n if self.any_empty_tiles():\n self.add_two()\n\n self.update_grid()\n self.is_game_finished()\n\n def push_right(self, event):\n \"\"\"Control to swipe tiles right\"\"\"\n self.reverse()\n self.stack()\n self.merge()\n self.reverse()\n\n if self.any_empty_tiles():\n self.add_two()\n\n self.update_grid()\n self.is_game_finished()\n\n def push_down(self, event):\n \"\"\"Control to swipe tiles down\"\"\"\n self.transpose()\n self.reverse()\n self.stack()\n self.merge()\n self.reverse()\n self.transpose()\n\n if self.any_empty_tiles():\n self.add_two()\n\n self.update_grid()\n self.is_game_finished()\n\n\nif __name__ == '__main__':\n app = MainProgram()\n app.mainloop()","sub_path":"previous_versions/version_2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":34603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"451836957","text":"import logging\nimport os\nfrom flask_restplus 
import Namespace, Resource\n\nlog = logging.getLogger(os.path.basename(__file__))\napi = Namespace('ping', description='Health check: Ping? - Pong!')\n\n\n@api.route('', strict_slashes=False)\nclass PingEndpoint(Resource):\n def get(self):\n log.info(\"GET to /ping. I'm alive!\")\n return {'Ping': 'Pong!'}\n","sub_path":"tracker/apis/ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"100441724","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/ztfy/thesaurus/loader/config.py\n# Compiled at: 2013-09-04 09:38:38\nfrom ztfy.thesaurus.interfaces.loader import IThesaurusLoaderConfiguration, IThesaurusUpdaterConfiguration, IThesaurusExporterConfiguration\nfrom zope.interface import implements\nfrom zope.schema.fieldproperty import FieldProperty\n\nclass ThesaurusLoaderConfiguration(object):\n \"\"\"Thesaurus loader configuration\"\"\"\n implements(IThesaurusLoaderConfiguration)\n name = FieldProperty(IThesaurusLoaderConfiguration['name'])\n data = FieldProperty(IThesaurusLoaderConfiguration['data'])\n format = FieldProperty(IThesaurusLoaderConfiguration['format'])\n import_synonyms = FieldProperty(IThesaurusLoaderConfiguration['import_synonyms'])\n language = FieldProperty(IThesaurusLoaderConfiguration['language'])\n encoding = FieldProperty(IThesaurusLoaderConfiguration['encoding'])\n\n def __init__(self, data={}):\n if data:\n name = data.get('name')\n if name:\n self.name = name\n self.data = data.get('data')\n self.format = data.get('format')\n self.import_synonyms = data.get('import_synonyms')\n self.language = data.get('language')\n self.encoding = data.get('encoding')\n\n\nclass ThesaurusUpdaterConfiguration(ThesaurusLoaderConfiguration):\n \"\"\"Thesaurus updater configuration\"\"\"\n implements(IThesaurusUpdaterConfiguration)\n clear = FieldProperty(IThesaurusUpdaterConfiguration['clear'])\n conflict_suffix = FieldProperty(IThesaurusUpdaterConfiguration['conflict_suffix'])\n\n def __init__(self, data={}):\n super(ThesaurusUpdaterConfiguration, self).__init__(data)\n if data:\n self.clear = data.get('clear')\n self.conflict_suffix = data.get('conflict_suffix')\n\n\nclass ThesaurusExporterConfiguration(object):\n \"\"\"Thesaurus exporter configuration\"\"\"\n implements(IThesaurusExporterConfiguration)\n filename = FieldProperty(IThesaurusExporterConfiguration['filename'])\n format = FieldProperty(IThesaurusExporterConfiguration['format'])\n extract = FieldProperty(IThesaurusExporterConfiguration['extract'])\n\n def __init__(self, data={}):\n if data:\n self.filename = data.get('filename')\n self.format = data.get('format')\n self.extract = data.get('extract')","sub_path":"pycfiles/ztfy.thesaurus-0.2.16.1-py2.7/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"557121533","text":"# encoding = utf-8\r\n\"\"\"\r\nDetermination of LDA topic numbers\r\n\"\"\"\r\nfrom __future__ import print_function\r\nimport pandas as pd\r\nfrom gensim.corpora import Dictionary\r\nimport os\r\nimport gensim\r\nimport random\r\n\r\nimport logging\r\nfrom collections import OrderedDict\r\n\r\nfrom gensim.corpora import TextCorpus, MmCorpus\r\nfrom gensim import utils, models\r\nfrom topicAyc.Fun_staticLDA import 
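# The loader/updater/exporter classes above all declare __init__(self, data={}).
# A mutable default is created once and shared across calls in Python, which is
# a latent hazard even though these classes only read from it. The conventional
# fix, shown on a stand-in class (names here are illustrative):
class LoaderConfig(object):
    def __init__(self, data=None):
        data = data if data is not None else {}
        self.name = data.get('name')
        self.encoding = data.get('encoding')

a, b = LoaderConfig(), LoaderConfig({'name': 'skos'})
assert a.name is None and b.name == 'skos'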
Fun_staticLDA\r\n\r\nlogging.basicConfig(level=logging.ERROR) # disable warning logging\r\n\r\n\r\nimport logging\r\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\r\n\r\n\r\n# Some useful utility functions in case you want to save your models.\r\n\r\nmodels_dir = r\"D:\\3policyAyc\\_database\\_workshop\"\r\ndef save_models(named_models):\r\n for num_topics, model in named_models.items():\r\n model_path = os.path.join(models_dir, 'lda-MALLET-k%d.lda' % num_topics)\r\n model.save(model_path, separately=False)\r\n\r\n\r\ndef load_models():\r\n trained_models = OrderedDict()\r\n for num_topics in range(5, 31, 5):\r\n model_path = os.path.join(models_dir, 'lda-all7-k%d.lda' % num_topics)\r\n print(\"Loading LDA(k=%d) from %s\" % (num_topics, model_path))\r\n trained_models[num_topics] = models.LdaMulticore.load(model_path)\r\n\r\n return trained_models\r\nfrom gensim import models, utils\r\n\r\n# load LDA model(class:LDAmodel)\r\nmodelpath = 'D:\\\\3policyAyc\\\\_database\\\\_workshop\\\\lda-all0-k10.lda'\r\n\r\n#models.wrappers.LdaMallet.load(modelpath)\r\n\r\n#models.LdaModel.load(modelpath)\r\n\r\n\r\n# stopwords = []\r\n# fp = open('D:/3policyAyc/_database/_auxdata/rmvs_for_wordcloud.txt', 'r', encoding='utf-8')\r\n# for line in fp.readlines():\r\n# if line != '' and line != '\\n':\r\n# stopwords.append(line.strip('\\n'))\r\n# stopwords = list(set(stopwords))\r\n# noabov = [0.6, 0.5, 0.4, 0.3, 0.25]\r\n# for i in range(5):\r\n\r\n\r\ndatapath = 'D:/3policyAyc/_database/_policytxt/Wordlist_all5.csv'\r\nstaticLDA = Fun_staticLDA(datapath, nobelow=5, noabove=0.7) # initialize static LDA model\r\n\r\n\"\"\"Train LDA model\"\"\"\r\n# training,holding, and testing samples\r\nrdocs = staticLDA.docs\r\ntraindocs = rdocs[:15000]\r\ntestdocs = rdocs[15000:]\r\ntraining_corpus, training_dictionary = staticLDA.getSamples(traindocs)\r\ntesting_corpus, testing_dictionary = staticLDA.getSamples(testdocs)\r\n# Start training using multicore\r\ntrained_models = OrderedDict()\r\n\r\nfor num_topics in range(26,37,1):\r\n print(\"Training LDA(k=%d)\" % num_topics)\r\n lda = models.LdaMulticore(\r\n training_corpus, id2word=training_dictionary, num_topics=num_topics, workers=4,\r\n passes=10, iterations=100, random_state=42, eval_every=None,\r\n alpha='asymmetric', # shown to be better than symmetric in most cases\r\n decay=0.5, offset=64 # best params from Hoffman paper\r\n )\r\n trained_models[num_topics] = lda\r\n\r\n# writer = pd.ExcelWriter('D:/3policyAyc/_database/_interresults/0525LDAtest.xlsx')\r\n# topn = 50\r\n# for num_topic,ldamod in trained_models.items():\r\n# topdict = {}\r\n# for topic in range(num_topic):\r\n# temp = ldamod.show_topic(topic, topn)\r\n# terms = [tup[0] for tup in temp]\r\n# topdict.update({'Topic' + str(topic + 1): terms})\r\n# tdf = pd.DataFrame(topdict)\r\n# tdf.to_excel(writer,sheet_name='Topic'+str(num_topic))\r\n# writer.save()\r\n\r\nsave_models(trained_models)\r\n\r\ntrained_models = load_models() # for testing\r\n\r\n# compare different topic numbers from coherence and perplexity\r\n# coherence calculation\r\ncm = models.CoherenceModel.for_models(\r\n trained_models.values(), staticLDA.dictionary, texts=staticLDA.docs, coherence='c_v')\r\n\r\ncoherence_estimates = cm.compare_models(trained_models.values())\r\ncoherences = dict(zip(trained_models.keys(), coherence_estimates))\r\n\r\n[cohrence_best_num, cohtuples] = staticLDA.coherenceRankings(cm, coherences) # rank by average coherence\r\nbest_model1 = 
trained_models[cohrence_best_num]\r\ncohdf = pd.DataFrame(cohtuples, columns=['topic_numbers','cohrence_values']) # save coherence values\r\nbest_model1.save('./_database/_workshop/Static'+str(cohrence_best_num)+'_10to50cohebest_FINAL.lda', separately=False)\r\n\r\n# perplexity calculation\r\n\r\ntls,count = [],0\r\nfor tn, lda in trained_models.items():\r\n tls.append((tn, lda.log_perplexity(testing_corpus)))\r\n count += 1\r\n print('\\r计算perplexity中:{:.2f}%'.format(count*100/len(trained_models)), end='')\r\nranked = sorted(tls, key=lambda tup: tup[1], reverse=True)\r\n\r\n[perplexity_best_num, perptuples] = staticLDA.perplexityRankings(trained_models) # rank by log perplexity\r\nperplexity_best_num = ranked[0][0]\r\nbest_model2 = trained_models[perplexity_best_num]\r\nperpdf = pd.DataFrame(ranked, columns=['topic_numbers', 'perp_values'])\r\nperpdf.to_excel('./_database/_interresults/迷惑度-final1.xlsx')\r\nbest_model2.save('./_database/_workshop/Static'+str(perplexity_best_num)+'_10to50perpbest_final.lda', separately=False)\r\n\r\ncombdf = pd.merge(cohdf, perpdf, on='topic_numbers')\r\ncombdf.to_excel('./_database/_interresults/测试一致性和迷惑度-final.xlsx')\r\n\r\n\r\n\r\n# topic calculation,Output results to excel\r\nmodel = models.LdaMulticore.load('D:/3policyAyc/_database/_workshop/Static24_10to50cohebest.lda')\r\nwriter = pd.ExcelWriter('D:/3policyAyc/_database/_interresults/0525LDAresults.xlsx')\r\ntopn = 50\r\nfor num_topic,ldamod in trained_models.items():\r\n topdict = {}\r\n for topic in range(24):\r\n temp = model.show_topic(topic, topn)\r\n terms = [tup[0] for tup in temp]\r\n topdict.update({'Topic' + str(topic + 1): terms})\r\n tdf = pd.DataFrame(topdict)\r\n tdf.to_excel(writer)\r\n writer.save()\r\n # tdf.to_excel(writer,sheet_name='Topic'+str(num_topic))\r\n# writer.save()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"topicAyc/TopicSelectLDA.py","file_name":"TopicSelectLDA.py","file_ext":"py","file_size_in_byte":5678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"84097142","text":"from bootstrap.bootstraper import App\nfrom utility.generator import Generator\nfrom utility.query_analyzer import QueryAnalyzer\n\ngenerator = Generator() # Generator of data and queries\ngenerator.generate_data()\ngenerator.generate_queries()\n\napplication = App() # Boostrap TPC-DS banchmark\napplication.create_database() # Set up new database\napplication.preprocess_queries() # Preprocess queries before running\n\nquery_analzer = QueryAnalyzer(application.spark) # Get an instance of query analyzer\nquery_analzer.run_benchmark() # Finally run a banchmark\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"387644553","text":"import json\n\ncount = 0\nwith open('reviews.json', 'w') as output_f:\n with open('yelp_academic_dataset_review.json', 'r') as input_f:\n for line in input_f:\n if count >= 1000:\n break\n else:\n output_f.write(line)\n count += 1\ninput_f.close()\noutput_f.close()","sub_path":"parseData.py","file_name":"parseData.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"193802306","text":"\"\"\"\nTests for how we are calculating data.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.util.testing import assert_almost_equal\nfrom pandas.util.testing import 
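# The selection loop above ranks LdaMulticore models by c_v coherence and log
# perplexity. The same recipe in a self-contained miniature (gensim assumed
# available; the toy corpus is illustrative only):
from gensim.corpora import Dictionary
from gensim.models import CoherenceModel, LdaModel

texts = [['policy', 'tax', 'budget'], ['model', 'training', 'loss'],
         ['tax', 'budget', 'reform'], ['deep', 'model', 'training']] * 10
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(t) for t in texts]

scores = {}
for k in (2, 3, 4):
    lda = LdaModel(corpus=corpus, id2word=dictionary, num_topics=k,
                   passes=5, random_state=42)
    cm = CoherenceModel(model=lda, texts=texts, dictionary=dictionary,
                        coherence='c_v')
    scores[k] = (cm.get_coherence(), lda.log_perplexity(corpus))

best_k = max(scores, key=lambda k: scores[k][0])    # highest coherence wins
print(best_k, scores[best_k])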
assert_numpy_array_equal\n\nfrom scripts.CalculateData import average_monthly_open_close\nfrom scripts.CalculateData import biggest_loser\nfrom scripts.CalculateData import average_volume\nfrom scripts.CalculateData import busy_days\nfrom scripts.CalculateData import max_daily_profit\n\n\nclass TestCalculateData(unittest.TestCase):\n\n def test_grouping_by_month_and_ticker(self):\n dates = pd.date_range('20130101', periods=5, freq='M').tolist()\n # Group the 5 rows into two separate months\n dates[1] = dates[0]\n dates[2] = dates[0]\n dates[4] = dates[3]\n\n values = np.random.rand(5)\n df = pd.DataFrame({\n 'date': dates,\n 'ticker': ['A', 'A', 'Z', 'A', 'Z'],\n 'value': values\n })\n result = average_monthly_open_close(df)\n\n expectedDates = np.array([dates[0].to_period('M'), dates[3].to_period('M'),\n dates[0].to_period('M'), dates[3].to_period('M')])\n\n # Taking the average of just two values is like below, everything else shows up just once in the month\n firstMonth_A = (values[0] + values[1]) / 2.\n expectedAvgs = np.array([firstMonth_A, values[3], values[2], values[4]])\n\n assert_numpy_array_equal(expectedDates, result.index.get_level_values('date').values)\n assert_numpy_array_equal(expectedAvgs, result['value'].values)\n\n def test_max_daily_profit(self):\n dates = pd.date_range('20130101', periods=5).tolist()\n\n los = np.random.rand(5)\n # We expect the last trading day/ticker to be max for this test\n his = np.array([los[idx] + idx for idx,x in enumerate(range(5))])\n df = pd.DataFrame({\n 'date': dates,\n 'ticker': ['A', 'A', 'Z', 'A', 'Z'],\n 'lo': los,\n 'hi': his\n })\n\n profit_col_name = 'anything_i_like'\n result = max_daily_profit(df, profit_column_name=profit_col_name, high_col_name='hi', low_col_name='lo')\n\n self.assertFalse(result[profit_col_name].empty)\n\n expectedMaxs = np.array([his[3] - los[3], his[4] - los[4]])\n assert_numpy_array_equal(expectedMaxs, result[profit_col_name].values)\n\n @unittest.skip(\"Originally, intent was to display the first date that had the max profit.\"\n \" But Pandas doesn't guaranteed order. 
Unignore this test to observe behavior over different runs\")\n def test_max_daily_profit_ties_are_broken_arbitrarily_by_pandas(self):\n dates = pd.date_range('20130101', periods=5).tolist()\n\n los = np.random.rand(5)\n his = los + 1\n df = pd.DataFrame({\n 'date': dates,\n 'ticker': ['A', 'A', 'Z', 'A', 'Z'],\n 'lo': los,\n 'hi': his\n })\n\n profit_col_name = 'anything_i_like'\n result = max_daily_profit(df, profit_column_name=profit_col_name, high_col_name='hi', low_col_name='lo')\n\n self.assertFalse(result[profit_col_name].empty)\n\n expectedMaxs = np.array([his[3] - los[3], his[4] - los[4]]) # All values are the same in this case\n assert_almost_equal(expectedMaxs, result[profit_col_name].values)\n\n print(result['date'])\n expectedDates = np.array(pd.to_datetime([dates[0], dates[2]]))\n assert_numpy_array_equal(expectedDates, result['date'].values)\n\n def test_busy_day(self):\n dates = pd.date_range('20130101', periods=5).tolist()\n\n vols = np.ones(5).tolist()\n vols[1] = 1.5\n vols[4] = 1.5\n\n volume_col_name = 'vol'\n df = pd.DataFrame({\n 'date': dates,\n 'ticker': ['A', 'A', 'Z', 'A', 'Z'],\n 'vol': vols\n })\n\n avgs = average_volume(df, volume_col_name)\n result = busy_days(df, avgs, volume_col_name)\n\n expectedVols = np.array([vols[1], vols[4]])\n assert_numpy_array_equal(expectedVols, result[volume_col_name].values)\n\n def test_biggest_loss(self):\n dates = pd.date_range('20130101', periods=5).tolist()\n\n open = np.ones(5)\n close = open - .5\n\n df = pd.DataFrame({\n 'date': dates,\n 'ticker': ['A', 'A', 'Z', 'A', 'Z'],\n 'open_mind': open,\n 'better_than_a_closed_one': close\n })\n\n result = biggest_loser(df, open_col_name='open_mind', close_col_name='better_than_a_closed_one')\n\n expectedBiggest = 'A'\n self.assertEqual(expectedBiggest, result.index[0])\n self.assertEqual(3, result['open_mind'].values[0])\n\n def test_biggest_loss_with_tie_returns_first_ticker_by_lexographic_order(self):\n dates = pd.date_range('20130101', periods=4).tolist()\n\n open = np.ones(4)\n close = open - .5\n\n df = pd.DataFrame({\n 'date': dates,\n 'ticker': ['AA', 'A', 'A', 'AA'],\n 'open_mind': open,\n 'better_than_a_closed_one': close\n })\n\n result = biggest_loser(df, open_col_name='open_mind', close_col_name='better_than_a_closed_one')\n\n self.assertEqual('A', result.index[0])\n self.assertEqual(2, result['open_mind'].values[0])\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromModule(TestCalculateData())\n unittest.TextTestRunner().run(suite)\n","sub_path":"tests/TestCalculateData.py","file_name":"TestCalculateData.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"304685612","text":"from __future__ import division\nfrom fatiando.seismic.wavefd import (Ricker,\n Gauss,\n ElasticSH,\n ElasticPSV,\n Scalar)\nfrom fatiando.seismic import analytic\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\n\n\ndef test_sources():\n \"testing simple source parameters\"\n w = Ricker(amp=10, cf=20)\n assert (w(0) == 10.0) # maximum\n assert_almost_equal(w(-2), w(2), decimal=5,\n err_msg=\"Ricker has no symmetry\")\n w = Gauss(amp=10, cf=20)\n assert_almost_equal(w(0), 0.0, decimal=4,\n err_msg=\"Gauss has no symmetry\")\n assert_almost_equal(w(-2), w(2), decimal=5,\n err_msg=\"Gauss has no symmetry\")\n\ndef test_wavefd_elastipsv_run():\n \"make a simple run of elastic psv\"\n shape = (50, 50)\n pvel = 4000*np.ones(shape)\n svel = 3000*np.ones(shape)\n 
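# The tests above pin down max_daily_profit() as "per ticker, the day with the
# largest hi - lo spread". The pandas idiom they imply is groupby + idxmax; a
# stand-in implementation (column names are the test's, the rest illustrative):
import pandas as pd

def max_daily_profit(df, high='hi', low='lo'):
    out = df.assign(profit=df[high] - df[low])
    return out.loc[out.groupby('ticker')['profit'].idxmax()]

demo = pd.DataFrame({'ticker': ['A', 'A', 'Z'],
                     'date': pd.date_range('2013-01-01', periods=3),
                     'lo': [1.0, 1.0, 2.0], 'hi': [2.0, 4.0, 2.5]})
print(max_daily_profit(demo))    # A keeps the 3.0-profit day, Z the 0.5 one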
density = 2200*np.ones(shape)\n sim = ElasticPSV(pvel, svel, density, spacing=10)\n sim.add_point_source((shape[0]//2, shape[1]//2),\n dip=45, source=Ricker(5, 10, 1./10))\n sim.run(180)\n\ndef test_wavfd_scalar_waveform():\n \"wave form comparison \"\n shape = (60, 50)\n velocity = 2000*np.ones(shape)\n sim = Scalar(velocity, (50, 50))\n fricker = Ricker(1., 2.5, 1.2/2.5)\n sim.add_point_source((25, 45), fricker) # far from boundary\n sim.run(100)\n seism = sim[:, 25, 25]\n t = np.linspace(0.0, sim.dt*sim.simsize, sim.simsize)\n analy = analytic.wave2d_analytic(20*50, 2000., sim.dt, fricker(t))\n # normalize everything\n analy = (analy - analy.min())/(analy.max() - analy.min())\n seism = (seism - seism.min())/(seism.max() - seism.min())\n analy -= analy.mean()\n seism -= seism.mean()\n assert np.all(abs(analy-seism) <= 0.03) # 3 percent amp. diff.\n\ndef test_wavefd_elasticsh_run():\n \"run and sum two simulation with different phase wavelets that equals zero\"\n shape = (50, 50)\n velocity = 1500*np.ones(shape)\n density = 2200*np.ones(shape)\n sim = ElasticSH(velocity, density, (5, 5))\n sim.add_point_source((shape[0]//2, shape[1]//2), Ricker(5, 10.))\n sim.run(100)\n sim_iphase = ElasticSH(velocity, density, (5, 5))\n sim_iphase.add_point_source((shape[0]//2, shape[1]//2), -1*Ricker(5, 10.))\n sim_iphase.run(100)\n diff = sim[-1] + sim_iphase[-1]\n assert np.all(diff <= 0.01), 'diff: %s' % (str(diff))\n\n","sub_path":"test/test_wavefd.py","file_name":"test_wavefd.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"101175810","text":"import numpy as np\nfrom pymatgen.io.vasp import Vasprun\nfrom pymatgen.io.vasp.outputs import CompleteDos, Dos\nfrom pymatgen.io.vasp.outputs import Spin, Structure\nimport os\n\nv = Vasprun(os.path.abspath(os.curdir) + '/vasprun.xml')\n\nc = CompleteDos(v.structures, v.tdos, v.pdos)\n\n\nif __name__ == '__main__':\n s: Structure = c.structure[0]\n z = []\n for i in s.frac_coords:\n b = True\n for j in z:\n if abs(j - i[2]) < 1e-2:\n b = False\n if b:\n z.append(i[2])\n z = sorted(z)\n same = [[] for _ in range(len(z))]\n for k, i in enumerate(s.frac_coords):\n for ind, j in enumerate(z):\n if abs(i[2] - j) < 1e-2:\n same[ind].append(k)\n dos = [None for _ in range(len(z))]\n pdos = [None for _ in range(len(z))]\n sites = s.sites\n for ind, i in enumerate(same):\n for j in i:\n if dos[ind] is None:\n dos[ind] = c.get_site_dos(sites[j]).densities[Spin.up]\n pdos[ind] = c.get_site_dos(sites[j])\n else:\n dos[ind] += c.get_site_dos(sites[j]).densities[Spin.up]\n pdos[ind].__add__(c.get_site_dos(sites[j]))\n dos = np.array(dos).transpose()\n with open('dos.dat', 'w') as f:\n for ind, i in enumerate(c.energies):\n f.write('%f\\t' % (i - c.efermi))\n for j in dos[ind][:-1]:\n f.write('%f\\t' % j)\n f.write('%f' % dos[ind][-1])\nf.write('\\n')","sub_path":"vasprun_related/layeredDos.py","file_name":"layeredDos.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"22348878","text":"import os\nimport tensorflow.compat.v1 as tf\nimport math\nimport numpy as np\nimport itertools\n\ntf.enable_eager_execution()\n\nfrom waymo_open_dataset.utils import range_image_utils\nfrom waymo_open_dataset.utils import transform_utils\nfrom waymo_open_dataset.utils import frame_utils\nfrom waymo_open_dataset import dataset_pb2 as open_dataset\n\nclass WaymoDataset():\n def 
__init__(self,path):\n self.filename = path\n\n def load_tfrecord(self,idx):\n dataset = tf.data.TFRecordDataset(self.filename, compression_type='')\n frame = None\n for data in dataset:\n frame = open_dataset.Frame()\n frame.ParseFromString(bytearray(data.numpy()))\n break\n return frame\n \n def camera_image(self,frame):\n \"\"\"return images from frame.\"\"\"\n images = []\n for index, image in enumerate(frame.images):\n images.append(tf.image.decode_jpeg(image.image).numpy())\n return images\n \n def lidar(self,frame):\n (range_images, camera_projections,range_image_top_pose) = \\\n frame_utils.parse_range_image_and_camera_projection(frame)\n points, _ = frame_utils.convert_range_image_to_point_cloud(frame,range_images, \\\n camera_projections, \\\n range_image_top_pose)\n points_all = np.concatenate(points, axis=0)\n return points_all\n\n def calib(self,frame):\n calibrations = frame.context.camera_calibrations\n Ks = []\n Es = []\n for idx, param in enumerate(calibrations):\n intrinsic = calibrations[idx].intrinsic\n K = np.array([[intrinsic[0],0,intrinsic[2]],\\\n [0,intrinsic[1],intrinsic[3]],[0,0,1]])\n extrinsic = calibrations[idx].extrinsic.transform\n E = np.array(extrinsic).reshape((4,4))\n Ks.append(K)\n Es.append(E)\n return Ks,Es\n\n def get(self,idx):\n frame = self.load_tfrecord(idx)\n images = self.camera_image(frame)\n lidar = self.lidar(frame)\n param = self.calib(frame)\n return images,lidar,param\n","sub_path":"dataset/waymo_dataset.py","file_name":"waymo_dataset.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"293252960","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.layers import Conv2D, BatchNormalization, Activation,LeakyReLU, Dense, Conv2DTranspose, Input, Lambda, Reshape, Flatten, UpSampling2D, MaxPooling2D\nfrom keras.models import Model, Sequential\nimport keras.backend as K\nfrom keras import initializers\nimport tensorflow as tf\n\nclass SVHNganGenerator():\n\n def __init__(self):\n self.latent_dim = 100 # Dimension of Latent Representation\n self.GAN = None\n self.weights_path = './model weights/gan_hans_svhn_extra.h5'\n\n \n def GenerateModel(self):\n gf_dim = 64\n gan = Sequential()\n gan.add(Dense(4*4*512, input_shape=(100,)))\n gan.add(Reshape([4,4,512]))\n gan.add(BatchNormalization())\n gan.add(LeakyReLU(alpha=.2))\n gan.add(Conv2DTranspose(256, kernel_size=5, strides=2, padding='same')) \n gan.add(BatchNormalization())\n gan.add(LeakyReLU(alpha=.2))\n gan.add(Conv2DTranspose(128, kernel_size=5, strides=2, padding='same')) \n gan.add(BatchNormalization())\n gan.add(LeakyReLU(alpha=.2))\n gan.add(Conv2DTranspose(3, kernel_size=5, strides=2, padding='same'))\n gan.add(Activation('tanh')) \n\n self.GAN = gan\n\n\n \n\n\n def LoadWeights(self):\n self.GAN.load_weights(self.weights_path)\n\n def GetModels(self):\n return self.GAN\n\nif __name__ == '__main__':\n Gen = SVHNganGenerator()\n Gen.GenerateModel()\n Gen.weights_path = '../model weights/gan_hans_svhn_extra.h5'\n Gen.LoadWeights()\n gan = Gen.GetModels()\n \n n_samples = 10\n len_z = Gen.latent_dim\n z = np.random.normal(0,1,size=(n_samples*n_samples ,len_z))\n sampled = gan.predict(z)\n sampled = (sampled+1)/2\n \n k = 0\n for i in range(n_samples):\n for j in range(n_samples):\n img = sampled[k]\n plt.subplot(n_samples,n_samples,k+1)\n plt.imshow(img)\n plt.axis(\"Off\")\n k=k+1\n 
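# Why the generator above reshapes to 4x4x512 first: under Keras 'same'
# padding, each stride-2 Conv2DTranspose doubles the spatial size, so the
# three transposed convolutions map 4 -> 8 -> 16 -> 32, landing exactly on
# SVHN's 32x32x3 images. The arithmetic, checked standalone:
def transposed_conv_out(size, stride):
    return size * stride    # Conv2DTranspose output size with padding='same'

size = 4
for _ in range(3):
    size = transposed_conv_out(size, 2)
assert size == 32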
plt.show()","sub_path":"scripts/generators/SVHNgan.py","file_name":"SVHNgan.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"552779734","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis is a simple Python script that converts BibTeX entries to CSV.\n\"\"\"\n\nfrom re import match\nfrom re import search\n\nimport pandas as pd\n\n''' Parses input file handle or text object holding bibtex formatted string data into a pandas dataframe. ''' \ndef parse_bib(handle):\n entries = []\n entry = {}\n \n for line in handle:\n if (match('^@', line.strip())):\n if entry != {}:\n entries.append(entry)\n entry = {}\n elif (match('url', line.strip())):\n value, = findall('\\{(\\S+)\\}', line)\n entry[\"url\"] = value\n elif (search('=', line.strip())):\n key, value = [v.strip(\" {},\\n\") for v in line.split(\"=\", 1)]\n entry[key] = value\n\n keys = set()\n for entry in entries:\n for key in entry.keys():\n keys.add(key)\n \n df = pd.DataFrame(columns=sorted(keys))\n for entry in entries:\n df = df.append(entry, ignore_index=True)\n \n return df\n\ndef convert(input_file, output_file):\n with open(input_file, 'r', encoding=\"utf8\") as handle:\n df = parse_bib(handle)\n df.to_csv(output_file, encoding='utf-8-sig')\n \nif __name__ == \"__main__\":\n convert(sys.argv[1], sys.argv[2])\n ","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"502394977","text":"\"\"\"Math functions for calculator.\"\"\"\n\n\ndef add(list):\n \"\"\"Sum all elements together\"\"\"\n sum = 0\n for n in list:\n sum += n\n return sum\n\n\ndef subtract(list):\n \"\"\"Subtract every other elemnt from [0]\"\"\"\n total = list[0]\n list = list[1:]\n for n in list:\n total -= n\n return total\n\ndef multiply(list):\n \"\"\"Multiply each element together\"\"\"\n total = 1\n for n in list:\n total *= n\n return total\n\ndef divide(num1, num2):\n \"\"\"Dividing [0] by each element\"\"\"\n total = list[0]\n list = list[1:]\n for n in list:\n total /= n\n return total\n\ndef square(list):\n \"\"\"Return the square of the input.\"\"\"\n if len(list) > 1:\n return -1;\n else:\n return list[0]**2\n\ndef cube(list):\n \"\"\"Return the cube of the input.\"\"\"\n if len(list) > 1:\n return -1;\n else:\n return list[0]**3\n\ndef power(list):\n \"\"\"Exponential expression where [0] ** [1] ** [2], etc.\"\"\"\n base_value = list[0]\n start_exponent_calc = list[len(list)-1]\n list = list[1:len(list)-2] #chops aff the base number and first exponent\n list = list[::-1] #exponents evaluated right to left so reverse list\n for n in list:\n start_exponent_calc = n ** start_exponent_calc\n return base_value ** start_exponent_calc\n\ndef mod(list):\n \"\"\"Mod [0] by each element\"\"\"\n total = list[0]\n list = list[1:]\n for n in list:\n total = total % n\n return total\n\ndef add_mult(list):\n \"\"\" Add first two inputs and multiply sum with the third input.\"\"\"\n if len(list) != 3:\n return -1\n else:\n return (list[0] + list[1]) * list[2]\n\ndef add_cubes(list):\n \"\"\"Sum of each element cubed\"\"\"\n total = 0\n for n in list:\n total += n ** 3\n return total","sub_path":"arithmetic2.py","file_name":"arithmetic2.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"534591830","text":"from telegram.ext import Updater, 
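# The converter above calls findall() and sys.argv without importing them, and
# the entry being built when the file ends is never appended, so the last
# record is silently dropped. A corrected, self-contained sketch of the
# parsing core (field handling kept deliberately minimal):
def parse_bib_lines(lines):
    entries, entry = [], {}
    for line in lines:
        line = line.strip()
        if line.startswith('@'):
            if entry:
                entries.append(entry)
            entry = {}
        elif '=' in line:
            key, value = (v.strip(' {},\n') for v in line.split('=', 1))
            entry[key] = value
    if entry:
        entries.append(entry)    # keep the final entry too
    return entries

demo = ['@article{x,', 'author = {Knuth},', 'year = {1984},', '}']
assert parse_bib_lines(demo) == [{'author': 'Knuth', 'year': '1984'}]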
CommandHandler, MessageHandler, Filters\nimport requests\nimport telegram\nimport random\nfrom app.utils import *\n\n\ndef news_handler(bot, update, msg_list):\n if msg_list[1] in [\"news\",\"News\"]:\n news_creds = open(\"api/news\", \"r\")\n creds = news_creds.read()\n url = ('https://newsapi.org/v2/top-headlines?'\n 'country=in&'\n 'apiKey='+creds)\n result = requests.get(url).json()['articles']\n data = ''\n count = 0\n for item in result:\n count += 1\n if count == 6:\n break\n data += item['title']+\"\\n\\n\" # +item['description']+\"\\n\\n\"\n update.message.reply_text(data)\n","sub_path":"app/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"40082783","text":"import numpy as np\nimport nltk\nimport re\nimport os\nfrom sklearn import feature_extraction\nfrom nltk.stem.snowball import SnowballStemmer\nstopwords = nltk.corpus.stopwords.words(\"english\")\nstemmer = SnowballStemmer(\"english\")\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport lda \n\n#############################documentation preparation###############################\n\ndoc1 = \"Sugar is bad to consume. My sister likes to have sugar, but not my father.\"\ndoc2 = \"My father spends a lot of time driving my sister around to dance practice.\"\ndoc3 = \"Doctors suggest that driving may cause increased stress and blood pressure.\"\ndoc4 = \"Sometimes I feel pressure to perform well at school, but my father never seems to drive my sister to do better.\"\ndoc5 = \"Health experts say that Sugar is not good for your lifestyle.\"\n\n# compile documents\ndoc = [doc1, doc2, doc3, doc4, doc5]\n\n###########################text preprocessing######################################\n\n\ndef preprocessing(doc, lower = True, stop_word = True, punctuation = True, word = True, stem = False):\n '''\n lower: do lower the words\n doc: one single document\n word: whether we want to word tokenize or sentence tokenize\n stem: whether we want to stem\n \n steps: tokenize -> remove stopwords -> remove punctuation -> stem\n '''\n if lower:\n doc = doc.lower()\n if word:\n if punctuation:\n from nltk.tokenize import RegexpTokenizer\n tokenizer = RegexpTokenizer(r'\\w+')\n tokens = tokenizer.tokenize(doc) \n else:\n tokens = nltk.sent_tokenize(doc)\n \n if stop_word:\n tokens = [i for i in tokens if i not in stopwords]\n \n if stem:\n stems = [stemmer.stem(t) for t in tokens]\n return tokens\n\ntokens = [preprocessing(i) for i in doc]\n\n\n####################tfidf transformation################################\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ntfidf_vectorizer = TfidfVectorizer(max_df = 0.8, max_features = 10000, min_df = 0.05, \n stop_words = None,use_idf = True, \n tokenizer = preprocessing, ngram_range = (1,2))\n \nprint('vectorizing...')\ntfidf_matrix = tfidf_vectorizer.fit_transform(doc)\nprint('tfidf matrix shape: ')\nprint(tfidf_matrix.shape)\nprint\n\n\nterms = tfidf_vectorizer.get_feature_names()\nprint('first 100 terms: ')\nprint(terms[:100])\nprint\n\n###########################topic modeling####################################\n\ntf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=1000,\n stop_words=None, tokenizer = preprocessing, ngram_range = (1,2))\n\ntf = tf_vectorizer.fit_transform(doc)\n\nmodel = lda.LDA(n_topics=3, n_iter=50, random_state=1)\nmodel.fit(tf)\nvocab = tf_vectorizer.get_feature_names()\ntopic_word = model.topic_word_ 
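# topic_word_ is an (n_topics x vocab_size) probability matrix; the usual next
# step, which the script stops short of, is listing each topic's top terms.
# A small sketch on a toy matrix (the vocabulary below is illustrative):
import numpy as np

def top_terms(topic_word, vocab, n=3):
    return [[vocab[i] for i in np.argsort(row)[::-1][:n]] for row in topic_word]

toy = np.array([[0.5, 0.3, 0.1, 0.1],
                [0.1, 0.1, 0.2, 0.6]])
print(top_terms(toy, ['sugar', 'father', 'driving', 'pressure']))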
\n\n\n\n#########################text clustering####################################\n\nfrom sklearn.metrics.pairwise import cosine_similarity\ndist = 1 - cosine_similarity(tfidf_matrix)\nprint('distance matrix: ')\nprint(dist)\nprint\n\nfrom sklearn.cluster import KMeans\n\nnum_clusters = 2\n\nkm = KMeans(n_clusters = num_clusters)\nkm.fit(tfidf_matrix)\n\n\n","sub_path":"Topic Modelling/text_analytics_toy.py","file_name":"text_analytics_toy.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"164085350","text":"from .base import * # noqa\n\nDEBUG = True\n\nALLOWED_HOSTS = ['*']\n\nINSTALLED_APPS += [\n 'django_extensions'\n]\n\nDEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'\nTEST_OUTPUT_VERBOSE = 2\nTEST_RUNNER = 'xmlrunner.extra.djangotestrunner.XMLTestRunner'\nTEST_OUTPUT_FILE_NAME = 'test-report.xml'\n\nLOGGING = {}\n","sub_path":"project/settings/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"641412191","text":"import Generador_texto\nfrom pathlib import Path\nimport RLE\nimport string\nfrom tabulate import tabulate\nfrom time import time\nimport Formulas_shannon\n\n\n#MAIN\n#SIMBOLOS POSIBLES\ncdad_simbolos=len(string.ascii_letters)\n#Datos para generar la tabla\narchivo = []\ntamaño_1 = []\ninformacion_1 =[]\ntamaño_2 = []\ninformacion_2 =[]\ntiempo = []\nentropia_1 = []\nentropia_2 = []\nTabla = []\nTabla.append(['Archivo',\"Original(bits)\",\"Comprimido(bits)\",\"Tiempo(seg)\",\"I original\",\"I encode\",\"H original\",\"H encode\"])\nfor i in range(1,31):\n #TEXTO GENERADO\n texto_generado=Generador_texto.generador_cadena(10000)\n #Archivo de texto original\n output_file = open(\"Texto_generado\"+str(i)+\".txt\", \"w\")\n output_file.write(texto_generado)\n output_file.close()\n #calculo de tamaño\n file = Path('Texto_generado'+str(i)+\".txt\") # or Path('./doc.txt')\n size1 = file.stat().st_size\n #Calculo de información y entropía\n datos=Formulas_shannon.Datos_shannon(size1,cdad_simbolos) #información,entropia\n #para la tabla\n archivo.append(str(i))\n tamaño_1.append(size1)\n informacion_1.append(datos[0])\n entropia_1.append(datos[1])\n\n #COMPRESION RLE\n start_time = time()\n texto_comprimido=RLE.encode(\"Texto_generado\"+str(i)+\".txt\",\"Texto_generado_RLE\"+str(i)+\".txt\")\n elapsed_time = time() - start_time\n file = Path('Texto_generado_RLE'+str(i)+\".txt\") # or Path('./doc.txt')\n size2 = file.stat().st_size\n #Calculo de información y entropía\n datos=Formulas_shannon.Datos_shannon(size2,cdad_simbolos) #información,entropia\n #para la tabla\n tamaño_2.append(size2)\n tiempo.append(elapsed_time)\n informacion_2.append(datos[0])\n entropia_2.append(datos[1])\n Tabla.append([archivo[i-1],tamaño_1[i-1],tamaño_2[i-1],tiempo[i-1],informacion_1[i-1],informacion_2[i-1],entropia_1[i-1],entropia_2[i-1]])\n\n #DESCOMPRESION RLE\n start_time = time()\n texto_descomprimido=RLE.decode(\"Texto_generado_RLE\"+str(i)+\".txt\",\"Texto_generado_RLE_decode\"+str(i)+\".txt\")\n elapsed_time = time() - start_time\n file = Path('Texto_generado_RLE_decode'+str(i)+\".txt\") # or Path('./doc.txt')\n size3 = file.stat().st_size\n datos=Formulas_shannon.Datos_shannon(size3,cdad_simbolos) #información,entropia\n #para comprobar descompresión\n #print(\"decode 
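# After km.fit(tfidf_matrix) above, the cluster assignment for each document is
# km.labels_. A self-contained miniature of that tf-idf + KMeans pipeline
# (documents and n_clusters are illustrative):
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ['sugar is bad', 'driving causes stress',
        'sugar and lifestyle', 'driving to practice']
X = TfidfVectorizer().fit_transform(docs)
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)
print(km.labels_)    # e.g. the two sugar docs share one label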
\",i,\"|tamaño:\",size3,\"bits\",\"|información:\",datos[0],\"|tiempo:\",elapsed_time,\"seg\",\"|entropia:\",datos[1])\n\n\nprint(tabulate(Tabla, headers='firstrow', tablefmt='fancy_grid'))\n\n","sub_path":"Pruebas_RLE.py","file_name":"Pruebas_RLE.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"520825147","text":"import tensorflow as tf\nimport numpy as np\nimport utils\n\nfrom tqdm import trange\n\ndef load_data():\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\n x_full = np.concatenate((x_train, x_test))\n y_full = np.concatenate((y_train, y_test))\n\n x_full = x_full.reshape(-1, 28, 28, 1)\n\n return x_full, y_full\n\ndef create_cann():\n f_model = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(16, (3, 3), activation='relu', padding='same', input_shape=[28, 28, 1]),\n tf.keras.layers.MaxPooling2D((2, 2), padding='same'),\n tf.keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same'),\n tf.keras.layers.MaxPooling2D((2, 2), padding='same'),\n tf.keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same'),\n tf.keras.layers.MaxPooling2D((2, 2), padding='same'),\n tf.keras.layers.BatchNormalization(fused=False),\n\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(9, activation='linear'),\n\n tf.keras.layers.Lambda(lambda x: tf.concat([tf.fill([tf.shape(x)[0], 1], tf.cast(tf.convert_to_tensor(np.float32(1)), x.dtype)), x], axis=1))\n ])\n\n g_model = tf.keras.models.Sequential([\n tf.keras.layers.Dense(9, activation='linear', input_shape=[10]),\n tf.keras.layers.Lambda(lambda x: tf.concat([tf.fill([tf.shape(x)[0], 1], tf.cast(tf.convert_to_tensor(np.float32(1)), x.dtype)), x], axis=1))\n ])\n\n return f_model, g_model\n\ndef train_cann(f_model, g_model, x_train, y_train, batch_size):\n variables = f_model.trainable_variables + g_model.trainable_variables\n optimizer = tf.keras.optimizers.Adam(1e-3)\n num_batches = int(np.ceil(x_train.shape[0] / batch_size))\n for epoch in range(20):\n losses = utils.Welford(1)\n prog = trange(num_batches)\n for b in prog:\n prog.set_description(f'Epoch {epoch+1}/20')\n start_idx = b * batch_size\n end_idx = min((b+1) * batch_size, x_train.shape[0])\n f_batch = x_train[start_idx:end_idx]\n g_batch = y_train[start_idx:end_idx]\n with tf.GradientTape() as tape:\n f_out = f_model(f_batch)\n g_out = g_model(g_batch)\n loss = utils.compute_loss(f_out, g_out)\n grads = tape.gradient(loss, variables)\n\n optimizer.apply_gradients(zip(grads, variables))\n losses.update(loss.numpy())\n prog.set_postfix({'loss': losses.mean[0]})\n","sub_path":"datasets/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"179870489","text":"#!/usr/bin/env python\n######################################################################\n#\n# File: glm.py\n# Author: Adam Janin\n# Mar 3, 2017\n#\n# Apply a \"global mapping file\" to a refst object.\n#\n# This supports a one-to-many mapping from words to general STM\n# conventions. For example, if the GLM is:\n#\n# it's => { it's / it is / it has }\n#\n# And the input is:\n#\n# when (it's) it's here\n#\n# The output will be a refst equivalent to:\n#\n# when ( { it's / it is / it has } ) { it's / it is / it has } here\n#\n# Unlike NIST tools, mrp-score glms operate at the word level, so you must\n# add any morphologic variants of words yourself. 
For example:\n#\n# adviser => advisor\n# advisers => advisors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you\n# may not use this file except in compliance with the License. The\n# License may be found in the file LICENSE at the top level of the\n# repository / directory tree.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\n\nimport re\n\nimport txt2refst\n\n\nclass GLMError(Exception):\n pass\n\n\nclass GLM:\n comment_re = re.compile('\\s*;.*$')\n blank_re = re.compile('\\s*$')\n line_re = re.compile('\\s*(\\S+)\\s*=>\\s*(.*)\\s*$')\n\n def __init__(self, fname):\n \"\"\"Read a (re) GLM file\"\"\"\n self.fname = fname\n\n # Map of word -> fst\n self.rules = {}\n\n with open(fname) as f:\n for line in f:\n # Remove comments if any\n line = re.sub(GLM.comment_re, '', line)\n # Then skip if all that's left is blank.\n if GLM.blank_re.match(line):\n continue\n line = line.lower()\n mo = GLM.line_re.match(line)\n if not mo:\n raise GLMError('Illegal line in GLM file \"%s\". The line was: \"%s\"'%(fname, line))\n word = mo.group(1)\n fsttxt = mo.group(2)\n rulefst = txt2refst.txt2refst(fsttxt)\n self.rules[word] = rulefst\n # end GLM.__init__()\n\n def apply(self, transfst):\n \"\"\"Apply the GLM to the passed transcript fst, editing it in place.\"\"\"\n # Note that we iterate through a copy of transfst.arcs. A bit inefficient,\n # but allows us to update in place.\n for arc in transfst.arcs[:]:\n if arc.ilabel in self.rules:\n lab = arc.ilabel\n s = arc.start\n e = arc.end\n transfst.delete_arc(arc)\n transfst.add_fst(self.rules[lab], s, e)\n\n # end GLM.apply()\n\n # end class GLM\n\n\nif __name__ == \"__main__\":\n f = txt2refst.txt2refst(\"he's a wilful whistleblower\")\n f.draw(\"/home/janin/public_html/pre\")\n g = GLM('remeeting.glm')\n g.apply(f)\n f.draw(\"/home/janin/public_html/post\")\n","sub_path":"glm.py","file_name":"glm.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"327830471","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/5/10 9:18\n# @Author : pf_xu\n# @FileName: subsets.py\n\n\ndef support(nums, k, n, flag, results):\n if k == n:\n result = []\n for i in range(n):\n if flag[i] is True:\n result.append(nums[i])\n results.append(result)\n return\n\n flag[k] = True\n support(nums,k+1,n,flag,results)\n flag[k] = False\n support(nums,k+1,n,flag,results)\n\n\ndef subsets(nums):\n results = []\n flag = [False for i in range(len(nums))]\n support(nums, 0, len(nums), flag, results)\n return results\n\n\nif __name__ == '__main__':\n print(subsets([1, 2, 3]))\n\n","sub_path":"001-100/78/subsets.py","file_name":"subsets.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"377606242","text":"def islucky(n):\n x = str(n)\n for i in x:\n if i!=str(4) and i!=str(7):\n return False\n\n\n\n return True\n\nflag =0\nn = int(input())\nfor i in range(4,n+1):\n if islucky(i):\n if n%i==0:\n flag = 1\n break\n\nif flag:\n print('YES')\nelse:\n 
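# The recursive subsets() above enumerates the power set with an
# include/exclude flag per element; for comparison, the standard-library
# equivalent via itertools (output order differs):
from itertools import chain, combinations

def powerset(nums):
    return [list(c) for c in chain.from_iterable(
        combinations(nums, r) for r in range(len(nums) + 1))]

assert len(powerset([1, 2, 3])) == 2 ** 3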
print('NO')\n","sub_path":"Codeforces/lucky_division.py","file_name":"lucky_division.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"387041701","text":"\"\"\"\nSimple graph implementation\n\"\"\"\nfrom util import Stack, Queue # These may come in handy\n\nclass Graph:\n\n \"\"\"Represent a graph as a dictionary of vertices mapping labels to edges.\"\"\"\n def __init__(self):\n self.vertices = {}\n\n def add_vertex(self, vertex_id):\n \"\"\"\n Add a vertex to the graph.\n \"\"\"\n if vertex_id not in self.vertices:\n self.vertices[vertex_id] = set()\n\n def add_edge(self, v1, v2):\n \"\"\"\n Add a directed edge to the graph.\n \"\"\"\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n\n\n def get_neighbors(self, vertex_id):\n \"\"\"\n Get all neighbors (edges) of a vertex.\n \"\"\"\n if vertex_id in self.vertices:\n return self.vertices[vertex_id]\n return set()\n\n def bft(self, start):\n \"\"\"\n Print each vertex in breadth-first order\n beginning from starting_vertex.\n \"\"\"\n queue = []\n visited = set()\n queue.append(start)\n while queue:\n curr_node = queue.pop(0)\n if curr_node not in visited:\n visited.add(curr_node)\n print(curr_node)\n for edge in self.get_neighbors(curr_node):\n queue.append(edge)\n\n def dft(self, start):\n \"\"\"\n Print each vertex in depth-first order\n beginning from starting_vertex.\n \"\"\"\n stack = []\n stack.append(start)\n visited = set()\n while stack:\n curr = stack.pop()\n if curr not in visited:\n visited.add(curr)\n print(curr)\n for edge in self.get_neighbors(curr):\n stack.append(edge)\n\n def dft_recursive(self, vertex, visited=set()):\n \"\"\"\n Print each vertex in depth-first order\n beginning from starting_vertex.\n This should be done using recursion.\n \"\"\"\n if vertex in visited:\n return\n visited.add(vertex)\n print(vertex)\n for edge in self.get_neighbors(vertex):\n self.dft_recursive(edge, visited)\n\n def bfs(self, start, end):\n \"\"\"\n Return a list containing the shortest path from\n starting_vertex to destination_vertex in\n breath-first order.\n \"\"\"\n queue = []\n visited = set()\n queue.append([start])\n while queue:\n curr_path = queue.pop(0)\n curr_node = curr_path[-1]\n if curr_node == end:\n return curr_path\n if curr_node not in visited:\n visited.add(curr_node)\n for edge in self.get_neighbors(curr_node):\n new_path = list(curr_path)\n new_path.append(edge)\n queue.append(new_path)\n\n def dfs(self, start, end):\n \"\"\"\n Return a list containing a path from\n starting_vertex to destination_vertex in\n depth-first order.\n \"\"\"\n stack = []\n stack.append([start])\n visited = set()\n while stack:\n curr_path = stack.pop() # [1,2,3]\n curr_node = curr_path[-1] #\n if curr_node == end:\n return curr_path\n if curr_node not in visited:\n visited.add(curr_node)\n print(curr_path)\n for edge in self.get_neighbors(curr_node):\n # creates new arr with current path\n new_path = list(curr_path)\n # add edges into \n new_path.append(edge)\n stack.append(new_path)\n\n def dfs_recursive(self, start, end):\n \"\"\"\n Return a list containing a path from\n starting_vertex to destination_vertex in\n depth-first order.\n This should be done using recursion.\n \"\"\"\n \n def recurse(vertex, end, visited=set()):\n curr_node = vertex[-1]\n if curr_node in visited:\n return \n if curr_node == end:\n return vertex\n visited.add(curr_node)\n for edge in self.get_neighbors(curr_node):\n new_path = 
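# bfs() above pops from the front of a plain list, which is O(n) per dequeue;
# collections.deque makes the same shortest-path search O(1) per dequeue. A
# stand-alone version against the sample graph from the record:
from collections import deque

def bfs_path(adjacency, start, end):
    queue, visited = deque([[start]]), set()
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node == end:
            return path
        if node not in visited:
            visited.add(node)
            for neighbor in adjacency.get(node, ()):
                queue.append(path + [neighbor])
    return None

g = {1: {2}, 2: {3, 4}, 3: {5}, 4: {6, 7}, 5: {3}, 6: {3}, 7: {1, 6}}
assert bfs_path(g, 1, 6) == [1, 2, 4, 6]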
list(vertex)\n new_path.append(edge)\n res = recurse(new_path, end, visited)\n if res:\n return res\n return recurse([start], end)\n \n\nif __name__ == '__main__':\n graph = Graph() # Instantiate your graph\n # https://github.com/LambdaSchool/Graphs/blob/master/objectives/breadth-first-search/img/bfs-visit-order.png\n graph.add_vertex(1)\n graph.add_vertex(2)\n graph.add_vertex(3)\n graph.add_vertex(4)\n graph.add_vertex(5)\n graph.add_vertex(6)\n graph.add_vertex(7)\n graph.add_edge(5, 3)\n graph.add_edge(6, 3)\n graph.add_edge(7, 1)\n graph.add_edge(4, 7)\n graph.add_edge(1, 2)\n graph.add_edge(7, 6)\n graph.add_edge(2, 4)\n graph.add_edge(3, 5)\n graph.add_edge(2, 3)\n graph.add_edge(4, 6)\n\n '''\n Should print:\n {1: {2}, 2: {3, 4}, 3: {5}, 4: {6, 7}, 5: {3}, 6: {3}, 7: {1, 6}}\n '''\n print(graph.vertices)\n\n '''\n Valid BFT paths:\n 1, 2, 3, 4, 5, 6, 7\n 1, 2, 3, 4, 5, 7, 6\n 1, 2, 3, 4, 6, 7, 5\n 1, 2, 3, 4, 6, 5, 7\n 1, 2, 3, 4, 7, 6, 5\n 1, 2, 3, 4, 7, 5, 6\n 1, 2, 4, 3, 5, 6, 7\n 1, 2, 4, 3, 5, 7, 6\n 1, 2, 4, 3, 6, 7, 5\n 1, 2, 4, 3, 6, 5, 7\n 1, 2, 4, 3, 7, 6, 5\n 1, 2, 4, 3, 7, 5, 6\n '''\n graph.bft(1)\n\n '''\n Valid DFT paths:\n 1, 2, 3, 5, 4, 6, 7\n 1, 2, 3, 5, 4, 7, 6\n 1, 2, 4, 7, 6, 3, 5\n 1, 2, 4, 6, 3, 5, 7\n '''\n graph.dft(1)\n graph.dft_recursive(1)\n\n '''\n Valid BFS path:\n [1, 2, 4, 6]\n '''\n print(graph.bfs(1, 6))\n\n '''\n Valid DFS paths:\n [1, 2, 4, 6]\n [1, 2, 4, 7, 6]\n '''\n print(graph.dfs(1, 6))\n print(graph.dfs_recursive(1, 6))","sub_path":"projects/graph/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"621852281","text":"from PIL import Image\nimport os\npath='./train_set/train'\ndirs=os.listdir(path)\nfor file in dirs:\n full_path=os.path.join(path,file)\n if len(os.listdir(full_path))<5:\n for lists in os.listdir(full_path):\n img_path=os.path.join(full_path,lists)\n img=Image.open(img_path)\n width=img.size[0]\n height=img.size[1]\n img2=img.crop((18,16,width,height))\n img2_path=os.path.join(full_path,lists[:-4]+'_1.png')\n img2.save(img2_path)\n img3 = img.crop((0, 0, width-18, height-16))\n img3_path = os.path.join(full_path, lists[:-4] + '_2.png')\n img3.save(img3_path)\n img4 = img.crop((0, 16, width - 18, height ))\n img4_path = os.path.join(full_path, lists[:-4] + '_3.png')\n img4.save(img4_path)\n img5 = img.crop((18, 0, width , height-16))\n img5_path = os.path.join(full_path, lists[:-4] + '_4.png')\n img5.save(img5_path)\n","sub_path":"train/IBN-Resnet50/pre-train/offset_augment.py","file_name":"offset_augment.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"408927904","text":"from flask import Flask,jsonify,request,Response\nimport json\nfrom test import *\nfrom settings import *\n\napp = Flask(__name__)\n\nbooks = [\n {\n 'name':'Green Eggs and Ham',\n 'price': 7.99,\n 'isbn': 978039400165\n },\n {\n 'name': 'The Cat In The Hat',\n 'price': 6.99,\n 'isbn': 9782371000193\n }\n\n]\n\n#GET /books\n@app.route('/books')\ndef get_books():\n return jsonify({'books':books})\n\n@app.route('/books',methods=['POST'])\ndef add_book():\n request_data = request.get_json()\n if(validBookObject(request_data)):\n new_book = {\n \"name\":request_data['name'],\n \"price\":request_data['price'],\n \"isbn\":request_data['isbn']\n }\n books.insert(0,new_book)\n response = Response(\"\",201,mimetype='application/json')\n 
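# a 201 Created response should point the client at the new resource\n 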
response.headers['Location'] = \"/books/\" + str(new_book['isbn'])\n return response\n else:\n invalidBookObjectErrorMsg = {\n \"error\": \"Invalid book object passed in request\",\n \"helpString\": \"Data passed in should be similar to this {'name':'bookname'}\"\n }\n response = Response(json.dumps(invalidBookObjectErrorMsg),status=400,mimetype='application/json')\n return response\n\n@app.route('/books/<int:isbn>')\ndef get_book_by_isbn(isbn):\n return_value = {}\n for book in books:\n if book[\"isbn\"] == isbn:\n return_value = {\n 'name': book[\"name\"],\n 'price': book[\"price\"]\n }\n return jsonify(return_value)\n\n@app.route('/books/<int:isbn>',methods=['PUT'])\ndef replace_book(isbn):\n request_data = request.get_json()\n new_book = {\n 'name': request_data['name'],\n 'price': request_data['price'],\n 'isbn': isbn\n }\n i = 0\n for book in books:\n currentIsbn = book[\"isbn\"]\n if currentIsbn == isbn:\n books[i] = new_book\n i += 1\n response = Response(\"\",status=204)\n return response\n\n@app.route('/books/<int:isbn>',methods=['PATCH'])\ndef update_book(isbn):\n request_data = request.get_json()\n updated_book = {}\n if(\"name\" in request_data):\n updated_book[\"name\"] = request_data['name']\n if(\"price\" in request_data):\n updated_book[\"price\"] = request_data['price']\n for book in books:\n if book[\"isbn\"] == isbn:\n book.update(updated_book)\n response = Response(\"\",status=204)\n response.headers['Location'] = \"/books/\" + str(isbn)\n return response\n\n\n@app.route('/books/<int:isbn>',methods=['DELETE'])\ndef delete_book(isbn):\n i = 0\n for book in books:\n if book[\"isbn\"] == isbn:\n books.pop(i)\n response = Response(\"\",status=204)\n return response\n i += 1\n response = Response(\"\",status=404,mimetype='application/json')\n return response\n\napp.run(port=5000)","sub_path":"venv/booksApi.py","file_name":"booksApi.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"322456685","text":"# -*- coding:utf-8 -*-\nimport sys, os\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n\nfrom collector.container import Container\n\nc = Container('6f0ceb8a08290b39f0907b5622102070f1c3ea2166f811390b52e3be2b641d28')\nm = c.collect_metric('container.cpu_user')\nprint('metric: ', m)\nm = c.collect_metric('container.cpu_system')\nprint('metric: ', m)\nm = c.collect_metric('container.mem.usage_in_bytes')\nprint('metric: ', m)\n\n\n","sub_path":"tmp/test_container.py","file_name":"test_container.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"605134791","text":"# coding=utf-8\n\n\"\"\"\n■ Scrape the Mobaga rankings with selenium + PhantomJS + BeautifulSoup\n\n\"\"\"\n\nimport sys\nimport time\nimport scrape_const as Const\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom bs4 import BeautifulSoup\n\ndef main():\n\n html = Fetch()\n\n rankList = html.find_all('li', class_='line0')\n\n # print(rankList) # dump the HTML source\n\n for rankElement in rankList:\n rankNum = rankElement.find('div', class_='rankRibbon').text.replace('位', '')\n gameTitle = rankElement.find('span', class_='rankContentTitle caption_l').text\n print(rankNum, gameTitle)\n\n\ndef Fetch():\n \"\"\"\n Create a headless browser with PhantomJS and parse the page with BeautifulSoup\n :return: HTML as str\n \"\"\"\n\n # options = 
webdriver.ChromeOptions()\n # options.add_argument('--user-agent=\"Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_1 like Mac OS X) AppleWebKit/602.1.50 \" \\\n # \"(KHTML, like Gecko) Version/10.0 Mobile/14A403 Safari/602.1\"')\n # driver = webdriver.Chrome(executable_path = '/Users/tennessee-rose/Documents/chromedriver'\n # , chrome_options=options)\n driver = webdriver.PhantomJS(executable_path=GetPhantomjsPath()\n , desired_capabilities={'phantomjs.page.settings.userAgent': Const.USER_AGENT})\n wait = WebDriverWait(driver, 30)\n url = 'http://sp.mbga.jp/_game_ranking?genre=2000&sex_type=A&p=1&from_func=game_ranking&from_sex_type=A&from_genre=1000'\n driver.get(url)\n\n print('Fetching page...')\n # Wait until the ranking list items ('line0', parsed in main) are present\n wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'line0')))\n print('Page fetched')\n\n # Click the 'more' button so the ranking extends down to 30th place\n driver.find_element_by_class_name('sp-more-load').click()\n\n time.sleep(5)\n\n # wait.until(EC.invisibility_of_element_located((By.CLASS_NAME, 'sp-more-load')))\n\n pageData = driver.page_source.encode('utf-8')\n\n html = BeautifulSoup(pageData, 'html.parser')\n print(html)\n\n driver.save_screenshot('ss.png')\n\n driver.quit()\n return html\n\ndef Scrape():\n return\n\ndef GetPhantomjsPath():\n phantomjsPath = ''\n\n # On macOS\n if sys.platform == 'darwin':\n phantomjsPath = Const.PhantomJSPath_Mac\n # On Windows\n elif sys.platform == 'win32':\n phantomjsPath = Const.PhantomJSPath_Windows\n\n if phantomjsPath == '':\n print(\"This OS is not supported\")\n sys.exit()\n\n return phantomjsPath\n\nif __name__ == '__main__':\n main()","sub_path":"scrape_mobaga_ranking.py","file_name":"scrape_mobaga_ranking.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"645131222","text":"# -*- coding: utf-8 -*-\n# -*- mode: python -*-\n\"\"\"Functions for ML estimation of MAT parameters\"\"\"\nfrom __future__ import print_function, division, absolute_import\nimport numpy as np\n\n\nclass mat(object):\n \"\"\"Compute max-likelihood estimate of the MAT model parameters\n\n stim: stimulus, dimensions (nchannels, nframes)\n rf_tau: number of time lags in the kernel OR a set of temporal basis functions\n spike_v: spike response, dimensions (nbins, [ntrials])\n spike_h: spike history (i.e. spike_v convolved with basis kernels), dim (nbins, nbasis, [ntrials])\n stim_dt: sampling rate of stimulus frames\n spike_dt: sampling rate of spike bins\n nlin: the nonlinearity.
Allowed values: \"exp\" (default), \"softplus\", \"sigmoid\"\n\n If there are multiple trials for a given stimulus, then spikes must have\n dimensions (nbins, ntrials)\n\n \"\"\"\n\n def __init__(self, stim, rf_tau, spike_v, spike_h, stim_dt, spike_dt, nlin=\"exp\"):\n from theano import config\n import scipy.sparse as sps\n from dstrf.strf import lagged_matrix\n\n self.dtype = config.floatX\n # this is needed to prevent compiler errors in clang\n config.gcc.cxxflags = \"-Wno-c++11-narrowing\"\n # compiler optimizations:\n # config.gcc.cxxflags += \"-O3 -ffast-math -ftree-loop-distribution -funroll-loops -ftracer\"\n config.gcc.cxxflags += \" -O3 -ffast-math -funroll-loops\"\n\n if stim.ndim == 1:\n stim = np.expand_dims(stim, 0)\n if spike_v.ndim == 1:\n spike_v = np.expand_dims(spike_v, 1)\n if spike_h.ndim == 2:\n spike_h = np.expand_dims(spike_h, 2)\n\n self._nchannels, nframes = stim.shape\n nbins, ntau, ntrials = spike_h.shape\n upsample = int(stim_dt / spike_dt)\n if upsample != (nbins // nframes):\n raise ValueError(\"size of design matrices does not match sampling rates\")\n if spike_v.shape != (nbins, ntrials):\n raise ValueError(\"size of spikes matrix does not match design matrix\")\n\n self._spike_dt = spike_dt\n self._nlin = nlin\n self._spikes = sps.csc_matrix(spike_v)\n self._X_stim = lagged_matrix(stim, rf_tau).astype(self.dtype)\n self._X_spike = spike_h.astype(self.dtype)\n self._interp = sps.kron(\n sps.eye(nframes), np.ones((upsample, 1), dtype=config.floatX), format=\"csc\"\n )\n self.select_data()\n self._make_functions()\n\n @property\n def spikes(self):\n return self._spikes\n\n @property\n def X_stim(self):\n return self._X_stim\n\n @property\n def X_spike(self):\n return self._X_spike\n\n def _nlin_theano(self, mu):\n import theano.tensor as T\n from theano.tensor import nnet\n\n if self._nlin == \"exp\":\n return T.exp(mu)\n elif self._nlin == \"softplus\":\n return nnet.softplus(mu)\n elif self._nlin == \"sigmoid\":\n return nnet.sigmoid(mu)\n else:\n raise ValueError(\"unknown nonlinearity type: {}\".format(self._nlin))\n\n def _make_functions(self):\n \"\"\"Generate the theano graph\"\"\"\n from theano import function, sparse, In\n import theano.tensor as T\n\n nalpha = self._X_spike.shape[1]\n\n # regularization parameters\n reg_lambda = T.scalar(\"lambda\")\n reg_alpha = T.scalar(\"alpha\")\n # split out the parameter vector\n w = T.vector(\"w\")\n dc = w[0]\n h = w[1 : (nalpha + 1)]\n k = w[(nalpha + 1) :]\n # elastic net penalty\n penalty = reg_lambda * T.dot(k, k) + reg_alpha * T.sqrt(T.dot(k, k) + 0.001)\n # Vx has to be promoted to a matrix for structured_dot to work\n Vx = T.dot(self._sh_X_stim, k)\n Vi = sparse.structured_dot(self._sh_interp, T.shape_padright(Vx))\n H = T.dot(self._sh_X_spike.dimshuffle([2, 0, 1]), h).T\n mu = Vi - H - dc\n lmb = self._nlin_theano(mu)\n # this version of the log-likelihood is faster, but the gradient doesn't work\n llf = (\n lmb.sum() * self._spike_dt\n - sparse.sp_sum(\n sparse.structured_log(self._sh_Y_spike * lmb), sparse_grad=True\n )\n + penalty\n )\n # this version has a working gradient\n ll = (\n lmb.sum() * self._spike_dt\n - sparse.sp_sum(self._sh_Y_spike * T.log(lmb), sparse_grad=True)\n + penalty\n )\n dL = T.grad(ll, w)\n v = T.vector(\"v\")\n ddLv = T.grad(T.sum(dL * v), w)\n\n self.V = function([w], Vx)\n self.V_interp = function([w], Vi)\n self.lci = function([w], mu)\n self.loglike = function(\n [w, In(reg_lambda, value=0.0), In(reg_alpha, value=0.0)], llf\n )\n self.gradient = function(\n [w, 
In(reg_lambda, value=0.0), In(reg_alpha, value=0.0)], dL\n )\n self.hessianv = function(\n [w, v, In(reg_lambda, value=0.0), In(reg_alpha, value=0.0)], ddLv\n )\n\n def select_data(self, fselect=None, bselect=None):\n \"\"\"Updates the shared variables containing the data to be fit\n\n Default behavior is to populate the shared variables with the data\n provided to the constructor. However, if fselect (selecting frames) and\n bselect (selecting bins) are specified, then only a subset of data is\n used. This is useful for cross-validation.\n\n \"\"\"\n from theano import shared\n\n if fselect is None or bselect is None:\n self._sh_interp = shared(self._interp)\n self._sh_X_stim = shared(self._X_stim)\n self._sh_X_spike = shared(self._X_spike)\n self._sh_Y_spike = shared(self._spikes)\n else:\n self._sh_interp.set_value(self._interp[bselect][:, fselect])\n self._sh_X_stim.set_value(self._X_stim[fselect])\n self._sh_X_spike.set_value(self._X_spike[bselect])\n self._sh_Y_spike.set_value(self._spikes[bselect])\n\n def sta(self, center=False, scale=False):\n \"\"\"Calculate the spike-triggered average\"\"\"\n from dstrf.strf import correlate\n\n spikes = self._spikes.toarray()\n X = self._X_stim.copy()\n if center:\n X -= X.mean(0)\n if scale:\n X /= X.std(0)\n return correlate(X, spikes)\n\n @property\n def n_hparams(self):\n \"\"\"Returns the number of h parameters\"\"\"\n return self._X_spike.shape[1]\n\n @property\n def n_kparams(self):\n \"\"\"Returns the number of k params\"\"\"\n return self._X_stim.shape[1]\n\n def param0(self):\n \"\"\"Returns a parameter vector with a good starting guess\"\"\"\n nbins, hdim, ntrials = self._X_spike.shape\n meanrate = self._spikes.sum(0).mean() / nbins\n return np.r_[np.exp(meanrate), np.zeros(hdim + self.n_kparams)].astype(\n self.dtype\n )\n\n def estimate(\n self,\n w0=None,\n reg_lambda=0,\n reg_alpha=0,\n gtol=1e-6,\n maxiter=300,\n method=\"trust-krylov\",\n **kwargs\n ):\n \"\"\"Compute max-likelihood estimate of the model parameters\n\n w0: initial guess at parameters. 
If not supplied (default), sets omega\n to the mean firing rate and all other parameters to zero.\n\n Additional arguments are passed to scipy.optimize.minimize\n\n \"\"\"\n import scipy.optimize as op\n\n if w0 is None:\n w0 = self.param0()\n\n res = op.minimize(\n self.loglike,\n w0,\n method=method,\n jac=self.gradient,\n hessp=self.hessianv,\n args=(reg_lambda, reg_alpha),\n options={\"gtol\": gtol, \"maxiter\": maxiter},\n **kwargs\n )\n return res.x\n\n def predict(self, w0, tau_params, V=None, random_state=None):\n \"\"\"Generate a predicted spike train\n\n w0: the estimator's parameters\n tau_params: the values for t1, t2,...tN and tref (which are not estimated)\n V: if specified, skips calculating the convolution of the stim with the kernel\n\n \"\"\"\n import mat_neuron._model as mat\n\n if random_state is not None:\n mat.random_seed(random_state)\n nbins, hdim, ntrials = self._X_spike.shape\n omega = w0[0]\n hvalues = w0[1 : (1 + hdim)]\n tvalues = tau_params[:hdim]\n tref = tau_params[-1]\n if self._nlin == \"exp\":\n f = mat.predict_poisson\n elif self._nlin == \"softplus\":\n f = mat.predict_softplus\n else:\n raise ValueError(\"prediction not implemented for nonlinearity: {}\".format(self._nlin))\n if V is None:\n V = self.V(w0)\n return f(V - omega, hvalues, tvalues, tref, self._spike_dt, nbins // V.size)\n\n\nclass matfact(mat):\n \"\"\"The MAT dSTRF model, but with factorized (bilinear) kernel\n\n stim: stimulus, dimensions (nchannels, nframes)\n rf_tau: number of time lags in the kernel OR a set of temporal basis functions\n rf_rank: the rank of the factorized rf (1 or 2 is usually good)\n spike_v: spike response, dimensions (nbins, [ntrials])\n spike_h: spike history (i.e. spike_v convolved with basis kernels), dim (nbins, nbasis, [ntrials])\n stim_dt: sampling rate of stimulus frames\n spike_dt: sampling rate of spike bins\n nlin: the nonlinearity.
Allowed values: \"exp\" (default), \"softplus\", \"sigmoid\"\n\n If there are multiple trials for a given stimulus, then spikes must have\n dimensions (nbins, ntrials)\n\n \"\"\"\n\n def __init__(\n self, stim, rf_tau, rf_rank, spike_v, spike_h, stim_dt, spike_dt, nlin=\"exp\"\n ):\n self._rank = rf_rank\n super(matfact, self).__init__(\n stim, rf_tau, spike_v, spike_h, stim_dt, spike_dt, nlin\n )\n\n def _make_functions(self):\n \"\"\"Generate the theano graph\"\"\"\n from theano import function, sparse, In\n import theano.tensor as T\n\n nframes, nk = self._X_stim.shape\n nbins, nalpha, ntrials = self._X_spike.shape\n nt = nk // self._nchannels\n nkf = self._nchannels * self._rank\n nkt = nt * self._rank\n\n # regularization parameters\n reg_lambda = T.scalar(\"lambda\")\n reg_alpha = T.scalar(\"alpha\")\n # split out the parameter vector\n w = T.vector(\"w\")\n dc = w[0]\n h = w[1 : (nalpha + 1)]\n kf = w[(nalpha + 1) : (nalpha + nkf + 1)]\n kt = w[(nalpha + nkf + 1) : (nalpha + nkf + nkt + 1)]\n k = T.dot(\n kf.reshape((self._nchannels, self._rank)), kt.reshape((self._rank, nt))\n ).ravel()\n # elastic net penalty\n penalty = reg_lambda * T.dot(k, k) + reg_alpha * T.sqrt(T.dot(k, k) + 0.001)\n # Vx has to be promoted to a matrix for structured_dot to work\n Vx = T.dot(self._sh_X_stim, k)\n Vi = sparse.structured_dot(self._sh_interp, T.shape_padright(Vx))\n H = T.dot(self._sh_X_spike.dimshuffle([2, 0, 1]), h).T\n mu = Vi - H - dc\n lmb = self._nlin_theano(mu)\n # this version of the log-likelihood is faster, but the gradient doesn't work\n llf = (\n lmb.sum() * self._spike_dt\n - sparse.sp_sum(\n sparse.structured_log(self._sh_Y_spike * lmb), sparse_grad=True\n )\n + penalty\n )\n # this version has a working gradient\n ll = (\n lmb.sum() * self._spike_dt\n - sparse.sp_sum(self._sh_Y_spike * T.log(lmb), sparse_grad=True)\n + penalty\n )\n dL = T.grad(ll, w)\n v = T.vector(\"v\")\n ddLv = T.grad(T.sum(dL * v), w)\n\n self.V = function([w], Vx)\n self.V_interp = function([w], Vi)\n self.lci = function([w], mu)\n self.loglike = function(\n [w, In(reg_lambda, value=0.0), In(reg_alpha, value=0.0)], llf\n )\n self.gradient = function(\n [w, In(reg_lambda, value=0.0), In(reg_alpha, value=0.0)], dL\n )\n self.hessianv = function(\n [w, v, In(reg_lambda, value=0.0), In(reg_alpha, value=0.0)], ddLv\n )\n\n @property\n def n_kparams(self):\n nframes, nk = self._X_stim.shape\n nkf = self._nchannels * self._rank\n nkt = nk // self._nchannels * self._rank\n return nkt + nkf\n\n def param0(self, random_seed=10, random_sd=0.1):\n \"\"\"Returns a parameter vector with a good starting guess\n\n For the factorized model, the RF *cannot* be all zeros because of the\n sign ambiguity in the factorization. 
So instead we use normally\n distributed random numbers with a fixed seed.\n\n \"\"\"\n from numpy import random\n\n random.seed(random_seed)\n nbins, hdim, ntrials = self._X_spike.shape\n meanrate = self._spikes.sum(0).mean() / nbins\n return np.r_[\n np.exp(meanrate), np.zeros(hdim), random_sd * random.randn(self.n_kparams)\n ].astype(self.dtype)\n\n def strf(self, w):\n \"\"\"Returns the full-rank RF\"\"\"\n from dstrf.strf import defactorize\n\n # the kernel factors follow the dc offset and the adaptation weights\n return defactorize(w[(1 + self.n_hparams) :], self._nchannels, self._rank)\n","sub_path":"dstrf/mle.py","file_name":"mle.py","file_ext":"py","file_size_in_byte":13011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"275468524","text":"from downloader import Download\r\nfrom util import Tools\r\nfrom bs4 import BeautifulSoup\r\nimport urllib.request, urllib.error, urllib.parse, os, jsonpickle\r\nclass PornHub:\r\n\tdef __init__(self):\r\n\t\tself.helper = Tools()\r\n\t\tself.MAIN_FILE = \"./MAIN_PH.list\"\r\n\t\tself.TBD_FILE = \"./TBD_PH.list\"\r\n\t\tself.ARCHIVE_FILE = \"./ARCHIVE_PH.list\"\r\n\r\n\tdef PH_extractor_(self,resp):\r\n\t\ttry:\r\n\t\t\tparse_tree = BeautifulSoup(resp,\"html.parser\")\r\n\t\t\ttag_finder = parse_tree.findAll(\"li\", {\"class\" : \"videoblock\"})\r\n\t\t\tdel resp, parse_tree\r\n\t\t\tfor each_tag in tag_finder:\r\n\t\t\t\tlink = str(each_tag['_vkey'])\r\n\t\t\t\tif not self.helper.find_link(self.MAIN_FILE,link):\r\n\t\t\t\t\tself.helper.append_link(self.MAIN_FILE,link)\r\n\t\t\t\t\tself.helper.append_link(self.TBD_FILE,link)\r\n\t\t\tdel tag_finder\r\n\t\texcept Exception:\r\n\t\t\t# Bad connection. Maybe. Or not...\r\n\t\t\tpass\r\n\r\n\tdef _fetch_CDN_(self,resp):\r\n\t\tresp = str(resp)\r\n\t\tif resp.find(\"alt=\\\"Upgrade to Pornhub Premium to enjoy this video.\\\"\") != -1:\r\n\t\t\t# Nothing to fetch when the \"Upgrade to Pornhub Premium\" wall appears\r\n\t\t\treturn True\r\n\t\timport re\r\n\t\tregex = r\"(var flashvars_)(.*?)(=)(.*?)(};)\"\r\n\t\tmatch = re.findall(regex, resp)[0][3]\r\n\t\tjson = f\"{match.strip()}{'}'}\"\r\n\t\tjson = re.sub('(\"embedCode\":\"