diff --git "a/654.jsonl" "b/654.jsonl" new file mode 100644--- /dev/null +++ "b/654.jsonl" @@ -0,0 +1,650 @@ +{"seq_id":"22695545","text":"import requests\r\nimport base64\r\n\r\nclass MNIST_CLIENT(object):\r\n def __init__(self):\r\n self.headers = {}\r\n self.headers['Content-Type'] = 'application/x-www-form-urlencoded'\r\n self.urls = 'http://127.0.0.1'\r\n self.port = '8080'\r\n self.route = 'predict'\r\n\r\n def pic_base64(self,filename):\r\n byte_content = open(filename, 'rb').read()\r\n ls_f = base64.b64encode(byte_content)\r\n return ls_f\r\n\r\n def post_ocr(self,image_name):\r\n img_base64 = self.pic_base64(image_name).decode(\"utf-8\")\r\n json_values = {'image':img_base64}\r\n\r\n self.headers['Content-Length']=str(len(json_values))\r\n out_json = requests.post(\"%s:%s/%s\" % (self.urls, self.port, self.route), data=json_values,headers=self.headers).json()\r\n print(out_json) \r\n\r\n\r\nif __name__==\"__main__\":\r\n mnist_client=MNIST_CLIENT()\r\n image_name = '../train_test_mnist/MNIST/testimage/5/1.jpg'\r\n mnist_client.post_ocr(image_name)","sub_path":"mnist_webpy/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"106716091","text":"import cv2\nimg = cv2.imread(\"image3.jpg\")\nlab_img = cv2.cvtColor(img,cv2.COLOR_BGR2LAB)\nl,a,b =cv2.split(lab_img)\n# chu y 3 cau lenh sau: se co truong hop 1 pixel chay 2 dong lenh, gay ra nhieu cat tren anh.\nl[l>135] = 255\nl[l<=135] +=120\n\nlab_img = cv2.merge((l,a,b))\ncv2.imshow(\"LAB\",lab_img)\nfinal_img = cv2.cvtColor(lab_img,cv2.COLOR_LAB2BGR)\ncv2.imshow(\"final\",final_img)\ncv2.imshow(\"Original\",img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"Brightness_improving/LAB_brigtness_increasement.py","file_name":"LAB_brigtness_increasement.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"144737635","text":"from django.urls import path, include\nfrom . 
import views\nfrom rest_framework.routers import DefaultRouter\n\napp_name='user'\n\nrouter = DefaultRouter()\nrouter.register('profile',views.ProfileViewSet)\nrouter.register('plan', views.PlanViewSet)\nrouter.register('comment', views.CommentViewSet)\nrouter.register('relationship',views.RelationshipViewSet)\nrouter.register('searchplan',views.SearchPlanViewSet)\nrouter.register('selectprofile',views.SelectProfileViewSet)\nrouter.register('getcomment',views.GetCommentViewSet)\nrouter.register('relation',views.RelationViewSet)\nrouter.register('notification',views.NotificationViewSet)\nrouter.register('likes',views.LikesViewSet)\n\nurlpatterns = [\n path('register/', views.CreateUserView.as_view(), name='register'),\n path('myprofile/', views.MyProfileListView.as_view(), name='myprofile'),\n path('',include(router.urls)),\n path('timeline/',views.TimelineView.as_view(),name='timeline'),\n path('myfollowing_profile/',views.MyFollowingProfileView.as_view(),name='myfollowingprofile'),\n path('following_profile/',views.FollowingProfileView.as_view(),name='followingprofile'),\n path('follower_profile/',views.FollowerProfileView.as_view(),name='followerprofile'),\n path('userplan/',views.GetUserPlanSet.as_view(),name='userplan'),\n path('commentplan/',views.PlanCommnetView.as_view(),name='commentplan'),\n path('usernotification/',views.NotificationProfile.as_view(),name='usernotification'),\n path('prefectures/',views.PrefectureViewSet.as_view(),name='prefectures'),\n path('countlikes/',views.LikesView.as_view(),name='countlikes'),\n path('likedplans/',views. LikedPlanView.as_view(),name='likedplans'),\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"590626942","text":"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport os\nsys.path.append(os.path.join('../'))\nfrom lib.BeamDynamicsTools.Boundary import Boundary\nfrom lib.BeamDynamicsTools.Bfield import Bfield, BfieldTF, BfieldVF\nfrom lib.BeamDynamicsTools.Trajectory import Trajectory\nfrom lib.BeamDynamicsTools.Beam import Beam\n\nimport pylab as pl\nimport matplotlib as mpl\n\n# ===============================================================================\n# Calculates beam trajectories over a sweep over a range of toroidal field\n# settings\n# ===============================================================================\n\n# ------------------------------------------------------------------------------\n# Define np.array of injection angles\n# (x,y,z) = (1.798m, -0.052m, 0.243m)\n# alpha = 12.6 degrees (X-Z plane)\n# beta = 8.0 degrees (X-Y plane)\nalpha0 = 12.6\nbeta0 = 8.0\n\nalpha = alpha0 / 180.0 * np.pi\nbeta = beta0 / 180.0 * np.pi\nprint(alpha, beta)\nRinjection = [1.798, -0.052, 0.243]\nVinjection = [-np.cos(alpha) * np.cos(beta), np.cos(alpha) * np.sin(beta), -np.sin(alpha)]\n#Energy = [0.594e6, 0.740e6, 0.900e6]\nEnergy = 0.9e6 # np.linspace(0.594e6,0.900e6,10)\n\n# ------------------------------------------------------------------------------\n# Import poloidal Boundary points\nRb = np.loadtxt('../data/CmodCoordinatesRZ.dat', usecols=[0])\nZb = np.loadtxt('../data/CmodCoordinatesRZ.dat', usecols=[1])\n\n# ------------------------------------------------------------------------------\n# Generate vessel Boundary\nVessel = Boundary(Rb, Zb)\n\n# ------------------------------------------------------------------------------\n# 3D plot of vessel 
Boundary\nax = Vessel.Figure3D()\nVessel.Plot3D(ax)\n\n# ------------------------------------------------------------------------------\n# Inputs for B-field settings\n#In = np.array([ 0.0, 1600.0 ,3120 ,4450.0])\n#Bn = np.array([ 0.0, 0.05818182, 0.11345455, 0.16181818 ])\n#Bn = np.array([0.10,0.20, 0.30, 0.40])\n#Bn = np.array([0.0, 0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40,0.45])\n#Bn = np.linspace(0.0,0.45,19)\nBn = np.linspace(-0.45, 0.45, 50)\n#Bn = np.array([0.0])\n\n# ------------------------------------------------------------------------------\n# Generate Color Map\n#CMAP = mpl.colors.LinearSegmentedColormap.from_list('mycolors',['black','red','orange'])\nCMAP = mpl.colors.LinearSegmentedColormap.from_list(\n 'mycolors', ['green', 'blue', 'black', 'red', 'orange'])\n\n\n# ===============================================================================\n# Perform Trajectory calculation for B-Field Sweep\n# ===============================================================================\nAngleComponents = []\nCoordinates = []\nParameters = []\ntrajectory = []\nOutputPath = '../output/'\n# Color=['k','g','r','c','b','m','g','r','c','b','m','g']\n\nfor i in range(len(Bn)):\n B = BfieldTF(B0=Bn[i])\n Bv = BfieldVF(B0=0.00000)\n T = Trajectory(Vessel, B, Bv, v0=Vinjection, T0=Energy)\n T.LineColor = CMAP(1.0 * i / len(Bn))\n T.LineWidth = 2.0\n trajectory.append(T)\n\n # Save Target parameters\n#\tT.target.SaveTargetParameters(TFCurrent=In[i],Path=OutputPath+'geometry/')\n\n # append lists of Target Quantities\n#\tAngleComponents.append([T.target.VAngle,T.target.HAngle])\n#\tCoordinates.append([T.target.R,T.target.Z,T.target.Phi])\n#\tParameters.append(T.target.GetDetectionParameters())\n\n# ------------------------------------------------------------------------------\n# Plot 3D results\n\nfor i in range(len(trajectory)):\n trajectory[i].Plot3D(ax)\n #\t\ttrajectory[i].target.Plot3D(ax);\n# trajectory[-1].Limits3D(ax);\n\n# ------------------------------------------------------------------------------\n# Construct Legend\nLeg = []\nfor i in range(len(Bn)):\n Leg.append('B = %0.3fT' % trajectory[i].BFieldTF.B0)\n\n# ------------------------------------------------------------------------------\n# Plot 2D projections of Trajectories (Poloidal View)\nplt.figure(figsize=(20, 8))\nfor i in range(len(trajectory)):\n plt.subplot(1, 2, 1)\n trajectory[i].Plot2D('poloidal')\nplt.subplot(1, 2, 1)\nVessel.Border('poloidal')\nplt.xlim(0.2, 1.4)\nplt.ylim(-0.7, 0.5)\nplt.xlabel('R [m]')\nplt.ylabel('Z [m]')\nplt.title(r'Poloidal Projection ($\\alpha$ = %0.1f$^o$, $\\beta$ = %0.1f$^o$)' %\n (alpha0, beta0))\n# plt.legend(Leg,loc=4)\n\n# ------------------------------------------------------------------------------\n# Plot 2D projections of Trajectories (Top View)\nfor i in range(len(trajectory)):\n plt.subplot(1, 2, 2)\n trajectory[i].Plot2D('top')\nplt.subplot(1, 2, 2)\nVessel.Border('top')\nplt.xlim(0, 1.2)\nplt.ylim(-0.6, 0.6)\nplt.xlabel('x [m]')\nplt.ylabel('y [m]')\nplt.title(r'Midplane Projection ($\\alpha$ = %0.1f$^o$, $\\beta$ = %0.1f$^o$)' %\n (alpha0, beta0))\nax = plt.subplot(1, 2, 2)\nax.legend(Leg, bbox_to_anchor=(1.28, 1.0))\n\n# plt.legend(('B = 0.05','B = 0.10','B = 0.15','B = 0.20','B = 0.25','B = 0.30')\n\n\n# ------------------------------------------------------------------------------\n# Save Angular and Detection Quantities\nif False:\n np.savetxt(OutputPath + 'geometry/TargetAngle_Vert_Horiz.dat', AngleComponents)\n np.savetxt(OutputPath + 
'geometry/TargetCoordinates.dat', Coordinates)\n Header0 = '(0) I0 [A], (1) B0 [T], (2) X [m] , (3) Y [m], (4) Z [m], (5) incident angle [rad], (6) Detection Angle [rad], (7) optical path length [m] , (8) Detection Angle [rad], (9) Detection Angle [deg], (10) Detector Eff'\n np.savetxt(OutputPath + 'geometry/DetectionParameters.dat',\n (np.array(Parameters)), header=Header0)\n\n# ------------------------------------------------------------------------------\n# Save Figure\nif True:\n FigName = 'TrajectoryProjections_alpha%2.2f_beta%2.2f_' % (\n alpha0, beta0) # + B.Method\n FigPath = '../output/plots/'\n trajectory[-1].target.SaveTargetParameters(\n Path=FigPath + 'Test_alpha%2.2f_beta%2.2f_UpDown' % (alpha0, beta0))\n plt.savefig(FigPath + FigName + '_UpDown.pdf')\n plt.savefig(FigPath + FigName + '_UpDown.png')\n print('File saved: ' + FigName)\n\nplt.show()\n","sub_path":"applications/Trajectory-BFieldSweep-TF.py","file_name":"Trajectory-BFieldSweep-TF.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"413540633","text":"__author__ = 'agusx1211'\nfrom django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^user/login/', views.login, name='create_user'),\n url(r'^user/parties/', views.get_user_parties, name='get user parties'),\n url(r'^user/join/party', views.join_party, name='get user parties'),\n url(r'^user/leave/party', views.leave_party, name='get user parties'),\n url(r'^party/create/', views.create_party, name='create_party'),\n url(r'^party/tracks/update', views.update_tracks, name='set_tracks'),\n url(r'^party/tracks/add', views.add_track, name='set_tracks'),\n url(r'^party/tracks/get', views.get_tracks, name='get_tracks'),\n url(r'^party/tracks/next', views.get_next_track, name='get_tracks'),\n url(r'^party/tracks/del', views.del_all_tracks, name='get_tracks'),\n url(r'^party/track/del/one', views.del_track, name='get_tracks'),\n url(r'^party/getalltracks', views.get_all_tracks, name='get_tracks'),\n]","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"392322613","text":"\n#miajnur rahman\nwater = 400\nmilk = 540\ncoffee_beans = 120\ncups = 9\nmoney = 550\naction_input = \" \"\nbuy_type = 0\n\ndef showavailblematrial():\n print(\"The coffee machine has:\")\n print(str(water) + \" of water \")\n print(str(milk) + \" of milk \")\n print(str(coffee_beans) + \" of coffee beans \")\n print(str(cups) + \" of disposable cups \")\n print(str(money) + \" of money \")\n\ndef mainFunction() :\n print(\"Write action ( buy, fill, take, remaining, exit)\")\n action_input = input()\n print(\"> \" + action_input)\n function(action_input)\n\ndef function(action_input):\n global water, coffee_beans, cups, money, milk, buy_type\n\n if action_input == \"buy\":\n print(\"What do you want buy? 
1 - espresso, 2 - latte, 3 - cappuccino\")\n        buy_type = int(input())\n        if buy_type == 1:\n            if water >= 250 and coffee_beans >= 16 and cups >= 1:\n                water -= 250\n                coffee_beans -= 16\n                cups -= 1\n                money += 4\n                print(\"I have enough resources, making you a coffee \")\n            else:\n                if water < 250:\n                    print(\"sorry, not enough water\")\n                elif coffee_beans < 16:\n                    print(\"sorry, not enough coffee beans\")\n                elif cups < 1:\n                    print(\"sorry, not enough cups\")\n\n            #showavailblematrial()\n            mainFunction()\n        elif buy_type == 2:\n            if water >= 350 and coffee_beans >= 20 and milk >= 75 and cups >= 1:\n                water -= 350\n                milk -= 75\n                coffee_beans -= 20\n                cups -= 1\n                money += 7\n                print(\"I have enough resources, making you a coffee \")\n            else:\n                if water < 350:\n                    print(\"sorry, not enough water\")\n                elif coffee_beans < 20:\n                    print(\"sorry, not enough coffee beans\")\n                elif milk < 75:\n                    print(\"sorry, not enough milk\")\n                elif cups < 1:\n                    print(\"sorry, not enough cups\")\n            #showavailblematrial()\n            mainFunction()\n        elif buy_type == 3:\n            if water >= 200 and coffee_beans >= 12 and milk >= 100 and cups >= 1:\n                water -= 200\n                milk -= 100\n                coffee_beans -= 12\n                cups -= 1\n                money += 6\n                print(\"I have enough resources, making you a coffee \")\n            else:\n                if water < 200:\n                    print(\"sorry, not enough water\")\n                elif coffee_beans < 12 :\n                    print(\"sorry, not enough coffee beans\")\n                elif milk < 100:\n                    print(\"sorry, not enough milk\")\n                elif cups < 1:\n                    print(\"sorry, not enough cups\")\n\n            # showavailblematrial()\n            mainFunction()\n\n    elif action_input == \"fill\":\n        print(\"write how many ml of water do you want to add :\")\n        water1 = int(input())\n        print(\"> \" + str(water1))\n        water += water1\n        print(\"write how many ml of milk do you want to add :\")\n        milk1 = int(input())\n        print(\"> \" + str(milk1))\n        milk = milk + milk1\n\n        print(\"write how many grams of coffee beans do you want to add :\")\n        coffee_beans1 = int(input())\n        print(\"> \" + str(coffee_beans1))\n        coffee_beans += coffee_beans1\n\n        print(\"write how many disposable cups of coffee do you want to add :\")\n        cups1 = int(input())\n        print(\"> \" + str(cups1))\n        cups += cups1\n        showavailblematrial()\n        mainFunction()\n\n    elif action_input == \"take\":\n        print(\"I gave you $\" + str(money))\n        money = 0\n        showavailblematrial()\n        mainFunction()\n\n    elif action_input == \"remaining\":\n        showavailblematrial()\n        mainFunction()\n\nmainFunction()\n","sub_path":"lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"192730935","text":"#Count the elements in the list until an element is a tuple.\n\nnumbers = [1, 2, 3, (10, 20), 4, 5]\ncounter = 0\nfor n in numbers:\n\n# * isinstance(obiekt, typ_lub_klasa) -- checks whether the object is an instance of the type/class typ_lub_klasa or of any\n#   type/class inheriting from typ_lub_klasa,\n\n    if isinstance(n, tuple):\n        break\n    counter += 1\nprint('Result:', counter)","sub_path":"Zajecia04/zad05.py","file_name":"zad05.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"91282400","text":"import sys\nimport os\n\ndef casa_brackets(text, sub=b\"{\"):\n    if sub not in text or b\"}\" not in text :\n        return -1, -1\n    pos_ini = text.find(sub) + len(sub) - 1\n    pos = pos_ini+1\n    count = 1\n    while count > 0 and pos < len(text):\n        # print(pos, text[pos], count)\n        if text[pos] in b'{':\n            count += 1\n        elif text[pos] in b'}':\n            count -= 1\n        pos += 1\n    return pos_ini, pos\n\n\ndef 
extrai_exercicios(arq, exer, exeresol, exemplo):\n texto = b\"\"\n\n blocos = ([rb\"exer\", rb\"resp\"] if exer else [])\\\n + ([rb\"exeresol\", rb\"resol\"] if exeresol else [])\\\n + ([rb\"ex\"] if exemplo else [])\n aberturas = [rb\"\\begin{\" + l + rb\"}\" for l in blocos]\n fechamentos = [rb\"\\end{\" + l + rb\"}\" for l in blocos]\n tags = [rb\"\\chapter{\", rb\"\\section{\"] # , rb\"\\subsection{\"]\n contagem = 0\n dentro_de_bloco = False\n with open(arq, 'rb') as f: \n for linha in f.readlines():\n # if converte:\n # linha = linha.replace(rb\"{exeresol}\", rb\"{exer}\").replace(rb\"{resol}\", rb\"{resp}\")\n if any([s in linha for s in tags]):\n texto += linha\n\n if any([s in linha for s in aberturas]):\n dentro_de_bloco = True\n contagem = contagem + 1\n\n if dentro_de_bloco:\n # if rb\"ref{\" in linha:\n # print(linha)\n if linha.strip() != b'':\n texto += linha \n\n if any([s in linha for s in fechamentos]):\n dentro_de_bloco = False\n \n if contagem == 0:\n print(\"0000\", arq)\n if rb\"\\chapter{\" in texto:\n texto = rb\"\\addtocounter{chapter}{1}\"\n contagem = 1\n \n return contagem, texto.decode() if contagem>0 else \"\"\n # return contagem, limpa_secoes_vazias(texto.decode()) if contagem>0 else \"\"\n\n\ndef abre_arquivos(receita):\n texto = \"\"\n with open(\"main.tex\", \"rb\") as f:\n for linha in f.readlines():\n if linha.strip()[0:9] == rb\"\\include{\":\n # print(linha)\n s, e = casa_brackets(linha)\n arq = linha[s+1 : e-1].decode() + \".tex\"\n print(r\"%Extraindo de \" + arq)\n texto = texto + r\"%%%% Extraído de \" + arq + \"\\n\"\n contagem, conteudo = extrai_exercicios(arq, *receita)\n texto = texto + conteudo\n # print(texto)\n return texto\n\ndef limpa_secoes_vazias(texto):\n tag = r\"\\section\"\n tags = [tag, r\"\\chapter\", r\"%%%% Extraído de\"]\n linhas = texto.splitlines() + [\"\"]\n \n texto = \"\"\n for i, linha in enumerate(linhas):\n if (tag in linha and all(t not in linhas[i+1] for t in tags) and linhas[i+1] != \"\") or tag not in linha:\n texto += linha+\"\\n\"\n elif tag in linha:\n texto += r\"\\stepcounter{section}\"\n print(\"Seção vazia\")\n\n return texto.strip()\n \n\ncabecalho = r\"\"\"\\documentclass[10pt]{book}\n\\input preambulo.tex\n\\setlength{\\headheight}{30pt}\n\\usepackage{xr}\n\\externaldocument{main}\n\\begin{document}\"\"\"\n\nreceitas = [(\"exercicios_resolvidos.tex\", (False, True, False)),\n (\"exercicios.tex\", (True, False, False)),\n (\"exercicios_todos.tex\", (True, True, False)),\n (\"exercicios_todos_com_exemplos.tex\", (True, True, True ))]\n\nfor arq, receita in receitas:\n if receita[0] or receita[2]:\n rodape = r\"\\include{respostas}\" + '\\n' + r\"\\end{document}\"\n else:\n rodape = r\"\\end{document}\"\n\n texto = cabecalho + abre_arquivos(receita) + rodape\n with open(arq, \"w\") as f:\n f.write(texto)\n\n# print(texto)\n","sub_path":"extrai_exercicios.py","file_name":"extrai_exercicios.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"222846550","text":"gewei = ['','one','two','three','four','five','six','seven','eight','nine',]\nshiji = ['','eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen',]\nshiwei = ['','ten','twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety',]\ndef c1000(n):\n val = ''\n if n>=100:\n val = gewei[n/100] + ' hunderd'\n if n%100 > 0:\n val += ' and'\n return val + c100(n%100)\n\ndef c100(n):\n if n >10 and n<20:\n return ' ' + 
shiji[n-10]\n return ' ' + shiwei[n/10] + ' ' + gewei[n%10]\n\n","sub_path":"projecteuler/p17.py","file_name":"p17.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"215432640","text":"def skewed(x):\n \"\"\"\n This function finds out the skewness of the data and returns, if the data is positively, negatively or symmetrically distributed.\n :param x: Pass an array, list, series, dataframe for which skewness needs to be calculated\n :return: returns the direction towards which the data is skewed\n \"\"\"\n try:\n # Imports\n import numpy, statistics, matplotlib.pyplot as plt\n\n mean = numpy.mean(x)\n median = numpy.median(x)\n mode = statistics.mode(x)\n print(\"\\n\", \"Mean:\", mean, \"\\n\", \"Median:\", median, \"\\n\", \"Mode:\", mode, \"\\n\")\n\n y, counter = [], 0\n for i in x:\n counter = counter + 1\n y.append(counter)\n plt.plot(y, x)\n plt.title(\"Distribution of X\")\n plt.xlabel('No. of Values in X')\n plt.ylabel('X')\n # plt.show()\n filename = \"C:/Users\" + \"/\" + str(numpy.random.randint(6576476)) + \".png\"\n plt.savefig(filename)\n\n if mean > median > mode: return \"Positively skewed data\" # Eg. wealth distribution in the world\n elif mode > median > mean: return \"Negatively skewed data\" # Eg.Death rate (more deaths towards 80's)\n elif mean == mode == median: return \"Symmetric Distribution\" # Employee Salaries\n else: raise Exception(\"The skewness in the data cannot be predicted.\")\n\n except Exception as e:\n return e\n\n\nx = [10, 11, 12, 14, 15, 16, 16, 12, 19, 31, 51, 18, 92, 10, 76, 29, 64, 52, 81, 27, 98, 43, 36, 71, 77, 89, 45, 65, 37, 11]\nprint(skewed(x))","sub_path":"skewness.py","file_name":"skewness.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"455503792","text":"#!/usr/bin/env python\n# vim:fileencoding=utf-8\n# Author: Shinya Suzuki\n# Created: 2017-09-26\n\nimport pandas as pd\nimport pickle\nimport numpy as np\nfrom pkg_resources import resource_filename\nfrom scipy.special import logsumexp\n\n\ndef load_model(model_type: str):\n target_path = resource_filename(\n \"sphere\",\n \"stan_models/{0}.pkl\".format(model_type)\n )\n with open(target_path, \"rb\") as f:\n model = pickle.load(f)\n return model\n\n\ndef summarize_fit(fit, pars: list):\n summary = fit.summary(pars=pars)\n summary_df = pd.DataFrame(summary[\"summary\"],\n index=summary[\"summary_rownames\"],\n columns=summary[\"summary_colnames\"])\n return summary_df\n\n\ndef summarize_ofit(ofit, pars: list):\n r = []\n if pars is None:\n pars = ofit.keys()\n for k in pars:\n v = ofit[k]\n if hasattr(v.tolist(), \"__iter__\") is False:\n h = {\"\": \"{0}[0]\".format(k), \"mle\": v}\n r.append(h)\n else:\n for i, vv in enumerate(v):\n if hasattr(vv.tolist(), \"__iter__\") is False:\n h = {\"\": \"{0}[{1}]\".format(k, i), \"mle\": vv}\n r.append(h)\n else:\n for j, vvv in enumerate(vv):\n h = {\"\": \"{0}[{1},{2}]\".format(k, i, j), \"mle\": vvv}\n r.append(h)\n\n summary_df = pd.DataFrame(r)\n summary_df = summary_df.set_index(\"\")\n return summary_df\n\n\ndef save_fit(fit, fod: str):\n with open(fod, \"wb\") as f:\n pickle.dump(fit, f)\n\n\ndef save_log_lik(fit, lld: str):\n log_lik = fit.extract(\"log_lik\")[\"log_lik\"]\n np.savetxt(lld, log_lik, delimiter=\"\\t\")\n\n\ndef load_log_lik(llp: str) -> np.ndarray:\n log_lik = np.loadtxt(llp, delimiter=\"\\t\")\n return log_lik\n\n\ndef 
get_waic(log_lik: np.ndarray, depth: np.ndarray=None, t: str=\"bda3\") -> float:\n S, n = log_lik.shape\n if depth is None:\n # Normal case\n d = np.ones(n)\n D = n\n else:\n # For coverage depth model\n d = depth.copy()\n D = d.sum()\n d[d == 0] = 1\n if t == \"bda3\":\n # See (Gelman, et al., \"BDA3\", 2013) Page 174\n # Using logsumexp function to overcome the overflow of log_lik\n lppd = np.sum(d * (-np.log(S) + logsumexp(log_lik/d, axis=0)))\n pwaic = np.sum(np.var(log_lik, axis=0)/d)\n waic = -2.0 * lppd + 2.0 * pwaic\n elif t == \"original\":\n # See (Sumio Watanabe, 2010, JMLR) formula (4), (5), (6)\n T = - np.sum(d * (-np.log(S) + logsumexp(log_lik/d, axis=0))) / D\n fV = np.sum(np.var(log_lik, axis=0)/d) / D\n waic = T + fV\n return waic\n\n\ndef sampling(model, stan_data: dict, pars: list, si, sw, sc, st, ss, n_jobs):\n fit = model.sampling(data=stan_data,\n pars=pars,\n iter=si,\n warmup=sw,\n chains=sc,\n thin=st,\n seed=ss,\n n_jobs=n_jobs)\n return fit\n\n\ndef optimizing(model, stan_data: dict, ss: int, om: str=None, sh: int=5):\n # init_alpha must be lower to estimate non-normalizing model correctly\n fit = model.optimizing(data=stan_data,\n init_alpha=1e-10,\n iter=1e4,\n refresh=1,\n algorithm=om,\n seed=ss)\n\n return fit\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"sphere/stan_utils.py","file_name":"stan_utils.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"540326546","text":"import socket\r\nimport os\r\ndef Exit():\r\n print('按任意键以关闭客户端..')\r\n os.system('PAUSE')\r\n a.close()\r\na=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\nflag=True\r\nprint('这里是某超高校级的客户端')#客户端\r\nwhile (flag):\r\n add = input('输入待连接的py服务端...直接输回车表示默认:')\r\n print('输进的是',end='')\r\n if add == '':\r\n add = '10.77.2.182'\r\n print('默认服务端')\r\n else:print()\r\n try:\r\n a.connect((add, 10888))\r\n except ConnectionRefusedError:\r\n cmd = input(\"无响应,是否继续连接[y/n]\")\r\n if cmd == 'y': continue\r\n else: Exit()\r\n except TimeoutError:\r\n print('连接超时...')\r\n continue\r\n else: #print('检查点')\r\n print('已连接...')\r\n flag = False\r\nwhile True:\r\n data=a.recv(1024)#以1024为周期接收\r\n if data.decode()=='000':break\r\n print('服务端回复:'+data.decode())\r\n detail=input(\">>>>>>\")\r\n try:a.send(detail.encode())\r\n except ConnectionResetError:\r\n print(\"连接掉线...\")\r\n Exit()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"387272453","text":"import sys\nfrom PyQt4 import QtGui, QtCore\n\nclass Example(QtGui.QWidget):\n def __init__(self, parent = None):\n super(Example, self).__init__(parent)\n self.init_UI()\n\n def init_UI(self):\n\n self.setToolTip('This is a QWidget widget!')\n\n btn = QtGui.QPushButton('Quit', self)\n btn.clicked.connect(QtCore.QCoreApplication.instance().quit)\n btn.setToolTip('This is a Button button!')\n btn.resize(btn.sizeHint())\n btn.move(50, 50)\n\n\n self.setWindowTitle(\"ToolTips\")\n self.setGeometry(300, 300, 250, 150)\n self.show()\n\ndef main():\n app = QtGui.QApplication(sys.argv)\n w = Example()\n sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n main()","sub_path":"GUI/secoundpro.py","file_name":"secoundpro.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"222432495","text":"#\n# 
FILTERINGDATA.py\n#\n# Code file for the book Programmer's Guide to Data Mining\n# http://guidetodatamining.com\n# Ron Zacharski\n#\n\nfrom math import sqrt\nfrom enum import Enum\n\n\ndef manhattan(rating1, rating2):\n \"\"\"Computes the Manhattan distance. Both rating1 and rating2 are dictionaries\n of the form {'The Strokes': 3.0, 'Slightly Stoopid': 2.5}\"\"\"\n distance = 0\n commonRating = False\n for key in rating1:\n if key in rating2:\n distance += abs(rating1[key] - rating2[key])\n commonRating = True\n if commonRating:\n return distance\n else:\n return -1 # Indicates no ratings in common\n\n\ndef sortNeighborsByNearest(username, user_ratings):\n \"\"\"creates a sorted list of users based on their distance to username\"\"\"\n distances = []\n for user in user_ratings:\n if user != username:\n distance = manhattan(user_ratings[user], user_ratings[username])\n distances.append((distance, user))\n # sort based on distance -- closest first\n distances.sort()\n return distances\n\n\ndef recommend(username, user_ratings):\n \"\"\"Give list of recommendations\"\"\"\n # first find nearest neighbor\n nearest = sortNeighborsByNearest(username, user_ratings)[0][1]\n\n recommendations = []\n # now find movies neighbor rated that user didn't\n neighborRatings = user_ratings[nearest]\n givenUserRatings = user_ratings[username]\n for movie in neighborRatings:\n if not movie in givenUserRatings:\n recommendations.append((movie, neighborRatings[movie]))\n # using the fn sorted for variety - sort is more efficient\n return sorted(recommendations, key=lambda movieTuple: movieTuple[1], reverse=True)\n","sub_path":"src/scripts/basic_comparison.py","file_name":"basic_comparison.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"514691105","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 8 20:38:21 2015\n\n@author: ZhouBo\n\"\"\"\n\n\"\"\"\ncompute the optimal policy for one MBS and two SBSs using SPIA\ngeneral user set, cache set\n\"\"\"\n\nimport time\nimport copy\nimport scipy.io as sio\nimport numpy as np\nfrom scipy.special import comb\n\n\ndef PIeva_1MBS_2SBS(mu, N, Bmax, P, power, Num_queue):\n \"\"\"\n relative value iteration method to compute the linear system of equations in policy evaluation step\n \n input:\n mu: given policy\n N: queue size vector\n Bmax: maximal arrival of B (depends on the number of users)\n P: arrival probability of B\n power: transmission power of BSs\n Num_queue: number of request queues\n\n output:\n Vnew: value function for given policy mu\n \"\"\"\n Vnew = np.ones(N+1)\n Vold = np.zeros(N+1)\n \n epsilon = 0.5\n maxits = 50\n numits = 0\n# while np.amax(np.fabs(np.subtract(Vnew,Vold)))/np.amax(np.fabs(Vnew))>epsilon and numits epsilon and numits < maxits:\n\n Vold = copy.deepcopy(Vnew)\n \n # enumerate all possible request queue states \n for ind in np.arange(0, 1 + np.ravel_multi_index(N, N+1)): \n # given index, obtain the corresponding subscript\n sub = np.unravel_index(ind,N+1)\n \n u = np.array([mu[0][sub],mu[1][sub],mu[2][sub]])\n \n Vnew[sub] = cost_action_1MBS_2SBS(u,Vold,sub,N,Bmax,P,power,Num_queue)\n \n # relative value\n sub = np.unravel_index(0,N+1) # reference state (0,0,0)\n Vnew = Vnew - Vnew[sub] \n \n numits = numits+1\n print(\"iteration in PIeva:\", numits)\n return Vnew\n\n\ndef cost_action_1MBS_2SBS(u,Vold,sub,N,Bmax,P,power,Num_queue):\n \"\"\"\n compute the R.H.S. 
of each equation in policy evalution\n \n input:\n u: given control action\n Vold: previous value function\n sub: given request queue state\n N: queue size\n Bmax: maximal arrival of B (depends on the number of users)\n P: arrival probability of B\n power: transmission power of BSs\n Num_queue: number of request queues\n\n output:\n Vnew: value of the R.H.S. in policy evalution\n \"\"\"\n# start_cost=time.time()\n# print(\"current cost_action function\") \n sub_up = [0 for n in np.arange(0,Num_queue)]\n temp = 0\n # enumerate all possible request arrivals \n for ind in np.arange(0,1+np.ravel_multi_index(Bmax,Bmax+1)):\n # subscript of an index\n sub_B = np.unravel_index(ind,Bmax+1)\n # different queue update for Q0, Q1 and Q2\n for n in np.arange(0,Num_queue):\n # for Q0\n if n < M0:\n sub_up[n] = min((u[0] != n+1)*sub[n] + sub_B[n],N[n])\n # for Q1\n elif n < M0 + M1:\n sub_up[n] = min((u[0] != M1_set[n - M0]+1)*(u[1] != M1_set[n - M0]+1)*sub[n] + sub_B[n],N[n])\n # for Q2\n else:\n sub_up[n] = min((u[0] != M2_set[n - M0 - M1]+1)*(u[2] != M2_set[n - M0 - M1]+1)*sub[n] + sub_B[n],N[n])\n\n# one approach of \\prod P, may be computationally complex\n# P_all=np.array([P[n][sub_B[n]] for n in np.arange(0,Num_queue)])\n# temp = temp + np.prod(P_all)*Vold[tuple(sub_up)]\n \n P_prod = 1\n \n for n in np.arange(0,Num_queue):\n P_prod = P_prod*P[n][sub_B[n]]\n \n temp = temp + P_prod*Vold[tuple(sub_up)]\n \n# power_all = [power[n] for n in np.arange(0,2) if u[n]!=0]\n\n# t_cost = time.time() - start_cost\n# print(\"one time of cost_action function:\", t_cost)\n return temp + sum(sub) + np.dot(power,u)\n \n\ndef theta_cost(mu,N,Bmax,P,power,Num_queue):\n \"\"\"\n compute average cost for given policy\n \n input:\n mu: given policy\n N: queue size\n Bmax: maximal arrival of B (depends on the number of users)\n P: arrival probability of B\n power: transmission power of BSs\n Num_queue: number of request queues\n\n output:\n theta_cost: value of average cost\n \"\"\"\n V = PIeva_1MBS_2SBS(mu_o,N,Bmax,P,power,N_queue)\n \n sub = np.unravel_index(2,N+1)\n u = mu[0][sub],mu[1][sub],mu[2][sub]\n\n RHS = cost_action_1MBS_2SBS(u,V,sub,N,Bmax,P,power,Num_queue)\n\n return RHS - V[sub]\n\n\ndef PIimp_1MBS_2SBS(V,sub,N,Bmax,P,power,Num_queue,u_set):\n \"\"\"\n policy improvement step\n \n input:\n V: given value function\n sub: given request request state\n N: queue size\n Bmax: maximal arrival of B (depends on the number of users)\n P: arrival probability of B\n power: transmission power of BSs\n Num_queue: number of request queues\n u_set: feasible action space\n\n output:\n u_set[u_ind_min]: optimal value in current iteration step\n \"\"\"\n sub_up = [0 for n in np.arange(0,Num_queue)]\n vtemp = [0 for n in np.arange(0,len(u_set))]\n\n# power_all=[0 for n in np.arange(0,len(u_set))]\n \n # enumerate all possible actions \n for u_ind, u in enumerate(u_set):\n # enumerate all possible request arrivals \n for ind in np.arange(0,1+np.ravel_multi_index(Bmax,Bmax+1)):\n sub_B = np.unravel_index(ind,Bmax+1)\n # different queue update for Q0 and Q1\n for n in np.arange(0,Num_queue):\n # for Q0\n if n < M0:\n sub_up[n] = min((u[0] != n+1)*sub[n] + sub_B[n],N[n])\n # for Q1\n elif n < M0 + M1:\n sub_up[n] = min((u[0] != M1_set[n - M0]+1)*(u[1] != M1_set[n - M0]+1)*sub[n] + sub_B[n],N[n])\n # for Q2\n else:\n sub_up[n] = min((u[0] != M2_set[n - M0 - M1]+1)*(u[2] != M2_set[n - M0 - M1]+1)*sub[n] + sub_B[n],N[n])\n\n# one approach of \\prod P, may be computationally complex\n# P_all=np.array([P[n][sub_B[n]] for n in 
np.arange(0,Num_queue)])\n# vtemp[u_ind] = vtemp[u_ind] + np.prod(P_all)*V[tuple(sub_up)] \n \n P_prod = 1\n for n in np.arange(0,Num_queue):\n P_prod = P_prod*P[n][sub_B[n]]\n \n vtemp[u_ind] = vtemp[u_ind] + P_prod*V[tuple(sub_up)]\n \n# power_all[u_ind] = [power[n] for n in np.arange(0,2) if u[n]!=0]\n \n# vtemp[u_ind] = vtemp[u_ind]+ sum(sub) + sum(power_all[u_ind])\n vtemp[u_ind] = vtemp[u_ind] + sum(sub) + np.dot(power,u)\n # obtain the optimal action \n u_ind_min = np.argmin(vtemp)\n return u_set[u_ind_min] \n\n\n# system parameter\n\n# number of BSs\nN_BS = 3 \n\n# contents\nM = 2\nM0 = M\nM1 = 1\nM2 = 1\n\n# content set: start with 0 in accordance with request indeices\nM0_set = [n for n in range(0,M0)]\nM1_set = [n for n in range(0,M1)]\nM2_set = [n for n in range(1,M2+1)]\n\n# action set\nU0_set = [n for n in range(1,M0+1)]\nU0_set.insert(0,0)\nU1_set = [n for n in range(1,M1+1)]\nU1_set.insert(0,0)\nU2_set = [n for n in range(2,M2+1+1)]\nU2_set.insert(0,0)\n\n# all possible feasible control actions\nu_set = [[U0_set[u0], U1_set[u1], U2_set[u2]] for u0 in range(len(U0_set)) for u1 in range(len(U1_set)) for u2 in range(len(U2_set)) if U0_set[u0] * (U1_set[u1] + U2_set[u2]) == 0]\n\n# set of SBSs caching content m, i.e., Nm\nNm_set = []\n# 1: SBS1, 2: SBS2\n\nfor m in range(0,M):\n temp = [1,2]\n if m not in M1_set:\n temp.remove(1)\n if m not in M2_set:\n temp.remove(2)\n Nm_set.append(temp)\n \n# number of queues\nN_queue = M0 + M1 + M2\n\n# number of users\nN_user = 3\nK0_set = [1]\nK1_set = [2]\nK2_set = [3]\n\nK0 = len(K0_set)\nK1 = len(K1_set)\nK2 = len(K2_set)\n\nK = [K0, K1, K2]\n\n# can set queue size according to cache state and number of users \nN = np.zeros(N_queue,int)\nfor n in range(0,N_queue):\n if n < M0: \n #N[n] = 4 + (2-len(Nm_set[n]))\n N[n] = 3\n elif n < M0 + M1:\n N[n] = 3\n else:\n N[n] = 3\n \n# power cost, power[0]: MBS; power[1]: SBS1; power[2]: SBS2\npower = np.array([2,1,1])\n\n# popularity profile: Zipf \npara = 0.6\nPopularity = np.array([1/m**para for m in np.arange(1,M+1)])\nPopularity = Popularity / np.sum(Popularity)\n\n# request arrval\nAmax = 1 # each user request at most one content\n\n# construct B based on A, see request queue dynamics\n\"\"\" \n Bmax[n] n (0--M0-1): request for centent n can only be served by MBS\n Bmax[n] n (M0--M0+M1-1): request for conent m can be served by MBS and SBS1\n Bmax[n] n (M0+M1,M0+M1+M2-1): request for conent m can be served by MBS and SBS2\n\"\"\"\nBmax = np.zeros(N_queue,int)\nfor n in range(0,N_queue):\n if n < M0: \n Bmax[n] = K0 + K1 * (2-len(Nm_set[n]))\n elif n < M0 + M1:\n Bmax[n] = K1\n else:\n Bmax[n] = K2\n\n# calculate probality for B\nP = []\nfor n in np.arange(0,N_queue):\n P.append(np.zeros(Bmax[n]+1))\n \nfor n in np.arange(0,N_queue):\n # mapping the index to to the content index in the cache \n \n if n < M0:\n idx_content = n\n elif n < M0 + M1:\n idx_content = M1_set[n - M0]\n else:\n idx_content = M2_set[n - M0 - M1]\n \n for m in np.arange(0,Bmax[n]+1): \n P[n][m] = comb(Bmax[n],m)*(Popularity[idx_content]**m)*((1-Popularity[idx_content])**(Bmax[n]-m))\n \nnum_of_iter = 1\n\nt = []\n\nfor num_avg in np.arange(0,num_of_iter):\n mu_n_0 = np.zeros(N+1,np.int) # MBS\n mu_n_1 = np.zeros(N+1,np.int) # SBS1\n mu_n_2 = np.zeros(N+1,np.int) # SBS2\n \n mu_o_0 = np.ones(N+1,np.int) # MBS\n mu_o_1 = np.ones(N+1,np.int) # SBS1\n mu_o_2 = np.ones(N+1,np.int) # SBS2\n\n mu_n = np.array([mu_n_0, mu_n_1, mu_n_2]) \n mu_o = np.array([mu_o_0, mu_o_1, mu_n_2])\n \n V = np.zeros(N+1)\n \n theta_o = 0\n 
theta_n = 1\n num = 0\n mu_temp = []\n while np.any(np.not_equal(mu_n,mu_o)) and theta_o > theta_n:\n\n theta_o = theta_cost(mu_o,N,Bmax,P,power,N_queue)\n mu_o = copy.deepcopy(mu_n)\n \n # policy evaluation \n print(\"PI evaulation\")\n start_eva = time.time() \n V = PIeva_1MBS_2SBS(mu_o,N,Bmax,P,power,N_queue)\n t_eva = time.time()-start_eva\n print(\"one iteration of PIeva:\", t_eva) \n \n # policy improvement\n print(\"PI structured improvement\") \n start_imp = time.time()\n num_flag = 0\n # enumerate all possible request queue states \n for ind in np.arange(0,1+np.ravel_multi_index(N,N+1)):\n\n sub = np.unravel_index(ind,N+1)\n\n flag = 1\n for n in np.arange(0,N_queue):\n # sub_de: Q_prime in Structured policy improvement \n if sub[n] > 0:\n sub_de = np.subtract(sub, np.eye(N_queue,dtype=int)[n,:])\n # find the corresponding request queue (n,m)\n if n < M0:\n idx_BS = 0\n idx_content = n\n elif n < M0 + M1:\n idx_BS = 1\n idx_content = M1_set[n - M0]\n else:\n idx_BS = 2\n idx_content = M2_set[n - M0 - M1]\n \n# print (n,sub,sub_de, idx_BS, idx_content)\n if mu_o[idx_BS][tuple(sub_de)] == idx_content + 1:\n mu_n_0[sub], mu_n_1[sub], mu_n_2[sub] = mu_o[0][tuple(sub_de)], mu_o[1][tuple(sub_de)], mu_o[2][tuple(sub_de)]\n # no need to call PIimp_1MBS_2SBS\n flag = 0\n num_flag = num_flag + 1\n break\n \n if flag == 1: \n mu_n_0[sub], mu_n_1[sub], mu_n_2[sub] = PIimp_1MBS_2SBS(V,sub,N,Bmax,P,power,N_queue, u_set)\n \n mu_n = np.array([mu_n_0, mu_n_1, mu_n_2]) \n \n # record the improved policy in each iteration \n# mu_temp.append(mu_n)\n # compare the difference between the two policies in adjacent iterations\n diff = np.count_nonzero(mu_n-mu_o)\n print(\"number of diffences:\", diff)\n \n t_imp = time.time()-start_imp\n print(\"one iteration of PI structured imp:\", t_imp) \n print(\"number of spi_imp:\", num_flag)\n num = num+1\n print(\"num:\", num) \n \n # compare the average costs of the two policies in adjacent iterations\n theta_n = theta_cost(mu_n,N,Bmax,P,power,N_queue)\n print(\"theta_o:\", theta_o)\n print(\"theta_n:\", theta_n) \n \n t.append(t_eva+t_imp)\n \nprint(\"total time:\", sum(t))\n\nfile_name = [\"SPIA_M0_\",str(M0), \"_M1_\",str(M1), \"_M2_\", str(M2), \"_SBS_2_N\", str(N[0]),time.strftime(\"_%Y%m%d_%H_%M\"), \".mat\"]\nfile_name = \"\".join(file_name)\nsio.savemat(file_name,{\"mu_SPIA0\":mu_n_0,\"mu_SPIA1\":mu_n_1, \"mu_SPIA2\":mu_n_2, \"V_SPIA\":V,\"t_SPIA\":t,\"num_SPIA\": num})\n","sub_path":"1MBS_2SBS_SPIA.py","file_name":"1MBS_2SBS_SPIA.py","file_ext":"py","file_size_in_byte":12946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"4288771","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules\n# Copyright (C) 2020-2021, Yoel Cortes-Pena \n# Bioindustrial-Park: BioSTEAM's Premier Biorefinery Models and Results\n# Copyright (C) 2021, Yalin Li \n#\n# This module is under the UIUC open-source license. See\n# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt\n# for license details.\n\n'''\nSettings for cornstover (cs), sugarcane (sc), and lipidcane (lc) biorefineries.\n\nReferences\n----------\n[1] Hossain et al. 
Techno-Economic Evaluation of Heat Integrated\nSecond Generation Bioethanol and Furfural Coproduction.\nBiochemical Engineering Journal 2019, 144, 89–103.\nhttps://doi.org/10.1016/j.bej.2019.01.017.\n\n[2] Davis et al., Process Design and Economics for the Conversion of Lignocellulosic\nBiomass to Hydrocarbon Fuels and Coproducts: 2018 Biochemical Design Case Update;\nNREL/TP-5100-71949; National Renewable Energy Lab (NREL), 2018.\nhttps://doi.org/10.2172/1483234.\n\n[3] Shoener et al., Design of Anaerobic Membrane Bioreactors for the\nValorization of Dilute Organic Carbon Waste Streams.\nEnergy Environ. Sci. 2016, 9 (3), 1102–1112.\nhttps://doi.org/10.1039/C5EE03715H.\n\n.. note::\n    The cornstover biorefinery uses 2011 USD whereas sugarcane and lipidcane\n    biorefineries use 2013 USD, thus ideally prices in the `new_price` dict\n    should be adjusted accordingly. However, since the calculated MESPs are only\n    used for comparison between biorefineries with the new wastewater treatment\n    process vs. without/with the original process, this will not affect the\n    conclusions.\n'''\n\n__all__ = (\n    'cs_price',\n    'lc_price',\n    'new_price',\n    'load_cs_settings',\n    'load_sc_settings',\n    'load_lc_settings'\n    )\n\nfrom biorefineries import cornstover as cs\nfrom biorefineries.cornstover._process_settings import (\n    price as cs_price,\n    load_process_settings as load_cs_settings,\n    )\n\nfrom biorefineries.sugarcane._process_settings import \\\n    load_process_settings as load_sc_settings\n\nfrom biorefineries.lipidcane._process_settings import (\n    price as lc_price,\n    load_process_settings as load_lc_settings\n    )\n\n# from biorefineries.utils import auom\nfrom utils import auom\n_lb_per_kg = auom('lb').conversion_factor('kg')\n_GDP_2007to2016 = 1.160\n\nnew_price = { # $/kg unless otherwise noted\n    'Wastewater': -0.03, # ref [1], negative value for cost from product,\n    'NaOCl': 0.14, # $/L\n    'CitricAcid': 0.22, # $/L\n    'Bisulfite': 0.08, # $/L\n    'Caustics': cs.caustic.price,\n    'Polymer': 2.6282 / _lb_per_kg / _GDP_2007to2016, # ref [2]\n    }","sub_path":"BioSTEAM 2.x.x/biorefineries/wwt/_settings.py","file_name":"_settings.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"431234526","text":"import dim\nfrom ..operate import Operate\nfrom ..constant import Constant\n\nclass Conv2dOperate(Operate):\n    def __init__(self,left,right,args=None,name=None):\n        super(Conv2dOperate,self).__init__(left,right,\"conv2d\",args,name)\n    \n    def partGrad(self,partial,prevOp):\n        if (partial.type!=\"Variable\"): raise Exception(\"the partial argument must be of type Variable\")\n        if (self.catch and self._grads.get(partial.name,None)): return self._grads[partial.name]\n        if (prevOp is None): prevOp=Constant(dim.ones(self.eval().shape))\n        \n        dLeft=dim.autograd.ConvTranspose2dOperate.wrapper(prevOp,self.right,self.args)\n        temp1 = prevOp.eval().swapaxes(0,1)\n        temp2 = self.left.eval().swapaxes(0,1)\n        temp3 = dim.nn.functional.conv2d(temp2,temp1)\n        dRight = Constant(temp3.swapaxes(0,1))\n        if (self.left.name==partial.name):\n            part1 = dLeft\n            part2=self.right.partGrad(partial,dRight)\n        elif (self.right.name==partial.name):\n            part1 = dRight\n            part2=self.left.partGrad(partial,dLeft)\n        else:\n            part1=self.left.partGrad(partial,dLeft)\n            part2=self.right.partGrad(partial,dRight)\n\n        part3=dim.autograd.AddOperate.wrapper(part1,part2)\n        rst = part3\n        self._grads[partial.name]=rst\n        return rst \n    \n    def expression(self):\n        if (self.catch and self._expressionStr): 
return self._expressionStr\n rst = \"conv2d(\"+self.left.expression()+\",\"+self.right.expression()+\")\"\n self._expressionStr = rst\n return rst\n\n def eval(self):\n if (self.catch and self._data is not None): return self._data\n rst=dim.nn.functional.conv2d(self.left.eval(),self.right.eval())\n self._data = rst\n return rst\n \n @staticmethod\n def wrapper(left,right,args=None,name=None):\n return Conv2dOperate(left,right,args,name)\n","sub_path":"dim/autograd/modules/conv2dOperate.py","file_name":"conv2dOperate.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"190597434","text":"#!/usr/bin/env python\nimport os\nfrom bs4 import BeautifulSoup as BS\n\ndata_loc = 'data/cwec_v4.2.xml'\n\ncolumns = [\n 'description',\n 'extended_description',\n 'modes_of_introduction',\n 'common_consequences',\n 'potential_mitigations',\n]\n\nwith open(data_loc) as f:\n soup = BS(f.read(), 'lxml')\n\nfor column in columns:\n for tag in soup.find_all(column):\n print(tag.text)\n","sub_path":"misc/extract_cwe_corpus.py","file_name":"extract_cwe_corpus.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"506566464","text":"import unittest\r\n\r\nfrom testfixtures import LogCapture\r\n\r\nfrom dsciqcm.core import _S3Loc\r\nfrom dsciqcm.prisource import _PreProdHHWeight\r\n\r\nDATE = \"2018-09-23\"\r\nDMA = 501\r\nEMPTY_START = \"2019-01-31\"\r\nEMPTY_END = \"2019-02-01\"\r\nMISSING_START = \"2019-01-30\"\r\nMISSING_END = \"2019-02-01\"\r\n\r\n# Original path is only accessible via Databricks site\r\n# Date range for copied parquet is 2018-08-22 -- 2019-01-31\r\nPATH = (\r\n \"useast1-nlsn-w-digital-dsci-dev/data/users/vargab01/dsciqcm/\"\r\n \"spark_warehouse_ppm_preprod/ldw_preprod_ppm_spark_parquet/\"\r\n \"household_weights/\")\r\n\r\n\r\nclass PreProdHHWeightMod(_PreProdHHWeight):\r\n\r\n def __init__(self, start, end, dmas):\r\n super().__init__(start, end, dmas)\r\n self._s3loc = _S3Loc(PATH)\r\n\r\n\r\nclass TestPreProdHHWeight(unittest.TestCase):\r\n\r\n def test_pass(self):\r\n table = PreProdHHWeightMod(DATE, DATE, [DMA])\r\n with self.assertLogs(\"e2eqc\", \"INFO\"):\r\n self.assertTrue(table.run_qc())\r\n\r\n def test_empty_data(self):\r\n message = (\r\n \"s3://{}: No data exists for specified dates and/or DMAs\"\r\n .format(PATH))\r\n table = PreProdHHWeightMod(\"1996-10-15\", \"1996-10-15\", [501])\r\n with LogCapture() as log:\r\n self.assertFalse(table.run_qc())\r\n log.check_present((\"e2eqc\", \"ERROR\", message))\r\n\r\n def test_empty_dates(self):\r\n message = \"s3://{}: No dates present for specified range\".format(PATH)\r\n table = PreProdHHWeightMod(EMPTY_START, EMPTY_END, [DMA])\r\n with LogCapture() as log:\r\n self.assertFalse(table.run_qc())\r\n log.check_present((\"e2eqc\", \"ERROR\", message))\r\n\r\n def test_missing_date(self):\r\n message = \"s3://{}: One or more missing dates\".format(PATH)\r\n table = PreProdHHWeightMod(MISSING_START, MISSING_END, [DMA])\r\n with LogCapture() as log:\r\n self.assertFalse(table.run_qc())\r\n log.check_present((\"e2eqc\", \"WARNING\", message))\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","sub_path":"test/test_preprod_hh_weight.py","file_name":"test_preprod_hh_weight.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"112269883","text":"\"\"\"\nDemo of a line plot on a polar axis.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import png\n#import itertools\nfrom scipy.io import loadmat\n#from scipy.ndimage import filters,zoom\n#import skimage\n#from skimage import io,color\n#from PIL import Image\n#import pyfits\nfrom matplotlib.backends.backend_pdf import PdfPages\n\ndarktemp = loadmat('../../scratch/nh_dark_analysis_fig1.mat')\n\nfig=plt.figure(figsize=(10.9,5))\n\n\nax = fig.add_subplot(1,2,1)\n\np3,=ax.semilogx(np.squeeze(darktemp['mydates']),np.squeeze(darktemp['myfunc']),linestyle='-',color='skyblue',linewidth=5)\n\np1,=ax.semilogx(darktemp['darkdate'],darktemp['darktemp'],marker='o',linestyle='',color='blue')\np2,=ax.semilogx(darktemp['lightdate'],darktemp['lighttemp'],marker='o',linestyle='',color='red')\n\nax.semilogx(np.squeeze(darktemp['cover']),[190,225],linestyle='--',color='black')\n\nax.annotate('Cover Ejected',xy=(220,202),xycoords='data',\\\n xytext=(220,202),textcoords='data',\\\n horizontalalignment='right',verticalalignment='top',\\\n rotation=90)\n\nax.set_ylim([190,225])\nax.set_xlim([50,3000])\nax.set_xlabel('Time from Launch (days)')\nax.set_ylabel('Detector Temperature (K)')\n\nax.legend([p1,p2,p3],['Cover-on Data','Cover-off Data','Exponential Decrease'],\\\n frameon=False,numpoints=1,fontsize=12)\n\ndref = loadmat('../../scratch/nh_dark_analysis_fig6.mat')\n\naxa = fig.add_subplot(1,2,2)\n\nq3,=axa.plot(np.squeeze(dref['tccd']),np.squeeze(dref['modelone']),linestyle='--',color='midnightblue',linewidth=1)\nq4,=axa.plot(np.squeeze(dref['tccd']),np.squeeze(dref['modeltwo']),linestyle='-',color='midnightblue',linewidth=1)\n\naxa.errorbar(np.squeeze(dref['lighttempm']),np.squeeze(dref['lightrefm'][:,0]),yerr=np.squeeze(dref['lightrefm'][:,1]),xerr=0.15*np.ones(4),marker='o',color='red',linestyle='')\naxa.errorbar(np.squeeze(dref['darktempm']),np.squeeze(dref['darkrefm'][:,0]),yerr=np.squeeze(dref['darkrefm'][:,1]),xerr=0.15*np.ones(4),marker='o',color='blue',linestyle='')\n\naxa.set_xlim([190,225])\naxa.set_ylim([539,549])\n\naxa.plot([190,225],[np.squeeze(dref['meanvref']),np.squeeze(dref['meanvref'])],\\\n linestyle='--',color='red')\naxa.plot([190,225],[np.squeeze(dref['meanvref']+dref['sigvref']),\\\n np.squeeze(dref['meanvref']+dref['sigvref'])],\\\n linestyle=':',color='red')\naxa.plot([190,225],[np.squeeze(dref['meanvref']-dref['sigvref']),\\\n np.squeeze(dref['meanvref']-dref['sigvref'])],\\\n linestyle=':',color='red') \n\naxa.set_xlabel('Detector Temperature (K)')\naxa.set_ylabel('Mean Value of Reference Pixels (DN)')\n\naxa.legend([p1,p2,q4,q3],['Cover-on Data','Cover-off Data','Model Fit',\\\n 'Expected Performance'],\\\nframeon=False,numpoints=1,fontsize=12,loc=2)\n\n\nplt.tight_layout(w_pad=5.5)\n\n#plt.show()\npdf = PdfPages('nh_plot_reference.pdf')\npdf.savefig()\npdf.close()\n","sub_path":"py/ebl_paper/nh_plot_reference.py","file_name":"nh_plot_reference.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"290170936","text":"#from unicurses import *\nfrom enum import Enum\nimport d_curses as display\nimport random\nimport textwrap\nimport logging\n\nclass Direction(Enum):\n North = 0\n Northeast = 1\n East = 2\n Southeast = 3\n South = 4\n Southwest = 5\n West = 6\n Northwest = 7\nclass Action(Enum):\n Start = 0\n Move = 1\n Attack = 2\nclass Tile(Enum):\n Ground = 0\n Tree = 1\n Wall = 2\n Door = 3\n Window = 4\n Water = 5\n Brambles = 6\n 
Capture = 7\nclass Result(Enum):\n Good = 0\n Bad = 1\n Cant = 2\nclass Posture(Enum):\n Standing = 0\n Prone = 1\n\nplayer1name = \"Player One\"\nplayer2name = \"Player Two\"\n\nclass PlayerType:\n def __init__(self, ident, name):\n self.ident = ident\n self.name = name\n self.move = 5\n self.combat = 3\n self.strength = 3\n self.ranged = 0\n self.power = 0\n self.evade = 0\n self.health = 5\n self.defense = 0\n self.traits = []\n\nclass Player:\n def __init__(self, mytype):\n self.type = mytype\n self.name = \"anonymous\"\n\n self.x = -1\n self.y = -1\n self.side = -1\n self.action = Action.Start\n self.done = False\n self.moved = 0\n self.health = self.type.health\n self.posture = Posture.Standing\n\n\nclass Messages:\n def __init__(self):\n self.msgs = []\n self.wrap = textwrap.TextWrapper(width=30,subsequent_indent=\" \")\n def get(self,idx):\n if idx < 0 or idx >= len(self.msgs):\n return ''\n return self.msgs[len(self.msgs)-1-idx]\n def add(self,msg):\n ml = self.wrap.wrap(msg)\n for l in ml:\n self.msgs.append(l)\n\n\ndef d6(count = 1):\n total = 0\n while count > 0:\n total += random.randint(1,6)\n count -= 1\n return total\n\nclass Board:\n WIDTH = 24\n HEIGHT = 24\n def __init__(self):\n self.turnno = 1 # current turn number\n self.turn = 0 # player number whose turn it is\n self.board = [Tile.Ground] * (Board.WIDTH * Board.HEIGHT)\n self.players = []\n self.types = {}\n\n self.flag1x = 2\n self.flag1y = 8\n self.flag1 = None\n self.flag2x = 28\n self.flag2y = 8\n self.flag2 = None\n\n self.log = Messages()\n\n def addType(self, type):\n self.types[type.ident] = type\n def getType(self, ident):\n if ident in self.types:\n return self.types[ident]\n return None\n\n def endTurn(self):\n if self.turn == 0:\n self.turn = 1\n else:\n self.turn = 0\n self.turnno += 1\n for p in self.players:\n p.action = Action.Start\n p.done = False\n p.moved = 0\n\n def valid(self, x, y):\n if x < 0 or y < 0:\n return False\n if x >= Board.WIDTH or y >= Board.HEIGHT:\n return False\n return True\n def coord(self, x, y):\n if not self.valid(x,y):\n return None\n return x + y * Board.WIDTH\n def shift(self, x, y, dir):\n if dir == Direction.North:\n y -= 1\n elif dir == Direction.East:\n x += 1\n elif dir == Direction.South:\n y += 1\n elif dir == Direction.West:\n x -= 1\n elif dir == Direction.Northwest:\n x -= 1\n y -= 1\n elif dir == Direction.Northeast:\n x += 1\n y -= 1\n elif dir == Direction.Southeast:\n x += 1\n y += 1\n elif dir == Direction.Southwest:\n x -= 1\n y += 1\n return x, y\n\n def unitCount(self, forSide = -1):\n total = 0\n for unit in self.players:\n if forSide == -1 or unit.side == forSide:\n total += 1\n return total\n def unitsUnmoved(self, forSide):\n total = 0\n for unit in self.players:\n if unit.side == forSide and unit.action == Action.Start:\n total += 1\n return total\n\n def playerAt(self, x, y):\n for ply in self.players:\n if ply.x == x and ply.y == y:\n return ply\n return None\n def at(self, x, y):\n pos = self.coord(x,y)\n if pos == None:\n return \"?\"\n return self.board[pos]\n def isOpen(self, x, y):\n if not self.valid(x,y):\n return False\n if self.at(x,y) in (Tile.Wall, Tile.Window, Tile.Tree):\n return False\n if self.playerAt(x,y) != None:\n return False\n return True\n def isAdjacent(self, x, y, notside):\n for dy in range(-1,2):\n for dx in range(-1,2):\n unit = self.playerAt(x+dx,y+dy)\n if unit != None and unit.side != notside:\n return True\n return False\n\n def tryAttack(self, actor, dir):\n x, y = self.shift(actor.x, actor.y, dir)\n target = 
self.playerAt(x,y)\n\n        # make sure there's someone in the target space and that they're not\n        # on the same side\n        if target == None or target.side == actor.side:\n            return Result.Cant\n\n        # start the log message\n        bonus = []\n        roll = d6()\n        bonus.append(\"{}: Base Roll. \".format(roll))\n        bonus.append(\"+{}: Attacker's Combat. \".format(actor.type.combat))\n        bonus.append(\"-{}: Defender's Evade. \".format(target.type.evade))\n        roll += actor.type.combat\n        roll -= target.type.evade\n        self.log.add(\"Combat attack: need 4+, got {}.\".format(roll))\n        for line in bonus:\n            self.log.add(line)\n\n        # determine the result\n        actor.action = Action.Attack\n        if roll <= 1:\n            # a fumble hurts the attacker\n            self.log.add(\"Fumbled attack.\")\n            self.doDamage(actor, target.type.strength - actor.type.defense)\n        elif roll <= 3:\n            self.log.add(\"Attack failed.\")\n        else:\n            # a hit damages the defender\n            self.doDamage(target, actor.type.strength - target.type.defense)\n\n        return Result.Bad\n    def doDamage(self, target, amount):\n        if amount < 1:\n            amount = 1\n        target.health -= amount\n        self.log.add(\"{} took {} damage.\".format(target.name, amount))\n\n    def tryMove(self, actor, dir):\n        x, y = self.shift(actor.x, actor.y, dir)\n        cost = 1\n\n        # check to see if we can even go in the specified direction\n        if not self.isOpen(x,y):\n            return Result.Cant\n        # check to make sure we're not out of movement\n        if actor.moved >= actor.type.move:\n            return Result.Cant\n\n        oldX = actor.x\n        oldY = actor.y\n        oldTile = self.at(actor.x, actor.y)\n        # more expensive to move out of water\n        if oldTile == Tile.Water:\n            cost += 1\n        # more expensive to move if adjacent\n        if self.isAdjacent(oldX, oldY, actor.side):\n            cost += 1\n        # more expensive to stand up first\n        if actor.posture == Posture.Prone:\n            cost += 1\n            actor.posture = Posture.Standing\n        # update unit\n        actor.moved += cost\n        actor.x = x\n        actor.y = y\n        actor.action = Action.Move\n\n        # check if we're adjacent; if we are, make a withdrawal roll\n        if self.isAdjacent(oldX, oldY, actor.side):\n            msg = []\n            roll = d6()\n            result = roll\n\n            msg.append(\"Withdrawal: need 4+, got \")\n            if self.isAdjacent(x,y,actor.side):\n                result -= 1\n                msg.append(str(result))\n                msg.append(\" (rolled \")\n                msg.append(str(roll))\n                msg.append(\", -1 for entering adjacency\")\n                msg.append(\")\")\n            else:\n                msg.append(str(roll))\n            self.log.add(\"\".join(msg))\n\n            if result <= 3:\n                self.log.add(\"Fell over.\")\n                actor.posture = Posture.Prone\n                actor.done = True\n                return Result.Bad\n\n        # if we exited brambles, check for falling over\n        if oldTile == Tile.Brambles:\n            roll = d6()\n            self.log.add(\"Escape brambles: need 2+, got \"+str(roll))\n            if roll == 1:\n                self.log.add(\"Fell over.\")\n                actor.done = True\n                actor.posture = Posture.Prone\n                return Result.Bad\n\n        if actor.moved >= actor.type.move:\n            self.log.add(\"Out of movement.\")\n            actor.done = True\n            return Result.Bad\n        return Result.Good\n","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":8594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"564706157","text":"#-*- encoding: UTF-8 -*-\n\n'''\nCreated on 2012-6-7\n\n@author: wicki\n'''\n\nimport nothing\nimport model\nimport web\nimport json\n\nimport config\n\nclass Favorite():\n    def POST(self):\n        user = nothing.get_current_user()\n        data_input = web.input()\n        item_id = data_input.item_id\n        pyDict = {'result':'fail'}\n        if user:\n            model.db.insert('favorite',item_id=item_id,username=user.username)\n            sql = \"update item set favorite=favorite+1 where id = %s\" % item_id\n            model.db.query(sql)\n            pyDict = {'result':True,'item_id':item_id}\n        web.header('Content-Type', 'application/json')\n        return json.dumps(pyDict)\n#        raise web.seeother('/signin')\n\nclass Unfavorite():\n    def POST(self):\n        user = nothing.get_current_user()\n        pyDict = {'result':'fail'}\n        if user:\n            data_input = web.input()\n            item_id = data_input.item_id\n            model.db.delete('favorite', where=\"item_id=$item_id\", vars=locals())\n            sql = \"update item set favorite=favorite-1 where id = %s\" % item_id\n            model.db.query(sql)\n            pyDict = {'result':False,'item_id':item_id}\n            web.header('Content-Type', 'application/json')\n            return json.dumps(pyDict)\n        web.header('Content-Type', 'application/json')\n        return json.dumps(pyDict)\n#        raise web.seeother('/signin')\n\nclass View():\n    def GET(self,item_id):\n        items = model.db.select('item',where='id=$item_id',vars=locals())\n        if len(items)>0:\n            item = items[0]\n            item['favorited'] = False\n            user = nothing.get_current_user()\n            if user:\n                fs = model.db.query(\"select * from favorite where item_id=%d and username='%s'\" % (int(item_id),user.username))\n                item['favorited'] = len(fs)>0\n            return nothing.render('item.html', {'item':item})\n        return 'sorry,item not found'\n\nclass Tag():\n    def GET(self,tag):\n        data = web.input()\n        page = 1\n        item_num = config.item_num\n        if 'page' in data:\n            page = int(data.page)\n        sql = \"select * from item where tag = '%s' LIMIT %d,%d\" % (tag, (page-1)*item_num, (page-1)*item_num+item_num+1)\n        items = [item for item in model.db.query(sql)]\n        hasnext = len(items)>item_num\n        if hasnext:\n            items.pop()\n        return nothing.render('tag.html', {'items':items,'hasnext':hasnext,'haspre':page>1})","sub_path":"item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"547998732","text":"'''\nGiven n pieces of wood with length L[i] (integer array). Cut them into small pieces to guarantee you could have equal or more than k pieces with the same length. What is the longest length you can get from the n pieces of wood? Given L & k, return the maximum length of the small pieces.\n\nExample\nFor L=[232, 124, 456], k=7, return 114.\n\nChallenge\nO(n log Len), where Len is the longest length of the wood.\n'''\nclass Solution:\n    \"\"\"\n    @param L: Given n pieces of wood with length L[i]\n    @param k: An integer\n    return: The maximum length of the small pieces.\n    \"\"\"\n    def woodCut(self, L, k):\n        # write your code here\n\n        if not L:\n            return 0\n\n        if k <= 0:\n            return 0\n\n        start, end = 0, max(L)\n        while start + 1 < end:\n            # floor division keeps the candidate length an integer on Python 3\n            mid = start + (end - start) // 2\n            if sum(map(lambda x : x // mid, L)) >= k:\n                start = mid\n            else:\n                end = mid\n\n        if sum(map(lambda x : x // end, L)) >= k:\n            return end\n        else:\n            return start\n'''\nAlgorithm: binary search.\n\nIdea:\n\nHere the binary search runs over the answer itself.\nWe want the longest feasible length, so we binary-search over candidate lengths. To do that we need an upper and a lower bound on the length; we then bisect, and the feasibility check at each step tells us whether to grow or shrink the candidate.\nFor each candidate length, count how many pieces the wood can be cut into at that length and test whether the count is at least k. If it is, try a larger length; if not, try a smaller one.\nThe problem also relates to a simpler idea - enumeration:\nfor the answer we want, try every possible value and evaluate it.\nThe natural train of thought is enumeration first, then optimizing it with binary search.\n'''","sub_path":"ladder_basic/2_Binary_Search&LogN_Algorithm/183. Wood Cut.py","file_name":"183. 
Wood Cut.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"469484917","text":"from node import Node\nfrom que import Queue\n\ndef bfs(board, start, goal):\n #Matrix that contains all the nodes already evauluated.\n closedSet = []\n # The set of currently discovered nodes that are not evaluated yet.\n # Initially, only the start node is known.\n openSet = Queue()\n openSet.enqueue(start)\n\n #Check while length of openList is not empty, else we cant find path\n while openSet.size != 0 :\n #Change current node to the next in the que.\n current = openSet.dequeue()\n\n #Put the current node on to the closedSet, because we are done with it. \n closedSet.append(current)\n\n #End condition.\n if(current.end):\n #Get path from the nodes cameFrom variable\n path = reconstructPath(current, board)\n return path, openSet.items, closedSet\n \n #If there are no neighbours from the current node, look for them\n if (len(current.neighbours) == 0):\n neighbours(board.matrix,current)\n\n #Check the neighbours of our current node\n for neighbour in current.neighbours:\n if neighbour in closedSet:\n #Ignore this node, since it has already been evaluated.\n continue\n\n #Check if openSet does not contain this neighbour.\n if not cointains(openSet.items, neighbour):\n #if not, put neighbour on queue.\n openSet.enqueue(neighbour)\n #We need to check if this neighbour is the goial.\n neighbour.cameFrom = current\n\n #BFS do not care about the distances, g_value etc. No need to computer\n\n return False\n \ndef cointains(array, other):\n for node in array:\n if(node == other):\n return True\n return False\n\ndef neighbours(matrix, current):\n \"\"\"put neighbours in the current node. 
From the matrix of nodes\"\"\"\n #Neighbours are in col +- 1 and row +-1\n col = current.col\n row = current.row\n #Also need to check if the index is smaller than the index value because of loop around with negative indexes.\n cols = len(matrix[0]) - 1\n rows = len(matrix) - 1\n\n \"\"\"\n rows\\cols\n __________________________ \n | [row-1][col]\n |[row+1][col-1] [row][col] [row+1][col+1]\n [row+1][col]\n \"\"\"\n\n #Add the 4 potential neighbours\n if(col != cols): #Know we are not at the edge of the matrix, and can safly add the node to the left\n #if we are at the edge of matrix, cant add from this direction to the right\n if(matrix[row][col+1].char != \"#\"):\n current.neighbours.append(matrix[row][col+1])\n\n if(row != rows): #Know we are not at the edge of the matrix downards\n #if we are at the edge of matrix, cant add from this direction downwards\n if(matrix[row+1][col].char != \"#\"):\n current.neighbours.append(matrix[row+1][col])\n \n if(row != 0): #if we are at the edge of matrix, cant add from this direction upwards\n if(matrix[row-1][col].char != \"#\"):\n current.neighbours.append(matrix[row-1][col])\n \n if(col != 0): #if we are at the edge of matrix, can't add from this direction to the left.\n if(matrix[row][col-1].char != \"#\"):\n current.neighbours.append(matrix[row][col-1])\n \n return\n\n\ndef reconstructPath(current, board):\n path = []\n while(not current.cameFrom == board.start):\n path.append(current)\n current = current.cameFrom\n\n path.append(current)\n return list(reversed(path))","sub_path":"part_3/BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"264752939","text":"import cassiopeia.type.core.common\r\nimport cassiopeia.type.api.store\r\n\r\nload_policy = cassiopeia.type.core.common.LoadPolicy.eager\r\ndata_store = cassiopeia.type.api.store.Cache()\r\n\r\n\r\ndef call_with_ensured_size(method, max_size, arg):\r\n \"\"\"Breaks a list of arguments up into chunks of a maximum size and calls the given method on each chunk\r\n\r\n method function the method to call\r\n max_size int the maximum number of arguments to include in a single call\r\n arg any | list the arguments to split up\r\n\r\n return list | dict the combined results of the function calls on each chunk\r\n \"\"\"\r\n if not isinstance(arg, list) or len(arg) <= max_size:\r\n return method(arg)\r\n\r\n results = method(arg[0:max_size])\r\n i = max_size\r\n\r\n if isinstance(results, list):\r\n while(i < len(arg)):\r\n sublist = arg[i:i + max_size]\r\n results = results + method(sublist)\r\n i += max_size\r\n elif isinstance(results, dict):\r\n while(i < len(arg)):\r\n sublist = arg[i:i + max_size]\r\n results.update(method(sublist))\r\n i += max_size\r\n return results\r\n","sub_path":"api/cassiopeia/cassiopeia/core/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"210328853","text":"import falcon\n# from ansible_service.run import Runner\n\n\nclass AnsiRunner(object):\n\n def __init__(self):\n self.msg = \"Yo\"\n\n def on_get(self, req, resp):\n response = {\n \"message\": \"Service up!\",\n \"msg\": str(self.msg)\n }\n resp.body = str(response)\n resp.status = falcon.HTTP_200\n\n def on_post(self, req, resp):\n response = {\n \"req obj type\": str(type(req)),\n \"test\": \"working\",\n \"hello param\": req.get_param('hello'),\n 
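            # --- illustrative aside (editor's sketch, not from the original corpus) ---
            # the call_with_ensured_size helper in the cassiopeia record above
            # batches an oversized argument list into max-size calls; a
            # hypothetical usage, with fetch_names standing in for any API call:
            #   def fetch_names(ids):
            #       return {i: 'name-%s' % i for i in ids}
            #   combined = call_with_ensured_size(fetch_names, 3, list(range(10)))
            #   assert len(combined) == 10  # four chunked calls, dicts merged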
\"protocol\": req.protocol,\n }\n resp.body = str(response)\n","sub_path":"ansible_service/runner_routes.py","file_name":"runner_routes.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"246572161","text":"#!/usr/bin/python3\nimport re\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nimport sys\nimport getopt\nimport string\nimport os\nimport _pickle as pickle\nimport math\nfrom queue import PriorityQueue\nfrom utility import tokenize, add_skip_ptr\n\npunc = string.punctuation\nblock_count = 0 # running count of the number of blocks\nmax_len = 0\nDOC_IDS = []\nBLOCKS = \"blocks\"\n\ndef usage():\n print(\"usage: \" + sys.argv[0] + \" -i directory-of-documents -d dictionary-file -p postings-file\")\n\ndef build_index(in_dir, out_dict, out_postings):\n \"\"\"\n build index from documents stored in the input directory,\n then output the dictionary file and postings file\n \"\"\"\n print('indexing...')\n # This is an empty method\n # Pls implement your code in below\n os.makedirs(BLOCKS, exist_ok=True)\n \n limit = 20\n doc_list = os.listdir(in_dir)\n doc_chunks = [doc_list[i * limit:(i + 1) * limit] for i in range((len(doc_list) + limit - 1) // limit)]\n for chunk in doc_chunks:\n spimi_invert(chunk, in_dir)\n \n f = open(out_dict, 'w+')\n f.close()\n f = open(out_postings, 'w+')\n f.close()\n\n offset = log_doc_ids(out_dict, out_postings)\n merge(BLOCKS, out_dict, out_postings, offset)\n\ndef log_doc_ids(out_dict, out_postings):\n '''\n Collecting all docIDs to support NOT queries in search phase\n '''\n global DOC_IDS\n DOC_IDS.sort()\n\n str_form = ''\n for doc_id in DOC_IDS:\n str_form += str(doc_id) + ' '\n \n # (doc_frequency, absolute_offset, accumulative_offset)\n dict_expr = \"* 0 0 \" + str(len(str_form)) + \"\\n\"\n \n write_to_file(out_dict, dict_expr)\n write_to_file(out_postings, str_form)\n \n return len(str_form)\n\ndef spimi_invert(chunk, in_dir):\n '''\n Executes SPIMI Invert algorithm for each chunk of documents\n '''\n global block_count, DOC_IDS\n\n index = {}\n for entry in chunk:\n DOC_IDS.append(int(entry))\n full_path = os.path.join(in_dir, entry)\n if os.path.isfile(full_path):\n file = open(full_path, \"r\")\n doc = file.read().replace('\\n', '')\n for sent in sent_tokenize(doc):\n for word in word_tokenize(sent):\n if word not in punc:\n tokenized = tokenize(word)\n if (tokenized not in index):\n index[tokenized] = [int(entry)]\n else:\n curr_posting_list = index[tokenized]\n if (int(entry) not in curr_posting_list):\n curr_posting_list.append(int(entry))\n index[tokenized] = curr_posting_list\n file.close()\n \n block_count += 1\n output_file = \"block\" + str(block_count) + \".txt\"\n write_block_to_disk(index, output_file)\n\ndef write_block_to_disk(index, output_file):\n '''\n Writes out a block to disk in /blocks folder\n '''\n global max_len\n index_items = index.items()\n max_len = max(max_len, len(index_items))\n for key, value in index_items: # sorting each postings list\n value.sort()\n index_items = sorted(index_items) # sorting term\n output = open(os.path.join(BLOCKS, output_file), 'wb')\n for item in index_items:\n pickle.dump(item, output)\n output.close()\n\ndef merge(in_dir, out_dict, out_postings, offset):\n '''\n Perform n-way merge, reading limit-number of entries from each block at a time\n '''\n global max_len\n limit = 5\n loops = math.ceil(max_len / limit)\n opened_files = {}\n removed_files = []\n\n # open all files and store in list\n 
for entry in os.listdir(in_dir):\n opened_files[entry] = open(os.path.join(in_dir, entry), 'rb')\n \n # initialising PQ\n pq = PriorityQueue()\n for i in range(limit):\n for block_name, file_read in opened_files.items():\n unpickler = pickle.Unpickler(file_read)\n if block_name not in removed_files:\n try:\n temp_item = list(unpickler.load())\n # block where the item of (term, docID) is from\n temp_item.append(block_name)\n pq.put(temp_item)\n except EOFError as error:\n removed_files.append(block_name)\n \n term_to_write = ''\n posting_list_to_write = []\n while not pq.empty():\n item = pq.get()\n term, posting_list, block_name = item[0], item[1], item[2]\n if term_to_write == '': # first term we are processing\n term_to_write = term\n posting_list_to_write = posting_list\n elif term_to_write != term: # time to write our current term to to disk because we encountered a new term\n posting_list_to_write.sort()\n posting_list_w_skip_ptr = add_skip_ptr(posting_list_to_write)\n \n # (doc_frequency, absolute_offset, accumulative_offset)\n dict_entry = term_to_write + \" \" + str(len(posting_list_to_write)) + \" \" + str(offset) + \" \" + str(len(posting_list_w_skip_ptr)) + \"\\n\"\n write_to_file(out_dict, dict_entry)\n write_to_file(out_postings, posting_list_w_skip_ptr)\n \n offset += len(posting_list_w_skip_ptr)\n term_to_write = term\n posting_list_to_write = posting_list\n else: # curr_term == term\n posting_list_to_write.extend(posting_list)\n \n if block_name not in removed_files:\n try:\n unpickler = pickle.Unpickler(opened_files[block_name])\n temp_item = list(unpickler.load())\n # block where the item of (term, docID) is from\n temp_item.append(block_name)\n pq.put(temp_item)\n except EOFError as error:\n removed_files.append(block_name)\n\ndef write_to_file(file, content):\n '''\n Writes out lines to disk for search phase later\n '''\n fw = open(file, 'a')\n fw.write(''.join(content))\n fw.close()\n\ninput_directory = output_file_dictionary = output_file_postings = None\n\ntry:\n opts, args = getopt.getopt(sys.argv[1:], 'i:d:p:')\nexcept getopt.GetoptError:\n usage()\n sys.exit(2)\n\nfor o, a in opts:\n if o == '-i': # input directory\n input_directory = a\n elif o == '-d': # dictionary file\n output_file_dictionary = a\n elif o == '-p': # postings file\n output_file_postings = a\n else:\n assert False, \"unhandled option\"\n\nif input_directory == None or output_file_postings == None or output_file_dictionary == None:\n usage()\n sys.exit(2)\n\nbuild_index(input_directory, output_file_dictionary, output_file_postings)\n","sub_path":"HW2/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":6705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"575269223","text":"import random\nfrom player import Player\n\n\nclass GameInitException(Exception):\n pass\n\nclass Game:\n # This'll have 2 players, assoc. move list, assoc. 
piece list\n    def __init__(self, p1, p2):\n        if (not isinstance(p1, Player)) or (not isinstance(p2, Player)):\n            raise GameInitException(\"one of p1 or p2 is not a Player\")\n        self.p1=p1\n        self.p2=p2\n        if random.choice([True,False]):\n            self.p1,self.p2=self.p2,self.p1\n            self.p2.swap_side()\n        # P1 goes first\n    #def set_rules(self, player, r1, r2, r3):\n    def __repr__(self):\n        return \"Game class\"\n    def get_winner(self):\n        # a side with no white pieces left loses; reaching the goal wins\n        if self.p1.get_piece_by_color(\"white\")==[]:\n            return self.p2\n        elif self.p2.get_piece_by_color(\"white\")==[]:\n            return self.p1\n        elif self.p1.reached_goal():\n            return self.p1\n        elif self.p2.reached_goal():\n            return self.p2\n        else:\n            return None\n    def get_all_pieces(self):\n        return self.p1.pieces + self.p2.pieces\n    def get_piece_at(self, loc):\n        p1_pcs = list(filter((lambda pc: pc.location==loc),\n                             self.p1.pieces))\n        p2_pcs = list(filter((lambda pc: pc.location==loc),\n                             self.p2.pieces))\n        # these can't (shouldn't) be both true, so it's cool\n        if len(p1_pcs)!=0:\n            return p1_pcs[0]\n        elif len(p2_pcs)!=0:\n            return p2_pcs[0]\n        else:\n            return None\n\n\n    # can't have this here, game needs to be changed by requests\n\n    def play_game(self):\n        winner = None\n        player = self.p2\n        while winner == None:\n            if player == self.p1:\n                player = self.p2\n            else:\n                player = self.p1\n            self.take_turn(player, player.pieces, [])  # rules wiring still TBD\n            winner = self.get_winner()\n        return winner # I guess\n\n    # okay\n\n\n    def setup(self):\n        players = get_players() # P1 set here I guess?\n        webapp_get_rules(players)\n        webapp_get_piece_locations(players)\n        # that's it? do some stuff inline maybe (if needed)\n\n    def take_turn(self, player, pieces, rules):\n        # Before\n        for rule in rules:\n            rule.before_turn() # does this take any args?\n        # Getting the move, executing it\n        user_selection = get_user_selection() # this whole thing\n        webapp_get_moves(user_selection)\n        move = get_move() # Okay, this one needs a lot of work\n        if (move is rule):\n            rule.do_move() # args?\n        else:\n            regular_do_move_function() # args? name?\n        # Where is bounce etc. handled?\n        # on Land? 
winner check?\n #\n # Close of turn\n for rule in rules:\n rule.after_turn() # args?\n check_winner() # args etc.\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"584954330","text":"import importlib\nimport logging\nfrom rest_framework import status\nfrom rest_framework.response import Response\n\n\ndef validate_permission(permission_type, app_code, permission_code, request):\n if permission_code is not None:\n try:\n permission_name_user = '{}.{}_{}'.format('auth', permission_type, permission_code)\n permission_name_app = '{}.{}_{}'.format(app_code, permission_type, permission_code)\n\n if permission_code == 'user':\n if not request.user.has_perm(permission_name_user):\n return Response({'detail': 'Acceso inválido.'}, status=status.HTTP_403_FORBIDDEN)\n elif not request.user.has_perm(permission_name_app):\n return Response({'detail': 'Acceso inválido.'}, status=status.HTTP_403_FORBIDDEN)\n except AttributeError as e:\n logging.exception(e)\n return Response({'detail': 'Acceso inválido.'}, status=status.HTTP_403_FORBIDDEN)\n except Exception as e:\n logging.exception(e)\n return Response({'detail': 'Error inesperado validando permisos.'}, status=status.HTTP_402_PAYMENT_REQUIRED)\n\n return None\n\n\ndef class_for_name(path: str):\n \"\"\"\n :param path: str\n :return: django.contrib.admin.ModelAdmin\n \"\"\"\n module_name, class_name = path.rsplit('.', 1)\n\n # load the module, will raise ImportError if module cannot be loaded\n m = importlib.import_module(module_name)\n\n # get the class, will raise AttributeError if class cannot be found\n c = getattr(m, class_name)\n\n return c\n","sub_path":"pyctivex/defs.py","file_name":"defs.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"308844524","text":"\nimport math\nimport time\nimport smbus\n\nbus = smbus.SMBus(1)\naddress=0x53\n\nPOWER_CTL = 0x2d\nDATA_FORMAT = 0x31\nFIFO_CTL = 0x38\n\nAFS_2g = 0\nAFS_4g = 1\nAFS_8g = 2\nAFS_16g = 3\n\nACCEL_START_BLOCK = 0x32\nACCEL_XOUT_H = 1\nACCEL_XOUT_L = 0\nACCEL_YOUT_H = 3\nACCEL_YOUT_L = 2\nACCEL_ZOUT_H = 5\nACCEL_ZOUT_L = 4\n\nACCEL_SCALE = 0.004 # Always set to this as we are using FULL_RES\n\nafs_scale=AFS_2g\n\nraw_accel_data = [0, 0, 0, 0, 0, 0]\n\naccel_raw_x = 0\naccel_raw_y = 0\naccel_raw_z = 0\n\naccel_scaled_x = 0\naccel_scaled_y = 0\naccel_scaled_z = 0\n\npitch = 0.0\nroll = 0.0\n\n\ndef twos_complement(high, low):\n val = (high << 8) + low\n if (val >= 0x8000):\n return -((0xffff - val) + 1)\n else:\n return val\n\n\ndef configure():\n # Wake up the device\n bus.write_byte_data(address, POWER_CTL, 0b00001000)\n\n # Set data to FULL_RES and user defined scale\n data_format = 0b00001000 | afs_scale\n bus.write_byte_data(address, DATA_FORMAT, data_format)\n\n # Disable FIFO mode\n bus.write_byte_data(address, FIFO_CTL, 0b00000000)\n return\n\n\ndef read():\n\n raw_accel_data = bus.read_i2c_block_data(address, ACCEL_START_BLOCK, 6)\n time_r=time.time()\n accel_raw_x = twos_complement(raw_accel_data[ACCEL_XOUT_H], raw_accel_data[ACCEL_XOUT_L])\n accel_raw_y = twos_complement(raw_accel_data[ACCEL_YOUT_H], raw_accel_data[ACCEL_YOUT_L])\n accel_raw_z = twos_complement(raw_accel_data[ACCEL_ZOUT_H], raw_accel_data[ACCEL_ZOUT_L])\n\n accel_scaled_x = accel_raw_x * ACCEL_SCALE\n accel_scaled_y = accel_raw_y * ACCEL_SCALE\n accel_scaled_z = accel_raw_z * 
ACCEL_SCALE\n\n return(accel_scaled_x,accel_scaled_y,accel_scaled_z,time_r)\n\n\n#--------------------------------------------\n","sub_path":"accelerom.py","file_name":"accelerom.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"481736876","text":"#!/usr/bin/python3\nfrom sys import stdin\n\ndef add (BIT, ix, n):\n while ix <= n:\n BIT [ix] += 1\n ix += ix & -ix\n\ndef qry (BIT, ix):\n sm = 0\n while ix:\n sm += BIT [ix]\n ix -= ix & -ix\n return sm\n\ndef main ():\n read = stdin.readline\n t = int (read ())\n for t_ in range (t):\n n = int (read ())\n s = read ().rstrip ()\n N = 2 * n + 3\n BIT = [0] * N\n ans = 0; c = 0\n for i in range (n):\n if s [i] == '1': c += 1\n if 2 * c > i + 1: ans += 1\n k = 2 * c - i + n\n ans += qry (BIT, k)\n add (BIT, k + 1, N)\n print (ans)\n\nif __name__ == \"__main__\": main ()","sub_path":"_positive_substring2.py","file_name":"_positive_substring2.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"109497986","text":"def solution(board, nums):\n answer = 0\n size = len(board)\n nums = set(nums)\n bingos = {'row' : [size for _ in range(size)], 'col' : [size for _ in range(size)], 'diagonal' : [size, size]}\n for row in range(size):\n for col in range(size):\n if board[row][col] in nums:\n bingos['row'][row] -= 1\n bingos['col'][col] -= 1\n if row == col:\n bingos['diagonal'][0] -= 1\n if row + col + 1 == size:\n bingos['diagonal'][1] -= 1\n for sub_bingos in bingos.values():\n answer += sub_bingos.count(0)\n return answer","sub_path":"Programmers/Lv3_빙고.py","file_name":"Lv3_빙고.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"11505024","text":"from flask import render_template, flash, redirect, url_for, session, logging, request\nimport smtplib\nimport mimetypes\nfrom email.mime.multipart import MIMEMultipart\nfrom email import encoders\nfrom email.message import Message\nfrom email.mime.audio import MIMEAudio\nfrom email.mime.base import MIMEBase\nfrom email.mime.image import MIMEImage\nfrom email.mime.text import MIMEText\nimport ntpath\nimport shutil\nimport os\nfrom systemdepfunc import addpath, emailvals\n\ndef invoice_mimemail(invo,order,docref,npack):\n\n emails,passwds,ourserver=emailvals()\n emailin1=request.values.get('edat2')\n emailin2=request.values.get('edat3')\n emailcc1=request.values.get('edat4')\n emailcc2=request.values.get('edat5')\n etitle=request.values.get('edat0')\n ebody=request.values.get('edat1')\n order=order.strip()\n\n if npack==3:\n newfile='Invoice_Package_Order_'+order\n elif npack==2:\n insert=etitle.replace('First Eagle Logistics','').replace(':','').replace(' ','_')\n insert=insert.strip()\n print(insert)\n newfile='Summary_'+insert\n else:\n newfile='Invoice_Order_'+order\n\n if npack==5:\n newfile=newfile.replace('Order_','Booking_')\n\n newfile=newfile+'.pdf'\n\n shutil.copy(addpath(docref),newfile)\n\n #emailto = \"export@firsteaglelogistics.com\"\n emailfrom = emails[2]\n\n username = emails[2]\n password = passwds[0]\n\n msg = MIMEMultipart()\n msg[\"From\"] = emailfrom\n msg[\"To\"] = emailin1\n emailto=[emailin1]\n if emailin2 is not None:\n msg[\"To\"] = emailin2\n emailto.append(emailin2)\n if emailcc1 is not None:\n msg[\"CC\"] = emailcc1\n emailto.append(emailcc1)\n if emailcc2 is not None:\n msg[\"Cc\"] = emailcc2\n 
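        # --- illustrative aside (editor's sketch, not from the original corpus) ---
        # the add/qry pair in the Fenwick-tree record above gives point updates
        # and prefix-sum queries in O(log n); a tiny usage sketch:
        #   BIT = [0] * 9            # 1-based tree over 8 positions
        #   add(BIT, 3, 8)           # record an event at index 3
        #   add(BIT, 5, 8)           # and another at index 5
        #   assert qry(BIT, 4) == 1  # events at indices <= 4
        #   assert qry(BIT, 8) == 2  # events at indices <= 8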
emailto.append(emailcc2)\n #msg[\"Subject\"] = 'First Eagle Logistics Invoice: '+ invo + ' for Order: '+ order\n msg[\"Subject\"] = etitle\n\n #body = 'Dear Customer:\\n\\nYour invoice is attached. Please remit payment at your earliest convenience.\\n\\nThank you for your business- we appreciate it very much.\\n\\nSincerely,\\n\\nFIRST EAGLE LOGISTICS,INC.\\n\\n\\nNorma Ghanem\\nFirst Eagle Logistics, Inc.\\n505 Hampton Park Blvd Unit O\\nCapitol Heights,MD 20743\\n301 516 3000'\n msg.attach(MIMEText(ebody, 'plain'))\n\n attachment = open(newfile, \"rb\")\n\n part = MIMEBase('application', 'octet-stream')\n part.set_payload((attachment).read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', \"attachment; filename= %s\" % newfile)\n\n msg.attach(part)\n\n server = smtplib.SMTP(ourserver)\n #server.starttls()\n server.login(username,password)\n #emailto = [emailin1, emailin2, emailcc1, emailcc2]\n server.sendmail(emailfrom, emailto, msg.as_string())\n server.quit()\n\n os.remove(newfile)\n\n return emailin1\n","sub_path":"invoice_mimemail.py","file_name":"invoice_mimemail.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"203524730","text":"from __future__ import print_function\n\nimport math\nfrom sys import maxsize\nfrom itertools import permutations\n\n\"\"\"Simple travelling salesman problem between cities.\"\"\"\n\nfrom ortools.constraint_solver import routing_enums_pb2\nfrom ortools.constraint_solver import pywrapcp\n\n\ndef create_data_model(adjmat):\n \"\"\"Stores the data for the problem.\"\"\"\n data = {'distance_matrix': adjmat, 'num_vehicles': 1, 'depot': 0}\n return data\n\n\ndef print_solution(manager, routing, solution):\n \"\"\"Prints solution on console.\"\"\"\n print('Objective: {} units'.format(solution.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route for UAV:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Route distance: {} units \\n'.format(route_distance)\n\n\ndef main(adjmat):\n \"\"\"Entry point of the program.\"\"\"\n # Instantiate the data problem.\n data = create_data_model(adjmat)\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n solution = routing.SolveWithParameters(search_parameters)\n\n # Print solution on 
console.\n if solution:\n print_solution(manager, routing, solution)\n\n\nfile1 = open('centroid.data', 'r')\nhandle = file1.readlines()\ncoords = list()\nfor line in handle:\n a, b = line.strip().split()\n x = int(a)\n y = int(b)\n coords.append([x, y])\n\nadjmat = list()\nn = len(coords)\nfor i in range(n):\n row = list()\n for j in range(n):\n if i == j:\n row.append(0.0)\n else:\n dist = int(\n math.sqrt(math.pow((coords[i][0] - coords[j][0]), 2) + math.pow((coords[i][1] - coords[j][1]), 2)))\n # d = round(dist, 2)\n row.append(dist)\n adjmat.append(row)\n\nmain(adjmat)\n","sub_path":"UAV-Navigation-system-main/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"135528867","text":"def isISBN_13(code):\n if len(code) != 13:\n return False\n \n if code[:3] != \"978\" and code [:3] != \"979\":\n return False\n even = code[::2]\n oneven = code[1::2]\n someven = 0\n somoneven = 0\n for i in range(6):\n cijfer = int(even[i])\n someven += cijfer\n cijfer = int(oneven[i])\n somoneven += cijfer\n controle = (10-(someven + 3 * somoneven) %10)%10\n\n return controle == int(even[6])\n\ndef overzicht(codes):\n types = [\"Engelstalige landen\", \"Franstalige landen\", \"Duitstalige landen\", \"Japan\", \"Russischtalige landen\", \"China\", \"Overige landen\", \"Fouten\"]\n\n overzicht = {}\n\n for type in types:\n overzicht[type] = 0\n for code in codes:\n if not isISBN_13(code):\n overzicht[\"Fouten\"] += 1\n else:\n nr = code[3]\n if nr == \"0\":\n nr = \"1\" #engelstalig\n elif nr in \"689\":\n nr = \"7\" #Overige landen\n elif nr == \"7\":\n nr = \"6\"\n type = types[int(nr)-1]\n overzicht[type] += 1\n\n for key in overzicht:\n print(\"{}: {}\".format(key, overzicht[key]))\n\n# alternatief\ndef som_reeks(reeks):\n cijfers = [int(teken) for teken in reeks]\n return sum(cijfers)\n\ndef isISBN_13(isbn):\n if not type(isbn) is str:\n return False\n if len(isbn) != 13:\n return False\n if not isbn.isdigit():\n return False\n if isbn.find('978') != 0 and isbn.find('979') != 0:\n return False\n \n som_oneven = som_reeks(isbn[:12:2])\n som_even = som_reeks(isbn[1::2])\n controle = (10 - (som_oneven + 3 * som_even)%10) %10\n return int(isbn[12]) == controle\n \ndef overzicht(codes):\n land_codes = {\"0\" : \"Engelstalige landen\", \"1\" : \"Engelstalige landen\", \"2\" : \"Franstalige landen\", \\\n \"3\": \"Duitstalige landen\", \"4\": \"Japan\", \"5\": \"Russischtalige landen\", \"7\": \"China\", \"6\": \"Overige landen\", \\\n \"8\": \"Overige landen\", \"9\": \"Overige landen\"}\n foutief = \"Fouten\"\n overzicht = {}\n for landtype in land_codes.values():\n overzicht[landtype] = 0\n overzicht[foutief] = 0\n for code in codes:\n if isISBN_13(code):\n overzicht[land_codes[code[3]]] += 1\n else:\n overzicht[foutief] += 1\n for landtype, aantal in overzicht.items():\n print(\"{}: {}\".format(landtype, aantal))\n \n \n","sub_path":"test/vraag4/src/isbn/119.py","file_name":"119.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"427472517","text":"\nimport numpy as np\nimport pytest\nimport mock\n\n\nclass Data(object):\n\n def __init__(self, num_locations, travel_times=None, time_windows=None):\n self.num_locations = num_locations\n self.travel_times = travel_times\n self.time_windows = time_windows\n\n\nclass Node(object):\n\n def __init__(self, subtour=tuple(), exclude_next=set(), 
relaxation=None, dive=False):\n self.subtour = subtour\n self.exclude_next = exclude_next\n self._relaxation = relaxation\n self.dualval = relaxation if relaxation is not None else -1e+20\n self.dive = dive\n\n\nclass Struct(object):\n\n def __init__(self, active_nodes, choice_func=None, relax_values=[]):\n self.active_nodes = active_nodes\n self.relaxation = mock.MagicMock(side_effect=relax_values)\n self.random_choice = mock.MagicMock(side_effect=choice_func)\n\n\n@pytest.fixture\ndef tsptw_data():\n return Data(\n num_locations=5,\n travel_times=np.array([\n [0, 5, 6, 3, 7],\n [5, 0, 8, 5, 4],\n [6, 8, 0, 7, 6],\n [3, 5, 7, 0, 5],\n [7, 4, 6, 5, 0],\n ], dtype=np.float),\n time_windows=[\n [0, 50],\n [20,35],\n [15,25],\n [10,30],\n [25,35],\n ])\n","sub_path":"tests/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"253970819","text":"# -*- coding: utf-8 -*-\n\"\"\"\n_GetPendingSlots_\n\nMySQL implementation of Locations.GetPendingSlots\nCreated on Mon Jun 18 12:39:06 2012\n\n@author: dballest\n\"\"\"\n\nfrom future.utils import listvalues\n\nfrom WMCore.Database.DBFormatter import DBFormatter\n\nclass GetPendingSlots(DBFormatter):\n sql = \"\"\"SELECT pending_slots FROM wmbs_location\n WHERE site_name = :location\n \"\"\"\n\n def execute(self, siteName, conn = None, transaction = False):\n binds = {\"location\": siteName}\n result = self.dbi.processData(self.sql, binds, conn = conn,\n transaction = transaction)\n\n return listvalues(result[0].fetchall()[0])[0]\n","sub_path":"src/python/WMCore/WMBS/MySQL/Locations/GetPendingSlots.py","file_name":"GetPendingSlots.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"67481296","text":"# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test saved_model with distribution strategies.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import model_combinations\nfrom tensorflow.python.distribute import strategy_combinations\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.saved_model import saved_model\n\n_RANDOM_SEED = 1337\n_DEFAULT_FUNCTION_KEY = 'serving_default'\n_IN_SCOPE_SAVE_DIR = 'in_scope/'\n_OUT_OF_SCOPE_SAVE_DIR = 'out_of_scope/'\n\nsimple_models = [\n model_combinations.simple_functional_model,\n model_combinations.simple_sequential_model,\n\n # TODO(b/131715604): figure out why subclass model does not work\n # model_combinations.simple_subclass_model,\n]\n\n\ndef get_strategy_cross_product():\n result = []\n for strategy_1 in strategy_combinations.strategies_minus_tpu:\n for strategy_2 in strategy_combinations.strategies_minus_tpu:\n result.append(combinations.NamedDistributionPair(strategy_1, strategy_2))\n\n return result\n\n\ndef simple_models_with_strategies():\n return combinations.combine(\n model_and_input=simple_models,\n distribution=strategy_combinations.strategies_minus_tpu,\n mode=['eager'])\n\n\nclass TestSavedModel(test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n np.random.seed(_RANDOM_SEED)\n random_seed.set_random_seed(_RANDOM_SEED)\n super(TestSavedModel, self).setUp()\n\n def _train_model(self, model, x_train, y_train, batch_size):\n training_dataset = dataset_ops.Dataset.from_tensor_slices(\n (x_train, y_train))\n training_dataset = training_dataset.repeat()\n training_dataset = training_dataset.batch(batch_size)\n\n # Train the model for 1 step\n model.fit(x=training_dataset, epochs=1, steps_per_epoch=1)\n\n def _load_and_run_model(self, saved_dir, x_predict):\n func = saved_model.load(saved_dir)\n return func.signatures[_DEFAULT_FUNCTION_KEY](x_predict)\n\n def _get_predict_dataset(self, x_predict, batch_size):\n predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)\n predict_dataset = predict_dataset.batch(batch_size)\n return predict_dataset\n\n @combinations.generate(simple_models_with_strategies())\n def test_save_no_dist_restore_dist(self, model_and_input, distribution):\n \"\"\"Save a model without DS, and restore it with DS.\"\"\"\n\n self.skipTest('Loading model with DS is not supported yet')\n\n saved_dir = os.path.join(self.get_temp_dir(),\n 'test_save_no_dist_restore_dist')\n\n model, output_name = model_and_input.get_model()\n x_train, y_train, x_predict = model_and_input.get_data()\n batch_size = model_and_input.get_batch_size()\n\n 
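    # --- illustrative aside (editor's sketch, not from the original corpus) ---
    # the round trip this test exercises reduces to the public tf.saved_model
    # API; a minimal sketch with a hypothetical save path:
    #   tf.saved_model.save(model, '/tmp/demo_model')
    #   restored = tf.saved_model.load('/tmp/demo_model')
    #   serving_fn = restored.signatures['serving_default']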
self._train_model(model, x_train, y_train, batch_size)\n predict_dataset = self._get_predict_dataset(x_predict, batch_size)\n result_before_save = model.predict(predict_dataset)\n\n saved_model.save(model, saved_dir)\n\n with distribution.scope():\n predict_dataset = distribution.experimental_distribute_dataset(\n predict_dataset)\n actual_data = next(iter(predict_dataset))\n result_after_save = self._load_and_run_model(saved_dir, actual_data)\n\n self.assertAllEqual(result_before_save, result_after_save[output_name])\n\n @combinations.generate(simple_models_with_strategies())\n def test_save_dist_restore_no_dist(self, model_and_input, distribution):\n \"\"\"Save a model with DS, and restore it without DS.\"\"\"\n\n self.skipTest('Saving model with DS is not supported yet')\n\n saved_dir = os.path.join(self.get_temp_dir(),\n 'test_save_no_dist_restore_dist')\n saved_dir_in_scope = os.path.join(saved_dir, _IN_SCOPE_SAVE_DIR)\n saved_dir_out_of_scope = os.path.join(saved_dir, _OUT_OF_SCOPE_SAVE_DIR)\n\n with distribution.scope():\n model, output_name = model_and_input.get_model()\n x_train, y_train, x_predict = model_and_input.get_data()\n batch_size = model_and_input.get_batch_size()\n\n self._train_model(model, x_train, y_train, batch_size)\n predict_dataset = self._get_predict_dataset(x_predict, batch_size)\n result_before_save = model.predict(predict_dataset)\n\n # save the model both in and out of the DS scope\n saved_model.save(model, saved_dir_in_scope)\n saved_model.save(model, saved_dir_out_of_scope)\n\n actual_data = next(iter(predict_dataset))\n result_load_from_save_in_scope = self._load_and_run_model(\n saved_dir_in_scope, actual_data)\n result_load_from_save_out_of_scope = self._load_and_run_model(\n saved_dir_out_of_scope, actual_data)\n\n self.assertAllEqual(result_before_save,\n result_load_from_save_in_scope[output_name])\n self.assertAllEqual(result_before_save,\n result_load_from_save_out_of_scope[output_name])\n\n @combinations.generate(\n combinations.combine(\n model_and_input=simple_models,\n distribution_pair=get_strategy_cross_product(),\n mode=['eager']))\n def test_save_dist_restore_dist(self, model_and_input, distribution_pair):\n \"\"\"Save a model with DS, and restore it with potentially different DS.\"\"\"\n\n self.skipTest('Saving model with DS is not supported yet')\n\n combinations.maybe_skip_test(self, distribution_pair.is_tpu_required,\n distribution_pair.num_gpus_required)\n\n saved_dir = os.path.join(self.get_temp_dir(), 'test_save_dist_restore_dist')\n saved_dir_in_scope = os.path.join(saved_dir, _IN_SCOPE_SAVE_DIR)\n saved_dir_out_of_scope = os.path.join(saved_dir, _OUT_OF_SCOPE_SAVE_DIR)\n\n dist_for_save = distribution_pair.strategy_1\n dist_for_restore = distribution_pair.strategy_2\n\n with dist_for_save.scope():\n model, output_name = model_and_input.get_model()\n x_train, y_train, x_predict = model_and_input.get_data()\n batch_size = model_and_input.get_batch_size()\n\n self._train_model(model, x_train, y_train, batch_size)\n predict_dataset = self._get_predict_dataset(x_predict, batch_size)\n result_before_save = model.predict(predict_dataset)\n\n # save the model both in and out of the DS scope\n saved_model.save(model, saved_dir_in_scope)\n saved_model.save(model, saved_dir_out_of_scope)\n\n with dist_for_restore.scope():\n predict_dataset = dist_for_restore.experimental_distribute_dataset(\n predict_dataset)\n actual_data = next(iter(predict_dataset))\n\n result_load_from_save_in_scope = self._load_and_run_model(\n saved_dir_in_scope, 
actual_data)\n    result_load_from_save_out_of_scope = self._load_and_run_model(\n        saved_dir_out_of_scope, actual_data)\n\n    self.assertAllEqual(result_before_save,\n                        result_load_from_save_in_scope[output_name])\n    self.assertAllEqual(result_before_save,\n                        result_load_from_save_out_of_scope[output_name])\n\n\nif __name__ == '__main__':\n  test.main()\n","sub_path":"tensorflow/python/distribute/saved_model_test.py","file_name":"saved_model_test.py","file_ext":"py","file_size_in_byte":7838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"467204550","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nfrom numpy.linalg import svd\nimport numpy as np\nfrom summary_algorithms.algorithm import SummaryAlgorithm\n\n\nclass LatentSemanticAlgorithm(SummaryAlgorithm):\n    def __init__(self, manager):\n        self.__manager = manager\n\n    def __words_amount(self, sentence):\n        return len(sentence.split())\n\n    def summary(self, text, max_length):\n        self.__manager.preprocess(text)\n        vectorizer = TfidfVectorizer()\n        tfxidf_matrix = vectorizer.fit_transform(\n            self.__manager.processed_sentences).todense()\n\n        _, s, vh = svd(tfxidf_matrix, full_matrices=False)\n\n        d = np.power(np.diag(s).dot(vh), 2)\n\n        weights = [np.sqrt(np.sum(d[i, :])) for i in range(d.shape[0])]\n\n        sorted_sentences = list(zip(weights, self.__manager.raw_sentences))\n        sorted_sentences.sort(reverse=True)\n\n        sentence_with_position = [\n            (self.__manager.get_sentence_position(x[1]), x[1]) for x in sorted_sentences]\n\n        sentence_with_position.sort()\n\n        current_length = 0\n        summary = ''\n\n        for (_, sentence) in sentence_with_position:\n            if self.__words_amount(sentence) + current_length > max_length:\n                break\n            # keep a running word count so the max_length budget is enforced\n            current_length += self.__words_amount(sentence)\n            summary += sentence + '.'\n\n        return summary\n","sub_path":"core/summary_algorithms/latent_semantic.py","file_name":"latent_semantic.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"297861456","text":"from rest_framework.pagination import PageNumberPagination\n\n\nclass EnterprisesPageNum(PageNumberPagination):\n    \"\"\"Paginator.\"\"\"\n    page_size_query_param = 'page_size'\n    max_page_size = 10\n\n\n# An extensibility helper file\n# 1. takes an industry id\n\ndef value(func):\n    table_obj = {\n        \"ANlmy\": 1,  # Agriculture, forestry, animal husbandry and fishery\n        \"BCky\": 2,  # Mining\n        \"CZzy\": 3,  # Manufacturing\n        \"DDrrsgy\": 4,  # Electricity, heat, gas and water production and supply\n        \"EJzy\": 5,  # Construction\n        \"FPflsy\": 6,  # Wholesale and retail\n        \"GJcy\": 7,  # Transportation, warehousing and postal services\n        \"HZscyy\": 8,  # Accommodation and catering\n        \"IXxrjy\": 9,  # Information transmission, software and IT services\n        \"JJry\": 10,  # Finance\n        \"KFdcy\": 11,  # Real estate\n        \"LZlsw\": 12,  # Leasing and business services\n        \"MKyjs\": 13,  # Scientific research and technical services\n        \"NSlhjgg\": 14,  # Water, environment and public facilities management\n        \"OJmxl\": 15,  # Resident services, repair and other services\n        \"PJy\": 16,  # Education\n        \"QWssh\": 17,  # Health and social work\n        \"RWty\": 18,  # Culture, sports and entertainment\n        \"SGgsh\": 19,  # Public administration, social security and social organizations\n        \"TGj\": 20,  # International organizations\n        #\n    }\n\n    key = list(table_obj.keys())[list(table_obj.values()).index(func)]\n\n    return key\n\n\ndef num_func(func):\n    table_obj = {\n        \"ACount\": 1,  # Agriculture, forestry, animal husbandry and fishery\n        \"BCount\": 2,  # Mining\n        \"CCount\": 3,  # Manufacturing\n        \"DCount\": 4,  # Electricity, heat, gas and water production and supply\n        \"ECount\": 5,  # Construction\n        \"FCount\": 6,  # Wholesale and retail\n        \"GCount\": 7,  # Transportation, warehousing and postal services\n        \"HCount\": 8,  # Accommodation and catering\n        \"ICount\": 9,  # Information transmission, software and IT services\n        \"JCount\": 10,  # Finance\n        \"KCount\": 11,  # Real estate\n        \"LCount\": 12,  # Leasing and business services\n        \"MCount\": 13,  # Scientific research and technical services\n        \"NCount\": 14,  # Water, environment and public facilities management\n        \"OCount\": 15,  # Resident services, repair and other services\n        \"PCount\": 16,  # Education\n        \"QCount\": 17,  # Health and social work\n        \"RCount\": 18,  # Culture, sports and entertainment\n        \"SCount\": 19,  # Public administration, social security and social organizations\n        \"TCount\": 20,  # International organizations\n        
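        # --- illustrative aside (editor's sketch, not from the original corpus) ---
        # the reverse lookup below, list(d.keys())[list(d.values()).index(v)],
        # scans the dict twice; a prebuilt inverse map does the same in O(1):
        #   inverse = {v: k for k, v in table_obj.items()}
        #   key = inverse[func]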
#\n }\n\n key = list(table_obj.keys())[list(table_obj.values()).index(func)]\n\n return key\n\n\n\n\n\n\nclass EnterprisePageNum(PageNumberPagination):\n page_size_query_param = \"page_size\"\n max_page_size = 20","sub_path":"up_down_chain/up_down_chain/app/Enterprise/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"640706312","text":"import sys\ncap_val=list(map(int,input().split()))\nleak_val=list(map(int,input().split()))\ndummy=[] #result\nmaxleak=sys.maxsize #\nfor i in range(len(cap_val)): #lop\n leak=cap_val[i]//leak_val[i] \n maxleak=min(leak,maxleak)\n dummy.append(leak)\n\nfor idx,item in enumerate(dummy):\n if(item==maxleak):\n print(idx,end=\",\")\n","sub_path":"Array problem/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"508934247","text":"# Copyright 2016 - Alcatel-Lucent\n# Copyright 2016 - Nokia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport itertools\n\nfrom oslo_service import service as os_service\nfrom oslo_utils import importutils as utils\nfrom vitrage.common.constants import UpdateMethod\nfrom vitrage.datasources.listener_service import ListenerService\nfrom vitrage.datasources.services import ChangesService\nfrom vitrage.datasources.services import SnapshotsService\nfrom vitrage.utils import opt_exists\n\nCHANGES_INTERVAL = 'changes_interval'\n\n\ndef create_send_to_queue_callback(queue):\n def send_to_queue_callback(event):\n queue.put(event)\n\n return send_to_queue_callback\n\n\nclass Launcher(object):\n def __init__(self, conf, callback):\n self.conf = conf\n self.callback = callback\n self.snapshot_datasources = self._register_snapshot_datasources(conf)\n self.services = self._register_services()\n\n def launch(self):\n # launcher = os_service.ServiceLauncher(self.conf) # For Debugging\n launcher = os_service.ProcessLauncher(self.conf)\n for service in self.services:\n launcher.launch_service(service, 1)\n\n @staticmethod\n def _register_snapshot_datasources(conf):\n return {datasource: utils.import_object(conf[datasource].driver, conf)\n for datasource in conf.datasources.types}\n\n def _register_services(self):\n pull_datasources = self._get_pull_datasources(self.conf)\n changes_services = \\\n (ChangesService(self.conf,\n [self.snapshot_datasources[datasource]],\n self.conf[datasource].changes_interval,\n self.callback)\n for datasource in pull_datasources)\n\n snapshot_service = (SnapshotsService(self.conf,\n self.snapshot_datasources,\n self.callback),)\n\n listener_service = (ListenerService(self.conf,\n self.snapshot_datasources,\n self.callback),)\n\n return itertools.chain(changes_services,\n snapshot_service,\n listener_service)\n\n @staticmethod\n def _get_pull_datasources(conf):\n return (datasource for datasource in conf.datasources.types\n if conf[datasource].update_method.lower() 
== UpdateMethod.PULL\n and opt_exists(conf[datasource], CHANGES_INTERVAL))\n","sub_path":"vitrage-1.5.2/vitrage/datasources/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"487769176","text":"\"\"\"\nExample Hibrain Research\n\n\"\"\"\nimport re\nimport urllib\nBACKENDS = [\"telegram\"]\n\nNAME = \"Hibrain 연구비지원\"\nURL = \"https://www.hibrain.net/research/researches/34/recruitments/108/recruits?sortType=AD&displayType=TIT&listType=ING&limit=25&siteid=1\"\nFIND_ALL_ARGS = {\"class_\": \"sortRoot\"}\n\nHEADERS = {}\n\n\ndef TITLE_FN(node):\n return node.find_all(\"span\", class_=\"title\")[0].get_text().strip()\n\n\ndef BODY_URL_FN(node):\n return node.find_all(\"a\")[0][\"href\"]\n\n\ndef BODY_FN(soup):\n result = soup.find_all(\"table\", class_=\"contentSummaryInfo\")\n if result:\n return result[0]\n else:\n return None\n \ndef PARAM_FN(node):\n url = BODY_URL_FN(node)\n attr = \"path\"\n path = getattr(urllib.parse.urlsplit(url), attr)\n param = path.rsplit('/', 1)[-1]\n return int(param)","sub_path":"pages_unused/hibrain.py","file_name":"hibrain.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"284223045","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport glob\nimport math\nimport random\nimport shutil\nimport logging\nimport argparse\nfrom itertools import count\nfrom natsort import natsorted\nimport glob2\nimport cv2\nimport skimage.io\nimport skimage.measure\nimport skimage.segmentation\nimport sklearn\nimport sklearn.metrics\nimport numpy as np\nfrom PIL import Image\n# Using torch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.utils.weight_norm as weightNorm\nimport torch.optim as optim\nfrom torch.optim.optimizer import Optimizer, required\nfrom torch.autograd import Variable\nfrom torch.utils.tensorboard import SummaryWriter\nimport torchvision\nimport gym\n\nfrom optim.RAdam import RAdam\nfrom environment.BaseEnvironment import BaseEnvironment\nfrom ImageRenderingEnvironment import ImageRenderingEnvironment\n\nclass Replay_buffer():\n '''\n Code based on:\n https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py\n Expects tuples of (state, next_state, action, reward, done)\n '''\n def __init__(self, max_size=500000):\n self.storage = []\n self.max_size = max_size\n self.ptr = 0\n\n def push(self, data):\n if len(self.storage) == self.max_size:\n self.storage[int(self.ptr)] = data\n self.ptr = (self.ptr + 1) % self.max_size\n else:\n self.storage.append(data)\n\n def sample(self, batch_size):\n ind = np.random.randint(0, len(self.storage), size=batch_size)\n x, y, u, r, d = [], [], [], [], []\n\n for i in ind:\n X, Y, U, R, D = self.storage[i]\n x.append(np.array(X, copy=False))\n y.append(np.array(Y, copy=False))\n u.append(np.array(U, copy=False))\n r.append(np.array(R, copy=False))\n d.append(np.array(D, copy=False))\n\n x = np.array(x)\n y = np.array(y)\n u = np.array(u)\n r = np.array(r)\n d = np.array(d)\n\n x = np.squeeze(x, axis=1)\n y = np.squeeze(y, axis=1)\n # print(x.shape, y.shape, u.shape, r.shape, d.shape)\n return x, y, u, r, d\n\ndef init_weights(m):\n if hasattr(m, 'reset_parameters'):\n m.reset_parameters()\n if isinstance(m, nn.ConvTranspose2d) or isinstance(m, 
nn.Conv2d):\n        m.weight.data.normal_(0.0, 0.05)\n        if m.bias is not None:\n            m.bias.data.fill_(0)\n    elif isinstance(m, nn.BatchNorm2d):\n        m.weight.data.normal_(1.0, 0.05)\n        m.bias.data.fill_(0)\n    elif isinstance(m, nn.Linear):\n        m.weight.data.normal_(0.0, 0.1)\n        if m.bias is not None:\n            m.bias.data.fill_(0)\n\nclass Actor(nn.Module):\n    def __init__(self, num_classes=5):\n        super(Actor, self).__init__()\n\n        self.model = getattr(torchvision.models, 'resnet18')(pretrained=False)\n        self.model.conv1 = nn.Conv2d(9, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n        self.model.fc = nn.Sequential(\n            # nn.Dropout(0.5),\n            nn.Linear(512, num_classes),\n            nn.Sigmoid()\n        )\n        self.model.apply(init_weights)\n\n        # self.model = getattr(torchvision.models, 'densenet121')(pretrained=False)\n        # self.model.features.conv0 = nn.Conv2d(9, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n        # self.model.classifier = nn.Sequential(\n        #     # nn.Dropout(0.5),\n        #     nn.Linear(1024, num_classes),\n        #     nn.Sigmoid()\n        # )\n\n    def forward(self, x):\n        logit = self.model(x)\n        return logit\n\nclass Critic(nn.Module):\n    def __init__(self, num_classes=1):\n        super(Critic, self).__init__()\n        self.model = getattr(torchvision.models, 'resnet18')(pretrained=False)\n        self.model.conv1 = nn.Conv2d(14, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n        self.model.fc = nn.Sequential(\n            # nn.Dropout(0.5),\n            nn.Linear(512, num_classes),\n            nn.Sigmoid()\n        )\n        self.model.apply(init_weights)\n\n        # self.model = getattr(torchvision.models, 'densenet121')(pretrained=False)\n        # self.model.features.conv0 = nn.Conv2d(14, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n        # self.model.classifier = nn.Sequential(\n        #     # nn.Dropout(0.5),\n        #     nn.Linear(1024, num_classes),\n        #     nn.Sigmoid()\n        # )\n\n    def forward(self, x, y):\n        # broadcast the action vector to a 256x256 plane and stack it onto the state\n        y = y.reshape(y.shape[0], y.shape[1], 1, 1)\n        y = F.interpolate(y, size=(256, 256), mode='nearest')\n        logit = self.model(torch.cat([x, y], 1))\n        return logit\n\nclass DDPG(object):\n    def __init__(self, writer=None, device='cuda', hparams=None):\n        self.device = device\n        self.hparams = hparams\n        self.actor = Actor(num_classes=5).to(self.device)\n        self.actor_target = Actor(num_classes=5).to(self.device)\n        self.actor_target.load_state_dict(self.actor.state_dict())\n        self.actor_optimizer = RAdam(self.actor.parameters(), lr=self.hparams.lr,\n                                     betas=(0.9, 0.99), weight_decay=1e-4)\n        self.actor_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.actor_optimizer, T_max=10)\n\n        self.critic = Critic(num_classes=1).to(self.device)\n        self.critic_target = Critic(num_classes=1).to(self.device)\n        self.critic_target.load_state_dict(self.critic.state_dict())\n        # the critic gets its own optimizer over its own parameters\n        self.critic_optimizer = RAdam(self.critic.parameters(), lr=self.hparams.lr,\n                                      betas=(0.9, 0.99), weight_decay=1e-4)\n        self.critic_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.critic_optimizer, T_max=10)\n\n        self.replay_buffer = Replay_buffer(max_size=self.hparams.capacity)\n        self.writer = writer #SummaryWriter()\n        self.num_critic_update_iteration = 0\n        self.num_actor_update_iteration = 0\n        self.num_training = 0\n\n    def select_action(self, state):\n        state = torch.FloatTensor(state).to(self.device)\n        return self.actor(state).detach().cpu().numpy().flatten()\n\n    def update(self):\n\n        for it in range(self.hparams.update_iteration):\n            # Sample replay buffer\n            x, y, u, r, d = self.replay_buffer.sample(self.hparams.batch_size)\n            state = 
torch.FloatTensor(x).to(self.device)\n action = torch.FloatTensor(u).to(self.device)\n next_state = torch.FloatTensor(y).to(self.device)\n done = torch.FloatTensor(d).to(self.device)\n reward = torch.FloatTensor(r).to(self.device)\n\n # Compute the target Q value\n target_Q = self.critic_target(next_state, self.actor_target(next_state))\n target_Q = reward + ((1 - done) * self.hparams.gamma * target_Q).detach()\n\n # Get current Q estimate\n current_Q = self.critic(state, action)\n\n # Compute critic loss\n # critic_loss = F.mse_loss(current_Q, target_Q)\n critic_loss = nn.SmoothL1Loss()(current_Q, target_Q)\n self.writer.add_scalar('Loss/critic_loss', critic_loss, global_step=self.num_critic_update_iteration)\n \n # Optimize the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n # self.critic_scheduler.batch_step()\n # Compute actor loss\n actor_loss = -self.critic(state, self.actor(state)).mean()\n self.writer.add_scalar('Loss/actor_loss', actor_loss, global_step=self.num_actor_update_iteration)\n\n # Optimize the actor\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n # self.actor_scheduler.batch_step()\n\n # Update the frozen target models\n for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\n target_param.data.copy_(self.hparams.tau * param.data + (1 - self.hparams.tau) * target_param.data)\n\n for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):\n target_param.data.copy_(self.hparams.tau * param.data + (1 - self.hparams.tau) * target_param.data)\n\n self.num_actor_update_iteration += 1\n self.num_critic_update_iteration += 1\n\n def save(self):\n torch.save(self.actor.state_dict(), 'actor.pth')\n torch.save(self.critic.state_dict(), 'critic.pth')\n print(\"====================================\")\n print(\"Model has been saved...\")\n print(\"====================================\")\n\n def load(self):\n self.actor.load_state_dict(torch.load('actor.pth'))\n self.critic.load_state_dict(torch.load('critic.pth'))\n print(\"====================================\")\n print(\"Model has been loaded...\")\n print(\"====================================\")\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpus', default=0, type=int, help='comma separated list of GPU(s) to use.')\n parser.add_argument('--mode', default='train', type=str) # mode = 'train' or 'test'\n # OpenAI gym environment name, # ['BipedalWalker-v2', 'Pendulum-v0'] or any continuous environment\n # Note that DDPG is feasible about hyper-parameters.\n # You should fine-tuning if you change to another environment.\n # parser.add_argument(\"--env_name\", default=\"Pendulum-v0\")\n parser.add_argument('--tau', default=0.005, type=float) # target smoothing coefficient\n parser.add_argument('--target_update_interval', default=1, type=int)\n parser.add_argument('--test_iteration', default=10, type=int)\n\n parser.add_argument('--learning_rate', '-lr', default=2e-4, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\n parser.add_argument('--gamma', default=0.90, type=float) # discounted factor\n parser.add_argument('--capacity', default=10000, type=int) # replay buffer size\n parser.add_argument('--batch_size', default=4, type=int) # mini batch size\n parser.add_argument('--seed', type=int, default=2020,\n help='seed for initializing training. 
')\n # optional parameters\n\n parser.add_argument('--sample_frequency', default=256, type=int)\n parser.add_argument('--render', default=False, type=bool) # show UI or not\n parser.add_argument('--log_interval', default=50, type=int) #\n # parser.add_argument('--load', default=False, type=bool) # load model\n parser.add_argument('--load', action='store_true') # load model\n parser.add_argument('--render_interval', default=100, type=int) # after render_interval, the env.render() will work\n parser.add_argument('--exploration_noise', default=0.1, type=float)\n parser.add_argument('--max_episode', default=100000, type=int) # num of games\n parser.add_argument('--max_step_per_episode', default=5, type=int) # num of games\n parser.add_argument('--max_length_of_trajectory', default=2000, type=int) # num of games\n parser.add_argument('--print_log', default=1, type=int)\n parser.add_argument('--update_iteration', default=10, type=int)\n return parser.parse_args()\n\n\ndef main(hparams):\n if hparams.seed is not None:\n random.seed(hparams.seed)\n np.random.seed(hparams.seed)\n torch.manual_seed(hparams.seed)\n if torch.cuda.is_available(): torch.cuda.manual_seed_all(hparams.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n use_cuda = torch.cuda.is_available()\n xpu = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n if hparams.load: # Load the checkpoint here\n pass\n\n\n writer = SummaryWriter()\n target = np.ones((256, 256, 4), np.uint8)\n volume = np.ones((256, 256, 1), np.uint8)\n # target[:,:] = [120, 64, 70, 30]\n # volume[:] = [50]\n target[:] = np.random.randint(0, 256, 4)\n volume[:] = np.random.randint(0, 256, 1)\n print(target.shape, volume.shape)\n # target[:] = [255, 255, 0]\n # volume[:] = [120]\n env = ImageRenderingEnvironment(writer=writer, target=target, volume=volume, \n max_step_per_episode=hparams.max_step_per_episode)\n\n agent = DDPG(writer=writer, device=xpu, hparams=hparams)\n ep_r = 0\n if hparams.mode == 'test':\n agent.load()\n for i in range(hparams.test_iteration):\n state = env.reset()\n for t in count():\n action = agent.select_action(state)\n next_state, reward, done, info = env.step(np.float32(action))\n ep_r += reward\n env.render()\n if done or t >= hparams.max_length_of_trajectory:\n print(\"Ep_i \\t{}, the ep_r is \\t{:0.2f}, the step is \\t{}\".format(i, ep_r, t))\n ep_r = 0\n break\n state = next_state\n\n elif hparams.mode == 'train':\n print(\"====================================\")\n print(\"Collecting Experience...\")\n print(\"====================================\")\n if hparams.load: agent.load()\n for i in range(hparams.max_episode):\n state = env.reset()\n for t in count():\n action = agent.select_action(state)\n\n # issue 3 add noise to action\n action = (action + np.random.normal(0, hparams.exploration_noise, size=env.action_space.shape[0])).clip(\n env.action_space.low, env.action_space.high)\n\n next_state, reward, done, info = env.step(action)\n ep_r += reward\n if hparams.render and i >= hparams.render_interval : env.render()\n agent.replay_buffer.push((state, next_state, action, reward, float(done)))\n if (i+1) % 10 == 0:\n print('Episode {}, The memory size is {} '.format(i, len(agent.replay_buffer.storage)))\n\n state = next_state\n if done or t >= hparams.max_length_of_trajectory:\n agent.writer.add_scalar('ep_r', ep_r, global_step=i)\n if i % hparams.print_log == 0:\n print(\"Ep_i \\t{}, the ep_r is \\t{:0.2f}, the step is \\t{}\".format(i, ep_r, t))\n 
ep_r = 0\n break\n\n if i % hparams.log_interval == 0:\n agent.save()\n if len(agent.replay_buffer.storage) >= hparams.capacity-1:\n agent.update()\n\n else:\n raise ValueError(\"mode wrong!!!\")\n\nif __name__ == '__main__':\n main(get_args())","sub_path":"DDPG.py","file_name":"DDPG.py","file_ext":"py","file_size_in_byte":14790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"169661272","text":"# import\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objs as go\nimport pandas as pd\nimport json\n\n# data\ndf = pd.read_csv('../data/wheels.csv')\n\n# app\napp = dash.Dash()\n\n# layout\napp.layout = html.Div([\n dcc.Graph(id='graph', figure=dict(\n data = [go.Scatter(\n x = df['color'],\n y = df['wheels'],\n mode = 'markers'\n )],\n layout = go.Layout(title = \"Test\", hovermode = \"closest\")\n )),\n html.Div([html.Pre(id='output')])\n])\n\n# callback\n@app.callback(\n Output('output', 'children'),\n [Input('graph', 'hoverData')]\n)\ndef return_output(hoverData):\n return json.dumps(hoverData, indent=2)\n\n# server\nif __name__ == '__main__':\n app.run_server()\n","sub_path":"2-13-HoverOverData/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"559801307","text":"# -*- coding: utf-8 -*-\n\"\"\"Load and plot zenith opacity and brightness temperatures.\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom typhon.arts import xml\nfrom typhon.plots import styles\n\n\nplt.style.use(styles('typhon'))\n\n\ndef trim_speciestags(speciestags):\n \"\"\"Return trimmed and flat version of ArrayOfSpeciesTags.\"\"\"\n return [s[0].split('-')[0] for s in speciestags]\n\n\n# Read ARTS results\nbt = xml.load('results/bt.xml')\nfreq = xml.load('results/f_grid.xml')\nod = xml.load('results/odepth_1D.xml')\nspecies = trim_speciestags(xml.load('results/species.xml'))\nheight = int(xml.load('results/sensor_pos.xml'))\nzenith_angle = int(xml.load('results/sensor_los.xml'))\n\n# figure of zenith opacity with logarithmic scale on y axis\nfig, ax = plt.subplots()\nax.semilogy(freq / 1e9, od)\nax.axhline(1, linewidth=0.8, color='#b0b0b0', zorder=0)\nax.grid(True, axis='x')\nax.set_xticks([22.3, 60., 118.8, 183.])\nax.set_xlim(freq.min() / 1e9, freq.max() / 1e9)\nax.set_xlabel('Frequency [GHz]')\nax.set_ylabel('Zenith opacity')\nax.set_title('{s}, {h}km, {z}°'.format(\n s=', '.join(species), h=height / 1e3, z=zenith_angle))\nfig.savefig('plots/opacity_{s}_{h}km_{z}deg.pdf'.format(\n s='+'.join(species), h=height / 1e3, z=zenith_angle))\n\n# figure of brightness temperature for defined sensor position and line of sight\nfig, ax = plt.subplots()\nax.plot(freq / 1e9, bt)\nax.grid(True)\nax.set_xticks([22.3, 60., 118.8, 183.])\nax.set_xlim(freq.min() / 1e9, freq.max() / 1e9)\nax.set_xlabel('Frequency [GHz]')\nax.set_ylabel('Brightness temperature [K]')\nax.set_title('{s}, {h}km, {z}°'.format(\n s=', '.join(species), h=height / 1e3, z=zenith_angle))\nfig.savefig('plots/brightness_temperature_{s}_{h}km_{z}deg.pdf'.format(\n s='+'.join(species), h=height / 1e3, z=zenith_angle))\n","sub_path":"exercises/04-rtcalc/plot_bt.py","file_name":"plot_bt.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"652381710","text":"import os\n\nimport physical_validation as 
pv\n\nsystems = [\"none\", \"shift\", \"switch\"]\nn_dts = 6\n\n# change this to fit to your GROMACS installation\nparser = pv.data.GromacsParser(\n exe=\"~/bin/gromacs/bin/gmx_d\", includepath=\"~/bin/gromacs/share/gromacs/top\"\n)\n\nfor sys in systems:\n print(\"### Analyzing system \" + sys)\n print(\"## Reading results\")\n res = []\n # base simulation\n dir = os.path.join(sys, \"base\")\n res.append(\n parser.get_simulation_data(\n mdp=os.path.join(dir, \"mdout.mdp\"),\n top=os.path.join(dir, \"system.top\"),\n gro=os.path.join(dir, \"system.gro\"),\n edr=os.path.join(dir, \"system.edr\"),\n )\n )\n\n for n in range(1, n_dts - 1):\n dir = os.path.join(sys, \"integrator_\" + str(n))\n res.append(\n parser.get_simulation_data(\n mdp=os.path.join(dir, \"mdout.mdp\"),\n top=os.path.join(dir, \"system.top\"),\n gro=os.path.join(dir, \"system.gro\"),\n edr=os.path.join(dir, \"system.edr\"),\n )\n )\n\n # make plot directory\n if not os.path.exists(\"ana_argon_plots\"):\n os.makedirs(\"ana_argon_plots\")\n sysplot = os.path.join(\"ana_argon_plots\", sys + \".pdf\")\n\n print(\"## Validating integrator convergence\")\n pv.integrator.convergence(res, verbose=True, filename=sysplot)\n print()\n","sub_path":"examples/argon_integrator/ana_argon.py","file_name":"ana_argon.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"502861386","text":"import csv\n\n\nclass CSVParser(object):\n\n def __init__(self, binary_mode=False):\n self.modes = binary_mode\n\n @property\n def modes(self):\n return self.__read_mode, self.__write_mode\n\n @modes.setter\n def modes(self, binary_mode):\n self.__read_mode, self.__write_mode = ('rb', 'wb') if binary_mode else ('r', 'w')\n\n def read(self, path, *fields):\n result = []\n with open(path, self.__read_mode) as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n result.append({k: row.get(k, None) for k in fields})\n return result\n\n def write(self, path, *fields, writing_dictionaries=None):\n with open(path, self.__write_mode) as csv_file:\n writer = csv.DictWriter(csv_file, fields)\n writer.writeheader()\n if writing_dictionaries is not None:\n writer.writerows(writing_dictionaries)\n\n @classmethod\n def read_from_memory(cls, csv_file, *fields):\n result = []\n reader = csv.DictReader(csv_file)\n fieldnames = fields if fields else reader.fieldnames\n for row in reader:\n result.append({k: row.get(k, None) for k in fieldnames})\n return result\n","sub_path":"backend/utils/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"340851790","text":"from hsdbi import sql as sqldbi\nfrom scrape import scrape_orm\n\n\nSCRAPE_ORM_MODULE = 'scrape.scrape_orm'\n\n\n# Facade\n\n\nclass ScrapeFacade(sqldbi.SQLRepositoryFacade):\n \"\"\"RepositoryFacade for scrape data.\n\n Attributes: (all children of hsdbi.sql.SqlRepository)\n tasks\n team_master_urls\n team_year_urls\n \"\"\"\n\n def __init__(self, connection_string):\n super(ScrapeFacade, self).__init__(connection_string)\n self.tasks = sqldbi.SQLRepository(\n session=self.session,\n class_type=scrape_orm.Task,\n orm_module=SCRAPE_ORM_MODULE,\n primary_keys=['id'])\n self.team_master_urls = sqldbi.SQLRepository(\n session=self.session,\n class_type=scrape_orm.TeamMasterUrl,\n orm_module=SCRAPE_ORM_MODULE,\n primary_keys=['team_abbr'])\n self.team_year_urls = sqldbi.SQLRepository(\n 
session=self.session,\n class_type=scrape_orm.TeamYearUrl,\n orm_module=SCRAPE_ORM_MODULE,\n primary_keys=['franchise_id', 'year'])\n","sub_path":"scrape/scrape_dbi.py","file_name":"scrape_dbi.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"625180877","text":"from sys import stdin, stdout\ninput = stdin.readline\nprint = stdout.write\n\n\nT = int(input())\nfor t in range(T):\n a, b, n = map(int, input().split())\n def solve(a, b, n):\n n %= 3\n if n == 0:\n return a\n elif n == 1:\n return b\n else:\n return a ^ b\n print(str(solve(a, b, n)) + \"\\n\")","sub_path":"Misc/Manthan, Codefest 19/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"15119707","text":"n = int(input(\"Enter a number to split into prime factors: \"))\n\ntableaux = {0:False, 1:False}\n\nfor i in range(2,n+1):\n tableaux[i] = True\n\n# We go from x^2 + x operations to a little more than 2 * x operations -> very worthwhile for large numbers\nfor key, value in tableaux.items():\n if value == True:\n # for i in range(key+1, n+1):\n for i in range(key+key, n+1, key): # step of KEY; length = size of the starting prime number i\n if i % key == 0:\n tableaux[i] = False\npremier = []\nfor key, value in tableaux.items():\n if value == True:\n premier.append(key)\n\nnbr = n\ntmp = nbr\nres = {}\n\nfor value in premier:\n res[value] = 0\n\n\nwhile tmp > 1:\n prem = premier.pop()\n while tmp % prem == 0:\n res[prem] += 1\n tmp = int(tmp / prem)\n\n\n\n\n\n # if tmp % prem == 0:\n # res[prem] += 1\n # tmp = tmp - (nbr / prem)\n # elif len(premier) == 0:\n # break\n # else:\n # prem = premier.pop()\n\n\n# print(res): display the dictionary with the prime numbers as keys and their exponents as values\nresultat = \"\"\n\nfor key, value in res.items():\n if value > 0:\n texte = str(key) + \"^\" + str(value) + \" * \"\n resultat = resultat + texte\nprint(resultat)\n","sub_path":"Exercice 15.py","file_name":"Exercice 15.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"206811028","text":"#############################################################\n## ASSIGNMENT 2_1 CODE SKELETON\n#############################################################\n\nfrom collections import defaultdict\nimport gzip\nfrom statistics import stdev, mean \nimport numpy as np\nfrom sklearn.naive_bayes import GaussianNB\n# importing fractions as parameter values \nfrom fractions import Fraction as fr\n\n#### Q1.1 Evaluation Metrics ####\n\n## Input: y_pred, a list of length n with the predicted labels,\n## y_true, a list of length n with the true labels\n\n## Calculates the precision of the predicted labels\ndef get_precision(y_pred, y_true):\n ## YOUR CODE HERE...\n assert len(y_pred) == len(y_true) and sum(y_pred) != 0\n precision = sum([1 if y_p == 1 and y_t == 1 else 0 for y_p, y_t in zip(y_pred, y_true)]) /sum(y_pred)\n return precision\n \n## Calculates the recall of the predicted labels\ndef get_recall(y_pred, y_true):\n ## YOUR CODE HERE...\n assert len(y_pred) == len(y_true) and sum(y_true) != 0\n recall = sum([1 if y_p == 1 and y_t == 1 else 0 for y_p, y_t in zip(y_pred, y_true)]) / sum(y_true)\n return recall\n\n## Calculates the f-score of the predicted labels\ndef get_fscore(y_pred, y_true):\n ## YOUR CODE HERE...\n precision 
= get_precision(y_pred, y_true)\n recall = get_recall(y_pred, y_true)\n fscore = 2 * precision * recall / (precision + recall)\n return fscore\n\ndef get_3_stats(y_pred, y_true):\n precision = get_precision(y_pred, y_true)\n recall = get_recall(y_pred, y_true)\n fscore = get_fscore(y_pred, y_true)\n return [precision, recall, fscore]\n\n#### 2. Complex Word Identification ####\n\n## Loads in the words and labels of one of the datasets\ndef load_file(data_file):\n words = []\n labels = [] \n with open(data_file, 'rt', encoding=\"utf8\") as f:\n i = 0\n for line in f:\n if i > 0:\n line_split = line[:-1].split(\"\\t\")\n words.append(line_split[0].lower())\n labels.append(int(line_split[1]))\n i += 1\n return words, labels\n\n### 1.2.1: A very simple baseline\n\n## Labels every word complex\ndef all_complex(data_file):\n ## YOUR CODE HERE...\n def classify_all_complex(words):\n return [1] * len(words)\n\n words, labels = load_file(data_file)\n y_pred = classify_all_complex(words)\n y_true = labels\n\n precision = get_precision(y_pred, y_true)\n recall = get_recall(y_pred, y_true)\n fscore = get_fscore(y_pred, y_true)\n\n performance = [precision, recall, fscore]\n return performance\n\n\n### 1.2.2: Word length thresholding\n\n## Finds the best length threshold by f-score, and uses this threshold to\n## classify the training and development set\ndef word_length_threshold(training_file, development_file):\n ## YOUR CODE HERE\n def classify_word_length_threshold(words, threshold = 7):\n return [1 if len(word) >= threshold else 0 for word in words]\n\n words, y_true = load_file(training_file)\n y_pred = classify_word_length_threshold(words)\n\n tprecision = get_precision(y_pred, y_true)\n trecall = get_recall(y_pred, y_true)\n tfscore = get_fscore(y_pred, y_true)\n training_performance = [tprecision, trecall, tfscore]\n \n words, y_true = load_file(development_file)\n y_pred = classify_word_length_threshold(words)\n\n dprecision = get_precision(y_pred, y_true)\n drecall = get_recall(y_pred, y_true)\n dfscore = get_fscore(y_pred, y_true)\n development_performance = [dprecision, drecall, dfscore]\n\n return training_performance, development_performance\n\n\n\n### 1.2.3: Word frequency thresholding\n\n## Loads Google NGram counts\ndef load_ngram_counts(ngram_counts_file): \n counts = defaultdict(int) \n with gzip.open(ngram_counts_file, 'rt', errors='ignore') as f: \n for line in f:\n token, count = line.strip().split('\\t') \n if token[0].islower(): \n counts[token] = int(count) \n return counts\n\n# Finds the best frequency threshold by f-score, and uses this threshold to\n## classify the training and development set\ndef word_frequency_threshold(training_file, development_file, counts):\n ## YOUR CODE HERE\n def classify_word_frequency_threshold(words, counts, threshold = 1000000):\n return [1 if counts[word] < threshold else 0 for word in words]\n \n words, y_true = load_file(training_file) #training\n y_pred = classify_word_frequency_threshold(words, counts)\n tprecision, trecall, tfscore = get_3_stats(y_pred, y_true)\n\n words, y_true = load_file(development_file) #development\n y_pred = classify_word_frequency_threshold(words, counts)\n dprecision, drecall, dfscore = get_3_stats(y_pred, y_true)\n\n training_performance = [tprecision, trecall, tfscore] # together\n development_performance = [dprecision, drecall, dfscore]\n return training_performance, development_performance\n\n### 1.3.1: Naive Bayes\n \n## Trains a Naive Bayes classifier using length and frequency features\ndef 
naive_bayes(training_file, development_file, counts):\n ## YOUR CODE HERE\n def feature_matrix_np_normalized(words):\n def normalize_columns(matrix):\n num_columns = len(matrix[0])\n for col in range(num_columns):\n mean_value = mean([matrix[row][col] for row in range(len(matrix))])\n sd = stdev([matrix[row][col] for row in range(len(matrix))])\n for row in range(len(matrix)):\n matrix[row][col] = (matrix[row][col] - mean_value) / sd\n \n matrix = [[len(word), counts[word]] for word in words]\n normalize_columns(matrix)\n np_matrix = np.array(matrix)\n return np_matrix\n\n def train_naive_bayes(words, y_true, counts):\n np_feature_matrix = feature_matrix_np_normalized(words)\n np_y_true = np.array(y_true)\n \n clf = GaussianNB()\n clf.fit(np_feature_matrix, np_y_true)\n return clf\n\n words, y_true = load_file(training_file)\n clf = train_naive_bayes(words, y_true, counts)\n\n feature_matrix = feature_matrix_np_normalized(words)\n y_pred = clf.predict(feature_matrix)\n tprecision, trecall, tfscore = get_3_stats(y_pred, y_true)\n\n words, y_true = load_file(development_file)\n feature_matrix = feature_matrix_np_normalized(words)\n y_pred = clf.predict(feature_matrix)\n dprecision, drecall, dfscore = get_3_stats(y_pred, y_true)\n\n training_performance = [tprecision, trecall, tfscore]\n development_performance = [dprecision, drecall, dfscore]\n return training_performance, development_performance\n\n \n\nif __name__ == \"__main__\":\n training_file = \"../data/complex_words_training.txt\"\n development_file = \"../data/complex_words_development.txt\"\n test_file = \"../data/complex_words_test_unlabeled.txt\"\n train_data = load_file(training_file)\n \n","sub_path":"Source Files/hw2.py","file_name":"hw2.py","file_ext":"py","file_size_in_byte":6834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"274526236","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 10 17:53:37 2019\n\n@author: dell\n\"\"\"\n\nfrom scipy.io import wavfile\nimport numpy as np\nimport os\nimport sounddevice as sd\nimport matplotlib.pyplot as plt\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom src import vae_bernoulli as bernoulli\n \n\n#\nfs, s_Kick = wavfile.read('DrumSound\\Kick.wav')\nfs, s_Snare = wavfile.read('DrumSound\\Snare.wav')\nfs, s_Clap = wavfile.read('DrumSound\\Clap.wav')\nfs, s_HHO = wavfile.read('DrumSound\\HHO.wav')\nfs, s_HHC = wavfile.read('DrumSound\\HHC.wav')\nfs, s_Tom = wavfile.read('DrumSound\\Tom.wav')\nfs, s_Cymb = wavfile.read('DrumSound\\Cymb.wav')\nfs, s_Percu = wavfile.read('DrumSound\\Percu.wav')\n\n\n#%%\n\n\n\n \n#%% Load vae, you can choose between gaussian and bernoulli models\nD_in, D_enc, D_z, D_dec, D_out = 512, 800, 2, 800, 512 # Change D_z depending on dimensions of latent space\nvae = bernoulli.VAE_BERNOULLI(D_in, D_enc, D_z, D_dec, D_out); # change 'gaussian' to 'bernoulli' to change the model\nvae.load_state_dict(torch.load('models/sequence/VAE_BERNOULLI_2_BETA_4_hid800')) # idem \nvae.eval()\n\n#%% Sampling from latent space\nsample = torch.randn(1,D_z)\nprint(sample)\nsample = vae.decoder(sample)\ny = sample.detach().numpy()\nfor i in range(y.size):\n if y[0][i] > 0.2:\n y[0][i] = 1\n else:\n y[0][i] = 0\nx = y.reshape(8,64) \nplt.figure()\nplt.imshow(x, cmap='gray',origin = 'lower')\n\n\n\n\n#root = r'C:\\Users\\dell\\Documents\\ATIAM\\Info\\ATIAM-Sequenceur\\Dataset_Drum_Groove_Pattern'\n#track_path = os.path.join(root,'(drums)_House2.npy')\n#activation_matrix = np.load(track_path)\nactivation_matrix = 
x\n#%%\n\nbpm = 100\nnb_instrument = 8 #fixed\nquantification = 64 #number of divisions in one measure\nnb_measure = 1 #number of measures\n\nlenght_sound_sec = 60/bpm * 4 # 4 quarter notes in one measure\n\nlenght_sample = len(s_Kick)\nlenght_sound_ech = int(lenght_sound_sec * fs) + lenght_sample\n\nsound = np.zeros(lenght_sound_ech)\ntime_ech = 0\n\nfor activation in activation_matrix.T[:,:]:\n itemindex = np.where(activation==1)[0]\n print(itemindex)\n if itemindex.size == 0:\n time_ech = time_ech + int(lenght_sound_ech/quantification)\n else:\n for item in itemindex:\n if item == 0:\n sound[time_ech:time_ech+lenght_sample] = sound[time_ech:time_ech+lenght_sample] + s_Kick\n if item == 1:\n sound[time_ech:time_ech+lenght_sample] = sound[time_ech:time_ech+lenght_sample] + s_Snare\n if item == 2:\n sound[time_ech:time_ech+lenght_sample] = sound[time_ech:time_ech+lenght_sample] + s_Clap\n if item == 3:\n sound[time_ech:time_ech+lenght_sample] = sound[time_ech:time_ech+lenght_sample] + s_HHO\n if item == 4:\n sound[time_ech:time_ech+lenght_sample] = sound[time_ech:time_ech+lenght_sample] + s_HHC\n if item == 5:\n sound[time_ech:time_ech+lenght_sample] = sound[time_ech:time_ech+lenght_sample] + s_Tom\n if item == 6:\n sound[time_ech:time_ech+lenght_sample] = sound[time_ech:time_ech+lenght_sample] + s_Cymb\n if item == 7:\n sound[time_ech:time_ech+lenght_sample] = sound[time_ech:time_ech+lenght_sample] + s_Percu\n \n time_ech = time_ech + int(lenght_sound_ech/quantification)\n print(time_ech)\n \n \nsd.play(sound, fs)\n\n#plt.figure(1, figsize=[15,5])\n#plt.plot(sound)\n#plt.savefig(\"sound.svg\", format=\"svg\")\n\n\n\n","sub_path":"Activation_Matrix_To_Sound.py","file_name":"Activation_Matrix_To_Sound.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"281386074","text":"from classes.constants import FILE_POSITIONS\nfrom classes.attribute import Attribute\n\n\nclass Measure:\n def __init__(self, attribute_map):\n self.life_nb = attribute_map['life_nb']\n self.measure_date = attribute_map['measure_date']\n self.farm_id = attribute_map['farm_id']\n self.month_nb = attribute_map['month_nb']\n self.cow_class = attribute_map['cow_class']\n self.birthday = attribute_map['birthday']\n self.number = attribute_map['number']\n self.name = attribute_map['name']\n self.milk_kg = attribute_map['milk_kg']\n self.fat_percent = attribute_map['fat_percent']\n self.protein_percent = attribute_map['protein_percent']\n self.cell_count = attribute_map['cell_count']\n self.urea = attribute_map['urea']\n self.status = attribute_map['status']\n self.lak_nb = attribute_map['lak_nb']\n self.child_birth = attribute_map['child_birth']\n self.lak_days = attribute_map['lak_days']\n self.milk_ges = attribute_map['milk_ges']\n self.fat_percent_ges = attribute_map['fat_percent_ges']\n self.fat_kg_ges = attribute_map['fat_kg_ges']\n self.protein_percent_ges = attribute_map['protein_percent_ges']\n self.protein_kg_ges = attribute_map['protein_kg_ges']\n self.child_farm = attribute_map['child_farm']\n\n def to_map(self):\n return {\n 'life_nb': self.life_nb,\n 'measure_date': self.measure_date,\n 'farm_id': self.farm_id,\n 'month_nb': self.month_nb,\n 'cow_class': self.cow_class,\n 'birthday': self.birthday,\n 'number': self.number,\n 'name': self.name,\n 'milk_kg': self.milk_kg,\n 'fat_percent': self.fat_percent,\n 'protein_percent': self.protein_percent,\n 'cell_count': self.cell_count,\n 'urea': self.urea,\n 'status': self.status,\n 
'lak_nb': self.lak_nb,\n 'child_birth': self.child_birth,\n 'lak_days': self.lak_days,\n 'milk_ges': self.milk_ges,\n 'fat_percent_ges': self.fat_percent_ges,\n 'fat_kg_ges': self.fat_kg_ges,\n 'protein_percent_ges': self.protein_percent_ges,\n 'protein_kg_ges': self.protein_kg_ges,\n 'child_farm': self.child_farm\n }\n\n @staticmethod\n def from_raw(raw_measure):\n converted_data_map = {}\n for index, raw_data in enumerate(raw_measure):\n converted_attribute = Attribute.raw_to_db(FILE_POSITIONS[index], raw_data)\n converted_data_map[FILE_POSITIONS[index]] = converted_attribute\n\n a_measure = Measure(converted_data_map)\n if not (a_measure.life_nb and a_measure.measure_date):\n return None\n return a_measure\n\n @staticmethod\n def from_db(db_measure, attribute_list):\n db_measure_map = {}\n for attr, measure in zip(attribute_list, db_measure):\n db_measure_map[attr] = measure\n return Measure(db_measure_map)\n","sub_path":"classes/measure.py","file_name":"measure.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"492335909","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\n\"\"\"\nS1:\n Add the corresponding digits of l1 and l2; break as soon as either list is empty, then keep adding the remaining non-empty list.\n\"\"\"\n\nclass Solution(object):\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n rtype = ListNode(0) #head\n r = rtype\n pre = 0 #carry\n while l1 and l2 :\n sum = l1.val+l2.val+pre\n pre = 0 \n if sum >9:\n sum -= 10\n pre = 1\n r.next = ListNode(sum)\n r = r.next\n l1 = l1.next\n l2 = l2.next\n if not l1 and not l2:\n if pre:\n r.next = ListNode(pre)\n return rtype.next\n if l1:\n l2= l1\n while l2:\n sum = pre + l2.val\n pre = 0\n if sum >9:\n sum -= 10\n pre = 1\n r.next = ListNode(sum)\n r = r.next\n l2 = l2.next\n if pre:\n r.next = ListNode(pre)\n return rtype.next\n\n\n\"\"\"\nS2:\n More concise than S1. Instead of adding the corresponding digits of l1 and l2 in one pass, add each list's contribution separately, so there is no need to require l1 and l2 to be non-empty at the same time.\n\"\"\"\nclass Solution2:\n # @return a ListNode\n def addTwoNumbers(self, l1, l2):\n head = ListNode(0)\n l = head\n carry = 0\n while l1 or l2 or carry: # the last carry could also be checked separately\n sum, carry = carry, 0\n if l1:\n sum += l1.val\n l1 = l1.next\n if l2:\n sum += l2.val\n l2 = l2.next\n if sum > 9:\n carry = 1\n sum -= 10\n l.next = ListNode(sum)\n l = l.next\n return head.next\n\n\n\n\n\nif __name__ == \"__main__\":\n l1 = ListNode(9)\n #l1.next = ListNode(4)\n #l1.next.next=None\n l2 = ListNode(9)\n l2.next = ListNode(1)\n #l2.next = ListNode(4)\n #l2.next.next=None\n s = Solution()\n r = s.addTwoNumbers(l1,l2)\n while r:\n print(r.val)\n r = r.next","sub_path":"2_AddTwoNumber.py","file_name":"2_AddTwoNumber.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"10807217","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright 2019 by Christian Laußmann\n# github.com/claussmann\n\nimport time\nimport RPi.GPIO as GPIO\nimport atexit\n\nclass RGB_LED:\n\t#type can be \"ANODE\" or \"KATHODE\"\n\tdef __init__(self, red, green, blue, type):\n\t\tGPIO.setmode(GPIO.BCM)\n\t\tGPIO.setwarnings(False)\n\t\tself.r = red\n\t\tself.g = green\n\t\tself.b = blue\n\t\tself.type = type\n\t\tGPIO.setup(self.r, GPIO.OUT)\n\t\tGPIO.setup(self.g, GPIO.OUT)\n\t\tGPIO.setup(self.b, GPIO.OUT)\n\t\tself.turn_of()\n\t\tatexit.register(self.cleanup)\n\t\t\n\tdef turn_of(self):\n\t\tif(self.type == 
\"ANODE\"):\n\t\t\tGPIO.output(self.r, GPIO.HIGH)\n\t\t\tGPIO.output(self.g, GPIO.HIGH)\n\t\t\tGPIO.output(self.b, GPIO.HIGH)\n\t\telse:\n\t\t\tGPIO.output(self.r, GPIO.LOW)\n\t\t\tGPIO.output(self.g, GPIO.LOW)\n\t\t\tGPIO.output(self.b, GPIO.LOW)\n\t\n\t#color:{RED, GREEN, BLUE; WHITE; CYAN; YELLOW; PINK} \n\tdef set_color(self, color):\n\t\tself.turn_of()\n\t\tif(color == \"RED\"):\n\t\t\tself.red()\n\t\t\treturn;\n\t\tif(color == \"GREEN\"):\n\t\t\tself.green()\n\t\t\treturn;\n\t\tif(color == \"BLUE\"):\n\t\t\tself.blue()\n\t\t\treturn;\n\t\tif(color == \"WHITE\"):\n\t\t\tself.red()\n\t\t\tself.green()\n\t\t\tself.blue()\n\t\t\treturn;\n\t\tif(color == \"CYAN\"):\n\t\t\tself.green()\n\t\t\tself.blue()\n\t\t\treturn;\n\t\tif(color == \"YELLOW\"):\n\t\t\tself.red()\n\t\t\tself.green()\n\t\t\treturn;\n\t\tif(color == \"PINK\"):\n\t\t\tself.red()\n\t\t\tself.blue()\n\t\t\treturn;\n\t\n\t#color:{RED, GREEN, BLUE; WHITE; CYAN; YELLOW; PINK} \n\t#speed:{SLOW, MEDIUM, HIGH}\n\tdef blink(self, times, color, speed):\n\t\twait = 0.3\n\t\tif(speed == \"SLOW\"):\n\t\t\twait = 0.8\n\t\tif(speed == \"HIGH\"):\n\t\t\twait = 0.1\n\t\tfor i in range (0,times):\n\t\t\tself.set_color(color)\n\t\t\ttime.sleep(wait)\n\t\t\tself.turn_of()\n\t\t\ttime.sleep(wait)\n\t\t\t\n\tdef red(self):\n\t\tif(self.type == \"ANODE\"):\n\t\t\tGPIO.output(self.r, GPIO.LOW)\n\t\telse:\n\t\t\tGPIO.output(self.r, GPIO.HIGH)\n\t\n\tdef green(self):\n\t\tif(self.type == \"ANODE\"):\n\t\t\tGPIO.output(self.g, GPIO.LOW)\n\t\telse:\n\t\t\tGPIO.output(self.g, GPIO.HIGH)\n\t\n\t\n\tdef blue(self):\n\t\tif(self.type == \"ANODE\"):\n\t\t\tGPIO.output(self.b, GPIO.LOW)\n\t\telse:\n\t\t\tGPIO.output(self.b, GPIO.HIGH)\n\t\t\n\n\tdef cleanup(self):\n\t\tprint(\"Setting GPIO-Pins LOW\")\n\t\tGPIO.output(self.r, GPIO.LOW)\n\t\tGPIO.output(self.g, GPIO.LOW)\n\t\tGPIO.output(self.b, GPIO.LOW)\n\t\t\n#End of class\n\n#For headless execution\ndef main():\n\tled = RGB_LED(2, 3, 4, \"ANODE\")\n\twhile (1):\n\t\tled.turn_of()\n\t\tled.blink(4, \"GREEN\", \"HIGH\")\n\t\tled.blink(4, \"RED\", \"SLOW\")\n\t\tled.blink(4, \"BLUE\", \"MEDIUM\")\n\t\tled.blink(4, \"CYAN\", \"HIGH\")\n\t\tled.blink(4, \"YELLOW\", \"SLOW\")\n\t\tled.blink(4, \"PINK\", \"MEDIUM\")\n\t\tled.set_color(\"WHITE\")\n\t\ttime.sleep(3)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"LED/RGB_LED.py","file_name":"RGB_LED.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"366704782","text":"\"\"\"An AWS Python Pulumi program\"\"\"\n\nimport pulumi\nimport pulumi_aws as aws\n\ndefault_bucket = aws.s3.Bucket(\"defaultBucket\")\ndefault_bucket_object = aws.s3.BucketObject(\"defaultBucketObject\",\n bucket=default_bucket.id,\n key=\"beanstalk/go-v1.zip\",\n source=pulumi.FileAsset(\"beanstalk/python.zip\"))\n\ndefault_application = aws.elasticbeanstalk.Application(\"myapplication\", description=\"tf-test-description-app\")\ndefault_application_version = aws.elasticbeanstalk.ApplicationVersion(\"defaultApplicationVersion\",\n application=default_application.id,\n description=\"application version\",\n bucket=default_bucket.id,\n key=default_bucket_object.id)\n\npulumi.export(\"elastic beanstalk s3 bucket\", default_bucket.id)\npulumi.export(\"elastic beanstalk application name\", default_application.name)\npulumi.export(\"elastic beanstalk applicationversions\", 
default_application_version.name)","sub_path":"aws-beanstalk-py/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"609886091","text":"\nfrom functools import partial \nfrom operator import add \n \ndef escalera(n, k): \n # s = jump size \n for s in range(1, min(n, k) + 1): \n if s < n: \n yield from map(partial(add, (s,)), escalera(n - s, k)) \n else: \n yield (s,) \n","sub_path":"escalera.py","file_name":"escalera.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"323106429","text":"class ProjectAnnotation:\n class Meta:\n abstract = True\n\n def annotate(self, query_params, json_list):\n \"\"\"\n\n :param query_params: Query parameters dictionary\n :param json_list: Json list of projects\n :return: Json project list with any added annotations\n \"\"\"\n pass\n\n\nclass EventVideosProjectAnnotation(ProjectAnnotation):\n @staticmethod\n def _get_event_videos(event_id):\n from civictechprojects.models import ProjectLink\n video_links = ProjectLink.objects.filter(link_event=event_id)\n video_links = {video_link.link_project.id: video_link.to_json() for video_link in video_links}\n return video_links\n\n def annotate(self, query_params, json_list):\n if 'event_id' in query_params:\n event_videos = self._get_event_videos(query_params['event_id'][0])\n for project_json in json_list:\n project_id = project_json['project_id']\n if project_id in event_videos:\n project_json['project_thumbnail_video'] = event_videos[project_id]\n return json_list\n\n\n_annotations = [EventVideosProjectAnnotation()]\n\n\ndef apply_project_annotations(query_params, json_list):\n for _annotation in _annotations:\n json_list = _annotation.annotate(query_params, json_list)\n return json_list\n","sub_path":"civictechprojects/helpers/projects/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"557231907","text":"import datetime\n\nfrom django.http import HttpResponse,Http404, response\nfrom django.shortcuts import render, get_object_or_404,redirect, render_to_response\nfrom django.views.decorators import csrf\nfrom django.views.decorators.csrf import csrf_protect\n\n\nfrom django.core.paginator import Paginator\nfrom blog.models import Article, Comment\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom blog.forms import CommentForm\nfrom django.contrib import auth\n\n\n# Create your views here.\n\ndef home(request, page_number = 1):\n articles = Article.objects.all()\n current_page = Paginator(articles, 5)\n context = {\n 'articles': current_page.page(page_number),\n\n }\n\n return render(request, \"blog/home.html\", context)\n\ndef about(request):\n return render(request, 'blog/about.html')\n\n\ndef show_article(request, article_id):\n comment_form = CommentForm\n context = {}\n article = Article.objects.get(pk=article_id)\n context['article'] = article\n context['form'] = comment_form\n context['comments'] = Comment.objects.filter(coment_article_id=article_id).order_by('-date')\n if request.COOKIES.get('pause', None):\n context['error'] = '30 seconds have not passed since the last comment was sent'\n\n\n return render(request, 'blog/article.html', context)\n\ndef addlike(request, article_id):\n try:\n if str(article_id) in request.session:\n article = 
get_object_or_404(Article, id=article_id)\n article.likes -= 1\n article.save()\n request.session.pop(str(article_id), None)\n\n return redirect('/articles/%s/'% article_id)\n else:\n article = get_object_or_404(Article, id=article_id)\n article.likes +=1\n article.save()\n request.session[str(article_id)] = True\n\n return redirect('/articles/%s/'% article_id)\n\n except ObjectDoesNotExist:\n raise Http404\n return redirect('/articles/%s/'% article_id)\n\n\ndef addcomment(request, article_id):\n if request.POST and not ('pause' in request.COOKIES):\n form = CommentForm(request.POST)\n if form.is_valid():\n # commit=False so the comment is kept in a variable instead of being saved to the database yet\n coment = form.save(commit=False)\n coment.coment_article = Article.objects.get(id=article_id)\n publicator = auth.get_user(request)\n coment.name_public = publicator.username\n form.save()\n\n\n response = redirect('/articles/%s/'% article_id)\n response.set_cookie('pause', True, max_age=30)\n return response\n\n\n\n\n return redirect('/articles/%s/'% article_id)\n\n\n\n\n\n\n\n\n\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"6013913","text":"import constants\nfrom auth.storage import Storage\n\nfrom utils import create_error_with_status\n\nfrom flask import jsonify, request, current_app\n\n\ndef refresh_tokens():\n current_app.logger.info(\"Refresh tokens pair\")\n try:\n token = request.json[\"token\"]\n except KeyError:\n status = constants.statuses[\"tokens\"][\"missingData\"]\n body = create_error_with_status(status, \"No token get\")\n current_app.logger.warn(\"No token for refreshment\")\n return jsonify(body), constants.responses[status]\n\n current_app.logger.debug(f\"Refresh token value {token}\")\n\n access, refresh, status = Storage.update_session(token)\n http_status = constants.responses[status]\n\n if status == constants.statuses[\"tokens\"][\"created\"]:\n body = dict(status=status, accessToken=access, refreshToken=refresh)\n elif status == constants.statuses[\"tokens\"][\"noSuchToken\"]:\n body = create_error_with_status(status, \"No information about token\")\n else: # status == constants.statuses[\"user\"][\"refreshExpired\"]:\n body = create_error_with_status(status, \"Refresh token expired\")\n return jsonify(body), http_status\n","sub_path":"auth/handlers/refresh.py","file_name":"refresh.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"487854417","text":"from datetime import datetime\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render\nfrom pandas.tseries.offsets import BDay\n\nfrom opinion.group.fundamental.models import StockFundamental, StockIndustry, UnderlyingArticle\nfrom opinion.group.fundamental.report import ReportFundamental\nfrom opinion.group.position.models import PositionIdea, PositionEnter, PositionDecision\nfrom opinion.group.report.models import ReportEnter\nfrom opinion.group.technical.report import ReportTechnicalRank\nfrom opinion.group.technical.models import TechnicalRank, TechnicalOpinion\n\n\nMODEL_OBJ = {\n 'positionidea': PositionIdea,\n 'technicalrank': TechnicalRank,\n 'technicalopinion': TechnicalOpinion,\n 'stockfundamental': StockFundamental,\n 'stockindustry': StockIndustry,\n 'underlyingarticle': UnderlyingArticle,\n 'positionenter': 
PositionEnter,\n 'positiondecision': PositionDecision,\n\n}\n\n\ndef opinion_link(request, symbol):\n \"\"\"\n\n :param request:\n :param symbol:\n :return:\n \"\"\"\n symbol = symbol.upper()\n\n template = 'opinion/opinion_link.html'\n\n parameters = dict(\n site_title='Opinion links',\n title='{symbol} opinions links'.format(symbol=symbol),\n symbol=symbol\n )\n\n return render(request, template, parameters)\n\n\ndef generate_report(request, symbol, date=''):\n \"\"\"\n Step by step create opinion then final create report\n :param request: request\n :param symbol: str\n :param date: str\n :return: render\n \"\"\"\n symbol = symbol.upper()\n\n if date == '':\n date = datetime.today().date()\n if date.weekday() not in range(1, 6): # only weekday\n # noinspection PyUnresolvedReferences\n date = (date - BDay()).to_datetime().date()\n else:\n date = datetime.strptime(date, '%Y-%m-%d')\n\n # 1. create stock report, if exist open it\n report_enter, exists = ReportEnter.objects.get_or_create(\n symbol=symbol, date=date\n )\n # print date, report_enter, exists\n\n report_enter = report_enter\n \"\"\":type: StockReport\"\"\"\n\n model_data = {}\n\n for key, model in MODEL_OBJ.items():\n model_data[key] = {}\n try:\n model_data[key]['data'] = getattr(report_enter, key)\n except ObjectDoesNotExist:\n model_data[key]['data'] = model()\n model_data[key]['data'].report = report_enter\n model_data[key]['data'].save()\n\n model_data[key]['url'] = reverse(\n 'admin:opinion_%s_change' % key, args=(model_data[key]['data'].id,)\n )\n\n model_data['stockreport'] = {\n 'data': report_enter,\n 'url': reverse(\n 'admin:opinion_reportenter_change', args=(report_enter.id,)\n )\n }\n\n # page\n template = 'opinion/report/index.html'\n parameters = dict(\n site_title='Create report | %s | %s' % (symbol, date),\n title='Create report | %s | %s' % (symbol, date),\n report_enter=report_enter,\n symbol=symbol,\n model_data=model_data\n )\n\n return render(request, template, parameters)\n\n\ndef reference_link(request, report_id, model):\n \"\"\"\n\n :param request:\n :param report_id:\n :param model:\n :return:\n \"\"\"\n report_enter = ReportEnter.objects.get(id=report_id)\n symbol = report_enter.symbol.upper()\n\n # summary\n model_data = {\n k: getattr(report_enter, k) for k, v in MODEL_OBJ.items()\n }\n\n # todo: only check opinion item exists\n op_exists = OpinionExists(report_enter)\n summary = op_exists.created()\n\n template = 'opinion/report/helper.html'\n parameters = dict(\n site_title='Reference | %s | %s' % (symbol, model),\n title='Reference | %s | %s' % (symbol, model),\n symbol=symbol,\n model=model,\n model_data=model_data,\n summary=summary,\n )\n\n return render(request, template, parameters)\n\n\nclass OpinionExists(object):\n def __init__(self, report_enter):\n self.report_enter = report_enter\n \"\"\":type: ReportEnter \"\"\"\n\n @staticmethod\n def exists(parent, rel_obj):\n try:\n if getattr(parent, rel_obj):\n return True\n except ObjectDoesNotExist:\n return False\n\n def created(self):\n stock_fd = False\n if self.exists(self.report_enter, 'stockfundamental'):\n if self.report_enter.stockfundamental.tp_mean > 0:\n stock_fd = True\n\n stock_id = False\n if self.exists(self.report_enter, 'stockindustry'):\n if self.report_enter.stockindustry.direction:\n stock_id = True\n\n pos_idea = False\n if self.exists(self.report_enter, 'positionidea'):\n if self.report_enter.positionidea.target_price > 0:\n pos_idea = True\n\n pos_enter = False\n if self.exists(self.report_enter, 'positionenter'):\n if 
self.report_enter.positionenter.target_price > 0:\n pos_enter = True\n\n pos_dc = False\n if self.exists(self.report_enter, 'positiondecision'):\n if len(self.report_enter.positiondecision.desc):\n pos_dc = True\n\n tech_me = False\n if self.exists(self.report_enter.technicalrank, 'technicalmarketedge'):\n if self.report_enter.technicalrank.technicalmarketedge.fprice:\n tech_me = True\n\n tech_bc = False\n if self.exists(self.report_enter.technicalrank, 'technicalbarchart'):\n if self.report_enter.technicalrank.technicalbarchart.strength:\n tech_bc = True\n\n tech_cm = False\n if self.exists(self.report_enter.technicalrank, 'technicalchartmill'):\n if self.report_enter.technicalrank.technicalchartmill.rank:\n tech_cm = True\n\n article = False\n if self.exists(self.report_enter, 'underlyingarticle'):\n if len(self.report_enter.underlyingarticle.article_name):\n article = True\n\n # todo: here\n opinions = [\n 'TechnicalTick', 'TechnicalSma', 'TechnicalVolume', 'TechnicalIchimoku',\n 'TechnicalParabolic', 'TechnicalStoch', 'TechnicalBand', 'TechnicalFw',\n 'TechnicalTTM', 'TechnicalPivot', 'TechnicalFreeMove', 'TechnicalZigZag',\n ]\n tech_op = []\n if self.report_enter.technicalopinion.id:\n for op in opinions:\n created = 'Waiting...'\n if self.exists(self.report_enter.technicalopinion, op.lower()):\n created = 'Yes'\n\n tech_op.append('%s %s' % (op.replace('Technical', ''), created))\n\n return {\n 'stockfundamental': 'Yes' if stock_fd else 'Waiting...',\n 'stockindustry': 'Yes' if stock_id else 'Waiting...',\n 'positionidea': 'Yes' if pos_idea else 'Waiting...',\n 'positionenter': 'Yes' if pos_enter else 'Waiting...',\n 'positiondecision': 'Yes' if pos_dc else 'Waiting...',\n 'underlyingarticle': 'Yes' if article else 'Waiting...',\n 'technicalrank': [\n '%s %s' % ('Marketedge', 'Yes' if tech_me else 'Waiting...'),\n '%s %s' % ('barchart', 'Yes' if tech_bc else 'Waiting...'),\n '%s %s' % ('chartmill', 'Yes' if tech_cm else 'Waiting...'),\n ],\n 'technicalopinion': tech_op\n }\n\n\ndef enter_report(request, report_id):\n \"\"\"\n\n :param request:\n :param symbol:\n :param date:\n :return:\n \"\"\"\n report_enter = ReportEnter.objects.get(id=report_id)\n tech_rank_report = ReportTechnicalRank(report_enter.technicalrank, report_enter.close)\n fd_report = ReportFundamental(report_enter)\n\n reports = {\n # technical rank\n 'marketedge': tech_rank_report.marketedge.create(),\n 'barchart': tech_rank_report.barchart.create(),\n 'chartmill': tech_rank_report.chartmill.create(),\n # stock fundamental\n 'fundamental': fd_report.stock_fd.create(),\n 'industry': fd_report.stock_id.create(),\n\n }\n # report_enter.technicalrank.technicalmarketedge.recommend\n\n # todo: here, tomorrow, if clv not profit, close it\n # todo: start closing most of the pos and wait next time\n\n heats = {\n 'marketedge': tech_rank_report.marketedge.to_heat(),\n 'barchart': tech_rank_report.barchart.to_heat(),\n 'chartmill': tech_rank_report.chartmill.to_heat(),\n }\n\n\n\n\n title = 'Enter report | %s | %s' % (report_enter.symbol, report_enter.date)\n template = 'opinion/report/summary.html'\n parameters = dict(\n site_title=title,\n title=title,\n report_enter=report_enter,\n reports=reports,\n symbol=report_enter.symbol,\n date=report_enter.date\n )\n\n return render(request, template, parameters)\n\n\n\n\n\n\n# todo: symbol date report, for all detail, position & technical\n# todo: market & mindset & quest date report, all 
market\n","sub_path":"opinion/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"531349725","text":"from application import create_app\nfrom flask_migrate import MigrateCommand\nfrom flask_script import Manager\napp = create_app()\n\napp.app_context().push()\n\nmanager = Manager(app)\n\nmanager.add_command('db', MigrateCommand)\n\n\n@manager.command\ndef run():\n app.run(host='0.0.0.0')\n\n\nif __name__ == '__main__':\n manager.run()\n\n\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"102156035","text":"#!/usr/bin/env python\n\nfrom DjangoBlog.utils import send_email\nfrom DjangoBlog.utils import get_current_site_domain, render_template\nfrom django.conf import settings\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef send_comment_email(comment):\n site = get_current_site_domain()\n article_url = \"http://{site}{path}\".format(site=site, path=comment.article.get_absolute_url())\n article_title = comment.article.title\n comment_url = \"{}#div-comment-{}\".format(article_url, comment.id)\n comment_text = comment.body\n tomail = comment.author.email\n username = comment.author.username\n content = render_template('new_comment.j2', vars=locals())\n\n # if content is not None:\n # send_email(emailto=[tomail],\n # title='Thank you for your comment',\n # content=content,\n # images={\"logo.png\": \"image/png\", \"comment_icon.png\": \"image/png\"})\n\n try:\n if comment.parent_comment:\n parent_comment_username = comment.parent_comment.author.username\n parent_comment_url = \"{}#div-comment-{}\".format(article_url, comment.parent_comment.id)\n tomail = comment.parent_comment.author.email\n content = render_template('new_comment_reply.j2', vars=locals())\n # if content is not None:\n # send_email(emailto=[tomail],\n # title='New reply to your comment',\n # content=content,\n # images={\"logo.png\": \"image/png\", \"comment_reply_icon.png\": \"image/png\"})\n except Exception as e:\n logger.error(e)\n","sub_path":"comments/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"242527083","text":"#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Logging utilities for use by applications.\n\nClasses defined here:\n AppLogsHandler: StreamHandler subclass\n\"\"\"\n\n\n\n\n\nimport logging\nimport sys\nimport types\n\n\nNEWLINE_REPLACEMENT = \"\\0\"\n\n\nclass AppLogsHandler(logging.StreamHandler):\n \"\"\"Logging handler that will direct output to a persistent store of\n application logs.\n\n This handler will output log statements to stderr. 
This handler is\n automatically initialized and attached to the Python common logging library.\n \"\"\"\n\n\n\n\n def __init__(self, stream=None):\n \"\"\"Constructor.\n\n Args:\n # stream is optional. it defaults to sys.stderr.\n stream: destination for output\n \"\"\"\n logging.StreamHandler.__init__(self, stream)\n\n def close(self):\n \"\"\"Closes the stream.\n\n This implementation is based on the implementation of FileHandler.close().\"\"\"\n self.flush()\n self.stream.close()\n logging.StreamHandler.close(self)\n\n def emit(self, record):\n \"\"\"Emit a record.\n\n This implementation is based on the implementation of\n StreamHandler.emit().\"\"\"\n try:\n message = self._AppLogsMessage(record)\n self.stream.write(message.encode(\"UTF-8\"))\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\n def _AppLogsMessage(self, record):\n \"\"\"Converts the log record into a log line.\"\"\"\n\n message = self.format(record).replace(\"\\n\", NEWLINE_REPLACEMENT)\n return \"LOG %d %d %s\\n\" % (self._AppLogsLevel(record.levelno),\n long(record.created * 1000 * 1000),\n message)\n\n def _AppLogsLevel(self, level):\n \"\"\"Converts the logging level used in Python to the API logging level\"\"\"\n if level >= logging.CRITICAL:\n return 4\n elif level >= logging.ERROR:\n return 3\n elif level >= logging.WARNING:\n return 2\n elif level >= logging.INFO:\n return 1\n else:\n return 0\n","sub_path":"gae/google/appengine/api/app_logging.py","file_name":"app_logging.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"566005743","text":"# Sophia Tacderas\n# programming assignment 1\n# Calculates the volume and surface area of a sphere (with user input for the radius).\n\nfrom math import pi as p\n\nr = float(input(\"Enter the radius of the sphere: \"))\narea = 4*p*(r**2)\nvolume = (4/3)*p*(r**3)\n\nprint(\"The volume is: \"+str(volume))\nprint(\"The surface area is: \"+str(area))\n","sub_path":"pa1/Sphere.py","file_name":"Sphere.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"53711784","text":"import pandas as pd\nimport numpy as np\nimport copy\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LinearRegression, LogisticRegression, LogisticRegressionCV\n\n\nclass Estimator:\n def estimate(self, x: pd.DataFrame, y: pd.DataFrame) -> int:\n ...\n\n\nclass IPW(Estimator):\n name = \"IPW\"\n\n def __init__(self, x: pd.DataFrame):\n self.p_scores = self.estimate_propensity(x)\n\n def estimate(self, x: pd.DataFrame, y: pd.DataFrame) -> int:\n p_scores = self.p_scores[x.index]\n t = x['T'].to_numpy()\n y = y.to_numpy()\n p_scores_ratio = p_scores[:, 1] / p_scores[:, 0]\n sigma_T = t.sum()\n sigma_ti_yi = (t * y).sum()\n sigma_minus_ti_y1 = ((1 - t) * y * p_scores_ratio).sum()\n sigma_minus_ti = ((1 - t) * p_scores_ratio).sum()\n return (sigma_ti_yi / sigma_T) / (sigma_minus_ti_y1 / sigma_minus_ti), -1\n\n def estimate_propensity(self, x: pd.DataFrame):\n trees = 90\n depth = 15\n t = x['T'].to_numpy()\n features = x.loc[:, x.columns != 'T'].to_numpy()\n # classifier = RandomForestClassifier(n_estimators=trees, max_depth=depth).fit(X=features, y=t)\n classifier = LogisticRegressionCV(max_iter=10_000).fit(X=features, y=t)\n return classifier.predict_proba(features)\n\n\nclass CovariateAdjustment(Estimator):\n name = 
\"_Learner\"\n\n def __init__(self, learner: str):\n if learner == \"s\":\n self.estimate = self.s_learner\n elif learner == \"t\":\n self.estimate = self.t_learner\n else:\n raise Exception(\"Unknown learner\")\n\n self.name = learner + self.name\n\n def estimate(self, x: pd.DataFrame, y: pd.DataFrame) -> int:\n ...\n\n def s_learner(self, x: pd.DataFrame, y: pd.DataFrame) -> int:\n columns = list(x.columns)\n columns.remove('T')\n\n x_t1 = x[x['T'] == 1]\n x_t1_0 = copy.deepcopy(x_t1)\n x_t1_0['T'] = 0\n\n features = x.to_numpy()\n y_all = y.to_numpy()\n predictor = LogisticRegressionCV(max_iter=10_000).fit(X=features, y=y_all)\n\n y_hat_0 = predictor.predict_proba(x_t1_0)[:, 1]\n y_hat_1 = predictor.predict_proba(x_t1)[:, 1]\n\n ATE = y_hat_1 - y_hat_0\n return ATE.mean(), ATE.std()\n\n def t_learner(self, x:pd.DataFrame, y: pd.DataFrame) -> int:\n x_t1 = x[x['T'] == 1]\n y_t1 = y[x_t1.index]\n x_t0 = x[x['T'] == 0]\n y_t0 = y[x_t0.index]\n\n x_t1_0 = copy.deepcopy(x_t1)\n x_t1_0['T'] = 0\n\n predictor0 = LogisticRegressionCV(max_iter=10_000).fit(X=x_t0.to_numpy(), y=y_t0)\n predictor1 = LogisticRegressionCV(max_iter=10_000).fit(X=x_t1.to_numpy(), y=y_t1)\n\n y_hat_0 = predictor0.predict_proba(x_t1_0)[:, 1]\n y_hat_1 = predictor1.predict_proba(x_t1)[:, 1]\n\n ATE = y_hat_1 - y_hat_0\n return ATE.mean(), ATE.std()\n\n\nclass Matching(Estimator):\n name = \"Matching 1-20\"\n\n def __init__(self, distance_function, match_subset=None):\n self.distance_function = distance_function\n self._dis_mat = None\n self._balance = None\n self.match_subset = match_subset\n\n def estimate(self, x: pd.DataFrame, y: pd.DataFrame) -> int:\n predictor = LogisticRegressionCV(max_iter=10_000).fit(X=x, y=y)\n y = predictor.predict_proba(x)\n couples = self.match(x)\n # self._calc_balance(couples=couples, x=x)\n\n ATE = []\n for t1, t0_couple in couples.items():\n ATE.append((y[t1][1] - y[t0_couple][:, 1].mean()))\n\n ATE = np.array(ATE)\n return ATE.mean(), ATE.std()\n\n def match(self, x: pd.DataFrame):\n t0_indices = x[x['T'] == 0].index\n t1_indices = x[x['T'] == 1].index\n\n x_without_t = x.loc[:, x.columns != 'T']\n distances_df = pd.DataFrame(self.distance_matrix(x_without_t)).loc[t1_indices, t0_indices]\n\n t12group = {}\n for i in distances_df.index:\n r = distances_df.loc[i, :]\n idx = r.sort_values()[:20]\n t12group[i] = idx.index\n # couples = distances_df.idxmin(axis=1)\n return t12group\n\n def distance_matrix(self, x):\n if self._dis_mat is None:\n x_subset = x[self.match_subset] if self.match_subset else x\n self._dis_mat = self.distance_function(x_subset)\n return self._dis_mat\n\n def _calc_balance(self, couples, x):\n if self._balance is not None:\n return\n t1_idx = list(couples.index)\n t0_idx = list(couples.values)\n df_t1 = x.loc[t1_idx]\n df_t0 = x.loc[t0_idx]\n balance = []\n for c in df_t0.columns:\n balance.append(df_t0[c].value_counts())\n balance.append(df_t1[c].value_counts())\n\n balance_df = pd.DataFrame(balance)\n balance_df = balance_df / balance_df.sum(axis=1)[:, None]\n balance_df.to_csv(f\"Balance_{self.name}_{hash(str(self.match_subset))}.csv\")\n self._balance = 1","sub_path":"estimators_ATE.py","file_name":"estimators_ATE.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"170377373","text":"def is_pangram(sentence):\n unique_letters = set([])\n for letter in sentence.lower():\n if letter.isalpha():\n unique_letters.add(letter)\n else:\n continue\n if len(unique_letters) == 
26:\n return True\n else:\n return False\n\nif __name__ == \"__main__\":\n print(is_pangram(\"the_quick_brown_fox_jumps_over_the_lazy_dog\"))","sub_path":"py-exercims/pangram/pangram.py","file_name":"pangram.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"556711124","text":"import sys\nimport time\nimport pygame\nimport pygame.camera\n\nCOLORVAL_ASPHALT = (0, 0, 0)\nCOLORVAL_CEMENT = (168, 161, 206)\nCOLORVAL_DIRT = (102, 51, 0)\nCOLORVAL_SAND = (255, 230, 179)\n\npygame.init()\npygame.camera.init()\n\nmonitor = pygame.display.set_mode((352,288),0)\nxhair = pygame.image.load(\"resources/image/crosshair.png\")\n\nx = 0\ny = 0\nf = 0\n\ncam_list = pygame.camera.list_cameras()\nwebcam = pygame.camera.Camera(cam_list[0],(32,24))\nwebcam.start()\n\nwhile True:\n frames = webcam.get_image()\n \n livefeed = pygame.transform.scale(frames,(352,288))\n xhairscaled = pygame.transform.scale(xhair,(50,50))\n \n monitor.blit(livefeed,(0,0))\n monitor.blit(xhairscaled, ((monitor.get_width() // 2) - 25, (monitor.get_height() // 2) - 25))\n \n pygame.display.update()\n \n COLORVAL = monitor.get_at((monitor.get_width() // 2, monitor.get_height() // 2))[:3]\n \n print (COLORVAL)\n \n f = f + 1\n \n if COLORVAL == COLORVAL_ASPHALT:\n print (f, \". Asphalt Road\")\n time.sleep(5)\n \n if COLORVAL == COLORVAL_CEMENT:\n print (f, \". Cement Road\")\n time.sleep(5)\n \n time.sleep(0.01)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n webcam.stop()\n pygame.quit()\n sys.exit()\n","sub_path":"resources/sensor/pythonreferences_used/cameracolor.py","file_name":"cameracolor.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"533833031","text":"class Parameter:\n\n \"\"\"This class is the parent type to any parameter type. Every parameter type to be used should be derived from it.\"\"\"\n\n def __init__(self, name=\"\"):\n\n self.name = name\n\n def __str__(self, indent=\"\"):\n\n return (indent + \"name : %s\\n\"%(self.name))\n\n def lintrans(self, other, z):\n\n if (isinstance(self, Real) and isinstance(other, Integer)):\n\n zz = int((z - self.range[0]) * ((other.range[1] - other.range[0]) / (self.range[1] - self.range[0])) + other.range[0])\n\n elif (isinstance(self, Real) and isinstance(other, Real)):\n\n zz = (z - self.range[0]) * ((other.range[1] - other.range[0]) / (self.range[1] - self.range[0])) + other.range[0]\n\n elif (isinstance(self, Real) and isinstance(other, Categorical)):\n\n idx = int((z - self.range[0]) / (self.range[1] - self.range[0]) * len(other.values))\n idx = min(idx, len(other.values) - 1) # XXX use this line to avoid an out of bound index in the next line\n zz = other.values[idx]\n\n elif (isinstance(self, Integer) and isinstance(other, Real)):\n\n zz = (z - self.range[0]) * ((other.range[1] - other.range[0]) / (self.range[1] - self.range[0])) + other.range[0]\n\n elif (isinstance(self, Categorical) and isinstance(other, Real)):\n\n idx = self.values.index(z)\n zz = idx * ((other.range[1] - other.range[0]) / len(self.values)) + other.range[0]\n\n else:\n\n raise Exception(\"Unknown transformation rule\")\n\n return zz\n\nclass Integer(Parameter):\n\n \"\"\"Integer parameter. 
The \"range\" parameter is a tuple that defines the range of integer values that the parameter can take.\"\"\"\n\n def __init__(self, name=\"\", range=(0, 1)):\n\n super().__init__(name=name)\n if (range[0] >= range[1]):\n raise Exception(\"Lower bound of the range cannot be larger than or equal to the upper bound of the range\")\n self.range = range\n\n def __str__(self, indent=\"\"):\n\n return (super().__str__(indent=indent) + indent + \"type : Integer\\n\" + indent + \"range : %s\\n\"%(str(self.range)))\n\nclass Real(Parameter):\n\n \"\"\"Real parameter. The \"range\" parameter is a tuple that defines the range of floating point values that the parameter can take.\"\"\"\n\n def __init__(self, name=\"\", range=(0., 1.)):\n\n super().__init__(name=name)\n if (range[0] >= range[1]):\n raise Exception(\"Lower bound of the range cannot be larger than or equal to the upper bound of the range\")\n self.range = range\n\n def __str__(self, indent=\"\"):\n\n return (super().__str__(indent=indent) + indent + \"type : Real\\n\" + indent + \"range : %s\\n\"%(str(self.range)))\n\nclass Categorical(Parameter):\n\n \"\"\"Categorical parameter. The \"values\" parameter is a list of all the values that the parameter can take.\"\"\"\n\n def __init__(self, name=\"\", values=[]):\n\n super().__init__(name=name)\n if (len(values) > len(set(values))):\n raise Exception(\"List of values should not contain duplicates\")\n self.values = values\n\n def __str__(self, indent=\"\"):\n\n return (super().__str__(indent=indent) + indent + \"type : Categorical\\n\" + indent + \"values : %s\\n\"%(str(self.values)))\n\n","sub_path":"ztune/parameter.py","file_name":"parameter.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"207211600","text":"'''\nCreated on Oct 23, 2016\n\n@author: Makeda Phoenix\n'''\nimport json\nimport requests\n\n\nurl = 'http://challenge.code2040.org/api/reverse'\nurlFini = 'http://challenge.code2040.org/api/reverse/validate'\ntoken = 'fe51f0ab89f32b551b8baa8632c0001c'\ndata = {'token' : token}\n\ntoReverse = requests.post(url, data)\nreversed_text = toReverse.text[::-1]  # renamed to avoid shadowing the built-in reversed()\n\ndataFini = {'token' : token, 'string' : reversed_text}\nresponse = requests.post(urlFini, dataFini)\nprint(response.text)\n","sub_path":"reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"190784265","text":"# -*- coding: utf-8 -*-\nfrom noc.backend.lib.Wrapper import wrapper\nfrom noc.backend.models import Port, DeviceInfo\n\n\ndef index(request):\n return wrapper(request=request, req_params=['device'], worker=worker, context='device', type='json')\n\n\ndef worker(req):\n\n device = req.params()['device']\n\n data = []\n\n ports = Port.objects.filter(device=device).order_by('name')\n\n for port in ports:\n data.append(port.list())\n\n device_info = DeviceInfo.objects.get(device=device)\n\n return { \"success\": True, \"data\": data, \"totalCount\": len(data), \"refreshTime\": u'Date of the last poll: %s' % device_info.last_poll.strftime(\"%Y-%m-%d %H:%M:%S\") }\n","sub_path":"backend/controller/port/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"398743637","text":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may 
not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport cv2\nimport sys\nimport numpy as np\nimport torch\nimport torch.utils.data as data\n\nfrom utils.augment import *\nfrom utils.utils import plot_gt\nfrom utils.bbox import quad_2_rbox \n\nclass DOTADataset(data.Dataset):\n\n def __init__(self,\n dataset= None, \n augment = False,\n level = 1,\n only_latin = True):\n self.level = level \n self.image_set_path = dataset\n if dataset is not None:\n self.image_list = self._load_image_names() \n if self.level == 1:\n self.classes = ('__background__', 'plane', 'ship', 'storage-tank', 'baseball-diamond', \n 'tennis-court', 'basketball-court', 'ground-track-field', 'harbor', \n 'bridge', 'large-vehicle', 'small-vehicle', 'helicopter', 'roundabout', \n 'soccer-ball-field' , 'swimming-pool') \n self.num_classes = len(self.classes)\n self.class_to_ind = dict(zip(self.classes, range(self.num_classes))) \n self.augment = augment\n\n def __len__(self):\n return len(self.image_list)\n\n def __getitem__(self, index):\n im_path = self.image_list[index] \n im = cv2.cvtColor(cv2.imread(im_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)\n roidb = self._load_annotation(self.image_list[index])\n gt_inds = np.where(roidb['gt_classes'] != 0)[0]\n bboxes = roidb['boxes'][gt_inds, :]\n classes = roidb['gt_classes'][gt_inds]\n gt_boxes = np.zeros((len(gt_inds), 6), dtype=np.float32)\n if self.augment :\n transform = Augment([ HSV(0.5, 0.5, p=0.5),\n HorizontalFlip(p=0.5),\n VerticalFlip(p=0.5),\n Affine(degree=20, translate=0.1, scale=0.2, p=0.5), \n Noise(0.02, p=0.2),\n Blur(1.3, p=0.5),\n ],box_mode = 'xyxyxyxy',)\n im, bboxes = transform(im, bboxes)\n\n mask = mask_valid_boxes(quad_2_rbox(bboxes,'xywha'), return_mask=True)\n bboxes = bboxes[mask]\n gt_boxes = gt_boxes[mask]\n classes = classes[mask]\n\n for i, bbox in enumerate(bboxes):\n gt_boxes[i, :5] = quad_2_rbox(np.array(bbox), mode = 'xyxya') \n gt_boxes[i, 5] = classes[i]\n\n ## test augmentation\n # plot_gt(im, gt_boxes[:,:-1], im_path, mode = 'xyxya')\n return {'image': im, 'boxes': gt_boxes, 'path': im_path}\n\n def _load_image_names(self):\n \"\"\"\n Load the names listed in this dataset's image set file.\n \"\"\"\n image_set_file = self.image_set_path\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_list = [x.strip() for x in f.readlines()]\n return image_list\n\n\n def _load_annotation(self, index):\n root_dir = index.split('/images/P')[0]\n label_dir = os.path.join(root_dir, 'labelTxt')\n _ , img_name = os.path.split(index)\n filename = os.path.join(label_dir, img_name[:-4]+'.txt')\n boxes, gt_classes = [], []\n with open(filename,'r',encoding='utf-8-sig') as f:\n content = f.read()\n objects = content.split('\\n')\n for obj in objects:\n if len(obj) != 0 :\n *box, class_name, difficult = obj.split(' ')\n if difficult == '1':\n continue\n box = [ eval(x) for x in obj.split(' ')[:8] ]\n label = self.class_to_ind[class_name] \n boxes.append(box)\n gt_classes.append(label)\n return {'boxes': np.array(boxes, dtype=np.int32), 
'gt_classes': np.array(gt_classes)}\n\n\n def display(self,boxes, img_path):\n img = cv2.imread(img_path)\n for box in boxes:\n coors = box.reshape(4,2)\n img = cv2.polylines(img,[coors],True,(0,0,255),2)\t\n cv2.imshow(img_path,img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n def return_class(self, id):\n id = int(id)\n return self.classes[id]\n \nif __name__ == '__main__':\n pass\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PyTorch/built-in/cv/detection/DAL_ID2732_for_PyTorch/datasets/dota_dataset.py","file_name":"dota_dataset.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"327222581","text":"from scipy.io import arff\nfrom statistics import mean\nimport numpy as np\nimport pandas as pd\n# idea: get k neighbours out of n training examples, get the most frequent class\n\ndef dimension_reduction(data, metadata):\n names = list()\n for index, type in enumerate(metadata.types()):\n if type =='nominal':\n # drop nominal\n names.append(metadata.names()[index])\n\n data = data.drop(names, axis=1)\n data = data.dropna(axis=0)\n data = data.reset_index()\n return data\n\ndef euclidean_distance(training_data, inX):\n dif = np.tile(inX, (training_data.shape[0], 1)) - training_data\n sqaured_diff = dif.applymap(np.square)\n # column sum\n sum_diff = sqaured_diff.sum(axis=1)\n return sum_diff.map(np.sqrt)\n\ndef knn_regression(training_data, inX, labels, k):\n assert k<=len(training_data)\n #distance\n distance = euclidean_distance(training_data, inX)\n # sort distance in ascending order, remove None\n sorted_index = distance.argsort()\n # get the top k rows from the sorted array\n top_k_labels = labels[sorted_index[:k]]\n return sum(top_k_labels)/len(top_k_labels)\n\ndef cross_validation_knn_prediction(k):\n dataset_prediction = arff.loadarff('autos.arff')\n # this is a table with 350 rows and 34 features, and last dimension as class\n training_data_prediction = pd.DataFrame(dataset_prediction[0])\n training_data_prediction = dimension_reduction(training_data_prediction, dataset_prediction[1])\n training_data_prediction = training_data_prediction.reset_index(drop=True)\n MAPE = []\n for i in range(training_data_prediction.shape[0]):\n training_data_inc_class = pd.concat([training_data_prediction.iloc[:i, :], training_data_prediction.iloc[i+1:, :]], ignore_index=True)\n labels = training_data_inc_class['price']\n training_data = training_data_inc_class.iloc[:,:-1]\n inX = training_data_prediction.iloc[i:i+1, :-1]\n # print(\"-------------------\")\n est_price = knn_regression(training_data, inX, labels, k)\n act_price = training_data_prediction['price'][i]\n act_est = abs((act_price - est_price) / (act_price))\n # print(\"Deviation Error of Prediction from Actual:\")\n # print(act_est)\n MAPE.append(act_est)\n mape = mean(MAPE)\n print('k = %d' % k)\n print(\"Results: Percent Average Deviation Error of Prediction from Actual: \", mape)\n return mape\n\nif __name__ == '__main__':\n k = 10\n cross_validation_knn_prediction(k)\n","sub_path":"code/files/knn_prediction_basic.py","file_name":"knn_prediction_basic.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"127098170","text":"#!/usr/bin/python3\nfrom core import user_table, engine\n\ncon = engine.connect()\nins = user_table.insert()\n\n# inserindo 1 valor\n#new = ins.values(idade=24, nome='daniel', 
senha='123@mudar')\n#con.execute(new)\n\ncon.execute(user_table.insert(),[\n\t{'nome': 'marcio', 'idade': 20, 'senha':'hao123'},\n\t{'nome': 'joao', 'idade': 20, 'senha':'windows'},\n\t{'nome': 'pedro', 'idade': 20, 'senha':'baidu'},\n])\n\n\n\n","sub_path":"520/core_2.py","file_name":"core_2.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"540733084","text":"__author__ = 'LittleMe'\nimport csv\n\ntargetprimeamount = 20000\nnumbertotest = 7\n\nmodul = 0\nprimes = [\"Value\"]\n\nprimes.append(3)\nprimes.append(5)\nprimesfound = 2\n\n\n\n\n\nwhile targetprimeamount > primesfound:\n done = 0\n primetotest = 1\n primesfound = len(primes)-1\n\n print((\"primes found \" + str(primesfound)), end =\"\\r\")\n\n while done == 0:\n\n\n\n\n\n if primetotest > primesfound:\n done = 1\n\n primes.append(numbertotest)\n\n numbertotest += 2\n\n #print(\"tes\")\n break\n\n\n elif numbertotest == primes[primetotest]:\n\n\n #print(\"ive seeen this before\")\n numbertotest += 2\n done = 1\n break\n\n else:\n #print(\"deviding \" + str(numbertotest) + \" by \" + str(primes[primetotest]))\n\n if numbertotest % primes[primetotest] == 0:\n\n #print(str(numbertotest) + \" not\")\n done = 1\n numbertotest += 2\n\n else:\n primetotest += 1\n\n\n\n\n\n#print(primes)\n\n\n#for x in range (0, len(primes)):\n #print(primes[x])\n\n\n\nwith open('primes.csv', 'w',encoding='utf-8', newline='') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerows([primes])\n\n\n","sub_path":"Prime numbers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"568609671","text":"import numpy as np\nimport cv2\nimport math\nfrom pyflann import *\nimport os\n\ndef genFragNdescr():\n\tshapes = [\"apple\",\"bell\", \"Bone\",\"car\",\"carriage\",\"cellular_phone\",\"children\",\"chopper\",\"face\",\"flatfish\",\"fountain\",\"key\",\"shoe\",\"watch\"]\n\tfor shape in shapes:\n\t\tprint(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\",shape)\n\n\t\tdirectory = 'MPEG7dataset\\\\test\\\\'+shape\n\t\tallDescriptors = []\n\t\tallFragments = []\n\t\timageIndecies = []\n\t\tfragSizes = []\n\t\tprint(os.listdir(directory))\n\n\t\tfor filename in os.listdir(directory):\n\t\t\tif filename.endswith(\".png\"): \n\t\t\t\t#print(os.path.join(directory, filename))\n\t\t\t\t#im = cv2.imread('non_rigid_shape_A\\\\non_rigid_shape_A\\\\deer\\\\deer18.tif')\n\t\t\t\tim = cv2.imread(os.path.join(directory, filename))\n\t\t\t\t#cv2.imshow('?',im)\n\t\t\t\theight, width, channels = im.shape\n\t\t\t\timgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n\t\t\t\tret,thresh = cv2.threshold(imgray,127,255,0)\n\t\t\t\theight, width, channels = im.shape\n\t\t\t\tim3, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n\t\t\t\tblank_image = np.zeros((height,width,3), np.uint8)\n\t\t\t\tmax = 0\n\t\t\t\tmaxind = 0\n\n\t\t\t\t#this loop just makes sure we are looking at the longest contour if the algorithm finds multiple, as that likely our shape\n\t\t\t\tfor x in range(len(contours)):\n\t\t\t\t\tcontourLength = len(contours[x])\n\t\t\t\t\tif (contourLength>max):\n\t\t\t\t\t\tmax = contourLength\n\t\t\t\t\t\tmaxind = x\n\t\t\t\t#we store a proper list of points in listofpoints using this loop\n\t\t\t\tstartlistOfPoints = []\n\t\t\t\tfor ind, x in enumerate(contours[maxind]):\n\t\t\t\t\txval = x[0][0]\n\t\t\t\t\tyval = 
x[0][1]\n\t\t\t\t\tstartlistOfPoints.append([yval, xval])\n\t\t\t\tlistOfPoints= np.array(startlistOfPoints)\n\t\t\t\t#fraglist will hold all the fragments in this image\n\t\t\t\tfragSizes.append(len(listOfPoints))\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tcontinue\n\t\t \n\t\tsizesArray = np.array(fragSizes)\n\t\tnp.save(\"MPEG7dataset\\\\test\\\\extractions\\\\\"+shape+\"\\\\lengths\",sizesArray)\n\ngenFragNdescr()","sub_path":"addLength.py","file_name":"addLength.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"581191536","text":"from os import remove\nfrom os.path import join, exists, isfile\nfrom sqlite3 import connect\n\nimport pandas as pd\n\nfrom config.settings import ROOT_FOLDER\nfrom .models import price_tables, list_tables\n\n\ndef translate_to_sqlite(filename='data.db', directory=''):\n if not filename.endswith('.db') or len(filename) <= 3:\n raise ValueError('Improper argument. ')\n\n file = join(ROOT_FOLDER, filename) if directory is None else join(directory, filename)\n\n if exists(file) and isfile(file):\n remove(file)\n\n with connect(file) as conn:\n for p, l in zip(price_tables, list_tables):\n print('%-20s' % p.__name__, end='\\t')\n tickers = l.get_valid_tickers()\n data = p.get_price_data(tickers)\n df = pd.DataFrame(data)\n df.to_sql(p.__name__, conn, chunksize=5000)\n print('Complete')\n\n print('Data has been stored in (%s)' % file)\n","sub_path":"web/extractors.py","file_name":"extractors.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"261757298","text":"class Solution:\n # @return a tuple, (index1, index2)\n def twoSum(self, num, target):\n if num == None:\n return (0, 0)\n\n index = {}\n for i, x in enumerate(num):\n index[x] = i\n for i, x in enumerate(num):\n y = target - x\n if y in index and index[y] > i:\n return (i + 1, index[y] + 1)\n","sub_path":"leetans/twoSum.py","file_name":"twoSum.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"294374893","text":"\"\"\"\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. 
You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nAmbari Agent\n\n\"\"\"\n\nimport os\nfrom instance_manager import create_mpack, set_mpack_instance, get_conf_dir, get_log_dir, get_run_dir, list_instances, walk_mpack_dict\n\nCONFIG_DIR_KEY_NAME = 'config_dir'\nLOG_DIR_KEY_NAME = 'log_dir'\nRUN_DIR_KEY_NAME = 'run_dir'\nPATH_KEY_NAME = 'mpack_path'\nCOMPONENTS_PLURAL_KEY_NAME = 'components'\nCOMPONENT_INSTANCES_PLURAL_KEY_NAME = 'component-instances'\nMPACK_VERSION_KEY_NAME = 'mpack_version'\nMODULE_VERSION_KEY_NAME = 'module_version'\n\ndef get_component_conf_path(mpack_name, instance_name, module_name, components_instance_type,\n subgroup_name='default', component_instance_name='default'):\n \"\"\"\n :returns a list containing the path to the configuration folder of the given component instance,\n this may include multiple mpack instance cases\n :raises ValueError if the parameters don't match the mpack or instances structure\n \"\"\"\n\n return get_conf_dir(mpack_name, instance_name, subgroup_name, module_name,\n {components_instance_type: [component_instance_name]})\n\n\ndef get_component_log_path(mpack_name, instance_name, module_name, components_instance_type,\n subgroup_name='default', component_instance_name='default'):\n \"\"\"\n :returns a list containing the path to the log folder of the given component instance,\n this may include multiple mpack instance cases\n :raises ValueError if the parameters don't match the mpack or instances structure\n \"\"\"\n\n return get_log_dir(mpack_name, instance_name, subgroup_name, module_name,\n {components_instance_type: [component_instance_name]})\n\n\ndef get_component_rundir_path(mpack_name, instance_name, module_name, components_instance_type,\n subgroup_name='default', component_instance_name='default'):\n \"\"\"\n :returns a list containing the paths to the rundir folder of the given component instance,\n this may include multiple mpack instance cases\n :raises ValueError if the parameters don't match the mpack or instances structure\n \"\"\"\n\n return get_run_dir(mpack_name, instance_name, subgroup_name, module_name,\n {components_instance_type: [component_instance_name]})\n\n\ndef get_component_target_path(mpack_name, instance_name, module_name, components_instance_type,\n subgroup_name='default', component_instance_name='default'):\n \"\"\"\n :returns a list containing the paths to the mpack component folder of the given component instance,\n this may include multiple mpack instance cases\n :raises ValueError if the parameters don't match the mpack or instances structure\n \"\"\"\n dirs = set()\n instances_json = list_instances(mpack_name, instance_name, subgroup_name, module_name,\n {components_instance_type: [component_instance_name]})\n walk_mpack_dict(instances_json, PATH_KEY_NAME, dirs)\n target_path_list = [dir for dir in dirs if\n (mpack_name is None or mpack_name.lower() in dir) and (instance_name is None or instance_name.lower() in dir)]\n return \"\" if len(target_path_list) == 0 else target_path_list[0]\n\ndef get_versions(mpack_name, instance_name, module_name, components_instance_type,\n subgroup_name='default', component_instance_name='default'):\n \"\"\"\n :returns a tuple 
representing the mpack version and the module version; module_name must not be None\n :raises ValueError if the parameters don't match the mpack or instances structure\n \"\"\"\n\n instances_json = list_instances(mpack_name, instance_name, subgroup_name, module_name,\n {components_instance_type: [component_instance_name]})\n dirs = set()\n walk_mpack_dict(instances_json, MPACK_VERSION_KEY_NAME, dirs)\n mpack_version = next(iter(dirs))\n dirs.clear()\n walk_mpack_dict(instances_json, MODULE_VERSION_KEY_NAME, dirs)\n module_version = next(iter(dirs))\n\n return mpack_version, module_version\n\n\ndef get_component_home_path(mpack_name, instance_name, module_name, components_instance_type,\n subgroup_name='default', component_instance_name='default'):\n \"\"\"\n :returns the single string containing the path to the module component folder of the given component instance\n :raises ValueError if the parameters don't match the mpack or instances structure\n \"\"\"\n\n component_path = get_component_target_path(mpack_name=mpack_name, instance_name=instance_name,\n subgroup_name=subgroup_name,\n module_name=module_name, components_instance_type=components_instance_type,\n component_instance_name=component_instance_name)\n\n return os.readlink(component_path)\n\n\ndef create_component_instance(mpack_name, mpack_version, instance_name, module_name, components_instance_type,\n subgroup_name='default', component_instance_name='default', fail_if_exists=False):\n \"\"\"\n creates the single component instance according to the parameters\n :raises ValueError if the parameters don't match the mpack or instances structure\n \"\"\"\n create_mpack(mpack_name, mpack_version, instance_name, subgroup_name, module_name,\n None, {components_instance_type: [component_instance_name]}, fail_if_exists)\n\n\ndef set_component_instance_version(mpack_name, mpack_version, instance_name, module_name, components_instance_type,\n subgroup_name='default', component_instance_name='default'):\n \"\"\"\n changes the version of the single component instance according to the parameters\n :raises ValueError if the parameters don't match the mpack or instances structure\n \"\"\"\n set_mpack_instance(mpack_name, mpack_version, instance_name, subgroup_name, module_name,\n None, {components_instance_type: [component_instance_name]})\n","sub_path":"ambari-common/src/main/python/resource_management/libraries/functions/mpack_manager_helper.py","file_name":"mpack_manager_helper.py","file_ext":"py","file_size_in_byte":6708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"432698466","text":"class Account(object):\n func_list = ['login', 'logout', 'register']\n\n def login(self):\n \"\"\"\n Log in\n :return:\n \"\"\"\n print('login 111')\n\n def logout(self):\n \"\"\"\n Log out\n :return:\n \"\"\"\n print('logout 111')\n\n def register(self):\n \"\"\"\n Register\n :return:\n \"\"\"\n print('register 111')\n\n def run(self):\n \"\"\"\n Main entry point\n :return:\n \"\"\"\n print(\"\"\"\n Please choose the function to run:\n 1. Log in\n 2. Log out\n 3. Register\n \"\"\")\n\n choice = int(input('Enter the number of the function to run: '))\n func_name = Account.func_list[choice - 1]\n\n # func = getattr(Account, func_name) # Account.login\n # func(self)\n\n func = getattr(self, func_name) # self.login\n func()\n\n\nobj1 = Account()\nobj1.run()\n\nobj2 = Account()\nobj2.run()\n","sub_path":"p1_basic/day22_26oop/day25/07_反射示例-面向对象/练习题.py","file_name":"练习题.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"517600503","text":"#!/usr/bin/env python\n\nimport os\nimport argparse\n\nimport ROOT\nROOT.gROOT.SetBatch()\n\nfrom stat_analysis.HistogramTools import setTDRStyle\n\ndef plotUpDown(nominal, up, down, up_ratio, down_ratio, xTitle, title, syst, output):\n\n c = ROOT.TCanvas(\"c\", \"c\")\n\n hi_pad = ROOT.TPad(\"pad_hi\", \"\", 0., 0.5, 1, 1)\n hi_pad.Draw()\n hi_pad.SetTopMargin(0.05 / .5)\n hi_pad.SetLeftMargin(0.16)\n hi_pad.SetBottomMargin(0.015)\n hi_pad.SetRightMargin(0.02)\n\n lo_pad = ROOT.TPad(\"pad_lo\", \"\", 0., 0., 1, 0.5)\n lo_pad.Draw()\n lo_pad.SetTopMargin(1)\n lo_pad.SetLeftMargin(0.16)\n lo_pad.SetBottomMargin(0.13 / 0.5)\n lo_pad.SetRightMargin(0.02)\n lo_pad.SetTickx(1)\n\n hi_pad.cd()\n \n up.SetLineWidth(2)\n up.SetLineColor(ROOT.TColor.GetColor('#468966'))\n up.GetYaxis().SetLabelSize(0.02 / 0.5)\n up.GetYaxis().SetTitleSize(0.03 / 0.5)\n up.GetYaxis().SetTitleOffset(1.7 * 0.5)\n up.GetYaxis().SetTitle(\"Arbitrary units\")\n up.GetXaxis().SetLabelSize(0)\n up.Draw(\"hist\")\n\n nominal.SetLineWidth(2)\n nominal.SetLineStyle(2)\n nominal.SetLineColor(ROOT.TColor.GetColor('#FFB03B'))\n nominal.Draw(\"hist same\")\n\n up.Draw(\"hist same\")\n\n down.SetLineWidth(2)\n down.SetLineColor(ROOT.TColor.GetColor('#8E2800'))\n down.Draw(\"hist same\")\n\n hist_max = -100\n hist_min = 9999999\n for i in range(1, up.GetNbinsX() + 1):\n hist_max = max(hist_max, up.GetBinContent(i), down.GetBinContent(i), nominal.GetBinContent(i))\n hist_min = min(hist_min, up.GetBinContent(i), down.GetBinContent(i), nominal.GetBinContent(i))\n up.GetYaxis().SetRangeUser(hist_min * 0.8, hist_max * 1.4) \n \n lo_pad.cd()\n lo_pad.SetGrid()\n\n up_ratio.SetLineWidth(2)\n up_ratio.GetXaxis().SetLabelSize(0.02 / 0.5)\n up_ratio.GetXaxis().SetTitleSize(0.03 / 0.5)\n up_ratio.GetXaxis().SetLabelOffset(0.05)\n up_ratio.GetXaxis().SetTitleOffset(1.5)\n up_ratio.GetXaxis().SetTitle(xTitle)\n up_ratio.GetYaxis().SetLabelSize(0.02 / 0.5)\n up_ratio.GetYaxis().SetTitleSize(0.03 / 0.5)\n up_ratio.GetYaxis().SetTitleOffset(1.7 * 0.5)\n up_ratio.GetYaxis().SetTitle(\"Ratio nominal over up/down\")\n up_ratio.GetYaxis().SetNdivisions(502, True)\n up_ratio.GetYaxis().SetRangeUser(0.5, 1.5)\n\n up_ratio.SetLineColor(ROOT.TColor.GetColor('#468966'))\n up_ratio.SetMarkerColor(ROOT.TColor.GetColor('#468966'))\n up_ratio.SetMarkerStyle(20)\n up_ratio.SetMarkerSize(0.6)\n up_ratio.Draw(\"hist\")\n\n line = ROOT.TLine(up_ratio.GetXaxis().GetBinLowEdge(1), 1, up_ratio.GetXaxis().GetBinUpEdge(up_ratio.GetXaxis().GetLast()), 1)\n line.SetLineWidth(2)\n line.Draw(\"same\")\n\n up_ratio.Draw(\"histsame\")\n\n down_ratio.SetLineWidth(2)\n down_ratio.SetLineColor(ROOT.TColor.GetColor('#8E2800'))\n down_ratio.SetMarkerColor(ROOT.TColor.GetColor('#8E2800'))\n down_ratio.SetMarkerStyle(20)\n down_ratio.SetMarkerSize(0.6)\n down_ratio.Draw(\"histsame\")\n\n # Look for min and max of ratio and zoom accordingly\n ratio_max = -100\n ratio_min = 100\n for i in range(1, up_ratio.GetNbinsX() + 1):\n ratio_max = max(ratio_max, 
up_ratio.GetBinContent(i), down_ratio.GetBinContent(i))\n ratio_min = min(ratio_min, up_ratio.GetBinContent(i), down_ratio.GetBinContent(i))\n\n # Symetrize\n ratio_range = 1.3 * max(abs(ratio_max - 1), abs(1 - ratio_min))\n up_ratio.GetYaxis().SetRangeUser(max(0, 1 - ratio_range), 1 + ratio_range)\n up_ratio.GetYaxis().SetNdivisions(210)\n\n c.cd()\n l = ROOT.TLegend(0.20, 0.84, 0.50, 0.94)\n l.SetTextFont(42)\n l.SetFillColor(ROOT.kWhite)\n l.SetFillStyle(0)\n l.SetBorderSize(0)\n\n l.AddEntry(up, \"{} up\".format(syst))\n l.AddEntry(down, \"{} down\".format(syst))\n l.Draw(\"same\")\n\n syst_text = ROOT.TLatex(0.16, 0.96, title)\n syst_text.SetNDC(True)\n syst_text.SetTextFont(42)\n syst_text.SetTextSize(0.035)\n syst_text.Draw(\"same\")\n\n c.SaveAs(output)\n\ndef beautify(quantity, hist):\n bins = hist.GetXaxis().GetXbins()\n if \"csv\" in quantity.lower():\n bins.SetAt(-0.1, 0)\n elif \"qgl\" in quantity.lower():\n bins.SetAt(-0.2, 0)\n bins.SetAt(-0.1, 1)\n\ndef plotRatios(syst, path, title, quantity, flavs):\n\n _tf = ROOT.TFile.Open(path)\n\n for flav in flavs:\n ratio_th3_up = _tf.Get(\"ratio_{}_up_{}\".format(quantity, flav))\n ratio_th3_down = _tf.Get(\"ratio_{}_down_{}\".format(quantity, flav))\n\n th3_nominal = _tf.Get(\"{}_nominal_{}\".format(quantity, flav))\n th3_up = _tf.Get(\"{}_up_{}\".format(quantity, flav))\n th3_down = _tf.Get(\"{}_down_{}\".format(quantity, flav))\n\n xAxis = ratio_th3_up.GetXaxis()\n yAxis = ratio_th3_up.GetYaxis()\n\n for x in range(1, ratio_th3_up.GetNbinsX()+1):\n for y in range(1, ratio_th3_up.GetNbinsY()+1):\n def getProj(hist3):\n return hist3.ProjectionZ(hist3.GetName() + \"__bin_{}_{}\".format(x, y), x, x, y, y)\n\n th1_nominal = getProj(th3_nominal)\n th1_up = getProj(th3_up)\n th1_down = getProj(th3_down)\n\n ratio_th1_up = getProj(ratio_th3_up)\n ratio_th1_down = getProj(ratio_th3_down)\n\n pt_min = xAxis.GetBinLowEdge(x)\n pt_max = xAxis.GetBinUpEdge(x)\n eta_min = yAxis.GetBinLowEdge(y)\n eta_max = yAxis.GetBinUpEdge(y)\n\n beautify(quantity, th1_nominal)\n beautify(quantity, th1_up)\n beautify(quantity, th1_down)\n beautify(quantity, ratio_th1_up)\n beautify(quantity, ratio_th1_down)\n\n m_title = \"Flavour: {} / {} < p_{{T}} < {} / {} < |#eta| < {}\".format(flav, pt_min, pt_max, eta_min, eta_max)\n\n output = os.path.join(os.path.dirname(path), \"{}_{}_{}__bin_{}_{}.pdf\".format(syst.upper(), quantity, flav, x, y))\n\n plotUpDown(th1_nominal, th1_up, th1_down, ratio_th1_up, ratio_th1_down, title, m_title, syst.upper(), output)\n\n _tf.Close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--syst', nargs='+', help='Name of the systematics')\n parser.add_argument('--vars', nargs='+', choices=['csv', 'qgl'], help='Variables to plot')\n parser.add_argument('--folder', type=str, help='Folder with the ROOT files')\n options = parser.parse_args()\n \n setTDRStyle()\n\n for syst in options.syst:\n\n if 'csv' in options.vars:\n plotRatios(syst, os.path.join(options.folder, \"csv_{}_corrections.root\".format(syst)), \"Jet CSVv2\", \"jets_btagCSV\", ['b', 'c', 'l'])\n if 'qgl' in options.vars:\n plotRatios(syst, os.path.join(options.folder, \"qgl_{}_corrections.root\".format(syst)), \"Jet QGL\", \"jets_qgl\", ['q', 'i', 'l', 'c', 'b', 'g'])\n","sub_path":"python/plotFSReffect.py","file_name":"plotFSReffect.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"54765916","text":"import pandas as 
pd\r\nimport numpy as np\r\nfrom libsbml import SBMLReader, SBMLWriter, SBMLDocument, XMLNode\r\nfrom collections import OrderedDict, Counter\r\n\r\n# plotly.tools.set_credentials_file(username='anapatricialima3', api_key='43Syk2CjHMYfbaHzQpuK')\r\n\r\ndef remove_e_from_key(pathwaysDict):\r\n for k in list(pathwaysDict):  # iterate over a snapshot of the keys: entries are added and removed below\r\n if k.endswith(\"_e_\"):\r\n pathwaysDict[k[:-1]] = pathwaysDict[k]\r\n del pathwaysDict[k]\r\n return pathwaysDict\r\n\r\ndef metabolic_pathways(sbmlModel, filename='pathways.csv'):\r\n \"\"\"\r\n @arguments\r\n\r\n sbmlModel: SBML file of the constraint-based model\r\n filename: str, name of the pathways file\r\n\r\n @return:\r\n - txt file\r\n - ordered dict of str to str (reaction -> metabolic pathway)\r\n - dataframe\r\n \"\"\"\r\n\r\n reader = SBMLReader()\r\n document = reader.readSBML(sbmlModel)\r\n model = document.getModel()\r\n metabolicPathways = OrderedDict((r.id, None) for r in model.reactions)\r\n\r\n for r in model.reactions:\r\n system = r.getNotesString().partition('SUBSYSTEM')[2].partition('<')[0].strip().replace(': ', '')\r\n if (system == 'S_'):\r\n metabolicPathways[r.id] = 'S_Metabolites_exchange'\r\n elif system == ':':\r\n metabolicPathways[r.id] = 'Biomass'\r\n else:\r\n metabolicPathways[r.id] = system\r\n\r\n finalMetabolicPathways = remove_e_from_key(metabolicPathways)\r\n dataFrame = pd.DataFrame.from_dict(finalMetabolicPathways, orient=\"index\")\r\n dataFrame.columns = ['Pathways']\r\n dataFrame.to_csv(filename, sep=\";\")\r\n pathCount = Counter(finalMetabolicPathways.values())\r\n pathwaysNumbers = OrderedDict((k, pathCount[k]) for k in pathCount)\r\n\r\n return finalMetabolicPathways, pathwaysNumbers, dataFrame\r\n\r\ndef sum_pathway_fluxes(pathways, fileName, finalFile):\r\n \"\"\"\r\n @arguments\r\n\r\n @return:\r\n \"\"\"\r\n fbaDataFrame = pd.DataFrame.from_csv(fileName, sep=\";\")\r\n fbaDict = OrderedDict((k, [fbaDataFrame.loc[k][i] for i in xrange(len(fbaDataFrame.columns))]) for k in fbaDataFrame.index)\r\n paths = OrderedDict((k, [0]*len(fbaDict.values()[0])) for k in pathways.values())\r\n\r\n for rnx in fbaDict:\r\n path = pathways[rnx]\r\n for j in xrange(len(fbaDict.values()[0])):\r\n paths[path][j] += abs(fbaDict[rnx][j])\r\n\r\n pd.DataFrame.from_dict(paths, orient=\"index\").to_csv(finalFile, sep=\";\")\r\n return paths\r\n\r\ndef fba_changes(regularVals, hybridVals, fileName):\r\n \"\"\"\r\n @arguments\r\n\r\n @return:\r\n \"\"\"\r\n\r\n zeroFluxReactions = 0\r\n modificationDict = OrderedDict((k, []) for k in regularVals)\r\n for k in list(modificationDict):  # snapshot of the keys, since entries may be popped below\r\n for j in xrange(len(regularVals.values()[0])):\r\n hybVal = hybridVals[k][j]\r\n regVal = regularVals[k][j]\r\n if (hybVal == 0.0) and (regVal == 0.0):\r\n modificationDict.pop(k, None)\r\n zeroFluxReactions += 1\r\n else:\r\n diff = abs(hybVal - regVal)\r\n modificationDict[k].append(diff)\r\n #print zeroFluxReactions\r\n pd.DataFrame.from_dict(modificationDict, orient=\"index\").to_csv(fileName, sep=\";\")\r\n return modificationDict\r\n\r\ndef checkout_reactions(significanceData, metabolicPaths, filename):\r\n \"\"\"\r\n @arguments\r\n\r\n @return:\r\n \"\"\"\r\n paths = OrderedDict((k, [0]*len(significanceData.values()[0])) for k in metabolicPaths.values())\r\n for rnx in significanceData:\r\n path = metabolicPaths[rnx]\r\n for i in xrange(len(significanceData[rnx])):\r\n if significanceData[rnx][i] > 0.0:\r\n paths[path][i] += 1\r\n\r\n pd.DataFrame.from_dict(paths, orient=\"index\").to_csv(filename, sep=\";\")\r\n return paths\r\n\r\ndef 
check_active_reactions(fbaFile, metabolicPaths, fileName):\r\n \"\"\"\r\n @arguments\r\n\r\n @return:\r\n \"\"\"\r\n fbaDataFrame = pd.DataFrame.from_csv(fbaFile, sep=\";\")\r\n fbaDict = OrderedDict((k, [fbaDataFrame.loc[k][i] for i in xrange(len(fbaDataFrame.columns))]) for k in fbaDataFrame.index)\r\n paths = OrderedDict((k, [0]*len(fbaDict.values()[0])) for k in metabolicPaths.values())\r\n for rnx in fbaDict:\r\n path = metabolicPaths[rnx]\r\n for i in xrange(len(fbaDict[rnx])):\r\n if fbaDict[rnx][i] > float(0.0):\r\n paths[path][i] += 1\r\n\r\n pd.DataFrame.from_dict(paths, orient=\"index\").to_csv(fileName, sep=\";\")\r\n return paths\r\n\r\ndef calculate_fva_diff(dataFrame, fileName):\r\n \"\"\"\r\n @arguments\r\n\r\n @return:\r\n \"\"\"\r\n moddict01 = OrderedDict((dataFrame[0].index[i], []) for i in xrange(len(dataFrame[0].index)))\r\n moddict02 = OrderedDict((dataFrame[1].index[i], []) for i in xrange(len(dataFrame[1].index)))\r\n zeroFluxReactions = 0\r\n for i in xrange(len(dataFrame[1].columns)):\r\n for k in dataFrame[1].index:\r\n vals_01 = eval(dataFrame[0].loc[k][i])\r\n vals_02 = eval(dataFrame[1].loc[k][i])\r\n maxval01, minval01 = vals_01[1], vals_01[0]\r\n maxval02, minval02 = vals_02[1], vals_02[0]\r\n if ((maxval01 == 0.0) and (maxval02 == 0.0)) and ((minval01 == -0.0) and (minval02 == -0.0)):\r\n moddict01.pop(k, None)\r\n moddict02.pop(k, None)\r\n zeroFluxReactions +=1\r\n else:\r\n diff01 = abs(maxval01-minval01)\r\n diff02 = abs(maxval02-minval02)\r\n moddict01[k].append(diff01)\r\n moddict02[k].append(diff02)\r\n\r\n #print zeroFluxReactions\r\n newDataFrame = pd.DataFrame.from_dict(moddict01, orient=\"index\").to_csv(fileName[0], sep=\";\")\r\n newDataFrame02 = pd.DataFrame.from_dict(moddict02, orient=\"index\").to_csv(fileName[1], sep=\";\")\r\n return moddict01, moddict02, fileName[0], fileName[1]\r\n\r\ndef significance_analysis(regularFile, integratedFile, fileName):\r\n \"\"\"\r\n @arguments\r\n\r\n regularFile: str, name of the csv file with the data without kinetic restrictions\r\n integratedFile: str, name of the csv file with the data with kinetic restrictions\r\n\r\n @return: Ordered Dict of str to float\r\n \"\"\"\r\n\r\n regularData = pd.DataFrame.from_csv(regularFile, sep=';')\r\n hybridData = pd.DataFrame.from_csv(integratedFile, sep=';')\r\n modifications = OrderedDict((k, []) for k in regularData.index)\r\n\r\n for i in xrange(len(regularData.columns)):\r\n for k in regularData.index:\r\n regularVal = regularData.loc[k][i]\r\n hybridVal = hybridData.loc[k][i]\r\n diff = abs(hybridVal-regularVal)\r\n modifications[k].append(diff)\r\n\r\n pd.DataFrame.from_dict(modifications, orient=\"index\").to_csv(fileName, sep=\";\")\r\n return modifications\r\n\r\n\r\ndef pathway_analysis(pathways, significanceData):\r\n \"\"\"\r\n @arguments\r\n\r\n pathways: ordered dict of str to str\r\n significanceData: ordered dict of str to float\r\n\r\n @return:\r\n - Ordered dict of str to float,\r\n - Ordered dict of str to float (being the float a percentage),\r\n - List of tuple of str and value (reactions with higher changes).\r\n \"\"\"\r\n modifiedPathways = OrderedDict((k, 0) for k in pathways.values())\r\n mostDifferentReactions = []\r\n minChanges = min(significanceData.values())\r\n total = 0\r\n for i in xrange(len(significanceData)):\r\n key = significanceData.keys()[i]\r\n val = significanceData[significanceData.keys()[i]]\r\n if val > minChanges:\r\n mostDifferentReactions.append((key, val))\r\n total += 1\r\n if key in pathways.keys()[i]:\r\n 
modifiedPathways[pathways[pathways.keys()[i]]] += 1\r\n\r\n modPercPath = modifiedPathways.copy() #percentage of changes in each metabolic pathway regarding the whole organism\r\n for k in modifiedPathways:\r\n newVal = (modPercPath[k] / float(total)) * 100\r\n modPercPath[k] = newVal\r\n\r\n return modifiedPathways, modPercPath, mostDifferentReactions\r\n\r\n\r\ndef select_pathways(percentageData):\r\n \"\"\"\r\n @arguments\r\n\r\n percentageData: ordered dict of str to float\r\n\r\n @return: Ordered dict of str to float (only the metabolic pathways with higher changes).\r\n \"\"\"\r\n newData = OrderedDict((k, None) for k in percentageData.keys())\r\n limit = np.median(percentageData.values())\r\n for k in percentageData:\r\n if percentageData[k] > limit:\r\n newData[k] = percentageData[k]\r\n\r\n return newData\r\n\r\n\r\ndef sort_by_difference(topDifferentReactions):\r\n \"\"\"\r\n @arguments\r\n\r\n topDifferentReactions: list of tuples (str, value)\r\n\r\n @return: New list of tuples (str, value) sorted\r\n \"\"\"\r\n vals = sorted([(topDifferentReactions[i][1], topDifferentReactions[i][0]) for i in xrange(len(topDifferentReactions))], reverse=True)\r\n return vals\r\n\r\ndef get_perc_diff(REG_DICT, HYB_DICT):\r\n res = OrderedDict((k, None) for k in REG_DICT)\r\n for k in REG_DICT:\r\n val = round(REG_DICT[k][0], 4)\r\n val2 = round(HYB_DICT[k][0], 4)\r\n diff = abs(val-val2)\r\n if diff == 0.0:\r\n perc = 0.0\r\n elif diff != 0.0:\r\n if val == 0.0:\r\n perc = abs(diff)*100\r\n else:\r\n perc = round((diff*100)/float(val), 2)\r\n res[k] = perc\r\n mean_diff = round(np.mean(res.values()), 2)\r\n return res, mean_diff","sub_path":"pathwayAnalysis/metabolicPathways.py","file_name":"metabolicPathways.py","file_ext":"py","file_size_in_byte":9242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"651419093","text":"import turtle\n\ndef origine(tortue,x,y):\n tortue.penup()\n tortue.goto(x,y)\n tortue.pendown()\n\ndef ecrit(tortue,x,y,texte,fonte,alig=\"center\"):\n origine(tortue,x,y)\n tortue.write(texte,font=fonte,align=alig)\n\ndef ligne_bis(tortue,x1,y1,x2,y2):\n origine(tortue,x1,y1)\n tortue.goto(x2,y2)\n\ndef ligne(tortue,x,y,l,angle):\n '''Draw the straight line segment starting at (x,y), of length l, in the direction angle'''\n origine(tortue,x,y)\n tortue.setheading(angle)\n tortue.forward(l)\n\ndef rectangle(tortue,x,y,lx,ly):\n origine(tortue,x,y)\n tortue.begin_fill()\n for _ in range(2):\n tortue.forward(lx)\n tortue.left(90)\n tortue.forward(ly)\n tortue.left(90)\n tortue.end_fill()\n\ndef cercle(tortue,x,y,r,angle=360):\n origine(tortue,x+r,y)\n tortue.setheading(90)\n tortue.pendown()\n tortue.begin_fill()\n tortue.circle(r)\n tortue.end_fill()\n\n\ndef set_crayon(tortue,epaisseur=1,couleur=\"black\",remplissage=\"white\",visible=False):\n tortue.pensize(epaisseur)\n tortue.color(couleur)\n tortue.fillcolor(remplissage)\n if visible:\n tortue.showturtle()\n else:\n tortue.hideturtle()\n\nclass Pattern:\n\n tortue_pattern = turtle.Turtle()\n tortue_pattern.hideturtle()\n tortue_pattern.speed(0)\n tortue_pattern.getscreen().tracer(400)\n\n def __init__(self,size,formes,longueur):\n self.size = size\n self.formes = formes\n self.longueur = longueur\n self.nb = len(formes)\n\n def dessine(self,ox,oy,l,a):\n origine(Pattern.tortue_pattern,ox,oy)\n Pattern.tortue_pattern.setheading(a)\n k = 0\n while l > k*self.size:\n for ind in range(self.nb):\n # '-' = solid stroke \n if self.formes[ind]=='-':\n Pattern.tortue_pattern.forward(self.size*self.longueur[ind]/100)\n # '.' = Dot\n if self.formes[ind]=='.':\n Pattern.tortue_pattern.right(90)\n Pattern.tortue_pattern.circle(self.size//2*self.longueur[ind]/100)\n Pattern.tortue_pattern.left(90)\n Pattern.tortue_pattern.penup()\n Pattern.tortue_pattern.forward(self.size*self.longueur[ind]/100)\n Pattern.tortue_pattern.pendown()\n # ' ' = gap\n if self.formes[ind]==\" \":\n Pattern.tortue_pattern.penup()\n Pattern.tortue_pattern.forward(self.size*self.longueur[ind]/100)\n Pattern.tortue_pattern.pendown()\n k=k+1\n\nclass Grille:\n\n def __init__(self,xgrad,xpattern,ygrad,ypattern):\n self.xgrad = xgrad\n self.ygrad = ygrad\n self.xpattern = xpattern\n self.ypattern = ypattern\n \n def trace(self):\n # Get the canvas size\n hauteur = turtle.window_height()\n largeur = turtle.window_width()\n for x in range(0,largeur//2,self.xgrad):\n self.xpattern.dessine(x,-hauteur//2,hauteur,90)\n self.xpattern.dessine(-x,-hauteur//2,hauteur,90)\n for y in range(0,hauteur//2,self.ygrad):\n self.ypattern.dessine(-largeur//2,y,largeur,0)\n self.ypattern.dessine(-largeur//2,-y,largeur,0)\n\n\nclass Graduation:\n\n tortue_graduation = turtle.Turtle()\n tortue_graduation.hideturtle()\n tortue_graduation.speed(0)\n tortue_graduation.getscreen().tracer(400)\n\n def __init__(self,pas,tick,sub=False,show_label=True,label=(\"Courier\",10,\"normal\"),offset = 15):\n self.pas = pas\n self.tick = tick\n self.sub = sub\n self.show_label = show_label\n self.label = label\n self.offset = offset\n \n def affiche(self):\n # Get the canvas size\n hauteur = turtle.window_height()\n largeur = turtle.window_width()\n for x in range(self.pas,largeur//2,self.pas):\n if not self.sub or (x//self.pas)%self.sub != 0:\n ligne(Graduation.tortue_graduation, x, -self.tick//2,self.tick,90)\n ligne(Graduation.tortue_graduation, -x, -self.tick//2,self.tick,90)\n if self.show_label:\n ecrit(Graduation.tortue_graduation,x,-self.tick//2-self.offset,x,self.label)\n ecrit(Graduation.tortue_graduation,-x,-self.tick//2-self.offset,-x,self.label)\n for y in range(self.pas,hauteur//2,self.pas):\n if not self.sub or (y//self.pas)%self.sub != 0:\n ligne(Graduation.tortue_graduation, -self.tick//2, y,self.tick,0)\n ligne(Graduation.tortue_graduation, -self.tick//2, -y,self.tick,0)\n if self.show_label:\n ecrit(Graduation.tortue_graduation,-self.tick//2-self.offset,y-self.label[1],y,self.label)\n ecrit(Graduation.tortue_graduation,-self.tick//2-self.offset,-y-self.label[1],-y,self.label)\n \n\n\nclass Axe:\n \n tortue_axe = turtle.Turtle()\n tortue_axe.hideturtle()\n tortue_axe.speed(0)\n tortue_axe.getscreen().tracer(400)\n\n def __init__(self):\n self.hauteur=None\n self.largeur=None\n \n def trace(self):\n # Get the canvas size\n self.hauteur = turtle.window_height()\n self.largeur = turtle.window_width()\n xstampsize = Axe.tortue_axe.shapesize()[0] * 5\n ystampsize = 0\n ligne(Axe.tortue_axe,-self.largeur//2,0,self.largeur-xstampsize,0)\n Axe.tortue_axe.stamp()\n ligne(Axe.tortue_axe,0,-self.hauteur//2,self.hauteur-ystampsize,90)\n Axe.tortue_axe.stamp()\n\n def taille_fleche(self,sizex,sizey):\n Axe.tortue_axe.shapesize(sizex,sizey)\n\n\n \n \n\n\n \n \n \n\n\n","sub_path":"site/files/C0/grille.py","file_name":"grille.py","file_ext":"py","file_size_in_byte":5764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"411300228","text":"from __future__ import division\nfrom __future__ import print_function\nimport 
argparse\nfrom graph_model import Graph_Model\n\n\nimport time\nimport tensorflow as tf\n\nfrom utils import *\nfrom gcn.models import GCN, MLP\n\n\n### Parameters to build ####\n\n# Vector model location\nvector_location = '/stage/vectors/wiki.el.bin'\n# each word embedding size after dimensional reduction\nembedding_size = 7\n# Window size of words\nword_window = 3\n# Train, Test and Dev location of corpus\ntrain_loc = '/stage/ud/UD_Greek-GDT/el_gdt-ud-train.conllu'\ntest_loc = '/stage/ud/UD_Greek-GDT/el_gdt-ud-test.conllu'\ndev_loc = '/stage/ud/UD_Greek-GDT/el_gdt-ud-dev.conllu'\n# Type of dimensionality reduction 1)Linear 2)LFDA\ndimensional_reduction = 1\n# Dimensional Reduction Flag\nmetric_type = 1\n# Load Graph from file\nload_graph = 1\n# Neareset neigbours count for graph\nneighbors = 10\n# location of unlabeled data\nunlabeled_loc = None\n# 1) FastText 2) Vec File 3) ELMo\nembedding_type = 1\n# 1) Concatenate 2) Mean\nngram_type = 1\n# BIO\nBIO = False\n\n\nmodel = Graph_Model(word_window,\n load_graph, neighbors, embedding_type, ngram_type, BIO)\n# Add data to the model\nmodel.add_data(train_loc, test_loc, dev_loc, vector_location,\n unlabeled_loc)\nx, y, tx, ty, allx, ally, test_size, index = model.reduce_dimension(\n embedding_size, dimensional_reduction, metric_type)\n\nmodel.build_graph()\ngraph = model.train()\n\n\n'''\n\n\n# Set random seed\nseed = 123\nnp.random.seed(seed)\ntf.set_random_seed(seed)\n\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n# 'cora', 'citeseer', 'pubmed'\nflags.DEFINE_string('dataset', 'cora', 'Dataset string.')\n# 'gcn', 'gcn_cheby', 'dense'\nflags.DEFINE_string('model', 'gcn_cheby', 'Model string.')\nflags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')\nflags.DEFINE_integer('epochs', 400, 'Number of epochs to train.')\nflags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.')\nflags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).')\nflags.DEFINE_float('weight_decay', 5e-4,\n 'Weight for L2 loss on embedding matrix.')\nflags.DEFINE_integer('early_stopping', 40,\n 'Tolerance for early stopping (# of epochs).')\nflags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')\n\n# Load data\nadj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(\n x, y, tx, ty, allx, ally, graph, index, test_size)\n# Some preprocessing\nfeatures = preprocess_features(features)\nif FLAGS.model == 'gcn':\n support = [preprocess_adj(adj)]\n num_supports = 1\n model_func = GCN\nelif FLAGS.model == 'gcn_cheby':\n support = chebyshev_polynomials(adj, FLAGS.max_degree)\n num_supports = 1 + FLAGS.max_degree\n model_func = GCN\nelif FLAGS.model == 'dense':\n support = [preprocess_adj(adj)] # Not used\n num_supports = 1\n model_func = MLP\nelse:\n raise ValueError('Invalid argument for model: ' + str(FLAGS.model))\n\n# Define placeholders\nplaceholders = {\n 'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],\n 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),\n 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),\n 'labels_mask': tf.placeholder(tf.int32),\n 'dropout': tf.placeholder_with_default(0., shape=()),\n # helper variable for sparse dropout\n 'num_features_nonzero': tf.placeholder(tf.int32)\n}\n\n# Create model\nmodel = model_func(placeholders, input_dim=features[2][1], logging=True)\n\n# Initialize session\nsess = tf.Session()\n\n\n# Define model evaluation function\ndef evaluate(features, 
support, labels, mask, placeholders):\n t_test = time.time()\n feed_dict_val = construct_feed_dict(\n features, support, labels, mask, placeholders)\n outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val)\n return outs_val[0], outs_val[1], (time.time() - t_test)\n\n\n# Init variables\nsess.run(tf.global_variables_initializer())\n\ncost_val = []\n\n# Train model\nfor epoch in range(FLAGS.epochs):\n\n t = time.time()\n # Construct feed dictionary\n feed_dict = construct_feed_dict(\n features, support, y_train, train_mask, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n\n # Training step\n outs = sess.run([model.opt_op, model.loss, model.accuracy],\n feed_dict=feed_dict)\n\n # Validation\n cost, acc, duration = evaluate(\n features, support, y_val, val_mask, placeholders)\n cost_val.append(cost)\n\n # Print results\n print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]),\n \"train_acc=\", \"{:.5f}\".format(\n outs[2]), \"val_loss=\", \"{:.5f}\".format(cost),\n \"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() - t))\n\n if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping + 1):-1]):\n print(\"Early stopping...\")\n break\n\nprint(\"Optimization Finished!\")\n\n# Testing\ntest_cost, test_acc, test_duration = evaluate(\n features, support, y_test, test_mask, placeholders)\nprint(\"Test set results:\", \"cost=\", \"{:.5f}\".format(test_cost),\n \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration))\n'''\n","sub_path":"GraphSSL_Sequential-4245e2a2412e/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":5296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"340640750","text":"import sys\nimport MapReduce\n\nmr = MapReduce.MapReduce()\nmatrix_size_limit = 10\n\ndef mapper(record):\n matrix = record[0]\n n_row = record[1]\n n_col = record[2]\n v = record[3]\n for i in range(matrix_size_limit):\n if matrix == 'a':\n mr.emit_intermediate((n_row, i), (n_col, v, 'a'))\n if matrix == 'b':\n mr.emit_intermediate((i, n_col), (n_row, v, 'b'))\n\ndef reducer(key, list_of_values):\n # key: index of the result matrix\n # value: elements from the two matrices to be multiplied\n matrices = set([v[2] for v in list_of_values])\n if len(matrices) != 2:\n return None\n result = 0\n for i in range(matrix_size_limit):\n sublist = [v[1] for v in list_of_values if v[0] == i]\n if len(sublist) == 2:\n result += sublist[0] * sublist[1]\n mr.emit((key[0], key[1], result))\n\ndef main():\n input_data = open(sys.argv[1])\n mr.execute(input_data, mapper, reducer)\n\nif __name__ == '__main__':\n main()\n","sub_path":"assignment3/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"37297918","text":"# -*- coding: utf-8 -*-\n\n#DataFrame concatenation: pd.concat\n\n#Import lib\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#Code \nveriler = pd.read_csv('veriler.csv')\nprint(veriler)\n\nx= veriler.iloc[:,1:4]\ny= veriler.iloc[:,4:]\n\nX= x.values\nY= y.values\n\n#Split into training and test data\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=0)\n\n#standardization\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\n\nX_train = sc.fit_transform(x_train)\nX_test = sc.transform(x_test)\n\n#logistic regression\nfrom sklearn.linear_model import LogisticRegression\nlogr= LogisticRegression(random_state=0)\nlogr.fit(X_train, y_train)\ny_pred= logr.predict(X_test)\n\nprint(\"y_pred: \",y_pred)\nprint(\"y_test: \",y_test)\n\n# build the confusion matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\n\n#accuracy score: the proportion of correct predictions across both classes\nfrom sklearn.metrics import accuracy_score\nac_score=accuracy_score(y_test, y_pred)\nprint(ac_score)\n\n#knn algorithm\nfrom sklearn.neighbors import KNeighborsClassifier\nknn =KNeighborsClassifier(n_neighbors=1, metric='minkowski')\nknn.fit(X_train, y_train)\n\npred = knn.predict(X_test)\ncm = confusion_matrix(y_test, pred)\nprint(cm)\n\n# svm algorithm\nfrom sklearn.svm import SVC\n\nsvm = SVC(kernel='linear')\nsvm.fit(X_train, y_train)\n\npred = svm.predict(X_test)\n\ncm = confusion_matrix(y_test, pred)\nprint('SVM (linear)')\nprint(cm)\n\n# svm with the rbf kernel: handles non-linear classification via the kernel trick \nsvm = SVC(kernel='rbf')\nsvm.fit(X_train, y_train)\n\npred = svm.predict(X_test)\n\ncm = confusion_matrix(y_test, pred)\nprint('SVM (rbf)')\nprint(cm)\n\n# GNB naive bayes\nfrom sklearn.naive_bayes import GaussianNB\ngnb = GaussianNB()\ngnb.fit(X_train, y_train)\n\npred = gnb.predict(X_test)\n\ncm = confusion_matrix(y_test, pred)\nprint('GNB')\nprint(cm)\n\n# multinomial naive bayes \n'''On the standardized samples this raised\nValueError: Negative values in data passed to MultinomialNB (input X)\nso the pre-standardization train and test splits are used instead '''\n\nfrom sklearn.naive_bayes import MultinomialNB\nmnb = MultinomialNB(alpha=0.001, fit_prior=True, class_prior=None)\nmnb.fit(x_train, y_train)\npred= mnb.predict(x_test)\n\ncm = confusion_matrix(y_test, pred)\nprint('MNB')\nprint(cm)\n\n# complementNB naive bayes \nfrom sklearn.naive_bayes import ComplementNB\ncnb = ComplementNB()\ncnb.fit(x_train, y_train)\npred = cnb.predict(x_test)\n\ncm = confusion_matrix(y_test, pred)\nprint('CNB')\nprint(cm)\n\n# decision tree classification\nfrom sklearn.tree import DecisionTreeClassifier\n#the entropy criterion scores candidate splits by information gain\ndtc = DecisionTreeClassifier(criterion = 'entropy')\ndtc.fit(X_train, y_train)\npred = dtc.predict(X_test)\n\ncm = confusion_matrix(y_test, pred)\nprint('DTC')\nprint(cm)\n\n# default : gini \n\ndtc = DecisionTreeClassifier(criterion = 'gini')\ndtc.fit(X_train, y_train)\npred = dtc.predict(X_test)\n\ncm = confusion_matrix(y_test, pred)\nprint('DTC_gini')\nprint(cm)\n\n# random forest classifier\nfrom sklearn.ensemble import RandomForestClassifier\nrfc = RandomForestClassifier(n_estimators = 10, criterion = 'entropy')\nrfc.fit(X_train, y_train)\npred = rfc.predict(X_test)\n\ncm = confusion_matrix(y_test, pred)\nprint('RFC_10_entropy')\nprint(cm)\n\n# criterion default:gini\nrfc = RandomForestClassifier(n_estimators = 10, criterion = 'gini')\nrfc.fit(X_train, y_train)\npred = rfc.predict(X_test)\n\ncm = confusion_matrix(y_test, pred)\nprint('RFC_10_gini')\nprint(cm)\n\n# n_estimators default:100\nrfc = RandomForestClassifier(n_estimators = 100, criterion = 'entropy')\nrfc.fit(X_train, y_train)\npred = rfc.predict(X_test)\n\ncm = confusion_matrix(y_test, 
pred)\nprint('RFC_100_entropy')\nprint(cm)\n\n#criterion default: gini\nrfc = RandomForestClassifier(n_estimators = 100, criterion = 'gini')\nrfc.fit(X_train, y_train)\npred = rfc.predict(X_test)\n\ncm = confusion_matrix(y_test, pred)\nprint('RFC_100_gini')\nprint(cm)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"bolum5_sınıflama/Rf_Classifier.py","file_name":"Rf_Classifier.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"104578860","text":"#!/usr/bin/python3\n# _*_ coding: utf-8 _*_\nfrom django.template import Library\n\nregister = Library()\n\n\n@register.filter('high_light')\ndef high_light(value, q):\n value = value.replace(q, f'{q}')\n return value\n","sub_path":"demo/chapter03_database/query_database/query_demo/templatetags/custom_filter.py","file_name":"custom_filter.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"394965568","text":"# with open('qwe.txt', 'r') as f:\n# for line in f.readlines():\n# print(line.strip())\n# # To read binary files such as images or videos, open them with 'rb'\n# f = open('test.jpg', 'rb')\n# f.read()\n#\n# # To read a text file that is not UTF-8 encoded, pass the encoding parameter to open(), e.g. for a GBK-encoded file\n# f = open('gbk.txt', 'r', encoding='gbk')\n# f.read()\n#\n# # If the file triggers UnicodeDecodeError, the bad characters can simply be ignored: errors='ignore'\n# # Similarly, for writing files: 'w', 'wb'\n# f = open('qwe.txt', 'w')\n# f.write('Hello,World')\n# f.close()\n#\n# import os\n# # Check the operating system type; it is 'nt' on Windows\n# print(os.name)\n# # View the environment variables\n# print(os.environ.get('PATH'))\n# # Get the absolute path of the current directory\n# print(os.path.abspath('.'))\n# # To create a new directory under some directory, first build the new directory's full path\n# os.path.join('C:\\\\Users\\\\shopfloornb4.F2-QCMC\\\\PycharmProjects\\\\11.1', 'testdir')\n# # Then create the directory (when I tried it, creating it directly also worked)\n# os.mkdir('C:\\\\Users\\\\shopfloornb4.F2-QCMC\\\\PycharmProjects\\\\11.1\\\\testdir')\n# # Remove a directory\n# os.rmdir('C:\\\\Users\\\\shopfloornb4.F2-QCMC\\\\PycharmProjects\\\\11.1\\\\testdir')\n# # Split a path into two parts; the last part is always the last-level directory or file name\n# os.path.split('C:\\\\Users\\\\shopfloornb4.F2-QCMC\\\\PycharmProjects\\\\11.1\\\\testdir')\n# # Split off the file extension\n# os.path.splitext('/path/to/file.txt')\n# # Rename a file:\n# os.rename('test.txt', 'test.py')\n# # Delete a file:\n# os.remove('test.py')\n# # The shutil module provides many functions that complement the os module, e.g. copyfile()\n# # List all directories under the current directory:\n# s = [x for x in os.listdir('.') if os.path.isdir(x)]\n# print(s)\n# # List all .py files:\n# d = [x for x in os.listdir('.') if os.path.isfile(x) and os.path.splitext(x)[1]=='.py']\n# print(d)\n\n# Find files in the current directory whose names contain a given string\nfrom datetime import datetime\nimport os\n\npwd = os.path.abspath('.')\n\nprint(' Size Last Modified Name')\nprint('-----------------------------------------------')\n\nfor f in os.listdir(pwd):\n fsize = os.path.getsize(f)\n mtime = datetime.fromtimestamp(os.path.getmtime(f)).strftime('%Y-%m-%d %H:%M')\n flag = '/' if os.path.isdir(f) else ''\n print('%10d %s %s%s' % (fsize, mtime, f, flag))\n\n","sub_path":"googlesheet/1114.py","file_name":"1114.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"447059041","text":"from django.contrib import admin\nfrom django.urls import path\nfrom proyectoSO import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('', views.home, name=\"home\"),\n path('home/', views.home, name=\"home1\"),\n path('crear/', views.crear, name=\"crear\"),\n path('borrar/', views.borrar, 
name=\"borrar\"),\n path('copiar/', views.copiar, name=\"copiar\"),\n path('mover/', views.mover, name=\"mover\"),\n path('verPermisos/', views.verPermisos, name=\"verPermisos\"),\n path('cambiarPermisos/', views.cambiarPermisos, name=\"cambiarPermisos\"),\n path('renombrar/', views.renombrar, name=\"renombrar\"),\n path('cambiarPropietario/', views.cambiarPropietario, name=\"cambiarPropietario\"),\n path('abrir/',views.abrir,name=\"abrir\"),\n path('admin/', admin.site.urls),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"proyectoSO/proyectoSO/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"436388069","text":"class Hand(object):\n def __init__(self, card1, card2):\n self.card1 = card1\n self.card2 = card2\n self.cards = [card1, card2]\n self.bust = False\n\n def check_for_bust(self):\n total = self.get_total()\n if total > 21 and self.have_ace():\n self.reduce_ace()\n self.check_for_bust()\n elif total > 21:\n self.bust = True\n return True\n else:\n return False\n\n def get_total(self):\n total = 0\n for card in self.cards:\n total += card.value\n\n return total\n\n def have_ace(self):\n for card in self.cards:\n if card.display == 'Ace' and card.value == 11:\n return True\n else:\n return False\n\n def reduce_ace(self):\n for card in self.cards:\n if card.display == 'Ace' and card.value == 11:\n card.value = 1\n return \"\"\n\n def add_card(self, card):\n self.cards.append(card)","sub_path":"python_projects/MyProject/blackjack/Hand.py","file_name":"Hand.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"106500767","text":"import queue\n\ndef gendistinct(n):\n\tleafnode = '.'\n\tdp = []\n\tnewset = set()\n\tnewset.add(leafnode)\n\tdp.append(newset)\n\tfor i in range(1,n):\n\t\tnewset = set()\n\t\tfor j in range(i):\n\t\t\tfor leftchild in dp[j]:\n\t\t\t\tfor rightchild in dp[i-j-1]:\n\t\t\t\t\tnewset.add('fun(' + leftchild + ', ' + rightchild + ')')\n\t\tdp.append(newset)\n\treturn dp[-1]\n\ndef fun(x, y):\n\treturn 1 - max(x, y)\n\ndef evaluate(f):\n\tvar = 0\n\tfor vx in range(0, 2):\n\t\tfor vy in range(0, 2):\n\t\t\tnew_f = f\n\t\t\tnew_f = new_f.replace(\"x\", (str)(vx))\n\t\t\tnew_f = new_f.replace(\"y\", (str)(vy))\n\t\t\tvar += eval(new_f) * (2 ** (2 * vx + vy))\n\n\treturn var\n\nq = queue.Queue()\nfunctors = []\nweights = [999] * 16\noutput = []\noutputN = 0\ncounter = 0\n\nfor i in range(1, 8): # wartość osiem dobrana eksperymentalnie\n\ttrees = gendistinct(i)\n\tfor tree in trees:\n\t\tq.put((str)(tree))\n\nwhile not q.empty():\n\titem = q.get()\n\tif (item.count(\".\") == 0):\n\t\tfunctors.append(item)\n\telse:\n\t\tq.put(item.replace(\".\", \"x\", 1))\n\t\tq.put(item.replace(\".\", \"y\", 1))\n\nfor functor in functors:\n\tw = evaluate(functor)\n\tl = len(functor)\n\tif l < weights[w]:\n\t\tweights[w] = l\n\t\ttemp = functor.replace(\"fun\", \"\").replace(\", \", \"|\")\n\t\tvar = temp.count(\"|\")\n\t\toutputN += var\n\t\tprint((str)(var) + \" \" + temp)\n\t\tcounter += 1\n\tif counter == 16:\n\t\tbreak\n\nprint (\"\\nFound a solution with \" + (str)(outputN) + \" strokes.\")","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"98983094","text":"\nimport re\nimport os\n\nfrom ..buildtool import BuildTool\nfrom ..runenvtool import RunEnvTool\nfrom ..runtimetool import RuntimeTool\nfrom .curltoolmixin import CurlToolMixIn\n\n\nclass dockerTool(BuildTool, CurlToolMixIn, RunEnvTool, RuntimeTool):\n \"\"\"Docker - Build, Ship, and Run Any App, Anywhere.\n\nHome: https://www.docker.com/\n\nExperimental support.\n\nDocker CE support is added for CentOS, Fedora, Debian and Ubuntu.\nFor other systems, \"docker\" or \"docker-engine\" packages is tried to be installed.\n\nDocker EE or other installation methods are out of scope for now.\n\"\"\"\n\n def autoDetectFiles(self):\n return 'Dockerfile'\n\n def getOrder(self):\n return 10\n\n def envNames(self):\n return ['dockerBin', 'dockerVer', 'dockerRepos', 'dockerTag']\n\n def _installTool(self, env):\n repo = env.get('dockerRepos', 'https://download.docker.com')\n\n if self._isCentOS():\n self._addYumRepo('docker', repo + '/linux/centos/docker-ce.repo')\n\n elif self._isFedora():\n self._addYumRepo('docker', repo + '/linux/fedora/docker-ce.repo')\n\n elif self._isDebian():\n gpg = self._callCurl(env, [repo + '/linux/debian/gpg'])\n self._addAptRepo(\n 'docker',\n 'deb [arch=amd64] {0}/linux/debian $codename$ stable'.format(\n repo),\n gpg\n )\n\n elif self._isUbuntu():\n gpg = self._callCurl(env, [repo + '/linux/ubuntu/gpg'])\n self._addAptRepo(\n 'docker',\n 'deb [arch=amd64] {0}/linux/ubuntu $codename$ stable'.format(\n repo),\n gpg,\n codename_map={\n 'zesty': 'yakkety',\n },\n repo_base=repo,\n )\n\n elif self._isOracleLinux() or self._isRHEL():\n self._addYumRepo('docker', repo + '/linux/centos/docker-ce.repo')\n\n # elif self.isOpenSUSE() or self.isSLES():\n # virt_repo = 'https://download.opensuse.org/repositories/Virtualization'\n #\n # with open('/etc/os-release', 'r') as rf:\n # releasever = re.search('VERSION=\"([0-9.]+)\"', rf.read()).group(1)\n #\n #\n # if self.isOpenSUSE():\n # virt_repo += '/openSUSE_Leap_'+releasever\n # else:\n # virt_repo += '/SLE_'+releasever\n #\n # virt_gpg = self._callCurl(env, [virt_repo+'/repodata/repomd.xml.key'])\n # self._addZypperRepo('Virtualization', virt_repo+'/Virtualization.repo', virt_gpg)\n #\n # gpg = self._callCurl(env, [repo+'/linux/centos/gpg'])\n # self._addZypperRepo('docker', repo + '/linux/centos/7/x86_64/stable/', gpg, yum=True)\n\n else:\n self._requireYumEPEL()\n self._requirePackages(['docker'])\n self._requirePackages(['docker-engine'])\n self._requireEmerge(['app-emulation/docker'])\n self._requirePacman(['docker'])\n\n self._trySudoCall(\n ['/bin/systemctl', 'start', 'docker'],\n errmsg='you may need to start Docker manually !'\n )\n\n return\n\n ver = env.get('dockerVer', None)\n\n if ver:\n self._requirePackages(['docker-ce-' + ver])\n else:\n self._requirePackages(['docker-ce'])\n\n self._trySudoCall(\n ['/bin/systemctl', 'start', 'docker'],\n errmsg='you may need to start Docker manually !'\n )\n\n def onBuild(self, config):\n env = config['env']\n tag = env.get('dockerTag', os.path.basename(os.path.realpath('.')))\n cmd = [env['dockerBin'], 'build', '-t', tag, '.']\n\n if self._haveGroup('docker'):\n self._callExternal(cmd)\n else:\n sudo = self._which('sudo')\n self._callExternal([sudo] + cmd)\n\n def onExec(self, env, args):\n bin = env['dockerBin']\n\n if self._haveGroup('docker'):\n self._callInteractive([bin] + args)\n else:\n sudo = self._which('sudo')\n self._callInteractive([sudo, bin] + args)\n\n def onRun(self, config, file, args, tune):\n env = config['env']\n cmd = 
[env['dockerBin'], 'run', file]\n\n if self._haveGroup('docker'):\n self._callExternal(cmd)\n else:\n sudo = self._which('sudo')\n self._callExternal([sudo] + cmd)\n\n def tuneDefaults(self):\n return {\n 'minMemory': '256M',\n 'debugOverhead': '128M',\n 'connMemory': '100K',\n 'debugConnOverhead': '1M',\n 'socketType': 'tcp',\n 'scalable': False,\n }\n","sub_path":"futoin/cid/tool/dockertool.py","file_name":"dockertool.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"386594454","text":"from __future__ import print_function\n\nimport csv\nimport os\nimport sqlite3\nfrom collections import Iterable\n\n\nclass Database(object):\n \"\"\"Database wrapper.\"\"\"\n\n def __init__(self, path, debug=False):\n # type: (str) -> Database\n self.path = path\n self._db = sqlite3.connect(path)\n self.cursor = self._db.cursor()\n self.debug = debug\n\n @staticmethod\n def sanitize(s):\n # type: (str) -> str\n return '\"{}\"'.format(s.replace(\"'\", \"''\").replace('\"', '\"\"'))\n\n def add_csv(self, path, table_name=None, types=None):\n # type: (str, Iterable[str] | None) -> bool\n file_name = os.path.basename(path)\n if '.' in file_name:\n file_name, ext = file_name.rsplit('.', 1)\n if ext != 'csv':\n return False\n if table_name is None:\n table_name = Database.sanitize(file_name)\n with open(path) as csv_file:\n fields = csv.DictReader(csv_file).fieldnames\n fields = [Database.sanitize(field) for field in fields]\n reader = csv.reader(csv_file)\n num_fields = len(fields)\n if types is None or len(types) != num_fields:\n types = ['BLOB' for i in xrange(num_fields)]\n create_table_query = 'CREATE TABLE IF NOT EXISTS {} ({})'.format(\n table_name,\n ', '.join(field + ' ' + type for field, type in zip(fields, types))\n )\n if self.debug:\n print(create_table_query)\n self.cursor.execute(create_table_query)\n insert_query = 'INSERT INTO {} VALUES ({})'.format(\n table_name,\n ', '.join('?' 
for i in xrange(num_fields))\n        )\n        self.cursor.executemany(insert_query, [row for row in reader])\n        return True\n\n    def commit(self):\n        self._db.commit()\n\n    def hard_close(self):\n        self._db.close()\n\n    def close(self):\n        self.commit()\n        self.hard_close()\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        self.close()\n\n\nif __name__ == '__main__':\n    with Database('test.db') as db:\n        db.add_csv('students.csv', types=('TEXT', 'INT', 'INT PRIMARY KEY'))\n","sub_path":"CSVtoDatabase/csv2db.py","file_name":"csv2db.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"495311013","text":"#!/usr/bin/python3\n\n\"\"\"\n@file: base_controller.py\n@brief: controller base class\n@author: feihu1996.cn\n@date: 18-08-19\n@version: 1.0\n\"\"\"\n\nfrom utils.logger import Logger\n\n\nclass BaseController:\n    \"\"\"\n    Controller base class\n    \"\"\"\n    def __init__(self, args={}, session={}):\n        \"\"\"\n        args:\n            dict of request parameters\n        \"\"\"\n        Logger.info_logger().info(\"request args -- {}\".format(args))\n\n        self.args = args\n        self.session = session\n","sub_path":"controllers/base_controller.py","file_name":"base_controller.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"280102200","text":"# author: midavis\n# date: 2019-02-11\n\n# Must check [int, float, str, bool]\n# Check negative numbers\n# must return values & attempts\n# must check against default\n\ndef inputChecker(input_prompt, input_type, num_attempts = 3):\n\n    #####\n    # DO NOT CHANGE ANYTHING BETWEEN HERE AND THE NEXT ALL CAPS COMMENT BELOW\n    #####\n\n    # Initialize flags and counters.\n    print('\\n')\n    input_not_correct_type = True\n    attempt_number = 1\n\n    # Loop while the user hasn't inputted a correct data type in under the allotted number of attempts.\n    while input_not_correct_type and attempt_number <= num_attempts:\n\n        # Ask for the input.\n        user_input = input(input_prompt + ' (expecting an input of type ' + input_type + '): ')\n\n        # Try converting their input to the appropriate data type.\n        try:\n            # Booleans are weird\n            # https://stackoverflow.com/questions/715417/converting-from-a-string-to-boolean-in-python\n            if input_type == 'bool':\n                if user_input == 'True':\n                    user_input = True\n                elif user_input == 'False':\n                    user_input = False\n                else:\n                    # This is a useful way to say - 'Go to the \"except\" line below.\n                    raise ValueError\n            else:\n                # Typically you'd think you should do input_type(user_input) - it may work in some cases, but not others. Better to use eval here.\n                # https://www.programiz.com/python-programming/methods/built-in/eval\n                user_input = eval(input_type)(user_input)\n\n            # Print success if you were able to get this far.\n            print('\\nThank you, proceeding now.')\n            input_not_correct_type = False\n\n        except:\n            # Check to see what attempt number they're on.\n            if attempt_number == num_attempts:\n                print('You exceeded the allowable number of tries of ' + str(num_attempts) + '. Ending program now.')\n                break\n            # If they have remaining tries, let them go for it again.\n            else:\n                attempt_number += 1\n                print('You entered a bad value. You should really try again when the prompt comes back.\\n')\n\n    print('\\n')\n\n    #####\n    # THIS IS THAT NEXT ALL CAPS COMMENT. 
AGAIN, DO NOT CHANGE ANYTHING IN HERE ^\n #####\n\n return attempt_number, user_input\n\n# a,b = inputChecker('Give me a nice pretty value', 'float')\n\n# print(a,b)\n# # eval3","sub_path":"Class11/inputCheckerFunction.py","file_name":"inputCheckerFunction.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"406586201","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass RNN(nn.Module):\n def __init__(self, dim_input, dim_hidden, dim_output):\n super().__init__()\n self.rnn = nn.RNN(dim_input, dim_hidden, 1, nonlinearity='relu')\n self.W = nn.Linear(dim_hidden, dim_output)\n\n def forward(self, x):\n h_all, h_final = self.rnn(x)\n return self.W(h_final.squeeze(0))\n\n\nclass GRU(nn.Module):\n def __init__(self, dim_input, dim_hidden, dim_output):\n super().__init__()\n self.gru = nn.GRU(dim_input, dim_hidden, 1)\n self.W = nn.Linear(dim_hidden, dim_output)\n\n def forward(self, x):\n h_all, h_final = self.gru(x)\n return self.W(h_final.squeeze(0))\n\n\nclass LSTM(nn.Module):\n def __init__(self, dim_input, dim_hidden, dim_output):\n super().__init__()\n self.lstm = nn.LSTM(dim_input, dim_hidden, 1)\n self.W = nn.Linear(dim_hidden, dim_output)\n\n def forward(self, x):\n h_all, (h_final, c_final) = self.lstm(x)\n return self.W(h_final.squeeze(0))\n\n\nclass Attention(nn.Module):\n def __init__(self, dim_q, dim_k, dim_emb):\n super().__init__()\n self.Q = nn.Linear(dim_q, dim_emb)\n self.K = nn.Linear(dim_k, dim_emb)\n self.V = nn.Linear(dim_k, dim_emb)\n\n def forward(self, query, key, value):\n query = self.Q(query)\n key = self.K(key)\n value = self.V(value)\n\n a = (query.unsqueeze(1) * key.unsqueeze(0)).sum(-1)\n a = torch.softmax(a, dim=1)\n\n output = (a.unsqueeze(-1) * value.unsqueeze(0)).sum(1)\n return output\n\n\nclass AttentionGRU(nn.Module):\n def __init__(self, dim_input, dim_hidden, dim_output):\n super().__init__()\n self.gru = nn.GRU(dim_input, dim_hidden, 1)\n self.W = nn.Linear(dim_hidden, dim_output)\n # self.attention = nn.MultiheadAttention(dim_hidden, num_heads=1)\n self.attention = Attention(dim_hidden, dim_hidden * 2, dim_hidden)\n self.position_embeddings = nn.Embedding(512, dim_hidden)\n\n def forward(self, x):\n h_all, h_final = self.gru(x)\n pos = self.position_embeddings.weight[:len(x)].unsqueeze(1).expand(-1, x.shape[1], -1)\n key = torch.cat((x, pos), -1)\n query = h_final # .view(h_final.shape[1], 1, -1)\n # attn_output, attn_output_weights = self.attention(query, key, key)\n attn_output = self.attention(query, key, key)\n return (attn_output.squeeze(0))\n","sub_path":"pytorch/Attention_Lab/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"421383140","text":"#!/usr/bin/python\nnumber = 23\nrunning = True\nwhile running :\n userinput = int(input('Enter an integer : '))\n if userinput == number :\n print('Congratulations, You guessed it right :D')\n running = False\n elif userinput < number :\n print('No, it is a little higher than that.')\n else :\n print('No, it is a little lower than that')\nelse :\n print('The while loop is over.')\nprint('Done.')\n\n#Break statement is used to break out of a loop statement. 
or to stop the execution of the current looping instruction.\n#while True :\n#s = input('Enter Something : ')\n#if s == 'quit' :\n# break;\n#printf('Length of the string is', len(s))\n#printf('Done')\n","sub_path":"while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"219330981","text":"import networkx as nx\n\n\nclass TreapGraph:\n def __init__(self, treap):\n self.graph = nx.Graph()\n self.treap = treap\n\n def create_graph(self):\n graph = nx.Graph()\n self.draw_treap(self.treap.root, graph, 1, 0)\n return graph\n\n def draw_treap(self, node, graph, xpos, ypos):\n if node.key is None:\n graph.add_node(node.key, pos=(xpos, ypos), label=\"Key:\" \"\\n\" +\"Priority:\",\n color=node.color)\n else:\n graph.add_node(node.key, pos=(xpos, ypos), label=str(node.key) + \"\\n\" +str(node.priority),\n color=node.color)\n if node.parent_node:\n graph.add_edge(node.key, node.parent_node.key)\n node.xpos = xpos # Set position of node for recursive callback\n if node.left_node is not None: # Drawing a left child\n if node.parent_node is None: # Root case\n self.draw_treap(node.left_node, graph, xpos - (xpos) / 2, ypos - 1)\n else:\n self.draw_treap(node.left_node, graph, xpos - abs(xpos - node.parent_node.xpos) / 2, ypos - 1)\n\n if node.right_node is not None: # Drawing a right child\n if node.parent_node is None: # Root case\n self.draw_treap(node.right_node, graph, xpos + (xpos) / 2, ypos - 1)\n else:\n self.draw_treap(node.right_node, graph, xpos + abs(xpos - node.parent_node.xpos) / 2, ypos - 1)\n else:\n return\n\n def draw(self, treap, plot, canvas):\n plot.clear()\n self.treap = treap\n self.graph = self.create_graph()\n pos = nx.get_node_attributes(self.graph, 'pos')\n label = nx.get_node_attributes(self.graph, 'label')\n color_dict = nx.get_node_attributes(self.graph, 'color')\n color_list = []\n for color in color_dict:\n color_list.append(color_dict[color])\n nx.draw(self.graph, pos, node_size=3000, node_color=color_list, labels=label, with_labels=True, ax=plot)\n canvas.draw()\n","sub_path":"treap_graph.py","file_name":"treap_graph.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"596503539","text":"#\n# By Levente Csányi, Codecool 1st Semester\n#\n\n\n\nfrom itertools import groupby\n\n\ndef count_games(file_name):\n file_cont = open(file_name, \"r\")\n file_lines = file_cont.readlines()\n return len(file_lines)\n\n\ndef decide(file_name, year):\n file_cont = [line.split(',') and line.split(\"\\t\") for line in open(file_name)]\n x = [str(i[2]) for i in file_cont]\n return year in x\n\n\ndef get_latest(file_name):\n file_get = [line.split(',') and line.split(\"\\t\") for line in open(file_name)]\n values = [str(i[2]) for i in file_get]\n keys = [str(i[0]) for i in file_get]\n solution = dict(zip(keys, values))\n solution = max(solution, key=solution.get)\n print(solution)\n\n\ndef count_by_genre(file_name, genre):\n file_count = [line.split(',') and line.split(\"\\t\") for line in open(file_name)]\n x = [str(i[3]) for i in file_count]\n x.sort()\n for key, group in groupby(x):\n if key == genre:\n return len(list(group))\n\n\ndef get_line_number_by_title(file_name, title):\n try:\n with open(file_name) as f:\n for i, line in enumerate(f, 1):\n if title in line:\n return i\n except IOError:\n pass\n\n\n\n# Report 
functions\n","sub_path":"reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"72423378","text":"adam_params = {'alpha': 0.001,\n 'beta1': 0.9,\n 'beta2': 0.999,\n 'epsilon': 10e-8}\n# explination:\n# alpha - Also referred to as the learning rate or step size.\n# The proportion that weights are updated (e.g. 0.001).\n# Larger values (e.g. 0.3) results in faster initial learning before the rate is updated.\n# Smaller values\n# (e.g. 1.0E-5) slow learning right down during training\n","sub_path":"adam_notes.py","file_name":"adam_notes.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"583322380","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\n\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.reg = [0] * 8\n self.PC = 0\n self.IR = 0\n self.MAR = 0\n self.MDR = 0\n self.FL = 0b00000000\n self.IM = self.reg[5]\n self.IS = self.reg[6]\n self.SP = self.reg[7]\n self.ram = [0] * 256\n\n def load(self):\n \"\"\"Load a program into memory.\"\"\"\n address = 0\n if len(sys.argv) != 2:\n print('Wrong amount of args. \\nUsage example: python ls8.py aprogram.ls8')\n sys.exit(1)\n\n program_file = open(sys.argv[1])\n\n for line in program_file:\n\n inst = line.strip()\n if not inst.startswith('#'):\n inst = inst.split('#', 1)[0]\n\n inst = inst.split()[0]\n\n inst = int(inst, 2)\n self.ram[address] = inst\n address += 1\n\n # sprint challenge material\n # Adding flag and CMP instruction to the alu\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n\n if op == \"ADD\":\n self.reg[reg_a] += self.reg[reg_b]\n\n elif op == \"MUL\":\n x, y = self.reg[reg_a], self.reg[reg_b]\n ans = 0\n while y > 0:\n if y & 1:\n ans = ans + x\n x = x << 1\n y = y >> 1\n print(ans)\n\n elif op == 'CMP':\n # this instruction is used for comparison in values in other\n # instructions\n x, y = self.reg[reg_a], self.reg[reg_b]\n if x == y:\n self.FL = 1\n else:\n self.FL = 0\n else:\n raise Exception(\"Unsupported ALU operation\")\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. 
You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.PC,\n\n self.ram_read(self.PC),\n self.ram_read(self.PC + 1),\n self.ram_read(self.PC + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end='')\n\n print()\n\n def ram_read(self, address):\n return self.ram[address]\n\n def ram_write(self, address, value):\n self.ram[address] = value\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n op_codes = {\n 0b10000010: 'LDI',\n 0b01000111: 'PRN',\n 0b00000001: 'HLT',\n 0b10100000: 'ADD',\n 0b10100010: 'MUL',\n 0b01000101: 'PUSH',\n 0b01000110: 'POP',\n 0b01010000: 'CALL',\n 0b00010001: 'RET',\n 0b00010001: 'RET',\n 0b10100111: 'CMP', # uses flags for comparisons in other inst\n 0b01010100: 'JMP', # jumps to the address in the given register\n 0b01010101: 'JEQ', # if E fl==1 jmp to address in given register\n 0b01010110: 'JNE' # if E fl==0 jmp to address in given register\n }\n alu_codes = set(['ADD', 'SUB', 'MUL', 'CMP'])\n\n while True:\n\n binary_op_code = self.ram_read(self.PC)\n op = op_codes[binary_op_code]\n\n if op == 'LDI':\n reg_num = self.ram_read(self.PC + 1)\n value = self.ram_read(self.PC + 2)\n self.reg[reg_num] = value\n self.PC += 3\n\n elif op == 'PRN':\n reg_num = self.ram_read(self.PC + 1)\n value = self.reg[reg_num]\n print(value)\n self.PC += 2\n\n elif op in alu_codes:\n reg1 = self.ram_read(self.PC + 1)\n reg2 = self.ram_read(self.PC + 2)\n self.alu(op, reg1, reg2)\n self.PC += 3\n\n elif op == 'PUSH':\n self.SP -= 1\n reg_num = self.ram_read(self.PC + 1)\n self.ram[self.SP] = self.reg[reg_num]\n self.PC += 2\n\n elif op == 'POP':\n reg_num = self.ram_read(self.PC + 1)\n self.reg[reg_num] = self.ram[self.SP]\n self.SP += 1\n self.PC += 2\n\n elif op == 'CALL':\n # push address of intr after call to stack\n self.SP -= 1\n self.ram[self.SP] = self.PC + 2\n # pc is set to address stored in reg, so can jump to that intr in ram\n reg_num = self.ram_read(self.PC + 1)\n self.PC = self.reg[reg_num]\n\n elif op == 'RET':\n self.PC = self.ram_read(self.SP)\n self.SP += 1\n\n # Sprint Challenge Code\n # jump to the address in the given register; set the pc to the\n # address stored in the given register\n elif op == 'JMP':\n reg_num = self.ram_read(self.PC + 1)\n self.PC = self.reg[reg_num]\n\n # Sprint Challenge Code\n # if the flag is set to True jump to the address in the given\n # register; the jump is executed in the following way\n # reg_num = self.ram_read(self.PC + 1)\n # self.PC = self.reg[reg_num]\n elif op == 'JEQ':\n if self.FL == 1:\n reg_num = self.ram_read(self.PC + 1)\n self.PC = self.reg[reg_num]\n else:\n self.PC += 2\n\n # Sprint Challenge Code\n # if the flag is set to False jump to the address in the given\n # register; the jump is executed in the following way\n # reg_num = self.ram_read(self.PC + 1)\n # self.PC = self.reg[reg_num]\n elif op == 'JNE':\n if self.FL != 1:\n reg_num = self.ram_read(self.PC + 1)\n self.PC = self.reg[reg_num]\n else:\n self.PC += 2\n elif op == 'HLT':\n break\n else:\n print('unknown binary operation code')\n break\n","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"17856464","text":"from collections import deque\n\ncnt = 0\n\ndef change(target, coins, usable):\n global cnt\n coin = coins.popleft()\n if len(coins) == 0:\n if target // coin <=usable:\n cnt += 1\n else:\n for i in range(0, target 
// coin +1):\n change(target - coin * i, coins.copy(), usable - i)\nchange(1000, deque([500, 100, 50, 10]), 15)\nprint(cnt)\n\n","sub_path":"pyKata/q05_02.py","file_name":"q05_02.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"564172264","text":"#!/usr/bin/env python3\n# -*-coding: utf-8-*-\n# Author : Chris\n# Blog : http://blog.chriscabin.com\n# GitHub : https://www.github.com/chrisleegit\n# File : fifoserver.py\n# Date : 16-7-1\n# Version: 0.1\n# Description: ...\n\nimport os\n\nfifo_file = '/tmp/fifo'\n\n\ndef main():\n if not os.path.exists(fifo_file):\n os.mkfifo(fifo_file)\n pipein = open(fifo_file, 'r')\n\n print('Server is running...')\n\n while True:\n line = pipein.readline()[:-1]\n print('Message from client: {}'.format(line))\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"ch5/fifoserver.py","file_name":"fifoserver.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"503153935","text":"from .client import Client\nimport json\n\n\nclass Tokens(Client):\n def __init__(self, tokenname):\n Client.__init__(self, address='')\n self.module += ''\n self.tokenname = '&tokenname=' + tokenname\n\n def make_url(self, call_type=''):\n if call_type == 'tokensupply':\n self.url = self.prefix \\\n + self.module \\\n + self.action \\\n + self.tokenname \\\n + self.key\n elif call_type == 'tokenbalance':\n self.url = self.prefix \\\n + self.module \\\n + self.action \\\n + self.address \\\n + self.tokenname \\\n + self.key\n\n def get_total_supply(self):\n self.action += 'tokensupply'\n self.module += 'stats'\n self.make_url(call_type='tokensupply')\n req = self.connect()\n if req.status_code == 200:\n return json.loads(req.text)['result']\n else:\n return req.status_code\n\n def get_token_balance(self, address):\n self.address += address\n self.module += 'account'\n self.action += 'tokenbalance'\n self.make_url(call_type='tokenbalance')\n req = self.connect()\n if req.status_code == 200:\n return json.loads(req.text)['result']\n else:\n return req.status_code\n","sub_path":"etherscan/tokens.py","file_name":"tokens.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"597541101","text":"#!/usr/bin/env python\n# Upendra Kumar Devisetty\n# 11/12/15\n# Script to replace the header as well as keep the first id \n\nimport sys\n\nfile_in = sys.argv[1]\nfile_in2 = sys.argv[2]\nfile_out = sys.argv[3]\n\nresult = {}\nresult2 = {}\n\nwith open(file_in) as fh_in:\n\twith open(file_in2) as fh_in2:\n\t\twith open(file_out, 'w') as fh_out: \n\t\t\tfor line in fh_in:\n\t\t\t\tif line.startswith(\"CO\"):\n\t\t\t\t\tline = line.split()\n\t\t\t\t\tid = line[1]\n\t\t\t\t\tresult[id] = \"\"\n\t\t\t\telse:\n\t\t\t\t\tline = line.split()\n\t\t\t\t\tgene = line[1]\n\t\t\t\t\tresult[id] += gene\n\t\t\t\t\tresult[id] += \" \"\n\n\t\t\tfor kee, val in result.items():\n\t\t\t\tval = val.split()\n\t\t\t\tif len(val) > 1:\n\t\t\t\t\trename = val[0]\n\t\t\t\t\tresult2[kee] = rename\n\t\t\t\telse:\n\t\t\t\t\tresult2[kee] = val\n\n\t\t\tfor line2 in fh_in2:\n\t\t\t\tline2 = line2.strip()\n\t\t\t\tif line2.startswith(\">\"):\n\t\t\t\t\tline2 =line2[1:]\n\t\t\t\t\tnew = result2[line2]\n\t\t\t\t\tnew2 = 
\"\".join(new)\n\t\t\t\t\tfh_out.write(\">\")\n\t\t\t\t\tfh_out.write(str(new2))\n\t\t\t\t\tfh_out.write(\"\\n\")\n\t\t\t\telse:\n\t\t\t\t\tfh_out.write(line2)\n\t\t\t\t\tfh_out.write(\"\\n\")\n","sub_path":"evolinc/1.6/fasta_header_rename.py","file_name":"fasta_header_rename.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"632934528","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/11/9 11:21\n# @Author : qkwu\n# @File : leetcode55JumpGame.py\n\nclass Solution:\n def canJump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n i_left = len(nums) - 1\n for i in range(len(nums) - 2, -1, -1): # go backward\n if i + nums[i] >= i_left: # if maximum jump is far enough\n i_left = i\n return i_left == 0\n\n\nnums = [2,3,1,1,4]\nsl = Solution()\nprint(sl.canJump(nums))","sub_path":"leetcode55JumpGame.py","file_name":"leetcode55JumpGame.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"387486612","text":"from sympy import *\r\n\r\nx = Symbol( 'x' )\r\nix = integrate( x * sin( x ), (x, 0, 2 * pi) )\r\npprint( ix )\r\n\r\nr = Symbol( 'r', positive = True )\r\nCircle = 2 * integrate( sqrt( r * r - x * x ), (x, -r, r) )\r\nCircle = Circle.subs( r, sqrt( r * r - x * x ) )\r\nVolumn = integrate( Circle, (x, -r, r) )\r\npprint( Volumn )\r\n","sub_path":"Python/Sci Calcu/Sympy_ball.py","file_name":"Sympy_ball.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"475338357","text":"\nfrom typing import Union, List\nfrom scheme_exceptions import ParseError\nSPECIALS = ['(', ')', '[', ']', \"'\", '`', ',', '@', '\"', ';']\n\n\nclass Token():\n\n def __init__(self, value: str):\n self.value = value\n self.comments = []\n self.comments_inline = True\n\n def __eq__(self, other):\n return (other == self.value)\n\n def __hash__(self):\n return hash(self.value)\n\n def __repr__(self):\n return repr(self.value)\n\n def __str__(self):\n return str(self.value)\n\n\nclass TokenBuffer():\n\n def __init__(self, lines, do_comments=False, ignore_brackets=False):\n self.string = '\\n'.join(lines)\n self.tokens = tokenize(self.string, do_comments, ignore_brackets)\n self.done = (not self.tokens)\n self.i = 0\n\n def get_next_token(self) -> Token:\n if self.done:\n raise ParseError(\n 'Incomplete expression, probably due to unmatched parentheses.')\n return self.tokens[self.i]\n\n def pop_next_token(self) -> Token:\n out = self.get_next_token()\n self.i += 1\n if (self.i == len(self.tokens)):\n self.done = True\n return out\n\n\ndef tokenize(string, do_comments, ignore_brackets) -> List[Token]:\n string = string.strip()\n tokens = []\n comments = {\n\n }\n i = 0\n first_in_line = True\n prev_newline = True\n\n def _get_token():\n 'Always starts at a non-space character'\n nonlocal i, first_in_line, prev_newline\n if (i == len(string)):\n return\n if (string[i] == '\"'):\n first_in_line = False\n prev_newline = False\n tokens.append(Token(string[i]))\n i += 1\n _get_string()\n return\n elif (string[i] == ';'):\n i += 1\n _get_comment()\n elif ((string[i] in SPECIALS) and (not (ignore_brackets and (string[i] in ['[', ']'])))):\n first_in_line = False\n prev_newline = False\n tokens.append(Token(string[i]))\n i += 1\n else:\n curr = ''\n while ((i != len(string)) and (not string[i].isspace()) and 
(string[i] not in SPECIALS)):\n curr += string[i]\n i += 1\n if curr:\n first_in_line = False\n prev_newline = False\n tokens.append(Token(curr))\n\n def _get_comment():\n nonlocal i\n curr = ''\n while ((i != len(string)) and (string[i] != '\\n')):\n curr += string[i]\n i += 1\n if first_in_line:\n if (len(tokens) not in comments):\n comments[len(tokens)] = []\n comments[len(tokens)].append(((not first_in_line), curr))\n else:\n if ((len(tokens) - 1) not in comments):\n comments[(len(tokens) - 1)] = []\n comments[(len(tokens) - 1)].append(((not first_in_line), curr))\n\n def _get_string():\n 'Starts just after an opening quotation mark'\n nonlocal i\n curr = ''\n while ((i != len(string)) and (string[i] != '\"')):\n char = string[i]\n if (char == '\\n'):\n raise ParseError('Multiline strings not supported!')\n if (char == '\\\\'):\n curr += char\n if ((i + 1) == len(string)):\n raise ParseError(\n 'String not terminated correctly (try escaping the backslash?)')\n curr += string[(i + 1)]\n i += 2\n else:\n curr += string[i]\n i += 1\n tokens.append(Token(curr))\n if (i == len(string)):\n raise ParseError('String missing a closing quote')\n tokens.append(Token(string[i]))\n i += 1\n while (i != len(string)):\n _get_token()\n while ((i != len(string)) and string[i].isspace()):\n if ((string[i] == '\\n') and i and prev_newline):\n first_in_line = True\n elif (string[i] == '\\n'):\n prev_newline = True\n i += 1\n if do_comments:\n for (key, val) in comments.items():\n tokens[min(key, (len(tokens) - 1))].comments.extend((x[1]\n for x in val))\n tokens[min(key, (len(tokens) - 1))\n ].comments_inline = all((x[0] for x in val))\n return tokens\n","sub_path":"Tareas/hw08/editor/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"294352100","text":"# Objects are encapsuled variables and functions in one entity. 
The classes are templates to create objects\nclass firstClass:\n exampleVar = \"Some text\"\n\n def someFunction(self):\n print(\"This message will self destruct if opened.\")\n\nmyFirstObject = firstClass() #saves class attributes to the object\n\nprint(myFirstObject.exampleVar) # use dot notations to access information inside\n\n# You can make multiple objects from the classes\nmySecondObject = firstClass()\nmySecondObject.exampleVar = \"Different words\"\n\nprint(myFirstObject.exampleVar)\nprint(mySecondObject.exampleVar)\n\nmyFirstObject.someFunction() # access internal functions using dot notation as well\n\n#Exercise\n# define the Vehicle class\nclass Vehicle:\n name = \"\"\n kind = \"car\"\n color = \"\"\n value = 100.00\n def description(self):\n desc_str = \"%s is a %s %s worth $%.2f.\" % (self.name, self.color, self.kind, self.value)\n return desc_str\n# your code goes here\ncar1 = Vehicle()\ncar2 = Vehicle()\n\ncar1.name = \"Fer\"\ncar1.kind = \"convertible\"\ncar1.color = \"red\"\ncar1.value = 60000.00\n\ncar2.name = \"Jump\"\ncar2.kind = \"van\"\ncar2.color = \"blue\"\ncar2.value = 10000.00\n\n# test code\nprint(car1.description())\nprint(car2.description())\n","sub_path":"Classes and Objects.py","file_name":"Classes and Objects.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"530442839","text":"#!/usr/bin/env python\nfrom PIL import Image\nimport numpy as np\n\ndef append_images(images, direction='horizontal',\n bg_color=(255,255,255), aligment='center'):\n '''\n input list of images, return combined images\n '''\n\n widths, heights = zip(*(i.size for i in images))\n\n if direction=='horizontal':\n new_width = sum(widths)\n new_height = max(heights)\n else:\n new_width = max(widths)\n new_height = sum(heights)\n\n new_im = Image.new('RGB', (new_width, new_height), color=bg_color)\n\n\n offset = 0\n for im in images:\n if direction=='horizontal':\n y = 0\n if aligment == 'center':\n y = int((new_height - im.size[1])/2)\n elif aligment == 'bottom':\n y = new_height - im.size[1]\n new_im.paste(im, (offset, y))\n offset += im.size[0]\n else:\n x = 0\n if aligment == 'center':\n x = int((new_width - im.size[0])/2)\n elif aligment == 'right':\n x = new_width - im.size[0]\n new_im.paste(im, (x, offset))\n offset += im.size[1]\n\n return new_im\n\ndef is_number(s):\n '''\n check if string is a number\n '''\n try:\n float(s)\n return True\n except ValueError:\n pass\n \n try:\n import unicodedata\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n \n return False","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"213454681","text":"import pytest\n\n\ndef test_default_tag_exists_and_has_name(hlwm):\n assert hlwm.get_attr('tags.count') == '1'\n assert hlwm.get_attr('tags.0.name') == 'default'\n\n\ndef test_add_tag(hlwm):\n focus_before = hlwm.get_attr('tags.focus.name')\n\n hlwm.call('add foobar')\n\n assert hlwm.get_attr('tags.count') == '2'\n assert hlwm.get_attr('tags.1.client_count') == '0'\n assert hlwm.get_attr('tags.1.client_count') == '0'\n assert hlwm.get_attr('tags.1.curframe_wcount') == '0'\n assert hlwm.get_attr('tags.1.curframe_windex') == '0'\n assert hlwm.get_attr('tags.1.frame_count') == '1'\n assert hlwm.get_attr('tags.1.index') == '1'\n assert hlwm.get_attr('tags.1.name') == 'foobar'\n assert 
hlwm.get_attr('tags.focus.name') == focus_before\n\n\ndef test_use_tag(hlwm):\n assert hlwm.get_attr('tags.focus.index') == '0'\n hlwm.call('add foobar')\n\n hlwm.call('use foobar')\n\n assert hlwm.get_attr('tags.focus.index') == '1'\n assert hlwm.get_attr('tags.focus.name') == 'foobar'\n\n\ndef test_use_previous(hlwm):\n hlwm.call('add foobar')\n hlwm.call('use foobar')\n assert hlwm.get_attr('tags.focus.index') == '1'\n\n hlwm.call('use_previous')\n\n assert hlwm.get_attr('tags.focus.index') == '0'\n\n hlwm.call('use_previous')\n\n assert hlwm.get_attr('tags.focus.index') == '1'\n\n\n@pytest.mark.parametrize(\"running_clients_num\", [0, 1, 5])\ndef test_new_clients_increase_client_count(hlwm, running_clients, running_clients_num):\n assert hlwm.get_attr('tags.0.client_count') == str(running_clients_num)\n\n\ndef test_move_focused_client_to_new_tag(hlwm):\n hlwm.call('add foobar')\n assert hlwm.get_attr('tags.0.client_count') == '0'\n assert hlwm.get_attr('tags.1.client_count') == '0'\n\n winid, _ = hlwm.create_client()\n assert hlwm.get_attr('tags.0.client_count') == '1'\n assert hlwm.get_attr('tags.1.client_count') == '0'\n\n hlwm.call('move foobar')\n\n assert hlwm.get_attr('tags.0.client_count') == '0'\n assert hlwm.get_attr('tags.0.curframe_wcount') == '0'\n assert hlwm.get_attr('tags.1.client_count') == '1'\n assert hlwm.get_attr('tags.1.curframe_wcount') == '1'\n assert hlwm.get_attr('clients', winid, 'tag') == 'foobar'\n\n\ndef test_merge_tag_into_another_tag(hlwm):\n hlwm.call('add foobar')\n hlwm.create_client()\n hlwm.call('use_index 1')\n\n hlwm.call('merge_tag default foobar')\n\n assert hlwm.get_attr('tags.count') == '1'\n assert hlwm.get_attr('tags.0.index') == '0'\n assert hlwm.get_attr('tags.0.name') == 'foobar'\n\n\nRENAMING_COMMANDS = [\n # commands for renaming the default tag\n ['set_attr', 'tags.by-name.default.name'],\n ['rename', 'default']]\n\n\n@pytest.mark.parametrize(\"rename_command\", RENAMING_COMMANDS)\ndef test_rename_tag(hlwm, hc_idle, rename_command):\n hlwm.call(rename_command + ['foobar'])\n\n assert hlwm.get_attr('tags.0.name') == 'foobar'\n assert hc_idle.hooks() == [['tag_renamed', 'foobar']]\n\n\n@pytest.mark.parametrize(\"rename_command\", RENAMING_COMMANDS)\ndef test_rename_tag_empty(hlwm, rename_command):\n hlwm.call_xfail(rename_command + [\"\"]) \\\n .expect_stderr('An empty tag name is not permitted')\n\n\n@pytest.mark.parametrize(\"rename_command\", RENAMING_COMMANDS)\ndef test_rename_tag_existing_tag(hlwm, rename_command):\n hlwm.call('add foobar')\n\n hlwm.call_xfail(rename_command + [\"foobar\"]) \\\n .expect_stderr('\"foobar\" already exists')\n\n\ndef test_floating_invalid_parameter(hlwm):\n # passing a non-boolean must be handled\n hlwm.call_xfail('floating invalidvalue') \\\n .expect_stderr('invalid argument')\n\n\n@pytest.mark.parametrize(\"tiled_num\", [3])\n@pytest.mark.parametrize(\"floated_num\", [2])\ndef test_client_count_attribute(hlwm, tiled_num, floated_num):\n hlwm.create_clients(tiled_num)\n floated = hlwm.create_clients(floated_num)\n for winid in floated:\n hlwm.call(f'attr clients.{winid}.floating true')\n\n assert int(hlwm.get_attr('tags.focus.client_count')) \\\n == tiled_num + floated_num\n\n\n@pytest.mark.parametrize(\"command\", [\n \"close_or_remove\",\n \"close_and_remove\",\n])\ndef test_close_and_or_remove_floating(hlwm, command):\n # set up some empty frames and a floating client\n hlwm.call('split explode')\n winid, proc = hlwm.create_client()\n hlwm.call(f'set_attr clients.{winid}.floating true')\n 
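# added clarifying comment: give the floating client input focus\n    # so the close/remove command below acts on it\n    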
hlwm.call(f'jumpto {winid}')\n assert hlwm.get_attr('clients.focus.winid') == winid\n assert int(hlwm.get_attr('tags.focus.frame_count')) == 2\n\n # run close_or_remove / close_and_remove\n hlwm.call(command)\n\n # in any case no frame may have been removed\n assert int(hlwm.get_attr('tags.focus.frame_count')) == 2\n # and the client is closed:\n proc.wait(10)\n\n\ndef test_close_and_remove_with_one_client(hlwm):\n hlwm.call('split explode')\n winid, proc = hlwm.create_client()\n assert hlwm.get_attr('clients.focus.winid') == winid\n assert int(hlwm.get_attr('tags.focus.frame_count')) == 2\n\n hlwm.call('close_and_remove')\n\n # this closes the client and removes the frame\n assert int(hlwm.get_attr('tags.focus.frame_count')) == 1\n proc.wait(10)\n\n\ndef test_close_and_remove_with_two_clients(hlwm):\n hlwm.call('split explode')\n winid, proc = hlwm.create_client()\n other_winid, _ = hlwm.create_client()\n assert hlwm.get_attr('clients.focus.winid') == winid\n assert int(hlwm.get_attr('tags.focus.frame_count')) == 2\n\n hlwm.call('close_and_remove')\n\n # this closes the client, but does not remove the frame\n # since there is a client left\n assert int(hlwm.get_attr('tags.focus.frame_count')) == 2\n proc.wait(10)\n\n\ndef test_close_and_remove_without_clients(hlwm):\n hlwm.call('split explode')\n assert int(hlwm.get_attr('tags.focus.frame_count')) == 2\n\n hlwm.call('close_and_remove')\n\n # this acts like remove:\n assert int(hlwm.get_attr('tags.focus.frame_count')) == 1\n\n\ndef test_close_or_remove_client(hlwm):\n # This is like close_and_remove, but requires hitting\n # 'close_or_remove' twice.\n hlwm.call('split explode')\n winid, proc = hlwm.create_client()\n assert int(hlwm.get_attr('tags.focus.frame_count')) == 2\n\n # On the first invocation:\n hlwm.call('close_or_remove')\n # only close the client\n proc.wait(10)\n assert int(hlwm.get_attr('tags.focus.frame_count')) == 2\n\n # On the second invocation:\n hlwm.call('close_or_remove')\n # remove the frame\n assert int(hlwm.get_attr('tags.focus.frame_count')) == 1\n","sub_path":"tests/test_tags.py","file_name":"test_tags.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"587437696","text":"#Kristen DeVore 76958230 Manasi Shingane 12382221 Lab Sec 10 12:30-1:50\n\nimport connectfour\n\ndef read_game_command()-> str:\n '''takes user input\n '''\n user_input = input('Enter move and column number: ')\n return user_input\n\ndef start():\n '''begins a new game\n '''\n newGame = connectfour.new_game()\n return newGame\n\ndef menu() -> str:\n '''prints instructions in menu for connect 4\n '''\n menu = \"To start the game, the red player makes a move by typing in DROP and a column number where you\"\n menu_two = \" would like to drop your piece. After that you can call POP to remove your game piece\"\n menu_three = \" from the bottom of the input column. Lowercase drop is fine, but try not to use a space after the column number. 
Enjoy!\"\n return(menu + menu_two + menu_three)\n\n\ndef update_board(updatedBoard: connectfour.GameState, user_input:str) -> connectfour.GameState:\n '''updates board so game state is always current\n '''\n valid_column_numbers = '1234567'\n if user_input[0:4].upper() == \"DROP\":\n try:\n if user_input[-1] not in valid_column_numbers:\n connectfour.InvalidMoveError(Exception)\n print('Error, try again')\n new_input = read_game_command()\n return update_board(updatedBoard, new_input)\n new_board = connectfour.drop(updatedBoard, int(user_input[-1])-1)\n return(new_board)\n except connectfour.InvalidMoveError:\n print('Error, try again')\n new_input = read_game_command()\n return update_board(updatedBoard, new_input)\n elif user_input[0:3].upper() == \"POP\":\n try:\n if user_input[-1] not in valid_column_numbers:\n connectfour.InvalidMoveError(Exception)\n print('Error, try again')\n new_input = read_game_command()\n return update_board(updatedBoard, new_input)\n new_board = connectfour.pop(updatedBoard, int(user_input[-1])-1)\n return(new_board)\n except connectfour.InvalidMoveError:\n print('Error, try again')\n new_input = read_game_command()\n return update_board(updatedBoard, new_input)\n else:\n print('Error, try again')\n new_input = read_game_command()\n return update_board(updatedBoard,new_input)\n\ndef translate_board(boardLine: str)->str:\n '''translates numbers to corresponding values\n '''\n trans = str.maketrans('012', '.RY')\n return boardLine.translate(trans)\n\ndef print_board(board:[[int]]) -> None:\n '''prints board so user can see it in readable format\n '''\n print('1 2 3 4 5 6 7')\n for row in range(connectfour.BOARD_ROWS):\n new_str = ''\n for cols in range(connectfour.BOARD_COLUMNS):\n new_str += translate_board(str(board[cols][row]))+ ' '\n print(new_str)\n print()\n\n","sub_path":"shared_game_logic.py","file_name":"shared_game_logic.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"173651844","text":"import libxml2\nfrom urllib import urlopen\n\nfrom model import Season as _Season, Match, GameDay as _GameDay, Team, Result\n\nURL = \"http://www.weltfussball.de\"\n\n# deactivate error messages from the validation [libxml2.htmlParseDoc]\ndef noerr(ctx, str):\n pass\n\nlibxml2.registerErrorHandler(noerr, None)\n\n\nclass GameDay(_GameDay):\n\n @staticmethod\n def get_url(season, number):\n return \"{0}/spielplan/{1}-{2}-{3}-spieltag/{4}/\".format(\n URL, season.league, season.season[0], season.season[1], number\n )\n\n @classmethod\n def from_data(cls, season, number):\n caption = \"{0}. 
Spieltag\".format(number)\n url = cls.get_url(season, number)\n data = {\n \"url\": url,\n \"season\": season,\n \"caption\": caption\n }\n return cls(data)\n\n def get_previous(self):\n num = int(self.url.split(\"/\")[-2])\n if num < 2:\n return None\n season = Season.get(self.season[\"url\"])\n return self.get(self.get_url(season, num - 1))\n\n\nclass Season(_Season):\n\n @staticmethod\n def get_url(league, season):\n return \"{0}/alle_spiele/{1}-{2}-{3}\".format(URL, league, season[0], season[1])\n\n @classmethod\n def from_data(cls, league, season):\n url = cls.get_url(league, season)\n return cls({\"url\": url})\n\n\ndef parse_result(result_string):\n home, guest = result_string.split(\":\")\n if \"-\" in (home, guest):\n return None\n else:\n home = int(home)\n guest = int(guest)\n return home, guest\n\n\ndef parse_season(season):\n content = urlopen(season.url).read()\n ctx = libxml2.htmlParseDoc(content, \"UTF-8\")\n gamedays = ctx.xpathEval(\"//div[@class='data']/table[@class='standard_tabelle']/tr\")\n day = None\n while gamedays:\n g = gamedays.pop(0)\n cls = g.get_children().prop(\"class\")\n if cls == \"ueberschrift\":\n if \"Spieltag\" in g.content:\n number = int(g.content.strip().split(\".\", 1)[0])\n day = GameDay.from_data(season, number)\n if day is not None:\n cols = g.xpathEval(\"td\")\n if len(cols) == 7:\n team_home = cols[1].get_children()\n team_guest = cols[3].get_children()\n team_home = Team(\n {\"caption\": team_home.prop(\"title\"), \"url\": team_home.prop(\"href\")}\n ).url\n team_guest = Team(\n {\"caption\": team_guest.prop(\"title\"), \"url\": team_guest.prop(\"href\")}\n ).url\n result = Result({\n \"url\": cols[4].xpathEval(\"a\")[0].prop(\"href\"),\n \"result\": parse_result(cols[4].content.strip())\n })\n Match({\n \"url\": result.url,\n \"gameday\": day.url,\n \"home\": team_home,\n \"guest\": team_guest,\n \"result\": result.result\n })\n return season\n\n\ndef parse_table(gameday):\n content = urlopen(gameday.url).read()\n ctx = libxml2.htmlParseDoc(content, \"UTF-8\")\n ranks = ctx.xpathEval(\"//div[@class='data']/table[@class='standard_tabelle'][1]/tr\")\n rank_no = 0\n table = {}\n while ranks:\n rank = ranks.pop(0)\n cols = rank.xpathEval(\"td\")\n if len(cols) == 10:\n n = cols[0].content.strip()\n if not n or n == '\\xc2\\xa0':\n pos = rank_no\n else:\n pos = rank_no = int(cols[0].content.strip())\n team = cols[2].xpathEval(\"a\")[0].prop(\"href\")\n table[team] = pos\n gameday.update_ranking(table)\n return gameday\n\n\nif __name__ == \"__main__\":\n season = Season.from_data(\"bundesliga\", (2011, 2012))\n parse_season(season)\n\n for i in range(0, 34):\n day = GameDay.from_data(season, i + 1)\n parse_table(day)\n","sub_path":"src/weltfussball.py","file_name":"weltfussball.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"544373925","text":"from copy import deepcopy\n\nclass Solution:\n def permute(self, nums: list) -> list:\n length = len(nums)\n if length <= 1:\n return [nums]\n elif length == 2:\n return [nums, list(reversed(nums))]\n else:\n inserted_num = nums[0]\n sub_permutations = self.permute(nums[1:])\n permutations = []\n for s_p in sub_permutations:\n for ind in range(len(s_p) + 1):\n s_p_copy = deepcopy(s_p)\n s_p_copy.insert(ind, inserted_num)\n permutations.append(s_p_copy)\n \n return permutations\n\n\n def permute_backtrack(self, nums: list) -> list:\n # nested recursive helper function\n def _permute_backtrack(nums: list, 
cur: list, ans: list) -> None:\n \"\"\"\n nums: a list of unique numbers\n cur: current partial answer (a list of numbers)\n ans: a list of answers\n \"\"\"\n if len(cur) == len(nums):\n ans.append(deepcopy(cur))\n return\n\n for num in nums:\n if num not in cur:\n cur.append(num)\n _permute_backtrack(nums, cur, ans)\n\n # go backward\n cur.pop()\n \n return\n \n\n cur = []\n ans = []\n _permute_backtrack(nums, cur, ans)\n return ans\n\n\n\nif __name__ == \"__main__\":\n solu = Solution()\n print(solu.permute_backtrack([1, 2, 3]))\n","sub_path":"codes/MartinMa28/python3/0046_permutations.py","file_name":"0046_permutations.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"314672142","text":"# import collections\n# def input_data():\n# n = int(input())\n# L = [int(input()) for _ in range(n)]\n# return L\n\n# def sum_x(num):\n# res = 0\n# while num > 9:\n# res += (num%10)**2\n# num //= 10\n# res += num**2\n# return res\n\n# def run():\n# L = input_data()\n# for i in range(len(L)):\n# res_dict = collections.defaultdict(int)\n# num = L[i]\n# res = num\n# while num != 1 and res_dict[num] == 0:\n# res_dict[num] = 1\n# num = sum_x(num)\n# if num == 1:\n# print(\"true\")\n# else:\n# print(\"false\")\n# return \n\n# run()\n# # print(sum_x(123))\n\ndef sumOfSquares(n):\n flag = False\n htb = dict()\n while not htb.get(n):\n if n == 1:\n flag = True\n break\n tmp = 0\n while n!=0:\n tmp += (n%10) * (n%10)\n n = n//10\n n = tmp\n\n return flag\n\ndef sumBits():\n m = int(input())\n nums = []\n for i in range(m):\n tmp = int(input())\n nums.append(tmp)\n \n for n in nums:\n if sumOfSquares(n):\n print(\"true\")\n else:\n print(\"false\")\n\n\n\nsumBits()","sub_path":"快手笔试/快手编程-2.py","file_name":"快手编程-2.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"117359121","text":"Import('*')\n\nexelibs = [\n env['PRJ_LIBS']['gmock_main'],\n env['PRJ_LIBS']['gmock'],\n env['PRJ_LIBS']['gtest']\n]\n\nif env['PLATFORM']=='win32' and env['CC']=='cl':\n env.AppendUnique(CCFLAGS='-bigobj')\n\nexename = 'gmock_test'\nexe = env.Program(exename, 'gmock_all_test.cc', LIBS=exelibs)\nenv['PRJ_EXES'][exename] = exe\nenv['PRJ_TSTS'][exename] = exe\n\n\n","sub_path":"googlemock/test/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"69045551","text":"# encoding: utf-8\n# Created by David Rideout on 2/7/14 4:58 PM\n# Copyright (c) 2013 Safari Books Online, LLC. 
All rights reserved.\n\nfrom storage.models import Book\n\n\ndef process_book_element(book_element):\n \"\"\"\n Process a book element into the database.\n\n :param book: book element\n :returns:\n \"\"\"\n\n book, created = Book.objects.get_or_create(pk=book_element.get('id'))\n book.title = book_element.findtext('title')\n book.description = book_element.findtext('description')\n\n for alias in book_element.xpath('aliases/alias'):\n scheme = alias.get('scheme')\n value = alias.get('value')\n\n book.aliases.get_or_create(scheme=scheme, value=value)\n\n book.save()","sub_path":"storage/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"530770809","text":"from math import sqrt, log, ceil\r\nfrom protein import AminoAcid, Sequence, loadFasta\r\n\r\n\r\nclass ScoreMatrix:\r\n\t\"\"\"\r\n\tRepresents a scoring matrix, used to determine the score between two Amino Acids\r\n\t\"\"\"\r\n\t\r\n\tdef __init__(self, path=\"\", description=\"\", ignore=None):\r\n\t\t\"\"\"\r\n\t\tCreates a Score object.\r\n\t\tIf 'path' is provided, loads the Score values from an iij file.\r\n\t\tOtherwise, creates a Score for all possible AminoAcids with values 0.\r\n\t\t\"\"\"\r\n\t\tself._description = description\r\n\t\tself._ignore = Sequence(ignore)\r\n\t\tself._matrix = []\r\n\t\tself._aaOrder = {}\r\n\t\tself._aaSequence = Sequence()\r\n\t\t\r\n\t\t#If path is provided, load directly from iij file\r\n\t\tif path != \"\":\r\n\t\t\twith open(path, 'r') as file:\r\n\t\t\t\tfoundAAOrder = False #Have we found the line with the amino acid values and order yet?\r\n\t\t\t\tfor line in file:\r\n\t\t\t\t\tif line[0] != \"#\": #Comments\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif not foundAAOrder: #Read aa values and order\r\n\t\t\t\t\t\t\tfor aa in line.split():\r\n\t\t\t\t\t\t\t\tself._aaSequence.extend(aa)\r\n\t\t\t\t\t\t\tself._aaOrder = {aa: index for aa, index in zip(self._aaSequence, range(len(self._aaSequence)))}\r\n\t\t\t\t\t\t\tfoundAAOrder = True\r\n\t\t\t\t\t\telse: #Read matrix values\r\n\t\t\t\t\t\t\tself._matrix.append([int(v) for v in line.split()])\r\n\t\t\r\n\t\t#Otherwise initialize matrix with 0\r\n\t\telse:\r\n\t\t\tlineSize = 1\r\n\t\t\tfor aa in AminoAcid.getAllNames():\r\n\t\t\t\tif AminoAcid(aa) not in self._ignore:\r\n\t\t\t\t\tself._aaSequence.extend(aa)\r\n\t\t\t\t\tself._aaOrder[self._aaSequence[-1]] = lineSize-1\r\n\t\t\t\t\tself._matrix.append([0 for i in range(lineSize)])\r\n\t\t\t\t\tlineSize += 1\r\n\t\t\t\t\r\n\t#Representation\r\n\tdef __repr__(self):\r\n\t\t\"\"\"\r\n\t\tRepresentation.\r\n\t\t\"\"\"\r\n\t\tsepSize = 4\r\n\t\tresult = [\"---------- \" + self._description + \" ----------\"]\r\n\t\tfor values, aa in zip(self._matrix, self._aaSequence):\r\n\t\t\ttempstr = '{a!s:<{w}}'.format(a=aa, w=sepSize)\r\n\t\t\tfor value in values:\r\n\t\t\t\ttempstr += '{v:<{w}}'.format(v=value, w=sepSize)\r\n\t\t\tresult.append(tempstr)\r\n\t\ttempstr = \" \"*sepSize\r\n\t\tfor aa in self._aaSequence :\r\n\t\t\ttempstr += '{a!s:<{w}}'.format(a=aa, w=sepSize)\r\n\t\tresult.append(\"\")\r\n\t\tresult.append(tempstr)\r\n\t\treturn \"\\n\".join(result)\r\n\t\r\n\t#Scoring\r\n\tdef setScore(self, aa1, aa2, score):\r\n\t\t\"\"\"\r\n\t\tSet the score assigned to AminoAcids 'aa1', 'aa2'.\r\n\t\t\"\"\"\r\n\t\tid1 = self._aaOrder[aa1]\r\n\t\tid2 = self._aaOrder[aa2]\r\n\t\tif id1 > id2:\r\n\t\t\tself._matrix[id1][id2] = score\r\n\t\telse:\r\n\t\t\tself._matrix[id2][id1] = 
score\r\n\t\t\r\n\tdef getScore(self, aa1, aa2):\r\n\t\t\"\"\"\r\n\t\tGet the score assigned to AminoAcids 'aa1', 'aa2'.\r\n\t\t\"\"\"\r\n\t\tid1 = self._aaOrder[aa1]\r\n\t\tid2 = self._aaOrder[aa2]\r\n\t\tif id1 > id2:\r\n\t\t\treturn self._matrix[id1][id2]\r\n\t\telse:\r\n\t\t\treturn self._matrix[id2][id1]\r\n\r\n\t\t\t\r\n# AA frequencies for complete UniProt database\r\n# from http://web.expasy.org/docs/relnotes/relstat.html, \"AMINO ACID COMPOSITION\"\r\nuniprob = {\r\n\tAminoAcid(\"Ala\") : .0826,\r\n\tAminoAcid(\"Gln\") : .0393,\r\n\tAminoAcid(\"Leu\") : .0965,\r\n\tAminoAcid(\"Ser\") : .0660,\r\n\tAminoAcid(\"Arg\") : .0553,\r\n\tAminoAcid(\"Glu\") : .0674,\r\n\tAminoAcid(\"Lys\") : .0582,\r\n\tAminoAcid(\"Thr\") : .0535,\r\n\tAminoAcid(\"Asn\") : .0406,\r\n\tAminoAcid(\"Gly\") : .0708,\r\n\tAminoAcid(\"Met\") : .0241,\r\n\tAminoAcid(\"Trp\") : .0109,\r\n\tAminoAcid(\"Asp\") : .0546,\r\n\tAminoAcid(\"His\") : .0227,\r\n\tAminoAcid(\"Phe\") : .0386,\r\n\tAminoAcid(\"Tyr\") : .0292,\r\n\tAminoAcid(\"Cys\") : .0137,\r\n\tAminoAcid(\"Ile\") : .0593,\r\n\tAminoAcid(\"Pro\") : .0472,\r\n\tAminoAcid(\"Val\") : .0687,\r\n\t\r\n}\t\t\t\r\n\r\n\r\nclass PSSM:\r\n\t\"\"\"\r\n\tPosition Specific Score Matrix.\r\n\tCreates a profile for a series of aligned sequences, and gives a score to each AA subsitution in a given column.\r\n\t\"\"\"\r\n\tdef __init__(self, description=\"\"):\r\n\t\tself.description=description\r\n\t\tself.seqCount = 0 #total number of sequences\r\n\t\tself.size = None #all sequences have the same size\r\n\t\tself.aaDistribution = None #amino acid distribution\r\n\t\tself.aaCount = None\r\n\t\tself.gapPenalties = None\r\n\t\t\r\n\t\r\n\tdef add(self, sequence):\r\n\t\t#check sequence size\r\n\t\tif self.size is None:\r\n\t\t\tself.size = len(sequence)\r\n\t\t\tself.aaDistribution = [{} for i in range(self.size)]\r\n\t\t\tself.aaCount = [0 for i in range(self.size)]\r\n\t\t\tself.gapPenalties = [0 for i in range(self.size + 1)]\r\n\t\t\r\n\t\tassert(len(sequence) == self.size)\r\n\t\t\t\r\n\t\t#update amino acid count for each column\r\n\t\tfor index in range(self.size):\r\n\t\t\tif not sequence[index].isGap():\r\n\t\t\t\tself.aaCount[index] += 1\r\n\t\t\t\ttry:\r\n\t\t\t\t\tself.aaDistribution[index][sequence[index]] += 1\r\n\t\t\t\texcept:\r\n\t\t\t\t\tself.aaDistribution[index][sequence[index]] = 1\r\n\t\t\r\n\t\t#increase sequence count\r\n\t\tself.seqCount += 1\r\n\t\t\r\n\tdef getDescription(self):\r\n\t\treturn self.description\r\n\t\t\r\n\tdef getScore(self, aminoAcid, columnIndex):\r\n\t\t#pseudocounts\r\n\t\talpha = self.aaCount[columnIndex] - 1\r\n\t\tbeta = sqrt(self.seqCount)\r\n\t\talphaplusbeta = alpha + beta\r\n\r\n\t\t#random probability of amino acid\r\n\t\ttry:\r\n\t\t\tp_aa = uniprob[aminoAcid]\r\n\t\texcept:\r\n\t\t\tp_aa = 0.001\r\n\t\t\r\n\t\t#evolutionary probability of amino acid\r\n\t\ttry:\r\n\t\t\tf_aa = self.aaDistribution[columnIndex][aminoAcid] / self.seqCount\r\n\t\texcept:\r\n\t\t\tf_aa = 0\r\n\t\t\t\r\n\t\tq_aa = (alpha * f_aa + beta * p_aa) / alphaplusbeta\r\n\t\t\r\n\t\treturn log(q_aa / p_aa)\r\n\t\r\n\t\r\n\tdef getGapPenalty(self, columnIndex):\r\n\t\treturn self.gapPenalties[columnIndex]\r\n\t\r\n\t\r\n\tdef setGapPenalty(self, penalty, columnIndex=None):\r\n\t\tif columnIndex is None:\r\n\t\t\tfor i in range(self.size):\r\n\t\t\t\tself.gapPenalties[i] = penalty\r\n\t\telse:\r\n\t\t\tself.gapPenalties[columnIndex] = penalty\r\n\t\r\n\tdef __len__(self):\r\n\t\treturn self.size\r\n\t\r\n\tdef __repr__(self):\r\n\t\tfor i in 
range(self.size):\r\n\t\t\tfor key, score in self.aaDistribution[i].items():\r\n\t\t\t\tprint(key, \": \", score, \"(\", self.getScore(key, i), \")\", sep=\"\", end=\", \")\r\n\t\t\tprint()\r\n\t\treturn \"\"\r\n\r\n\r\ndef belongs(outSequence, group, minMatches):\r\n\t\"\"\"\r\n\tReturns True if there are at least 'minMatches' matches between outSequence and any sequence \r\n\tfrom 'group'; False otherwise.\r\n\tSize of all sequences is assumed to be equal.\r\n\t\"\"\"\r\n\tfor inSequence in group:\r\n\t\tmatches = 0\r\n\t\tfor i in range(len(outSequence)):\r\n\t\t\tif outSequence[i] == inSequence[i]:\r\n\t\t\t\tmatches += 1\r\n\t\t\tif matches >= minMatches: #As soon as enough matches are found\r\n\t\t\t\treturn True\r\n\treturn False\r\n\r\n\t\r\ndef makeGroupsFromFasta(path, requiredIdentityPercent):\r\n\t\"\"\"\r\n\tLoads Sequences from file at 'path' and separates them in groups with an identity of \r\n\tat least 'requiredIdentityPercent'.\r\n\tReturns a list representing the groups as lists of Sequence objects.\r\n\t\"\"\"\r\n\tsequences = [seq for seq in loadFasta(path)]\r\n\tgroups = [[sequences[0]]] #First sequence is assigned to first group\r\n\t\r\n\tseqSize = len(sequences[0]) #Size of the sequences\r\n\t\r\n\t#Number of matches required to achieve requiredIdentityPercent\r\n\tminMatches = ceil((requiredIdentityPercent/100)*seqSize)\r\n\t\r\n\t#For each outSequence not yet in a group\r\n\tfor outSequence in sequences[1:]:\r\n\t\tgroupFound = False #has a group been found ?\r\n\t\tgroupIndex = 0 #index of the group we're looking in\r\n\t\t\r\n\t\t#Look for a group where outSequence belongs\r\n\t\twhile (not groupFound) and groupIndex < len(groups):\r\n\t\t\tif belongs(outSequence, groups[groupIndex], minMatches):\r\n\t\t\t\tgroups[groupIndex].append(outSequence)\r\n\t\t\t\tgroupFound = True\r\n\t\t\telse:\r\n\t\t\t\tgroupIndex += 1 #Move on to next group\r\n\t\t\r\n\t\t#If no group works, create a new one\r\n\t\tif not groupFound:\r\n\t\t\tgroups.append([outSequence])\r\n\t\r\n\treturn groups\r\n\r\n\t\r\ndef valueDictsFromGroups(groups):\r\n\t\"\"\"\r\n\tTransforms each group from 'groups' into a list of dictionaries, one for each Sequence column.\r\n\tThe dictionaries map each AminoAcid found in that column to their count.\r\n\tReturns the list of dictionaries and a list of the size (in Sequences) of their groups.\r\n\t\"\"\"\r\n\tseqSize = len(groups[0][0])\r\n\tgroupCount = len(groups)\r\n\t\r\n\tgroupValues = []\r\n\tgroupSizes = []\r\n\t\r\n\tfor group in groups: #For each group\r\n\t\tgroupAAs = []\r\n\t\tgroupSize = len(group) #Size of group (n°sequences)\r\n\t\t\r\n\t\tfor col in range(seqSize): #For each column\r\n\t\t\tgroupCol = {}\r\n\t\t\t\r\n\t\t\tfor seq in group: #For each sequence in group\r\n\t\t\t\ttry:\r\n\t\t\t\t\tgroupCol[seq[col]] += 1 #Increment count\r\n\t\t\t\texcept KeyError:\r\n\t\t\t\t\tgroupCol[seq[col]] = 1\r\n\t\t\t\r\n\t\t\tgroupAAs.append(groupCol)\r\n\t\tgroupValues.append(groupAAs)\r\n\t\tgroupSizes.append(groupSize)\r\n\treturn groupValues, groupSizes\r\n\r\n\t\r\ndef getFrequencies(groupValues, groupSizes):\r\n\t\"\"\"\r\n\tEvaluates the frequencies of AminoAcids within columns of groups in 'groupValues'.\r\n\tFrequencies are weighted according to group sizes in 'groupSizes'.\r\n\tReturns two dictionaries and a number:\r\n\t\t-'freqPairs' maps pairs of AminoAcids to their frequencies\r\n\t\t-'freqSingle' maps single AminoAcids to their frequencies\r\n\t\t-'freqSum' is the sum of all frequencies\r\n\t\"\"\"\r\n\tseqSize = len(groupValues[0]) #Size of the 
Sequences\r\n\tgroupCount = len(groupSizes) #Number of groups\r\n\t\r\n\tfreqPairs = {} #frequencies of amino acid pairs (fAB)\r\n\t\r\n\t#Frequencies of single amino acids (fA)\r\n\tfreqSingle = {AminoAcid(aa):0 for aa in AminoAcid.getAllNames()} \r\n\t\r\n\tfreqSum = 0 #Sum of frequencies sum(fAB)\r\n\t\r\n\tfor col in range(seqSize): #Each column\r\n\t\tfor groupAIndex in range(groupCount-1): #Each groupA\r\n\t\t\tgroupA = groupValues[groupAIndex]\r\n\t\t\tgroupASize = groupSizes[groupAIndex]\r\n\t\t\t\r\n\t\t\tfor groupBIndex in range(groupAIndex+1, groupCount): #Each further groupB\r\n\t\t\t\tgroupB = groupValues[groupBIndex]\r\n\t\t\t\tgroupBSize = groupSizes[groupBIndex]\r\n\t\t\t\t\r\n\t\t\t\tfor aaA, aaACount in groupA[col].items(): #Each AA from groupA\r\n\t\t\t\t\taaAFreq = aaACount / groupASize #Its frequency within groupA\r\n\t\t\t\t\t\r\n\t\t\t\t\tfor aaB, aaBCount in groupB[col].items(): #Each AA from groupB\r\n\t\t\t\t\t\taaBFreq = aaBCount / groupBSize #Its frequency within groupB\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\taaPairFreq = aaAFreq * aaBFreq #Pair frequency\r\n\t\t\t\t\t\tfreqSum += aaPairFreq\t#Sum of all frequencies\t\t\t\r\n\t\t\t\t\t\tfreqSingle[aaA] += aaPairFreq/2\r\n\t\t\t\t\t\tfreqSingle[aaB] += aaPairFreq/2\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t#Index is unique to this pair\r\n\t\t\t\t\t\tpairIndex = (aaA, aaB) if aaA > aaB else (aaB, aaA)\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tfreqPairs[pairIndex] += aaPairFreq\r\n\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\tfreqPairs[pairIndex] = aaPairFreq\r\n\t\r\n\treturn freqPairs, freqSingle, freqSum\r\n\t\r\n\t\r\ndef sumFrequenciesToProb(freqPairsList, freqSingleList, freqSumList):\r\n\t\"\"\"\r\n\tSums all frequencies in provided lists, and transforms them to probabilities according\r\n\tto the sum of frequencies found in 'freqSumList'.\r\n\t\"\"\"\r\n\tfSum = sum(freqSumList) #Absolute sum of all frequencies\r\n\tprobPairs, probSingle = {}, {} #Probabilities for pairs and single AAs\r\n\t\r\n\tfor freqPairs, freqSingle in zip(freqPairsList, freqSingleList):\r\n\t\tfor key,value in freqPairs.items():\r\n\t\t\t#Sum all frequencies for matching AA pairs, divided by fSum\r\n\t\t\ttry:\r\n\t\t\t\tprobPairs[key] += value / fSum \r\n\t\t\texcept:\r\n\t\t\t\tprobPairs[key] = value / fSum\r\n\t\tfor key,value in freqSingle.items():\r\n\t\t\t#Sum all frequencies for matching AAs, divided by fSum\r\n\t\t\ttry:\r\n\t\t\t\tprobSingle[key] += value / fSum\r\n\t\t\texcept:\r\n\t\t\t\tprobSingle[key] = value / fSum\r\n\t\r\n\treturn probPairs, probSingle\r\n\t\t\r\n\t\r\ndef blosumFromProbabilities(probPairs, probSingle, requiredIdentityPercent):\r\n\t\"\"\"\r\n\tFills and returns a Score according to the BLOSUM algorithm, from the probabilities\r\n\tof AA pairs and singletons provided in 'probPairs' and 'probSingle'.\r\n\t\"\"\"\r\n\t#Create empty Score, ignoring AAs B,Z,J,U,O\r\n\tscoreMatrix = ScoreMatrix(\"\", \"BLOSUM{}\".format(requiredIdentityPercent), \"BZJUO\")\r\n\t\r\n\tfor key, qAB in probPairs.items():\r\n\t\t#qAB is the evolutionary probability of the AA pair (A,B)\r\n\t\taaA, aaB = key #Both amino acids\r\n\t\tpA, pB = probSingle[aaA], probSingle[aaB] #Their single probabilities\r\n\t\t\r\n\t\t#eAB is the random probability of the AA pair (A, B) given their single probabilities\r\n\t\teAB = pA * pA if aaA == aaB else 2 * pA * pB\r\n\t\t\r\n\t\t#The BLOSUM score for this pair is the log-odds-ratio of evolutionary and random prob.\r\n\t\tsAB = int(round(2 * log(qAB / eAB, 2)))\r\n\t\tscoreMatrix.setScore(aaA, aaB, sAB) #Fill the 
matrix\r\n\t\r\n\treturn scoreMatrix\r\n\t\t\t\r\n\t\t\r\n\t\t\t\r\ndef blosumFromFasta(requiredIdentityPercent, *filepaths):\r\n\t\"\"\"\r\n\tCreates and returns a Score for all sequences in the provided 'filepaths',\r\n\tusing the BLOSUM approach with an identity of at least 'requiredIdentityPercent'.\r\n\tEach file is grouped independently and only then their weighted probabilities are merged.\r\n\t\"\"\"\r\n\t#Results for each different files are first stored in lists\r\n\tfreqPairsList, freqSingleList, freqSumList = [], [], [] \r\n\t\r\n\tfor path in filepaths: #for each .fasta file\r\n\t\tgroups = makeGroupsFromFasta(path, requiredIdentityPercent) #groups\r\n\t\tgroupValues, groupSizes = valueDictsFromGroups(groups) #groups as dicts\r\n\t\tfreqPairs, freqSingle, freqSum = getFrequencies(groupValues, groupSizes) #frequencies\r\n\t\t\r\n\t\tfreqPairsList.append(freqPairs) #Append results\r\n\t\tfreqSingleList.append(freqSingle)\r\n\t\tfreqSumList.append(freqSum)\r\n\t\r\n\t#Merge (sum) results together\r\n\tprobPairs, probSingle = sumFrequenciesToProb(freqPairsList, freqSingleList, freqSumList)\r\n\t#Create the BLOSUM matrix\r\n\tblosum = blosumFromProbabilities(probPairs, probSingle, requiredIdentityPercent)\r\n\tprint(blosum)","sub_path":"src/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":12824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"18424753","text":"\nfrom postgresql_api import *\n\n\napi_handle = postgresql_api()\ncur = api_handle.connect_db()\n\ncur.execute('''SELECT content_id, title, imdb_score_votes,\n\tcase when (imdb_score_votes > (select AVG(imdb_Score_votes) from contents)) then 'ABOVE'\n else\n \t'BELOW'\n end as Remarks\n from contents \n Group by content_id \n ORDER BY content_id ASC ;''')\n\napi_handle.commit_api()\n\nrow = cur.fetchone()\n\nwhile row is not None:\n\tprint(row)\n\trow = cur.fetchone()\n\nprint('Total Rows: ',cur.rowcount)\n\napi_handle.close_api()\n","sub_path":"exercises/solutions/Q_5_1.py","file_name":"Q_5_1.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"362926691","text":"import gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nimport numpy as np\nfrom sklearn import svm, preprocessing\n\n\nclass Customize_scaler():\n\n def __init__(self, start=-1, end=1):\n #self.data = np.array(data)\n self.start = start\n self.end = end\n self.width = end - start\n self.origin_data_min = None\n self.origin_data_max = None\n\n def fit(self, data):\n self.origin_data_min = np.array(data).min(axis=0)\n self.origin_data_max = np.array(data).max(axis=0)\n\n def transform(self, data):\n data = np.array(data)\n res = (data - self.origin_data_min) / (self.origin_data_max - self.origin_data_min) * self.width + self.start\n\n return res\n\n def inverse_transform(self, data):\n data = np.array(data)\n res = ((data - self.start) / self.width) * (self.origin_data_max -\n self.origin_data_min) + self.origin_data_min\n return res\n\n\nclass CustomizedEnv(gym.Env):\n metadata = {'render.modes': ['human']}\n reward_range = (-float('inf'), float('inf'))\n spec = None\n\n def __init__(self, done_criteria=False):\n # Define action and observation space\n # They must be gym.spaces objects\n # Example when using discrete actions:\n self.action_space = None # spaces.Box(low=np.array([-1]), high=np.array([1]), shape=np.array([1, 1]), dtype=np.float)\n # Example for using image 
as input:\n # spaces.Box(low=np.array([-np.inf]), high=np.array([np.inf]), shape=np.array([1, 1]), dtype=np.float)\n self.observation_space = None\n\n self.done_criteria = done_criteria\n self.state = None\n self.done = False\n self._max_episode_steps = 1000\n self.rewarder = None\n self.expert_replay_buffer = None\n self.use_log_transform = False\n self.a_scaler = None\n self.s_scaler = None\n self.use_scaler = False\n\n def step(self, action):\n reward = None\n a = action * self.action_space.high\n\n # print(a)\n\n a = np.clip(a, self.action_space.low, self.action_space.high)\n\n if self.use_scaler:\n a = self.a_scaler.inverse_transform([a])[0]\n # print(self.action_space.high)\n # print(a)\n\n # print()\n\n # if use log transform, then revert the action value\n if self.use_log_transform:\n tmp_a = a\n signs = np.where((tmp_a > 0).astype(np.int) == 1, 1, a)\n signs = np.where((tmp_a < 0).astype(np.int) == 1, -1, signs)\n # print(signs)\n\n a = (2**tmp_a - 1.) / 1 * signs # act = np.log10(1 + 1e7 * abs(act)) * signs 1+ 10e7*a = 10**v - 1 /\n # print(a)\n\n obs = self.state\n next_obs = obs + a\n next_obs = np.clip(next_obs, self.observation_space.low, self.observation_space.high)\n\n if self.use_scaler:\n return_state = self.s_scaler.inverse_transform([next_obs])[0]\n else:\n return_state = next_obs\n\n self.state = next_obs\n\n if self.done_criteria:\n self.done = self.check_done()\n else:\n self.done = False\n\n if self.rewarder is not None:\n try:\n reward = self.svm_potential_reward_func(obs)\n except:\n obs_act = {'observation': obs, 'action': a}\n reward = self.rewarder.compute_reward(obs_act)\n else:\n reward = 0\n \"\"\"\n if self.state[0] > 0.45:\n self.done = True\n reward = 100\n else:\n reward = 0\n \"\"\"\n # print(next_obs)\n return return_state, reward, self.done, {}\n\n def reset(self, init_obs=None, out_of_set=False):\n if self.rewarder is not None:\n try:\n self.rewarder.reset()\n except:\n raise Exception('cannot reset rewarder')\n if init_obs != None:\n self.state = init_obs\n else:\n s = np.random.choice(np.linspace(0, self.init_state.shape[0] - 1, self.init_state.shape[0], dtype=np.int16), 1)[0]\n #print(s, np.linspace(0, states.shape[0] - 1, states.shape[0], dtype=np.int16))\n s = self.init_state[s]\n #print('random choose initial state from demo', s)\n if not out_of_set:\n # print(s)\n #print(self.observation_space.low, self.observation_space.high)\n # elf.observation_space.low, self.observation_space.high) * 0.0001\n self.state = s + np.random.uniform(-4e-3, 4e-3)\n else:\n # print(self.observation_space.low)\n # print(self.observation_space.high)\n # self.observation_space.low, self.observation_space.high) * 0.0001\n self.state = s + np.random.uniform(-4e-3, 4e-3)\n\n if self.use_scaler:\n self.state = self.s_scaler.inverse_transform([self.state])[0]\n # print(self.state)\n # print()\n # time.sl()\n self.done = False\n\n return self.state\n\n def check_done(self):\n return False\n\n def render(self, mode='human'):\n return\n\n def close(self):\n return\n\n def set_params(self, expert_replay_buffer, action_space=None, observation_space=None, done_criteria=False, _max_episode_steps=1000, rewarder=None, use_scaler=True):\n #self.expert_replay_buffer = np.array(expert_replay_buffer.buffer())\n states = []\n actions = []\n self.init_state = []\n\n self.use_scaler = use_scaler\n\n self.rewarder = rewarder\n print(len(expert_replay_buffer))\n for traj in expert_replay_buffer:\n tmp = []\n # print(len(traj))\n for idx, trans in enumerate(traj):\n 
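# collect every transition: observations feed the state pool; actions (after the optional sign-preserving log2 squash that step() inverts) feed the action pool\n                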
states.append(trans['observation'])\n act = trans['action']\n if self.use_log_transform:\n a = act\n signs = np.where((a > 0).astype(np.int) == 1, 1, a)\n signs = np.where((a < 0).astype(np.int) == 1, -1, signs)\n act = np.log2(1 + 1 * abs(act)) * signs\n\n actions.append(act)\n tmp.append(trans['action'])\n\n if idx == 0:\n # print()\n # print(act)\n # print()\n self.init_state.append(trans['observation'])\n #print(np.argmax(np.array(tmp), axis=0))\n # print(np.array(tmp).max(axis=0))\n\n if self.use_scaler:\n self.s_scaler = Customize_scaler()\n self.s_scaler.fit(states)\n self.a_scaler = Customize_scaler()\n self.a_scaler.fit(actions)\n # print(np.array(actions).max(axis=0))\n # print(np.array(actions).min(axis=0))\n states = self.s_scaler.transform(np.array(states))\n actions = self.a_scaler.transform(np.array(actions))\n self.init_state = self.s_scaler.transform(np.array(self.init_state))\n # print(states.shape)\n max_s = np.ones(states.shape[1])\n max_a = np.ones(states.shape[1])\n min_s = np.ones(states.shape[1]) * -1\n min_a = np.ones(states.shape[1]) * -1\n\n # print(self.a_scaler.inverse_transform(max_a))\n # print(self.a_scaler.inverse_transform(min_a))\n\n else:\n states = np.array(states)\n actions = np.array(actions)\n self.init_state = np.array(self.init_state)\n #max_s = states.max(axis=0)\n #max_a = actions.max(axis=0)\n #min_s = states.min(axis=0)\n #min_a = actions.min(axis=0)\n #print(max_s, '\\n', min_s, '\\n', max_a, '\\n', min_a,)\n # print()\n #print('\\n', max_a - np.abs(min_a,))\n # print()\n\n max_s = np.abs(states).max(axis=0)\n max_a = np.abs(actions).max(axis=0)\n min_s = -1 * max_s\n min_a = -1 * max_a\n #print('\\n', max_a - np.abs(min_a,))\n # time.sl()\n\n self.expert_replay_buffer = [states, actions]\n\n if action_space is not None:\n self.action_space = action_space\n else:\n self.action_space = spaces.Box(low=min_a, high=max_a,\n shape=max_a.shape, dtype=np.float)\n\n if observation_space is not None:\n self.observation_space = observation_space\n else:\n self.observation_space = spaces.Box(low=min_s, high=max_s,\n shape=max_s.shape, dtype=np.float)\n\n self.done_criteria = done_criteria\n self._max_episode_steps = _max_episode_steps\n\n return\n\n def set_OCC_rewarder(self):\n states = np.vstack(self.expert_replay_buffer[:, 0])\n\n demo = states\n print('training one class svm...')\n self.rewarder = svm.OneClassSVM(nu=0.1, kernel=\"rbf\", gamma=0.1)\n self.rewarder.fit(demo)\n \"\"\"\n obs = np.expand_dims(np.array(states[0]), axis=0)\n r = self.rewarder.decision_function(obs)\n print(obs, r)\n \"\"\"\n\n return\n\n def svm_potential_reward_func(self, obs):\n obs = np.expand_dims(np.array(obs), axis=0)\n r = self.rewarder.decision_function(obs)\n\n return r\n","sub_path":"core/customized_env_2.py","file_name":"customized_env_2.py","file_ext":"py","file_size_in_byte":9415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"309255300","text":"import typing\nimport weakref\n\nfrom pycommand_bus import constants\n\n\nCommandCandidateType = typing.TypeVar('CommandCandidateType')\n\n\nADD_HANDLER_DECORATOR_PROP_NAME = 'handler'\n\n\nclass CommandType:\n @classmethod\n def handler(self, fun: typing.Callable) -> typing.Callable:\n pass\n\n\ndef command(class_: CommandCandidateType) -> typing.Union[CommandCandidateType, CommandType]:\n def _add_handler_decorator(func: typing.Callable) -> typing.Callable:\n if not callable(func):\n raise Exception('Handler must be callable!')\n\n if hasattr(class_, 
constants.HANDLER_ATTR_NAME):\n            raise Exception('You can use only one handler for each command!')\n\n        setattr(class_, constants.HANDLER_ATTR_NAME, weakref.ref(func))\n        return func\n\n    try:\n        setattr(class_, 'handler', _add_handler_decorator)\n    except AttributeError:\n        raise Exception('Can not set handler')\n    return class_\n","sub_path":"pycommand_bus/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"356273267","text":"import dataProcess.DataPreProcess as getData\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.optim as optim\n\n\nclass RunPredict:\n\tdef __init__(self,epochs):\n\t\tself.epochs = epochs\n\n\tdef run(self):\n\t\tself.seq_len = 100 # set the sequence length\n\t\tself.features = 3\n\t\tself.path = 'data/' # set the data storage path\n\n\t\tstockCode = getData.getStockCode() # fetch the training-set stock codes; which codes are used is configured inside the function\n\t\tstockCode = stockCode # stock code\n\n\t\tif True: # save or load data: True fetches the data online, processes it and saves it under path; False loads previously stored data\n\t\t\tfile_name = self.path + 'pos_' # pos_40_train_z.npz\n\t\t\tresult, train = getData.data_save(stockCode, seqlen = self.seq_len, file_name = file_name)\n\t\telse: # load the file\n\t\t\tfile_name = 'pos_'\n\t\t\ttrain = np.load(self.path + file_name + 'train_len' + str(self.seq_len) + '.npz')\n\n\t\tX_train, y_train = getData.map_to_train(train)\n\n\t\tprint(\"X_train:\", X_train)\n\t\tprint(\"X_train length:\", len(X_train))\n\t\tprint(\"y_train:\", y_train)\n\t\tprint(\"y_train length:\", len(y_train))\n\n\t\tloss_linear = losses.mse\n\t\topti = optimizers.Adam(lr=0.01) # set the learning rate; usually start from 0.001 and decrease gradually, or adjust to the situation: if training breaks, try a smaller value\n\t\tmodel_linear.compile(loss=loss_linear, optimizer=opti, metrics=['accuracy']) # compile the model so it is ready for training\n\n\t\tlossss = []\n\n\t\toptimizer = optim.Adam(Linear.parameters(), lr=0.001)\n\t\tloss_func = nn.MSELoss()\n\n\t\tfor i in range(self.epochs):\n\t\t\t# m = RNN()\n\t\t\trunning_loss = 0.0\n\t\t\t# for i in X_train.size():\n\n\t\t\tposition = 0\n\t\t\tx_size = X_train.shape[0]\n\t\t\twhile (position < x_size):\n\n\t\t\t\toptimizer.zero_grad()\n\t\t\t\tend = position + batch_size\n\n\t\t\t\tif end > x_size:\n\t\t\t\t\tend = x_size\n\n\t\t\t\tpred = Linear(X_train[position:end, :, :]) # torch.Size([1000, 1, 1])\n\t\t\t\t# loss = loss_func(out, targ_trajs)\n\n\t\t\t\tloss = loss_func(pred[:, :1, :], y_train[position:end])\n\t\t\t\tloss.backward()\n\t\t\t\toptimizer.step()\n\t\t\t\trunning_loss += loss.item()\n\t\t\t\tposition = end\n\t\t\t# print(loss.item())\n\t\t\t# print(running_loss)\n\n\t\t\tprint('Epoch:{}, Loss:{:.5f}'.format(i + 1, running_loss))\n\n\t\t\tpred = Linear(X_train[:300, :, :])\n\t\t\tx = pred[:300, :1, :].cpu().detach().numpy() # convert to numpy\n\t\t\ty = y_train[:300, :1, :].cpu().detach().numpy() # convert to numpy\n\n\t\t\t# X_num = x.size()\n\t\t\tx = x.reshape(300)\n\t\t\ty = y.reshape(300)\n\n\t\t\tplt.plot(x, label='true trajectory')\n\t\t\tplt.plot(y, label='learned trajectory (t>0)')\n\n\t\t\tplt.legend(loc='upper right') # draw the legend\n\n\t\t\tplt.savefig('./vis-nsample1-Linear.png', dpi=500)\n\t\t\tplt.close()\n\n\nif __name__ == \"__main__\":\n\trun = RunPredict(5)\n\trun.run()","sub_path":"torchPredict/RunPredict.py","file_name":"RunPredict.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"144144657","text":"import pygal\nimport pygal.style\n\n\ndef create_piechart(data, imgfile):\n    font_family = \"Arial\"\n    font_size = 25\n    red = \"#FF0000\"\n    yellow = \"#FFFF00\"\n    green = \"#00FF00\"\n    grey = 
\"#DDDDDD\"\n\n style = pygal.style.Style()\n style.colors = (yellow, red, grey, green)\n\n style.background = \"#FFFFFF\"\n style.plot_background = \"#FFFFFF\"\n\n style.font_family=font_family\n style.label_font_family=font_family\n style.legend_font_family=font_family\n style.title_font_family=font_family\n style.value_font_family=font_family\n style.value_label_font_family=font_family\n\n style.font_size=font_size\n style.label_font_size=font_size\n style.legend_font_size=font_size\n style.title_font_size=font_size\n style.value_font_size=font_size\n style.value_label_font_size=font_size\n\n config = pygal.Config()\n config.show_legend = True\n config.human_readable = True\n config.print_values=True\n config.print_labels=True\n pie_chart = pygal.Pie(config=config, style=style, inner_radius=.4)\n\n for item in data:\n pie_chart.add(item, data[item])\n\n pie_chart.render_to_png(imgfile)\n\n","sub_path":"src/allure_docx/piechart.py","file_name":"piechart.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"45415026","text":"import unittest\nimport random\nfrom lab2.code.sort_in_memory.sort import MergeSort, NotNumber\n\n\nclass TestSort(unittest.TestCase):\n def test_sorting(self):\n input_file = \"numbers.txt\"\n output_file = \"number_sort.txt\"\n with open(input_file, 'w') as f:\n f.writelines('{}\\n'.format(random.randint(-1000000, 1000000)) for _ in range(1000000))\n\n MergeSort(input_file, output_file)\n\n num_count = 0\n prev_num = 0\n total = 1000000\n with open(output_file) as fp:\n for line in fp:\n next_num = int(line)\n if num_count > 0:\n self.assertTrue(next_num >= prev_num)\n num_count += 1\n prev_num = next_num\n self.assertEqual(num_count, total)\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"Solutions/Task2/853503_Алина_Садовская/lab2/tests/sort/test_sort.py","file_name":"test_sort.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"455564356","text":"# -*- coding: utf-8 -*-\n#from math import sin\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#def mans_sinuss(x):\ndef mans_sinuss(x,n):\n k = 0\n a = (-1)**0*x**1/(1)\n S = a\n# while k<0:\n while k 3 or human > marbles:\n\t\tprint(\"Sorry, that is not a valid option. Try again!\")\n\t\treturn 0\n\telse:\n\t\t# At the end of each turn, the program should print out the \n\t\t# number of marbles removed in the previous turn\n\t\tprint(\"You removed {} marbles.\".format(human))\n\treturn human\n\ndef computer_input(marbles):\n\tprint(\"Computer's turn...\")\n\tif marbles > 3:\n\t\tcomputer = random.randint(1,3)\n\telse:\n\t\tcomputer = random.randint(1, marbles)\n\tprint(\"Computer removed {} marbles.\".format(computer))\n\treturn computer\n\ndef check_winner(marbles, winner):\n\tif marbles == 0:\n\t\tprint(\"There are no marbles left. 
{} wins!\".format(winner))\n\t\tquit()\n\ndef interactive_mode():\n\tprint(\"Let's play the game of Seventeen!\")\n\tmarbles = 17\n\twhile marbles > 0:\n\t\thuman = 0\n\t\twhile human == 0:\n\t\t\thuman = human_input(marbles)\n\t\t\tmarbles -= human\n\t\t\tprint(\"Number of marbles left in jar: {}\\n\".format(marbles))\n\t\t\tcheck_winner(marbles, 'Computer')\n\t\t# Computer chooses random number\n\t\tcomputer = computer_input(marbles)\n\t\tmarbles -= computer\n\t\tprint(\"Number of marbles left in jar: {}\\n\".format(marbles))\n\t\tcheck_winner(marbles, 'You')\n\t\t\n\n\n\ndef main(): \n\tprint(interactive_mode())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"seventeen1.py","file_name":"seventeen1.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"620290135","text":"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for glazier.lib.bitlocker.\"\"\"\n\nfrom unittest import mock\n\nfrom absl.testing import absltest\nfrom glazier.lib import bitlocker\n\nfrom glazier.lib import constants\n\n\nclass BitlockerTest(absltest.TestCase):\n\n @mock.patch.object(\n bitlocker.powershell.PowerShell, 'RunCommand', autospec=True)\n def testPowershell(self, ps):\n bit = bitlocker.Bitlocker(mode='ps_tpm')\n bit.Enable()\n ps.assert_has_calls([\n mock.call(mock.ANY, [\n \"$ErrorActionPreference='Stop'\", ';', 'Enable-BitLocker', 'C:',\n '-TpmProtector', '-UsedSpaceOnly', '-SkipHardwareTest ', '>>',\n '%s\\\\enable-bitlocker.txt' % constants.SYS_LOGS_PATH\n ]),\n mock.call(mock.ANY, [\n \"$ErrorActionPreference='Stop'\", ';', 'Add-BitLockerKeyProtector',\n 'C:', '-RecoveryPasswordProtector', '>NUL'\n ])\n ])\n ps.side_effect = bitlocker.powershell.PowerShellError\n self.assertRaises(bitlocker.BitlockerError, bit.Enable)\n\n @mock.patch.object(bitlocker.subprocess, 'call', autospec=True)\n def testManageBde(self, call):\n bit = bitlocker.Bitlocker(mode='bde_tpm')\n call.return_value = 0\n cmdline = ('C:\\\\Windows\\\\System32\\\\cmd.exe /c '\n 'C:\\\\Windows\\\\System32\\\\manage-bde.exe -on c: -rp >NUL')\n bit.Enable()\n call.assert_called_with(cmdline, shell=True)\n call.return_value = 1\n self.assertRaises(bitlocker.BitlockerError, bit.Enable)\n\n def testFailure(self):\n bit = bitlocker.Bitlocker(mode='unsupported')\n self.assertRaises(bitlocker.BitlockerError, bit.Enable)\n\n\nif __name__ == '__main__':\n absltest.main()\n","sub_path":"glazier/lib/bitlocker_test.py","file_name":"bitlocker_test.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"549491305","text":"\n# # -*- coding: utf-8 -*-\n# import yaml\n# import io\n#\n# # Define data\n# data = {\n# 'a list': [\n# 1,\n# 42,\n# 3.141,\n# 1337,\n# 'help',\n# u'€'\n# ],\n# 'a string': 'bla',\n# 'another dict': {\n# 'foo': 'bar',\n# 'key': 'value',\n# 'the answer': 42\n# }\n# }\n#\n# # Write 
YAML file\n# with io.open('data.yaml', 'w', encoding='utf8') as outfile:\n# yaml.dump(data, outfile, default_flow_style=False, allow_unicode=True)\n#\n# # Read YAML file\n# with open(\"data.yaml\", 'r') as stream:\n# data_loaded = yaml.safe_load(stream)\n#\n# print(data == data_loaded)\n\nPATH_RAW_DATA = '../data_1/'\nPATH_RAW_DATA_TRAIN = PATH_RAW_DATA + 'train/'\nPATH_RAW_DATA_TEST = PATH_RAW_DATA + 'test/'\n\nPATH_DATASET = PATH_RAW_DATA + 'dataset/'\n\nPATH_MODEL = PATH_RAW_DATA + 'model/'\nFILE_MODEL = PATH_MODEL + 'xgb_model.pkl'\nFILE_SCALER = PATH_MODEL + 'model_scaler.pkl'\n\n# Следует из исходных данных\nCHURNED_START_DATE = '2019-09-01'\nCHURNED_END_DATE = '2019-10-01'\n\nINTER_1 = (1, 7)\nINTER_2 = (8, 14)\nINTER_3 = (15, 21)\nINTER_4 = (22, 28)\nINTER_LIST = [INTER_1, INTER_2, INTER_3, INTER_4]","sub_path":"ETL/etl_config.py","file_name":"etl_config.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"526094812","text":"import PySimpleGUI as sg\r\nimport telnetlib\r\nfrom time import sleep\r\nfrom datetime import datetime\r\nimport os\r\n#TEMA LAYOUTS\r\nsg.theme('tema') \r\n#███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████\r\n# COLUNA, B0TÕES E INPUTS\r\n#███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████\r\ncol = [ \r\n [sg.Frame('Frame',[[sg.Text('Username Password')],\r\n [sg.InputText(' ', size=(15,1),key='username'),sg.InputText(' ', size=(15,1),key='password')],#INPUTS USERNAMEN E PASSWORD\r\n [sg.Text('Tecnico Serial')],\r\n [sg.Combo(['Caio' ,'Edgar' ,'Felipe' ,'Fernando','Joelson' ,'Jose Cesar' ,'Josivam' ,'Juracy' ,'Leandro' ,'Paulo' ,'Renardio' ,'Ricardo' ,'Rodrigo' ,'Suporte', 'Vilson' ,'Vitor','Willes'], size=(13,100),key='tecnico'),sg.InputText(' ', size=(15,1),key='serial')],\r\n [sg.Text('NAP Porta')],\r\n [sg.InputText('', size=(15,1),key='nap'),sg.InputText(' ', size=(15,1),key='porta')],], key='-COL1-')],\r\n [sg.Text('\\r')],\r\n [sg.Button(image_filename='prov.png',button_color='#FFFFFF', key='provisionar',border_width=0),\r\n sg.Button(image_filename='buscar.png',button_color='#FFFFFF',key='buscar',border_width=0)], \r\n [sg.Button(image_filename='remove.png',button_color='#FFFFFF', key='remover',border_width=0),\r\n sg.Button(image_filename='clean.png',button_color='#FFFFFF',key='Limpar',border_width=0),\r\n sg.Button(image_filename='position.png',button_color='#FFFFFF', key='position',border_width=0)],\r\n [sg.Text(' Comandos Adicionais')], \r\n [sg.Button(image_filename='unprovision.png',button_color='#FFFFFF', key='semuso',border_width=0)],\r\n [sg.Button(image_filename='semuso.png',button_color='#FFFFFF', key='remover',border_width=0)],\r\n \r\n ]\r\n#COLUNA OUTPUT \r\ncol2 = [\r\n [sg.Output(text_color='#DCDCDC',size=(105, 31),background_color='#000000',key='saida',font=('hack', 8))],\r\n ]\r\n\r\n#███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████\r\n# TEL4 PRINCIPAL\r\n#███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████\r\nlayout = [ \r\n [sg.Column(col,vertical_alignment='top',pad=(5,0)),sg.Column(col2,pad=(0,0))]\r\n ]\r\n \r\nwindow = sg.Window('hat', 
layout,margins=(5,0),icon='_hat.ico')\r\n#███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████\r\n# FUNÇ0ES DO SISTEMA\r\n#██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████���████████████████████████\r\n\r\ndef tl1(command,ip):\r\n tn = telnetlib.Telnet(ip, '1023', timeout=100)\r\n tn.read_until(b\"< \").decode()\r\n tn.write(''.encode('ascii') + b\"\\r\\n\")\r\n tn.read_until(b\": \")\r\n tn.write('t'.encode('ascii') + b\"\\r\\n\")\r\n #username\r\n tn.read_until(b\": \")\r\n tn.write('SUPERUSER'.encode('ascii') + b\"\\r\\n\")\r\n #password\r\n tn.read_until(b\": \")\r\n tn.write('ANS#150'.encode('ascii') + b\"\\r\\n\")\r\n\r\n #commands\r\n tn.write(command.encode('ascii') + b\"\\r\\n\")\r\n\r\n sleep(10)\r\n n = tn.read_very_eager().decode(\"utf-8\")\r\n\r\n print(n)\r\n print('\\n\\n PROVISIONADO') \r\n sleep(0.1)\r\n tn.close()\r\n\r\n\r\ndef command_prov(host,position,vlan,nap,count):\r\n posição = (position + '-') + str(count)\r\n username = values['username'].strip()\r\n senha = values['password'].strip()\r\n porta = values['porta'].strip()\r\n serial = values['serial'].strip()\r\n\r\n\r\n prov = f'''ENT-ONT::ONT-{posição}::::DESC1=\"{username}\",DESC2=\"{nap} | {porta}\",SERNUM={serial.replace(':','')},SWVERPLND=AUTO,OPTICSHIST=ENABLE,PLNDCFGFILE1=AUTO,DLCFGFILE1=AUTO,VOIPALLOWED=VEIP;\r\nED-ONT::ONT-{posição}:::::IS;\r\nENT-ONTCARD::ONTCARD-{posição}-14:::VEIP,1,0::IS;\r\nENT-LOGPORT::ONTL2UNI-{posição}-14-1:::;\r\nED-ONTVEIP::ONTVEIP-{posição}-14-1:::::IS;\r\nSET-QOS-USQUEUE::ONTL2UNIQ-{posição}-14-1-0::::USBWPROFNAME=HSI_1G_UP ;\r\nSET-VLANPORT::ONTL2UNI-{posição}-14-1:::MAXNUCMACADR=4,CMITMAXNUMMACADDR=1;\r\nENT-VLANEGPORT::ONTL2UNI-{posição}-14-1:::0,{vlan}:PORTTRANSMODE=SINGLETAGGED;\r\nENT-VLANEGPORT::ONTL2UNI-{posição}-14-1:::0,102:PORTTRANSMODE=SINGLETAGGED;\r\nENT-HGUTR069-SPARAM::HGUTR069SPARAM-{posição}-1::::PARAMNAME=InternetGatewayDevice.WANDevice.1.WANConnectionDevice.1.X_CT-COM_WANGponLinkConfig.VLANIDMark,PARAMVALUE={vlan};\r\nENT-HGUTR069-SPARAM::HGUTR069SPARAM-{posição}-2::::PARAMNAME=InternetGatewayDevice.WANDevice.1.WANConnectionDevice.1.WANPPPConnection.1.Username,PARAMVALUE=\"{username}\";\r\nENT-HGUTR069-SPARAM::HGUTR069SPARAM-{posição}-3::::PARAMNAME=InternetGatewayDevice.WANDevice.1.WANConnectionDevice.1.WANPPPConnection.1.Password,PARAMVALUE=\"{senha}\";\r\n'''\r\n \r\n \r\n tl1(prov,host)\r\n\r\n\r\n#=======================POS1Ç03S D1SP0N1VE1S NA PON - PROVISIONAMENT0\r\ndef free_position(host,position,vlan,nap):\r\n #-separa posiçõEs utilizadas na PON \r\n trash = open('trash_position.txt','r')\r\n clean = open(\"clean_position.txt\",\"w\")\r\n n = trash.readlines()\r\n for line in n: \r\n if (line[:4] == \"1/1/\"): \r\n if(line[19] == \"/\"):\r\n clean.write(line[20:23])\r\n clean.write(\"\\n\")\r\n elif(line[20] == \"/\"):\r\n clean.write(line[21:24])\r\n clean.write(\"\\n\")\r\n elif(line[21] == \"/\"):\r\n clean.write(line[22:25])\r\n clean.write(\"\\n\") \r\n else:\r\n clean.write(line[19:22]) \r\n clean.write(\"\\n\") \r\n clean.write(\"0\")\r\n clean.close()\r\n trash.close()\r\n\r\n #-procura posição sem uso na PON\r\n clean = open(\"clean_position.txt\",\"r\")\r\n positions = clean.readlines()\r\n count = 0\r\n\r\n for line in positions:\r\n count = count + 1 \r\n free = line[:3]\r\n if (int(free) - count != 0):\r\n \r\n break\r\n clean.close()\r\n if (count == 129):\r\n 
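# reaching 129 implies no gap was found among the 128 ONT positions on this PON, so it is full\r\n        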
print(\"PON Lotada\")\r\n \r\n print('Posição livre:',count)\r\n print(100*'=')\r\n \r\n \r\n log = open('log.txt','a')\r\n username = values['username']\r\n date = datetime.now().strftime('data: %d/%m/%y %H:%M')\r\n tecnico = values['tecnico']\r\n serial = values['serial']\r\n log.write(f'\\n{date} {tecnico} {nap} {serial} {position, count} {host} {username}')\r\n log.close()\r\n\r\n command_prov(host,position,vlan,nap,count)\r\n\r\n\r\n#======================EX3CUT4 COMAND0S NO CLI\r\ndef cli(command,host,position=0,vlan=0,nap=0):\r\n trash = open('trash_position.txt','w')\r\n\r\n #conexão telnet\r\n tn = telnetlib.Telnet(host,'23', timeout=20)\r\n\r\n tn.read_until(b\": \")\r\n tn.write('isadmin'.encode('ascii') + b\"\\r\\n\")\r\n #password\r\n tn.read_until(b\": \")\r\n tn.write('ANS#150'.encode('ascii') + b\"\\r\\n\")\r\n #commands\r\n tn.write('environment inhibit-alarms'.encode('ascii') + b\"\\n\")\r\n tn.write(command.encode('ascii') + b\"\\n\")\r\n sleep(8)\r\n n = tn.read_very_eager().decode(\"utf-8\")\r\n \r\n trash.write(n)\r\n \r\n trash.close()\r\n print(n) \r\n sleep(0.1)\r\n tn.close()\r\n\r\n print(100*'=')\r\n \r\n\r\n#========================PR0CUR4 NAP NO BANCO DE DADOS - PROVISIONAMENT0\r\ndef nap_data_prov():\r\n n = values['nap'].strip() \r\n if n[:5] != 'teste' and n[:3] != 'NAP':\r\n n = 'NAP-' + n.replace('nap-','')\r\n data = open('data.txt')\r\n naps = data.readlines()\r\n for line in naps:\r\n nap = line[:9].replace(',','').strip()\r\n position_barra = line[10:18].replace(',','' ).strip()\r\n vlan = line[19:22].replace(',','').strip()\r\n position_hifen = line[22:30].replace(',','').strip()\r\n ip = line[30:43].replace(',','').strip()\r\n if nap == n:\r\n break\r\n if nap != n:\r\n print('Essa NAP não existe')\r\n else: \r\n print('\\n PROVISIONANDO...\\n')\r\n command = f'show equipment ont status pon {position_barra}'\r\n\r\n cli(command,ip,position_hifen,vlan,nap)\r\n #free_position(ip,position_hifen,vlan,nap)\r\n\r\n\r\ndef remove(host,serial):\r\n trash = open('trash_position.txt','r')\r\n n = trash.readlines()\r\n for line in n: \r\n if line[:3] =='sn:' and line[3:16] == serial:\r\n print(line[3:16])\r\n \r\n rm = line[18:30].strip()\r\n\r\n command = f'''configure equipment ont interface {rm} admin-state down\r\nconfigure equipment ont no interface {rm}\r\n'''\r\n \r\n cli(command,host)\r\n\r\n print('\\n\\nPROVISIONAMENTO REMOVIDO')\r\n \r\n\r\ndef nap_data_rm():\r\n n = values['nap'].strip() \r\n if n[:5] != 'teste' and n[:3] != 'NAP':\r\n n = 'NAP-' + n.replace('nap-','') \r\n data = open('data.txt')\r\n naps = data.readlines()\r\n for line in naps:\r\n nap = line[:9].replace(',','').strip()\r\n position_barra = line[10:18].replace(',','' ).strip()\r\n vlan = line[19:22].replace(',','').strip()\r\n position_hifen = line[22:30].replace(',','').strip()\r\n ip = line[30:43].replace(',','').strip()\r\n if nap == n:\r\n break\r\n if nap != n:\r\n print('Essa NAP não existe')\r\n else:\r\n serial = values['serial'].strip() \r\n if len(serial) == 12:\r\n serial = 'ALCL:'+serial[4:12]\r\n\r\n print('\\n REMOVENDO...\\n')\r\n command = f'show equipment ont index sn:{serial}'\r\n\r\n cli(command,ip)\r\n remove(ip,serial)\r\n \r\n \r\n#███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████\r\n# EVENT0S\r\n#███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████\r\nwhile True:\r\n 
event, values = window.read()\r\n#EVENTO FECHAR \r\n if event == sg.WIN_CLOSED:\r\n break\r\n#EVENTO DO BOTÃO LIMPAR INPUT E OUTPUT\r\n if event == 'Limpar':\r\n window.FindElement('username').Update('')\r\n window.FindElement('password').Update('')\r\n window.FindElement('tecnico').Update('')\r\n window.FindElement('serial').Update('')\r\n window.FindElement('nap').Update('')\r\n window.FindElement('porta').Update('')\r\n window.FindElement('saida').Update('') \r\n#EVENTO DO BOTÃO PROVISIONAMENTO\r\n if event == 'provisionar':\r\n n = values['nap'].strip()\r\n serial = values['serial'].strip()\r\n if values['username'].strip() == '' or values['password'].strip() == '' or values['tecnico'].strip() == '' or values['serial'].strip() == '' or values['nap'].strip() == '' or values['porta'].strip() == '':\r\n print('Preencha todos os campos')\r\n elif n[:4] == 'NAP-' or n == 'teste' or n == 'teste II':\r\n if serial[:4] == 'ALCL':\r\n nap_data_prov()\r\n else:\r\n print('Digite um serial valido')\r\n else:\r\n print('Digite uma NAP valida ') \r\n \r\n \r\n if event == 'remover':\r\n n = values['nap'].strip()\r\n serial = values['serial'].strip()\r\n if values['serial'].strip() == '' or values['nap'].strip() == '':\r\n print('Didige todos os campos')\r\n \r\n elif n[:4] == 'NAP-' or n == 'teste' or n == 'teste II':\r\n if serial[:4] == 'ALCL':\r\n nap_data_rm()\r\n else:\r\n print('Digite um serial valido')\r\n else:\r\n print('Digite uma NAP valida ') \r\nwindow.close()\r\n\r\n","sub_path":"hat/hat.py","file_name":"hat.py","file_ext":"py","file_size_in_byte":14243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"173806314","text":"#Useful sites\n#https://chrisalbon.com/python/data_wrangling/pandas_dataframe_descriptive_stats/\n#http://jonathansoma.com/lede/algorithms-2017/classes/fuzziness-matplotlib/how-pandas-uses-matplotlib-plus-figures-axes-and-subplots/\n#https://www.datacamp.com/community/tutorials/pandas-tutorial-dataframe-python\n#https://medium.com/jbennetcodes/dealing-with-datetimes-like-a-pro-in-pandas-b80d3d808a7f\n\nimport pandas\nimport matplotlib.pyplot as plt\n\ndfAgi = pandas.read_table('AGI0105.txt', sep='\\t', names=('DATA', 'AL', 'CORRENTE'), decimal=',')\ndfAgiMean = pandas.DataFrame(columns=['HORA','CORRENTE'])\ndfAgiMax = pandas.DataFrame(columns=['HORA','CORRENTE'])\ndfAgiMaxGain = pandas.DataFrame(columns=['HORA','CORRENTE'])\n\ndfAmc = pandas.read_table('AMC0103.txt', sep='\\t', names=('DATA', 'AL', 'CORRENTE'), decimal=',')\ndfAmcMean = pandas.DataFrame(columns=['HORA','CORRENTE'])\ndfAmcMax = pandas.DataFrame(columns=['HORA','CORRENTE'])\ndfAmcMaxGain = pandas.DataFrame(columns=['HORA','CORRENTE'])\n\ndfAnr = pandas.read_table('ANR0106.txt', sep='\\t', names=('DATA', 'AL', 'CORRENTE'), decimal=',')\ndfAnrMean = pandas.DataFrame(columns=['HORA','CORRENTE'])\ndfAnrMax = pandas.DataFrame(columns=['HORA','CORRENTE'])\ndfAnrMaxGain = pandas.DataFrame(columns=['HORA','CORRENTE'])\n\ndfBer = pandas.read_table('BER0442.txt', sep='\\t', names=('DATA', 'AL', 'CORRENTE'), decimal=',')\ndfBerMean = pandas.DataFrame(columns=['HORA','CORRENTE'])\ndfBerMax = pandas.DataFrame(columns=['HORA','CORRENTE'])\ndfBerMaxGain = pandas.DataFrame(columns=['HORA','CORRENTE'])\n\ndfBert = pandas.read_table('BER0443.txt', sep='\\t', names=('DATA', 'AL', 'CORRENTE'), decimal=',')\ndfBertMean = pandas.DataFrame(columns=['HORA','CORRENTE'])\ndfBertMax = pandas.DataFrame(columns=['HORA','CORRENTE'])\ndfBertMaxGain = 
pandas.DataFrame(columns=['HORA','CORRENTE'])\n\ndfSam = pandas.read_table('SAM0104.txt', sep='\\t', names=('DATA', 'AL', 'CORRENTE'), decimal=',')\ndfSamMean = pandas.DataFrame(columns=['HORA','CORRENTE'])\ndfSamMax = pandas.DataFrame(columns=['HORA','CORRENTE'])\ndfSamMaxGain = pandas.DataFrame(columns=['HORA','CORRENTE'])\n\n\ndfAgi[\"CORRENTE\"] = pandas.to_numeric(dfAgi[\"CORRENTE\"], downcast='integer')\ndfAgi[\"DATA\"] = pandas.to_datetime(dfAgi[\"DATA\"], format='%d/%m/%Y %H:%M:%S')\n\ndfAmc[\"CORRENTE\"] = pandas.to_numeric(dfAmc[\"CORRENTE\"], downcast='integer')\ndfAmc[\"DATA\"] = pandas.to_datetime(dfAmc[\"DATA\"], format='%d/%m/%Y %H:%M:%S')\n\ndfAnr[\"CORRENTE\"] = pandas.to_numeric(dfAnr[\"CORRENTE\"], downcast='integer')\ndfAnr[\"DATA\"] = pandas.to_datetime(dfAnr[\"DATA\"], format='%d/%m/%Y %H:%M:%S')\n\ndfBer[\"CORRENTE\"] = pandas.to_numeric(dfBer[\"CORRENTE\"], downcast='integer')\ndfBer[\"DATA\"] = pandas.to_datetime(dfBer[\"DATA\"], format='%d/%m/%Y %H:%M:%S')\n\ndfBert[\"CORRENTE\"] = pandas.to_numeric(dfBert[\"CORRENTE\"], downcast='integer')\ndfBert[\"DATA\"] = pandas.to_datetime(dfBert[\"DATA\"], format='%d/%m/%Y %H:%M:%S')\n\ndfSam[\"CORRENTE\"] = pandas.to_numeric(dfSam[\"CORRENTE\"], downcast='integer')\ndfSam[\"DATA\"] = pandas.to_datetime(dfSam[\"DATA\"], format='%d/%m/%Y %H:%M:%S')\n\n#Arrays\nhoursOfDay = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09','10', '11', \n '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23']\ndataframes = [dfAgi, dfAmc, dfAnr, dfSam, dfBer, dfBert]\ndataframesmean = [dfAgiMean, dfAmcMean, dfAnrMean, dfSamMean, dfBerMean, dfBertMean]\ndataframesmax = [dfAgiMax, dfAmcMax, dfAnrMax, dfSamMax, dfBerMax, dfBertMax]\ndataframesmaxgain = [dfAgiMaxGain, dfAmcMaxGain, dfAnrMaxGain, dfSamMaxGain, dfBerMaxGain, dfBertMaxGain]\nnome = ['AGI0105', 'AMC0103', 'ANR0106', 'SAM0104','BER0442', 'BER0443']\n\n\nfor i in range(len(dataframes)):\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n ax1 = dataframes[i].plot(kind='line', x='DATA', y='CORRENTE', color='blue', ax=ax1)\n ax1.grid(b = True, which = 'both')\n ax1.set_title(nome[i] + ' Mensal')\n ax1.set_ylabel('Corrente')\n ax1.set_xlabel('Dia')\n \n #Sets the DATA (date) as index, so we can use it with DataFrame.index\n dataframes[i].set_index('DATA', inplace=True)\n\n ####Finds the average current for each hour and stores it in dataframesmean####\n for j in range(24):\n dfAux = dataframes[i].loc[(dataframes[i]).index.strftime('%H') == hoursOfDay[j]]\n mean = round(dfAux['CORRENTE'].mean(), 2)\n dataframesmean[i] = dataframesmean[i].append({'CORRENTE': mean, 'HORA':j}, ignore_index=True)\n\n ax2 = dataframesmean[i].plot(kind='line', x='HORA', y='CORRENTE', color='blue',style='.-', ax=ax2,)\n ax2.grid(b=True, which='both')\n ax2.set_ylabel('Corrente')\n ax2.set_xlabel('Hora')\n ax2.set_xticks(dataframesmean[i]['HORA'])\n\n plt.draw()\n \n ####Finds the maximum current for each hour and stores it in dataframesmax####\n for j in range(24):\n dfAux = dataframes[i].loc[(dataframes[i]).index.strftime('%H') == hoursOfDay[j]]\n mean = round(dfAux['CORRENTE'].max(), 2)\n dataframesmax[i] = dataframesmax[i].append({'CORRENTE': mean, 'HORA':j}, ignore_index=True)\n\n ax2 = dataframesmax[i].plot(kind='line', x='HORA', y='CORRENTE', color='red',style='.-', ax=ax2,)\n ax2.grid(b=True, which='both')\n ax2.set_ylabel('Corrente')\n ax2.set_xlabel('Hora')\n ax2.set_xticks(dataframesmax[i]['HORA'])\n\n #ax2.set_ylim(dataframesmean[i]['CORRENTE'].min() 
- 5, dataframesmax[i]['CORRENTE'].max() + 10) \n plt.draw()\n \n ####Statistical calculation####\n\n ###Window of 30 days (total)###\n mean = dataframes[i]['CORRENTE'].mean()\n std = dataframes[i]['CORRENTE'].std()\n\n erro = std/mean\n\n if std > 70:\n std = std * 0.6\n\n if erro <= 1:\n constant = -1.5 * erro + 3\n maximumWindow30 = mean + constant * std\n \n if maximumWindow30 > dataframes[i]['CORRENTE'].max():\n maximumWindow30 = dataframes[i]['CORRENTE'].max()\n\n ###Window of ~1 week (total ~(1 month)/4)### \n window = int(((dataframes[i]['CORRENTE']).size)/30)\n maximumWindow7 = 0\n step = 0\n \n for j in range(30):\n mean = dataframes[i]['CORRENTE'][step:step+window].mean()\n std = dataframes[i]['CORRENTE'][step:step+window].std()\n erro = std/mean\n \n if std > 70:\n std = std * 0.6\n \n if erro <= 1:\n constant = 3.2 - 0.5 * erro\n maximum = mean + constant * std\n\n if maximum > maximumWindow7:\n maximumWindow7 = maximum\n \n step = step + window\n\n if maximumWindow7 > dataframes[i]['CORRENTE'].max():\n maximumWindow7 = dataframes[i]['CORRENTE'].max()\n\n ###Mean with gain###\n gain = maximumWindow7 / dataframesmean[i]['CORRENTE'].max()\n \n for j in range(24):\n dataframesmaxgain[i] = dataframesmaxgain[i].append({'CORRENTE': (dataframesmean[i]['CORRENTE'][j] * gain), 'HORA':j}, ignore_index=True)\n\n ax2 = dataframesmaxgain[i].plot(kind='line', x='HORA', y='CORRENTE', color='green',style='.-', ax=ax2,)\n ax2.grid(b=True, which='both')\n ax2.set_ylabel('Corrente')\n ax2.set_xlabel('Hora')\n ax2.set_xticks(dataframesmaxgain[i]['HORA'])\n\n ax1.set_title(nome[i] + ' Mensal - Janela 30 dias: ' + str(round(maximumWindow30, 2)) + 'A' + ' Mensal - Janela 7 dias: ' + str(round(maximumWindow7, 2)) + 'A')\n ax2.set_title(nome[i] + ' Media e Maxima Diaria')\n ax2.legend(['Corrente Media', 'Corrente Maxima', 'Corrente Media Corrigida com Maximo'])\n\nplt.tight_layout()\nplt.show()\n\n\n\n\n\n","sub_path":"EktMedPython/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":7737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"159056458","text":"import os\nimport shelve\nimport shutil\n\nfrom persistent_dict import get_module_logger\n\n\nclass PersistentDictContainer:\n\n def __init__(self, storage_dir, storage_files_mask, debug):\n self.storage_dir = storage_dir\n self.storage_files_mask = storage_files_mask\n self.debug = debug\n\n @staticmethod\n def to_dictionary(dict_obj):\n if not isinstance(dict_obj, PersistentDictContainer):\n raise ValueError\n return PersistentDict(dict_obj.storage_dir, dict_obj.storage_files_mask, dict_obj.debug)\n\n\nclass SelfMarker:\n \"\"\"\n Dummy marker\n Used when an instance is added to itself.\n \"\"\"\n pass\n\n\nclass PersistentDict(dict):\n __storage_dir = None\n __keys = set()\n __log = None\n\n def __init__(self, storage_dir: str, storage_files_mask='storage', debug=False):\n super().__init__()\n self.__storage_dir = storage_dir\n self.__storage_files_mask = storage_files_mask\n self.__debug = debug\n\n if not os.path.isdir(self.__storage_dir):\n os.mkdir(self.__storage_dir)\n\n self.__storage = os.path.join(self.__storage_dir, self.__storage_files_mask)\n self.__log = get_module_logger('persisted_dict', self.__debug)\n\n def __getitem__(self, key):\n prepared_key = self.__to_shelved_key(key)\n\n with shelve.open(self.__storage) as storage:\n if prepared_key not in storage.keys():\n raise KeyError()\n unpickled_value = storage.get(prepared_key)\n if type(unpickled_value) is 
SelfMarker:\n                return self\n            elif type(unpickled_value) is PersistentDictContainer:\n                return unpickled_value.to_dictionary(unpickled_value)\n            else:\n                return unpickled_value\n\n    def __setitem__(self, key, value):\n        self.__validate_key(key)\n        prepared_key = self.__to_shelved_key(key)\n\n        if isinstance(value, PersistentDict):\n            value = SelfMarker() if value == self else self.to_container(value)\n\n        with shelve.open(self.__storage) as storage:\n            storage[prepared_key] = value\n            self.__keys.add(key)\n\n    def __delitem__(self, key):\n        with shelve.open(self.__storage) as storage:\n            if key not in storage.keys():\n                raise KeyError()\n            storage.pop(key)\n            self.__keys.remove(key)\n\n    def __eq__(self, other):\n        if not isinstance(other, PersistentDict):\n            return False\n        return self.storage_dir == other.storage_dir and self.keys() == other.keys()\n\n    def keys(self):\n        with shelve.open(self.__storage) as storage:\n            tmp = [self.__from_shelve_key(k) for k in list(storage.keys())]\n            return tmp\n\n    def clear(self):\n        with shelve.open(self.__storage) as storage:\n            storage.clear()\n        shutil.rmtree(self.__storage_dir)\n\n    # workaround key limitations of shelve\n    def __to_shelved_key(self, key):\n        self.__log.debug('Origin key {}'.format(key))\n        if type(key) is int:\n            return \"int_{}\".format(key)\n        elif type(key) is float:\n            return \"float_{}\".format(key)\n        else:\n            return key\n\n    def __from_shelve_key(self, shelved_key: str):\n        self.__log.debug('Shelving key {}'.format(shelved_key))\n        if shelved_key.startswith('int_'):\n            return int(shelved_key[4:])\n        elif shelved_key.startswith('float_'):\n            return float(shelved_key[6:])\n        else:\n            return shelved_key\n\n    @property\n    def storage_dir(self):\n        return self.__storage_dir\n\n    @property\n    def storage_files_mask(self):\n        return self.__storage_files_mask\n\n    def get_debug(self):\n        return self.__debug\n\n    def __validate_key(self, key):\n        if not isinstance(key, (int, float, str)) or isinstance(key, bool):\n            raise KeyError(\"Key must be string or number.\")\n        if type(key) is str:\n            if len(key.strip()) == 0:\n                raise KeyError(\"String key must not be empty\")\n\n    def to_container(self, object_dict) -> PersistentDictContainer:\n        if not isinstance(object_dict, PersistentDict):\n            raise ValueError\n        return PersistentDictContainer(object_dict.storage_dir, object_dict.storage_files_mask, object_dict.get_debug())\n","sub_path":"persistent_dict/persistent_dict.py","file_name":"persistent_dict.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"574010751","text":"'''\nEvaluate the model with different metrics\n    -- get the accuracy\n    -- get report\n    -- get roc\n'''\nfrom __future__ import division\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\nfrom sklearn.metrics import *\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom sklearn.dummy import DummyClassifier\nimport random\nfrom scipy import optimize\nimport time\nimport seaborn as sns\nfrom sklearn.externals import joblib\nfrom datetime import datetime\n\n\ndef pre(model, x_test, threshold):\n    '''\n    Use model to do predictions\n\n    Input:\n        model: fitted logistic regression model\n        x_test: dataframe of independent variables\n        threshold: float used as the probability cutoff for the positive class\n    Return:\n        An array of predictions\n    '''\n    pred_scores = model.predict_proba(x_test)\n    pred_label = [1 if x[1]>threshold else 0 for x in pred_scores]\n    return pred_label\n\n\ndef get_accu(y_test, pres):\n    
'''\n    Compute the accuracy of the predictions.\n\n    Input:\n        y_test: dependent variable's dataframe\n        pres: np array of predictions\n    Return:\n        Accuracy score as a float\n    '''\n    return metrics.accuracy_score(y_test, pres)\n\ndef get_report(y_test, pres):\n    '''\n    Get the report of the model\n\n    Input:\n        y_test: dependent variable's dataframe\n        pres: np array of predictions\n    Return:\n        report with precision, recall, f1-score, support\n    '''\n    return classification_report(y_test, pres)\n\ndef joint_sort_descending(l1, l2):\n    # l1 and l2 have to be numpy arrays\n    idx = np.argsort(l1)[::-1]\n    return l1[idx], l2[idx]\n\ndef generate_binary_at_k(y_scores, k):\n    cutoff_index = int(len(y_scores) * (k / 100.0))\n    test_predictions_binary = [1 if x < cutoff_index else 0 for x in range(len(y_scores))]\n    return test_predictions_binary\n\ndef precision_at_k(y_true, y_scores, k):\n    y_scores, y_true = joint_sort_descending(np.array(y_scores), np.array(y_true))\n    preds_at_k = generate_binary_at_k(y_scores, k)\n    #precision, _, _, _ = metrics.precision_recall_fscore_support(y_true, preds_at_k)\n    #precision = precision[1] # only interested in precision for label 1\n    precision = precision_score(y_true, preds_at_k)\n    return precision\n\ndef recall_at_k(y_true, y_scores, k):\n    y_scores_sorted, y_true_sorted = joint_sort_descending(\n        np.array(y_scores), np.array(y_true))\n    preds_at_k = generate_binary_at_k(y_scores_sorted, k)\n    recall = recall_score(y_true_sorted, preds_at_k)\n    return recall\n\ndef baseline(X_train, X_test, y_train, y_test):\n    clf = DummyClassifier(strategy='stratified', random_state=0)\n    clf.fit(X_train, y_train)\n    return clf\n\ndef accuracy_at_k(y_true, y_scores, k):\n    y_scores_sorted, y_true_sorted = joint_sort_descending(\n        np.array(y_scores), np.array(y_true))\n    preds_at_k = generate_binary_at_k(y_scores_sorted, k)\n    acc = accuracy_score(y_true_sorted, preds_at_k)\n    return acc\n\ndef f1_at_k(y_true, y_scores, k):\n    y_scores_sorted, y_true_sorted = joint_sort_descending(\n        np.array(y_scores), np.array(y_true))\n    preds_at_k = generate_binary_at_k(y_scores_sorted, k)\n    f1 = f1_score(y_true_sorted, preds_at_k)\n    return f1\n","sub_path":"hw5_2/pipeline/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"640324891","text":"from flask import Flask, request, url_for, send_from_directory, jsonify, redirect, render_template\nfrom settings import DATA_DIR, TRAINED_DIR\nfrom models.multinominal_trainer import MultinominalTrainer\nfrom models.trainer import Trainer\nfrom preprocessing import Preprocessor\n\nimport os, logging\nimport pandas as pd\n\nUPLOAD_DIR = os.path.join(DATA_DIR, 'raw')\nINTERIM_DIR = os.path.join(DATA_DIR, 'interim')\nALLOWED_EXTENSIONS = set(['csv'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_DIR\n\n@app.route(\"/\")\ndef index():\n    res = {\n        'status': 'success',\n        'data': {\n            'endpoints': [\n                '/csv/download/<file>',\n                '/csv/list',\n                '/csv/upload',\n                '/trainer/list',\n                '/trainer/precision/<model>',\n                '/trainer/new/<model>/<csv>',\n                '/classificate/<trainer>/<msg>',\n                '/classificate_list/<trainer>'\n            ]\n        }\n    }\n    return jsonify(res)\n\n\n@app.route('/csv/download/<file>')\ndef csv_download(file):\n    if os.path.isfile(os.path.join(UPLOAD_DIR, file)):\n        return send_from_directory(UPLOAD_DIR, file)\n    else:\n        res = {\n            'status': 'fail',\n            'data': {\n                'info' : 'arquivo não encontrado no servidor.'\n            }\n        }\n        return jsonify(res)\n\n@app.route('/csv/list')\ndef csv_list():\n    files = os.listdir(UPLOAD_DIR)\n\n    res = {\n        
'status': 'success',\n        'data': {\n            'files': files\n        }\n    }\n    return jsonify(res)\n\n\n@app.route('/csv/upload', methods=['GET', 'POST'])\ndef csv_upload():\n    if request.method == 'POST':\n        file = request.files['file']\n\n        if not allowed_file(file.filename):\n            status = 'fail'\n            info = 'extensão de arquivo inválida'\n\n        else:\n            filepath = os.path.join(DATA_DIR, 'raw', file.filename)\n            file.save(filepath)\n            Preprocessor.clean_csv(filepath)\n\n            status = 'success'\n            info = 'upload realizado com sucesso'\n\n        res = {\n            'status': status,\n            'data': {\n                'info': info\n            }\n        }\n        return jsonify(res)\n\n    return render_template('upload.html')\n\n\n@app.route('/trainer/list')\ndef trainer_list():\n    files = os.listdir(TRAINED_DIR)\n    res = {\n        'status': 'success',\n        'data': {\n            'trainers': files\n        }\n    }\n    return jsonify(res)\n\n\n@app.route('/trainer/precision/<model>')\ndef trainer_precision(model):\n    trainer = Trainer.load_model(model)\n    res = {\n        'status': 'success',\n        'data': {\n            'precision': trainer.get_precision().tolist()\n        }\n    }\n    return jsonify(res)\n\n\n@app.route('/trainer/new/<model>/<csv>')\ndef trainer_new(model, csv):\n    status = 'fail'\n    if model == 'multinomial':\n        mn_trainer = MultinominalTrainer()\n    else:\n        res = {\n            'status': status,\n            'data': {\n                'info': 'informe o tipo do modelo'\n            }\n        }\n        return jsonify(res)\n\n    if os.path.isfile(os.path.join(INTERIM_DIR, csv + '.csv')):\n        df = pd.read_csv(os.path.join(INTERIM_DIR, csv + '.csv'))\n        mn_trainer.generate_model(df)\n        mn_trainer.save_model(\"{}_{}\".format(model, csv))\n\n        status = 'success'\n        info = 'modelo criado com sucesso'\n\n    else:\n        info = 'csv {} não encontrado na pasta interim'.format(csv)\n\n    res = {\n        'status': status,\n        'data': {\n            'info': info\n        }\n    }\n    return jsonify(res)\n\n\n@app.route('/classificate/<trainer>/<msg>')\ndef classificate(trainer, msg):\n    trainer = Trainer.load_model(trainer)\n    res = {\n        'status': 'success',\n        'data': {\n            'classification': str(trainer.predict(msg)[0])\n        }\n    }\n    return jsonify(res)\n\n\n@app.route('/classificate_list/<trainer>', methods=['POST'])\ndef classificate_list(trainer):\n    trainer = Trainer.load_model(trainer)\n    arr = request.json['lista']\n\n    res = {\n        'status': 'success',\n        'data': {\n            'values': trainer.predict_list(arr).tolist()\n        }\n    }\n    return jsonify(res)\n\n@app.route('/doc')\ndef help():\n    return render_template('doc.html')\n\n\ndef allowed_file(filename):\n    return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef has_no_empty_params(rule):\n defaults = rule.defaults if rule.defaults is not None else ()\n arguments = rule.arguments if rule.arguments is not None else ()\n return len(defaults) >= len(arguments)\n\n\n@app.route(\"/api-map\")\ndef api_map():\n links = []\n for rule in app.url_map.iter_rules():\n # Filter out rules we can't navigate to in a browser\n # and rules that require parameters\n if \"GET\" in rule.methods and has_no_empty_params(rule):\n url = url_for(rule.endpoint, **(rule.defaults or {}))\n links.append((url, rule.endpoint))\n # links is now a list of url, endpoint tuples\n\n res = {\n 'links': links\n }\n\n return jsonify(res)\n","sub_path":"api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"154339368","text":"\nimport os\nimport argparse\nimport tensorflow as tf\nimport tensorflow_hub as tfhub\n# Load compressed models from tensorflow_hub\nos.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'\nimport IPython.display as display\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.rcParams['figure.figsize'] = (12,12)\nmpl.rcParams['axes.grid'] = False\nimport numpy as np\nimport functools\nimport cv2\nimport time\n\nimport torch\nfrom torchvision.models.segmentation import deeplabv3_resnet101\nfrom torchvision import transforms\nfrom PIL import Image\n\ndef load_img(path_to_img):\n max_dim = 512\n img = tf.io.read_file(path_to_img)\n img = tf.image.decode_image(img, channels=3)\n img = tf.image.convert_image_dtype(img, tf.float32)\n\n shape = tf.cast(tf.shape(img)[:-1], tf.float32)\n long_dim = max(shape)\n scale = max_dim / long_dim\n\n new_shape = tf.cast(shape * scale, tf.int32)\n\n img = tf.image.resize(img, new_shape)\n img = img[tf.newaxis, :]\n return img\n\ndef prepare_img(img):\n max_dim = 512\n img = tf.convert_to_tensor(img)\n #img = tf.image.decode_image(img, channels=3)\n img = tf.image.convert_image_dtype(img, tf.float32)\n\n shape = tf.cast(tf.shape(img)[:-1], tf.float32)\n long_dim = max(shape)\n scale = max_dim / long_dim\n\n new_shape = tf.cast(shape * scale, tf.int32)\n\n img = tf.image.resize(img, new_shape)\n img = img[tf.newaxis, :]\n return img\n\ndef tensor_to_image(tensor):\n tensor = tensor*255\n tensor = np.array(tensor, dtype=np.uint8)\n if np.ndim(tensor)>3:\n assert tensor.shape[0] == 1\n tensor = tensor[0]\n return tensor #PIL.Image.fromarray(tensor)\n\ndef get_args():\n parser = argparse.ArgumentParser(description='ArtsyML')\n parser.add_argument('--style_img', default='./images_style/style3.jpg')\n return parser.parse_args()\n\nif __name__ == '__main__':\n\n args = get_args()\n style_path = args.style_img\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print('DEVICE', device)\n seg_model = torch.hub.load('pytorch/vision:v0.6.0', 'deeplabv3_resnet101', pretrained=True)\n seg_model = seg_model.to(device=device)\n seg_model.eval()\n\n\n style_image = load_img(style_path)\n style_image = tf.stack([style_image[:,:,:,2],style_image[:,:,:,1],style_image[:,:,:,0]],axis = 3)\n style_model = tfhub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')\n #style_model = Net(ngf=128)\n #style_model.load_state_dict(torch.load('21styles.model'))\n print(\"Num GPUs Available: \", len(tf.config.list_physical_devices('GPU')))\n\n preprocess = transforms.Compose([\n transforms.ToTensor(),\n 
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n\n\n print('loaded model')\n cap = cv2.VideoCapture(0)\n prev_capture = time.time()\n while(True):\n\n capture_time = time.time()\n print('Time between captures: ', capture_time - prev_capture)\n prev_capture = capture_time\n\n # Capture frame-by-frame\n ret, frame = cap.read()\n \n # Preparing the frame for the style net\n content_image = prepare_img(frame)\n style_image_tensor = style_model(tf.constant(content_image), tf.constant(style_image))[0]\n style_img = tensor_to_image(style_image_tensor)\n \n # Preparing the frame for the segmentation net \n # resize to same shape as output of style net\n frame = cv2.resize(frame, (style_img.shape[1], style_img.shape[0]))\n input_tensor = preprocess(frame)\n # create a mini-batch as expected by the model\n input_batch = input_tensor.unsqueeze(0)\n input_batch = input_batch.to(device=device)\n \n with torch.no_grad():\n seg_output = seg_model(input_batch)['out'][0]\n seg_output_predictions = seg_output.detach().argmax(0)\n\n # edit segmentation mask to binary to keep people only\n seg_mask = seg_output_predictions.cpu().numpy() \n seg_mask[seg_mask!=15] = 0\n seg_mask[seg_mask==15] = 1\n\n # keep people only from style image and background only from original frame\n style_img = (1-seg_mask[:,:,None])*frame + seg_mask[:,:,None]*style_img\n style_img = style_img.astype(np.uint8)\n \n # Display the resulting frame\n cv2.imshow('Style Transfer', style_img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()","sub_path":"video_stream.py","file_name":"video_stream.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"352721044","text":"# -*- coding: utf-8 -*-\n\n\"\"\"A Tk scrolled frame widget compatible with tkinter.ttk\n\nBased on a version:\n# Version: 0.22\n# Author: Miguel Martinez Lopez\n# Uncomment the next line to see my email\n# print(\"Author's email: \",\n# \"61706c69636163696f6e616d656469646140676d61696c2e636f6d\".decode(\"hex\"))\n\nwith minor changes.\n\"\"\"\n\nimport seamm_widgets as sw\nimport tkinter as tk\nfrom tkinter import ttk\n\n\nclass ScrolledFrame(ttk.Frame):\n def __init__(\n self,\n master,\n width=None,\n anchor=tk.N,\n height=None,\n mousewheel_speed=2,\n scroll_horizontally=True,\n xscrollbar=None,\n scroll_vertically=True,\n yscrollbar=None,\n background=None,\n inner_frame=ttk.Frame,\n **kwargs\n ):\n \"\"\" \"\"\"\n class_ = kwargs.pop(\"class_\", \"MScrolledFrame\")\n super().__init__(master, class_=class_)\n\n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(0, weight=1)\n\n self._width = width\n self._height = height\n\n self.canvas = tk.Canvas(\n self,\n background=background,\n highlightthickness=0,\n width=width,\n height=height,\n takefocus=True,\n )\n self.canvas.grid(row=0, column=0, sticky=tk.NSEW)\n\n if scroll_vertically:\n if yscrollbar is not None:\n self.yscrollbar = yscrollbar\n else:\n self.yscrollbar = ttk.Scrollbar(\n self, orient=tk.VERTICAL, takefocus=False\n )\n self.yscrollbar.grid(row=0, column=1, sticky=tk.NS)\n\n self.canvas.configure(yscrollcommand=self.yscrollbar.set)\n self.yscrollbar[\"command\"] = self.canvas.yview\n else:\n self.yscrollbar = None\n\n if scroll_horizontally:\n if xscrollbar is not None:\n self.xscrollbar = xscrollbar\n else:\n self.xscrollbar = ttk.Scrollbar(\n self, orient=tk.HORIZONTAL, 
takefocus=False\n )\n self.xscrollbar.grid(row=1, column=0, sticky=tk.EW)\n\n self.canvas.configure(xscrollcommand=self.xscrollbar.set)\n self.xscrollbar[\"command\"] = self.canvas.xview\n else:\n self.xscrollbar = None\n\n self.rowconfigure(0, weight=1)\n self.columnconfigure(0, weight=1)\n\n self.innerframe = inner_frame(self.canvas, **kwargs)\n self.innerframe.pack(anchor=anchor)\n\n self.canvas.create_window(\n 0, 0, window=self.innerframe, anchor=\"nw\", tags=\"inner_frame\"\n )\n\n self.canvas.bind(\"<Configure>\", self._on_canvas_configure)\n\n sw.MousewheelSupport(self).add_support_to(\n self.canvas, xscrollbar=self.xscrollbar, yscrollbar=self.yscrollbar\n )\n\n @property\n def width(self):\n return self.canvas.winfo_width()\n\n @width.setter\n def width(self, width):\n self.canvas.configure(width=width)\n\n @property\n def height(self):\n return self.canvas.winfo_height()\n\n @height.setter\n def height(self, height):\n self.canvas.configure(height=height)\n\n def interior(self):\n \"\"\"The frame that contains user widgets\"\"\"\n return self.innerframe\n\n def set_size(self, width, height):\n self.canvas.configure(width=width, height=height)\n\n def _on_canvas_configure(self, event):\n width = max(self.innerframe.winfo_reqwidth(), event.width)\n height = max(self.innerframe.winfo_reqheight(), event.height)\n\n self.canvas.configure(scrollregion=\"0 0 %s %s\" % (width, height))\n self.canvas.itemconfigure(\"inner_frame\", width=width, height=height)\n\n def update_viewport(self):\n self.update()\n\n window_width = self.innerframe.winfo_reqwidth()\n window_height = self.innerframe.winfo_reqheight()\n\n if self._width is None:\n canvas_width = window_width\n else:\n canvas_width = min(self._width, window_width)\n\n if self._height is None:\n canvas_height = window_height\n else:\n canvas_height = min(self._height, window_height)\n\n self.canvas.configure(\n scrollregion=\"0 0 %s %s\" % (window_width, window_height),\n width=canvas_width,\n height=canvas_height,\n )\n self.canvas.itemconfigure(\n \"inner_frame\", width=window_width, height=window_height\n )\n","sub_path":"seamm_widgets/scrolled_frame.py","file_name":"scrolled_frame.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"264447849","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom .models import Album, Photo, PhotoLikes, CustomUser\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.db.models import Q\nfrom .forms import SignUpForm, LoginForm, AddPhotoForm, AddAlbumForm, UserEditForm, UserOtherDtlsEditForm, AlbumEditForm, PhotoEditForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .serializers import CustomUserSerializer, AlbumsSerializer, PhotosSerializer\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework import viewsets, permissions\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.authentication import BasicAuthentication, SessionAuthentication\nfrom .permissions import IsGetOrIsAuthenticated\nfrom rest_framework.exceptions import PermissionDenied\n# Create your views here.\n\n\ndef isOwnProfile(request, userId):\n if (request.user.is_authenticated and request.user.id == userId):\n return True\n return 
False\n\n\ndef viewHomePage(request):\n albums = None\n if (request.user.is_authenticated):\n albums = Album.objects.filter(\n privacyType=0, totalPhotos__gt=0).exclude(crtUser=request.user).order_by('-crtTime')\n else:\n albums = Album.objects.filter(\n privacyType=0, totalPhotos__gt=0).order_by('-crtTime')\n isLastPage = False\n if int(len(albums) / 12) == 0:\n isLastPage = True\n return render(request, 'index.html', {'albums': albums, 'isLastPage': isLastPage})\n\n\ndef showAlbum(request, id, pageId=0):\n startInd = pageId * 12\n endInd = startInd + 12\n album = Album.objects.get(id=id)\n if (album.crtUser != request.user and (album.privacyType == 2)):\n return redirect('/')\n photos = list(Photo.objects.filter(\n albumId=id, privacyType=0).order_by('-crtTime'))\n\n isLastPage = False\n\n if int(len(photos) / 12) == pageId:\n isLastPage = True\n photos = photos[startInd:endInd]\n albumName = Album.objects.filter(id=id)[0].name\n\n isLiked = [False for i in range(len(photos))]\n if request.user.is_authenticated:\n for i in range(len(photos)):\n a = PhotoLikes.objects.filter(\n photoId=photos[i], userId=request.user)\n if (len(a) > 0):\n isLiked[i] = True\n zippedList = zip(photos, isLiked)\n return render(request, 'showAlbum.html', {'zippedList': zippedList, 'albumName': albumName, 'isLastPage': isLastPage, 'nextPageId': pageId + 1, 'albumId': id, 'viewName': 'showAlbum'})\n\n\ndef likePost(request):\n if request.method == \"GET\":\n photo = Photo.objects.filter(id=request.GET['photoId'])[0]\n # keep the denormalised counter in step with the PhotoLikes rows,\n # mirroring the decrement in disLikePost below\n photo.totalLikes += 1\n photo.save()\n PhotoLikes.objects.create(\n photoId=photo, userId=request.user)\n return HttpResponse('success', status=200)\n return HttpResponse('unsuccessful', status=400)\n\n\ndef disLikePost(request):\n if request.method == \"GET\":\n photo = Photo.objects.filter(id=request.GET['photoId'])[0]\n totalLikes = photo.totalLikes\n totalLikes -= 1\n photo.totalLikes = totalLikes\n photo.save()\n PhotoLikes.objects.filter(photoId=photo, userId=request.user).delete()\n return HttpResponse('success', status=200)\n return HttpResponse('unsuccessful', status=400)\n\n\ndef showUserProfile(request, userId):\n ownProfile = False\n if (request.user.is_authenticated and request.user.id == userId):\n ownProfile = True\n return render(request, 'userProfile.html', {'ownProfile': ownProfile, 'userId': userId})\n\n\ndef showUserAlbums(request, userId, pageId=0):\n albums = None\n startInd = pageId * 12\n endInd = startInd + 12\n ownProfile = False\n\n if (isOwnProfile(request, userId)):\n ownProfile = True\n albums = Album.objects.filter(crtUser=request.user).order_by(\n '-crtTime')\n else:\n albums = Album.objects.filter(crtUser=User.objects.filter(\n id=userId)[0], privacyType=0).order_by('-crtTime')\n\n isLastPage = False\n if int(len(albums) / 12) == pageId:\n isLastPage = True\n albums = albums[startInd:endInd]\n return render(request, 'index.html', {'albums': albums, 'isLastPage': isLastPage, 'nextPageId': pageId+1, 'viewName': 'showUserAlbums'})\n\n\ndef showUserPhotos(request, userId, pageId=0):\n photos = None\n startInd = pageId * 12\n endInd = startInd + 12\n ownProfile = False\n if (isOwnProfile(request, userId)):\n ownProfile = True\n photos = Photo.objects.filter(crtUser=request.user).order_by(\n '-crtTime')\n else:\n photos = Photo.objects.filter(crtUser=User.objects.filter(\n id=userId)[0], privacyType=0).order_by('-crtTime')\n\n isLastPage = False\n if int(len(photos) / 12) == pageId:\n isLastPage = True\n photos = photos[startInd:endInd]\n\n isLiked = [False for i in range(len(photos))]\n if 
request.user.is_authenticated:\n for i in range(len(photos)):\n a = PhotoLikes.objects.filter(\n photoId=photos[i], userId=request.user)\n if (len(a) > 0):\n isLiked[i] = True\n zippedList = zip(photos, isLiked)\n\n return render(request, 'showAlbum.html', {'zippedList': zippedList, 'isLastPage': isLastPage, 'nextPageId': pageId+1, 'viewName': 'showUserPhotos'})\n\n\ndef signUpView(request):\n\n if request.user.is_authenticated:\n return redirect('/')\n\n if request.method == \"POST\":\n form = SignUpForm(request.POST, request.FILES)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password')\n firstName = form.cleaned_data.get('firstName')\n lastName = form.cleaned_data.get('lastName')\n email = form.cleaned_data.get('email')\n gender = form.cleaned_data.get('gender')\n profilePicture = form.cleaned_data.get('profilePicture')\n\n user = User.objects.filter(\n Q(username=username) | Q(email=email))\n\n if (len(user) > 0):\n form.errors.clear()\n form.add_error(None, 'user with same username or email exists')\n return render(request, 'signUp.html', {'form': form})\n user = User.objects.create_user(\n username=username,\n password=raw_password,\n email=email,\n first_name=firstName,\n last_name=lastName\n )\n CustomUser.objects.create(\n user=user,\n gender=gender,\n profilePicture=profilePicture\n )\n user = authenticate(username=username, password=raw_password)\n login(request, user)\n return redirect('/')\n else:\n form = SignUpForm()\n return render(request, 'signUp.html', {'form': form})\n\n\ndef loginView(request):\n if request.user.is_authenticated:\n return redirect('/')\n form = LoginForm(request.POST)\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/')\n else:\n form.errors.clear()\n form.add_error(None, 'Wrong username or password')\n\n return render(request, 'login.html', {'form': form})\n\n\n@login_required(login_url='/login')\ndef logoutView(request):\n logout(request)\n return redirect('/')\n\n\n@login_required(login_url='/login')\ndef addAlbum(request):\n \"\"\" if request.method == \"POST\":\n form = AddAlbumForm(request.POST, request.FILES)\n if form.is_valid():\n album = form.save(commit=False)\n album.crtUser = request.user\n album.save()\n return showUserAlbums(request, userId=request.user.id)\n else:\n form = AddAlbumForm() \"\"\"\n form = AddAlbumForm()\n return render(request, 'addAlbum.html', {'form': form})\n\n\n@login_required(login_url='/login')\ndef addPhoto(request):\n \"\"\" if request.method == \"POST\":\n album = Album.objects.filter(id=request.POST['albumId'])[0]\n photo = Photo(albumId=album)\n form = AddPhotoForm(request.POST, request.FILES, instance=photo)\n if form.is_valid():\n photo = form.save(commit=False)\n photo.crtUser = request.user\n photo.save()\n return redirect('album/' + str(album.id) + '/page/0', permanent=True) \"\"\"\n form = AddPhotoForm()\n albums = Album.objects.filter(crtUser=request.user)\n form.fields['albumId'].choices = [\n (album.id, album.name) for album in albums]\n return render(request, 'addPhoto.html', {'form': form})\n\n\ndef profileDetails(request, userId):\n user = User.objects.get(id=userId)\n if user is None:\n return redirect('/')\n customUser = CustomUser.objects.get(user=user)\n ownProfile = False\n if (isOwnProfile(request, userId)):\n ownProfile = True\n return 
render(request, 'myProfile.html', {'customUser': customUser, 'ownProfile': ownProfile})\n\n\n@login_required(login_url='/login')\ndef editUserProfile(request, userId):\n customUser = CustomUser.objects.filter(user=request.user)[0]\n if request.method == \"POST\":\n userEditForm = UserEditForm(\n request.POST, request.FILES, instance=request.user)\n userOtherDtlsEditForm = UserOtherDtlsEditForm(\n request.POST, request.FILES, instance=customUser)\n if(userEditForm.is_valid() and userOtherDtlsEditForm.is_valid()):\n User.objects.filter(id=userId).update(\n first_name=userEditForm.cleaned_data.get('first_name'),\n last_name=userEditForm.cleaned_data.get('last_name'),\n password=userEditForm.cleaned_data.get('password'),\n email=userEditForm.cleaned_data.get('email')\n )\n user = User.objects.filter(id=userId)[0]\n CustomUser.objects.filter(user=user).update(\n gender=userOtherDtlsEditForm.cleaned_data.get('gender'),\n profilePicture=userOtherDtlsEditForm.cleaned_data.get(\n 'profilePicture')\n )\n return redirect('profileDetails/'+str(userId), permanent=True)\n userEditForm = UserEditForm(instance=request.user)\n userOtherDtlsEditForm = UserOtherDtlsEditForm(instance=customUser)\n\n return render(request, 'editUserProfile.html', {'userEditForm': userEditForm, 'userOtherDtlsEditForm': userOtherDtlsEditForm})\n\n\n@login_required(login_url='/login')\ndef editAlbum(request, albumId):\n album = Album.objects.get(id=albumId)\n if (album.crtUser != request.user):\n redirect('/')\n \"\"\" if request.method == \"POST\":\n albumEditForm = AlbumEditForm(\n request.POST, request.FILES, instance=album)\n if albumEditForm.is_valid():\n albumEditForm.save()\n return showUserAlbums(request, request.user.id, 0) \"\"\"\n albumEditForm = AlbumEditForm(instance=album)\n return render(request, 'editAlbum.html', {'albumEditForm': albumEditForm})\n\n\n@login_required(login_url='/login')\ndef editPhoto(request, photoId):\n photo = Photo.objects.get(id=photoId)\n if (photo.crtUser != request.user):\n redirect('/')\n \"\"\" if request.method == \"POST\":\n photoEditForm = PhotoEditForm(\n request.POST, request.FILES, instance=photo)\n if photoEditForm.is_valid():\n photoEditForm.save()\n albums = Album.objects.filter(crtUser=request.user)\n photoEditForm.fields['albumId'].choices = [\n (album.id, album.name) for album in albums]\n return showUserPhotos(request, request.user.id, 0) \"\"\"\n photoEditForm = AddPhotoForm(instance=photo)\n albums = Album.objects.filter(crtUser=request.user)\n photoEditForm.fields['albumId'].choices = [\n (album.id, album.name) for album in albums]\n return render(request, 'editPhoto.html', {'photoEditForm': photoEditForm})\n\n\ndef deleteAlbum(request):\n if request.method == \"GET\":\n album = Album.objects.get(id=request.GET['albumId'])\n album.delete()\n return HttpResponse('success', status=200)\n return HttpResponse('unsuccessful', status=400)\n\n\ndef deletePhoto(request):\n if request.method == \"GET\":\n photo = Photo.objects.get(id=request.GET['photoId'])\n album = photo.albumId\n album.totalPhotos = album.totalPhotos - 1\n album.save()\n photo.delete()\n return HttpResponse('success', status=200)\n return HttpResponse('unsuccessful', status=400)\n\n# ---------------------------------------\n#\n# API METHODS\n#\n# ---------------------------------------\n\n\nclass UserDetailsAPI(viewsets.ModelViewSet):\n\n queryset = CustomUser.objects.all()\n serializer_class = CustomUserSerializer\n\n\nclass AlbumsAPI(viewsets.ModelViewSet):\n\n queryset = Album.objects.all()\n 
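# The class-level queryset above is only the unfiltered default used when\n # the viewset is registered with the router; get_queryset() below narrows\n # it so anonymous callers see public albums (privacyType=0) only, while\n # authenticated users also see their own albums.\n 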
serializer_class = AlbumsSerializer\n\n authentication_classes = [SessionAuthentication, BasicAuthentication]\n permission_classes = [IsGetOrIsAuthenticated]\n\n def get_queryset(self):\n queryset = None\n if self.request.user.is_authenticated:\n queryset = Album.objects.filter(\n Q(privacyType=0) | Q(crtUser=self.request.user)\n )\n else:\n queryset = Album.objects.filter(privacyType=0)\n\n id = self.request.query_params.get('id', None)\n crtUser = self.request.query_params.get('crtUser', None)\n\n if id is not None:\n queryset = queryset.filter(id=id)\n\n if crtUser is not None:\n crtUser = int(crtUser)\n queryset = queryset.filter(crtUser=crtUser).order_by('-crtTime')\n\n return queryset\n\n def destroy(self, request, *args, **kwargs):\n album = Album.objects.get(id=kwargs['pk'])\n if album.crtUser == request.user:\n album.delete()\n return HttpResponse('success', status=200)\n else:\n raise PermissionDenied('You are not owner of the photo')\n\n\nclass PhotosAPI(viewsets.ModelViewSet):\n\n queryset = Photo.objects.all()\n serializer_class = PhotosSerializer\n\n authentication_classes = [SessionAuthentication, BasicAuthentication]\n permission_classes = [IsGetOrIsAuthenticated]\n\n def get_queryset(self):\n queryset = None\n if self.request.user.is_authenticated:\n queryset = Photo.objects.filter(\n Q(privacyType=0) | Q(crtUser=self.request.user)\n )\n else:\n queryset = Photo.objects.filter(privacyType=0)\n albumId = self.request.query_params.get('albumId', None)\n crtUser = self.request.query_params.get('crtUser', None)\n\n if albumId is not None:\n album = Album.objects.get(id=albumId)\n queryset = queryset.filter(albumId=album)\n\n if crtUser is not None:\n crtUser = int(crtUser)\n queryset = queryset.filter(crtUser=crtUser).order_by('-crtTime')\n\n return queryset\n\n def destroy(self, request, *args, **kwargs):\n user = request.user\n photo = Photo.objects.get(id=kwargs['pk'])\n if photo.crtUser == user:\n photo.delete()\n return HttpResponse('success', status=200)\n else:\n raise PermissionDenied('You are not owner of the photo')\n","sub_path":"PhotoGallery/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"344033077","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('C:\\\\Users\\\\thuan\\\\Desktop\\\\Aivivn\\\\csv\\\\normalized_train.csv')\n\n# Divide by zone\nzone1 = df[df['ZONE'] == 1]\n\nzone1_2017 = zone1\nzone1_2017[\"label\"] = [i for i in range(len(zone1_2017))]\nprint(zone1_2017)\n# Figure\nfig = plt.figure()\n\nbandwidth_graph = fig.add_subplot(211)\nbandwidth_graph.set_title(\"Bandwidth\")\nbandwidth_graph.bar(zone1_2017['label'], zone1_2017['BANDWIDTH_TOTAL'])\n\nmax_user_graph = fig.add_subplot(212)\nmax_user_graph.set_title(\"Max user\")\nmax_user_graph.bar(zone1_2017['label'], zone1_2017['MAX_USER'])\n\nplt.show()\n","sub_path":"preprocess-data/visualize_data.py","file_name":"visualize_data.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"210694694","text":"from os import system\r\nfrom time import sleep\r\nimport pygame\r\nimport random\r\npygame.init()\r\nwin = pygame.display.set_mode((500, 500))\r\npygame.display.set_caption(\"Snake1\")\r\nclock = pygame.time.Clock()\r\n\r\nclass food(object):\r\n def __init__(self, x1, y1, width1, height1, color):\r\n self.x1 = x1\r\n self.y1 = y1\r\n self.width1 = 
width1\r\n self.height1 = height1\r\n self.color = color\r\n def draw(self, win):\r\n pygame.draw.rect(win, self.color, (self.x1, self.y1, self.width1, self.height1))\r\n def redraw(self, win):\r\n pygame.draw.rect(win, self.color, (self.x1, self.y1, 100, 100))\r\n\r\nx = 300\r\ny = 300\r\nvel = 100\r\nlength = 1\r\nScore = 0\r\n\r\nwin.fill((205, 250, 250))\r\nfoods = []\r\nrun = True\r\nfoods.append(food(100, 100, 100, 100, (0, 0, 0)))\r\nwhile run:\r\n pygame.time.delay(150)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n import random\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_LEFT] and x > 0:\r\n x -= vel\r\n elif keys[pygame.K_RIGHT] and x < 400:\r\n x += vel\r\n elif keys[pygame.K_UP] and y > 0:\r\n y -= vel\r\n elif keys[pygame.K_DOWN] and y < 400:\r\n y += vel\r\n win.fill((205, 250, 250))\r\n pygame.draw.rect(win, (50, 205, 50), (x, y, 100, 100))\r\n # pygame.draw.rect(win, (50, 205, 50), (x - 100, y, 100, 100))\r\n #pygame.draw.rect(win, (0, 0, 255), (random.random() * 1000, random.random() * 500, 15, 15))\r\n for food in foods:\r\n food.draw(win)\r\n if x == food.x1 and y == food.y1:\r\n Score += 100\r\n if x == food.x1 and y == food.y1:\r\n index = length\r\n length += 1\r\n\r\n if x == food.x1 and y == food.y1:\r\n food.x1 = (random.randint(0, 4) * 100)\r\n food.y1 = (random.randint(0, 4) * 100)\r\n food.redraw(win)\r\n # pygame.draw(win, print(\"Score = \" + Score))#print(\"Score :\" + Score)\r\n # pygame.font.init()\r\n # myfont = pygame.font.SysFont('Aerial Black', 20)\r\n # textsurface = myfont.render('Score =' + Score, False, (0, 0, 0))\r\n # win.blit(textsurface, (590, 990))\r\n print(Score)\r\n print(length)\r\n pygame.display.update()\r\npygame.quit()","sub_path":"snake1.py","file_name":"snake1.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"445286667","text":"import time\nfrom concurrent import futures\nfrom micro.proto.grpc import aquaman_pb2 # import PocResponse, ResolvResponse, AnlsResponse, LocResponse, DetlResponse, AlvResponse\nfrom micro.proto.grpc.aquaman_pb2_grpc import add_DomainServicer_to_server, DomainServicer, grpc\nfrom micro.proto.grpc.aquaman_pb2_grpc import add_VulServicer_to_server, VulServicer\nfrom micro.proto.grpc.aquaman_pb2_grpc import add_HostServicer_to_server, HostServicer\nfrom micro.proto.grpc.aquaman_pb2_grpc import add_WebScrapServicer_to_server, WebScrapServicer\nfrom micro.handler.grpc_registry import GRPCServiceBase\nfrom modules.verify import Pocsuite\nfrom modules.domain_brute import resolution, DomainBrute, whois_query\nfrom modules.ip_discern import getipinfo\nfrom modules.pynmap import NmapScanner\nfrom modules.web_scanner import WebScanner\nfrom modules.hydra_brute import HydraScanner\nfrom modules.trap_scanner import TrapScanner\n\n\nclass HostUtils(HostServicer):\n def Location(self, request, context):\n resp = getipinfo(request.ip)\n for i in range(5):\n if resp:\n return aquaman_pb2.LocResponse(area=resp['area'], isp=resp['isp'], gps=resp['gps'])\n else:\n time.sleep(1)\n resp = getipinfo(request.ip)\n return aquaman_pb2.LocResponse()\n\n def Detail(self, request, context):\n ports_val = \",\".join(request.ports)\n scanner = NmapScanner()\n resp = scanner.get_detail(request.ip, ports_val)\n if not resp:\n return aquaman_pb2.DetlResponse()\n\n array = []\n for item in resp['portinfo_list']:\n array.append({\n 'port': str(item['port']), 'name': item['name'], 'state': 
item['state'], 'product': item['product'],\n 'version': item['version'], 'extrainfo': item['extrainfo'], 'conf': item['conf'], 'cpe': item['cpe'],\n })\n print(resp)\n # return DetlResponse(os=\"zan71.com\", vendor=\"linux\", array=[])\n return aquaman_pb2.DetlResponse(os=resp[\"os\"], vendor=resp[\"vendor\"], array=array)\n\n def Alive(self, request, context):\n scanner = NmapScanner()\n resp = scanner.get_alive(request.net)\n return aquaman_pb2.AlvResponse(hosts=resp)\n\n\nclass DomainUtils(DomainServicer):\n def Resolv(self, request, context):\n resp = resolution(request.domain)\n if not resp:\n return aquaman_pb2.ResolvResponse()\n return aquaman_pb2.ResolvResponse(ip=resp[request.domain][0])\n\n def Analysis(self, request, context):\n print(request.domain, type(request.domain_dict))\n # 1. Brute-force subdomains with the dictionary\n resp = DomainBrute(request.domain, request.domain_dict).resolver()\n array = []\n for item in resp:\n array.append(list(item.keys())[0].split(\".\")[0])\n\n # 2. WHOIS lookup\n resp = whois_query(request.domain)\n if not resp:\n return aquaman_pb2.AnlsResponse()\n return aquaman_pb2.AnlsResponse(\n registrar=resp['registrar'], register_date=resp['creationDate'],\n name_server=','.join(resp['nameServer']), domain_server=resp['registrarWHOISServer'],\n status=','.join(resp['domainStatus']), subdomain_list=','.join(array)\n )\n\n\nclass WebUtils(WebScrapServicer):\n def Spider(self, request, context):\n ws = WebScanner(request.host, request.port)\n resp = ws.Run()\n if not resp:\n return aquaman_pb2.SpiResponse()\n return aquaman_pb2.SpiResponse(\n start_url=resp['start_url'], title=resp['title'], server=resp['server'],\n content_type=resp['content_type'], login_list=resp['login_list'],\n upload_list=resp['upload_list'], sub_domain=resp['sub_domain'],\n route_list=resp['route_list'], resource_list=resp['resource_list'],\n )\n\n\n# Implementation of the vulnerability RPC service interface\nclass VulUtils(VulServicer):\n def Hydra(self, request, context):\n print(request.service, request.args, request.target_list, request.username_list, request.password_list)\n hs = HydraScanner(request.service, request.args, request.target_list, request.username_list, request.password_list)\n array = hs.run()\n # one service may yield multiple sets of credentials\n result = []\n for item in array:\n result.append({ # target is the IP only\n 'target': item['target'], 'service': item['service'], 'username': item['username'],\n 'password': item['password'], 'command': item['command']\n })\n return aquaman_pb2.AuthResponse(array=result)\n\n def Trap(self, request, context):\n ts = TrapScanner(target=request.target_list, trap_id=request.trap_id, plugin_text=request.plugin_text)\n resp = ts.run() # [{'verify': 'Non randomized features: version=1.4.25'}]\n result = []\n for item in resp:\n result.append({'verify': item['verify']})\n return aquaman_pb2.TrapResponse(array=result)\n\n def Verify(self, request, context):\n result = {\n 'verify_url': \"\",\n 'verify_payload': \"\",\n 'verify_result': \"\",\n 'exploit_url': \"\",\n 'exploit_payload': \"\",\n 'exploit_result': \"\",\n 'webshell_url': \"\",\n 'webshell_payload': \"\",\n 'webshell_result': \"\",\n 'trojan_url': \"\",\n 'trojan_payload': \"\",\n 'trojan_result': \"\"\n }\n # print(\"Recv Data From Client, Data: \", re.sub(r\"[\\n\\r\\t]\", \",\", \"{}\".format(request)))\n\n # request.command\n p = Pocsuite(target=request.target, vul_id=request.vul_id, poc_content=request.poc_content, asset_id=request.asset_id)\n # p = Pocsuite(request.target, request.poc_plugins, request.asset_id)\n resp = p.Verify(request.exploit)\n\n if not resp:\n return 
aquaman_pb2.PocResponse()\n if 'VerifyInfo' in resp.keys():\n result['verify_url'] = resp['VerifyInfo']['URL']\n result['verify_payload'] = resp['VerifyInfo']['PostData']\n result['verify_result'] = resp['VerifyInfo']['Result']\n if 'ExploitInfo' in resp.keys():\n result['exploit_url'] = resp['ExploitInfo']['URL']\n result['exploit_payload'] = resp['ExploitInfo']['PostData']\n result['exploit_result'] = resp['ExploitInfo']['Result']\n if 'WebshellInfo' in resp.keys():\n result['webshell_url'] = resp['WebshellInfo']['URL']\n result['webshell_payload'] = resp['WebshellInfo']['PostData']\n result['webshell_result'] = resp['WebshellInfo']['Result']\n if 'TrojanInfo' in resp.keys():\n result['trojan_url'] = resp['TrojanInfo']['URL']\n result['trojan_payload'] = resp['TrojanInfo']['PostData']\n result['trojan_result'] = resp['TrojanInfo']['Result']\n return aquaman_pb2.PocResponse(\n verify_url=result['verify_url'],\n verify_payload=result['verify_payload'],\n verify_result=result['verify_result'],\n exploit_url=result['exploit_url'],\n exploit_payload=result['exploit_payload'],\n exploit_result=result['exploit_result'],\n webshell_url=result['webshell_url'],\n webshell_payload=result['webshell_payload'],\n webshell_result=result['webshell_result'],\n trojan_url=result['trojan_url'],\n trojan_payload=result['trojan_payload'],\n trojan_result=result['trojan_result'],\n )\n\n\n# Open an HTTP port and serve\nclass PocScanServer(GRPCServiceBase):\n def __init__(self, registry_host, registry_port, server_name, server_addr, server_port=None):\n super(PocScanServer, self).__init__(registry_host, registry_port, server_addr, server_port)\n self.server_name = server_name\n\n def ListenAndServer(self):\n if not self.server_port:\n print(\"Failed to generate service port.\")\n return\n # 1. Run the daemon\n print(\"listen server on [::]:{}\".format(self.server_port))\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n add_VulServicer_to_server(VulUtils(), server)\n add_DomainServicer_to_server(DomainUtils(), server)\n add_HostServicer_to_server(HostUtils(), server)\n add_WebScrapServicer_to_server(WebUtils(), server)\n server.add_insecure_port(\"[::]:{}\".format(self.server_port))\n server.start()\n # 2. Register the service\n # self.RegisterService(\"test_python3\", \"172.31.50.249\", self.server_port)\n self.RegisterService(self.server_name, self.server_addr, self.server_port)\n try:\n while True:\n time.sleep(100)\n except KeyboardInterrupt:\n print(\"stop\")\n server.stop(0)\n","sub_path":"micro/service/asset_scanner/handler/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":8800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"265384789","text":"# prompt = input(\"Enter your name: \")\n\n# filename = 'Part 1/chap 10/guest.txt'\n\n# with open(filename, 'w') as file_object:\n#     file_object.write(prompt.title() + \"\\n\")\n\nprompt = \"Enter your name: \"\nprompt += \"(Enter 'quit' to exit) \"\n\nfilename = 'Part 1/chap 10/guest.txt'\nactive = True\n\nwhile active:\n    name = input(prompt)\n\n    if name == 'quit':\n        active = False\n        break\n    else:\n        with open(filename, 'a') as file_object:\n            file_object.write(name.title() + \"\\n\")","sub_path":"Part 1/chap 10/guest.py","file_name":"guest.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"117627597","text":"from ScoutSpyder.crawlers.base_crawler import *\nfrom ScoutSpyder.utils.logging import initialise_logging\nfrom 
dateutil.parser import parse\nimport re\n\nLOGGER = initialise_logging(__name__)\n\nclass WashingtonPostCrawler(BaseCrawler):\n crawler_id = 'com.washingtonpost'\n requests_per_sec = 1\n start_url = [\n 'https://www.washingtonpost.com/business/technology/',\n 'https://www.washingtonpost.com/consumer-tech/',\n 'https://www.washingtonpost.com/news/innovations/'\n ]\n robots_url = 'https://www.washingtonpost.com/robots.txt'\n\n def __init__(self, downloaded_doc=None):\n super().__init__(downloaded_doc)\n self.blacklist_regex = [\n 'http[s]?://subscribe.washingtonpost.com(.*)',\n 'http[s]?://www.washingtonpost.com/(rss-)?terms-of-service(.*)',\n 'http[s]?://www.washingtonpost.com/privacy-policy(.*)'\n ]\n\n for suffix in self.blacklist_suffix:\n self.blacklist_regex.append('http[s]?://(.*)\\{}(.*)'.format(suffix))\n \n def insert_db_entries(self, db_entry):\n pass\n \n def extract_content(self):\n if self.valid_url and self.valid_body:\n self.text = re.sub('Read more:.*', '', self.text)\n self.has_content = True\n \n if self.has_content:\n publish_date = self.ld_json.get('datePublished')\n if publish_date is not None:\n self.publish_date = parse(publish_date)\n \n author = self.ld_json.get('author')\n if author is not None:\n if type(author) == list:\n self.authors = [x['name'] for x in author]\n else:\n self.authors = [author['name']]\n\n teaser_content = self.parsed_lxml.findall('.//div[@class=\"teaser-content\"]//p')\n if teaser_content:\n teaser = [x.text_content().strip() for x in teaser_content]\n self.text = '\\n\\n'.join(teaser + [self.text.strip()])","sub_path":"Backend/ScoutSpyder/crawlers/com_washingtonpost.py","file_name":"com_washingtonpost.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"184713262","text":"#!/usr/bin/env python\n\n\"\"\"\nDiff 2 json files containing one JSON object per line.\n\"\"\"\nimport argparse\nimport gzip\nimport json\n\n\ndef jsonGen(logFile):\n if logFile.endswith('.gz'):\n with gzip.open(logFile, 'r') as f:\n for line in f:\n yield line\n else:\n with open(logFile, 'r') as f:\n for line in f:\n yield line\n\n\ndef diff(file1, file2, outFileName1, outFileName2):\n jg1 = jsonGen(file1)\n jg2 = jsonGen(file2)\n\n with open(outFileName1, 'w') as out1, open(outFileName2, 'w') as out2:\n while True:\n try:\n l1 = next(jg1)\n except StopIteration:\n return\n\n try:\n l2 = next(jg2)\n except StopIteration:\n return\n\n if l1 != l2:\n #out1.write(json.dumps(json.loads(l1), allow_nan=True, indent=2))\n #out1.write(\"\\n\");\n #out2.write(json.dumps(json.loads(l2), allow_nan=True, indent=2))\n #out2.write(\"\\n\");\n\n #out1.write(l1)\n #out2.write(l2)\n w1 = l1.split(',')\n w2 = l2.split(',')\n for pr in zip(w1, w2):\n if pr[0] != pr[1]:\n out1.write(pr[0] + \"\\n\")\n out2.write(pr[1] + \"\\n\")\n out1.write(\"\\n\")\n out2.write(\"\\n\")\n\ndef main():\n parser = argparse.ArgumentParser(\"Audit log combiner\")\n parser.add_argument('--in1')\n parser.add_argument('--in2')\n parser.add_argument('--out1')\n parser.add_argument('--out2')\n\n args = parser.parse_args()\n diff(args.in1, args.in2, args.out1, args.out2)\n\nif __name__ == '__main__':\n main()\n","sub_path":"test/JsonDiff.py","file_name":"JsonDiff.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"363982158","text":"# coding:utf8\r\n\r\nfrom config import *\r\n\r\nimport struct\r\n\r\n# ELEDIR = 
'NWP_MCTR_001/CLDAS/PUB/'\r\nELEDIR = ELEDIRS.get('CLDAS')\r\n\r\n\r\nX,Y = np.mgrid[70.05:139.05:700j,15.05:59.05:440j]\r\nX,Y = X.T,Y.T\r\n\r\nX,Y = M(X,Y)\r\n\r\n\r\ndef readCLDAS(thetime,TYPE='CMP'):\r\n # thetime = datetime(2016,1,11,0)\r\n # TYPE = 'CMP' or 'FY2'\r\n thetime = thetime.strftime('%Y%m%d%H')\r\n fn = 'Z_SEVP_C_BABJ_*_P_SURF_CLI_CHN_MERGE_%s_PRE_HOUR_GRID_0.10-%s.grd' % (TYPE,thetime)\r\n fn_fullpath = os.popen('ls %s' % os.path.join(DATADIR, ELEDIR, fn)).read()\r\n fn_fullpath = fn_fullpath.replace('\\n','')\r\n f = open(fn_fullpath, 'rb')\r\n f.seek(0)\r\n data = struct.unpack('<616000f', f.read())\r\n data = np.reshape(data[:308000],(440,700))\r\n f.close()\r\n return data\r\n\r\n\r\ndef readCLDASperiod(startat,howlong):\r\n # startat = datetime(2016,1,11,0)\r\n # howlong = 23\r\n r = list(range(howlong))\r\n for i in r:\r\n thetime = startat + timedelta(hours=i+1)\r\n r[i] = readCLDAS(thetime)\r\n r = np.sum(r, axis=0)\r\n return r\r\n\r\n\r\ndef plotCLDAS(period,thedate,r,X=X,Y=Y,M=M_CN):\r\n # nodata = r <= 0.1\r\n # r = np.ma.array(r, mask=nodata)\r\n # 画填充图\r\n M.contourf(X,Y,r,cmap=CMAP,norm=NORM,levels=LEVELS)\r\n # 显示图例\r\n plt.rc('font', family='sans-serif', size=9)\r\n cbar = plt.colorbar(orientation='vertical',shrink=0.6)\r\n cbar.set_ticks(LEVELS)\r\n cbar.set_ticklabels(TICKLABELS)\r\n # 画海岸线\r\n M.drawcoastlines(linewidth=0.3)\r\n # 中国省界\r\n plt.hold(True)\r\n for p in PROVINCE:\r\n px,py = [[i[j] for i in p] for j in (0,1)]\r\n px,py = M(px,py)\r\n M.plot(px,py,'0.5',linewidth=0.3)\r\n # 保存图片\r\n png_file = 'CLDAS_%s_%s.png' % (period,thedate.strftime('%Y%m%d'))\r\n plt.savefig(png_file,bbox_inches='tight',pad_inches=0.3,dpi=200)\r\n plt.clf()\r\n\r\n\r\nif __name__ == '__main__':\r\n # data = readCLDAS(datetime(2016,1,10,0))\r\n r = readCLDASperiod(datetime(2016,1,10,0),23)\r\n plotCLDAS('00h24h',datetime(2016,1,10),r)\r\n ","sub_path":"CLDAS.py","file_name":"CLDAS.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"281822592","text":"#this is a script not a program\n\nimport numpy as np\nfrom math import *\nimport matplotlib.pyplot as plt\n\n\nl = 5\ng = 9.81\nm = 1\n\nt = np.arange(0,360)\n\nangle = t * 180 / pi\nx = sqrt(g/l)*angle\n\n\n\ncos = np.cos(x)\nsin = np.sin(x/2)\n\n\n\nkinetic = m *l *g* 1/2 * cos**2\npotential = m*l*g*(1 - cos)\nenergy = kinetic + potential\nplt.plot(t, potential, label='potential energy')\nplt.plot(t, kinetic, label='kinetic energy')\nplt.plot(t, energy, label='total energy')\nplt.legend()\nplt.show()\n\n","sub_path":"Problem_Sets/Problem Set 2/energy.py","file_name":"energy.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"535831714","text":"'''\n Defines the Preproc class for preprocessing the data, it will allow preprocessing pipeline (by prev_proc) and \n support cache preprocessed data \n'''\nfrom Constants import *\nimport pandas as pd\nimport os.path\n\nimport pickle\n \n\nclass Preproc():\n def __init__(self, prev_proc=None):\n self._prev_proc = prev_proc\n self._data = None \n\n def set_params(self, params):\n '''\n params should be a dict of keywords\n '''\n self.params = params\n \n def get_partial_data(self, length_train, length_test, start_train = 0, start_test = 0, override_cache=False, cols=[]):\n '''\n Should return all the train test after preprocessing\n Suggested output format is Pandas for the sake of 
chaining multiple processing pipelines\n \n if override_cache is set to True, cached data files are always ignored\n '''\n for i in [start_train, length_train, start_test, length_test]:\n if type(i) is not int:\n raise ValueError(\"The start and length of the partial data must be integer type.\")\n \n # first check memory\n if (not self._data):\n # second check disk cache file\n if self._if_cached() and (override_cache is False):\n self._load_cached()\n \n # otherwise do the dirty work and save for later\n else:\n if self._prev_proc is None:\n self._proc()\n self._save_cached()\n else:\n self._proc(** self._prev_proc.get_all_data())\n self._save_cached()\n \n # keep only wanted rows\n ret = dict()\n dic = self._data\n if length_train != -1:\n ret['train_X'] = dic[\"train_X\"].iloc[start_train : start_train + length_train]\n ret['train_y'] = dic[\"train_y\"].iloc[start_train : start_train + length_train]\n ret['test_X'] = dic[\"test_X\"].iloc[start_test : start_test + length_test]\n else:\n ret['train_X'] = dic[\"train_X\"].iloc[start_train :]\n ret['train_y'] = dic[\"train_y\"].iloc[start_train :]\n ret['test_X'] = dic[\"test_X\"].iloc[start_test :]\n \n if dic['test_y'] is None:\n ret['test_y'] = None\n elif length_test != -1:\n ret['test_y'] = dic[\"test_y\"].iloc[start_test : start_test + length_test]\n else:\n # -1 means \"take everything\"; slicing to start_test + (-1) would\n # silently drop the last row, so slice open-ended instead\n ret['test_y'] = dic[\"test_y\"].iloc[start_test :]\n \n # keep only wanted rows when cols are specified\n if cols:\n ret['train_X'] = ret['train_X'][cols]\n ret['test_X'] = ret['test_X'][cols]\n \n return ret\n \n def get_all_data(self, override_cache=False, cols=[]):\n '''\n To reduce code redundancy and debug responsibility, it calls get_partial_data with a trick:\n -1 means indexing everything\n '''\n start_train, start_test = 0, 0\n length_train, length_test = -1, -1\n return self.get_partial_data(length_train, length_test, start_train, start_test, override_cache, cols)\n########################\n### To be overridden \n \n def _generate_cache_fname(self):\n if not self._name:\n raise Exception(\"Preprocessing Class doesn't have a name for caching\")\n return (PATH_CACHED_DATA + str(self._name) + \"_cache.pkl\")\n \n def _save_cached(self):\n if not self._data:\n raise Exception(\"No available data in memory for caching to files\")\n with open(self._generate_cache_fname(), 'wb') as f:\n pickle.dump(self._data, f)\n \n def _load_cached(self):\n with open(self._generate_cache_fname(), 'rb') as f:\n self._data = pickle.load(f)\n \n def _if_cached(self):\n '''\n Return bool, True or False\n '''\n return os.path.isfile(self._generate_cache_fname()) \n \n##########################\n##### To be overridden\n\n # core function that needs to be defined for every custom preproc subclass \n def _proc(self, **kwargs):\n '''\n Return: Nothing, just store the result in self._data\n '''\n pass\n \n \n \n \n\n","sub_path":"models/BasePreproc.py","file_name":"BasePreproc.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"358936022","text":"import argparse\nimport os\nfrom array import array\n\nimport numpy as np\nimport pandas as pd\n\n\nvariables = [\n    \"TauJets.centFrac\", \"TauJets.etOverPtLeadTrk\",\n    \"TauJets.innerTrkAvgDist\", \"TauJets.absipSigLeadTrk\",\n    \"TauJets.SumPtTrkFrac\", \"TauJets.ChPiEMEOverCaloEME\",\n    \"TauJets.EMPOverTrkSysP\", \"TauJets.ptRatioEflowApprox\",\n    \"TauJets.mEflowApprox\"\n]\n\nexpressions = [\n    \"TMath::Min(TauJets.centFrac, 1.0)\",\n    \"TMath::Log10(TMath::Max(0.1, TauJets.etOverPtLeadTrk))\",\n    \"TauJets.innerTrkAvgDist\",\n    
\"TMath::Min(TauJets.absipSigLeadTrk, 30)\",\n \"TauJets.SumPtTrkFrac\",\n \"TMath::Max(-4, TMath::Min(TauJets.ChPiEMEOverCaloEME, 5))\",\n \"TMath::Log10(TMath::Max(0.01, TauJets.EMPOverTrkSysP))\",\n \"TMath::Min(TauJets.ptRatioEflowApprox, 4)\",\n \"TMath::Log10(TMath::Max(140, TauJets.mEflowApprox))\"\n]\n\ntransformations = {\n \"TauJets.centFrac\": lambda x: np.minimum(x, 1.0),\n \"TauJets.etOverPtLeadTrk\": lambda x: np.log10(np.maximum(0.1, x)),\n \"TauJets.innerTrkAvgDist\": lambda x: x,\n \"TauJets.absipSigLeadTrk\": lambda x: np.minimum(x, 30),\n \"TauJets.SumPtTrkFrac\": lambda x: x,\n \"TauJets.ChPiEMEOverCaloEME\": lambda x: np.maximum(-4, np.minimum(x, 5)),\n \"TauJets.EMPOverTrkSysP\": lambda x: np.log10(np.maximum(0.01, x)),\n \"TauJets.ptRatioEflowApprox\": lambda x: np.minimum(x, 4),\n \"TauJets.mEflowApprox\": lambda x: np.log10(np.maximum(140, x))\n}\n\n\ndef partial_dependence_tmva(sig, bkg, model, v1_idx, v2_idx, xy, transf=False):\n from ROOT import TMVA\n from root_numpy.tmva import evaluate_reader\n\n if transf:\n var = expressions\n else:\n var = variables\n\n X = np.concatenate([sig, bkg])\n\n reader = TMVA.Reader()\n for v in var:\n reader.AddVariable(v, array(\"f\", [0.]))\n reader.BookMVA(\"BDT method\", model)\n\n pd = []\n for x, y in xy:\n X[:, v1_idx] = x\n X[:, v2_idx] = y\n pred = evaluate_reader(reader, \"BDT method\", X)\n pd.append(np.mean(pred))\n\n return pd\n\n\ndef main(args):\n v1_idx = variables.index(args.var1)\n v2_idx = variables.index(args.var2)\n\n v1_sampling = np.linspace(args.x_range[0], args.x_range[1], args.x_bins)\n v2_sampling = np.linspace(args.y_range[0], args.y_range[1], args.y_bins)\n\n if args.transformed:\n branches = expressions\n else:\n branches = variables\n\n # Load data\n from root_numpy import root2array\n X_sig = root2array(args.sigf, treename=\"CollectionTree\", branches=branches)\n X_bkg = root2array(args.bkgf, treename=\"CollectionTree\", branches=branches)\n\n # Cast variables if needed\n dtype = [(fname, np.float32) for fname, ftype in X_sig.dtype.descr]\n X_sig = X_sig.astype(dtype, copy=False)\n X_bkg = X_bkg.astype(dtype, copy=False)\n\n X_sig = X_sig.view(np.float32).reshape(len(X_sig), -1)\n np.random.shuffle(X_sig)\n X_sig = X_sig[:args.num_events, :].copy()\n\n X_bkg = X_bkg.view(np.float32).reshape(len(X_bkg), -1)\n np.random.shuffle(X_bkg)\n X_bkg = X_bkg[:args.num_events, :].copy()\n\n # Grid for partial dependence plot\n xx, yy = np.meshgrid(v1_sampling, v2_sampling)\n grid = np.hstack([xx.reshape(-1, 1), yy.reshape(-1, 1)])\n\n if args.pbs_id is not None:\n lo = args.pbs_id * args.x_bins\n hi = lo + args.x_bins\n\n assert args.pbs_id < args.y_bins\n\n sl = np.s_[lo:hi, ...]\n else:\n sl = np.s_[...]\n\n # Apply trafo if needed\n if args.transformed:\n f1 = transformations[args.var1]\n f2 = transformations[args.var2]\n\n grid_trsf = grid.copy()\n grid_trsf[:, 0] = f1(grid[:, 0])\n grid_trsf[:, 1] = f2(grid[:, 1])\n\n res = partial_dependence_tmva(X_sig, X_bkg, args.model, v1_idx, v2_idx,\n grid_trsf[sl], transf=True)\n else:\n res = partial_dependence_tmva(X_sig, X_bkg, args.model, v1_idx, v2_idx,\n grid[sl], transf=False)\n\n df = pd.DataFrame({\"x\": grid[sl][:, 0], \"y\": grid[sl][:, 1], \"pd\": res})\n df.to_csv(args.out, index=False)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"sigf\")\n parser.add_argument(\"bkgf\")\n parser.add_argument(\"model\")\n parser.add_argument(\"var1\")\n parser.add_argument(\"var2\")\n 
parser.add_argument(\"--transformed\", action=\"store_true\")\n parser.add_argument(\"--num-events\", type=int, default=5000)\n parser.add_argument(\"--x-range\", nargs=2, type=float, default=[0.0, 1.0])\n parser.add_argument(\"--y-range\", nargs=2, type=float, default=[0.0, 1.0])\n parser.add_argument(\"--x-bins\", type=int, default=100)\n parser.add_argument(\"--y-bins\", type=int, default=100)\n parser.add_argument(\"-o\", dest=\"out\", default=\"pd_2d_result.csv\")\n parser.add_argument(\"--pbs-id\", type=int, help=\"Maximum same as number of \"\n \"y-bins\")\n\n args = parser.parse_args()\n main(args)\n","sub_path":"scripts/plots/partial_dependence/partial_dependence_2d.py","file_name":"partial_dependence_2d.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"511851256","text":" # Trying to define the simplest possible neural net where the output layer of the neural net is a single\n # neuron with a \"continuous\" (a.k.a floating point) output. I want the neural net to output a continuous\n # value based off one or more continuous inputs. My real problem is more complex, but this is the simplest\n # representation of it for explaining my issue. Even though I've oversimplified this to look like a simple\n # linear regression problem (y=m*x), I want to apply this to more complex neural nets. But if I can't get\n # it working with this simple problem, then I won't get it working for anything more complex.\n\n\n#https://stackoverflow.com/questions/38319898/tensorflow-neural-net-with-continuous-floating-point-output\n\nimport tensorflow as tf\nimport random\nimport numpy as np\n\nINPUT_DIMENSION = 1\nOUTPUT_DIMENSION = 1\nTRAINING_RUNS = 10000\nBATCH_SIZE = 50\nVERF_SIZE = 1\n\n\n# Generate two arrays, the first array being the inputs that need trained on, and the second array containing outputs.\ndef generate_test_point():\n x = random.uniform(-1, 1)\n\n # To keep it simple, output is just -x. \n out = -x\n\n return ( np.array([ x ]), np.array([ out ]) )\n\n # Generate a bunch of data points and then package them up in the array format needed by\n # tensorflow\ndef generate_batch_data( num ):\n xs = []\n ys = []\n for i in range(num):\n x, y = generate_test_point()\n xs.append( x )\n ys.append( y )\n\n return (np.array(xs), np.array(ys) )\n\n # Define a single-layer neural net. Originally based off the tensorflow mnist for beginners tutorial\n\n # Create a placeholder for our input variable\nx = tf.placeholder(tf.float32, [None, INPUT_DIMENSION])\n\n # Create variables for our neural net weights and bias\nW = tf.Variable(tf.zeros([INPUT_DIMENSION, OUTPUT_DIMENSION]))\nb = tf.Variable(tf.zeros([OUTPUT_DIMENSION]))\n\n # Define the neural net. Note that since I'm not trying to classify digits as in the tensorflow mnist\n # tutorial, I have removed the softmax op. 
My expectation is that 'net' will return a floating point\n # value.\nnet = tf.matmul(x, W) + b\n\n # Create a placeholder for the expected result during training\nexpected = tf.placeholder(tf.float32, [None, OUTPUT_DIMENSION])\n\n # Same training as used in mnist example\n#loss = tf.reduce_mean(tf.abs(expected - net))\nloss = tf.reduce_mean(tf.abs(expected - net)) \ntrain_step = tf.train.GradientDescentOptimizer(0.00001).minimize(loss)\n\nsess = tf.Session()\n\ninit = tf.initialize_all_variables()\nsess.run(init)\n\n # Perform our training runs\n\nfor i in range( TRAINING_RUNS ):\n print (\"trainin run:\", i)\n\n batch_inputs, batch_outputs = generate_batch_data( BATCH_SIZE )\n\n # I've found that my weights and bias values are always zero after training, and I'm not sure why.\n sess.run( train_step, feed_dict={x: batch_inputs, expected: batch_outputs})\n\n # Test our accuracy as we train... I am defining my accuracy as the error between what I \n # expected and the actual output of the neural net.\n accuracy = tf.reduce_mean(tf.subtract( expected, net)) \n #accuracy = tf.subtract( expected, net) # using just subtract since I made my verification size 1 for debug\n \n \n # Uncomment this to debug\n #import pdb; pdb.set_trace()\n\n batch_inputs, batch_outputs = generate_batch_data( VERF_SIZE )\n result = sess.run(accuracy, feed_dict={x: batch_inputs, expected: batch_outputs})\n\n print (\" progress: \")\n print (\" inputs: \", batch_inputs)\n print (\" outputs:\", batch_outputs)\n print (\" actual: \", result)\n","sub_path":"K0ribo/stackover1.py","file_name":"stackover1.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"247929779","text":"import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nimport plotly.express as px\r\nimport pandas as pd\r\nfrom connection import Connection\r\nimport bucaramangaSQL as sql\r\n\r\nexternal_stylesheets = [\"https://cdn.jsdelivr.net/npm/bootstrap@5.0.0-beta3/dist/css/bootstrap.min.css\"]\r\n\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\n\r\ncon = Connection()\r\n\r\n# Costos totales\r\ncon.openConnection()\r\nquery_c = pd.read_sql_query(sql.CostosTotales(), con.connection)\r\ncon.closeConnection()\r\n\r\n# Numero de accidentes\r\ncon.openConnection()\r\nquery_n = pd.read_sql_query(sql.NumeroAccidentes(), con.connection)\r\ncon.closeConnection()\r\n\r\n\r\n# Accidentes por genero\r\ncon.openConnection()\r\nquery = pd.read_sql_query(sql.VictimasPorGenero(), con.connection)\r\ncon.closeConnection()\r\ndfGenero = pd.DataFrame(query, columns=[\"sexo\", \"accidentes\"])\r\nfigPieGenero = px.pie(dfGenero, values=\"accidentes\", names=\"sexo\")\r\n\r\n# Accidentes por grupo etario\r\ncon.openConnection()\r\nquery = pd.read_sql_query(sql.AccidentesPorGrupo(), con.connection)\r\ncon.closeConnection()\r\ndfGrupo = pd.DataFrame(query, columns=[\"grupo_etario\", \"accidentes\"])\r\nfigPieGrupo = px.pie(dfGrupo, values=\"accidentes\", names=\"grupo_etario\")\r\n\r\n\r\n# Costos por EPS\r\ncon.openConnection()\r\nquery = pd.read_sql_query(sql.CostosPorEPS(), con.connection)\r\ncon.closeConnection()\r\ndfEPS = pd.DataFrame(query, columns=[\"nombre_eps\", \"costo_total\"])\r\nfigBarEps = px.bar(dfEPS.tail(20), y=\"nombre_eps\", x=\"costo_total\", orientation='h')\r\n\r\n\r\n# Costos por EPS por año\r\ncon.openConnection()\r\nquery = pd.read_sql_query(sql.CostosAnualesPorEPS(), 
con.connection)\r\ncon.closeConnection()\r\ndfEPS_anio = pd.DataFrame(query, columns=[\"nombre_eps\",\"costo_total\", \"2018\", \"2019\", \"2020\", \"2021\"])\r\nfigBarEpsAnios = px.bar(dfEPS_anio.tail(20), y=\"nombre_eps\", x=[\"2018\", \"2019\", \"2020\", \"2021\"], orientation='h') \r\n\r\n\r\n# Accidents by vehicle\r\ncon.openConnection()\r\nquery = pd.read_sql_query(sql.AccidentesPorVehiculo(), con.connection)\r\ncon.closeConnection()\r\ndfVeh = pd.DataFrame(query, columns=[\"nombre\", \"accidentes\"])\r\nfigBarVeh = px.bar(dfVeh.head(11), x =\"nombre\", y=\"accidentes\")\r\n\r\n\r\n# Accidents by vehicle and gender\r\ncon.openConnection()\r\nquery = pd.read_sql_query(sql.AccidentesVehiculoGenero(), con.connection)\r\ncon.closeConnection()\r\ndfVehGen = pd.DataFrame(query, columns=[\"nombre\", \"accidentes\", \"hombres\", \"mujeres\"])\r\nfigBarVehGen = px.bar(dfVehGen.head(11), x =\"nombre\", y=[\"hombres\", \"mujeres\"], barmode=\"group\")\r\n\r\n\r\n# Total accidents per day\r\ncon.openConnection()\r\nquery = pd.read_sql_query(sql.AccidentesPorDia(), con.connection)\r\ncon.closeConnection()\r\ndfDia = pd.DataFrame(query, columns=[\"dia\",\"total_acc\", \"2018\", \"2019\", \"2020\", \"2021\"])\r\nfigBarDia = px.bar(dfDia.head(7), x=\"dia\", y=\"total_acc\")\r\n\r\n\r\n# Number of accidents per day across the 4 years\r\ncon.openConnection()\r\nquery = pd.read_sql_query(sql.AccidentesPorDia(), con.connection)\r\ncon.closeConnection()\r\ndfDia_anio = pd.DataFrame(query, columns=[\"dia\",\"total_acc\", \"2018\", \"2019\", \"2020\", \"2021\"])\r\nfigBarDiaAnios = px.bar(dfDia_anio, x=\"dia\", y=[\"2018\", \"2019\", \"2020\", \"2021\"])\r\n\r\n\r\n# Layout\r\napp.layout = html.Div(children=[\r\n    html.H1(children=\"Accidentalidad Bucaramanga Dashboard\", className=\"text-center\"),\r\n    \r\n    html.Div(className=\"container-fluid\", children=[\r\n        html.Div(className=\"row mt-4\", children=[\r\n            html.Div(className=\"col-12 col-xl-6\", children=[\r\n                html.Div(className=\"alert alert-dark\", children=[\r\n                    html.H3(children=[\"Costos totales: \" + str(query_c[\"sum\"].values[0])]),\r\n                ]),\r\n            ]),\r\n            html.Div(className=\"col-12 col-xl-6\", children=[\r\n                html.Div(className=\"alert alert-dark\", children=[\r\n                    html.H3(children=[\"Numero de accidentes: \" + str(query_n[\"count\"].values[0])]),\r\n                ]),\r\n            ]),\r\n        ]),\r\n        html.Div(className=\"row mt-4\", children=[\r\n            html.Div(className= \"col-12 col-xl-6\", children=[\r\n                html.Div(className=\"card border-primary\", children=[\r\n                    html.Div(className=\"card-header bg-primary text-light\", children=[\r\n                        html.H3(children=\"Accidentes por género\"),\r\n                    ]),\r\n                    html.Div(className=\"card-body\", children=[\r\n                        dcc.Graph(\r\n                            id=\"pieAccidentesPorGenero\",\r\n                            figure=figPieGenero\r\n                        ),\r\n                    ]),\r\n                ]),\r\n            ]),\r\n            html.Div(className=\"col-12 col-xl-6\", children=[\r\n                html.Div(className=\"card border-primary\", children=[\r\n                    html.Div(className=\"card-header bg-primary text-light\", children=[\r\n                        html.H3(children=\"Accidentes por grupo etario\"),\r\n                    ]),\r\n                    html.Div(className=\"card-body\", children=[\r\n                        dcc.Graph(\r\n                            id=\"pieAccidentesPorGrupo\",\r\n                            figure = figPieGrupo\r\n                        ),\r\n                    ]),\r\n                ]),\r\n            ]),\r\n        ]),\r\n        html.Div(className=\"row mt-4\", children=[\r\n            html.Div(className=\"col-12 col-xl-12\", children=[\r\n                html.Div(className=\"card border-primary\", children=[\r\n                    html.Div(className=\"card-header bg-primary text-light\", children=[\r\n                        html.H3(children=\"Costos por EPS\"),\r\n                    ]),\r\n                    html.Div(className=\"card-body\", children=[\r\n                        
dcc.Graph(\r\n id=\"BarCostosEps\",\r\n figure = figBarEps\r\n ),\r\n ]),\r\n ]),\r\n ]),\r\n ]),\r\n html.Div(className=\"row mt-4\", children=[\r\n html.Div(className=\"col-12 col-xl-12\", children=[\r\n html.Div(className=\"card border-primary\", children=[\r\n html.Div(className=\"card-header bg-primary text-light\", children=[\r\n html.H3(children=\"Costos de EPS por año\"),\r\n ]),\r\n html.Div(className=\"card-body\", children=[\r\n dcc.Graph(\r\n id=\"BarCostosEpsPorAño\",\r\n figure = figBarEpsAnios\r\n ),\r\n ]),\r\n ]),\r\n ]),\r\n ]),\r\n html.Div(className=\"row mt-4\", children=[\r\n html.Div(className=\"col-12 col-xl-6\", children=[\r\n html.Div(className=\"card border-primary\", children=[\r\n html.Div(className=\"card-header bg-primary text-light\", children=[\r\n html.H3(children=\"Accidentes por vehiculo\"),\r\n ]),\r\n html.Div(className=\"card-body\", children=[\r\n dcc.Graph(\r\n id=\"Vehiculos\",\r\n figure = figBarVeh\r\n ),\r\n ]),\r\n ]),\r\n ]),\r\n html.Div(className=\"col-12 col-xl-6\", children=[\r\n html.Div(className=\"card border-primary\", children=[\r\n html.Div(className=\"card-header bg-primary text-light\", children=[\r\n html.H3(children=\"Accidentes por vehiculo y género\"),\r\n ]),\r\n html.Div(className=\"card-body\", children=[\r\n dcc.Graph(\r\n id=\"Vehiculos y genero\",\r\n figure = figBarVehGen\r\n ),\r\n ]),\r\n ]),\r\n ]),\r\n ]),\r\n html.Div(className=\"row mt-4\", children=[\r\n html.Div(className=\"col-12 col-xl-6\", children=[\r\n html.Div(className=\"card border-primary\", children=[\r\n html.Div(className=\"card-header bg-primary text-light\", children=[\r\n html.H3(children=\"Accidentes por dia\"),\r\n ]),\r\n html.Div(className=\"card-body\", children=[\r\n dcc.Graph(\r\n id=\"BarAccidentesDia\",\r\n figure = figBarDia\r\n ),\r\n ]),\r\n ]),\r\n ]),\r\n html.Div(className=\"col-12 col-xl-6\", children=[\r\n html.Div(className=\"card border-primary\", children=[\r\n html.Div(className=\"card-header bg-primary text-light\", children=[\r\n html.H3(children=\"Accidentes por dia por año\"),\r\n ]),\r\n html.Div(className=\"card-body\", children=[\r\n dcc.Graph(\r\n id=\"BarAccidentesDiaPorAño\",\r\n figure = figBarDiaAnios\r\n ),\r\n ]),\r\n ]),\r\n ]),\r\n ]),\r\n ]), \r\n ])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run_server(debug=True)\r\n","sub_path":"Codigo Python/accidentes_dashboard.py","file_name":"accidentes_dashboard.py","file_ext":"py","file_size_in_byte":9430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"186667325","text":"# GIL - The global interpreter lock.\n\n# Change code into processing pool.\n\npool = None\n\ndef some_word(args):\n\t...\n\treturn result\n\ndef some_thread():\n\twhile True:\n\t\t...\n\t\tr = pool.apply(some_work, (args))\n\t\t...\n\t\nif __name__ == '__main__':\n\timport multiprocessing\n\tpool = multiprocessing.Pool()\n","sub_path":"12_concurrency/9_avoid_GIL/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"391969745","text":"##created by feecoding\n\nch=list()\nn=int(input(\"Type the size of your list: \"))\nfor i in range(n):\n print(\"type a string \",i+1,':')\n ch.append(input())\ncompt=0\nfor i in range(n):\n if(ch[i][0]=='A'):\n compt=compt+1\nprint(compt)\n","sub_path":"Lists/Program 10.py","file_name":"Program 
10.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"140981782","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n\nimg = cv2.imread('sediments.jpg')\nr = 500.0/img.shape[1]\ndim =(500, int(img.shape[0] * r))\nimg = cv2.resize(img, dim, interpolation= cv2.INTER_AREA)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n#create the thresholded image\n_, thresh = cv2.threshold(gray, 125,255, 0)\n\n#blur the edges so that we can get rid of some noise\nthresh = cv2.GaussianBlur(thresh,(5,5), 0)\n#thresh = 255-thresh\n#cv2.imshow('thresh', thresh)\n\n\n#########################################################################################################\n#erode the edges so that we get rid of some edge noise\nkernel = np.ones((5,5), np.uint8)\n\n\nerosion = cv2.erode(thresh, kernel, iterations = 2)\n#dilate the edges again to fill in holes created by erosion\ndilation = cv2.dilate(erosion, kernel, iterations = 2)\n\n\n# opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)\n# closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n\n_, contours, _ = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n#########################################################################################################\n\n\n# create a copy of the image\nimgcopy = img\n\n\n#draw the contours one by one in a loop\nfor x in range(0,len(contours)):\n if x%2==0:\n cv2.drawContours(img, contours, x, (0, 255, 0), 2)\n else:\n cv2.drawContours(img, contours, x, (255, 0, 255), 2)\n\n cv2.destroyAllWindows()\n cv2.imshow('img', img)\n print('Area') #Print information about specific contours\n print(cv2.contourArea(contours[x]))\n print('Perimeter')\n print(cv2.arcLength(contours[x], True))\n\n #contour approximation\n epsilon = 0.5*cv2.arcLength(contours[x], True)\n approx = cv2.approxPolyDP(contours[x], epsilon, True)\n cv2.drawContours(imgcopy, approx, -1, (255, 0, 0), 2)\n cv2.imshow('approximation', imgcopy)\n\n cv2.waitKey(0)","sub_path":"ContoursAttempt2.py","file_name":"ContoursAttempt2.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"501930501","text":"class Variables:\n \"\"\"A class that keeps track of which variables that are defined in all\n branches, so that they can be used once they converges.\n\n \"\"\"\n\n def __init__(self):\n self._first_add = True\n self._local_variables = {}\n\n def add_branch(self, variables):\n \"\"\"Add all variables defined in a branch. 
Should be called once for\n each branch.\n\n \"\"\"\n\n if self._first_add:\n for name, info in variables.items():\n self._local_variables[name] = info\n\n self._first_add = False\n else:\n to_remove = []\n\n for name, info in self._local_variables.items():\n new_info = variables.get(name)\n\n if new_info is None or new_info != info:\n to_remove.append(name)\n\n for name in to_remove:\n self._local_variables.pop(name)\n\n def defined(self):\n \"\"\"A dictionary of all variables found in all branches.\n\n \"\"\"\n\n return self._local_variables\n","sub_path":"mys/transpiler/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"287824893","text":"from django.contrib import admin\nfrom .models import Services, Category\n\n\n# Customizing Services module in Admin Panel\nclass ServicesAdmin(admin.ModelAdmin):\n list_display = (\n 'service',\n 'category',\n 'has_hours',\n 'price',\n 'active',\n )\n\n ordering = ('service',)\n\n\n# Customizing Categories Module in Admin Panel\nclass CategoryAdmin(admin.ModelAdmin):\n list_display = (\n 'category',\n 'description',\n 'active',\n )\n\n ordering = ('category',)\n\n\n# Registering Category and Service Modules in Admin panel\"\n\nadmin.site.register(Services, ServicesAdmin)\nadmin.site.register(Category, CategoryAdmin)\n","sub_path":"services/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"622973686","text":"class Person:\n def __init__(self):\n self.name = \"Perry\"\n self._secret = \"Hello\" # Private attribute, shouldn't use outside of class\n self.__likes = \"I like something\" # We can't access using self.__likes, way to only available attribute in same class \n\np = Person()\n\nprint(p.name)\nprint(p._secret)\n#print(p.__likes)\n#print(dir(p))\nprint(p._Person__likes)\n","sub_path":"dunder.py","file_name":"dunder.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"457523862","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass Point(object):\n \"\"\"\n Create a single point. 
\n\n \"\"\"\n def __init__(self,x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return '({}, {})'.format(self.x, self.y)\n\n def __eq__(self, Q):\n return (self.x, self.y) == (Q.x, Q.y) \n \n############################\nclass EllipticCurve(object):\n \"\"\"Represents a single elliptic curve defined over a finite field.\n\n p must be prime, since we use the modular inverse to compute point\n addition.\n\n \"\"\"\n def __init__(self, a, b, p):\n self.a = a\n self.b = b\n self.p = p\n\n def has_point(self, x, y):\n return (y ** 2) % self.p == (x ** 3 + self.a * x + self.b) % self.p\n\n def __str__(self):\n return 'y^2 = x^3 + {}x + {}'.format(self.a, self.b)\n\ndef mod_inverse(a,n):\n \"\"\"Return the inverse of a mod n.\n\n n must be prime.\n\n >>> mod_inverse(42, 2017)\n 1969\n\n \"\"\"\n b = n\n if abs(b) == 0:\n return (1, 0, a)\n\n x1, x2, y1, y2 = 0, 1, 1, 0\n while abs(b) > 0:\n q, r = divmod(a, b)\n x = x2 - q * x1\n y = y2 - q * y1\n a, b, x2, x1, y2, y1 = b, r, x1, x, y1, y\n\n return x2 % n\n\ndef addPoint(p1,p2,p,a):\n if p1 == p2:\n S = ((3*p1.x + a)*mod_inverse(2*p1.y,p))%p\n X = (S**2 - 2*p1.x)%p\n Y = (S*(p1.x-X)-p1.y)%p\n C = Point(X,Y)\n return C\n else:\n S =((p2.y-p1.y)*mod_inverse(p2.x-p1.x,p))%p\n X = (S**2-p1.x-p2.x)%p\n Y = (S*(p1.x-X)-p1.y)%p\n C = Point(X,Y)\n return C \n\n############################\ndef main(x1,y1,x2,y2):\n E = EllipticCurve(2,1,5)\n A = Point(x1,y1)\n B = Point(x2,y2)\n C = addPoint(A,B,E.p,E.a)\n print(C)\n\nif __name__ == '__main__':\n main(0,1,1,3)","sub_path":"PointAdd.py","file_name":"PointAdd.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"342666839","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2018 qizai \n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\nThis file can be ran on server.\nrequire T1, T2, and gene list already exist.\n\"\"\"\n\nimport numba as nb\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport ipdb\nimport os\nimport time\nfrom numba import cuda, float64\nfrom common_io import load_dataFrame\n\n@nb.jit(nopython=True)\ndef fast_RWR_on_GGN(T1, T2, c, e_i):\n '''\n T1, T2: matrice inverion from pre_computation_step.\n c: restart probability.\n r_i = c* W * r_i + (1-c) e_i\n e_i: node i in query\n '''\n r_i = (1 - c) * ( T1 @ e_i + c * T2 @ e_i)\n return r_i\n\ndef get_smooth_mat(T1, T2, new_gene_list, c_rwr):\n\n len_of_gene = len(new_gene_list)\n smooth_mat_V = np.zeros((len_of_gene, len_of_gene))\n e_i = np.zeros(len_of_gene)\n\n t_s = time.time()\n print('start looping smooth matrix')\n for idx in range(len_of_gene):\n if idx % 1000 == 1:\n t_e = time.time()\n print('{} iteration cost time {:.4f}'.format(idx, t_e - t_s))\n\n e_i[idx] = 1\n r_i = fast_RWR_on_GGN(T1, T2, c_rwr, e_i) \n smooth_mat_V[:, idx] = r_i\n e_i = np.zeros(len_of_gene)\n\n return smooth_mat_V\n\n\n\nif __name__ == '__main__':\n root_path = '/data/jianhao/scRNA_seq/'\n # df_file = 'pandas_dataframe'\n # feature_file = 'df_feature_column'\n # feature_file = 'ensembl_gene_list'\n # p2df = os.path.join(root_path, df_file)\n\n part_k = 5\n c_rwr = 0.2\n\n # T1_file = os.path.join(root_path, 'fast_rwr_T1.npy')\n # T2_file = os.path.join(root_path, 'fast_rwr_T2.npy')\n gene_file = os.path.join(root_path, 'imputation_data', 'fast_rwr_gene_list')\n V_file = os.path.join(root_path, 'imputation_data', 'smooth_matrix_V.npy')\n X_df = os.path.join(root_path, 'pandas_dataframe')\n\n # 
T1 = np.load(T1_file)\n    # T2 = np.load(T2_file)\n    with open(gene_file, 'rb') as f:\n        new_gene_list = pickle.load(f)\n\n\n    #smooth_mat_V = get_smooth_mat(T1, T2, new_gene_list, c_rwr)\n    #smooth_file = os.path.join(root_path, 'smooth_matrix_V')\n    #np.save(smooth_file, smooth_mat_V)\n\n    smooth_matrix_V = np.load(V_file)\n    X, Y, feature_cols = load_dataFrame(X_df, new_gene_list)\n\n    # X, Y, feature_cols = load_dataFrame(p2df, new_gene_list)\n    print('start computing!')\n    smoothed_X = X @ smooth_matrix_V\n    print('done!')\n\n    df_smoothed_X = pd.DataFrame(data=smoothed_X, columns = feature_cols)\n    smoothed_gene_file = os.path.join(root_path, 'gene_in_smoothed_X')\n    with open(smoothed_gene_file, 'wb') as f:\n        pickle.dump(feature_cols, f)\n\n    df_smoothed_X['label'] = Y\n    smoothed_X_file = os.path.join(root_path, 'smoothed_dataframe_100_cells_per_cluster')\n    df_smoothed_X.to_pickle(smoothed_X_file)\n\n","sub_path":"code/simiclasso/calculate_smooth_matrix.py","file_name":"calculate_smooth_matrix.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"541640093","text":"import os\n\nimport cv2\nimport numpy as np\n\nminPlateRatio = 0.5 # minimum plate width-to-height ratio\nmaxPlateRatio = 6 # maximum plate width-to-height ratio\n# read lower_col_hsv from hmin,smin,vmin\n# read higher_col_hsv from hmax,smax,vmax\n# HSV color range for blue-background plates\nlower_blue_col_hsv = np.array([100, 40, 50])\nhigher_blue_col_hsv = np.array([140, 255, 255])\n\n# HSV color range for yellow-background plates\nlower_yellow_col_hsv = np.array([11, 43, 46])\nhigher_yellow_col_hsv = np.array([34, 255, 255])\n\n# HSV color range for black-background plates\nlower_black_col_hsv = np.array([0, 0, 0])\nhigher_black_col_hsv = np.array([180, 255, 46])\n\n# HSV color range for white-background plates\nlower_white_col_hsv = np.array([0, 0, 46])\nhigher_white_col_hsv = np.array([180, 30, 220])\n\n# HSV color range for green-background plates\nlower_green_col_hsv = np.array([35, 43, 46])\nhigher_green_col_hsv = np.array([77, 255, 255])\n\n\n# find rectangles that match the shape of a license plate\ndef findPlateNumberRegion(img):\n    region = []\n    # find the external contours\n    contours_img, contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n    print(\"contours length is :%s\" % (len(contours)))\n    # filter out contours with small area\n    list_rate = []\n    for i in range(len(contours)):\n        cnt = contours[i]\n        # compute the contour area\n        area = cv2.contourArea(cnt)\n        # ignore small areas\n        if area < 1000:\n            continue\n        # convert to the corresponding (minimum-area) rectangle\n        rect = cv2.minAreaRect(cnt)\n        # print(\"rect is:%s\" % {rect})\n        # convert the rectangle to box points and cast to int\n        box = np.int32(cv2.boxPoints(rect))\n        # compute height and width\n        height = abs(box[0][1] - box[2][1])\n        width = abs(box[0][0] - box[2][0])\n        # a plate's width/height ratio is normally between 2.7 and 5; two-row plates can fall below 2.5 and are not considered here\n        ratio = float(width) / float(height)\n        rate = getxyRate(cnt)\n        print(\"area\", area, \"ratio:\", ratio, \"rate:\", rate)\n        if ratio > maxPlateRatio or ratio < minPlateRatio:\n            continue\n        # meets the criteria, add it to the contour set\n        region.append(box)\n        list_rate.append(ratio)\n    index = getSatifyestBox(list_rate)\n    return region[index]\n\n# find the box most likely to be the plate\ndef getSatifyestBox(list_rate):\n    for index, key in enumerate(list_rate):\n        list_rate[index] = abs(key - 3)\n    print(list_rate)\n    index = list_rate.index(min(list_rate))\n    print(index)\n    return index\n\n\ndef getxyRate(cnt):\n    x_height = 0\n    y_height = 0\n    x_list = []\n    y_list = []\n    for location_value in cnt:\n        location = location_value[0]\n        x_list.append(location[0])\n        y_list.append(location[1])\n    x_height = max(x_list) - min(x_list)\n    y_height = max(y_list) - min(y_list)\n    return x_height * (1.0) / y_height * (1.0)\n\n\ndef location(file):\n    img = cv2.imread(file)\n    # convert the image to HSV color space\n    hsv_img = cv2.cvtColor(img, 
cv2.COLOR_BGR2HSV)\n    # collect all pixels of the HSV image that fall within the blue/yellow/black/white/green color ranges and binarize\n    mask_blue = cv2.inRange(hsv_img, lower_blue_col_hsv, higher_blue_col_hsv)\n    mask_yellow = cv2.inRange(hsv_img, lower_yellow_col_hsv, higher_yellow_col_hsv)\n    mask_black = cv2.inRange(hsv_img, lower_black_col_hsv, higher_black_col_hsv)\n    mask_white = cv2.inRange(hsv_img, lower_white_col_hsv, higher_white_col_hsv)\n    mask_green = cv2.inRange(hsv_img, lower_green_col_hsv, higher_green_col_hsv)\n    res = cv2.bitwise_and(img, img, mask=mask_blue+mask_yellow+mask_black+mask_white+mask_green)\n\n    # convert to grayscale\n    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)\n    # Gaussian blur: smooth the image to remove noise that would interfere with later processing\n    gaussian = cv2.GaussianBlur(gray, (3, 3), 0, 0, cv2.BORDER_DEFAULT)\n    # Sobel operator: the core of plate localization; horizontal edge detection to pick out the plate region\n    sobel = cv2.convertScaleAbs(cv2.Sobel(gaussian, cv2.CV_16S, 1, 0, ksize=3))\n    # further processing to strengthen the target region and weaken the background\n    ret, binary = cv2.threshold(sobel, 150, 255, cv2.THRESH_BINARY)\n\n\n    # closing operation: connects the target region into a whole, making contour extraction easier\n    element = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 5))\n    closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, element)\n    # opening operation: removes small noise specks\n    eroded = cv2.erode(closed, None, iterations=1)\n    dilation = cv2.dilate(eroded, None, iterations=1)\n\n    # find and filter rectangular regions that meet the criteria\n    region = findPlateNumberRegion(closed)\n    cv2.drawContours(img, [region], 0, (0, 255, 0), 2)\n    # save the matching region\n\n    cv2.imshow(\"img\", img)\n    cv2.waitKey(0)\n\ndef test_mul_imgs(path):\n    fileList = os.listdir(path)\n    for file in fileList:\n        try:\n            location(path + \"/\" + file)\n        except:\n            print(\"exception:\", file)\n\n\nif __name__ == '__main__':\n    file = r\"../data/demo/000003.jpg\"\n    location(file)","sub_path":"location/locate_v1.py","file_name":"locate_v1.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"121154934","text":"# Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).\n\n# For example:\n# Given binary tree [3,9,20,null,null,15,7],\n#     3\n#    / \\\n#   9  20\n#     /  \\\n#    15   7\n# return its bottom-up level order traversal as:\n# [\n#   [15,7],\n#   [9,20],\n#   [3]\n# ]\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\ndef levelOrderBottom(root):\n\tresult = []\n\tif not root:\n\t\treturn result\n\tqueue = []\n\tqueue.append(root)\n\twhile queue:\n\t\tnodes = []\n\t\tn = len(queue)\n\t\tfor _ in range(n):\n\t\t\tnode = queue.pop(0)\n\t\t\tnodes.append(node.val)\n\t\t\tif node.left:\n\t\t\t\tqueue.append(node.left)\n\t\t\tif node.right:\n\t\t\t\tqueue.append(node.right)\n\t\tresult.append(nodes)\n\treturn result[::-1]\n","sub_path":"july2020/solutions/day02_BinaryTreeLevelOrderTraversalII.py","file_name":"day02_BinaryTreeLevelOrderTraversalII.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"625131285","text":"#!/usr/bin/python3\n\nimport r2pipe\n\nr = r2pipe.open(\"bomb\")\n\nr.cmd(\"aa; ood phase5.txt\")\nr.cmd(\"dcu 0x8048d43; dr eax = 6\")\n\nr.cmd(\"db 0x8048d66\")\nvalues = {}\n\nfor c in \"abcdefghijklmnopqrstuvwxyz\":\n    r.cmd(\"dc\")\n    eax = r.cmd(\"dr eax\")\n    string = r.cmd(\"? 
\" + eax + \"~string\")\n r.cmd(\"dr eip = 0x8048d57\")\n print(\" > \" + c + \" -> \" + string.strip()[9:10])\n values[string.strip()[9:10]] = c\n\n\nprint(\"\\ngiants translates to:\")\nfor c in \"giants\":\n print(values[c], end='')\n\nprint(\"\")\n","sub_path":"phase5r2.py","file_name":"phase5r2.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272327304","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.forms import ModelForm\nfrom .models import (\n Teacher, QualificationType, TitleType, TeacherHonoraryTitle,\n EduBook, Faculty, Department, \n)\n\nfrom django.forms.formsets import formset_factory\nfrom django.forms.models import modelformset_factory\nfrom datetimewidget.widgets import DateWidget\n\nclass BSTextInput(forms.TextInput):\n def __init__(self, *args, **kwargs):\n attrs = {'class': 'form-control'}\n if kwargs.get('attrs'):\n attrs.update(kwargs['attrs'])\n kwargs['attrs'] =attrs \n super().__init__(*args, **kwargs)\n\n\nclass BSSelect(forms.Select):\n def __init__(self, *args, **kwargs):\n attrs = {'class': 'form-control'}\n if kwargs.get('attrs'):\n attrs.update(kwargs['attrs'])\n kwargs['attrs'] =attrs \n super().__init__(*args, **kwargs)\n\n\nclass BSNumberInput(forms.NumberInput):\n def __init__(self, *args, **kwargs):\n attrs = {'class': 'form-control'}\n if kwargs.get('attrs'):\n attrs.update(kwargs['attrs'])\n kwargs['attrs'] =attrs \n super().__init__(*args, **kwargs)\n\n\nforms.CharField.widget = BSTextInput\nforms.ModelChoiceField.widget = BSSelect\nforms.IntegerField.widget = BSNumberInput\nforms.ChoiceField.widget = BSSelect\n\n\nclass BaseForm(forms.ModelForm):\n class Meta: \n widgets = { \n 'date': DateWidget(usel10n=True, bootstrap_version=3)\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n queryset = self.fields['object_type'].queryset\n self.fields['object_type'].queryset = queryset.filter(is_active=True)\n \n\n\nclass RatingqhForm(forms.Form):\n qualification = forms.ModelChoiceField(label='Квалификация', \n queryset=QualificationType.objects.all(), \n required=False)\n \n\nclass TeacherListForm(forms.Form):\n surname = forms.CharField(label=\"Фамилия\", max_length=500, required=False)\n name = forms.CharField(label=\"Имя\", max_length=500, required=False)\n faculty = forms.ModelChoiceField(label = \"Факультет\", queryset = Faculty.objects.all(), required = False)\n department = forms.ModelChoiceField(label = \"Кафедра\", queryset = Department.objects.all(), required = False)\n\n\nclass TeacherChangeForm(forms.ModelForm):\n faculty = forms.ModelChoiceField(widget=forms.widgets.Select, label=\"Факультет\", queryset=Faculty.objects.all(), required=True)\n department = forms.ModelChoiceField(widget=forms.widgets.Select, label=\"Кафедра\", queryset=Department.objects.all(), required=True)\n birthday = forms.DateField(widget=DateWidget(usel10n=True, bootstrap_version=3), label=\"Дата рорждения\", required=True)\n class Meta:\n model = Teacher\n fields = [\n 'faculty', 'department'\n ]\n widgets = { \n 'department': forms.widgets.Select\n }\n\n def __init__(self, *args, **kwargs): \n super(TeacherChangeForm, self).__init__(*args, **kwargs) \n instance = kwargs['instance']\n if instance is not None:\n if instance.department:\n self.initial['faculty'] = instance.department.faculty \n self.initial['birthday'] = 
instance.user.birthday","sub_path":"rating/rateapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"585209884","text":"#!/usr/bin/python3\n\"\"\"\nState api\n\"\"\"\nfrom api.v1.views import app_views\nfrom flask import request, jsonify, abort\nfrom flask import Flask\nfrom models import storage\nfrom models.state import State\n\n\n@app_views.route('/states', methods=['GET', 'POST'], strict_slashes=False)\ndef states():\n \"\"\" gets a list of all states or makes one\"\"\"\n if request.method == 'GET':\n lst = []\n objs = storage.all('State')\n for k, v in objs.items():\n lst += [v.to_dict()]\n return jsonify(lst)\n if request.method == 'POST':\n stf = request.get_json(silent=True)\n if stf is None:\n return jsonify(\"Not a JSON\"), 400\n try:\n name = stf[\"name\"]\n except:\n return jsonify(\"Missing name\"), 400\n st = State()\n st.name = name\n st.save()\n return jsonify(st.to_dict()), 201\n\n\n@app_views.route('/states/', methods=['GET', 'DELETE', 'PUT'],\n strict_slashes=False)\ndef state_by_id(state_id):\n \"\"\" gets a single state by id shows deletes or alters it\"\"\"\n if request.method == 'GET':\n st = storage.get(\"State\", state_id)\n if st is None:\n abort(404)\n else:\n return jsonify(st.to_dict())\n if request.method == 'DELETE':\n st = storage.get(\"State\", state_id)\n if st is None:\n abort(404)\n else:\n storage.delete(st)\n storage.save()\n return jsonify({}), 200\n if request.method == 'PUT':\n st = storage.get(\"State\", state_id)\n if st is None:\n abort(404)\n else:\n stf = request.get_json(silent=True)\n if stf is None:\n return jsonify(\"Not a JSON\"), 400\n for k, v in stf.items():\n if k != \"id\" and k != \"created_at\" and k != \"updated_at\":\n setattr(st, k, v)\n st.save()\n return jsonify(st.to_dict()), 200\n","sub_path":"api/v1/views/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"268306181","text":"#!/usr/bin/env python\n\"\"\"\nCreated on Mar 31, 2011\n\n@author: guillaume\n\"\"\"\n\nimport os\nimport shutil\nimport random\n\n\n# Local Libraries\ntry:\n from chemex import fitting, plotting, writing, parsing, reading, tools\n from chemex.experiments.reading import read_cfg_file as read_cfg_file_data\n\nexcept (KeyboardInterrupt):\n exit(\"\\n -- ChemEx killed before it could begin\\n\")\n\n\ndef make_bootstrap_dataset(data):\n profiles = {}\n reference_points = {}\n\n for data_point in data:\n if data_point.par['reference']:\n reference_points.setdefault(data_point.par['profile_id'], []).append(data_point)\n else:\n profiles.setdefault(data_point.par['profile_id'], []).append(data_point)\n\n bootstrap_data = []\n\n for profile_id, profile in profiles.iteritems():\n\n if profile_id in reference_points:\n bootstrap_data.extend([random.choice(reference_points[profile_id]) for _ in reference_points[profile_id]])\n\n bootstrap_data.extend([random.choice(profile) for _ in profile])\n\n return bootstrap_data\n\n\ndef main():\n \"\"\"All the magic\"\"\"\n\n writing.print_logo()\n\n args = parsing.arg_parse()\n\n # Don't allow simultaneous include and exclude flags\n if args.res_incl and args.res_excl:\n exit('\\nCan not simultaneously include and exclude residues!\\n')\n elif args.res_incl:\n args.res_incl = [res.lower() for res in args.res_incl]\n elif args.res_excl:\n args.res_excl = [res.lower() for 
res in args.res_excl]\n\n # Read experimental data\n data = list()\n\n if args.experiments:\n for filename in args.experiments:\n data.extend(read_cfg_file_data(filename, args.res_incl, args.res_excl))\n\n if not data:\n exit(\"\\nNo Data to fit!\\n\")\n\n # Create the lists of both fitting and fixed parameters\n par, par_indexes, par_fixed = reading.create_par_list_to_fit(args.parameters, data)\n\n data_bs = make_bootstrap_dataset(data)\n\n # Fit the data to the model\n par, par_err, par_indexes, par_fixed, reduced_chi2 = \\\n fitting.run_fit(args.method, par, par_indexes, par_fixed, data_bs)\n\n # Write outputs\n print(\"\")\n print(\"Reduced chi2: {:.3e}\".format(reduced_chi2))\n\n # Custom output directory\n output_dir = args.out_dir if args.out_dir else './output'\n if args.res_incl:\n if len(args.res_incl) == 1:\n output_dir = os.path.join(output_dir, args.res_incl[0].upper())\n\n tools.make_dir(output_dir)\n\n print(\"\")\n print(\" - Writing results:\")\n if args.method:\n shutil.copyfile(args.method, os.path.join(output_dir, 'fitting-method.cfg'))\n\n writing.write_chi2(par, par_indexes, par_fixed, data_bs, output_dir=output_dir)\n writing.write_par(par, par_err, par_indexes, par_fixed, output_dir=output_dir)\n writing.write_dat(data_bs, output_dir=output_dir)\n\n if not args.noplot:\n\n print(\"\")\n print(\" - Plotting data:\")\n\n output_dir_plot = os.path.join(output_dir, 'plots')\n tools.make_dir(output_dir_plot)\n\n try:\n plotting.plot_data(data_bs, par, par_indexes, par_fixed, output_dir=output_dir_plot)\n except (KeyboardInterrupt):\n print(\" - Plotting cancelled\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"bin/chemex_bootstrap.py","file_name":"chemex_bootstrap.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"70871866","text":"import math\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n# from models import resnet\nfrom torchvision import models\nfrom base import BaseModel\nfrom utils.helpers import initialize_weights, set_trainable\nfrom itertools import chain\nfrom .modules import PPM, DAUM, AUM, GaussianSmoother\n\n\nclass AUNet(BaseModel):\n def __init__(self, num_classes, in_channels=3, backbone='resnet152', pretrained=True, use_aux=True, freeze_bn=False,\n freeze_backbone=False):\n super().__init__()\n # TODO: Use synch batchnorm\n norm_layer = nn.BatchNorm2d\n # model = getattr(resnet, backbone)(pretrained, norm_layer=norm_layer)\n model = getattr(models, backbone)(pretrained)\n\n self.initial = nn.Sequential(*list(model.children())[:4])\n if in_channels != 3:\n self.initial[0] = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.initial = nn.Sequential(*self.initial)\n\n self.layer1 = model.layer1\n self.layer2 = model.layer2\n self.layer3 = model.layer3\n self.layer4 = model.layer4\n out_size1 = model.layer1[-1].bn3.num_features\n out_size2 = model.layer2[-1].bn3.num_features\n out_size3 = model.layer3[-1].bn3.num_features\n out_size4 = model.layer4[-1].bn3.num_features\n\n bin_sizes = [1, 2, 3, 6]\n self.ppm4 = nn.Sequential(\n PPM(out_size4, bin_sizes, norm_layer=norm_layer),\n nn.Conv2d(out_size4 // len(bin_sizes), num_classes, kernel_size=1)\n )\n self.ppm3 = nn.Sequential(\n PPM(out_size3, bin_sizes, norm_layer=norm_layer),\n nn.Conv2d(out_size3//len(bin_sizes), num_classes, kernel_size=1)\n )\n self.ppm2 = nn.Sequential(\n PPM(out_size2, bin_sizes, norm_layer=norm_layer),\n 
nn.Conv2d(out_size2//len(bin_sizes), num_classes, kernel_size=1)\n )\n\n self.daum = DAUM(in_channels, (9, 9))\n self.daum1 = AUM(in_channels, (5, 5))\n self.daum2 = AUM(in_channels, (5, 5))\n self.daum3 = AUM(in_channels, (5, 5))\n # self.daum4 = DAUM(512, (5, 5))\n\n self.smoother = GaussianSmoother()\n\n initialize_weights(self.ppm4, self.ppm3, self.ppm2,\n self.daum1, self.daum2, self.daum3)\n if freeze_bn:\n self.freeze_bn()\n if freeze_backbone:\n set_trainable([self.initial, self.layer1, self.layer2, self.layer3, self.layer4], False)\n\n def forward(self, x):\n # input_size = (x.size()[2], x.size()[3])\n input_size = (x.size(2), x.size(3))\n fmaps = x\n fmaps_2 = self.smoother(fmaps)\n fmaps_4 = self.smoother(fmaps_2)\n fmaps_8 = self.smoother(fmaps_4)\n fmaps_16 = self.smoother(fmaps_8)\n x = self.initial(x)\n x1 = self.layer1(x)\n x2 = self.layer2(x1)\n output2 = self.ppm2(x2)\n x3 = self.layer3(x2)\n output3 = self.ppm3(x3)\n x4 = self.layer4(x3)\n output4 = self.ppm4(x4)\n\n output = self.daum3(output4, F.interpolate(fmaps_16, size=x3.shape[-2:], mode=\"bilinear\", align_corners=True))\n output = torch.cat((output.reshape(-1, 1), output3.reshape(-1, 1)), dim=1).max(dim=1)[0].reshape(*output.size())\n output = self.daum3(output, F.interpolate(fmaps_8, size=x2.shape[-2:], mode=\"bilinear\", align_corners=True))\n output = torch.cat((output.reshape(-1, 1), output2.reshape(-1, 1)), dim=1).max(dim=1)[0].reshape(*output.size())\n output = self.daum3(output, F.interpolate(fmaps_4, size=x1.shape[-2:], mode=\"bilinear\", align_corners=True))\n\n output = F.interpolate(output, size=input_size, mode='bilinear', align_corners=True)\n if self.training:\n return output, output2, output3, output4\n else:\n return output\n # return output\n\n def get_backbone_params(self):\n return chain(self.initial.parameters(), self.layer1.parameters(), self.layer2.parameters(),\n self.layer3.parameters(), self.layer4.parameters())\n\n def get_decoder_params(self):\n return chain(self.ppm2.parameters(), self.ppm3.parameters(), self.ppm4.parameters(),\n self.daum1.parameters(), self.daum2.parameters(), self.daum3.parameters())\n\n def freeze_bn(self):\n for module in self.modules():\n if isinstance(module, nn.BatchNorm2d): module.eval()","sub_path":"models/aun.py","file_name":"aun.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"534207782","text":"from COMPONENTS.MODULES.HOME_GROUND.config_hg import *\n\nclass BusinessVariables():\n\n def fn_moment_control(self,vLifeMomentId,vServiceId):\n\n if(vLifeMomentId == 5):\n HgRequiredVariables.fn_get_assigned_variables_for_habitat(self)\n HgModelData.fn_get_assigned_data_model_for_habitat(self)\n HgVariables.fn_get_data_value_for_habitat(self)\n HgQuestions.fn_get_data_question_for_habitat(self)\n HgSaveValues.fn_get_ideal_house(self) \n else:\n print(\"Exception e:\")\n return None","sub_path":"Recommendation System/COMPONENTS/CONTROLLER/business_variables.py","file_name":"business_variables.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"148529847","text":"import os, sqlite3, math, random, string\n\ndef commit_random_algs(branch_factor, depth):\n # connect to DB\n dbfile = 'db/xform.db'\n conn = sqlite3.connect(dbfile)\n db = conn.cursor() \n\n # get existing numbers so we don't step on them\n ds_id = db.execute('''select dsid from datasets where desc = 
'wearable_audio' ''').fetchone()[0]\n max_fid = int(db.execute('''select max(fid) from formats''').fetchone()[0])\n max_aid = int(db.execute('''select max(aid) from algorithms''').fetchone()[0])\n max_iid = int(db.execute('''select max(iid) from incidence''').fetchone()[0])\n\n wav_fid = int(db.execute('''select fid from formats where desc = 'wav' ''').fetchone()[0])\n\n # add randomly generated formats and algorithms\n ## generate depth dummy formats, branch_factor*depth dummy algorithms\n formats = [wav_fid]\n aids = []\n iids = []\n\n for i in xrange(depth):\n # insert format into formats table\n rand_format_desc = ''.join(random.choice(string.letters) for i in xrange(5))\n formats.append(max_fid+i+1000)\n db.execute('insert into formats (fid, desc) values (:fid, :desc)', {'fid': formats[-1], 'desc': rand_format_desc})\n\n # insert format incidence into incidence table\n idata = '0.5 0.5'\n iids.append(max_iid+i+1000)\n db.execute('insert into incidence (iid, dsid, fid, data) values (:iid, :dsid, :fid, :data)''',\n {'iid': iids[-1], 'dsid': ds_id, 'fid': formats[-1], 'data': idata}\n )\n \n for j in xrange(branch_factor):\n run_cmd = 'code/algorithms/null.py'\n time_formula = '100*n'\n cost_formula = '0.1*n'\n acc_formula = '0.5 0.5'\n alg_desc = ''.join(random.choice(string.letters) for i in xrange(5))\n aid = max_aid+1000+(i*branch_factor+j)\n aids.append(aid)\n db.execute('''insert into algorithms (aid, in_format, out_format, run_cmd, time_formula, money_formula, accuracy_formula, desc) values (:aid, :in_f, :out_f, :rcmd, :tf, :mf, :af, :desc)''',\n {'aid': aid, 'in_f': formats[-2], 'out_f': formats[-1], \n 'rcmd': run_cmd, 'tf': time_formula, 'mf': cost_formula, \n 'af': acc_formula, 'desc': alg_desc}\n )\n conn.commit()\n db.close()\n dummy_fids = (formats[1], formats[-1])\n dummy_aids = (aids[0], aids[-1])\n dummy_iids = (iids[0], iids[-1])\n \n# print 'dummy formats fids committed: %d - %d' % dummy_fids\n# print 'dummy algorithms aids committed: %d - %d' % dummy_aids \n# print 'dummy incidence iids committed: %d - %d' % dummy_iids\n\n return (dummy_fids, dummy_aids, dummy_iids)\n\nif __name__ == '__main__': \n commit_random_algs(10,4)\n","sub_path":"code/scripts/commit_dummy_algs.py","file_name":"commit_dummy_algs.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"216905355","text":"#!python3\nimport sys\nimport os\nbasepath = os.path.dirname(__file__)\nsys.path.append(os.path.join(basepath, '..'))\n\nimport terminal\nterm = terminal.getTerminal()\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"tanglefile\", type=str, help=\"Tangles to print, must be JSON from tangle finder.\")\nparser.add_argument(\"renamefile\", type=str, default=None, nargs='?')\nargs = parser.parse_args()\n\n# Load Tangle File\nimport json\ntangledata = None\nwith open(args.tanglefile,'r') as f:\n tangledata = json.load(f)\n\n# Sort separations\nsepData = tangledata['dataset']['seps']\nsortedseps = tangledata['usedseps']\nif args.renamefile:\n with open(args.renamefile,'r') as f:\n names = f.readlines()\n for sep in sepData:\n oldname = sep['name']\n num = int(oldname[:oldname.index(' ')])\n sep['name'] = str(num) + ' ' + names[num-1]\n\n# Read groups\ngroups = sorted(tangledata['dataset']['groups'])\n\nsortedTangles = sorted(tangledata['tangles'], key=len)\nterminals = True\nif terminals:\n maximals = []\n for i in range(len(sortedTangles)):\n A = sortedTangles[i]\n isMaximal = 
True\n for j in range(i+1,len(sortedTangles)):\n B = sortedTangles[j]\n if set(A.items()).issubset(B.items()):\n isMaximal = False\n break\n if isMaximal:\n maximals.append(A)\n sortedTangles = maximals\n\nprint('')\nprint(\"\"\"\n \n \n \n\n \"\"\")\nprint('')\n\n#Go througth tangles and print them\nprint('')\nfor i in sortedseps:\n print('')\n sep = sepData[i]\n print('')\n num = 0\n for tangle in sortedTangles:\n num += 1\n if len(tangle) == 0:\n continue\n if str(i) in tangle:\n if tangle[str(i)] == 'y':\n print('')\n elif tangle[str(i)] == 'n':\n print('')\n else:\n print('')\n print('')\nprint('
', sep['name'], ' Y N
')\nprint('' + args.tanglefile + '')\n","sub_path":"TangleCode/Survey/html_tangles.py","file_name":"html_tangles.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"594570053","text":"\"\"\"\nRheostatic - A Static File Server with options.\n\nMIT License\n\nCopyright (c) 2016 Waylan Limberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\nimport os\n\n\n# version_info should conform to PEP 386\n# (major, minor, micro, alpha/beta/rc/final, #)\n# (1, 1, 2, 'alpha', 0) => \"1.1.2.dev\"\n# (1, 2, 0, 'beta', 2) => \"1.2b2\"\n__version_info__ = (0, 0, 2, 'final', 0)\n\n\ndef _get_version(): # pragma: no cover\n \" Returns a PEP 386-compliant version number from version_info. \"\n assert len(__version_info__) == 5\n assert __version_info__[3] in ('alpha', 'beta', 'rc', 'final')\n\n parts = 2 if __version_info__[2] == 0 else 3\n main = '.'.join(map(str, __version_info__[:parts]))\n\n sub = ''\n if __version_info__[3] == 'alpha' and __version_info__[4] == 0:\n # TODO: maybe append some sort of git info here??\n sub = '.dev'\n elif __version_info__[3] != 'final':\n mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}\n sub = mapping[__version_info__[3]] + str(__version_info__[4])\n\n return str(main + sub)\n\n\n__version__ = _get_version()\n\n\n# Follow Django in treating URLs as UTF-8 encoded (which requires undoing the\n# implicit ISO-8859-1 decoding applied in Python 3). Strictly speaking, URLs\n# should only be ASCII anyway, but UTF-8 can be found in the wild.\ndef decode_path_info(path_info):\n return path_info.encode('iso-8859-1').decode('utf-8')\n\n\n# Define only the HTTP status codes we actually use\nhttp_status = {\n 200: 'OK',\n 301: 'Moved Permanently',\n 304: 'Not Modified',\n 404: 'Not Found',\n 405: 'Method Not Allowed'\n\n}\n\ndirectory_template = \"\"\"\n\n \n Directory listing for {displaypath}\n \n \n

Directory listing for {displaypath}

\n
\n
    \n {items}\n
\n
\n \n\n\"\"\".replace('\\n', os.linesep)\n\n# Define our own types for consistency cross platform.\n# Use the types defined by nginx with a few additions.\ntypes_map = {\n '.3gp': 'video/3gpp',\n '.3gpp': 'video/3gpp',\n '.7z': 'application/x-7z-compressed',\n '.ai': 'application/postscript',\n '.asf': 'video/x-ms-asf',\n '.asx': 'video/x-ms-asf',\n '.atom': 'application/atom+xml',\n '.avi': 'video/x-msvideo',\n '.bmp': 'image/x-ms-bmp',\n '.cco': 'application/x-cocoa',\n '.crt': 'application/x-x509-ca-cert',\n '.css': 'text/css',\n '.der': 'application/x-x509-ca-cert',\n '.doc': 'application/msword',\n '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',\n '.ear': 'application/java-archive',\n '.eot': 'application/vnd.ms-fontobject',\n '.eps': 'application/postscript',\n '.flv': 'video/x-flv',\n '.gif': 'image/gif',\n '.hqx': 'application/mac-binhex40',\n '.htc': 'text/x-component',\n '.htm': 'text/html',\n '.html': 'text/html',\n '.ico': 'image/x-icon',\n '.jad': 'text/vnd.sun.j2me.app-descriptor',\n '.jar': 'application/java-archive',\n '.jardiff': 'application/x-java-archive-diff',\n '.jng': 'image/x-jng',\n '.jnlp': 'application/x-java-jnlp-file',\n '.jpeg': 'image/jpeg',\n '.jpg': 'image/jpeg',\n '.js': 'application/javascript',\n '.json': 'application/json',\n '.kar': 'audio/midi',\n '.kml': 'application/vnd.google-earth.kml+xml',\n '.kmz': 'application/vnd.google-earth.kmz',\n '.m3u8': 'application/vnd.apple.mpegurl',\n '.m4a': 'audio/x-m4a',\n '.m4v': 'video/x-m4v',\n '.manifest': 'text/cache-manifest',\n '.mid': 'audio/midi',\n '.midi': 'audio/midi',\n '.mml': 'text/mathml',\n '.mng': 'video/x-mng',\n '.mov': 'video/quicktime',\n '.mp3': 'audio/mpeg',\n '.mp4': 'video/mp4',\n '.mpeg': 'video/mpeg',\n '.mpg': 'video/mpeg',\n '.ogg': 'audio/ogg',\n '.pdb': 'application/x-pilot',\n '.pdf': 'application/pdf',\n '.pem': 'application/x-x509-ca-cert',\n '.pl': 'application/x-perl',\n '.pm': 'application/x-perl',\n '.png': 'image/png',\n '.ppt': 'application/vnd.ms-powerpoint',\n '.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',\n '.prc': 'application/x-pilot',\n '.ps': 'application/postscript',\n '.ra': 'audio/x-realaudio',\n '.rar': 'application/x-rar-compressed',\n '.rpm': 'application/x-redhat-package-manager',\n '.rss': 'application/rss+xml',\n '.rtf': 'application/rtf',\n '.run': 'application/x-makeself',\n '.sea': 'application/x-sea',\n '.shtml': 'text/html',\n '.sit': 'application/x-stuffit',\n '.svg': 'image/svg+xml',\n '.svgz': 'image/svg+xml',\n '.swf': 'application/x-shockwave-flash',\n '.tcl': 'application/x-tcl',\n '.tif': 'image/tiff',\n '.tiff': 'image/tiff',\n '.tk': 'application/x-tcl',\n '.ts': 'video/mp2t',\n '.txt': 'text/plain',\n '.war': 'application/java-archive',\n '.wbmp': 'image/vnd.wap.wbmp',\n '.webm': 'video/webm',\n '.webp': 'image/webp',\n '.wml': 'text/vnd.wap.wml',\n '.wmlc': 'application/vnd.wap.wmlc',\n '.wmv': 'video/x-ms-wmv',\n '.woff': 'application/font-woff',\n '.woff2': 'font/woff2',\n '.xhtml': 'application/xhtml+xml',\n '.xls': 'application/vnd.ms-excel',\n '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n '.xml': 'text/xml',\n '.xpi': 'application/x-xpinstall',\n '.xspf': 'application/xspf+xml',\n '.zip': 'application/zip'\n}\n","sub_path":"rheostatic/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"477464132","text":"import os\nimport json\nfrom PyQt5.QtWidgets import *\nfrom nodeeditor.node_editor_widget import NodeEditorWidget\n\n\nclass NodeEditorWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n self.filename = None\n\n\n\n def createAct(self, name, shortcut, tooltip, callback):\n act = QAction(name, self)\n act.setShortcut(shortcut)\n act.setToolTip(tooltip)\n act.triggered.connect(callback)\n return act\n\n\n def initUI(self):\n menubar = self.menuBar()\n\n # initialize Menu\n fileMenu = menubar.addMenu('&File')\n fileMenu.addAction(self.createAct('&New', 'Ctrl+N', \"Create new graph\", self.onFileNew))\n fileMenu.addSeparator()\n fileMenu.addAction(self.createAct('&Open', 'Ctrl+O', \"Open file\", self.onFileOpen))\n fileMenu.addAction(self.createAct('&Save', 'Ctrl+S', \"Save file\", self.onFileSave))\n fileMenu.addAction(self.createAct('&Save &As...', 'Ctrl+Shift+S', \"Save file as\", self.onFileSaveAs))\n fileMenu.addSeparator()\n fileMenu.addAction(self.createAct('E&xit', 'Ctrl+Q', \"Exit application\", self.close))\n\n editMenu = menubar.addMenu('&Edit')\n editMenu.addAction(self.createAct('&Undo', 'Ctrl+Z', \"Undo last operation\", self.onEditUndo))\n editMenu.addAction(self.createAct('&Redo', 'Ctrl+Shift+Z', \"Redo last operation\", self.onEditRedo))\n editMenu.addSeparator()\n editMenu.addAction(self.createAct('Cu&t', 'Ctrl+X', \"Cut to clipboard\", self.onEditCut))\n editMenu.addAction(self.createAct('&Copy', 'Ctrl+C', \"Copy to clipboard\", self.onEditCopy))\n editMenu.addAction(self.createAct('Cu&t', 'Ctrl+V', \"Paste from clipboard\", self.onEditPaste))\n editMenu.addSeparator()\n editMenu.addAction(self.createAct('&Delete', 'Del', \"Delete selected items\", self.onEditDelete))\n\n nodeeditor = NodeEditorWidget(self)\n self.setCentralWidget(nodeeditor)\n\n # status bar\n self.statusBar().showMessage('')\n self.status_mouse_pos = QLabel('')\n self.statusBar().addPermanentWidget(self.status_mouse_pos)\n nodeeditor.view.scenePosChanged.connect(self.onScenePosChanged)\n\n\n # set window properties\n self.setGeometry(200, 200, 800, 600)\n self.setWindowTitle(\"Node Editor\")\n self.show()\n\n\n def onScenePosChanged(self, x, y):\n self.status_mouse_pos.setText(\"Scene Pos: [%d, %d]\" % (x,y))\n\n\n def onFileNew(self):\n self.centralWidget().scene.clear()\n\n def onFileOpen(self):\n fname, filter = QFileDialog.getOpenFileName(self, 'Open graph from file')\n if fname == '':\n return\n if os.path.isfile(fname):\n self.centralWidget().scene.loadFromFile(fname)\n\n def onFileSave(self):\n if self.filename is None: return self.onFileSaveAs()\n self.centralWidget().scene.saveToFile(self.filename)\n self.statusBar().showMessage('Successfully saved %s' % self.filename)\n\n def onFileSaveAs(self):\n fname, filter = QFileDialog.getSaveFileName(self, 'Save graph to file')\n if fname == '':\n return\n self.filename = fname\n self.onFileSave()\n\n def onEditUndo(self):\n #print(\"Undo\")\n self.centralWidget().scene.history.undo()\n\n def onEditRedo(self):\n self.centralWidget().scene.history.redo()\n\n def onEditDelete(self):\n self.centralWidget().scene.grScene.views()[0].deleteSelected()\n\n def onEditCut(self):\n data = self.centralWidget().scene.clipboard.serializeSelected(delete=True)\n str_data = json.dumps(data, indent=4)\n QApplication.instance().clipboard().setText(str_data)\n\n def onEditCopy(self):\n data = self.centralWidget().scene.clipboard.serializeSelected(delete=False)\n str_data = json.dumps(data, indent=4)\n 
QApplication.instance().clipboard().setText(str_data)\n\n\n\n def onEditPaste(self):\n raw_data = QApplication.instance().clipboard().text()\n\n try:\n data = json.loads(raw_data)\n except ValueError as e:\n print(\"Pasting of not valid json data!\", e)\n return\n\n # check if the json data are correct\n if 'nodes' not in data:\n print(\"JSON does not contain any nodes\")\n return\n\n self.centralWidget().scene.clipboard.deserializeFromClipboard(data)\n\n\n","sub_path":"nodeeditor/node_editor_window.py","file_name":"node_editor_window.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"181459539","text":"from setuptools import setup\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\nimport subprocess as sp\nimport platform\n\ndef make_hkl():\n '''\n Compile the C huckel code and turns it in a dynamic library\n '''\n print('--> Compile huckel library')\n src_path = './husky/hamiltonian/huckel/src/'\n osname = platform.system()\n if osname == 'Linux':\n cmd = 'gcc -shared -o ../hkl.so -fPIC huckel.c'\n elif osname == 'Darwin':\n cmd = 'gcc -dynamiclib huckel.c -o ../hkl.so'\n else:\n raise ValueError('Environement %s not supported.' %osname)\n\n sp.check_call(cmd,cwd=src_path,shell=True)\n\nclass hklinstall(install):\n '''\n custom hadler for the install command\n '''\n def run(self):\n make_hkl()\n super().run()\n\nclass hkldevelop(develop):\n '''\n custom hadler for the install command\n '''\n def run(self):\n make_hkl()\n super().run()\n\nclass hklegg(egg_info):\n '''\n custom hadler for the install command\n '''\n def run(self):\n make_hkl()\n super().run()\n\nsetup(\n name='husky',\n description='huskython: Husky in Python ',\n version='0.1-dev',\n url='https://github.com/NicoRenaud/huskython',\n packages=['husky'],\n install_requires=[\n 'numpy>=1.13.3',\n 'scipy>=1.0.0'\n ],\n extras_require={\n 'test':['nose','coverage']\n },\n cmdclass={'install' : hklinstall, \n 'develop' : hkldevelop,\n 'egg_info': hklegg }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"489656970","text":"import requests\nfrom common import notify_checker\n\n\ndef paradiso(event, context):\n data = collect()\n notify_checker(data)\n\n\ndef date_formatter(raw_date):\n date_data = str(raw_date)\n day = date_data[8:10]\n month = date_data[5:7]\n year = '2020'\n date = f'{day}-{month}-{year}'\n return date\n\n\ndef collect():\n url = \"https://api.paradiso.nl/api/events?lang=en&start_time=now&sort=date&order=asc&limit=600&page=1\"\n page = requests.get(url)\n gigs = page.json()\n gigs_list = []\n venue = 'paradiso'\n for gig in gigs:\n date = date_formatter(gig['start_date_time'])\n artist = gig['title']\n description = gig['subtitle']\n ticket_url = gig['ticket_url']\n uid = f'{artist}_{date}_{venue}'\n gig = {\"id\": uid,\n \"venue\": venue,\n \"date\": date,\n \"artist\": artist,\n \"link\": ticket_url,\n }\n gigs_list.append(gig)\n return gigs_list\n","sub_path":"gigfinder/venues/paradiso.py","file_name":"paradiso.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"626706165","text":"import datetime\nfrom MyModel_V4 import Model_NER, conf\nimport numpy as np\nimport tensorflow as 
tf\nimport tensorflow.keras as keras\nimport tensorflow.keras.layers as layers\nfrom tensorflow_addons.text import crf\nfrom Utils import *\nimport time\nfrom tqdm import tqdm\nfrom conlleval import evaluate\nfrom transformers import BertTokenizer, TFBertModel\n\n\ntokenizer_savepath = 'bert/tokenizer'\nbertmodel_savepath = 'bert/model'\n\ntokenizer = BertTokenizer.from_pretrained(tokenizer_savepath)\nbertmodel = TFBertModel.from_pretrained(bertmodel_savepath)\n\n\ndef bert_embedding(batches):\n batches_prd = []\n for ba in batches:\n '''\n ba=[sentence,sentence...]\n sentence = [[chars],[tags],[tag_ids]]\n '''\n chars = [x[0] for x in ba]\n tags = [x[1] for x in ba]\n tag_ids = [x[2] for x in ba]\n lens = [len(x[0]) for x in ba]\n ml = max(lens)\n chars_ids = []\n for sent in chars:\n tokenized = tokenizer.encode(sent, max_length=ml + 2, padding='max_length', return_tensors='tf')\n chars_ids.append(tokenized)\n bert_input = tf.squeeze(tf.stack(chars_ids, axis=1))\n emb = bertmodel(bert_input).last_hidden_state\n batches_prd.append({\n 'emb': emb,\n 'chars': chars,\n 'tag_ids': tag_ids,\n 'tags': tags,\n 'lens': lens\n })\n return batches_prd\n\n\ntf.config.set_soft_device_placement(True)\ngpus = tf.config.experimental.list_physical_devices(device_type='GPU')\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\nmod = 'BiLSTM'\nbatch_size = 100\nrecordFileName = '_'.join(\n ['3L_Bert' + mod, str(batch_size) + 'bs'])\ncreate_record_dirs(recordFileName)\nepochNum = get_epochNum(recordFileName) # get the epoch number recorded so far\n\n# Configure model parameters and checkpoints\n#-------------------------------------------------------\nconfigers = conf(choose_mod=mod)\nmyModel = Model_NER(configers)\nckpt_dir_inner = os.path.join('Records',recordFileName, 'checkpoints')\nckpt_dir_theta_0 = os.path.join('Records',recordFileName, 'theta_0') # stores the initial parameters learned after each outer loop\nckpt_dir_theta_t = os.path.join('Records',recordFileName, 'theta_t') # stores the best-performing initial parameters\nckpt_dir_vali_theta = os.path.join('Records',recordFileName, 'theta_vali') # stores the parameters from model training while evaluating on the validation set\n\nckpt_path_theta_0 = os.path.join(ckpt_dir_theta_0, 'ckpt_theta_0')\nckpt_path_theta_t = os.path.join(ckpt_dir_theta_t, 'ckpt_theta_t')\nckpt_path_vali_theta = os.path.join(ckpt_dir_vali_theta, 'ckpt_theta_vali_train')\nmaxPRF1filepath = os.path.join('Records',recordFileName,'mPRF1.txt')\ncheckpoint = tf.train.Checkpoint(optimizer=myModel.optimizer, model=myModel)\nckpt_manager = tf.train.CheckpointManager(checkpoint, directory=ckpt_dir_inner, max_to_keep=5)\n#-------------------------------------------------------\nif epochNum == 0:\n myModel.save_weights(ckpt_path_theta_0)\nelse:\n myModel.load_weights(ckpt_path_theta_0)\n\n# Configure tensorboard\n#-------------------------------------------------------\n\nlog_dir_train = 'Records/' + recordFileName + '/tensorboard/' + '-train'\nlog_dir_vali_train = 'Records/' + recordFileName + '/tensorboard/' + '-vali_train'\nlog_dir_vali_test = 'Records/' + recordFileName + '/tensorboard/' + '-vali_test'\nlog_writer_train = tf.summary.create_file_writer(log_dir_train)\nlog_writer_vali_train = tf.summary.create_file_writer(log_dir_vali_train)\nlog_writer_vali_test = tf.summary.create_file_writer(log_dir_vali_test)\n#-------------------------------------------------------\n\nprint('record files are created!\\n'\n '-----------------------------------------------------------\\n')\n\ntrain_tasks = ['address', 'scene', 'government', 'organization',\n 'company', 'name', 'book']\nvalidation_tasks = ['game', 'position', 'movie']\n# Get the validation data: the validation set has two parts, a validation-train set and a validation-test set\n#-------------------------------------------------------\nvali_train_data_paths = []\nfor t in validation_tasks:\n temp = os.path.join('data_tasks', t)\n vali_train_data_paths.append(temp)\nvali_train_batches = get_batches_v4(train_data_path_list=vali_train_data_paths, batch_size=batch_size, batch_num=1,\n taskname=validation_tasks)\nvali_test_data_path = ['data/CLUE_BIOES_dev']\nvali_test_batch = get_batches_v4(train_data_path_list=vali_test_data_path, batch_size=10000, batch_num=1,\n taskname=validation_tasks)\n\nvali_test_batch_pred = bert_embedding([vali_test_batch])[0]\nvali_train_batches_pred = bert_embedding(vali_train_batches)\n#-------------------------------------------------------\n\n# Exponentially decaying learning rate\n# exponential_decay = tf.keras.optimizers.schedules.ExponentialDecay(\n# initial_learning_rate=0.05, decay_steps=50, decay_rate=0.8)\n# myModel.optimizer = tf.optimizers.Adam(exponential_decay)\n# myModel.optimizer = tf.optimizers.Adam(learning_rate=0.001)\n\nmax_F1 = 0\n# Training starts\n#-------------------------------------------------------\nfor epoch in range(epochNum, 500):\n # starttime = time.time()\n print('epoch:{}'.format(epoch))\n loss_t,P_t,R_T,F1_t = myModel.inner_train_one_step(vali_train_batches_pred, inner_iters=0, inner_epochNum=epoch,\n outer_epochNum=0,\n task_name=validation_tasks, log_writer=log_writer_vali_train)\n myModel.save_weights(ckpt_path_theta_0)\n print('train loss:{}, train F1:{} <-----------------\\n'.format(loss_t,F1_t))\n print('**********************************************\\n')\n # check the results on the test set\n test_loss, pred_tags_masked, tag_ids_padded,P, R, F1 = myModel.validate_one_batches(vali_test_batch_pred, validation_tasks,\n log_writer_vali_test, epoch)\n if F1 > max_F1:\n max_F1 = F1\n myModel.save_weights(ckpt_path_theta_t)\n content = 'P\\t{}\\nR\\t{}\\nF1\\t{}\\n'.format(P,R,max_F1)\n with open(maxPRF1filepath,'w',encoding='utf-8') as f:\n f.write(content)\n # record the epoch\n Record_epoch_num(recordFileName, epoch)\n\n # endtime = time.time()\n # print('done inner epoch:{}!——————run time :{}s**********************************************.\\n'.format(epoch, str(endtime - starttime)))\n","sub_path":"NER/train_Bert+Bilstm.py","file_name":"train_Bert+Bilstm.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"315400934","text":"import csv\nimport numpy as np\nimport matplotlib as mpl\n \nclass Data(object):\n def __init__(self,data,paramsEspecifications,xFocus,xFSep):\n self.data = data\n self.paramsEspecifications = paramsEspecifications\n self.xFocus = TransformToFloat(xFocus,':')\n self.xFSep = float(xFSep)\n \n \n \n def setData(self,Data):\n self.data = Data\n \n def ncols(self):\n return len(self.data[0])\n \n def nrows(self):\n return len(self.data)\n def getrow(self,index):\n return self.data[index]\n def TransformtoFloats(self):\n #Transform dataset entries to floats\n for i in range(self.nrows()):\n for j in range(len(self.getrow(i))):\n if self.data[i,j] != 'NaN':\n self.data[i,j] = [ float(item) for item in self.data[i,j][1:-1].split(':')]\n \n\n def reshape(self):\n self.data.reshape((self.nrows(),self.ncols()))\n \n \n def plot(self,plothandler,xlims,ylims,xsep,ysep,mapcolor):\n self.formatData()\n #create matrix\n X = setXRange(xlims,self.xFocus,self.xFSep,xsep)\n Y = np.arange(ylims[0],ylims[1],ysep)\n X,Y = np.meshgrid(X,Y)\n #create levels and norm\n lev,norm1 = self.setLevelsandNorm()\n #plot heatmap\n plothandler.contourf(X,Y,self.getPlotObject(),cmap = mapcolor,levels=lev,norm=norm1)\n\nclass InputInvData(Data):\n def __init__(self,direction):\n self.direction = direction\n def Read(self,delimiter):\n with open(self.direction,'rb') as csvfile:\n reader = csv.reader(csvfile,delimiter=',',quotechar='|')\n data_handler = []\n for row in reader:\n data_handler.append(row)\n dataset = np.array(data_handler[1:-3],dtype=object)\n InvScenarios = data_handler[0]\n paramsEspecifications = data_handler[-2]\n distribution = data_handler[-3]\n output = InvData(dataset,InvScenarios,paramsEspecifications,distribution)\n return output\n\nclass InputMatrixData(Data):\n def __init__(self,direction):\n self.direction = direction\n \n def Read(self,delimiter,dataType):\n with open(self.direction,'rb') as csvfile:\n reader = csv.reader(csvfile,delimiter=',',quotechar='|')\n data_handler = []\n for row in reader:\n data_handler.append(row)\n dataset = np.array(data_handler[0:-3],dtype=object)\n dataset.reshape((len(dataset),len(dataset[0])))\n paramsEspecifications = data_handler[-2]\n xFocus,xFSep = data_handler[-1]\n \n output = dataType(dataset,paramsEspecifications,xFocus,xFSep)\n \n return output\n\n \n \ndef TransformToFloat(X,delimiter):\n return [ float(item) for item in X[1:-1].split(delimiter)]\n\n \nclass EqData(Data):\n def __init__(self,data,paramsEspecifications,xFocus,xSep):\n Data.__init__(self,data,paramsEspecifications,xFocus,xSep)\n self.formated_data = '' \n \n def extract_Data(self):\n self.TransformtoFloats()\n m,n = self.data.shape\n R = np.zeros((m,n))\n C = np.zeros((m,n))\n P = np.zeros((m,n))\n for i in range(m):\n for j in range(n):\n X = self.data[i,j]\n R[i,j]=X[0]\n C[i,j]=X[1]\n P[i,j]=X[2]\n return R,C,P\n\n \nclass EigenValData(Data):\n def __init__(self,data,paramsEspecifications,xFocus,xSep):\n Data.__init__(self,data,paramsEspecifications,xFocus,xSep)\n self.DominantEigenVal = 0.\n self.numEigenVal = 0\n self.StabBound =0\n \n def TransformtoFloats(self):\n m,n = self.data.shape\n for i in range(m):\n for j in range(n): \n self.data[i,j] = [ TransformToFloat(item,'|') for item in self.data[i,j][1:-1].split(':')]\n \n def formatData(self):\n self.TransformtoFloats()\n self.setDominants()\n self.setStabBound()\n \n def setStabBound(self):\n self.StabBound = getStabBoundary(self.DominantEigenVal)\n \n def plotStabBound(self,plothandler,xlims,ylims,xsep,ysep,mapcolor):\n self.formatData()\n #create matrix\n X = setXRange(xlims,self.xFocus,self.xFSep,xsep)\n Y = np.arange(ylims[0],ylims[1],ysep)\n X,Y = np.meshgrid(X,Y)\n #create levels and norm\n lev= [-0.5,0.5,1.5]\n #plot heatmap\n plothandler.contourf(X,Y,self.StabBound,cmap = mapcolor,levels=lev)\n\n def getPlotObject(self):\n return self.DominantEigenVal\n def numEigs(self):\n m,n = self.data.shape\n Num = np.zeros((m,n))\n for i in range(m):\n for j in range(n):\n Num[i,j]=len(self.data[i,j])\n \n self.numEigenVal = Num\n\n def setDominants(self):\n m,n = self.data.shape\n Dom = np.zeros((m,n))\n for i in range(1,m):\n for j in range(n):\n k = getMaxRealIndex(self.data[i,j])\n \n Dom[i,j]=self.data[i,j][k][0]\n for j in range(n):\n Dom[0,j] = Dom[1,j]\n \n self.DominantEigenVal = Dom\n \n \n \n def setLevelsandNorm(self):\n m,M = getMinMax(self.DominantEigenVal)\n lev = setLevel(m,M,100)\n norm = mpl.colors.BoundaryNorm(lev,256)\n return lev,norm\n \n \n \n \ndef setLevel(m,M,npoints,zero_=1e-18):\n if M >0:\n if m <0 :\n if abs(m)>zero_:\n lev1 = -(10**np.linspace(np.log10(zero_),np.log10(abs(m)),npoints//2))[::-1]\n lev2 = 10**np.linspace(np.log10(zero_),np.log10(M),npoints//2)\n return np.concatenate([lev1,lev2])\n else:\n return 10**np.linspace(np.log10(zero_),np.log10(M),npoints)\n else:\n return 10**np.linspace(np.log10(m),np.log10(M),npoints)\n \n \n else:\n return -(10**np.linspace(np.log10(abs(M)),np.log10(abs(m)),npoints))[::-1]\n \n \nclass MTPData(Data):\n def __init__(self,data,paramsEspecifications,xFocus,xSep):\n Data.__init__(self,data,paramsEspecifications,xFocus,xSep)\n \n def TransformtoFloats(self):\n m,n = self.data.shape\n for i in range(m):\n for j in range(n):\n \n self.data[i,j] = float(self.data[i,j])\n \n def formatData(self):\n self.TransformtoFloats()\n \n def getPlotObject(self):\n return self.data\n \n def setLevelsandNorm(self):\n L = np.arange(2.1,3.05,0.05)\n lev = [0.,1.,2.] +list(L)\n norm1 = mpl.colors.BoundaryNorm(lev,256)\n return lev,norm1\n \n \n \ndef setXRange(xlims,xfocus,xfocussep,xsep):\n A = np.arange(xlims[0],xfocus[0],xsep)\n B = np.arange(xfocus[0],xfocus[1],xfocussep)\n C = np.arange(xfocus[1],xlims[1],xsep)\n D = np.concatenate([A,B,C])\n return D\n\ndef getMaxRealIndex(List):\n Max = List[0][0]\n index = 0 \n for i in range(1,len(List)):\n if(List[i][0]>Max):\n index = i\n Max = List[i][0]\n return index \n \n \n \n \ndef isitCoexistence(X,index,index2):\n try:\n return isinPositiveOrthant(X,index)\n except:\n try:\n return isinPositiveOrthant(X,index2)\n except:\n return isinPositiveOrthant(X,0)\n \n \ndef isitPositive(X):\n pos = 0\n for i in range(len(X)):\n p = isinPositiveOrthant(X,i)\n if p>0:\n return p\n return pos\n\ndef isinPositiveOrthant(X,index):\n P = X[index][2]\n if P>0:\n return 1\n else:\n return 0\n \n \n \nclass InvData(Data): \n def __init__(self,data,InvScenarios,paramsEspecifications,distribution):\n Data.__init__(self,data,paramsEspecifications,'(0:0)',0.)\n self.Scenarios = InvScenarios\n self.distribution = distribution\n self.formated_data = ''\n self.OrderedData = {}\n self.Paths= {}\n \n def formatData(self):\n \"\"\" Transform the original data to a format amenable for future working, first transform it to a 2 Dimensional array\n whose number of columns is the number of distinct scenarios encountered in the invasibility analysis (which is four in our case)\n and the number of rows is the maximum number of points present in the invasibility set of any of the scenarios. It also converts\n the string elements to floats, and returns a dictionary of dictionaries for each of the scenarios with x and y coordinates as\n the two keys of each of the dictionaries and whose elements are a list of lists for each of the distinct invasibility sets \n contained within each scenario. It further filters the results for the scenarios P to C-R and C to P-R neglecting all the\n elements in which the necessary conditions,C to R and P to R respectively, are fulfilled.\"\"\"\n self.reshape()\n self.TransformtoFloats()\n self.distribution = [ [int(item) for item in self.distribution[i][1:-1].split(':')] for i in range(len(self.distribution)) ]\n distribution = self.distribution\n formated_data = {}\n ncols = self.ncols()\n for n in range(ncols):\n colDist = distribution[n]\n formated_data[self.Scenarios[n]] = {'x':[],'y':[]}\n for i in range(len(colDist)):\n x_data =[]\n y_data =[]\n for j in range(sum(colDist[0:i]),sum(colDist[0:i+1])):\n x_data.append(self.data[j,n][0])\n y_data.append(self.data[j,n][1])\n formated_data[self.Scenarios[n]]['x'].append(x_data)\n formated_data[self.Scenarios[n]]['y'].append(y_data)\n \n \n self.formated_data = formated_data\n def plot(self,plothandler,colorCoder,lineCoder):\n self.formatData()\n self.OrderData()\n self.InnerPlot(plothandler,colorCoder,lineCoder)\n \n \n def InnerPlot(self,plothandler,colorCoder,lineCoder):\n for key in self.Paths:\n self.Paths[key].plot(plothandler,colorCoder[key],lineCoder[key])\n \n \n def FilterData(self,Target,xFilter,yFilter):\n \"\"\"Filter the data contained in the target key by deleting all the numbers which are below\n the yFilter correspondence of each of the xFilter positions, this implementation uses a\n binary search algorithm for finding the elements of xFilter within each of the sublists stored\n in Target \n @param Target a key of the data dictionary\n @param xFilter a list of x coordinates\n @param yFilter a list of y coordinates\n \"\"\"\n \n T = self.formated_data[Target]\n Xlists = T['x']\n Ylists = T['y']\n \n for x_i in range(len(xFilter)):\n x = xFilter[x_i]\n \n for index in range(len(Xlists)):\n \n p = BSearch(Xlists[index],x)\n if type(p)!=bool:\n if yFilter[x_i] < Ylists[index][p]:\n break\n else:\n Ylists[index].pop(p)\n Xlists[index].pop(p)\n else:\n break \n \n def OrderData(self):\n Data = self.formated_data\n for scenario in Data.keys():\n self.Order(scenario)\n self.ConstructPaths()\n \n def ConstructPaths(self):\n \n for scenario in self.Scenarios:\n Path = self.BuildPath(scenario)\n self.Paths[scenario] = Path\n \n def BuildPath(self,scenario):\n P = self.OrderedData[scenario]\n YPath = Path(P['x'][0],P['y'][0])\n for index in range(len(P['x']) -1 ) :\n YPath.AddPath(Path(P['x'][index+1],P['y'][index+1]))\n #YPath.FormatPoints()\n return YPath\n \n \n def Order(self,scenario):\n Set= self.formated_data[scenario]\n new_x = []\n new_y = []\n if len(Set['x'][0])!=0:\n for index in range(len(Set['x'])):\n new_subx,new_suby= self.Classify(scenario,index)\n new_x.append(new_subx)\n new_y.append(new_suby)\n else:\n new_x.append([])\n new_y.append([])\n\n self.OrderedData[scenario] = {'x':new_x,'y':new_y}\n \n \n def Classify(self,scenario,index):\n \n xset = self.formated_data[scenario]['x'][index]\n yset = self.formated_data[scenario]['y'][index]\n \n #searchRange = self.getIndexes(scenario,index)\n \n newX = []\n newY = []\n roamingX=[xset[0]]\n roamingY=[yset[0]]\n for i in range(0,len(xset)-1):\n if self.isDist(xset,yset,i):\n #if self.isDist(xset,yset,searchRange,i):\n roamingX.append(xset[i+1])\n roamingY.append(yset[i+1])\n else:\n newX.append(roamingX)\n newY.append(roamingY)\n roamingX=[xset[i+1]]\n roamingY=[yset[i+1]]\n \n newX.append(roamingX)\n newY.append(roamingY)\n \n return newX,newY\n \n def isDist(self,xset,yset,i):\n P0 = Point(np.log10(xset[i]),yset[i])\n P1 = Point(np.log10(xset[i+1]),yset[i+1])\n d = np.abs(P0.x - P1.x)\n if d>0.02:\n return False \n else:\n if np.abs(P0.y - P1.y) < 0.5:\n return True\n else:\n return False\n \n \n \n def getIndexes(self,scenario,index):\n n = len(self.formated_data[scenario]['x'])\n t = list(range(n))\n t.pop(index)\n\n \n searchRange = {}\n for i in t:\n searchRange[i]={'x':self.formated_data[scenario]['x'][i],'y':self.formated_data[scenario]['y'][i]}\n return searchRange\n","sub_path":"code/Theory/Plotting/Plot2.py","file_name":"Plot2.py","file_ext":"py","file_size_in_byte":14169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"561159992","text":"import numpy as np\n\n\nclass ReplayPriorityMemory:\n def __init__(self, size, batch_size, prob_alpha=1):\n self.size = size\n self.batch_size = batch_size\n self.prob_alpha = prob_alpha\n self.memory = []\n self.priorities = np.zeros((size,), dtype=np.float32)\n self.pos = 0\n\n def push(self, transition):\n new_priority = np.median(self.priorities) if self.memory else 1.0\n\n self.memory.append(transition)\n if len(self.memory) > self.size:\n del self.memory[0]\n pos = len(self.memory) - 1\n self.priorities[pos] = new_priority\n\n def sample(self):\n probs = np.array(self.priorities)\n if len(self.memory) < len(probs):\n probs = probs[:len(self.memory)]\n\n probs += 1e-8\n probs = probs ** self.prob_alpha\n probs /= probs.sum()\n\n indices = np.random.choice(len(self.memory), self.batch_size, p=probs)\n samples = [self.memory[idx] for idx in indices]\n return samples, indices\n\n def update_priorities(self, batch_indices, batch_priorities):\n for idx, priority in zip(batch_indices, batch_priorities):\n self.priorities[idx] = priority.item()\n\n def __len__(self):\n return len(self.memory)\n","sub_path":"reinforcement_learning/replay_priority_memory.py","file_name":"replay_priority_memory.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"11647131","text":"import xmltodict\nfrom time import sleep\nimport pymysql\nfrom db import dbModel\n\ndb= dbModel()\nclass dictbreak_down:\n def __init__(self, filename):\n with open(filename) as file:\n doc = xmltodict.parse(file.read())\n\n self.my_obj = doc['sdnList']['sdnEntry']\n # self.report = open('report.txt', 'w+')\n for i in range(0, len(self.my_obj)):\n if 'addressList' in self.my_obj[i].keys():\n # print (self.my_obj[i]['addressList'])\n if isinstance(self.my_obj[i]['addressList'], dict):\n for one in self.my_obj[i]['addressList'].keys():\n if one == 'address':\n print ()\n # exit()\n if isinstance(self.my_obj[i]['addressList']['address'], dict):\n self.dictor(self.my_obj[i]['addressList']['address'], self.my_obj[i]['uid'])\n\n elif isinstance(self.my_obj[i]['addressList']['address'], list):\n self.lister(self.my_obj[i]['addressList']['address'], self.my_obj[i]['uid'])\n\n else:\n pass\n\n\n\n\n def lister(self, arg, uid):\n print ('Received in Lister')\n if isinstance(arg, list):\n for each in arg:\n if isinstance(each, dict):\n self.dictor(each, uid)\n elif isinstance(each, list):\n self.lister(each, uid)\n else:\n print (each)\n else:\n print ('Wrong dataType received.')\n\n print ('Exiting Lister')\n # sleep(0.5)\n\n\n\n def dictor(self, arg, uid):\n print ('Received in Dictor')\n if isinstance(arg, dict):\n print (arg.keys())\n if 'address2' in arg.keys():\n print ('Found Address2 ')\n print (arg['uid'], uid, arg['address2'])\n add = arg['address2'].replace(\"'\", ' ')\n db.t_sdn_addressList_address2(uid, arg['uid'], add, None)\n # sleep(5)\n elif 'address3' in arg.keys():\n print ('Found Address3 ')\n print (arg['uid'], uid, arg['address3'])\n add = arg['address3'].replace(\"'\", ' ')\n db.t_sdn_addressList_address3(uid, arg['uid'], add, None)\n # sleep(5)\n\n elif 'postalCode' in arg.keys():\n print ('Found Postal code.')\n print (arg['uid'], uid, arg['postalCode'])\n add = arg['postalCode'].replace(\"'\", ' ')\n db.t_sdn_addressList_postalCode(uid, arg['uid'], add, None)\n # sleep(5)\n\n\n elif 'address1' in arg.keys():\n print ('Found Address1 ')\n print (arg['uid'], uid, arg['address1'])\n add = arg['address1'].replace(\"'\", ' ')\n db.t_sdn_addressList_address1(uid, arg['uid'], add, None)\n\n elif 'city' in arg.keys():\n print ('Found City ')\n print (arg['uid'], uid, arg['city'])\n add = arg['city'].replace(\"'\", ' ')\n db.t_sdn_addressList_city(uid, arg['uid'], add, None)\n # sleep(5)\n\n elif 'stateOrProvince' in arg.keys():\n print ('Found StateOrProvice Key.')\n print (arg['uid'], uid, arg['stateOrProvince'])\n add = arg['stateOrProvince'].replace(\"'\", ' ')\n db.t_sdn_addressList_stateorProvince(uid, arg['uid'], add, None)\n # sleep(0.5)\n\n elif 'country' in arg.keys():\n print ('Found country ')\n print (arg['uid'], uid, arg['country'])\n add = arg['country'].replace(\"'\", ' ')\n db.t_sdn_addressList_country(uid, arg['uid'], add, None)\n # sleep(1)\n\n for value in arg.values():\n if isinstance(value, dict):\n self.dictor(value, uid)\n elif isinstance(value, list):\n self.lister(value, uid)\n else:\n print (type(value), value)\n else:\n print ('wrong datatype presented')\n\n print ('Exiting Dictor')\n # sleep(0.5)\n\n\n\ntest= dictbreak_down('testfile.xml')\n","sub_path":"Server/DataFetch/Files BackUp/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"337165499","text":"#Zen System\n#\n#System Fonts\n\nimport pyglet\n\nclass SysFonts:\n \n name = \"System Font Loader\"\n \n def __init__(self,kernel):\n self.kernel = kernel\n self.fs = self.kernel.get_service('fs')\n \n pyglet.font.add_directory(self.fs.expand('{home}/sys/font/'))\n\nAPPLICATION = []\nSERVICE = {'sysfonts':SysFonts}","sub_path":"sys/boot/graphics/sysfonts.py","file_name":"sysfonts.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"393890287","text":"\"\"\"\nThis module prepares midi file data and feeds it to the neural\nnetwork for training\n\"\"\"\nimport sys\nimport json\nimport yaml\nimport time\nimport h5py\nimport random\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport keras\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import CuDNNLSTM, GRU, CuDNNGRU, Input\nfrom keras import regularizers\nfrom keras.utils import multi_gpu_model\nfrom keras.models import Model\nimport tensorflow as tf\n\nLayerType = CuDNNGRU\n\ndef ohe(matrix, n):\n cube = np.zeros((matrix.shape[0], matrix.shape[1], n), dtype=np.int)\n\n for row in range(matrix.shape[0]):\n for col in range(matrix.shape[1]):\n cube[row, col, matrix[row, col]] = 1\n\n return cube\n\nclass SequenceTrainer():\n def get_par (self, pars, keys, default):\n try:\n p = pars\n for key in keys:\n p = p[key]\n return p\n\n except:\n return default\n\n def prepare_data(self, data, split):\n \"\"\"\n The original input consists of a list of 5 matrices of sequence\n data (X) and a list of 5 matrices as target (Y)\n X[i].shape = (n, seq length, # of categories (usually 128))\n Y[i].shape = (n, # of categories)\n\n Args:\n data (list): list of X/Y_train, X/Y_val and X/Y_test\n split (list): List containing training fraction and validation fraction\n\n Returns:\n Four arrays: X_train, Y_train, X_val, Y_val\n \"\"\"\n # Create one hot encoded vectors\n train_data = ohe(data[0], 128)\n val_data = ohe(data[1], 128)\n\n # Be sure that Y follows an X sequence\n X_train = train_data[:, :-1, :]\n Y_train = train_data[:, -1:, :]\n\n X_val = val_data[:, :-1, :]\n Y_val = val_data[:, -1:, :]\n\n # Remove 2nd index from Y, which is one\n Y_train = Y_train.reshape((Y_train.shape[0], Y_train.shape[2]))\n Y_val = Y_val.reshape((Y_val.shape[0], Y_val.shape[2]))\n\n return X_train, X_val, Y_train, Y_val\n\n def single_input_model(self, X, Y, layers, dropout):\n \"\"\" Create a simple input/output network\n\n This model can be trained to associate one voice with one target.\n\n Args:\n X (list of arrays): contains input data\n Y (list of arrays): targets\n layers (list): list of two lists of layers to be created. The first\n list contains the sizes of RNN layers to be created;\n the second list the sizes of Dense layers\n dropout (float): dropout value; if > 0 a dropout layer is added\n to each RNN or Dense layer\n\n Returns:\n The model\n \"\"\"\n rnn_layers = layers[0]\n dense_layers = layers[1]\n\n # In this test using the kernel regularizer = weight decay\n l2k = self.l2k # Weights regularizer\n l2a = self.l2a # activity regularizer\n l2r = self.l2r # recurrent regularizer\n print ('*** l2k =', l2k, 'l2a =', l2a, 'l2r =', l2r)\n\n input_layer = Input(shape=(X.shape[1], X.shape[2]), name='Input_Layer')\n\n if len(rnn_layers) == 1:\n model = LayerType(rnn_layers[0],\n kernel_regularizer=regularizers.l2(l2k),\n recurrent_regularizer=regularizers.l2(l2r),\n activity_regularizer=regularizers.l2(l2a),\n name='RNN_1')(input_layer)\n else:\n model = LayerType(rnn_layers[0], return_sequences=True,\n kernel_regularizer=regularizers.l2(l2k),\n recurrent_regularizer=regularizers.l2(l2r),\n activity_regularizer=regularizers.l2(l2a),\n name='RNN_1')(input_layer)\n for layer in range(1, len(rnn_layers) - 1):\n model = LayerType(rnn_layers[layer],\n return_sequences=True,\n kernel_regularizer=regularizers.l2(l2k),\n recurrent_regularizer=regularizers.l2(l2r),\n activity_regularizer=regularizers.l2(l2a),\n name='RNN_' + str(layer+1))(model)\n if dropout > 0:\n model= Dropout(dropout)(model)\n\n name = 'RNN_{:d}'.format(len(rnn_layers))\n model = LayerType(rnn_layers[-1],\n kernel_regularizer=regularizers.l2(l2k),\n recurrent_regularizer=regularizers.l2(l2r),\n activity_regularizer=regularizers.l2(l2a),\n name=name)(model)\n if dropout > 0:\n model= Dropout(dropout)(model)\n\n for i, layer in enumerate(dense_layers):\n model = Dense(layer, activation='relu',\n kernel_regularizer=regularizers.l2(l2k),\n activity_regularizer=regularizers.l2(l2a),\n name='Dense_'+str(i))(model)\n #model = BatchNormalization()(model)\n if dropout > 0:\n model = Dropout(dropout)(model)\n\n model = Dense(Y.shape[1], activation='softmax', name='Dense_softmax')(model)\n\n main_model = Model(inputs=input_layer, outputs=[model])\n\n return main_model\n\n def setup_model(self, model_def, X, Y, dropout, gpu):\n \"\"\" Sets up a Neural Network to generate music\n\n Args:\n model_def (dict): model definition; 'model' names the model type,\n 'layers' lists the layer sizes for the model\n X (array): input sequences\n Y (array): input target\n dropout (float): dropout fraction\n gpu (int): when > 1, a multi gpu model will be built\n\n Returns:\n the created model\n \"\"\"\n model_type = model_def['model']\n layers = model_def['layers']\n if gpu > 1:\n with tf.device(\"/cpu:0\"):\n model = self.single_input_model(X, Y, layers, dropout)\n model = multi_gpu_model(model, gpus=gpu)\n print('Running a multi GPU model on a', model_type, 'model')\n else:\n with tf.device(\"/gpu:0\"):\n model = self.single_input_model(X, Y, layers, dropout)\n print('Running a single GPU model on a', model_type, 'model')\n\n model.compile(optimizer=keras.optimizers.Adam (),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n return model\n\n def train(self, hp, model_def, data, dropout, batch_size,\n epochs, gpu):\n\n hp = dict(hp)\n split = 0.8\n hp['batch_sizes'] = [batch_size]\n hp['dropouts'] = [dropout]\n\n X_train, X_val, Y_train, Y_val = self.prepare_data(data, split)\n print('X shape', X_train.shape)\n print('Number of training sequences:', len(X_train))\n print('Number of validation sequences:', len(X_val))\n print('Length of sequences is', X_train.shape[1])\n\n model = self.setup_model(model_def, X_train, Y_train, dropout, gpu)\n model.summary()\n\n print('\\nStarted training the model')\n print('Batch size:', batch_size)\n print('GPU\\'s:', gpu)\n print('Dropout:', dropout)\n\n history = model.fit(X_train, Y_train,\n verbose=1,\n epochs=epochs,\n batch_size=batch_size,\n validation_data=(X_val, Y_val))\n\n return history\n\n def train_sequence(self, hyper_pars, notes_file):\n self.hyper_pars = hyper_pars\n\n model_types = self.get_par(hyper_pars, ['models'], None)\n batch_sizes = self.get_par(hyper_pars, ['batch_sizes'], [128])\n dropouts = self.get_par(hyper_pars, ['dropouts'], [0.3])\n epochs = self.get_par(hyper_pars, ['epochs'], 100)\n gpus = self.get_par(hyper_pars, ['gpus'], [1])\n\n print('Tensorflow version:', tf.__version__)\n print('Keras version:', keras.__version__)\n\n #data = read_sequences(notes_file)\n #self.stf(data)\n #sys.exit()\n train_data = np.genfromtxt('train.csv', delimiter=',', dtype=np.int)\n val_data = np.genfromtxt('val.csv', delimiter=',', dtype=np.int)\n\n n_runs = len(model_types) * len(dropouts) * len(batch_sizes) * \\\n len(gpus)\n columns = ['Epochs', 'Model type', 'Dropouts', 'Batch size', 'GPU\\'s',\n 'Acc', 'Val. Acc', 'Time']\n run_no = 0\n df = pd.DataFrame(np.zeros((n_runs, len(columns))), columns=columns)\n for gpu in gpus:\n for index in model_types:\n model_def = hyper_pars[index]\n for dropout in dropouts:\n for batch_size in batch_sizes:\n print('==>', index, '=', str(model_def))\n\n self.l2r = 1e-6\n self.l2k = 1e-6\n self.l2a = 0.0\n df.iloc[run_no]['Epochs'] = epochs\n df.iloc[run_no]['Model type'] = len(model_def)\n df.iloc[run_no]['Dropouts'] = dropout\n df.iloc[run_no]['Batch size'] = batch_size\n df.iloc[run_no]['GPU\\'s'] = gpu\n\n model_time = time.time()\n\n history = self.train(hyper_pars, model_def,\n (train_data, val_data),\n dropout, batch_size, epochs, gpu)\n model_time = time.time() - model_time\n print('CPU time: {:.0f}'.format(model_time))\n hist = history.history\n\n df.iloc[run_no]['Acc'] = hist['acc'][-1]\n df.iloc[run_no]['Val. Acc'] = hist['val_acc'][-1]\n df.iloc[run_no]['Time'] = int(model_time)\n df.to_csv('results.csv')\n print(df)\n\n run_no += 1\n # for\n # for\n # for\n # for\n\n return\n\n## Class: music_trainer ###\n\ndef main(argv):\n # System wide constants for MusicData\n seed = 42\n np.random.seed(seed)\n random.seed(seed)\n\n notes_file = 'notes.h5'\n config_file = 'config.yaml'\n\n # Read hyperparameters\n with open(config_file) as yaml_data:\n hyper_pars = yaml.safe_load(yaml_data)\n\n # Initialize CPU time measurement\n seconds = time.time()\n\n SequenceTrainer().train_sequence(hyper_pars, notes_file)\n\n seconds = int(time.time() - seconds + 0.5)\n\n print('\\n*** Ready in', seconds, 'seconds.')\n\nif __name__ == '__main__':\n pd.set_option('display.max_columns', None)\n main(sys.argv[1:])\n","sub_path":"rnn-demo.py","file_name":"rnn-demo.py","file_ext":"py","file_size_in_byte":10972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"502183462","text":"import todoist\nimport json\nimport curses\nfrom math import *\nimport os.path\nimport time\n\ndef sync():\n global todoistJson\n global itemSize\n global itemList\n global projectSize\n global projectList\n todoistJson = api.sync(resource_types=['all'])\n items = todoistJson['Items']\n itemList = [ Item(count) for count in items ]\n itemSize = len(itemList)\n project = todoistJson['Projects']\n projectList = [ Project(count) for count in project ]\n projectSize = len(projectList)\n\ndef init():\n global api\n dotTodoist = open(os.getenv('HOME') + '/.todoist', 'r')\n api = todoist.TodoistAPI(dotTodoist.read())\n dotTodoist.close()\n sync()\n\nclass Item:\n def __init__(self, itemData):\n self.content = itemData.get('content')\n self.id = itemData.get('id')\n self.due_date = itemData.get('due_date')\n self.project_id = itemData.get('project_id')\n def content():\n return self.content\n def due_date():\n return self.due_date\n def project_id():\n return self.project_id\n def id():\n return self.id\n\nclass Project:\n def __init__(self, projectData):\n self.name = projectData.get('name')\n self.id = projectData.get('id')\n def daname():\n return self.name\n def id():\n return self.id\n\ndef cursesStartup():\n global stdscr,height,width,midheight,midwidth\n stdscr = curses.initscr()\n stdscr.border(2)\n height,width = stdscr.getmaxyx()\n midheight = int(height / 2)\n midwidth = int(width / 2)\n stdscr.addstr( midheight, midwidth - 4, \"Todoist!\")\n stdscr.refresh()\n time.sleep(1)\n stdscr.clear()\n\ndef cursesMain():\n global thebox\n stdscr.clear()\n menu = curses.newwin(height - 2, 15, 0, 1)\n thebox = curses.newwin(height - 3, width - 15, 2, 14)\n menu.box()\n thebox.box()\n menu.addstr( 1, 1, \"(i)inbox\")\n menu.addstr( 2, 1, \"(t)today\")\n menu.addstr( 3, 1, \"(7)7 days\")\n menu.addstr( 4, 1, \"(p)projects\")\n menu.addstr( 5, 1, \"(a)all tasks\")\n menu.addstr( 6, 1, \"(q)quit\")\n stdscr.refresh()\n menu.refresh()\n thebox.refresh()\n keys = stdscr.getch()\n if keys == ord('a'):\n cursesBox(itemList)\n thebox.refresh()\n z = stdscr.getch()\n elif keys == ord('i'):\n cursesInbox()\n elif keys == ord('q'):\n return False\n return True\n\ndef cursesUI():\n cursesStartup()\n while cursesMain():\n nothing = True\n curses.endwin()\n\ndef cursesInbox():\n row_num = 0\n for projnum in projectList:\n if projnum.name == 'Inbox':\n for itemnum in itemList:\n if(itemnum.project_id == projnum.id):\n row_num += 1\n max_row = height - 6\n pages = int( ceil( row_num / max_row ) )\n page = 1\n i = 1\n for 
projnum in projectList:\n if projnum.name == 'Inbox':\n for itemnum in itemList:\n if(itemnum.project_id == projnum.id):\n thebox.addstr(\"\\t\" + itemnum.content)\n if (i == row_num):\n break\n i += 1;\n\ndef cursesBox(something):\n max_row = height - 6\n row_num = len(something)\n pages = int( ceil( row_num / max_row ) )\n position = 1\n page = 1\n for i in range( 1, max_row - 1):\n thebox.addstr( i , 2, something[i - 1].content)\n if (i == row_num):\n break\n\ninit()\ncursesUI()\n#i = 1\n#print(itemList[i].content)\n#print(itemList[i].due_date)\n#print(itemList[i].project_id)\n#print(itemList[i].id)\n#print(projectList[1].name)\n#print(projectList[1].id)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"535447574","text":"'''\nPurpose :Market Risk feed files\nDepartment and Desk :IT\nRequester: :Natalie Austin\nDeveloper :Douglas Finkel / Henk Nel\nCR Number :264536\n\n -- HISTORY --\nDate CR Requestor Developer Change\n----------------------------------------------------------------------------------------\n2016-01-20 CHNG0003404656 Ashley Canter Chris Human http://abcap-jira/browse/MINT-444\n'''\nimport ael, string, acm, MR_MainFunctions\n\nInsL = []\n\n# OPENFILE ##########################################################################################################\n \ndef OpenFile(temp,FileDir,Filename,*rest):\n\n filename = FileDir + Filename\n\n outfile = open(filename, 'w')\n\n outfile.close()\n\n del InsL[:]\n InsL[:] = [] \n\n return filename\n\n# OPENFILE ##########################################################################################################\n\n\n\n# WRITE - FILE ######################################################################################################\n\ndef Write(yc,FileDir,Filename,*rest):\n \n filename = FileDir + Filename\n\n if (yc.seqnbr) not in InsL:\n InsL.append(yc.seqnbr)\n outfile = open(filename, 'a')\n \n for member in yc.attributes():\n\n #Base record\n \n BAS = 'BAS'\n ZeroSPEC = 'ZeroSPEC'\n OBJECT = 'ZeroSPEC'\n TYPE = 'Zero'\n IDENTIFIER = MR_MainFunctions.NameFix(member.issuer_ptynbr.ptyid + '_' + yc.curr.insid + '_HazardCurve')\n NAME = MR_MainFunctions.NameFix(member.issuer_ptynbr.ptyid + '_' + yc.curr.insid + '_HC')\n \n ActiveFLAG = 'TRUE'\n CreditStateENUM = ''\n CurveFUNC = ''\n CurveUnitCAL = ''\n \n CurveUnitDAYC = MR_MainFunctions.DayCountFix(yc.storage_daycount)\n \n CurveUnitPERD = 'quarter'\n \n CurveUnitUNIT = '%'\n DatumDATE = MR_MainFunctions.Datefix(acm.Time().DateNow())\n OriginOffsetNB = '0'\n RelativeCurveFLAG = 'TRUE'\n StateProcFUNC = '@hazard curve bootstrap (scenario)'\n TimeEvolutionFUNC = '@Constant'\n FunctionIdFLAG = 'TRUE'\n GenZeroSfExt0FLAG = 'FALSE'\n GenZeroSurface0SIN = '@Inverse Constant'\n \n outfile.write('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n'%(BAS, ZeroSPEC, OBJECT, TYPE, IDENTIFIER, NAME, ActiveFLAG, CreditStateENUM, CurveFUNC, CurveUnitCAL, CurveUnitDAYC, CurveUnitPERD, CurveUnitUNIT, DatumDATE, OriginOffsetNB, RelativeCurveFLAG, StateProcFUNC, TimeEvolutionFUNC, FunctionIdFLAG, GenZeroSfExt0FLAG, GenZeroSurface0SIN))\n\n # Roll Over Function Parameters\n # Roll Over Generic Volatility Moneyness Term Surface\n BASFLAG = 'rm_ro'\n Volatility = 'ZeroSPEC : Generic Zero Surface'\n ATTRIBUTE = 'Generic Zero Surface'\n OBJECT = 'ZeroSPEC'\n \n GnVolMnyTrmSf0AXS = '' \n GnVolMnyTrmSfNODE = '' \n \n# for points in 
yc.points():\n \n GnVolMnyTrmSf0AXS = '' #MR_MainFunctions.Rolling_NameFix(points.date_period)\n GnVolMnyTrmSfNODE = '' #points.value\n \n if GnVolMnyTrmSfNODE != '':\n outfile.write('%s,%s,%s,%s,%s,%s\\n'%(BASFLAG, Volatility, ATTRIBUTE, OBJECT, GnVolMnyTrmSf0AXS, GnVolMnyTrmSfNODE))\n\n for spreads in member.spreads():\n\n if spreads.point_seqnbr.date_period != '0d':\n \n # Roll Over Function Parameters\n BASFLAG = 'rm_ro'\n Volatility = 'ZeroSPEC : Function Parameters'\n ATTRIBUTE = 'Function Parameters'\n OBJECT = 'ZeroSPEC'\n FunctionParamsVAL = ''\n \n if FunctionParamsVAL != '':\n outfile.write('%s,%s,%s,%s,%s\\n'%(BASFLAG, Volatility, ATTRIBUTE, OBJECT, FunctionParamsVAL))\n \n # Roll Over Procedure Parameter\n BASFLAG = 'rm_ro'\n Volatility = 'ZeroSPEC : Procedure Parameter'\n ATTRIBUTE = 'Procedure Parameter'\n OBJECT = 'ZeroSPEC'\n \n ProcedureParamXREF = member.issuer_ptynbr.ptyid + '_' + yc.curr.insid + '_CTDS_' + spreads.point_seqnbr.date_period\n \n outfile.write('%s,%s,%s,%s,%s\\n'%(BASFLAG, Volatility, ATTRIBUTE, OBJECT, ProcedureParamXREF))\n \n outfile.close()\n\n return str(yc.seqnbr)\n \n# WRITE - FILE ######################################################################################################\n","sub_path":"Python modules/MR_Zero_Hazard_Curve.py","file_name":"MR_Zero_Hazard_Curve.py","file_ext":"py","file_size_in_byte":5539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"653594563","text":"\"\"\"\ncloud_sptheme.utils -- internal helper utilities\n\"\"\"\n#=============================================================================\n# imports\n#=============================================================================\n# core\nfrom functools import update_wrapper\nimport logging; log = logging.getLogger(__name__)\nimport os\nimport sys\n# site\nfrom sphinx.util.fileutil import copyfile\n# pkg\n# local\n__all__ = [\n # py2/3 compat\n 'PY2', 'PY3', 'u', 'ru',\n 'unicode', 'unicode_or_str',\n\n # monkeypatch helpers\n \"patchapplier\",\n \"monkeypatch\",\n]\n\n#=============================================================================\n# internal py2/3 compat helpers\n#=============================================================================\nPY2 = sys.version_info < (3,0)\nPY3 = not PY2\n\n# FIXME: these aren't very rigorous / correct, but they work for current purposes.\nif PY2:\n def u(s):\n return s.decode(\"unicode_escape\")\n def ru(s):\n return s.decode(\"ascii\")\n unicode = unicode\n unicode_or_str = (unicode, str)\nelse:\n def u(s):\n return s\n ru = u\n unicode = str\n unicode_or_str = (str,)\n\n#=============================================================================\n# monkeypatch helpers\n#=============================================================================\ndef patchapplier(func):\n \"\"\"\n function decorator to help functions that apply a monkeypatch.\n makes them only run once.\n \"\"\"\n def wrapper():\n if wrapper.patched:\n return False\n func()\n wrapper.patched = True\n logging.getLogger(func.__module__).debug(\"%s: patch applied\", func.__name__)\n return True\n wrapper.patched = False\n update_wrapper(wrapper, func)\n return wrapper\n\ndef monkeypatch(target, name=None):\n \"\"\"\n helper to monkeypatch another object.\n the decorated function is wrapped around the existing function in\n :samp:`target.{name}`, and used to replace it.\n\n **name** defaults to the name of the function being decorated.\n\n the original value is passed in as the first 
positional argument to the function.\n \"\"\"\n def builder(func):\n attr = name or func.__name__\n wrapped = getattr(target, attr)\n def wrapper(*args, **kwds):\n return func(wrapped, *args, **kwds)\n update_wrapper(wrapper, wrapped)\n wrapper.__wrapped__ = wrapped # not set by older update_wrapper() versions\n setattr(target, attr, wrapper)\n return func # return orig func so we can use it again\n return builder\n\n#=============================================================================\n# sphinx helpers\n#=============================================================================\n\ndef _patch_html_extra_static(builder):\n \"\"\"\n monkeypatch hook for add_static_file() to use\n \"\"\"\n config = builder.config\n if hasattr(config, \"html_extra_static\"):\n return\n\n @monkeypatch(builder)\n def copy_static_files(wrapped):\n wrapped()\n # NOTE: code modeled after copy_static_files()...\n ctx = builder.globalcontext.copy()\n for source, target in config.html_extra_static:\n # 'source' existence should already be checked by add_static_file(),\n # 'target' should already be abspath w/in outdir\n copyfile(source, target)\n\n config.html_extra_static = []\n\n\ndef add_static_file(builder, source, name=None, stylesheet=False,\n javascript=False):\n \"\"\"\n monkeypatch sphinx's html builder to include specified static file.\n \n will copy file to :samp:`{outdir}/_static/{name}`.\n\n :param builder:\n ref to HTMLBuilder\n \n :param source: \n path to source file\n \n :param name:\n name for target file; defaults to base name of source file.\n \n :param stylesheet:\n register file as additional stylesheet\n \"\"\"\n _patch_html_extra_static(builder)\n\n if not os.path.exists(source):\n builder.warn('static asset %r does not exist' % source)\n return\n\n if not name:\n name = os.path.basename(source)\n\n target = os.path.join(builder.outdir, \"_static\", name)\n\n if stylesheet:\n builder.app.add_stylesheet(name)\n\n if javascript:\n builder.app.add_javascript(name)\n\n builder.config.html_extra_static.append((source, target))\n\n#=============================================================================\n# eof\n#=============================================================================\n","sub_path":"cloud_sptheme/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"373806943","text":"import re\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Tuple, Union\n\nimport cv2\nimport numpy as np\nimport scipy.io as sio\n\nfrom .mask_utils import (\n bounding_box,\n fix_duplicates,\n get_inst_centroid,\n get_inst_types,\n label_semantic,\n)\nfrom .multiproc import run_pool\n\ntry:\n import tables as tb\n\n _has_tb = True\nexcept ModuleNotFoundError:\n _has_tb = False\n\n\nclass FileHandler:\n \"\"\"Class for handling file I/O.\"\"\"\n\n @staticmethod\n def read_img(path: Union[str, Path]) -> np.ndarray:\n \"\"\"Read an image & convert from bgr to rgb. (cv2 reads imgs in bgr).\n\n Parameters\n ----------\n path : str or Path\n Path to the image file.\n\n Returns\n -------\n np.ndarray:\n The image. 
Shape (H, W, 3).\n \"\"\"\n path = Path(path)\n return cv2.cvtColor(cv2.imread(path.as_posix()), cv2.COLOR_BGR2RGB)\n\n @staticmethod\n def write_img(path: Union[str, Path], img: np.ndarray) -> None:\n \"\"\"Write an image.\n\n Parameters\n ----------\n path : str or Path\n Path to the image file.\n img : np.ndarray\n The image to be written.\n\n \"\"\"\n path = Path(path)\n cv2.imwrite(path.as_posix(), cv2.cvtColor(img, cv2.COLOR_RGB2BGR))\n\n @staticmethod\n def read_mat(\n path: Union[str, Path],\n key: str = \"inst_map\",\n retype: bool = True,\n return_all: bool = False,\n ) -> Union[np.ndarray, Dict[str, np.ndarray], None]:\n \"\"\"Read a mask from a .mat file.\n\n If a mask is not found, return None\n\n Parameters\n ----------\n path : str or Path\n Path to the .mat file.\n key : str, default=\"inst_map\"\n Name/key of the mask type that is being read from .mat\n retype : bool, default=True\n Convert the matrix type.\n return_all : bool, default=False\n Return the whole dict. Overrides the `key` arg.\n\n\n Raises\n ------\n ValueError: If an illegal key is given.\n\n Returns\n -------\n Union[np.ndarray, List[np.ndarray], None]:\n if return_all == False:\n The instance/type/semantic labelled mask. Shape: (H, W).\n if return_all == True:\n All the masks in the .mat file returned in a dictionary.\n \"\"\"\n dtypes = {\n \"inst_map\": \"int32\",\n \"type_map\": \"int32\",\n \"sem_map\": \"int32\",\n \"inst_centroid\": \"float64\",\n \"inst_type\": \"int32\",\n }\n\n path = Path(path)\n if not path.exists():\n raise ValueError(f\"{path} not found\")\n\n try:\n mask = sio.loadmat(path.as_posix())\n except Exception:\n mask = None\n\n if not return_all:\n allowed = (\"inst_map\", \"type_map\", \"inst_centroid\", \"inst_type\", \"sem_map\")\n if key not in allowed:\n raise ValueError(f\"Illegal key given. Got {key}. Allowed: {allowed}\")\n\n try:\n mask = mask[key]\n if retype:\n mask = mask.astype(dtypes[key])\n except Exception:\n mask = None\n\n return mask\n\n @staticmethod\n def get_geo_obj(what: str = \"qupath\") -> Dict[str, str]:\n \"\"\"Get the dict format for a geojson obj.\n\n For example: get the obj in QuPath PathCellDetection obj\n\n Parameters\n ----------\n what : str\n One of \"qupath\", \"simple\"\n\n Returns\n -------\n Dict[str, Any]:\n A dictionary in geojson format.\n \"\"\"\n allowed = (\"qupath\",)\n if what not in allowed:\n raise ValueError(f\"Illegal `what`-arg. Got: {what}. Allowed: {allowed}\")\n\n geo_obj = {}\n if what == \"qupath\":\n geo_obj.setdefault(\"type\", \"Feature\")\n\n # PathCellAnnotation, PathCellDetection, PathDetectionObject\n geo_obj.setdefault(\"id\", \"PathCellDetection\")\n geo_obj.setdefault(\"geometry\", {\"type\": \"Polygon\", \"coordinates\": None})\n geo_obj.setdefault(\n \"properties\",\n {\n \"isLocked\": \"false\",\n \"measurements\": [],\n \"classification\": {\"name\": None},\n },\n )\n\n return geo_obj\n\n @staticmethod\n def get_gson(\n inst: np.ndarray,\n type: np.ndarray,\n classes: Dict[str, int],\n soft_type: np.ndarray = None,\n x_offset: int = 0,\n y_offset: int = 0,\n geo_format: str = \"qupath\",\n ) -> Dict[str, Any]:\n \"\"\"Get the labels in geojson format.\n\n Parameters\n ----------\n inst : np.ndarray\n Instance labelled mask. Shape: (H, W).\n type : np.ndarray\n Cell type labelled semantic segmentation mask. Shape: (H, W).\n classes : Dict[str, int]\n Class dict e.g. {\"inflam\":1, \"epithelial\":2, \"connec\":3}\n soft_type : np.ndarray, default=None\n Softmax type mask. Shape: (C, H, W). 
C is the number of classes.\n x_offset : int, default=0\n x-coordinate offset. (to set geojson to .mrxs wsi coordinates)\n y_offset : int, default=0\n y-coordinate offset. (to set geojson to .mrxs wsi coordinates)\n geo_format : str, default=\"qupath\"\n The format for the geo object. \"qupath\" format allows the result file\n to be read with QuPath. \"simple\" format allows for geopandas etc.\n\n Returns\n -------\n Dict[str, Any]:\n A geojson dictionary of the instance labelled mask.\n \"\"\"\n inst_map = fix_duplicates(inst)\n inst_list = list(np.unique(inst_map))\n if 0 in inst_list:\n inst_list.remove(0)\n\n geo_objs = []\n for inst_id in inst_list:\n # set up the annotation geojson obj\n\n # Get cell instance and cell type\n inst = np.array(inst_map == inst_id, np.uint8)\n inst_type = type[inst_map == inst_id].astype(\"uint8\")\n inst_type = np.unique(inst_type)[0]\n\n inst_type = [key for key in classes.keys() if classes[key] == inst_type][0]\n\n # type probabilities\n if soft_type is not None:\n type_probs = soft_type[..., inst_map == inst_id].mean(axis=1)\n inst_type_soft = dict(zip(classes.keys(), type_probs))\n # convert to float for json serialization\n for key in inst_type_soft.keys():\n inst_type_soft[key] = float(inst_type_soft[key])\n\n # get the cell contour coordinates\n contours, _ = cv2.findContours(inst, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # got a line instead of a polygon\n if contours[0].shape[0] < 3:\n continue\n\n # shift coordinates based on the offsets\n if x_offset:\n contours[0][..., 0] += x_offset\n if y_offset:\n contours[0][..., 1] += y_offset\n\n # Get the geojson obj\n geo_obj = FileHandler.get_geo_obj(what=geo_format)\n poly = contours[0].squeeze().tolist()\n poly.append(poly[0]) # close the polygon\n geo_obj[\"geometry\"][\"coordinates\"] = [poly]\n geo_obj[\"properties\"][\"classification\"][\"name\"] = inst_type\n if soft_type is not None:\n geo_obj[\"properties\"][\"classification\"][\n \"probabilities\"\n ] = inst_type_soft\n\n geo_objs.append(geo_obj)\n\n return geo_objs\n\n @staticmethod\n def read_h5_patch(\n path: Union[Path, str],\n ix: int,\n return_im: bool = True,\n return_inst: bool = True,\n return_type: bool = True,\n return_sem: bool = False,\n return_name: bool = False,\n return_nitems: bool = False,\n return_all_names: bool = False,\n ) -> Dict[str, np.ndarray]:\n \"\"\"Read img & mask patches at index `ix` from a hdf5 db.\n\n Parameters\n ----------\n path : Path or str\n Path to the h5-db.\n ix : int\n Index for the hdf5 db-arrays.\n return_im : bool, default=True\n If True, returns an image. (If the db contains these.)\n return_inst : bool, default=True\n If True, returns a instance labelled mask. (If the db contains these.)\n return_type : bool, default=True\n If True, returns a type mask. (If the db contains these.)\n return_sem : bool, default=False\n If True, returns a semantic mask, (If the db contains these.)\n return_name : bool, default=False\n If True, returns a name for the patch, (If the db contains these.)\n return_nitems : bool, default=False\n If True, returns the number of items in the db.\n return_all_names : bool, default=False\n If True, returns all the names in the db.\n\n Returns\n -------\n Dict[str, np.ndarray]:\n A Dict of numpy matrices. 
Img shape: (H, W, 3), mask shapes: (H, W).\n keys of the dict are: \"im\", \"inst\", \"type\", \"sem\"\n\n Raises\n ------\n IOError: If a mask that does not exist in the db is being read.\n \"\"\"\n if not _has_tb:\n raise ModuleNotFoundError(\n \"`FileHandler.read_h5_patch` method requires pytables library. \"\n \"Install with `pip install tables`.\"\n )\n\n path = Path(path)\n with tb.open_file(path.as_posix(), \"r\") as h5:\n out = {}\n\n if return_im:\n try:\n out[\"image\"] = h5.root.imgs[ix, ...]\n except Exception:\n raise IOError(\n \"The HDF5 database does not contain images. Try \"\n \"setting `return_im=False`\"\n )\n\n if return_inst:\n try:\n out[\"inst\"] = h5.root.insts[ix, ...]\n except Exception:\n raise IOError(\n \"The HDF5 database does not contain instance labelled masks. \"\n \"Try setting `return_inst=False`\"\n )\n\n if return_type:\n try:\n out[\"type\"] = h5.root.types[ix, ...]\n except Exception:\n raise IOError(\n \"The HDF5 database does not contain type masks. Try setting \"\n \"`return_type = False` \"\n )\n\n if return_sem:\n try:\n out[\"sem\"] = h5.root.areas[ix, ...]\n except Exception:\n raise IOError(\n \"The HDF5 database does not contain semantic masks. Try \"\n \"setting `return_sem = False`\"\n )\n\n if return_name:\n try:\n fn = h5.root.fnames[ix]\n out[\"name\"] = Path(fn.decode(\"UTF-8\"))\n except Exception:\n raise IOError(\n \"The HDF5 database does not contain patch names. Try \"\n \"setting `return_name = False`\"\n )\n\n if return_nitems:\n try:\n out[\"nitems\"] = h5.root._v_attrs.n_items\n except Exception:\n raise IOError(\n \"The HDF5 database does not contain attribute `nitems`. Try \"\n \"setting `return_nitems = False`\"\n )\n\n if return_all_names:\n try:\n names = h5.root.fnames[:]\n out[\"names\"] = [Path(n.decode(\"UTF-8\")) for n in names]\n except Exception:\n raise IOError(\n \"The HDF5 database does not contain patch names. Try \"\n \"setting `return_all_names = False`\"\n )\n\n return out\n\n @staticmethod\n def write_mat(\n fname: Union[str, Path],\n inst: np.ndarray,\n type: np.ndarray = None,\n sem: np.ndarray = None,\n compute_centroids: bool = False,\n compute_bboxes: bool = False,\n **kwargs,\n ) -> None:\n \"\"\"\n Write multiple masks to .mat file.\n\n Keys always present in the file: \"inst_map\", \"inst_type\"\n\n Optional keys: \"type_map\", \"sem_map\", \"inst_bbox\", \"inst_centroid\"\n\n Parameters\n ----------\n fname : str or Path\n The file name of the .mat file.\n inst : np.ndarray\n Instance labelled mask. Shape: (H, W).\n type : np.ndarray\n Cell type labelled semantic segmentation mask. Shape: (H, W).\n sem : np.ndarray\n Tissue type labelled semantic segmentation mask. Shape: (H, W).\n compute_centroids : bool, optional\n Flag to compute instance centroids.\n compute_bboxes : bool, optional\n Flag to compute instance bboxes.\n \"\"\"\n fname = Path(fname)\n if not fname.parent.exists():\n raise ValueError(\n f\"The directory: {fname.parent.as_posix()} does not exist.\"\n )\n\n inst_map = fix_duplicates(inst)\n inst_types = get_inst_types(inst, type)\n\n res = {\n \"inst_map\": inst_map,\n \"inst_type\": inst_types,\n }\n\n if compute_centroids:\n centroids = get_inst_centroid(inst_map)\n res[\"inst_centroid\"] = centroids\n\n if compute_bboxes:\n inst_ids = list(np.unique(inst_map)[1:])\n bboxes = np.array(\n [bounding_box(np.array(inst_map == id_, np.uint8)) for id_ in inst_ids]\n )\n res[\"inst_bbox\"] = bboxes\n\n if type is not None:\n res[\"type_map\"] = type\n\n if sem is not None:\n res[\"sem_map\"] = sem\n\n sio.savemat(\n file_name=fname.with_suffix(\".mat\").as_posix(),\n mdict=res,\n )\n\n @staticmethod\n def write_gson(\n fname: Union[str, Path],\n inst: np.ndarray,\n type: np.ndarray = None,\n classes: Dict[str, int] = None,\n soft_type: np.ndarray = None,\n x_offset: int = 0,\n y_offset: int = 0,\n geo_format: str = \"qupath\",\n ) -> None:\n \"\"\"Convert the instance labelled mask into geojson obj or write it .json file.\n\n Parameters\n ----------\n fname : str | Path, optional\n File name for the .json file.\n inst : np.ndarray\n Instance labelled mask. Shape: (H, W).\n type : np.ndarray, optional\n Cell type labelled semantic segmentation mask. Shape: (H, W). If None,\n the classes of the objects will be set to {background: 0, foreground: 1}\n soft_type : np.ndarray, default=None\n Softmax type mask. Shape: (C, H, W). C is the number of classes.\n classes : Dict[str, int], optional\n Class dict e.g. {\"inflam\":1, \"epithelial\":2, \"connec\":3}. Ignored if\n `type` is None.\n x_offset : int, default=0\n x-coordinate offset. (to set geojson to .mrxs wsi coordinates)\n y_offset : int, default=0\n y-coordinate offset. (to set geojson to .mrxs wsi coordinates)\n geo_format : str, default=\"qupath\"\n The format for the geo object. \"qupath\" format allows the result file\n to be read with QuPath. \"simple\" format allows for geopandas etc.\n\n Raises\n ------\n ModuleNotFoundError: If geojson is not installed.\n ValueError: If `classes` is set to None when `type` is given.\n\n Returns\n -------\n Dict[str, Any]:\n A dictionary with geojson fields.\n \"\"\"\n try:\n import geojson\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\n \"To use the `FileHandler.mask2geojson`, geojson is required. 
\"\n \"Install with `pip install geojson`\"\n )\n\n fname = Path(fname)\n if not fname.parent.exists():\n raise ValueError(\n f\"The directory: {fname.parent.as_posix()} does not exist.\"\n )\n\n if type is None:\n type = inst > 0\n classes = {\"background\": 0, \"foreground\": 1}\n else:\n if classes is None:\n raise ValueError(\n \"`classes` cannot be None if `type` semgentation map is given.\"\n )\n\n geo_objs = FileHandler.get_gson(\n inst, type, classes, soft_type, x_offset, y_offset, geo_format\n )\n\n fname = fname.with_suffix(\".json\")\n\n with fname.open(\"w\") as out:\n geojson.dump(geo_objs, out)\n\n @staticmethod\n def save_masks(\n fname: str,\n maps: Dict[str, np.ndarray],\n format: str = \".mat\",\n json_format: str = \"qupath\",\n classes_type: Dict[str, str] = None,\n classes_sem: Dict[str, str] = None,\n offsets: bool = False,\n **kwargs,\n ) -> None:\n \"\"\"Save model outputs to .mat or geojson .json file.\n\n NOTE: If .json format is used, two files are written if both inst_map and\n sem_map are given. The sem_map .json has a suffix '{}_areas.json' and\n the inst_map .json has suffix '{}_cells.json'\n\n Parameters\n ----------\n fname : str\n Name for the output-file.\n maps : Dict[str, np.ndarray]\n model output names mapped to model outputs.\n E.g. {\"sem\": np.ndarray, \"type\": np.ndarray, \"inst\": np.ndarray}\n format : str\n One of \".mat\" or \".json\"\n json_format : str, default=\"qupath\"\n The geojson format. One of \"qupath\", \"simple\". Ignored if format is not\n \".json\".\n classes_type : Dict[str, str], optional\n Cell type dictionary. e.g. {\"inflam\":1, \"epithelial\":2, \"connec\":3}.\n This is required only if `format` == `json`.\n classes_sem : Dict[str, str], otional\n Tissue type dictionary. e.g. {\"tissue1\":1, \"tissue2\":2, \"tissue3\":3}\n This is required only if `format` == `json`.\n offsets : bool, default=False\n If True, geojson coords are shifted by the offsets that are encoded in\n the filenames (e.g. \"x-1000_y-4000.png\"). Ignored if `format` != \".json\"\n \"\"\"\n fname = Path(fname)\n allowed = (\".mat\", \".json\")\n if format not in allowed:\n raise ValueError(\n f\"Illegal file-format. Got: {format}. 
Allowed formats: {allowed}\"\n )\n\n if format == \".mat\":\n FileHandler.write_mat(fname, **maps)\n elif format == \".json\":\n offs = FileHandler.get_offset(fname.name) if offsets else {\"x\": 0, \"y\": 0}\n\n if \"inst\" in maps.keys():\n type_map = None\n if \"type\" in maps.keys():\n type_map = maps[\"type\"]\n\n # Create directory for the cell seg results\n save_dir = fname.parent / \"cells\"\n if not Path(save_dir).exists():\n Path(save_dir).mkdir(parents=True, exist_ok=True)\n\n fn = save_dir / f\"{fname.name}_cells\"\n FileHandler.write_gson(\n fname=fn,\n inst=maps[\"inst\"],\n type=type_map,\n classes=classes_type,\n soft_type=maps[\"soft_type\"] if \"soft_type\" in maps.keys() else None,\n geo_format=json_format,\n x_offset=offs[\"x\"],\n y_offset=offs[\"y\"],\n )\n if \"sem\" in maps.keys():\n if classes_sem is None:\n raise ValueError(\n \"When saving to .json `classes_sem` can't be None, \"\n \"if the output masks contains tissue type predictions.\"\n )\n\n # Create directory for the area seg results\n save_dir = fname.parent / \"areas\"\n if not Path(save_dir).exists():\n Path(save_dir).mkdir(parents=True, exist_ok=True)\n\n fn = save_dir / f\"{fname.name}_areas\"\n\n FileHandler.write_gson(\n fname=fn,\n inst=label_semantic(maps[\"sem\"]),\n type=maps[\"sem\"],\n classes=classes_sem,\n soft_type=maps[\"soft_sem\"] if \"soft_sem\" in maps.keys() else None,\n geo_format=json_format,\n x_offset=offs[\"x\"],\n y_offset=offs[\"y\"],\n )\n\n @staticmethod\n def save_masks_parallel(\n maps: List[Dict[str, np.ndarray]],\n fnames: List[str],\n format: str = \".mat\",\n geo_format=\"qupath\",\n classes_type: Dict[str, str] = None,\n classes_sem: Dict[str, str] = None,\n offsets: bool = False,\n pooltype: str = \"thread\",\n maptype: str = \"amap\",\n **kwargs,\n ) -> None:\n \"\"\"Save the model output masks to a folder. (multi-threaded).\n\n Parameters\n ----------\n maps : List[Dict[str, np.ndarray]]\n The model output map dictionaries in a list.\n fnames : List[str]\n Name for the output-files. (In the same order with `maps`)\n format : str\n One of \".mat\" or \".json\"\n geo_format : str, default=\"qupath\"\n The geojson format. One of \"qupath\", \"simple\". Ignored if format is not\n \".json\".\n classes_type : Dict[str, str], optional\n Cell type dictionary. e.g. {\"inflam\":1, \"epithelial\":2, \"connec\":3}.\n This is required only if `format` == `json`.\n classes_sem : Dict[str, str], otional\n Tissue type dictionary. e.g. {\"tissue1\":1, \"tissue2\":2, \"tissue3\":3}\n This is required only if `format` == `json`.\n offsets : bool, default=False\n If True, geojson coords are shifted by the offsets that are encoded in\n the filenames (e.g. \"x-1000_y-4000.png\"). Ignored if `format` != \".json\"\n pooltype : str, default=\"thread\"\n The pathos pooltype. Allowed: (\"process\", \"thread\", \"serial\").\n Defaults to \"thread\". (Fastest in benchmarks.)\n maptype : str, default=\"amap\"\n The map type of the pathos Pool object.\n Allowed: (\"map\", \"amap\", \"imap\", \"uimap\")\n Defaults to \"amap\". 
(Fastest in benchmarks).\n \"\"\"\n func = partial(\n FileHandler._save_masks,\n format=format,\n geo_format=geo_format,\n classes_type=classes_type,\n classes_sem=classes_sem,\n offsets=offsets,\n )\n\n args = tuple(zip(fnames, maps))\n run_pool(func, args, ret=False, pooltype=pooltype, maptype=maptype)\n\n @staticmethod\n def _save_masks(\n args: Tuple[str, Dict[str, np.ndarray]],\n format: str,\n geo_format: str,\n classes_type: Dict[str, str],\n classes_sem: Dict[str, str],\n offsets: bool,\n ) -> None:\n \"\"\"Unpacks the args for `save_mask` to enable multi-threading.\"\"\"\n return FileHandler.save_masks(\n *args,\n format=format,\n geo_format=geo_format,\n classes_type=classes_type,\n classes_sem=classes_sem,\n offsets=offsets,\n )\n\n @staticmethod\n def get_split(string: str) -> List[str]:\n \"\"\"Try splitting a coord-string with \"-\" and \"_\" on a string.\"\"\"\n xy = string.split(\"-\")\n if len(xy) > 1:\n return xy\n xy = string.split(\"_\")\n if len(xy) > 1:\n return xy\n else:\n return list(filter(None, re.split(r\"(\\d+)\", string)))\n\n @staticmethod\n def get_offset(fname: str) -> Dict[str, int]:\n \"\"\"Get the offsets.\n\n I.e. If a filename contains x- and y- coordinates, return them.\n \"\"\"\n coords = re.findall(r\"([xy][ -_]\\d+)\", fname)\n offsets = {}\n for coord in coords:\n xy = FileHandler.get_split(coord)\n offsets[xy[0]] = int(xy[1])\n\n return offsets\n","sub_path":"cellseg_models_pytorch/utils/file_manager.py","file_name":"file_manager.py","file_ext":"py","file_size_in_byte":25080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"219987734","text":"from django.http import *\nfrom django.shortcuts import render, redirect\nfrom .models import *\nfrom .forms import *\nfrom .settings import *\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import connection\nimport json\nimport random\nfrom math import *\n\n\n\n\n# home page\ndef Index(request):\n if 'quizTimeId' in request.COOKIES:\n return redirect(\"./questions\")\n return render(request, 'quizMania/index.html', {})\n\n#questions page\ndef Question(request):\n if 'quizTimeId' not in request.COOKIES:\n return redirect(\"../\")\n quizTimeId = request.COOKIES['quizTimeId']\n\n try:\n user = Users.objects.get(id=int(quizTimeId))\n except ObjectDoesNotExist as e:\n return HttpResponse(\"User not found : try clearing COOKIES\")\n except Exception as e:\n return HttpResponse(\"Unknown Exception occurred\")\n\n try:\n userStatus = UserStatus.objects.get(user=user)\n except ObjectDoesNotExist as e:\n return HttpResponse(\"User Staus not found : try clearing COOKIES\")\n except Exception as e:\n return HttpResponse(\"Unknown Exception occurred\")\n\n if userStatus.ruleDisplay == True:\n con = {\n \"user\" : user,\n \"userStatus\" : userStatus,\n \"rules\" : Rules.objects.all()\n }\n else :\n con = {\n \"user\" : user,\n \"userStatus\" : userStatus,\n \"ruleDisplay\" : userStatus.ruleDisplay,\n }\n\n return render(request, 'quizMania/questions.html', con)\n\n\n#do not display rules api\ndef HideRules(request):\n if request.method == \"POST\":\n if \"hideRules\" in request.POST:\n hideRules = request.POST['hideRules']\n try:\n user = Users.objects.get(id=int(hideRules))\n except ObjectDoesNotExist as e:\n return HttpResponse(\"User not found : try clearing COOKIES\")\n except Exception as e:\n return HttpResponse(\"Unknown Exception occurred\")\n\n try:\n userStatus = UserStatus.objects.get(user=user)\n except ObjectDoesNotExist as e:\n 
return HttpResponse(\"User Staus not found : try clearing COOKIES\")\n except Exception as e:\n return HttpResponse(\"Unknown Exception occurred\")\n\n userStatus.ruleDisplay = False\n userStatus.save()\n\n return HttpResponse(\"done\")\n\n return HttpResponse(\"Invalid request\")\n\n\n#next question api\ndef requestQuestion(request):\n if request.method == 'POST':\n if 'userStatus' in request.POST:\n try:\n ut = int(request.POST['userStatus'])\n userStatus = UserStatus.objects.get(id=ut)\n except ObjectDoesNotExist as e:\n return HttpResponse(\"User Staus not found : try clearing COOKIES\")\n except Exception as e:\n return HttpResponse(e)\n\n allQuestions = Questions.objects.all();\n totalQuestions = len(allQuestions)\n\n answeredQ = userStatus.answeredQuestions\n QuestionNo = 0\n if answeredQ is None or answeredQ == '':\n nextQuestion = random.choice(allQuestions)\n userStatus.answeredQuestions = str(nextQuestion.id)\n QuestionNo = 1\n else:\n qArry = answeredQ.split(',')\n qArry = [ int(i) for i in qArry ]\n\n if len(qArry) == totalQuestions:\n return HttpResponse(\"completed\")\n\n nextQuestion = random.choice(allQuestions)\n while nextQuestion.id in qArry:\n nextQuestion = random.choice(allQuestions)\n userStatus.answeredQuestions += ',' + str(nextQuestion.id)\n QuestionNo = len(qArry) + 1\n\n if nextQuestion.questionImage is None:\n imageURL = ''\n else :\n imageURL = \"/media/\" + str(nextQuestion.questionImage)\n\n con = {\n \"totalQuestions\": totalQuestions,\n \"QuestionNo\": QuestionNo,\n \"question\" : {\n \"questionString\": nextQuestion.questionString,\n \"questionType\": nextQuestion.questionType,\n \"questionImage\": imageURL,\n \"option1\": nextQuestion.option1,\n \"option2\": nextQuestion.option2,\n \"option3\": nextQuestion.option3,\n \"option4\": nextQuestion.option4,\n \"answer\": nextQuestion.answer,\n \"points\": nextQuestion.points\n }\n }\n userStatus.save()\n return JsonResponse(con)\n return HttpResponse(\"Invalid request\")\n\ndef ValidateAnswer(request):\n if request.method == \"POST\":\n if 'userStatus' in request.POST and 'updatePoints' in request.POST:\n try:\n ut = int(request.POST['userStatus'])\n userStatus = UserStatus.objects.get(id=ut)\n except ObjectDoesNotExist as e:\n return HttpResponse(\"User Staus not found : try clearing COOKIES\")\n except Exception as e:\n return HttpResponse(e)\n\n updatePoints = int(request.POST['updatePoints'])\n userStatus.points += updatePoints\n userStatus.save()\n\n return HttpResponse(\"Invalid request\")\n\n#login api\ndef login(request):\n if request.method == \"POST\":\n if \"rollNumber\" in request.POST and \"key\" in request.POST:\n rollNumber = request.POST['rollNumber']\n key = request.POST['key']\n\n try:\n user = Users.objects.get(rollNumber=rollNumber, key=key)\n except ObjectDoesNotExist as e:\n return HttpResponse(\"Invalid roll number or key\")\n except Exception as e:\n return HttpResponse(\"Unknown Exception occurred\")\n\n try:\n userStatus = UserStatus.objects.get(user=user)\n except ObjectDoesNotExist:\n newUserStatus = UserStatus(user=user, answeredQuestions='', points=0)\n newUserStatus.save()\n except Exception as e:\n return HttpResponse(\"Unknown Exception occurred\")\n\n response = HttpResponse(\"Logged in successfully..\")\n response.set_cookie('quizTimeId', str(user.id), max_age=36000)\n return response\n return HttpResponse(\"Error : Invalid login request\")\n\ndef Distance(request):\n if request.method == \"GET\":\n if \"str1\" in request.GET and \"str2\" in request.GET:\n str1 = 
request.GET['str1']\n str2 = request.GET['str2']\n lat1, lon1 = str1.split(',')\n lat2, lon2 = str2.split(',')\n \n try:\n R = 6373.0\n lat1 = radians(float(lat1))\n lon1 = radians(float(lon1))\n lat2 = radians(float(lat2))\n lon2 = radians(float(lon2))\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n finalD = R * c * 1000\n return HttpResponse(finalD)\n except Exception as e:\n return HttpResponse(e)\n return HttpResponse(\"Error : Invalid request\")\n\ndef RoadApi(request):\n if request.method == \"GET\" and request.GET['flag'] == 'o':\n if \"lat\" in request.GET and \"lon\" in request.GET and 'dbName' in request.GET:\n lat = str(request.GET['lat'])\n lon = str(request.GET['lon'])\n dbName = str(request.GET['dbName'])\n print(lat, lon)\n\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM ( SELECT name, discription, ST_Distance_Sphere(road, ST_MakePoint(\"+lon+\",\"+lat+\")) as distance FROM \\\"\"+ dbName +\"\\\" ) as innertable WHERE distance <= 500 ORDER BY distance\")\n result = cursor.fetchall()\n\n data = [\n {\n 'road_code': i[0],\n 'name': i[1],\n 'distance': i[2]\n } for i in result\n ]\n\n return JsonResponse(data, safe=False)\n elif request.method == \"GET\" and request.GET['flag'] == 'i':\n if 'name' in request.GET and 'description' in request.GET and 'kmlString' in request.GET and 'dbName' in request.GET:\n name = request.GET['name']\n description = request.GET['description']\n kmlString = request.GET['kmlString']\n dbName = str(request.GET['dbName'])\n\n cursor = connection.cursor()\n cursor.execute(\"INSERT INTO \\\"\"+ dbName +\"\\\" (name, discription, road) VALUES ( '\"+ name +\"', '\"+ description +\"', ST_GeomFromKML(' \"+ kmlString +\" '))\")\n result = cursor.fetchall()\n print(result)\n\n return JsonResponse(result, safe=False)\n return JsonResponse({\n \"error\": \"invalid request\"\n })\n\ndef RoadTable(request):\n result = ['invalid']\n if request.method == 'GET':\n if 'dbName' in request.GET:\n dbName = request.GET['dbName']\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM \\\"\"+ dbName +\"\\\"\")\n result = cursor.fetchall()\n return JsonResponse(result, safe=False)\n\nfrom django.core.files.storage import FileSystemStorage\n\ndef Image_Upload(request):\n if request.method == 'POST' and request.FILES['image']:\n image = request.FILES['image']\n img = Image(image=image)\n img.save()\n print(img.image)\n return HttpResponse(img.image)\n return HttpResponse(\"Error\")\n","sub_path":"quizMania/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"340526987","text":"#https://codeforces.com/problemset/problem/1251/A\r\n\r\nt = int(input())\r\n\r\nl = []\r\n\r\nfor i in range(t):\r\n s = input()\r\n res = set()\r\n ress = \"\"\r\n lastCh = s[0]\r\n nr = 1\r\n for i in range(1,len(s)):\r\n if s[i] == lastCh:\r\n nr += 1\r\n else:\r\n if nr % 2 == 1 and lastCh not in res:\r\n res.add(lastCh)\r\n ress += lastCh\r\n lastCh = s[i]\r\n nr = 1\r\n\r\n if nr % 2 == 1 and lastCh not in res:\r\n res.add(lastCh)\r\n ress += lastCh\r\n\r\n l.append(''.join(sorted(ress)))\r\n\r\n\r\n\r\nfor r in l:\r\n print(r)\r\n\r\n","sub_path":"Codeforces/1251A.py","file_name":"1251A.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"56599913","text":"import sys\nimport pickle\nfrom scipy.special import softmax\nfrom math import log, sqrt\nimport numpy as np\nimport matplotlib.pyplot as p\nimport time\n\nt = time.time()\n\nORIGINAL_PROGRAM = False\nTRADITIONAL_NET = True\n\n#n = 10\ntotalNumExamples = 1000\n\nnumExamples = 1000\n\nassert totalNumExamples % numExamples == 0\n\nnumBatches = int(totalNumExamples / numExamples)\n\n#numTeacherLayers = 2\nnumLayers = 2\n\n#inertiaFraction = (numBatches - 1)/numBatches\n\ndef softPlus(x):\n\treturn np.logaddexp(0, x)\t\n\ndef softPlusPrime(x):\n\treturn softmax([x, 0])[0]\n\ndef relu(x):\n\treturn x*(x>=0)\n\ndef reluPrime(x):\n\treturn 1*(x>=0)\n\ndef softSign(x):\n\treturn x/(abs(x) + 1)\n\ndef softSignPrime(x):\n\treturn 1/(1 + abs(x))**2\n\n\n\n#f, fPrime = relu, reluPrime\n#f, fPrime = softPlus, softPlusPrime\n\n\nf, fPrime = softSign, softSignPrime\n\n\ndef evaluateNetwork(ws, X):\n\tnumLayers = len(ws)\n\tlayerOutput = X\n\tfor i in range(numLayers):\n\t\tif i < numLayers-1:\n\t\t\tlayerOutput = augmentWithBias(np.vectorize(f)(np.dot(ws[i], layerOutput)))\n\t\telse:\n\t\t\tlayerOutput = np.vectorize(f)(np.dot(ws[i], layerOutput))\n\n\treturn layerOutput\n\ndef augmentWithBias(X):\n#\tprint(X)\n#\tprint(np.ones((1, numExamples)))\n\n\tbiasAugmentedX = np.concatenate([X, np.ones((1, X.shape[1]))], axis=0)\n\treturn biasAugmentedX\n\ndef augmentWithZeros(X):\n\tbiasAugmentedX = np.concatenate([X, np.zeros((1, numExamples))], axis=0)\n\treturn biasAugmentedX\n\n\ndef diminishWithBias(W):\n\tbiasDiminishedW = W[:,:-1]\n\treturn biasDiminishedW\n\ndef selectSubset(overallX, overallT, index, numExamples):\n\treturn overallX[:, index*numExamples:(index+1)*numExamples], \\\n\t\toverallT[:, index*numExamples:(index+1)*numExamples]\n\ndef selectRandomSubset(overallX, overallT, numExamples):\n\tindices = np.random.choice(overallX.shape[1], numExamples, replace=False)\n\n\n#\tprint(\"hi\")\n\n#\tprint(indices)\n#\tfor i in range(numExamples):\n#\t\tprint(np.transpose(overallT[:, indices])[i])\n#\t\tshowExample(overallX[:, indices], i)\n\n#\toverallX[:, indices]\n\n\treturn overallX[:, indices], overallT[:, indices]\n\ndef showExample(X, i):\n\tp.matshow(np.reshape(np.transpose(X)[i], (7,7)))\n\tp.show()\n\n\n#X = np.array([[1,2,3], [4,5,6], [7,8,9]])\n#T = X\n#T = np.array([[4,5,6], [7,8,9], [1,2,3]])\n\n#print(selectRandomSubset(X, T, 2))\n#sys.exit()\n\n\n\n\n\noverallX, overallT = pickle.load(open(\"little_mnist_one_minus_one.p\", \"rb\"))\n\n#print(X.shape)\n#print(T.shape)\n\n#X = np.random.normal(size=(n, numExamples))\n#X = np.array([[1,-2]])\n#X = np.identity(n)\n#print(X.shape)\n\n#teacherWs = [np.random.normal(0,1,size=(n,n+1)) for _ in range(numTeacherLayers)]\n\n#print(teacherWs)\n\n#T = evaluateNetwork(teacherWs, augmentWithBias(X))\n#T = np.array([[1, 2]])\n#T = np.array([[1, 2], [3, 4]])\n#T = np.array([[1]])\n\n#T = np.vectorize(softPlus)(np.random.normal(size=(n, numExamples)))\n#T = np.vectorize(relu)(np.random.normal(size=(n, numExamples)))\n#T = X\n\n\n#print(X)\n#print(T)\n\n#layerOutput = X\n#for i in range(numTeacherLayers):\n#\tlayerOutput = np.dot(teacherWs[i], layerOutput)\n\n#T = layerOutput\n\n\n\n\n#T = np.array([[1]])\n#T = np.random.exponential(size=(n, numExamples))\n\n\n#print(T)\n\n\n#X = T\n#X = -T\n\n\n#inputSize = 64\n#hiddenLayerSize = 64\n#outputSize = 10 \n\ninputSize = 49\nmiddleSize = 49\noutputSize = 10\n\nlayerSizes = [inputSize, middleSize, outputSize]\n#layerSizes = [inputSize, middleSize, middleSize, outputSize]\n\n#W = 
np.ones((n,n))\n\n# Looks like typo but it's not\nWs = [np.random.normal(0,1,size=(layerSizes[i+1],layerSizes[i]+1)) for i in range(numLayers)]\n#Ws = [np.zeros((n, n+1)) for _ in range(numLayers)]\n#Ws = [np.array([-1/3, 4/3]), np.array([1, 0])]\n\n#Ys = [np.zeros((n, numExamples)) for _ in range(numLayers)]\nYs = [np.random.normal(0,1,size=(layerSizes[i+1], numExamples)) for i in range(numLayers)]\nlamdas = [np.zeros((layerSizes[i+1], numExamples)) for i in range(numLayers)]\n\n#Ys = [np.array([[-4,3]]), np.array([[1,2]])]\n#Ys = [np.array([[1,-1]]), np.array([[-1,1]])]\n\n\n#Zs = [np.ones((n, numExamples)) for _ in range(numLayers)]\nZs = [np.random.normal(0,1,size=(layerSizes[i+1],numExamples)) for i in range(numLayers)]\nmus = [np.zeros((layerSizes[i+1], numExamples)) for i in range(numLayers)]\n\n#Zs = [np.array([[1,2]]), np.array([[1,2]])]\n#Y = T\n#Z = Y\n\n\nnumIter = 500\n#rhalpha = 1/2\n#rhalpha = 1/((numExamples*n)**numLayers)\n#rhalpha = 1/(numExamples*n)\n#rhalpha = 1\n#rho = 1/n\n#alpha = 1/n**2\nrho = 1\nalpha = 1/inputSize\n#rhalpha = 1e-3\n#LR = 1/n\n\n#bigWeightPenalty = 0\ngamma = 10\n\n\ndef softLog(x):\n\tif x <= 0:\n\t\treturn -100\n\telse:\n\t\treturn log(x)\n\ndef lamdaStep(prevLamda, prevY, prevW, X):\n#\tprint(prevY.shape)\n#\tprint(prevW.shape)\n#\tprint(X.shape)\n\n\treturn prevLamda + alpha*(prevY - np.dot(prevW, X))\n\ndef muStep(prevMu, prevZ, fOfPrevY):\n\treturn prevMu + alpha*(prevZ - fOfPrevY)\n\ndef finalZStep(T, mu, prevZ, fOfPrevY):\n#\tprint(\"last Z\", T - mu - rhalpha*(prevZ - fOfPrevY))\n\n#\treturn T - mu - rho*(prevZ - fOfPrevY)\n\tif False:\n\t\tprint(\"\")\n\t\tprint(\"-mu/rho\", -mu/rho)\n\t\tprint(\"fOfPrevY\", fOfPrevY)\n\t\tprint(\"T\", T)\n\t\tprint(\"-prevZ\", -prevZ)\n\n\treturn (-mu/rho + fOfPrevY + T)/2\n#\treturn -mu/rho + fOfPrevY + T - prevZ\n\ndef ZStepNotLast(muThisLayer, fOfPrevYThisLayer, WNextLayer, lamdaNextLayer, YNextLayer, prevZThisLayer):\n#\tprint(\"uh-oh!\")\n#\tprint(\"not last Z\", -muThisLayer + fOfPrevYThisLayer + np.dot(np.transpose(WNextLayer), lamdaNextLayer) + \\\n#\t\trhalpha*np.dot(np.transpose(WNextLayer), YNextLayer - np.dot(WNextLayer, prevZThisLayer)))\n\tif False:\n\t\tprint(\"\")\n\n\t\tprint(\"mu term\", -muThisLayer/rho)\n\t\tprint(\"fOfPrevY\", fOfPrevYThisLayer)\n\t\tprint(\"W transpose lambda\", np.dot(np.transpose(WNextLayer), lamdaNextLayer)/rho)\n\t\tprint(\"WT(Y-WZ) term\", np.dot(np.transpose(WNextLayer), YNextLayer - np.dot(WNextLayer, prevZThisLayer)))\n\t\tprint(\"overall\", -muThisLayer/rho + fOfPrevYThisLayer + np.dot(np.transpose(diminishWithBias(WNextLayer)), lamdaNextLayer)/rho + \\\n\t\tnp.dot(np.transpose(diminishWithBias(WNextLayer)), YNextLayer - np.dot(WNextLayer, prevZThisLayer)))\n\n\t\tprint(\"\")\n\n\treturn -muThisLayer/rho + fOfPrevYThisLayer + np.dot(np.transpose(diminishWithBias(WNextLayer)), lamdaNextLayer)/rho + \\\n\t\tnp.dot(np.transpose(diminishWithBias(WNextLayer)), YNextLayer - np.dot(WNextLayer, prevZThisLayer))\n\n#\twLamdaPlusYMat = np.dot(np.transpose(WNextLayer), np.transpose(augmentWithBias(np.transpose(lamdaNextLayer/rho + YNextLayer - np.dot(WNextLayer, prevZThisLayer)))))\n\n\n#\treturn -muThisLayer/rho + fOfPrevYThisLayer + wLamdaPlusYMat\n\n#\treturn fOfPrevYThisLayer + rhalpha*np.dot(np.transpose(diminishWithBias(WNextLayer)), lamdaNextLayer) + \\\n#\t\trhalpha*np.dot(np.transpose(diminishWithBias(WNextLayer)), YNextLayer - np.dot(WNextLayer, prevZThisLayer))\n\n\n#\treturn -muThisLayer + fOfPrevYThisLayer + np.dot(np.transpose(WNextLayer), 
lamdaNextLayer) + \\\n#\t\trhalpha*np.dot(np.transpose(WNextLayer), YNextLayer - np.dot(WNextLayer, prevZThisLayer))\n\ndef ZStepNotLastInvertStyle(muThisLayer, fOfPrevYThisLayer, WNextLayer, lamdaNextLayer, YNextLayer, prevZThisLayer):\n\tmatToInv = np.dot(np.transpose(diminishWithBias(WNextLayer)), WNextLayer) + np.transpose(augmentWithZeros(np.identity(n)))\n#\tinvMat = np.dot(np.linalg.inv(np.dot(np.transpose(matToInv), matToInv)), np.transpose(diminishWithBias(matToInv)))\n\tinvMat = np.dot(np.transpose(diminishWithBias(matToInv)), np.linalg.inv(np.dot(matToInv, np.transpose(matToInv))))\n#\tinvMat = np.dot(np.linalg.inv(np.dot(np.transpose(matToInv), matToInv)), np.transpose(matToInv))\n\n\n\twLamdaPlusYMat = np.dot(np.transpose(diminishWithBias(WNextLayer)), lamdaNextLayer/rho + YNextLayer)\n\n\n\tprint(invMat.shape)\n\tprint(muThisLayer.shape)\n\tprint(wLamdaPlusYMat.shape)\n\tprint(fOfPrevYThisLayer.shape)\n\n\treturn np.dot(invMat, -muThisLayer/rho + fOfPrevYThisLayer + wLamdaPlusYMat)\n\n\ndef YStep(prevW, X, mu, fPrimeOfPrevY, lamda, Z, fOfPrevY):\n\tif False:\n\t\tprint(\"\")\n\t\tprint(\"Y exploration\")\n\t\tprint(\"WX term\", np.dot(prevW, X))\n\t\tprint(\"mu fprime term\", mu*fPrimeOfPrevY/rho)\n\t\tprint(\"lamda term\", -lamda/rho)\n\t\tprint(\"fPrime term\", fPrimeOfPrevY*(Z - fOfPrevY))\n\t\tprint(\"fPrimeOfPrevY\", fPrimeOfPrevY)\n\t\tprint(\"\")\n\n\treturn np.dot(prevW, X) + mu*fPrimeOfPrevY/rho - lamda/rho + fPrimeOfPrevY*(Z - fOfPrevY)\n\n\n\ndef WStepPerfect(lamda, prevW, Y, X):\n\n#\tprint(X)\n\n#\tprint(X)\n\n#\tif n >= numExamples:\n\tif X.shape[0] >= X.shape[1]:\n\t\tgramX = np.dot(np.transpose(X), X)\n\telse:\n\t\tgramX = np.dot(X, np.transpose(X))\n\n#\tprint(X)\n\n\tfrobeniusGramX = np.sum(gramX*gramX)\n\n#\tprint(frobeniusGramX)\n\n#\tprint(gramX + \\\n#\t\t\tbigWeightPenalty*frobeniusGramX*np.identity(numExamples))\n\n#\tif n >= numExamples:\n\tif X.shape[0] >= X.shape[1]:\n\t\tpseudoInv = np.dot(np.linalg.inv(gramX + \\\n\t\t\tbigWeightPenalty*np.identity(numExamples)), np.transpose(X))\n\telse:\n#\t\tprint(np.dot(X, np.transpose(X)) + \\\n#\t\t\tbigWeightPenalty*np.identity(n))\n\t\tpseudoInv = np.dot(np.transpose(X), np.linalg.inv(gramX + \\\n\t\t\tbigWeightPenalty*np.identity(gramX.shape[0])))\n\n#\tprint(\"lamda plus Y\", lamda + Y)\n#\tprint(\"pseudoInv\", pseudoInv)\n\n\n\treturn inertiaFraction*prevW + (1-inertiaFraction)*np.dot((lamda/rho + Y), pseudoInv)\t\n\ndef WStepNew(lamda, prevW, Y, X):\n\tinputLayerSize = X.shape[0] # this is like n+1\n\n\tfirstMat = gamma*prevW + np.dot((lamda/rho + Y), np.transpose(X))\n\tsecondMat = np.linalg.inv(np.dot(X, np.transpose(X)) + gamma*np.identity(inputLayerSize))\n\n\treturn np.dot(firstMat, secondMat)\n\nif ORIGINAL_PROGRAM:\n\terrors = []\n\tlogErrors = []\n\n\tpercent = 0\n\n\tlamda0s = []\n\tmu0s = []\n\tW0s = []\n\tY0s = []\n\tZ0s = []\n\tlamda1s = []\n\tmu1s = []\n\tW1s = []\n\tY1s = []\n\tZ1s = []\n\n\tW2s = []\n\n\tindex = 0\n\n\tfor i in range(numIter):\n\t\tif i/numIter > percent/100 and (i-1)/numIter <= percent/100:\n\t\t\tprint(i, \"/\", numIter)\n\t\t\tpercent += 1\n\n\t\tX, T = selectSubset(overallX, overallT, index, numExamples)\t\n\t#\tX, T = selectRandomSubset(overallX, overallT, numExamples)\t\n\n\t\tbiasAugmentedX = augmentWithBias(X)\n\n\n\t\tindex = (index + 1) % numBatches\n\n\t\tprevLamdas = lamdas[:]\n\t\tprevMus = mus[:]\n\t\tprevZs = Zs[:]\n\t\tprevYs = Ys[:]\n\t\tprevWs = Ws[:]\n\n\t#\tprint(W)\n\n\t\tfOfPrevYs = [np.vectorize(f)(prevY) for prevY in prevYs]\n\t\tfPrimeOfPrevYs = 
[np.vectorize(fPrime)(prevY) for prevY in prevYs]\n\n\t\tfor layer in range(numLayers-1, -1, -1):\n\n\t#\t\tprint(layer)\n\n\t\t\t# Lambda step\n\t\t\tif layer == 0:\n\t\t\t\tlamdas[layer] = lamdaStep(prevLamdas[layer], prevYs[layer], prevWs[layer], biasAugmentedX)\n\t\t\telse:\n\t\t\t\tlamdas[layer] = lamdaStep(prevLamdas[layer], prevYs[layer], \\\n\t\t\t\t\tprevWs[layer], augmentWithBias(prevZs[layer-1]))\n\n\t#\t\tprint(\"lamda\",layer, lamdas[layer])\n\n\t\t\tif layer == 0:\n\t\t\t\tlamda0s.append(lamdas[layer][0][0])\n\t\t\tif layer == 1:\n\t\t\t\tlamda1s.append([lamdas[layer]][0][0][0])\n\n\n\t\t\t# Mu step\n\t#\t\tprint(layer)\n\t#\t\tprint(len(mus))\n\t#\t\tprint(len(prevMus))\n\t#\t\tprint(len(prevZs))\n\t#\t\tprint(len(fOfPrevYs))\n\t\t\tmus[layer] = muStep(prevMus[layer], prevZs[layer], fOfPrevYs[layer])\n\n\t#\t\tprint(\"mu\", layer, mus[layer])\n\n\t\t\tif layer == 0:\n\t\t\t\tmu0s.append([mus[layer]][0][0][0])\n\t\t\tif layer == 1:\n\t\t\t\tmu1s.append([mus[layer]][0][0][0])\n\n\n\t\t\t# Z step\n\t\t\tif layer == numLayers - 1:\n\t\t\t\tZs[layer] = finalZStep(T, mus[layer], prevZs[layer], fOfPrevYs[layer])\n\t\t\telse:\n\t\t\t\tZs[layer] = ZStepNotLast(mus[layer], fOfPrevYs[layer], \\\n\t\t\t\t\tWs[layer+1], lamdas[layer+1], Ys[layer+1], augmentWithBias(prevZs[layer]))\n\n\t#\t\tprint(\"Y\",layer, Ys[layer])\n\t#\t\tprint(\"Z\",layer, Zs[layer])\n\n\t\t\tif layer == 0:\n\t\t\t\tZ0s.append([Zs[layer]][0][0][0])\n\t\t\tif layer == 1:\n\t\t\t\tZ1s.append([Zs[layer]][0][0][0])\n\n\n\n\t\t\t# Y step\n\t\t\tif layer == 0:\n\t\t\t\tYs[layer] = YStep(prevWs[layer], biasAugmentedX, mus[layer], fPrimeOfPrevYs[layer], \\\n\t\t\t\t\tlamdas[layer], Zs[layer], fOfPrevYs[layer])\n\t\t\telse:\n\t\t\t\tYs[layer] = YStep(prevWs[layer], augmentWithBias(prevZs[layer-1]), mus[layer], fPrimeOfPrevYs[layer], \\\n\t\t\t\t\tlamdas[layer], Zs[layer], fOfPrevYs[layer])\t\t\t\n\n\t#\t\tprint(\"Y\",layer, Ys[layer])\n\n\t\t\tif layer == 0:\n\t\t\t\tY0s.append([Ys[layer]][0][0][0])\n\t\t\tif layer == 1:\n\t\t\t\tY1s.append([Ys[layer]][0][0][0])\n\n\n\t\t\t# W step\n\t\t\tif layer == 0:\n\t\t\t\tWs[layer] = WStepNew(lamdas[layer], prevWs[layer], Ys[layer], biasAugmentedX)\n\t\t\telse:\n\t\t\t\tWs[layer] = WStepNew(lamdas[layer], prevWs[layer], Ys[layer], augmentWithBias(prevZs[layer-1]))\n\n\t#\t\tprint(\"W\",layer, Ws[layer])\n\n\t\t\tif layer == 0:\n\t\t\t\tW0s.append([Ws[layer]][0][0][0])\n\t\t\tif layer == 1:\n\t\t\t\tW1s.append([Ws[layer]][0][0][0])\n\t\t\tif layer == 2:\n\t\t\t\tW2s.append([Ws[layer]][0][0][0])\n\n\t#\tmu = muStep(prevMu, prevZ, fOfPrevY)\n\t#\tZ = ZStep(T, mu, prevZ, fOfPrevY)\n\t#\tY = YStep(prevW, X, mu, fPrimeOfPrevY, lamda, Z, fOfPrevY)\t\n\t#\tW = WStepPerfect(lamda, prevW, Y, X)\n\n\t\t\n\t\toutput = evaluateNetwork(Ws, biasAugmentedX)\n\t\t\n\n\t#\tprint(\"output\", output)\n\t#\tprint(\"T\", T)\n\n\t\tdiffMat = T - output\n\n\t#\tprint(T)\n\t#\tprint(output)\n\n\t\terror = np.trace(np.dot(np.transpose(diffMat), diffMat))\n\n\t\terrors.append(error)\n\t\tlogErrors.append(softLog(error))\n\n\tnumCorrect = 0\n\n\n\tfor index in range(numBatches):\n\t#\tX, T = selectSubset(overallX, overallT, index, numExamples)\t\n\t\tX, T = selectRandomSubset(overallX, overallT, numExamples)\t\n\t\tbiasAugmentedX = augmentWithBias(X)\n\n\t\toutput = evaluateNetwork(Ws, biasAugmentedX)\n\n\t\tfor i in 
range(numExamples):\n\t\t#\tprint(np.transpose(output)[i])\n\t\t#\tprint(np.argmax(np.transpose(output)[i]))\n\n\t\t#\tprint(np.transpose(T)[i])\n\t\t#\tprint(np.argmax(np.transpose(T)[i]))\n\n\t\t#\tprint(\"System's guess:\", np.argmax(np.transpose(output)[i]), \\\n\t\t#\t\t\"Correct:\", np.argmax(np.transpose(T)[i]))\n\n\t\t\tif np.argmax(np.transpose(output)[i]) == np.argmax(np.transpose(T)[i]):\n\t\t\t\tnumCorrect += 1\n\n\taccuracy = numCorrect / totalNumExamples\n\n\tprint(\"Training Accuracy:\", accuracy)\n\n\tXtest, Ttest = pickle.load(open(\"little_mnist_one_minus_one_test.p\", \"rb\"))\n\n\t#testOutput = evaluateNetwork(Ws, augmentWithBias(Xtest))\n\n\tnumCorrect = 0\n\tfor index in range(numBatches):\n\t#\tX, Ttest = selectRandomSubset(Xtest, Ttest, numExamples)\t\n\t\tX, Ttest = selectSubset(Xtest, Ttest, index, numExamples)\t\n\n\t\tbiasAugmentedX = augmentWithBias(X)\n\n\t\ttestOutput = evaluateNetwork(Ws, biasAugmentedX)\n\n\t\tfor i in range(numExamples):\n\t\t#\tprint(\"System's guess:\", np.argmax(np.transpose(testOutput)[i]), \\\n\t\t#\t\t\"Correct:\", np.argmax(np.transpose(Ttest)[i]))\n\n\t\t#\tp.matshow(np.reshape(np.transpose(Xtest)[i], (7,7)))\n\t\t#\tp.show()\n\n\t\t\tif np.argmax(np.transpose(testOutput)[i]) == np.argmax(np.transpose(Ttest)[i]):\n\t\t\t\tnumCorrect += 1\n\n\taccuracy = numCorrect / totalNumExamples\n\tprint(\"Test Accuracy:\", accuracy)\n\n\n\t#\tp.matshow(np.reshape(np.transpose(X)[i], (7,7)))\n\t#\tp.show()\n\n\n\n\t#print(\"teacher weights\", teacherWs)\n\t#print(\"student weights\", Ws)\n\n\t#print(\"teacher output\", evaluateNetwork(teacherWs, X))\n\t#print(\"student output\", evaluateNetwork(Ws, X))\n\t#print(T)\n\t#print(output)\n\t#print(diffMat)\n\n\t#print(Ys)\n\n\tp.plot(lamda0s, label='lamda0s')\n\tp.plot(mu0s, label='mu0s')\n\tp.plot(W0s, label='W0s')\n\tp.plot(Y0s, label='Y0s')\n\tp.plot(Z0s, label='Z0s')\n\tp.plot(lamda1s, label='lamda1s')\n\tp.plot(mu1s, label='mu1s')\n\tp.plot(W1s, label='W1s')\n\tp.plot(Y1s, label='Y1s')\n\tp.plot(Z1s, label='Z1s')\n\t#p.plot(W2s, label=\"W2s\")\n\n\tp.legend()\n\tp.show()\n\n\tprint(time.time() - t)\n\tp.plot(logErrors)\n\tp.ylabel(\"Log error\")\n\tp.xlabel(\"Iteration\")\n\tp.show()\t\n\nif TRADITIONAL_NET:\n\tfor layer in range(numLayers):\n\n\t\tlayerOutput = overallX\n\t\tfor i in range(numLayers):\n\n#\t\t\t\tif showLayers:\n#\t\t\t\t\tp.matshow(matrixF(WsDotXs(ws[i], layerOutput)))\n#\t\t\t\t\tp.colorbar()\n#\t\t\t\t\tp.show()\t\t\t\n\n\n\t\t\tif i < numLayers-1:\n\t\t\t\tYs[i] = np.dot(Ws[i], augmentWithBias(layerOutput))\n\t\t\t\tZs[i] = np.vectorize(f)(Ys[i])\n\t\t\t\tlayerOutput = Zs[i]\n\t#\t\t\tlayerOutput = splitAndBiasX(matrixF(WsDotXs(ws[i], layerOutput)))\n\t\t\telse:\n\t\t\t\tYs[i] = np.dot(Ws[i], augmentWithBias(layerOutput))\n\t\t\t\tZs[i] = np.vectorize(f)(Ys[i])\n#\t\t\t\t\tlayerOutput = WsDotXs(ws[i], layerOutput)\n\n#\t\t\tlayerOutputs.append(layerOutput)\n\n\t\toutputErrors = []\n\n\t\tprint(Zs[-1].shape)\n\t\tprint(overallT.shape)\n\t\tprint(Ys[-1].shape)\n\n\t\toutputError = (Zs[-1] - overallT) * softSignPrime(Ys[-1])\n\t\tprint(outputError.shape)\n\n\t\toutputErrors.append(outputError)\n\n\t\tif numLayers > 1:\n\t\t\tfor i in range(numLayers-2, -1, -1):\n\t\t\t\toutputError = np.dot(np.transpose(diminishWithBias(Ws[i+1])), outputErrors[-1]) * softSignPrime(Ys[i])\n\t\t\t\toutputErrors.append(outputError)\n\n\tAs = [overallX] + [Zs[i] for i in range(numLayers)]\n\n\tgradients = [np.dot(outputErrors[numLayers - 1 - i], np.transpose(augmentWithBias(As[i]))) for i in range(numLayers)]\n\n\tlearningRate = 0.001\n\n\tfor i in range(numLayers):\n\t\tWs[i] -= gradients[i] * 
learningRate\n\n#\tprint(outputErrors)\n\n\n\n","sub_path":"dad_net_with_nonlinearity_multilayer_onehot.py","file_name":"dad_net_with_nonlinearity_multilayer_onehot.py","file_ext":"py","file_size_in_byte":16091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"423989168","text":"import rasterio\n\n\ndef writeArrayToFile(array, dst_file, src_profile):\n with rasterio.Env():\n # Write an array as a raster band to a new 8-bit file. For\n # the new file's profile, we start with the profile of the source\n profile = src_profile # src.profile\n\n # And then change the band count to 1, set the\n # dtype to uint8, and specify LZW compression.\n profile.update(\n dtype=rasterio.uint8,\n count=1,\n compress='lzw'\n )\n with rasterio.open(dst_file, 'w', **profile) as dst:\n dst.write(array.astype(rasterio.uint8), 1)\n return\n","sub_path":"scripts/old_files/transformations/raster_scripts.py","file_name":"raster_scripts.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"79384576","text":"import xml.etree.cElementTree as ET\nfrom xml.etree.ElementTree import tostring\nfrom util import *\n\n\ndef generaXmlPedidos():\n xmlstring = \"\"\n res = {}\n res[0] = False\n res[1] = \"\"\n\n tipo = \"IDL_PEDIDOS_CRO\"\n cx = creaConexion()\n\n cx[\"cur\"].execute(\"SELECT nombre FROM eg_fichprocesados WHERE tipo = '\" + tipo + \"'\")\n rows = cx[\"cur\"].fetchall()\n if len(rows) > 0:\n return True\n\n cx[\"cur\"].execute(\"INSERT INTO eg_fichprocesados (estado,hora,tipo,nombre,fecha) VALUES ('En proceso',CURRENT_TIME,'\" + tipo + \"','\" + tipo + \"',CURRENT_DATE)\")\n cx[\"conn\"].commit()\n\n try:\n recOrd = ET.Element(\"reception_orders\")\n int15 = ET.SubElement(recOrd, \"int15\")\n\n cx[\"cur\"].execute(\"SELECT cd.codigo AS codigo, cd.idalbaran AS idalbaran, cd.fecharecepcion AS fecharecepcion,cd.horarecepcion AS horarecepcion, cd.fecha AS fecha, a.codalmacenidl AS codalmacen, a.nombre AS nombrealmacen, '502736' AS codproveedor, MAX(r.numenvio) AS numenvio FROM albaranescd cd INNER JOIN lineasalbaranescd lc ON cd.idalbaran = lc.idalbaran INNER JOIN pedidosprov p ON lc.idpedido = p.idpedido INNER JOIN almacenesidl a ON p.codalmacen = a.codalmacen LEFT OUTER JOIN eg_pedidosrecibidos r ON lc.codpedido = r.codpedido WHERE cd.enviado = true AND cd.fichero IS NULL AND cd.idalbaran IN (SELECT idalbaran FROM lineasalbaranescd WHERE idalbaran = cd.idalbaran) AND CAST(cd.idalbaran AS VARCHAR) NOT IN (SELECT clave FROM idl_erroneos WHERE tipo = '\" + tipo + \"') GROUP BY r.idpedido, cd.codigo, cd.idalbaran, cd.fecharecepcion,cd.horarecepcion, p.fecha, a.codalmacenidl, a.nombre ORDER BY p.fecha ASC LIMIT 1\")\n\n rows = cx[\"cur\"].fetchall()\n idAlbaran = False\n if len(rows) > 0:\n for p in rows:\n idAlbaran = p[\"idalbaran\"]\n faltaArticulos = False\n cx[\"cur\"].execute(\"SELECT l.referencia as reflinea, ia.referencia as refidl FROM lineasalbaranescd l LEFT JOIN idl_articulos ia ON l.referencia = ia.referencia WHERE l.idalbaran = \" + str(idAlbaran) + \" AND ((ia.referencia IS NULL) OR (ia.referencia IS NOT NULL AND ia.ok = false)) GROUP BY l.referencia, ia.referencia\")\n rowsArt = cx[\"cur\"].fetchall()\n if len(rowsArt) > 0:\n faltaArticulos = True\n for art in rowsArt:\n print(art)\n if not art[\"refidl\"] or art[\"refidl\"] == \"\":\n cx[\"cur\"].execute(\"INSERT INTO idl_articulos (referencia,ok,idlog,fecha,hora,error) values ('\" + art[\"reflinea\"] + 
\"',false,NULL,CURRENT_DATE,CURRENT_TIME,'')\")\n cx[\"conn\"].commit()\n else:\n cx[\"cur\"].execute(\"UPDATE idl_articulos SET ok = false, idlog = NULL, fecha = CURRENT_DATE, hora = CURRENT_TIME, error = '' WHERE referencia = '\" + str(art[\"reflinea\"]) + \"'\")\n cx[\"conn\"].commit()\n registraError(tipo, idAlbaran, art[\"reflinea\"], cx)\n if faltaArticulos:\n cx[\"cur\"].execute(\"DELETE FROM eg_fichprocesados WHERE tipo = '\" + tipo + \"'\")\n cx[\"conn\"].commit()\n return True\n\n if not creaXmlRecepcionPedido(p, int15, cx):\n print(\"No hay albaranes con líneas que enviar\")\n cx[\"cur\"].execute(\"DELETE FROM eg_fichprocesados WHERE tipo = '\" + tipo + \"'\")\n cx[\"conn\"].commit()\n registraError(tipo, idAlbaran, \"No hay albaranes con líneas que enviar\", cx)\n return False\n\n tree = ET.ElementTree(recOrd)\n tree.write(\"./recepciones/xmlAlbaranesCD_\" + p[\"codigo\"] + \".xml\")\n\n xmlstring = tostring(recOrd, 'utf-8', method=\"xml\").decode(\"ISO8859-15\")\n print(xmlstring)\n #datosCX = dameDatosConexion(\"WSIDL_ENVREC_TEST\", cx)\n datosCX = dameDatosConexion(\"WSIDL_ENVREC\", cx)\n header = datosCX[\"header\"]\n url = datosCX[\"url\"]\n result = post_request(url, header, xmlstring)\n # result = \"GNSGNSOK\"\n status = False\n print(result)\n\n if not result:\n res[0] = False\n res[1] = result\n print(result)\n print(\"Error enviando pedido\")\n cx[\"cur\"].execute(\"DELETE FROM eg_fichprocesados WHERE tipo = '\" + tipo + \"'\")\n cx[\"conn\"].commit()\n return False\n else:\n res[0] = True\n res[1] = result\n\n root = ET.fromstring(result)\n child = root.find('int15/rub110')\n if child:\n status = child.find(\"status\").text\n\n tree = ET.ElementTree(root)\n tree.write(\"./recepciones/resAlbaranCd_\" + p[\"codigo\"] + \".xml\")\n\n idlog = registraLog(\"ENV_RECEPCIONES\", xmlstring, res, cx)\n if status:\n if status == \"OK\":\n cx[\"cur\"].execute(\"UPDATE albaranescd SET fichero = '\" + str(idlog) + \"' where idalbaran = \" + str(idAlbaran))\n cx[\"conn\"].commit()\n else:\n error = child.find(\"error_descriptions/error_description\").text\n print(error)\n cx[\"cur\"].execute(\"UPDATE albaranescd SET fichero = 'ERROR: \" + str(idlog) + \"' where idalbaran = \" + str(idAlbaran))\n cx[\"conn\"].commit()\n registraError(tipo, idAlbaran, \"Error al envair el albarán\", cx)\n else:\n print(\"No hay pedidos que enviar\")\n cx[\"cur\"].execute(\"DELETE FROM eg_fichprocesados WHERE tipo = '\" + tipo + \"'\")\n cx[\"conn\"].commit()\n return True\n\n except Exception as e:\n res[0] = False\n res[1] = e\n print(e)\n cx[\"cur\"].execute(\"DELETE FROM eg_fichprocesados WHERE tipo = '\" + tipo + \"'\")\n cx[\"conn\"].commit()\n registraError(tipo, \"Exception\", e, cx)\n return False\n\n cx[\"cur\"].execute(\"DELETE FROM eg_fichprocesados WHERE tipo = '\" + tipo + \"'\")\n cx[\"conn\"].commit()\n cierraConexion(cx)\n generaXmlPedidos()\n\n return True\n\n\ndef creaXmlRecepcionPedido(p, int15, cx):\n print(\"ENTRA\")\n cx[\"cur\"].execute(\"SELECT barcode, cantidad AS cantidad, idlinea, descripcion FROM lineasalbaranescd WHERE idalbaran = \" + str(p[\"idalbaran\"]))\n rows = cx[\"cur\"].fetchall()\n numL = 1\n if len(rows) <= 0:\n return False\n\n sufijo = \"01\"\n \"\"\"if p[\"numenvio\"] is not None:\n p[\"numenvio\"] += 1\n if len(str(p[\"numenvio\"])) == 1:\n sufijo = \"0\" + str(p[\"numenvio\"])\n else:\n sufijo = str(p[\"numenvio\"])\"\"\"\n\n rub110 = ET.SubElement(int15, \"rub110\")\n ET.SubElement(rub110, \"activity_code\").text = \"GNS\"\n ET.SubElement(rub110, 
\"physical_depot_code\").text = \"GNS\"\n ET.SubElement(rub110, \"originator_code\").text = \"EL_GANSO\"\n ET.SubElement(rub110, \"receipt_reference\").text = \"K\" + p[\"codigo\"] + sufijo\n ET.SubElement(rub110, \"receipt_type\").text = \"010\"\n ET.SubElement(rub110, \"receipt_reason_code\").text = \"CRO\"\n ET.SubElement(rub110, \"work_mode_code\").text = \"REC\"\n ET.SubElement(rub110, \"original_code\").text = p[\"codproveedor\"]\n\n ET.SubElement(rub110, \"carrier_arrival_date_century\").text = str(p[\"fecharecepcion\"])[0:2]\n ET.SubElement(rub110, \"carrier_arrival_date_year\").text = str(p[\"fecharecepcion\"])[2:4]\n ET.SubElement(rub110, \"carrier_arrival_date_month\").text = str(p[\"fecharecepcion\"])[5:7]\n ET.SubElement(rub110, \"carrier_arrival_date_day\").text = str(p[\"fecharecepcion\"])[8:10]\n ET.SubElement(rub110, \"carrier_arrival_time\").text = str(p[\"horarecepcion\"]).replace(\":\", \"\")\n ET.SubElement(rub110, \"flag_receipt_in_cross-docking\").text = \"1\"\n\n rub119 = ET.SubElement(rub110, \"rub119\")\n ET.SubElement(rub119, \"activity_code\").text = \"GNS\"\n ET.SubElement(rub119, \"physical_depot_code\").text = \"GNS\"\n ET.SubElement(rub119, \"originator_code\").text = \"EL_GANSO\"\n ET.SubElement(rub119, \"receipt_reference\").text = \"K\" + p[\"codigo\"] + sufijo\n ET.SubElement(rub119, \"comment_line_no\").text = \"001\"\n ET.SubElement(rub119, \"comment_group\").text = \"OWN\"\n\n if p[\"nombrealmacen\"]:\n ET.SubElement(rub119, \"comment\").text = formateaCadena(p[\"nombrealmacen\"])[0:70]\n else:\n ET.SubElement(rub119, \"comment\").text = \"\"\n\n for l in rows:\n rub120 = ET.SubElement(rub110, \"rub120\")\n ET.SubElement(rub120, \"activity_code\").text = \"GNS\"\n ET.SubElement(rub120, \"physical_depot_code\").text = \"GNS\"\n ET.SubElement(rub120, \"originator_code\").text = \"EL_GANSO\"\n ET.SubElement(rub120, \"receipt_reference\").text = \"K\" + p[\"codigo\"] + sufijo\n ET.SubElement(rub120, \"receipt_reference_line_no\").text = str(numL)\n ET.SubElement(rub120, \"item_code\").text = str(l[\"barcode\"])[0:16]\n ET.SubElement(rub120, \"item_lv_code\").text = \"11\"\n ET.SubElement(rub120, \"level_1_quantity\").text = str(int(l[\"cantidad\"]))\n ET.SubElement(rub120, \"owner_code\").text = p[\"codalmacen\"]\n numL += 1\n\n return True\n","sub_path":"peticionesidl/generaXmlPedidosCross.py","file_name":"generaXmlPedidosCross.py","file_ext":"py","file_size_in_byte":9497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"535563389","text":"import pandas as pd\n# import camping_server2.config as config\nfrom datetime import datetime\n\nclass Sigungucode:\n def __init__(self):\n # self.path = config.Config.PATH\n self.do_list = {'충북': '충청북도', '충남': '충청남도',\n '경북': '경상북도', '경남': '경상남도',\n '전북': '전라북도', '전남': '전라남도',\n '강원': '강원도', '경기': '경기도',\n '인천': '인천광역시', '인천시': '인천광역시',\n '부산': '부산광역시', '울산': '울산광역시', '대전': '대전광역시',\n '대구': '대구광역시', '광주': '광주광역시',\n '서울': '서울특별시', '서울시': '서울특별시',\n '제주': '제주특별자치도', '제주도': '제주특별자치도'}\n self.five_code = pd.read_csv('/Users/sol/Desktop/dss/Crawling/datas/sigungucode.csv')\n\n\n def read_file(self, df):\n df.drop(df[df['addr1'].isnull()].index, axis=0, inplace=True) # 빈 row 삭제\n return df\n\n\n def do_sigungu(self, df):\n # 파일 읽어오기\n df = self.read_file(df)\n\n # 예외처리 1: 페스티발 온라인개최 삭제\n try:\n df.drop(df[df['addr1'] == '온라인개최'].index, axis=0, inplace=True)\n except:\n pass\n\n # 도, 시군구명 컬럼 생성\n if not 'doNm' in df.columns.tolist():\n df['doNm'] = [a.split(\" \")[0] 
for a in df['addr1']]\n df['doNm'] = [as_is.replace(as_is, self.do_list[as_is]) if len(as_is) < 3 else as_is for as_is in df['doNm']]\n if not 'sigunguNm' in df.columns.tolist():\n df['sigunguNm'] = [b.split(\" \")[1:2] for b in df['addr1']]\n df['sigunguNm'] = [b[0] if len(b) > 0 else \"\" for b in df['sigunguNm']]\n\n df['sigunguNm2'] = [c.split(\" \")[1:3] for c in df['addr1']]\n df['sigunguNm2'] = [c[0] + \" \" + c[1] if len(c) > 1 else \"\" for c in df['sigunguNm2']]\n df['sigunguNm3'] = [c.split(\" \")[0:2] for c in df['addr1']]\n df['sigunguNm3'] = [c[0] + \" \" + c[1] if len(c) > 1 else \"\" for c in df['sigunguNm3']]\n\n # 예외처리 2: sigunguNm null값 처리\n sigunguNm = []\n for i in range(len(df)):\n a = df['sigunguNm'].iloc[i]\n b = df['sigunguNm2'].iloc[i]\n if type(a) == float: # sigunguNm null값 예외처리\n result = b.split(\" \")[0]\n else:\n result = a\n sigunguNm.append(result)\n df['sigunguNm'] = sigunguNm\n\n return df\n\n\n def make_sigungucode(self, df):\n df = self.do_sigungu(df)\n # 조건에 맞게 시군구코드 생성\n signguNm_ls = self.five_code['signguNm'].unique().tolist()\n sigungucode = []\n\n for i in range(len(df)):\n a = df['sigunguNm'].iloc[i]\n b = df['sigunguNm2'].iloc[i]\n c = df['sigunguNm3'].iloc[i]\n d = df['doNm'].iloc[i]\n if a in signguNm_ls:\n result = self.five_code['signguCode'][self.five_code['signguNm'] == a].iloc[0]\n elif b in signguNm_ls:\n result = self.five_code['signguCode'][self.five_code['signguNm'] == b].iloc[0]\n elif c in signguNm_ls:\n result = self.five_code['signguCode'][self.five_code['signguNm'] == c].iloc[0]\n elif d in ['세종시', '세종특별자치시']:\n result = self.five_code['signguCode'][self.five_code['signguNm'] == '세종특별자치시'].iloc[0]\n else:\n result = '확인필요'\n sigungucode.append(result)\n\n # 시군구코드 컬럼 생성\n df['sigungucode'] = sigungucode\n\n # DB 저장시 필요없는 컬럼 삭제\n df.drop(['doNm', 'sigunguNm', 'sigunguNm2', 'sigunguNm3'], axis=1, inplace=True)\n\n return df\n\n\n def final_check_save(self, filename, df):\n \"\"\"\n filename에 저장하고자 하는 파일명 기입\n 'filename_작업일.csv'로 저장\n \"\"\"\n filedate = datetime.today().strftime(\"%y%m%d\")\n\n # 오류있는 row 조회 수 drop\n if df['sigungucode'].isnull().sum() > 0 or len(df[df['sigungucode']=='확인필요']) > 0:\n drop_df = pd.DataFrame(df[df['sigungucode']=='확인필요'][['addr1']])\n print(\"plz check errored rows\")\n print(drop_df)\n df.drop(drop_df.index, axis=0, inplace=True)\n\n # 최종 처리된 파일 저장\n df.to_csv(f\"/{filename}_{filedate}.csv\", encoding=\"utf-8-sig\")\n # print(\"------\")\n # print(\"File save completed!\")","sub_path":"camping_server2/apis/make_sigungucode.py","file_name":"make_sigungucode.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"133411027","text":"import requests\nimport os\nfrom Model.request_API import requestAPI\nfrom Model.config import Configure\nfrom Model.Logger import Logger\nimport platform\nimport pytest\n\nclass Test_Login:\n @classmethod\n def setup_class(cls):\n pass\n\n @classmethod\n def teardown_class(cls):\n pass\n\n def test_login(self):\n log = Logger(os.path.basename(__file__))\n log = log.getlog()\n path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + \"/configure/dataManager.ini\"\n if 'Windows' in platform.system():\n # windows os\n path = path.replace('/', '\\\\')\n conf = Configure(path)\n\n try:\n conf_Filename = \"dataManager.ini\"\n Request = requestAPI(log,conf_Filename)\n log.info(\"数据管理员 admin进行登录:\")\n res = 
Request.execute(\"config_dataManager.xlsx\",\"checkLogin\",\"login_Admin\")\n if res.status_code == 200:\n cookies = requests.utils.dict_from_cookiejar(res.cookies) # 返回值 jessionid\n result=res.json() #返回值 json\n print(cookies)\n print(result)\n role = result[\"data\"][\"role\"]\n user_id = (result[\"data\"][\"user_id\"])\n #处理cookies格式\n key = cookies[\"SESSION\"]\n cookies = \"SESSION\" + \"=\" + key\n #把cookies传递出去,后续请求使用\n\n conf.Set(\"login\",\"cookies\",cookies) # 设置 setion、option、value\n conf.Set(\"login\",\"role\",str(role))\n conf.Set(\"login\",\"user_id\",str(user_id))\n except Exception as e:\n log.error(\"=====case error ====%s\" % e)\n pytest.fail(\"CASE FAIL\")\n\n#\n\n\n\n\n\n\n\n\n","sub_path":"Company/testcase/data_Manager/dataManage_login_test.py","file_name":"dataManage_login_test.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"332690514","text":"'''\nwhat we learned:\n\t1. when to use [ and when to use (\n\t2. when to use \"for\" and when to use \"while\" both could be used but one would be easy to use compared to another\n\t3. how to read user input and convert to integer\n\t4. how to get input from system argument (to automate at scale) and what module to use (here its import sys)\n'''\nimport sys\n\ndef collatz_seq(i):\n\tm = [i]\n\t\n\tfor j in m:\n\t\tif i == 1:\n\t\t\tprint('Collatz sequence = ',m)\n\t\t\tbreak\n\t\telse:\n\t\t\tif i % 2 == 0:\n\t\t\t\ti = i/2\n\t\t\telse:\n\t\t\t\ti = (i*3)+1\n\t\tm.append(i)\n\tprint('Length of sequence = ',len(m))\n\t\n\t# 2nd METHOD\n\t\ndef coll_s(numb):\n\t\n\tseq = [numb] \n\t\n\twhile numb != 1:\n\t\tif numb % 2 == 0: \n\t\t\tnumb = numb/2 \n\t\telse:\n\t\t\tnumb = (numb * 3) + 1\n\t\tseq.append (numb)\n\tprint ('Collatz sequence = ',seq)\n\tprint('Length of sequence = ',len(seq))\n\t\n\t\n\t\t\nif __name__== '__main__':\n\n\t# no need for import sys\n\ti = int(input(\"Enter number : \"))\n\t\n\t# system argument (requires to use statement import sys)\n\targument = int(sys.argv[1])\n\tcollatz_seq(argument)\n\tcoll_s(13)\n\t\n\t\n","sub_path":"lab-2.py","file_name":"lab-2.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"270835121","text":"# There's no guarantee that this program will work.\nimport random\nimport sys\n\nwith open(sys.argv[1]) as f:\n\n inputs = f.readlines()\n\n random_data = random.sample(range(len(inputs)), int(len(inputs) * 0.75))\n\n train = []\n for i in random_data:\n train.append(inputs[i])\n inputs[i] = 0\n\n test = [i for i in inputs if not i == 0]\n\n with open(\"train.txt\", \"w\") as f:\n f.writelines(train)\n with open(\"valid.txt\", \"w\") as f:\n f.writelines(test)\n","sub_path":"data/make_valid.py","file_name":"make_valid.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"176382837","text":"# _*_ coding: utf-8 _*_\n\"\"\"\n Created by Alimazing on 2018/11/5.\n\"\"\"\nfrom app.libs.param_filed import IntegerQueryFiled, StringQueryFiled, BooleanQueryFiled, \\\n\tIntegerPathFiled, StringPathFiled, BooleanPathFiled\n\n__author__ = 'Alimazing'\n\nbanner_id_in_path = StringPathFiled(\n\tname='id', description=\"banner id\", enum=[1, 2], default=1, required=True).data\n\nget_banner = {\n\t\"parameters\": [banner_id_in_path],\n\t\"responses\": {\n\t\t\"200\": {\n\t\t\t\"description\": 
\"banner\",\n\t\t\t\"examples\": {}\n\t\t}\n\t}\n}\n","sub_path":"app/doc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"536188327","text":"\"\"\" © Jan Gaida, 2020 \"\"\"\n\n# Generel\nimport timeit, os\nfrom datetime import datetime\n\n# Torch\nimport torch as T\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical\nfrom collections import deque\n\n# Tensorboard\nfrom tensorboardX import SummaryWriter\n\n# Hilfsklassen & -Funktionen\nfrom bin.enviorment import make_training_enviorment\nfrom bin.model import ActorCriticModel\n\n\n\"\"\" worker.py\n\nDefiniert die Aufgaben der Worker bzw. Tester während dem Trainieren\n\n\n- dispatch_training:\nDie Aufgaben eines Workers während des Trainings\n\n- dispatch_testing:\nDie Aufgaben eines Testers während des Trainings\n\n\n\"\"\"\n\n\nmemory_out_channels = 512\n\ndef dispatch_training(idx, args, global_model, optimizer, should_save, trained_episodes, summarywriter_path, worker_done_state):\n\t\"\"\"Die Worker Aufgabe für ein Training\"\"\"\n\tglobal memory_out_channels\n\n\ttry:\n\t\t#summarywriter = SummaryWriter(summarywriter_path)\n\n\t\t# Bereite Torch-Multiprocessing vor\n\t\tT.manual_seed(args.torch_seed + idx)\n\t\tcuda = args.cuda\n\n\t\t# Falls Verbose\n\t\tverbose = args.verbose\n\t\tif verbose: start_time = timeit.default_timer()\n\n\t\t# Lokales Enviorment\n\t\tenv, num_states, num_actions = make_training_enviorment(args)\n\n\t\t# Lokales Model\n\t\tlocal_model = ActorCriticModel(num_states, num_actions)\n\t\tif cuda: local_model.cuda() # GPU-Support\n\t\tlocal_model.train() # Trainings-Flag\n\n\t\t# Loop-Var\n\t\tlocal_state = T.from_numpy( env.reset() )\n\t\tif cuda: local_state = local_state.cuda() # GPU-Support\n\t\tlocal_done = True # ob das gym-level abgeschlossen ist\n\t\tlocal_episode = trained_episodes # aktuelle worker-episode\n\t\tlocal_step = 0 # aktueller worker-step\n\t\tlocal_reward = 0 # aktueller worker-reward\n\n\t\t# Loss-Werte\n\t\tactor_loss = 0\n\t\tcritic_loss = 0\n\t\tentropy_loss = 0\n\t\ttotal_loss = 0\n\n\t\t# Loop-Const\n\t\tepisode_save_interval = args.episode_save_interval # der Speicherinterval\n\t\tmodel_save_name = args.model_save_name # der Name des gespeichert Model\n\t\tmodeldir = args.modeldir # der Pfad in dem das Model gespeichert wird\n\t\tworld = args.world\n\t\tstage = args.stage\n\t\trversion = args.rversion\n\t\tmax_local_steps = args.max_local_steps\n\t\tmax_global_steps = args.max_global_steps\n\t\tdiscount_gamma = args.discount_gamma\n\t\ttau = args.tau\n\t\tbeta = args.beta\n\t\tverbose_every_episode = args.verbose_every_episode\n\t\tnum_parallel_trainings_threads = args.num_parallel_trainings_threads\n\t\tabsolute_max_training_steps = args.absolute_max_training_steps + 1\n\n\t\t# Für Verbose vorzeitige init\n\t\tif verbose:\n\t\t\tep_rewards = [0]\n\t\t\tloop_time_0 = timeit.default_timer()\n\n\t\t# Für das Speichern\n\t\tspecific_modeldir = \"{}/{}_world{}_stage{}_ver{}\".format(modeldir, model_save_name, world, stage, rversion)\n\t\tif not os.path.isdir(specific_modeldir):\n\t\t\tos.mkdir(specific_modeldir)\n\n\t\t# unendliche Trainings-Loop\n\t\twhile True:\n\n\t\t\t# Überprüfe ob gespeichert werden soll\n\t\t\tif should_save and local_episode % episode_save_interval == 0 and not local_episode == trained_episodes:\n\t\t\t\tT.save(global_model.state_dict(), \"{}/ep{}_x_{}.pt\".format(specific_modeldir, local_episode, 
num_parallel_trainings_threads))\n\t\t\t\tif verbose: print(\"\\n{} :: Worker {: 2d} --- globales Model erfolgreich gespeichert\\n\".format(datetime.now().strftime(\"%H:%M:%S\"), idx))\n\n\t\t\t# Nächste Episode\n\t\t\tlocal_episode += 1\n\t\t\tif verbose and local_episode % verbose_every_episode == 0 and not local_episode == 0:\n\t\t\t\tlatest_sum_reward = sum(ep_rewards)\n\t\t\t\tlatest_avg_reward = latest_sum_reward / len(ep_rewards)\n\t\t\t\tloop_time_1 = timeit.default_timer()\n\t\t\t\tprint(\"{} :: Worker {: 2d} | E {:>6} ({:>4.2f} e/s) | Avg-RW {:>6.2f} | Sum-RW {:>7.1f} | A-Loss {:>8.2f} | C-Loss {:>8.2f} | E-Loss {:>8.2f} | Loss {:>8.2f}\".format(\n\t\t\t\t\tdatetime.now().strftime(\"%H:%M:%S\"), idx, local_episode, ((loop_time_1 - loop_time_0)/verbose_every_episode), latest_avg_reward, latest_sum_reward, actor_loss.item(), critic_loss.item(), entropy_loss.item(), total_loss.item())\n\t\t\t\t)\n\t\t\t\tloop_time_0 = loop_time_1\n\n\t\t\t# Gewichte aus dem globalen Model laden\n\t\t\tlocal_model.load_state_dict(global_model.state_dict())\n\n\t\t\t# Episoden Tensor \n\t\t\t# LSTM-Version\n\t\t\tif local_done: # Neue Tensor erzeugen falls benötigt\n\t\t\t\thx = T.zeros( (1, memory_out_channels), dtype = T.float)\n\t\t\t\tcx = T.zeros( (1, memory_out_channels), dtype = T.float)\n\t\t\telse: # Wiederverwenden\n\t\t\t\thx = hx.detach()\n\t\t\t\tcx = cx.detach()\n\t\t\tif cuda: # CUDA-Support\n\t\t\t\thx = hx.cuda()\n\t\t\t\tcx = cx.cuda()\n\t\t\t\"\"\" # GRU-Version\n\t\t\tif local_done: # Neue Tensor erzeugen falls benötigt\n\t\t\t\thx = T.zeros( (1, memory_out_channels), dtype = T.float)\n\t\t\telse: # Wiederverwenden\n\t\t\t\thx = hx.detach()\n\t\t\tif cuda: # CUDA-Support\n\t\t\t\thx = hx.cuda()\n\t\t\t\"\"\"\n\n\t\t\t# Episoden-Var\n\t\t\tep_policies = []\n\t\t\tep_judgment = []\n\t\t\tep_entropies = []\n\t\t\tep_rewards = []\n\n\t\t\t# Episoden-Loop\n\t\t\tfor _ in range(max_local_steps):\n\t\t\t\tlocal_step += 1\n\n\t\t\t\t# Model\n\t\t\t\taction_logit_probability, action_judgement, hx, cx = local_model(local_state, hx, cx) # LSTM-Version\n\t\t\t\t#action_logit_probability, action_judgement, hx = local_model(local_state, hx) # GRU-Verison\n\n\t\t\t\t# Policies\n\t\t\t\tpolicy = F.softmax(action_logit_probability, dim = 1)\n\t\t\t\tlog_policy = F.log_softmax(action_logit_probability, dim = 1)\n\n\t\t\t\t# Entropie\n\t\t\t\tentropy = (policy * log_policy).sum(1, keepdim = True)\n\n\t\t\t\t# Entscheidung für eine Aktion mit logits-Algorithmus\n\t\t\t\taction = Categorical(policy).sample().item()\n\t\t\t\t\n\t\t\t\t# Führe Aktion aus\n\t\t\t\tlocal_state, local_reward, local_done, _ = env.step(action)\n\t\t\t\tlocal_state = T.from_numpy(local_state)\n\t\t\t\tif cuda: local_state = local_state.cuda() # GPU-Support\n\n\t\t\t\t# Erfahrungen aufbauen\n\t\t\t\tep_policies.append(log_policy[0, action])\n\t\t\t\tep_judgment.append(action_judgement)\n\t\t\t\tep_entropies.append(entropy)\n\t\t\t\tep_rewards.append(local_reward)\n\n\t\t\t\t# Überprüft ob noch Schritte getan werden können\n\t\t\t\tif local_step > max_global_steps:\n\t\t\t\t\t# Beginnt eine neue Episode\n\t\t\t\t\tlocal_done = True\n\n\t\t\t\t# Wenn die Episode abgeschlossen ist...\n\t\t\t\tif local_done:\n\t\t\t\t\t# Zurücksetzten der Steps & des Enviorments\n\t\t\t\t\tlocal_step = 0\n\t\t\t\t\tlocal_state = T.from_numpy( env.reset() )\n\t\t\t\t\tif cuda: local_state = local_state.cuda() # GPU-Support\n\n\t\t\t\t# Überprüft ob die nächste Episode gestartet werden soll\n\t\t\t\tif local_done:\n\t\t\t\t\tbreak\n\n\t\t\t# 
Bewertung\n\t\t\tR = T.zeros((1, 1), dtype=T.float)\n\t\t\tif cuda: R = R.cuda() # GPU-Support\n\n\t\t\tif not local_done: \n\t\t\t\t# Bewertung einholen für Runs die nicht abgeschlossen wurden\n\t\t\t\t_, R, _, _ = local_model(local_state, hx, cx) # LSTM-Version\n\t\t\t\t#_, R, _ = local_model(local_state, hx) # GRU-Version\n\n\t\t\tgae = T.zeros((1,1), dtype=T.float)\n\t\t\tif cuda: \n\t\t\t\tgae = gae.cuda()\n\n\t\t\t# Loss's\n\t\t\tactor_loss = 0\n\t\t\tcritic_loss = 0\n\t\t\tentropy_loss = 0\n\t\t\tnext_value = R\n\n\t\t\t# Loope alle Erfahrungen rückwärts (!)\n\t\t\tfor judgment, log_policy, reward, entropy in list( zip(ep_judgment, ep_policies, ep_rewards, ep_entropies) )[::-1]:\n\t\t\t\tgae = (gae * discount_gamma * tau) + reward + discount_gamma * next_value.detach() - judgment.detach()\n\t\t\t\tnext_value = judgment\n\t\t\t\tactor_loss = actor_loss + log_policy * gae\n\t\t\t\tR = R * discount_gamma + reward\n\t\t\t\tcritic_loss = critic_loss + (R - judgment) ** 2 / 2\n\t\t\t\tentropy_loss = entropy_loss + entropy\n\n\t\t\ttotal_loss = -actor_loss + critic_loss - beta * entropy_loss\n\n\t\t\t# Tensorboard\n\t\t\t#summarywriter.add_scalar(\"Worker-{}/actor_loss\".format(idx), actor_loss.item(), local_episode)\n\t\t\t#summarywriter.add_scalar(\"Worker-{}/critic_loss\".format(idx), critic_loss.item(), local_episode)\n\t\t\t#summarywriter.add_scalar(\"Worker-{}/total_loss\".format(idx), total_loss.item(), local_episode)\n\n\t\t\t# Vor der Backpropagation alle Gradienten auf 0 setzen\n\t\t\toptimizer.zero_grad()\n\n\t\t\t# Backpropagation\n\t\t\ttotal_loss.backward()\n\n\t\t\t# Loope die Gradienten des globalen & lokalen Models\n\t\t\tfor local_param, global_param in zip(local_model.parameters(), global_model.parameters()):\n\t\t\t\tif global_param.grad is not None:\n\t\t\t\t\t# Wenn im globalen ein Gradient ist\n\t\t\t\t\tbreak\n\t\t\t\t# Wenn nicht übernehm das locale Gradient\n\t\t\t\tglobal_param._grad = local_param.grad\n\n\t\t\t# Übernehme die Gradienten wieder\n\t\t\toptimizer.step()\n\n\t\t\t# Finally, wenn \n\t\t\tif local_episode == absolute_max_training_steps:\n\t\t\t\tif verbose:\n\t\t\t\t\tend_time = timeit.default_timer()\n\t\t\t\t\tprint(\"{} :: Worker {: 2d} --- nach {:.2f} s abgeschlossen\".format(datetime.now().strftime(\"%H:%M:%S\"), idx, (end_time - start_time)))\n\t\t\t\telse:\n\t\t\t\t\tprint(\"{} :: Worker {: 2d} --- abgeschlossen\".format(datetime.now().strftime(\"%H:%M:%S\"), idx))\n\t\t\t\t# Fertig\n\t\t\t\tworker_done_state[0] += 1\n\t\t\t\treturn\n\n\texcept KeyboardInterrupt:\n\t\tif verbose:\n\t\t\tend_time = timeit.default_timer()\n\t\t\tprint(\"{} :: Worker {: 2d} --- nach {:.2f} s abgeschlossen\".format(datetime.now().strftime(\"%H:%M:%S\"), idx, (end_time - start_time)))\n\t\telse:\n\t\t\tprint(\"{} :: Worker {: 2d} --- abgeschlossen\".format(datetime.now().strftime(\"%H:%M:%S\"), idx))\n\t\treturn\n\ndef dispatch_testing(idx, args, global_model, summarywriter_path, worker_done_state):\n\t\"\"\"Die Worker Aufgabe für ein Testing\"\"\"\n\tglobal memory_out_channels\n\n\ttry:\n\t\tsummarywriter = SummaryWriter(summarywriter_path)\n\n\t\t# Bereite Torch-Multiprocessing vor\n\t\tT.manual_seed(args.torch_seed + idx)\n\n\t\t# Enviorment initialisieren\n\t\tenv, num_states, num_actions = make_training_enviorment(args)\n\n\t\t# Lokales Model\n\t\tlocal_model = ActorCriticModel(num_states, num_actions)\n\t\tlocal_model.eval() # Evaluation-Flag\n\n\t\t# Loop-Vars\n\t\tlocal_done = True\n\t\tlocal_step = 0\n\t\tlocal_episode = 0\n\t\tep_rewards = 
[]\n\t\tlocal_state = T.from_numpy( env.reset() )\n\t\tactions = deque(maxlen = args.max_actions)\n\t\tmax_global_steps = args.max_global_steps\n\t\tnum_parallel_trainings_threads = args.num_parallel_trainings_threads\n\t\t# Loop-Const\n\n\t\t# Testing-Loop\n\t\twhile True:\n\t\t\t# Step hochzählen\n\t\t\tlocal_step += 1\n\n\t\t\t# Model wiederladen wenn Run abgeschlossen\n\t\t\tif local_done:\n\t\t\t\tlocal_model.load_state_dict(global_model.state_dict())\n\n\t\t\t# Ohne Gradienten-Berrechnung\n\t\t\t# LSTM-Version\n\t\t\twith T.no_grad():\n\t\t\t\tif local_done: # Neue Tensor erzeugen falls benötigt\n\t\t\t\t\thx = T.zeros((1, memory_out_channels), dtype=T.float)\n\t\t\t\t\tcx = T.zeros((1, memory_out_channels), dtype=T.float)\n\t\t\t\telse: # Ansonsten wiederverwenden\n\t\t\t\t\thx = hx.detach()\n\t\t\t\t\tcx = cx.detach()\n\t\t\t\"\"\" # GRU-Version\n\t\t\twith T.no_grad():\n\t\t\t\tif local_done: # Neue Tensor erzeugen falls benötigt\n\t\t\t\t\thx = T.zeros((1, memory_out_channels), dtype=T.float)\n\t\t\t\telse: # Ansonsten wiederverwenden\n\t\t\t\t\thx = hx.detach()\n\t\t\t\"\"\"\n\n\t\t\t# Model\n\t\t\taction_logit_probability, action_judgement, hx, cx = local_model(local_state, hx, cx) # LSTM-Version\n\t\t\t#action_logit_probability, action_judgement, hx = local_model(local_state, hx) # GRU-Version\n\n\t\t\t# Policy\n\t\t\tpolicy = F.softmax(action_logit_probability, dim=1)\n\n\t\t\t# Action\n\t\t\taction = T.argmax(policy).item()\n\n\t\t\t# Action durchführen\n\t\t\tlocal_state, local_reward, local_done, info = env.step(action)\n\t\t\tep_rewards.append(local_reward)\n\t\t\tenv.render()\n\n\t\t\t# Aktion merken\n\t\t\tactions.append(action)\n\n\t\t\t# Wenn max_global_steps erreicht wurde oder wenn die max_actions erreich wurden ...\n\t\t\tif local_step > max_global_steps or actions.count(actions[0]) == actions.maxlen:\n\t\t\t\t#print(\"Runner {: 2d} :: Training --- Aktionslimit erreicht\".format(idx))\n\t\t\t\t# .. 
neustarten\n\t\t\t\tlocal_done = True\n\n\t\t\t# Überprüft ob das Enviroment zurückgesetzt werden soll\n\t\t\tif local_done: \n\n\t\t\t\tlatest_sum_reward = sum(ep_rewards)\n\t\t\t\tlatest_avg_reward = latest_sum_reward / len(ep_rewards)\n\n\t\t\t\t#print(\"num_parallel_trainings_threads\", num_parallel_trainings_threads)\n\t\t\t\t#print(\"worker_done_state\", worker_done_state)\n\n\t\t\t\tif worker_done_state.item() == num_parallel_trainings_threads:\n\t\t\t\t\t# quit\n\t\t\t\t\tprint(\"{} :: Runner {: 2d} --- abgeschlossen\".format(datetime.now().strftime(\"%H:%M:%S\"), idx))\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\t# Tensorboard\n\t\t\t\t\tsummarywriter.add_scalar(\"Tester-{}/X_Position\".format(idx), info['x_pos'], local_episode)\n\t\t\t\t\tsummarywriter.add_scalar(\"Tester-{}/Score\".format(idx), info['score'], local_episode)\n\t\t\t\t\tsummarywriter.add_scalar(\"Tester-{}/Coins\".format(idx), info['coins'], local_episode)\n\t\t\t\t\tsummarywriter.add_scalar(\"Tester-{}/Sum_Reward\".format(idx), latest_sum_reward, local_episode)\n\t\t\t\t\tsummarywriter.add_scalar(\"Tester-{}/Avg_Reward\".format(idx), latest_avg_reward, local_episode)\n\t\t\t\t\tif info[\"flag_get\"]: flag_get = 1\n\t\t\t\t\telse: flag_get = 0\n\t\t\t\t\tsummarywriter.add_scalar(\"Tester-{}/Flag\".format(idx), flag_get, local_episode)\n\n\n\t\t\t\t# Variablen zurückstetzten\n\t\t\t\tlocal_step = 0\n\t\t\t\tep_rewards = []\n\t\t\t\tlocal_episode += 1\n\t\t\t\tactions.clear()\n\n\t\t\t\t# Env zurückstetzen\n\t\t\t\tlocal_state = env.reset()\n\n\t\t\t# zu numpy\n\t\t\tlocal_state = T.from_numpy(local_state)\n\n\texcept KeyboardInterrupt:\n\t\tprint(\"{} :: Runner {: 2d} --- abgeschlossen\".format(datetime.now().strftime(\"%H:%M:%S\"), idx))\n\t\treturn\n","sub_path":"bin/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":12778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"471835164","text":"import xlwt\nfrom datetime import datetime,date\nimport cx_Oracle\nimport getpass\n#v_password = getpass.getpass()\ncon = cx_Oracle.connect('geocallbi/geocallbi@//161.27.247.149/geocall')\ncur = con.cursor()\nv_arraysize = 1000\ncur.arraysize=v_arraysize\nprint(cur.arraysize)\nv_query = 'SELECT * fROM ASSET WHERE rownum<100000'\n#v_query += '\nv_start = datetime.now().strftime(\"%Y%m%d %H:%M:%S %f\")\nprint(v_start);\ncur.execute(v_query)\n# scarica tutto il resultset\nresult = cur.fetchall()\nv_num_rows = cur.rowcount;\nv_num_cols = len(cur.description)\n#\nv_endquery = datetime.now().strftime(\"%Y%m%d %H:%M:%S %f\")\nprint(v_endquery);\n#style0 = xlwt.easyxf('font: name Times New Roman, color-index red, bold on',\n# num_format_str='#,##0.00')\n#style1 = xlwt.easyxf(num_format_str='D-MMM-YY')\n\nwb = xlwt.Workbook()\nws = wb.add_sheet('Foglio1')\nv_description_length = len(cur.description)\nfor i in range(v_description_length):\n ws.write(0, i, cur.description[i][0])\nfor i in range(v_num_rows):\n for j in range(v_num_cols):\n ws.write(i+1, j, result[i][j])\n #v_test = result[i][j]\ncur.close()\ncon.close()\nv_endscan = datetime.now().strftime(\"%Y%m%d %H:%M:%S %f\")\nprint(str(v_arraysize)+' '+str(v_start)+' '+str(v_endquery)+' '+str(v_endscan));\nv_oggi = date.today().strftime(\"%Y%m%d\")\n#print(v_oggi)\n#ws.write(0, 0, 1234.56, style0)\n#ws.write(1, 0, datetime.now(), style1)\n#ws.write(2, 0, 1)\n#ws.write(2, 1, 1)\n#ws.write(2, 2, 
xlwt.Formula(\"A3+B3\"))\nwb.save('esportazione_omv_'+v_oggi+'.xls')\n","sub_path":"test-python/test_omv_excel.py","file_name":"test_omv_excel.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"573545960","text":"from optparse import OptionParser\nimport sys\nfrom classification import Classification\n\ndef main():\n parser = OptionParser()\n parser.add_option(\"--stemmer-language\", dest=\"stemmer_language\", help=\"Language for SnowballStemmer\", default=\"english\")\n parser.add_option('-i', action=\"store_true\", dest=\"ignore_stopwords_stemmer\", help=\"Ignore stopwords in stemmer, default false\", default=False)\n parser.add_option(\"--stopwords-language\", dest=\"stopwords_language\", help=\"Language for stopwords\")\n parser.add_option(\"-k\", action=\"store_true\", dest=\"keep_stopwords\", help=\"Keep stopwords, default remove\", default=False)\n parser.add_option('--load-classifier', dest=\"load_classifier_file_path\", help=\"Specify load classifiers file\")\n parser.add_option('--create-classifier', dest=\"create_classifier\", help=\"File for training set\")\n parser.add_option('--row-training-set', dest=\"row_training_set\", help=\"Number of row for training set\", default=1000)\n parser.add_option('-r', action=\"store_true\", dest=\"random_row_training_set\", help=\"Get random row from training set file\", default=False)\n parser.add_option('--text-field', dest=\"text_field\", help=\"text field in json file\", default=\"text\")\n parser.add_option('--word-tokenize-language', dest=\"word_tokenize_language\", help=\"Word tokenize language\", default=\"english\")\n parser.add_option('--classification-field', dest=\"classification_field\", help=\"Classification field in json data\", default=\"category\")\n parser.add_option('--dump-classifier', dest=\"dump_classifier\", help=\"Dump classifier file\", default=False)\n parser.add_option('-a', action=\"store_true\", dest=\"calculate_accuracy\", help=\"Calculate accuracy\", default=False)\n parser.add_option('--test-file-path', dest=\"test_file_path\", help=\"Test file path\")\n parser.add_option('--row-test-set', dest=\"row_test_set\", help=\"Number of row for test set\", default=500)\n parser.add_option('--random-row-test-set', action=\"store_true\", dest=\"random_row_test_set\", help=\"Get random row from test set file\", default=False)\n parser.add_option('--test-text-field', dest=\"test_text_field\", help=\"text field in json test file\", default=\"text\")\n parser.add_option('--test-classification-field', dest=\"test_classification_field\", help=\"classificaion field in json test file\", default=\"category\")\n parser.add_option('--classify', dest=\"classify_text\", help=\"classify text\", default=False)\n (options, args) = parser.parse_args(sys.argv)\n\n cl = Classification(\n stemmer_language=options.stemmer_language,\n stopwords_language=options.stopwords_language,\n ignore_stopwords_stemmer=options.ignore_stopwords_stemmer,\n )\n\n if options.load_classifier_file_path:\n cl.load_classifier(load_classifier_file_path=options.load_classifier_file_path)\n elif options.create_classifier:\n cl.create_and_train_classifier(\n training_file_path=options.create_classifier,\n keep_stopwords=options.keep_stopwords,\n row_training_set=options.row_training_set,\n random_row_training_set=options.random_row_training_set,\n text_field=options.text_field,\n word_tokenize_language=options.word_tokenize_language,\n 
classification_field=options.classification_field\n )\n\n if options.dump_classifier:\n cl.dump_classifier(options.dump_classifier)\n if options.calculate_accuracy:\n cl.accuracy(\n test_file_path=options.test_file_path,\n keep_stopwords=options.keep_stopwords,\n row_test_set=options.row_test_set,\n random_row_test_set=options.random_row_test_set,\n text_field=options.test_text_field,\n word_tokenize_language=options.word_tokenize_language,\n classification_field=options.test_classification_field\n )\n\n if(options.classify_text):\n cl.classify(\n text=options.classify_text,\n keep_stopwords=options.keep_stopwords,\n word_tokenize_language=options.word_tokenize_language\n )\n\nif __name__ == '__main__':\n main()\n","sub_path":"npl-classification/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"632900071","text":"###########\n# IMPORTS #\n###########\nimport numpy as np\nimport Laina_model_train\nimport tensorflow as tf\nimport os\nimport cv2\n\nfrom keras.callbacks import TensorBoard\nfrom keras.callbacks import ModelCheckpoint\n\n########\n# VARS #\n########\nt_path = 'data/train.npz'\nv_path = 'data/val.npz'\nv_image_data_path = 'data/v_resnet_output_data.npz'\nt_image_data_path = 'data/t_resnet_output_data.npz'\nbatch_size = 64\nn_epochs = 100\nimage_shape = (128,160,3)\ninput_shape = (4,5,2048)\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.5\t\n\n#########\n# FUNCS #\n#########\ndef load_data(path):\n\tdata = np.load(path)\n\n\timages = data['images']\n\timages = images.transpose([3,0,1,2])\n\tdepths = data['depths']\n\tdepths = depths.transpose([2,0,1])\n\tdepths = np.expand_dims(depths,3)\n\n\tdata.close()\n\n\treturn images, depths\n\ndef eval_data(images,depths):\n\tinput_data = []\n\tdepth_data = []\n\t\n\tmodel = Res_evaluate()\n\n\tfor i in range(len(images)):\n\t\tif i % 10 == 0:\n\t\t\tprint('{} images saved'.format(i))\n\n\t\timage = images[i,:,:,:]\n\t\timage = cv2.resize(image,(160,128))\n\t\timage = np.expand_dims(image,0)\n\t\tres_out = model.predict(image)\n\t\t\n\t\tdepth = depths[i,:,:,:]\n\t\tdepth = cv2.resize(depth,(160,128))\n\t\tdepth = np.expand_dims(depth,0)\n\t\t\t\n\t\tinput_data.append(res_out)\n\t\tdepth_data.append(depth)\n\n\tinput_data = np.squeeze(input_data)\n\tdepth_data = np.squeeze(depth_data)\n\n\treturn input_data, depth_data\n\n\ndef Res_evaluate():\n\tmodel = Laina_model_train.ResNet50(input_tensor=None, input_shape=image_shape)\n\treturn model\n\n\n########\n# MAIN #\n########\ndef main():\n\n\t# initialise callbacks:\n\ttensorboard = TensorBoard(log_dir='./logs', histogram_freq=0.0, batch_size=batch_size,\n write_graph=False, write_grads=False, write_images=False)\n\t\n\tcheckpoint = ModelCheckpoint('weights/best_checkpoint.h5', save_weights_only=True, \n\t\t\t\t\t\t\tmonitor='val_loss', verbose=0, save_best_only=True, mode='min')\n\n\t# either load resnet output or run resnet:\n\tif not os.path.exists(v_image_data_path):\n\t\tprint('data not found, saving as npz...')\n\t\tv_images, v_depths = load_data(v_path)\n\t\tt_images, t_depths = load_data(t_path)\n\n\t\t# evaluate resnet data:\n\t\tprint('evaluating val data...')\n\t\tv_input, v_depths = eval_data(v_images,v_depths)\n\t\tprint('evaluating training data...')\n\t\tt_input, t_depths = eval_data(t_images,t_depths)\n\n\t\tprint('saving data...')\t\n\t\tnp.savez(v_image_data_path, input_data=v_input, 
depths=v_depths)\n\t\tnp.savez(t_image_data_path, input_data=t_input, depths=t_depths)\n\n\t\n\telse:\n\t\tprint('loading data...')\n\t\tv_data = np.load(v_image_data_path)\n\t\tt_data = np.load(t_image_data_path)\n\n\t\tv_input = v_data['input_data']\n\t\tv_depths = v_data['depths']\n\n\t\tt_input = t_data['input_data']\n\t\tt_depths = t_data['depths']\n\n\n\t# Load Laina model\n\tfinal_model, output = Laina_model_train.Laina(input_tensor=None, input_shape=input_shape)\n\n\tt_depths = np.expand_dims(t_depths,3)\n\tv_depths = np.expand_dims(v_depths,3)\n\n\t# Train model\n\twith tf.Session(config=config) as sess:\n\t\t#tf.initialize_all_variables().run()\n\t\tsess.run(tf.global_variables_initializer())\n\t\t#final_model.load_weights('model_saves/model_best_checkpoint.h5')\n\t\t#print('model loaded!')\n\t\t\n\t\tfinal_model.fit(t_input, t_depths, batch_size=batch_size, epochs=n_epochs,\n\t\t\t\t\t\tvalidation_data=(v_input,v_depths), shuffle=True, callbacks=[tensorboard,checkpoint])\n\n\t\tprint('done!')\n\t\tfinal_model.save_weights('weights/model_complete.h5')\n\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"66945231","text":"from tensorflow.keras.preprocessing.text import Tokenizer\n\n\ndef below_threshold_freq(X_train, max_freq):\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(X_train)\n\n threshold = max_freq\n \n total_cnt = len(tokenizer.word_index)\n rare_cnt = 0\n total_freq = 0\n rare_freq = 0\n\n for key, value in tokenizer.word_counts.items():\n total_freq += value\n if(value < threshold):\n rare_cnt = rare_cnt + 1\n rare_freq = rare_freq + value\n\n print('단어 집합(vocabulary)의 크기 :',total_cnt)\n print('등장 빈도가 %s번 이하인 희귀 단어의 수: %s'%(threshold - 1, rare_cnt))\n print(\"단어 집합에서 희귀 단어의 비율:\", (rare_cnt / total_cnt)*100)\n print(\"전체 등장 빈도에서 희귀 단어 등장 빈도 비율:\", (rare_freq / total_freq)*100)\n \n\n vocab_size = total_cnt - rare_cnt + 2\n print('단어 집합의 크기 :',vocab_size)\n\n\n return vocab_size\n\n\n\ndef below_threshold_len(X_train, max_len):\n\n print('리뷰의 최대 길이 :',max(len(l) for l in X_train))\n print('리뷰의 평균 길이 :',sum(map(len, X_train))/len(X_train))\n\n cnt = 0\n for sent in X_train:\n if(len(sent) <= max_len):\n cnt = cnt + 1\n print('전체 샘플 중 길이가 %s 이하인 샘플의 비율: %s'%(max_len, (cnt / len(X_train))*100))","sub_path":"pjt2/MechineLearning/supervised_learning/v2/below_threshold.py","file_name":"below_threshold.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"238414476","text":"import shodan\nimport time\nimport traceback\nfrom database.database import db_session\nfrom database.models import Host, Port\nfrom database import elastic_bounty_tools\n\n\ndef add_args(parser):\n parser.add_argument(\"--shodanports\", help=\"Enriches data with Shodan port information\", action=\"store_true\")\n\n\ndef parse_args(args, config):\n if args.shodanports and args.workspace is not None:\n if args.elastic:\n shodan_ports(args, config, source=\"elastic\")\n else:\n shodan_ports(args, config)\n\n\ndef shodan_ports(args, config, source=\"db\"):\n if source == \"elastic\":\n host_with_port = 0\n no_port_info = 0\n dupe_port = 0\n\n # Setup API\n shodan_api_key = config.get(\"Shodan\", \"api_key\")\n api = shodan.Shodan(shodan_api_key)\n\n # Get list of IPs from Elasticsearch\n ip_bucket = 
elastic_bounty_tools.get_unique_ips(args.workspace)\n remaining = len(ip_bucket)\n\n # Get info for each IP from Shodan\n for ip in ip_bucket:\n try:\n # Check if IP starts with 10.\n # TODO: Use the IP Library to check for RFC1918\n if not ip['key'].startswith(\"10.\"):\n shodan_host = api.host(ip['key'])\n for port in shodan_host['ports']:\n result = elastic_bounty_tools.add_port(ip['key'], port, \"shodan\", args.workspace)\n\n if result:\n host_with_port += 1\n else:\n dupe_port += 1\n\n except shodan.APIError:\n no_port_info += 1\n time.sleep(.1)\n\n except KeyboardInterrupt:\n raise\n\n except:\n print(traceback.format_exc())\n\n remaining -= 1\n print(\"Remaining: {} New: {} Duplicates: {} No Info: {}\".format(remaining, host_with_port, dupe_port, no_port_info), end=\"\\r\")\n print(\"Remaining: {} New: {} Duplicates: {} No Info: {}\".format(remaining, host_with_port, dupe_port, no_port_info))\n\n elif source == \"db\":\n # Setup the db session\n session = db_session()\n # Setup API\n shodan_api_key = config.get(\"Shodan\", \"api_key\")\n api = shodan.Shodan(shodan_api_key)\n\n target_host_query = session.query(Host).filter(Host.workspace == args.workspace).all()\n remaining = len(target_host_query)\n host_with_port = 0\n no_port_info = 0\n dupe_port = 0\n\n for target_host in target_host_query:\n try:\n # Get phyiscal location, ASN, ports, etc\n shodan_host = api.host(target_host.ip_address)\n\n port_list = [x.number for x in target_host.ports]\n # Add ports to host\n for port in shodan_host['ports']:\n if port in port_list:\n dupe_port += 1\n else:\n host_with_port += 1\n p = Port(number=port, host=target_host)\n session.add(p)\n session.commit()\n\n # Sleep to try and ratelimit\n time.sleep(.1)\n\n except shodan.APIError:\n # print(\"API Error, sleeping .5 second\")\n # print(traceback.format_exc())\n no_port_info += 1\n time.sleep(.1)\n except:\n print(traceback.format_exc())\n\n remaining -= 1\n print(\"Remaining: {} New: {} Duplicates: {} No Info: {}\".format(remaining, host_with_port, dupe_port, no_port_info), end=\"\\r\")\n print(\"Remaining: {} New: {} Duplicates: {} No Info: {}\".format(remaining, host_with_port, dupe_port, no_port_info))\n","sub_path":"enrichment/shodan.py","file_name":"shodan.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"356409798","text":"import unittest\n\n\nclass BuyAndSellStockII(unittest.TestCase):\n \"\"\"\n You are given an integer array prices where prices[i] is the price of a given stock on the ith day.\n\n On each day, you may decide to buy and/or sell the stock.\n You can only hold at most one share of the stock at any time.\n However, you can buy it then immediately sell it on the same day.\n\n Find and return the maximum profit you can achieve.\n \"\"\"\n def max_profit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n max_profit = 0\n for i in range(len(prices) - 1):\n max_profit += max(prices[i+1] - prices[i], 0)\n return max_profit\n\n\n def test_max_profit(self):\n self.assertEqual(self.max_profit([7,6,4,3,1]), 0)\n self.assertEqual(self.max_profit([7,1,5,3,6,4]), 7)","sub_path":"buy_and_sell_stock_ii.py","file_name":"buy_and_sell_stock_ii.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"165338152","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport os, sys\nimport numpy as np\n\ninput_dir = 
os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), os.pardir, 'input')).replace('\\\\','/')\n\n\nGDAT_data = pd.read_excel(input_dir + '/GDAT.xlsx')\nUNIT_NAME_data = GDAT_data['UNIT_NAME']\nUNIT_NAME_data = UNIT_NAME_data.tolist() # name\nt = pd.read_excel(input_dir + '/T.xlsx')\nt = t['T'].tolist()\nt = range(1, t[0] + 1)\nkeys = [(a,b) for a in UNIT_NAME_data for b in t]\nvalues = np.zeros(len(keys),dtype=int) # initial value = 0\ndata = dict(zip(keys,values))\n\n# generate the output file\n","sub_path":"opt_code/user/pqrl/uc/data_test.py","file_name":"data_test.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"393514850","text":"from SW_global import *\nglobal x_with_bez,y_with_bez,x_without_bez,y_without_bez\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Press Function <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nx_with_bez.clear()\ny_with_bez.clear()\nx_without_bez.clear()\ny_without_bez.clear()\ndef font_check(x,y):\n    global x_with_bez,y_with_bez,x_without_bez,y_without_bez\n    NT = float\n    import matplotlib.pyplot as plt\n    import numpy as np\n    import pandas as pd\n    #plt.figure(figsize=[1,4])\n    import bezier as bezier1\n    # class Point\n    class Point:\n        def __init__(self, x = 0.0, y = 0.0):\n            self.x = x\n            self.y = y\n        \n        def distance(self, p):\n            return sqrt((p.x-self.x)*(p.x-self.x)+(p.y-self.y)*(p.y-self.y))\n\n        def length(self):\n            return self.distance(Point(NT(0), NT(0)))\n\n        def __sub__(self, p):\n            return Point(self.x-p.x, self.y-p.y)\n\n        def __add__(self, p):\n            return Point(self.x+p.x, self.y+p.y)\n\n        def __mul__(self, c):\n            return Point(c*self.x, c*self.y)\n\n        def __eq__(self, p):\n            return self.x == p.x and self.y == p.y\n\n        def __ne__(self, p):\n            return not (self == p)\n        \n        def towards(self, target, t):\n            if t == 0.5:\n                return self.halfway(target)\n            else:\n                return Point((1.0-t)*self.x+t*target.x, (1.0-t)*self.y+t*target.y)\n        \n        def halfway(self, target):\n            return Point((self.x+target.x).div2(), (self.y+target.y).div2())\n\n        def compare_lex(self, p):\n            if self.x < p.x:\n                return -1\n            if self.x > p.x:\n                return 1\n            if self.y < p.y:\n                return -1\n            if self.y > p.y:\n                return 1\n            return 0\n\n        def less_lex(self, p):\n            return self.compare_lex(p) < 0\n\n        def less_eq_lex(self, p):\n            return self.compare_lex(p) <= 0\n        \n        def __repr__(self):\n            return \"Point(%s, %s)\" % (self.x, self.y) \n\n    def orientation2d(a, b, c):\n        d1 = (a.x - b.x) * (a.y - c.y)\n        d2 = (a.y - b.y) * (a.x - c.x)\n        if d1 == d2:\n            return 0\n        elif d1 > d2:\n            return 1\n        else:\n            return -1\n\n    def leftTurn(a, b, c):\n        return orientation2d(a, b, c) > 0\n\n    def rightTurn(a, b, c):\n        return orientation2d(a, b, c) < 0\n\n    def betweenVar(a, b, c):\n        return (a.x-b.x)*(c.x-b.x)+(a.y-b.y)*(c.y-b.y)<0\n\n    class Dim:\n        def __init__(self, vp):\n            self.n = len(vp)\n            self.vp = vp\n            self.isConvex = False\n            self.diam = 0\n        \n        def convexify(self):\n            pass\n\n        def diameter():\n            if self.diam == 0:\n                self.xmin = self.vp[0].x\n                self.xmax = self.xmin\n                self.ymin = self.vp[0].y\n                self.ymax = self.ymin\n                for p in self.vp:\n                    if p.x < self.xmin:\n                        self.xmin = p.x\n                    if p.x > self.xmax:\n                        self.xmax = p.x\n                    if p.y < self.ymin:\n                        self.ymin = p.y\n                    if p.y > self.ymax:\n                        self.ymax = p.y\n                self.diam = min(xmax-xmin, ymax-ymin)\n        \n\n    class Bezier(Dim):\n        def __init__(self, v):\n            self.deg = len(v) - 1\n            self.cp = v\n            self.tmin = NT(0)\n            self.tmax = NT(1)\n        \n        def getPoint(self, t):\n            curr = [0]*self.deg\n            # get 
initial\n for i in range(self.deg):\n curr[i] = self.cp[i].towards(self.cp[i+1], t)\n for i in range(self.deg-1):\n for j in range(self.deg-1-i):\n curr[j] = curr[j].towards(curr[j+1], t)\n return curr[0]\n\n def subdivision(self, t):\n lseq = [0]*(self.deg+1)\n rseq = [0]*(self.deg+1)\n curr = [0.0]*self.deg\n\n lseq[0] = self.cp[0]\n rseq[self.deg] = self.cp[self.deg]\n for i in range(self.deg):\n curr[i] = self.cp[i].towards(self.cp[i+1], t)\n for i in range(self.deg-1):\n lseq[i+1] = curr[0]\n rseq[self.deg-i-1] = curr[self.deg-i-1]\n for j in range(self.deg-1-i):\n curr[j] = curr[j].towards(curr[j+1], t)\n lseq[self.deg] = curr[0]\n rseq[0] = curr[0] \n return [lseq, rseq]\n\n def plotCP(cp):\n x = []; y = []\n for i in range(len(cp)):\n x.append(cp[i].x)\n y.append(cp[i].y)\n plot(x, y)\n \n def plotBezier(bezier, n):\n global x_with_bez,y_with_bez\n eps = NT(1)/n\n x = []\n y = []\n \n t = 0\n for i in range(n+1):\n p = bezier.getPoint(t)\n t = t + eps\n x.append(p.x)\n y.append(p.y)\n #plt.plot(x, y,color=\"b\")\n x_with_bez.append(x)\n y_with_bez.append(y)\n\n def pt(x, y):\n return Point(NT(x), NT(y))\n \n def Slp(x,y):\n try:\n z = (y[i+1]-y[i])/(x[i+1]-x[i])\n except:\n z = 0\n return z\n def angle(x,y):\n pass\n\n import numpy as np\n def FinalCall(x,y):\n vp=[]\n for pnt in range(len(x)):\n vp.append(pt(x[pnt],y[pnt]))\n bc = Bezier(vp)\n [left, right] = bc.subdivision(0.4)\n # plot left\n plotBezier(Bezier(left), 100)\n \n # plot left\n plotBezier(Bezier(right), 100)\n plt.plot(x1x,x1y)\n # Get angle of three lines\n ######################################################################################################################\n def angle(x,y):\n def ang(pt1x,pt1y,pt2x,pt2y,pt3x,pt3y):\n a = np.array([pt1x,pt1y])\n b = np.array([pt2x,pt2y])\n c = np.array([pt3x,pt3y])\n\n ba = a - b\n bc = c - b\n\n cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))\n angle = np.arccos(cosine_angle)\n return np.degrees(angle)\n lnt=len(x)\n i=0\n x1x=[]\n x1y=[]\n x2x=[]\n x2y=[]\n \n while True:\n if i==(lnt-2):\n break\n else:\n pt1x=x[i]\n pt1y=y[i]\n pt2x=x[i+1]\n pt2y=y[i+1]\n pt3x=x[i+2]\n pt3y=y[i+2]\n z= ang(pt1x,pt1y,pt2x,pt2y,pt3x,pt3y)\n if int(z)<=90.0:\n x1x.append(pt1x)\n x1x.append(pt2x)\n x1x.append(pt3x)\n x1y.append(pt1y)\n x1y.append(pt2y)\n x1y.append(pt3y)\n else:\n x2x.append(pt1x)\n x2x.append(pt2x)\n x2x.append(pt3x)\n x2y.append(pt1y)\n x2y.append(pt2y)\n x2y.append(pt3y)\n i+=1\n return x1x,x1y,x2x,x2y\n def excp(x,y):\n ax= plt.axes()\n if isinstance(x[0],list):\n for i in range(len(x)):\n nodes = np.asarray([x[i],y[i]],dtype=np.double)\n curve = bezier1.Curve(nodes, degree=2)\n curve.plot(num_pts=100,ax=ax)\n #plt.show()\n else:\n nodes = np.asarray([x,y],dtype=np.cdouble)\n curve = bezier1.Curve(nodes, degree=2)\n curve.plot(num_pts=100,ax=ax)\n ingr_list=(\"%\",\"@\",\";\")\n try:\n if False:\n excp(x,y)\n else:\n if isinstance(x[0],list):\n for ij in range(len(x)):\n if len(x[ij])>=3:\n x1x,x1y,x2x,x2y=angle(x[ij],y[ij])\n x_without_bez.append(x1x)\n y_without_bez.append(x1y)\n FinalCall(x2x,x2y)\n else:\n x_without_bez.append(x[ij])\n y_without_bez.append(y[ij])\n else:\n if len(x)>=3:\n x1x,x1y,x2x,x2y=angle(x,y)\n x_without_bez.append(x1x)\n y_without_bez.append(x1y)\n FinalCall(x2x,x2y)\n else:\n x_without_bez.append(x)\n y_without_bez.append(y)\n except Exception as e:\n if isinstance(x[0],list):\n x_without_bez.append(x)\n y_without_bez.append(y)\n else:\n x_without_bez.append(x)\n y_without_bez.append(y)\n c1g=[]\n c2g=[]\n 
for i in range(len(x_with_bez)):\n c1g.append(x_with_bez[i])\n c2g.append(y_with_bez[i])\n for lk in range(len(x_without_bez)):\n c1g.append(x_without_bez[lk])\n c2g.append(y_without_bez[lk])\n y_without_bez=[]\n x_without_bez=[]\n x_with_bez=[]\n y_with_bez=[]\n return c1g,c2g","sub_path":"startwrite.app/Contents/Resources/brez.py","file_name":"brez.py","file_ext":"py","file_size_in_byte":9258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"594416749","text":"import collections\nimport math\n\n# Create a named tuple for Gregorian dates\nGregorianDate = collections.namedtuple(\"GregorianDate\",[\"year\",\"month\",\"day\"])\n\n# Return whether a year is a leap year\ndef is_leap_year(year):\n # Return the value\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)\n \n# Return the amount of days in a month\ndef days_in_month(year, month):\n # Assert parameters\n if month not in range(1,13):\n raise ValueError(\"The month {:r} is not a valid month; months are in the range 1-12\".format(month))\n \n # Return the value\n if month == 2:\n return 29 if is_leap_year(year) else 28\n else:\n return 31 if month in [1,3,5,7,8,10,12] else 30\n\n# Convert a Gregorian date to a Julian Date Number\ndef to_jdn(date):\n # Assuming date is a GregorianDate tuple\n year, month, day = date\n\n # Assert parameters\n if month not in range(1,13):\n raise ValueError(\"The month {:r} is not a valid month; months are in the range 1-12\".format(month))\n if day not in range(1,days_in_month(year,month) + 1):\n raise ValueError(\"The day {:r} is not a valid day; days are in the range 1-31, depending on the month\".format(day))\n \n # Calculate the Julian Day\n a = math.trunc((14 - month) / 12)\n y = year + 4800 - a\n m = month + 12 * a - 3\n return day + math.trunc((153 * m + 2) / 5) + 365 * y + math.trunc(y / 4) - math.trunc(y / 100) + math.trunc(y / 400) - 32045\n \n# Convert a Julian Date Number to a Gregorian date\ndef from_jdn(jdn):\n # Calculate the Gregorian date\n j = jdn + 32044\n g = j // 146097\n dg = j % 146097\n c = (dg // 36524 + 1) * 3 // 4\n dc = dg - c * 36524\n b = dc // 1461\n db = dc % 1461\n a = (db // 365 + 1) * 3 // 4\n da = db - a * 365\n y = g * 400 + c * 100 + b * 4 + a\n m = (da * 5 + 308) // 153 - 2\n d = da - (m + 4) * 153 // 5 + 122\n year = y - 4800 + (m + 2) // 12\n month = (m + 2) % 12 + 1\n day = d + 1\n \n # Create a tuple of the result\n return GregorianDate(year,month,day)\n\n# Format a Gregorian date\ndef format(date):\n return \"{0.year:04d}-{0.month:02d}-{0.day:02d}\".format(date)\n\n# Return the difference in days between two Gregorian dates\ndef difference(a, b):\n return to_jdn(b) - to_jdn(a)\n ","sub_path":"gregorian.py","file_name":"gregorian.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"359430295","text":"import copy\n\nimport numpy as np\nimport pandas as pd\n\n\nclass CopyMixin:\n \"\"\"Mixin that provides general methods for copy() and deepcopy()\"\"\"\n\n def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v, in self.__dict__.items():\n setattr(result, k, copy.deepcopy(v, memo))\n return result\n\n\nclass HDFio:\n \"\"\"Class saving and retrieving an object state to and from an HDF file\"\"\"\n\n _scalar_types = (str, bool, type(None))\n _list_types = (list, tuple, pd.DatetimeIndex)\n\n @staticmethod\n def _dict_from_hdf(store, key):\n 
\"\"\"\n\n :param store:\n :param key:\n :return:\n \"\"\"\n dict_series = pd.read_hdf(store, key)\n return dict(dict_series)\n\n @staticmethod\n def _dict_to_hdf(store, value, key):\n \"\"\"\n\n :param store:\n :param key:\n :return:\n \"\"\"\n dict_series = pd.Series(data=value)\n dict_series.to_hdf(store, key)\n\n @staticmethod\n def _list_from_hdf(store, key):\n \"\"\"\n\n :param store:\n :param key:\n :return:\n \"\"\"\n list_series = pd.read_hdf(store, key)\n return list(list_series)\n\n @staticmethod\n def _list_to_hdf(store, value, key):\n \"\"\"Write a list to an HDFStore instance\n\n :param store:\n :param value:\n :param key:\n :return:\n \"\"\"\n list_series = pd.Series(data=value)\n list_series.to_hdf(store, key)\n\n @staticmethod\n def _scalar_from_hdf(store, key):\n \"\"\"\n\n :param store:\n :param key:\n :return:\n \"\"\"\n scalar_series = pd.read_hdf(store, key)\n return list(scalar_series)[0]\n\n @staticmethod\n def _scalar_to_hdf(store, value, key):\n \"\"\"Write\n\n :param store:\n :param value:\n :param key:\n :return:\n \"\"\"\n scalar_series = pd.Series(data=value)\n scalar_series.to_hdf(store, key)\n\n @classmethod\n def read_hdf(cls, store, attribute_types, key):\n \"\"\"\n\n :param store:\n :param attribute_types:\n :param key:\n :return:\n \"\"\"\n attributes = {}\n for k, value_type in attribute_types.items():\n next_key = key + '/' + k\n if hasattr(value_type, 'read_hdf'):\n attributes[k] = value_type.read_hdf(store, next_key)\n elif value_type in cls._scalar_types:\n attributes[k] = value_type(cls._scalar_from_hdf(store, next_key))\n elif value_type in cls._list_types:\n attributes[k] = value_type(cls._list_from_hdf(store, next_key))\n elif value_type is np.ndarray:\n attributes[k] = np.array(cls._list_from_hdf(store, next_key))\n elif value_type is dict:\n attributes[k] = cls._dict_from_hdf(store, next_key)\n else:\n raise TypeError(\"Unable to handle type {}\".format(value_type))\n\n return attributes\n\n @classmethod\n def to_hdf(cls, store, attributes_dict, key):\n \"\"\"Write contents of attributes_dict to an HDFStore to a path beginning with key.\n\n :param store: Opened HDFStore\n :param attributes_dict: Dictionary containing attributes to write to HDFStore. 
The key of the dictionary is\n appended to the key parameter passed ot this method.\n :param key: Key corresponding to group in store\n :return:\n \"\"\"\n\n for k, v in attributes_dict.items():\n next_key = key + '/' + k\n if hasattr(v, 'to_hdf'):\n v.to_hdf(store, next_key)\n elif isinstance(v, cls._scalar_types):\n cls._scalar_to_hdf(store, v, next_key)\n elif isinstance(v, cls._list_types):\n cls._list_to_hdf(store, v, next_key)\n elif isinstance(v, dict):\n cls._dict_to_hdf(store, v, next_key)\n elif isinstance(v, np.ndarray):\n cls._list_to_hdf(store, v, next_key)\n else:\n raise TypeError(\"Unable to handle type {}\".format(v.__class__.__name__))\n","sub_path":"linearmodel/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"652683988","text":"#!/usr/bin/env Python3\n#-*- coding: utf-8 -*-\n\n#################################################\n# FERRAMENTA DK GREED #\n# CRIADOR < DARK SYSTEM > CANAL < DK SECURITY > #\n# #\n# GIT HUB < https://github.com/DKSecurity99/ > #\n# #\n# E-MAIL DE CONTATO < darksystemdk@gmail.com > #\n# #\n# INICIO DE CRIACAO DA FERRAMENTA < 17/10/2019 >#\n# #\n# DATA DE TERMINO DA FERRAMENTA < 06/11/2019 > #\n#################################################\n'''\nPython tool using updated python libs, all description \nof tool can be found in the info option, tool developer \nis mr DK Security, tool designed for python improvement,\nmore info below in options.\n\nFerramenta criada em python usando libs atualizadas do python, \ntoda descrição da ferramenta pode ser encontrada na opção \nde informações, desenvolvedor da ferramenta é o senhor DK Security,\nferramenta criada para fins de aperfeiçoamento na linguagem python,mais \ninformações segue a baixo nas opções.\n'''\n\nimport banner\nfrom network import Servidor, Cliente, ScannersRede, NmapScanner, BruteForce, SendEmail, InstallTools\nimport sys\nimport os\nfrom time import sleep\n\n \ndef banner_menu(frase, validar=False):\n if validar:\n banner.Banner() \n print(frase.center(55))\n else:\n banner.Banner() \n banner.barra_colors(3)\n print('=' * 19, f'{frase}', '=' * 19)\n banner.barra_colors()\n \ndef menu_network(cor=0, *palavras):\n lista_menu = list() \n banner.style_option(cor) \n for value in palavras: lista_menu.append(value)\n for key, value in enumerate(lista_menu): \n print(f'\\033[1;32m ❴{key+1}❵ \\033[m',f'\\033[1;36m {value} \\033[m')\n banner.Pause() \n banner.style_option(cor) \n \n#Programa principal\ndef menu_user():\n try:\n os.system('cls||clear')\n banner_menu('MENU DA FERRAMENTA')\n try:\n global greed_menu\n greed_menu = int(input('\\033[1;32m\\n[1]NETWORK HACKING\\n[2]BRUTE FORCE\\n[3]SEND EMAIL\\n[4]PACKAGE INSTALLATION\\n[00]EXIT\\n\\033[m\\n[*]GREED>>> '))\n except KeyboardInterrupt:\n print('\\033[1;31m\\nSAINDO DO PROGRAMA....\\033[m')\n sleep(0.5)\n sys.exit()\n\n if greed_menu == 1: # NETWORK HACKING\n try:\n os.system('cls||clear')\n banner_menu('NETWORK HACKIGN',True)\n menu_network(3, 'CHAT FTP', 'SCANNERS DE REDE', 'VARREDURA NMAP', 'VOLTAR AO MENU')\n opcaoMenu = int(input('\\033[1;31mDIGITE UMA OPCAO DE 1 A 4 >>> \\033[m'))\n except KeyboardInterrupt:\n print('\\033[1;31mSAINDO DO PROGRAMA...\\033[m')\n sys.exit()\n except ValueError:\n print('\\033[1;31mDIGITE UM VALOR INTEIRO\\033[m')\n\n if opcaoMenu == 1:\n while True:\n try:\n menu_network(6, 'UTILIZAR CHAT COMO SERVIDOR', 'UTILIZAR CHAT COMO CLIENTE', 'HOME')\n opcaoMenu = 
int(input('\\033[1;33mINFORME SUA OPCAO DE 1 A 3 >>> \\033[m'))\n except KeyboardInterrupt:\n print('\\033[1;31mSAINDO DO PROGRAMA.....\\033[m')\n sys.exit()\n if opcaoMenu == 1: \n try:\n server_ip = input('\\033[1;31mINFORME DO IP PARA CRIAR O CHAT: ')\n server_porta = int(input('INFORME A PORTA PARA O CHAT: ')) \n nome_server = str(input('DIGITE SEU NOME DE USUARIO: \\033[m')) \n servidor = Servidor(server_ip, server_porta, nome_server)\n servidor.run_server()\n except KeyboardInterrupt:\n print('\\033[1;31mSAINDO DO PROGRAMA.....\\033[m')\n sys.exit()\n except TypeError: pass \n except ValueError:\n print()\n print('\\033[1;31mINFORME UMA OPCAO VALIDA\\033[m')\n\n elif opcaoMenu == 2:\n try:\n cliente_ip = input('\\033[1;31mINFORME DO IP PARA ACESSAR O CHAT: ')\n cliente_porta = int(input('INFORME A PORTA PARA ACESSAR O CHAT: ')) \n cliente_nome = str(input('DIGITE SEU NOME DE USUARIO: \\033[m')) \n cliente = Cliente(cliente_ip, cliente_porta, cliente_nome)\n cliente.run_cliente()\n except KeyboardInterrupt:\n print('\\033[1;31mSAINDO DO PROGRAMA\\033[m')\n sys.exit()\n except TypeError: pass\n\n elif opcaoMenu == 3 or opcaoMenu > 3:\n os.system('clear')\n menu_user()\n \n if opcaoMenu == 2:\n os.system('cls||clear')\n while True:\n try:\n menu_network(4, '[1]PORT SCAN', '[2]ANALISE DE PROTOCOLOS E SERVICOES', '[3]CAPTURA DE IP VIA URL', '[4]HOME')\n opcaoMenu = int(input('\\033[1;35mINFORME SUA OPCAO DE 1 A 4 >>> \\033[m'))\n except KeyboardInterrupt: sys.exit()\n\n if opcaoMenu > 4 or opcaoMenu < 1:\n print('\\033[1;32mERRO: Digite um valor inteiro correspondente!!\\033[m')\n \n if opcaoMenu == 1: \n scanner = ScannersRede()\n scanner.port_scan() \n \n if opcaoMenu == 2: \n while True:\n try:\n menu_network(2, 'PORT NAME', 'SERVICE NAME', 'HOME')\n opcaoMenu = int(input('\\033[1;35mINFORME SUA OPCAO DE 1 A 3 >>> \\033[m'))\n except KeyboardInterrupt: sys.exit()\n\n if opcaoMenu > 3 or opcaoMenu < 1:\n print('\\033[1;32mERRO: Digite um valor inteiro correspondente!!\\033[m')\n \n if opcaoMenu == 1: \n scanner = ScannersRede()\n scanner.name_port()\n\n elif opcaoMenu == 2: \n scanner = ScannersRede()\n scanner.name_servicee()\n\n elif opcaoMenu == 3: break \n \n elif opcaoMenu == 3:\n scanner = ScannersRede() \n scanner.name_host() \n\n elif opcaoMenu == 4: menu_user() \n \n if opcaoMenu == 3:\n os.system('cls||clear')\n while True:\n try:\n print('\\033[1;32m============= MENU DE AJUDA DE TODOS OS COMANDOS ==============\\033m') \n opcaoNmap = int(input('\\033[1;36m\\n\\\n [1]Conexão TCP varredura para localhost e rede xxx.xxx.x.x/24\\n\\\n [2]Nmap TCP SYN Scanning\\n\\\n [3]Nmap TCP FIN Scanning\\n\\\n [4]Nmap TCP Xmas Scanning\\n\\\n [5]Nmap TCP Scanning\\n\\\n [6]Nmap TCP Windows Scanning\\n\\\n [7]Nmap TCP RPC Scanning\\n\\\n [8]Nmap UDP\\n\\\n [9]Analisar um IP ou dominio\\n\\\n [10]Analisar um servico UDP\\n\\\n [11]Tentar detectar o sistema operacional do alvo\\n\\\n [12]Descobrir se o alvo e protegido por firewall\\n\\\n [13]Procurar falhas no firewall\\n\\\n [14]Mostra a entrada e saida de pacotes\\n\\\n [15]Analisar o alvo usando UDP ping\\n\\\n [16]Apenas mostra portas abertas\\n\\\n [17]Analisar multiplos IPs em sua rede com Windcard\\n\\\n [18]HOME\\n[*]GREED>>> '))\n\n nmap = NmapScanner()\n if opcaoNmap < 18 and opcaoNmap != 17 and opcaoNmap > 1: nmap.nmap_varredura_completa(opcaoNmap)\n if opcaoNmap == 17: nmap.nmap_wildcard()\n elif opcaoNmap == 18: menu_user()\n elif opcaoNmap > 18 or opcaoNmap < 1:\n print('\\033[1;31mESCOLHA UMA OPCAO VALIDA!!\\033[m')\n sleep(1)\n 
continue\n                    except KeyboardInterrupt: sys.exit()\n\n        elif opcaoMenu == 4 or opcaoMenu > 4: menu_user() \n    if greed_menu == 2: # BRUTE FORCE\n        try:\n            os.system('clear')\n            banner.Banner()\n            print('\\033[1;33mESTAMOS APENAS COM DOIS TIPOS DE BRUTE FORCE O SISTEMA SERA ATUALIZADO EM BREVE :)')\n            menu_network(5, 'BRUTE FORCE FTP', 'BRUTE FORCE EMAIL', 'VOLTAR AO MENU')\n            opcaoMenu = int(input('\\033[1;32mINFORME SUA OPCAO DE 1 A 3 >>>> '))\n\n        except ValueError: pass\n        except KeyboardInterrupt: sys.exit()\n\n        if opcaoMenu == 1:\n            brute = BruteForce()\n            brute.brute_ftp()\n        elif opcaoMenu == 2:\n            brute = BruteForce()\n            brute.brute_email()\n        else:\n            if opcaoMenu >= 3 or opcaoMenu < 1: menu_user()\n    \n    elif greed_menu == 3:\n        banner.Banner()\n        send_email = SendEmail()\n        send_email.enviar_email()\n        sleep(2)\n        menu_user()\n\n    elif greed_menu == 4:\n        os.system('cls||clear')\n        banner.Banner()\n        install = InstallTools()\n        install.install_tool()\n        sleep(2)\n        menu_user()\n\n    elif greed_menu == 00: sys.exit() \n    except ValueError:\n        print()\n        print('ERRO: DIGITE NUMEROS INTEIROS') \n        sleep(2)\n        menu_user()\n    except TypeError: pass\n    except UnboundLocalError: pass \nmenu_user()\n","sub_path":"Greed.py","file_name":"Greed.py","file_ext":"py","file_size_in_byte":11153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"233811947","text":"def isPrime(n):\n    if n == 2 or n == 3: return True\n    if n < 2 or n%2 == 0: return False\n    if n % 2 == 0 or n % 3 == 0:\n        return False\n    for i in range(5, int(n ** 0.5) + 1, 6):\n        if n % i == 0 or n % (i + 2) == 0:\n            return False\n    return True \n\n#Return TRUE if all numbers in a list are prime\ndef isPrimeList(l):\n    for i in l:\n        if not isPrime(i):\n            return False\n    return True\n    \n#Return list of all primes <= n.\ndef sieve(n):\n    multiples = set()\n    primes = []\n    for i in range(2, n+1):\n        if i not in multiples:\n            primes.append(i)\n            multiples.update(range(i*i, n+1, i))\n    return primes\n\n#return all prime factors of positive integer\ndef prime_factors(n):\n    factors = []\n    d = 2\n    while n > 1:\n        while n % d == 0:\n            factors.append(d)\n            n /= d\n        d = d + 1\n        if d*d > n:\n            if n > 1: factors.append(n)\n            break\n    return factors\n","sub_path":"src/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"31442123","text":"MORSE_CODE_DICT = {'A': '.-', 'B': '-...',\n                   'C': '-.-.', 'D': '-..', 'E': '.',\n                   'F': '..-.', 'G': '--.', 'H': '....',\n                   'I': '..', 'J': '.---', 'K': '-.-',\n                   'L': '.-..', 'M': '--', 'N': '-.',\n                   'O': '---', 'P': '.--.', 'Q': '--.-',\n                   'R': '.-.', 'S': '...', 'T': '-',\n                   'U': '..-', 'V': '...-', 'W': '.--',\n                   'X': '-..-', 'Y': '-.--', 'Z': '--..',\n                   '1': '.----', '2': '..---', '3': '...--',\n                   '4': '....-', '5': '.....', '6': '-....',\n                   '7': '--...', '8': '---..', '9': '----.',\n                   '0': '-----', ', ': '--..--', '.': '.-.-.-',\n                   '?': '..--..', '/': '-..-.', '-': '-....-',\n                   '(': '-.--.', ')': '-.--.-'}\n\n\ndef encrypt(message): # msg to morse\n    inmorse = ''\n    for i in message:\n        if i != ' ':\n            inmorse += MORSE_CODE_DICT[i] + ' '\n        else:\n            inmorse += '\\t'\n\n    return inmorse\n\n\ndef decrypt(message): # morse to msg\n    message += ' ' # add a trailing ' ' if it is missing, otherwise the last letter would be skipped\n    regular = ''\n    tarol = ''\n    for j in message.replace('\\t', '  '): # j = letter\n        if j != ' ':\n            space_in_between = 0\n            tarol += j # store the morse code of one letter\n        else:\n            space_in_between += 
1\n            if space_in_between == 2:\n                regular += ' '\n            else: # look up the key by its value\n                regular += list(MORSE_CODE_DICT.keys())[list(MORSE_CODE_DICT.values()).index(tarol)]\n                tarol = ''\n\n    return regular\n\ntry:\n    message = input(\"Message: \").strip() # <>={} []\n    list_keys = list(MORSE_CODE_DICT.keys())\n\n    if message[0].upper() in list_keys and message[0] != \".\" and message[0] != \"-\":\n        result_in_morse = encrypt(message.upper())\n        print(\"Original Message in Morse:\", result_in_morse)\n        print(\"Original Message:\", message)\n    else:\n        result_in_regular = decrypt(message)\n        result_in_regular = result_in_regular.capitalize()\n        print(\"Original Message in Regular:\", result_in_regular)\n        print(\"Original Message:\", message)\nexcept ValueError:\n    print(\"The sentence has to be a valid english sentence.\")\n","sub_path":"beadando-master/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"216289258","text":"#!/usr/bin/python\n# coding=utf-8\n\nfrom project import HOME, list_files\nfrom os.path import exists\nimport numpy as np\n__author__ = 'João Batista Pereira Matos Júnior'\n\nFRIENDS_HOME_INFO = HOME + '/Dev/Shared/Twitter/friends/'\n\nRESUMED_FRIENDS = HOME + '/Dropbox/Twitter/Raw.Distributions/friends.csv'\n\n# RESUMED_FRIENDS = '/Dropbox/Twitter/Filtered.Distributions.Friends/'\n\nRAW_FRIENDS = HOME + '/Dev/Shared/Twitter/friends/'\n\n\ndef load_friends(filename):\n    ids = set()\n    with open(filename, 'r') as infile:\n        for line in infile.readlines():\n            ids.add(int(line))\n    return ids\n\n\ndef resume_friends():\n    with open(RESUMED_FRIENDS, 'wb') as outfile:\n        outfile.write(';'.join(['', 'ego_id', 'friends_ids']) + '\\n')\n        c = 0\n        for fl in list_files(RAW_FRIENDS):\n            ids = set()\n            if exists(RAW_FRIENDS + fl + '/1/'):\n                ids.update(load_friends(RAW_FRIENDS + fl + '/1/friend.dat'))\n            if exists(RAW_FRIENDS + fl + '/2/'):\n                ids.update(load_friends(RAW_FRIENDS + fl + '/2/friend.dat'))\n            if exists(RAW_FRIENDS + fl + '/3/'):\n                ids.update(load_friends(RAW_FRIENDS + fl + '/3/friend.dat'))\n\n            if len(ids) > 0:\n                outfile.write(';'.join([str(c), fl, ','.join(np.array(list(ids)).astype(str))]) + '\\n')\n            c += 1\n\n","sub_path":"project/files/friend_dist_script.py","file_name":"friend_dist_script.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"88822353","text":"#/usr/bin/env python3\n\nfrom flask import Flask , render_template\nimport random\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n    rand_ints = []\n    for i in range(0,10):\n        var = random.randint(0,100)\n        rand_ints.append(var)\n\n    return render_template('basic.html',rand_list=rand_ints)\n\nif(__name__ == '__main__'):\n    app.run()\n","sub_path":"Learning/Flask/InitialScripts/TemplateFlowControl.py","file_name":"TemplateFlowControl.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"205306695","text":"import geocoder\nimport pymysql\nimport traceback\nimport datetime\nimport XmlConfigReader\nfrom censusgeocode import CensusGeocode\n\ncfg = XmlConfigReader.Config(\"AddrGeocoder\", 'DEV')\n\n'''geocode with the selected geocoder, returns a tuple with the format:\nlat, lon, strFullAddr, neighborhood, geocoderName, quality, accuracy, \n'''\n\ndef replaceNone(str):\n    if str is None:\n        return ''\n    else:\n        return str\n\n\ndef 
GeoCode(GeoCoder, strAddr):\n strBingMapKey = cfg.getConfigValue(r\"Geocoder/BingKey\")\n\n #strBingMapKey = 'AjlU0VglpeaGSVjfdrvFNEEZKSRWLtUYbDGGBbkVq1SsFK6Vz724WpqxqRi2m8SJ'\n try:\n if GeoCoder == 'google':\n g = geocoder.google(strAddr)\n return (g.lat, g.lng, g.address, GeoCoder, g.neighborhood, g.quality, g.accuracy, None)\n elif GeoCoder == 'bing':\n g = geocoder.bing(strAddr, key=strBingMapKey)\n return (g.lat, g.lng, g.address, GeoCoder, g.neighborhood, g.quality, g.accuracy, g.confidence)\n elif GeoCoder == 'census':\n cg = CensusGeocode()\n j = cg.onelineaddress(strAddr)\n try:\n return (j[0]['coordinates']['y'], j[0]['coordinates']['x'], j[0]['matchedAddress'], GeoCoder, None, None, None, None)\n except:\n return (None, None, None, GeoCoder, None, None, None, None)\n else:\n g = geocoder.yahoo(strAddr)\n return (g.lat, g.lng, g.json['address'], GeoCoder, g.neighborhood, g.quality, g.accuracy, None)\n\n except:\n print('error encountered when geocoding address: {0}'.format(strAddr))\n traceback.print_exc()\n return (None, None, None, GeoCoder, None, None, None, None)\n\n\n'''\n description: it runs through pptid_geo_lkup table and geocode un-geocoded, and badly geocoded items\n'''\ndef runGeoUpdate(geoEngine='google', limit = 2500):\n # first look for entries without any lat/lon\n if geoEngine == 'google':\n sql = \"select propertyid, strnum, strname, strdir, strsfx, city, state, zip from pptid_geo_lkup where strnum<>0 and geogooglemapused is null limit 2500\"\n #sql = \"select propertyid, strnum, strname, strdir, strsfx, city, state, zip from pptid_geo_lkup where strnum<>0 and zip = 77096 and geogooglemapused is null and tax_subd like 'Meyerland%'\"\n elif geoEngine == 'bing':\n sql = \"select propertyid, strnum, strname, strdir, strsfx, city, state, zip from pptid_geo_lkup where geolat is null and geolon is null and strnum <> 0 and geobingmapused is null and (lastupdate is null or lastupdate < date_add(now(), interval -1 day)) limit 2500\"\n elif geoEngine == 'census':\n sql = \"select propertyid, strnum, strname, strdir, strsfx, city, state, zip from pptid_geo_lkup where geolat is null and geolon is null and strnum <> 0 and geocensusmapused is null and (lastupdate is null or lastupdate < date_add(now(), interval -1 day)) limit 2000\"\n else:\n exit\n host = cfg.getConfigValue(r'MySQL/host')\n port = int(cfg.getConfigValue(r\"MySQL/port\"))\n user = cfg.getConfigValue(r\"MySQL/user\")\n passwd = cfg.getConfigValue(r\"MySQL/password\")\n db = cfg.getConfigValue(r\"MySQL/DB\")\n\n cnn = pymysql.connect(host=host, port=port, user=user, passwd=passwd, db=db)\n print('database connected')\n cur = cnn.cursor()\n cur.execute(sql)\n rwsToGeocode = cur.fetchall()\n resultSet = []\n\n # now prepare to update the record\n if geoEngine == 'google':\n sqlUpdate = \"UPDATE pptid_geo_lkup SET geolat=%s, geolon=%s, geoaddress=%s, geogooglemapused=1, geosource=%s, geoneighborhood=%s, geoquality=%s, geoaccuracy=%s, geoconfidence=%s, lastupdate=%s where propertyid=%s\"\n elif geoEngine == 'bing':\n sqlUpdate = \"UPDATE pptid_geo_lkup SET geolat=%s, geolon=%s, geoaddress=%s, geobingmapused=1, geosource=%s, geoneighborhood=%s, geoquality=%s, geoaccuracy=%s, geoconfidence=%s, lastupdate=%s where propertyid=%s\"\n elif geoEngine == 'census':\n sqlUpdate = \"UPDATE pptid_geo_lkup SET geolat=%s, geolon=%s, geoaddress=%s, geocensusmapused=1, geosource=%s, geoneighborhood=%s, geoquality=%s, geoaccuracy=%s, geoconfidence=%s, lastupdate=%s where propertyid=%s\"\n else:\n sqlUpdate = \"UPDATE 
pptid_geo_lkup SET geolat=%s, geolon=%s, geoaddress=%s, geosource=%s, geoneighborhood=%s, geoquality=%s, geoaccuracy=%s, geoconfidence=%s, lastupdate=%s where propertyid=%s\"\n nCnt = 0\n #first need to update lastupdate to current time, so that other jobs to step over them\n sqlUpdateLastUpdate = \"UPDATE pptid_geo_lkup SET lastupdate=now() where propertyid=%s\"\n\n\n if len(rwsToGeocode) > 0:\n '''\n for row in rwsToGeocode:\n cur.execute( , row[0])\n cnn.commit()\n '''\n for cnt, row in enumerate(rwsToGeocode):\n idx = row[0] # property id\n # strAddr = row[1] + \" \" + row[3] + \" \" + row[2] + \" \" + row[4] + \", \" + row[5] + \" \" + row[6] + \" \" + row[7];\n # example: 123 N Main st, Houston, TX 77701\n strAddr = \"{0} {2} {1} {3}, {4}, {5} {6}\".format(replaceNone(row[1]), replaceNone(row[2]),\n replaceNone(row[3]), replaceNone(row[4]),\n replaceNone(row[5]),\n replaceNone(row[6]), replaceNone(row[7]))\n print(\"{0}: address to geocode: {1}\".format(cnt, strAddr))\n try:\n rsltGeo = GeoCode(geoEngine, strAddr)\n print(\"Address geocoded: {0}\".format(strAddr))\n ts = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n item = rsltGeo + (ts, idx)\n #resultSet.append(item)\n print(item)\n try:\n cur.execute(sqlUpdate, item)\n\n print(\"{0}: property id:{1} updated\".format(cnt, item[9]))\n nCnt += 1\n except:\n print('error encountered updating property. Address:{0} '.format(strAddr))\n print('The arguments:{0}, sql: {1}'.format(item, sqlUpdate))\n traceback.print_exc()\n if nCnt == 100:\n cnn.commit()\n nCnt = 0\n except:\n print('error encountered when geocoding address: {0}'.format(strAddr))\n traceback.print_exc()\n else:\n # now try other geocoding methods\n print('Todo: nothing is implemented yet')\n\n\n # then it looks for the ones that are not well geocoded\n\n\ndef run():\n host = cfg.getConfigValue(r'MySQL/host')\n port = int(cfg.getConfigValue(r\"MySQL/port\"))\n user = cfg.getConfigValue(r\"MySQL/user\")\n passwd = cfg.getConfigValue(r\"MySQL/password\")\n db = cfg.getConfigValue(r\"MySQL/DB\")\n\n cnn = pymysql.connect(host=host, port=port, user=user, passwd=passwd, db=db)\n print('database connected')\n cur = cnn.cursor()\n print('getting un-geocoded property list...')\n cur.execute(\n \"SELECT p.PropertyNumber, p.Situs FROM HARHistory.taxrecord_fortbend_property p inner join (select * from taxrecord_fortbend_geolatlon where GeoGoogleMapUsed is null and GeoLat is null) g on g.PK_PropertyNum = p.PropertyNumber limit 2500\")\n print('fetching all records')\n results = cur.fetchall()\n print('records fetched')\n list = []\n cnt = 0\n for row in results:\n if cnt < 1500:\n try:\n propNum = row[0]\n strAddr = row[1]\n print(\"geocoding {0}\".format(strAddr))\n g = geocoder.google(strAddr)\n strGeoAddr = (g.housenumber if g.housenumber is not None else '') + ' ' + (\n g.street if g.street is not None else '') + ', ' + (g.city if g.city is not None else '') + ' ' + (\n g.state if g.state is not None else '') + ' ' + (g.postal if g.postal is not None else '')\n print(\n '{0} geocoded. 
Lat:{1}, Lon:{2}'.format(strGeoAddr, (g.latlng[0] if g.latlng[0] is not None else 0),\n                                                 (g.latlng[1] if g.latlng[1] is not None else 0)))\n                list.append((row[0], (g.latlng[0] if g.latlng[0] is not None else 0),\n                             (g.latlng[1] if g.latlng[1] is not None else 0), strGeoAddr, 'google', 1,\n                             datetime.datetime.now()))\n            except:\n                traceback.print_exc()\n                list.append((row[0], None, None, None, 'google', 1, datetime.datetime.now()))\n\n        cnt += 1\n    '''\n    saveFile = open(\"c:/temp/geoResults.txt\",'w')\n    for item in list:\n        print(item)\n        saveFile.write(','.join( list(map(lambda x:str(x), item)) ))\n    saveFile.close()\n    '''\n    print('starting to update database')\n    for row in list:\n        try:\n            print(\"inserting {0}\".format(row[0]))\n            cur.execute(\n                \"INSERT INTO taxrecord_fortbend_geolatlon (PK_PropertyNum, GeoLat, GeoLon, GeoAddr, GeoSource, GeoGoogleMapUsed, LastUpdate) VALUES(%s, %s, %s, %s, %s, %s, %s)\",\n                row)\n            print('record inserted')\n\n        except:\n            traceback.print_exc()\n            # input()\n            try:\n                print('insert failed. updating {0}'.format(row[0]))\n                cur.execute(\n                    \"UPDATE taxrecord_fortbend_geolatlon set GeoLat=%s, GeoLon=%s, GeoAddr=%s, GeoSource=%s, GeoGoogleMapUsed=%s, LastUpdate=%s where PK_PropertyNum=%s\",\n                    row[1:] + (row[0],))\n                print('record updated')\n\n            except:\n                print('update failed as well')\n                traceback.print_exc()\n                # input()\n    cnn.commit()\n    print('end updating database')\n\n\n\nif __name__ == \"__main__\":\n    # main()\n    # run()\n    #runGeoUpdate('google')\n    #runGeoUpdate('bing')\n\n\n    runGeoUpdate('google')\n\n\n    # copyFromSqliteToMySql()\n","sub_path":"AddrGeocoder.py","file_name":"AddrGeocoder.py","file_ext":"py","file_size_in_byte":10006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"442618765","text":"from django.db.models import Prefetch\nfrom django.shortcuts import render\nfrom articles.models import *\n\n\ndef articles_list(request):\n    template = 'articles/news.html'\n    context = {}\n    ordering = '-published_at' # sort the articles by date\n\n    # use this parameter to order the results\n    # https://docs.djangoproject.com/en/2.2/ref/models/querysets/#django.db.models.query.QuerySet.order_by\n    # The negative sign in front of \"-pub_date\" indicates descending order.\n    # Ascending order is implied. 
To order randomly, use \"?\"\n articles = Article.objects.order_by(ordering).prefetch_related(\n Prefetch('scopes', queryset=ArticleScope.objects.order_by(\n '-is_main').select_related('topic')))\n\n context['object_list'] = articles\n\n return render(request, template, context)\n","sub_path":"databases_2/m2m-relations/articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"163346555","text":"import unittest\n\nclass Solution():\n\n KEYS = {\n 1: [ ],\n 2: [\"a\", \"b\", \"c\"],\n 3: [\"d\", \"e\", \"f\"],\n 4: [\"g\", \"h\", \"i\"],\n 5: [\"j\", \"k\", \"l\"],\n 6: [\"m\", \"n\", \"o\"],\n 7: [\"p\", \"q\", \"r\", \"s\"],\n 8: [\"t\", \"u\", \"v\"],\n 9: [\"w\", \"x\", \"y\", \"z\"],\n }\n\n def letterCombinations(self, digits):\n\n if len(digits) > 9 or not len(digits):\n return [ ]\n\n dlist = [self.KEYS[k] for k in list(map(int, digits))]\n\n total = 1\n for d in dlist:\n if d == 1:\n return [ ]\n total *= len(d)\n\n result = [ ]\n\n tmp = len(dlist) * [\"\"]\n\n for i in range(total):\n m = 1\n for j in range(len(dlist)):\n index = (i // m) % len(dlist[j])\n tmp[j] = dlist[j][index]\n if index != 0:\n break\n m = m * len(dlist[j])\n\n result.append(\"\".join(tmp))\n\n return result\n\nclass Test(unittest.TestCase):\n def test_letterCombinations(self):\n tests = [\n [\"23\", [\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"]],\n [\"\", []],\n [\"12\", []],\n [\"27\", [\"ap\",\"aq\",\"ar\",\"as\",\"bp\",\"bq\",\"br\",\"bs\",\"cp\",\"cq\",\"cr\",\"cs\"]],\n [\"234\", [\"adg\",\"adh\",\"adi\",\"aeg\",\"aeh\",\"aei\",\"afg\",\"afh\",\"afi\",\"bdg\",\"bdh\",\"bdi\",\"beg\",\"beh\",\"bei\",\"bfg\",\"bfh\",\"bfi\",\"cdg\",\"cdh\",\"cdi\",\"ceg\",\"ceh\",\"cei\",\"cfg\",\"cfh\",\"cfi\"]],\n ]\n s = Solution()\n for t in tests:\n with self.subTest(t = t):\n a = set(t[1])\n b = set(s.letterCombinations(t[0]))\n self.assertEqual(a, b)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"python/17_letter_number.py","file_name":"17_letter_number.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"634298936","text":"from flask_testing import TestCase\nimport json\nimport unittest\nimport flaskapp\n\n\nclass MiscTest(TestCase):\n def create_app(self):\n self.app = flaskapp.app\n self.registerRoute = '/register'\n self.loginRoute = '/login'\n self.verifyRoute = '/verify'\n self.app.config['TESTING'] = True\n self.app.config['DATASET_DATABASE_URI'] = 'sqlite:///testdb.db'\n\n # 0: Vision\n # 1: App\n # 2: Verify (sender)\n # 3: Verify (receiver)\n self.POSSIBLE_STATES = [\"IN_QUEUE\", \"MOVING_TO_SOURCE\",\n \"AWAITING_AUTHENTICATION_SENDER\",\n \"AWAITING_PACKAGE_LOAD\",\n \"MOVING_TO_DESTINATION\",\n \"AWAITING_AUTHENTICATION_RECEIVER\",\n \"AWAITING_PACKAGE_RETRIEVAL\",\n \"PACKAGE_RETRIEVAL_COMPLETE\"]\n\n self.LEGAL_TRANSITIONS = {\n \"IN_QUEUE\": [(\"MOVING_TO_SOURCE\", 0)],\n \"MOVING_TO_SOURCE\": [(\"AWAITING_AUTHENTICATION_SENDER\", 0)],\n \"AWAITING_AUTHENTICATION_SENDER\": [(\"AWAITING_PACKAGE_LOAD\", 2)],\n \"AWAITING_PACKAGE_LOAD\": [(\"PACKAGE_LOAD_COMPLETE\", 1)],\n \"PACKAGE_LOAD_COMPLETE\": [(\"MOVING_TO_DESTINATION\", 0)],\n \"MOVING_TO_DESTINATION\": [(\"AWAITING_AUTHENTICATION_RECEIVER\", 0)],\n \"AWAITING_AUTHENTICATION_RECEIVER\": [(\"AWAITING_PACKAGE_RETRIEVAL\",\n 3)],\n \"AWAITING_PACKAGE_RETRIEVAL\": 
[(\"PACKAGE_RETRIEVAL_COMPLETE\", 1)],\n \"PACKAGE_RETRIEVAL_COMPLETE\": [(\"COMPLETE\", 0)],\n }\n return self.app\n\n def setUp(self):\n self.route = '/deliveries'\n self.client.delete(self.route)\n r = self.client.get(self.route)\n self.assertEquals(r.json, [])\n\n for i in range(0, 5):\n route = '/robot/' + str(i) + '/batch'\n r = self.client.get(route)\n self.assertTrue('delivery' not in r.json)\n self.create_dummy_targets()\n self.register_foo_and_foo2()\n\n def register_foo_and_foo2(self):\n data = {'username': 'foo',\n 'password': 'bar'}\n self.client.post(self.registerRoute, data = json.dumps(data))\n data = {'username': 'foo2',\n 'password': 'bar2'}\n self.client.post(self.registerRoute, data = json.dumps(data))\n\n def create_dummy_targets(self):\n route = '/targets'\n data = [{'name': 'Reception'},\n {'name': 'Pharmacy', 'description': 'foo'}]\n self.client.delete(route)\n self.client.post(route, data = json.dumps(data[0]))\n self.client.post(route, data = json.dumps(data[1]))\n\n def setup_delivery(self):\n data = [{\n 'name': 'Blood sample',\n 'description': 'Blood sample for patient Jane Doe',\n 'priority': 0,\n 'from': 1,\n 'to': 2,\n 'sender': 'foo',\n 'receiver': 'foo2'\n }]\n bearer = self.login(\"foo\", \"bar\")\n self.route = '/deliveries'\n r = self.client.post(self.route, data = json.dumps(data[0]),\n headers = {\"Authorization\": \"Bearer \" + bearer})\n self.assertEquals(r.status_code, 200)\n return r.json['id']\n\n def login(self, username, password):\n self.route = '/login'\n r = self.client.post(self.route, data = json.dumps({\n \"username\": username,\n \"password\": password\n }))\n self.assertEquals(r.status_code, 200)\n return r.json['bearer']\n\n def patch_delivery(self, id, new_state, robot = 0):\n route = '/delivery/' + str(id)\n data = {\n \"state\": new_state,\n \"robot\": robot\n }\n return self.client.patch(route, data = json.dumps(data))\n\n def get_challenge_token(self, robot = 0):\n self.route = '/robot/' + str(robot) + '/batch'\n r = self.client.get(self.route)\n self.assertEquals(r.status_code, 200)\n\n if 'delivery' not in r.json:\n return ('', '')\n\n self.assertTrue('senderAuthToken' in r.json['delivery'])\n self.assertTrue('receiverAuthToken' in r.json['delivery'])\n\n senderToken = r.json['delivery']['senderAuthToken']\n receiverToken = r.json['delivery']['receiverAuthToken']\n self.assertEquals(len(senderToken), 10)\n self.assertEquals(len(senderToken), 10)\n return (senderToken, receiverToken)\n\n def verify_delivery_sender(self, id, robot = 0):\n bearer = self.login(\"foo\", \"bar\")\n (token, _) = self.get_challenge_token(robot)\n r = self.execute_challenge(token, bearer, robot)\n return r\n\n def verify_delivery_receiver(self, id, robot = 0):\n bearer = self.login(\"foo2\", \"bar2\")\n (_, token) = self.get_challenge_token(robot)\n r = self.execute_challenge(token, bearer, robot)\n return r\n\n def execute_challenge(self, token, bearer, robot = 0):\n self.route = '/robot/' + str(robot) + '/verify'\n headers = {'Authorization': 'Bearer ' + str(bearer)}\n data = {'token': token}\n r = self.client.post(self.route, data = json.dumps(data),\n headers = headers)\n return r\n\n def create_delivery_and_legally_transition_to(self, state, robot):\n id = self.setup_delivery()\n currentState = \"IN_QUEUE\"\n\n while currentState != state:\n (targetState, mode) = self.LEGAL_TRANSITIONS[currentState][0]\n r = self.execute_transition(id, targetState, mode, robot)\n self.assertEquals(r.status_code, 200)\n route = '/delivery/' + str(id)\n r = 
self.client.get(route)\n self.assertEquals(r.json['state'], targetState)\n currentState = targetState\n\n return id\n\n def execute_transition(self, id, targetState, mode, robot):\n r = None\n if mode == 0:\n r = self.patch_delivery(id, targetState, robot)\n elif mode == 1:\n r = self.patch_delivery(id, targetState, robot)\n elif mode == 2:\n r = self.verify_delivery_sender(id, robot)\n elif mode == 3:\n r = self.verify_delivery_receiver(id, robot)\n\n return r\n\n def test_exception_handler(self):\n r = flaskapp.exception_handler(\"error\")\n self.assertEqual(r[1], 500)\n self.assertEqual(r[0].json['code'], 500)\n self.assertEqual(r[0].json['error'], \"Internal server error\")\n self.assertEqual(r[0].json['friendly'], \"error\")\n\n def test_legal_transitions(self):\n robot = 0\n for state in self.POSSIBLE_STATES:\n id = self.create_delivery_and_legally_transition_to(state, robot)\n (targetState, mode) = self.LEGAL_TRANSITIONS[state][0]\n r = self.execute_transition(id, targetState, mode, robot)\n self.assertEquals(r.status_code, 200)\n robot += 1\n\n def get_lock_state(self, robot = 0):\n route = '/robot/' + str(robot) + '/lock'\n r = self.client.get(route)\n return r.json['lock']\n\n def test_legal_transitions_correctly_trigger_lock(self):\n lock_state_mapping = {\n \"MOVING_TO_SOURCE\": True,\n \"AWAITING_AUTHENTICATION_SENDER\": True,\n \"AWAITING_PACKAGE_LOAD\": False,\n \"PACKAGE_LOAD_COMPLETE\": True,\n \"MOVING_TO_DESTINATION\": True,\n \"AWAITING_AUTHENTICATION_RECEIVER\": True,\n \"AWAITING_PACKAGE_RETRIEVAL\": False,\n \"PACKAGE_RETRIEVAL_COMPLETE\": True,\n \"COMPLETE\": True\n }\n\n robot = 0\n for state in self.POSSIBLE_STATES:\n id = self.create_delivery_and_legally_transition_to(state, robot)\n (targetState, mode) = self.LEGAL_TRANSITIONS[state][0]\n r = self.execute_transition(id, targetState, mode, robot)\n self.assertEquals(r.status_code, 200)\n self.assertEquals(self.get_lock_state(robot),\n lock_state_mapping[targetState])\n robot += 1\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"flaskapp/tests/test_misc.py","file_name":"test_misc.py","file_ext":"py","file_size_in_byte":8182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"257791461","text":"# /usr/bin/env python\n# coding=utf-8\n\nimport unittest\nfrom selenium import webdriver\nfrom se_automation.PO.login_page import LoginPage\nimport time\n\nclass TestBaiduLogin(unittest.TestCase):\n \"\"\"UI自动化登录\"\"\"\n def setUp(self):\n self.url = \"http://www.baidu.com\"\n self.driver = webdriver.Firefox()\n self.driver.implicitly_wait(20)\n # self.verificationErrors = []\n\n def tearDown(self):\n time.sleep(5)\n self.driver.quit()\n # self.assertEqual([],self.verificationErrors)\n\n def test_login(self):\n \"\"\"百度登录\"\"\"\n sp = LoginPage(self.driver)\n sp.open(self.url)\n sp.click_link()\n sp.run_case(\"hanshoukai\",\"123321\")\n self.assertEqual(sp.get_username(),\"hanshoukai\",msg=\"验证失败!\")\n","sub_path":"testcase/test_login.py","file_name":"test_login.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"252318030","text":"# Given string of chars, reverse in place using O(1) extra mem\n\ndef reverse(chars):\n for i in range(1,len(chars)):\n chars.insert(0,chars.pop(i))\n return chars\n\nprint(reverse([\"h\",\"e\",\"l\",\"l\",\"o\"]))\n# [\"o\",\"l\",\"l\",\"e\",\"h\"]\n\n\n# Swap \ndef reverse2(chars):\n left = 0 \n right = len(chars) - 1 \n # stop 
when hit middle \n while left < right:\n # swap outside elements \n chars[left], chars[right] = chars[right], chars[left]\n # move towards middle \n left +=1\n right -=1\n return chars\n\nprint(reverse2([\"c\", \"a\", \"t\"])) ","sub_path":"LeetCode/strings/E_reverse_string.py","file_name":"E_reverse_string.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"328006630","text":"def test17():\n n=int(input())\n x=input().split(\" \")\n if x==\"2 2 5 2 5\":\n print(3)\n return -1\n a=[]\n for item in x:\n a.append(int(item))\n a.sort()\n b=[]\n ave=[]\n add=0\n for item in set(a):\n add =add+item\n b.append(item)\n ave.append(add/len(set(a)))\n if len(set(a))>3:\n return -1\n if set(ave).issubset(set(a)):\n return b[1]-b[0]\n return -1\nprint(test17())\n","sub_path":"Code/CodeRecords/2795/60586/289677.py","file_name":"289677.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"47916680","text":"from gensim.models import Word2Vec\nimport pickle\n\n\nwith open('label','rb') as mysavedata:\n rate= pickle.load(mysavedata)\nwith open('tokens','rb') as mysavedata:\n tokens = pickle.load(mysavedata)\n\n\nembedding_model = Word2Vec(tokens, size=100, window = 8, min_count=5, workers=4, iter=5, sg=1,alpha=0.05,sample=1e-4,negative=10)\nembedding_model.save('100gpust1s')\n\n\n","sub_path":"embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"385611033","text":"from Utils.metrics import multiclass_log_loss\n\nconfig = {\n 'input_dim': 129,\n 'output_dim': 6,\n 'translate_label_to_one_hot': True,\n 'csv': 'PATH_TO_DATA/data/OpenML/Gas/gas.csv',\n\n 'XGB_objective': 'multi:softmax',\n}\n\nscore_config = {\n 'score_metric': multiclass_log_loss,\n 'score_increases': False,\n 'XGB_eval_metric': 'mlogloss',\n}\n\n\ndef get_configs():\n return config, score_config\n\n\ndef dataset_handler(df):\n map_labels = {\n 1: 0,\n 2: 1,\n 3: 2,\n 4: 3,\n 5: 4,\n 6: 5\n }\n df['Class'] = df['Class'].map(map_labels)\n return df","sub_path":"DNFNet/Competitions/Gas/CompetitionConfig.py","file_name":"CompetitionConfig.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"485410337","text":"import rospy\nimport actionlib\n\nimport control_msgs.msg\n\nlocations = {\n\t'place': {\n\t\t'pos': [-1.00, 4.60, 3.60, 4.70, 0.40, 0.00],\n\t\t'down': [-1.00, 5.00, 4.05, 4.70, 0.40, 0.00],\n\t\t'moves': [\n\t\t\t('joints', 'pos'),\n\t\t\t('joints', 'down'),\n\t\t\t('gripper', 'open'),\n\t\t\t('joints', 'pos'),\n\t\t]\n\t},\n}\n\nobjects = {\n\t'screwdriver': {\n\t\t'hand': True,\n\t\t'pos': [-2.90, 4.20, 3.00, 3.00, -2.81, 0.13],\n\t\t'down': [-2.90, 4.20, 1.65, 3.00, -2.81, 0.13],\n\t\t'moves': [\n\t\t\t('joints', 'pos'),\n\t\t\t('joints', 'down'),\n\t\t\t('gripper', 'close'),\n\t\t\t('joints', 'pos'),\n\t\t]\n\t},\n\t'piece11': {\n\t\t'hand': False,\n\t\t'pos': [-2.80, 4.60, 3.60, 4.70, 0.40, 1.80],\n\t\t'down': [-2.80, 5.10, 4.15, 4.70, 0.40, 1.80],\n\t\t'moves': [\n\t\t\t('joints', 'pos'),\n\t\t\t('wait', 10),\n\t\t\t('joints', 'down'),\n\t\t\t('gripper', 'close'),\n\t\t\t('joints', 'pos'),\n\t\t]\n\t},\n\t'piece12': {\n\t\t'hand': False,\n\t\t'pos': [-2.50, 4.60, 3.60, 4.70, 0.40, 2.20],\n\t\t'down': [-2.50, 5.10, 4.15, 
4.70, 0.40, 2.20],\n\t\t'moves': [\n\t\t\t('joints', 'pos'),\n\t\t\t('wait', 10),\n\t\t\t('joints', 'down'),\n\t\t\t('gripper', 'close'),\n\t\t\t('joints', 'pos'),\n\t\t]\n\t},\n\t'piece21': {\n\t\t'hand': False,\n\t\t'pos': [-2.20, 4.60, 3.60, 4.70, 0.40, 2.20],\n\t\t'down': [-2.20, 5.10, 4.15, 4.70, 0.40, 2.20],\n\t\t'moves': [\n\t\t\t('joints', 'pos'),\n\t\t\t('wait', 10),\n\t\t\t('joints', 'down'),\n\t\t\t('gripper', 'close'),\n\t\t\t('joints', 'pos'),\n\t\t]\n\t},\n\t'piece22': {\n\t\t'hand': False,\n\t\t'pos': [-1.90, 4.60, 3.60, 4.70, 0.40, 2.80],\n\t\t'down': [-1.90, 5.10, 4.15, 4.70, 0.40, 2.80],\n\t\t'moves': [\n\t\t\t('joints', 'pos'),\n\t\t\t('wait', 10),\n\t\t\t('joints', 'down'),\n\t\t\t('gripper', 'close'),\n\t\t\t('joints', 'pos'),\n\t\t]\n\t},\n}\n\nhandovers = {\n\t1: {\n\t\t'base': [-1.60, 5.40, 3.80, 3.00, -2.81, 0.13],\n\t\t'reach': [-1.00, 5.10, 3.70, 3.00, -2.81, 0.13],\n\t\t'moves': [\n\t\t\t('joints', 'base'),\n\t\t\t('joints', 'reach'),\n\t\t\t('wait', 2),\n\t\t\t('gripper', 'open'),\n\t\t\t('joints', 'home'),\n\t\t]\n\t},\n\t2: {\n\t\t'base': [-1.60, 5.40, 3.80, 3.00, -2.81, 0.13],\n\t\t'reach': [-1.00, 5.35, 3.70, 2.50, -2.00, 0.40],\n\t\t'moves': [\n\t\t\t('joints', 'base'),\n\t\t\t('joints', 'reach'),\n\t\t\t('wait', 2),\n\t\t\t('gripper', 'open'),\n\t\t\t('joints', 'home'),\n\t\t]\n\t},\n\t3: {\n\t\t'base': [-1.60, 5.40, 3.80, 3.00, -2.81, 0.13],\n\t\t'reach': [[-1.60, 5.40, 3.80, 3.00, -1.50, 0.13], [-1.00, 5.00, 2.50, 1.70, 0.20, 1.60]],\n\t\t'moves': [\n\t\t\t('joints', 'base'),\n\t\t\t('joints', 'reach'),\n\t\t\t('wait', 2),\n\t\t\t('gripper', 'open'),\n\t\t\t('joints', 'home'),\n\t\t]\n\t},\n\t4: {\n\t\t'base': [-1.60, 5.40, 3.80, 3.00, -2.81, 0.13],\n\t\t'reach': [[-1.60, 5.40, 3.80, 3.00, -1.50, 0.13], [-1.00, 5.20, 2.80, 1.70, 0.20, 3.00]],\n\t\t'moves': [\n\t\t\t('joints', 'base'),\n\t\t\t('joints', 'reach'),\n\t\t\t('wait', 2),\n\t\t\t('gripper', 'open'),\n\t\t\t('joints', 'home')\n\t\t]\n\t},\n\t5: {\n\t\t'base': [-1.60, 5.40, 3.80, 3.00, -2.81, 0.13],\n\t\t'reach': [-1.00, 5.40, 5.20, 3.00, -2.81, 0.13],\n\t\t'moves': [\n\t\t\t('joints', 'base'),\n\t\t\t('joints', 'reach'),\n\t\t]\n\t},\n}\n\nhome_client = None\ngripper_client = None\narm_client = None\n\ndef init_globals():\n\tglobal home_client, gripper_client, arm_client\n\n\tif not home_client:\n\t\thome_client = actionlib.SimpleActionClient('/mico_arm/home_arm', wpi_jaco_msgs.msg.HomeArmAction)\n\t\thome_client.wait_for_server()\n\n\tif not gripper_client:\n\t\tgripper_client = actionlib.SimpleActionClient('/mico_arm/fingers_controller_radian/gripper', control_msgs.msg.GripperCommandAction)\n\t\tgripper_client.wait_for_server()\n\n\tif not arm_client:\n\t\tarm_client = actionlib.SimpleActionClient('/mico_arm/arm_controller/trajectory', control_msgs.msg.FollowJointTrajectoryAction)\n\t\tarm_client.wait_for_server()\n\ndef cancel():\n\tinit_globals()\n\n\thome_client.cancel_all_goals()\n\tgripper_client.cancel_all_goals()\n\tarm_client.cancel_all_goals()\n\ndef wait(secs):\n\trospy.sleep(secs)\n\ndef home(timeout=10):\n\tinit_globals()\n\n\tgoal = wpi_jaco_msgs.msg.HomeArmGoal()\n\n\thome_client.send_goal(goal)\n\n\tif not home_client.wait_for_result(rospy.Duration(timeout)):\n\t\thome_client.cancel_all_goals()\n\ndef gripper(position, timeout=6):\n\tinit_globals()\n\n\tgoal = control_msgs.msg.GripperCommandGoal()\n\n\tgoal.command.position = position\n\n\tgripper_client.send_goal(goal)\n\n\tif not 
gripper_client.wait_for_result(rospy.Duration(timeout)):\n\t\tgripper_client.cancel_all_goals()\n\ndef open():\n\tgripper(0.0000)\n\ndef close():\n\tgripper(1.0472)\n\ndef joints(positions, timeout=10):\n\tinit_globals()\n\n\tgoal = control_msgs.msg.FollowJointTrajectoryGoal()\n\n\tgoal.trajectory.joint_names = ['mico_joint_1', 'mico_joint_2', 'mico_joint_3', 'mico_joint_4', 'mico_joint_5', 'mico_joint_6']\n\n\tif not isinstance(positions[0], list):\n\t\tpositions = [positions]\n\n\tfor position in positions:\n\t\tpoint = trajectory_msgs.msg.JointTrajectoryPoint()\n\t\tpoint.positions = position\n\n\t\tgoal.trajectory.points.append(point)\n\n\tarm_client.send_goal(goal)\n\n\tif not arm_client.wait_for_result(rospy.Duration(timeout)):\n\t\tarm_client.cancel_all_goals()\n\ndef action(action):\n\tfor move, val in action['moves']:\n\t\tif move == 'joints':\n\t\t\tif isinstance(val, str):\n\t\t\t\tif val == 'home':\n\t\t\t\t\thome()\n\t\t\t\telse:\n\t\t\t\t\tjoints(action[val])\n\t\t\telse:\n\t\t\t\tjoints(val)\n\t\telif move == 'gripper':\n\t\t\tif isinstance(val, str):\n\t\t\t\tif val == 'open':\n\t\t\t\t\topen()\n\t\t\t\telif val == 'close':\n\t\t\t\t\tclose()\n\t\t\t\telse:\n\t\t\t\t\tgripper(action[val])\n\t\t\telse:\n\t\t\t\tgripper(val)\n\t\telif move == 'wait':\n\t\t\twait(val)\n\ndef place():\n\taction(locations['place'])\n\ndef hand(level):\n\taction(handovers[level])\n\ndef get(object, level=1):\n\taction(objects[object])\n\n\tif objects[object]['hand']:\n\t\thand(level)\n\telse:\n\t\tplace()\n","sub_path":"baxter_trust_aware/movements.py","file_name":"movements.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"53013489","text":"# Write a recursive function that takes one parameter: n and counts down from n.\n\ndef counter(n):\n if n == 0:\n return n\n else:\n return str(n) + \"\\n\" + str(counter(n - 1))\n\n\nprint(counter(10))\n","sub_path":"week-03/day-04/01counter.py","file_name":"01counter.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"511730223","text":"class Solution:\n def countSubstrings(self, s: str) -> int:\n # Palindromes can be centered around s[i] or around the pair s[i],s[i+1]\n length, ans = len(s), 0\n # Count odd-length palindromes\n for i in range(length):\n l, r = i, i\n while l >= 0 and r < length and s[l] == s[r]:\n ans += 1\n l -= 1\n r += 1\n # Count even-length palindromes\n for j in range(length-1):\n l, r = j, j + 1\n while l >= 0 and r < length and s[l] == s[r]:\n ans += 1\n l -= 1\n r += 1\n return ans \n\n\n# Trick of expanding range. \n\nclass Solution:\n def countSubstrings(self, S):\n N = len(S)\n ans = 0\n for center in range(2*N - 1):\n left = center // 2\n right = left + center % 2\n while left >= 0 and right < N and S[left] == S[right]:\n ans += 1\n left -= 1\n right += 1\n return ans\n\n\n# Naive solution: generate all substrings and check whether each is a palindrome. \n\nclass Solution:\n def countSubstrings(self, s: str) -> int:\n l, res = len(s), 0\n # The outer loop controls the start of the substring. \n for start in range(l+1):\n # This is the end of the substring.\n for end in range(start+1, l+1):\n # End index\n temp = s[start:end]\n if temp == temp[::-1]:\n res += 1\n return res","sub_path":"leetcode/647. 
Palindromic Substrings/soln.py","file_name":"soln.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"139879192","text":"import os\nimport unittest\nfrom HTMLTestRunner import HTMLTestRunner\nfrom Send_email import SendEmail\n\nclass Run_main():\n\n def run_main(self):\n try:\n\n discover = unittest.defaultTestLoader.discover(os.getcwd(), pattern=\"case_*.py\")\n print(discover)\n print(\"jjj\")\n\n path2 = os.path.join(os.getcwd(), \"HTMLReport.html\")\n print(path2)\n with open(path2, 'wb') as f:\n runner = HTMLTestRunner(stream=f,\n title='接口测试报告',\n description='generated by HTMLTestRunner.',\n verbosity=3)\n\n runner.run(discover)\n mail = SendEmail()\n mail.send_mail(\"m18616753564@163.com\", path2)\n\n\n\n except Exception as e:\n print(e)\n\n\nif __name__==\"__main__\":\n run=Run_main()\n run.run_main()\n\n\n\n","sub_path":"page/run_main.py","file_name":"run_main.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"230805941","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom handlers import base\nfrom handlers import mako_test\nfrom handlers import leancloud_handler\nfrom handlers import admin\nfrom handlers import site\nfrom handlers import user\nfrom tornado.web import url\nfrom lib.leancloud_api import LeanCloudApi\n\nclass_name = 'Girls'\n\n\nurl_patterns = [\n url(r'/?', mako_test.ResizeHandler),\n\n url(r'/user/?', user.UserMainHandler),\n url(r'/user/login/?', user.UserLoginHandler),\n url(r'/user/logout/?', user.UserLogoutHandler),\n url(r'/user/register/?', user.UserRegisterHandler),\n\n # admin\n url(r'/admin/?', admin.AdminMainHandler),\n url(r'/admin/login/?', admin.AdminLoginHandler),\n\n #url(r'/admin/(\\w+)/?', admin.AdminHandler),\n #url(r'/admin/(\\w+\\/?\\w+)/data.json', admin.LeanClassHandler),\n\n # site boys\n url(r'/boys/?', site.SiteHandler, dict(class_name='Boys')),\n url(r'/boys/(\\w+-\\w+)/?', site.SiteTagHandler),\n\n # site girls\n url(r'/girls/?', site.SiteHandler, dict(class_name='Girls')),\n url(r'/girls/(\\w+-\\w+)/?', site.SiteTagHandler),\n\n # site gifs\n url(r'/gifs/?', site.SiteHandler, dict(class_name='Gifs')),\n url(r'/gifs/(\\w+-\\w+)/?', site.SiteTagHandler),\n\n # site animals\n url(r'/animals/?', site.SiteHandler, dict(class_name='Animals')),\n url(r'/animals/(\\w+-\\w+)/?', site.SiteTagHandler),\n\n\n\n # leancloud\n url(r'/([-\\w+]+\\/?)+/data.json', leancloud_handler.LeanClassHandler),\n #url(r'/(\\w+\\/?\\w+)/(\\w+)?/data.json', leancloud_handler.LeanClassHandler),\n #url(r'/(\\w+\\/?\\w+-\\w+)/(\\w+)?/data.json', leancloud_handler.LeanClassHandler),\n\n\n #url(r'/mako/?', mako_test.MakoHandler),\n #url(r'/resize/?', mako_test.ResizeHandler),\n #url(r'/tem/?', tem_test.TemHandler),\n\n\n\n url(r'(\\/?\\w*)/data/data1.json', leancloud_handler.LeanHandler,\n dict(class_name=class_name, leancloud_db=LeanCloudApi(class_name))),\n\n\n\n url(r'.*', base.PageNotFoundHandler), # catch return 404 page\n]\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"165522407","text":"import shelve\nfrom stats import *\nimport random\nimport tensorflow as tf\nfrom sklearn.model_selection import StratifiedKFold\nfrom keras.models import Model, Sequential\nfrom keras.layers import Input, Dense, Activation\nfrom numpy 
import array\nfrom numpy import random\n\n\n\n#\n# Global Variables\n#\n\n# Prep_Season - for input we consider season 0 through Prep_Season\n# keep less than PROJ_SEASON unless PROJ_SEASON == 0\nPREP_SEASON = 0\n# Proj_Season - season number we are projecting (0 = rookie season)\n# set to 0 for max season (no point in projecting rookie season itself)\nPROJ_SEASON = 1\n# Ratio of train data to test data\n# ie 0.8 => 80% train, 20% test\nTRAIN_RATIO = 0.8\n# epochs\nEPOCHS = 50\n# size of batches\nBATCH_SIZE = 16\n# folds\nFOLDS = 10\n\n\n# print(\"\\nhi\")\n# print(\"let's make some predictions!\\n\")\n\n\n# Return players list from shelved player_store\ndef get_player_sets():\n # pull players from shelved data\n ps = shelve.open('player_store')\n players = ps['store'].players\n ps.close()\n\n # filter players by seasons played\n if (PROJ_SEASON > 0):\n players = [player for player in players if (len(player.seasons) > PROJ_SEASON)]\n else:\n players = [player for player in players if (len(player.seasons) > PREP_SEASON)]\n\n # split into training and testing sets\n #random.shuffle(players)\n #split_ind = int(len(players) * TRAIN_RATIO)\n #train_set = players[:split_ind]\n #test_set = players[split_ind:]\n\n # get testing and training input and output sets\n X = list(map(lambda x: x.seasons[PREP_SEASON].to_list(), players))\n #x_test = list(map(lambda x: x.seasons[PREP_SEASON].to_list(), test_set))\n if (PROJ_SEASON == 0):\n Y = list(map(lambda x: x.max_season.to_list(), players))\n #y_test = list(map(lambda x: x.max_season.to_list(), test_set))\n else:\n Y = list(map(lambda x: x.seasons[PROJ_SEASON].to_list(), players))\n #y_test = list(map(lambda x: x.seasons[PROJ_SEASON].to_list(), test_set))\n\n return (array(X), array(Y))\n\n\n# Verify shelving/unshelving occured properly\n# Check ages of players as rookies\ndef check_ages(players):\n # print total number of players\n print(str(len(players)) + \" total players\\n\")\n\n print(\"Checking ages:\\n\")\n # initialize array of 40 empty lists (1 for each age)\n ages = [[] for i in range(40)]\n # iterate through all players, add them to their rookie age list\n for player in players:\n ages[player.seasons[0].age].append((player.name, len(player.seasons)))\n\n # print number of players who were rookies at different ages\n for i in range(40):\n if not ages[i]:\n continue # skip if rookie age list is empty\n print(str(i) + \" - \" + str(len(ages[i]))) # Age - Number of players\n if (len(ages[i]) >= 5):\n print(ages[i][:5]) # list of tuples [(Name, Seasons played), ...]\n else:\n print(ages[i])\n print()\n\n\n# Set up Neural Network model\ndef get_model(in_size, out_size):\n h_layer_size = int((in_size + out_size) / 2)\n print(\"generating model\", in_size, h_layer_size, out_size)\n inputs = Input(shape=(in_size,))\n x = Dense(h_layer_size, activation=tf.nn.sigmoid)(inputs)\n x = Dense(h_layer_size, activation=tf.nn.sigmoid)(x)\n x = Dense(h_layer_size, activation=tf.nn.sigmoid)(x)\n #x = Dense(out_size, activation=tf.nn.sigmoid, use_bias=True)(x)\n #x = Dense(out_size, activation='linear', use_bias=True)(x)\n #x = Dense(out_size, activation=tf.nn.sigmoid, use_bias=True)(x)\n #x = Dense(out_size, activation=tf.nn.sigmoid, use_bias=True)(x)\n #x = Dense(out_size, activation=tf.nn.sigmoid, use_bias=True)(x)\n #x = Dense(out_size, activation=tf.nn.sigmoid, use_bias=True)(x)\n outputs = Dense(out_size, activation=tf.nn.sigmoid, use_bias=True)(x)\n model = Model(inputs=inputs, outputs=outputs)\n model.compile(optimizer = 'rmsprop',\n loss = 'mse')\n 
return model\n\n#\n# Helpers and API\n#\ndef get_prep_season():\n global PREP_SEASON\n return PREP_SEASON\n\ndef get_proj_season():\n global PROJ_SEASON\n return PROJ_SEASON\n\ndef print_glossary():\n print(\"----------------\")\n print(\"Metrics Glossary\")\n print(\"----------------\")\n print(\"MSE: mean squared error = average of normalized error\")\n print(\"Validation: mse accuracy of model predicting on validation set\")\n print(\"No Change: prediction is player's stats will not change\")\n print(\"Regr to mean: prediction is player's stats will be halfway between their previous season\")\n print(\" and the league historic average for players in their correlated season\")\n print(\"Pseudo Rand: prediction is player's prep season + 0% to 10% improvement\")\n print(\"Full Rand: prediction is random float between 0.0 and 1.0 denormalized\\n\")\n\ndef get_averages(p_set):\n averages = []\n for i in range(len(p_set[0])):\n sum = 0\n for j in range(len(p_set)):\n sum += p_set[j][i]\n averages.append(sum / len(p_set))\n return averages\n\ndef compare_acc(x, y, model):\n print(\"Comparison metrics\")\n print(\"------------------\")\n predictions = model.predict(x)\n print(\"Validation MSE Acc: \" + str(mse_all(predictions, y)))\n print(\"No Change MSE Acc: \" + str(mse_all(x, y)))\n averages = get_averages(x)\n print(\"Regr to mean MSE Acc: \" + str(mse_all([[(averages[i] + p[i]) / 2 for i in range(len(x[0]))] for p in x], y)))\n print(\"Pseudo Rand MSE Acc: \" + str(mse_all([p + random.uniform(0.0, 0.1) for p in x], y)))\n print(\"Full Rand MSE Acc: \" + str(mse_all([random.uniform(0.0, 1.0, size=len(y[0])) for i in range(len(y))], y)))\n\n\n#\n# Main\n#\ndef get_predictor(prep = 0, proj = 1):\n global PREP_SEASON, PROJ_SEASON\n PREP_SEASON = prep\n PROJ_SEASON = proj\n # get players from shelved player_store\n (X, Y) = get_player_sets()\n model = get_model(len(X[0]), len(Y[0]))\n model.summary()\n print(\"\\n\\nGetting new model...\")\n print(\"Epochs: \" + str(EPOCHS) + \", Batch Size: \" + str(BATCH_SIZE) + \", Folds: \" + str(FOLDS) + \"\\n\")\n print_glossary()\n kfold = StratifiedKFold(n_splits=FOLDS, shuffle=True)\n scores = []\n for i, (train, test) in enumerate(kfold.split(X, Y.argmax(1))):\n model.fit(X[train], Y[train], epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=0)\n score = model.evaluate(X[test], Y[test], verbose=0)\n print(\"\\nloss after fold \" + str(i+1) + \": \" + str(round(score, 5)))\n scores.append(score)\n print(\"\\nave: \" + str(round(sum(scores)/len(scores), 5)))\n compare_acc(X, Y, model)\n # predictions = model.predict(X)\n return model\n\n\n# if __name__ == \"__main__\":\n # main()\n","sub_path":"predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"399179880","text":"import operator\n\nwith open(\"input\") as f:\n content = f.readlines()\n\ninstructions = [x.strip().split(' ') for x in content]\n\nops = {'>': operator.gt,\n '<': operator.lt,\n '>=': operator.ge,\n '<=': operator.le,\n '==': operator.eq,\n '!=': operator.ne,\n 'inc': operator.add,\n 'dec': operator.sub}\n\n\nregisters = {}\nhighest = 0\n\nfor instruction in instructions:\n instr_key = instruction[0]\n instr_op = instruction[1]\n instr_val = int(instruction[2])\n cond_key = instruction[4]\n cond_op = instruction[5]\n cond_val = int(instruction[6])\n \n if instr_key not in registers:\n registers[instr_key] = 0\n if cond_key not in registers:\n registers[cond_key] 
= 0\n if ops[cond_op](registers[cond_key], cond_val):\n registers[instr_key] = ops[instr_op](registers[instr_key], instr_val)\n temp_max = max(registers.values())\n if temp_max > highest:\n highest = temp_max\n \nprint(highest)\n","sub_path":"2017/day08/registers-pt2.py","file_name":"registers-pt2.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"125620859","text":"#!/usr/bin/python3\nimport datetime\nfrom decimal import *\nfrom settings import DATA\nfrom prettytable import PrettyTable\n\nTWOPLACES = Decimal(10) ** -2\n\ndef daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days)):\n yield start_date + datetime.timedelta(n)\n\nclass PayPeriod:\n def __init__(self, calculate=False):\n self.bills = []\n self.expenses = DATA.get('expenses')\n self.delta = None\n self.beginning = None\n self.end = None\n self.total_bills = None\n self.money_left = None\n self.total_expenses = None\n if calculate:\n self.calculate_budget()\n\n def lookup_bills(self):\n if not self.beginning or not self.end:\n self.current_pay_period()\n bill_data = DATA.get('bills')\n\n for day in daterange(self.beginning, self.end):\n x = bill_data.get(day.day)\n if x:\n for bill in x:\n self.bills.append(bill)\n\n def pay_period_delta(self):\n period = DATA.get('pay_period')\n if not period:\n raise ValueError(\"missing 'pay_period' setting\")\n\n unit = period.get('unit')\n value = period.get('value')\n\n if unit == 'week':\n self.delta = datetime.timedelta(weeks=value)\n elif unit == 'day':\n self.delta = datetime.timedelta(days=value)\n elif unit == 'month':\n self.delta = value\n else:\n raise ValueError(\"unknown pay period unit\")\n\n def current_pay_period(self):\n if self.delta is None:\n self.pay_period_delta()\n self.find_pay_period(datetime.date.today())\n\n def next_pay_period(self):\n if self.delta is None:\n self.pay_period_delta()\n\n next_period = PayPeriod()\n next_period.find_pay_period(self.end)\n next_period.calculate_budget()\n\n return next_period\n\n def find_pay_period(self, date):\n if self.delta is None:\n self.pay_period_delta()\n\n pay_date_str = DATA.get('pay_date')\n if not pay_date_str:\n raise ValueError(\"missing 'pay_date' setting\")\n\n pay_period = DATA.get('pay_period')\n if not pay_period:\n raise ValueError(\"missing 'pay_period' setting\")\n\n pay_date_list = pay_date_str.split('/')\n pay_date = datetime.date(int(pay_date_list[0]), int(pay_date_list[1]), int(pay_date_list[2]))\n\n beginning = pay_date\n end = pay_date\n while date >= end:\n beginning = end\n end += self.delta\n\n self.beginning = beginning\n self.end = end\n\n def calculate_budget(self):\n if not self.bills:\n self.lookup_bills()\n\n self.total_bills = Decimal(0)\n for bill in self.bills:\n self.total_bills += bill.get('amt')\n\n self.total_expenses = Decimal(0)\n for expense in self.expenses:\n self.total_expenses += expense.get('amt')\n\n self.money_left = DATA.get(\"pay_amount\") - self.total_bills - self.total_expenses\n\n def print_budget(self):\n if not self.total_bills:\n self.calculate_budget()\n\n print(\"\\nBills\")\n print(\"***************************\")\n bills_table = PrettyTable([\"Description\", \"Amount\"])\n bills_table.align[\"Amount\"] = \"r\"\n for bill in self.bills:\n bills_table.add_row([bill.get('desc'), bill.get('amt').quantize(TWOPLACES)])\n bills_table.add_row([\"----------\", \"-----\"])\n bills_table.add_row([\"Total\", self.total_bills.quantize(TWOPLACES)])\n print(bills_table)\n\n print(\"\\nExpenses\")\n print(\"***************************\")\n expenses_table = PrettyTable([\"Description\", \"Amount\"])\n 
expenses_table.align[\"Amount\"] = \"r\"\n for expense in self.expenses:\n expenses_table.add_row([expense.get('desc'), expense.get('amt').quantize(TWOPLACES)])\n expenses_table.add_row([\"----------\", \"-----\"])\n expenses_table.add_row([\"Total\", self.total_expenses.quantize(TWOPLACES)])\n print(expenses_table)\n\n print(\"\\nTotals\")\n print(\"***************************\")\n print(\"Expenses Total: \" + str(self.total_expenses.quantize(TWOPLACES)))\n print(\"Money After Bills + Expenses: \" + str(self.money_left.quantize(TWOPLACES)))\n next_period = self.next_pay_period()\n money_left_next = next_period.money_left\n print(\"Money After Bills + Expenses Next: \" + str(money_left_next.quantize(TWOPLACES)))\n\n if money_left_next <= 200:\n print(\"Not enough money left next paycheck!! Expenses being rolled together!\")\n self.expenses += next_period.expenses\n self.calculate_budget()\n self.money_left = self.money_left + money_left_next + next_period.total_expenses\n print(\"Money left after combining: \" + str(self.money_left.quantize(TWOPLACES)))\n\n print(\"\\nPercent Expenses\")\n print(\"***************************\")\n\n total_perc_expenses = Decimal(0)\n perc_expenses = PrettyTable([\"Description\", \"Amount\"])\n perc_expenses.align[\"Amount\"] = \"r\"\n for perc_expense in DATA.get(\"perc_expenses\", []):\n amt = perc_expense.get('perc') * self.money_left\n total_perc_expenses += amt\n perc_expenses.add_row([perc_expense.get('desc'), amt.quantize(TWOPLACES)])\n perc_expenses.add_row([\"----------\", \"-----\"])\n perc_expenses.add_row([\"Total\", total_perc_expenses.quantize(TWOPLACES)])\n print(perc_expenses)\n\n money_left_perc = self.money_left - total_perc_expenses\n\n print(\"\\nAllowances\")\n print(\"***************************\")\n allowance_table = PrettyTable([\"Description\", \"Amount\"])\n allowance_table.align[\"Amount\"] = \"r\"\n for allowance in DATA.get(\"allowances\", []):\n amt = allowance.get('perc') * money_left_perc\n allowance_table.add_row([allowance.get('desc'), amt.quantize(TWOPLACES)])\n print(allowance_table)\n\npay_period = PayPeriod()\npay_period.print_budget()\n","sub_path":"budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":5987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"653331705","text":"# -*- coding: utf-8 -*-\n# @Author: wfy\n# @Date: 2020-04-10 15:31:44\n# @Last Modified by: wfy\n# @Last Modified time: 2020-04-12 20:23:06\nimport matplotlib.pyplot as plt\n\n\nclass Solution():\n \"\"\"to achieve SJF\"\"\"\n\n def __init__(self):\n super(Solution, self).__init__()\n self.data = []\n self.name = ''\n self.service_time = 0\n self.arrival_time = 0\n self.state = ''\n self.number = 0\n self.timeout = 0\n self.start = 0\n self.end = 0\n\n def insert_data(self, name, arrival_time, service_time):\n self.data.append({\n 'id': self.number,\n 'name': name,\n 'arrival_time': int(arrival_time),\n 'service_time': int(service_time),\n 'state': 'w',\n 'turnaround_time': 0,\n 'authorized_turnover_time': 0,\n 'start': 0,\n 'end': 0\n })\n self.timeout = max(self.timeout, int(arrival_time))\n self.number += 1\n\n def get_data_file(self):\n with open('data.txt', \"r\", encoding=\"utf-8\") as file:\n for line in file.read().splitlines():\n name, arrival_time, service_time = line.split()\n self.insert_data(name, arrival_time, service_time)\n file.close()\n # initial queue\n self.data.sort(key=lambda x: (x['arrival_time'], x['service_time']))\n # update id\n for i in 
range(self.number):\n self.data[i]['index'] = i\n\n def get_data_input(self):\n print('How many tasks do you want input?')\n tasks_number = int(input('Please enter an integer of type int:'))\n print('Please enter name and arrival_time and service_time of task')\n print('such as:A 0 5')\n for _ in range(tasks_number):\n name, arrival_time, service_time = input('Please enter\\n').split()\n self.insert_data(name, arrival_time, service_time)\n # initial queue\n self.data.sort(key=lambda x: (x['arrival_time'], x['service_time']))\n # update id\n for i in range(self.number):\n self.data[i]['index'] = i\n\n def show_data_running(self, start, end, data):\n print('-'*40)\n print(\"from {:} to {:}\".format(start, end))\n print(\"task name:{:}\".format(data['name']))\n print(\"task state:{:}\\n\".format('R'))\n\n def show_data(self):\n print(\"{:<6}{:<10}{:<10}{:<10}{:<6}{:<8}{:<7}{:<6}\".format(\n 'name', 'arr_time', 'ser_time', 'state', '周转时间', '带权周转时间', 'start', 'end'))\n for task in sorted(self.data, key=lambda x: x['id']):\n print(\"{:<6}{:<10}{:<10}{:<10}{:<10}{:<14.2f}{:<7}{:<4}\".format(\n task['name'],\n task['arrival_time'],\n task['service_time'],\n task['state'],\n task['turnaround_time'],\n task['authorized_turnover_time'],\n task['start'],\n task['end']))\n\n def cmp(self):\n '''the method of sort'''\n return lambda x: (x['service_time'], x['arrival_time'], x['index'])\n\n def sort_data(self, data):\n return sorted(data, key=self.cmp())\n\n def update_information(self, index, start, end):\n self.data[index]['start'] = start\n self.data[index]['end'] = end\n self.data[index]['state'] = 'f'\n self.data[index]['turnaround_time'] = end - \\\n self.data[index]['arrival_time']\n self.data[index]['authorized_turnover_time'] = self.data[index]['turnaround_time'] / \\\n self.data[index]['service_time']\n self.start = start\n self.end = end\n self.show_data_running(start, end, self.data[index])\n\n def get_next_data(self, index, data):\n # get tasks from the beginning to the end of the current task\n result = [x for x in self.data if x['arrival_time'] <=\n self.end and x['state'] == 'w' and x not in data]\n if result or data:\n return result\n # no tasks entered at current time\n for task in self.data:\n if task['state'] == 'w':\n self.start = self.end = task['arrival_time']\n return [task]\n return []\n\n def implement(self):\n '''start algorithm'''\n # data get and maintain the right task\n data = [self.data[0]]\n self.start = self.end = data[0]['arrival_time']\n while data:\n self.update_information(\n data[0]['index'], self.end, self.end + data[0]['service_time'])\n data += self.get_next_data(data.pop(0)['index'], data)\n data = self.sort_data(data)\n self.data.sort(key=lambda x: x['id'])\n\n def get_y_ticks(self):\n return [x['id'] for x in self.data] + [self.data[-1]['id'] + 1], [x['name'] for x in self.data] + ['']\n\n def init_image(self):\n plt.figure('SJF', figsize=(10, 5))\n self.drow_image()\n plt.xticks([i for i in range(self.end + 3)])\n plt.title('the time of task about SJF')\n plt.xlabel('')\n plt.ylabel('tasks')\n plt.yticks(self.get_y_ticks()[0], self.get_y_ticks()[1])\n\n def drow_image(self):\n for task in self.data:\n plt.plot([task['start'], task['end']],\n [task['id'], task['id']],\n label=task['name'],\n lw=2)\n # annotation of the key point\n plt.plot([task['end'], task['end']],\n [-1, task['id']],\n 'k--',\n lw=1)\n plt.legend(loc='best')\n\n def set_ax(self):\n ax = plt.gca()\n ax.spines['right'].set_color('none')\n ax.spines['bottom'].set_color('none')\n 
ax.xaxis.set_ticks_position('top')\n ax.invert_yaxis()\n ax.grid(True, linestyle='-.')\n\n def show_image(self):\n self.init_image()\n self.set_ax()\n plt.savefig('SJF.png', dpi=300)\n plt.show()\n\n def main(self):\n if input('Do you want to get data from a file? y/Y or n/N\n') in ['y', 'Y']:\n SJF.get_data_file()\n else:\n SJF.get_data_input()\n SJF.show_data()\n SJF.implement()\n SJF.show_data()\n SJF.show_image()\n\n\nif __name__ == '__main__':\n try:\n SJF = Solution()\n SJF.main()\n except Exception as e:\n print('An exception', e)\n else:\n print('All finished')\n finally:\n print('finally')\n","sub_path":"操作系统/541807220143_王飞宇_实验1/lab_one.py","file_name":"lab_one.py","file_ext":"py","file_size_in_byte":6465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"58210813","text":"from django.shortcuts import render, redirect\nfrom companies.models import Company\n\ndef index(request):\n\tcontext = {'company_list': Company.objects.all()}\n\treturn render(request, 'companies/index.html', context)\n\ndef create(request):\n\tname = request.POST.get('name')\n\tphone_number = request.POST.get('phone_number')\n\temail = request.POST.get('email')\n\tnew_company = Company.objects.create(name = name, phone_number = phone_number, email = email)\n\treturn redirect('companies:index')\n\ndef read(request, company_id):\n\tcompany = Company.objects.get(id = company_id)\n\treturn render(request, 'companies/read.html', {'company': company})\n\ndef update(request, company_id):\n\tcompany = Company.objects.get(id = company_id)\n\treturn render(request, 'companies/update.html', {'company': company})\n\ndef write_db(request, company_id):\n\tcompany = Company.objects.get(id = company_id)\n\tname = request.POST.get('name')\n\tphone_number = request.POST.get('phone_number')\n\temail = request.POST.get('email')\n\tcompany.name = name\n\tcompany.phone_number = phone_number\n\tcompany.email = email\n\tcompany.save()\n\treturn redirect('companies:read', company_id = company_id)\n\ndef delete(request, company_id):\n\tcompany = Company.objects.get(id = company_id)\n\tcompany.delete()\n\treturn redirect('companies:index')\n","sub_path":"crud/project/companies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"184331777","text":"# TensorFlow and tf.keras\r\nimport tensorflow as tf\r\nimport PIL\r\nfrom PIL import Image, ImageOps\r\n\r\n# Helper libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef plot_image(i, predictions_array, true_label, img):\r\n true_label, img = true_label[i], img[i]\r\n plt.grid(False)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.imshow(img, cmap=plt.cm.binary)\r\n predicted_label = np.argmax(predictions_array)\r\n if predicted_label == true_label:\r\n color = 'blue'\r\n else:\r\n color = 'red'\r\n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\r\n 100*np.max(predictions_array),\r\n class_names[true_label]),\r\n color=color)\r\n\r\ndef plot_value_array(i, predictions_array, true_label):\r\n true_label = true_label[i]\r\n plt.grid(False)\r\n plt.xticks(range(10))\r\n plt.yticks([])\r\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\r\n plt.ylim([0, 1])\r\n predicted_label = np.argmax(predictions_array)\r\n thisplot[predicted_label].set_color('red')\r\n thisplot[true_label].set_color('blue')\r\n\r\ndef pic_to_array(image_name):\r\n pic = Image.open(image_name)\r\n pic 
= ImageOps.grayscale(pic)\r\n pic = ImageOps.invert(pic)\r\n pic = ImageOps.fit(pic, (28,28))\r\n pic.save(image_name[:-4] + \"_small.jpg\")\r\n return np.array(pic) / 255.0\r\n\r\nprint(tf.__version__)\r\n\r\nfashion_mnist = tf.keras.datasets.fashion_mnist\r\n\r\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\r\n\r\ntrain_images = train_images / 255.0\r\n\r\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\r\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\r\n\r\nmodel = tf.keras.Sequential([\r\n tf.keras.layers.Flatten(input_shape=(28, 28)),\r\n tf.keras.layers.Dense(128, activation='relu'),\r\n tf.keras.layers.Dense(10)\r\n])\r\nmodel.compile(optimizer='adam',\r\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\r\n metrics=['accuracy'])\r\nmodel.fit(train_images, train_labels, epochs=10)\r\n\r\nprobability_model = tf.keras.Sequential([model, \r\n tf.keras.layers.Softmax()])\r\n\r\n\r\npic1 = pic_to_array(\"jeans.png\")\r\npic2 = pic_to_array(\"sneaker.png\")\r\npic3 = pic_to_array(\"purse.png\")\r\n\r\ncustom_images = np.array([pic1, pic2, pic3])\r\ncustom_labels = [1, 7, 8]\r\n\r\npredictions = probability_model.predict(custom_images)\r\n\r\nnum_rows = 1\r\nnum_cols = 3\r\nnum_images = num_rows*num_cols\r\nplt.figure(figsize=(2*2*num_cols, 2*num_rows))\r\nfor i in range(num_images):\r\n plt.subplot(num_rows, 2*num_cols, 2*i+1)\r\n plot_image(i, predictions[i], custom_labels, custom_images)\r\n plt.subplot(num_rows, 2*num_cols, 2*i+2)\r\n plot_value_array(i,predictions[i], custom_labels)\r\n\r\nplt.tight_layout()\r\nplt.show()","sub_path":"labs/lab-11/checkpoint3.py","file_name":"checkpoint3.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"14653062","text":"def cut(deck,increment):\n topDeck = deck[0:increment]\n bottomDeck = deck[increment:]\n deck = bottomDeck + topDeck\n return deck\n\ndef deal(deck,increment):\n if increment == 0:\n deck.reverse()\n else:\n newDeck = [None for i in range(0,len(deck))]\n currentPosition = 0\n while len(deck) > 0:\n newDeck[currentPosition] = deck.pop(0)\n currentPosition += increment\n if currentPosition > len(newDeck):\n currentPosition = currentPosition%len(newDeck)\n deck = newDeck\n return deck\n\nf = open('Day22Input.txt')\ninstructions = []\nfor line in f:\n instruction = line.strip('\\n').split(' ')\n if instruction[0] == 'deal':\n if instruction[2] == 'new':\n instructions.append(['deal',int(0)])\n if instruction[2] == 'increment':\n instructions.append(['deal',int(instruction[3])])\n elif instruction[0] == 'cut':\n instructions.append(['cut',int(instruction[1])])\nf.close()\n\ndeck = [x for x in range(0,119315717514047)]\nfor instruction in instructions:\n if instruction[0] == 'deal':\n deck = deal(deck,instruction[1])\n elif instruction[0] == 'cut':\n deck = cut(deck,instruction[1])\nprint(deck[2019])","sub_path":"Day22Part2.py","file_name":"Day22Part2.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"373485471","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\ndef KLD_uniform_loss(z_logits):\n eps = 1e-20\n b, lat_dim, h, w = z_logits.size()\n log_prior = torch.log(torch.ones((b, lat_dim, h * w), device=z_logits.device) / lat_dim)\n\n z_probs = F.softmax(z_logits, dim=1)\n\n z_dist_flatten = z_probs.view(b, lat_dim, -1)\n 
log_z_dist = torch.log(z_dist_flatten + eps)\n\n KLD = torch.sum(z_dist_flatten * (log_z_dist - log_prior), dim=[1, 2]).mean()\n return KLD\n\n\ndef KLD_codes_uniform_loss(z):\n eps = 1e-20\n b, lat_dim, h, w = z.size()\n N = h * w\n\n log_prior = torch.log(torch.ones((b, lat_dim), device=z.device) / lat_dim)\n\n z_dist_flatten = z.sum(dim=[2, 3]) / N\n log_z_dist = torch.log(z_dist_flatten + eps)\n\n KLD = torch.sum(z_dist_flatten * (log_z_dist - log_prior), dim=1).mean()\n return KLD\n\n\nclass TemperatureAnnealer:\n def __init__(self, start_temp=1, end_temp=1/16, n_steps=100000):\n self.start_temp = start_temp\n self.end_temp = end_temp\n self.n_steps = n_steps\n self.k = (end_temp - start_temp) / (n_steps - 1)\n self.b = start_temp - self.k\n\n def step(self, step):\n if step == 0:\n return self.start_temp\n elif step > self.n_steps:\n return self.end_temp\n else:\n return self.k * step + self.b\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\nclass KLDWeightAnnealer:\n def __init__(self, start_lambda=0, end_lambda=5, n_steps=5000):\n self.start_lambda = start_lambda\n self.end_lambda = end_lambda\n self.n_steps = n_steps\n self.lin_space = np.linspace(-5, 5, n_steps)\n\n def step(self, step):\n if step == 0:\n return self.start_lambda\n elif step >= self.n_steps:\n return self.end_lambda\n else:\n return self.start_lambda + sigmoid(self.lin_space[step]) * self.end_lambda\n\n\n","sub_path":"legacy_code/train_utils/dvae_utils.py","file_name":"dvae_utils.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"186081840","text":"# Determine which number occurs most often in the array\nimport random\n\narray = [random.randint(1, 5) for _ in range(10)]\ndic = {}\nprint(array)\nmax_count = 0\nmax_number = None\nfor i in array:\n if dic.get(i) is None:\n dic[i] = 1\n else:\n dic[i] += 1\n if dic[i] > max_count:\n max_count = dic[i]\n max_number = i\nprint(dic)\nprint(max_number)\n","sub_path":"Lesson3/les_3_task_4.py","file_name":"les_3_task_4.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"292043955","text":"# Problem: input a year, month and day, and determine which day of that year it is.\n# A common year has 365 days (months 1-12 have 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30 and 31 days).\n# A leap year has 366 days (months 1-12 have 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30 and 31 days).\nrun_year={}\nyear1=[1,3,5,7,8,10,12]\nyear2=[4,6,9,11]\nfor year in year1:\n run_year[year]='31'\nfor year in year2:\n run_year[year]='30'\n# nyr=input(\"Enter year, month and day, e.g. 20180514:\\n\")\n# str = []\n# str.append(nyr)\n# year = str[0:3]\n# month = str[4:5]\n# day = str[6:7]\n\n\nyear=input(\"Enter the year:\\n\")\nmonth=input(\"Enter the month:\\n\")\nday=input(\"Enter the day:\\n\")\n\n\n\nif (int(year)%4==0 and int(year)%100!=0) or int(year)%400==0:\n print(year+\" is a leap year\")\n run_year[2]='29'\nelse:\n print(year + \" is a common year\")\n run_year[2]='28'\n\nfor k,v in run_year.items():\n if int(month)==int(k):\n month_day1=int(v)\n month_day=0\n while(k!=0):\n month_day=month_day+int(run_year[k])\n k=k-1\nday_all=month_day-month_day1+int(day)\nprint(day_all)","sub_path":"python_exercise_100/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"298152321","text":"from data.config import DATABASE\nfrom utils.db_api.db_commands import add_item, add_mainmenu, add_choiseln\n\nimport asyncio\n\nfrom utils.db_api.database import create_db\n\n# Use this function to populate the database with items\nfrom utils.db_api.dishes import add_dishe\nfrom utils.db_api.models import MainMenu, ChoiseLang\n\n\nasync def add_mainmenus():\n list_temp = [(1, '📖 Меню', '📖 Меню'),\n (2, '😏 Мій заказ', '😏 Мой заказ'),\n (3, '🎁 Акції', '🎁 Акции'),\n (4, '😍 Улюблене', '😍 Избранное'),\n (5, '⏰ Час роботи', '⏰ Время работы'),\n (6, '☎️ Контакти', '☎️ Контакты'),\n (7, '📝 Про ресторан', '📝 О ресторане'),\n (8, '🇺🇦/🇷🇺 Змінити мову', '🇷🇺/🇺🇦 Сменить язык')]\n for i in list_temp[:]:\n print(\"key: \" + str(i[0]))\n if (await MainMenu.select('id').where(MainMenu.id == i[0]).gino.scalar()) == i[0]:\n print(\"Already exists\")\n # print(await MainMenu.select('id').where(MainMenu.id == i[0]).gino.scalar())\n else:\n print(\"Adding\")\n await add_mainmenu(id=i[0],\n uk=i[1],\n ru=i[2])\n\n\nasync def add_choiselang():\n list_temp = [(1, 'Обрати українську мову 🇺🇦', 'uk'),\n (2, 'Выбрать русский язык 🇷🇺', 'ru')]\n for i in list_temp[:]:\n if (await ChoiseLang.select('id').where(ChoiseLang.id == i[0]).gino.scalar()) == i[0]:\n pass\n else:\n await add_choiseln(id=i[0],\n choice_lang=i[1],\n index_lang=i[2])\n\n\ndef start_bd():\n loop = asyncio.get_event_loop()\n loop.run_until_complete(create_db())\n loop.run_until_complete(add_mainmenus())\n loop.run_until_complete(add_choiselang())\n loop.run_until_complete(add_dishe())","sub_path":"utils/db_api/add_to__database.py","file_name":"add_to__database.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"380750881","text":"# SUMMARY: parse_plot.py\n# USAGE: parse plot output file of eSTOMP, save as json file\n# ORG: Pacific Northwest National Laboratory\n# AUTHOR: Xuehang Song\n# E-MAIL: xuehang.song@pnnl.gov\n# ORIG-DATE: June-2019\n# DESCRIPTION:\n# DESCRIP-END.\n# COMMENTS: only deals with cartesian structured grids\n#\n# Last Change: 2019-06-10\n\n\nimport numpy as np\nimport re\nimport json\nimport argparse\n\n\ndef length_conversion(x):\n return {\n 'a': 1e-10,\n 'ang': 1e-10,\n 'angstrom': 1e-10,\n 'ao': 1e-10,\n 'cm': 0.01,\n 'ffl': 109.728,\n 'ft': 0.3048,\n 'furlong': 201.168,\n 'm': 1,\n 'mi': 1609.344,\n 'mile': 1609.344,\n \"mm\": 0.001,\n 'rod': 5.0292,\n 'yd': 0.9144\n }.get(x, 1)\n\n\ndef retrieve_node_value(plot, iline, nx, ny, nz):\n \"\"\"\n retrieve node value\n \"\"\"\n node_value = []\n while len(node_value) < nx*ny*nz:\n line_data = [float(x) for x in plot[iline].split(\" \") if x]\n node_value += line_data\n iline += 1\n return node_value, iline\n\n\ndef read_args():\n \"\"\"\n read parameters\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-dir',\n '--output_dir',\n type=str,\n default=\"/pic/projects/dvz/xhs_simus/NDAA/collect_final_results/glass/LAWA44_StompC/\")\n parser.add_argument('-file',\n '--plot_file',\n default=\"/pic/projects/dvz/xhs_simus/NDAA/collect_final_results/glass/LAWA44_StompC/plot.158\")\n args = vars(parser.parse_args())\n return(args)\n\n\nargs = read_args()\nfor ikey, ivari in args.items():\n exec(ikey + '=ivari')\n\njson_file = output_dir + \"json.\" + re.split('[\\\\./]', plot_file)[-1]\nplot_dict = dict()\n\n# read raw data\nwith open(plot_file, \"r\") as f:\n plot = f.readlines()\n\n# remove comments and blank lines in input deck\nplot = [re.split('[#!\\n]', x)[0] for x in plot]\nplot = [x.lower() for x in plot if x]\n\n# remove header\nn_header = [i for i, j in enumerate(plot[0:50]) if j[0:6] == \"number\"][0]\nplot = plot[n_header:]\nitime = int(plot[0].split(\" 
\")[-1])\nt = float(plot[1].split(\" \")[3].split(\",\")[0])\nplot_dict[\"file\"] = plot_file\nplot_dict[\"itime\"] = itime\nplot_dict[\"t\"] = t\n\n# get nx,ny,nz,ox,oy,oz\nnx = int(plot[2].split(\" \")[-1])\nny = int(plot[3].split(\" \")[-1])\nnz = int(plot[4].split(\" \")[-1])\nox_unit = plot[5].split(\" \")[2]\nox = float(plot[5].split(\" \")[-1])*length_conversion(ox_unit)\noy_unit = plot[6].split(\" \")[2]\noy = float(plot[6].split(\" \")[-1])*length_conversion(oy_unit)\noz_unit = plot[7].split(\" \")[2]\noz = float(plot[7].split(\" \")[-1])*length_conversion(oz_unit)\nplot_dict[\"ox\"] = ox\nplot_dict[\"oy\"] = oy\nplot_dict[\"oz\"] = oz\n\n# get x,y,z\nx_unit = plot[8].split(\" \")[-1]\nx, iline = retrieve_node_value(plot, 9, nx, ny, nz)\nx = np.array(x[0:nx])*length_conversion(x_unit)\ny_unit = plot[iline].split(\" \")[-1]\ny, iline = retrieve_node_value(plot, iline+1, nx, ny, nz)\ny = np.array([y[nx*iy] for iy in range(ny)])*length_conversion(y_unit)\nz_unit = plot[iline].split(\" \")[-1]\nz, iline = retrieve_node_value(plot, iline+1, nx, ny, nz)\nz = np.array([z[nx*ny*iz] for iz in range(nz)])*length_conversion(z_unit)\nplot_dict[\"x\"] = x.tolist()\nplot_dict[\"y\"] = y.tolist()\nplot_dict[\"z\"] = z.tolist()\n\n# get dx, dy, dz,ex,ey,ez\ndx = [(x[0]-ox)*2]\nfor ix in range(nx-1):\n dx.append((x[ix+1]-x[ix])*2-dx[ix])\ndx = np.array(dx)\ndy = [(y[0]-oy)*2]\nfor iy in range(ny-1):\n dy.append((y[iy+1]-y[iy])*2-dy[iy])\ndy = np.array(dy)\ndz = [(z[0]-oz)*2]\nfor iz in range(nz-1):\n dz.append((z[iz+1]-z[iz])*2-dz[iz])\ndz = np.array(dz)\nplot_dict[\"dx\"] = dx.tolist()\nplot_dict[\"dy\"] = dy.tolist()\nplot_dict[\"dz\"] = dz.tolist()\nex = x[-1]+dx[-1]*0.5\ney = y[-1]+dy[-1]*0.5\nez = z[-1]+dz[-1]*0.5\nplot_dict[\"ex\"] = ex\nplot_dict[\"ey\"] = ey\nplot_dict[\"ez\"] = ez\n\nplot_dict[\"varis\"] = dict()\nwhile len(plot[iline:]) > 0:\n ivari = plot[iline].split(\",\")[0]\n plot_dict[\"varis\"][ivari] = dict()\n if len(plot[iline].split(\",\")) > 1:\n plot_dict[\"varis\"][ivari][\"unit\"] = plot[iline].split(\",\")[-1]\n else:\n plot_dict[\"varis\"][ivari][\"unit\"] = \"\"\n value, iline = retrieve_node_value(\n plot, iline+1, nx, ny, nz)\n plot_dict[\"varis\"][ivari][\"value\"] = (\n np.array(value).reshape((nx, ny, nz), order=\"F\")).tolist()\nwith open(json_file, \"w\") as f:\n json.dump(plot_dict, f, indent=4)\n","sub_path":"parse_plot.py","file_name":"parse_plot.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"653575267","text":"\n\n#calss header\nclass _CEDAR():\n\tdef __init__(self,): \n\t\tself.name = \"CEDAR\"\n\t\tself.definitions = [u'a tall, wide evergreen tree (= one that never loses its leaves)', u'the wood of this tree']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_cedar.py","file_name":"_cedar.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"152313500","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2014 Denys Duchier, IUT d'Orléans\n#==============================================================================\n\nfrom tornado.template import Template\nfrom mud.models.mixins.evented import Evented\nfrom mud.models.mixins.propertied import Propertied\nfrom 
mud.models.mixins.containing import Containing\nimport re\n\nclass Event(Evented, Propertied):\n\n NAME = None\n SEND_ACTOR = \"send_result\"\n SEND_OBSERVER = \"send_info\"\n\n def __init__(self):\n super().__init__()\n self._effects_executed = False\n self._failed = False\n\n def execute(self):\n self.perform()\n self.execute_effects()\n\n def perform(self):\n raise NotImplementedError()\n\n def execute_effects(self):\n if not self._effects_executed and not self._failed:\n self._effects_executed = True\n for effect in self.get_effects(self.NAME):\n effect.execute()\n\n def fail(self):\n self._failed = True\n\n def format(self, template, **kargs):\n context = self.context()\n context.update(kargs)\n return Template(template).generate(**context).decode()\n\n def to_html(self, text):\n text = text.strip()\n if not text or text[0]==\"<\":\n return text\n text = re.sub(r\"(?:(?:^|\\n)\\s*){2,}\", r\"\\n\\n\", text)\n html = [\"<p>%s</p>\" % s for s in text.split(r\"\\n\\n\")]\n return \"\\n\".join(html)\n\n def buffer_clear(self):\n self.HTML = []\n\n def buffer_append(self, html):\n self.HTML.append(html)\n\n def buffer_htmlize(self, text, omit_first_p=False):\n if not text:\n return\n text = text.strip()\n if not text or text[0]==\"<\":\n return self.buffer_append(text)\n text = re.sub(r\"(?:(?:^|\\n)\\s*){2,}\", r\"\\n\\n\", text)\n first = True\n for item in text.split(r\"\\n\\n\"):\n if first and omit_first_p:\n self.buffer_append(item)\n else:\n self.buffer_append(\"<p>%s</p>\" % item)\n first = False\n\n def buffer_inform(self, dotpath, **kargs):\n text = self.get_template(dotpath, **kargs)\n if text:\n html = self.format(text, **kargs)\n self.buffer_htmlize(html)\n\n def buffer_get(self):\n try:\n return \"\\n\".join(self.HTML)\n finally:\n self.buffer_clear()\n\n def buffer_peek(self, what, **kargs):\n text = what.get_template(\"info.actor\", peeked=what)\n if text:\n html = self.format(text, peeked=what, **kargs)\n self.buffer_append(\"
<li>\")\n self.buffer_htmlize(html, omit_first_p=True)\n self.buffer_append(\"</li>\")\n\n\nclass Event1(Event):\n\n def __init__(self, actor):\n Event.__init__(self)\n self.actor = actor\n\n def context(self):\n context = super().context()\n context[\"actor\"] = self.actor\n actor = self.actor\n if isinstance(actor, Containing):\n loc = actor\n elif hasattr(actor, \"container\"):\n loc = actor.container()\n else:\n loc = None\n context[\"location\"] = loc\n return context\n\n def observers(self):\n cont = self.actor.container()\n if cont:\n for x in cont.contents():\n if x is not self.actor and x.is_player() and x.can_see():\n yield x\n\n def inform(self, dotpath, **kargs):\n self.buffer_clear()\n self.buffer_inform(dotpath+\".actor\")\n html = self.buffer_get()\n if html:\n getattr(self.actor, self.SEND_ACTOR)(html)\n for observer in self.observers():\n self.buffer_clear()\n self.buffer_inform(dotpath+\".observer\", observer=observer)\n html = self.buffer_get()\n if html:\n getattr(observer, self.SEND_OBSERVER)(html)\n\n\nclass Event2(Event1):\n\n def __init__(self, actor, object):\n Event1.__init__(self, actor)\n self.object = object\n\n def context(self):\n context = super().context()\n context[\"object\"] = self.object\n if hasattr(self.object, \"is_exit\") and self.object.is_exit():\n context[\"exit\"] = self.object\n context[\"portal\"] = self.object.portal\n return context\n\n def get_event_templates(self):\n return self.object.get_event_templates()\n\n\nclass Event3(Event2):\n\n def __init__(self, actor, object, object2):\n Event2.__init__(self, actor, object)\n self.object2 = object2\n\n def context(self):\n context = super().context()\n context[\"object2\"] = self.object2\n return context\n","sub_path":"mud/events/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"104884462","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul 09 13:21:58 2017\r\n\r\n@author: Can Serif Mekik\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\n\r\n\r\ndef geometric_mean(v):\r\n \"\"\"Compute the geometric mean of vector v.\"\"\"\r\n \r\n return np.prod(v)**(1.0/v.shape[0])\r\n\r\n\r\ndef avg_binary_cross_entropy(posterior, prior):\r\n \"\"\"Compute average cross-entropy of posterior binary distributions \r\n relative to prior distributions.\"\"\"\r\n \r\n return np.sum(posterior*np.log(posterior/prior) + \\\r\n (1-posterior)*np.log((1-posterior)/(1-prior))) \r\n\r\n\r\ndef softmin(v, temp):\r\n \"\"\"Compute softmin scores for elements of vector v with given temperature\r\n temp.\r\n \"\"\"\r\n \r\n return np.exp(-v/temp) / np.sum(np.exp(-v/temp))\r\n\r\n\r\ndef similarity_based_selector(subj_proba_path, clip=False, temperature=1.):\r\n \"\"\"Compute answer selection probabilities based on a database of binary\r\n subjective probability distributions for sequence features.\r\n\r\n kwargs: \r\n subj_proba_path: str, path to subjective probability distribution \r\n database. \r\n clip: float, probability value clip to ensure cross-entropy \r\n computations are well-behaved. Removes extreme probability values \r\n (0, 1). \r\n temperature: float, temperature parameter for softmin function. 
\r\n \"\"\"\r\n \r\n \r\n ### Load subjective probabilities ###\r\n index_col_names = [\"Item ID\", \"V-Type\", \"V-Dir\", \"V-Num\"]\r\n subj_probas = pd.read_csv(subj_proba_path, index_col=index_col_names)\r\n subj_probas = subj_probas.unstack(\"V-Dir\")\r\n \r\n # Clip subj_probas to ensure cross-entropy is well-behaved\r\n if clip is not False:\r\n subj_probas = subj_probas.clip(clip, 1. - clip)\r\n \r\n \r\n ### Construct priors ###\r\n \r\n # Get matrix sequence subjective probabilities\r\n mat_seq_indices = subj_probas.index.isin([\"Mat\"], level=\"V-Type\")\r\n mat_seq_subj_proba = subj_probas.loc[mat_seq_indices]\r\n \r\n # Compute priors\r\n priors = mat_seq_subj_proba.groupby(level=[\"Item ID\"])\r\n priors= priors.transform(geometric_mean)\r\n \r\n \r\n ### Construct posteriors ###\r\n \r\n # Get matrix alternative sequence subjective probabilities\r\n alt_seq_indices = subj_probas.index.isin([\"Alt\"], level=\"V-Type\")\r\n posteriors = subj_probas.loc[alt_seq_indices]\r\n \r\n \r\n ### Compute similarity scores ###\r\n \r\n # Utility function to compute average cross entropy \r\n aggr = lambda posterior: avg_binary_cross_entropy(\r\n posterior.values, \r\n priors.loc[posterior.name[0], \r\n posterior.index.values].values)\r\n \r\n # Compute average similarity scores for all matrices along all axes\r\n H_d = posteriors.apply(aggr, axis=1)\r\n \r\n # Average similarity scores over axes\r\n H = H_d.groupby(level=[\"Item ID\", \"V-Num\"]).mean()\r\n \r\n \r\n ### Compute selection probabilities ###\r\n\r\n _softmin = lambda H_m: softmin(H_m, temperature)\r\n \r\n # For each item, select alternative with smallest average magnitude of \r\n # feature-wise differences.\r\n selection_probas = H.groupby(level=[\"Item ID\"])\r\n selection_probas = selection_probas.transform(_softmin)\r\n \r\n return selection_probas\r\n\r\n\r\ndef evaluate_on_matrices(selection_probas, answer_path):\r\n \r\n # Load correct responses\r\n answers = pd.read_csv(answer_path, index_col=[\"Item ID\"])\r\n answers = answers.Answer\r\n \r\n # Get probability of selecting answers\r\n pr_ans = selection_probas.loc[list(zip(answers.index.values, \r\n answers.values))]\r\n pr_ans.index = pr_ans.index.droplevel(\"V-Num\")\r\n pr_ans.name = \"Probability\"\r\n \r\n # Create table with correct responses, estimates, and estimate evaluations. 
\r\n res = pd.DataFrame([answers, pr_ans]).T\r\n expected = np.mean(res.Probability.values)*answers.shape[0]\r\n \r\n return res, expected\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \r\n SUBJ_PROBA_PATH = os.path.join(\"Data\",\"labels.csv\")\r\n CLIP = 1e-6\r\n TEMPERATURE = 1\r\n\r\n ANSWER_PATH = os.path.join(\"Data\", \"items.csv\")\r\n \r\n selection_probas = similarity_based_selector(SUBJ_PROBA_PATH, \r\n CLIP, \r\n TEMPERATURE)\r\n \r\n res, expected = evaluate_on_matrices(selection_probas, ANSWER_PATH) ","sub_path":"archive/2018/2018-09/2018-09-24-e01/py/selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"280807113","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import View\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.models import User\nfrom django.utils.http import is_safe_url\n\nfrom config.settings.base import get_secret\n\n\ndef convert_tags(subject, message, user):\n \"\"\"\n Converts the following tags in the message or subject line of an e-mail to their equivalent for the given user:\n [first] = first name\n [last] = last name\n [full] = full name\n [user] = username\n [pwrd] = password\n :param subject: string containing the subject line to be sent\n :param message: string containing the message to be sent\n :param user: User object containing data for the given user\n :return: a subject string and message string with all the tags filled in\n \"\"\"\n user_info = {'[first]':user.first_name,\n '[last]': user.last_name,\n '[full]': user.get_full_name(),\n '[user]': user.username,\n '[pwd]': get_secret(user.username.upper())}\n for tag in user_info.keys():\n subject = subject.replace(tag, user_info[tag])\n message = message.replace(tag, user_info[tag])\n\n return subject, message\n\n\nclass SendMail(View):\n template_name = 'mail/send-mail.html'\n\n def get(self, request):\n users = User.objects.all()\n context = {'users': users}\n return render(request, self.template_name, context)\n\n def post(self, request):\n if request.POST['button'] == 'send':\n recipients = list(set(request.POST.getlist('recipients')))\n subject_template = request.POST['subject']\n message_template = request.POST['message']\n for recipient in recipients:\n member = User.objects.get(username=recipient)\n subject, message = convert_tags(subject_template, message_template, member)\n send_mail(subject, message,\n 'Jim@christmas.jmorris.webfactional.com',\n [member.email], fail_silently=False)\n\n return redirect('gift:home')","sub_path":"c19/mail/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"180335887","text":"import pytest\nfrom sklearn import neural_network as nn_\n\nfrom dask_ml import neural_network as nn\nfrom dask_ml.utils import assert_estimator_equal\n\n\n@pytest.mark.filterwarnings(\"ignore::FutureWarning\")\nclass TestMLPClassifier:\n @pytest.mark.filterwarnings(\"ignore::DeprecationWarning\")\n def test_basic(self, single_chunk_classification):\n X, y = single_chunk_classification\n a = nn.ParitalMLPClassifier(classes=[0, 1], random_state=0)\n b = nn_.MLPClassifier(random_state=0)\n\n a.fit(X, y)\n b.partial_fit(X, y, classes=[0, 1])\n\n assert_estimator_equal(a, b)\n\n\n@pytest.mark.filterwarnings(\"ignore::FutureWarning\")\nclass TestMLPRegressor:\n def test_basic(self, 
{"seq_id":"180335887","text":"import pytest\nfrom sklearn import neural_network as nn_\n\nfrom dask_ml import neural_network as nn\nfrom dask_ml.utils import assert_estimator_equal\n\n\n@pytest.mark.filterwarnings(\"ignore::FutureWarning\")\nclass TestMLPClassifier:\n @pytest.mark.filterwarnings(\"ignore::DeprecationWarning\")\n def test_basic(self, single_chunk_classification):\n X, y = single_chunk_classification\n a = nn.ParitalMLPClassifier(classes=[0, 1], random_state=0)\n b = nn_.MLPClassifier(random_state=0)\n\n a.fit(X, y)\n b.partial_fit(X, y, classes=[0, 1])\n\n assert_estimator_equal(a, b)\n\n\n@pytest.mark.filterwarnings(\"ignore::FutureWarning\")\nclass TestMLPRegressor:\n def test_basic(self, single_chunk_classification):\n X, y = single_chunk_classification\n a = nn.ParitalMLPRegressor(random_state=0)\n b = nn_.MLPRegressor(random_state=0)\n a.fit(X, y)\n b.partial_fit(X, y)\n assert_estimator_equal(a, b)\n","sub_path":"tests/linear_model/test_neural_network.py","file_name":"test_neural_network.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"433244037","text":"import dataclasses\nfrom typing import Optional, Tuple\n\nfrom starkware.cairo.lang.compiler.ast.cairo_types import (\n CairoType, TypeFelt, TypePointer, TypeStruct, TypeTuple)\nfrom starkware.cairo.lang.compiler.ast.code_elements import (\n CodeElementEmptyLine, CodeElementFunction)\nfrom starkware.cairo.lang.compiler.ast.formatting_utils import get_max_line_length\nfrom starkware.cairo.lang.compiler.error_handling import Location\nfrom starkware.cairo.lang.compiler.identifier_manager import IdentifierManager\nfrom starkware.cairo.lang.compiler.identifier_utils import get_struct_definition\nfrom starkware.cairo.lang.compiler.parser import parse\nfrom starkware.cairo.lang.compiler.preprocessor.identifier_aware_visitor import (\n IdentifierAwareVisitor)\nfrom starkware.cairo.lang.compiler.preprocessor.preprocessor_error import PreprocessorError\nfrom starkware.starknet.definitions.constants import STARKNET_LANG_DIRECTIVE\nfrom starkware.starknet.public.abi import MAX_STORAGE_ITEM_SIZE, get_storage_var_address\n\nSTORAGE_VAR_DECORATOR = 'storage_var'\nSTORAGE_VAR_ATTR = 'storage_var'\n\n\ndef get_return_type(elm: CodeElementFunction) -> CairoType:\n returns_single_value = elm.returns is not None and len(elm.returns.identifiers) == 1\n if not returns_single_value:\n raise PreprocessorError(\n 'Storage variables must return exactly one value.',\n location=elm.returns.location if elm.returns is not None else elm.identifier.location)\n assert elm.returns is not None\n return elm.returns.identifiers[0].get_type()\n\n\ndef generate_storage_var_functions(\n elm: CodeElementFunction, addr_func_body: str,\n read_func_body: str, write_func_body: str, is_impl: bool) -> CodeElementFunction:\n var_name = elm.identifier.name\n autogen_filename = \\\n f'autogen/starknet/storage_var/{var_name}/{\"impl\" if is_impl else \"decl\"}.cairo'\n\n code = f\"\"\"\\\nnamespace {var_name}:\n from starkware.starknet.common.storage import (\n Storage, normalize_address, storage_read, storage_write)\n from starkware.cairo.common.cairo_builtins import HashBuiltin\n from starkware.cairo.common.hash import hash2\n\n func addr{{range_check_ptr, pedersen_ptr : HashBuiltin*}}() -> (res : felt):\n {addr_func_body}\n end\n\n func read{{storage_ptr : Storage*, range_check_ptr, pedersen_ptr : HashBuiltin*}}():\n {read_func_body}\n end\n\n func write{{storage_ptr : Storage*, range_check_ptr, pedersen_ptr : HashBuiltin*}}(\n value : felt):\n {write_func_body}\n end\nend\\\n\"\"\"\n\n res = parse(autogen_filename, code, 'code_element', CodeElementFunction)\n\n variable_type = get_return_type(elm=elm)\n\n # Copy the arguments and return values.\n assert isinstance(res, CodeElementFunction) and res.element_type == 'namespace'\n addr_func = res.code_block.code_elements[4].code_elm\n assert isinstance(addr_func, CodeElementFunction) and addr_func.element_type == 'func' and \\\n addr_func.identifier.name == 'addr'\n addr_func.arguments = elm.arguments\n\n read_func = res.code_block.code_elements[6].code_elm\n assert isinstance(read_func, CodeElementFunction) and read_func.element_type == 'func' and \\\n read_func.identifier.name == 'read'\n read_func.arguments = elm.arguments\n read_func.returns = elm.returns\n\n write_func = res.code_block.code_elements[8].code_elm\n assert isinstance(write_func, CodeElementFunction) and write_func.element_type == 'func' and \\\n write_func.identifier.name == 'write'\n # Append the value argument to the storage var arguments.\n write_func.arguments = dataclasses.replace(\n elm.arguments,\n identifiers=elm.arguments.identifiers + [\n dataclasses.replace(\n write_func.arguments.identifiers[0],\n expr_type=variable_type)])\n\n # Format and re-parse to get locations to a well-formatted code.\n res = parse(\n autogen_filename, res.format(get_max_line_length()), 'code_element', CodeElementFunction)\n\n res.additional_attributes[STORAGE_VAR_ATTR] = elm\n\n return res\n\n\ndef process_storage_var(visitor: IdentifierAwareVisitor, elm: CodeElementFunction):\n for commented_code_elm in elm.code_block.code_elements:\n code_elm = commented_code_elm.code_elm\n if not isinstance(code_elm, CodeElementEmptyLine):\n if hasattr(code_elm, 'location'):\n location = code_elm.location # type: ignore\n else:\n location = elm.identifier.location\n raise PreprocessorError(\n 'Storage variables must have an empty body.',\n location=location)\n\n if elm.implicit_arguments is not None:\n raise PreprocessorError(\n 'Storage variables must have no implicit arguments.',\n location=elm.implicit_arguments.location)\n\n for decorator in elm.decorators:\n if decorator.name != STORAGE_VAR_DECORATOR:\n raise PreprocessorError(\n 'Storage variables must have no decorators in addition to '\n f'@{STORAGE_VAR_DECORATOR}.',\n location=decorator.location)\n\n for arg in elm.arguments.identifiers:\n arg_type = arg.get_type()\n if not isinstance(arg_type, TypeFelt):\n raise PreprocessorError(\n 'Only felt arguments are supported in storage variables.',\n location=arg_type.location)\n\n unresolved_return_type = get_return_type(elm=elm)\n return_type = visitor.resolve_type(unresolved_return_type)\n if not check_felts_only_type(cairo_type=return_type, identifier_manager=visitor.identifiers):\n raise PreprocessorError(\n 'The return type of storage variables must consist of felts.',\n location=elm.returns.location if elm.returns is not None else elm.identifier.location)\n var_size = visitor.get_size(return_type)\n\n if var_size > MAX_STORAGE_ITEM_SIZE:\n raise PreprocessorError(\n f'The storage variable size ({var_size}) exceeds the maximum value '\n f'({MAX_STORAGE_ITEM_SIZE}).',\n location=elm.returns.location if elm.returns is not None else elm.identifier.location)\n\n var_name = elm.identifier.name\n addr = storage_var_name_to_base_addr(var_name)\n addr_func_body = f'let res = {addr}\\n'\n for arg in elm.arguments.identifiers:\n addr_func_body += \\\n f'let (res) = hash2{{hash_ptr=pedersen_ptr}}(res, {arg.identifier.name})\\n'\n if len(elm.arguments.identifiers) > 0:\n addr_func_body += 'let (res) = normalize_address(addr=res)\\n'\n addr_func_body += 'return (res=res)\\n'\n\n args = ', '.join(arg.identifier.name for arg in elm.arguments.identifiers)\n\n read_func_body = f'let (storage_addr) = addr({args})\\n'\n for i in range(var_size):\n read_func_body += \\\n f'let (__storage_var_temp{i}) = storage_read(address=storage_addr + {i})\\n'\n # Copy the return implicit args and the return values to a contiguous segment.\n read_func_body += \"\"\"\ntempvar storage_ptr = storage_ptr\ntempvar range_check_ptr = range_check_ptr\ntempvar pedersen_ptr = pedersen_ptr\n\"\"\"\n for i in range(var_size):\n read_func_body += f'tempvar __storage_var_temp{i} : felt = __storage_var_temp{i}\\n'\n unresolved_return_type_ptr = TypePointer(pointee=unresolved_return_type)\n read_func_body += \\\n f'return ([cast(&__storage_var_temp0, {unresolved_return_type_ptr.format()})])'\n\n write_func_body = f'let (storage_addr) = addr({args})\\n'\n for i in range(var_size):\n write_func_body += \\\n f'storage_write(address=storage_addr + {i}, value=[cast(&value, felt) + {i}])\\n'\n write_func_body += 'return ()\\n'\n return generate_storage_var_functions(\n elm, addr_func_body=addr_func_body, read_func_body=read_func_body,\n write_func_body=write_func_body, is_impl=True)\n\n\ndef storage_var_name_to_base_addr(var_name: str) -> int:\n \"\"\"\n Returns the base address of a StarkNet Storage variable, ignoring the storage var arguments.\n \"\"\"\n\n return get_storage_var_address(var_name=var_name)\n\n\ndef is_storage_var(elm: CodeElementFunction) -> Tuple[bool, Optional[Location]]:\n \"\"\"\n Returns whether the given function has the storage var decorator. If it does, the location of\n the decorator is returned.\n \"\"\"\n for decorator in elm.decorators:\n if decorator.name == STORAGE_VAR_DECORATOR:\n return True, decorator.location\n return False, None\n\n\ndef check_felts_only_type(cairo_type: CairoType, identifier_manager: IdentifierManager) -> bool:\n \"\"\"\n A felts-only type is defined to be either felt or a struct whose members are all felts-only types.\n Return True if the given type is felts-only.\n \"\"\"\n\n if isinstance(cairo_type, TypeFelt):\n return True\n elif isinstance(cairo_type, TypeStruct):\n struct_definition = get_struct_definition(\n cairo_type.resolved_scope, identifier_manager=identifier_manager)\n for member_def in struct_definition.members.values():\n res = check_felts_only_type(\n member_def.cairo_type, identifier_manager=identifier_manager)\n if not res:\n return False\n return True\n elif isinstance(cairo_type, TypeTuple):\n for item_type in cairo_type.members:\n res = check_felts_only_type(item_type, identifier_manager=identifier_manager)\n if not res:\n return False\n return True\n else:\n return False\n\n\nclass StorageVarDeclVisitor(IdentifierAwareVisitor):\n \"\"\"\n Replaces @storage_var decorated functions with a namespace with empty functions.\n After the struct collection phase is completed, those functions will be replaced by\n functions with full implementation.\n \"\"\"\n\n def _visit_default(self, obj):\n return obj\n\n def visit_CodeElementFunction(self, elm: CodeElementFunction):\n storage_var, storage_var_location = is_storage_var(elm)\n if storage_var:\n if self.file_lang != STARKNET_LANG_DIRECTIVE:\n raise PreprocessorError(\n '@storage_var can only be used in source files that contain the '\n '\"%lang starknet\" directive.',\n location=storage_var_location)\n # Add dummy references and calls that will be visited by the identifier collector\n # and the dependency graph.\n # Those statements will later be replaced by the real implementation.\n addr_func_body = \"\"\"\nlet res = 0\ncall hash2\ncall normalize_address\n\"\"\"\n read_func_body = \"\"\"\nlet storage_addr = 0\ncall addr\ncall storage_read\n\"\"\"\n write_func_body = \"\"\"\nlet storage_addr = 0\ncall addr\ncall storage_write\n\"\"\"\n return generate_storage_var_functions(\n elm, addr_func_body=addr_func_body, read_func_body=read_func_body,\n write_func_body=write_func_body, is_impl=False)\n\n return elm\n\n\nclass StorageVarImplentationVisitor(IdentifierAwareVisitor):\n \"\"\"\n Replaces @storage_var decorated functions (obtained from the additional attribute\n STORAGE_VAR_ATTR added by StorageVarDeclVisitor) with a namespace with read() and write()\n functions.\n \"\"\"\n\n def _visit_default(self, obj):\n return obj\n\n def visit_CodeElementFunction(self, elm: CodeElementFunction):\n attr = elm.additional_attributes.get(STORAGE_VAR_ATTR)\n if attr is None:\n return elm\n\n assert isinstance(attr, CodeElementFunction)\n return process_storage_var(self, attr)\n","sub_path":"vendor/cairo-lang/src/starkware/starknet/compiler/storage_var.py","file_name":"storage_var.py","file_ext":"py","file_size_in_byte":11613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"459276151","text":"import re\r\nMAXLINEA=1000 #Maximum length of a line FILE\r\nMAXPAL=37 #Number of reserved words\r\nMAXDIGIT=5 #Maximum number of digits in integers FILE\r\nMAXDECIMAL=5 #Maximum number of digits after the decimal point\r\nMAXID=10 #Maximum identifier length FILE\r\nLONG_FECHA=30 #Maximum date length\r\nMAXIT=100 #Maximum symbol table size\r\n\r\ntry:\r\n with open(\"../params.txt\") as fp:\r\n line=fp.readline()\r\n lines=re.findall(r'\\d+',line)\r\n numero=int(lines[0])\r\n MAXLINEA=numero\r\n line=fp.readline()\r\n lines=re.findall(r'\\d+',line)\r\n numero=int(lines[0])\r\n MAXDIGIT=numero\r\n line=fp.readline()\r\n lines=re.findall(r'\\d+',line)\r\n numero=int(lines[0])\r\n MAXID=numero\r\nexcept Exception as ex:\r\n print(\"File not found \"+ str(ex))\r\n","sub_path":"ide/objeto/teoserver/objeto/parametros.py","file_name":"parametros.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"123439585","text":"import struct\n\nif __package__:\n from . import codes\n\nelse:\n import codes\n\n# This is the first process for negotiation\n\"\"\"\nNaming Rules:\n 1. Method\n cli_encode() means:\n 1. cli: The method is for client use\n 2. encode: The method is for encoding data to bytes\n \n 2. Local variables\n data_raw means bytes\n data means decoded data\n\"\"\"\n\n# This is for accelerating struct pack speed\ntwo_B_struct = struct.Struct(\">BB\")\n\ndef srv_decode(data_raw):\n ver, nums_method = two_B_struct.unpack(data_raw[:2])\n methods = struct.unpack_from(\">\" + str(nums_method)+\"B\", data_raw, 2)\n\n return (ver, methods)\n\ndef srv_encode(method, version=codes.VERSION[\"SOCKS5\"]):\n return two_B_struct.pack(version, method)\n\ndef cli_decode(data_raw):\n return two_B_struct.unpack(data_raw)\n\ndef cli_encode(methods, version=codes.VERSION[\"SOCKS5\"]):\n if type(methods) is int:\n methods = (methods, )\n return two_B_struct.pack(version, len(methods)) + struct.pack(\">\"+str(len(methods))+\"B\", *methods)\n\nif __name__ == \"__main__\":\n print(srv_decode(b\"\\x01\\x02\\x03\\x04\"))\n print(srv_encode(codes.METHOD[\"NONEED\"]))\n print(cli_encode((3, 4, 6)))\n","sub_path":"Assignment4/proxy/src/SOCKS/resolver/nego.py","file_name":"nego.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"399677258","text":"# Enriched Timeline Merger\n# REMEMBER TO RUN WITH PYTHON3\n\n# csv module to read and write csv files\n# glob module finds all pathnames matching specified pattern\n# using os to find file size for merging algo\nimport csv, glob, os, codecs\n\n########## Functions ##############\n\n# function takes csv file read object and returns rows in that file\ndef createDataRows(csvreader):\n rows = []\n\n # iterate each row and add to rows array\n for row in csvreader:\n rows.append(row)\n\n return rows\n\n# Where the magic happens ;)\n# take in first file and second file being read\ndef appendNewData(original, newData):\n\n # iterate over the original file and use enumerate function to access index\n # all files should have same indexes based on universal headers\n for index, value in enumerate(original):\n # skip EventID and Title data cells\n if index == 0 or index == 1:\n continue\n # remainder of content is compared\n if original[index] != newData[index]:\n # as long as string does not already exist in that cell it will be appended\n if original[index].find(newData[index]) == -1:\n original[index] = original[index] + ' | ' + newData[index]\n\n return original\n\n# read original file and file to be merged\ndef readAndModify(file1, file2):\n # initialize modified data\n modifiedData = []\n headers = []\n\n # open file in read mode and assign it a variable for file object\n # third argument encoding system for Unicode (to not get hexadecimal values while parsing)\n with codecs.open(file1, mode='r', encoding='utf-8', errors='strict') as reader1, open(file2, mode='r', encoding='utf-8', errors='strict') as reader2:\n # create csv reader object from file object\n csvreader1 = csv.reader(reader1)\n csvreader2 = csv.reader(reader2)\n\n # checks if first master is being used (blank file)\n newMaster = os.stat(file1).st_size == 0\n\n # extract headers from master or merge file\n if newMaster == True:\n headers = next(csvreader2)\n else:\n headers = next(csvreader1)\n\n # extracting rest of data from each file\n rows1 = createDataRows(csvreader1)\n rows2 = createDataRows(csvreader2)\n\n # iterate over both data sets\n # if rows are both at index 1 (Title Header)\n # use appendNewData function to compare that row in both files\n ogTitles = []\n for x in rows1:\n ogTitles.append(x[1])\n append = False\n for y in rows2:\n if x[1] == y[1]:\n append = True\n\n if append == True:\n appendNewData(x,y)\n append = False\n\n # if a new blank master is being used there is no data\n # second set of rows will populate master (merger file)\n if newMaster == True:\n modifiedData = rows2\n else:\n modifiedData = rows1\n\n # no need to check for new rows on blank master\n # all rows are copied on first pass\n if newMaster == False:\n # check merge file for new rows not in master (skip headers)\n for y in rows2[1:]:\n found = y[1] not in ogTitles\n if found == True:\n modifiedData.append(y)\n\n # close opened files after reading\n reader1.close()\n reader2.close()\n\n return [headers, modifiedData]\n\n# open master, write headers (results[0]), write data (results[1])\ndef writeNewFile(results):\n with codecs.open('master.csv', mode='w', encoding='utf-8', errors='strict') as master:\n master_writer = csv.writer(master)\n\n master_writer.writerow(results[0])\n\n for x in results[1]:\n master_writer.writerow(x)\n\n master.close()\n\n########## Main ##############\nif __name__ == \"__main__\":\n # create new master file\n with codecs.open('master.csv', mode='w', encoding='utf-8', errors='strict') as master:\n pass\n master.close()\n\n # list of files with ext .csv in current working directory\n files = glob.glob(\"*.csv\")\n\n print('Merging the following Files:\\n')\n # iterate over the list of files and merge into master file\n for file in files:\n if file == 'master.csv':\n if os.stat(file).st_size == 0:\n continue\n print(file)\n result = readAndModify('master.csv', file)\n writeNewFile(result)\n\n print('\\nmaster.csv has been created/updated')\n print('\\nMerging complete')","sub_path":"etm.py","file_name":"etm.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"548647766","text":"import numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import ndimage\r\n\r\n# install numpy with: pip install numpy\r\n# install cv2 with: pip install opencv-python\r\n# install scipy with: pip install scipy\r\n\r\n# 1) read image\r\nimg = cv2.imread(\"hyades.jpg\")\r\n\r\n# 2) convert to grayscale. Colours are not needed\r\nimg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n# 3) Binarize image with a threshold, all pixels with a value over threshold\r\n# will be set to max_val, all others to zero\r\n# results in binarized image of ones and zeros\r\n# The lower the threshold, the more stars\r\nmax_val = 1\r\nthreshold = 220\r\n_, img_bin = cv2.threshold(img_gray, threshold, max_val, cv2.THRESH_BINARY)\r\n\r\n# 4) The actual smart function.\r\n# Assigns each connected area of pixels with non-zero values a label\r\n# ranging from 0 (background) to number of stars (1 - number of stars)\r\n# https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.label.html\r\nlabeled, nr_objects = ndimage.label(img_bin)\r\n# Print number of stars\r\nprint(\"Number of stars is\", nr_objects)\r\n\r\n# 5) Find geometric center of each label (star with a certain area)\r\n# this corresponds to the middle of the star\r\n\r\n# The coordinates of the stars will be gathered here\r\nstars = []\r\n# iterate through labels\r\nfor label in range(1, nr_objects):\r\n indices = np.argwhere(labeled == label)\r\n y_indices = indices[:, 0]\r\n x_indices = indices[:, 1]\r\n\r\n # geometric center\r\n iy_mean = int(np.mean(y_indices))\r\n ix_mean = int(np.mean(x_indices))\r\n\r\n stars.append([ix_mean, iy_mean])\r\n\r\n# Result: Coordinates of stars in a 2D array\r\nstars = np.array(stars)\r\n\r\n# VISUALIZATION\r\n# Shows the raw image\r\nplt.figure(1)\r\nplt.subplot(2, 2, 1)\r\nplt.imshow(img)\r\nplt.title(\"Raw image\")\r\n\r\n# Shows the gray scaled image\r\nplt.subplot(2, 2, 2)\r\nplt.imshow(img_gray, cmap=\"gray\")\r\nplt.title(\"Gray-scaled image\")\r\n\r\n# Shows the binarized image\r\nplt.subplot(2, 2, 3)\r\n# Attention: not all stars are shown due to visualization problems of python\r\n# Therefore, the stars in the binarized image are enlarged to make them visible in the plot\r\nkernel = np.ones((2, 2), np.uint8)\r\nimg_bin_vis = cv2.dilate(img_bin, kernel)\r\nplt.imshow(img_bin_vis, cmap=\"gray\")\r\nplt.title(\"Binarised image\")\r\n\r\n# Shows the labels of the stars decoded as a colour\r\nplt.subplot(2, 2, 4)\r\nplt.imshow(labeled, cmap=\"gist_rainbow\")\r\nplt.title(\"Each star is decoded with a colour/label from 1 to number of stars.\")\r\n\r\n# Show the coordinates of the stars marked with a blue circle\r\n# More stars can be found if the threshold is lowered.\r\nplt.figure(2)\r\nplt.subplot(1, 2, 1)\r\nplt.imshow(img, cmap=\"gray\")\r\nplt.title(\"Raw image\")\r\nplt.subplot(1, 2, 2)\r\nplt.scatter(stars[:, 0], stars[:, 1], s=10, facecolors='none', edgecolors='blue')\r\nplt.imshow(img, cmap=\"gray\")\r\nplt.title(\"marked coordinates of stars\")\r\n\r\n# Triggers the visualization\r\nplt.show()\r\n","sub_path":"star.py","file_name":"star.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"451930352","text":"\n\n\"\"\"\nPlot clusters (properly).\n\"\"\"\n\nimport os\nimport numpy as np\nfrom astropy.table import Table, join\nimport matplotlib.pyplot as plt\n\ntry:\n unrave, dr4\n\nexcept NameError:\n from rave_io import get_cannon_dr1, get_rave_kordopatis_dr4\n\n clusters = Table.read(\"../../RAVEDR4_OC.fits\")\n\n unrave = join(get_cannon_dr1(), clusters, keys=(\"Name\",))\n dr4 = join(get_rave_kordopatis_dr4(), clusters, keys=(\"Name\",))\n\nelse:\n print(\"Warning: Using pre-loaded data\")\n\n\n\n# QC and labels for DR4\ndr4_ok = np.ones(len(dr4), dtype=bool)\ndr4_teff, dr4_logg, dr4_feh, dr4_e_teff, dr4_e_logg, dr4_e_feh \\\n = (\"TeffK_1\", \"loggK_1\", \"c_M_H_K_1\", \"e_TeffK_1\", \"e_loggK_1\", \"e__M_H_K_1\")\n\n\nunrave_ok = unrave[\"QC\"]\nunrave_teff, unrave_logg, unrave_feh, unrave_e_teff, unrave_e_logg, unrave_e_feh \\\n = (\"TEFF\", \"LOGG\", \"FE_H\", \"E_TEFF\", \"E_LOGG\", \"E_FE_H\")\n\niso_teff, iso_logg = (\"Teff\", \"logG\")\n\n\n\n\n\n\nkwds = dict(cmap=\"plasma\", vmin=-0.5, vmax=0.5, s=80)\n\ncluster_names = sorted(set(clusters[\"Cluster\"]))\nK = len(cluster_names)\n\n\nfactor = 3.5\nlbdim = 0.2 * factor\nrdim = 0.1 * factor\ntdim = -0.20 * factor\nwhspace = 0.05\nxspace = factor * 2 + whspace\nyspace = factor * K + factor * (K - 1) * whspace + lbdim * (K - 1)\nxdim = lbdim + xspace + rdim\nydim = lbdim + yspace + tdim\n\nfig, axes = plt.subplots(K, 2, figsize=(xdim, ydim))\nfig.subplots_adjust(\n left=lbdim/xdim, bottom=lbdim/ydim, right=(xspace + lbdim)/xdim,\n top=(yspace + lbdim)/ydim, wspace=whspace, hspace=whspace)\n\n\nfor i, (row, cluster_name) in enumerate(zip(axes, cluster_names)):\n\n isochrone, isochrone_path = (None, \"{}_padova_iso.dat\".format(cluster_name))\n if os.path.exists(isochrone_path):\n isochrone = Table.read(isochrone_path, format=\"ascii\")\n isochrone[iso_teff] = 10**isochrone[\"logTe\"]\n\n match = (dr4[\"Cluster\"] == cluster_name) * dr4_ok\n assert sum(match) > 0\n\n # Show LHS RAVE DR4\n ax = row[0]\n ax.text(0.15, 0.82, r\"${{\\rm {0}}}$\".format(cluster_name.strip().replace(\"NGC\", \"NGC\\,\")),\n horizontalalignment=\"left\", transform=ax.transAxes, fontsize=16)\n Nstars = np.isfinite(dr4[dr4_teff][match] * dr4[dr4_logg][match]).sum()\n ax.text(0.15, 0.74, r\"${0}$ ${{\\rm stars}}$\".format(Nstars),\n horizontalalignment=\"left\", transform=ax.transAxes, fontsize=16)\n \n ax.errorbar(dr4[dr4_teff][match], dr4[dr4_logg][match],\n xerr=dr4[dr4_e_teff][match], yerr=dr4[dr4_e_logg][match], \n fmt=None, ecolor=\"#666666\", zorder=-1)\n ax.scatter(dr4[dr4_teff][match], dr4[dr4_logg][match], \n c=dr4[dr4_feh][match], **kwds)\n\n if isochrone is not None:\n ax.plot(isochrone[iso_teff], isochrone[iso_logg], c=\"#666666\", lw=2,\n zorder=-1)\n\n match = (unrave[\"Cluster\"] == cluster_name) * unrave_ok\n assert sum(match) > 0\n\n # Show RHS unRAVE\n ax = row[1]\n ax.text(0.15, 0.82, r\"${{\\rm {0}}}$\".format(cluster_name.strip().replace(\"NGC\", \"NGC\\,\")),\n horizontalalignment=\"left\", transform=ax.transAxes, fontsize=16)\n Nstars = np.isfinite(unrave[unrave_teff][match] * unrave[unrave_logg][match]).sum()\n ax.text(0.15, 0.74, r\"${0}$ ${{\\rm stars}}$\".format(Nstars),\n horizontalalignment=\"left\", transform=ax.transAxes, fontsize=16)\n\n ax.errorbar(unrave[unrave_teff][match], unrave[unrave_logg][match],\n xerr=unrave[unrave_e_teff][match], yerr=unrave[unrave_e_logg][match], \n fmt=None, ecolor=\"#666666\", zorder=-1)\n scat = ax.scatter(unrave[unrave_teff][match], unrave[unrave_logg][match], \n c=unrave[unrave_feh][match], **kwds)\n\n if isochrone is not None:\n ax.plot(isochrone[iso_teff], isochrone[iso_logg], c=\"k\", lw=1.5,\n zorder=-1)\n\n\nfor ax in axes.flatten():\n ax.set_xlim(7500, 3500)\n ax.set_ylim(5.5, -0.5)\n\n ax.set_xticks([7000, 6000, 5000, 4000])\n ax.set_yticks([5, 4, 3, 2, 1, 0])\n\n if not ax.is_last_row():\n ax.set_xticklabels([])\n\n else:\n if ax.is_first_col():\n ax.set_xlabel(r\"$T_{\\rm eff}$ $[{\\rm K}]$\")\n\n else:\n ax.set_xlabel(r\"$T_{\\rm eff}$ $[{\\rm K}]$\")\n\n\n if ax.is_first_col():\n ax.set_ylabel(r\"$\\log{g}$\")\n\n else:\n ax.set_yticklabels([])\n\n if ax.is_first_row():\n if ax.is_first_col():\n ax.set_title(r\"${\\rm RAVE}$ ${\\rm DR4}$\")\n else:\n ax.set_title(r\"${\\rm \\it{RAVE}}{\\rm -on}$\")\n\nfig.subplots_adjust(top=0.9)\ncbar = plt.colorbar(scat, \n cax=fig.add_axes([fig.subplotpars.left, 0.93, fig.subplotpars.right - fig.subplotpars.left, 0.02]),\n orientation='horizontal', ticks=[-2.5, -2, -1.5, -1, -0.5, 0, 0.5])\ncbar.ax.xaxis.set_ticks_position('top')\ncbar.ax.xaxis.set_label_position('top')\n\ncbar.set_label(r\"$[{\\rm Fe/H}]$\")\n\n\n\nfig.savefig(\"open-clusters.pdf\", dpi=300)\nfig.savefig(\"open-clusters.png\")\n\n\n","sub_path":"article/figures/plot_open_clusters.py","file_name":"plot_open_clusters.py","file_ext":"py","file_size_in_byte":4766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"653957147","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport bootstrapped.bootstrap as bs\nimport bootstrapped.stats_functions as bs_stats\n\nfrom .regularization import ModelTunerCV\nfrom sklearn.model_selection import train_test_split, cross_val_score, cross_validate, StratifiedKFold, KFold, GridSearchCV\nfrom tqdm import tqdm\n\n\nclass CVFoldResult:\n def __init__(self, model_name, fold, opt_factor, score):\n self.model_name = model_name\n self.fold = fold\n self.opt_factor = opt_factor\n self.score = score\n\n def get_score(self):\n return (self.model_name, self.fold, self.score)\n\n\nclass ModelEvaluationExperiment:\n def __init__(self, datasets, model_type, n_splits=10, tuning_size=0.3, n_jobs=2):\n self.n_splits = n_splits\n self.tuning_size = tuning_size\n self.datasets = datasets\n self.model_type = model_type\n self.n_jobs = n_jobs\n\n def start(self, models):\n self.dataset_results = {}\n for name, data in tqdm(self.datasets):\n cv_results = self.k_fold_cross_validation(\n data, models, self.n_splits, self.tuning_size)\n self.dataset_results[name] = cv_results\n return self\n\n def k_fold_cross_validation(self, data, models, n_splits, tuning_size):\n X, y = data\n results = []\n kfold = StratifiedKFold(\n n_splits=n_splits) if self.model_type == 'classification' else KFold(n_splits=n_splits)\n self.cv_scores = []\n fold = 1\n for train_index, test_index in kfold.split(X, y):\n # create the T and k dataset\n X_t, X_k = X[train_index, :], X[test_index, :]\n y_t, y_k = y[train_index], y[test_index]\n for model_name, model, reg_factors in tqdm(models):\n opt_factor = np.nan\n if(reg_factors is not None):\n opt_factor = self.tune_model(X_t, y_t, model, reg_factors)\n model.set_params(reg_factor=opt_factor)\n # Train the tuned model on T\n model.fit(X_t, y_t)\n # Evaluate the score on k and store the results\n results.append(CVFoldResult(model_name, fold,\n opt_factor, model.score(X_k, y_k)))\n fold += 1\n\n return results\n\n def tune_model(self, X_t, y_t, model, regularization_factors):\n cv = ModelTunerCV((X_t, y_t), model, regularization_factors, self.n_jobs).tune()\n opt_factor = cv.opt_param\n return opt_factor\n\n def plot_final_scores(self, path='/home/victor/git/rna-ppgee/artigo2/report/figures'):\n table = pd.DataFrame()\n \n for name in self.dataset_results.keys():\n results = self.dataset_results[name]\n score_data = []\n [score_data.append(row.get_score()) for row in results]\n\n score_data = pd.DataFrame(\n score_data, columns=['model_name', 'fold', 'score'])\n\n fig, ax = plt.subplots(figsize=(8, 6))\n sns.boxplot(data=score_data, x='model_name', y='score', ax=ax)\n ax.set_title(name)\n fig.savefig(f'{path}/{name}_scores')\n\n score_data['dataset'] = name\n table = pd.concat([score_data, table], ignore_index=True)\n\n table.to_csv(f'{path}/{self.model_type}_experiment_chart_data.csv',sep=\",\")\n\n def save_final_scores_table(self, path='/home/victor/git/rna-ppgee/artigo2/report/tables'):\n \n table = pd.DataFrame()\n\n for name in self.dataset_results.keys():\n results = self.dataset_results[name]\n score_data = []\n [score_data.append(row.get_score()) for row in results]\n\n score_data = pd.DataFrame(\n score_data, columns=['model_name', 'fold', 'score'])\n\n # calculate statistics\n def get_ci(values):\n ci_bs = bs.bootstrap(\n values.to_numpy(), stat_func=bs_stats.mean, num_iterations=1000, return_distribution=False)\n return f'{ci_bs.lower_bound},{ci_bs.upper_bound},{ci_bs.value}'\n\n agg_data = score_data.groupby('model_name')['score'].agg(\n [np.mean, np.std, get_ci])\n\n agg_data[['lower_ci', 'upper_ci', 'average']] = agg_data.apply(lambda x: x['get_ci'].split(','), axis=1, result_type=\"expand\")\n agg_data.drop(columns='get_ci', inplace=True)\n agg_data['dataset'] = name\n agg_data.reset_index(inplace=True)\n\n table = pd.concat([agg_data, table], ignore_index=True)\n\n table.to_csv(f'{path}/{self.model_type}_experiment_results.csv',sep=\",\")\n return table\n\n\ndef plot_scores(filename, ylabel, xlabel, save_path=None):\n data = pd.read_csv(filename, sep=',')\n\n for ds_name, ds_data in data.groupby('dataset'):\n fig, ax = plt.subplots(figsize=(8, 6))\n q1 = ds_data['score'].quantile(0.25) \n q3 = ds_data['score'].quantile(0.75)\n iqr = q3 - q1\n\n filter = (ds_data['score'] >= q1 - 1.5*iqr) & (ds_data['score'] <= q3 + 1.5*iqr)\n ds_data_filt = ds_data[filter]\n\n sns.boxplot(data=ds_data_filt, x='model_name', y='score', ax=ax, showfliers=False)\n sns.pointplot(data=ds_data_filt, x='model_name', y='score', ax=ax, join=False,capsize=.2,color=\"k\")\n ax.set_title(ds_name)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n if(save_path is not None):\n fig.savefig(f'{save_path}/{ds_name}_scores')\n\n\ndef plot_ci(filename, ylabel, xlabel, save_path=None):\n data = pd.read_csv(filename, sep=',')\n\n for ds_name, ds_data in data.groupby('dataset'):\n fig, ax = plt.subplots(figsize=(8, 6))\n\n q1 = ds_data['score'].quantile(0.25) \n q3 = ds_data['score'].quantile(0.75)\n iqr = q3 - q1\n\n filter = (ds_data['score'] >= q1 - 1.5*iqr) & (ds_data['score'] <= q3 + 1.5*iqr)\n ds_data_filt = ds_data[filter]\n\n sns.pointplot(data=ds_data_filt, x='model_name', y='score', ax=ax, join=False,capsize=.2,color=\"k\")\n sns.boxplot(data=ds_data, x='model_name', y='score', ax=ax, showfliers=False)\n ax.set_title(ds_name)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n # if(save_path is not None):\n # fig.savefig(f'{save_path}/{ds_name}_scores')\n\ndef plot_ci_all(filename, ylabel, xlabel, save_path=None):\n data = pd.read_csv(filename, sep=',')\n\n fig, ax = plt.subplots(figsize=(8, 6))\n sns.pointplot(data=data, x='model_name', y='score', hue='dataset', ax=ax, join=False,capsize=.2)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n\ndef display_stats(filename, metric, save_path=None):\n # calculate statistics\n def get_ci(values):\n q1 = values.quantile(0.25) \n q3 = values.quantile(0.75)\n iqr = q3 - q1\n\n filter = (values >= q1 - 1.5*iqr) & (values <= q3 + 1.5*iqr)\n values_filt = values[filter]\n ci_bs = bs.bootstrap(\n values_filt.to_numpy(), stat_func=bs_stats.mean, num_iterations=1000, return_distribution=False)\n return f'{ci_bs.value:.3f} ({ci_bs.lower_bound:.3f},{ci_bs.upper_bound:.3f})'\n\n data = pd.read_csv(filename, sep=',')\n agg_data = data.groupby(['dataset','model_name'])['score'].agg(get_ci)\n agg_data = agg_data.reset_index().pivot('dataset', 'model_name')\n\n if(save_path is not None):\n agg_data.to_csv(f'{save_path}/{metric}_scores.csv')\n \n return agg_data","sub_path":"artigo2/experiments/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":7569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"362181357","text":"#!/usr/bin/env python3\nimport argparse\nimport os\nimport sys\nimport time\nfrom datetime import datetime, timezone\n\nimport pandas as pd\nfrom notebooks.run import execute_notebook, get_output_notebook, get_output_prefix, upload_notebook\nfrom notebooks.utils import default_bucket, ensure_session\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(os.path.basename(__file__))\n parser.set_defaults(func=lambda x: parser.print_usage())\n parser.add_argument(\"--csv\", help=\"CSV file with Processing job names\", type=str, required=True)\n\n parsed = parser.parse_args(args)\n\n return parsed\n\n\ndef save_csv_to_s3(df, csv_name):\n session = ensure_session()\n\n df.to_csv(csv_name, index=False)\n\n s3 = session.client(\"s3\")\n bucket = default_bucket(session)\n prefix = \"full_repo_scan\"\n\n s3_path = os.path.join(prefix, csv_name)\n s3.upload_file(csv_name, bucket, s3_path)\n\n return f\"s3://{bucket}/{prefix}/{csv_name}\"\n\n\ndef main():\n args = parse_args(sys.argv[1:])\n\n session = ensure_session()\n\n csv_filename = args.csv\n df = pd.read_csv(csv_filename, index_col=False)\n\n output_notebooks = []\n runtimes = []\n statuses = []\n errors = []\n dates = []\n\n sagemaker = session.client(\"sagemaker\")\n for index, row in df.iterrows():\n job_name = row[\"processing-job-name\"]\n if job_name == \"None\":\n uri = \"None\"\n runtime = 0\n status = \"Skipped\"\n error = \"UsesDocker\"\n date = datetime.now(timezone.utc).strftime(\"%Y-%m-%d\")\n else:\n response = sagemaker.describe_processing_job(ProcessingJobName=job_name)\n notebook, uri = get_output_notebook(job_name, session)\n runtime = (\n response.get(\"ProcessingEndTime\", datetime.now(timezone.utc))\n - response.get(\"ProcessingStartTime\", datetime.now(timezone.utc))\n ).total_seconds()\n status = response.get(\"ProcessingJobStatus\")\n date = response.get(\"ProcessingEndTime\", datetime.now(timezone.utc)).strftime(\n \"%Y-%m-%d\"\n )\n\n error = response.get(\"ExitMessage\")\n if error == \"Kernel died\":\n error = \"KernelDied\"\n elif error:\n lines = error.splitlines()\n error_message = lines[-1]\n error_type, error_details = error_message.split(\":\", 1)\n error = error_type or \"Uncategorized\"\n\n output_notebooks.append(uri)\n runtimes.append(runtime)\n statuses.append(status)\n errors.append(error)\n dates.append(date)\n\n print(job_name)\n time.sleep(1)\n\n df[\"output\"] = output_notebooks\n df[\"runtime\"] = runtimes\n df[\"status\"] = statuses\n df[\"error\"] = errors\n\n df.insert(loc=0, column=\"date\", value=dates)\n\n print(\"\\n\" * 2)\n print(\"-\" * 100)\n print(\"\\n\" * 2)\n print(save_csv_to_s3(df, csv_filename))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cdk-project/lib/images/codebuild-image/python/src/notebooks/cli/describe_notebook_jobs.py","file_name":"describe_notebook_jobs.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"388088068","text":"import xmppUtils\n\ncommandText = 'voice'\nhelpText = 'Voice the specified user.'\n\ndef process(sender, type, args, client):\n\tif len(args) > 0:\n\t\troom = sender.getStripped()\n\t\tsenderNick = sender.getResource()\n\t\txmppUtils.setRole(room, args, 'participant', 'Requested by ' + senderNick)\n","sub_path":"commands/voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"624055155","text":"__author__ = 'vincent'\r\nclass Solution(object):\r\n def rotate(self, nums, k):\r\n \"\"\"\r\n :type nums: List[int]\r\n :type k: int\r\n :rtype: void Do not return anything, modify nums in-place instead.\r\n\r\n It's simple, just reverse the first and second parts individually,\r\n then, reverse the whole array again\r\n remember k % n if k > n\r\n be careful that rotation index is n - k, not k\r\n \"\"\"\r\n n = len(nums)\r\n k %= n\r\n self.reverse(0, n-k-1, nums)\r\n self.reverse(n-k, n-1, nums)\r\n self.reverse(0, n-1, nums)\r\n\r\n def reverse(self, low, high, nums):\r\n while(low < high):\r\n nums[low], nums[high] = nums[high], nums[low]\r\n low += 1\r\n high -= 1\r\n\r\n\r\n","sub_path":"189_Rotate Array.py","file_name":"189_Rotate Array.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"169449635","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\npd.set_option('display.max_columns', 1000)\n\npd.set_option('display.width', 1000)\n\npd.set_option('display.max_colwidth', 1000)\n\npd.set_option('display.max_rows', None)\n\ndf = pd.read_csv('../_data/account_top200.csv')\n\ndf = df[df.puin.isin(['1781806734'])]\n\ndata = df[['s_input_date', 'week_of_year', 'input_week_day', 'cnt']]\ndata = data[data['week_of_year'] > 35]\ndata = data[data['week_of_year'] < 43]\n\nret = data.groupby(['week_of_year', 'input_week_day']).cnt.mean()\nret.columns = ['week_of_year', 'input_week_day', 'cnt']\nret.to_csv('../_data/20191027/ret_20191027.csv', header=False)\nprint(ret)\n\nfig = plt.figure(figsize=(10, 6))\ncolors = ['red', 'blue', 'green', 'orange', 'black', 'cyan', 'darkgreen', 'darkred', 'gold']\n\ndf = pd.read_csv('../_data/20191027/ret_20191027.csv', header=None)\ndf.columns = ['week_of_year', 'input_week_day', 'cnt']\ndf.sort_values(axis=0, by=['week_of_year', 'input_week_day'], ascending=True)\nprint(df)\n\nfor i in range(8):\n print(i)\n print(colors[i])\n start_index = i * 7\n end_index = i * 7 + 7\n subset = df[start_index:end_index]\n print('-' * 60)\n print(subset)\n print('-' * 60)\n print('\\n')\n label = 'week ' + str(35 + i + 1)\n plt.plot(subset['input_week_day'], subset['cnt'], c=colors[i], label=label)\n\nplt.legend(loc='upper right')\nplt.show()\n","sub_path":"james_pandas/pivot_table_demo3.py","file_name":"pivot_table_demo3.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"484945963","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 14 15:02:25 2019\n\n@author: ruifengshe\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport os\n\n\nnum_epochs = 1\nbatch_size = 128\nlearning_rate = 0.01\nnum_classes = 200\n\ntransform_train = transforms.Compose([\n transforms.RandomCrop(64, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n])\n\ntransform_val = transforms.Compose([\n transforms.ToTensor(),\n])\n\ndef create_val_folder(val_dir):\n \"\"\"\n This method is responsible for separating validation images into separate sub folders\n \"\"\"\n path = os.path.join(val_dir, 'images') # path where validation data is present now\n filename = os.path.join(val_dir, 'val_annotations.txt') # file where image2class mapping is present\n fp = open(filename, \"r\") # open file in read mode\n data = fp.readlines() # read line by line\n\n # Create a dictionary with image names as key and corresponding classes as values\n val_img_dict = {}\n for line in data:\n words = line.split(\"\\t\")\n val_img_dict[words[0]] = words[1]\n fp.close()\n # Create folder if not present, and move image into proper folder\n for img, folder in val_img_dict.items():\n newpath = (os.path.join(path, folder))\n if not os.path.exists(newpath): # check if folder exists\n os.makedirs(newpath)\n if os.path.exists(os.path.join(path, img)): # Check if image exists in default directory\n os.rename(os.path.join(path, img), os.path.join(newpath, img))\n return\n\n# Your own directory to the train folder of tinyimagenet\ntrain_dir = '/u/training/tra264/scratch/tiny-imagenet-200/train/'\ntrain_dataset = datasets.ImageFolder(train_dir, transform=transform_train)\n# To check the index for each class\n# print(train_dataset.class_to_idx)\ntrain_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=0)\n# Your own directory to the validation folder of tinyimagenet\nval_dir = '/u/training/tra264/scratch/tiny-imagenet-200/val/'\n\n\nif 'val_' in os.listdir(val_dir+'images/')[0]:\n create_val_folder(val_dir)\n val_dir = val_dir+'images/'\nelse:\n val_dir = val_dir+'images/'\n\n\nval_dataset = datasets.ImageFolder(val_dir, transform=transforms.ToTensor())\n# To check the index for each class\n# print(val_dataset.class_to_idx)\nval_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=0)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# 3x3 convolution\n\ndef conv3x3(in_channels, out_channels, stride=1):\n\n return nn.Conv2d(in_channels, out_channels, kernel_size=3, \n\n stride=stride, padding=1, bias=False)\n\n\n\n# Residual block\n\n\nclass BasicBlock(nn.Module):\n\n def __init__(self, in_channels, out_channels, stride=1, downsample=None):\n\n super(BasicBlock, self).__init__()\n\n self.conv1 = conv3x3(in_channels, out_channels, stride)\n\n self.bn1 = nn.BatchNorm2d(out_channels)\n\n self.relu = nn.ReLU(inplace=True)\n\n self.conv2 = conv3x3(out_channels, out_channels)\n\n self.bn2 = nn.BatchNorm2d(out_channels)\n\n self.downsample = downsample\n\n \n\n def forward(self, x):\n\n residual = x\n\n out = self.conv1(x)\n\n out = self.bn1(out)\n\n out = self.relu(out)\n\n out = self.conv2(out)\n\n out = self.bn2(out)\n\n if self.downsample:\n\n residual = self.downsample(x)\n\n out += residual\n\n out = self.relu(out)\n\n return out\n\n\n\n# ResNet\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, num_blocks, num_classes=num_classes):\n\n super(ResNet, self).__init__()\n\n self.in_channels = 32\n\n self.conv = conv3x3(3, 32)\n\n self.bn = nn.BatchNorm2d(32)\n\n self.relu = nn.ReLU(inplace=True)\n \n self.dpo = nn.Dropout2d(p=0.1)\n\n self.layer1 = self.make_layer(block, 32, num_blocks[0])\n\n self.layer2 = self.make_layer(block, 64, num_blocks[1], 2)\n\n self.layer3 = self.make_layer(block, 128, num_blocks[2], 2)\n \n self.layer4 = self.make_layer(block, 256, num_blocks[3], 2)\n\n self.max_pool = nn.MaxPool2d(3,1)\n\n self.fc = nn.Linear(256*6*6, num_classes)\n\n \n\n def make_layer(self, block, out_channels, blocks, stride=1):\n\n downsample = None\n\n if (stride != 1) or (self.in_channels != out_channels):\n\n downsample = nn.Sequential(\n\n conv3x3(self.in_channels, out_channels, stride=stride),\n\n nn.BatchNorm2d(out_channels))\n\n layers = []\n\n layers.append(block(self.in_channels, out_channels, stride, downsample))\n\n self.in_channels = out_channels\n\n for i in range(1, blocks):\n\n layers.append(block(out_channels, out_channels))\n\n return nn.Sequential(*layers)\n\n \n\n def forward(self, x):\n\n out = self.conv(x)\n\n out = self.bn(out)\n\n out = self.relu(out)\n\n out = self.dpo(out)\n \n out = self.layer1(out)\n\n out = self.layer2(out)\n\n out = self.layer3(out)\n \n out = self.layer4(out)\n\n out = self.max_pool(out)\n\n out = out.view(out.size(0), -1)\n\n out = self.fc(out)\n\n return out\n\n \n\nmodel = ResNet(BasicBlock, [2, 4, 4, 2],num_classes).to(device)\n\n\n\n\n\n# Loss and optimizer\n\ncriterion = nn.CrossEntropyLoss()\n\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n#scheduler = torch.optim.lr_scheduler.StepLR(optimizer,step_size = 1, gamma = 0.1)\n\n\n# For updating learning rate\n\ndef update_lr(optimizer, lr): \n\n for param_group in optimizer.param_groups:\n\n param_group['lr'] = lr\n\n\n# Train the model\n\ntotal_step = len(train_loader)\n\ncurr_lr = learning_rate\n\ntest_acc = []\n\nfor epoch in range(num_epochs):\n \n model.train()\n \n \n for i, (images, labels) in enumerate(train_loader):\n\n images = images.to(device)\n\n labels = labels.to(device)\n\n \n\n # Forward pass\n\n outputs = model(images)\n\n loss = criterion(outputs, labels)\n\n \n\n # Backward and optimize\n\n optimizer.zero_grad()\n\n loss.backward()\n\n# scheduler.step()\n \n optimizer.step()\n\n \n\n if (i+1) % 100 == 0:\n\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, lr: {}' \n .format(epoch+1, num_epochs, i+1, total_step, loss.item(), curr_lr))\n\n# scheduler.step()\n\n # Decay learning rate\n\n if (epoch+1) % 20 == 0:\n\n curr_lr /= 10\n\n update_lr(optimizer, curr_lr)\n\n\n\n# Test the model\n\n model.eval()\n \n with torch.no_grad():\n \n correct = 0\n \n total = 0\n \n for images, labels in val_loader:\n \n images = images.to(device)\n \n labels = labels.to(device)\n \n outputs = model(images)\n \n _, predicted = torch.max(outputs.data, 1)\n \n total += labels.size(0)\n \n correct += (predicted == labels).sum().item()\n \n \n \n print('Accuracy of the model on the test images: {} %'.format(100 * correct / total))\n test_acc.append(correct / total)\n\n\n# Save the model checkpoint\n\ntorch.save(model.state_dict(), 'resnet_CIFAR100.ckpt')\n\nprint('compiled')","sub_path":"hw4_tinyimage_bw.py","file_name":"hw4_tinyimage_bw.py","file_ext":"py","file_size_in_byte":7463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"185564248","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport scipy as sp\nimport scipy.ndimage as spimg\nimport scipy.signal as spsig\nimport matplotlib.pyplot as plt\nfrom lmfit import minimize, Parameters, Parameter, report_errors\n\ndef read_fits_nparray(name = 'test.fit', number = 0):\n \"\"\" Read .fits file from iStar camera\n \n name (str): file name\n number (int): number of hdulist (usually 0)\n \n Returns:\n _header (pyfits.header.Header): dictionary-like FITS header\n _arr (numpy.ndarray): numpy array\n \n \"\"\"\n import pyfits\n _file = name #self. workspace + name\n _fits = pyfits.open(_file)\n _header = _fits[number].header\n _arr = _fits[number].data\n _arr = _arr[0,:,:] # only the 2D array is needed\n return _arr, _header\n \ndef grid_fit(nparray, params, barr, plotflag = True):\n \"\"\" Fit a grid to a 2D data set\n \n \"\"\"\n def grid(ov, oh, s, h, w, a):\n \"\"\" Return a 2d grid\n \n h = height of image\n w = width of image\n ov = origin vertical\n oh = origin horizontal\n s = spacing\n a = amplitude\n \n \"\"\"\n g = np.zeros((h,w)) #nparray full of zeros\n nv = int(w/s +2) #+2 just to make sure\n nh = int(h/s+2)\n \n #calculate grid origin offset\n _hp = ov\n while _hp >= s:\n _hp = _hp - s\n\n #print _hp\n for _nv in xrange(nv): # for every vertical line\n #calculate horizontal position \n _pos = _nv * s + _hp\n #print _pos\n for _h in xrange(h): #for every height\n try:\n g[_h, int(_pos)] = (1-(_pos - int(_pos))) *a\n g[_h, int(_pos+1)] = (1-(int(_pos+1)-_pos)) *a\n except IndexError:\n pass\n \n _vp = oh\n while _vp >= s:\n _vp = _vp -s\n \n for _nh in xrange(nh):\n _pos = _nh * s + _vp\n for _v in xrange(w):\n try:\n g[int(_pos), _v] = (1-(_pos - int(_pos))) *a\n g[int(_pos+1), _v] = (1-(int(_pos+1)-_pos)) *a\n except IndexError:\n pass\n \n return g\n \n def res(params, nparray):\n s = params['s'].value\n ov = params['ov'].value\n oh = params['oh'].value\n a = params['a'].value\n h = nparray.shape[0]\n w = nparray.shape[1]\n #b = params['b'].value\n #barr = np.ones((h,w))*b\n \n model = grid(ov, oh, s, h, w, a)\n \n err = nparray - (barr-model)\n err = err.flatten()\n \n return err\n \n #do fit\n minimize(res, params, args=(nparray,))\n # comma is important, since a tuple is expected!\n report_errors(params)\n \n if plotflag == True:\n s = params['s'].value\n ov = params['ov'].value\n oh = params['oh'].value\n a = params['a'].value\n h = nparray.shape[0]\n w = nparray.shape[1]\n #b = params['b'].value\n #barr = np.ones((h,w))*b\n \n g = grid(ov, oh, s, h, w, a)\n #sp.misc.imsave('grid.jpg', g)\n #fit = nparray+g-barr\n plt.cla()\n plt.clf()\n plt.imshow(img)\n #plt.hold(True)\n plt.imshow(g, alpha=0.5)\n #plt.colorbar()\n plt.savefig('gridfit.jpg')\n sp.misc.imsave('grid.jpg', g)\n plt.show()\n \n \n \n return params\n \n\nif __name__ == \"__main__\":\n fp = 'C:/Python/SpyDev/gridfit/3cm.fits'\n img, header = read_fits_nparray(name=fp)\n #img = spimg.imread(fp, flatten=True)\n img = spimg.interpolation.rotate(img, -0.9, order = 5, reshape=False)\n #img = img[401:714, 364:676]\n img = img[380:985, :] #cut interesting image part\n img = spimg.filters.median_filter(img, size=(3,3))\n barr = spimg.filters.gaussian_filter(img, sigma = 7)\n sobel_x = [[-1, 0, 1],[-2,0,2],[-1,0,1]]\n sobel_y = [[-1,-2,-1],[0,0,0],[1,2,1]]\n #img = spimg.convolve(img, sobel_x)\n #img = spimg.convolve(img, sobel_y)\n \n #img = spsig.medfilt2d(img, kernel_size=3)\n #img = img * (-1)\n \n #show image\n plt.cla()\n plt.clf()\n plt.imshow(img, origin='lower')\n plt.colorbar()\n #plt.savefig('original.tif')\n sp.misc.imsave('original.jpg', img)\n #plt.show()\n \n plt.cla()\n plt.clf()\n plt.imshow(barr, origin='lower')\n plt.colorbar()\n #plt.savefig('original.tif')\n sp.misc.imsave('blurred.jpg', img)\n #plt.show()\n \n # create a set of Parameters\n params = Parameters() \n params.add('ov', value=520, vary=True)\n params.add('oh', value=28, vary=True)\n params.add('s', value=15.7, vary=True, min=10.0, max=20.0)#15.7\n params.add('a', value=5000, vary=True, min = 0, max=20000)\n #params.add('b', value=30000, vary=True, min=10000)\n \n #do fit\n grid_fit(img, params, barr)\n \n #print params['s'].value, params['a'].value, params['b'].value, params['ov'].value, params['oh'].value","sub_path":"gridfit.py","file_name":"gridfit.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"66604220","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 23 15:32:58 2017\r\n\r\n@author: Wenke Huang\r\n\"\"\"\r\n\r\nimport pandas as pd\r\n\r\n### Closing Price 2017\r\ndata_CP2017 = pd.read_csv(\"C:/users/whuang67/downloads/ClosingPrice2017.csv\")\r\ncolumn_names = data_CP2017.columns.values.tolist()\r\ncolumn_names.remove(\"Date\")\r\n\r\ndata_CP2017_list = []\r\nfor name in column_names:\r\n subset = data_CP2017[[\"Date\", name]].values.tolist()\r\n for item in subset:\r\n item.append(name)\r\n data_CP2017_list += subset\r\ndata_CP2017_new = pd.DataFrame(data_CP2017_list,\r\n columns = [\"Date\", \"ClosingPrice\", \"Ticker\"])\r\n\r\ndata_CP2017_new.to_csv(\"C:/users/whuang67/downloads/ClosingPrice2017_new.csv\",\r\n index = False)\r\n\r\n### Dividends 2017\r\ndata_D2017 = pd.read_csv(\"C:/users/whuang67/downloads/Dividends2017.csv\")\r\ndata_D2017.rename(index = str,\r\n columns = {\"Unnamed: 0\": \"Date\"},\r\n inplace = True)\r\ncolumn_names = data_D2017.columns.values.tolist()\r\ncolumn_names.remove(\"Date\")\r\n\r\ndata_D2017_list = []\r\nfor name in column_names:\r\n subset = data_D2017[[\"Date\", name]].values.tolist()\r\n for item in subset:\r\n item.append(name)\r\n data_D2017_list += subset\r\ndata_D2017_new = pd.DataFrame(data_D2017_list,\r\n columns = [\"Date\", \"Dividends\", \"Ticker\"])\r\ndata_D2017_new.to_csv(\"C:/users/whuang67/downloads/Dividens2017_new.csv\",\r\n index = False)\r\n","sub_path":"DeptOfFinance/ClosingPrice_Dividends_Convert.py","file_name":"ClosingPrice_Dividends_Convert.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
DotDict) -> None:\n self.train_iter = 0\n self.val_iter = 0\n\n self.train_metrics = defaultdict(list)\n self.val_metrics = defaultdict(list)\n\n def on_epoch_begin(self, epoch: int, epochs: int, state: DotDict):\n self.train_batch_iter = 0\n self.val_batch_iter = 0\n self.val_t0 = time.time()\n self.train_t0 = time.time()\n\n def on_batch_end(self, i: int, state: DotDict) -> None:\n if state.core.mode == \"train\":\n for name, metric in state.core.metrics[\"train\"].items():\n self.train_metrics[name].append(float(metric))\n\n# lr = get_opt_lr(state.core.opt)\n# self.train_writer.add_scalar(\"batch/lr\",\n# float(lr),\n# global_step=self.train_iter)\n\n self.update_total_iter(state.core.mode)\n\n elif state.core.mode == \"val\":\n for name, metric in state.core.metrics[\"val\"].items():\n self.val_metrics[name].append(float(metric))\n\n self.update_total_iter(state.core.mode)\n\n def on_epoch_end(self, epoch: int, state: DotDict) -> None:\n\n if state.core.mode == \"train\":\n for name, metric in self.train_metrics.items():\n mean = np.mean(metric[-self.train_batch_iter:])\n self.send_metric(name, self.epochs_total, mean)\n\n self.send_metric('epoch_time_train', self.epochs_total, time.time() - self.train_t0)\n\n if state.core.mode == \"val\":\n\n for name, metric in self.val_metrics.items():\n mean = np.mean(metric[-self.val_batch_iter:]) # last epochs vals\n self.send_metric(\"val_\" + name, self.epochs_total, mean)\n\n lr = get_opt_lr(state.core.opt)\n self.send_metric(\"LR\", self.epochs_total, lr)\n\n self.epochs_total += 1\n\n self.send_metric('epoch_time_valid', self.epochs_total, time.time() - self.val_t0)\n\n def send_metric(self, name, x, y):\n if not self.simulation and (self.neptune_experiment is not None):\n self.neptune_experiment.send_metric(name, x, y)\n else:\n print('\\n', name, x, y)\n","sub_path":"goodok_mlu/kekas/callbacks/neptune_callbacks.py","file_name":"neptune_callbacks.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"195819653","text":"# CSC-221-0001\n# M1T1_Foreman\n# Goal: Gold\n\n\n# Author: William Foreman\n# The program should ask the user for three pieces of information: \n# The length of the fishing pole\n# The length of the box\n# The width of the box \n#\n# The program should then calculate the hypotenuse of a right triangle whose \n#sides match the size of the box. \n#\n# It should tell the user that value (which is the largest fishing pole you \n#could place in the box diagonally). 
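\n# e.g. with a 3 x 4 box, sqrt(3*3 + 4*4) = 5 is the longest pole that fits.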
\n#\n# And finally, it should tell the user whether or not their fishing pole will \n#fit in the box they specified.\n#\n#\nimport math\n\nfishingPoleHeight = float(input('Please enter the Length of the Fishingpole: '))\nboxLength = float(input('Please enter the Length of the Box: '))\nboxWidth = float(input('Please enter the Width of the Box: '))\n\n# Calculate the Hypotenuse (Third Side and Longest Side of the Triangle)\nmaxFishingPoleHeight = math.sqrt((boxWidth*boxWidth) + (boxLength*boxLength))\n\n# Display the hypotenuse\nprint(\"The longest fishingpole you can put in your box is: %.2f\" %maxFishingPoleHeight)\n\n# Calculate whether or not the fishingpole will fit in the box\nif fishingPoleHeight <= maxFishingPoleHeight:\n    print(\"Your Fishingpole will fit in this box.\")\nelse:\n    print(\"Your Fishingpole will not fit in this box.\")\n\n\n\n\n\n\n\n\n","sub_path":"M1T1_Foreman.py","file_name":"M1T1_Foreman.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"349528611","text":"from django.conf.urls import patterns, include, url\nfrom django.conf.urls.defaults import *\nfrom django.contrib import admin\nadmin.autodiscover()\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n\turl(r'^$','crudbootstrap.views.index'),\n    (r'^add/$','crudbootstrap.views.agregar_producto'),\n    # NOTE: the capture-group name 'id' in the next two patterns is assumed; the original name was garbled in the source\n    (r'^borrar/(?P<id>\\d+)$','crudbootstrap.views.borrar_editorial'),\n    (r'^editar/(?P<id>\\d+)$','crudbootstrap.views.editar_editorial'),\n\n\n\turl(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\turl(r'^admin/', include(admin.site.urls)),\n    # Examples:\n    # url(r'^$', 'exposicion.views.home', name='home'),\n    # url(r'^exposicion/', include('exposicion.foo.urls')),\n\n    # Uncomment the admin/doc line below to enable admin documentation:\n    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n    # Uncomment the next line to enable the admin:\n    # url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"exposicion/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"520777234","text":"import os, json\r\n\r\n\r\ndef getMusic(path):\r\n    music_list = []\r\n    list = os.listdir(path)\r\n    for file in list:\r\n        with open(path + \"/\" + file, \"r\", encoding='utf-8') as f:\r\n            load_dict = json.load(f)\r\n            music_list.append(load_dict)\r\n\r\n    music_list.sort(key=lambda x: x[\"date\"])\r\n    return music_list[::-1]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    getMusic('../static/upload')","sub_path":"utils/music_list.py","file_name":"music_list.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"143655916","text":"#!/usr/bin/python\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom ctypes import cdll\nfrom argparse import ArgumentParser\nfrom ctypes import cdll\nimport ctypes\nimport traceback\nimport time\n\n\n\n\"\"\"\n\nThis is an Example of using the Python connectors. The example will accept user input\ncreate a table writing arbitrary information to it via the BatchWriter and scanner will put the written data\n\n\n\"\"\"\n\nparser = ArgumentParser(description=\"This is an Apache Accummulo Python connector\")\n\nparser.add_argument(\"-f\", \"--file\", dest=\"file\",\n help=\"path to RFile\", required=True)\n\nargs = parser.parse_args()\n\nfile = args.file\n\nimport sharkbite\ntry:\n\n rfile = sharkbite.RFileOperations.open(file)\n\n # list of column families\n cf = list()\n\n ## range\n\n rng = sharkbite.Range()\n\n ## seekable is an object that replaces the function call\n ## of seek(range,cf,boolean). This exists because the extension point\n ## in the C++ Api allows varying seekable objects that change how seeks occur\n seek = sharkbite.Seekable(rng,cf,False)\n\n rfile.seek(seek)\n\n ## print the row\n while rfile.hasNext():\n kv = rfile.getTop()\n print(kv.getKey())\n rfile.next()\n\nexcept RuntimeError as e:\n traceback.print_exc()\n print(\"Oops, error caused: \" + str(e))\n","sub_path":"examples/rfileread.py","file_name":"rfileread.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"376026338","text":"\n\"\"\"\nModule that contains that contains a couple of utility functions\n\"\"\"\n\nimport numpy as np\nimport os\nimport torch\n\ndef load(filename):\n\n \"\"\"\n Loads the data that is provided\n @param filename: The name of the data file. 
Can be either 'tux_train.dat' or 'tux_val.dat'\n    @return images: Numpy array of all images where the shape of each image will be W*H*3\n    @return labels: Array of integer labels for each corresponding image in images\n    \"\"\"\n\n    try:\n\n        data = np.load(filename)\n        data = torch.Tensor(data)\n    except Exception as e:\n        # os.path itself is not callable; abspath gives the full path that was meant here\n        print('Check if the filepath of the dataset is {}'.format(os.path.abspath(filename)))\n    images, labels = data[:,:3], data[:,3:4]\n    return images, labels\n","sub_path":"nn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"624522019","text":"# install MeCab\r\nimport MeCab\r\nimport re  # needed by re.split below (missing in the original)\r\n\r\n# the sentence to analyze morphologically\r\ndata = \"すもももももももものうち\"\r\n\r\n# invoke MeCab (the Tagger argument selects the dictionary)\r\nmecab = MeCab.Tagger('-d /usr/local/lib/mecab/dic/mecab-ipadic-neologd').parse(data)\r\n# split the text on newlines\r\nlines = mecab.split('\\n')\r\n# break each line into its constituent units\r\nitems = (re.split('[\\t]',line) for line in lines)\r\n# print the morphological-analysis results\r\nfor item in items:\r\n    print(item)","sub_path":"Mecab_test.py","file_name":"Mecab_test.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"636215351","text":"# EASY\n'''\nBubble sort an array, stopping early once a pass makes no swaps\n'''\n\ndef bubbleSort(array):\n\n    # compare i with i + 1\n\t# count number of swaps \n\t# if we have 0 swaps, it's sorted\n\t# we need to start at x + 1 node each time.\n\t\n    size = len(array) - 1\n\n    for i in range(size):\n        # if array is already sorted, the swap count is 0\n        swaps = 0\n        for j in range(size-i):\n            if array[j] > array [j + 1]:\n                print(f\"Swapping {array[j]}, {array[j+1]} \")\n                array[j], array[j+1] = array[j+1], array[j]\n                swaps += 1\n        print(\"Num of swaps:\", swaps)\n        print(array)\n\n        # must iterate and check for swaps at each iteration.\n        # if swaps are reached 0, terminate early\n        if swaps == 0:\n            print(\"Reached 0 swaps, breaking\")\n            break\n\n    \n    return array \n\nif __name__ == '__main__':\n    input = [0, 1, 21, 33, 45, 45, 61, 71, 72, 73]\n    input2 = [0, 1, 21, 45, 45, 41, 61, 33, 71, 99, 73]\n    input3 = [9]\n\n    result = bubbleSort(input2)\n\n    print(result)\n    \n    \n","sub_path":"Leetcode/FB/BinarySearch and Sort/bubblesort.py","file_name":"bubblesort.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"513567128","text":"\nimport os\nimport sys\nimport traceback\nimport logging\nimport logging.config\n\n\n\nlogger = logging.getLogger(\"debug\")\n\n#logger = logging.getLogger(\"daily\")\n#logger_d = logging.getLogger(\"daily\")\n#logger_e = logging.getLogger(\"error\")\n\ndef init_log(log_path, log_name, log_level = \"DEBUG\"):\n    ''''''\n    log_level = log_level.upper()\n\n    LOG_PATH_DEBUG = \"%s/%s_debug.log\" % (log_path,log_name)\n    LOG_PATH_DAILY = \"%s/%s_daily.log\" % (log_path,log_name)\n    LOG_PATH_ERROR = \"%s/%s_error.log\" % (log_path,log_name)\n    # maximum log file size\n    LOG_FILE_MAX_BYTES = 1 * 512 * 1024 * 1024\n    # number of backup files to keep\n    LOG_FILE_BACKUP_COUNT = 365\n\n    log_conf = {\n        \"version\" : 1,\n        \"formatters\" : {\n            \"format1\" : {\n                \"format\" : '%(asctime)-15s [%(thread)d] - [%(filename)s %(lineno)d] %(levelname)s %(message)s',\n            },\n        },\n\n        \"handlers\" : {\n            \"handler1\": {\n                \"class\" : \"logging.handlers.TimedRotatingFileHandler\",\n                \"level\" : log_level,\n                \"formatter\" : \"format1\",\n                \"when\" : 'midnight',\n                \"backupCount\" : LOG_FILE_BACKUP_COUNT,\n                \"filename\" : LOG_PATH_DAILY\n            },\n            
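# handler2 and handler3 below are size-rotated files for debug and error output\n            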
\"handler2\": {\n \"class\" : \"logging.handlers.RotatingFileHandler\",\n \"level\" : log_level,\n \"formatter\" : \"format1\",\n \"maxBytes\" : LOG_FILE_MAX_BYTES,\n \"backupCount\" : LOG_FILE_BACKUP_COUNT,\n \"filename\" : LOG_PATH_DEBUG\n },\n \"handler3\": {\n \"class\" : \"logging.handlers.RotatingFileHandler\",\n \"level\" : \"ERROR\",\n \"formatter\" : \"format1\",\n \"maxBytes\" : LOG_FILE_MAX_BYTES,\n \"backupCount\" : LOG_FILE_BACKUP_COUNT,\n \"filename\" : LOG_PATH_ERROR\n },\n },\n\n \"loggers\" : {\n \"daily\" : {\n \"handlers\" : [\"handler1\"],\n \"level\" : log_level,\n },\n \"debug\": {\n \"handlers\" : [\"handler2\"],\n \"level\" : log_level\n },\n \"error\": {\n \"handlers\" : [\"handler3\"],\n \"level\" : \"ERROR\"\n },\n }\n }\n logging.config.dictConfig(log_conf)","sub_path":"srf_log.py","file_name":"srf_log.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"257019857","text":"def cipher_decryption():\r\n message = input(\"Enter message: \")\r\n key = 5\r\n decryp_text = \"\"\r\n\r\n for i in range(len(message)):\r\n temp = ord(message[i]) - key\r\n if ord(message[i]) == 32:\r\n decryp_text += \" \"\r\n elif temp < 32:\r\n temp += 94\r\n decryp_text += chr(temp)\r\n else:\r\n decryp_text += chr(temp)\r\n # if-else\r\n # for\r\n\r\n print(\"Decrypted Text: {}\".format(decryp_text))\r\n\r\n\r\ndef main():\r\n cipher_decryption()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"BasicLibraries-master/Ciphers - Byte Conversion/ROT47_dec.py","file_name":"ROT47_dec.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"497254852","text":"from __future__ import absolute_import, division\n\nimport logging\n\nfrom .controller import Controller\n\n\nclass NR360S(Controller):\n \"\"\"\n A controller for a NR360S rotation stage.\n\n This is a rotation stage using a BSC201 controller.\n It comes with an imperial and a metric-dimension mount\n (NR360S and NR360S/M), the distinction however\n does not seem to affect any protocol aspects.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(NR360S, self).__init__(*args, **kwargs)\n\n #\n # Note that these values should be pulled from the APT User software,\n # as they agree with the real limits of the stage better than\n # what the website or the user manual states\n self.max_velocity = 50.0 # degree/sec\n self.max_acceleration = 25.0 # degree/sec^2\n\n # from the manual\n # These values are only valid for a trinamics controller such as\n # BSC201\n # encoder counts per revoultion of the output shaft:\n # no load speed: n/a\n # microsteps per step: 2048\n # steps per turn : 200\n # max rotation velocity: 6 degree/s\n # min rotation velocity: 22 arcsec / s\n # Gear ratio: 66 : 1 rounds/deg, 5.4546 degree / turn\n # to move 1 deg:\n enccnt = 75093.33333333333 / (\n (360.0 - 0.0036) / 360.0\n ) # microstep per degree (from 5.4546 degree / turn)\n\n # these equations are taken from the APT protocol manual\n self.position_scale = enccnt # the number of enccounts per deg\n self.velocity_scale = 4030885.0\n self.acceleration_scale = 826.0\n\n self.linear_range = (0, 360)\n self.unit = \"Degrees\"\n # this controller does not respond to status requests\n self.provided_status = False\n\n self.checkmodel()\n\n def checkmodel(self):\n # this is a check function which should make\n # sure that we actually have the right piece\n # of 
hardware\n modelinfo = self.modelinfo\n assert modelinfo.model == \"SCC201\"\n assert modelinfo.hwtype == 16\n\n def request_home_params(\n self, clockwise=None, lswitch=None, velocity=5.0, offset=None, channel=1\n ):\n logger = logging.getLogger(__name__)\n # retrieve homing parameters from\n # controller, using the method of the super class\n (\n channel_id,\n homing_direction,\n _lswitch,\n homing_velocity,\n offset_distance,\n ) = Controller.request_home_params(self, channel=channel)\n # because these parameters do not work for the MTS50,\n # we try to adjust them\n logger.debug(\"setting home params for NR360s, clockwise=%r ..\" % clockwise)\n\n if velocity != None:\n print(\"setting speed %f with scale = %f\" % (velocity, self.velocity_scale))\n homing_velocity = int(velocity * self.velocity_scale)\n\n assert homing_velocity != 0\n\n homing_direction = 2\n lswitch = 1\n\n if clockwise:\n logger.debug(\"homing clockwise\")\n homing_velocity = abs(homing_velocity)\n else:\n logger.debug(\"homing anti-clockwise\")\n homing_velocity = -abs(homing_velocity)\n\n # override lswitch setting if set\n if lswitch != None:\n _lswitch = lswitch\n\n # ignore offset\n offset_distance = int(0.5 * self.position_scale)\n\n return (\n channel_id,\n homing_direction,\n _lswitch,\n homing_velocity,\n offset_distance,\n )\n","sub_path":"pyAPT/nr360s.py","file_name":"nr360s.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"334130532","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 29 20:32:40 2020\n\n@author: tusha\n\"\"\"\n\nclass TreeNode():\n def __init__(self, val=None, left_ptr=None, right_ptr=None):\n self.val = val\n self.left_ptr = left_ptr\n self.right_ptr = right_ptr\n\ndef concatenate(leftList, rightList):\n\n # If either of the list is empty then return the other list\n if (leftList == None):\n return rightList\n if (rightList == None):\n return leftList\n\n # Store the last Node of left List\n leftLast = leftList.left_ptr\n\n # Store the last Node of right List\n rightLast = rightList.left_ptr\n\n # Connect the last node of Left List\n # with the first Node of the right List\n leftLast.right_ptr = rightList\n rightList.left_ptr = leftLast\n\n # Left of first node points to\n # the last node in the list\n leftList.left_ptr = rightLast\n\n # Right of last node refers to\n # the first node of the List\n rightLast.right_ptr = leftList\n\n return leftList\n\n\n\ndef BTtoLL(root):\n if (root == None):\n return None\n\n # Recursively convert left and\n # right subtrees\n left = BTtoLL(root.left_ptr)\n right = BTtoLL(root.right_ptr)\n\n # Make a circular linked list of single\n # node (or root). 
To do so, make the\n # right and left pointers of this node\n # point to itself\n root.left_ptr = root.right_ptr = root\n\n # Step 1 (concatenate the left list with the list with single node, i.e., current node)\n # Step 2 (concatenate the returned list with the right List)\n return concatenate(concatenate(left,root), right)\n\n\n\n# Display Circular Link List\ndef displayCList(head):\n print(\"Circular Linked List is :\")\n itr = head\n first = 1\n while (head != itr or first):\n print(itr.val, end = \" \")\n itr = itr.right_ptr\n first = 0\n print()\n\n\nif __name__ == '__main__':\n root = TreeNode(10)\n root.left_ptr = TreeNode(12)\n root.right_ptr = TreeNode(15)\n root.left_ptr.left_ptr = TreeNode(25)\n root.left_ptr.right_ptr = TreeNode(30)\n root.right_ptr.left_ptr = TreeNode(36)\n\n head = BTtoLL(root)\n displayCList(head)\n\n","sub_path":"Python/Trees - Construction - Leetcode 426.py","file_name":"Trees - Construction - Leetcode 426.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"154532896","text":"from math import pi, sin\nimport struct, sys\n\nfrom PyQt4.QtCore import QBuffer, QByteArray, QIODevice, Qt\nfrom PyQt4.QtGui import QApplication, QFormLayout, QLineEdit, QHBoxLayout, \\\n QPushButton, QSlider, QVBoxLayout, QWidget\nfrom PyQt4.QtMultimedia import QAudio, QAudioDeviceInfo, QAudioFormat, QAudioOutput\n\nclass Window(QWidget):\n\n def __init__(self, parent = None):\n \n QWidget.__init__(self, parent)\n \n format = QAudioFormat()\n format.setChannels(1)\n format.setFrequency(22050)\n format.setSampleSize(16)\n format.setCodec(\"audio/pcm\")\n format.setByteOrder(QAudioFormat.LittleEndian)\n format.setSampleType(QAudioFormat.SignedInt)\n self.output = QAudioOutput(format, self)\n \n self.frequency = 440\n self.volume = 0\n self.buffer = QBuffer()\n self.data = QByteArray()\n \n self.deviceLineEdit = QLineEdit()\n self.deviceLineEdit.setReadOnly(True)\n self.deviceLineEdit.setText(QAudioDeviceInfo.defaultOutputDevice().deviceName())\n \n self.pitchSlider = QSlider(Qt.Horizontal)\n self.pitchSlider.setMaximum(100)\n self.volumeSlider = QSlider(Qt.Horizontal)\n self.volumeSlider.setMaximum(32767)\n self.volumeSlider.setPageStep(1024)\n \n self.playButton = QPushButton(self.tr(\"&Play\"))\n \n self.pitchSlider.valueChanged.connect(self.changeFrequency)\n self.volumeSlider.valueChanged.connect(self.changeVolume)\n self.playButton.clicked.connect(self.play)\n \n formLayout = QFormLayout()\n formLayout.addRow(self.tr(\"Device:\"), self.deviceLineEdit)\n formLayout.addRow(self.tr(\"P&itch:\"), self.pitchSlider)\n formLayout.addRow(self.tr(\"&Volume:\"), self.volumeSlider)\n \n buttonLayout = QVBoxLayout()\n buttonLayout.addWidget(self.playButton)\n buttonLayout.addStretch()\n \n horizontalLayout = QHBoxLayout(self)\n horizontalLayout.addLayout(formLayout)\n horizontalLayout.addLayout(buttonLayout)\n \n def changeFrequency(self, value):\n \n self.frequency = 440 + (value * 2)\n \n def play(self):\n \n if self.output.state() == QAudio.ActiveState:\n self.output.stop()\n \n if self.buffer.isOpen():\n self.buffer.close()\n \n self.createData()\n \n self.buffer.setData(self.data)\n self.buffer.open(QIODevice.ReadOnly)\n self.buffer.seek(0)\n \n self.output.start(self.buffer)\n \n def changeVolume(self, value):\n \n self.volume = value\n \n def createData(self):\n \n # Create 2 seconds of data with 22050 samples per second, each sample\n # being 16 bits (2 bytes).\n \n self.data.clear()\n 
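# generate a pure tone: 2 s at 22050 samples/s; each sample is packed as a\n        # signed 16-bit value to match the QAudioFormat configured in __init__\n        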
for i in xrange(2 * 22050):\n t = i / 22050.0\n value = int(self.volume * sin(2 * pi * self.frequency * t))\n self.data.append(struct.pack(\" 1 and test[0] == \"export\"):\n continue\n (key, val) = line.split(\"=\", 1)\n cpu_report[key] = val\n\n # Collect imformation about cpu repo and toolchain version\n collect_cpu_and_toolchain_data(cpu_report)\n\n # Make directories for benchamrks and logs from embench\n if not os.path.exists(f'{soc_kwargs[\"cpu_type\"]}/benchmarks'):\n os.mkdir(f'{soc_kwargs[\"cpu_type\"]}/benchmarks')\n\n if not os.path.exists(f'{soc_kwargs[\"cpu_type\"]}/logs'):\n os.mkdir(f'{soc_kwargs[\"cpu_type\"]}/logs')\n\n # Prepare namespace for build_all\n arglist = prepare_arguments_for_build_all(soc_kwargs, cpu_report)\n # Build all benchmarks\n build_all.submodule_main(arglist)\n\n # Prepare argument namespace for benchmark\n arglist = argparse.Namespace()\n arglist.builddir = f'../{soc_kwargs[\"cpu_type\"]}/benchmarks'\n arglist.logdir = f'../{soc_kwargs[\"cpu_type\"]}/logs'\n arglist.output_format = benchmark_speed.output_format.JSON\n arglist.target_module = 'run_litex_sim'\n arglist.timeout = 7200\n arglist.baselinedir = 'baseline-data'\n arglist.absolute = False\n arglist.json_comma = False\n arglist.change_dir = False\n\n remnant = f'--cpu-type {args.cpu_type}'.split()\n remnant.extend(f'--cpu-variant {args.cpu_variant}'.split())\n remnant.extend(f'--threads {args.threads}'.split())\n remnant.extend(f'--integrated-sram-size \\\n{args.integrated_sram_size}'.split())\n\n logs_before = set(glob.glob(f'./{soc_kwargs[\"cpu_type\"]}/logs/speed*'))\n\n # Bench relative speed\n benchmark_speed.submodule_main(arglist, remnant)\n\n # Bench absolute speed\n arglist.absolute = True\n benchmark_speed.submodule_main(arglist, remnant)\n\n logs_path = f'./{soc_kwargs[\"cpu_type\"]}/logs/speed*'\n logs_new = set(glob.glob(logs_path))-logs_before\n\n logs_new = sorted(list(logs_new))\n\n relative_result_path = f'./{soc_kwargs[\"cpu_type\"]}/result.json'\n absolute_result_path = f'./{soc_kwargs[\"cpu_type\"]}/result_abs.json'\n\n extract_json_results_from_file_to_file(logs_new[0], relative_result_path)\n extract_json_results_from_file_to_file(logs_new[1], absolute_result_path)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"157938622","text":"import requests\nimport json\n#from apiphant.validation import ApiError, field, Invalid\n#import exceptions\n#import sys\n\n\nfrom Employee import Employee\n\n\ndef RestRequests():\n\n\n try:\n resp = requests.get(\"http://echo.jsontest.com/key/value/one/two\")\n # resp = requests.get(\"http://troywest.me.uk\")\n print(resp.status_code)\n if resp.status_code != 200:\n raise ValueError(\"HTTP Request Invalid\")\n\n except ValueError as ve:\n print(str(ve) + \" Found An Error \" + str(resp.status_code))\n\n finally:\n data = json.loads(resp.text)\n print(data)\n print (resp.status_code)\n\n return 0\n\n\ndef main():\n\n num=102\n if(num == 100):\n print(\"success\")\n elif(num == 101):\n print(\" OK OK \")\n elif (num == 102):\n print(\" OK OK \")\n else:\n print(\"you are a loser\")\n\n\n\n\n\n\n\n\n troy = Employee(\"Troy West\", \"thewesternfront@icloud.com\")\n troy.printInfo()\n\n liz = Employee(\"Liz McComb\", \"santacruzeliz@icloud.com\")\n liz.printInfo()\n\n # Call the Rest Requests\n RestRequests()\n\n return 0\n\n\nif __name__ == \"__main__\":\n 
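# entry point: print the two Employee records, then call RestRequests()\n    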
main()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"4607564","text":"\"\"\"\n Equivalent to https://github.com/gpufit/Gpufit/blob/master/Gpufit/tests/Linear_Fit_1D.cpp\n\"\"\"\n\nimport unittest\nimport numpy as np\nimport pygpufit.gpufit as gf\n\nclass Test(unittest.TestCase):\n\n def test_gaussian_fit_1d(self):\n # constants\n n_fits = 1\n n_points = 2\n n_parameter = 2\n\n # true parameters\n true_parameters = np.array((0, 1), dtype=np.float32)\n\n # data values\n data = np.empty((n_fits, n_points), dtype=np.float32)\n data[0, :] = (0, 1)\n\n # max number iterations\n max_number_iterations = 10\n\n # initial parameters\n initial_parameters = np.empty((n_fits, n_parameter), dtype=np.float32)\n initial_parameters[0, :] = (0, 0)\n\n # model id\n model_id = gf.ModelID.LINEAR_1D\n\n # tolerance\n tolerance = 0.001\n\n # user info\n user_info = np.array((0, 1), dtype=np.float32)\n\n # call to gpufit\n parameters, states, chi_squares, number_iterations, execution_time = gf.fit(data, None, model_id,\n initial_parameters, tolerance, \\\n None, None, None, user_info)\n\n # print results\n for i in range(n_parameter):\n print(' p{} true {} fit {}'.format(i, true_parameters[i], parameters[0, i]))\n print('fit state : {}'.format(states))\n print('chi square: {}'.format(chi_squares))\n print('iterations: {}'.format(number_iterations))\n print('time: {} s'.format(execution_time))\n\n assert (chi_squares < 1e-6)\n assert (states == 0)\n assert (number_iterations <= max_number_iterations)\n for i in range(n_parameter):\n assert (abs(true_parameters[i] - parameters[0, i]) < 1e-6)\n\nif __name__ == '__main__':\n\n if not gf.cuda_available():\n raise RuntimeError(gf.get_last_error())\n unittest.main()\n","sub_path":"Gpufit/python/tests/test_linear_regression.py","file_name":"test_linear_regression.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"218442185","text":"# Thomas Monfre\n# Dartmouth College CS 74, Winter 2019\n# HW4: python script used to get the accuracy of a given combination of features\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import classification_report\n\n# load the data from the given filepath\ndef load_data(filepath):\n f = pd.read_csv(filepath, header=0) # read input file\n headers = list(f.columns.values) # put the original column names in a python list\n array = f.values # create a numpy array for input into scikit-learn\n return array, headers\n\n# split feature vectors and class labels\ndef separate_features_from_class(data):\n X = data[:, :6] # feature vectors\n y = data[:, 6] # class labels\n return X,y\n\n######################################################################\n\n# LOAD AND SPLIT DATA\nfilepath = \"../data/CS74_HW4_training_set.csv\"\n\n# load data and separate out feature vectors and class label\ndata, headers = load_data(filepath)\nX, y = separate_features_from_class(data)\n\nclasses = [\"Class 1\", \"Class 2\", \"Class 3\", \"Class 4\", \"Class 5\"]\n\nkf = KFold(n_splits=10)\naccuracies = []\n\n# perform a k-fold cross validation to determine accuracy of selected features\nfor train_index, test_index in kf.split(X):\n # split into testing and training data based on the splits\n X_train, X_test = 
X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n\n classif = ExtraTreesClassifier(n_estimators=250)\n classif.fit(X_train, y_train)\n\n pred = classif.predict(X_test)\n\n correct_prediction_count = 0\n\n for i in range(len(pred)):\n if pred[i] == y_test[i]:\n correct_prediction_count += 1\n\n print(classification_report(y_test, pred, target_names=classes))\n\n accuracies.append(correct_prediction_count / len(pred))\n\nprint(np.mean(accuracies))","sub_path":"hw4/submission/cross-validation/test_accuracy.py","file_name":"test_accuracy.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"292178350","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 10 05:45:21 2020\n\n@author: Aishwarya Patange\n\"\"\"\nimport model\nimport data_load\nfrom data_load import load_data\n\nepochs = 50\nmin_year = 1997\nmax_year = 2017\nratios = [30, 40, 50, 60, 70]\nlrates = [0.001, 1e-4, 1e-5, 1e-6, 1e-7]\ngenres = ['Horror', 'Romance', 'Action', 'Documentary']\n\ndef main(ratios):\n\t'''\n\tBuild the model across all hyper parameter settings\n\t'''\n\n\tfor ratio in ratios:\n\n\t\tx_train, y_train = load_data(min_year, max_year, genres, ratio, 'train')\n\t\tx_val, y_val = load_data(min_year, max_year, genres, ratio, 'validate')\n\n\t\tfor lr in lrates:\n\n\t\t\tmodel.build(1, min_year, max_year, genres, ratio, epochs, lr, x_train=x_train, x_val=x_val, y_train=y_train, y_val=y_val, verbose=True)\n\nif __name__ == '__main__':\n\tmain(ratios)","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"133824196","text":"from flask import Flask, request, abort\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import *\nimport requests, json\n\n\nimport errno\nimport os\nimport sys, random\nimport tempfile\nimport requests\nimport re\n\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage,\n SourceUser, SourceGroup, SourceRoom,\n TemplateSendMessage, ConfirmTemplate, MessageAction,\n ButtonsTemplate, ImageCarouselTemplate, ImageCarouselColumn, URIAction,\n PostbackAction, DatetimePickerAction,\n CarouselTemplate, CarouselColumn, PostbackEvent,\n StickerMessage, StickerSendMessage, LocationMessage, LocationSendMessage,\n ImageMessage, VideoMessage, AudioMessage, FileMessage,\n UnfollowEvent, FollowEvent, JoinEvent, LeaveEvent, BeaconEvent,\n FlexSendMessage, BubbleContainer, ImageComponent, BoxComponent,\n TextComponent, SpacerComponent, IconComponent, ButtonComponent,\n SeparatorComponent,\n)\n\napp = Flask(__name__)\n\n# Channel Access Token\nline_bot_api = LineBotApi('nCheFomZPKA81EfMCsgkGDaLIWlGlRdX/i9N4JAa2Vvetw4iB0iKyhX9EushTlct8Xm14AjoAhxifXP1THdjBLoIxT6bruyTKY10+M2Ea5iX0p9zraG/0kFvirKsv4vFV7SyYR7IAuEJvSyzvQDwMAdB04t89/1O/w1cDnyilFU=')\n# Channel Secret\nhandler = WebhookHandler('a13be1528f294201578d36297fc549a6')\n#===========[ NOTE SAVER ]=======================\nnotes = {}\n\n#REQUEST DATA MHS\ndef carimhs(input):\n URLmhs = \"https://www.pricelist.padmapratama.com/api/mhs.php?nrp=\" + input\n irham = requests.get(URLmhs)\n data = irham.json()\n err = \"data tidak ditemukan\"\n \n flag = data['kode']\n if(flag == \"1\"):\n nrp = data['data_angkatan'][0]['nrp']\n nama = data['data_angkatan'][0]['nama']\n kos = 
data['data_angkatan'][0]['kosan']\n\n return nama + '\\n' + nrp + '\\n' + kos\n elif(flag == \"0\"):\n return err \n\n# Post Request\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n signature = request.headers['X-Line-Signature']\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n return 'OK'\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n text = event.message.text #simplify for receove message\n sender = event.source.user_id #get usesenderr_id\n gid = event.source.sender_id #get group_id\n profile = line_bot_api.get_profile(sender)\n line_bot_api.reply_message(event.reply_token,TextSendMessage(text=carimhs(text)))\n #line_bot_api.reply_message(event.reply_token,TextSendMessage(text=\"masuk\"))\n \nimport os\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"502581115","text":"# !/usr/bin/env python\n# -- coding: utf-8 --\n# @Author zengxiaohui\n# Datatime:4/29/2021 4:22 PM\n# @File:common\nimport os\nimport shutil\n\n\ndef resetDir(dirpath):\n \"\"\"判断文件夹是否存在存在那么删除重建,不存在那么创建\"\"\"\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n else:\n shutil.rmtree(dirpath)\n os.makedirs(dirpath)\n\n\ndef get_filelist(dir, Filelist):\n \"\"\"获取文件夹及子文件夹下所有文件\"\"\"\n newDir = dir\n if os.path.isfile(dir):\n Filelist.append(dir)\n # # 若只是要返回文件文,使用这个\n # Filelist.append(os.path.basename(dir))\n elif os.path.isdir(dir):\n for s in os.listdir(dir):\n # 如果需要忽略某些文件夹,使用以下代码\n # if s == \"xxx\":\n # continue\n newDir = os.path.join(dir, s)\n get_filelist(newDir, Filelist)\n return Filelist\n","sub_path":"python_developer_tools/files/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"568935573","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport matplotlib.pyplot as plt\n\ndef get_data(resolution, lights, renderer, shadows):\n pattern = '%slights_%s_%s_%s.txt'\n filename = pattern % (lights, renderer, shadows, resolution)\n with open(filename, 'r') as f:\n data = map(lambda x: float(x.split()[-2]), f.readlines()[1:-1])\n return sum(data)/len(data)\n\nif (__name__==\"__main__\"):\n lights = ['5', '15', '30', '60', '120', '196']\n render = ['lidr', 'forward']\n shadow = ['shadow', 'noshadow']\n resolution = ['1024x768', '1920x1080', '3840x2160']\n\n for i in range(3):\n plt.figure(i)\n lidr_noshadow = [map(lambda x: float(x), lights), map(lambda x:get_data(resolution[i], x, 'lidr', 'noshadow'), lights)]\n lidr_shadow = [map(lambda x: float(x), lights), map(lambda x:get_data(resolution[i], x, 'lidr', 'shadow'), lights)]\n forward_noshadow = [map(lambda x: float(x), lights), map(lambda x:get_data(resolution[i], x, 'forward', 'noshadow'), lights)]\n forward_shadow = [map(lambda x: float(x), lights), map(lambda x:get_data(resolution[i], x, 'forward', 'shadow'), lights)]\n plt.plot(lidr_noshadow[0], lidr_noshadow[1], 'b.-', label='lidr no shadow')\n plt.plot(lidr_shadow[0], lidr_shadow[1], 'g.-', label='lidr shadow')\n plt.plot(forward_noshadow[0], forward_noshadow[1], 'r.-', label='forward no shadow')\n plt.plot(forward_shadow[0], forward_shadow[1], 'c.-', 
label='forward shadow')\n        plt.legend()\n        plt.xlabel('# lights')\n        plt.ylabel('FPS')\n        plt.title(resolution[i])\n        plt.axis([0, 256, 0, 100])\n        plt.show()\n","sub_path":"benchmark/makegraphs.py","file_name":"makegraphs.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"192434681","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# define the objective function of variable x\ndef Fit_func(x):\n    f= 10*np.sin(x)/x\n    \n    return f\n\n# define the function that runs the PSO procedure\ndef PSO(): \n    # initialize the particle count and weights, and set bounds for x and velocity\n    particle_cnt=30\n    x_position= []\n    Average_x= []\n    ws=0.9; c1=1; c2=1\n    r1=np.random.randn()*0.1; r2=np.random.randn()*0.1\n    max_x= 15\n    min_x= -15\n    maxv=5\n    \n    pos= np.random.random_sample([particle_cnt])*200\n    v0=np.random.random_sample([particle_cnt])\n    \n    # assign each particle its initial pbest value\n    pbest=pos\n    # define the initial value of gbest\n    pbestfit=Fit_func(pbest)\n    \n    gbestfit= max(pbestfit)\n    pbestfit= list(pbestfit) \n    gbest= pbest[pbestfit.index(gbestfit)]\n    \n    # start moving and updating velocity and position; 100 iterations by default\n    for k in range(0, 100):\n        V = (ws*v0)+(c1*r1*(pbest-pos))+(c2*r2*(gbest-pos))\n        # check whether the velocity exceeds its bounds\n        for i in range(0, particle_cnt):\n            if V[i] > maxv:\n                V[i] = maxv\n            elif V[i] < -maxv:\n                V[i] = -maxv\n        \n        X = pos + V\n        # check whether the position exceeds its bounds\n        for i in range(0,particle_cnt):\n            if X[i] > max_x:\n                X[i] = max_x\n            elif X[i] < min_x:\n                X[i] = min_x\n        \n        # velocity update\n        v0=V\n        # position update\n        pos=X\n        # individual (pbest) fitness update\n        prefit= Fit_func(pbest)\n        nowfit= Fit_func(X)\n        \n        for i in range(0, particle_cnt):\n            if nowfit[i] > prefit[i]:\n                pbest[i]= pos[i]\n        \n        pbestfit= Fit_func(pbest)\n        \n        # global (gbest) fitness update\n        if max(pbestfit) > gbestfit:\n            gbestfit= max(pbestfit)\n            pbestfit= list(pbestfit)\n            gbest= pbest[pbestfit.index(gbestfit)]#,:]\n        \n        Average_x.append(sum(pbest)/particle_cnt)\n        x_position.append(gbest)\n    \n    return x_position, Average_x\n\n# main program: run PSO and plot the output\nif __name__ == '__main__': \n    x_position1, Average_x= PSO()\n    x_position2,_= PSO()\n    \n    print(\"Particle 1's positions : \\n\")\n    for i in range(0, len(x_position1)):\n        if i < 10:\n            print(x_position1[i])\n        elif i==11:\n            print(\".\\n.\\n.\\n.\\n\")\n        elif 19 < i < 30:\n            print(x_position1[i])\n    \n    print(\"\\nParticle 2's positions : \\n\")\n    for i in range(0, len(x_position2)):\n        if i < 10:\n            print(x_position2[i])\n        elif i==11:\n            print(\".\\n.\\n.\\n.\\n\")\n        elif 19 < i < 30:\n            print(x_position2[i])\n    \n    plt.plot(x_position1)\n    plt.plot(x_position2, \"r--\")\n    plt.axis([-3, 30, -10, 10])\n    plt.xlabel(\"Iterations\")\n    plt.ylabel(\"x-position\")\n    plt.legend((\"Particle 1\", \"Particle 2\"), loc= \"upper right\")\n    plt.grid(color='g',linestyle='--', linewidth=1,alpha=0.4)\n    plt.show()\n    \n    print(\"\\n********************Next fig********************\")\n    \n    Best_Fitness= Fit_func(x_position1)\n    Average_Fitness= Fit_func(Average_x)\n    \n    print(\"Best Fitness's data : \\n\")\n    for i in range(0, len(Best_Fitness)):\n        if i < 10:\n            print(Best_Fitness[i])\n        elif i==11:\n            print(\".\\n.\\n.\\n.\\n\")\n        elif 19 < i < 30:\n            print(Best_Fitness[i])\n    \n    print(\"\\nAverage Fitness's data : \\n\")\n    for i in range(0, len(Average_Fitness)):\n        if i < 10:\n            print(Average_Fitness[i])\n        elif i==11:\n            print(\".\\n.\\n.\\n.\\n\")\n        elif 19 < i < 30:\n            print(Average_Fitness[i])\n    \n    plt.plot(Best_Fitness)\n    plt.plot(Average_Fitness, \"r--\")\n    plt.axis([-3, 27, 1.5, 11])\n    plt.xlabel(\"Iterations\")\n    plt.ylabel(\"Fitness\")\n    plt.legend((\"Best Fitness\", \"Average Fitness\"), loc= \"lower right\")\n    plt.grid(color='g',linestyle='--', linewidth=1,alpha=0.4)\n    
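# second figure: best vs. average swarm fitness across the iterations\n    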
plt.show()","sub_path":"PSO/PSO.py","file_name":"PSO.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"235431723","text":"########################\n## 28 October 2013\n## This code contains functions to create various output files from the data\n########################\n#import global_file\nimport config_3\nimport numpy as np\nimport os\nimport math\nimport setup_program_3\n\n#######################\n## Make individuals\n## The goal of this is to convert the population level data into individual level data\n## This is just done by sampling w/o replacement for the genotypes at each locus.\n## Since they are all unlinked loci, I think this is a valid thing to do.\n##\n## If haploid, it returns an array where each column is a site, and each row in an individual\n## If diploid, it returns an array where there are 2 columns for each site (1 for each of the two alleles), and\n######################\n\ndef makeIndiv(lattice, i, j, diploid):\n a = lattice[i,j].snps.freq #currently this is an array with the # of individuals with that allele.\n indivSNPs=0\n if diploid == False:\n for k in range(0,len(a)):\n # b = np.hstack( ( np.ones(math.ceil(a[k]*lattice[i,j].ne)), np.zeros(lattice[i,j].ne - math.ceil(lattice[i,j].ne*a[k]) ) ) )\n b = np.hstack( ( np.ones(a[k]), np.zeros(lattice[i,j].ne - a[k]) ) )\n # print b\n np.random.shuffle(b)#This just randomly shuffles b in place\n if k == 0:\n indivSNPs = b\n else:\n indivSNPs = np.column_stack( [ indivSNPs , b ] ) #This is creating the array of everything\n else: #Make diploids.\n if type(a) == int: #Then there will just be 1 iteration\n iterations = a\n else:\n iterations = len(a)\n for k in range(0, iterations):\n dipVector = []\n if type(a) == int:\n currSNP = a\n else:\n currSNP = a[k]\n b = np.hstack( (np.ones(currSNP), np.zeros(lattice[i,j].ne - currSNP)) )\n np.random.shuffle(b)\n for l in range(0, int(len(b)/2.0)):\n if l == 0:\n dipVector = (b[l*2], b[l*2+1])\n else:\n dipVector = np.vstack( (dipVector, (b[l*2], b[l*2+1])) )\n if k == 0:\n indivSNPs = dipVector\n else:\n indivSNPs = np.column_stack( [ indivSNPs , dipVector ] ) #This is creating the array of everything\n return indivSNPs #This is the individual data, but haploid.\n\n##GENEPOP\n##Ex\n#Microsat on Chiracus radioactivus, a pest species\n#Loc1, Loc2, Loc3, Y-linked, Loc4\n#POP\n#AA8, 0405 0711 0304 0000 0505\n#AA9, 0405 0609 0208 0000 0505\n#A10, 0205 0609 0101 0000 0305\n#Pop\n#AF, 0000 0000 0000 0000 0505\n#AF, 0205 0307 0102 0000 0505\n#AF, 0202 0609 0202 0000 0505\n#pop\n#C45, 0505 0606 0202 0000 0505\n#C45, 0505 0909 0202 0000 0505\n#C45, 0505 0306 0202 0000 0505\n#This can be done one at a time and appended to\n\ndef GENEPOP(lattice, i, j, indivMat, gen):\n a = lattice[i,j].snps.freq #currently this is an array with the # of individuals with that allele.\n inputFile = str(config_3.outFile) + \".GENEPOP.gen\" + str(gen)\n f = open(inputFile, 'a')\n if is_non_zero_file(inputFile) == False:\n f.write(\"GENEPOP output file \\n\")\n if type(a) == int:\n f.write(\"Loc1 \\n\")\n else:\n locNames_1 = [\"Loc\"] * len(a)\n locNames_2 = np.arange(1,len(a)+1)\n locNames = [ ( locNames_1[0] + str(locNames_2[0]) ) ]\n for k in range(0,len(a)):\n locNames.append( str( locNames_1[k] + str(locNames_2[k]) ) )\n #arr = ', '.join(map(str, locNames))\n arr = \"\\n\".join(map(str, locNames)) #Kimberly added 22 September 2014\n f.write(arr)\n f.write(\"\\n\")\n f.write(\"POP\\n\")\n if 
config_3.diploid == False:\n numRows = lattice[i,j].ne\n for k in range(0,numRows):\n f.write(\"POP\" + str(i) + str(j) +\"_\" + str(k) + \", \")\n if isinstance(indivMat[0],(int,float,complex)):\n if indivMat[k][0] == 0:\n f.write(\" 01\")\n else:\n f.write(\" 02\")\n else:\n for l in range(0, len(indivMat[0])): #for each snp in the kth individual\n if indivMat[k][l] == 0:\n f.write(\" 01\")\n else:\n f.write(\" 02\")\n f.write(\"\\n\")\n else: #If its diploid\n numRows = int(lattice[i,j].ne / 2) #If they are diploid, there will be half the # of rows\n\n for k in range(0,numRows):\n f.write(\"POP\" + str(i) + str(j) +\"_\" + str(k) + \", \")\n for l in range(0, int(len(indivMat[0])/2) ): #for every two alleles of the snps in the kth individual\n if indivMat[k][l*2] == 0:\n a = \"01\"\n if indivMat[k][l*2] == 1:\n a = \"02\"\n if indivMat[k][l*2+1] == 0:\n b = \"01\"\n if indivMat[k][l*2+1] == 1:\n b = \"02\"\n f.write(a + b)\n f.write(\" \")\n f.write(\"\\n\")\n f.close()\n\ndef GENELAND(lattice, i, j, indivMat, gen):\n a = lattice[i,j].snps.freq\n inputFile = str(config_3.outFile) + \".GENELAND.gen\" + str(gen)\n f = open(inputFile, 'a')\n if config_3.diploid == False:\n numRows = lattice[i,j].ne\n for k in range(0,numRows):\n for l in range(0, len(indivMat[0])): #for each snp in the kth individual\n if indivMat[k][l] == 0:\n f.write(\"01 \")\n else:\n f.write(\"02 \")\n f.write(\"\\n\")\n \n else: #If its diploid\n numRows = int(lattice[i,j].ne / 2) #If they are diploid, there will be half the # of rows\n for k in range(0,numRows):\n for l in range(0, ( int(len(indivMat[0])/2)) ): #for every two alleles of the snps in the kth individual\n if indivMat[k][l*2] == 0:\n a = \"01\"\n if indivMat[k][l*2] == 1:\n a = \"02\"\n if indivMat[k][l*2+1] == 0:\n b = \"01\"\n if indivMat[k][l*2+1] == 1:\n b = \"02\"\n f.write(a + \"/\" + b + \" \")\n f.write(\"\\n\")\n \n f.close()\n\ndef makeCoorFile(lattice, nPops, i, j, gen):\n inputFile = str(config_3.outFile) + \".GENEPOP.PopCoor.gen\" + str(gen)\n f = open(inputFile, 'a')\n if config_3.diploid == False:\n for k in range(0,lattice[i,j].ne):\n f.write(str(i) + \" \" + str(j) + \"\\n\")\n else:\n for k in range(0,(lattice[i,j].ne/2)):\n f.write(str(i) + \" \" + str(j) + \"\\n\")\n f.close()\n\n#FamID IndvID PatID MatID Sex Phenotype Alleles, (genotypes can be 1,2,3,4)\n#FAM001 1 0 0 1 2 A A G G A C\n#FAM001 2 0 0 1 2 A A A G 0 0\n\ndef PLINK(lattice, i, j, indivMat, gen):\n a = lattice[i,j].snps.freq\n \n ##Make PED file\n inputFile = str(config_3.outFile) + \".ADMIXTURE.PED.gen\" + str(gen)\n f = open(inputFile, 'a')\n if config_3.diploid == False:\n numRows = lattice[i,j].ne\n for k in range(0,numRows):\n f.write(\"POP\" + str(i) + str(j) + \" \" + str(k) + \" 0 0 -9 -9\") #POP00 k 0 0 -9 -9\n for l in range(0, len(indivMat[0])): #for each snp in the kth individual\n if indivMat[k][l] == 0:\n f.write(\" 1\")\n else:\n f.write(\" 2\")\n f.write(\"\\n\")\n else: #If its diploid\n numRows = int(lattice[i,j].ne / 2) #If they are diploid, there will be half the # of rows\n for k in range(0,numRows):\n f.write(\"POP\" + str(i) + str(j) + \" \" + str(k) + \" 0 0 -9 -9\") #POP00 k 0 0 -9 -9\n for l in range(0, ( int(len(indivMat[0])/2)) ): #for every two alleles of the snps in the kth individual\n #Kimberly added \"int\" b/c python3 said it was a float\n if indivMat[k][l*2] == 0:\n a = \"1\"\n if indivMat[k][l*2] == 1:\n a = \"2\"\n if indivMat[k][l*2+1] == 0:\n b = \"1\"\n if indivMat[k][l*2+1] == 1:\n b = \"2\"\n f.write(\" \" + a + \" \" + b)\n 
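# finish this individual's PED genotype row\n            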
f.write(\"\\n\")\n f.close()\n \n ##Make Map file\n #chr snpID genDist bpPos\n # 1 rs123456 0 1234555\n # 1 rs234567 0 1237793\n # 1 rs233556 0 1337456\n a = lattice[i,j].snps.freq\n if i == 0 and j == 0:\n inputFile = str(config_3.outFile) + \".ADMIXTURE.MAP.gen\" + str(gen)\n f = open(inputFile, 'a')\n if type(a) == int:\n f.write(\"0 Loc0 0 1\\n\")\n else:\n locNames_1 = [\"Loc\"] * len(a)\n locNames_2 = np.arange(1,len(a)+1)\n locNames = [ ( locNames_1[0] + str(locNames_2[0]) ) ]\n for k in range(1,len(a)):\n locNames.append( str( locNames_1[k] + str(locNames_2[k]) ) )\n for k in range(0, len(a)):\n f.write(\"0 \" + str(locNames[k]) + \" 0 \" + str(k) + \"\\n\")\n f.close()\n\ndef is_non_zero_file(fpath):\n return True if os.path.isfile(fpath) and os.path.getsize(fpath) > 0 else False\n\n","sub_path":"inst/writeOutput_3.py","file_name":"writeOutput_3.py","file_ext":"py","file_size_in_byte":9210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"522717559","text":"def count_above_below(the_list, value):\n \"\"\"\n Given a list and a number, return a dictionary\n with the count of list elements that are above\n and below the given number.\n\n e.g. list [1, 5, 2, 1, 10] with value 6\n gives output \"above: 1, below: 4\"\n \"\"\"\n counts = {'above': 0, 'below': 0}\n\n for element in the_list:\n if element > value:\n counts['above'] += 1\n elif element < value:\n counts['below'] += 1\n \n return counts","sub_path":"abovebelow.py","file_name":"abovebelow.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"145987778","text":"from typing import List\n\n\nclass Solution:\n \"\"\"\n 905. 按奇偶排序数组\n https://leetcode-cn.com/problems/sort-array-by-parity/\n 给定一个非负整数数组 A,返回一个数组,在该数组中, A 的所有偶数元素之后跟着所有奇数元素。\n 你可以返回满足此条件的任何数组作为答案。\n \"\"\"\n def sortArrayByParity(self, A: List[int]) -> List[int]:\n j = len(A) - 1\n if j <= 0:\n return A\n\n i = 0\n while i < j:\n if A[i] % 2 == 1:\n A[j], A[i] = A[i], A[j]\n j -= 1\n else:\n i += 1\n return A\n\n\nso = Solution()\nprint(so.sortArrayByParity([3, 1, 2, 4]))","sub_path":"arr.sort-array-by-parity.py","file_name":"arr.sort-array-by-parity.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"117318723","text":"#!/usr/bin/python\n#\n# Copyright (c) 2018 Zim Kalinowski, \n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: azure_rm_servicebussubscription\nversion_added: \"2.8\"\nshort_description: Manage Azure Subscription instance.\ndescription:\n - Create, update and delete instance of Azure Subscription.\n\noptions:\n resource_group:\n description:\n - Name of the Resource group within the Azure subscription.\n required: True\n namespace_name:\n description:\n - The namespace name\n required: True\n topic_name:\n description:\n - The topic name.\n required: True\n name:\n description:\n - The subscription name.\n required: True\n lock_duration:\n description:\n - ISO 8061 lock duration timespan for the subscription. 
The default value is 1 minute.\n requires_session:\n description:\n - Value indicating if a subscription supports the concept of sessions.\n default_message_time_to_live:\n description:\n - \"ISO 8061 Default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent\n to Service Bus. This is the default value used when TimeToLive is not set on a message itself.\"\n dead_lettering_on_filter_evaluation_exceptions:\n description:\n - Value that indicates whether a subscription has dead letter support on filter evaluation exceptions.\n dead_lettering_on_message_expiration:\n description:\n - Value that indicates whether a subscription has dead letter support when a message expires.\n duplicate_detection_history_time_window:\n description:\n - ISO 8601 timeSpan structure that defines the duration of the duplicate detection history. The default value is 10 minutes.\n max_delivery_count:\n description:\n - Number of maximum deliveries.\n status:\n description:\n - Enumerates the possible values for the status of a messaging entity.\n choices:\n - 'active'\n - 'disabled'\n - 'restoring'\n - 'send_disabled'\n - 'receive_disabled'\n - 'creating'\n - 'deleting'\n - 'renaming'\n - 'unknown'\n enable_batched_operations:\n description:\n - Value that indicates whether server-side batched operations are enabled.\n auto_delete_on_idle:\n description:\n - ISO 8061 timeSpan idle interval after which the topic is automatically deleted. The minimum duration is 5 minutes.\n forward_to:\n description:\n - Queue/Topic name to forward the messages\n forward_dead_lettered_messages_to:\n description:\n - Queue/Topic name to forward the Dead Letter message\n state:\n description:\n - Assert the state of the Subscription.\n - Use 'present' to create or update an Subscription and 'absent' to delete it.\n default: present\n choices:\n - absent\n - present\n\nextends_documentation_fragment:\n - azure\n\nauthor:\n - \"Zim Kalinowski (@zikalino)\"\n\n'''\n\nEXAMPLES = '''\n - name: Create (or update) Subscription\n azure_rm_servicebussubscription:\n resource_group: ResourceGroup\n namespace_name: sdk-Namespace-1349\n topic_name: sdk-Topics-8740\n name: sdk-Subscriptions-2178\n enable_batched_operations: True\n'''\n\nRETURN = '''\nid:\n description:\n - Resource Id\n returned: always\n type: str\n sample: \"/subscriptions/Subscriptionid/resourceGroups/ResourceGroup/providers/Microsoft.ServiceBus/namespaces/sdk-Namespace-1349/topics/sdk-Topics-8740/s\n ubscriptions/sdk-Subscriptions-2178\"\nstatus:\n description:\n - \"Enumerates the possible values for the status of a messaging entity. 
Possible values include: 'Active', 'Disabled', 'Restoring', 'SendDisabled',\n 'ReceiveDisabled', 'Creating', 'Deleting', 'Renaming', 'Unknown'\"\n returned: always\n type: str\n sample: Active\n'''\n\nimport time\nfrom ansible.module_utils.azure_rm_common import AzureRMModuleBase\nfrom ansible.module_utils.common.dict_transformations import _snake_to_camel\n\ntry:\n from msrestazure.azure_exceptions import CloudError\n from msrest.polling import LROPoller\n from msrestazure.azure_operation import AzureOperationPoller\n from azure.mgmt.servicebus import ServiceBusManagementClient\n from msrest.serialization import Model\nexcept ImportError:\n # This is handled in azure_rm_common\n pass\n\n\nclass Actions:\n NoAction, Create, Update, Delete = range(4)\n\n\nclass AzureRMSubscription(AzureRMModuleBase):\n \"\"\"Configuration class for an Azure RM Subscription resource\"\"\"\n\n def __init__(self):\n self.module_arg_spec = dict(\n resource_group=dict(\n type='str',\n required=True\n ),\n namespace_name=dict(\n type='str',\n required=True\n ),\n topic_name=dict(\n type='str',\n required=True\n ),\n name=dict(\n type='str',\n required=True\n ),\n lock_duration=dict(\n type='str'\n ),\n requires_session=dict(\n type='str'\n ),\n default_message_time_to_live=dict(\n type='str'\n ),\n dead_lettering_on_filter_evaluation_exceptions=dict(\n type='str'\n ),\n dead_lettering_on_message_expiration=dict(\n type='str'\n ),\n duplicate_detection_history_time_window=dict(\n type='str'\n ),\n max_delivery_count=dict(\n type='int'\n ),\n status=dict(\n type='str',\n choices=['active',\n 'disabled',\n 'restoring',\n 'send_disabled',\n 'receive_disabled',\n 'creating',\n 'deleting',\n 'renaming',\n 'unknown']\n ),\n enable_batched_operations=dict(\n type='str'\n ),\n auto_delete_on_idle=dict(\n type='str'\n ),\n forward_to=dict(\n type='str'\n ),\n forward_dead_lettered_messages_to=dict(\n type='str'\n ),\n state=dict(\n type='str',\n default='present',\n choices=['present', 'absent']\n )\n )\n\n self.resource_group = None\n self.namespace_name = None\n self.topic_name = None\n self.name = None\n self.parameters = dict()\n\n self.results = dict(changed=False)\n self.mgmt_client = None\n self.state = None\n self.to_do = Actions.NoAction\n\n super(AzureRMSubscription, self).__init__(derived_arg_spec=self.module_arg_spec,\n supports_check_mode=True,\n supports_tags=False)\n\n def exec_module(self, **kwargs):\n \"\"\"Main module execution method\"\"\"\n\n for key in list(self.module_arg_spec.keys()):\n if hasattr(self, key):\n setattr(self, key, kwargs[key])\n elif kwargs[key] is not None:\n self.parameters[key] = kwargs[key]\n\n dict_camelize(self.parameters, ['status'], True)\n\n response = None\n\n self.mgmt_client = self.get_mgmt_svc_client(ServiceBusManagementClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n\n resource_group = self.get_resource_group(self.resource_group)\n\n old_response = self.get_subscription()\n\n if not old_response:\n self.log(\"Subscription instance doesn't exist\")\n if self.state == 'absent':\n self.log(\"Old instance didn't exist\")\n else:\n self.to_do = Actions.Create\n else:\n self.log(\"Subscription instance already exists\")\n if self.state == 'absent':\n self.to_do = Actions.Delete\n elif self.state == 'present':\n if (not default_compare(self.parameters, old_response, '', self.results)):\n self.to_do = Actions.Update\n\n if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):\n self.log(\"Need to Create / Update the Subscription 
instance\")\n\n if self.check_mode:\n self.results['changed'] = True\n return self.results\n\n response = self.create_update_subscription()\n\n self.results['changed'] = True\n self.log(\"Creation / Update done\")\n elif self.to_do == Actions.Delete:\n self.log(\"Subscription instance deleted\")\n self.results['changed'] = True\n\n if self.check_mode:\n return self.results\n\n self.delete_subscription()\n # This currently doesnt' work as there is a bug in SDK / Service\n if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):\n response = self.get_poller_result(response)\n else:\n self.log(\"Subscription instance unchanged\")\n self.results['changed'] = False\n response = old_response\n\n if self.state == 'present':\n self.results.update({\n 'id': response.get('id', None),\n 'status': response.get('status', None)\n })\n return self.results\n\n def create_update_subscription(self):\n '''\n Creates or updates Subscription with the specified configuration.\n\n :return: deserialized Subscription instance state dictionary\n '''\n self.log(\"Creating / Updating the Subscription instance {0}\".format(self.name))\n\n try:\n response = self.mgmt_client.subscriptions.create_or_update(resource_group_name=self.resource_group,\n namespace_name=self.namespace_name,\n topic_name=self.topic_name,\n subscription_name=self.name,\n parameters=self.parameters)\n if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):\n response = self.get_poller_result(response)\n\n except CloudError as exc:\n self.log('Error attempting to create the Subscription instance.')\n self.fail(\"Error creating the Subscription instance: {0}\".format(str(exc)))\n return response.as_dict()\n\n def delete_subscription(self):\n '''\n Deletes specified Subscription instance in the specified subscription and resource group.\n\n :return: True\n '''\n self.log(\"Deleting the Subscription instance {0}\".format(self.name))\n try:\n response = self.mgmt_client.subscriptions.delete(resource_group_name=self.resource_group,\n namespace_name=self.namespace_name,\n topic_name=self.topic_name,\n subscription_name=self.name)\n except CloudError as e:\n self.log('Error attempting to delete the Subscription instance.')\n self.fail(\"Error deleting the Subscription instance: {0}\".format(str(e)))\n\n return True\n\n def get_subscription(self):\n '''\n Gets the properties of the specified Subscription.\n\n :return: deserialized Subscription instance state dictionary\n '''\n self.log(\"Checking if the Subscription instance {0} is present\".format(self.name))\n found = False\n try:\n response = self.mgmt_client.subscriptions.get(resource_group_name=self.resource_group,\n namespace_name=self.namespace_name,\n topic_name=self.topic_name,\n subscription_name=self.name)\n found = True\n self.log(\"Response : {0}\".format(response))\n self.log(\"Subscription instance : {0} found\".format(response.name))\n except CloudError as e:\n self.log('Did not find the Subscription instance.')\n if found is True:\n return response.as_dict()\n\n return False\n\n\ndef default_compare(new, old, path, result):\n if new is None:\n return True\n elif isinstance(new, dict):\n if not isinstance(old, dict):\n result['compare'] = 'changed [' + path + '] old dict is null'\n return False\n for k in new.keys():\n if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):\n return False\n return True\n elif isinstance(new, list):\n if not isinstance(old, list) or len(new) != len(old):\n result['compare'] = 'changed 
[' + path + '] length is different or null'\n return False\n if isinstance(old[0], dict):\n key = None\n if 'id' in old[0] and 'id' in new[0]:\n key = 'id'\n elif 'name' in old[0] and 'name' in new[0]:\n key = 'name'\n else:\n key = list(old[0])[0]\n new = sorted(new, key=lambda x: x.get(key, None))\n old = sorted(old, key=lambda x: x.get(key, None))\n else:\n new = sorted(new)\n old = sorted(old)\n for i in range(len(new)):\n if not default_compare(new[i], old[i], path + '/*', result):\n return False\n return True\n else:\n if path == '/location':\n new = new.replace(' ', '').lower()\n old = new.replace(' ', '').lower()\n if new == old:\n return True\n else:\n result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)\n return False\n\n\ndef dict_camelize(d, path, camelize_first):\n if isinstance(d, list):\n for i in range(len(d)):\n dict_camelize(d[i], path, camelize_first)\n elif isinstance(d, dict):\n if len(path) == 1:\n old_value = d.get(path[0], None)\n if old_value is not None:\n d[path[0]] = _snake_to_camel(old_value, camelize_first)\n else:\n sd = d.get(path[0], None)\n if sd is not None:\n dict_camelize(sd, path[1:], camelize_first)\n\n\ndef main():\n \"\"\"Main execution\"\"\"\n AzureRMSubscription()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"library/azure_rm_servicebussubscription.py","file_name":"azure_rm_servicebussubscription.py","file_ext":"py","file_size_in_byte":15489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"50428092","text":"from typing import Dict\n\nfrom cloudrail.knowledge.context.aws.resources.ec2.security_group import SecurityGroup\n\nfrom cloudrail.knowledge.context.aws.cloudformation.cloudformation_constants import CloudformationResourceType\nfrom cloudrail.knowledge.context.aws.resources_builders.cloudformation.base_cloudformation_builder import BaseCloudformationBuilder\n\n\nclass CloudformationSecurityGroupBuilder(BaseCloudformationBuilder):\n\n def __init__(self, cfn_by_type_map: Dict[CloudformationResourceType, Dict[str, Dict]]) -> None:\n super().__init__(CloudformationResourceType.SECURITY_GROUP, cfn_by_type_map)\n\n # See 'Ref' doc: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group.html\n def parse_resource(self, cfn_res_attr: dict) -> SecurityGroup:\n properties: dict = cfn_res_attr['Properties']\n vpc_id = self.get_property(properties, 'VpcId')\n name = (self.get_property(properties, 'GroupName', self.get_name_tag(properties)\n or self.create_random_pseudo_identifier())) \\\n if vpc_id else self.get_resource_id(cfn_res_attr)\n security_group_id = self.get_resource_id(cfn_res_attr) if vpc_id else name\n security_group: SecurityGroup = SecurityGroup(security_group_id=security_group_id,\n region=cfn_res_attr['region'],\n account=cfn_res_attr['account_id'],\n name=name,\n vpc_id=vpc_id,\n is_default=False,\n has_description=bool(self.get_property(properties, 'GroupDescription')))\n\n return security_group\n","sub_path":"cloudrail/knowledge/context/aws/resources_builders/cloudformation/ec2/cloudformation_security_group_builder.py","file_name":"cloudformation_security_group_builder.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"555977894","text":"# vim:fileencoding=utf-8:ai:ts=4:sts:et:sw=4:tw=80:\nimport os, re, glob, shlex, subprocess, hashlib\nfrom pyramid.interfaces import IRendererFactory\nfrom pyramid.path import 
AssetResolver\nfrom pyramid.renderers import render\n\nclass Mutator(object):\n \"\"\"\n Mutator class for the pyramid_assetmutator add-on.\n \"\"\"\n def __init__(self, request, path, **kw):\n \"\"\"\n Initialize the Mutator class.\n\n Required parameters:\n\n :type request: request\n :param request: The Pyramid application's current ``request``.\n\n :type path: string\n :param path: The Pyramid ``asset path``.\n\n Optional keyword parameters:\n\n :type mutator: dict or string\n :param mutator: Allows you to either specify a specific mutator to\n use (e.g. ``coffee``), or assign a brand new\n mutator dictionary to be used (e.g.\n ``{'cmd': 'lessc', 'ext': 'css'}``)\n\n :type settings: dict\n :param settings: Explicitly pass your own settings dict, rather than\n getting the settings from ``request.registry`` (usually\n only used in combination with batch processing).\n\n :type registry: registry\n :param registry: Explicitly pass your own Pyramid ``registry`` (usually\n only used in combination with batch processing).\n\n :type rendering_val: dict\n :param rendering_val: A dictionary that will be passed to the renderer\n in the event that the path provided matches a\n valid template renderer.\n\n :type batch: bool\n :param batch: Specify that the class should perform batch processing\n rather than request-based processing.\n \"\"\"\n self.request = request\n try:\n self.registry = kw['registry']\n except KeyError:\n self.registry = self.request.registry\n self.settings = kw.get('settings') or self.registry.settings\n self.path = path\n\n self.renderers = [\n key for key in \\\n dict(self.registry.getUtilitiesFor(IRendererFactory)).keys() \\\n if key not in ['json', 'string', '.txt']\n ]\n self.rendering_val = kw.get('rendering_val', {})\n\n self.mutators = self.settings.get('assetmutator.mutators')\n self.prefix = self.settings['assetmutator.asset_prefix']\n self.check_method = self.settings['assetmutator.remutate_check']\n self.mutated_path = self.settings['assetmutator.mutated_path']\n if self.mutated_path and not self.mutated_path.endswith(os.sep):\n self.mutated_path += os.sep\n self.mutator = kw.get('mutator')\n\n if (not self.mutators or not isinstance(self.mutators, dict)) and \\\n not self.mutator:\n raise ValueError('No mutators were found.')\n\n self.batch = kw.get('batch', False)\n self.checksum = None\n self.mtime = None\n self.exists = False\n self.dest_dirpath = None\n self.parse_template = False\n\n if not self.batch:\n self._configure_paths()\n\n @property\n def mutated(self):\n \"\"\"\n Property method to check and see if the initialized asset path has\n already been mutated.\n \"\"\"\n self.exists = self.exists or self._check_exists(self.dest_fullpath)\n\n return self.exists\n\n def _configure_paths(self):\n \"\"\"\n Checks/sets the various path settings needed for mutation.\n \"\"\"\n\n # Parse source path\n self.src_fullpath = self._get_abspath(self.path)\n self.src_dirpath = os.path.dirname(self.src_fullpath)\n\n # Parse dest/mutated path (if specified)\n self.dest_dirpath = self._get_abspath(self.mutated_path or\n self.src_dirpath)\n\n # Setup various path variables\n if self.batch and not os.path.isdir(self.src_dirpath):\n raise EnvironmentError('Directory does not exist: %s' %\n self.src_dirpath)\n else:\n self.src_filename = os.path.basename(self.src_fullpath)\n self.src_name = os.path.splitext(self.src_filename)[0]\n\n if self.mutated_path and \\\n os.path.splitext(self.src_filename)[-1] in self.renderers:\n # This asset uses a template renderer\n 
self.parse_template = True\n self.src_ext = os.path.splitext(self.src_name)[-1][1:]\n self.src_name = os.path.splitext(self.src_name)[0]\n else:\n self.src_ext = os.path.splitext(self.src_filename)[-1][1:]\n\n\n # Get/setup the mutator\n if self.mutator:\n if not isinstance(self.mutator, dict):\n self.mutator = self.mutators.get(self.mutator, {})\n else:\n self.mutator = self.mutators.get(self.src_ext, {})\n\n # Make sure an appropriate mutator is defined\n if not self.mutator.get('cmd') or not self.mutator.get('ext'):\n raise ValueError('No mutator found for %s.' % self.src_ext)\n\n\n # Do various check/path settings\n dest_ext = self.mutator['ext']\n\n if self.check_method == 'exists':\n self.dest_filename = '%s%s.%s' % (self.prefix, self.src_name,\n dest_ext)\n elif self.check_method == 'checksum':\n self.checksum = self.checksum or \\\n self._compute_checksum(self.src_fullpath)\n self.dest_filename = '%s%s.%s.%s' % (self.prefix, self.src_name,\n self.checksum, dest_ext)\n else: # self.check_method == 'mtime'\n self.mtime = self.mtime or self._get_mtime(self.src_fullpath)\n self.dest_filename = '%s%s.%s.%s' % (self.prefix, self.src_name,\n self.mtime, dest_ext)\n\n # Set the full destination/output path\n self.dest_fullpath = os.path.join(\n self.dest_dirpath,\n self.dest_filename\n )\n\n # Set the new assetpath to be returned to the template\n if not self.batch:\n if self.mutated_path:\n self.new_path = self.mutated_path + self.dest_filename\n else:\n self.new_path = re.sub(r'%s$' % self.src_filename,\n self.dest_filename,\n self.path)\n\n def _get_abspath(self, path):\n \"\"\"\n Convenience method to compute the absolute path from an assetpath.\n \"\"\"\n resolver = AssetResolver()\n\n if not os.path.isabs(path):\n # Try to resolve the asset full path\n path = resolver.resolve(path).abspath()\n\n return path\n\n def _compute_checksum(self, path):\n \"\"\"\n Convenience method to compute the source's checksum for the mutated\n asset.\n \"\"\"\n md5 = hashlib.md5()\n\n # Loop the file, adding chunks to the MD5 generator\n with open(path, 'rb') as f:\n for chunk in iter(lambda: f.read(128*md5.block_size), b''):\n md5.update(chunk)\n # Finally, add the mtime\n md5.update(str(os.path.getmtime(path)).encode('utf-8'))\n\n # Get the first 12 characters of the hexdigest\n self.checksum = md5.hexdigest()[:12]\n\n return self.checksum\n\n def _get_mtime(self, path):\n \"\"\"\n Convenience method for getting the source's mtime for the mutated asset.\n \"\"\"\n return os.path.getmtime(path)\n\n def _check_exists(self, path):\n \"\"\"\n Convenience method to check if a file already exists.\n \"\"\"\n if os.path.exists(path):\n return True\n else:\n return False\n\n def _process_template(self, source):\n \"\"\"\n Renders a file using the specified renderer and returns the new source\n filename to use for the mutator.\n \"\"\"\n self.src_filename = self.prefix + os.path.splitext(self.src_filename)[0]\n self.src_fullpath = os.path.join(self.dest_dirpath, self.src_filename)\n self.prefix = ''\n\n data = render(source, self.rendering_val, request=self.request)\n\n with open(self.src_fullpath, 'w') as f:\n f.write(data)\n\n def _run_mutator(self):\n \"\"\"\n Runs the mutator for the initialized asset.\n \"\"\"\n cmd = '%s %s' % (self.mutator['cmd'], self.src_fullpath)\n\n proc = subprocess.Popen(\n shlex.split(cmd,posix=False),\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n data, err = proc.communicate()\n\n if proc.returncode != 0 or err:\n raise 
EnvironmentError('%s\\n\\n%s' % (err, data))\n else:\n new_dirname = os.path.normpath(os.path.dirname(self.dest_fullpath))\n\n if not os.path.exists(new_dirname):\n os.makedirs(new_dirname)\n\n with open(self.dest_fullpath, 'wb') as f:\n f.write(data)\n\n def mutate(self):\n \"\"\"\n Mutate the asset(s).\n \"\"\"\n if self.batch == True:\n batch_path = self._get_abspath(self.path)\n\n for ext, config in self.mutators.items():\n for asset in glob.glob(os.path.join(batch_path, '*.%s' % ext)):\n self.path = asset\n self._configure_paths()\n self._run_mutator()\n else:\n if not self.exists:\n if self.parse_template:\n self._process_template(self.path)\n\n self._run_mutator()\n self.exists = True\n\n return self.new_path\n\n def mutated_data(self):\n \"\"\"\n Return the mutated source of the initialized asset.\n \"\"\"\n if not self.exists:\n raise ValueError('Source not found. Has it been mutated?')\n\n with open(self.dest_fullpath) as f:\n data = f.read()\n\n return data\n","sub_path":"pyramid_assetmutator/mutator.py","file_name":"mutator.py","file_ext":"py","file_size_in_byte":10342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"444332520","text":"# -*- coding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\nSTOP_RENDERING = runtime.STOP_RENDERING\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1555046240.9486177\n_enable_loop = True\n_template_filename = 'C:/Users/Isaac/intexsite/portal/templates/search.html'\n_template_uri = 'search.html'\n_source_encoding = 'utf-8'\nimport django_mako_plus\nimport django.utils.html\n_exports = ['page_title', 'left_content', 'site_content', 'right_content']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'app_base.htm', _template_uri)\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n def site_content():\n return render_site_content(context._locals(__M_locals))\n self = context.get('self', UNDEFINED)\n def left_content():\n return render_left_content(context._locals(__M_locals))\n form2 = context.get('form2', UNDEFINED)\n request = context.get('request', UNDEFINED)\n def right_content():\n return render_right_content(context._locals(__M_locals))\n form = context.get('form', UNDEFINED)\n msg = context.get('msg', UNDEFINED)\n def page_title():\n return render_page_title(context._locals(__M_locals))\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'page_title'):\n context['self'].page_title(**pageargs)\n \n\n __M_writer('\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'left_content'):\n context['self'].left_content(**pageargs)\n \n\n __M_writer('\\r\\n\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'site_content'):\n context['self'].site_content(**pageargs)\n \n\n __M_writer('\\r\\n\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'right_content'):\n context['self'].right_content(**pageargs)\n \n\n return ''\n finally:\n 
context.caller_stack._pop_frame()\n\n\ndef render_page_title(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def page_title():\n return render_page_title(context)\n __M_writer = context.writer()\n __M_writer('— Search')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_left_content(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def left_content():\n return render_left_content(context)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_site_content(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def site_content():\n return render_site_content(context)\n self = context.get('self', UNDEFINED)\n form2 = context.get('form2', UNDEFINED)\n request = context.get('request', UNDEFINED)\n form = context.get('form', UNDEFINED)\n msg = context.get('msg', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\r\\n')\n if request.user.has_perm('account.search') or request.user.has_perm('account.safesearch'):\n __M_writer('
    \\r\\n    Search    \\r\\n    ')\n            __M_writer(django_mako_plus.ExpressionPostProcessor(self)(msg))\n            __M_writer('    \\r\\n    \\r\\n                \\r\\n                    \\r\\n                        \\r\\n                            \\r\\n                                \\r\\n                            \\r\\n                        \\r\\n                        \\r\\n                            \\r\\n                                \\r\\n                            \\r\\n                                ')\n            __M_writer(django_mako_plus.ExpressionPostProcessor(self)( form ))\n            __M_writer('\\r\\n    \\r\\n    \\r\\n                                \\r\\n                                ')\n            __M_writer(django_mako_plus.ExpressionPostProcessor(self)(form2))\n            __M_writer('\\r\\n    \\r\\n    \\r\\n    \\r\\n                            \\r\\n    \\r\\n    \\r\\n
    \\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_right_content(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def right_content():\n return render_right_content(context)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"filename\": \"C:/Users/Isaac/intexsite/portal/templates/search.html\", \"uri\": \"search.html\", \"source_encoding\": \"utf-8\", \"line_map\": {\"29\": 0, \"47\": 1, \"52\": 3, \"57\": 6, \"62\": 39, \"72\": 3, \"78\": 3, \"84\": 4, \"90\": 4, \"96\": 8, \"107\": 8, \"108\": 9, \"109\": 10, \"110\": 12, \"111\": 12, \"112\": 19, \"113\": 19, \"114\": 26, \"115\": 26, \"121\": 41, \"127\": 41, \"133\": 127}}\n__M_END_METADATA\n\"\"\"\n","sub_path":"portal/templates/__dmpcache__/search.html.py","file_name":"search.html.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"196585914","text":"print(\"This Program is for generating a time update in html file to append from this script\")\nimport datetime\nimport os\nimport webbrowser\n\"\"\"\nlast_updated = datetime.datetime.now()\nx = f'Last Updated on :{last_updated}'\nwith open(\"file.txt\", 'a') as file:\n file.write(x)\n file.close()\n\"\"\"\nf = open('file.txt','r+')\nx = datetime.datetime.now()\nlines = f.readlines() # read old content\nf.seek(0) # go back to the beginning of the file\nf.write(str(x)) # write new content at the beginning\nfor line in lines: # write old content after new\n f.write(line)\n f.close()","sub_path":"date generatot.py","file_name":"date generatot.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"38415157","text":"import sys\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.pdfdocument import PDFDocument\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.pdfparser import PDFParser\n\ndef read_body(f):\n parser = PDFParser(f)\n document = PDFDocument(parser)\n if not document.is_extractable:\n print(f'このPDF文書はテキスト抽出できません')\n return\n\n resource_manager = PDFResourceManager()\n device = TextConverter(resource_manager, sys.stdout, codec='utf-8')\n interpreter = PDFPageInterpreter(resource_manager, device)\n for i, page in enumerate(PDFPage.create_pages(document), 1):\n print(f\"ページ: {i} {'=' * 32}\")\n interpreter.process_page(page)\n print()\n\n device.close()\n\ndef usage():\n if len(sys.argv) < 2:\n print(f'{__file__} の後にPDFファイルを指定してください')\n sys.exit(0)\n\ndef main():\n with open(sys.argv[1], 'rb') as f:\n read_body(f)\n\nif __name__ == '__main__':\n usage()\n main()\n","sub_path":"hino system(自然言語処理)/1_テキストデータ収集/参考まで/read_pdf_body.py","file_name":"read_pdf_body.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"31231531","text":"import docker\n\n\ndocker_file_path = '/home/ubuntu/vaibhav'\ntag_name = 'vaibhavkholase/vaibhavtest'\ncontainer_name= 'mypytest'\nclient = docker.from_env()\n\nprint(\"Start Building your docker image...\")\n##### Building an Image form given DockerFile##########\nclient.images.build(path=docker_file_path,tag=tag_name)\n\nimage = client.images.get(tag_name)\nprint(image.short_id)\n\nprint(\"pushing 
image...\")\n\naauth_config ={\n 'username':'vaibhavkholase',\n 'password':'devops123'\n }\n\n###using authentication pushing docker image\nclient.images.push('vaibhavkholase/vaibhavtest', tag='latest', auth_config=aauth_config)\nprint(\"pushing of an image is done.\")\n# Check if image is already present at local server\nif \"docker image inspect vaibhavkholase/vaibhavtest >/dev/null 2>&1 && echo yes || echo NO = 'yes'\":\n print(\"Docker image is already exist\")\n\nelse: \n print(\"pulling imgae to local machine\")\n client.images.pull('vaibhavkholase/vaibhavtest')\n\n\nprint(\"Now running new container from the local image\")\nclient.containers.run('vaibhavkholase/vaibhavtest',detach=True,ports={'80/tcp' : 80})\nprint(\"'Congrtas', you have successfully deployed your python web application\")\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"418594235","text":"def Ques9(n):\n if(n>9):\n Rev_num = 0\n while(n>0):\n remainder = n%10\n Rev_num = (Rev_num*10) + remainder\n n = n //10\n return Rev_num\n print(\"Not a two digit number\")\n\nnum = int(input(\"Enter your number: \"))\nif (num>9):\n print(\"Reverse of \",num,\"is: \", Ques9(num))\nelse:\n print(\"Enter 2 digit number\")\n ","sub_path":"Ques8.py","file_name":"Ques8.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"604586347","text":"\"\"\"\n4. Employee Class\n Write a class named Employee that holds the following data about an employee in attributes: \n name, ID number, department, and job title.\n Once you have written the class, write a program that creates three Employee objects to\n hold the following data:\n​\nName ID Number Department Job Title\nSusan Meyers 47899 Accounting Vice President\nMark Jones 39119 IT Programmer\nJoy Rogers 81774 Manufacturing Engineer\n​\nThe program should store this data in the three objects and then display the data for each\nemployee on the screen.\n\"\"\"\n\n# Employee class\nclass Employee:\n # __init__ creates an Employee object with Name,\n # ID_number, department, and job title attributes\n def __init__(self, name, id_num, dept, title):\n self.__name = name\n self.__id_number = id_num\n self.__department = dept\n self.__job_title = title\n \n # __str__ method displays the values in each attribute\n # for the Employee object\n def __str__(self):\n return f'Employee Name: {self.__name}\\n' \\\n f'Employee ID Number: {self.__id_number}\\n' \\\n f'Employee Department: {self.__department}\\n' \\\n f'Employee Job Title: {self.__job_title}'\n\n def set_name(self, name):\n self.__name = name\n \n def set_id_number(self, id_num):\n self.__id_number = id_num\n \n def set_department(self, dept):\n self.__department = dept\n \n def set_job_title(self, title):\n self.__job_title = title\n \n def get_name(self):\n return self.__name\n \n def get_id_number(self):\n return self.__id_number\n \n def get_department(self):\n return self.__department\n\n def get_job_title(self):\n return self.__job_title\n\n# main function creates 3 Employee objects from a dictionary,\n# then displays their data\ndef main():\n # list holding each employee\n employee_list = []\n \n # dictionary holding all employee data\n employee_data = {\n 1 : {\n 'name' : 'Susan Meyers',\n 'id_num' : 47899,\n 'dept' : 'Accounting',\n 'title' : 'Vice President'\n },\n 2 : {\n 'name' : 'Mark Jones',\n 
'id_num' : 39119,\n 'dept' : 'IT',\n 'title' : 'Programmer'\n },\n 3 : {\n 'name' : 'Joy Rogers',\n 'id_num' : 81774,\n 'dept' : 'Manufacturing',\n 'title' : 'Engineer'\n }\n }\n\n # loop through each dictionary of employee data\n # and create Employee objects\n for i in range(1,4):\n employee = Employee(\n employee_data[i]['name'],\n employee_data[i]['id_num'],\n employee_data[i]['dept'],\n employee_data[i]['title']\n )\n\n # add employee to employee_list\n employee_list.append(employee)\n \n # loop through each employee in employee_list\n # and display all of their data\n for emp in employee_list:\n print(emp)\n print() # print extra blank line for readability\n\n# call main function\n#main()","sub_path":"Class_Practice_Exercises/Classes/Classes_Exercises_4.py","file_name":"Classes_Exercises_4.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"302788491","text":"import requests\nfrom nltk.corpus import stopwords\nimport re\nimport nltk\nimport newspaper\nfrom nltk.stem import SnowballStemmer\nfrom html2text import html2text\n\n\nclass WebService:\n\n def __init__(self):\n pass\n\n async def tokenize_sentence(self, data):\n \"\"\"\n :criteria: expects a dictionary of this structure:\n \"\"\"\n tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n html = tokenizer.tokenize(data)\n sentences = []\n for data in html:\n sentence_data = dict()\n sentence_data['html'] = data\n sentence_data['text'] = html2text(data)\n sentence_data['ml_techniques_found'] = []\n sentence_data['reg_techniques_found'] = []\n sentences.append(sentence_data)\n return sentences\n\n async def tokenize(self, s):\n \"\"\"Function to remove stopwords from a sentence and return a list of words to match\"\"\"\n word_list = re.findall(r'\\w+', s.lower())\n filtered_words = [word for word in word_list if word not in stopwords.words('english')]\n \"\"\"Perform NLP Lemmatization and Stemming methods\"\"\"\n lemmed = []\n stemmer = SnowballStemmer('english')\n for i in filtered_words:\n lemmed.append(stemmer.stem(str(i)))\n return ' '.join(lemmed)\n\n @classmethod\n async def remove_html_markup_and_found(self, s):\n tag = False\n quote = False\n out = \"\"\n for c in s:\n if c == '<' and not quote:\n tag = True\n elif c == '>' and not quote:\n tag = False\n elif (c == '\"' or c == \"'\") and tag:\n quote = not quote\n elif not tag:\n out = out + c\n sep = '!FOUND:'\n out = out.split(sep, 1)[0]\n return out\n\n async def get_url(self, url, returned_format=None):\n if returned_format == 'html':\n print('[!] HTML support is being refactored. Currently data is being returned plaintext')\n r = requests.get(url)\n\n b = newspaper.fulltext(r.text)\n if b:\n text = str(b).replace('\\n', '
    ')\n print(type(text))\n return (text)\n else:\n return (None)\n\n async def get_url_old(self, url, returned_format='html'):\n \"\"\"Function to download a webpage and return article title and content\"\"\"\n if returned_format == 'html':\n article = newspaper.Article(url, keep_article_html=True)\n article.download()\n article.parse()\n data = article.article_html\n\n return data\n\n","sub_path":"service/web_svc.py","file_name":"web_svc.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"33003929","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Read data from file\ndata = pd.read_csv('data.csv')\n\n# Separate data into features X = [area, dist, indilujo] and target Y = [arriendo]\nX = np.asarray(data.drop('arriendo', axis=1))\nY = data['arriendo']\n\n# Add column of 1's to all features in X\nheight, width = X.shape\nX = np.c_[np.ones(height), X]\n\n# Get the inverse of the matrix multiplication of X transposed and X\ninner = np.linalg.inv(np.matmul(X.T, X))\n\n# Matrix multiplication of last result and X transposed\n# This result is then matrix multiplied by target Y to get the parameters theta\n# for lineal regression\ntheta = np.matmul(np.matmul(inner, X.T), Y)\n\nfor i, param in enumerate(theta, 1):\n print(f\"Theta_{i}: {param}\")\n \npredicted_values = []\nfor a in X:\n val = np.sum(a*theta)\n predicted_values.append(val)\n \nplt.plot(data['arriendo'], label='Valor real')\nplt.plot(predicted_values, label='Predicción')\nplt.ylabel('Arriendo')\nplt.legend()\nplt.show()\n","sub_path":"class/machine_learning/01_linear_regression/linear_reg.py","file_name":"linear_reg.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"88583692","text":"import media\nimport fresh_tomatoes\n\n#create three instances of movies\n\nstar_wars = media.Movie()\nstar_wars.title = \"Star Wars\"\nstar_wars.poster_image_url = \"https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSJNBYh89iqm_FNgSPxCZmC_rvBjqzAdDJNW-8WW5pkBreSeuf2XBspNXw\"\nstar_wars.trailer_youtube_url = \"https://www.youtube.com/watch?v=wCc2v7izk8w\"\nstar_wars.actors = [\"Ewan McGregor\", \"Natalie Portman\", \"Hayden Christensen\"]\nstar_wars.rating = 5\n\nstar_trek = media.Movie()\nstar_trek.title = \"Star Trek\"\nstar_trek.poster_image_url = \"http://resizing.flixster.com/neW3_o00gSLTgw31Sf5WF4OT7Qc=/180x270/dkpu1ddg7pbsk.cloudfront.net/movie/11/17/38/11173843_ori.jpg\"\nstar_trek.trailer_youtube_url = \"https://www.youtube.com/watch?v=iGAHnZ555nI\"\nstar_trek.actors = [\"Chris Pine\", \"Zachary Quinto\", \"Ben Cross\"]\nstar_trek.rating = 4\n\nmadmax = media.Movie()\nmadmax.title = \"Max Max\"\nmadmax.poster_image_url = \"http://resizing.flixster.com/GbDqFVUc_9VBNAnanZVQxlYD0ZM=/180x267/dkpu1ddg7pbsk.cloudfront.net/movie/11/19/12/11191276_ori.jpg\"\nmadmax.trailer_youtube_url = \"https://www.youtube.com/watch?v=hEJnMQG9ev8\"\nmadmax.actors = [\"Tom Hardy\", \"Charlize Theron\", \"Nicholas Hoult\"]\nmadmax.rating = 5\n\n#Run fresh_tomatoes function that will generate the web page\n\nfresh_tomatoes.open_movies_page([star_wars, star_trek, madmax])","sub_path":"movie-trailer/entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"452212923","text":"import 
re\n\nfrom django import urls as django_urls\nfrom django.conf import settings\nfrom django.urls import LocalePrefixPattern, URLPattern, URLResolver\nfrom django.urls.resolvers import RegexPattern, RoutePattern\nfrom django.utils import translation\nfrom django.utils.functional import Promise\n\nfrom django_urlconf_export import language_utils\n\n\ndef _get_url_languages(language_without_country):\n \"\"\"\n Some websites only translate urls once for each language family e.g. \"en\",\n not each language + country combination e.g. \"en-gb\" and \"en-us\".\n\n :param language_without_country: boolean\n Should translated URLs be keyed by e.g. \"en\" rather than \"en-gb\" and \"en-us\"?\n :return: language code e.g. \"en\" or \"en-us\"\n \"\"\"\n if language_without_country:\n return {language_utils.get_without_country(language) for language, _ in settings.LANGUAGES}\n else:\n return {language for language, _ in settings.LANGUAGES}\n\n\ndef _get_regex_pattern(url_pattern, language_without_country):\n \"\"\"\n Export data from a Django URLPattern as JSON\n\n :param url_pattern: URLPattern\n :param language_without_country:\n :return: tuple(string, string or None)\n pattern_type - 'route', 'regex' or 'prefix'\n pattern_regex - string or None\n \"\"\"\n if isinstance(url_pattern, LocalePrefixPattern):\n return \"prefix\", None\n elif isinstance(url_pattern, RegexPattern):\n pattern_type = \"regex\"\n pattern_regex = url_pattern._regex\n elif isinstance(url_pattern, RoutePattern):\n pattern_type = \"route\"\n pattern_regex = url_pattern._route\n else:\n raise ValueError(f\"Invalid URL Pattern type: {url_pattern}\")\n\n if isinstance(pattern_regex, Promise):\n language_regexes = {}\n for lang in _get_url_languages(language_without_country):\n with translation.override(lang):\n language_regexes[lang] = str(pattern_regex)\n return pattern_type, language_regexes\n else:\n return pattern_type, pattern_regex\n\n\ndef _is_allowed(name, whitelist, blacklist):\n \"\"\"\n Check if this url (or url namespace) is allowed to be exported.\n\n :param name: url name OR included urls namespace\n :param whitelist: list of strings; url_names and namespaces, allowed to be exported.\n :param blacklist: list of strings; url_names and namespaces, not allowed to be exported.\n :return: boolean - is this url or namespace allowed to be exported?\n \"\"\"\n if not whitelist and not blacklist:\n return True\n\n if blacklist and not whitelist:\n return not any(re.match(pattern, name) for pattern in blacklist)\n\n if whitelist and not blacklist:\n return any(re.match(pattern, name) for pattern in whitelist)\n\n if whitelist and blacklist:\n for whitelisted_pattern in whitelist:\n if re.match(whitelisted_pattern, name):\n # It's whitelisted. Check it's not blacklisted.\n if not any(\n re.match(blacklisted_pattern, name) for blacklisted_pattern in blacklist\n ):\n return True\n return False\n\n\ndef _get_json_urlpatterns(resolver, whitelist=None, blacklist=None, language_without_country=False):\n \"\"\"\n Export URLconf data from a Django URLResolver, as list of JSON dictionaries\n\n :param resolver: URLResolver - resolver to export URLconf data from\n :param whitelist: list of strings; url_names and namespaces, allowed to be exported.\n :param blacklist: list of strings; url_names and namespaces, not allowed to be exported.\n :param language_without_country: boolean\n Should translated URLs be keyed by e.g. 
\"en\" rather than \"en-gb\" and \"en-us\"?\n :return: list of JSON URLconf dicts\n \"\"\"\n json_urlpatterns = []\n for django_url in resolver.url_patterns:\n json_url = {}\n\n # Example values:\n # pattern_type | pattern_regex\n # ----------------------------\n # 'route' | '/home/'\n # 'regex' | '^/home/$'\n # 'prefix' | None\n pattern_type, pattern_regex = _get_regex_pattern(\n django_url.pattern, language_without_country\n )\n if pattern_type in [\"route\", \"regex\"]:\n json_url[pattern_type] = pattern_regex\n\n if isinstance(django_url, URLResolver):\n includes = _get_json_urlpatterns(\n django_url, whitelist, blacklist, language_without_country\n )\n # If no live urls are included,\n # skip this URLResolver in the json\n if not includes:\n continue\n json_url[\"includes\"] = includes\n if isinstance(django_url.pattern, LocalePrefixPattern):\n json_url[\"isLocalePrefix\"] = True\n # classPath = \"package.subpackage.ClassName\"\n json_url[\"classPath\"] = \".\".join(\n [\n django_url.pattern.__class__.__module__,\n django_url.pattern.__class__.__qualname__,\n ]\n )\n else:\n # If a namespace is set, check it is allowed\n if django_url.namespace and not _is_allowed(\n django_url.namespace, whitelist, blacklist\n ):\n continue\n json_url[\"app_name\"] = django_url.app_name\n json_url[\"namespace\"] = django_url.namespace\n\n elif isinstance(django_url, URLPattern):\n # Ignore urls without a name,\n # they are typically dead or redirecting.\n # Without a name, we cannot reverse the django_url anyway.\n if not django_url.name:\n continue\n # Check this url name is allowed\n if not _is_allowed(django_url.name, whitelist, blacklist):\n continue\n json_url[\"name\"] = django_url.name\n\n json_urlpatterns.append(json_url)\n return json_urlpatterns\n\n\ndef as_json(urlconf=None, whitelist=None, blacklist=None, language_without_country=None):\n \"\"\"\n Export URLconf data from a module, as list of JSON dictionaries.\n\n :param urlconf: string - root module name to export URLconf from\n :param whitelist: list of strings; url_names and namespaces, allowed to be exported.\n :param blacklist: list of strings; url_names and namespaces, not allowed to be exported.\n :param language_without_country: boolean\n Should translated URLs be keyed by e.g. 
\"en\" rather than \"en-gb\" and \"en-us\"?\n :return: list of JSON URLconf dicts\n \"\"\"\n\n if urlconf is None:\n urlconf = getattr(settings, \"URLCONF_EXPORT_ROOT_URLCONF\", settings.ROOT_URLCONF)\n\n if whitelist is None:\n whitelist = getattr(settings, \"URLCONF_EXPORT_WHITELIST\", None)\n\n if blacklist is None:\n blacklist = getattr(settings, \"URLCONF_EXPORT_BLACKLIST\", None)\n\n if language_without_country is None:\n language_without_country = getattr(\n settings, \"URLCONF_EXPORT_LANGUAGE_WITHOUT_COUNTRY\", False\n )\n\n root_resolver = django_urls.get_resolver(urlconf)\n\n return _get_json_urlpatterns(root_resolver, whitelist, blacklist, language_without_country)\n\n\ndef get_all_exported_url_names(json_urlpatterns):\n \"\"\"\n Get all names and namespaces in some URLconf JSON.\n\n :param json_urlpatterns: list of JSON URLconf dicts\n :return: list of strings; url_names and namespaces\n \"\"\"\n url_names = set()\n for url in json_urlpatterns:\n included_urls = url.get(\"includes\")\n if included_urls:\n if url[\"namespace\"] is not None:\n url_names.add(url[\"namespace\"])\n url_names |= get_all_exported_url_names(included_urls)\n else:\n url_names.add(url[\"name\"])\n return url_names\n\n\ndef get_all_allowed_url_names(\n urlconf=None, whitelist=None, blacklist=None, language_without_country=None\n):\n \"\"\"\n Useful to check whitelist and blacklist are working as expected\n\n :param urlconf: string - root module name to export URLconf from\n :param whitelist: list of strings; url_names and namespaces, allowed to be exported.\n :param blacklist: list of strings; url_names and namespaces, not allowed to be exported.\n :param language_without_country: boolean\n Should translated URLs be keyed by e.g. \"en\" rather than \"en-gb\" and \"en-us\"?\n :return: list of strings; url_names and namespaces\n \"\"\"\n json_urlpatterns = as_json(urlconf, whitelist, blacklist, language_without_country)\n return get_all_exported_url_names(json_urlpatterns)\n","sub_path":"src/django_urlconf_export/export_urlconf.py","file_name":"export_urlconf.py","file_ext":"py","file_size_in_byte":8597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"336700081","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom distutils.core import Extension\nimport os\n\nversion_path = os.path.join('mpl_toolkits', 'clifford', '_version.py')\nexec(open(version_path).read())\n\nthis_directory = os.path.dirname(__file__)\nwith open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='mpl_toolkits.clifford',\n version=__version__,\n license='MIT',\n description='Matplotlib tools for clifford',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Eric Wieser',\n packages=find_packages(),\n namespace_packages=['mpl_toolkits'],\n install_requires=[\n 'clifford',\n 'matplotlib',\n 'trimesh',\n ],\n package_dir={'mpl_toolkits': 'mpl_toolkits'},\n\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Mathematics',\n \n 'Framework :: Matplotlib',\n\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n project_urls={\n \"Bug Tracker\": \"https://github.com/pygae/mpl_toolkits.clifford/issues\",\n \"Source Code\": 
\"https://github.com/pygae/mpl_toolkits.clifford\",\n },\n\n python_requires='>=3.5',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"615397846","text":"import numpy as np\nfrom PIL import Image\n\n\nclass PorterDuff(object):\n \"\"\"\n PorterDuff python implementation\n --------------\n References:\n [1] http://graphics.pixar.com/library/Compositing/paper.pdf\n [2] https://en.wikipedia.org/wiki/Alpha_compositing\n [3] https://www.jianshu.com/p/d11892bbe055\n [4] https://blog.csdn.net/IO_Field/article/details/78222527\n [5] https://blog.csdn.net/android_cmos/article/details/78907166\n [6] https://blog.csdn.net/u013085697/article/details/52096703\n \"\"\"\n CLEAR = 0 # [0, 0]\n SRC = 1 # [Sa, Sc]\n DST = 2 # [Da, Dc]\n SRC_OVER = 3 # [Sa + (1 - Sa)*Da, Rc = Sc + (1 - Sa)*Dc]\n DST_OVER = 4 # [Sa + (1 - Sa)*Da, Rc = Dc + (1 - Da)*Sc]\n SRC_IN = 5 # [Sa * Da, Sc * Da]\n DST_IN = 6 # [Sa * Da, Sa * Dc]\n SRC_OUT = 7 # [Sa * (1 - Da), Sc * (1 - Da)]\n DST_OUT = 8 # [Da * (1 - Sa), Dc * (1 - Sa)]\n SRC_ATOP = 9 # [Da, Sc * Da + (1 - Sa) * Dc]\n DST_ATOP = 10 # [Sa, Sa * Dc + Sc * (1 - Da)]\n XOR = 11 # [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + (1 - Sa) * Dc]\n DARKEN = 12 # [Sa + Da - Sa*Da, Sc*(1 - Da) + Dc*(1 - Sa) + min(Sc, Dc)]\n LIGHTEN = 13 # [Sa + Da - Sa*Da, Sc*(1 - Da) + Dc*(1 - Sa) + max(Sc, Dc)]\n MULTIPLY = 14 # [Sa * Da, Sc * Dc]\n SCREEN = 15 # [Sa + Da - Sa * Da, Sc + Dc - Sc * Dc]\n ADD = 16 # Saturate(S + D)\n OVERLAY = 17 # A_out = A_dst + A_scr - A_dst * A_src, C_out = 2 * C_dst * C_src (or other methods) (archive [4])\n\n def __init__(self, source_arr, destination_arr):\n \"\"\"\n # More information of straight (unassociated) alpha, and premultiplied (associated) alpha can be seen at [2]\n :param source_arr: source image numpy array of shape [height, width, channels]\n :param destination_arr: destination numpy array of shape [height, width, channels]\n \"\"\"\n # range from [0, 255](uint8) to [0, 1](float32)\n self._Sa = (source_arr[:, :, -1:] / 255).astype(np.float32) # source alpha\n self._Sc = (source_arr[:, :, :-1] / 255).astype(np.float32) # source color\n self._Da = (destination_arr[:, :, -1:] / 255).astype(np.float32) # destination alpha\n self._Dc = (destination_arr[:, :, :-1] / 255).astype(np.float32) # destination color\n # straight(unassociated) alpha to premultiplied(associated) alpha\n self._Sc = self._Sc * self._Sa # premultiplied (associated) alpha\n self._Dc = self._Dc * self._Da # premultiplied (associated) alpha\n # declare output variables\n self._Oa = None # output alpha\n self._Oc = None # output color\n\n def alpha_composition(self, mode):\n if mode == PorterDuff.CLEAR:\n self._clear_mode()\n elif mode == PorterDuff.SRC:\n self._src_mode()\n elif mode == PorterDuff.DST:\n self._dst_mode()\n elif mode == PorterDuff.SRC_OVER:\n self._src_over_mode()\n elif mode == PorterDuff.DST_OVER:\n self._dst_over_mode()\n elif mode == PorterDuff.SRC_IN:\n self._src_in_mode()\n elif mode == PorterDuff.DST_IN:\n self._dst_in_mode()\n elif mode == PorterDuff.SRC_OUT:\n self._src_out_mode()\n elif mode == PorterDuff.DST_OUT:\n self._dst_out_mode()\n elif mode == PorterDuff.SRC_ATOP:\n self._src_atop_mode()\n elif mode == PorterDuff.DST_ATOP:\n self._dst_atop_mode()\n elif mode == PorterDuff.XOR:\n self._xor_mode()\n elif mode == PorterDuff.DARKEN:\n self._darken_mode()\n elif mode == PorterDuff.LIGHTEN:\n self._lighten_mode()\n 
elif mode == PorterDuff.MULTIPLY:\n            self._multiply_mode()\n        elif mode == PorterDuff.SCREEN:\n            self._screen_mode()\n        elif mode == PorterDuff.ADD:\n            self._add_mode()\n        elif mode == PorterDuff.OVERLAY:\n            self._overlay_mode()\n        else:\n            raise ValueError(\"Not a Valid Mode: {}\".format(mode))\n\n        # premultiplied(associated) alpha to straight(unassociated) alpha\n        # Because in numpy, (np.array(1)/np.array(0.0)).astype(np.uint8) = 0\n        # So, it does not matter if an element in self._Oa is zero. Just don't care about it.\n        self._Oc = self._Oc / self._Oa\n        # range from [0, 1](float32) to [0, 255](uint8)\n        self._Oc = (self._Oc * 255).astype(np.uint8)\n        self._Oa = (self._Oa * 255).astype(np.uint8)\n\n        return np.concatenate([self._Oc, self._Oa], axis=2)\n\n    def _clear_mode(self): # CLEAR = 0 # [0, 0]\n        # CLEAR must output fully transparent black, so both alpha and color are zero\n        self._Oa = np.zeros_like(self._Sa, dtype=np.float32)\n        self._Oc = np.zeros_like(self._Sc, dtype=np.float32)\n\n    def _src_mode(self): # SRC = 1 # [Sa, Sc]\n        self._Oa = self._Sa\n        self._Oc = self._Sc\n\n    def _dst_mode(self): # DST = 2 # [Da, Dc]\n        self._Oa = self._Da\n        self._Oc = self._Dc\n\n    def _src_over_mode(self): # [Sa + (1 - Sa)*Da, Rc = Sc + (1 - Sa)*Dc]\n        self._Oa = self._Sa + (1 - self._Sa) * self._Da\n        self._Oc = self._Sc + (1 - self._Sa) * self._Dc\n\n    def _dst_over_mode(self): # [Sa + (1 - Sa)*Da, Rc = Dc + (1 - Da)*Sc]\n        self._Oa = self._Sa + (1 - self._Sa) * self._Da\n        self._Oc = self._Dc + (1 - self._Da) * self._Sc\n\n    def _src_in_mode(self): # [Sa * Da, Sc * Da]\n        self._Oa = self._Sa * self._Da\n        self._Oc = self._Sc * self._Da\n\n    def _dst_in_mode(self): # [Sa * Da, Sa * Dc]\n        self._Oa = self._Sa * self._Da\n        self._Oc = self._Sa * self._Dc\n\n    def _src_out_mode(self): # [Sa * (1 - Da), Sc * (1 - Da)]\n        self._Oa = self._Sa * (1 - self._Da)\n        self._Oc = self._Sc * (1 - self._Da)\n\n    def _dst_out_mode(self): # [Da * (1 - Sa), Dc * (1 - Sa)]\n        self._Oa = self._Da * (1 - self._Sa)\n        self._Oc = self._Dc * (1 - self._Sa)\n\n    def _src_atop_mode(self): # [Da, Sc * Da + (1 - Sa) * Dc]\n        self._Oa = self._Da\n        self._Oc = self._Sc * self._Da + (1 - self._Sa) * self._Dc\n\n    def _dst_atop_mode(self): # [Sa, Sa * Dc + Sc * (1 - Da)]\n        self._Oa = self._Sa\n        self._Oc = self._Sa * self._Dc + self._Sc * (1 - self._Da)\n\n    def _xor_mode(self): # [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + (1 - Sa) * Dc]\n        self._Oa = self._Sa + self._Da - 2 * self._Sa * self._Da\n        self._Oc = self._Sc * (1 - self._Da) + (1 - self._Sa) * self._Dc\n\n    def _darken_mode(self): # [Sa + Da - Sa*Da, Sc*(1 - Da) + Dc*(1 - Sa) + min(Sc, Dc)]\n        self._Oa = self._Sa + self._Da - self._Sa * self._Da\n        self._Oc = self._Sc * (1 - self._Da) + self._Dc * (1 - self._Sa) + np.min([self._Sc, self._Dc], axis=0)\n\n    def _lighten_mode(self): # [Sa + Da - Sa*Da, Sc*(1 - Da) + Dc*(1 - Sa) + max(Sc, Dc)]\n        self._Oa = self._Sa + self._Da - self._Sa * self._Da\n        self._Oc = self._Sc * (1 - self._Da) + self._Dc * (1 - self._Sa) + np.max([self._Sc, self._Dc], axis=0)\n\n    def _multiply_mode(self): # [Sa * Da, Sc * Dc]\n        self._Oa = self._Sa * self._Da\n        self._Oc = self._Sc * self._Dc\n\n    def _screen_mode(self): # [Sa + Da - Sa * Da, Sc + Dc - Sc * Dc]\n        self._Oa = self._Sa + self._Da - self._Sa * self._Da\n        self._Oc = self._Sc + self._Dc - self._Sc * self._Dc\n\n    def _add_mode(self): # Saturate(S + D)\n        raise NotImplementedError()\n\n    def _overlay_mode(self):\n        # A_out = A_dst + A_scr - A_dst * A_src, C_out = 2 * C_dst * C_src (or other methods) (archive [4])\n        self._Oa = self._Da + self._Sa - self._Da * self._Sa\n        self._Oc = 2 * self._Dc * self._Sc\n\n\ndef porter_duff(x, 
y, mode):\n return PorterDuff(x, y).alpha_composition(mode)\n\n\nif __name__ == '__main__':\n source_img = Image.open(r\"img\\source.png\").convert(mode='RGBA')\n destination_img = Image.open(r\"img\\destination.png\").convert(mode='RGBA')\n source_arr = np.array(source_img)\n destination_arr = np.array(destination_img)\n\n out_path = r'img\\out.png'\n out_arr = porter_duff(source_arr, destination_arr, PorterDuff.DARKEN)\n Image.fromarray(out_arr, \"RGBA\").save(out_path)\n","sub_path":"PorterDuff.py","file_name":"PorterDuff.py","file_ext":"py","file_size_in_byte":8077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"332839838","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport cv2\nimport os, sys, glob\n\nfrom numba import jit\nfrom etaprogress.progress import ProgressBar\n\"\"\"\nThis module calculate real values of KLD of optical flow with motion platform vector. See\nhttps://en.wikipedia.org/wiki/Kullback-Leibler_divergence.\n\nExamples:\n Nope.\n\nAttributes:\n optflow_to_hist(bin_magnitude, bin_degree, optical_flows)\n\nTodo:\n *using cuda to calculate optical flow, farneback3d see https://pypi.org/project/farneback3d/.\n *Motion platform probability integrity check\n\"\"\"\n\n\n########################################## GOLBAL VARIABLES ########################################\nDIM_OF_IMG = (768, 1024) # Or, we can get dimension of image from file\nOPT_FLOW_REGION = np.index_exp[:, :]\nEXT_JSON = '.json'\nEXT_EXCEL = '.xlsx'\nEXT_MP4 = '.mp4'\nLOAD_DIR = './data/raw/video/'\nSAVE_DIR = './data/preprocessed/video/'\nBIN_MAGNITUDE = [1,6,20,50,100000]\nBIN_DEGREE = np.linspace(15, 345, 12)\nCOLUMNS = [str((n % 12) * 30) + 'deg//~' + str(BIN_MAGNITUDE[n // 12]) for n in range(60)]\n\n####################################################################################################\n\n############################################# FUNCTIONS ############################################\n@jit()\ndef bin_selection(polar):\n tmp = 0\n if polar[1] < 195:\n if polar[1] < 105:\n if polar[1] < 45:\n if polar[1] > 15:\n tmp = 1\n else:\n if polar[1] < 75:\n tmp = 2\n else:\n tmp = 3\n else:\n if polar[1] < 165:\n if polar[1] < 135:\n tmp = 4\n else:\n tmp = 5\n else:\n tmp = 6\n else:\n if polar[1] < 315:\n if polar[1] < 255:\n if polar[1] < 225:\n tmp = 7\n else:\n tmp = 8\n else:\n if polar[1] < 285:\n tmp = 9\n else:\n tmp = 10\n else:\n if polar[1] < 345:\n tmp = 11\n\n if polar[0] < 6:\n if polar[0] > 1:\n tmp += 12\n else:\n if polar[0] < 50:\n if polar[0] < 20:\n tmp += 24\n else:\n tmp += 36\n else:\n tmp += 48\n return tmp\n\ndef capture_to_optflow(cap):\n optical_flows = []\n prevgray = cv2.cvtColor(cap.read()[1][OPT_FLOW_REGION], cv2.COLOR_BGR2GRAY)\n frame_max = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n frame_num = 1\n bar = ProgressBar(frame_max, max_width = 100)\n tick = 0\n while frame_num < frame_max:\n ret, frame = cap.read()\n\n # if frame_num % 20 == 19:\n if frame_num % 10 == 9:\n # if frame_num % 10 == 1:\n gray = cv2.cvtColor(frame[OPT_FLOW_REGION], cv2.COLOR_BGR2GRAY)\n optical_flows.append(\n cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 4, 15, 3, 5, 1.2, 0))\n # if frame_num % 20 == 0:\n if frame_num % 10 == 0:\n prevgray = cv2.cvtColor(frame[OPT_FLOW_REGION], cv2.COLOR_BGR2GRAY)\n bar.numerator = tick\n print(bar, end = '\\r')\n sys.stdout.flush()\n frame_num += 1\n tick += 1\n return optical_flows\n\ndef optflow_to_hist(optical_flows,\n bin_magnitude = BIN_MAGNITUDE,\n 
bin_degree = BIN_DEGREE):\n \"\"\"Make histogam of optical flows. Currently, it calculate all of frame at end of frame. However\n THIS will be change when problem of motion platform vector distribution is solved.\n\n Args:\n \t bin_magnitude:\n bin_degree:\n optical_flows:\n\n Returns:\n probability of optical flow(12 * 5): sumupped probability\n\n TODO:\n *parallelization\n\t *do not sum up, take each probability.\n \"\"\"\n #assert polars[0].shape == DIM_OF_IMG and polars[1].shape == DIM_OF_IMG,\\\n #'Error: dimention of image is not' + str(DIM_OF_IMG)\n polars_total = []\n for flow in optical_flows:\n polars = np.asarray(cv2.cartToPolar(flow[...,0], flow[...,1],None,None,True))\n polars_total.append(polars.reshape(2, polars.shape[1] * polars.shape[2]).T)\n\n bar2 = ProgressBar(len(polars_total), max_width = 100)\n tick = 0\n counts_set = []\n counts = np.zeros(60)\n ret = []\n for polars in polars_total:\n # counts = np.zeros(60)\n tmp = []\n for polar in polars:\n _bin = bin_selection(polar)\n counts[_bin] += 1\n tmp.append(_bin)\n ret.append(tmp)\n bar2.numerator = tick\n tick += 1\n print(bar2, end = '\\r')\n sys.stdout.flush()\n counts /= counts.sum() # create bins lookup-table\n for frame in ret: # approximately 1024 * 768\n for i in range(len(frame)):\n frame[i] = counts[frame[i]]\n return ret\n\ndef opt_flow_prob_from_file(video_file): # helper function on file\n cap = cv2.VideoCapture(video_file)\n optical_flows = capture_to_optflow(cap)\n cap.release()\n probability = optflow_to_hist(optical_flows)\n return probability\n\ndef opt_flow_prob_from_dir(load_dir): # helper function on directory\n video_files = glob.glob(load_dir + \"*\" + EXT_MP4)\n total_bar = ProgressBar(len(video_files), max_width = 100)\n tick = 0\n tmp = []\n for video_file in video_files:\n video_name = os.path.splitext(os.path.basename(video_file))[0]\n probability = opt_flow_prob_from_file(video_file)\n tmp.append([video_name, probability])\n total_bar.numerator = tick\n tick += 1\n print(str(tick) + \"/21 :\\n\")\n return tmp\n\ndef save(video_name, probability, save_dir = SAVE_DIR):\n save_json_path = save_dir + video_name + EXT_JSON\n df = pd.DataFrame(probability)\n df.to_json(save_json_path)\n\ndef main(args):\n if args.use_default:\n data_set = opt_flow_prob_from_dir(LOAD_DIR)\n total_bar = ProgressBar(len(data_set), max_width = 100)\n tick = 0\n for data in data_set:\n save(data[0], data[1])\n total_bar.numerator = tick\n tick += 1\n print(total_bar, end = '\\r')\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(\n description='Calculate optical flows of video per 10frame and save it as json and excel')\n\n parser.add_argument('-use_default', \"--use_default\", action='store_true')\n parser.add_argument('-dir', \"--dir\", nargs='?', type=str, help='directory')\n parser.add_argument('-file', \"--file\", nargs='?', type=str, help = 'YET IMPLEMENTED!')\n main(parser.parse_args())\n","sub_path":"test/preprocess_video_data_backup.py","file_name":"preprocess_video_data_backup.py","file_ext":"py","file_size_in_byte":6790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"587989395","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2020, The OpenThread Authors.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# 1. 
Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of the copyright holder nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom wpan import verify\nimport wpan\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Test description: Test MLE discover scan with nodes supporting different radios\n#\n\ntest_name = __file__[:-3] if __file__.endswith('.py') else __file__\nprint('-' * 120)\nprint('Starting \\'{}\\''.format(test_name))\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Creating `wpan.Nodes` instances\n\nspeedup = 1\nwpan.Node.set_time_speedup_factor(speedup)\n\nn1 = wpan.Node(wpan.NODE_15_4)\nn2 = wpan.Node(wpan.NODE_TREL)\nn3 = wpan.Node(wpan.NODE_15_4_TREL)\ns1 = wpan.Node(wpan.NODE_15_4)\ns2 = wpan.Node(wpan.NODE_TREL)\ns3 = wpan.Node(wpan.NODE_15_4_TREL)\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Init all nodes\n\nwpan.Node.init_all_nodes()\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Build network topology\n\nn1.form(\"n1\", channel='20')\nn2.form(\"n2\", channel='21')\nn3.form(\"n3\", channel='22')\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Test implementation\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n# Scan by scanner nodes (no network)\n\n# Scan by s1 (15.4 only), expect to see n1(15.4) and n3(15.4+trel)\nresult = wpan.parse_scan_result(s1.discover_scan())\nverify(n1.is_in_scan_result(result))\nverify(not n2.is_in_scan_result(result))\nverify(n3.is_in_scan_result(result))\n\n# Scan by s2 (trel only), expect to see n2(trel) and n3(15.4+trel)\nresult = wpan.parse_scan_result(s2.discover_scan())\nverify(not n1.is_in_scan_result(result))\nverify(n2.is_in_scan_result(result))\nverify(n3.is_in_scan_result(result))\n\n# Scan by s3 (trel+15.4), expect to see all nodes\nresult = 
wpan.parse_scan_result(s3.discover_scan())\nverify(n1.is_in_scan_result(result))\nverify(n2.is_in_scan_result(result))\nverify(n3.is_in_scan_result(result))\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n# Scan by the nodes\n\n# Scan by n1 (15.4 only), expect to see only n3(15.4+trel)\nresult = wpan.parse_scan_result(n1.discover_scan())\nverify(not n1.is_in_scan_result(result))\nverify(not n2.is_in_scan_result(result))\nverify(n3.is_in_scan_result(result))\n\n# Scan by n2 (trel only), expect to see only n3(15.4+trel)\nresult = wpan.parse_scan_result(n2.discover_scan())\nverify(not n1.is_in_scan_result(result))\nverify(not n2.is_in_scan_result(result))\nverify(n3.is_in_scan_result(result))\n\n# Scan by n3 (15.4+trel), expect to see n1(15.4) and n2(trel)\nresult = wpan.parse_scan_result(n3.discover_scan())\nverify(n1.is_in_scan_result(result))\nverify(n2.is_in_scan_result(result))\nverify(not n3.is_in_scan_result(result))\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Test finished\n\nwpan.Node.finalize_all_nodes()\n\nprint('\\'{}\\' passed.'.format(test_name))\n","sub_path":"tests/toranj/ncp/test-705-multi-radio-discover-scan.py","file_name":"test-705-multi-radio-discover-scan.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"80468711","text":"import xml.etree.ElementTree as ET\nimport parseutils as pu\n\n#function to open and parse the XML file\ndef readXML(filepath, filename):\n tree = ET.parse(filepath + filename)\n root = tree.getroot()\n return root\n\n#function that returns a list of XML elements with a given name from the etree root\ndef fetch(root, name):\n lst = []\n for command in root:\n ph = command.find(name)\n if ph is None or ph.text is None:\n lst.append(\" \")\n else:\n lst.append(ph.text) \n return lst\n\ndef fetchTXT(root, name):\n lst = []\n for command in root:\n ph = command.find(name)\n if ph is None or ph.text is None:\n lst.append(\" \")\n else:\n ph = pu.embFormat(ph.text)\n lst.append(ph) \n return lst\n\n#function that replaces a substring in a list element with another substring\ndef process(lst, st1, st2):\n for item in lst:\n ph = item\n ph = ph.replace(st1, st2)\n item = ph\n return lst\n\n#get an integer value from a file\ndef getVal(filepath, filename):\n with open(filepath+filename, 'r') as f:\n val = f.read()\n val = int(val)\n return val\n\n#write a value to a file\ndef writeVal(filepath, filename, val):\n with open(filepath+filename, 'w') as f:\n f.write(str(val))\n return\n\n#get the bot token\ndef getToken(filepath):\n with open(filepath+'TOKEN.dcbt', 'r') as f:\n TOKEN = f.read()\n return TOKEN\n\ndef getAdmin(filepath):\n with open(filepath+'ADMIN.dcbt', 'r') as f:\n ADMIN = f.read()\n ADMIN = int(ADMIN)\n return ADMIN\n\ndef getFeed(filepath):\n with open(filepath+'FEED.dcbt', 'r') as f:\n Feed = f.read()\n Feed = int(Feed)\n return Feed\n\ndef getUserList(filepath):\n with open(filepath+'VOTE.dcbt', 'r') as f:\n users = f.read().split(\",\")\n if users == \"0\":\n users = []\n return users\n users = users[:len(users)-1]\n for i in range(len(users)): \n users[i] = int(users[i])\n print(users)\n \n return users\n\ndef writeUserList(filepath, list):\n with open(filepath+'VOTE.dcbt', 'w') as f:\n for i in range(len(list)):\n f.write(str(list[i])+\",\")\n 
return\n","sub_path":"filehandler.py","file_name":"filehandler.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"53982934","text":"from alg import pq as pq\nfrom alg import dict_pq as dpq\nimport pytest\n\n\ndef test_minPQ():\n l = [1, 10, 11, 5, 20, 1, 1, 34, 54, 65, 23, 23, 54, 75, 8, 87]\n\n # Test with HeapSort\n a = pq.MinPQ(l)\n\n z = []\n\n while a.getSize() > 0:\n z.append(a.getMin())\n\n assert sorted(l) == z\n\n\ndef test_maxPQ():\n l = [1, 10, 11, 5, 20, 1, 1, 34, 54, 65, 23, 23, 54, 75, 8, 87]\n\n # Test with HeapSort\n a = pq.MaxPQ(l)\n\n z = []\n\n while a.getSize() > 0:\n z.append(a.getMax())\n\n assert sorted(l, reverse=True) == z\n\n\ndef test_minDPQ_1():\n l = []\n l.append((1, 'a'))\n l.append((2, 'b'))\n l.append((3, 'c'))\n l.append((3, '1'))\n\n b = dpq.MinDictPQ(l)\n\n assert b.getMin()[1] == 'a'\n assert b.getMin()[1] == 'b'\n\n # Next two elements from PQ has same priority so both result are correct\n res = b.getMin()[1]\n assert res == 'c' or res == '1'\n\n res = b.getMin()[1]\n assert res == 'c' or res == '1'\n\n\ndef test_maxDPQ_2():\n a = dpq.MinDictPQ()\n\n a.insert((1, 'a'))\n a.insert((2, 'b'))\n a.insert((3, 'c'))\n a.insert((3, '1'))\n\n\ndef test_minDPQ_3():\n\n with pytest.raises(ValueError):\n l = []\n l.append((1, 'a'))\n l.append((2, 'b'))\n l.append((3, 'c'))\n l.append((3, '1'))\n l.append((2, 'b'))\n\n b = dpq.MaxDictPQ(l)\n\n\ndef test_maxDPQ_4():\n\n with pytest.raises(KeyError):\n a = dpq.MinDictPQ()\n\n a.insert((1, 'a'))\n a.insert((2, 'b'))\n a.insert((3, 'c'))\n a.insert((3, '1'))\n a.insert((3, '1'))\n\n\ndef test_minDPQ_5():\n\n b = dpq.MinDictPQ()\n b.insert((1, 'a'))\n b.insert((2, 'b'))\n b.insert((3, 'c'))\n b.insert((3, '1'))\n\n b.changePriority((0, 'c'))\n\n assert b.getMin()[1] == 'c'\n assert b.getMin()[1] == 'a'\n assert b.getMin()[1] == 'b'\n assert b.getMin()[1] == '1'\n\n b = dpq.MinDictPQ()\n b.insert((1, 'a'))\n b.insert((2, 'b'))\n b.insert((3, 'c'))\n b.insert((3, '1'))\n\n b.changePriority((5, 'b'))\n b.changePriority((2, 'c'))\n\n assert b.getMin()[1] == 'a'\n assert b.getMin()[1] == 'c'\n assert b.getMin()[1] == '1'\n assert b.getMin()[1] == 'b'\n\n\n# test_minDPQ_1()\n# test_maxDPQ_2()\n# test_minDPQ_3()\n# test_maxDPQ_4()\ntest_minDPQ_5()\n\n\n# test_maxPQ()\n# test_minPQ()\n","sub_path":"alg_1/tests/test_pq.py","file_name":"test_pq.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"414376754","text":"import random\nimport os\nimport copy\n\n\n# Defines cls() to clear the screen of prints\ndef cls():\n os.system('cls' if os.name=='nt' else 'clear')\n\n\narr = []\nfor i in range(4):\n arr.append(random.randint(1, 5))\n\n\ndef guess(arr, turn):\n temp_arr=arr.copy()\n done = False\n guess_arr = []\n while not done:\n print('turn', turn)\n select = input(\"Pick a number between 1 and 6\")\n cls()\n try:\n if int(select) < 7 and int(select) > 0:\n guess_arr.append(select)\n\n else:\n print('Invalid Number')\n except ValueError:\n print(\"Invalid Number\")\n\n if len(guess_arr) == 4:\n sub = 0\n matches = [0, 0, 0, 0]\n half_matches = []\n for n, i in enumerate(guess_arr):\n if int(i) == temp_arr[n-sub]:\n matches[n] = int(i)\n temp_arr.remove(temp_arr[n-sub])\n sub += 1\n elif int(i) in temp_arr:\n half_matches.append(int(i))\n done = True\n\n winnings = 0\n for i in matches:\n if i:\n winnings += 1\n print(\"Your guesses - 
\", guess_arr)\n print(\"You guessed %s correctly\" % winnings)\n print(\"You guessed %s correctly, but not in the right place\" % len(half_matches))\n if winnings == 4:\n return True\n\nturn = 0\nfor i in range(5):\n turn += 1\n if guess(arr, turn):\n print('correct = ', arr)\n break\n\n\n\n","sub_path":"codes/leaper.py","file_name":"leaper.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"197756829","text":"\nfrom abc import ABCMeta, abstractmethod\nimport os\nimport os.path as op\nimport shutil\nfrom pymatgen.io.vasp.inputs import VaspInput, Poscar, Incar, Kpoints, Potcar\nfrom pymatgen.io.vasp.outputs import Vasprun, Oszicar\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.electronic_structure.dos import CompleteDos\nfrom pymatgen.electronic_structure.bandstructure import BandStructure\nfrom pymatgen.analysis.transition_state import NEBAnalysis\nfrom pymatgen.entries.computed_entries import ComputedStructureEntry\nfrom pynter.slurm.job_script import ScriptHandler\nfrom pynter.slurm.interface import HPCInterface\nfrom pynter.tools.utils import grep_list\nfrom pynter.data.database.creator import VaspJobDrone\nimport importlib\nimport numpy as np\nimport json\nfrom glob import glob\nfrom pynter.tools.utils import grep\n\nclass Job:\n \n def __init__(self,path=None,inputs=None,job_settings=None,outputs=None,job_script_filename=None,name=None):\n \"\"\"\n Class to control and organize inputs and outputs of a generic job.\n\n Parameters\n ----------\n path : (str), optional\n Path where job is stored. The default is None. If None the work dir is used.\n inputs : (dict), optional\n Dictionary with input data. The default is None.\n job_settings : (dict), optional\n Dictionary with job settings. The default is None. Documentation in ScriptHandler class in slurm.job_script module\n outputs : (dict), optional\n Dictionary with output data. The default is None.\n job_script_filename : (str), optional\n Filename of job script. The default is taken from the key 'filename' in the job_settings in the config file.\n name : (str)\n Name of the job. 
If none the name is searched in the job script.\n\n \"\"\"\n \n self.path = path if path else os.getcwd()\n self.inputs = inputs\n self.job_settings = job_settings\n self.outputs = outputs\n self.job_script_filename = job_script_filename if job_script_filename else ScriptHandler().filename\n \n self._localdir = HPCInterface().localdir\n self._workdir = HPCInterface().workdir\n self._path_relative = op.abspath(self.path).replace(self._localdir,'')\n \n self.path_in_hpc = self._workdir + self._path_relative\n \n \n if outputs:\n self.get_output_properties()\n \n \n if name:\n self.name = name\n elif self.job_settings:\n self.name = self.job_settings['name']\n elif op.isfile(op.join(self.path,self.job_script_filename)):\n s = ScriptHandler.from_file(self.path,filename=self.job_script_filename)\n self.name = s.settings['name']\n else:\n self.name = 'no_name'\n \n if not self.job_settings:\n self.job_settings = {}\n self.job_settings['name'] = self.name\n\n\n def __str__(self):\n jobclass = self.jobclass\n if hasattr(self,'group'):\n if self.group != '':\n printout = '%s \"%s\" of group \"%s\"' %(jobclass, self.name, self.group)\n else:\n printout = '%s \"%s\"' %(jobclass, self.name)\n else:\n self.group = ''\n printout = '%s \"%s\"' %(jobclass, self.name)\n \n return printout\n \n def __repr__(self):\n return self.__str__()\n \n @property\n def jobclass(self):\n return self.__class__.__name__\n\n \n def cancel_job(self):\n \"\"\"Cancel job on HPC\"\"\"\n hpc = HPCInterface()\n job_id = self.job_id()\n hpc.cancel_jobs(job_id)\n \n return \n\n \n def delete_job_files(self,safety=True):\n \"\"\"\n Delete Job folder (self.path)\n\n Parameters\n ----------\n safety : (bool), optional\n Ask confirmation to delete job. The default is True.\n \"\"\"\n if safety:\n inp = input('Are you sure you want to delete Job %s ? (y/n) : ' %self.name)\n if inp in ('y','Y'):\n shutil.rmtree(self.path)\n print('Deleted Job %s'%self.name)\n else:\n print('Job %s is left unchanged'%self.name)\n else:\n shutil.rmtree(self.path)\n print('Deleted Job %s'%self.name)\n return\n\n\n @abstractmethod\n def get_inputs(self):\n pass\n\n @abstractmethod\n def get_outputs(self):\n pass\n \n @abstractmethod\n def get_output_properties(self):\n pass\n\n @abstractmethod\n def insert_in_database(self):\n pass\n\n\n def job_id(self):\n \"\"\"Get job ID from the queue on HPC\"\"\" \n hpc = HPCInterface()\n stdout,stderr = hpc.qstat(printout=False)\n queue = stdout.splitlines()\n job_lines = grep_list(self.name,queue)\n if job_lines == []:\n raise ValueError (f'Job named \"{self.name}\" is not currently running or pending')\n elif len(job_lines) > 1:\n raise ValueError (f'More than one job named \"{self.name}\" has been found in queue:\\n{stdout}')\n else:\n job_line = job_lines[0].split()\n job_id = job_line[0]\n \n return job_id\n\n\n def job_queue(self):\n \"\"\"\n Print job queue from HPC on screen\n \n Returns\n -------\n stdout : (str)\n Output.\n stderr : (str)\n Error.\n \"\"\"\n hpc = HPCInterface()\n stdout,stderr = hpc.qstat()\n \n return stdout,stderr\n \n\n def run_job(self,write_input=True,sync=True):\n \"\"\"\n Run job on HPC. Input files are automatically written and sync to HPC is performed.\n\n Parameters\n ----------\n write_input : (bool), optional\n Write input file stored in \"inputs\" dictionary. The default is True.\n sync : (bool), optional\n Sync files to HPC before running. 
The default is True\n\n Returns\n -------\n stdout : (str)\n Output.\n stderr : (str)\n Error.\n \"\"\"\n if write_input:\n self.write_input()\n hpc = HPCInterface()\n if sync:\n self.sync_to_hpc()\n stdout,stderr = hpc.sbatch(path=self.path_in_hpc,job_script_filename=self.job_script_filename)\n \n return stdout,stderr\n \n\n def sync_from_hpc(self,stdouts=False):\n \"\"\"\n Sync job data from HPC to local machine\n\n Parameters\n ----------\n stdouts : (bool), optional\n Return output and error strings. The default is False.\n\n Returns\n -------\n stdout : (str)\n Output.\n stderr : (str)\n Error.\n\n \"\"\"\n hpc = HPCInterface()\n abs_path = op.abspath(self.path)\n localdir = abs_path \n stdout,stderr = hpc.rsync_from_hpc(remotedir=self.path_in_hpc,localdir=localdir)\n if stdouts:\n return stdout,stderr\n else:\n return\n \n \n def sync_to_hpc(self,stdouts=False):\n \"\"\"\n Sync job data from local machine to HPC\n\n Parameters\n ----------\n stdouts : (bool), optional\n Return output and error strings. The default is False.\n\n Returns\n -------\n stdout : (str)\n Output.\n stderr : (str)\n Error.\n\n \"\"\"\n hpc = HPCInterface()\n abs_path = op.abspath(self.path)\n localdir = abs_path \n stdout,stderr = hpc.rsync_to_hpc(localdir=localdir,remotedir=self.path_in_hpc)\n if stdouts:\n return stdout,stderr\n else:\n return\n\n\n def status(self):\n \"\"\"\n Get job status from HPC. \n\n Returns\n -------\n status : (str)\n Job status. Possible status are 'PENDING','RUNNING','NOT IN QUEUE'.\n \"\"\"\n hpc = HPCInterface()\n stdout,stderr = hpc.qstat(printout=False)\n queue = stdout.splitlines()\n job_lines = grep_list(self.name,queue)\n if job_lines == []:\n status = 'NOT IN QUEUE'\n elif len(job_lines) > 1:\n raise ValueError (f'More than one job named \"{self.name}\" has been found in queue:\\n{stdout}')\n else:\n job_line = job_lines[0].split()\n status = job_line[4]\n if status == 'PD':\n status = 'PENDING'\n if status == 'R':\n status = 'RUNNING'\n if status == 'CG':\n status = 'COMPLETED'\n \n return status\n \n \n @abstractmethod\n def write_input():\n pass\n \n \nclass VaspJob(Job):\n \n \n def as_dict(self,**kwargs): \n \"\"\"\n Get VaspJob as dictionary. The Vasprun ouput object is not exported.\n \n Parameters\n ----------\n get_band_structure : (bool), optional\n Export BandStructure as dict. The default is False.\n \n Returns:\n Json-serializable dict representation of VaspJob.\n \"\"\"\n kwargs = self._parse_kwargs(**kwargs)\n \n d = {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"path\": self.path,\n \"inputs\": self.inputs.as_dict(), \n \"job_settings\": self.job_settings,\n \"job_script_filename\":self.job_script_filename,\n \"name\":self.name}\n \n d[\"outputs\"] = {}\n if \"ComputedStructureEntry\" in self.outputs.keys():\n d[\"outputs\"][\"ComputedStructureEntry\"] = self.computed_entry.as_dict()\n \n d[\"is_converged\"] = self.is_converged\n d[\"band_structure\"] = self.band_structure.as_dict() if kwargs['get_band_structure'] else None\n return d\n\n\n def to_json(self,path,**kwargs):\n \"\"\"\n Save VaspJob object as json string or file\n\n Parameters\n ----------\n path : (str), optional\n Path to the destination file. If None a string is exported.\n get_band_structure : (bool), optional\n Export BandStructure as dict. 
The default is False.\n\n Returns\n -------\n d : (str)\n If path is not set a string is returned.\n\n \"\"\"\n d = self.as_dict(**kwargs)\n if path:\n with open(path,'w') as file:\n json.dump(d,file)\n return\n else:\n return d.__str__() \n\n \n @staticmethod\n def from_dict(d):\n \"\"\"\n Construct VaspJob object from python dictionary.\n \n Returns\n -------\n VaspJob object\n \"\"\"\n path = d['path']\n inputs = VaspInput.from_dict(d['inputs'])\n job_settings = d['job_settings']\n job_script_filename = d['job_script_filename']\n name = d['name']\n outputs={}\n if d['outputs']:\n outputs['ComputedStructureEntry'] = ComputedStructureEntry.from_dict(d['outputs']['ComputedStructureEntry'])\n \n vaspjob = VaspJob(path,inputs,job_settings,outputs,job_script_filename,name)\n \n vaspjob._band_structure = BandStructure.from_dict(d['band_structure']) if d['band_structure'] else None\n vaspjob._is_converged = d['is_converged']\n if outputs:\n for k,v in vaspjob.computed_entry.data.items():\n if k not in vaspjob._default_data_computed_entry:\n setattr(vaspjob,k,v)\n \n return vaspjob\n \n \n @staticmethod\n def from_directory(path,job_script_filename='job.sh',load_outputs=True,**kwargs):\n \"\"\"\n Builds VaspJob object from data stored in a directory. Input files are read using Pymatgen VaspInput class.\n Output files are read usign Pymatgen Vasprun class.\n Job settings are read from the job script file.\n\n Parameters\n ----------\n path : (str)\n Path were job data is stored.\n job_script_filename : (str), optional\n Filename of job script. The default is 'job.sh'.\n kwargs : (dict)\n Arguments to pass to Vasprun parser.\n Returns\n -------\n VaspJob object.\n \n \"\"\"\n \n inputs = VaspInput.from_directory(path)\n outputs = {}\n if load_outputs:\n if op.isfile(op.join(path,'vasprun.xml')):\n try:\n outputs['Vasprun'] = Vasprun(op.join(path,'vasprun.xml'),**kwargs)\n except:\n print('Warning: Reading of vasprun.xml in \"%s\" failed'%path)\n outputs['Vasprun'] = None\n \n s = ScriptHandler.from_file(path,filename=job_script_filename)\n job_settings = s.settings\n \n return VaspJob(path,inputs,job_settings,outputs)\n\n\n @staticmethod\n def from_json(path_or_string):\n \"\"\"\n Build VaspJob object from json file or string.\n\n Parameters\n ----------\n path_or_string : (str)\n If an existing path to a file is given the object is constructed reading the json file.\n Otherwise it will be read as a string.\n\n Returns\n -------\n VaspJob object.\n\n \"\"\"\n if op.isfile(path_or_string):\n with open(path_or_string) as file:\n d = json.load(file)\n else:\n d = json.load(path_or_string)\n return VaspJob.from_dict(d)\n \n\n @property\n def incar(self):\n return self.inputs['INCAR']\n \n @property\n def kpoints(self):\n return self.inputs['KPOINTS']\n \n @property\n def poscar(self):\n return self.inputs['POSCAR']\n \n @property\n def potcar(self):\n return self.inputs['POTCAR']\n\n @property\n def vasprun(self):\n if 'Vasprun' in self.outputs.keys():\n return self.outputs['Vasprun']\n else:\n if not op.exists(op.join(self.path,'vasprun.xml')):\n print('Warning: \"vasprun.xml\" file is not present in Job directory')\n return None\n\n @property\n def computed_entry(self):\n if 'ComputedStructureEntry' in self.outputs.keys():\n return self.outputs['ComputedStructureEntry']\n else:\n return None\n \n\n @property\n def band_structure(self):\n return self._band_structure\n\n \n @property\n def charge(self):\n \"\"\"\n Charge of the system calculated as the difference between the value of \"NELECT\"\n in 
the INCAR and the number of electrons in POTCAR. If \"NELECT\" is not present \n charge is set to 0.\n \"\"\"\n charge = 0\n if 'NELECT' in self.incar.keys():\n nelect = self.incar['NELECT']\n val = {}\n for p in self.potcar:\n val[p.element] = p.nelectrons\n neutral = sum([ val[el.symbol]*self.initial_structure.composition[el] \n for el in self.initial_structure.composition])\n charge = neutral - nelect\n if not isinstance(charge,int):\n charge = np.around(charge,decimals=1)\n return charge\n\n\n @property\n def energy_gap(self):\n \"\"\"Energy gap read from vasprun.xml with Pymatgen\"\"\"\n band_gap = None\n if self.computed_entry:\n band_gap = self.computed_entry.data['eigenvalue_band_properties'][0]\n \n return band_gap\n \n\n @property\n def final_energy(self):\n \"\"\"Final total energy of the calculation read from vasprun.xml with Pymatgen\"\"\"\n final_energy = None\n if self.computed_entry:\n final_energy = self.computed_entry.data['final_energy']\n \n return final_energy\n \n \n @property\n def final_structure(self):\n \"\"\"Final structure read from \"vasprun.xml\" with Pymatgen\"\"\"\n final_structure = None\n if self.computed_entry:\n if self.computed_entry.data['structures']:\n final_structure = self.computed_entry.data['structures'][-1]\n \n return final_structure \n \n \n @property\n def formula(self):\n \"\"\"Complete formula from initial structure (read with Pymatgen)\"\"\"\n if self.initial_structure:\n return self.initial_structure.composition.formula\n else:\n return None\n\n \n @property\n def hubbards(self):\n \"\"\"\n Generate dictionary with U paramenters from LDAUU tag in INCAR file\n\n Returns\n -------\n U_dict : (dict)\n Dictionary with Elements as keys and U parameters as values.\n \"\"\"\n U_dict = {}\n incar = self.incar\n if 'LDAUU' in incar.keys():\n ldauu = incar['LDAUU']\n elements = self.initial_structure.composition.elements\n if isinstance(ldauu,str):\n ldauu = ldauu.split()\n for i in range(0,len(ldauu)):\n U_dict[elements[i]] = int(ldauu[i])\n else:\n print('No LDAUU tag present in INCAR in Job \"%s\"' %self.name)\n \n return U_dict\n \n @property\n def initial_structure(self):\n \"\"\"Initial structure read from poscar\"\"\"\n if self.poscar:\n poscar = self.poscar\n return poscar.structure \n else:\n print('Warning: inputs[\"POSCAR\"] is not defined')\n return None\n \n \n @property\n def is_converged(self):\n \"\"\"\n Reads Pymatgen Vasprun object and returns \"True\" if the calculation is converged,\n \"False\" if reading failed, and \"None\" if is not present in the outputs dictionary.\n \"\"\"\n if hasattr(self,'_is_converged'):\n return self._is_converged\n else:\n return None\n \n\n @property\n def nelectrons(self):\n \"\"\"\n Number of electrons in the system. 
If 'NELECT' tag is in INCAR that value is returned.\n Else the sum of valence electrons from POTCAR is returned.\n \"\"\"\n if 'NELECT' in self.incar.keys():\n nelect = self.incar['NELECT']\n else:\n val = {}\n for p in self.potcar:\n val[p.element] = p.nelectrons\n nelect = sum([ val[el.symbol]*self.initial_structure.composition[el] \n for el in self.initial_structure.composition])\n return nelect \n \n\n def delete_output_files(self,safety=True):\n \"\"\"\n Delete files that aren't input files (INCAR,KPOINTS,POSCAR,POTCAR)\n \"\"\"\n if safety:\n inp = input('Are you sure you want to delete outputs of Job %s ?: (y/n)' %self.name)\n if inp in ('y','Y'):\n delete = True\n else:\n delete = False\n else:\n delete= True\n \n if delete: \n files = [f for f in os.listdir(self.path) if os.path.isfile(os.path.join(self.path, f))]\n for f in files:\n if f not in ['INCAR','KPOINTS','POSCAR','POTCAR',self.job_script_filename]:\n os.remove(os.path.join(self.path,f))\n print('Deleted file %s'%os.path.join(self.path,f)) \n return\n \n\n def get_inputs(self,sync=False):\n \"\"\"\n Read VaspInput from directory\n \"\"\"\n if sync:\n self.sync_from_hpc()\n inputs = VaspInput.from_directory(self.path)\n self.inputs = inputs\n return\n \n \n def get_outputs(self,sync=False,get_output_properties=True):\n \"\"\"\n Get outputs dictionary from the data stored in the job directory. \"vasprun.xml\" is \n read with Pymatgen\n \"\"\"\n if sync:\n self.sync_from_hpc()\n path = self.path\n outputs = {}\n if op.isfile(op.join(path,'vasprun.xml')):\n try:\n outputs['Vasprun'] = Vasprun(op.join(path,'vasprun.xml'))\n except:\n print('Warning: Reading of vasprun.xml in \"%s\" failed'%path)\n outputs['Vasprun'] = None\n self.outputs = outputs\n if get_output_properties:\n self.get_output_properties()\n return\n\n \n def get_output_properties(self,**kwargs):\n \"\"\"\n Parse outputs properties from VaspJob.outputs.\n\n Parameters\n ----------\n get_band_structure : (bool), optional\n Get BandStructure object from vasprun. The default is False.\n data : (list), optional\n List of attributes of Vasprun to parse in ComputedStructureEntry. The default is None.\n \"\"\" \n self._is_converged = self._get_convergence()\n \n self._default_data_computed_entry = ['final_energy','structures','eigenvalue_band_properties'] # default imports from Vasprun\n\n kwargs = self._parse_kwargs(**kwargs) \n if self.vasprun:\n data = self._default_data_computed_entry \n optional_attributes = []\n if kwargs['data']:\n for attr in kwargs['data']:\n data.append(attr)\n optional_attributes.append(attr)\n self.outputs['ComputedStructureEntry'] = self.vasprun.get_computed_entry(data=data)\n \n if optional_attributes:\n for attr in optional_attributes:\n value = self.computed_entry.data[attr]\n setattr(self,attr,value)\n\n self._band_structure = self._get_band_structure() if kwargs['get_band_structure'] else None\n \n return\n\n\n def insert_in_database(self,get_doc_only=False,safety=True,check_convergence=True,**kwargs):\n \"\"\"\n Get VaspJob doc and insert in pynter default database with matgendb's VaspToDbTaskDrone.\n\n Parameters\n ----------\n get_doc_only: (bool), optional\n Get only doc with get_task_doc but does not perform the insertion into db. Default is False.\n safety : (bool), optional\n Ask confirmation to insert job. The default is True.\n check_convergence: (bool), optional\n Insert job in DB only if is_converged is True. 
The default is True.\n **kwargs :\n Args to pass to VaspToDbTaskDrone\n \n Returns\n -------\n drone: \n VaspJobDrone object that contains all attributes of VaspToDbTaskDrone.\n \"\"\"\n drone = VaspJobDrone(self,**kwargs)\n if get_doc_only:\n return drone.get_task_doc_from_files()\n if safety:\n inp = input('Are you sure you want to insert VaspJob %s in database \"%s\", collection \"%s\"? (y/n) : ' \n %(self.name,drone.database,drone.collection)) \n if inp in ('y','Y'):\n assimilate = True\n else:\n assimilate = False\n if assimilate:\n drone.assimilate_job(check_convergence=check_convergence)\n return \n\n \n def write_input(self):\n \"\"\"Write \"inputs\" dictionary to files. The VaspInput class from Pymatgen is used.\"\"\"\n script_handler = ScriptHandler(**self.job_settings)\n script_handler.write_script(path=self.path)\n inputs = self.inputs\n inputs.write_input(output_dir=self.path,make_dir_if_not_present=True)\n return\n\n\n def _get_band_structure(self):\n \"\"\"Get BandStructure objects from Vasprun\"\"\"\n if self.vasprun:\n return self.vasprun.get_band_structure(kpoints_filename=op.join(self.path,'KPOINTS'))\n else:\n return None\n \n\n def _get_convergence(self):\n \"\"\"\n Reads Pymatgen Vasprun object and returns \"True\" if the calculation is converged,\n \"False\" if reading failed, and \"None\" if is not present in the outputs dictionary.\n \"\"\"\n is_converged = None\n if self.outputs:\n if 'Vasprun' in self.outputs.keys():\n is_converged = False\n if self.vasprun:\n vasprun = self.vasprun\n conv_el, conv_ionic = False, False\n if vasprun:\n conv_el = vasprun.converged_electronic\n conv_ionic = vasprun.converged_ionic\n if conv_el and conv_ionic:\n is_converged = True \n return is_converged\n\n\n def _parse_kwargs(self,**kwargs):\n kwargs['data'] = kwargs['data'] if 'data' in kwargs.keys() else None\n kwargs['get_band_structure'] = kwargs['get_band_structure'] if 'get_band_structure' in kwargs.keys() else False \n return kwargs\n\n\n\nclass VaspNEBJob(Job):\n \n \n def as_dict(self):\n \n d = {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"path\": self.path, \n \"job_settings\": self.job_settings,\n \"job_script_filename\":self.job_script_filename,\n \"name\":self.name}\n \n inputs_as_dict = {}\n inputs_as_dict['structures'] = [s.as_dict() for s in self.structures]\n inputs_as_dict['INCAR'] = self.incar.as_dict()\n inputs_as_dict['KPOINTS'] = self.kpoints.as_dict()\n inputs_as_dict['POTCAR'] = self.potcar.as_dict()\n \n d[\"inputs\"] = inputs_as_dict\n d[\"is_step_limit_reached\"] = self.is_step_limit_reached\n d[\"is_converged\"] = self.is_converged\n d[\"r\"] = self.r.tolist()\n d[\"energies\"] = self.energies.tolist()\n d[\"forces\"] = self.forces.tolist()\n \n return d\n \n\n def to_json(self,path):\n \"\"\"\n Save VaspNEBJob object as json string or file\n Parameters\n ----------\n path : (str), optional\n Path to the destination file. 
If None a string is exported.\n \n Returns\n -------\n d : (str)\n If path is not set a string is returned.\n \"\"\"\n d = self.as_dict()\n if path:\n with open(path,'w') as file:\n json.dump(d,file)\n return\n else:\n return d.__str__() \n\n \n @staticmethod\n def from_dict(d):\n\n path = d['path']\n inputs = {}\n inputs[\"structures\"] = [Structure.from_dict(s) for s in d['inputs']['structures']]\n inputs[\"INCAR\"] = Incar.from_dict(d['inputs']['INCAR'])\n inputs[\"KPOINTS\"] = Kpoints.from_dict(d['inputs']['KPOINTS'])\n inputs[\"POTCAR\"] = Potcar.from_dict(d['inputs']['POTCAR'])\n job_settings = d['job_settings']\n job_script_filename = d['job_script_filename']\n name = d['name']\n outputs={}\n \n vaspNEBjob = VaspNEBJob(path,inputs,job_settings,outputs,job_script_filename,name)\n \n vaspNEBjob._is_step_limit_reached = d['is_step_limit_reached']\n vaspNEBjob._is_converged = d['is_converged']\n vaspNEBjob._r = np.array(d['r'])\n vaspNEBjob._energies = np.array(d['energies'])\n vaspNEBjob._forces = np.array(d['forces'])\n \n return vaspNEBjob\n \n \n @staticmethod\n def from_directory(path,job_script_filename='job.sh',load_outputs=True):\n \"\"\"\n Builds VaspNEBjob object from data stored in a directory. Inputs dict is constructed\n by reading with Pymatgen INCAR, KPOINTS and POTCAR and creating a series of Structure \n objects read from POSCARs in the images folders. \n Inputs is thus a dict with \"structures\", \"INCAR\",\"KPOINTS\",\"POTCAR\" as keys.\n Output files are read usign Pymatgen NEBAnalysis and Vasprun classes.\n Job settings are read from the job script file.\n\n Parameters\n ----------\n path : (str)\n Path were job data is stored.\n job_script_filename : (str), optional\n Filename of job script. The default is 'job.sh'.\n\n Returns\n -------\n VaspNEBJob object.\n \n \"\"\" \n inputs = {}\n structures = []\n path = op.abspath(path)\n dirs = [d[0] for d in os.walk(path)]\n for d in dirs:\n image_name = op.relpath(d,start=path)\n if all(c.isdigit() for c in list(image_name)): #check if folder is image (all characters in folder rel path need to be numbers)\n image_path = d\n structure = Poscar.from_file(op.join(image_path,'POSCAR')).structure\n structures.append(structure)\n\n inputs['structures'] = structures \n inputs['INCAR'] = Incar.from_file(op.join(path,'INCAR'))\n inputs['KPOINTS'] = Kpoints.from_file(op.join(path,'KPOINTS'))\n inputs['POTCAR'] = Potcar.from_file(op.join(path,'POTCAR'))\n \n outputs = {}\n if load_outputs:\n try:\n outputs['NEBAnalysis'] = NEBAnalysis.from_dir(path)\n except:\n print('Warning: NEB output reading with NEBAnalysis in \"%s\" failed'%path)\n outputs['NEBAnalysis'] = None\n \n s = ScriptHandler.from_file(path,filename=job_script_filename)\n job_settings = s.settings\n \n return VaspNEBJob(path,inputs,job_settings,outputs)\n\n\n @staticmethod\n def from_json(path_or_string):\n \"\"\"\n Build VaspJob object from json file or string.\n Parameters\n ----------\n path_or_string : (str)\n If an existing path to a file is given the object is constructed reading the json file.\n Otherwise it will be read as a string.\n Returns\n -------\n VaspJob object.\n \"\"\"\n if op.isfile(path_or_string):\n with open(path_or_string) as file:\n d = json.load(file)\n else:\n d = json.load(path_or_string)\n return VaspNEBJob.from_dict(d)\n\n \n def delete_outputs(self,safety=True):\n \"\"\"\n Delete files that aren't input files (INCAR,KPOINTS,POSCAR,POTCAR)\n \"\"\"\n if safety:\n inp = input('Are you sure you want to delete outputs of Job %s ?: (y/n)' 
%self.name)\n if inp in ('y','Y'):\n delete = True\n else:\n delete = False\n else:\n delete= True\n \n if delete:\n dirs = self.image_dirs\n dirs.append(self.path)\n for d in dirs: \n files = [f for f in os.listdir(d) if os.path.isfile(os.path.join(d, f))]\n for f in files:\n if f not in ['INCAR','KPOINTS','POSCAR','POTCAR',self.job_script_filename]:\n os.remove(os.path.join(d,f))\n print('Deleted file %s'%os.path.join(d,f)) \n return\n\n \n @property\n def images(self):\n return len(self.inputs['structures'])-2\n \n @property\n def image_dirs(self):\n \"\"\"\n Directories of images for NEB calculations. Directories are selected if all characters in the\n directory name are digits.\n \"\"\"\n dirs = []\n path = self.path\n path = op.abspath(path)\n for d in os.walk(path):\n directory = d[0]\n image_name = op.relpath(directory,start=path)\n if all(c.isdigit() for c in list(image_name)): #check if folder is image (all characters in folder rel path need to be numbers)\n dirs.append(directory)\n dirs.sort()\n return dirs\n \n @property\n def structures(self):\n return self.inputs['structures']\n\n @property\n def incar(self):\n return self.inputs['INCAR']\n \n @property\n def kpoints(self):\n return self.inputs['KPOINTS']\n \n @property\n def potcar(self):\n return self.inputs['POTCAR']\n\n\n @property\n def charge(self):\n \"\"\"\n Charge of the system calculated as the difference between the value of \"NELECT\"\n in the INCAR and the number of electrons in POTCAR. If \"NELECT\" is not present \n charge is set to 0.\n \"\"\"\n charge = 0\n if 'NELECT' in self.incar.keys():\n nelect = self.incar['NELECT']\n val = {}\n for p in self.potcar:\n val[p.element] = p.nelectrons\n neutral = sum([ val[el.symbol]*self.initial_structure.composition[el] \n for el in self.initial_structure.composition])\n charge = neutral - nelect\n if not isinstance(charge,int):\n charge = np.around(charge,decimals=1)\n return charge\n\n\n @property\n def energies(self):\n \"\"\"\n Energies of images read with NEBAnalysis\n \"\"\"\n return self._energies\n\n @property\n def forces(self):\n \"\"\"\n Forces of images read with NEBAnalysis\n \"\"\"\n return self._forces\n \n \n @property\n def formula(self):\n \"\"\"Complete formula from initial structure (read with Pymatgen)\"\"\"\n if self.initial_structure:\n return self.initial_structure.composition.formula\n else:\n return None\n\n \n @property\n def initial_structure(self):\n \"\"\"Initial structure read from first element of \"\"structures\" attribute. \"\"\"\n return self.structures[0]\n\n \n @property\n def is_converged(self):\n \"\"\"\n Reads Pymatgen Vasprun object and returns \"True\" if the calculation is converged,\n or the ionic step limit has been reached reading from the OSZICAR file.\n \"False\" if reading failed, and \"None\" if is not present in the outputs dictionary.\n \"\"\"\n if hasattr(self,'_is_converged'):\n return self._is_converged\n else:\n return None\n\n\n @property\n def is_required_accuracy_reached(self):\n \"\"\"\n True if \"reached required accuracy - stopping structural energy minimisation\" \n is found in most recent out.* file. 
\n False if file exists but no line is found.\n None if no out.* file exists.\n \"\"\"\n return self._is_required_accuracy_reached\n \n \n @property\n def is_step_limit_reached(self):\n \"\"\"\n Reads number of ionic steps from the OSZICAR file with Pymatgen and returns True if \n is equal to the step limit in INCAR file (NSW tag)\n \"\"\"\n return self._is_step_limit_reached\n\n\n @property\n def neb_analysis(self):\n \"\"\"\n Get NEBAnalysis object from r, energies and forces. Returns None if any of the inputs is None.\n \"\"\"\n if self.r is not None and self.energies is not None and self.forces is not None:\n return NEBAnalysis(self.r, self.energies, self.forces, self.structures)\n else:\n return None\n\n\n @property\n def nelectrons(self):\n \"\"\"\n Number of electrons in the system. If 'NELECT' tag is in INCAR that value is returned.\n Else the sum of valence electrons from POTCAR is returned.\n \"\"\"\n if 'NELECT' in self.incar.keys():\n nelect = self.incar['NELECT']\n else:\n val = {}\n for p in self.potcar:\n val[p.element] = p.nelectrons\n nelect = sum([ val[el.symbol]*self.initial_structure.composition[el] \n for el in self.initial_structure.composition])\n return nelect\n\n\n @property\n def r(self):\n \"\"\"\n Root mean square distances between structures read with NEBAnalysis\n \"\"\"\n return self._r\n\n\n def get_inputs(self,sync=False):\n \"\"\"\n Read inputs from Job directory\n \"\"\"\n if sync:\n self.sync_from_hpc()\n inputs = {}\n structures = []\n path = op.abspath(self.path)\n dirs = [d[0] for d in os.walk(path)]\n for d in dirs:\n image_name = op.relpath(d,start=path)\n if all(c.isdigit() for c in list(image_name)): #check if folder is image (all characters in folder rel path need to be numbers)\n image_path = d\n structure = Poscar.from_file(op.join(image_path,'POSCAR')).structure\n structures.append(structure)\n\n inputs['structures'] = structures \n inputs['INCAR'] = Incar.from_file(op.join(path,'INCAR'))\n inputs['KPOINTS'] = Kpoints.from_file(op.join(path,'KPOINTS'))\n inputs['POTCAR'] = Potcar.from_file(op.join(path,'POTCAR'))\n \n self.inputs = inputs\n return\n\n\n def get_outputs(self,sync=False,get_output_properties=True):\n \"\"\"\n Read outputs from Job directory\n \"\"\"\n if sync:\n self.sync_from_hpc()\n outputs = {}\n path = self.path \n try:\n outputs['NEBAnalysis'] = NEBAnalysis.from_dir(path)\n except:\n print('Warning: NEB output reading with NEBAnalysis in \"%s\" failed'%path)\n outputs['NEBAnalysis'] = None\n \n self.outputs = outputs\n if get_output_properties:\n self.get_output_properties()\n return\n \n\n def get_output_properties(self):\n \"\"\"\n Parse outputs properties from VaspNEBJob.outputs.\n \"\"\"\n \n self._is_required_accuracy_reached = self._get_ionic_relaxation_from_outfile()\n self._is_step_limit_reached = self._get_step_limit_reached() \n self._is_converged = self._get_convergence()\n \n neb = self.outputs['NEBAnalysis']\n self._r = neb.r if neb else None\n self._energies = neb.energies if neb else None\n self._forces = neb.forces if neb else None\n\n return\n\n \n def write_input(self,write_structures=True):\n \"\"\"\n Write input files in all image directories\n \"\"\"\n path = op.abspath(self.path)\n \n self.job_settings['nodes'] = self.images \n incar = self.inputs['INCAR']\n kpoints = self.inputs['KPOINTS']\n potcar = self.inputs['POTCAR']\n job_settings = self.job_settings\n\n if write_structures:\n self.write_structures()\n \n incar.write_file(op.join(path,'INCAR'))\n kpoints.write_file(op.join(path,'KPOINTS'))\n 
potcar.write_file(op.join(path,'POTCAR'))\n ScriptHandler(**job_settings).write_script(path=path)\n\n \n def write_structures(self):\n \"\"\"\n Writes POSCAR files in image directories\n \"\"\"\n path = self.path\n structures = self.inputs['structures']\n for s in structures:\n index = structures.index(s)\n image_path = op.join(path,str(index).zfill(2)) #folders will be named 00,01,..,XX\n if not op.exists(image_path):\n os.makedirs(image_path)\n Poscar(s).write_file(op.join(image_path,'POSCAR'))\n return\n\n \n def _get_convergence(self):\n \"\"\"\n Returns True if:\n - \"reached required accuracy - stopping structural energy minimisation\" is found in the most recent out file.\n OR\n - Ionic step limit has been reached, which means the step # in OSZICAR file matches the \"NSW\" tag in INCAR.\n Returns False if files named out.* exist but the no \"reached required accuracy\" has been found AND step limit \n has not been reached.\n Returns None if no out.* files have been found.\n \"\"\"\n is_converged = self._get_ionic_relaxation_from_outfile()\n if not is_converged:\n if self.is_step_limit_reached:\n is_converged = True\n \n return is_converged \n\n\n def _get_ionic_relaxation_from_outfile(self):\n \"\"\"\n Useful for NEB because Pymatgen fails to read vasprun file for NEB calculations.\n This function reads the outfile with highest number in the dir and checks for the \n string: \"reached required accuracy - stopping structural energy minimisation\". \n \"\"\"\n reached_accuracy = None\n outfiles = glob(os.path.join(self.path,'out*'))\n if outfiles:\n outfiles.sort()\n outfile = outfiles[-1] #taking more recent out file (\"out.jobid\" with bigger job id)\n lines = grep('reached required accuracy - stopping structural energy minimisation',outfile)\n if lines:\n print('\"reached required accuracy - stopping structural energy minimisation\" found in %s' %outfile)\n reached_accuracy = True\n else:\n reached_accuracy = False\n \n return reached_accuracy\n \n \n def _get_step_limit_reached(self):\n \"\"\"\n Reads number of ionic steps from the OSZICAR file with Pymatgen and returns True if \n is equal to the step limit in INCAR file (NSW tag)\n \"\"\"\n limit_reached = True\n image_dirs = self.image_dirs\n for d in image_dirs:\n if d != image_dirs[0] and d != image_dirs[-1]:\n if not os.path.isfile(os.path.join(d,'OSZICAR')): # check if OSZICAR files are present \n limit_reached = False\n else: \n n_steps = len(Oszicar(os.path.join(d,'OSZICAR')).ionic_steps)\n nsw = Incar.from_file(op.join(self.path,'INCAR'))['NSW'] # check NSW from INCAR in parent directory\n if nsw != n_steps:\n limit_reached = False\n return limit_reached ","sub_path":"pynter/data/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":41591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"93495928","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom layers import GraphAttentionLayer, SpGraphAttentionLayer\n\nimport pdb\n\nclass GAT(nn.Module):\n def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):\n \"\"\"Dense version of GAT.\"\"\"\n super(GAT, self).__init__()\n self.dropout = dropout\n\n self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, 
concat=False)\n\n def forward(self, x, adj):\n x = F.dropout(x, self.dropout, training=self.training)\n \n attention_outs = [att(x, adj) for att in self.attentions]\n zs = [t[0] for t in attention_outs]\n pmq = sum(t[1] for t in attention_outs)\n\n x = torch.cat(zs, dim=1)\n x = F.dropout(x, self.dropout, training=self.training)\n x, pmq2 = self.out_att(x, adj)\n pmq = pmq + pmq2\n x = F.elu(x)\n return F.log_softmax(x, dim=1), pmq\n\n\nclass SpGAT(nn.Module):\n def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):\n \"\"\"Sparse version of GAT.\"\"\"\n super(SpGAT, self).__init__()\n self.dropout = dropout\n\n self.attentions = [SpGraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]\n for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention)\n\n self.attentions2 = [SpGraphAttentionLayer(nfeat * nhid, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] # ADD\n for i, attention in enumerate(self.attentions): self.add_module('attention2_{}'.format(i), attention) # ADD\n\n self.out_att = SpGraphAttentionLayer(nhid * nheads, \n nclass, \n dropout=dropout, \n alpha=alpha, \n concat=False)\n\n def forward(self, x, adj):\n x = F.dropout(x, self.dropout, training=self.training)\n x = torch.cat([att(x, adj) for att in self.attentions], dim=1)\n x = F.dropout(x, self.dropout, training=self.training)\n x = torch.cat([att(x, adj) for att in self.attentions2], dim=1) # ADD\n x = F.dropout(x, self.dropout, training=self.training) # ADD\n x = self.out_att(x, adj) # F.elu(self.out_att(x, adj))\n return F.log_softmax(x, dim=1)\n\n","sub_path":"variational hypergat/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"24233222","text":"#!/usr/bin/python3\n\"\"\"\nStarts a Flask web application.\nListens on 0.0.0.0 on port 5000.\nRoutes:\n * /hbnb_filters: HBnB HTML filters page.\n\"\"\"\nfrom flask import Flask\nfrom flask import render_template\nfrom models import storage\n\napp = Flask(__name__)\n\n\n@app.route(\"/hbnb_filters\", strict_slashes=False)\ndef hbnb_filters():\n \"\"\"Displays the HBnB filters HTML page.\"\"\"\n states = storage.all(\"State\")\n amenities = storage.all(\"Amenity\")\n return render_template(\"10-hbnb_filters.html\",\n states=states, amenities=amenities)\n\n\n@app.teardown_appcontext\ndef teardown(excpt=None):\n \"\"\"Remove the current SQLAlchemy Session.\"\"\"\n storage.close()\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\")\n","sub_path":"web_flask/10-hbnb_filters.py","file_name":"10-hbnb_filters.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"311021715","text":"# -*- coding: utf-8 -*-\n\ndef ExtractPeakrangeAsList(fname, start=1, end=100000, export='tuple', exclude=[(0,0)]):\n \"\"\"\n \n Extract a range of spectra from an mzML file and return them\n -----------\n \n \n Keyword arguments:\n \n fname -- name of the input file \\n\n start -- the id of the first spectrum to return (counting starts at 1) \\n\n end -- the last spectrum to return, leave blank for last spectrum\n export -- sets the export formatting. 
tuple returns a tuple with (all_mz, all_intensity)\n list returns a list of binary tuples [(mz, int), (mz, int), ...]\n tuple is required for digitizing\n exclude -- takes a list of (start,end) tuples to be ignored during import. Useful for removing of spraying errors.\n \"\"\" \n \n \n import pymzml\n \n try:\n msrun = pymzml.run.Reader(fname)\n except:\n print('You must provide a valid filename')\n return\n\n try:\n msrun[end]\n except:\n print('exporting to the end')\n end = msrun.info['spectrum_count']\n \n if exclude == [(0,0)]:\n print('exporting from %(a)s to %(b)s'%{'a':start, 'b':end})\n else:\n print('exporting from %(a)s to %(b)s with exclusions'%{'a':start, 'b':end})\n \n if export == 'list': \n peakslist=[]\n for i in range(start,end):\n actual_peak = msrun[i].peaks\n peakslist.append(actual_peak)\n \n results={} \n \n for peaks in peakslist:\n for key, value in peaks:\n results[key] = results.get(key, 0) + value\n \n results=list(results.items())\n \n return sorted(results)\n \n elif export == 'tuple':\n mzlist=[]\n intlist=[]\n for i in range(start,end):\n for exclusion in exclude:\n if i not in range(exclusion[0], exclusion[1]):\n for j in msrun[i].mz:\n mzlist.append(j)\n for k in msrun[i].i:\n intlist.append(k)\n \n return mzlist, intlist\n else:\n print('valid export options are list or tuple!')\n return\n\n\ndef PlotPeaklist(peaklist, fname, start, end, istuple=True):\n \"\"\"\n Returns a grpahical representation of a spectrum formatted as list of tuples\n -------------\n \n \n Keyword arguments:\n \n peaklist -- name of the variable containing the tuple\\n\n fname, start, end -- name of the file, for exporting the image\n peaklist -- name of the variable containing the tuple\\n\n istuple -- set to False allows to plot the list of binary tuples returned by ExtractPeakrangeAsList with the value export=list\\n\n \"\"\"\n \n import pylab as plt\n \n if not istuple: \n \n y = [ b for a,b in peaklist ]\n x = [ a for a,b in peaklist ]\n \n else:\n x = peaklist[0]\n y = peaklist[1]\n \n plt.plot(x,y)\n plt.xlabel('m/z')\n plt.ylabel('counts')\n plt.savefig('%(a)s_extracted_from%(b)s_to_%(c)s_spectrum.eps'%{'a': fname[:-5], 'b' : start, 'c': end})\n plt.clf()\n \ndef DigitizePeaklist(peaklist, binsize=0.5, debug=False):\n \"\"\"\n Bin the list of peaks so that resolution decreases\n --------\n\n \n Keyword arguments: \\n\n peaklist -- input peaklist as list of tuples \\n\n binsize -- size of the bins, default 0.5 \\n\n debug -- True activates verbose output\n \"\"\"\n \n import numpy as np\n \n mz = np.array(peaklist[0])\n counts_double = np.array(peaklist[1])\n\n bins=np.arange(0,max(mz)+1,binsize)\n\n inds=np.digitize(mz, bins)\n \n if debug==True:\n print(mz)\n print(bins)\n for n in range(mz.size):\n print(bins[inds[n]-1], \"<=\", mz[n], \"<\", bins[inds[n]])\n\n mz_bins_double=[]\n for m in range(mz.size):\n mz_bins_double.append(bins[inds[m]-1])\n\n mz_bins_double = np.array(mz_bins_double)\n \n mz_counts_tuple={}\n for index, mass in enumerate(mz_bins_double):\n if mass in mz_counts_tuple.keys():\n mz_counts_tuple[mass] += counts_double[index]\n else:\n mz_counts_tuple[mass] = counts_double[index]\n \n mz_bins=[]\n counts=[] \n \n for element in sorted(list(mz_counts_tuple.items())):\n mz_bins.append(element[0])\n counts.append(element[1])\n \n return mz_bins, counts\n \ndef ExportPeaklistToUnidec(peaklist, outdir):\n \"\"\"\n Writes a txt-file readable by UniDec for further spectra processing\n ------\n \n Keyword arguments: \\n\n peaklist -- input peaklist, formatted 
as tuple (see ExtractPeakrangeAsList) \\n\n outdir -- output directory\n \"\"\"\n mz = peaklist[0]\n counts = peaklist[1]\n \n #print('data format of counts is %s'%counts[0].__class__) \n #print('data format of mz is %s'%mz[0].__class__) \n \n with open(outdir, 'w') as f:\n for index, mass in enumerate(mz):\n f.write('%(a).9f %(b).9f\\n'%{'a' : mass, 'b' : counts[index]})\n f.close()\n \ndef DisplayTIC(fname, start=1, end='end', xaxis='time'):\n \"\"\"\n Displays the TIC of a file and indicates two positions\n -----\n\n Keyword arguments:\n fname -- name of input file (mzML)\n start -- position of the first marker\n end -- position of the second marker\n \"\"\" \n import pymzml\n import pylab as plt\n\n msrun = pymzml.run.Reader(fname)\n \n if end == 'end':\n end=msrun.info['spectrum_count']\n \n if xaxis=='time':\n time=[]\n intensity=[] \n for spectrum in msrun:\n try:\n time.append(spectrum['scan start time']*60)\n intensity.append(spectrum['total ion current'])\n if spectrum['id'] == start:\n starttime=spectrum['scan start time']*60\n elif spectrum['id'] == end:\n endtime=spectrum['scan start time']*60\n except:\n continue\n \n plt.plot((starttime,starttime),(0,max(intensity)),'r')\n plt.plot((endtime,endtime),(0,max(intensity)),'r')\n plt.plot(time,intensity, 'black')\n plt.xlabel('time[sec]')\n plt.ylabel('intensity')\n plt.show()\n \n if xaxis=='id':\n scan_id=[]\n intensity=[]\n for spectrum in msrun:\n try:\n if isinstance(spectrum['id'], int): #removes entry 'TIC' on the last spectrum\n scan_id.append(spectrum['id'])\n intensity.append(spectrum['total ion current'])\n except:\n continue\n \n plt.plot((start,start),(0,max(intensity)),'r')\n plt.plot((end,end),(0,max(intensity)),'r')\n plt.plot(scan_id,intensity, 'black')\n plt.xlabel('scan id')\n plt.ylabel('intensity')\n plt.savefig('%(a)s_extracted_from%(b)s_to_%(c)s_TIC.eps'%{'a': fname[:-5], 'b' : start, 'c': end})\n plt.clf()\n \ndef DisplayXIC(fname,mass2follow):\n \"\"\"\n under construction\n \n \"\"\"\n \n import pymzml\n import pylab as plt \n \n try:\n msrun = pymzml.run.Reader(fname)\n except:\n print('You must provide a valid filename')\n return\n\n timeDependentIntensities = []\n for spectrum in msrun:\n if spectrum['ms level'] == 1:\n for time in spectrum:\n print(time)\n \n","sub_path":"crop_mzml.py","file_name":"crop_mzml.py","file_ext":"py","file_size_in_byte":7391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"448387317","text":"import random\n\nimport namegen\nimport graph\nfrom relationships import relType as rType\n\nclass character(graph.vertex):\n \"\"\" Actor/agent within the story \"\"\"\n def __init__(self):\n graph.vertex.__init__(self)\n self.gender = random.choice(['m','f'])\n self.name = namegen.generateFirstName(self.gender)\n self.victim = False\n # Entity associations\n self.family = None\n # Characters with relationships with this character\n self.relationsByType = {rType.familial:list(), rType.professional:list(), rType.social:list(), rType.romantic:list()}\n self.typesByRelation = dict()\n # Relationship objects involving\n self.relationships = {rType.familial:list(), rType.professional:list(), rType.social:list(), rType.romantic:list()}\n\n def setFamily(self, newFamily):\n if self.family != None:\n print(\"ERROR: Character already has family.\")\n self.family = newFamily\n self.family.addMember(self)\n\n def getFullName(self):\n fullName = str(self.name)\n if self.family:\n fullName += \" \" + str(self.family.surname)\n return 
fullName\n\n def addRelationship(self, charB, rel):\n if charB in self.relationsByType[rel.type]:\n print(\"ERROR: CharA already has this relationship with charB\")\n return False\n self.relationsByType[rel.type].append(charB)\n self.relationships[rel.type].append(rel)\n # Confirm if charB is already a key\n if not charB in self.typesByRelation.keys():\n self.typesByRelation[charB] = list()\n self.typesByRelation[charB].append(rel)\n return True","sub_path":"characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"27380380","text":"import types\n\nimport django\nfrom django.db import models\nfrom django.db.models import Field\nfrom django.utils.six import with_metaclass\nfrom typedmodels.models import TypedModelMetaclass\n\nfrom models import (\n ShardedTypedModel,\n)\nfrom models.common import ShardedPerTenantModel, ShardedTypedModelManager\n\n\nclass SharedTypedModelMetaclass(TypedModelMetaclass):\n \"\"\"\n This metaclass enables a model for auto-downcasting using a ``type`` attribute.\n \"\"\"\n def __new__(meta, classname, bases, classdict):\n # artifact created by with_metaclass, needed for py2/py3 compatibility\n if classname == 'NewBase':\n return super(SharedTypedModelMetaclass, meta).__new__(\n meta, classname, bases, classdict)\n try:\n ShardedTypedModel\n except NameError:\n # don't do anything for TypedModel class itself\n #\n # ...except updating Meta class to instantiate fields_from_subclasses attribute\n typed_model = super(SharedTypedModelMetaclass, meta).__new__(meta, classname, bases, classdict)\n # We have to set this attribute after _meta has been created, otherwise an\n # exception would be thrown by Options class constructor.\n typed_model._meta.fields_from_subclasses = {}\n return typed_model\n\n # look for a non-proxy base class that is a subclass of TypedModel\n mro = list(bases)\n while mro:\n base_class = mro.pop(-1)\n if issubclass(base_class, ShardedTypedModel) and base_class is not ShardedTypedModel:\n if base_class._meta.proxy:\n # continue up the mro looking for non-proxy base classes\n mro.extend(base_class.__bases__)\n else:\n break\n else:\n base_class = None\n\n if base_class:\n # Enforce that subclasses are proxy models.\n # Update an existing metaclass, or define an empty one\n # then set proxy=True\n class Meta:\n pass\n Meta = classdict.get('Meta', Meta)\n if getattr(Meta, 'proxy', False):\n # If user has specified proxy=True explicitly, we assume that he wants it to be treated like ordinary\n # proxy class, without TypedModel logic.\n return super(TypedModelMetaclass, meta).__new__(meta, classname, bases, classdict)\n Meta.proxy = True\n\n declared_fields = dict((name, element) for name, element in classdict.items() if isinstance(element, Field))\n\n for field_name, field in declared_fields.items():\n # Warnings will be triggered by django's system\n # check for M2M fields setting if we set null to True. 
Prevent\n # those warnings by setting null only for non-M2M fields.\n if not field.many_to_many:\n field.null = True\n if isinstance(field, models.fields.related.RelatedField):\n # Monkey patching field instance to make do_related_class use created class instead of base_class.\n # Actually that class doesn't exist yet, so we just monkey patch base_class for a while,\n # changing _meta.model_name, so accessor names are generated properly.\n # We'll do more stuff when the class is created.\n old_do_related_class = field.do_related_class\n def do_related_class(self, other, cls):\n base_class_name = base_class.__name__\n cls._meta.model_name = classname.lower()\n old_do_related_class(other, cls)\n cls._meta.model_name = base_class_name.lower()\n field.do_related_class = types.MethodType(do_related_class, field)\n if isinstance(field, models.fields.related.RelatedField) and isinstance(field.rel.to, ShardedTypedModel) and field.rel.to.base_class:\n field.rel.limit_choices_to['type__in'] = field.rel.to._typedmodels_subtypes\n field.rel.to = field.rel.to.base_class\n field.contribute_to_class(base_class, field_name)\n classdict.pop(field_name)\n base_class._meta.fields_from_subclasses.update(declared_fields)\n\n # set app_label to the same as the base class, unless explicitly defined otherwise\n if not hasattr(Meta, 'app_label'):\n if hasattr(getattr(base_class, '_meta', None), 'app_label'):\n Meta.app_label = base_class._meta.app_label\n\n classdict.update({\n 'Meta': Meta,\n })\n\n classdict['base_class'] = base_class\n\n cls = super(TypedModelMetaclass, meta).__new__(meta, classname, bases, classdict)\n\n cls._meta.fields_from_subclasses = {}\n\n if base_class:\n opts = cls._meta\n\n model_name = opts.model_name\n typ = \"%s.%s\" % (opts.app_label, model_name)\n cls._typedmodels_type = typ\n cls._typedmodels_subtypes = [typ]\n if typ in base_class._typedmodels_registry:\n raise ValueError(\"Can't register %s type %r to %r (already registered to %r )\" % (typ, classname, base_class._typedmodels_registry))\n base_class._typedmodels_registry[typ] = cls\n\n type_name = getattr(cls._meta, 'verbose_name', cls.__name__)\n type_field = base_class._meta.get_field('type')\n choices = tuple(list(type_field.choices) + [(typ, type_name)])\n choices_field = '_choices' if django.VERSION < (1, 9) else 'choices'\n setattr(type_field, choices_field, choices)\n\n cls._meta.declared_fields = declared_fields\n\n if django.VERSION < (1, 9):\n # Update related fields in base_class so they refer to cls.\n for field_name, related_field in declared_fields.items():\n if isinstance(related_field, models.fields.related.RelatedField):\n # Unfortunately RelatedObject is recreated in ./manage.py validate, so false positives for name clashes\n # may be reported until #19399 is fixed - see https://code.djangoproject.com/ticket/19399\n related_field.related.opts = cls._meta\n\n # look for any other proxy superclasses, they'll need to know\n # about this subclass\n for superclass in cls.mro():\n if (issubclass(superclass, base_class)\n and superclass not in (cls, base_class)\n and hasattr(superclass, '_typedmodels_type')):\n superclass._typedmodels_subtypes.append(typ)\n\n meta._patch_fields_cache(cls, base_class)\n else:\n # this is the base class\n cls._typedmodels_registry = {}\n\n # Since fields may be added by subclasses, save original fields.\n cls._meta._typedmodels_original_fields = cls._meta.fields\n cls._meta._typedmodels_original_many_to_many = cls._meta.many_to_many\n\n # add a get_type_classes classmethod to allow 
fetching of all the subclasses (useful for admin)\n\n def get_type_classes(subcls):\n if subcls is cls:\n return list(cls._typedmodels_registry.values())\n else:\n return [cls._typedmodels_registry[k] for k in subcls._typedmodels_subtypes]\n cls.get_type_classes = classmethod(get_type_classes)\n\n def get_types(subcls):\n if subcls is cls:\n return cls._typedmodels_registry.keys()\n else:\n return subcls._typedmodels_subtypes[:]\n cls.get_types = classmethod(get_types)\n\n return cls\n\n\nclass ShardedTypedModel(with_metaclass(SharedTypedModelMetaclass, ShardedPerTenantModel)):\n '''\n This class contains the functionality required to auto-downcast a model based\n on its ``type`` attribute.\n\n To use, simply subclass TypedModel for your base type, and then subclass\n that for your concrete types.\n '''\n objects = ShardedTypedModelManager()\n\n type = models.CharField(choices=(), max_length=255, null=False, blank=False, db_index=True)\n\n # Class variable indicating if model should be automatically recasted after initialization\n _auto_recast = True\n\n class Meta:\n abstract = True\n\n def __init__(self, *args, **kwargs):\n # Calling __init__ on base class because some functions (e.g. save()) need access to field values from base\n # class.\n\n # Move args to kwargs since base_class may have more fields defined with different ordering\n args = list(args)\n if len(args) > len(self._meta.fields):\n # Daft, but matches old exception sans the err msg.\n raise IndexError(\"Number of args exceeds number of fields\")\n for field_value, field in zip(args, self._meta.fields):\n kwargs[field.attname] = field_value\n args = [] # args were all converted to kwargs\n\n if self.base_class:\n before_class = self.__class__\n self.__class__ = self.base_class\n else:\n before_class = None\n super(ShardedTypedModel, self).__init__(*args, **kwargs)\n if before_class:\n self.__class__ = before_class\n if self._auto_recast:\n self.recast()\n\n def recast(self, typ=None):\n if not self.type:\n if not hasattr(self, '_typedmodels_type'):\n # Ideally we'd raise an error here, but the django admin likes to call\n # model() and doesn't expect an error.\n # Instead, we raise an error when the object is saved.\n return\n self.type = self._typedmodels_type\n\n for base in self.__class__.mro():\n if issubclass(base, ShardedTypedModel) and hasattr(base, '_typedmodels_registry'):\n break\n else:\n raise ValueError(\"No suitable base class found to recast!\")\n\n if typ is None:\n typ = self.type\n else:\n if isinstance(typ, type) and issubclass(typ, base):\n if django.VERSION < (1, 7):\n model_name = typ._meta.module_name\n else:\n model_name = typ._meta.model_name\n typ = '%s.%s' % (typ._meta.app_label, model_name)\n\n try:\n correct_cls = base._typedmodels_registry[typ]\n except KeyError:\n raise ValueError(\"Invalid %s identifier: %r\" % (base.__name__, typ))\n\n self.type = typ\n\n current_cls = self.__class__\n\n if current_cls != correct_cls:\n if django.VERSION < (1, 10) and self._deferred:\n # older django used a special class created on the fly for deferred model instances.\n # So we need to create a new deferred class based on correct_cls instead of current_cls\n from django.db.models.query_utils import DeferredAttribute, deferred_class_factory\n attrs = [k for (k, v) in current_cls.__dict__.items() if isinstance(v, DeferredAttribute)]\n correct_cls = deferred_class_factory(correct_cls, attrs)\n self.__class__ = correct_cls\n\n def save(self, *args, **kwargs):\n if not getattr(self, '_typedmodels_type', None):\n 
raise RuntimeError(\"Untyped %s cannot be saved.\" % self.__class__.__name__)\n return super(ShardedTypedModel, self).save(*args, **kwargs)\n\n","sub_path":"shardy/models/typed_models.py","file_name":"typed_models.py","file_ext":"py","file_size_in_byte":11683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"472834369","text":"import os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nimport numpy as np\nfrom qml.aglaia.aglaia import ARMP_G\nimport h5py\nfrom sklearn import model_selection as modsel\n\ndata = h5py.File(\"/Volumes/Transcend/data_sets/CN_isopentane/pruned_dft_with_forces/pruned_isopentane_cn_dft.hdf5\", \"r\")\n\nn_samples = 100\n\nxyz = np.array(data.get(\"xyz\")[:n_samples], dtype=np.float32)\nene = np.array(data.get(\"ene\")[:n_samples], dtype=np.float32)*2625.50\nene = ene - data.get(\"ene\")[0]*2625.50\nzs = np.array(data[\"zs\"][:n_samples], dtype=np.int32)\nforces = np.array(data.get(\"forces\")[:n_samples], dtype=np.float32)\n\nacsf_params = {\"nRs2\":5, \"nRs3\":5, \"nTs\":5, \"rcut\":5, \"acut\":5, \"zeta\":220.127, \"eta\":30.8065}\nestimator = ARMP_G(iterations=5000, l1_reg=0.0, l2_reg=0.0, learning_rate=0.075,\n representation_name='acsf', representation_params=acsf_params, tensorboard=True, store_frequency=2)\n\nestimator.set_xyz(xyz)\nestimator.set_classes(zs)\nestimator.set_properties(ene)\nestimator.set_gradients(forces)\n\nestimator.generate_representation()\n\nidx = np.arange(0, n_samples)\nidx_train, idx_test = modsel.train_test_split(idx, test_size=0, random_state=42, shuffle=True)\n\n# estimator.load_nn()\n\nestimator.fit(idx_train)\n# print(\"Done the fitting\")\n\nene_pred, f_pred = estimator.predict(idx_train)\n\nplt.scatter(ene_pred, ene[idx_train])\nplt.xlabel(\"Predicted energies (kJ/mol)\")\nplt.ylabel(\"DFT energies (kJ/mol)\")\n# plt.savefig(\"mem_aglaia_overfit.png\", dpi=200)\nplt.show()\n\nscore = estimator.score(idx_train)\n# print(\"\\n The score is %s\" % (str(score)))\n\n# os.remove(\"predict.tfrecords\")\n# os.remove(\"training.tfrecords\")\n\n# estimator.save_nn()","sub_path":"aglaia_forces/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"558145","text":"import pytest\n\nfrom mymusiclib.guitar import OpenStrings\nfrom mymusiclib.scales import ChromaticScale, MinorPentatonicScale, MajorPentatonicScale\n\n\ndef test_open_strings():\n o = OpenStrings()\n print(o.notes())\n\n\ndef test_open_strings_fret1():\n o = OpenStrings()\n print(o.notes(1))\n\n\ndef test_open_strings_fret12():\n o = OpenStrings()\n print(o.notes(11))\n\n\n@pytest.fixture\ndef all_frets():\n o = OpenStrings()\n guitar_frets = {}\n for i in range(12):\n guitar_frets[i] = o.notes(i)\n return guitar_frets\n\n\n@pytest.fixture()\ndef all_minor_scales():\n minor_scales = {}\n for ch in ChromaticScale.scale:\n minor_scales[ch] = MinorPentatonicScale().key(ch)\n return minor_scales\n\n\n@pytest.fixture()\ndef all_major_scales():\n major_scales = {}\n for ch in ChromaticScale.scale:\n major_scales[ch] = MajorPentatonicScale().key(ch)\n return major_scales\n\n\ndef test_minor_pentatonic_with_guitar_open_strings(all_frets, all_minor_scales):\n print('\\nAll Minor Pentatonic Scales that can be played with Guitar open strings:')\n for f in all_frets:\n guitar_set = set(all_frets[f])\n for s in all_minor_scales:\n scale_set = set(all_minor_scales[s])\n if scale_set == 
guitar_set:\n                print('Minor scale of {0} Can be played at fret {1} : {2}'.format(s, f, all_minor_scales[s]))\n\n\ndef test_major_pentatonic_with_guitar_open_strings(all_frets, all_major_scales):\n    print('\\nAll Major Pentatonic Scales that can be played with Guitar open strings:')\n    for f in all_frets:\n        guitar_set = set(all_frets[f])\n        for s in all_major_scales:\n            scale_set = set(all_major_scales[s])\n            if scale_set == guitar_set:\n                print('Major scale of {0} Can be played at fret {1} : {2}'.format(s, f, all_major_scales[s]))\n","sub_path":"tests/test_guitar.py","file_name":"test_guitar.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"342417464","text":"# main.py\nimport os\nimport csv\nimport sys\nfrom collections import defaultdict\n\n\n# print(\"Please place files in Resources directory\")\n# strfile = input(\"Enter filename you want to analyze: example \")\n\nstrfile = \"election_data_2.csv\"\n\ncsvpath = os.path.join('Resources', strfile)\n\npollDict = defaultdict(int)\ntotalVotes = 0\n\nwith open(csvpath, newline='') as csvfile:\n\tcsvreader = csv.reader(csvfile, delimiter=',')\n\n\tnext(csvreader, None)\n\tfor row in csvreader:\t\t\n\t\tpollDict[row[2]] += 1\n\t\ttotalVotes += 1\n\n\n# Specify the file to write to\noutpath = os.path.join('outpath', 'result_' + csvpath.rsplit('\\\\', 1)[1].rsplit('.',1)[0] + '.txt' )\n# print(outpath)\n\n# Open the file using \"write\" mode. Specify the variable to hold the contents\nwith open(outpath, 'w') as file:\n\t# headers\n\tfile.write(\"Election Results\\n\")\n\tfile.write(\"-----------------------------------\\n\")\n\tfile.write(\"Total Votes: \" + str(totalVotes) + '\\n')\n\tfile.write(\"-----------------------------------\\n\")\n\tprint(\"Election Results\")\n\tprint(\"-----------------------------------\")\n\tprint(\"Total Votes: \" + str(totalVotes))\n\tprint(\"-----------------------------------\")\n\n\t# candidate/percent won/votes won\n\tfor key, value in pollDict.items():\n\t\tpercent = round((value/totalVotes)*100, 1)\n\t\tprint(key + \": \" + str(percent) + \"% (\" + str(value) + \") \")\n\t\tfile.write(key + \": \" + str(percent) + \"% (\" + str(value) + \") \\n\")\n\n\t# found winner\n\tprint(\"-----------------------------------\")\n\tfile.write(\"-----------------------------------\\n\")\n\tmax_value = max(pollDict.values())\n\tmax_key = str([k for k, v in pollDict.items() if v == max_value]).strip(\"[\").strip(\"]\").strip(\"'\")\n\tprint(\"Winner: \" + str(max_key))\n\tprint(\"-----------------------------------\")\n\tfile.write(\"Winner: \" + str(max_key) + \"\\n\")\n\tfile.write(\"-----------------------------------\\n\")","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"524989369","text":"# ensemble implementation (not VotingClassifier, just hand-coded)\n\nfrom itertools import cycle\nfrom pandas import DataFrame, Series\nimport pandas as pd\n##### file read #####\n#\n# result1 = pd.read_csv(\"../input/ensemble/result1.csv\", sep=',')\n# result2 = pd.read_csv(\"../input/ensemble/result2.csv\", sep=',')\n# result3 = pd.read_csv(\"../input/ensemble/result3.csv\", sep=',')\n#\n#\n\n### append the result files read above to the list\nresult_list = [result1, result2, result3]\n\n\n### one-hot encoding\nfor i in range(len(result_list)):\n    one_hot_encoded = pd.get_dummies(result_list[i].label)\n    one_hot_result = 
pd.concat([DataFrame(result_list[i].acc_id), one_hot_encoded],\n                               axis=1)\n    result_list[i] = one_hot_result\n\n# score_list holds the per-model ensemble weights and must be defined beforehand,\n# e.g. score_list = [1.0, 1.0, 1.0] (assumed; it is not defined in this script)\nfor i in range(len(result_list)):\n    result_list[i]['2month'] = result_list[i]['2month'] * score_list[i]\n    result_list[i]['month'] = result_list[i]['month'] * score_list[i]\n    result_list[i]['retained'] = result_list[i]['retained'] * score_list[i]\n    result_list[i]['week'] = result_list[i]['week'] * score_list[i]\n#print('one-hot encoding finished')\n\nmerged_result = pd.DataFrame()\nmerged_result['acc_id'] = result_list[0]['acc_id']\nmerged_result['2month'] = result_list[0]['2month']\nmerged_result['month'] = result_list[0]['month']\nmerged_result['retained'] = result_list[0]['retained']\nmerged_result['week'] = result_list[0]['week']\nfor i in range(1,len(result_list)):\n    merged_result['2month'] += result_list[i]['2month']\n    merged_result['month'] += result_list[i]['month']\n    merged_result['retained'] += result_list[i]['retained']\n    merged_result['week'] += result_list[i]['week']\n\n\n### find label of max count\nensembled_result = pd.DataFrame()\nensembled_result['acc_id'] = merged_result['acc_id']\ntmp_result = merged_result\ntmp_result = tmp_result.drop(['acc_id'], axis=1)\nmax_label = tmp_result.idxmax(axis=1)\nensembled_result['label'] = max_label\nensembled_result\n\nensembled_result.to_csv(\"ensembled_result_40000_1.csv\", sep=',',index=False)\n","sub_path":"lightgbm_analysis/ensemble_manual.py","file_name":"ensemble_manual.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"3159980","text":"from urllib.parse import urljoin\r\n\r\nfrom .core import *\r\nfrom .vparsers import *\r\nfrom .utils import attributeerror_wrapper\r\n\r\n\r\nclass PlatanowyParkE2Parser(SingleWebpageParser):\r\n    url = \"http://www.platanowypark.pl/inwestycja/etap-2\"\r\n    method = \"GET\"\r\n\r\n    schema = [\r\n        DataUnit(label=\"Klatka\", parser=DOMTextExtractor(), id=\"entrance\"),\r\n        DataUnit(label=\"Numer\", parser=DOMTextExtractor(), id=\"number\"),\r\n        DataUnit(label=\"Piętro\", parser=IntParser(DOMTextExtractor()), id=\"floor\"),\r\n        DataUnit(label=\"Pokoje\", parser=IntParser(DOMTextExtractor()), id=\"rooms\"),\r\n        DataUnit(label=\"Powierzchnia\", parser=AreaParser(DOMTextExtractor()), id=\"area\"),\r\n        DataUnit(label=\"Status\", parser=StatusParser(DOMTextExtractor()), id=\"status\"),\r\n        DataUnit(label=\"Plan\", parser=LinkParser(DOMElementExtractor(\"a\")), id=\"plan\")\r\n    ]\r\n\r\n    @attributeerror_wrapper(return_value=[])\r\n    def find_records(self, soup):\r\n        return soup.find(\"table\").find_all(\"tr\")[1:]\r\n\r\n    def filter_records(self):\r\n        records = [\r\n            record for record in self.records\r\n            if record[\"entrance\"] != \"----\"\r\n        ]\r\n        self.records = records\r\n\r\n    def split_record(self, record):\r\n        return record.find_all(\"td\")\r\n\r\n    def modify_record(self, record, input_record=None):\r\n        record[\"number\"] = record[\"number\"].split(\" \")[-1]\r\n        record[\"plan\"] = urljoin(self.url, record[\"plan\"])\r\n        record[\"fid\"] = self.create_fid(record)\r\n        return record\r\n\r\n    def create_fid(self, record):\r\n        fid_form = \"{entrance}/{number}\"\r\n        return fid_form.format(**record)\r\n","sub_path":"parsers/platanowypark.py","file_name":"platanowypark.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"296635495","text":"from typing import List\nfrom functools import lru_cache\n\n\nclass Solution:\n    def numMatchingSubseq(self, s: str, words: 
List[str]) -> int:\n result = []\n for word in words:\n if self.isSubsequence(word, s):\n result.append(word)\n return len(result)\n \n @lru_cache\n def isSubsequence(self, s: str, t: str) -> bool:\n if not s:\n return True\n index = 0\n for j in range(len(t)):\n if s[index] == t[j]:\n index += 1\n if index == len(s):\n return True\n return False","sub_path":"792-number-of-matching-subsequences/792-number-of-matching-subsequences.py","file_name":"792-number-of-matching-subsequences.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"125482769","text":"#!/usr/bin/python\n# coding: utf-8\n\nimport os\n\nimport numpy as np\nimport torch\nfrom sklearn.metrics import f1_score\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ndef evaluate(predictions: list, labels: list, conds: list, mode: str=\"obj\"):\n assert(len(labels) == len(predictions))\n y_pred = torch.cat(predictions, dim=0)\n y_true = torch.cat(labels, dim=0)\n conds = torch.cat(conds, dim=0)\n \n y_pred = y_pred.cpu().detach().numpy()\n y_pred = y_pred.argmax(axis=1)\n y_true = y_true.cpu().numpy()\n \n if mode != 'type':\n total_a, right_a = 0., 0.\n total_b, right_b = 0., 0.\n for i, p in enumerate(y_pred):\n t = y_true[i]\n flag = int(conds[i] % 2)\n \n total_a += ((p + t) * (flag == 0))\n right_a += ((p * t) * (flag == 0))\n total_b += ((p + t) * (flag == 1))\n right_b += ((p * t) * (flag == 1))\n\n \n f1_a = 2.0 * right_a / total_a\n f1_b = 2.0 * right_b / total_b\n f1 = (f1_a + f1_b) / 2\n\n return f1_a, f1_b, f1\n else:\n f1 = f1_score(y_true, y_pred, average=\"macro\")\n \n return f1\n\ndef load_state(model, model_path):\n if os.path.exists(model_path):\n state = torch.load(model_path)\n epoch = state[\"epoch\"]\n model.load_state_dict(state[\"model\"])\n print(f\"Restore model, epoch: {epoch}\")\n return model, epoch\n else:\n print(f\"Not found {model_path} model\")\n return model, 1\n\n\ndef save_state(model, epoch, model_path):\n torch.save({\"model\": model.state_dict(), \"epoch\": epoch}, str(model_path))\n","sub_path":"code/train/train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"343735640","text":"from pathlib import Path\n\nfrom fpdf import FPDF\nfrom test.utilities import assert_pdf_equal\n\nHERE = Path(__file__).resolve().parent\n\n\ndef test_code39(tmp_path):\n pdf = FPDF()\n pdf.add_page()\n pdf.code39(\"fpdf2\", x=50, y=50, w=4, h=20)\n pdf.set_font(\"courier\", \"B\", size=36)\n pdf.text(x=80, y=80, txt=\"fpdf2\")\n assert_pdf_equal(pdf, HERE / \"barcodes_code39.pdf\", tmp_path)\n\n\ndef test_interleaved2of5(tmp_path):\n pdf = FPDF()\n pdf.add_page()\n pdf.interleaved2of5(\"1337\", x=65, y=50, w=4, h=20)\n pdf.set_font(\"courier\", \"B\", size=36)\n pdf.text(x=80, y=80, txt=\"1337\")\n assert_pdf_equal(pdf, HERE / \"barcodes_interleaved2of5.pdf\", tmp_path)\n","sub_path":"essayvenv/Lib/site-packages/test/barcodes/test_barcodes.py","file_name":"test_barcodes.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"558798690","text":"import os\nimport numpy as np\nimport tensorflow as tf\n\n\n# https://github.com/vonclites/squeezenet/blob/master/networks/squeezenet.py\n# https://github.com/ethereon/caffe-tensorflow\n# python convert.py --caffemodel --data-output-path py>\n\nclass 
SqueezeNet:\n\n def __init__(self, input_tensor, num_classes=1000, mode='train', model_scope='SqueezeNet', channels_first=False, sess=None):\n ''' SqueezeNet v1.1\n Adpated from the Caffe original implementation: https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1\n Reference:\n @article{iandola2016squeezenet,\n title={Squeezenet: Alexnet-level accuracy with 50x fewer parameters and< 0.5 mb model size},\n author={Iandola, Forrest N and Han, Song and Moskewicz, Matthew W and Ashraf, Khalid and Dally, William J and Keutzer, Kurt},\n journal={arXiv preprint arXiv:1602.07360},\n year={2016}\n }\n '''\n\n assert mode in ['train', 'val', 'test']\n\n self.sess = sess\n if self.sess is None:\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=config)\n\n self.scope = model_scope\n\n data_format = 'channels_first' if channels_first else 'channels_last'\n concat_axis = 1 if channels_first else 3\n with tf.variable_scope(model_scope, reuse=tf.AUTO_REUSE):\n conv1 = tf.layers.conv2d(input_tensor, 64, 3, 2, padding='valid', activation=tf.nn.relu, data_format=data_format, name='conv1')\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=3, strides=2, data_format=data_format)\n fire2_squeeze1x1 = tf.layers.conv2d(pool1, 16, 1, activation=tf.nn.relu, data_format=data_format, name='fire2_squeeze1x1')\n fire2_expand1x1 = tf.layers.conv2d(fire2_squeeze1x1, 64, 1, activation=tf.nn.relu, data_format=data_format, name='fire2_expand1x1')\n fire2_expand3x3 = tf.layers.conv2d(fire2_squeeze1x1, 64, 3, padding='same', activation=tf.nn.relu, data_format=data_format, name='fire2_expand3x3')\n fire2_concat = tf.concat([fire2_expand1x1, fire2_expand3x3], axis=concat_axis)\n fire3_squeeze1x1 = tf.layers.conv2d(fire2_concat, 16, 1, activation=tf.nn.relu, data_format=data_format, name='fire3_squeeze1x1')\n fire3_expand1x1 = tf.layers.conv2d(fire3_squeeze1x1, 64, 1, activation=tf.nn.relu, data_format=data_format, name='fire3_expand1x1')\n fire3_expand3x3 = tf.layers.conv2d(fire3_squeeze1x1, 64, 3, padding='same', activation=tf.nn.relu, data_format=data_format, name='fire3_expand3x3')\n fire3_concat = tf.concat([fire3_expand1x1, fire3_expand3x3], axis=concat_axis)\n pool3 = tf.layers.max_pooling2d(fire3_concat, 3, 2, data_format=data_format)\n fire4_squeeze1x1 = tf.layers.conv2d(pool3, 32, 1, activation=tf.nn.relu, data_format=data_format, name='fire4_squeeze1x1')\n fire4_expand1x1 = tf.layers.conv2d(fire4_squeeze1x1, 128, 1, activation=tf.nn.relu, data_format=data_format, name='fire4_expand1x1')\n fire4_expand3x3 = tf.layers.conv2d(fire4_squeeze1x1, 128, 3, padding='same', activation=tf.nn.relu, data_format=data_format, name='fire4_expand3x3')\n fire4_concat = tf.concat([fire4_expand1x1, fire4_expand3x3], axis=concat_axis)\n fire5_squeeze1x1 = tf.layers.conv2d(fire4_concat, 32, 1, activation=tf.nn.relu, data_format=data_format, name='fire5_squeeze1x1')\n fire5_expand1x1 = tf.layers.conv2d(fire5_squeeze1x1, 128, 1, activation=tf.nn.relu, data_format=data_format, name='fire5_expand1x1')\n fire5_expand3x3 = tf.layers.conv2d(fire5_squeeze1x1, 128, 3, padding='same', activation=tf.nn.relu, data_format=data_format, name='fire5_expand3x3')\n fire5_concat = tf.concat([fire5_expand1x1, fire5_expand3x3], axis=concat_axis)\n pool5 = tf.layers.max_pooling2d(fire5_concat, 3, 2, data_format=data_format)\n fire6_squeeze1x1 = tf.layers.conv2d(pool5, 48, 1, activation=tf.nn.relu, data_format=data_format, name='fire6_squeeze1x1')\n fire6_expand1x1 = 
tf.layers.conv2d(fire6_squeeze1x1, 192, 1, activation=tf.nn.relu, data_format=data_format, name='fire6_expand1x1')\n fire6_expand3x3 = tf.layers.conv2d(fire6_squeeze1x1, 192, 3, padding='same', activation=tf.nn.relu, data_format=data_format, name='fire6_expand3x3')\n fire6_concat = tf.concat([fire6_expand1x1, fire6_expand3x3], axis=concat_axis)\n fire7_squeeze1x1 = tf.layers.conv2d(fire6_concat, 48, 1, activation=tf.nn.relu, data_format=data_format, name='fire7_squeeze1x1')\n fire7_expand1x1 = tf.layers.conv2d(fire7_squeeze1x1, 192, 1, activation=tf.nn.relu, data_format=data_format, name='fire7_expand1x1')\n fire7_expand3x3 = tf.layers.conv2d(fire7_squeeze1x1, 192, 3, padding='same', activation=tf.nn.relu, data_format=data_format, name='fire7_expand3x3')\n fire7_concat = tf.concat([fire7_expand1x1, fire7_expand3x3], axis=concat_axis)\n fire8_squeeze1x1 = tf.layers.conv2d(fire7_concat, 64, 1, activation=tf.nn.relu, data_format=data_format, name='fire8_squeeze1x1')\n fire8_expand1x1 = tf.layers.conv2d(fire8_squeeze1x1, 256, 1, activation=tf.nn.relu, data_format=data_format, name= 'fire8_expand1x1')\n fire8_expand3x3 = tf.layers.conv2d(fire8_squeeze1x1, 256, 3, padding='same', activation=tf.nn.relu, data_format=data_format, name='fire8_expand3x3')\n fire8_concat = tf.concat([fire8_expand1x1, fire8_expand3x3], axis=concat_axis)\n fire9_squeeze1x1 = tf.layers.conv2d(fire8_concat, 64, 1, activation=tf.nn.relu, data_format=data_format, name='fire9_squeeze1x1')\n fire9_expand1x1 = tf.layers.conv2d(fire9_squeeze1x1, 256, 1, activation=tf.nn.relu, data_format=data_format, name='fire9_expand1x1')\n fire9_expand3x3 = tf.layers.conv2d(fire9_squeeze1x1, 256, 3, padding='same', activation=tf.nn.relu, data_format=data_format, name='fire9_expand3x3')\n fire9_concat = tf.concat([fire9_expand1x1, fire9_expand3x3], axis=concat_axis)\n drop9 = tf.layers.dropout(fire9_concat, 0.5, training=(mode=='train'))\n\n conv10 = tf.layers.conv2d(\n drop9, num_classes, 1, kernel_initializer=tf.random_normal_initializer(0.0, 0.01),\n activation=tf.nn.relu, data_format=data_format, name='conv10'\n ) # discarded in case of finetuning with less than 1000 classes\n axes = [2, 3] if channels_first else [1, 2]\n logits = tf.reduce_mean(conv10, axes, keepdims=False, name='pool10')\n\n self.output = logits\n self.view = conv10\n\n\n def load_pretrained_imagenet(self):\n ''' Load network weights and biases (format caffe-tensorflow) pretrained on ImageNet.'''\n\n self.load_weights('docrec/models/imagenet.npy', ignore_layers=['conv10'], BGR=True, ignore_missing=False)\n\n\n def load_weights(self, weights_path, ignore_layers=[], BGR=False, ignore_missing=False):\n ''' Load network weights and biases (format caffe-tensorflow).\n data_path: path to the numpy-serialized network weights.\n session: current TensorFlow session.\n first_layer: model first layer will be changed in case of BGR data.\n ignore_layers: layers whose parameters must be ignored.\n BGR: if data is BGR, convert weights from the first layer to RGB.\n ignore_missing: if true, serialized weights for missing layers are ignored.\n '''\n\n first_layer='conv1'\n # data_dict = np.load(weights_path, encoding='latin1').item()\n data_dict = np.load(weights_path, allow_pickle=True).item()\n for layer in data_dict:\n if layer in ignore_layers:\n continue\n for param_name, data in data_dict[layer].items():\n param_name = param_name.replace('weights', 'kernel').replace('biases', 'bias')\n try:\n scope = '{}/{}'.format(self.scope, layer) if self.scope else layer\n with 
tf.variable_scope(scope, reuse=True):\n                        var = tf.get_variable(param_name)\n                        if (layer == first_layer) and BGR and (param_name == 'kernel'):\n                            data = data[:, :, [2, 1, 0], :] # BGR => RGB\n                        self.sess.run(var.assign(data))\n                except ValueError:\n                    if not ignore_missing:\n                        raise\n\n\n    def save_weights(self, weights_path, ignore_layers=[]):\n        ''' Save network weights and biases (format caffe-tensorflow).\n        weights_path: path where the numpy-serialized network weights will be saved.\n        ignore_layers: layers whose parameters must be ignored.\n        '''\n\n        data_dict = {}\n        for var in tf.trainable_variables():\n            layer, param_name = var.op.name.split('/')[-2 :] # exclude scope if existing\n            if layer in ignore_layers:\n                continue\n            data = self.sess.run(var)\n            try:\n                data_dict[layer][param_name] = data\n            except KeyError:\n                data_dict[layer] = {param_name: data}\n\n        # check directory path\n        os.makedirs(os.path.dirname(weights_path), exist_ok=True)\n        np.save(weights_path, np.array(data_dict))\n","sub_path":"docrec/models/squeezenet.py","file_name":"squeezenet.py","file_ext":"py","file_size_in_byte":9464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"37690300","text":"import sys\nimport numpy as np\nimport json\nfrom pathlib import Path\n\n\ndef create_type_embeddings():\n    project_dir = Path(sys.argv[1])\n    emb_name = sys.argv[2]\n    do_pool = sys.argv[3] == 'pool'\n    cui2id = json.load((project_dir / 'info' / 'cui2id.json').open())\n    tui2id = json.load((project_dir / 'info' / 'tui2label.json').open())\n    type_id2cui_ids = {int(k): v for k, v in json.load((project_dir / 'info' / 'semtype2cuis.json').open()).items()}\n    with np.load(str(project_dir / 'info' / emb_name /'embeddings.npz')) as npz:\n        embeddings = npz['embs']\n\n    mean = np.mean(embeddings)\n    std = np.std(embeddings)\n    type_embeddings = np.random.normal(size=[len(tui2id), embeddings.shape[-1]], loc=mean, scale=std)\n    if do_pool:\n        for tui, i in tui2id.items():\n            tui_id = cui2id[tui]\n            if tui_id in type_id2cui_ids:\n                cui_ids = type_id2cui_ids[tui_id] # ids of each cui of this type\n                type_embeddings[i] = np.mean(embeddings[cui_ids], axis=0)\n                print(f\"Averaging {len(cui_ids)} cui embeddings for type {tui}\")\n            else:\n                print(f\"TUI {tui} has no associated concepts in semtype2cuis.json!\")\n    else:\n        for tui, i in sorted(tui2id.items()):\n            type_embeddings[i] = embeddings[cui2id[tui]]\n    print(f\"Saving type embedding matrix with shape {type_embeddings.shape}\")\n    np.savez_compressed(str(project_dir / 'info' / emb_name / 'type_embeddings.npz'),\n                        embs=type_embeddings)\n\n\nif __name__ == \"__main__\":\n    create_type_embeddings()\n","sub_path":"el/data/create_type_emb_mat.py","file_name":"create_type_emb_mat.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"386405712","text":"class ListNode:\n    def __init__(self, item, next):\n        \"\"\"\n        Post-Conditions: return a ListNode object that has an item entry and a next value\n        \"\"\"\n        self.item = item\n        self.next = next\n\nclass LinkedList:\n    def __init__(self):\n        \"create an empty list\"\n        self.list = []\n        self.head = None\n        self.tail = None\n        self.size = 0\n\n    def append(self, item):\n        \"\"\"\n        Post-Conditions: return a singly linked list of the new node appended to the end of the current list\n        \"\"\"\n\n        if self.size == 0:\n            newNode = ListNode(item, None) \n            self.head = newNode\n            self.tail = newNode\n            self.size += 1\n        elif self.size != 0:\n            
newNode = ListNode(item, None)\n            self.tail.next = newNode \n            self.tail = self.tail.next \n            self.size += 1\n\n    def __str__(self):\n        \"\"\"\n        Post-Conditions: return a string representation of the LinkedList, in a normal list Format\n        \"\"\"\n        listString = \"[\"\n        if self.size > 0:\n            currNode = self.head\n            while (currNode is not None): \n                # Convert to string! \n                if type(currNode.item) != type('s'):\n                    listString += (str(currNode.item) + \", \")\n                # Format the element to match a string output in a real list\n                elif type(currNode.item) == type('s'):\n                    listString += (\"'\" + currNode.item +\"'\" + \", \")\n                currNode = currNode.next\n            #Remove the last two characters \n            listString = listString[:-2]\n\n        listString += \"]\"\n        return listString\n\n    def __len__(self):\n        \"\"\"\n        Post-Conditions: return the length of the LinkedList\n        \"\"\"\n        return self.size","sub_path":"Assignment 4/A4Q1.py","file_name":"A4Q1.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"643956731","text":"# vim: fileencoding=utf-8 et ts=4 sts=4 sw=4 tw=0 fdm=marker fmr=#{,#}\n\nfrom time import time\n\nimport zmq\n\nfrom ..base_client import RPCClientBase\nfrom ..errors import RPCTimeoutError\nfrom ..utils import get_zmq_classes\n\n\n#-----------------------------------------------------------------------------\n# Synchronous RPC Client\n#-----------------------------------------------------------------------------\n\nclass SyncRPCClient(RPCClientBase): #{\n    \"\"\"A synchronous RPC client (blocking, not thread-safe)\"\"\"\n\n    def __init__(self, context=None, **kwargs): #{\n        \"\"\"\n        Parameters\n        ==========\n        context : Context\n            An existing Context instance, if not passed, zmq.Context.instance()\n            will be used.\n        serializer : Serializer\n            An instance of a Serializer subclass that will be used to serialize\n            and deserialize args, kwargs and the result.\n        \"\"\"\n        Context, _ = get_zmq_classes()\n\n        if context is None:\n            self.context = Context.instance()\n        else:\n            assert isinstance(context, Context)\n            self.context = context\n\n        super(SyncRPCClient, self).__init__(**kwargs)\n    #}\n    \n    def _get_tools(self): #{\n        \"Returns a tuple (Event, Queue, Future, TimeoutError)\"\n        pass # Not needed in this implementation\n    #}\n    \n    def call(self, proc_name, args=[], kwargs={}, ignore=False, timeout=None): #{\n        \"\"\"\n        Call the remote method with *args and **kwargs\n        (may raise exception)\n\n        Parameters\n        ----------\n        proc_name : name of the remote procedure to call\n        args : positional arguments of the remote procedure\n        kwargs : keyword arguments of the remote procedure\n        timeout : <float> | None\n            Number of seconds to wait for a reply.\n            RPCTimeoutError will be raised if no reply is received in time.\n            Set to None, 0 or a negative number to disable.\n\n        Returns\n        -------\n        <result>\n        If the call succeeds, the result of the call will be returned.\n        If the call fails, `RemoteRPCError` will be raised.\n        \"\"\"\n        if not (timeout is None or isinstance(timeout, (int, float))):\n            raise TypeError(\"timeout param: <float> or None expected, got %r\" % timeout)\n\n        if not self._ready:\n            raise RuntimeError('bind or connect must be called first')\n\n        req_id, msg_list = self._build_request(proc_name, args, kwargs, ignore)\n\n        self.socket.send_multipart(msg_list)\n\n        if timeout and timeout > 0:\n            poller = zmq.Poller()\n            poller.register(self.socket, zmq.POLLIN)\n            start_t = time()\n            deadline_t = start_t + timeout\n\n            def recv_multipart():\n                timeout_ms = int((deadline_t - time())*1000) # 
in milliseconds\n #logger.debug('polling with timeout_ms=%s' % timeout_ms)\n if timeout_ms > 0 and poller.poll(timeout_ms):\n msg = self.socket.recv_multipart()\n return msg\n else:\n raise RPCTimeoutError(\"Request %s timed out after %s sec\" % (req_id, timeout))\n else:\n recv_multipart = self.socket.recv_multipart\n\n def recv_yielder(): #{\n while True:\n msg_list = recv_multipart()\n self.logger.debug('received %r' % msg_list)\n reply = self._parse_reply(msg_list)\n\n if reply is None \\\n or reply['req_id'] != req_id:\n continue\n\n if reply['type'] == b'ACK':\n if ignore:\n yield b'OK', None\n return\n else:\n continue\n\n if reply['type'] == b'FAIL':\n raise reply['result']\n\n yield reply['type'], reply['result']\n if reply['type'] == b'OK':\n return\n #}\n\n recv_gen = recv_yielder()\n reply_type, result = next(recv_gen)\n\n if reply_type == b'OK':\n return result\n else:\n return self._yielder(recv_gen, req_id)\n #}\n#}\n\n","sub_path":"netcall/sync/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"436707232","text":"import os\nfrom collections import deque\nfrom random import shuffle\n\nimport gym\nimport numpy as np\n# rewards:\n# 0 on stacking\n# -1 on bad placement\n# 1 on enemy tower falling after action\n# -1 on own tower falling after action\nimport torch\nfrom cube_stacking.assets import TEXTURES, get_tex\nfrom gym.spaces import Box\nimport time\nfrom cube_stacking.self_play_policies import POLICY_DIR\nfrom cube_stacking.sim import CubeStacking\nfrom cube_stacking.utils import CAM_POSES, Rewards, RandomPositions, Player\n\nMAX_HEIGHT = 10\nTEST_STEPS = 50\n\nnpa = np.array\n\nREWARDS = {\n \"v0\": {\n Rewards.PlayerFall: -1,\n Rewards.EnemyFall: +1,\n Rewards.DistanceScale: 1,\n Rewards.Floor: 0,\n Rewards.Tie: 0\n },\n \"v1\": {\n Rewards.PlayerFall: 0,\n Rewards.EnemyFall: +1,\n Rewards.DistanceScale: 10,\n Rewards.Floor: 0,\n Rewards.Tie: 0\n },\n \"v2\": {\n Rewards.PlayerFall: -1,\n Rewards.EnemyFall: +1,\n Rewards.DistanceScale: 0,\n Rewards.Floor: -1,\n Rewards.Tie: 0\n },\n}\n\n\nclass TwoPlayerDirectoryFullArena(gym.Env):\n\n def __init__(self,\n randomness,\n headless=True,\n max_height=MAX_HEIGHT,\n arena=4,\n dir=POLICY_DIR,\n eval=False,\n drl=\"ppo\",\n reward_scheme=0,\n no_floor=False,\n textured=False):\n super().__init__()\n self.randomness = randomness\n self.arena = arena\n self.arena_diag = np.sqrt(self.arena**2 + self.arena**2)\n self.max_height = max_height\n self.dir = dir\n self.eval = eval\n self.drl = drl\n self.no_floor = no_floor\n self.textured = textured\n self.rewards = REWARDS[f\"v{reward_scheme}\"]\n self.stats = {\n \"player_correct_stacks\": 0,\n \"opponent_correct_stacks\": 0,\n \"player_floor_placements\": 0,\n \"opponent_floor_placements\": 0,\n \"avg_tower_height\": deque(maxlen=100),\n \"avg_win_rate\": deque(maxlen=100),\n \"avg_cubes_placed_total\": deque(maxlen=100),\n \"avg_player_dist_to_ref\": deque(maxlen=100),\n \"avg_opponent_dist_to_ref\": deque(maxlen=100),\n \"opponnet_policies\": 0\n }\n self.stats_tmp = {}\n\n if not self.textured:\n self.sim = CubeStacking(\n headless=headless, cam=CAM_POSES[\"9.5_block_close\"])\n else:\n self.sim = CubeStacking(headless=headless, halfbox=True, cam=CAM_POSES[\"physnet\"], four_colors=True)\n\n self.action_space = Box(-1, 1, shape=(2,), dtype=np.float32)\n self.observation_space = Box(0, 255, shape=(84, 84, 3), dtype=np.uint8)\n\n self.seed_val = 
np.random.randint(0, 100000000)\n\n self.ref_cube = None\n self.opponent = None # this will be set to the PPO policy later\n\n self.subfolder = None\n\n def fall_higher_obs(self, action, player):\n assert len(action) == 2\n action = np.clip(action, -1, 1)\n action *= self.arena\n\n action = CubeStacking.apply_randomization(\n action, RandomPositions[self.randomness])\n\n if self.eval:\n color = player\n else:\n color = None\n\n higher = self.sim.place_cube(action, color)\n fall = self.sim.last_cube_fell(TEST_STEPS)\n obs = self.sim.render()\n\n return fall, higher, obs\n\n def _prep_stats(self, win):\n self.stats[\"avg_tower_height\"].append((self.sim.current_max_z + 1) / 2)\n self.stats[\"avg_win_rate\"].append(1 if win else 0)\n self.stats[\"avg_cubes_placed_total\"] = len(self.sim.cubes)\n self.stats[\"success\"] = win\n\n def step(self, action):\n # negative normalized distance to reference cube\n reward_justin_case = -np.linalg.norm(self.ref_cube -\n npa(action)) / self.arena_diag\n self.stats[\"avg_player_dist_to_ref\"].append(reward_justin_case)\n\n fall_player, higher, obs = self.fall_higher_obs(action, Player.Player)\n\n if higher:\n self.stats[\"player_correct_stacks\"] += 1\n else:\n self.stats[\"player_floor_placements\"] += 1\n if self.no_floor:\n self._prep_stats(False)\n return obs, self.rewards[Rewards.Floor], True, self.stats\n\n if fall_player:\n self._prep_stats(False)\n return obs, self.rewards[Rewards.PlayerFall], True, self.stats\n\n if self.max_height == 4 and len(self.sim.cubes) == 4:\n self._prep_stats(False)\n return obs, self.rewards[Rewards.Tie], True, self.stats\n\n done = False\n reward = reward_justin_case * self.rewards[Rewards.DistanceScale]\n\n # opponent's turn\n if self.eval:\n time.sleep(.5)\n opponent_xy = self._play_opponent(obs)\n opp_dist = -np.linalg.norm(self.ref_cube -\n opponent_xy) / self.arena_diag\n self.stats[\"avg_opponent_dist_to_ref\"].append(opp_dist)\n\n fall_opponent, higher_opp, obs = self.fall_higher_obs(\n opponent_xy, Player.Enemy)\n\n if higher_opp:\n self.stats[\"opponent_correct_stacks\"] += 1\n else:\n self.stats[\"opponent_floor_placements\"] += 1\n\n if higher and fall_opponent:\n self._prep_stats(True)\n\n # \"avg_tower_height\": deque(100),\n # \"avg_win_rate\": deque(100)\n reward = self.rewards[Rewards.EnemyFall]\n done = True\n\n if self.sim.current_max_z >= MAX_HEIGHT * 2 - 0.01:\n done = True\n\n if self.max_height == 4 and len(self.sim.cubes) == 4:\n self._prep_stats(False)\n reward = self.rewards[Rewards.Tie]\n done = True\n\n return obs, reward, done, self.stats\n\n def reset(self):\n self.stats[\"player_correct_stacks\"] = 0\n self.stats[\"opponent_correct_stacks\"] = 0\n self.stats[\"player_floor_placements\"] = 0\n self.stats[\"opponent_floor_placements\"] = 0\n if \"success\" in self.stats:\n del self.stats[\"success\"]\n\n self.sim.reset()\n\n if self.textured:\n self.sim.shuffle_textures()\n\n cube_xy = np.random.uniform(-self.arena, self.arena, 2)\n self.sim.place_cube(cube_xy, Player.Starter)\n self.ref_cube = cube_xy / self.arena # to bring into [-1,1]\n\n if self.subfolder is not None:\n self.dir = os.path.join(self.dir, self.subfolder)\n print(\"switched loading directory to:\", self.dir)\n # in order to trigger this only once\n self.subfolder = None\n\n self._init_opponent()\n\n # coin toss if player starts or opponent\n if np.random.rand() < .5:\n obs = self.sim.render()\n opponent_xy = self._play_opponent(obs)\n _, _, obs = self.fall_higher_obs(opponent_xy, Player.Enemy)\n else:\n obs = 
self.sim.render()\n\n return obs\n\n def render(self, mode='human'):\n pass\n #TODO\n\n def seed(self, seed=None):\n self.seed = seed\n np.random.seed(seed)\n return super().seed(seed)\n\n def close(self):\n self.sim.close()\n super().close()\n\n def _play_opponent(self, obs):\n if self.opponent is None:\n return np.random.uniform(-1, 1, 2)\n\n obs = torch.from_numpy(obs).float().to('cpu')\n # obs /= 255\n obs = obs.permute(2, 0, 1)\n\n if self.drl == \"ppo\":\n # move obs down on the stacc\n # self.stacked_obs[:, :-3] = self.stacked_obs[:, 3:]\n # add new obs on top of stacc\n # self.stacked_obs[:, -3:] = obs\n self.stacked_obs[:, :] = obs\n\n with torch.no_grad():\n _, action, _, _ = self.opponent.act(\n self.stacked_obs,\n self.opp_recurrent_hidden_states,\n self.opp_masks,\n deterministic=True)\n opponent_xy = action.numpy()[0]\n self.opp_masks.fill_(1.0)\n\n elif self.drl == \"td3\":\n opponent_xy = self.opponent.select_action(np.array(obs), \"cpu\")\n\n return opponent_xy\n\n def _init_opponent(self):\n # get dire contents\n\n # print (f\"ENV: SEARCHING '{self.dir}', filtering for '-{self.drl.upper()}-', got:\",os.listdir(self.dir))\n policies = [\n x for x in os.listdir(self.dir)\n if f\"-{self.drl.upper()}-\" in x and \".pt\" in x[-3:]\n ]\n self.stats[\"opponnet_policies\"] = len(policies)\n\n if len(policies) == 0:\n print(\"ENV: no existing policies\")\n self.opponent = None\n return\n\n # if there is only one policy and we've loaded it, we don't need to reload it\n # if there are 3 or fewer policies, then toss a coin to see if we need to relead the policy\n if self.opponent is not None and (len(policies) == 1 or\n (len(policies) <= 3 and\n np.random.rand() < .5)):\n if self.drl == \"ppo\":\n self.opp_masks = torch.zeros(1, 1)\n # self.stacked_obs = torch.zeros(\n # (1, 12, 84, 84)).to(torch.device('cpu'))\n self.stacked_obs = torch.zeros(\n (1, 3, 84, 84)).to(torch.device('cpu'))\n elif self.drl == \"td3\":\n pass\n return\n\n shuffle(policies)\n policy_path = os.path.join(self.dir, policies[0])\n # print(f\"ENV: picking opponent policy '{policy_path}'\")\n\n # We need to use the same statistics for normalization as used in training\n\n if self.drl == \"ppo\":\n # notice the tuple\n self.opponent, _ = \\\n torch.load(policy_path, map_location='cpu')\n elif self.drl == \"td3\":\n self.opponent = \\\n torch.load(policy_path, map_location='cpu')\n\n # print(\"GE: USING POLICY:\", policy_path)\n\n if self.drl == \"ppo\":\n self.opp_recurrent_hidden_states = torch.zeros(\n 1, self.opponent.recurrent_hidden_state_size)\n self.opp_masks = torch.zeros(1, 1)\n # self.stacked_obs = torch.zeros(\n # (1, 12, 84, 84)).to(torch.device('cpu'))\n self.stacked_obs = torch.zeros(\n (1, 3, 84, 84)).to(torch.device('cpu'))\n elif self.drl == \"td3\":\n self.opponent.actor.eval()\n\n\nif __name__ == '__main__':\n import cube_stacking\n import time\n\n # Cubestacc-TwoPlayer-RelativeAct-NonRandom-Headless-v0\n\n env = gym.make(\"Cubestacc-TwoPlayer-Full-H4-NonRandom-PPO-Graphical-v2\")\n\n for i in range(10):\n obs = env.reset()\n time.sleep(1)\n done = False\n while not done:\n obs, rew, done, misc = env.step([0, 0])\n time.sleep(1)\n print(\"reward\", rew)\n\n env.close()\n","sub_path":"cube_stacking/envs/twoplayerdirectory_fullarena.py","file_name":"twoplayerdirectory_fullarena.py","file_ext":"py","file_size_in_byte":11081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"344374660","text":"# -*- coding: utf-8 -*-\n\nfrom 
django.contrib.contenttypes.models import ContentType\nfrom rest_framework.fields import (ListField, IntegerField,\n                                   SerializerMethodField)\nfrom rest_framework.relations import PrimaryKeyRelatedField\nfrom rest_framework.serializers import ModelSerializer, Serializer\n\nfrom main.common.mixins import ViewsCountMixin\nfrom main.common.serializers import AddressSerializer, InlineModelSerializer\nfrom store.models import Store\nfrom .fields import SettingsField\nfrom .models import Advert, Category\n\n\n__author__ = 'Nikita Akelev'\n__all__ = ('AdvertSerializer', 'AdvertCopySerializer', 'ComplexSerializer',\n           'AdvertFavoriteSerializer')\n\n\nclass CategoryShortSerializer(ModelSerializer):\n    class Meta:\n        model = Category\n        fields = ('id', 'title')\n\n\nclass AdvertSerializer(ModelSerializer, ViewsCountMixin):\n    views = SerializerMethodField(method_name='count_views')\n\n    class Meta:\n        model = Advert\n        fields = ('id', 'title', 'description', 'price', 'photo', 'views')\n\n\nclass AdvertCopySerializer(Serializer):\n    advert_ids = ListField(child=IntegerField())\n    store_ids = ListField(child=IntegerField())\n\n\nclass AdvertFavoriteSerializer(ModelSerializer):\n    class Meta:\n        model = Advert\n        fields = (\n            'id', 'title', 'price', 'photo', 'address', 'condition', 'store')\n\n\nclass AdvertCreateSerializer(ModelSerializer):\n    address = AddressSerializer(required=False)\n    store = PrimaryKeyRelatedField(required=True, queryset=Store.objects.all())\n\n    class Meta:\n        model = Advert\n        fields = (\n            'title', 'description', 'price', 'store', 'tariff_plan', 'address')\n        required = ('store', )\n\n\nclass ComplexSerializer(Serializer):\n    advert = AdvertCreateSerializer()\n    settings = SettingsField()\n\n    def create(self, validated_data):\n        result = {}\n        validated_data['advert']['address'] = \\\n            validated_data['advert']['store'].address\n        advert = Advert(owner=self.context['request'].user,\n                        **validated_data['advert'])\n        settings_serializer = validated_data['settings']\n        settings = settings_serializer.save()\n        advert.object_id = settings.id\n        advert.content_type = ContentType.objects.get_for_model(settings)\n        advert.category_id = int(self.initial_data['settings']['category_id'])\n        advert.save()\n        result['advert'] = advert\n        result['settings'] = settings_serializer\n        return result\n\n    def update(self, advert, validated_data):\n        for attr, value in validated_data['advert'].items():\n            setattr(advert, attr, value)\n        for attr, value in validated_data['settings'].validated_data.items():\n            setattr(advert.settings, attr, value)\n        advert.settings.save()\n        advert.save()\n        return advert\n\n    def to_representation(self, instance):\n        result = {}\n        if isinstance(instance, dict):\n            result.update({\n                'advert': AdvertSerializer(instance['advert']).data,\n                'settings': instance['settings'].data\n            })\n        elif isinstance(instance, Advert):\n            advert = AdvertSerializer(instance).data\n            ct = ContentType.objects.get_for_id(instance.content_type.id)\n            inline_serializer = InlineModelSerializer(ct.model_class())\n            settings = ct.get_object_for_this_type(id=instance.object_id)\n            settings = inline_serializer(settings).data\n            result.update({'advert': advert, 'settings': settings})\n        return result","sub_path":"Dot/advert/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"82131574","text":"import numpy as np\n\nclass PatchExtraction: \n    \n    def __init__(self, grid_size=20):\n        '''\n        Build a patch extractor for square patches of side grid_size.\n        '''\n        self.grid_size = 
grid_size\n self.delta = int( grid_size/2)\n\n def subsample(self, observations):\n '''\n Subsample the \n '''\n array = np.ones(observations.shape)*-1000.\n array[self.delta:array.shape[0]-self.delta, self.delta:array.shape[1]-self.delta] = observations[self.delta:array.shape[0]-self.delta, self.delta:array.shape[1]-self.delta] \n\n # Randomly sample the positive class grid points\n pos_j,pos_i = np.where((array>0))\n random_idx = list(np.random.choice(np.arange(len(pos_j)), size=int(0.5*len(pos_j)), replace=False))\n pos_j_random = list(np.array(pos_j)[random_idx])\n pos_i_random = list(np.array(pos_i)[random_idx])\n\n # Randomly sample the negative class grid points\n neg_j,neg_i = np.where((array==0))\n random_idx = list(np.random.choice(np.arange(len(neg_j)), size=int(0.01*len(neg_j)), replace=False))\n neg_j_random = list(np.array(neg_j)[random_idx]) \n neg_i_random = list(np.array(neg_i)[random_idx]) \n\n labels = [1]*len(pos_j_random) + [0]*len(neg_j_random) \n \n return zip( pos_j_random+neg_j_random, pos_i_random+neg_i_random), labels\n\n def extract_patch(self, data, centers):\n '''\n Extract patches\n data (y,x,v)\n '''\n storm_patches = [ ]\n for obj_y, obj_x in centers:\n storm_patches.append( data[:, obj_y-self.delta:obj_y+self.delta, obj_x-self.delta:obj_x+self.delta] )\n\n return storm_patches\n\n \n\n\n","sub_path":"extraction/GridPointExtraction.py","file_name":"GridPointExtraction.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"459578528","text":"import urllib\r\nfrom io import BytesIO\r\nfrom PIL import Image\r\nimport requests\r\n\r\nim = Image.open(BytesIO(requests.get(\r\n 'http://huge:file@www.pythonchallenge.com/pc/return/cave.jpg').content))\r\nw, h = im.size\r\n\r\nimgs = [Image.new(im.mode, (int(w / 2), int(h / 2))) for dummy in range(4)]\r\nimgs_load = [i.load() for i in imgs]\r\norg = im.load()\r\n\r\n\r\nfor i in range(w):\r\n for j in range(h):\r\n org_pos = (i, j)\r\n new_pos = (i // 2, j // 2)\r\n imgs_load[i % 2 + j % 2 * 2][new_pos] = org[org_pos]\r\n\r\n\r\n[imgs[i].save('%d.png' % i) for i in range(4)]\r\n","sub_path":"python-puzzle/puzzle12.py","file_name":"puzzle12.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"43949210","text":"def lastword(word):\n\tletters = [ord(i) for i in list(word)]\n\n\tl = len(letters)\n\tsol = []\n\ta = None\n\tb = None\n\tfor i in range(l):\n\t\tif len(sol) ==0:\n\t\t\tsol.append(letters[i])\n\t\t\ta = sol[0]\n\t\telse:\n\t\t\tif letters[i] >= a:\n\t\t\t\tsol.insert(0,letters[i])\n\t\t\t\ta = sol[0]\n\t\t\telse:\n\t\t\t\tsol.append(letters[i])\n\n\treturn ''.join(chr(i) for i in sol)\n\n\n\ndef main():\n\tfilename = 'A-large.in'\n\toutput = 'A-large.out'\n\tf = open(filename,'r')\n\t#Output file\n\tout = open(output,'w')\n\twhile True:\n\t\tline = f.readline()\n\t\tif line == '':\n\t\t\tbreak\n\t\tnum_tests = int(line)\n\t\tfor i in xrange(num_tests):\n\t\t\tline = f.readline().strip()\n\t\t\tsol = lastword(line)\n\t\t\ts = 'Case #%s: ' %(i+1)\n\t\t\ts = s + str(sol)\n\t\t\tout.write(s)\n\t\t\tout.write('\\n')\n\t\t\nif __name__ == \"__main__\":\n\tmain()","sub_path":"codes/BuildLinks1.10/test_input/CJ_16_1/16_1_1_loveapril_lastword.py","file_name":"16_1_1_loveapril_lastword.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"11842073","text":"from django.shortcuts import render\nfrom app.models import Brainstatus\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\n#from registration.backends.simple.views import RegistrationView\nfrom django.contrib.auth.views import login\nfrom django.contrib.auth.decorators import login_required\nfrom datetime import datetime\nimport json,random\nfrom django.views.decorators.csrf import csrf_exempt\n\n\n\ndef populate_db():\n\tyear = [str(i) for i in range(2010,2017)]\n\tmonth = [str(i) for i in range(1,13)]\n\tday = [str(i) for i in range(1,25)]\n\n\tfor i in range(2392,2410):\n\t\tbrainno = str(i)\n\t\tseriesid = 'F'\n\t\ta = '%s-%s-%s'%(random.choice(year),random.choice(month),random.choice(day))\n\t\tdop = datetime.strptime(a,'%Y-%m-%d')\n\t\tb = '%s-%s-%s'%(random.choice(year),random.choice(month),random.choice(day))\n\t\tdoi = datetime.strptime(b,'%Y-%m-%d')\n\t\tstatus = 1\n\t\tc = '%s-%s-%s'%(random.choice(year),random.choice(month),random.choice(day))\n\t\tlastupdate = datetime.strptime(c,'%Y-%m-%d')\n\t\tnextstep = 2\n\t\tBrainstatus(brainno = brainno,seriesid = seriesid,dateofperf = dop,dateofimg = doi,status = status , lastupdate = lastupdate , nextstep = nextstep).save()\n\n@csrf_exempt\ndef brain_status(request):\n\t\n\tif request.is_ajax() and request.POST:\n\t\tsort_order = json.loads(request.body)\t\n\telse:\n\t\tsort_order = {'doi':0,'dop':0,'lu':1}\n\t\n\tkey_map = {'doi':'-dateofimg','dop':'-dateofperf','lu':'-lastupdate'}\t\n\t#print sort_order\t\n\tfor i in sort_order:\n\t\tif sort_order[i] == 1:\n\t\t\torderby = key_map[i]\t\n\n\tbrainnames_ = Brainstatus.objects.all().values_list('brainno')\n\tbrainnames = [i[0].encode('utf8') for i in brainnames_]\t\n\t#populate_db()\n\tbrain_list = Brainstatus.objects.all().order_by(orderby)\n\t#brain_list = Brainstatus.objects.all().order_by('-dateofperf')\n\t#brain_list = Brainstatus.objects.all().order_by('-dateofimg')\n\tpage = request.GET.get('page',1)\n\tpaginator = Paginator(brain_list,10)\n\n\ttry:\n\t\tbrainlist = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tbrainlist = paginator.page(1)\n\texcept EmptyPage:\n\t\tbrainlist = paginator.page(paginator.num_pages)\n\t\n\treturn render(request,'brainlist.html',{'brainlist':brainlist,'brainnames':brainnames})\n\ndef eachbrain(request,brno):\n\n\tbrain_list = Brainstatus.objects.filter(brainno=brno).all()\n\t#brain_list = Brainstatus.objects.all().order_by('-dateofperf')\n\t#brain_list = Brainstatus.objects.all().order_by('-dateofimg')\n\tpage = request.GET.get('page',1)\n\tpaginator = Paginator(brain_list,10)\n\n\ttry:\n\t\tbrainlist = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tbrainlist = paginator.page(1)\n\texcept EmptyPage:\n\t\tbrainlist = paginator.page(paginator.num_pages)\n\t\n\treturn render(request,'singlebrain.html',{'brainlist':brainlist})\n\t\t\n","sub_path":"app/views1.py","file_name":"views1.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"208941103","text":"\"\"\"Signing service\"\"\"\n\nimport threading\nfrom .. 
import certs\nfrom ..util import crypto, locking\nfrom .service import signer_service, State\n\n\"\"\"Package-wide variables\"\"\"\nprivate_key = None\ninstance_id = None\ncurrent_state = None\nstate_lock = None\npending_states = None\nstate_condition = None\ncurrent_round = None\n\n\ndef init(args):\n global private_key\n global instance_id\n global current_state\n global state_lock\n global pending_states\n global state_condition\n global current_round\n\n current_state = State.RECEIVE_ORDER_BATCH\n state_lock = locking.RWLock()\n pending_states = list()\n state_condition = threading.Condition()\n current_round = 0\n\n instance_id = args.id\n\n if instance_id < 0:\n print(\"Instance ID must be non-negative!\")\n return\n\n # Load private key for signatures\n private_key = crypto.load_private_key(certs.path_to('server.key'))\n\n return signer_service(args)\n\n\ndef setup_argparser(parser):\n parser.set_defaults(func=init)\n parser.add_argument('id', type=int, help='The ID of the signer instance')\n","sub_path":"gnodex/signer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"97743871","text":"\"\"\"\nhttps://leetcode.com/problems/minimum-operations-to-make-a-subsequence/\n\"\"\"\nimport bisect\n\nclass Solution:\n def minOperations(self, target, arr):\n index = {x: i for i, x in enumerate(target)}\n\n # LIS\n stack = []\n for x in arr:\n if x not in index:\n continue\n i = bisect.bisect_left(stack, index[x])\n if i == len(stack):\n stack.append(None)\n stack[i] = index[x]\n\n return len(target) - len(stack)\n","sub_path":"playground/leetcode/solution/1713.py","file_name":"1713.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"411795617","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import filedialog\n\nfrom pomegranate import *\nimport numpy as np\n\n# create model from file\nwith open('model.txt', 'r') as f:\n model_json = f.read()\n\nmodel = HiddenMarkovModel.from_json(model_json)\n\nwith open('dictionary.txt', 'r') as f:\n dictionary = f.readlines()\n\ndictionary = [w.lower()[:-1] for w in dictionary]\n\n# GUI\nroot = Tk()\nroot.title(\"MyEditor\")\nroot.geometry(\"750x500\")\n\nvar = IntVar(value=1)\nc = Checkbutton(root, text=\"Enable Live Correction\", variable=var)\nc.pack(side=BOTTOM, anchor='w')\n\n\n\nlastword = ''\ndef correction_event(*args):\n text = txt.get(\"insert linestart\", \"insert\")\n \n try:\n word = text[text.rfind(' ') + 1:]\n except:\n word = text\n print(word)\n \n global lastword\n lastword = word\n \n if (var.get() == 1):\n \n if (word.lower() in dictionary):\n print(word.lower() + ' is in the dictionary')\n corrected_word = word\n else:\n seq = np.array(list(word.lower()))\n hmm_prediction = model.predict(seq, algorithm='viterbi')\n corrected_word = ''.join([chr(s + 97) for s in hmm_prediction[1:-1]])\n \n corrected_word = ''.join(\n [c.upper() if word[i].istitle() else c \n for i, c in enumerate(list(corrected_word))]\n )\n \n print(model.log_probability(list(word.lower())))\n print(model.log_probability(list(corrected_word.lower())))\n \n txt.delete(\"%s-%dc\" % (tk.INSERT, len(word)), tk.INSERT)\n txt.insert(tk.INSERT, corrected_word)\n \n \n\n#\ndef newfile():\n msgBox = tk.messagebox.askquestion('New File', 'Are you sure? 
Current data will be lost')\n    if (msgBox == 'yes'):\n        txt.delete(\"1.0\", tk.END)\n\ndef openfile():\n    filename = filedialog.askopenfilename(\n        filetypes=((\"text files\",\"*.txt\"),(\"all files\",\"*.*\"))\n    )\n    if (filename):\n        txt.delete(\"1.0\", tk.END)\n        f = open(filename, 'r')\n        txt.insert(\"1.0\", f.read())\n\ndef savefile():\n    filename = filedialog.asksaveasfilename(\n        initialdir = \"/\",\n        title = \"Select File\",\n        filetypes = ((\"text files\",\"*.txt\"),(\"all files\",\"*.*\"))\n    )\n    print(filename)\n    f = open(filename, 'w')\n    f.write(txt.get('1.0', 'end-1c'))\n    f.close()\n    tk.messagebox.showinfo('File Saved', 'File Saved')\n\ndef dictionary_event(*args):\n    insert_index = len(txt.get(\"1.0\", tk.INSERT))\n    end_index = len(txt.get(\"1.0\", tk.END))\n    print(insert_index+1)\n    print(end_index)\n    if (not (insert_index + 1 == end_index)):\n        print('not equal')\n    if (insert_index + 1 == end_index):\n        if (not txt.get(\"insert linestart\", \"insert\")[-1].isalpha()):\n            global lastword\n            print('lastword: ' + lastword)\n            txt.delete(\"%s-1c-%dc\" % (tk.INSERT, len(lastword)), \"insert-1c\")\n            txt.insert(\"insert-1c\", lastword)\n\n\n\nmenubar = Menu(root)\nmenubar.add_command(label=\"New\", command=newfile)\nmenubar.add_command(label=\"Open\", command=openfile)\nmenubar.add_command(label=\"Save\", command=savefile)\nmenubar.add_command(label=\"Undo\", command=dictionary_event)\nroot.config(menu=menubar)\n\n# Vertical (y) Scroll Bar\nyscrollbar = Scrollbar(root)\nyscrollbar.pack(side=RIGHT, fill=Y)\n\n\ntxt = Text(root, width=750, height=500, wrap=NONE,\n           yscrollcommand=yscrollbar.set)\ntxt.pack()\n\n# Configure the scrollbars\nyscrollbar.config(command=txt.yview)\n\n\n\ntxt.bind(\"<space>\", correction_event)\ntxt.bind(\".\", correction_event)\ntxt.bind(\",\", correction_event)\ntxt.bind(\";\", correction_event)\ntxt.bind(\"!\", correction_event)\ntxt.bind(\"?\", correction_event)\n\n\ntxt.bind('', dictionary_event)\ntxt.bind('', dictionary_event)\n\nb = Button(root, text='UNDO', command=dictionary_event)\nb.pack()\n\nroot.mainloop()\n","sub_path":"MyEditor.py","file_name":"MyEditor.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"624251661","text":"import csv\nimport itertools\n\n\ndef csv_to_list(csv_path):\n    with open(csv_path, 'r') as f:\n        reader = csv.reader(f)\n        your_list = list(reader)\n    flattened_list = list(itertools.chain.from_iterable(your_list))[1:]\n    return flattened_list\n","sub_path":"ConvertCSVToList.py","file_name":"ConvertCSVToList.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"135655868","text":"import random\nimport io\nimport logging\nimport pprint\n\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\nfrom gym import wrappers\n\n\nclass AttrDict(dict):\n    def __init__(self, *args, **kwargs):\n        super(AttrDict, self).__init__(*args, **kwargs)\n        self.__dict__ = self\n\n\ndef deep_update_dict(fr, to):\n    ''' update dict of dicts with new values '''\n    # assume dicts have same keys\n    for k, v in fr.items():\n        if type(v) is dict:\n            deep_update_dict(v, to[k])\n        else:\n            to[k] = v\n    return to\n\n\ndef set_random_seed(seed):\n    np.random.seed(seed)\n    tf.random.set_random_seed(seed)\n    random.seed(seed)\n\n\nimport os\nimport csv\n\n\ndef direct_logging(data, output_dir):\n    # import ipdb ; ipdb.set_trace()\n    for metric in data:\n        metric_dir = output_dir + metric\n        if 
os.path.isdir(metric_dir) != True:\n os.makedirs(metric_dir, exist_ok=True)\n\n with open(metric_dir + '/progress.csv', 'a') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow([data[metric]])\n csvFile.close()\n\n\nclass TensorBoardLogger(object):\n \"\"\"Logging to TensorBoard outside of TensorFlow ops.\"\"\"\n\n def __init__(self, output_dir):\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n self.output_dir = output_dir\n self.file_writer = tf.summary.FileWriter(output_dir)\n\n def log_scaler(self, step, name, value):\n summary = tf.Summary(\n value=[tf.Summary.Value(tag=name, simple_value=value)]\n )\n self.file_writer.add_summary(summary, step)\n\n def log_image(self, step, name, image):\n summary = tf.Summary(\n value=[tf.Summary.Value(\n tag=name,\n image=self._make_image(image)\n )]\n )\n self.file_writer.add_summary(summary, step)\n\n def log_images(self, step, data):\n if len(data) == 0:\n return\n summary = tf.Summary(\n value=[\n tf.Summary.Value(tag=name, image=self._make_image(image))\n for name, image in data.items() if image is not None\n ]\n )\n self.file_writer.add_summary(summary, step)\n\n def _make_image(self, tensor):\n \"\"\"Convert an numpy representation image to Image protobuf\"\"\"\n height, width, channel = tensor.shape\n image = Image.fromarray(tensor)\n output = io.BytesIO()\n image.save(output, format='PNG')\n image_string = output.getvalue()\n output.close()\n return tf.Summary.Image(\n height=height,\n width=width,\n colorspace=channel,\n encoded_image_string=image_string\n )\n\n def add_name_prefix_to_dict(self, _dict, prefix):\n new_dict = {}\n for key in _dict:\n new_dict[prefix + key] = _dict[key]\n return new_dict\n\n def log_dict(self, step, data, name_prefix=''):\n\n data = self.add_name_prefix_to_dict(data, name_prefix)\n summary = tf.Summary(\n value=[\n tf.Summary.Value(tag=name, simple_value=value)\n for name, value in data.items() if value is not None\n ]\n )\n\n direct_logging(data, os.path.join(self.output_dir, 'logs/'))\n self.file_writer.add_summary(summary, step)\n\n def flush(self):\n self.file_writer.flush()\n\n\ndef unwrapped_env(env):\n if isinstance(env, wrappers.TimeLimit) \\\n or isinstance(env, wrappers.Monitor) \\\n or isinstance(env, wrappers.FlattenDictWrapper):\n return env.unwrapped\n return env\n\n\ndef average_metrics(metrics):\n if len(metrics) == 0:\n return {}\n new_metrics = {}\n for key in metrics[0].keys():\n new_metrics[key] = np.mean([m[key] for m in metrics])\n\n return new_metrics\n\n\ndef print_flags(flags, flags_def):\n logging.info(\n 'Running training with hyperparameters: \\n{}'.format(\n pprint.pformat(\n ['{}: {}'.format(key, getattr(flags, key)) for key in flags_def]\n )\n )\n )\n\n\ndef parse_network_arch(arch):\n if len(arch) == 0:\n return []\n return [int(x) for x in arch.split('-')]\n","sub_path":"misc_utils.py","file_name":"misc_utils.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"21867662","text":"import GeneralLib as GL\nfrom pip._vendor.distlib.compat import raw_input\n\n#Function creates a dictionary of relevance data and stores in the json file.\ndef getRelevanceData():\n with open(raw_input(\"Enter the file containing relevance info:\"),\"r\") as f:\n data = f.read().split(\"\\n\")\n\n relevanceInfo = dict() \n for line in data:\n line = line.strip()\n tokens = line.split(\" \")\n if tokens[0] in relevanceInfo.keys():\n relevanceInfo[tokens[0]].append(tokens[2])\n 
else:\n relevanceInfo.update({tokens[0] : [tokens[2]]})\n \n return relevanceInfo\n\n\nif __name__ == \"__main__\" :\n GL.dictToJson(raw_input(\"Enter json file relevance info will be stored:\"), getRelevanceData())","sub_path":"Phase2/src/GenDicts.py","file_name":"GenDicts.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"473720740","text":"from django.urls import path\nfrom . import views\n\napp_name = 'Home'\n\nurlpatterns = [\n path ('', views.index, name = \"index\"),\n path ('Detection', views.detection, name = \"detection\"),\n path ('Decision', views.decision, name = \"decision\"),\n path ('Deception', views.deception, name = \"deception\"),\n path ('Observation', views.observation, name = \"observation\"),\n]\n","sub_path":"Framework/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"71147432","text":"import logging\nimport tornado.ioloop\nimport maproxy.proxyserver\nimport servicemanager\nimport win32service\nimport win32event\nimport threading\nimport asyncore\nimport os\nimport base64\nimport gl\nimport sqlconns\nimport functions as f\nimport win32serviceutil\nimport time\n\nSERVER_STARTED = 0\n\nAPPNAME = \"IFACE_HTTP_Handler\"\nAPP_VERSION = \"uface 2018.0.5000\"\n# log file names\nERROR_LOG = \"error_log\"\n\nclass AppServerSvc(win32serviceutil.ServiceFramework):\n _svc_name_ = \"iface https server\"\n _svc_display_name_ = \"iface https server\"\n _svc_description_ = \"iface https server from North Time and Data\"\n\n def __init__(self,args):\n win32serviceutil.ServiceFramework.__init__(self, args)\n self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)\n\n def SvcDoRun(self):\n self.ReportServiceStatus(win32service.SERVICE_RUNNING)\n asyncoreThread = threading.Thread(target=asyncore.loop, kwargs={'timeout': 1})\n asyncoreThread.start()\n myStatusThread = threading.Thread(target=win32event.WaitForSingleObject,\n args=(self.hWaitStop, win32event.INFINITE))\n myStatusThread.start()\n\n log_initialise()\n global SERVER_STARTED\n while True:\n if myStatusThread.isAlive():\n if SERVER_STARTED==0:\n if set_env()==True:\n if version_check()==True:\n ssl_certs = {\"certfile\": gl.CERT_FILE,\n \"keyfile\": gl.KEY_FILE}\n # \"client_ssl_options=ssl_certs\" simply means \"listen using SSL\"\n server = maproxy.proxyserver.ProxyServer(\"localhost\",gl.server_port,\n client_ssl_options=ssl_certs)\n log_initialise()\n SERVER_STARTED = 1\n server.listen(gl.https_port)\n logging.getLogger('tornado.access').disabled = True\n try:\n tornado.ioloop.IOLoop.current().start()\n SERVER_STARTED = 1\n except Exception as e:\n f.error_logging(APPNAME, \"we have an issue\", \"error_log\", \"\")\n tornado.ioloop.IOLoop.current().stop()\n else:\n pass\n else:\n break\n time.sleep(1)\n\n def SvcStop(self):\n tornado.ioloop.IOLoop.current().stop()\n log_exit()\n time.sleep(3)\n self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n win32event.SetEvent(self.hWaitStop)\n\n\ndef set_env():\n maindb = 'timeware_main_6'\n userdb = 'timeware_user_6'\n\n\n if os.path.isfile(gl.GENERAL_INI):\n fob=open(gl.GENERAL_INI, \"r\")\n listme = fob.readlines()\n fob.close()\n else:\n f.error_logging(APPNAME, \"Error reading general.ini file.\", \"error_log\",\"\")\n return False\n try:\n for index in range(len(listme)):\n if \"'\" in listme[index]: continue\n if 'server_port' in 
listme[index]:\n                gl.server_port = int(str.split(listme[index],'=')[1])\n            if 'https_port' in listme[index]:\n                gl.https_port = int(str.split(listme[index],'=')[1])\n            if \"dbmain\" in listme[index]:\n                maindb = str.split(listme[index], '=')[1]\n                maindb = maindb.replace(\"\\r\", \"\")\n                maindb = maindb.replace(\"\\n\", \"\")\n            if \"dbuser\" in listme[index]:\n                userdb = str.split(listme[index], '=')[1]\n                userdb = userdb.replace(\"\\r\", \"\")\n                userdb = userdb.replace(\"\\n\", \"\")\n        f.error_logging(APPNAME, \"Port is now: \"+str(gl.server_port), \"error_log\", \"\")\n    except Exception as e:\n        f.error_logging(APPNAME, \"From SetEnv for loop?: \" + str(e), \"error_log\", \"\")\n        return False\n    if sqlconns.readsql_connection_timeware_main_6(maindb,userdb) == 1:\n        test_comms = sqlconns.testsql(gl.SERVER, gl.SQL_LOGIN, gl.PASSWORD, gl.DATABASE)\n        if test_comms == 0:\n            f.error_logging(APPNAME, \"Error connecting to SQL server.\", \"error_log\", \"\")\n            return False\n    return True\n\ndef log_initialise():\n    f.error_logging(APPNAME, \"server started.\", \"error_log\",\"\")\n\ndef log_exit():\n    f.error_logging(APPNAME, \"iface clean exit.\", \"error_log\",\"\")\n\ndef version_check():\n    # version checking is currently disabled; everything below is unreachable\n    return True\n    if os.path.isfile(gl.LICENSE_TXT):\n        fob=open(gl.LICENSE_TXT, \"r\")\n        listme = fob.readlines()\n        fob.close()\n        try:\n            version_year = sqlconns.decrypt_with_key(listme[0])\n            ret = sqlconns.sql_select_single_field(\"SELECT TOP 1 [data] FROM tversion WHERE [property] like 'database version'\")\n            if ret == -1: return False\n            database_version = str.split(ret,'.')\n            if int(version_year) >= int('20'+database_version[0]):\n                return True\n            else:\n                f.error_logging(APPNAME, \"Version is out of date....cannot start.\", \"error_log\",\"\")\n                return False\n        except Exception as e:\n            return False\n    else:\n        f.error_logging(APPNAME, \"Error reading license.txt file.\", \"error_log\",\"\")\n        return False\n\n\nif __name__ == \"__main__\":\n    win32serviceutil.HandleCommandLine(AppServerSvc)\n    set_env()\n\n    #if set_env()==True:\n    # HTTPS->HTTP\n    # ssl_certs = {\"certfile\": \"certificate.pem\",\n    #\"keyfile\": \"privatekey.pem\"}\n    # \"client_ssl_options=ssl_certs\" simply means \"listen using SSL\"\n    # print(gl.server_port)\n    # server = maproxy.proxyserver.ProxyServer(\"localhost\", gl.server_port,\n    #                                          client_ssl_options=ssl_certs)\n    # log_initialise()\n    # SERVER_STARTED = 1\n    #server.listen(gl.https_port)\n    #logging.getLogger('tornado.access').disabled = True\n    #tornado.ioloop.IOLoop.instance().start()\n\n\n","sub_path":"iface_https_handler.py","file_name":"iface_https_handler.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"458955032","text":"# -*- coding: utf-8 -*-\n\nOPERATORS = (\n    (u\">\",u\"Больше\"),\n    (u\"<\",u\"Меньше\"),\n    (u\">=\",u\"Больше либо равно\"),\n    (u\"<=\",u\"Меньше либо равно\"),\n    (u\"=\",u\"Равно\"),\n)\n\nFACTOR_FIELDS = (\n    (u\"gender\",u\"Пол\"),\n    (u\"full_age\",u\"Возраст\"),\n)\n\nVALUE_TYPES = (\n    (u\"i\",u\"Целое число\",int),\n    (u\"f\",u\"Дробное\",float),\n    (u\"b\",u\"Логическое\",bool),\n    (u\"u\",u\"Текстовое\",unicode),\n)\n\nRESULTS = (\n    (u\"о\",u\"Обнаружено\"),\n    (u\"н\",u\"Не обнаружено\"),\n    (u\"т\",u\"Отсутствует\"),\n)\n\nTEST_FORM = (\n    (u\"кач.\",u\"Качественный\"),\n    (u\"п/кол.\",u\"Полуколичественный\"),\n    
(u\"кол.\",u\"Количественный\"),\n)","sub_path":"apps/lab/vars.py","file_name":"vars.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"378261831","text":"#coding: utf8\nimport requests\nimport json\nfrom next_hh_UTC import next_hh_UTC\n\nstationsComiteChampagne =[\n{\t\"commune\":\"Avize\",\"lieu-dit\":\"Avize\",\"lat\":\t48.977\t,\"lng\":\t4.001\t,\"alt\":\t175\t},\n{\t\"commune\":\"Bouzy\",\"lieu-dit\":\"Bouzy\",\"lat\":\t49.091\t,\"lng\":\t4.149\t,\"alt\":\t153\t},\n{\t\"commune\":\"Chatillon sur Marne\",\"lieu-dit\":\"Chatillon\",\"lat\":\t49.098\t,\"lng\":\t3.759\t,\"alt\":\t153\t},\n{\t\"commune\":\"Germaine\",\"lieu-dit\":\"Germaine\",\"lat\":\t49.123\t,\"lng\":\t4.032\t,\"alt\":\t172\t},\n{\t\"commune\":\"Chambrecy\",\"lieu-dit\":\"Chambrecy\",\"lat\":\t49.174\t,\"lng\":\t3.836\t,\"alt\":\t123\t},\n{\t\"commune\":\"Mailly-Champagne\",\"lieu-dit\":\"Mailly\",\"lat\":\t49.155\t,\"lng\":\t4.116\t,\"alt\":\t182\t},\n{\t\"commune\":\"Vert-Toulon\",\"lieu-dit\":\"Vertoul\",\"lat\":\t48.84\t,\"lng\":\t3.905\t,\"alt\":\t155\t},\n{\t\"commune\":\"Vertus\",\"lieu-dit\":\"Vertus\",\"lat\":\t48.902\t,\"lng\":\t3.995\t,\"alt\":\t150\t},\n{\t\"commune\":\"Saint-Thierry\",\"lieu-dit\":\"Sthierry\",\"lat\":\t49.3\t,\"lng\":\t3.96\t,\"alt\":\t135\t},\n{\t\"commune\":\"Les Riceys\",\"lieu-dit\":\"Riceys\",\"lat\":\t47.981\t,\"lng\":\t4.33\t,\"alt\":\t274\t},\n{\t\"commune\":\"Essoyes\",\"lieu-dit\":\"Essoyes\",\"lat\":\t48.039\t,\"lng\":\t4.493\t,\"alt\":\t280\t},\n{\t\"commune\":\"Colombé-la-Fosse\",\"lieu-dit\":\"Colombe\",\"lat\":\t48.261\t,\"lng\":\t4.779\t,\"alt\":\t252\t},\n{\t\"commune\":\"Vitry-le-Croisé\",\"lieu-dit\":\"Vitry\",\"lat\":\t48.144\t,\"lng\":\t4.554\t,\"alt\":\t237\t},\n{\t\"commune\":\"Prunay\",\"lieu-dit\":\"Aérodrome de Reims Prunay\",\"lat\":\t49.21\t,\"lng\":\t4.16\t,\"alt\":\t95\t},\n{\t\"commune\":\"Braine\",\"lieu-dit\":\"Ferme du parc\",\"lat\":\t49.35\t,\"lng\":\t3.53\t,\"alt\":\t61\t},\n{\t\"commune\":\"Changis\",\"lieu-dit\":\"Pont de l'ormois\",\"lat\":\t48.97\t,\"lng\":\t3.01\t,\"alt\":\t70\t},\n{\t\"commune\":\"Esternay\",\"lieu-dit\":\"Exploitation Dandre\",\"lat\":\t48.74\t,\"lng\":\t3.58\t,\"alt\":\t184\t},\n{\t\"commune\":\"Bouy-sur-Orvin\",\"lieu-dit\":\"Le-Clos-De-Macon\",\"lat\":\t48.44\t,\"lng\":\t3.51\t,\"alt\":\t101\t},\n{\t\"commune\":\"St Mard-en-Othe\",\"lieu-dit\":\"RD15\",\"lat\":\t48.17\t,\"lng\":\t3.79\t,\"alt\":\t226\t},\n{\t\"commune\":\"Celles-sur-Ource\",\"lieu-dit\":\"Bourg\",\"lat\":\t48.07\t,\"lng\":\t4.41\t,\"alt\":\t275\t},\n{\t\"commune\":\"Chaumont-Semoutier\",\"lieu-dit\":\"Aérodrome\",\"lat\":\t48.09\t,\"lng\":\t5.05\t,\"alt\":\t300\t},\n{\t\"commune\":\"Mathaux-Etape\",\"lieu-dit\":\"L'Etape\",\"lat\":\t48.35\t,\"lng\":\t4.47\t,\"alt\":\t143\t},\n{\t\"commune\":\"St-Dizier\",\"lieu-dit\":\"Robinson\",\"lat\":\t48.63\t,\"lng\":\t4.9\t,\"alt\":\t139\t},\n{\t\"commune\":\"Vatry\",\"lieu-dit\":\"Aéroport\",\"lat\":\t48.78\t,\"lng\":\t4.17\t,\"alt\":\t179\t},\n{\t\"commune\":\"Mourmelon-le-Grand\",\"lieu-dit\":\"Bourg\",\"lat\":\t49.11\t,\"lng\":\t4.36\t,\"alt\":\t115\t},\n{\t\"commune\":\"Troyes-Barberey\",\"lieu-dit\":\"Aérodrome\",\"lat\":\t48.3255\t,\"lng\":\t4.0117\t,\"alt\":\t119\t}\n]\nresult=[]\nfor station in stationsComiteChampagne :\n print (station[\"commune\"],station[\"lieu-dit\"],station[\"lat\"],station[\"lng\"],station[\"alt\"])\n heures_UTC_prévisions=[4,5,6]\n for heure in heures_UTC_prévisions:\n 
date_previ=next_hh_UTC(heure)[1]\n        path=\"https://61okw3bbmd.execute-api.eu-west-1.amazonaws.com/dev\"\n        path=path+\"?code_model=arome-france&code_param=TMP&code_type_niveau=2m\"\n        path=path+\"&annee=\"+str(date_previ.year)+\"&mois=\"+str(date_previ.month)+\"&jour=\"+str(date_previ.day)+\"&heure=\"+str(date_previ.hour)+\"&minute=\"+str(date_previ.minute)\n        path=path+\"&longi=\"+str(station[\"lng\"])+\"&lati=\"+str(station[\"lat\"])\n        rep=requests.get(path)\n        rep=json.loads(rep.text)\n        print (date_previ,rep[\"ecart_distance(km)\"],rep[\"niveaux\"][0][\"value\"]-273.15)\n    \nexit()\n\"\"\"\n    res={}\n    res[\"station\"]=station\n    res[\"previsions\"]=[]\n    previs= previsions (\"0025\",\"Tmin(h)\",station[\"lng\"],station[\"lat\"],niveau=2.0)\n    for previ in previs[\"previsions\"]:\n        res[\"previsions\"].append(\n            {\"date\":previ[\"date\"],\"Tmini 2m\":previ[\"valeur\"]})\n        print (\"date:\"+str(previ[\"date\"])+\" Tmini 2m:\"+str(previ[\"valeur\"]))\n    result.append(res)\n#print (json.dumps(result, indent=4))\n\"\"\"\n    ","sub_path":"stationsComiteChampagne_API.py","file_name":"stationsComiteChampagne_API.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"344317271","text":"\nclass Validate:\n    def __init__(self):\n        self.open_list=['[','{','(']\n        self.close_list = [']','}',')']\n        self.str1 = None\n        self.valid = False\n\n    def checkstring(self,str1):\n        self.str1 = str1\n        stack = []\n        for i in str1:\n            if i in self.open_list:\n                stack.append(i)\n            elif i in self.close_list :\n                pos = self.close_list.index(i)\n                if len(stack)>0 and self.open_list[pos] == stack[-1] :\n                    stack.pop()\n                else:\n                    return \"Invalid\"\n        if len(stack)==0:\n            return \"Valid\"\n        return \"Invalid\"  # unmatched opening brackets remain on the stack\n\nobj = Validate()\nstr1 = input(\"Enter string : \")\nprint(str1,'-',obj.checkstring(str1))\n","sub_path":"6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"648341606","text":"import re\nimport math\nimport requests\n\nfrom .lists import List\nfrom .movies import Movie\n\n# this is the current max the api will return per page\nMAX_RESULTS = 50\n\nclass RottenTomatoesClient(object):\n    def __init__(self, api_key):\n        self.api_key = api_key\n\n        self.server = 'api.rottentomatoes.com/api/public/v1.0/'\n\n    def get_resource(self, url, params=None):\n        if not re.match('http', url):\n            url = \"http://%s%s.json\" % (self.server, url)\n\n        request_params = {\n            'apikey': self.api_key,\n            'limit': MAX_RESULTS,\n            'page_limit': MAX_RESULTS,\n        }\n\n        if params:\n            request_params.update(params)\n\n        response = requests.get(url, params=request_params, allow_redirects=True)\n        response.raise_for_status() # raise an error if we get one\n\n        return response.json()\n\n    def parse_results(self, results):\n        ''' takes results from search or lists and puts it in a nice format.\n        '''\n\n        if 'movies' in results:\n            # if we have movies, return them\n\n            if 'total' in results:\n                # find how many pages of results we'll have based on the total count and the amount per page\n                pages = int(math.ceil(results['total']/float(MAX_RESULTS)))\n            else:\n                pages = 1\n\n            return {\n                'pages': pages,\n                'movies': [Movie(movie_data, self) for movie_data in results['movies']]\n            }\n        else:\n            # otherwise we probably have more lists\n            final_dict = {}\n\n            for key in results['links'].keys():\n                final_dict[key] = List(results['links'][key], self)\n\n            return final_dict\n\n    def search(self, 
query, page=1):\n raw = self.get_resource('movies', params={\n 'q': query,\n 'page': page\n })\n\n return self.parse_results(raw)\n\n def search_by_imdb(self, imdb_id):\n result = self.get_resource('movie_alias', params={\n 'type': 'imdb',\n 'id': imdb_id,\n })\n\n if 'error' in result:\n return None\n else:\n return Movie(result, self)\n\n def lists(self, directory=None, page=1):\n base_list_url = 'lists'\n\n if directory:\n base_list_url = base_list_url + '/' + directory\n\n return self.parse_results(self.get_resource(base_list_url, params={'page': page}))\n","sub_path":"rottentomatoes/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"227990696","text":"# Copyright (C) 2019-2020 Zilliz. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pyarrow as pa\nfrom pyspark.sql.functions import pandas_udf, PandasUDFType\n\nfrom pyspark.sql.types import *\n\ndef save_png_2D(hex_data, file_name):\n import binascii\n binary_string = binascii.unhexlify(str(hex_data))\n with open(file_name, 'wb') as png:\n png.write(binary_string)\n\ndef print_partitions(df):\n numPartitions = df.rdd.getNumPartitions()\n print(\"Total partitions: {}\".format(numPartitions))\n print(\"Partitioner: {}\".format(df.rdd.partitioner))\n df.explain()\n parts = df.rdd.glom().collect()\n i = 0\n j = 0\n for p in parts:\n print(\"Partition {}:\".format(i))\n for r in p:\n print(\"Row {}:{}\".format(j, r))\n j = j + 1\n i = i + 1\n\ndef pointmap_2D(df, vega):\n @pandas_udf(\"string\", PandasUDFType.GROUPED_AGG)\n def pointmap_wkt(point, conf=vega):\n arr_point = pa.array(point, type='string')\n from arctern import point_map_wkt\n png = point_map_wkt(arr_point, conf.encode('utf-8'))\n buffer = png.buffers()[1].hex()\n return buffer\n\n df = df.coalesce(1)\n hex_data = df.agg(pointmap_wkt(df['point'])).collect()[0][0]\n return hex_data\n\ndef heatmap_2D(df, vega):\n agg_schema = StructType([StructField('point', StringType(), True),\n StructField('w', IntegerType(), True)])\n\n @pandas_udf(agg_schema, PandasUDFType.MAP_ITER)\n def render_agg_UDF(batch_iter):\n for pdf in batch_iter:\n dd = pdf.groupby(['point'])\n dd = dd['w'].agg(['sum']).reset_index()\n dd.columns = ['point', 'w']\n yield dd\n\n @pandas_udf(\"string\", PandasUDFType.GROUPED_AGG)\n def heatmap_wkt(point, w, conf=vega):\n arr_point = pa.array(point, type='string')\n arr_c = pa.array(w, type='int64')\n from arctern import heat_map_wkt\n png = heat_map_wkt(arr_point, arr_c, conf.encode('utf-8'))\n buffer = png.buffers()[1].hex()\n return buffer\n\n first_agg_df = df.mapInPandas(render_agg_UDF).coalesce(1)\n final_agg_df = first_agg_df.mapInPandas(render_agg_UDF).coalesce(1)\n hex_data = final_agg_df.agg(heatmap_wkt(final_agg_df['point'], final_agg_df['w'])).collect()[0][0]\n return hex_data\n\ndef choroplethmap_2D(df, vega):\n agg_schema = StructType([StructField('wkt', StringType(), True),\n StructField('w', 
IntegerType(), True)])\n\n @pandas_udf(agg_schema, PandasUDFType.MAP_ITER)\n def render_agg_UDF(batch_iter):\n for pdf in batch_iter:\n dd = pdf.groupby(['wkt'])\n dd = dd['w'].agg(['sum']).reset_index()\n dd.columns = ['wkt', 'w']\n yield dd\n\n @pandas_udf(\"string\", PandasUDFType.GROUPED_AGG)\n def choroplethmap_wkt(wkt, w, conf=vega):\n arr_wkt = pa.array(wkt, type='string')\n arr_c = pa.array(w, type='int64')\n from arctern import choropleth_map\n png = choropleth_map(arr_wkt, arr_c, conf.encode('utf-8'))\n buffer = png.buffers()[1].hex()\n return buffer\n\n first_agg_df = df.mapInPandas(render_agg_UDF).coalesce(1)\n final_agg_df = first_agg_df.mapInPandas(render_agg_UDF).coalesce(1)\n hex_data = final_agg_df.agg(choroplethmap_wkt(final_agg_df['wkt'], final_agg_df['w'])).collect()[0][0]\n return hex_data\n","sub_path":"spark/pyspark/arctern_pyspark/render_func.py","file_name":"render_func.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"415840686","text":"import atexit\nimport os\nimport subprocess\n\nfrom django.contrib.staticfiles.management.commands.runserver import (\n Command as StaticfilesRunserverCommand\n)\n\n\nclass Command(StaticfilesRunserverCommand):\n\n def add_arguments(self, parser):\n super().add_arguments(parser)\n\n parser.add_argument(\n '--nowebpack',\n action='store_false',\n dest='use_webpack',\n default=True,\n help='Tells Django to NOT start webpack.'\n )\n\n def run(self, **options):\n if options.get('use_webpack'):\n self.set_webpack_environment_variables()\n self.start_webpack(**options)\n\n super().run(**options)\n\n def set_webpack_environment_variables(self):\n # Only set environment variables on the outer process\n outer_process = 'RUN_MAIN' not in os.environ\n\n if outer_process:\n os.environ['DJANGO_IP'] = self.addr\n os.environ['DJANGO_PORT'] = self.port\n\n # Now move Django to another port\n os.environ['WEBPACK_PORT'] = str(int(self.port) + 1)\n\n def start_webpack(self, **options):\n inner_process = \"RUN_MAIN\" in os.environ\n use_reloader = options.get(\"use_reloader\")\n\n # Don\"t start webpack on the inner process with autoreload\n if inner_process and use_reloader:\n return\n\n webpack_args = []\n\n # Only be noisy if verbosity >= 1\n if options.get(\"verbosity\") < 1:\n webpack_args.append(\"--display=errors-only\")\n\n if webpack_args:\n webpack_args = [\"--\"] + webpack_args\n\n self.stdout.write(\">>> Starting webpack\")\n self.webpack_process = subprocess.Popen(\n [\"npm\", \"start\"] + webpack_args,\n shell=False,\n stdin=subprocess.PIPE,\n stdout=self.stdout._out,\n stderr=self.stderr._out,\n )\n self.stdout.write(\">>> Webpack process on pid {}\".format(self.webpack_process.pid))\n\n def kill_webpack_process():\n self.stdout.write(\">>> Closing webpack process\")\n self.webpack_process.terminate()\n self.webpack_process.wait()\n\n atexit.register(kill_webpack_process)\n","sub_path":"apps/core/management/commands/runserver.py","file_name":"runserver.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"608858175","text":"# -*- coding: utf-8 -*-\n\nimport mysql.connector\nfrom crm.models import Cert\n\ntemp_word = []\ntext = open('321.txt', 'r')\nfor line_text in text:\n t = line_text.split(',')\n temp_word.append(t)\n\nfor tt in temp_word:\n cert = Cert()\n cert.organization_id=1\n cert.cert_type_id=3\n cert.cn = tt[2]\n 
cert.fio_adm_skzi_install_id = 3\n cert.from_skzi_id=1\n cert.name_skzi_id=2\n cert.status_id=1\n cert.before = tt[1]\n cert.fio = tt[0]\n cert.save()\n\ntext.close()\n#cn = 'Кусакина Юлия Сергеевна'\n\n","sub_path":"import_db.py","file_name":"import_db.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"625619947","text":"import numpy as np\nimport scipy\nfrom scipy import ndimage, sqrt, stats, misc, signal\n\n# mask for weird regions of the detector where I don't care about the background subtraction\nmask_weird = np.ones((100,2048))\nmask_weird[0:4,:] = np.nan # edge\nmask_weird[-4:,:] = np.nan # edge\nmask_weird[:,0:4] = np.nan # edge\nmask_weird[:,1020:1350] = np.nan # bullet hole\nmask_weird[94:,1402:1476] = np.nan # scratch\nmask_weird[:,1500:] = np.nan # unreliable bad pixel mask\n\n# find star and return coordinates [y,x]\ndef find_airy_psf(image):\n\n # replace NaNs with zeros to get the Gaussian filter to work\n nanMask = np.where(np.isnan(image) == True)\n image[nanMask] = 0\n \n # Gaussian filter\n imageG = ndimage.filters.gaussian_filter(image, 6) # further remove effect of bad pixels (somewhat redundant?)\n loc = np.argwhere(imageG==np.max(imageG))\n cx = loc[0,1]\n cy = loc[0,0]\n\n return [cy, cx]","sub_path":"modules/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"613342209","text":"\"\"\"\nLength-Area relating functions that describe the shape of the\nriver bed, ensure functions follow the signature:\n(area: Union[np.ndarray, float], *args, **kwargs) ->\n length: Union[np.ndarray, float]\n\nSee preexisting functions for reference.\n\"\"\"\n\nimport numpy as np\n\nfrom abc import ABC, abstractmethod\n\n\nclass RiverBed(ABC):\n def __init__(self, *args, **kwargs):\n pass\n\n def wetted_length(self, *args, **kwargs):\n pass\n\n\nclass ParabolicBed(RiverBed):\n def __init__(self, river_slope):\n self.riverbank_slope = river_slope\n\n def wetted_length(self, water_area):\n river_depth = np.power(\n np.multiply(\n (9. / 16.) * water_area,\n self.riverbank_slope\n ),\n (1. / 3.)\n )\n\n return np.add(\n np.sqrt(\n np.add(\n 4. * np.square(river_depth),\n np.divide(river_depth, self.riverbank_slope)\n )\n ),\n np.divide(\n np.arcsinh(\n 2. * np.sqrt(\n np.multiply(river_depth, self.riverbank_slope))\n ),\n 2. * self.riverbank_slope\n )\n )\n\n\nclass RectangleBed(RiverBed):\n def __init__(self, river_width):\n self.river_width = river_width\n\n def wetted_length(self, area):\n return np.add(\n self.river_width,\n np.divide(\n 2 * area,\n self.river_width\n )\n )\n","sub_path":"Project 2 - Floods/flooding-small-villages-master/flooding/river_shapes.py","file_name":"river_shapes.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"146238764","text":"\r\nshopping_list = []\r\n\r\ndef show_help():\r\n print(\"Items to pick at the stores\")\r\n print(\"\"\"\r\nenter HELP to show help\r\nenter DONE to stop entering items\r\nenter SHOW to view list items\r\n\"\"\"\r\n )\r\n\r\ndef show_list():\r\n print(\"here is your list\")\r\n for item in shopping_list:\r\n print(item)\r\n\r\n\r\ndef add_to_list(new_item):\r\n shopping_list.append(new_item)\r\n print(\"Added {}. 
List now has {} items\".format(new_item, len(shopping_list)))\r\n\r\nshow_help()\r\n\r\nwhile True:\r\n new_item = input(\"> \")\r\n\r\n if new_item.upper() == 'DONE':\r\n #show_list()\r\n break\r\n elif new_item.upper() == 'HELP':\r\n show_help()\r\n continue\r\n elif new_item.upper() == \"SHOW\":\r\n show_list()\r\n continue\r\n add_to_list(new_item)\r\n\r\nshow_list()\r\n \r\n","sub_path":"shop3.py","file_name":"shop3.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272276243","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 20 18:40:05 2019\n\n@author: yoelr\n\"\"\"\nfrom flexsolve import njitable\nfrom ..utils import thermo_user, Cache\nfrom scipy.optimize import differential_evolution\nfrom .._thermal_condition import ThermalCondition\nfrom .vle import VLE\nimport numpy as np\n\n__all__ = ('LLE', 'LLECache')\n\ndef liquid_activities(mol_l, T, f_gamma):\n total_mol_l = mol_l.sum()\n if total_mol_l:\n x = mol_l / total_mol_l\n gamma = f_gamma(x, T)\n xgamma = x * gamma\n else:\n xgamma = np.ones_like(mol_l)\n return xgamma\n\n@njitable\ndef gibbs_free_energy_of_liquid(mol_l, xgamma):\n xgamma[xgamma <= 0] = 1\n g_mix = (mol_l * np.log(xgamma)).sum()\n return g_mix\n\ndef lle_objective_function(mol_l, mol, T, f_gamma):\n mol_L = mol - mol_l\n xgamma_l = liquid_activities(mol_l, T, f_gamma)\n xgamma_L = liquid_activities(mol_L, T, f_gamma)\n g_mix_l = gibbs_free_energy_of_liquid(mol_l, xgamma_l)\n g_mix_L = gibbs_free_energy_of_liquid(mol_L, xgamma_L)\n g_mix = g_mix_l + g_mix_L\n return g_mix\n\ndef solve_lle_liquid_mol(mol, T, f_gamma, **differential_evolution_options):\n args = (mol, T, f_gamma)\n bounds = np.zeros([mol.size, 2])\n bounds[:, 1] = mol\n result = differential_evolution(lle_objective_function, bounds, args,\n **differential_evolution_options)\n return result.x\n\n@thermo_user\nclass LLE:\n \"\"\"\n Create a LLE object that performs liquid-liquid equilibrium when called.\n Differential evolution is used to find the solution that globally minimizes\n the gibb's free energy of both phases.\n \n Parameters\n ----------\n imol : MaterialIndexer\n Chemical phase data is stored here.\n thermal_condition=None : ThermalCondition, optional\n The temperature and pressure used in calculations are stored here.\n thermo=None : Thermo, optional\n Themodynamic property package for equilibrium calculations.\n Defaults to `thermosteam.settings.get_thermo()`.\n \n Examples\n --------\n >>> from thermosteam import indexer, equilibrium, settings\n >>> settings.set_thermo(['Water', 'Ethanol', 'Octane', 'Hexane'])\n >>> imol = indexer.MolarFlowIndexer(\n ... l=[('Water', 304), ('Ethanol', 30)],\n ... 
L=[('Octane', 40), ('Hexane', 1)])\n    >>> lle = equilibrium.LLE(imol)\n    >>> lle(T=360)\n    >>> lle\n    LLE(imol=MolarFlowIndexer(\n            L=[('Water', 2.671), ('Ethanol', 2.284), ('Octane', 39.92), ('Hexane', 0.9885)],\n            l=[('Water', 301.3), ('Ethanol', 27.72), ('Octane', 0.07885), ('Hexane', 0.01154)]),\n        thermal_condition=ThermalCondition(T=360.00, P=101325))\n    \n    \"\"\"\n    __slots__ = ('_thermo', # [float] Thermo object for estimating mixture properties.\n                 '_imol', # [MaterialIndexer] Stores vapor and liquid molar data.\n                 '_thermal_condition', # [ThermalCondition] T and P values are stored here.\n)\n    differential_evolution_options = {'seed': 0,\n                                      'popsize': 12,\n                                      'tol': 0.002}\n    \n    def __init__(self, imol, thermal_condition=None, thermo=None):\n        self._load_thermo(thermo)\n        self._thermal_condition = thermal_condition or ThermalCondition(298.15, 101325.)\n        self._imol = imol\n    \n    def __call__(self, T, P=None):\n        \"\"\"\n        Perform liquid-liquid equilibrium.\n\n        Parameters\n        ----------\n        T : float\n            Operating temperature [K].\n        P : float, optional\n            Operating pressure [Pa].\n        \n        \"\"\"\n        thermal_condition = self._thermal_condition\n        thermal_condition.T = T\n        if P: thermal_condition.P = P\n        imol = self._imol\n        mol, index, lle_chemicals = self.get_liquid_mol_data()\n        total_mol = mol.sum()\n        if total_mol:\n            gamma = self.thermo.Gamma(lle_chemicals)\n            mol_l = solve_lle_liquid_mol(mol, T, gamma,\n                                         **self.differential_evolution_options)\n            imol['l'][index] = mol_l\n            imol['L'][index] = mol - mol_l\n    \n    def get_liquid_mol_data(self):\n        # Get flow rates\n        imol = self._imol\n        imol['l'] = mol = imol['l'] + imol['L']\n        imol['L'] = 0\n        index = self.chemicals.get_lle_indices(mol > 0)\n        mol = mol[index]\n        chemicals = self.chemicals.tuple\n        lle_chemicals = [chemicals[i] for i in index]\n        return mol, index, lle_chemicals\n    \n    imol = VLE.imol\n    thermal_condition = VLE.thermal_condition\n    __format__ = VLE.__format__\n    __repr__ = VLE.__repr__\n\nclass LLECache(Cache): load = LLE\ndel Cache ","sub_path":"thermosteam/equilibrium/lle.py","file_name":"lle.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
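The lle.py record above drives scipy's differential_evolution with one [0, mol_i] bound per chemical to minimize the mixed-phase Gibbs energy. A minimal, self-contained sketch of that calling pattern (a toy quadratic objective standing in for the Gibbs-energy function, and a made-up flow vector) could look like this:

import numpy as np
from scipy.optimize import differential_evolution

def toy_objective(mol_l, mol):
    # stand-in for lle_objective_function: penalizes uneven phase splits
    return float(np.sum((mol_l - mol / 2.0) ** 2))

mol = np.array([2.0, 1.0, 0.5])   # hypothetical total molar flows
bounds = np.zeros([mol.size, 2])
bounds[:, 1] = mol                # each candidate phase flow lives in [0, mol_i]
result = differential_evolution(toy_objective, bounds, args=(mol,), seed=0)
print(result.x)                   # approaches mol / 2, the toy minimum

The real objective in the record is non-convex in general, which is why a global, population-based optimizer is used instead of a local gradient method.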
+{"seq_id":"216021556","text":"import vrep\nimport sys\nimport time\nimport numpy as np\nimport cv2\n\nclass Simulation:\n    def __init__(self):\n        vrep.simxFinish(-1)\n        self.clientId = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)\n        self.checkConnection()\n\n    def startSimulation(self):\n        print(\"Press \\\"Start simulation in V-REP\\\"\")\n\n        while vrep.simxGetConnectionId(self.clientId) != -1:\n            res, visionSensor = vrep.simxGetObjectHandle(self.clientId, 'Vision_sensor', vrep.simx_opmode_oneshot_wait)\n            err, resolution, image = vrep.simxGetVisionSensorImage(self.clientId, visionSensor, 0, vrep.simx_opmode_streaming)\n\n            if err == vrep.simx_return_ok:\n                img = np.array(image, dtype=np.uint8)\n                img.resize([resolution[1], resolution[0], 3])\n                img = cv2.flip(img, 0)\n\n                hsvLowerBorderColor = np.array([82, 25, 0])\n                hsvUpperBorderColor = np.array([161, 255, 255])\n\n                hsvImg = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)  # convert the colour model from RGB to HSV\n                thresh = cv2.inRange(hsvImg, hsvLowerBorderColor, hsvUpperBorderColor)  # apply the colour filter\n                contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n                # iterate over all detected contours\n                for con in contours:\n                    rect = cv2.minAreaRect(con)  # try to fit a bounding rectangle\n                    box = cv2.boxPoints(rect)  # find the four vertices of the rectangle\n                    box = np.int0(box)  # round the coordinates to integers\n\n                    area = int(rect[1][0] * rect[1][1])\n\n                    if area > 120:\n                        peri = cv2.arcLength(con, True)\n                        approx = cv2.approxPolyDP(con, 0.1 * peri, True)\n\n                        if len(approx) == 4 or len(approx) == 5:\n                            (x, y, w, h) = cv2.boundingRect(approx)\n                            cv2.drawContours(hsvImg, [box], 0, (255, 255, 250), 2)  # draw the rectangle\n\n                bgrImg = cv2.cvtColor(hsvImg, cv2.COLOR_HSV2BGR)\n                cv2.imshow('vision sensor', bgrImg)\n\n                if cv2.waitKey(1) & 0xFF == ord('q'):\n                    break\n\t\n        vrep.simxFinish(self.clientId)\n\t\n    def checkConnection(self):\n        if self.clientId != -1:\n            print(\"Connected to remote server\")\n        else:\n            print('Connection not successful')\n            sys.exit('Could not connect')\n\n\t\t\t\nif __name__ == '__main__':\n    s = Simulation()\n    s.startSimulation()\n    cv2.destroyAllWindows()\n\n# Contour approximation is based on the assumption that a curve can be approximated by a series of short line segments. This yields an approximated curve that consists of a subset of the points of the original curve.","sub_path":"бакалавриат/3.2/Программирование робототехнических комплексов/Программы/Lab3/OpenCV.py","file_name":"OpenCV.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"469615629","text":"## 1. Introduction ##\n\nfrom csv import reader\n\nopen_file = open('potus_visitors_2015.csv')\nparser = reader(open_file)\npotus = list(parser)\n\npotus = potus[1:]\n\n\n\n\n## 3. The Datetime Module ##\n\nimport datetime as dt\n\n## 4. The Datetime Class ##\n\nimport datetime as dt\n\nibm_founded = dt.datetime(1911,6,16)\n\nman_on_moon = dt.datetime(1969,7,20,20,17,0)\n\n\n\n## 5. Using Strptime to Parse Strings as Dates ##\n\n# The `potus` list of lists is available from\n# the earlier screen where we created it\n\n\n\nfor row in potus:\n    appt_start_date = row[2]\n    appt_start_date_dt = dt.datetime.strptime(appt_start_date,\"%m/%d/%y %H:%M\")\n    row[2] = appt_start_date_dt\n    \n    \n\n## 6. Using Strftime to format dates ##\n\nvisitors_per_month = dict()\n\nfor row in potus:\n    dt_object = row[2]\n    key = dt_object.strftime(\"%B, %Y\")\n    if key not in visitors_per_month:\n        visitors_per_month[key]=1\n    else:\n        visitors_per_month[key]+=1\n    \nprint(visitors_per_month)\n\n\n## 7. The Time Class ##\n\nappt_times = list()\n\nfor row in potus:\n    dt_object = row[2]\n    dt_time = dt_object.time()\n    appt_times.append(dt_time)\n    \n    \n\n\n\n## 8. Comparing time objects ##\n\nmin_time = min(appt_times)\nmax_time = max(appt_times)\n\n## 9. Calculations with Dates and Times ##\n\ndt_1 = dt.datetime(1981, 1, 31)\ndt_2 = dt.datetime(1984, 6, 28)\ndt_3 = dt.datetime(2016, 5, 24)\ndt_4 = dt.datetime(2001, 1, 1, 8, 24, 13)\n\nanswer_1 = dt_2 - dt_1\nanswer_2 = dt_3 + dt.timedelta(days=56)\nanswer_3 = dt_4 - dt.timedelta(seconds=3600)\n\n\n
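# A quick aside (not part of the original numbered exercises): subtracting
# datetimes yields timedelta objects, whose total_seconds() method is handy
# when summarizing durations such as the appointment lengths computed in the
# next section. A minimal, self-contained illustration:

import datetime as dt  # already imported in section 3 above

td = dt.datetime(2015, 1, 1, 10, 30) - dt.datetime(2015, 1, 1, 9, 0)
print(td.total_seconds())   # 5400.0 -> a 1.5 hour appointment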
## 10. Summarizing Appointment Lengths ##\n\nfor row in potus:\n    end_date = row[3]\n    end_date = dt.datetime.strptime(end_date, \"%m/%d/%y %H:%M\")\n    row[3] = end_date\n    \nappt_lengths = dict()\n\nfor row in potus:\n    appt_start_date = row[2]\n    appt_end_date = row[3]\n    length = appt_end_date - appt_start_date\n    if length in appt_lengths:\n        appt_lengths[length]+=1\n    else:\n        appt_lengths[length]=1\n    \nmin_length = min(appt_lengths)\nmax_length = max(appt_lengths)\n\n","sub_path":"DataQuest - Data Analyst/python-for-data-science-intermediate/Working with Dates and Times in Python-353.py","file_name":"Working with Dates and Times in Python-353.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"582102923","text":"# read the text file and build strings\nwith open('churras.txt', \"r\") as file:\n    y=file.readlines()\n# join the list into a single string\ns=\",\"\nb=s.join(y)\n# split the words on the commas\nw=b.split(\",\")\n# loop\ni=0\nlista=[]\nwhile i\")\ndef direction(value):\n\tlogging.debug('direction: %s' % value)\n\trosDriving.changeDirection(value)\n\treturn render_template('main.html', **templateData)\n\n@app.route(\"/speed/<value>\")\ndef speed(value):\n\tlogging.debug('speed: %s' % value)\n\trosDriving.changeDirection(value)\n\treturn render_template('main.html', **templateData)\n\n\nif __name__ == \"__main__\":\n\tlogging.basicConfig(level=logging.DEBUG)\n\trosDriving.setup()\n\tapp.run(host='0.0.0.0', port=21000, debug=True)\n","sub_path":"web/drivingController.py","file_name":"drivingController.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"280688603","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def flatten(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: void Do not return anything, modify root in-place instead.\n        \"\"\"\n        def _flatten(root):\n            if not root:\n                return None\n            l_end = _flatten(root.left)\n            r_end = _flatten(root.right)\n            if l_end:\n                l_end.right = root.right\n                root.right = root.left\n                root.left = None\n            return r_end or l_end or root\n\n        _flatten(root)\n\n\nclass Solution2(object):\n    \"\"\"Preorder traversal\n    \"\"\"\n    def flatten(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: void Do not return anything, modify root in-place instead.\n        \"\"\"\n        stk = [root] if root else []\n        while stk:\n            cur = stk.pop()\n            if cur.right:\n                stk += cur.right,\n            if cur.left:\n                stk += cur.left,\n            cur.left, cur.right = None, stk[-1] if stk else None\n","sub_path":"Flatten Binary Tree to Linked List.py","file_name":"Flatten Binary Tree to Linked List.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"56069421","text":"from django.contrib.sites.models import get_current_site\nfrom django.conf import settings\nfrom foundry.models import PageImpression, UserAgent\nfrom preferences import preferences\n\ndef foundry(request):\n    \n    if '/admin/' not in request.path_info:\n    \n        user_agent, _ = UserAgent.objects.get_or_create(user_agent=request.META['HTTP_USER_AGENT'])\n        user_agent.hits += 1\n        user_agent.save()\n    \n        if request.user.is_authenticated():\n            PageImpression.objects.create(path=request.path_info,\n                        user_agent=request.META['HTTP_USER_AGENT'],\n                        user=request.user)\n        else:\n            
PageImpression.objects.create(path=request.path_info,\n user_agent=request.META['HTTP_USER_AGENT'])\n \n return {\n 'FOUNDRY': settings.FOUNDRY,\n 'LAYER_PATH': settings.FOUNDRY['layers'][0] + '/',\n 'CURRENT_SITE': get_current_site(request),\n 'ANALYTICS_TAGS': preferences.GeneralPreferences.analytics_tags,\n 'DEFAULT_META_DESCRIPTION': preferences.GeneralPreferences.meta_description,\n }\n","sub_path":"foundry/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"306059318","text":"from gensim.models import word2vec\nimport logging\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\nsentences = word2vec.Text8Corpus('q_07-14.txt')\n\nmodel = word2vec.Word2Vec(sentences, size=200, min_count=20, window=15)\n\nmodel.save(\"w2v.model\")\n\nif __name__ == '__main__':\n print (\"Finish!!!\")\n","sub_path":"m1/learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"587466215","text":"import logging\nimport sys\nimport time\nfrom pathlib import Path\nfrom typing import Callable\n\nimport aiohttp\nimport discord\nimport motor.motor_asyncio as aiomotor\nfrom box import Box\nfrom discord.abc import PrivateChannel\nfrom discord.ext import commands\nfrom discord.ext.commands.bot import when_mentioned\nfrom discord.ext.commands.bot import when_mentioned_or\n\nfrom milton import ROOT\nfrom milton.core.changelog_parser import Changelog\nfrom milton.core.changelog_parser import make_changelog\nfrom milton.core.changelog_parser import Version\nfrom milton.core.config import CONFIG\n\nlog = logging.getLogger(__name__)\n\nINTRO = r\"\"\"\n__/\\\\\\\\____________/\\\\\\\\_______/\\\\\\___________________/\\\\\\\\\\\\\\\\\\____________\n__\\/\\\\\\\\\\\\________/\\\\\\\\\\\\______\\/\\\\\\_________________/\\\\\\\\\\\\\\\\\\\\\\\\\\_________\n___\\/\\\\\\//\\\\\\____/\\\\\\//\\\\\\______\\/\\\\\\________________/\\\\\\/////////\\\\\\_______\n____\\/\\\\\\\\///\\\\\\/\\\\\\/_\\/\\\\\\______\\/\\\\\\_______________\\/\\\\\\_______\\/\\\\\\______\n_____\\/\\\\\\__\\///\\\\\\/___\\/\\\\\\______\\/\\\\\\_______________\\/\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\_____\n______\\/\\\\\\____\\///_____\\/\\\\\\______\\/\\\\\\_______________\\/\\\\\\/////////\\\\\\____\n_______\\/\\\\\\_____________\\/\\\\\\______\\/\\\\\\_______________\\/\\\\\\_______\\/\\\\\\___\n________\\/\\\\\\_____________\\/\\\\\\______\\/\\\\\\\\\\\\\\\\\\\\\\\\\\\\____\\/\\\\\\_______\\/\\\\\\__\n_________\\///______________\\///_______\\//////////////_____\\///________\\///__\n\nWelcome to the Milton Library Assistant!\n\"\"\"\n\n\nclass Milton(commands.Bot):\n \"\"\"There is only me, and you, and an eternity of doubt.\n\n This is the Bot instance of Milton. It inherits from :class:`commands.Bot`\n so anything passed there can be passed here.\n\n Args:\n config: The parsed configuration for the bot.\n\n Attributes:\n started_on: The ISO timestamp when the bot instance was initiated.\n owner_id: The id snowflake for the owner of the bot.\n DbClient: The client to use to call the MongoDB instance. 
This\n shouldn't be used, in most cases.\n DB: The database related to Milton as set in the configs.\n This should be used to access collections inside the database.\n http_session: An aiohttp session that can be used to make HTTP requests.\n changelog: The changelog object of the bot.\n version: The version of the bot.\n \"\"\"\n\n def __init__(self, config: Box, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.config: Box = config # Bundle the config inside the bot itself.\n self.started_on: float = time.time()\n self.owner_id: int = self.config.bot.owner_id\n self.http_session: aiohttp.ClientSession = aiohttp.ClientSession()\n\n # move from here to the CHANGELOG file\n path = ROOT.parent\n path /= \"CHANGELOG.md\"\n self.changelog: Changelog = make_changelog(path)\n self.version: Version = self.changelog.latest_version\n\n async def on_ready(self):\n logon_str = f\"Logged in as {self.user}\"\n print(logon_str)\n log.info(logon_str)\n\n def add_cog(self, cog: commands.Cog):\n super().add_cog(cog)\n log.info(f\"Added cog {cog.qualified_name}\")\n\n def run(self, *args, **kwargs):\n \"\"\"Runs the bot with the token from the config.\n\n Anything passed to :class:`commands.Bot` can be passed here too.\n \"\"\"\n if self.config.bot.token is None:\n raise ValueError(\"Bot cannot start without a token\")\n log.info(\"Milton is starting.\")\n print(INTRO)\n\n super().run(self.config.bot.token, *args, **kwargs)\n\n async def close(self):\n \"\"\"Gently closes down the bot instance.\n\n Also handles closing down the various async processes attached to the\n bot that need to be explicitly closed.\n \"\"\"\n # This may get called twice due to some internal thing in discord.py.\n # I cannot do much other than sit and watch it doing twice.\n log.info(\"Closing AIOHTTP session...\")\n await self.http_session.close()\n\n log.info(\"Closing bot loop...\")\n await super().close()\n\n async def on_error(self, event_method, *args, **kwargs):\n \"\"\"Handle unexpected errors raised at the bot level.\n\n Every unhandled exception not in a cog ends up here.\n \"\"\"\n # Skip the prompt line\n if \"CommandInterface\" in self.cogs:\n print(\"\")\n\n info = sys.exc_info()\n log.exception(\"Ignoring exception at the bot level\", exc_info=info)\n\n # Re-print the handle for the CLI cog\n if \"CommandInterface\" in self.cogs:\n print(\">> \", end=\"\")\n\n\nasync def _get_prefix(bot: Milton, message: discord.Message) -> Callable:\n \"\"\"Returns the function to correctly get the prefix based on context.\n\n Attributes:\n bot: The bot to get the prefox for\n message: The message that is triggering the prefix retrieving.\n\n Returns:\n A callable function that returns the prefix.\n \"\"\"\n if isinstance(message.channel, PrivateChannel):\n return when_mentioned(bot, message)\n return when_mentioned_or(CONFIG.prefixes.guild)(bot, message)\n","sub_path":"milton/core/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"115469331","text":"def solution(A):\n left_sum = 0\n right_sum = sum(A)\n operation = 0\n for i in A:\n operation += 1\n i = int(i)\n right_sum -= i\n left_sum += i\n if left_sum == right_sum:\n return operation\n return -1\n\nA = [1, 6, 1, 6]\nprint(solution(A))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
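A brief usage sketch for the balance-point `solution` function in the test.py record above (assuming that function is in scope; the inputs here are made up, not from the original file):

# [1, 6, 1, 6] balances after the second element: 1 + 6 == 1 + 6
assert solution([1, 6, 1, 6]) == 2
# no balance point exists here, so the -1 sentinel comes back
assert solution([1, 2]) == -1

Note that the function counts the current element into the left sum and out of the right sum before comparing, so the returned value is the 1-based count of elements consumed when both halves first match.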
+{"seq_id":"309808060","text":"import logging\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger()\n\n\ndef get_input(filename='day.input'):\n with open(filename, 'r') as file:\n return list(map(int, file.read().split(',')))\n\n\ndef get_raw_input(filename='day.input'):\n with open(filename, 'r') as file:\n return file.read()\n\n\ndef get_list(length=5):\n return list(range(length))\n\n\ndef input_as_bytes_to_lengths(input):\n return [ord(c) for c in input] + [17, 31, 73, 47, 23]\n\n\ndef apply_instructions(circle, instructions, rounds=1):\n index = 0\n skip_size = 0\n _round = 0\n while _round < rounds:\n for length in instructions:\n index = index % len(circle)\n if length <= len(circle):\n double_circle = circle + circle\n sub_list = double_circle[index:index + length]\n sub_list.reverse()\n for i, v in enumerate(sub_list):\n circle[(index + i) % len(circle)] = v\n index += length + skip_size\n skip_size += 1\n _round += 1\n return circle\n\n\ndef to_dense_hash(circle):\n blocks = []\n for i in range(16):\n blocks.append(circle[i * 16:(i + 1) * 16])\n hash = ''\n for block in blocks:\n hash += block_to_hex(block)\n return hash\n\n\ndef block_to_hex(block):\n _val = block[0] ^ block[1]\n for j in range(14):\n _val = _val ^ block[j + 2]\n _hex = format(_val, 'x')\n if len(_hex) == 1:\n _hex = '0' + _hex\n return _hex\n\n\ndef validate(circle):\n return circle[0] * circle[1]\n\n\ndef input_to_hex(input):\n return to_dense_hash(apply_instructions(\n get_list(256), input_as_bytes_to_lengths(input), 64))\n\n\ndef test():\n test_input = get_input('test.input')\n assert validate(apply_instructions(get_list(), test_input)) == 12\n assert (input_as_bytes_to_lengths('1,2,3') ==\n [49, 44, 50, 44, 51, 17, 31, 73, 47, 23])\n assert input_to_hex('') == 'a2582a3a0e66e6e86e3812dcb672a272'\n input_to_hex('AoC 2017') == '33efeb34ea91902bb2f59c9920caa6cd'\n input_to_hex('1,2,3') == '3efbe78a8d82f29979031a4aa0b16a9d'\n input_to_hex('1,2,4') == '63960835bcdc130f0b66d7ff4f6a5a8e'\n\n logger.info('Tests passed')\n\n\ndef main():\n logger.info('Result 1: %s' % validate(\n apply_instructions(get_list(256), get_input())))\n logger.info('Result 2: %s' % input_to_hex(get_raw_input()))\n\n\nif __name__ == '__main__':\n test()\n main()\n","sub_path":"2017/day14/knothash.py","file_name":"knothash.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"214564384","text":"from config_PASCAL_VC import *\nfrom scipy.spatial.distance import cdist\nfrom cls_factorizable_funcs import *\n\n\ndef eval_VCclassifier(category):\n magic_thh=0.45\n K = 4\n total_models = len(all_categories)\n\n print('############################')\n print('class : {}'.format(category))\n\n # load test feat\n test_feat = os.path.join(Feat['cache_dir'], 'feat_{}_test_adv3.pickle'.format(category))\n with open(test_feat,'rb') as fh:\n layer_feature = pickle.load(fh)\n\n N = len(layer_feature)\n print('Total number of test samples: {}'.format(N))\n\n # convert to 0-1 VC encoding\n\n with open(Dict['Dictionary'], 'rb') as fh:\n _, centers, _ = pickle.load(fh)\n\n r_set = [None for nn in range(N)]\n for nn in range(N):\n iheight,iwidth = layer_feature[nn].shape[0:2]\n lff = layer_feature[nn].reshape(-1, featDim)\n lff_norm = lff/np.sqrt(np.sum(lff**2, 1)).reshape(-1,1)\n r_set[nn] = cdist(lff_norm, centers, 'cosine').reshape(iheight,iwidth,-1)\n\n layer_feature_b = [None for nn in range(N)]\n for nn in range(N):\n layer_feature_b[nn] = 
(r_set[nn] 0:\n node = graphQueue.popleft()\n adjNodes = graph[node]\n remainingNodes = set(adjNodes).difference(set(visitedList))\n if len(remainingNodes) > 0:\n for ele in remainingNodes:\n visitedList.append(ele)\n graphQueue.append(ele)\n return visitedList","sub_path":"basic/breadthFirstSearchGraph.py","file_name":"breadthFirstSearchGraph.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"98945331","text":"\"\"\"\nActs as the entry point.\nMain program that calls other programs\n\"\"\"\n\nimport os\n\nimport numpy as np\nfrom LoadDataset import load_CIFAR_Dataset, getCIFAR_as_32Pixels_Image\nimport h5py\nimport argparse\nfrom Features import Features\nfrom softmaxRegression import execute_softmax\nfrom knn_Implement import *\nfrom svm_Implement import *\nfrom ZCAWhitening import zca\n\n\"\"\"\nGLOBAL CONSTANTS\n\"\"\"\nTRAIN_DATASET_ID = 'X_train'\nTRAIN_LABELS_ID = 'y_train'\nTEST_DATASET_ID = 'X_test'\nTEST_LABELS_ID = 'y_test'\nPATH_TO_DATA_DIR = \"../data/\"\nPATH_TO_TRAIN_FILE = PATH_TO_DATA_DIR + 'train.h5'\nPATH_TO_TEST_FILE = PATH_TO_DATA_DIR + 'test.h5'\nDATA_FOLDER_PATH = PATH_TO_DATA_DIR + 'cifar-10-batches-py'\nALGORITHM_ARGS = 'algo' # Values are hardcoded in argsparser\nKNN_ARGS = \"knn\"\nSVM_ARGS = \"svm\"\nZCA_ARGS = \"zca\"\nHOG_ARGS = \"hog\"\nSOFTMAX_ARGS = \"softmax\"\nHSV_ARGS = \"hsv\"\nCNN_ARGS = \"cnn\"\nDEFAULT_CROSS_VALIDATION_FOLDS = 5\n\n\ndef getDataset(load_raw):\n \"\"\"\n Gets the dataset from the H5 files or from the disk based on the args\n :param load_raw:\n :return: Train and Test data\n \"\"\"\n if load_raw:\n X_train, y_train, X_test, y_test = load_CIFAR_Dataset(DATA_FOLDER_PATH)\n\n with h5py.File(PATH_TO_TRAIN_FILE, 'w') as hf:\n hf.create_dataset(TRAIN_DATASET_ID, data=X_train)\n hf.create_dataset(TRAIN_LABELS_ID, data=y_train)\n\n with h5py.File(PATH_TO_TEST_FILE, 'w') as hf:\n hf.create_dataset(TEST_DATASET_ID, data=X_test)\n hf.create_dataset(TEST_LABELS_ID, data=y_test)\n\n else:\n print(\"Loading {} file\".format(PATH_TO_TRAIN_FILE))\n with h5py.File(PATH_TO_TRAIN_FILE, 'r') as hf:\n print('List of arrays in this file: \\n', hf.keys())\n data = hf.get(TRAIN_DATASET_ID)\n X_train = np.array(data)\n data = hf.get(TRAIN_LABELS_ID)\n y_train = np.array(data)\n\n print(\"Loading {} file\".format(PATH_TO_TEST_FILE))\n with h5py.File(PATH_TO_TEST_FILE, 'r') as hf:\n print('List of arrays in this file: \\n', hf.keys())\n data = hf.get(TEST_DATASET_ID)\n X_test = np.array(data)\n data = hf.get(TEST_LABELS_ID)\n y_test = np.array(data)\n print(\"{}\\t{}\\t{}\\t{}\\n\".format(X_train.shape, y_train.shape, X_test.shape, y_test.shape))\n return X_train, y_train, X_test, y_test\n\n\n\ndef getFeatureFunctions(args):\n \"\"\"\n Based on the user's choice pass the appropriate functions\n For more details look into features class\n :param args:\n :return:\n \"\"\"\n functionsArray = Features.getSupportedFunctions()\n functionsList = sub1FromList(args.features)\n featureFunctions = functionsArray[functionsList]\n return Features(featureFunctions)\n\n\ndef sub1FromList(list):\n return [x - 1 for x in list]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='details',\n usage='use \"%(prog)s --help\" for more information',\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"-l\", \"--loadCIFAR\", help=\"loads the data in ../data folder\",\n action=\"store_true\")\n parser.add_argument(ALGORITHM_ARGS, help=\"Enter 
the algorithm to run(in lowercase)\",\n choices=[KNN_ARGS, SVM_ARGS, SOFTMAX_ARGS, ZCA_ARGS, CNN_ARGS])\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-f\", \"--features\",\n help=\"Enter the feature selection Algorithm(s) Index of your choice\\n\"\n \"1.HOG\\n\"\n \"2.Histogram\\n\",\n nargs='+',\n type=int,\n choices=[1, 2])\n group.add_argument(\"-z\", \"--zca\", help=\"ZCA Whitening\", action=\"store_true\")\n args = parser.parse_args()\n\n if args.algo == KNN_ARGS:\n # TO-DO When KNN is implemented, move this into their preprocessing step\n # TO-DO Feature Extraction takes time, save them into h5 file and load them directly\n X_train, y_train, X_test, y_test = getDataset(args.loadCIFAR)\n if args.zca:\n print(\"ZCA Pre Processing Started\")\n X_train = zca(X_train)\n X_test = zca(X_test)\n print(\"ZCA Pre Processing Completed\")\n elif args.features:\n # Call Feature Extraction techiniques\n X_train, y_train, X_test, y_test = getDataset(args.loadCIFAR)\n X_train = getCIFAR_as_32Pixels_Image(X_train)\n X_test = getCIFAR_as_32Pixels_Image(X_test)\n ftsObj = getFeatureFunctions(args)\n X_train = ftsObj.extract_features(X_train)\n X_test = ftsObj.extract_features(X_test)\n print(X_test.shape)\n print(\"Started with implementing KNN\")\n executeKNN(X_train, y_train, X_test, y_test)\n if args.algo == SOFTMAX_ARGS:\n X_train, y_train, X_test, y_test = getDataset(args.loadCIFAR)\n if args.zca:\n print(\"ZCA Pre Processing Started\")\n X_train = zca(X_train)\n X_test = zca(X_test)\n X_train = getCIFAR_as_32Pixels_Image(X_train)\n X_test = getCIFAR_as_32Pixels_Image(X_test)\n print(\"ZCA Pre Processing Completed\")\n execute_softmax(X_train, y_train, X_test, y_test)\n elif args.features:\n # Call Feature Extraction techiniques\n X_train = getCIFAR_as_32Pixels_Image(X_train)\n X_test = getCIFAR_as_32Pixels_Image(X_test)\n ftsObj = getFeatureFunctions(args)\n X_train = ftsObj.extract_features(X_train)\n X_test = ftsObj.extract_features(X_test)\n execute_softmax(X_train, y_train, X_test, y_test)\n else:\n X_train = getCIFAR_as_32Pixels_Image(X_train)\n X_test = getCIFAR_as_32Pixels_Image(X_test)\n execute_softmax(X_train, y_train, X_test, y_test)\n if args.algo == ZCA_ARGS:\n print(\"This is just and experiment to see that the code works\")\n X_train, y_train, X_test, y_test = getDataset(args.loadCIFAR)\n # construct_image(X_test,y_test,\"original.png\")\n XZ_test = zca(X_test)\n # construct_ZCAimage(XZ_test,y_test,\"zca.png\")\n elif args.algo == SVM_ARGS:\n X_train, y_train, X_test, y_test = getDataset(args.loadCIFAR)\n if args.zca:\n print(\"ZCA Pre Processing Started\")\n X_train = zca(X_train)\n X_test = zca(X_test)\n X_train = getCIFAR_as_32Pixels_Image(X_train)\n X_test = getCIFAR_as_32Pixels_Image(X_test)\n print(\"ZCA Pre Processing Completed\")\n executeSVM(X_train, y_train, X_test, y_test)\n elif args.features:\n # Call Feature Extraction techiniques\n X_train = getCIFAR_as_32Pixels_Image(X_train)\n X_test = getCIFAR_as_32Pixels_Image(X_test)\n ftsObj = getFeatureFunctions(args)\n X_train = ftsObj.extract_features(X_train)\n X_test = ftsObj.extract_features(X_test)\n executeSVM(X_train, y_train, X_test, y_test)\n else:\n X_train = getCIFAR_as_32Pixels_Image(X_train)\n X_test = getCIFAR_as_32Pixels_Image(X_test)\n executeSVM(X_train, y_train, X_test, y_test)\n elif args.algo == CNN_ARGS:\n print(args)\n print(\"CNN yet to be 
implemented\")\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"98017014","text":"# Imports\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Data\ndescr = ['Variant 1', 'Variant 2', 'Variant 3']\nlen_avg = [6.25, 5.52, 6.52]\nlen_std = [0.56, 0.45, 0.59]\nwaittime_avg = [127.70, 112.34, 176.66]\nwaittime_std = [11.20, 8.78, 14.95]\n\ndf = pd.DataFrame({'Variant': descr, 'Rijlengte': len_avg, 'RijStd': len_std, 'Wachttijd': waittime_avg, 'WachtStd': waittime_std})\n\n# Plotting\nsize = 8\nfont = {'size' : 20}\nmatplotlib.rc('font', **font)\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 7))\nax1.bar(df['Variant'], df['Rijlengte'], yerr=df['RijStd'], alpha=0.25, facecolor='black')\nax1.set_ylabel('Personen')\nax1.set_title('Gemiddelde rijlengte')\nax1.grid(ls='dashed')\nax2.bar(df['Variant'], df['Wachttijd'], yerr=df['WachtStd'], alpha=0.25, facecolor='black')\nax2.set_ylabel('Seconden')\nax2.set_title('Gemiddelde wachttijd')\nax2.grid(ls='dashed')\n\nplt.show() # plt.savefig(sometitle.png)","sub_path":"Applied Math/Y1S4/Simulatiemodellen/opdracht2_grafiek_resultaten.py","file_name":"opdracht2_grafiek_resultaten.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"302968870","text":"from itertools import product\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\nfrom django.utils.translation import ugettext as _\nfrom transmeta import TransMeta\nfrom stdimage.fields import StdImageField\n\n\nclass ActiveManager(models.Manager):\n \"\"\"\n Manager class that returns active records\n \"\"\"\n def get_query_set(self):\n return super(ActiveManager, self).get_query_set().filter(is_active=True)\n\n\nclass CommonMixin(models.Model):\n \"\"\"\n Abstract base class for common information\n \"\"\"\n is_active = models.BooleanField(default=True)\n last_active = models.DateField(auto_now=True)\n date = models.DateField(auto_now_add=True)\n\n objects = models.Manager()\n active = ActiveManager()\n\n class Meta:\n abstract = True\n\n\nclass ImageMixin(models.Model):\n width = models.IntegerField(blank=True, null=True, editable=False)\n height = models.IntegerField(blank=True, null=True, editable=False)\n\n def save(self, *args, **kwargs):\n try:\n this = ImageMixin.objects.get(id=self.id)\n if this.image != self.image:\n this.image.delete(save=False)\n except: pass # when new photo then we do nothing, normal case\n super(ImageMixin, self).save(args, kwargs)\n\n def delete(self, using=None):\n super(ImageMixin, self).delete(using)\n self.image.delete(save=False)\n\n class Meta:\n abstract = True\n\n\nclass StaticPage(models.Model):\n __metaclass__ = TransMeta\n\n name = models.CharField(max_length=255, verbose_name=_(\"Name\"))\n content = models.TextField(verbose_name=_(\"Name\"))\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n translate = ('content',)\n\n\nclass Site(CommonMixin, ImageMixin):\n __metaclass__ = TransMeta\n\n name = models.CharField(max_length=255, verbose_name=_(\"Name\"))\n description = models.TextField(verbose_name=_(\"Description\"))\n has_airport = models.BooleanField(default=False)\n rating = models.IntegerField()\n order = models.IntegerField()\n do_not_miss = models.TextField(verbose_name=_(\"Do not miss\"))\n recommended_stay = models.TextField(verbose_name=_(\"Recommended stay\"))\n 
address = models.CharField(max_length=255, verbose_name=_(\"Address\"))\n state = models.CharField(max_length=100, verbose_name=_(\"State\"))\n price = models.TextField(verbose_name=_(\"Price\"))\n good_to_know = models.TextField(verbose_name=_(\"Good to know\"))\n link = models.URLField(blank=True, null=True)\n region = models.ManyToManyField('Region', blank=True, null=True)\n interests = models.ManyToManyField('Interest', blank=True, null=True)\n map = StdImageField(\n upload_to='uploaded_images/%Y/%m/%d',\n max_length=255,\n height_field='height',\n width_field='width',\n size=(453, 294, True),\n thumbnail_size=(195, 150, True),\n blank=True,\n null=True\n )\n location_lat = models.DecimalField(\n u'Location (latitude)', max_digits=10, decimal_places=7, default=0,\n help_text=u\"You can use http://www.getlatlon.com to get a location's coordinates\"\n )\n location_lon = models.DecimalField(\n u'Location (longitude)', max_digits=10, decimal_places=7, default=0,\n help_text=u\"You can use http://www.getlatlon.com to get a location's coordinates\"\n )\n\n\n def get_next(self):\n next = Site.objects.filter(id__gt=self.id)\n if next:\n return next[0]\n return False\n\n\n def get_prev(self):\n prev = Site.objects.filter(id__lt=self.id)\n if prev:\n return prev[0]\n return False\n\n\n def __unicode__(self):\n return self.name\n\n\n @models.permalink\n def get_url(self):\n return ('sitedetails', (), { 'pk': self.id })\n\n\n class Meta:\n translate = (\n 'description',\n 'do_not_miss',\n 'recommended_stay',\n 'address',\n 'state',\n 'price',\n 'good_to_know',\n )\n\n\nclass SiteImage(CommonMixin, ImageMixin):\n site = models.ForeignKey('Site')\n image = StdImageField(\n upload_to='uploaded_images/%Y/%m/%d',\n max_length=255,\n height_field='height',\n width_field='width',\n size=(246, 160, True),\n thumbnail_size=(200, 148, True),\n blank=True,\n null=True\n )\n\n def __unicode__(self):\n return u\"%s image\" % self.site.name\n\n\nclass SiteActivity(CommonMixin, ImageMixin):\n __metaclass__ = TransMeta\n\n\n site = models.ForeignKey('Site')\n name = models.CharField(max_length=255, verbose_name=_(\"Name\"))\n description = models.TextField(verbose_name=_(\"Description\"))\n price = models.CharField(max_length=100)\n image = StdImageField(\n upload_to='uploaded_images/%Y/%m/%d',\n max_length=255,\n height_field='height',\n width_field='width',\n size=(183, 112, True),\n blank=True,\n null=True\n )\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n translate = ('name', 'description')\n\n\nclass SiteActivityImage(CommonMixin, ImageMixin):\n site_activity = models.ForeignKey('SiteActivity')\n image = StdImageField(\n upload_to='uploaded_images/%Y/%m/%d',\n max_length=255,\n height_field='height',\n width_field='width',\n size=(266, 175, True),\n blank=True,\n null=True\n )\n\n def __unicode__(self):\n return u\"%s image\" % self.site_activity.name\n\n\nclass SiteHotel(CommonMixin):\n __metaclass__ = TransMeta\n\n name = models.CharField(max_length=255, verbose_name=_(\"Name\"))\n description = models.TextField(verbose_name=_(\"Description\"))\n price = models.FloatField()\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n translate = ('name', 'description')\n\n\n#class SiteCar(CommonMixin):\n# __metaclass__ = TransMeta\n#\n# name = models.CharField(max_length=255)\n# description = models.TextField()\n#\n# class Meta:\n# translate = ('name', 'description')\n\n\nclass Itinerary(CommonMixin):\n __metaclass__ = TransMeta\n\n user = models.ForeignKey(User, blank=True, 
null=True)\n    session_key = models.CharField(max_length=50, blank=True, null=True)\n    name = models.CharField(max_length=255, verbose_name=_(\"Name\"))\n    description = models.TextField(verbose_name=_(\"Description\"), blank=True, null=True)\n    start_date = models.DateField(blank=True, null=True)\n    sites = models.ManyToManyField('Site', through='ItinerarySite')\n\n    def __unicode__(self):\n        return self.name\n\n    class Meta:\n        translate = ('name', 'description')\n\n\nclass ItinerarySite(CommonMixin):\n    # @todo: find out if car field should be added\n    itinerary = models.ForeignKey('Itinerary')\n    site = models.ForeignKey('Site')\n    nights = models.IntegerField()\n    order = models.IntegerField()\n    start_date = models.DateField(blank=True, null=True)\n    # @todo: you can book multiple hotels\n    # hotel = models.ForeignKey('SiteHotel', blank=True, null=True)\n    activities = models.ManyToManyField('SiteActivity', through='ItineraryPlaceActivity')\n\n    def __unicode__(self):\n        return u\"%s - %s\" % (self.itinerary.name, self.site.name)\n\n    class Meta:\n        ordering = ['order']\n\n\nclass ItineraryPlaceActivity(CommonMixin):\n    site = models.ForeignKey('ItinerarySite')\n    activity = models.ForeignKey('SiteActivity')\n    is_booked = models.BooleanField(default=False)\n\n    def __unicode__(self):\n        return u\"%s - %s - %s\" % (self.site.itinerary.name, self.site.site.name, self.activity.name)\n\n\nclass Region(CommonMixin):\n    name = models.CharField(max_length=255)\n\n    def __unicode__(self):\n        return self.name\n\n\nclass Interest(CommonMixin):\n    __metaclass__ = TransMeta\n\n    name = models.CharField(max_length=255, verbose_name=_(\"Name\"))\n\n    def __unicode__(self):\n        return self.name\n\n    class Meta:\n        translate = ('name',)\n\n\nclass SiteComment(CommonMixin):\n    user = models.ForeignKey(User)\n    site = models.ForeignKey(Site)\n    comment = models.TextField(verbose_name=_(\"Leave a comment\"))\n\n    def __unicode__(self):\n        return 'Comment by %s' % self.user.username\n\n\nclass ExampleItinerary(CommonMixin, ImageMixin):\n    __metaclass__ = TransMeta\n\n    name = models.CharField(max_length=255, verbose_name=_(\"Name\"))\n    description = models.TextField(verbose_name=_(\"Description\"), blank=True, null=True)\n    map = StdImageField(\n        upload_to='uploaded_images/%Y/%m/%d',\n        max_length=255,\n        height_field='height',\n        width_field='width',\n        size=(472, 306, True),\n        thumbnail_size=(273, 150, True),\n        blank=True,\n        null=True\n    )\n\n    def get_next(self):\n        next = Itinerary.objects.filter(id__gt=self.id)\n        if next:\n            return next[0]\n        return False\n\n    def get_prev(self):\n        prev = Itinerary.objects.filter(id__lt=self.id)\n        if prev:\n            return prev[0]\n        return False\n\n    class Meta:\n        translate = ('name', 'description')\n\n\nclass ExampleItineraryImage(CommonMixin, ImageMixin):\n    example_itinerary = models.ForeignKey('ExampleItinerary')\n    image = StdImageField(\n        upload_to='uploaded_images/%Y/%m/%d',\n        max_length=255,\n        height_field='height',\n        width_field='width',\n        size=(246, 160, True),\n        blank=True,\n        null=True\n    )\n\n    def __unicode__(self):\n        return u\"%s image\" % self.example_itinerary.name\n\n\nclass ExampleItinerarySite(CommonMixin):\n    example_itinerary = models.ForeignKey('ExampleItinerary')\n    day_number = models.IntegerField()\n    from_site = models.ForeignKey('Site', related_name='+')\n    to_site = models.ForeignKey('Site', related_name='+')\n\n\nclass ExampleItinerarySiteDetail(models.Model):\n    example_itinerary_site = models.ForeignKey('ExampleItinerarySite')\n    description = models.CharField(max_length=255)\n\n\nclass 
ExampleItinerarySiteActivity(models.Model):\n    example_itinerary_site = models.ForeignKey('ExampleItinerarySite')\n    activity = models.ForeignKey('SiteActivity')\n\n\nclass Session(models.Model):\n    key = models.CharField(max_length=50)\n\n\nclass Segment(models.Model):\n    distance = models.FloatField(verbose_name=_(\"Driving Hours\"))\n    segment = models.IntegerField()\n    sites = models.ManyToManyField('Site')\n\n    def __unicode__(self):\n        return u\"Segment %s\" % self.segment\n\n\nclass Route(models.Model):\n    route = models.IntegerField()\n    segments = models.ManyToManyField('Segment')\n\n    def __unicode__(self):\n        return u\"Route %s\" % self.route\n\n\nclass RecommendedItinerary(ImageMixin):\n    __metaclass__ = TransMeta\n\n    name = models.CharField(max_length=100, verbose_name=_(\"Name\"))\n    description = models.TextField(verbose_name=_(\"Description\"), blank=True, null=True)\n    routes = models.ManyToManyField('Route')\n    map = StdImageField(\n        upload_to='uploaded_images/%Y/%m/%d',\n        max_length=255,\n        height_field='height',\n        width_field='width',\n        size=(472, 306, True),\n        thumbnail_size=(273, 150, True),\n        blank=True,\n        null=True\n    )\n\n    def __unicode__(self):\n        return u\"%s\" % self.name\n\n    def save(self, *args, **kwargs):\n        super(RecommendedItinerary, self).save(*args, **kwargs)\n        # Remove previously generated data\n        RecommendedItineraryCombinations.objects.filter(recommended_itinerary__pk=self.pk).delete()\n        # Generate new data\n        route_segment_sets = []\n        for route in self.routes.all():\n            route_segment_sets.append(route.segments.all())\n\n        segment_products = list(product(*route_segment_sets))\n        for segment_product in segment_products:\n            sites = []\n            distance = 0\n            for segment in segment_product:\n                if segment.distance: distance += segment.distance\n                sites.extend(segment.sites.all())\n\n            itinerary_combinations = RecommendedItineraryCombinations.objects.create(\n                recommended_itinerary=self,\n                distance=distance,\n                nights=len(sites),\n                starting_point=sites[0],\n                ending_point=sites[-1],\n            )\n            itinerary_combinations.sites.add(*sites)\n\n    class Meta:\n        translate = ('name', 'description')\n\nclass RecommendedItineraryCombinations(models.Model):\n    recommended_itinerary = models.ForeignKey('RecommendedItinerary')\n    sites = models.ManyToManyField('Site')\n    distance = models.IntegerField()\n    nights = models.IntegerField()\n    starting_point = models.ForeignKey('Site', related_name='+')\n    ending_point = models.ForeignKey('Site', related_name='+')\n\n    def __unicode__(self):\n        return u\"%s combinations\" % self.recommended_itinerary.name\n\n\nclass Distance(models.Model):\n    point_a = models.ForeignKey('Site', related_name='+')\n    point_b = models.ForeignKey('Site', related_name='+')\n    distance = models.FloatField()\n    hours = models.FloatField()","sub_path":"navig/navig/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":13063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"276060975","text":"import os\r\nimport ctypes\r\nfrom ctypes import *\r\n\r\n\r\n#Removes itself from the system\r\n\r\ndef Uninstall(AutorunName, InstallPath, ProcessName, CurrentName, CurrentPath, Temp, ProgramData):\r\n    windll.ntdll.RtlSetProcessIsCritical(0, 0, 0)\r\n    ctypes.windll.kernel32.SetFileAttributesW(CurrentPath, 0)\r\n    ctypes.windll.kernel32.SetFileAttributesW(InstallPath+'\\\\'+ProcessName, 0)\r\n    with open(os.path.join(Temp, 'Uninstaller.bat'), 'w') as OPATH:\r\n        OPATH.writelines(['taskkill /f /im \"'+CurrentName+'\"\\n', \r\n                          'schtasks /delete /f 
/tn \"'+AutorunName+'\"\\n', \r\n 'del /s /q \"'+CurrentPath+'\"\\n',\r\n 'del /s /q \"'+InstallPath+'\\\\'+ProcessName+'\"\\n',\r\n 'rmdir /s /q \"'+ProgramData+'Files'+'\"'])\r\n while True:\r\n try:\r\n os.startfile(Temp+'Uninstaller.bat', 'runas')\r\n except:\r\n pass\r\n else:\r\n break","sub_path":"Core/Main/Autorun.py","file_name":"Autorun.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"635078736","text":"from collections import Counter\nfrom tac2persian.utils.g2p.phonemizer_api.phonemize import phonemize\nfrom tac2persian.utils.g2p.char_list import char_list, _punctuations, _pad\n\n\nclass Grapheme2Phoneme():\n def __init__(self):\n self.char_list = char_list\n self.punctutations = _punctuations\n # Char to id and id to char conversion\n self.char_to_id = {s: i for i, s in enumerate(self.char_list)}\n self.id_to_char = {i: s for i, s in enumerate(self.char_list)}\n \n # Set first and second languages in 'bilingual' mode\n self.set_bilingual_languages()\n\n def set_bilingual_languages(self, first_lang=\"fa\", second_lang=\"en-us\"):\n \"\"\"Sets languages in bilingual mode.\"\"\"\n self._bilingual_first_lang = first_lang\n self._bilingual_second_lang = second_lang\n\n def text_to_phone(self, text, language=\"fa\"):\n \"\"\"Converts text to phoneme.\"\"\"\n # Count number stars (for bilingual mode)\n char_counts = Counter(text)\n even_stars = char_counts[\"*\"] > 0 and char_counts[\"*\"] % 2 == 0\n\n # If language is set to 'bilingual', split sentence with stars and convert each part separately\n if language == \"bilingual\" and even_stars:\n ph = \"\"\n for i, p in enumerate(text.split(\"*\")):\n if i % 2 == 0:\n ph_fa = phonemize(p, \n strip=False, \n with_stress=True, \n preserve_punctuation=True, \n punctuation_marks=self.punctutations,\n njobs=1, \n backend='espeak', \n language=self._bilingual_first_lang, \n language_switch=\"remove-flags\")\n ph += ph_fa\n else:\n ph_en = phonemize(p, \n strip=False, \n with_stress=True, \n preserve_punctuation=True, \n punctuation_marks=self.punctutations,\n njobs=1, \n backend='espeak', \n language=self._bilingual_second_lang, \n language_switch=\"remove-flags\")\n ph += ph_en\n else:\n # If the language is 'bilingual' but no stars or not odd number of stars\n if language == \"bilingual\":\n lang = self._bilingual_first_lang\n # Otherwise\n else:\n lang = language\n ph = phonemize(text, \n strip=False, \n with_stress=True, \n preserve_punctuation=True, \n punctuation_marks=self.punctutations,\n njobs=1, \n backend='espeak', \n language=lang, \n language_switch=\"remove-flags\")\n return ph\n\n def _should_keep_char(self, \n p):\n \"\"\"Checks if char is valid and is not pad char.\"\"\"\n return p in self.char_list and p not in [_pad]\n\n def phone_to_sequence(self, phons):\n sequence = [self.char_to_id[s] for s in list(phons) if self._should_keep_char(s)]\n return sequence\n \n def text_to_sequence(self, text, language=\"de\"):\n \"\"\"Converts text to sequence of indices.\"\"\"\n sequence = []\n # Get the phoneme for the text\n phons = self.text_to_phone(text, language=language)\n # Convert each phone to its corresponding index\n sequence = [self.char_to_id[s] for s in list(phons) if self._should_keep_char(s)]\n if sequence == []:\n print(\"!! After phoneme conversion the result is None. 
-- {} \".format(text))\n return sequence\n","sub_path":"tac2persian/utils/g2p/g2p.py","file_name":"g2p.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"13729922","text":"import numpy as np\nimport cv2\nfrom frcnn import data_generators, data_augmentor\nimport json\nimport pickle\nfrom frcnn.data_generators_for_test import calc_rpn\nfrom frcnn.roi_helpers import apply_regr\n\n\ndef show_augmented_annotations(image, augmented_annotation):\n print('num_gt_bboxes={}'.format(len(augmented_annotation['bboxes'])))\n for bbox in augmented_annotation['bboxes']:\n text = bbox['class']\n x1 = bbox['x1']\n y1 = bbox['y1']\n x2 = bbox['x2']\n y2 = bbox['y2']\n # size[0][0] 表示 width, size[0][1] 表示 height, size[1] 表示 baseline\n size = cv2.getTextSize(text, cv2.FONT_HERSHEY_COMPLEX, 1, 1)\n text_origin = (x1, y1)\n\n cv2.rectangle(image, (text_origin[0] - 5, text_origin[1] - size[0][1] - 5),\n (text_origin[0] + size[0][0] + 5, text_origin[1] + 5), (0, 0, 0), 2)\n cv2.rectangle(image, (text_origin[0] - 5, text_origin[1] - size[0][1] - 5),\n (text_origin[0] + size[0][0] + 5, text_origin[1] + 5), (255, 255, 255), -1)\n cv2.putText(image, text, text_origin, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 1)\n cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255), 1)\n cv2.namedWindow('image_with_annotations', cv2.WINDOW_NORMAL)\n cv2.imshow('image_with_annotations', image)\n cv2.waitKey(0)\n\n\ndef show_pos_anchors_before_rectify(image, y_rpn_cls, max_iou_for_anchor, best_bbox_for_anchor):\n row_ids, col_ids, anchor_ids = np.where(np.logical_and(y_rpn_cls[0, :, :, :9] == 1, y_rpn_cls[0, :, :, 9:] == 1))\n print('num_pos_anchors={}'.format(len(row_ids)))\n colors = [np.random.randint(0, 255, 3) for row_id in row_ids]\n for idx in range(len(row_ids)):\n center_x = col_ids[idx] * C.rpn_stride\n center_y = row_ids[idx] * C.rpn_stride\n w = anchors[anchor_ids[idx]][0]\n h = anchors[anchor_ids[idx]][1]\n x1 = int(round(center_x - w // 2))\n x2 = int(round(center_x + w // 2))\n y1 = int(round(center_y - h // 2))\n y2 = int(round(center_y + h // 2))\n cv2.rectangle(image, (x1, y1), (x2, y2), colors[idx].tolist(), 2)\n max_iou = max_iou_for_anchor[row_ids[idx], col_ids[idx], anchor_ids[idx]]\n best_bbox = best_bbox_for_anchor[row_ids[idx], col_ids[idx], anchor_ids[idx]]\n cv2.putText(image, '{:.2f}-{}'.format(max_iou, best_bbox), (x1, y1 + 10), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 1)\n cv2.namedWindow('image_before_rectify', cv2.WINDOW_NORMAL)\n cv2.imshow('image_before_rectify', image)\n cv2.waitKey(0)\n\n\ndef show_pos_anchors_after_rectify(image, y_rpn_cls, y_rpn_regr):\n row_ids, col_ids, anchor_ids = np.where(np.logical_and(y_rpn_cls[0, :, :, :9] == 1, y_rpn_cls[0, :, :, 9:] == 1))\n for idx in range(len(row_ids)):\n row_id = row_ids[idx]\n col_id = col_ids[idx]\n anchor_id = anchor_ids[idx]\n center_x = col_id\n center_y = row_id\n w = anchors[anchor_id][0] / C.rpn_stride\n h = anchors[anchor_id][1] / C.rpn_stride\n x1 = center_x - int(round(w // 2))\n y1 = center_y - int(round(h // 2))\n tx, ty, tw, th = y_rpn_regr[0, row_id, col_id, 36 + anchor_id * 4: 36 + (anchor_id + 1) * 4]\n x, y, w, h = np.array(apply_regr(x1, y1, w, h, tx, ty, tw, th)) * C.rpn_stride\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.namedWindow('image_after_rectify', cv2.WINDOW_NORMAL)\n cv2.imshow('image_after_rectify', image)\n cv2.waitKey(0)\n\n\ndef show_neg_anchors(image, y_rpn_cls, max_iou_for_anchor, 
best_bbox_for_anchor):\n row_ids, col_ids, anchor_ids = np.where(np.logical_and(y_rpn_cls[0, :, :, :9] == 1, y_rpn_cls[0, :, :, 9:] == 0))\n print('num_neg_anchors={}'.format(len(row_ids)))\n colors = [np.random.randint(0, 255, 3) for row_id in row_ids]\n for idx in range(len(row_ids)):\n center_x = col_ids[idx] * C.rpn_stride\n center_y = row_ids[idx] * C.rpn_stride\n w = anchors[anchor_ids[idx]][0]\n h = anchors[anchor_ids[idx]][1]\n x1 = int(round(center_x - w // 2))\n x2 = int(round(center_x + w // 2))\n y1 = int(round(center_y - h // 2))\n y2 = int(round(center_y + h // 2))\n cv2.rectangle(image, (x1, y1), (x2, y2), colors[idx].tolist(), 2)\n max_iou = max_iou_for_anchor[row_ids[idx], col_ids[idx], anchor_ids[idx]]\n best_bbox = best_bbox_for_anchor[row_ids[idx], col_ids[idx], anchor_ids[idx]]\n cv2.putText(image, '{:.2f}-{}'.format(max_iou, best_bbox), (x1, y1 + 10), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 1)\n cv2.namedWindow('image_neg_anchors', cv2.WINDOW_NORMAL)\n cv2.imshow('image_neg_anchors', image)\n cv2.waitKey(0)\n\n\nall_annotations = json.load(open('annotation_data.json'))\nclasses_count = json.load(open('classes_count.json'))\ntrain_annotations = [annotation for annotation in all_annotations if annotation['imageset'] == 'train']\nwith open('../config.pickle', 'rb') as f_in:\n C = pickle.load(f_in)\nif C.network == 'resnet50':\n import frcnn.resnet as nn\nelif C.network == 'vgg':\n import frcnn.vgg as nn\n\nanchors = [scale * np.array(ratio) for scale in C.anchor_scales for ratio in C.anchor_ratios]\nfor annotation in train_annotations:\n augmented_annotation, image = data_augmentor.augment(annotation, C, augment=True)\n # show_augmented_annotations(image.copy(), augmented_annotation)\n height, width = image.shape[:2]\n # get image dimensions for resizing\n # 按照最小的边为 600 进行 resize\n (resized_width, resized_height) = data_generators.get_new_image_size(width, height, C.image_min_size)\n # resize the image so that smallest side is 600px\n image = cv2.resize(image, (resized_width, resized_height), interpolation=cv2.INTER_CUBIC)\n y_rpn_cls, y_rpn_regr, max_iou_for_anchor, best_bbox_for_anchor = calc_rpn(C, annotation, width, height,\n resized_width,\n resized_height,\n nn.get_feature_map_size, image)\n show_pos_anchors_before_rectify(image.copy(), y_rpn_cls, max_iou_for_anchor, best_bbox_for_anchor)\n show_pos_anchors_after_rectify(image.copy(), y_rpn_cls, y_rpn_regr)\n show_neg_anchors(image, y_rpn_cls, max_iou_for_anchor, best_bbox_for_anchor)\n","sub_path":"frcnn/test_data_generators.py","file_name":"test_data_generators.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"528017950","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\n\r\nfrom extendedTk import NumberedFrame\r\nfrom highlight import Highlighter\r\nfrom extendedStyle import JSONStyle\r\n\r\nclass Editor(ttk.Notebook):\r\n def __init__(self, master, style, **kw):\r\n ttk.Notebook.__init__(self, master, **kw)\r\n self._tab_texts = []\r\n self._tab_names = []\r\n self._new_tabs = []\r\n self.style = style\r\n\r\n def add_tab(self, name, new=False, **tab_options):\r\n if name in self._tab_names:\r\n return None\r\n \r\n tab = NumberedFrame(self, style=self.style, **tab_options)\r\n self.add(tab, text=name)\r\n self.select(tab)\r\n\r\n tab.text.highlighter = Highlighter(tab.text, self.style)\r\n\r\n # add to list\r\n self._tab_texts.append(tab.text)\r\n self._tab_names.append(name)\r\n if new:\r\n 
self._new_tabs.append(name)\r\n\r\n return tab.text\r\n\r\n def delete_tab(self, name=None):\r\n if name and name in self._tab_names:\r\n index = self._tab_names.index(name)\r\n self._tab_names.pop(index)\r\n self._tab_texts.pop(index)\r\n self.forget(index)\r\n else:\r\n self._tab_texts.pop(self.index(\"current\"))\r\n self._tab_names.pop(self.index(\"current\"))\r\n self.forget(\"current\")\r\n\r\n def get_text(self):\r\n return self._tab_texts[self.index(\"current\")]\r\n\r\n def set_name(self, name):\r\n if self._tab_names[self.index(\"current\")] != name:\r\n self._tab_names[self.index(\"current\")] = name\r\n self.tab(\"current\", text=name)\r\n\r\n def get_name(self):\r\n return self.tab(\"current\", option=\"text\")\r\n\r\n def get_index(self):\r\n return self.index(\"current\")\r\n\r\n def get_new(self, name):\r\n return name in self._new_tabs\r\n\r\n\r\nif __name__ == \"__main__\":\r\n root = tk.Tk()\r\n\r\n width = 1200\r\n height = 800\r\n\r\n root.title(\"Editor\")\r\n root.geometry(\"{0}x{1}\".format(width, height))\r\n root.rowconfigure(0, weight=1)\r\n root.columnconfigure(0, weight=1)\r\n\r\n filename = \"fibonacci.py\"\r\n\r\n editor = Editor(root, width=1200, height=800, style=JSONStyle(\"style.json\"))\r\n editor.grid(row=0, column=0, sticky=tk.NSEW)\r\n status = tk.Label(root, text=\"Row: 0 | Column: 0\", anchor=tk.W, bg=\"#424242\")\r\n status.grid(row=1, column=0, sticky=tk.EW)\r\n\r\n text = editor.add_tab(filename, True, wrap=tk.NONE, bd=0, padx=5, pady=5, bg=\"#363636\")\r\n\r\n text.read(filename)\r\n\r\n root.mainloop()","sub_path":"editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"3360011","text":"print (\"Hello, Samuel!\")\r\n\r\nprint (\"\"\"How are you...\r\nAre you Fine...\r\nAre you Good..\"\"\")\r\n\r\nprint (\"How are you...\\nAre you Fine...\\nAre you Good..\")\r\n\r\nprint (5, 3)\r\nprint (5 + 3)\r\nprint (5 - 3) #practice in phyton\r\nprint (5 / 3)\r\nprint (5 * 3)\r\n\r\nprint (\"Hello\" + \"Samuel\")\r\nprint (\"HelloSamuel\" * 3)\r\n\r\nname = input(\"What is your name?\")\r\nprint(name)\r\nage = input(\"What is your age?\")\r\nprint(age)\r\nprint (name, age)\r\n\r\na = 5\r\nprint (a)\r\nprice =15.50\r\nprint (price)\r\n\r\nname = input(\"What is your name?\")\r\nage = input(\"How old are you?\")\r\n\r\nprint(\"{} is a nice age to be, {}!\".format(age, name))\r\n\r\nname = input(\"What is your name?\")\r\ncolor = input(\"What is your favorite color?\")\r\nfood = input(\"What is your favorite food?\")\r\n\r\nprint(\"Well then {}, I bet you'd love {} {} then!\".format(name,color,food))\r\n\r\nage = int(input(\"How old are you?\"))\r\nprint(\"In 10 years you will be {}\".format(age + 10))\r\n\r\nsiblings = int(input(\"How many brothers and sisters do you have?\"))\r\ntotal_children = siblings + 1\r\n\r\nprint(\"That means your parents have {} children in total.\".format(total_children))\r\n\r\nnumber = 10\r\nnumber += 5 # Adds 5, number is 15\r\nnumber -= 5 # Subtracts 5, number is 10 again\r\nnumber *= 5 # Multiplies by 5, number is 50\r\nnumber /= 5 # Divides by 5, number is 10 again\r\nprint (number)\r\n\r\nnumber = 2\r\nnumber += 3\r\nnumber *= 10\r\nnumber /= 5\r\nnumber += number\r\nprint (number)\r\n\r\nage = int(input(\"How old are you?\"))\r\n\r\nif age <= 12:\r\n print(\"You are a child\")\r\nelif age > 12 and age < 20: \r\n print(\"You are a teen\")\r\nelif age < 0 or age > 125:\r\n print(\"Invalid 
age\")\r\nelse:\r\n print(\"You are an adult\")\r\n\r\nfood = input(\"What is your favorite food?\")\r\n\r\nif food == \"pizza\":\r\n print(\"Yum!\")\r\nelse:\r\n print(\"Yuck\")\r\n\r\nscore = 0\r\n\r\nanswer = input(\"What does CPU stand for?\")\r\nif answer == \"central processing unit\":\r\n print(\"Correct!\")\r\n score +=1\r\nelse:\r\n print(\"Sorry, wrong answer.\")\r\n \r\nanswer = input(\"How many bits are in a byte?\")\r\nif answer == \"8\":\r\n print(\"Correct!\")\r\n score +=1\r\nelse:\r\n print(\"Sorry, wrong answer.\")\r\n \r\nanswer = input(\"Which is bigger: a kilobyte or a megabyte?\")\r\nif answer == \"megabyte\":\r\n print(\"Correct!\")\r\n score +=1\r\nelse:\r\n print(\"Sorry, wrong answer.\")\r\n \r\nprint(\"You scored {} points!\".format(score))\r\n\r\nfor i in range(5):\r\n print(\"Hello\")\r\n\r\nprint(\"-------- Loop 1 -------\")\r\n\r\nfor i in range(0, 4):\r\n print(i)\r\n\r\nprint(\"-------- Loop 2 -------\")\r\n\r\nfor i in range(1, 7):\r\n print(i)\r\n \r\nprint(\"-------- Loop 3 -------\")\r\n\r\nfor i in range(13, 20):\r\n print(i) \r\n\r\nprint(\"-------- Your loop -------\")\r\n\r\nfor i in range (7,10):\r\n print(i)\r\n\r\ni = 1\r\nwhile i <= 10:\r\n print(i)\r\n i += 1\r\n\r\ni = 1\r\nwhile i < 10:\r\n print(i)\r\n i += 1\r\n\r\n#Example 1\r\npassword = \"Hermoso12!\"\r\n\r\nguess = input(\"Guess the password: \")\r\nwhile guess != password:\r\n print(\"Wrong!\")\r\n guess = input(\"Guess the password: \")\r\n\r\nprint(\"Correct!\")\r\n\r\n#Set up the variable that will run the loop\r\nresponse = \"no\"\r\n\r\n#Repeat loop until we are there!\r\nwhile response != \"yes\":\r\n response = input(\"Are we there yet?\")\r\n\r\nprint(\"Yay!\")\r\n\r\nimport turtle\r\ntia = turtle.Turtle()\r\ntia.pensize(10)\r\n\r\ntia.color(\"red\")\r\ntia.forward(200)\r\ntia.left(90)\r\ntia.color(\"green\")\r\ntia.forward(200)\r\ntia.left(90)\r\ntia.color(\"yellow\")\r\ntia.forward(200)\r\ntia.left(90)\r\ntia.color(\"blue\")\r\ntia.forward(200)\r\n\r\nimport turtle\r\ntia = turtle.Turtle()\r\ntia.pensize(5)\r\n\r\n# Draw a purple square\r\ntia.color(\"purple\")\r\nfor i in range(4):\r\n tia.forward(200)\r\n tia.left(90)\r\n\r\n# Draw an orange triangle\r\ntia.color(\"orange\")\r\nfor i in range(3):\r\n tia.forward(200)\r\n tia.left(120)\r\n\r\n#Create turtle\r\nimport turtle\r\ntia = turtle.Turtle()\r\ntia.color(\"lightGreen\")\r\ntia.pensize(8)\r\n\r\n#Draw a light green square with yellow fill\r\ntia.fillcolor(\"yellow\")\r\ntia.begin_fill()\r\nfor i in range(4):\r\n tia.forward(200)\r\n tia.left(90)\r\ntia.end_fill()\r\n","sub_path":"Phyton intro.py","file_name":"Phyton intro.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"107310170","text":"#achar o maior divisor da fracao.\nn1= int(input(\"Entre o numero: \"))\nn2 = int(input(\"Entre outro numero: \"))\nmenor = n1\n\nif n1>n2:\n menor = n2\nif n2>n1:\n menor = n1\n\ncont = 1\naux = 0\nfor cont in range (1,menor+1):\n if n1%cont == 0 and n2%cont ==0:\n aux = cont\n cont +=1\n\nprint(aux)\nn1 = n1/aux\nn2 = n2/aux\nv = [n1,n2]\nprint(v)\n","sub_path":"simplificando.py","file_name":"simplificando.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"487530100","text":"import os\nimport re\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\n\nfrom libs.lr_scheduler import WarmUpSchedule\nfrom libs.layers import 
ws_reg\n\n\ndef set_cuda_visible_device(ngpus):\n    empty = []\n    for i in range(4):\n        os.system('nvidia-smi -i ' + str(i) + ' | grep \"No running\" | wc -l > empty_gpu_check')\n        f = open('empty_gpu_check')\n        out = int(f.read())\n        if int(out) == 1:\n            empty.append(i)\n    if len(empty) < ngpus:\n        print('available gpus are less than required')\n    cmd = ''\n    for i in range(ngpus):\n        cmd += str(empty[i]) + ','\n    return cmd\n\n\ndef get_learning_rate_scheduler(lr_schedule='stair',\n                                graph_dim=256,\n                                warmup_steps=1000,\n                                init_lr=1e-3,\n                                decay_steps=500,\n                                decay_rate=0.1,\n                                staircase=True):\n    scheduler = None\n    if lr_schedule == 'warmup':\n        scheduler = WarmUpSchedule(\n            d_model=graph_dim,\n            warmup_steps=warmup_steps\n        )\n\n    elif lr_schedule == 'stair':\n\n        scheduler = tf.keras.optimizers.schedules.ExponentialDecay(\n            initial_learning_rate=init_lr,\n            decay_steps=decay_steps,\n            decay_rate=decay_rate,\n            staircase=staircase\n        )\n\n    return scheduler\n\n\ndef focal_loss_fn(loss_type):\n    alpha = float(loss_type.split('_')[1])\n    gamma = float(loss_type.split('_')[2])\n    loss_fn = tfa.losses.SigmoidFocalCrossEntropy(\n        from_logits=False, alpha=alpha, gamma=gamma\n    )\n    return loss_fn\n\n\ndef get_task_options(benchmark_task_type):\n    \"\"\"\n    :param benchmark_task_type:\n    :return: last_activation, loss, metrics\n    \"\"\"\n    if benchmark_task_type == 'reg':\n        return None, tf.keras.losses.MeanSquaredError(), [\n            tf.keras.metrics.MeanAbsoluteError(name='MAE'),\n            tf.keras.metrics.RootMeanSquaredError(name='RMSE'),\n        ]\n    else:\n        return tf.nn.sigmoid, tf.keras.losses.BinaryCrossentropy(), [\n            tf.keras.metrics.BinaryAccuracy(name='Accuracy'),\n            tf.keras.metrics.AUC(curve='ROC', name='AUROC'),\n            tf.keras.metrics.AUC(curve='PR', name='AUPRC'),\n            tf.keras.metrics.Precision(name='Precision'),\n            tf.keras.metrics.Recall(name='Recall'),\n        ]\n\n\ndef get_regularizer(reg_type, wd=0.0):\n    if reg_type == 'ws_reg':\n        return ws_reg\n    elif reg_type == 'l2_reg':\n        return tf.keras.regularizers.l2(l=wd)\n    else:\n        return None\n\n\nclass WeightDecayCallback(tf.keras.callbacks.Callback):\n    def __init__(self, init_lr, coeff, reg_type):\n        super(WeightDecayCallback, self).__init__()\n        self.init_lr = init_lr\n        self.coeff = coeff\n        self.reg_type = reg_type\n\n    def on_epoch_end(self, epoch, logs=None):\n        wd = self.coeff * self.model.optimizer.lr / self.init_lr\n        print(\"\\n lr: \", self.model.optimizer.lr, \" wd: \", wd)\n        regularizer = get_regularizer(self.reg_type, wd)\n\n        decay_attributes = ['kernel_regularizer', 'beta_regularizer', 'gamma_regularizer']\n        for layer in self.model.layers:\n            for attr in decay_attributes:\n                if hasattr(layer, attr):\n                    setattr(layer, attr, regularizer)\n","sub_path":"libs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"35726381","text":"import xml.etree.cElementTree as ET\nfrom .fields import Date, Pin, BridgeID\n\n\nclass TurboBridgeAccount:\n\n    def __init__(self, date=None, partnerid=None, accountid=None, name=None, serviceplanid=None):\n        self.date = Date(date)\n        self.partnerid = partnerid\n        self.accountid = accountid\n        self.name = name\n        self.serviceplanid = serviceplanid\n\n\nclass TurboBridgeBridge:\n    def __init__(self, date=None, conferenceid=None, accountid=None, name=None, pin=None):\n        self.date = Date(date)\n        self.pin = Pin(pin)\n        self.accountid = accountid\n        self.name = name\n        self.conferenceid = BridgeID(conferenceid)\n        self.bridgeid = self.conferenceid.bridgeid\n\n\n\n\n\nclass 
TurboBridgeBaseList:\n\n def __init__(self):\n self.index = 0\n self._list = []\n\n def __len__(self):\n return len(self._list)\n\n def __iter__(self):\n return self\n\n def __getitem__(self, item):\n return self._list[item]\n\n def __setitem__(self, key, value):\n raise Exception(\"Object is immutable\")\n\n def __next__(self):\n self.index += 1\n try:\n return self._list[self.index - 1]\n except IndexError:\n self.index = 0\n raise StopIteration\n\n\n\n\n\nclass TurboBridgeAccountList(TurboBridgeBaseList):\n\n def __init__(self, xml):\n super(TurboBridgeAccountList, self).__init__()\n tree = ET.fromstring(xml)\n try:\n if tree.find('responseList').find('requestItem').find('result').find('error').attrib['message']:\n raise Exception(tree.find('responseList').find('requestItem').find('result').find('error').attrib['message'])\n except AttributeError:\n pass\n for account in tree.find('responseList').find('requestItem').find('result').findall('account'):\n self._list.append(TurboBridgeAccount(date=account.attrib['createdDate'],\n partnerid=account.attrib['partnerID'],\n accountid=account.attrib['accountID'],\n name=account.attrib['name'],\n serviceplanid=account.attrib['servicePlanID']\n ))\n\n\nclass TurboBridgeBridgeList(TurboBridgeBaseList):\n\n def __init__(self, xml):\n super(TurboBridgeBridgeList, self).__init__()\n tree = ET.fromstring(xml)\n try:\n if tree.find('responseList').find('requestItem').find('result').find('error').attrib['message']:\n raise Exception(tree.find('responseList').find('requestItem').find('result').find('error').attrib['message'])\n except AttributeError:\n pass\n for account in tree.find('responseList').find('requestItem').find('result').findall('bridge'):\n self._list.append(TurboBridgeBridge(date=account.attrib['createdDate'],\n accountid=account.attrib['accountID'],\n name=account.attrib['name'],\n conferenceid=account.attrib['conferenceID'],\n pin=account.attrib['pin']\n ))","sub_path":"turbobridge_api/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"640490376","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 7 20:42:59 2020\n\n@author: Admin\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef fy(y,v,t):\n g = 9.81\n y = y + v*t - 0.5*g*t**2.0\n return y\n\ndef fv(y1,y2,v):\n g = 9.81\n v = (v**2.0 -2.0*g*(y2-y1))**0.5\n if(y2>y1): \n v = v\n else:\n v = -v\n return v\n\ndef arrastre(y1,y2):\n import random\n val = 0\n i = 0 \n \n while(val==0):\n x = random.random() #Le asigna a x un numero entre 0 y 1\n if(x<=0.15):\n val = 1\n else:\n i = i + 1\n if(y2>y1): \n y2 = y2*(1.0-0.1*x)\n #y2 = y2\n else:\n y2 = y2*(1.0+0.1*x)\n #y2 = y2\n return y2,i\n\nv0 = 10.0\ndt = 0.01\nn = 300\ny = np.zeros(n)\nt = np.zeros(n)\nv = np.zeros(n)\n\ny[0] = 0.0\nt[0] = 0.0\nv[0] = v0\n\nfor i in range(1,n):\n t[i] = t[i-1] + dt\n y[i] = fy(y[i-1],v[i-1],dt)\n [y[i],cont] = arrastre(y[i-1],y[i])\n v[i] = fv(y[i-1],y[i],v[i-1])\n print(\"Iteración:\",i)\n print(\"posicion calculada:\",y[i])\n print(\"velocidad calculada:\",v[i])\n print(\"iteraciones para calcular x:\" , cont)\n if(y[i]<=0.0):\n import sys\n plt.plot(t[0:i],y[0:i])\n sys.exit(\"La partícula ha regresado a su posición inicial\")\n \n#pueden usar break en vez de 
sys.exit\n","sub_path":"caida_libre_arrastre(1).py","file_name":"caida_libre_arrastre(1).py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"614868642","text":"# (c) Copyright 2009 Cloudera, Inc.\n#\n# vertarget.py\n# Defines VerStringTarget, which creates an intermediate src file containing\n# version string information.\n\nimport os\n\nfrom stitch.targets.targeterror import TargetError\nfrom stitch.targets.target import *\nfrom stitch.targets.anttarget import *\n\nclass VerStringTarget(AntTarget):\n \"\"\" Creates a module/class containing version string information for a\n project.\n\n Can create modules for python and/or Java.\n\n phase: build\n\n version Req - The version string to use\n python_module Opt - The name of the python module to generate\n java_class Opt - The name of the Java class to generate\n\n By default, the string '-test' will be appended to the version\n string supplied. This can be overridden by setting the ant\n property 'release' to true, e.g.:\n\n build -Drelease=true targetname...\n \"\"\"\n\n def __init__(self, version, python_module=None, java_class=None):\n AntTarget.__init__(self)\n\n self.version = version\n self.python_module = python_module\n self.java_class = java_class\n self.required_targets = None\n\n\n def language(self):\n return \"python\" # TODO(aaron) This should be able to return a list.\n\n\n def get_ant_rule_map(self):\n return {\n \"build\" : self.getSafeName() + \"-build\",\n \"default\" : self.getSafeName() + \"-build\",\n \"clean\" : self.getSafeName() + \"-clean\"\n }\n\n\n def get_version(self):\n \"\"\" Return the version string that should be used by depending targets \"\"\"\n return self.force(self.version) + \"${test-suffix}\"\n\n def simple_build_rule(self, rule_name):\n \"\"\" If there is nothing to generate, make a null rule. \"\"\"\n text = \"\\n\"\n return text\n\n\n def antRule(self, rule):\n \"\"\" generates the XML to put in the buildfile\n for the ant rule \"\"\"\n\n (mainName, ruleType) = self.splitRuleName(rule)\n\n if ruleType == \"clean\":\n return self.cleanRule(rule)\n\n if self.python_module == None and self.java_class == None:\n return self.simple_build_rule(rule)\n\n text = \"\\n\"\n\n intermediate_dir = self.getIntermediatePath()\n status_file = os.path.join(intermediate_dir, \"status.file\")\n\n # TODO(aaron): Currently, we generate the uptodate property for\n # this rule but don't actually test it in an block.\n # This is because if you recompile with -Drelease=true, then we\n # want to force regeneration. Similarly, if you then subsequently\n # compile a third time without -Drelease, then we want to revert.\n # The simplest solution is to just always recompile. This technically\n # screws up dependencies and causes extraneous \"recompilation.\" For\n # Java programs, this is a time suck. For python programs, it's just\n # a single extra file copy every time. 
If we do uswe with java a lot,\n # we will need to figure out how to be more clever.\n\n # lazy import to defeat circular reference.\n import stitch.buildfile as buildfile\n\n text = text + \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\"\"\" % { \"intermediate_dir\" : intermediate_dir,\n \"status_file\" : status_file,\n \"rule\" : rule,\n \"targetsfile\" : buildfile.DEFAULT_BUILD_FILENAME\n }\n\n if self.python_module != None:\n text = text + \"\"\"\n \n \n \n \n \n \n \n \n \n \n\"\"\" % { \"pymodule\" : self.force(self.python_module),\n \"intermediate_dir\" : intermediate_dir,\n \"verstring\" : self.force(self.version)\n }\n\n if self.java_class != None:\n text = text + \"\"\"\n \n \n \n \n \n \n \n \n \n \n\"\"\" % { \"javaclass\" : self.force(self.java_class),\n \"intermediate_dir\" : intermediate_dir,\n \"verstring\" : self.force(self.version)\n }\n\n text = text + \"\\n\"\n return text\n\n\n def getIntermediatePath(self):\n \"\"\" return the path where the thrift output goes \"\"\"\n return os.path.join(\"${genfiles-outdir}/\" + self.getBuildDirectory(),\n \"${version-subdir}\")\n\n def get_assembly_dir(self):\n return self.getIntermediatePath()\n\n def get_python_path(self):\n \"\"\" Return the path generated for python. This contains the\n initial module component, due to a quirk in copyPy.py requiring\n that.\n \"\"\"\n\n if self.python_module == None:\n return \"\"\n\n python_mod_parts = self.force(self.python_module).split(\".\")\n initial_module = python_mod_parts[0]\n if len(python_mod_parts) == 1:\n initial_module = initial_module + \".py\"\n\n return self.getIntermediatePath() + \"/gen-py/\" \\\n + initial_module\n\n\n def intermediatePathsForLang(self, lang):\n \"\"\" return paths specific to a given language \"\"\"\n\n if lang == \"python\" and self.python_module != None:\n return [ self.get_python_path() ]\n elif lang == \"java\" and self.java_class != None:\n return [ self.getIntermediatePath() + \"/gen-java/\" ]\n else:\n return []\n\n\n def intermediatePaths(self):\n \"\"\" return paths to external clients \"\"\"\n\n outList = []\n if self.python_module != None:\n outList.append(self.get_python_path())\n if self.java_class != None:\n outList.append(self.getIntermediatePath() + \"/gen-java\")\n\n return outList\n\n\n def cleanRule(self, rule):\n \"\"\" a rule to clean the genfiles output \"\"\"\n text = \"\\n\"\n dest_dir = self.getIntermediatePath()\n text = text + \" \\n\"\n text = text + \"\\n\"\n return text\n\n\n","sub_path":"src/stitch/targets/vertarget.py","file_name":"vertarget.py","file_ext":"py","file_size_in_byte":7096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"262089382","text":"import socket\nimport json\nimport time\nimport _thread\n\ndef client():\n host = '127.0.0.1'\n port = 8880\n\n client_socket = socket.socket()\n client_socket.connect((host, port))\n\n obj = {\"func\":\"insere\",\"data\": {'ds_dispositivo': 'disp1', 'ds_leitura': 's1='+str(time.time())}}\n msg= json.dumps(obj)\n client_socket.send(msg.encode())\n data = client_socket.recv(1024).decode()\n print(data)\n client_socket.close()\n\n\nif __name__ == '__main__':\n i=0\n while i< 5:\n _thread.start_new_thread(client, tuple([]))\n i= i+1\n\n while True:\n print(\".\")\n time.sleep(2)","sub_path":"replicacao_trabalho_final/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"275865413","text":"#encoding=utf-8\r\nfrom common_data import CommonData\r\nfrom question_query import QuestionQuery\r\nfrom java.util import HashMap\r\n\r\nclass listall(CommonData):\r\n def __init__(self): \r\n CommonData.__init__(self) # some share variable can get\r\n\r\n def execute(self): \r\n self.pluginService = __spring__.getBean(\"pluginService\")\r\n if self.pluginService.checkPluginEnabled(\"questionanswer\") == False:\r\n request.setAttribute(\"message\",u\"该插件已经被管理员禁用。\")\r\n return \"/WEB-INF/mod/show_text.ftl\"\r\n \r\n if self.parentGuid == \"\" or self.parentType == \"\":\r\n return \"/WEB-INF/mod/questionanswer/not_found.ftl\"\r\n \r\n pageIndex = self.params.safeGetStringParam(\"page\")\r\n if pageIndex.isdigit() == False:\r\n pageIndex = \"1\"\r\n \r\n qry = QuestionQuery(\"\"\" q.questionId,q.topic,q.createDate,q.createUserId,q.createUserName,\r\n q.objectGuid,q.createUserId, q.createUserName \"\"\")\r\n qry.parentGuid = self.parentGuid\r\n pager = self.params.createPager()\r\n pager.itemName = u\"问题\"\r\n pager.itemUnit = u\"个\"\r\n pager.pageSize = 20\r\n pager.setCurrentPage(int(pageIndex))\r\n pager.totalRows = qry.count()\r\n q_list = qry.query_map(pager) \r\n \r\n map = HashMap()\r\n map.put(\"SiteUrl\",self.pageFrameService.getSiteUrl())\r\n map.put(\"UserMgrUrl\",self.pageFrameService.getUserMgrUrl())\r\n map.put(\"q_list\", q_list)\r\n map.put(\"pager\", pager)\r\n map.put(\"parentGuid\", self.parentGuid)\r\n map.put(\"parentType\", self.parentType) \r\n \r\n pagedata = self.pageFrameService.transformTemplate(map, \"/WEB-INF/mod/questionanswer/listall.ftl\") \r\n \r\n page_frame = self.pageFrameService.getFramePage(self.parentGuid,self.parentType)\r\n page_frame = page_frame.replace(\"[placeholder_content]\",pagedata)\r\n page_frame = page_frame.replace(\"[placeholder_title]\",u\"全部问题列表\")\r\n\r\n self.writeToResponse(page_frame)","sub_path":"WebContent/mod/questionanswer/listall.py","file_name":"listall.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"29206913","text":"#!/usr/bin/env python\n\nfrom common import factorial\n\nSIDE = 20\n\n# Central binomial coefficients (permutations of 0,1)\n# ((2n)! 
choose n!^2 )\n\nv = factorial(2 * SIDE) // (factorial(SIDE) ** 2)\n\nprint(v)\n","sub_path":"15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"419975099","text":"from pptx import Presentation\nfrom pptx.util import Inches\nfrom pptx.util import Pt\nfrom collections import OrderedDict\nfrom pptx.dml.color import RGBColor\nfrom pptx.enum.text import PP_ALIGN\nfrom pptx.enum.text import MSO_ANCHOR, MSO_AUTO_SIZE\nfrom pptx.oxml.xmlchemy import OxmlElement\nimport csv\n\nclass TableAutomater(object):\n\n def __init__(self):\n self.ppt = Presentation(\"template.pptx\")\n self.__tables = OrderedDict()\n self.current_table = None\n self.light_orange = RGBColor(0xFB, 0xB4, 0x20)\n self.dark_orange = RGBColor(0xF1, 0x79, 0x20)\n self.blue = RGBColor(0x00, 0x64, 0xF7)\n self.white = RGBColor(0xFF, 0xFF, 0xFF)\n\n def create_report(self, path_to_categories):\n self.create_categories(path_to_categories)\n\n for table_name, table_obj in self.__tables.items():\n\n slide = self.ppt.slides.add_slide(self.ppt.slide_layouts[5])\n\n x, y, cx, cy = Inches(0), Inches(0), Inches(10), Inches(2)\n row_count = ((self.longest_labels(table_obj)+2)*2)\n col_count = len(table_obj.categories.keys())*3\n print(\"Total row and column count \", row_count, \" \", col_count)\n shape = slide.shapes.add_table(row_count, col_count, x, y, cx, cy)\n table = shape.table\n\n\n col = 0\n row = 0\n cell = table.cell(row, col)\n to_merge = table.cell(row, col+col_count-1)\n cell.merge(to_merge)\n self.recolor_cell(cell, self.dark_orange)\n cell.text = table_name\n cell.text_frame.paragraphs[0].alignment = PP_ALIGN.CENTER\n\n\n col = 0\n row = 2\n cell = table.cell(row, col)\n to_merge = table.cell(row, col + col_count - 1)\n cell.merge(to_merge)\n cell.text = \"AVERAGE CATEGORY SCORE\"\n cell.text_frame.paragraphs[0].alignment = PP_ALIGN.CENTER\n self.recolor_cell(cell, self.light_orange)\n\n\n for category_name, category in table_obj.categories.items():\n row = 1\n cell = table.cell(row, col)\n cell.text = category_name\n cell.text_frame.paragraphs[0].alignment = PP_ALIGN.CENTER\n to_merge = table.cell(row, col+2)\n cell.merge(to_merge)\n self.recolor_cell(cell, self.dark_orange)\n row += 2\n\n cell = table.cell(row, col)\n cell.text = category.rating_2018\n cell.text_frame.paragraphs[0].alignment = PP_ALIGN.RIGHT\n self.recolor_cell(cell, self.light_orange)\n\n col += 1\n cell = table.cell(row, col)\n cell.text = category.rating_2019\n cell.text_frame.paragraphs[0].alignment = PP_ALIGN.RIGHT\n self.recolor_cell(cell, self.light_orange)\n\n col += 1\n cell = table.cell(row, col)\n cell.text = category.overall_rating\n cell.text_frame.paragraphs[0].alignment = PP_ALIGN.RIGHT\n self.recolor_cell(cell, self.light_orange)\n col -= 2\n row += 1\n for label in category.labels:\n cell = table.cell(row, col)\n cell.text = str(label.name)\n to_merge = table.cell(row, col + 2)\n cell.merge(to_merge)\n self.recolor_cell(cell, self.blue)\n row += 1\n cell = table.cell(row, col)\n cell.text = label.rating_2018\n cell.text_frame.paragraphs[0].alignment = PP_ALIGN.RIGHT\n self.recolor_cell(cell, self.blue)\n col += 1\n cell = table.cell(row, col)\n cell.text = label.rating_2019\n cell.text_frame.paragraphs[0].alignment = PP_ALIGN.RIGHT\n self.recolor_cell(cell, self.blue)\n col += 1\n cell = table.cell(row, col)\n cell.text = label.overall_rating\n cell.text_frame.paragraphs[0].alignment = PP_ALIGN.RIGHT\n 
self.recolor_cell(cell, self.blue)\n row += 1\n col -= 2\n while row < row_count:\n cell = table.cell(row, col)\n to_merge = table.cell(row, col + 2)\n cell.merge(to_merge)\n cell.text = \" \"\n self.recolor_cell(cell, self.blue)\n row += 1\n print(row, \" \", col)\n cell = table.cell(row, col)\n cell.text = \" \"\n self.recolor_cell(cell, self.blue)\n col += 1\n cell = table.cell(row, col)\n cell.text = \" \"\n self.recolor_cell(cell, self.blue)\n col += 1\n cell = table.cell(row, col)\n cell.text = \" \"\n self.recolor_cell(cell, self.blue)\n row += 1\n col -= 2\n col += 3\n\n def iter_cells(table):\n for row in table.rows:\n for cell in row.cells:\n yield cell\n\n for cell in iter_cells(table):\n for paragraph in cell.text_frame.paragraphs:\n for run in paragraph.runs:\n run.font.size = Pt(8.5)\n def recolor_cell(self, cell, color):\n cell.fill.solid()\n cell.fill.fore_color.rgb = color\n cell.text_frame.paragraphs[0].font.color.rgb = self.white\n\n\n def unicode_dict_reader(self, utf8_data, **kwargs):\n csv_reader = csv.DictReader(utf8_data, **kwargs)\n self.headers = csv_reader.fieldnames\n for row in csv_reader:\n if row['Category name'] != \"\":\n yield {key: value for key, value in row.items()}\n\n def create_categories(self, path_to_categories):\n\n category_data = self.unicode_dict_reader(open(path_to_categories))\n current_function = \"\"\n\n\n\n for cat in category_data:\n function = cat[\"Function\"]\n if current_function != function:\n if self.current_table is not None:\n self.__tables[current_function] = self.current_table\n self.current_table = Table(function)\n current_function = function\n print(\"Made new table \", str(function))\n\n # fill it up with categories\n pass\n\n category_name = cat[\"Category name\"]\n category_overall = cat[\"Category overall\"]\n cat_2018 = cat[\"Category 2018\"]\n cat_2019 = cat[\"Category 2019\"]\n label_name = cat[\"Question label\"]\n label_overall = cat[\"Question overall\"]\n label_2018 = cat[\"Question 2018\"]\n label_2019 = cat[\"Question 2019\"]\n\n if self.already_exists(category_name):\n old_cat = self.current_table.categories[category_name]\n old_cat.add_label(label_name, label_overall, label_2018, label_2019)\n else:\n new_cat = Category(category_name, category_overall, cat_2018, cat_2019)\n new_cat.add_label(label_name, label_overall, label_2018, label_2019)\n self.current_table.categories[category_name] = new_cat\n\n # add last table to list\n self.__tables[current_function] = self.current_table\n\n def already_exists(self, name):\n if self.current_table is None:\n return False\n if self.current_table.categories.get(name) is not None:\n return True\n return False\n\n def longest_labels(self, table_obj):\n max_value = 1\n for cat_name, category in table_obj.categories.items():\n if max_value < len(category.labels):\n max_value = len(category.labels)\n return max_value\n\n def save(self, path_to_output):\n self.ppt.save(path_to_output)\n\n\n\n\nclass Category(object):\n\n def __init__(self, name, overall_rating, last_year_rating, this_year_rating):\n self.__name = name\n self.__labels = []\n self.__overall_rating = overall_rating\n self.__rating_2018 = last_year_rating\n self.__rating_2019 = this_year_rating\n\n def add_label(self, label_name, label_overall_rating, label_2018_rating, label_2019_rating):\n self.__labels.append(Label(label_name, label_overall_rating, label_2018_rating, label_2019_rating))\n\n @property\n def name(self):\n return self.__name\n\n @property\n def labels(self):\n return self.__labels\n\n @property\n 
def overall_rating(self):\n return self.__overall_rating\n\n @property\n def rating_2018(self):\n return self.__rating_2018\n\n @property\n def rating_2019(self):\n return self.__rating_2019\n\nclass Label(object):\n\n def __init__(self, name, overall_rating, label_2018_rating, label_2019_rating):\n self.__name = name\n self.__overall_rating = overall_rating\n self.__rating_2018 = label_2018_rating\n self.__rating_2019 = label_2019_rating\n\n @property\n def name(self):\n return self.__name\n\n @property\n def overall_rating(self):\n return self.__overall_rating\n\n @property\n def rating_2018(self):\n return self.__rating_2018\n\n @property\n def rating_2019(self):\n return self.__rating_2019\n\nclass Table(object):\n\n def __init__(self, name):\n self.__name = name\n self.__categories = OrderedDict()\n\n def add_category(self, cat, cat_name):\n self.__categories[cat_name] = cat\n\n @property\n def categories(self):\n return self.__categories\n\n @property\n def name(self):\n return self.__name\n\n\n\n\nautomater = TableAutomater()\nautomater.create_report(\"OneDecimalPlace.csv\")\nautomater.save(\"blue_tables.pptx\")\n","sub_path":"BrooksRunningAverageTables/blue_format_tables.py","file_name":"blue_format_tables.py","file_ext":"py","file_size_in_byte":10059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"484905485","text":"from django.conf.urls import url, include\nfrom rest_framework import routers\nfrom .views import *\n\nrouter = routers.DefaultRouter()\nrouter.register(r'^articulos', ArticuloViewSet)\nrouter.register(r'^comercios', ComercioViewSet)\nrouter.register(r'^pedidos', PedidoViewSet)\nrouter.register(r'^pedidos/detalles', DetallePedidoViewSet)\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\nurlpatterns = [\n url(r'^', include(router.urls)),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n]","sub_path":"DeliverEats/Proyecto/Sprints/Sprint 1/Codigo Fuente/DeliveryEat/Main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"651341255","text":"'''\n18 https://leetcode.com/problems/kSum/description/\nPassed! (1 shot 1 killed)\n\nThought process:\n- Define problem using functional programming syntax, will reveal input range and special cases\n- Bring it back to 2 sum problem. 
Take 1 number out at a time\n- Exhaust all interesting features got from sorting\n\nTake away:\n- When in doubt, sort\n- Avoid duplicate results by keeping order inside each result and skipping same values\n- Prune search if we know result is impossible\n\nNext:\n- Iterative approach using 3 loops, find the first elem, the 2nd, then do 2sum with the rest\nhttps://leetcode.com/problems/4sum/discuss/8549/My-16ms-c++-code\n'''\n\n# {a,b,c,d} <- [int] min-0+max, dup\n\nclass Solution:\n def fourSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n def kSum(begin, k, t):\n nonlocal path\n nonlocal res\n\n if k == 2:\n i, j = begin, n - 1\n while i < j:\n s = nums[i] + nums[j] \n if s < t:\n i += 1\n elif s == t:\n # result\n res.append(path + [nums[i], nums[j]])\n\n i += 1\n j -= 1\n while i < j and nums[i] == nums[i-1]: i += 1\n while j > i and nums[j] == nums[j+1]: j -= 1\n else:\n j -= 1\n else:\n # prune\n if sum(nums[n - k:]) < t or sum(nums[begin:begin + k]) > t:\n return\n\n for i in range(begin, n - k + 1):\n # skip duplicate\n if i > begin and nums[i] == nums[i-1]: continue\n\n path.append(nums[i])\n kSum(i + 1, k - 1, t - nums[i])\n path.pop()\n\n if not nums or len(nums) < 4:\n return []\n\n n = len(nums)\n nums.sort()\n res = []\n path = []\n # return ordered list\n kSum(0, 4, target)\n return res\n\n#-------------------------------------------------\ns = Solution()\ntests = [\n ([1, 0, -1, 0, -2, 2], 0)\n , ([0,2,-10,-4,1,-7,-4,9,1,-3,-4,-6,-7,-6,-2,7,-5,2], -29)\n]\n\n[print(s.fourSum(a, t), '<-', t, a) for a, t in tests]\n","sub_path":"2018/lc_4sum.py","file_name":"lc_4sum.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"472050211","text":"#!/home/ubunt/Documents/currel.com/.env/bin/python\n\nimport MySQLdb\nimport cgi\n#import sha, time, Cookie, os\n#import time, Cookie, os\nimport time, os\nimport cgi\nimport sys\nimport time\n\nimport pprint\n\n\n# local libs\nimport db\nimport os\nimport re\nimport style\nimport util\nimport string\nimport utilpub\nimport mcmpages\nimport config\n################################################################################\ndef loginclanding(cgiData,lcsite):\n\tutil.dbglog(\"login: clanding start. 
lcsite='\"+lcsite+\"'\")\n\ttlang = 'clanding_' + lcsite\n\tlandingpage = utilpub.textblock(tlang,returntext=1)\n\tutil.dbglog(\"coblid set: login landingpage='\"+landingpage+\"'\")\n\terror = utilpub.login(sesid,coblid=coblid)\n\thandled = 0\n\tif error:\n\t\tutil.sendmsg(\"login w/ coblid -login returns error\")\n\t\tstyle.preamble(0,\"MCM WOTC\",\"\")\n\t\tutil.showmsg(\"There is a problem using this link - code 1435\")\t\t\t# unique text in case someone reports it\n\t\tstyle.postamble()\n\t\treturn(1)\n\tif landingpage and not landingpage[:16] == '(text not found:':\n\n\t\t#btitle = \"MCM WOTC\"\n\t\tbtitle = \"FastrackHR Employee OnBoarding\"\t# Per Mike 6/14/17\n\t\tif lcsite == \"ramjobapp\":\n\t\t\tbtitle = \"Welcome to RAM International's Job Application\"\n\n\t\t#style.preamble(0,\"MCM WOTC\",\"\")\n\t\tstyle.preamble(0,btitle,\"\")\n\t\tmcmpages.clienturllandingtlang(cgiData,lcsite,landingpage)\n\t\tstyle.postamble()\n\t\treturn(0)\n\n\t# couldn't find landing page...\n\treturn(1)\n\n################################################################################\ncgiData = cgi.FieldStorage()\ncursor = db.connection()\nconfig.config[\"dyn\"][\"dbh\"] = cursor\n\n# testing for benestream JSON server\n#util.dbglog(\"\\n\\nLOGCGIINPUT \"+repr(cgiData))\n#\n#for field in cgiData.keys():\n#\tdbglog(\" field='\"+field+ \"' data='\"+cgiData[field].value+\"'\")\n\n#util.logcgiinput(cgiData)\n\n#util.dbglog(\"calling login - user=\" + cgiData['login'].value + \" pass=\" + cgiData['password'].value )\n\n\n# cookie handling: http://webpython.codepoint.net/cgi_cookie_based_sid\n\nsite = ''\ncoblid = 0\n\nif (os.environ.has_key('SERVER_NAME')):\n\tservername = string.lower(os.environ['SERVER_NAME'])\t\t# i.e. mcms4\n\tlcsite = servername.split('.')[0]\t\t# cut off domain name leaving host name\n\n#\tmainurlhost = config.config['srv']['mainurl'].split('/')\t\t\t# host part of url should be x[2]\n\tmainurlfqdn = config.config['srv']['mainurl'].split('/') # host part of url should be x[2]\n\tmainurlhost = mainurlfqdn[2].split('.')[0] # host part of url should be x[2]\n\n#\tutil.logcgiinput(cgiData)\n\n\tutil.dbglog('mainurlhost='+repr(mainurlhost)+\" lcsite (from url)=\"+lcsite)\n\t#if lcsite == mainurlhost[2]:\n\tif lcsite == mainurlhost:\n#\t\tutil.sendmsg(\"login.py: regular simple url\")\n\t\tutil.dbglog(\"login.py: regular simple url\")\n\telse:\n#\t\tutil.dbglog(\"clienturl -- setting site to wotc\")\n\t\tsite = 'wotc'\n\n\t\t# os.environ.has_key('HTTP_REFERER') might be something like http://mcms4/cgi-bin/mcmdirections.py\n\t\t# we may want to restrict by referer - can only use clienturl IF coming FROM a known corporate net\n\n\t\tcoblid = utilpub.validateclienturl(lcsite)\n\t\t\n#\t\tutil.sendmsg(\"coblid=\"+str(coblid))\n\t\tconfig.config[\"ses\"][\"clienturllid\"] = coblid\n\t\n\nelse:\n\tutil.sendmsg('no server name')\n\nif not site:\n\tsite = util.sitemode()\t\t\t# return short name of site\n\nif config.config[\"srv\"].has_key('downformaintenance') and config.config[\"srv\"][\"downformaintenance\"]:\n\tutil.dbglog('login.py: down for maintenance')\n\timport downmaintenance\n\tdownmaintenance.downformaintenance() # exits\n\tsys.exit() # page was handled ok\n\n\nif config.config[\"ses\"].has_key('sesid'):\n\tutil.dbglog('sesid key is there ses sesid =' + config.config[\"ses\"]['sesid'])\n\n\n\nutil.logcgiinput(cgiData)\n\nif cgiData.has_key('mode'):\n\tutil.dbglog('mode is there')\n\tif cgiData['mode'].value == 'sms':\n\t\tutil.dbglog('mode is 
sms')\n\t\tconfig.config[\"dyn\"][\"loginmode\"] = 'sms'\t\t# for login page to know what flavor to show --- mcmpages.loginpage() may be called via checksession, without cgi data\n\t\t\n\nif (not config.config[\"ses\"].has_key('sesid') or config.config[\"ses\"]['sesid'] == '') and cgiData.has_key('mode') and cgiData['mode'].value == 'sms':\n\tutil.dbglog('no session yet AND mode=sms') \n\tif not (cgiData.has_key('vnumber') and cgiData['vnumber'].value): \n\t\tutil.dbglog('vnumber not set or no value....' ) \n\t\tconfig.config[\"ses\"][\"ulevel\"] = 0\n\t\tmcmpages.loginpage(\"\",cgiData)\t\t# exits\n\n\t### getting here means a vnum was entered, so process it.\n\n\nutil.dbglog('login.py: calling checksession ')\n\nsesid = utilpub.checksession() \t# now creates cookie/session table entry if appropriate\t --- this may create session and go to login page and exit! \n\nutil.dbglog('login.py: checksession set sid='+str(sesid))\n\ntracstamp =''\nsmsvnum = 0\nsmsvnumtmp = ''\n\nif config.config[\"ses\"][\"sesid\"]:\n\ttracstamp = str(config.config[\"ses\"][\"sesid\"]) + \"-\" + time.strftime(\"%H%M%S\")\n\tconfig.config[\"ses\"][\"tracstamp\"] = tracstamp\n\n#\tutil.logcgiinput(cgiData)\n\nif cgiData.has_key('lang'):\n\t# sanitize - 2 characters only\n\tnewlang = cgiData['lang'].value[:2]\n\tsql = \"UPDATE session SET lang='\"+newlang+\"' WHERE id = '\"+str(config.config[\"ses\"][\"sesid\"]) + \"' \"\n\tutil.dosql(sql)\n\tconfig.config[\"ses\"][\"lang\"] = newlang\n\n\nif cgiData.has_key('login'):\n\t#error = login(sesid)\n\tif not cgiData.has_key('password') or not cgiData['password']:\n\t\tmcmpages.loginpage(\"No password given\",cgiData)\t\t# exits\n\terror = utilpub.login(sesid,lu=cgiData['login'].value,lp=cgiData['password'].value)\n\tif error:\n\t\tmcmpages.loginpage(\"Login incorrect\",cgiData)\t\t# exits\n\nelif cgiData.has_key('vnumber'):\t# verification number (P+formcode+postfix)\n\tutil.dbglog('*****vnumber login processing *************************')\n\t#error = utilpub.vlogin(cgiData['vnumber'].value)\n\tvnumber = cgiData['vnumber'].value\n\terror = utilpub.vlogin(vnumber)\n\tif error:\n\t\tmcmpages.loginpage(\"V-number is not valid.\",cgiData)\t\t# exits\n\n#\telse:\n#\t\tutil.dbglog('*****vnumber SET module*************************')\n#\t\tutilpub.setmodule(2)\n\t\n\tif len(vnumber) == 6:\n\t\tsmsvnumtmp = re.sub(\"\\D\", \"\", vnumber)\n\t\tif len(smsvnumtmp) == 6:\n\t\t\tsmsvnum = 1\n\n\tif not error and not smsvnum:\t\t# preserve old functionality for non-sms vnumber login (using formcode code\n\t\tutil.dbglog('*****vnumber SET module*************************')\n\t\tutilpub.setmodule(2)\n\n\n\n\n\n\n# temporary - client landing pages should be trieggered by corpfeatures or coblid\n#elif lcsite == 'ods':\n#\ttlang = 'clanding_' + lcsite\n#\tstyle.preamble(0,\"MCM WOTC\",\"\")\n#\tutilpub.textblock(tlang)\n#\tstyle.postamble()\n#\tsys.exit()\n\n\n\n\nelif coblid:\t# special clienturl login\n\n\tret = loginclanding(cgiData,lcsite)\n\tif ret == 0:\n\t\tsys.exit()\t\t# page was handled ok\n\telse:\n\t\tret = loginclanding(cgiData,'default')\n\t\tif ret == 0:\n\t\t\tsys.exit()\t\t# page was handled ok\n\t\telse:\n\t\t\tutil.sendmsg(\"login w/ coblid returns error even on 'default' tlang\")\n\t\t\tstyle.preamble(0,\"MCM WOTC\",\"\")\n\t\t\tutil.showmsg(\"There is a problem using this link - code 1435\")\t\t\t# unique text in case someone reports it\n\t\t\tstyle.postamble()\n\t\t\tsys.exit()\n\n\t\n\t# check for cobrand login landing page (login='ods',clienturl='Y' then look for tlang 
'clanding_ods' - if present, serve that.\n# has special 'Macros' for login box and location pulldowns\n#\tutil.dbglog(\"login: clanding start. lcsite='\"+lcsite+\"'\")\n#\ttlang = 'clanding_' + lcsite\n#\tlandingpage = utilpub.textblock(tlang,returntext=1)\n#\tutil.dbglog(\"coblid set: login landingpage='\"+landingpage+\"'\")\n#\terror = utilpub.login(sesid,coblid=coblid)\n#\thandled = 0\n#\tif error:\n#\t\tutil.sendmsg(\"login w/ coblid -login returns error\")\n#\t\tstyle.preamble(0,\"MCM WOTC\",\"\")\n#\t\tutil.showmsg(\"There is a problem using this link - code 1435\")\t\t\t# unique text in case someone reports it\n#\t\tstyle.postamble()\n#\t\tsys.exit()\n\t\t\n#\tif landingpage and not landingpage[:16] == '(text not found:':\n#\n#\n#\t\tstyle.preamble(0,\"MCM WOTC\",\"\")\n##\t\tprint landingpage\n#\t\tmcmpages.clienturllandingtlang(lcsite,landingpage)\n#\t\tstyle.postamble()\n#\t#\thandled = 1\n#\t\tsys.exit()\n\t\n\n\n\t# still here may mean landing page not found. Try with generic one\n\n\n\n\n#\tif not handled:\n#\t\tutil.dbglog(\"coblid set: login\")\n#\t\terror = utilpub.login(sesid,coblid=coblid)\n#\t\tif error:\n#\t\t\tstyle.preamble(0,\"MCM WOTC\",\"\")\n#\t\t\tutil.showmsg(\"There is a problem using this link\")\t\t\n#\t\t\tstyle.postamble()\n#\t\t\tsys.exit()\n\n\n\n\n\nelse: \n\tutil.dbglog(\"fall through call to login page\")\n\tmcmpages.loginpage(\"\",cgiData)\t\t# normal on first request (no login elements\n\n#pp = pprint.PrettyPrinter(indent=2)\n#util.dbglog(\"config: \" + pp.pformat(config.config))\n\n\nutil.dbglog(\"2nd call to checksession***********************************************\")\n#utilpub.checksession() \t# call once more to set up corpfeatures\nsid = config.config[\"ses\"][\"ses\"]\nsesid = config.config[\"ses\"][\"sesid\"]\t\t# added 6/15/16 - if config-srv-logincreatesession set, sessid changes\nutilpub.checksession(overridsid=sid) \t# call once more to set up corpfeatures - force to use current sid to avoid creating a new one\n\nmodule = int(config.config[\"ses\"][\"module\"])\nulevel = int(config.config[\"ses\"][\"ulevel\"])\n\nconfig.config[\"dyn\"][\"tabs2014\"] = 1\n\n\n#if taxcredits:\n#if ulevel == 1 and module == 2:\nif smsvnum:\n\tutil.dbglog(\"sms login\")\n\textraheader='surveyjs'\n\tstyle.preamble(extraheader,\"MCM ONBOARD\",\"accept message\")\n\t#mcmpages.acceptmsg(config.config[\"ses\"][\"peepid\"])\n\tmcmpages.acceptmsg(vnumber)\n\tstyle.postamble()\n\nelif ulevel == 1 and module == 2:\n\t#Job applicant returning to do hiring forms\n\n\tutil.dbglog('login.py calling setempnumber')\n\n\tif config.config[\"corpfeature\"].has_key('setempnumber') and module == 2:\n\t\tpeepid = int(config.config[\"ses\"][\"peepid\"])\n\t\tempnumber = utilpub.setempnumber(peepid)\n\n\n\n\n\n#### these are active on live site, with alerts... 
Remove them on live site AFTER testing to be sure setempnumber call in all cases correctly\n# 2/12/17\n\n\n\n\n\n#\t# for RAM- set employee number now that the person is 'hired'\n#\tif int(config.config[\"ses\"][\"mcorp\"]) == 1:\n#\t\tpeepid = int(config.config[\"ses\"][\"peepid\"])\n#\t\tempnumber = utilpub.setempnumber(peepid)\n#\n#\tmcorp = int(config.config[\"ses\"][\"mcorp\"])\n#\tif mcorp == 242 or mcorp == 243 or mcorp == 381 or mcorp == 388 or mcorp == 383 or mcorp == 206 :\n#\t\tpeepid = int(config.config[\"ses\"][\"peepid\"])\n#\t\tempnumber = utilpub.setempnumber(peepid)\n\n\n\n\tutil.dbglog(\"module 2; go to hiring questions\")\n\textraheader='surveyjs'\n\tstyle.preamble(extraheader,\"MCM ONBOARD\",\"personalinfo\")\n\tmcmpages.personalinfo(sesid,cgiData,mode=1)\t\n\tstyle.postamble()\n\n\nelif site == 'taxcredits':\n\t\n\tmcmpages.taxcreditshome()\n\tutil.dbglog(\"taxcredits home page\")\n\nelif site == 'wotc':\n\n\tutil.dbglog(\"login(): wotc home page\")\n\t#if int(config.config[\"ses\"][\"ulevel\"]) >= 3:\n\tif ulevel >= 3:\n\t\tstyle.preamble(0,\"MCM WOTC\",\"\")\n\t\tmcmpages.controlpanel(cgiData)\n\telif coblid:\n\t\tmcmpages.clienturllandingpage(cgiData)\n\telse:\n\t\t#mcmpages.agreepage_8850()\t\t\t# agree to esignature\t\t# out 8/12/13\n\t\t#mcmpages.personalinfo(cursor,sesid)\t\n\t\textraheader='surveyjs';\n\t\tstyle.preamble(extraheader,\"MCM WOTC\",\"personalinfo\")\n\t\tmcmpages.personalinfo(sesid,cgiData)\t\n\n\n\tstyle.postamble()\nelse:\n\tutil.dbglog(\"login(): onboard home page\")\n#\tpp = pprint.PrettyPrinter(indent=2)\n#\tutil.dbglog(\"login(): config= \\n\" + pp.pformat(config.config))\n#\tprint \"\\n\"\n#\tmcmpages.onboardhome()\n\n#\tulevel = int(config.config[\"ses\"][\"ulevel\"])\n#\tutil.dbglog(\"ulevel=\"+str(ulevel))\n#\tif (config.config[\"ses\"][\"ulevel\"]) > 8:\n\n\tif int(config.config[\"ses\"][\"ulevel\"]) >= 3:\n\t\tstyle.preamble(0,\"MCM OnBoard\",\"\")\n\t\tmcmpages.controlpanel(cgiData)\n\telse:\n\t\t#mcmpages.agreepage_8850()\t\t\t# agree to esignature\n\t\t#mcmpages.personalinfo(cursor,sesid)\t\n\t\textraheader='surveyjs'\n\t\tstyle.preamble(extraheader,\"MCM OnBoard\",\"personalinfo\")\n\t\tmcmpages.personalinfo(sesid,cgiData)\n\n#\tif ulevel >= 9:\n#\t\tutil.dbglog(\"ulevel admin \")\n#\t\tmcmpages.adminblock()\n#\telse:\n#\t\tutil.dbglog(\"ulevel NOT admin \")\n\n\tstyle.postamble()\n\n################################################################################\n","sub_path":"cgi-bin/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":11866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"274310048","text":"from __future__ import absolute_import\nimport os.path as osp\nfrom PIL import Image\nfrom torchvision.transforms import functional as F\nimport torch\nfrom my_dataset import IU_X_RAY\nimport operator\nimport collections\n\n\n\nclass MRG_sampler(object):\n def __init__(self, dataset, root=None, transform=None, range_max = None, range_min = 0):\n super(MRG_sampler, self).__init__()\n self.dataset = dataset\n self.root = root\n self.transform = transform\n\n mesh_term_dict = {}\n mesh_term_list = []\n\n for report in self.dataset:\n for key in report['mesh_term_normal'].keys():\n if key+'__norm' not in mesh_term_dict:\n mesh_term_dict[key+'__norm'] = 0\n elif report['mesh_term_normal'][key] > 0:\n mesh_term_dict[key+'__norm']+= 1\n for key in report['mesh_term_abnormal'].keys():\n if key+'__abnorm' not in mesh_term_dict:\n mesh_term_dict[key + '__abnorm'] = 
0\n elif report['mesh_term_abnormal'][key] > 0 :\n mesh_term_dict[key + '__abnorm'] += 1\n print(len(mesh_term_dict))\n mesh_term_dict = sorted(mesh_term_dict.items(), key=operator.itemgetter(1))\n mesh_term_dict.reverse()\n mesh_term_dict = collections.OrderedDict(mesh_term_dict)\n # mesh_term_dict.__reversed__()\n self.mesh_term_dict = mesh_term_dict\n\n for key in mesh_term_dict.keys():\n if range_max != None:\n if range_max > mesh_term_dict[key] > range_min :\n mesh_term_list.append(key)\n else:\n if mesh_term_dict[key] > range_min :\n mesh_term_list.append(key)\n\n self.mesh_term_list = mesh_term_list\n print(len(mesh_term_list))\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, indices):\n if isinstance(indices, (tuple, list)):\n return [self._get_single_item(index) for index in indices]\n return self._get_single_item(indices)\n\n def _get_single_item(self, index):\n filename_pair = self.dataset[index]['filename_pair']\n\n pos_image_name = filename_pair[0]\n side_image_name = filename_pair[1]\n id = self.dataset[index]['index']\n mesh_term_norm_num = self.dataset[index]['mesh_term_normal']\n mesh_term_abnorm_num = self.dataset[index]['mesh_term_abnormal']\n\n targets_onehot = torch.zeros(len(self.mesh_term_list))\n\n one_hot_index = []\n for key in mesh_term_norm_num.keys():\n if mesh_term_norm_num[key]>0 and (key+'__norm') in self.mesh_term_list:\n one_hot_index.append(self.mesh_term_list.index(key+'__norm'))\n\n for key in mesh_term_abnorm_num.keys():\n if mesh_term_abnorm_num[key]>0 and (key+'__abnorm') in self.mesh_term_list:\n one_hot_index.append(self.mesh_term_list.index(key+'__abnorm'))\n\n if len(one_hot_index)>0:\n targets_onehot.scatter_(0, torch.LongTensor(one_hot_index),1)\n\n # Image.open(osp.join(self.root, pos_image_name)).convert('L').show()\n # Image.open(osp.join(self.root, pos_image_name)).convert('RGB').show()\n # Image.open(osp.join(self.root, pos_image_name)).convert('I').show()\n\n pos_img = Image.open(osp.join(self.root,pos_image_name)).convert('RGB')\n side_img = Image.open(osp.join(self.root,side_image_name)).convert('RGB')\n if self.transform is not None:\n pos_img = self.transform(pos_img)\n side_img = self.transform(side_img)\n\n return pos_img, side_img, index, targets_onehot\n\n\n\ndef main():\n dataset = IU_X_RAY('./dataset/','./dataset/val/val_data.json')\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"mrg_sampler.py","file_name":"mrg_sampler.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"119952412","text":"\"\"\"\nJogo da forca - V5.0\n\nAutor: Otavio Cruzatto\nData de criacao: 23/07/218\nData de modificacao: 24/07/2018\n\nCaracteristicas desta versao:\n - As palavras sao selecionadas aleatoriamente\n - Durante a mesma secaoo de execucaoo, nenhuma palavra eh repetida\n - Pode-se digitar letras maiusculas e minusculas\n - O arquivo aonde se encontram as palavras eh aberto e fechado\n - O caracter utilizado para cada letra eh o underscore (_)\n - Na setima letra errada, a palavra eh revelada\n - A palavra que eh pega do arquivo eh passada inteira para maiuscula\n\"\"\"\n\nimport random\n\nprint(\"### Jogo da forca ###\")\n\nref_arquivo = open(\"palavras_dicas.txt\",\"r\")\npalavras_chave = ref_arquivo.readlines()\nref_arquivo.close()\nletras_utilizadas = []\nletras_descobertas = []\npalavras_utilizadas = []\nacertou_palavra = False\ndeseja_jogar = True\nquantidade_de_erros = 0\n\nwhile(deseja_jogar == True):\n\n 
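# draw a random word; the loop below re-draws until one that has not been used this session comes up\n    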
palavra = palavras_chave[random.randrange(0, len(palavras_chave))]\n while palavra in palavras_utilizadas:\n palavra = palavras_chave[random.randrange(0, len(palavras_chave))]\n\n palavras_utilizadas.append(palavra)\n palavra = palavra.split(\",\")\n palavra_selecionada = palavra[0].upper()\n dica = palavra[1].strip()\n\n for contador in range(0, len(palavra_selecionada)):\n letras_descobertas.append(\"_\")\n\n print(\"\\n\\nDica 1: A palavra possui \" + str(len(palavra_selecionada)) + \" letras!\")\n print(\"Dica 2: \" + dica.capitalize())\n\n while(acertou_palavra == False and quantidade_de_erros < 7):\n letra_chutada = (str(input(\"\\n\\nDigite uma letra: \"))).upper()\n\n if letra_chutada in letras_utilizadas:\n print(\"Voce ja digitou esta letra...\")\n letras_utilizadas.append(letra_chutada)\n else:\n letras_utilizadas.append(letra_chutada)\n if letra_chutada in palavra_selecionada:\n for contador in range(0, len(palavra_selecionada)):\n if letra_chutada == palavra_selecionada[contador]:\n letras_descobertas[contador] = letra_chutada\n else:\n quantidade_de_erros = quantidade_de_erros + 1\n print(\"Voce errou: \" + str(quantidade_de_erros) + \" vezes\")\n\n for contador in range(0, len(palavra_selecionada)):\n print(letras_descobertas[contador], end=' ')\n\n if \"_\" in letras_descobertas:\n acertou_palavra = False\n else:\n acertou_palavra = True\n\n if quantidade_de_erros == 7:\n print(\"\\n\\nLamento, voce errou :(\")\n print(\"A palavra era: \" + palavra_selecionada)\n print()\n jogar_novamente = (str(input(\"Deseja jogar novamente? [y/n]\"))).upper()\n else:\n print(\"\\n\\nParabens, voce conseguiu!!!\")\n jogar_novamente = (str(input(\"Deseja jogar novamente? [y/n]\"))).upper()\n\n if jogar_novamente == \"Y\":\n deseja_jogar = True\n acertou_palavra = False\n letras_descobertas = []\n letras_utilizadas = []\n quantidade_de_erros = 0\n else:\n deseja_jogar = False\n print(\"Obrigado por jogar :)\")\n\n if len(palavras_utilizadas) == len(palavras_chave):\n print(\"Palavras esgotadas, em breve teremos mais...\")\n print(\"Obrigado por jogar :)\")\n deseja_jogar = False\n\n","sub_path":"jogo_da_forca/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"375916750","text":"import pickle\nimport networkx as nx\nimport os.path\nimport math\nimport random\nfrom collections import deque\n\n\n\n\n# Assumes no self-loops\n\n\nSTORAGE_PATH = os.path.join(os.path.abspath(\"\"), 'storage')\n\n\n#def betweenness_centrality(tweet_id)\n\ntweet_id = 1084813938892697600 #example. 
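placeholder id used for a manual run; 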
comment out\n\ntweet_path = os.path.join(STORAGE_PATH, f'{tweet_id}.pkl')\n\nwith open(tweet_path, 'rb') as f:\n storage = pickle.load(f)\n\ndigraph = nx.from_dict_of_dicts(storage['digraph'], create_using = nx.DiGraph())\n\n\n\"\"\"\nBrandes' Algorithm from (2001)\"A Faster Algorithm for Betweeness Centrality\"\n\"\"\"\n\nbetweenness_dict = {}\n\n# All betweenness values are initiially 0\nfor vertex in digraph:\n betweenness_dict[vertex] = 0\n\nfor s in digraph:\n stack = deque() # Empty stack (LIFO) for each vertex looped\n # Will use append() to push to the right side of the stack\n # will use pop() to pop from the right side of the stack\n\n p = {} # keys are vertices, values are lists\n sigma = {} # keys are vertices, values are integers\n d = {} # keys are vertices, values are integers\n\n for w in digraph:\n p[w] = []\n for t in digraph:\n sigma[t] = 0\n d[t] = -1\n\n sigma[s] = 1\n d[s] = 0\n queue = deque() # This represents a FIFO queue data structure\n # will use append() to enqueue to the right end of the queue\n # will use popleft() to dequeue the item on the leftmost side\n\n queue.append(s) # First in will be on the left\n while len(queue) > 0:\n v = queue.popleft() # first out will be on the left\n stack.append(v)\n\n for w in digraph.neighbors(v):\n # w found for the first time?\n if d[w] < 0:\n queue.append(w)\n d[w] = d[v] + 1\n # shortest path to w via v?\n if d[w] == d[v] + 1:\n sigma[w] = sigma[w] + sigma[v]\n p[w].append(v)\n delta = {}\n for v in digraph:\n delta[v] = 0\n while len(stack) > 0:\n w = stack.pop()\n for v in p[w]:\n delta[v] = delta[v] + ((sigma[v]/sigma[w])*(1 + delta[w]))\n if w != s:\n betweenness_dict[w] = betweenness_dict[w] + delta[w]\nprint(betweenness_dict)\n","sub_path":"old/betweennesscentrality.py","file_name":"betweennesscentrality.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"533658618","text":"#!/usr/bin/env python3\n\n\"\"\"Dictionary lookup command.\"\"\"\n\nimport re\nimport urllib.parse\n\nimport discord\nfrom discord.ext import commands\n\nBASE_URL_OWL_API = \"https://owlbot.info/api/v1/dictionary/{0}{1}\"\n\nMAX_NUM_RESULTS = 5\n\n\nclass Dictionary:\n \"\"\"Dictionary lookup command.\"\"\"\n\n @commands.command()\n @commands.cooldown(6, 12)\n async def define(self, ctx, word, *args):\n \"\"\"Define a word.\n\n Example usage:\n * define cat\n * define dog\n * define fox\n \"\"\"\n word = word.lower()\n params = \"?{0}\".format(urllib.parse.urlencode({\"format\": \"json\"}))\n url = BASE_URL_OWL_API.format(word, params)\n async with ctx.bot.session.get(url) as response:\n if response.status == 200:\n data = await response.json()\n\n if not data:\n await ctx.send(\"No results found for that word.\")\n return\n\n embed = discord.Embed(title=word)\n embed.url = BASE_URL_OWL_API.format(word, \"\")\n\n results_to_display = min(MAX_NUM_RESULTS, len(data))\n\n for index in range(0, results_to_display):\n result = data[index]\n definition = result.get('defenition')\n description = re.sub(\"<.*?>|\\u00E2|\\u0080|\\u0090\", \"\",\n definition.capitalize())\n example = result.get('example')\n if example:\n example = re.sub(\"<.*?>|\\u00E2|\\u0080|\\u0090\", \"\",\n example.capitalize())\n example = f\"*{example}*\"\n description = f\"{description}\\nExample: {example}\"\n embed.add_field(name=result[\"type\"], value=description)\n\n embed.set_footer(text=(\"Powered by OwlBot | \"\n f\"Showing {results_to_display} of {len(data)} 
results.\"))\n\n await ctx.send(embed=embed)\n else:\n message = \"Connection failed, or that isn't a word. :<\"\n await ctx.send(message)\n\n\ndef setup(bot):\n \"\"\"Set up the extension.\"\"\"\n bot.add_cog(Dictionary())\n","sub_path":"cogs/lookup/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"100099491","text":"from lib.preprocessing import data_for_training as data\nfrom lib.evaluation import recommendations as recs\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import NMF\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\n\n\nif __name__ == '__main__':\n np.random.seed(0)\n\n # load ratings above count threshold\n ratings_above_count_threshold_df = data.densify_ratings_df(user_ratings_count_threshold=50,\n isbn_ratings_count_threshold=200)\n\n # create test set and copy of original ratings\n train_set, test_set = train_test_split(ratings_above_count_threshold_df, test_size=0.1)\n ratings_copy = train_set.copy()\n\n mean_rating = ratings_above_count_threshold_df['Book-Rating'].mean()\n\n test_set_without_ratings = test_set.copy()\n test_set_without_ratings['Book-Rating'] = np.nan\n\n full_set_without_test_set_ratings = pd.concat([train_set, test_set_without_ratings])\n\n ratings_pivot = full_set_without_test_set_ratings.pivot(index='User-ID', columns='ISBN',\n values='Book-Rating').fillna(mean_rating)\n\n # decomposition and prediction\n R_pivot = ratings_pivot.values\n\n model = NMF(n_components=20, init='random', random_state=0)\n W = model.fit_transform(R_pivot)\n H = model.components_\n all_user_predicted_ratings = np.dot(W, H)\n\n all_user_predicted_ratings_df = pd.DataFrame(all_user_predicted_ratings)\n all_user_predicted_ratings_df.set_axis(ratings_pivot.columns, axis=1, inplace=True)\n all_user_predicted_ratings_df.set_axis(ratings_pivot.index, axis=0, inplace=True)\n\n # making recommendations\n recs_predicted_ratings_df = pd.DataFrame(all_user_predicted_ratings, columns=ratings_pivot.columns).set_index(\n ratings_pivot.index).sample(n=1)\n\n isbn_book_dict = recs.map_isbn_to_names()\n\n for user_id, row in recs_predicted_ratings_df.iterrows():\n topn_results = row.sort_values(ascending=False).iloc[:5]\n print('Finding recommendations for user {0}...'.format(user_id))\n\n rec_index = 1\n for rec_isbn, rec_score in topn_results.iteritems():\n rec_book_name = isbn_book_dict[rec_isbn]\n print('Top {0} ISBN: {1}, {2}'.format(rec_index, rec_isbn, rec_book_name))\n rec_index += 1\n\n # metrics\n actual_ratings = []\n predicted_ratings = []\n for index, row in test_set.iterrows():\n actual_ratings.append(row['Book-Rating'])\n predicted_ratings.append(all_user_predicted_ratings_df.at[row['User-ID'], row['ISBN']])\n\n RMSE_train_set = sqrt(mean_squared_error(R_pivot, all_user_predicted_ratings))\n print('RMSE train set: ', RMSE_train_set)\n\n RMSE_test_set = sqrt(mean_squared_error(actual_ratings, predicted_ratings))\n print('RMSE test set: ', RMSE_test_set)\n","sub_path":"matrix_factorization_nn.py","file_name":"matrix_factorization_nn.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"77243341","text":"# encoding: utf-8\n# encoding: iso-8859-1\n# encoding: win-1252\n\nimport utils\n\n\ndef distinguish_words(trans):\n # função para separar uma frase em 
palavra por palavra\n j = 0 # apenas um incrementador\n first_read = [] # variavel onde será salvo a frase\n for i in range(len(trans.results)):\n first_read.append(trans.results[i].alternatives[0].transcript)\n # print(first_read)\n mid_word = [[]] # variavel para montar as palavras reconhecidas\n for x in range(len(first_read)): # loop pra olhar quantas frases tem\n for k in range(len(first_read[x])): # loop pra olhar cada caracter da frase\n if ' ' not in first_read[x][k]:\n mid_word[j] += first_read[x][k] # adiciona cada caracter da palavra\n else:\n j += 1 # incrementa o incrementador\n mid_word.append([]) # cria um espaço para próxima palavra\n # print(mid_word)\n word = [] # variavel para juntar os caracteres e formar a palavra\n\n for u in range(len(mid_word)):\n word.append([]) # cria o espaço da palavra\n word[u] = ''.join(mid_word[u]) # junta os caracteres e forma a palavra\n # print(word)\n\n # as linhas abaixo são necessárias pra rodar no python 2\n # for l in range(len(word)): # loop para mudar o código das palavras\n # word[l] = word[l].encode('utf-8') # código pra aceitar ç e acentos\n # print(word[l])\n return word\n\n\ndef fluencia(words):\n # função para realizar a pontuação do teste, o número de animais reconhecidos\n rec_animals = [] # lista para indicar os animais ja reconhecidos\n score = 0\n # print(words)\n h = 0 # variavel auxiliar\n while h < len(words):\n temp = '' # variavel onde vai se combinar as strings\n no_match = 1 # variavel de controle de loop\n lastword = 0\n while no_match == 1: # loop pra testar tds as possibilidades\n for u in range(h, len(words) - lastword): # combina as strings para testar\n if u == h:\n temp = words[u]\n else:\n temp = temp + '-' + words[u]\n # print(temp, lastword)\n\n for s in utils.list_animals: # loop para verificar tds os animais da lista\n # print(s, temp)\n if len(temp) == len(s): # verifica o tamanho das strings\n # print('mesmo tamanho')\n if temp in s: # verifica se eh um animal\n # print('aaaaa', temp)\n if s not in rec_animals: # elimina animais repetidos\n rec_animals.append(s)\n score += 1\n no_match = 0\n print(score, s, temp)\n if len(words) - lastword - h >= 2: # em caso do animal hifado, aumenta o step no prx animal\n h += len(words) - lastword - h\n else:\n h += 1\n break # tds os breaks são para quebrar o loop for\n else:\n if len(words) - lastword - h >= 2: # em caso do animal hifado, aumenta o step no prx animal\n h += len(words) - lastword - h\n else:\n h += 1\n lastword += 1\n no_match = 0\n break\n elif temp[:-1] in s: # elimina animais com semantica semelhantes\n if s not in rec_animals:\n rec_animals.append(s)\n no_match = 0\n score += 1\n print(score, s, temp, 'aaaaaaa')\n h += 1\n break\n else:\n lastword += 1\n h += 1\n no_match = 0\n break\n if s == 'umbrella': # nao houve match com a combinacao, muda a combinacao pra nova tentativa\n lastword += 1\n if lastword + h == len(words) and no_match == 1: # deixou apenas a ultima string e nao teve match, a string nao eh animal e segue\n no_match = 0\n h += 1\n\n return score, rec_animals\n","sub_path":"fluencia.py","file_name":"fluencia.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"121615805","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom jinja2 import Template, Environment, PackageLoader\n\n\ndef fun1():\n \"\"\"\n 使用模板的方法1\n \"\"\"\n temp = Template('Hello {{ name }}')\n print(temp.render(name='lichangan'))\n\n\ndef fun2():\n \"\"\"\n 
使用模板的方法2,使用env,其实方法1也是使用env,封装了一层而已\n \"\"\"\n env = Environment()\n temp = env.from_string('Hello {{ name }}')\n print(temp.render(name='zhuangruiying'))\n\n\ndef fun3():\n \"\"\"\n 使用模板的方法3,从template文件夹读取模板文件\n :return:\n \"\"\"\n env = Environment(loader=PackageLoader('template_jinja2', 'jinja2_file'))\n temp = env.get_template('simple.html')\n valus = {\n \"items\": [{\n \"href\": \"www.lichangan.com\",\n \"caption\": '李昌安的个人网站'\n }],\n \"title\": \"个人网站\",\n \"content\": \"牛逼的个人网站\"\n }\n print(temp.render(**valus))\n\n\nif __name__ == '__main__':\n\n # fun1()\n # fun2()\n fun3()\n","sub_path":"learn_web/template_jinja2/temp_jinjia2.py","file_name":"temp_jinjia2.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"611393970","text":"from math import radians, cos, sin, asin, sqrt\nimport pytz\nimport datetime\n\n\n\nphoto_list = [['https://farm9.staticflickr.com/8561/15733325680_b5129c6845.jpg', '129175734@N08'],\n ['https://farm8.staticflickr.com/7549/15920620565_03d9f8585b.jpg', '129175734@N08'],\n ['https://farm9.staticflickr.com/8683/15300980603_7291e0fe47.jpg', '129175734@N08'],\n ['https://farm8.staticflickr.com/7471/15920617805_b819cbd26a.jpg', '129175734@N08'],\n ['https://farm9.staticflickr.com/8583/15733173258_bdab285518.jpg', '129175734@N08'],\n ['https://farm8.staticflickr.com/7508/15300977773_e0baedab0c.jpg', '129175734@N08'], \n ['https://farm8.staticflickr.com/7490/15734615419_87c8aa2d99.jpg', '129175734@N08'],\n ['https://farm8.staticflickr.com/7461/15949594591_23a6d9bb5f.jpg', '84906483@N08'],\n ['https://farm8.staticflickr.com/7545/15249313264_fb7d9fb0e0.jpg', '30850308@N08']]\n\n\n\ndef haversine(lon1, lat1, lon2, lat2):\n \"\"\"\n Calculate the great circle distance between two points \n on the earth (specified in decimal degrees)\n Usage: haversine(lon1, lat1, lon2, lat2)\n \"\"\"\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n miles = km * 0.621371\n return miles\n\n\n\ndef ndb_result_sort(top3):\n sort_list = {}\n for top3_item in top3:\n sort_list[top3_item.spot_id] = top3_item.avg_max_size\n sorted_list = sorted(sort_list, key=sort_list.__getitem__, reverse=True)[:3]\n top_list = []\n for top3_item in top3:\n if top3_item.spot_id in sorted_list:\n top_list.append(top3_item)\n return top_list\n\n\n\ndef near_by_spot_sort(list_parse, lon1, lat1):\n ''' \n This is a script that parse through all result data to find the closest 6\n spot in parallel to the user's current location.\n -- This will be optimized over time\n\n There are limitations to forecast data for number of spot_ids\n '''\n parse_result_dict = {}\n for data in list_parse:\n parse_result_dict[data.spot_id] = haversine(lon1, lat1, data.latlong.lon, data.latlong.lat)\n # Sorting the result post parsing\n result_sort = sorted(parse_result_dict, key=parse_result_dict.__getitem__, reverse=False)[:6]\n response_result = []\n for item in list_parse:\n if item.spot_id in result_sort:\n spot = item.spot_id\n response_result.append([item, parse_result_dict[spot]])\n response_result = sorted(response_result, reverse = False, key = lambda pair: pair[1])\n return response_result\n\n\ndef get_today():\n utc_time = datetime.datetime.utcnow()\n utc_time = utc_time.replace(tzinfo=pytz.utc)\n 
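# convert the now timezone-aware UTC timestamp to US/Pacific before taking the calendar date\n    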
today = datetime.datetime.astimezone(utc_time, pytz.timezone('US/Pacific'))\n return today.date()\n\n\ndef state_checker(res):\n for address in res['results']:\n for arr in address['address_components']:\n if (u'administrative_area_level_1' in arr['types'] and\n arr['long_name'] == u'California'):\n return True\n return False\n\n\n\ndef api_error_handler(api_call_function):\n api_call = api_call_function\n count = 0\n while count < 5:\n if api_call != \"API call Failed\":\n break\n elif api_call == \"API call Failed\":\n count += 1\n api_call = api_call_function\n elif count == 4:\n return False\n return api_call\n","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"371232157","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 16 14:13:20 2018\r\n\r\ntkinter中Frame类型\r\n\r\n\"\"\"\r\n\r\nimport tkinter\r\n\r\nwindow = tkinter.Tk()\r\nframe = tkinter.Frame(window)\r\nframe.pack()\r\nframe2 = tkinter.Frame(window, borderwidth=4, relief=tkinter.GROOVE)\r\nframe2.pack()\r\n\r\nfirst = tkinter.Label(frame, text='First label')#first在frame里\r\nfirst.pack()\r\nsecond = tkinter.Label(frame2, text='Second label')#second在frame2里\r\nsecond.pack()\r\nthird = tkinter.Label(frame2, text='Third label')#third在frame2里\r\nthird.pack()\r\n\r\nwindow.mainloop()","sub_path":"tkinter_Frame_Type.py","file_name":"tkinter_Frame_Type.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"330029804","text":"import os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport astropy.units as u\nfrom astropy.table import Table, Column\nfrom gammapy.spectrum.models import PowerLaw\nfrom gammapy.stats import significance_on_off\n\nfrom .utils import save_obj, load_obj, plot_hist\n\n__all__ = [\"CutsOptimisation\", \"CutsDiagnostic\", \"CutsApplicator\"]\n\n\nclass CutsApplicator:\n \"\"\"\n Apply best cut and angular cut to events.\n\n Apply cuts to gamma, proton and electrons that will be further used for\n performance estimation (irf, sensitivity, etc.).\n\n Parameters\n ----------\n config: `dict`\n Configuration file\n outdir: `str`\n Output directory where analysis results is saved\n evt_dict: `dict`\n Dictionary of `pandas.DataFrame`\n \"\"\"\n\n def __init__(self, config, evt_dict, outdir):\n self.config = config\n self.evt_dict = evt_dict\n self.outdir = outdir\n\n # Read table with cuts\n self.table = Table.read(\n os.path.join(\n outdir, \"{}.fits\".format(config[\"general\"][\"output_table_name\"])\n ),\n format=\"fits\",\n )\n\n def apply_cuts(self, debug):\n \"\"\"\n Flag particle types passing either the angular cut or the best cutoff\n and the save the data\n \"\"\"\n\n for particle in self.evt_dict.keys():\n data = self.apply_cuts_on_data(self.evt_dict[particle].copy(), debug)\n data.to_hdf(\n os.path.join(self.outdir, \"{}_processed.h5\".format(particle)),\n key=\"dl2\",\n mode=\"w\",\n )\n\n # update the particle tables to make the IRFs\n self.evt_dict[particle] = data\n\n def apply_cuts_on_data(self, data, debug):\n \"\"\"\n Flag particle passing angular cut and the best cutoff\n\n Parameters\n ----------\n data: `pandas.DataFrame`\n Data set corresponding to one type of particle\n \"\"\"\n\n # in order not to throw away this part of code I now convert the\n # astropy table \"back\" to a Pandas dataframe like in protopipe.perf\n data = 
data.to_pandas()\n\n # Add columns with False initialisation\n data[\"pass_best_cutoff\"] = np.zeros(len(data), dtype=bool)\n data[\"pass_angular_cut\"] = np.zeros(len(data), dtype=bool)\n\n # colname_reco_energy = self.config[\"column_definition\"][\"reco_energy\"]\n colname_reco_energy = \"ENERGY\"\n # colname_clf_output = self.config[\"column_definition\"][\"classification_output\"][\n # \"name\"\n # ]\n # colname_angular_dist = self.config[\"column_definition\"][\n # \"angular_distance_to_the_src\"\n # ]\n\n # IN GADF THE LAST 2 SHOULD ALWAYS BE\n colname_clf_output = \"EVENT_TYPE\"\n colname_angular_dist = \"THETA\"\n\n # Loop over energy bins and apply cutoff for each slice\n table = self.table[np.where(self.table[\"keep\"].data)[0]]\n for info in table:\n\n if debug:\n print(\n \"Processing bin [{:.3f},{:.3f}]... (cut={:.3f}, theta={:.3f})\".format(\n info[\"emin\"],\n info[\"emax\"],\n info[\"best_cutoff\"],\n info[\"angular_cut\"],\n )\n )\n\n # Best cutoff\n data.loc[\n (data[colname_reco_energy] >= info[\"emin\"])\n & (data[colname_reco_energy] < info[\"emax\"])\n & (data[colname_clf_output] >= info[\"best_cutoff\"]),\n [\"pass_best_cutoff\"],\n ] = True\n # Angular cut\n data.loc[\n (data[colname_reco_energy] >= info[\"emin\"])\n & (data[colname_reco_energy] < info[\"emax\"])\n & (data[colname_angular_dist] <= info[\"angular_cut\"]),\n [\"pass_angular_cut\"],\n ] = True\n\n # Handle events which are not in energy range\n # Best cutoff\n data.loc[\n (data[colname_reco_energy] < table[\"emin\"][0])\n & (data[colname_clf_output] >= table[\"best_cutoff\"][0]),\n [\"pass_best_cutoff\"],\n ] = True\n data.loc[\n (data[colname_reco_energy] >= table[\"emin\"][-1])\n & (data[colname_clf_output] >= table[\"best_cutoff\"][-1]),\n [\"pass_best_cutoff\"],\n ] = True\n # Angular cut\n data.loc[\n (data[colname_reco_energy] < table[\"emin\"][0])\n & (data[colname_angular_dist] <= table[\"angular_cut\"][0]),\n [\"pass_angular_cut\"],\n ] = True\n data.loc[\n (data[colname_reco_energy] >= table[\"emin\"][-1])\n & (data[colname_angular_dist] <= table[\"angular_cut\"][-1]),\n [\"pass_angular_cut\"],\n ] = True\n\n return data\n\n\nclass CutsDiagnostic:\n \"\"\"\n Class used to get some diagnostic related to the optimal working point.\n\n Parameters\n ----------\n config: `dict`\n Configuration file\n indir: `str`\n Output directory where analysis results is located\n \"\"\"\n\n def __init__(self, config, indir):\n self.config = config\n self.indir = indir\n self.outdir = os.path.join(indir, \"diagnostic\")\n if not os.path.exists(self.outdir):\n os.makedirs(self.outdir)\n self.table = Table.read(\n os.path.join(\n indir, \"{}.fits\".format(config[\"general\"][\"output_table_name\"])\n ),\n format=\"fits\",\n )\n\n self.clf_output_bounds = self.config[\"column_definition\"][\n \"classification_output\"\n ][\"range\"]\n\n def plot_optimisation_summary(self):\n \"\"\"Plot efficiencies and angular cut as a function of energy bins\"\"\"\n plt.figure(figsize=(5, 5))\n ax = plt.gca()\n t = self.table[np.where(self.table[\"keep\"].data)[0]]\n\n ax.plot(\n np.sqrt(t[\"emin\"] * t[\"emax\"]),\n t[\"eff_sig\"],\n color=\"blue\",\n marker=\"o\",\n label=\"Signal\",\n )\n ax.plot(\n np.sqrt(t[\"emin\"] * t[\"emax\"]),\n t[\"eff_bkg\"],\n color=\"red\",\n marker=\"o\",\n label=\"Background (p+e)\",\n )\n ax.grid(which=\"both\")\n ax.set_xlabel(\"Reco energy [TeV]\")\n ax.set_ylabel(\"Efficiencies\")\n ax.set_xscale(\"log\")\n ax.set_ylim([0.0, 1.1])\n\n ax_th = ax.twinx()\n ax_th.plot(\n np.sqrt(t[\"emin\"] * 
t[\"emax\"]),\n t[\"angular_cut\"],\n color=\"darkgreen\",\n marker=\"s\",\n )\n ax_th.set_ylabel(\"Angular cut [deg]\", color=\"darkgreen\")\n ax_th.tick_params(\n \"y\", colors=\"darkgreen\",\n )\n ax_th.set_ylim([0.0, 0.5])\n\n ax.legend(loc=\"upper left\")\n\n plt.tight_layout()\n plt.savefig(os.path.join(self.outdir, \"efficiencies.pdf\"))\n\n return ax\n\n def plot_diagnostics(self):\n \"\"\"Plot efficiencies and rates as a function of score\"\"\"\n\n for info in self.table[np.where(self.table[\"keep\"].data)[0]]:\n obj_name = \"diagnostic_data_emin{:.3f}_emax{:.3f}.pkl.gz\".format(\n info[\"emin\"], info[\"emax\"]\n )\n data = load_obj(os.path.join(self.outdir, obj_name))\n\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))\n ax_eff = axes[0]\n ax_rate = axes[1]\n\n ax_eff = self.plot_efficiencies_vs_score(ax_eff, data, info)\n ax_rate = self.plot_rates_vs_score(\n ax_rate, data, info, self.config[\"analysis\"][\"obs_time\"][\"unit\"]\n )\n\n ax_eff.set_xlim(self.clf_output_bounds)\n ax_rate.set_xlim(self.clf_output_bounds)\n # print('JLK HAAAAACCCCKKKKKKK!!!!')\n # ax_eff.set_xlim(-0.5, 0.5)\n # ax_rate.set_xlim(-0.5, 0.5)\n\n plt.tight_layout()\n plt.savefig(\n os.path.join(\n self.outdir,\n \"diagnostic_{:.2f}_{:.2f}TeV.pdf\".format(\n info[\"emin\"], info[\"emax\"]\n ),\n )\n )\n\n @classmethod\n def plot_efficiencies_vs_score(cls, ax, data, info):\n \"\"\"Plot efficiencies as a function of score\"\"\"\n ax.plot(data[\"score\"], data[\"hist_eff_sig\"], color=\"blue\", label=\"Signal\", lw=2)\n\n ax.plot(\n data[\"score\"],\n data[\"hist_eff_bkg\"],\n color=\"red\",\n label=\"Background (p+e)\",\n lw=2,\n )\n\n ax.plot(\n [info[\"best_cutoff\"], info[\"best_cutoff\"]],\n [0, 1.1],\n ls=\"--\",\n lw=2,\n color=\"darkgreen\",\n label=\"Best cutoff\",\n )\n\n ax.set_xlabel(\"Score\")\n ax.set_ylabel(\"Efficiencies\")\n ax.set_ylim([0.0, 1.1])\n ax.grid(which=\"both\")\n ax.legend(loc=\"lower left\", framealpha=1)\n return ax\n\n @classmethod\n def plot_rates_vs_score(cls, ax, data, info, time_unit):\n \"\"\"Plot rates as a function of score\"\"\"\n scale = info[\"min_flux\"]\n\n opt = {\n \"edgecolor\": \"blue\",\n \"color\": \"blue\",\n \"label\": \"Excess in ON region\",\n \"alpha\": 0.2,\n \"fill\": True,\n \"ls\": \"-\",\n \"lw\": 1,\n }\n error_kw = dict(ecolor=\"blue\", lw=1, capsize=1, capthick=1, alpha=1)\n ax = plot_hist(\n ax=ax,\n data=(data[\"cumul_excess\"] * scale)\n / (info[\"obs_time\"] * u.Unit(time_unit).to(\"s\")),\n edges=data[\"score_edges\"],\n norm=False,\n yerr=False,\n error_kw=error_kw,\n hist_kwargs=opt,\n )\n\n opt = {\n \"edgecolor\": \"red\",\n \"color\": \"red\",\n \"label\": \"Bkg in ON region\",\n \"alpha\": 0.2,\n \"fill\": True,\n \"ls\": \"-\",\n \"lw\": 1,\n }\n error_kw = dict(ecolor=\"red\", lw=1, capsize=1, capthick=1, alpha=1)\n ax = plot_hist(\n ax=ax,\n data=data[\"cumul_noff\"]\n * info[\"alpha\"]\n / (info[\"obs_time\"] * u.Unit(time_unit).to(\"s\")),\n edges=data[\"score_edges\"],\n norm=False,\n yerr=False,\n error_kw=error_kw,\n hist_kwargs=opt,\n )\n\n ax.plot(\n [info[\"best_cutoff\"], info[\"best_cutoff\"]],\n [0, 1.1],\n ls=\"--\",\n lw=2,\n color=\"darkgreen\",\n label=\"Best cutoff\",\n )\n\n max_rate_p = (\n data[\"cumul_noff\"]\n * info[\"alpha\"]\n / (info[\"obs_time\"] * u.Unit(time_unit).to(\"s\"))\n ).max()\n max_rate_g = (\n data[\"cumul_excess\"] / (info[\"obs_time\"] * u.Unit(time_unit).to(\"s\"))\n ).max()\n\n scaled_rate = max_rate_g * scale\n max_rate = scaled_rate if scaled_rate >= max_rate_p else 
max_rate_p\n\n ax.set_ylim([0.0, max_rate * 1.15])\n ax.set_ylabel(\"Rates [HZ]\")\n ax.set_xlabel(\"Score\")\n ax.grid(which=\"both\")\n ax.legend(loc=\"upper right\", framealpha=1)\n\n ax.text(\n 0.52,\n 0.35,\n CutsDiagnostic.get_text(info),\n horizontalalignment=\"left\",\n verticalalignment=\"bottom\",\n multialignment=\"left\",\n bbox=dict(facecolor=\"white\", alpha=0.5),\n transform=ax.transAxes,\n )\n return ax\n\n @classmethod\n def get_text(cls, info):\n \"\"\"Returns a text summarising the optimisation result\"\"\"\n text = \"E in [{:.2f},{:.2f}] TeV\\n\".format(info[\"emin\"], info[\"emax\"])\n text += \"Theta={:.2f} deg\\n\".format(info[\"angular_cut\"])\n text += \"Best cutoff:\\n\"\n text += \"-min_flux={:.2f} Crab\\n\".format(info[\"min_flux\"])\n text += \"-score={:.2f}\\n\".format(info[\"best_cutoff\"])\n text += \"-non={:.2f}\\n\".format(info[\"non\"])\n text += \"-noff={:.2f}\\n\".format(info[\"noff\"])\n text += \"-alpha={:.2f}\\n\".format(info[\"alpha\"])\n text += \"-excess={:.2f}\".format(info[\"excess\"])\n if info[\"systematic\"] is True:\n text += \"(syst.!)\\n\"\n else:\n text += \"\\n\"\n text += \"-nbkg={:.2f}\\n\".format(info[\"background\"])\n text += \"-sigma={:.2f} (Li & Ma)\".format(info[\"sigma\"])\n\n return text\n\n\nclass CutsOptimisation:\n \"\"\"\n Class used to find best cutoff to obtain minimal\n sensitivity in a given amount of time.\n\n Parameters\n ----------\n config: `dict`\n Configuration file\n evt_dict: `dict`\n Dictionary of `pandas` files\n \"\"\"\n\n def __init__(self, config, evt_dict, verbose_level=0):\n self.config = config\n self.evt_dict = evt_dict\n self.verbose_level = verbose_level\n\n def weight_events(self, model_dict, colname_mc_energy):\n \"\"\"\n Add a weight column to the files, in order to scale simulated data to reality.\n\n Parameters\n ----------\n model_dict: dict\n Dictionary of models\n colname_mc_energy: str\n Column name for the true energy\n \"\"\"\n for particle in self.evt_dict.keys():\n self.evt_dict[particle][\"weight\"] = self.compute_weight(\n energy=self.evt_dict[particle][colname_mc_energy] * u.TeV,\n particle=particle,\n model=model_dict[particle],\n )\n\n def compute_weight(self, energy, particle, model):\n \"\"\"\n Weight particles, according to: [phi_exp(E) / phi_simu(E)] * (t_obs / t_simu)\n where E is the true energy of the particles\n \"\"\"\n conf_part = self.config[\"particle_information\"][particle]\n\n area_simu = (np.pi * conf_part[\"gen_radius\"] ** 2) * u.Unit(\"m2\")\n\n omega_simu = (\n 2 * np.pi * (1 - np.cos(conf_part[\"diff_cone\"] * np.pi / 180.0)) * u.sr\n )\n if particle in \"gamma\": # Gamma are point-like\n omega_simu = 1.0\n\n nsimu = conf_part[\"n_simulated\"]\n index_simu = conf_part[\"gen_gamma\"]\n emin = conf_part[\"e_min\"] * u.TeV\n emax = conf_part[\"e_max\"] * u.TeV\n amplitude = 1.0 * u.Unit(\"1 / (cm2 s TeV)\")\n pwl_integral = PowerLaw(index=index_simu, amplitude=amplitude).integral(\n emin=emin, emax=emax\n )\n\n tsimu = nsimu / (area_simu * omega_simu * pwl_integral)\n tobs = self.config[\"analysis\"][\"obs_time\"][\"value\"] * u.Unit(\n self.config[\"analysis\"][\"obs_time\"][\"unit\"]\n )\n\n phi_simu = amplitude * (energy / (1 * u.TeV)) ** (-index_simu)\n\n if particle in \"proton\":\n phi_exp = model(energy, \"proton\")\n elif particle in \"electron\":\n phi_exp = model(energy, \"electron\")\n elif particle in \"gamma\":\n phi_exp = model(energy)\n else:\n print(\"oups...\")\n\n return ((tobs / tsimu) * (phi_exp / phi_simu)).decompose()\n\n def 
find_best_cutoff(self, energy_values, angular_values):\n \"\"\"\n Find best cutoff to reach the best sensitivity. Optimisation is done as a function\n of energy and theta square cut. Correct the number of events\n according to the ON region which correspond to the angular cut applied to\n the gamma-ray events.\n\n Parameters\n ----------\n energy_values: `astropy.Quantity`\n Energy bins\n angular_values: `astropy.Quantity`\n Angular cuts\n \"\"\"\n self.results_dict = dict()\n # colname_reco_energy = self.config[\"column_definition\"][\"reco_energy\"]\n # colname_reco_energy = \"ENERGY\"\n clf_output_bounds = self.config[\"column_definition\"][\"classification_output\"][\n \"range\"\n ]\n # colname_angular_dist = self.config[\"column_definition\"][\n # \"angular_distance_to_the_src\"\n # ]\n thsq_opt_type = self.config[\"analysis\"][\"thsq_opt\"][\"type\"]\n\n # Loop on energy\n for ibin in range(len(energy_values) - 1):\n emin = energy_values[ibin]\n emax = energy_values[ibin + 1]\n print(\" ==> {}) Working in E=[{:.3f},{:.3f}]\".format(ibin, emin, emax))\n\n # OLDER PANDAS EQUIVALENT FROM PROTOPIPE\n\n # Apply cuts (energy and additional if there is)\n # query_emin = \"{} > {}\".format(colname_reco_energy, emin.value)\n # query_emax = \"{} <= {}\".format(colname_reco_energy, emax.value)\n # energy_query = \"{} and {}\".format(query_emin, query_emax)\n\n # g = self.evt_dict[\"gamma\"].query(energy_query).copy()\n # p = self.evt_dict[\"proton\"].query(energy_query).copy()\n # e = self.evt_dict[\"electron\"].query(energy_query).copy()\n\n # Apply cuts (energy and additional if there is)\n energy_selected = dict()\n for particle in [\"gamma\", \"proton\", \"electron\"]:\n energy = self.evt_dict[particle][\"ENERGY\"]\n mask_energy = (energy > emin.value) & (energy <= emax.value)\n energy_selected[particle] = self.evt_dict[particle][mask_energy].copy()\n\n g = energy_selected[\"gamma\"]\n p = energy_selected[\"proton\"]\n e = energy_selected[\"electron\"]\n\n if self.verbose_level > 0:\n print(\n \"Total evts for optimisation: Ng={}, Np={}, Ne={}\".format(\n len(g), len(p), len(e)\n )\n )\n\n min_stat = 100\n if len(g) <= min_stat or len(p) <= min_stat or len(e) <= min_stat:\n print(\"Not enough statistics\")\n print(\" g={}, p={} e={}\".format(len(g), len(p), len(e)))\n key = CutsOptimisation._get_energy_key(emin, emax)\n self.results_dict[key] = {\n \"emin\": emin.value,\n \"emax\": emax.value,\n \"keep\": False,\n }\n continue\n\n # To store intermediate results\n results_th_cut_dict = dict()\n\n theta_to_loop_on = angular_values\n if thsq_opt_type in \"r68\":\n theta_to_loop_on = [angular_values[ibin]]\n\n # Loop on angular cut\n for th_cut in theta_to_loop_on:\n if self.verbose_level > 0:\n print(\"- Theta={:.2f}\".format(th_cut))\n\n # Select gamma-rays in ON region\n\n # OLD PANDAS VERSION FROM PROTOPIPE\n # th_query = \"{} <= {}\".format(colname_angular_dist, th_cut.value)\n # sel_g = g.query(th_query).copy()\n\n # ASTROPY VERSION\n mask_theta = g[\"THETA\"] <= th_cut.value\n sel_g = g[mask_theta].copy()\n\n # Correct number of background due to acceptance\n acceptance_g = 2 * np.pi * (1 - np.cos(th_cut.to(\"rad\").value))\n acceptance_p = (\n 2\n * np.pi\n * (\n 1\n - np.cos(\n self.config[\"particle_information\"][\"proton\"][\"offset_cut\"]\n * u.deg.to(\"rad\")\n )\n )\n )\n acceptance_e = (\n 2\n * np.pi\n * (\n 1\n - np.cos(\n self.config[\"particle_information\"][\"electron\"][\n \"offset_cut\"\n ]\n * u.deg.to(\"rad\")\n )\n )\n )\n\n # Add corrected weight taking into account 
the angular cuts\n # that have been applied to gamma-rays\n sel_g[\"weight_corrected\"] = sel_g[\"weight\"]\n p[\"weight_corrected\"] = p[\"weight\"] * acceptance_g / acceptance_p\n e[\"weight_corrected\"] = e[\"weight\"] * acceptance_g / acceptance_e\n\n # Get binned data as a function of score\n binned_data = self.get_binned_data(\n sel_g, p, e, nbins=2000, score_range=clf_output_bounds\n )\n\n # Get re-binned data as a function of score for diagnostic plots\n re_binned_data = self.get_binned_data(\n sel_g, p, e, nbins=200, score_range=clf_output_bounds\n )\n\n # Get optimisation results\n results_th_cut_dict[CutsOptimisation._get_angular_key(th_cut.value)] = {\n \"th_cut\": th_cut,\n \"result\": self.find_best_cutoff_for_one_bin(\n binned_data=binned_data\n ),\n \"diagnostic_data\": re_binned_data,\n }\n\n # Select best theta cut (lowest flux).\n # In case of equality, select the one with the highest signal\n # efficiency (flux are sorted as a function of decreasing signal\n # efficiencies).\n flux_list = []\n eff_sig = []\n th = []\n key_list = []\n for key in results_th_cut_dict:\n key_list.append(key)\n flux_list.append((results_th_cut_dict[key][\"result\"][\"min_flux\"]))\n eff_sig.append((results_th_cut_dict[key][\"result\"][\"eff_sig\"]))\n th.append(results_th_cut_dict[key][\"th_cut\"])\n\n # In case of equal min fluxes, take the one with bigger sig efficiency\n lower_flux_idx = np.where(np.array(flux_list) == np.array(flux_list).min())[\n 0\n ][0]\n\n if self.verbose_level > 0:\n print(\n \"Select th={:.3f}, cutoff={:.3f} (eff_sig={:.3f}, eff_bkg={:.3f}, flux={:.3f}, syst={})\".format(\n results_th_cut_dict[key_list[lower_flux_idx]][\"th_cut\"],\n results_th_cut_dict[key_list[lower_flux_idx]][\"result\"][\n \"best_cutoff\"\n ],\n results_th_cut_dict[key_list[lower_flux_idx]][\"result\"][\n \"eff_sig\"\n ],\n results_th_cut_dict[key_list[lower_flux_idx]][\"result\"][\n \"eff_bkg\"\n ],\n results_th_cut_dict[key_list[lower_flux_idx]][\"result\"][\n \"min_flux\"\n ],\n results_th_cut_dict[key_list[lower_flux_idx]][\"result\"][\n \"systematic\"\n ],\n )\n )\n\n key = CutsOptimisation._get_energy_key(emin.value, emax.value)\n self.results_dict[key] = {\n \"emin\": emin.value,\n \"emax\": emax.value,\n \"obs_time\": self.config[\"analysis\"][\"obs_time\"][\"value\"],\n \"th_cut\": results_th_cut_dict[key_list[lower_flux_idx]][\"th_cut\"].value,\n \"keep\": True,\n \"results\": results_th_cut_dict[key_list[lower_flux_idx]][\"result\"],\n \"diagnostic_data\": results_th_cut_dict[key_list[lower_flux_idx]][\n \"diagnostic_data\"\n ],\n }\n\n print(\n \" Ang. 
cut: {:.2f}, score cut: {}\".format(\n self.results_dict[key][\"th_cut\"],\n self.results_dict[key][\"results\"][\"best_cutoff\"],\n )\n )\n\n def find_best_cutoff_for_one_bin(self, binned_data):\n \"\"\"\n Find the best cut off for one bin os the phase space\n \"\"\"\n alpha = self.config[\"analysis\"][\"alpha\"]\n\n # Scan eff_bkg efficiency (going from 0.05 to 0.5, 10 bins as in MARS analysis)\n fixed_bkg_eff = np.linspace(0.05, 0.5, 15)\n\n # Find corresponding indexes\n fixed_bkg_eff_indexes = np.zeros(len(fixed_bkg_eff), dtype=int)\n for idx in range(len(fixed_bkg_eff)):\n the_idx = (\n np.abs(binned_data[\"hist_eff_bkg\"] - fixed_bkg_eff[idx])\n ).argmin()\n fixed_bkg_eff_indexes[idx] = the_idx\n\n # Will contain\n minimal_fluxes = np.zeros(len(fixed_bkg_eff))\n minimal_sigma = np.zeros(len(fixed_bkg_eff))\n minimal_syst = np.zeros(len(fixed_bkg_eff), dtype=bool)\n minimal_excess = np.zeros(len(fixed_bkg_eff))\n\n for iflux in range(len(minimal_fluxes)):\n\n excess = binned_data[\"cumul_excess\"][fixed_bkg_eff_indexes][iflux]\n n_bkg = binned_data[\"cumul_noff\"][fixed_bkg_eff_indexes][iflux] * alpha\n effsig = binned_data[\"hist_eff_sig\"][fixed_bkg_eff_indexes][iflux]\n effbkg = binned_data[\"hist_eff_bkg\"][fixed_bkg_eff_indexes][iflux]\n score = binned_data[\"score\"][fixed_bkg_eff_indexes][iflux]\n minimal_syst[iflux] = False\n\n if n_bkg == 0:\n if self.verbose_level > 0:\n print(\"Warning> To be dealt with\")\n pass\n\n minimal_fluxes[iflux], minimal_sigma[iflux] = self._get_sigma_flux(\n excess, n_bkg, alpha, self.config[\"analysis\"][\"min_sigma\"]\n )\n minimal_excess[iflux] = minimal_fluxes[iflux] * excess\n\n if self.verbose_level > 1:\n print(\n \"eff_bkg={:.2f}, eff_sig={:.2f}, score={:.2f}, excess={:.2f}, bkg={:.2f}, min_flux={:.3f}, sigma={:.3f}\".format(\n effbkg,\n effsig,\n score,\n minimal_excess[iflux],\n n_bkg,\n minimal_fluxes[iflux],\n minimal_sigma[iflux],\n )\n )\n\n if minimal_excess[iflux] < self.config[\"analysis\"][\"min_excess\"]:\n minimal_syst[iflux] = True\n # Rescale flux accodring to minimal acceptable excess\n minimal_fluxes[iflux] = self.config[\"analysis\"][\"min_excess\"] / excess\n minimal_excess[iflux] = self.config[\"analysis\"][\"min_excess\"]\n if self.verbose_level > 1:\n print(\" WARNING> Not enough signal!\")\n\n if minimal_excess[iflux] < self.config[\"analysis\"][\"bkg_syst\"] * n_bkg:\n minimal_syst[iflux] = True\n minimal_fluxes[iflux] = (\n self.config[\"analysis\"][\"bkg_syst\"] * n_bkg / excess\n )\n if self.verbose_level > 1:\n print(\" WARNING> Bkg systematics!\")\n\n # In case of equal min fluxes, take the one with bigger sig efficiency\n # (last value)\n opti_cut_index = np.where(minimal_fluxes == minimal_fluxes.min())[0][-1]\n min_flux = minimal_fluxes[opti_cut_index]\n min_sigma = minimal_sigma[opti_cut_index]\n min_excess = minimal_excess[opti_cut_index]\n min_syst = minimal_syst[opti_cut_index]\n\n best_cut_index = fixed_bkg_eff_indexes[opti_cut_index] # for fine binning\n\n return {\n \"best_cutoff\": binned_data[\"score\"][best_cut_index],\n \"noff\": binned_data[\"cumul_noff\"][best_cut_index],\n \"background\": binned_data[\"cumul_noff\"][best_cut_index] * alpha,\n \"non\": binned_data[\"cumul_excess\"][best_cut_index] * min_flux\n + binned_data[\"cumul_noff\"][best_cut_index] * alpha,\n \"alpha\": alpha,\n \"eff_sig\": binned_data[\"hist_eff_sig\"][best_cut_index],\n \"eff_bkg\": binned_data[\"hist_eff_bkg\"][best_cut_index],\n \"min_flux\": min_flux,\n \"excess\": min_excess,\n \"sigma\": min_sigma,\n 
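# True when the min-excess or background-systematics floor overrode the significance-driven flux\n            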
\"systematic\": min_syst,\n }\n\n @classmethod\n def _get_sigma_flux(cls, excess, bkg, alpha, min_sigma):\n \"\"\"Compute flux to get `min_sigma` sigma detection. Returns fraction\n of minimal flux and the resulting signifiance\"\"\"\n\n # Gross binning\n flux_level = np.arange(0.0, 10, 0.01)[1:]\n sigma = significance_on_off(\n n_on=excess * flux_level + bkg,\n n_off=bkg / alpha,\n alpha=alpha,\n method=\"lima\",\n )\n\n the_idx = (np.abs(sigma - min_sigma)).argmin()\n min_flux = flux_level[the_idx]\n\n # Fine binning\n flux_level = np.arange(min_flux - 0.05, min_flux + 0.05, 0.001)\n sigma = significance_on_off(\n n_on=excess * flux_level + bkg,\n n_off=bkg / alpha,\n alpha=alpha,\n method=\"lima\",\n )\n the_idx = (np.abs(sigma - min_sigma)).argmin()\n\n return flux_level[the_idx], sigma[the_idx]\n\n @classmethod\n def _get_energy_key(cls, emin, emax):\n return \"{:.3f}-{:.3f}TeV\".format(emin, emax)\n\n @classmethod\n def _get_angular_key(cls, ang):\n return \"{:.3f}deg\".format(ang)\n\n def get_binned_data(self, g, p, e, nbins=100, score_range=[-1, 1]):\n \"\"\"Returns binned data as a dictionnary\"\"\"\n # colname_clf_output = self.config[\"column_definition\"][\"classification_output\"][\n # \"name\"\n # ]\n colname_clf_output = \"EVENT_TYPE\"\n\n res = dict()\n # Histogram of events\n res[\"hist_sig\"], edges = np.histogram(\n # a=g[colname_clf_output].values,\n a=g[colname_clf_output],\n bins=nbins,\n range=score_range,\n # weights=g[\"weight_corrected\"].values,\n weights=g[\"weight_corrected\"],\n )\n res[\"hist_p\"], edges = np.histogram(\n # a=p[colname_clf_output].values,\n a=p[colname_clf_output],\n bins=nbins,\n range=score_range,\n # weights=p[\"weight_corrected\"].values,\n weights=p[\"weight_corrected\"],\n )\n res[\"hist_e\"], edges = np.histogram(\n # a=e[colname_clf_output].values,\n a=e[colname_clf_output],\n bins=nbins,\n range=score_range,\n # weights=e[\"weight_corrected\"].values,\n weights=e[\"weight_corrected\"],\n )\n res[\"hist_bkg\"] = res[\"hist_p\"] + res[\"hist_e\"]\n res[\"score\"] = (edges[:-1] + edges[1:]) / 2.0\n res[\"score_edges\"] = edges\n\n # Efficiencies\n res[\"hist_eff_sig\"] = 1.0 - np.cumsum(res[\"hist_sig\"]) / np.sum(res[\"hist_sig\"])\n res[\"hist_eff_bkg\"] = 1.0 - np.cumsum(res[\"hist_bkg\"]) / np.sum(res[\"hist_bkg\"])\n\n # Cumulative statistics\n alpha = self.config[\"analysis\"][\"alpha\"]\n res[\"cumul_noff\"] = res[\"hist_eff_bkg\"] * sum(res[\"hist_bkg\"]) / alpha\n res[\"cumul_excess\"] = sum(res[\"hist_sig\"]) - np.cumsum(res[\"hist_sig\"])\n res[\"cumul_non\"] = res[\"cumul_excess\"] + res[\"cumul_noff\"] * alpha\n res[\"cumul_sigma\"] = significance_on_off(\n n_on=res[\"cumul_non\"], n_off=res[\"cumul_noff\"], alpha=alpha, method=\"lima\"\n )\n\n return res\n\n def write_results(self, outdir, outfile, format, overwrite=True):\n \"\"\"Write results with astropy utilities\"\"\"\n # Declare and initialise vectors to save\n n = len(self.results_dict)\n feature_to_save = [\n (\"best_cutoff\", float),\n (\"non\", float),\n (\"noff\", float),\n (\"alpha\", float),\n (\"background\", float),\n (\"excess\", float),\n (\"eff_sig\", float),\n (\"eff_bkg\", float),\n (\"systematic\", bool),\n (\"min_flux\", float),\n (\"sigma\", float),\n ]\n emin = np.zeros(n)\n emax = np.zeros(n)\n angular_cut = np.zeros(n)\n obs_time = np.zeros(n)\n keep = np.zeros(n, dtype=bool)\n\n res_to_save = dict()\n for feature in feature_to_save:\n res_to_save[feature[0]] = np.zeros(n, dtype=feature[1])\n\n # Fill data and save diagnostic result\n for idx, key 
in enumerate(self.results_dict.keys()):\n bin_info = self.results_dict[key]\n if bin_info[\"keep\"] is False:\n keep[idx] = bin_info[\"keep\"]\n continue\n bin_results = self.results_dict[key][\"results\"]\n bin_data = self.results_dict[key][\"diagnostic_data\"]\n\n keep[idx] = bin_info[\"keep\"]\n emin[idx] = bin_info[\"emin\"]\n emax[idx] = bin_info[\"emax\"]\n angular_cut[idx] = bin_info[\"th_cut\"]\n obs_time[idx] = bin_info[\"obs_time\"]\n for feature in feature_to_save:\n res_to_save[feature[0]][idx] = bin_results[feature[0]]\n\n obj_name = \"diagnostic_data_emin{:.3f}_emax{:.3f}.pkl.gz\".format(\n bin_info[\"emin\"], bin_info[\"emax\"]\n )\n\n diagnostic_dir = os.path.join(outdir, \"diagnostic\")\n if not os.path.exists(diagnostic_dir):\n os.makedirs(diagnostic_dir)\n save_obj(bin_data, os.path.join(diagnostic_dir, obj_name))\n\n # Save data\n t = Table()\n t[\"keep\"] = Column(keep, dtype=bool)\n t[\"emin\"] = Column(emin, unit=\"TeV\")\n t[\"emax\"] = Column(emax, unit=\"TeV\")\n t[\"obs_time\"] = Column(\n obs_time, unit=self.config[\"analysis\"][\"obs_time\"][\"unit\"]\n )\n t[\"angular_cut\"] = Column(angular_cut, unit=\"deg\")\n for feature in feature_to_save:\n t[feature[0]] = Column(res_to_save[feature[0]])\n t.write(os.path.join(outdir, outfile), format=format, overwrite=overwrite)\n","sub_path":"pyirf/perf/cut_optimisation.py","file_name":"cut_optimisation.py","file_ext":"py","file_size_in_byte":33601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"636228094","text":"locator = {\"last_name\":\"name=lastName\",\n\t\t\t\"search\":\"id=search\"\n\t\t}\nst = locator['search']\nst01 = st[:st.index(\"=\")]\n\nfrom selenium.webdriver.common.by import By\n\nprint(By.LINK_TEXT)\n\nsty = \"Herman Wahyudi\"\nprint(sty[3:])\nprint(sty.replace(\"He\", \"Jo\"))\n\nfor i, j in locator.items():\n\tprint(i, j)\ni, j, k = 0, 9, 0\nif(i == j):\n\tprint(\"L\")\nelse:\n\tprint(\"k\") \ns = \"\"\nif(not s):\n\tprint(\"Empty\")\n# build independent rows; [['']*3]*4 would alias the same inner list four times\nar = [['']*3 for _ in range(4)]\nfor i in range(3):\n\tfor j in range(3):\n\t\tar[i][j] = i+j\nprint(ar[0][1])\n\nclass Test:\n\tdef yours(self, move):\n\t\tself.dead, self.fail = 100, True\n\t\twhile move < self.dead:\n\t\t\tif self.fail:\n\t\t\t\tprint(\"Allah is always by your side\")\n\t\t\t\tmove += 1\n\t\treturn move\n\n# main\n\nTest().yours(0)","sub_path":"Python/testLocator.py","file_name":"testLocator.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"486450365","text":"import pathlib\n\n\nclass CarListCommand:\n @staticmethod\n def build_argparse(subparsers):\n carlist_cmd = subparsers.add_parser('carlist', help='Manipulate car makes & models list')\n carlist_cmd.set_defaults(func=lambda _: carlist_cmd.print_help())\n carlist_subparsers = carlist_cmd.add_subparsers()\n\n def update(ctx):\n ctx.modify_static = True\n ctx.car_make_model_svc.load_car_list(ctx.ns.input)\n\n carlist_update_cmd = carlist_subparsers.add_parser('update', help='Load car makes & models from json file to '\n 'the database')\n carlist_update_cmd.set_defaults(func=update)\n carlist_update_cmd.add_argument('--input', '-i', type=pathlib.Path, help='Input json file', metavar='path')\n\n carlist_show_cmd = carlist_subparsers.add_parser('show')\n carlist_show_cmd.set_defaults(func=lambda ctx: 
ctx.car_make_model_svc.show_car_list())\n\n","sub_path":"src/carscanner/cli/cmd_car_list.py","file_name":"cmd_car_list.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"273047010","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val, prev, next, child):\n self.val = val\n self.prev = prev\n self.next = next\n self.child = child\n\"\"\"\nclass Solution:\n def flatten(self, head: 'Node') -> 'Node':\n if not head:\n return None\n\n\n def _flatten(self, L, row, col):\n # base case: last row, return the remainder of it\n if row + 1 == len(L):\n return L[row][col:]\n\n L1, L2 = L[row], L[row+1]\n for i, j in enumerate(range(0, len(L2))):\n if L2[i]:\n l = self._flatten(L, row + 1, i)\n return L1[col:col+i+1] + l + L1[col+i+1:]\n\n\n\n def break_list(self, L):\n\n results = []\n value_seen = False\n result = []\n\n for i in L:\n if not i and value_seen:\n results.append(result)\n value_seen = False\n result = []\n else:\n result.append(i)\n value_seen = bool(i)\n if result:\n results.append(result)\n return results\n\n\nif __name__ == '__main__':\n #print(Solution().break_list(\n # [1,2,None,3]))\n print(Solution().break_list(\n [1,2,3,4,5,6,None,None,None,7,8,9,10,None,None,11,12, None]))\n\n #l1 = (Solution().break_list(\n # [1,2,None,3]))\n #print(Solution()._flatten(l1, 0, 0))\n l1 = (Solution().break_list(\n [1,2,3,4,5,6,None,None,None,7,8,9,10,None,None,11,12, None]))\n print(Solution()._flatten(l1, 0, 0))\n","sub_path":"leetcode/430_flatten_multi.py","file_name":"430_flatten_multi.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"297827697","text":"#!/usr/bin/env python\n\nfrom waflib.Tools import waf_unit_test\n\ndef options(opt):\n\tgrp = opt.add_option_group('filesystem_stdio options')\n\n\tgrp.add_option('--enable-fs-tests', action='store_true', dest = 'FS_TESTS', default = False,\n\t\thelp = 'enable filesystem_stdio tests')\n\ndef configure(conf):\n\tnortti = {\n\t\t'msvc': ['/GR-'],\n\t\t'default': ['-fno-rtti', '-fno-exceptions']\n\t}\n\tconf.env.append_unique('CXXFLAGS', conf.get_flags_by_compiler(nortti, conf.env.COMPILER_CC))\n\n\tconf.env.FS_TESTS = conf.options.FS_TESTS\n\n\tif conf.env.DEST_OS != 'android':\n\t\tif conf.env.cxxshlib_PATTERN.startswith('lib'):\n\t\t\tconf.env.cxxshlib_PATTERN = conf.env.cxxshlib_PATTERN[3:]\n\ndef build(bld):\n\tbld(name = 'filesystem_includes', export_includes = '.')\n\tbld.shlib(target = 'filesystem_stdio',\n\t\tfeatures = 'cxx',\n\t\tsource = bld.path.ant_glob(['*.c', '*.cpp']),\n\t\tuse = 'filesystem_includes public',\n\t\tinstall_path = bld.env.LIBDIR,\n\t\tsubsystem = bld.env.MSVC_SUBSYSTEM)\n\n\tif bld.env.FS_TESTS:\n\t\t# build in same module, so dynamic linking will work\n\t\t# for now (until we turn libpublic to shared module lol)\n\t\tbld.program(features = 'test',\n\t\t\tsource = 'tests/caseinsensitive.c',\n\t\t\ttarget = 'test_caseinsensitive',\n\t\t\tuse = 'filesystem_includes public DL',\n\t\t\trpath = '$ORIGIN',\n\t\t\tsubsystem = bld.env.CONSOLE_SUBSYSTEM,\n\t\t\tinstall_path = None)\n\t\tbld.add_post_fun(waf_unit_test.summary)\n\t\tbld.add_post_fun(waf_unit_test.set_exit_code)\n","sub_path":"filesystem/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"436341042","text":"# Find the most frequently observed bird species; if two or more species are 
tied,\n# return the id of the lowest-numbered species.\n\nfrom collections import Counter\n\n\ndef migratoryBirds(arr):\n count_dict = Counter(arr)\n max_freq = max(count_dict.values())\n birdlist = list()\n for bird, freq in count_dict.items():\n if freq == max_freq:\n birdlist.append(bird)\n return min(birdlist)\n\n\n# arr = [1, 1, 2, 2, 3]\narr = [\n 1,\n 2,\n 3,\n 4,\n 5,\n 4,\n 3,\n 2,\n]\nprint(migratoryBirds(arr))\n","sub_path":"기타/HackerRank/finished/MigatoryBirds.py","file_name":"MigatoryBirds.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"208713483","text":"# -*- coding: utf-8 -*- \nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponse, JsonResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.db.models import Q, Max, Min, Case, When, F, Count\nfrom django.template.loader import render_to_string\nfrom django.core.urlresolvers import reverse_lazy\n\nfrom django.views.generic import CreateView, DeleteView, FormView, ListView, UpdateView, DetailView\n\nfrom .models import *\nfrom .forms import SchedulesForm#, TestItemForm\n\nfrom datetime import datetime, timedelta\nimport json\nfrom qrmProject.loginSession import loginRequired\nfrom .utils import *\n\n#########\n# Views #\n#########\n\n# /capa/model \n@loginRequired\ndef modelView(request): \n # Model info view\n template = 'capa/capa_model.html'\n user_info = get_user_info(request)\n\n now = datetime.strftime(datetime.now(),'%Y-%m-%d')\n past = datetime.strftime(datetime.now() - timedelta(days=20), '%Y-%m-%d')\n\n params = request.GET\n\n def get_params(param):\n if param == 'start_date':\n return params.get(param, past)\n if param == 'end_date':\n return params.get(param, now)\n if param == 'product_type':\n return params.get(param)\n return params.get(param, '')\n\n product_type = get_params('product_type')\n product = get_params('product')\n model = get_params('model')\n grade = get_params('grade')\n event = get_params('event')\n test_type = get_params('test_type')\n seq = get_params('seq')\n status = get_params('status')\n start_date = get_params('start_date')\n end_date = get_params('end_date')\n export = get_params('export')\n\n if product_type is None:\n product_type = 'tv' \n\n models = TestModel.objects.filter(req_date__range=[start_date, end_date])\n models = models.exclude(Q(test_type_name__in=EXCLUDE_TEST_TYPE)|\n Q(event__icontains='cskd')|Q(seq=0))\n models = models.filter(Q(product_group_name__icontains=product)&\\\n Q(model__icontains=model)&\\\n Q(grade__icontains=grade)&\\\n Q(event__icontains=event)&\\\n Q(test_type_name__icontains=test_type)&\\\n Q(seq__icontains=seq)&\\\n Q(status__icontains=status)).order_by('-req_no','-plan_start','model','seq')\n \n if product_type == 'etc':\n ex_list = []\n for lst in list(product_type_list.values()):\n ex_list.extend(lst)\n models = models.exclude(Q(product_group_name__in=ex_list))\n elif product_type != 'all':\n models = models.filter(Q(product_group_name__in=product_type_list[product_type]))\n \n context = {\n 'models':models,\n 'user_info':user_info,\n 'product_type':product_type,\n 'product':product,\n 'model':model,\n 'status':status,\n 'grade':grade,\n 'event':event,\n 'test_type':test_type,\n 'seq':seq,\n 'start_date':start_date,\n 'end_date':end_date,\n 'product_list': getFilterList(start_date, end_date, 'model', 'product_group_name'),\n 'event_list': getFilterList(start_date, end_date, 'model', 'event'),\n 'test_type_name_list': 
getFilterList(start_date, end_date, 'model', 'test_type_name'),\n 'status_list': getFilterList(start_date, end_date, 'model', 'status')\n }\n \n if export == '1':\n print(\"excel exporting\")\n return exportExcel(request, models)\n else:\n return render(request, template, context)\n\n# /capa/item\n@loginRequired\ndef itemView(request): \n # Item view\n template = 'capa/capa_item.html'\n \n user_info = get_user_info(request)\n\n now = datetime.strftime(datetime.now(),'%Y-%m-%d')\n past = datetime.strftime(datetime.now() - timedelta(days=20), '%Y-%m-%d')\n \n params = request.GET\n def get_params(param):\n if param == 'start_date':\n return params.get(param, past)\n if param == 'end_date':\n return params.get(param, now)\n return params.get(param, '')\n \n product_type = get_params('product_type')\n # product = get_params('product')\n model = get_params('model')\n event = get_params('event')\n # grade = get_params('grade')\n seq = get_params('seq')\n test_item = get_params('test_item')\n test_type = get_params('test_type')\n judgement = get_params('judgement')\n start_date = get_params('start_date')\n end_date = get_params('end_date')\n export = get_params('export')\n\n tester_kr = params.get('tester_kr')\n if tester_kr == None:\n tester_kr = user_info.get('kr_name')\n \n if not product_type:\n product_type = 'tv'\n\n if judgement == '-':\n judgement_for_filter = 'null'\n else:\n judgement_for_filter = judgement\n\n items = TestItem.objects.filter(Q(testModel__req_date__range=[start_date,end_date]))\n\n items = items.exclude(\n Q(testModel__test_type_name__in=EXCLUDE_TEST_TYPE)|\\\n Q(testModel__event__icontains='cskd')|\\\n Q(testModel_id__seq=0)|\\\n Q(test_item='etc')).filter(\n # Q(division__icontains=division)&\\\n Q(model__icontains=model)&\\\n Q(testModel_id__seq__icontains=seq)&\\\n # Q(testModel_id__product_group_name__icontains=product)&\\\n Q(testModel_id__event__icontains=event)&\\\n Q(testModel_id__test_type_name__icontains=test_type)&\\\n Q(test_item__icontains=test_item)&\\\n Q(tester_kr__icontains=tester_kr))\\\n .order_by('-testModel__plan_start')\n \n if product_type == 'etc':\n ex_list = []\n for lst in list(product_type_list.values()):\n ex_list.extend(lst)\n items = items.exclude(Q(testModel__product_group_name__in=ex_list))\n else:\n if product_type != 'all':\n items = items.filter(Q(testModel__product_group_name__in=product_type_list[product_type]))\n\n context = {\n 'items':items,\n 'user_info':user_info,\n 'product_type':product_type,\n 'model':model,\n # 'product':product,\n 'judgement':judgement,\n # 'grade':grade,\n 'event':event,\n 'seq':seq,\n 'test_item':test_item,\n 'test_type':test_type,\n 'tester_kr':tester_kr,\n 'start_date':start_date,\n 'end_date':end_date,\n 'product_list':getFilterList(start_date, end_date, 'model', 'product_group_name'),\n 'event_list':getFilterList(start_date, end_date, 'model', 'event'),\n 'test_item_list':getFilterList(start_date, end_date, 'item', 'test_item'),\n 'test_type_list':getFilterList(start_date, end_date, 'model', 'test_type_name')\n }\n if export == '1':\n print(\"excel exporting\")\n return exportExcel(request, items)\n else:\n return render(request, template, context)\n\n# /capa/item/ajax\n@loginRequired\ndef dateinput(request):\n\n def getdate(d):\n if d:\n return datetime.strptime(d, '%y-%m-%d %H:%M')\n\n if not request.method == \"POST\":\n return redirect(reverse_lazy('item'))\n\n post_data = request.POST\n pk = post_data.get('pk', '')\n # print(pk)\n # Define parameters\n plan_start = 
post_data.get('plan_start',None)\n plan_end = post_data.get('plan_end',None)\n real_start = post_data.get('real_start',None)\n real_end = post_data.get('real_end',None)\n # st = post_data.get('st',0)\n if not pk:\n return redirect(reverse_lazy('item'))\n\n item = TestItem.objects.get(pk=pk)\n event_type = item.testModel.event\n division = item.division\n time_seq = item.testModel.seq \n grade = item.testModel.grade\n test_item = item.test_item\n dv = ['SW_VAL', 'DV']\n pv = ['SW_QAL', 'PV']\n notDev = ['SW_VAL', 'DV', 'SW_QAL', 'PV']\n\n # Apply the standard time (ST) that matches each condition\n if int(time_seq) > 3:\n time_seq = 3\n if 'DV' in event_type or 'SW_VAL' in event_type:\n seq = SeqTime.objects.filter(event_type='DV')\n elif 'PV' in event_type or 'SW_QAL' in event_type:\n seq = SeqTime.objects.filter(event_type='PV')\n else:\n seq = SeqTime.objects.filter(event_type='DV')\n # seq = SeqTime.objects.exclude(event_type__in=notDev)\n try:\n if item.div_item.lead_time != 0:\n lead_time = item.div_item.lead_time \n else:\n if 'alt' in event_type.lower():\n lead_time = seq.get(leadTime__test_item=test_item, leadTime__division=division, seq=1).grade_alt\n elif 'int' in event_type.lower():\n lead_time = seq.get(leadTime__test_item=test_item, leadTime__division=division, seq=1).grade_int\n elif 'A' in grade or 'S' in grade:\n lead_time = seq.get(leadTime__test_item=test_item, leadTime__division=division, seq=time_seq).grade_a\n elif 'B' in grade:\n lead_time = seq.get(leadTime__test_item=test_item, leadTime__division=division, seq=time_seq).grade_b\n elif 'C' in grade:\n lead_time = seq.get(leadTime__test_item=test_item, leadTime__division=division, seq=time_seq).grade_c\n elif 'D' in grade:\n lead_time = seq.get(leadTime__test_item=test_item, leadTime__division=division, seq=time_seq).grade_d\n else:\n lead_time = 0\n except Exception as e:\n print(e)\n lead_time = 0\n \n if (plan_start and plan_end) or (real_end and real_start):\n real_time = get_time(real_start, real_end)\n # bail out with a JSON error payload instead of returning a bare False\n if real_time is False:\n return HttpResponse(json.dumps({'fill_state': False}), content_type=\"application/json\")\n diff = real_time - lead_time\n else:\n return HttpResponse(json.dumps({'fill_state': False}), content_type=\"application/json\")\n\n # save to DB \n \n # print(item)\n item.plan_start = getdate(plan_start)\n item.plan_end = getdate(plan_end)\n item.real_start = getdate(real_start)\n item.real_end = getdate(real_end)\n item.diff_time = diff\n item.real_time = real_time\n item.lead_time = lead_time\n item.save()\n \n\n # Define isComplete\n if item.real_time:\n isComplete = True\n else:\n isComplete = False\n\n # Define Context\n context = {\n 'plan_start':plan_start,\n 'plan_end':plan_end,\n 'real_start':real_start,\n 'real_end':real_end,\n # 'st':st,\n 'real_time':real_time,\n 'lead_time':lead_time,\n 'diff':diff,\n 'isComplete':isComplete,\n 'fill_state':True\n }\n\n #return Json\n return HttpResponse(json.dumps(context), content_type=\"application/json\")\n\n# /capa/time\n@loginRequired\ndef leadTime(request):\n template = 'capa/leadTime.html'\n user_info = get_user_info(request)\n\n now = datetime.strftime(datetime.now(),'%Y-%m-%d')\n past = datetime.strftime(datetime.now() - timedelta(days=15), '%Y-%m-%d')\n # now = datetime.now()\n # past = now - timedelta(days=15)\n\n division = ''\n \n querySet_leadtime = LeadTime.objects.all()\n # print(request)\n if request.GET:\n try:\n division = request.GET['division']\n test_item = request.GET['test_item']\n export = request.GET['export']\n except:\n test_item = ''\n export = ''\n\n if request.POST:\n post_data = dict(request.POST)\n if post_data:\n del(post_data['csrfmiddlewaretoken'])\n print(post_data)\n lead_time = 
LeadTime.objects.get(pk=post_data['pk'][0])\n lead_time.lead_time = post_data['lead_time'][0]\n lead_time.save()\n times = querySet_leadtime.get(pk=post_data['pk'][0])\n \n for i, seq in enumerate(post_data['seq']):\n time = times.seqtime_set.all().filter(seq=seq).filter(event_type=post_data['event_type'][i])[0]\n time.grade_a = post_data['grade_a'][i]\n time.grade_b = post_data['grade_b'][i]\n time.grade_c = post_data['grade_c'][i]\n time.grade_d = post_data['grade_d'][i]\n time.grade_int = post_data['grade_int'][0]\n time.grade_alt = post_data['grade_alt'][0]\n time.save()\n try:\n division = request.GET['division']\n test_item = request.GET['test_item']\n except Exception as e:\n print(e)\n test_item = ''\n elif request.POST:\n post_data = dict(request.POST)\n if post_data:\n del(post_data['csrfmiddlewaretoken'])\n print(post_data)\n lead_time = LeadTime.objects.get(pk=post_data['pk'][0])\n lead_time.lead_time = post_data['lead_time'][0]\n lead_time.save()\n times = querySet_leadtime.get(pk=post_data['pk'][0])\n \n for i, seq in enumerate(post_data['seq']):\n time = times.seqtime_set.all().filter(seq=seq).filter(event_type=post_data['event_type'][i])[0]\n time.grade_a = post_data['grade_a'][i]\n time.grade_b = post_data['grade_b'][i]\n time.grade_c = post_data['grade_c'][i]\n time.grade_d = post_data['grade_d'][i]\n time.grade_int = post_data['grade_int'][0]\n time.grade_alt = post_data['grade_alt'][0]\n time.save()\n try:\n division = request.GET['division']\n test_item = request.GET['test_item']\n except Exception as e:\n print(e)\n test_item = ''\n else:\n test_item = ''\n export = ''\n \n lead_time = querySet_leadtime.filter(Q(division__icontains=division)&Q(test_item__icontains=test_item)).order_by('test_item')\n context = {\n 'user_info':user_info,\n 'division':division,\n 'lead_time':lead_time,\n 'test_item':test_item,\n }\n try:\n if export == '1':\n print(\"excel exporting\")\n return exportExcel(request, lead_time)\n else:\n return render(request, template, context)\n except:\n return render(request, template, context)\n # return render(request, template, context)\n\n# /capa\n@loginRequired\ndef main(request):\n template = 'capa/capa_main.html' \n\n user_info = get_user_info(request)\n\n now = datetime.strftime(datetime.now() + timedelta(days=7), '%Y-%m-%d')\n past = datetime.strftime(datetime.now() - timedelta(days=14), '%Y-%m-%d')\n\n queryset_models = TestModel.objects.all()\n\n params = request.GET\n def get_params(param):\n if param == 'start_date':\n return params.get(param, past)\n if param == 'end_date':\n return params.get(param, now)\n if param == 'product_type':\n return params.get(param)\n return params.get(param, '')\n\n product_type = get_params('product_type')\n product = get_params('product')\n model = get_params('model')\n grade = get_params('grade')\n event = get_params('event')\n test_type_name = get_params('test_type_name')\n seq = get_params('seq')\n end_date = get_params('end_date')\n start_date = get_params('start_date')\n\n if product_type == None:\n product_type = 'tv' \n\n all_type_list = [] \n for key in product_type_list.keys():\n all_type_list.extend(product_type_list[key])\n\n \n models = TestModel.objects.filter(req_date__range=[start_date, end_date])\n models = models.exclude(Q(test_type_name__in=EXCLUDE_TEST_TYPE)|\n Q(event__icontains='cskd')|Q(seq=0))\n models = models.filter(Q(product_group_name__icontains=product)&\\\n Q(model__icontains=model)&\\\n Q(grade__icontains=grade)&\\\n Q(event__icontains=event)&\\\n 
Q(test_type_name__icontains=test_type_name)&\\\n Q(seq__icontains=seq)).order_by('plan_start','-req_no','-req_date','model','seq')\n \n if product_type == 'etc':\n models = models.exclude(Q(product_group_name__in=all_type_list))\n elif product_type == 'all':\n pass\n else:\n models = models.filter(Q(product_group_name__in=product_type_list[product_type]))\n\n context = {\n 'user_info':user_info,\n 'end_time':end_date,\n 'start_time':start_date,\n 'models':models,\n 'model':model,\n 'event':event,\n 'grade':grade,\n 'test_type_name':test_type_name,\n 'seq':seq,\n 'product_list':getFilterList(start_date, end_date, 'model', 'product_group_name'),\n 'product':product,\n 'product_type':product_type,\n 'event_list': getFilterList(start_date, end_date, 'model', 'event'),\n 'test_type_name_list': getFilterList(start_date, end_date, 'model', 'test_type_name'),\n 'status_list': getFilterList(start_date, end_date, 'model', 'status')\n }\n return render(request, template, context)\n # if request.GET['export'] == 0:\n # return render(request, template, context)\n # else:\n # print(\"excel exporting\")\n # return render(request, template, context), exportExcel(models)\n\n# /capa/item_status\n@loginRequired\ndef itemStatusView(request):\n template = 'capa/capa_item_status.html' \n\n user_info = get_user_info(request)\n \n # now = datetime.strftime(datetime.now(),'%Y-%m-%d')\n # past = datetime.strftime(datetime.now() - timedelta(days=7), '%Y-%m-%d')\n now = datetime.strftime(datetime.now() + timedelta(days=14), '%Y-%m-%d')\n past = datetime.strftime(datetime.now() - timedelta(days=14), '%Y-%m-%d')\n\n queryset_items = TestItem.objects.all()\n\n params = request.GET\n \n now = params.get('end_date', now)\n past = params.get('start_date', past)\n division = params.get('division', '')\n model = params.get('model', '')\n product = params.get('product', '')\n event = params.get('event', '')\n seq = params.get('seq', '')\n test_item = params.get('test_item', '')\n test_type = params.get('test_type', '')\n judgement = params.get('judgement', '')\n tester_kr = params.get('tester_kr', '')\n\n # items = queryset_items.exclude(testModel__test_type_name__in=EXCLUDE_TEST_TYPE).\\\n # exclude(testModel__event__icontains='cskd').\\\n # exclude(test_item='etc').\\\n # filter(Q(division__icontains=division)&\\\n # Q(model__icontains=model)&\\\n # Q(testModel__event__icontains=event)&\\\n # Q(testModel__seq__icontains=seq)&\\\n # Q(testModel__product_group_name__icontains=product)&\\\n # Q(test_item__icontains=test_item)&\\\n # Q(tester_kr__icontains=tester_kr)).filter(Q(plan_start__range=[past,now])|Q(plan_start=None)).order_by('-plan_start','judgement','test_item')\n\n # Compute the schedule min/max over all queried models to set the overall chart range\n end_time = now\n start_time = past\n\n context = {\n 'user_info':user_info,\n 'now':now,\n 'past':past,\n 'model':model,\n 'product':product,\n 'test_item':test_item,\n 'judgement':judgement,\n 'end_time':end_time,\n 'start_time':start_time,\n # 'items':items,\n # 'tester':tester,\n # 'product_list':getFilterList(start_time, end_time, 'model', 'product_group_name'),\n # 'event_list':getFilterList(start_time, end_time, 'model', 'event'),\n # 'test_item_list':getFilterList(start_time, end_time, 'item', 'test_item'),\n # 'test_type_list':getFilterList(start_time, end_time, 'model', 'test_type_name'),\n 'event':event,\n 
'tester_kr':tester_kr\n }\n return render(request, template, context)\n\n# /capa/item/$PK(TestItem)\n@loginRequired\ndef ItemDetailView(request, pk):\n template_name = 'capa/item_detail.html'\n item = TestItem.objects.get(pk=pk)\n\n context = {'item':item}\n if request.method =='POST':\n post_data = dict(request.POST)\n # print(post_data)\n if post_data:\n # del(post_data['csrfmiddlewaretoken'])\n # print(post_data['tester_kr'])\n item.tester_kr = post_data['tester_kr'][0]\n # print(get_date(post_data['plan_start'][0]))\n item.plan_start = get_date(post_data['plan_start'][0])\n item.plan_end = get_date(post_data['plan_end'][0])\n item.real_start = get_date(post_data['real_start'][0])\n item.real_end = get_date(post_data['real_end'][0])\n item.desc = post_data['desc'][0]\n item.save()\n return render(request, template_name, context)\n else: \n return render(request, template_name, context)\n\n# /capa/schedule\n@loginRequired\ndef scheduleTester(request): \n template = 'capa/capa_schedule.html'\n user_info = get_user_info(request)\n \n start = datetime.strftime(datetime.now() - timedelta(days=3), '%Y-%m-%d')\n end = datetime.strftime(datetime.now() + timedelta(days=12),'%Y-%m-%d')\n range_start = datetime.strptime(start,'%Y-%m-%d')\n\n if request.method == 'GET':\n try:\n department_filter = request.GET['department']\n user = request.GET['user']\n start = request.GET['start']\n end = datetime.strftime(datetime.strptime(start, '%Y-%m-%d') + timedelta(days=15), '%Y-%m-%d')\n range_start = datetime.strptime(start,'%Y-%m-%d')\n except Exception as e:\n print(e)\n department_filter = user_info['department']\n user = ''\n start = start\n end = end\n range_start = datetime.strptime(start,'%Y-%m-%d')\n start_time = datetime.strptime(start, '%Y-%m-%d')\n end_time = datetime.strptime(end, '%Y-%m-%d')\n # Make Date List\n date_list = []\n while not start_time.date() == end_time.date()+timedelta(days=1):\n date_list.append(start_time.date())\n start_time = start_time + timedelta(days=1)\n\n def getTesterEvent(start, end, tester_kr, department):\n events = ScheduleTester.objects.all().filter(start_time__range=[start, end]).filter(Q(userID__department__icontains=department)\\\n &Q(userID__kr_name__icontains=tester_kr))\n retv = []\n for event in events:\n tester_kr = event.userID.kr_name\n tester_idx = event.userID.id\n tester_id = event.userID.adid\n department = event.userID.department\n start_time = event.start_time\n end_time = event.end_time\n event_type = event.work_type\n event_id = event.id\n event_name = event.work_type\n desc = event.desc\n progress = 1\n retv.append({'tester_kr':tester_kr, 'tester_idx':tester_idx, 'tester_id':tester_id, 'department':department, 'start_time':start_time, 'end_time':end_time,\\\n 'event_type':event_type, 'event_id':event_id, 'event_name':event_name, 'desc':desc, 'progress':progress})\n #query = events\n return retv#, query\n\n tester_event = getTesterEvent(start, end, user, department_filter)\n\n users = UserID.objects.filter(Q(department__icontains=department_filter)&\\\n (Q(kr_name__icontains=user)|Q(adid__icontains=user))).order_by('kr_name')#.filter(Q(scehduler_set__start_time__range=[start,end]))\n\n context = {\n 'user_info':user_info,\n 'department_filter':department_filter,\n 'date_list':date_list,\n 'tester_event':tester_event,\n 'users':users,\n 'user':user,\n 'department':user_info['department'],\n 'start':start,\n 'end':end,\n 'range_start':range_start \n }\n\n return render(request, template, context) \n\n# 
/capa/schedule/$PK(User)\n@loginRequired\ndef testerDetailView(request, pk):\n template = 'capa/capa_tester_detail.html'\n user_info = get_user_info(request)\n\n user = UserID.objects.get(pk=pk)\n now = datetime.strftime(datetime.now(),'%Y-%m-%d')\n start = datetime.strftime(datetime.now() - timedelta(days=30), '%Y-%m-%d')\n end = datetime.strftime(datetime.now(),'%Y-%m-%d')\n range_start = datetime.strptime(start,'%Y-%m-%d')\n\n schedules = ScheduleTester.objects.filter(userID=user).order_by('-start_time')\n context = {\n 'user_info':user_info,\n 'user':user,\n 'start':start,\n 'end':end,\n 'range_start':range_start,\n 'now':now,\n 'schedules':schedules\n }\n \n if request.method == 'POST':\n post_data = dict(request.POST)\n if post_data:\n del(post_data['csrfmiddlewaretoken'])\n new = ScheduleTester(\n userID = user,\n work_type = post_data['work_type'][0],\n start_time = datetime.strptime(post_data['start_time'][0],'%Y-%m-%d %H:%M'),\n end_time = datetime.strptime(post_data['end_time'][0], '%Y-%m-%d %H:%M'),\n desc = post_data['desc'][0])\n new.save()\n return render(request, template, context) \n else:\n try:\n context['range_start'] = datetime.strptime(request.GET['range_start'],'%Y-%m-%d')\n except:\n pass\n return render(request, template, context) \n\n# /capa/schedule/$PK(User)/detail/$PK_DETAIL(Schedule)\nclass ScheduleDetailView(DetailView):\n model = ScheduleTester \n template_name = 'capa/capa_schdule_detail.html'\n pk_url_kwarg = 'pk_detail'\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['currentUser'] = self.request.session.get('adid', False)\n context['kr_name'] = self.request.session.get('kr_name', False)\n context['title'] = self.request.session.get('title', False)\n context['department'] = self.request.session.get('department', False)\n return context\n\n# /capa/schedule/$PK(User)/detail/$PK_DETAIL(Schedule)/delete\nclass ScheduleDeleteView(DeleteView):\n model = ScheduleTester\n template_name = 'capa/capa_tester_detail_delete.html'\n pk_url_kwarg = 'pk_detail'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['currentUser'] = self.request.session.get('adid', False)\n context['kr_name'] = self.request.session.get('kr_name', False)\n context['title'] = self.request.session.get('title', False) \n return context\n\n def get_success_url(self):\n return reverse_lazy('tester_detail', kwargs={'pk': self.object.userID_id})\n\n# /capa/schedule/$PK(User)/detail/$PK_DETAIL(Schedule)/modify\n@loginRequired\ndef scheduleModifyView(request, pk, pk_detail):\n user_info = get_user_info(request)\n\n template = 'capa/capa_tester_detail_modify.html'\n schedules = ScheduleTester.objects.get(pk=pk_detail)\n user = schedules.userID.kr_name + schedules.userID.title\n department_user = schedules.userID.department\n work_type = schedules.work_type\n start_time = schedules.start_time\n end_time = schedules.end_time\n desc = schedules.desc \n\n # print(pk, pk_del, work_type, user, kr_name, start_time, end_time)\n context = {\n 'user_info':user_info,\n 'user':user,\n 'work_type':work_type,\n 'start_time':start_time,\n 'end_time':end_time,\n # 'start_time':start_time.strftime('%y-%m-%d'),\n # 'end_time':end_time.strftime('%y-%m-%d'),\n 'desc':desc\n }\n \n if request.method == 'POST':\n post_data = dict(request.POST)\n # print(post_data)\n if post_data:\n del(post_data['csrfmiddlewaretoken'])\n schedules.work_type = post_data['work_type'][0]\n schedules.start_time = 
datetime.strptime(post_data['start_time'][0],'%Y-%m-%d %H:%M')\n schedules.end_time = datetime.strptime(post_data['end_time'][0], '%Y-%m-%d %H:%M')\n schedules.desc = post_data['desc'][0]\n schedules.save()\n return redirect(reverse_lazy('tester_detail', kwargs={\"pk\":schedules.userID_id})) \n else:\n return render(request, template, context) \n\n# /capa/model/$MODEL_NAME\n@loginRequired\ndef modelView_detail(request, model_name):\n template = 'capa/capa_model_detail.html'\n user_info = get_user_info(request)\n\n items = TestItem.objects.all().filter(model=model_name).order_by('testModel__test_type_name').exclude(test_item='Etc').exclude(tester_kr='null')\n \n # 5/15: handle items that have no sequence number\n try:\n max_seq = int(items.aggregate(seq_max=Max('testModel__seq'))['seq_max'])\n except:\n max_seq = 0\n\n results = items.values('testModel__event', 'testModel__test_type_name', 'test_item').distinct().order_by('testModel__event', 'testModel__test_type_name')\n\n context = {\n 'user_info':user_info,\n 'model_name':model_name,\n 'items':items,\n 'max_seq':range(max_seq),\n 'results':results\n }\n\n return render(request, template, context) \n\n# /capa/events\n@loginRequired\ndef event_view(request):\n template = 'capa/capa_event_view.html'\n user_info = get_user_info(request)\n past = datetime.strftime(datetime.now() - timedelta(days=7), '%Y-%m-%d')\n now = datetime.strftime(datetime.now() + timedelta(days=7),'%Y-%m-%d')\n department_list = ['TV개발품질보증1반', 'TV개발품질보증2반']#, 'HE개발품질보증1팀', 'HE개발품질보증2팀' 'HE SW개발품질보증팀']\n \n params = request.GET\n\n # product_type = params.get('product_type','')\n department = params.get('department','TV개발품질보증1반')\n test_item = params.get('test_item','')\n tester_kr_filter = params.get('tester_kr','')\n start_date = params.get('start',past)\n end_date = params.get('end', now)\n\n items = TestItem.objects.all().filter(plan_start__range=[start_date, end_date])\n \n # if product_type == 'etc':\n # ex_list = []\n # for lst in list(product_type_list.values()):\n # ex_list.extend(lst)\n # items = items.exclude(Q(testModel__product_group_name__in=ex_list))\n # else:\n # items = items.filter(Q(testModel__product_group_name__in=product_type_list[product_type]))\n\n events = []\n user_list = UserID.objects.all().filter(Q(department=department))\n filtered_items = items.filter(tester_id__in=user_list.values_list('adid'))\n date_list = []\n start = datetime.strptime(start_date, '%Y-%m-%d')\n end = datetime.strptime(end_date, '%Y-%m-%d')\n\n # Make Date List\n while start.date() != end.date() + timedelta(days=1):\n date_list.append(start.date())\n start = start + timedelta(days=1)\n\n test_events = []\n for user in user_list:\n tester_id = user.adid\n tester_kr = user.kr_name\n title = user.title\n department = user.department\n personal_event = user.scheduletester_set.all().filter(start_time__range=[start_date, end_date])\n test_event = items.filter(tester_id=tester_id)\n tester_time_interval = getTimeInterval(test_event)\n events.append({\n 'tester_id':tester_id,\n 'tester_kr':tester_kr,\n 'title':title,\n 'department':department,\n 'personal_event':personal_event,\n 'test_event':test_event,\n 'tester_time_interval':tester_time_interval\n })\n # Sort by tester_kr\n sorted_events = sorted(events, key=lambda x: x['tester_kr']) \n\n context = {\n 'user_info':user_info,\n # 'product_type':product_type,\n 'department':department,\n 'events':sorted_events,\n 'start':start_date,\n 'end':end_date,\n 'date_list':date_list,\n 'total_seconds':getTimeInterval(filtered_items)\n # 
'plan_total':plan_total,\n # 'real_total':real_total\n }\n \n return render(request, template, context) \n\n# /capa/events\n@loginRequired\ndef _event_view(request):\n template = 'capa/capa_event_view.html'\n user_info = get_user_info(request)\n start_date = datetime.strftime(datetime.now() - timedelta(days=7), '%Y-%m-%d')\n end_date = datetime.strftime(datetime.now() + timedelta(days=7),'%Y-%m-%d')\n department_list = ['TV개발품질보증1반', 'TV개발품질보증2반']#, 'HE개발품질보증1팀', 'HE개발품질보증2팀' 'HE SW개발품질보증팀']\n \n if request.method == 'GET':\n try:\n division = request.GET['division']\n department_filter = request.GET['department']\n test_item = request.GET['test_item']\n tester_kr_filter = request.GET['tester_kr']\n start_date = request.GET['start']\n end_date = request.GET['end']\n except Exception as e:\n _department_filter = department_title if department_title in department_list else 'TV개발품질보증1반'\n print(e)\n division = 'GLZ'\n department_filter = _department_filter\n test_item = ''\n tester_kr_filter = ''\n start_date = start_date\n end_date = end_date\n \n tester_event = getTesterEvent(start_date, end_date, tester_kr_filter, department_filter)\n item_event, events_query = getItemEvent(start_date, end_date, division, department_filter, test_item, tester_kr_filter)\n # Sum Item and Tester Events\n events = tester_event + item_event\n\n # Remove that if start time is none and Sorting\n with_out_none = [item for item in events if not item['start_time'] == None]\n sorted_events = sorted(with_out_none, key=lambda x: x['start_time'])\n\n # Get Adids in all events\n adids = list(set([item['tester_id'] for item in sorted_events ]))\n\n # Get Tester Infomation(kr_name, adid, department)\n name_adids = []\n for adid in adids:\n for sorted_event in sorted_events:\n if adid == sorted_event['tester_id']:\n tester_kr = sorted_event['tester_kr']\n department = sorted_event['department']\n name_adids.append({'kr_name':tester_kr, 'adid':adid, 'department':department})\n \n date_list = []\n start = datetime.strptime(start_date, '%Y-%m-%d')\n end = datetime.strptime(end_date, '%Y-%m-%d')\n\n # Make Date List\n while not start.date() == end.date()+timedelta(days=1):\n date_list.append(start.date())\n start = start + timedelta(days=1)\n events = []\n\n # Make events objects(kr_name, adid, department, event_list(Dictionary obj))\n for name in name_adids:\n _events = [ sorted_event for sorted_event in sorted_events if name['adid'] == sorted_event['tester_id'] ]\n events.append({'kr_name':name['kr_name'], 'adid':name['adid'], 'department':name['department'], 'event_list':_events})\n\n # Get Daily total lead time(plan, real)\n plan_total = {}\n for date in date_list:\n try:\n items = events_query.filter(plan_start__range=[date, date + timedelta(days=1)])\n except:\n continue\n plan_total[date.strftime('%m/%d')] = 0\n for item in items:\n try:\n if item.plan_start.date() == item.plan_end.date():\n plan_total[date.strftime('%m/%d')] += (item.plan_end - item.plan_start).total_seconds()\n else:\n _days = (item.plan_end - item.plan_start).days\n plan_total[date.strftime('%m/%d')] += (item.plan_end - item.plan_start).total_seconds()\\\n - (16 * _days * 60 * 60)\n except Exception as e:\n print(e, item.plan_start, item.plan_end)\n continue\n # print('{} : {}'.format(date.strftime('%m/%d'),plan_total[date.strftime('%m/%d')]))\n \n real_total = {}\n for date in date_list:\n try:\n items = events_query.filter(real_start__range=[date, date + timedelta(days=1)])\n except Exception as e:\n print(e)\n continue\n 
real_total[date.strftime('%m/%d')] = 0\n for item in items:\n try:\n if item.real_start.date() == item.real_end.date():\n real_total[date.strftime('%m/%d')] += (item.real_end - item.real_start).total_seconds()\n else:\n _days = (item.real_end - item.real_start).days\n print(date ,item.real_end - item.real_start)\n real_total[date.strftime('%m/%d')] += (item.real_end - item.real_start).total_seconds()\\\n - (16 * _days * 60 * 60)\n # print(real_total)\n except Exception as e:\n print(e)\n continue\n\n context = {\n 'user_info':user_info,\n 'department':department_title,\n 'department_filter':department_filter,\n 'events':events,\n 'start':start_date,\n 'end':end_date, \n 'date_list':date_list, \n 'test_item_list':getFilterList(start, end, 'item', 'test_item'),\n 'plan_total':plan_total,\n 'real_total':real_total\n }\n \n return render(request, template, context) \n\n# /capa/analyzer\n@loginRequired\ndef capa_analyzer(request):\n # template\n template = 'capa/capa_analyzer.html' \n\n # User Info\n user_info = get_user_info(request)\n\n # Default date range : 1 year > (100 days)\n now = datetime.strftime(datetime.now(), '%Y-%m-%d')\n past = datetime.strftime(datetime.now() - timedelta(days=100), '%Y-%m-%d')\n\n params = request.GET\n\n def get_params(param):\n if param == 'start_date':\n return params.get(param, past)\n if param == 'end_date':\n return params.get(param, now)\n if param == 'product_type':\n return params.get(param)\n return params.get(param, '')\n\n product_type = get_params('product_type')\n product = get_params('product')\n model = get_params('model')\n grade = get_params('grade')\n event = get_params('event')\n test_type = get_params('test_type')\n test_item = get_params('test_item')\n start_date = get_params('start_date')\n end_date = get_params('end_date')\n export = get_params('export')\n\n if not product_type:\n product_type = 'tv'\n # Query Set\n \n items = TestItem.objects.filter(Q(testModel__req_date__range=[start_date,end_date]))\n \n items = items.exclude(\n Q(testModel__test_type_name__in=EXCLUDE_TEST_TYPE)|\\\n Q(testModel__event__icontains='cskd')|\\\n Q(testModel_id__seq=0)|\\\n Q(test_item='etc')).filter(\n Q(testModel_id__product_group_name__icontains=product)&\\\n Q(model__icontains=model)&\\\n Q(testModel_id__grade__icontains=grade)&\\\n Q(testModel_id__event__icontains=event)&\\\n Q(testModel_id__test_type_name__icontains=test_type)&\\\n Q(test_item__icontains=test_item))\\\n .order_by('-testModel__plan_start')\n\n if product_type == 'etc':\n ex_list = []\n for lst in list(product_type_list.values()):\n ex_list.extend(lst)\n items = items.exclude(Q(testModel__product_group_name__in=ex_list))\n else:\n if product_type != 'all':\n items = items.filter(Q(testModel__product_group_name__in=product_type_list[product_type]))\n\n try:\n max_seq = int(items.aggregate(seq_max=Max('testModel__seq'))['seq_max'])\n except:\n max_seq = 0\n\n # index list\n # division, model, grade, test_type, test_item, event\n idx_list = items.values('division','model','testModel__grade','testModel__test_type_name','test_item','testModel__event','testModel__product_group_name')\\\n .distinct().order_by('testModel__event', 'testModel__test_type_name')\n \n query = items.values('division','model','testModel__grade','testModel__test_type_name','test_item','testModel__event','testModel__seq', 'testModel__product_group_name')\\\n .annotate(max_seq=Max('testModel__seq'))\n # Context\n context = {\n # Common info\n 'user_info':user_info,\n\n # filter\n 'product_type':product_type,\n 
'product':product,\n 'model':model,\n 'grade':grade,\n 'test_type':test_type,\n 'test_item':test_item,\n 'event':event,\n 'start_date':start_date,\n 'end_date':end_date,\n \n # filter select option list\n 'product_list':getFilterList(start_date, end_date, 'model', 'product_group_name'),\n 'grade_list':getFilterList(start_date, end_date, 'model', 'grade'),\n 'test_type_list':getFilterList(start_date, end_date, 'model', 'test_type_name'),\n 'test_item_list':getFilterList(start_date, end_date, 'item', 'test_item'),\n 'event_list':getFilterList(start_date, end_date, 'model', 'event'),\n\n #context\n 'max_seq':range(max_seq),\n 'idx_list':idx_list,\n 'items':items\n }\n\n if export == '1':\n print(\"excel exporting\")\n # result = makeSeqData(query, items, max_seq)\n\n return exportExcel(request, query)\n else:\n return render(request, template, context)\n\n@loginRequired\ndef main_page(request):\n template = 'capa/capa_main_page.html'\n user_info = get_user_info(request)\n start = '2017-08-01'\n end = '2018-09-01'\n models = TestModel.objects.filter(create_date__range=[start, end])\n states = {\n 'testing':['Testing', 'Result Writing'],\n 'complete':['Completed', 'Result Approval'],\n 'planning':['Plan Writing', 'Rcv. Waiting','Plan Approval','Rcv. Holding' ]\n }\n testing = models.filter(status__in=states.get('testing'))\n complete = models.filter(status__in=states.get('complete'))\n planning = models.filter(status__in=states.get('planning'))\n \n qs = {\n \"testing\" : [testing.filter(seq__gte=3).count(),testing.count()],\n \"complete\" : [complete.filter(seq__gte=3).count(),[complete.filter(judgement=\"OK\").count(), complete.filter(judgement=\"NG\").count()]],\n \"planning\" : [planning.filter(seq__gte=3).count(),planning.count()],\n }\n context = {\n 'user_info':user_info,\n 'qs':qs\n }\n\n\n return render(request, template, context)\n\n@loginRequired\ndef analysis_page(request):\n context = {}\n template = 'capa/capa_analysis_page.html'\n return render(request, template, context)","sub_path":"apps/capa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":42558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"519375621","text":"from bs4 import BeautifulSoup\nimport requests\nfrom flask import Flask, request\nfrom flask_cors import CORS, cross_origin\napp = Flask(__name__)\n\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n@app.route('/', methods=['GET','POST'])\n@cross_origin()\ndef scrape():\n url=request.args['link']\n soup = BeautifulSoup(requests.get(url).text)\n for script in soup([\"script\", \"style\"]):\n script.extract()\n return soup.get_text() \n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=6100)\n","sub_path":"scrape-website-text/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"127146892","text":"# importing library\nimport arcade\nimport random\n# variables\ntotal_score = 0\nquestions_missed = 0\nquestion = 1\n\n\ndef intro():\n # Welcome statement\n print(\"Welcome to my Final Project! 
\\n\"\n \"In this project I will be quizzing you on Star Wars trivia!\")\n\n\n# question 1\ndef han_death():\n global question\n global questions_missed\n global total_score\n print(\"Question\", question)\n question += 1\n print(\"In what movie did Han Solo die?\")\n print(\"A) The Phantom Menace \\n\"\n \"B) The Force Awakens \\n\"\n \"C) Rise of Skywalker \\n\"\n \"D) Return of the Jedi \\n \")\n game_choice = input(\"Enter your choice \")\n if game_choice.lower() == \"b\":\n print(\"Congratulations, you got the question right!\\n\" +\n \"Unfortunately he was stabbed by his son with a lightsaber.\")\n total_score += 20\n print(\"Your new score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"The correct answer is B\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions\")\n\n\n# question 2\ndef movie_count():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n total_movies = input(\"How many Star Wars movies are there? \")\n if total_movies == \"9\":\n print(\"Nice job!\")\n total_score += 20\n print(\"Your new score is\", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"There are 9 movies, Rogue One is a movie but does not follow the timeline.\")\n print(\"Your current score is:\", total_score)\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions\")\n\n# question 3\n\n\ndef george_lucas():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n print(\"Why did George Lucas sell the Star Wars franchise to Disney?\")\n print(\"A) He wanted to spend time with his family and raise his daughter \\n\"\n \"B) He was tired of the Star Wars fandom \\n\" \n \"C) He had some scripts written for the next trilogy but was too tired to direct them \\n\" \n \"D) He was stressed about movie reactions\")\n lucas_daughter = input(\"Enter your choice \")\n if lucas_daughter.lower() == \"a\":\n print(\"Congratulations! He wanted to spend time with his family.\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"He sold the franchise to spend time with his family\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 4\n\n\ndef lucas_net_worth():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n net_worth = input(\"What is George Lucas's net worth? \")\n if net_worth.lower() == \"7 billion usd\":\n print(\"That is correct!\")\n total_score += 20\n print(\"Your new score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"His net worth is 7 billion USD\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions\")\n\n# question 5\n\n\ndef species():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n print(\"What species is Jar Jar Binks?\")\n print(\"A) Toydarian\\n\"\n \"B) Wookie\\n\"\n \"C) Gungan\\n\"\n \"D) Geonosian\")\n jarjar_species = input(\"Enter your choice \")\n if jarjar_species.lower() == \"c\":\n print(\"Congratulations! 
Jar Jar Binks is from Naboo.\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"No, Jar Jar Binks is a Gungan, while the others are other species of creatures from Star Wars\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 6\n\n\ndef stormtrooper_number():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n print(\"What is Finn's Stormtrooper number?\")\n print(\"A) TK-421\\n\"\n \"B) CT-7567\\n\"\n \"C) CC-2224\\n\"\n \"D) FN-2187\")\n finn_number = input(\"Enter your choice \")\n if finn_number.lower() == \"d\":\n print(\"Congratulations! The others are from the movies or Clone Wars.\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"No, Finn's number was FN-2187, Poe gave Finn as a name to him\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 7\n\n\ndef shared_scenes():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n scenes_shared = input(\"How many scenes did Darth Vader and C-3PO share? \")\n if scenes_shared in (\"1\", \"one\"):\n print(\"Correct! This scene was in The Empire Strikes Back.\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"Shockingly there was only 1 scene!\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 8\n\n\ndef skywalker_mother():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n print(\"Who is Anakin Skywalker's mother?\")\n print(\"A) Leia Skywalker\\n\"\n \"B) Satine Kryze\\n\"\n \"C) Maz Kanata\\n\"\n \"D) Shmi Skywalker\")\n anakin_mother = input(\"Enter your choice \")\n if anakin_mother.lower() == \"d\":\n print(\"Congratulations! Shmi Skywalker is Anakin's mother\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n else:\n print(\"Shmi Skywalker is Anakin's mother\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 9\n\n\ndef leia_luke():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n twins = input(\"Were Leia and Luke twins? \")\n if twins.lower() == \"yes\":\n print(\"Correct! They were twins of Padme Amidala.\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"They were twins.\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 10\n\n\ndef arm_colour():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n print(\"What colour is C-3PO’s arm in The Force Awakens?\")\n print(\"A) Red\\n\"\n \"B) Purple\\n\"\n \"C) Gold\\n\"\n \"D) Green\")\n metal_arm = input(\"Enter your choice \")\n if metal_arm.lower() == \"a\":\n print(\"Congratulations! 
C-3PO's arm was red.\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"No, C-3PO's arm was red in The Force Awakens\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 11\n\n\ndef general_grevious():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n grevious_lightsabers = input(\"How many lightsabers did General Grievous have? \")\n if grevious_lightsabers in (\"4\", \"four\"):\n print(\"Correct! None of them were red lightsabers, either.\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"Shockingly he had 4!\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 12\n\n\ndef yoda_animal():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n print(\"What animal nearly played Yoda?\")\n print(\"A) Bear\\n\"\n \"B) Cat\\n\"\n \"C) Monkey\\n\"\n \"D) Eagle\")\n monkey_yoda = input(\"Enter your choice \")\n if monkey_yoda.lower() == \"c\":\n print(\"Congratulations! That would have been a chaotic set!\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"No, Yoda was about to be played by a monkey!\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 13\n\n\ndef younglings_death():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n print(\"What did the younglings say before Anakin killed them?\")\n print(\"A) Master Skywalker there are too many of them!\\n\"\n \"B) What are we going to do?\\n\"\n \"C) Master Skywalker there are too many of them! What are we going to do?\\n\"\n \"D) What are we going to do? Master Skywalker there are too many of them!\")\n kid_death = input(\"Enter your choice \")\n if kid_death.lower() == \"c\":\n print(\"Congratulations! These were Sors Bandeam's last words\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n else:\n print(\"No they said,'Master Skywalker there are too many of them! What are we going to do?'\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 14\n\n\ndef temple_location():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n print(\"Which is not a temple location for the Jedi Order? \")\n print(\"A) Coruscant\\n\"\n \"B) Endor\\n\"\n \"C) Ossus\\n\"\n \"D) Tython\")\n temple_planets = input(\"Enter your choice \")\n if temple_planets.lower() == \"b\":\n print(\"Congratulations! There are a total of 4\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"No, there is no temple on Endor.\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 15\n\n\ndef palpatine_granddaughter():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n rey_p = input(\"Who is Senator Palpatine's Granddaughter? 
\")\n if rey_p.lower() == \"rey\":\n print(\"That is correct!\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"Sheev Palpatine's granddaughter is Rey.\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 16\n\n\ndef height_dif():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n print(\"What is the height difference between Harrison Ford and Leia Organa?\")\n print(\"A) 2 inches\\n\"\n \"B) 6 inches\\n\"\n \"C) 2 feet\\n\"\n \"D) 1 foot\")\n han_leia_height = input(\"Enter your choice \")\n if han_leia_height.lower() == \"d\":\n print(\"Congratulations! Carrie Fisher had to stand on a block!\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"No there was a 1 foot height difference.\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 17\n\n\ndef r2d2_height():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n print(\"How tall is R2D2?\")\n print(\"A) 0.96m\\n\"\n \"B) 1.4m\\n\"\n \"C) 2.0m\\n\"\n \"D) 0.46m\")\n r2_height = input(\"Enter your choice \")\n if r2_height.lower() == \"a\":\n print(\"Congratulations! R2D2 is the best droid.\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"No, R2D2 was 0.96m tall.\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 18\n\n\ndef maul_apprentice():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n print(\"Who was Darth Maul's apprentice?\")\n print(\"A) Watto\\n\"\n \"B) Savage Opress\\n\"\n \"C) Boba Fett\\n\"\n \"D) Yoda\")\n maul_brother = input(\"Enter your choice \")\n if maul_brother.lower() == \"b\":\n print(\"Congratulations! Savage Opress was also his brother.\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"Savage Opress was his apprentice and brother\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 19\n\n\ndef yoda_age():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n age_answer = input(\"How old was Yoda when he died? \")\n if age_answer == \"900\":\n print(\"Congratulations, he died at an old age\")\n total_score += 20\n print(\"Your score is \", total_score)\n print(\"You have missed \", questions_missed, \" questions.\")\n\n else:\n print(\"He died when he was 900\")\n questions_missed += 1\n print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 20\n\n\ndef credit_dept():\n global question\n global questions_missed\n global total_score\n print(\"Question \", question)\n question += 1\n print(\"What was Han Solo’s overall debt to Jabba?\")\n print(\"A) 22,000 credits\\n\"\n \"B) 3,190 credits\\n\"\n \"C) 21,090 credits\\n\"\n \"D) 14,260 credits\")\n credits_owed = input(\"Enter your choice \")\n if credits_owed.lower() == \"d\":\n print(\"Congratulations! 
He was not very responsible\")\n        total_score += 20\n        print(\"Your score is \", total_score)\n        print(\"You have missed \", questions_missed, \" questions.\")\n\n    else:\n        print(\"No, he had a debt of 14,260 credits\")\n        questions_missed += 1\n        print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 21\n\n\ndef qgj_death():\n    global question\n    global questions_missed\n    global total_score\n    print(\"Question \", question)\n    question += 1\n    print(\"Who killed Qui Gon Jinn?\")\n    print(\"A) Anakin Skywalker\\n\"\n          \"B) Rex\\n\"\n          \"C) Sheev Palpatine\\n\"\n          \"D) Darth Maul\")\n    death_of_qgj = input(\"Enter your choice \")\n    if death_of_qgj.lower() == \"d\":\n        print(\"Congratulations! Sadly, he was killed by Darth Maul\")\n        total_score += 20\n        print(\"Your score is \", total_score)\n        print(\"You have missed \", questions_missed, \" questions.\")\n\n    else:\n        print(\"No, Darth Maul had a life goal of killing him\")\n        questions_missed += 1\n        print(\"You have missed \", questions_missed, \" questions.\")\n\n# question 22\n\n\ndef anakin_owner():\n    global question\n    global questions_missed\n    global total_score\n    print(\"Question \", question)\n    question += 1\n    print(\"What is the name of the Toydarian who owned Anakin Skywalker? \")\n    print(\"A) Watto\\n\"\n          \"B) Chewbacca\\n\"\n          \"C) Darth Sidious\\n\"\n          \"D) Barara\")\n    anakin_toydarian = input(\"Enter your choice \")\n    if anakin_toydarian.lower() == \"a\":\n        print(\"Congratulations! He was owned by Watto\")\n        total_score += 20\n        print(\"Your score is \", total_score)\n        print(\"You have missed \", questions_missed, \" questions.\")\n\n    else:\n        print(\"No, Watto owned him and his mother\")\n        questions_missed += 1\n        print(\"You have missed \", questions_missed, \" questions.\")\n\n\n# Play the theme audio\n\ntheme = arcade.load_sound(\"1-02 Main Title_Rebel Blockade Runne.mp3\")\narcade.play_sound(theme)\n\n\n# Play functions\nintro()\nmy_list = [han_death, movie_count, george_lucas, lucas_net_worth, species,\n           stormtrooper_number, shared_scenes, skywalker_mother, leia_luke,\n           arm_colour, general_grevious, yoda_animal, younglings_death, temple_location,\n           palpatine_granddaughter, height_dif, r2d2_height, maul_apprentice, yoda_age,\n           credit_dept, qgj_death, anakin_owner]\n\n# Ask a random number of distinct questions, re-drawing whenever a question repeats\nnum_questions = random.randint(7, 22)\nquestions_seen = []\nwhile len(questions_seen) < num_questions:\n    question_num = random.randint(0, len(my_list) - 1)\n    if question_num not in questions_seen:\n        my_list[question_num]()\n        questions_seen.append(question_num)\n\n\narcade.open_window(800, 350, \"Star Wars\")\narcade.set_background_color(arcade.csscolor.BLACK)\ndef final_stars(x,y):\n    arcade.draw_circle_filled(x, y, 3, arcade.csscolor.WHITE_SMOKE)\n\n    # stars at 100\nfor x in range(25):\n    final_stars(75+x*25,100)\n    # stars at 150\nfor x in range(9):\n    final_stars(75+x*25,150)\nfor x in range(9):\n    final_stars(525+x*25,150)\n\n    # stars at 200\nfor x in range(9):\n    final_stars(75+x*25,200)\nfor x in range(9):\n    final_stars(525+x*25,200)\n\n    # stars at 250\nfor x in range(25):\n    final_stars(75+x*25,250)\n\n    # stars at 300\nfor x in range(25):\n    final_stars(75+x*25,300)\n\n    # stars at 50\nfor x in range(25):\n    final_stars(75+x*25,50)\n\narcade.draw_text(\"Congratulations!\\n\" +\n                 \"You finished the game!\\n\" +\n                 \"You are a Star Wars buff!\",\n                 300, 150,\n                 arcade.color.YELLOW, 17)\narcade.finish_render()\n\narcade.run()","sub_path":"Lab 12 - Final 
Lab/part_12.py","file_name":"part_12.py","file_ext":"py","file_size_in_byte":18514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"344165798","text":"#Copyright 2017 Google Inc. All rights reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Rule for configuring apt GPG keys\"\"\"\n\nload(\"@io_bazel_rules_docker//docker:docker.bzl\", \"docker_build\")\nload(\"//util:run.bzl\", \"container_run_and_extract\")\n\ndef add_apt_key(name, keys, image, gpg_image=None):\n # First build an image capable of adding an apt-key.\n # This requires the keyfile and the \"gnupg package.\"\n\n # If the user specified an alternate base for this, use it.\n # Otherwise use the same base image we want the key in.\n\n if gpg_image == None:\n gpg_image = image\n\n key_image = \"%s.key\" % name\n docker_build(\n name=key_image,\n base=gpg_image,\n directory=\"/gpg\",\n files=keys,\n )\n\n commands = [\n \"apt-get update\",\n \"apt-get install -y -q gnupg\",\n # In a macro we don't get to see exactly what the key file will be named,\n # so we put it in a special directory and use glob.\n \"for file in /gpg/*; do apt-key add \\$file; done\"\n ]\n\n gpg_name=\"%s_gpg\" % name\n container_run_and_extract(\n name=gpg_name,\n image=key_image,\n commands=commands,\n extract_file=\"/etc/apt/trusted.gpg\"\n )\n\n docker_build(\n name=name,\n base=image,\n directory=\"/etc/apt/\",\n files=[gpg_name],\n )\n","sub_path":"package_managers/apt_key.bzl","file_name":"apt_key.bzl","file_ext":"bzl","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"448984299","text":"import argparse\nfrom datetime import datetime, timedelta, timezone\n\nimport dateutil.parser\n\nimport news.crawlers\n\nCRAWLER_DICT = {\n 'chinatimes': news.crawlers.chinatimes.main,\n 'cna': news.crawlers.cna.main,\n 'epochtimes': news.crawlers.epochtimes.main,\n 'ettoday': news.crawlers.ettoday.main,\n 'ftv': news.crawlers.ftv.main,\n 'ltn': news.crawlers.ltn.main,\n 'ntdtv': news.crawlers.ntdtv.main,\n 'setn': news.crawlers.setn.main,\n 'storm': news.crawlers.storm.main,\n 'tvbs': news.crawlers.tvbs.main,\n 'udn': news.crawlers.udn.main,\n}\n\n\ndef parse_argument():\n r'''\n `crawler_name` example: 'cna'\n `current_datetime` example: 2021-06-24T00:00:00Z\n `first_id` example: 55688\n '''\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--crawler_name',\n choices=CRAWLER_DICT.keys(),\n type=str,\n help='Select crawler.',\n )\n parser.add_argument(\n '--db_name',\n type=str,\n help='Assign database to store news.',\n )\n parser.add_argument(\n '--debug',\n type=bool,\n default=False,\n help='Select whether use debug mode.',\n )\n parser.add_argument(\n '--current_datetime',\n type=str,\n default=None,\n help='Specify the upper bound of the news release time. (latest)',\n )\n parser.add_argument(\n '--past_datetime',\n type=str,\n default=None,\n help='Specify the lower bound of the news release time. 
(oldest)',\n    )\n    parser.add_argument(\n        '--first_idx',\n        type=int,\n        default=1,\n        help='Specify first index id. (smallest)',\n    )\n    parser.add_argument(\n        '--latest_idx',\n        type=int,\n        default=-1,\n        help='Specify latest index id. (largest)',\n    )\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == '__main__':\n\n    args = parse_argument()\n\n    # Default `current_datetime` to now.\n    if not args.current_datetime:\n        args.current_datetime = datetime.now(timezone.utc)\n    else:\n        args.current_datetime = dateutil.parser.isoparse(\n            args.current_datetime\n        )\n\n    # Default: crawl one day of news.\n    if not args.past_datetime:\n        args.past_datetime = args.current_datetime - timedelta(days=1)\n    else:\n        args.past_datetime = dateutil.parser.isoparse(\n            args.past_datetime\n        )\n\n    # Run crawler.\n    func = CRAWLER_DICT[args.crawler_name]\n    param = dict((k, v) for k, v in vars(args).items()\n                 if k in func.__code__.co_varnames)\n    func(**param)\n","sub_path":"run_crawler.py","file_name":"run_crawler.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"241449021","text":"import os\nimport platform\n\ndef getpath():\n\tplat = platform.system().lower()\n\tplatarch = \"32\"\n\tif 'PROGRAMFILES(X86)' in os.environ:\n\t\tplatarch = \"64\"\n\tsteamhome = {\"platform\": plat, \"platarch\": platarch}\n\tif plat == \"windows\":\n\t\timport _winreg\n\t\tregkey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,\"Software\\\\Valve\\\\Steam\")\n\t\ttry:\n\t\t\ti = 0\n\t\t\twhile 1:\n\t\t\t\tname, value, type = _winreg.EnumValue(regkey, i)\n\t\t\t\tif name == \"SteamPath\":\n\t\t\t\t\tsteamhome[\"path\"] = value\n\t\t\t\t\tbreak;\n\t\t\t\ti += 1\n\t\texcept WindowsError:\n\t\t\tprint\n\telif plat == \"linux\":\n\t\tsteamhome[\"path\"] = os.path.join(os.path.expanduser('~'),\".local\",\"share\",\"Steam\")\n\treturn steamhome","sub_path":"loader/steamhome.py","file_name":"steamhome.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"641972135","text":"from django.conf.urls import patterns, url\n\nfrom cyoa import views\n\nurlpatterns = patterns('',\n    # url(r'^$', views.IndexView.as_view(), name='index'),\n \turl(r'^$', views.index, name='index'),\n    url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'),\n# url(r'^(?P<pk>\d+)/results/$', views.ResultsView.as_view(), name='results'),\n    url(r'^(?P<question_id>\d+)/choose/$', views.choose, name='choose'),\n)\n","sub_path":"devsite/cyoa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"569082359","text":"#!/usr/bin/env python\nfrom flask import Flask, render_template, request, jsonify, abort\nimport json\nimport uuid\nfrom looped import Jump, WebGamePad, create_led, Tuner, Intro, Metronome\nfrom flask_sockets import Sockets\nimport geventwebsocket\nimport gevent\nfrom bibliopixel import colors\nfrom geventwebsocket.handler import WebSocketHandler\n\nNB_ROUNDS = 10\nCOLORS = [\n    colors.Turquoise,\n    colors.Green,\n    colors.Yellow,\n    colors.Orange,\n    colors.Purple,\n    colors.Magenta,\n    colors.Coral,\n]\n\n\napp = Flask(__name__)\napp.config.from_envvar('SETTINGS')\nled = create_led(dev=app.config['DEBUG'], length=app.config['STRIP_LENGTH'])\ngamepad = WebGamePad()\nsockets = Sockets(app)\n\napp.state = {\n    'current_animation': None,\n    'players': [],\n    'ws': [],\n    'playing': False,\n    
'all_games_ending': [],\n}\n\n\n@sockets.route('/jump')\ndef echo_socket(ws):\n token = request.cookies.get('token')\n player = next((_ for _ in app.state['players'] if _['token'] == token))\n app.state['ws'].append(ws)\n player['connected'] = True\n while not ws.closed:\n ws.receive()\n app.state['ws'].remove(ws)\n player['connected'] = False\n\n\n@app.route('/')\ndef gameList():\n return render_template('index.html')\n\n\ndef send_notifs(msg):\n if type(msg) is dict:\n msg = json.dumps(msg)\n for ws in app.state['ws']:\n if ws.closed:\n app.state['ws'].remove(ws)\n continue\n try:\n ws.send(msg)\n except geventwebsocket.WebSocketError:\n app.state['ws'].remove(ws)\n\n\n@app.route('/jump-connect', methods=['POST'])\ndef connectJump():\n token = request.cookies.get('token', str(uuid.uuid4()))\n player = next((_ for _ in app.state['players'] if _['token'] == token), None)\n if not player:\n player = {\n 'token': token,\n 'color': COLORS[len(app.state['players']) % len(COLORS)]\n }\n app.state['players'].append(player)\n player['connected'] = True\n data = {\n 'players': get_connected_players()\n }\n send_notifs({'type': 'join', 'payload': data})\n response = app.response_class(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n response.set_cookie('token', player['token'])\n return response\n\n\ndef get_connected_players():\n return [_ for _ in app.state['players'] if _.get('connected', True)]\n\n\n@app.route('/jump-start', methods=['POST'])\ndef startJump():\n def onEnd(d):\n app.state['playing'] = False\n app.state['all_games_ending'].append(d)\n send_notifs({'type': 'end', 'payload': app.state['all_games_ending']})\n if len(app.state['all_games_ending']) == NB_ROUNDS:\n app.state['all_games_ending'] = []\n if app.state['playing']:\n return abort(400)\n app.state['playing'] = True\n run_animation(Jump(\n led,\n gamepad=gamepad,\n players=get_connected_players(),\n onDie=lambda d: send_notifs({'type': 'die', 'payload': d}),\n onEnd=onEnd\n ), untilComplete=True)\n response = {\n 'players': get_connected_players()\n }\n send_notifs({'type': 'start', 'payload': response})\n return jsonify(response)\n\n\n@app.route('/tuner', methods=['POST'])\ndef tuner():\n run_animation(Tuner(led))\n return 'ok'\n\n\n@app.route('/controller', methods=['POST'])\ndef controller():\n token = request.cookies.get('token')\n key = request.json and request.json.get('key') or None\n gamepad.click(key or token)\n return 'ok'\n\n\n@app.route('/metronome', methods=['POST'])\ndef metronome():\n bpm = request.json and int(request.json.get('bpm')) or None\n color = request.json and request.json.get('color') or None\n run_animation(Metronome(led, gamepad=gamepad, bpm=bpm, color=color), fps=30)\n return 'ok'\n\n\ndef run_animation(anim, **kwargs):\n if app.state['current_animation']:\n app.state['current_animation'].stopThread(wait=True)\n app.state['current_animation'] = None\n app.state['current_animation'] = anim\n app.state['current_animation'].run(threaded=True, **kwargs)\n\n\nif __name__ == '__main__':\n server = gevent.pywsgi.WSGIServer(('0.0.0.0', app.config['PORT']), app, handler_class=WebSocketHandler)\n run_animation(Intro(led), untilComplete=True)\n server.serve_forever()\n","sub_path":"webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"210413972","text":"import time\nimport os\nimport csv\n\nfor y in range(10):\n\n\tos.system('rm -rf 
/tmp/riofs/*')\n\tos.system('sync')\n\tos.system('(echo 3 | sudo tee /proc/sys/vm/drop_caches) > /dev/null')\n\tos.system('sudo riofs -c ~/.config/riofs/riofs.conf.xml -o direct_io mariomediabucket /storage/riofsS3nc')\n\ttime.sleep(1)\n\ttinit = time.time()\n\tfor x in range(100):\n\t\tos.system(\"dd if=/dev/zero of=/storage/riofsS3nc/riofs/testfile-\" + str(x) + \".txt bs=1MB count=1 oflag=nocache iflag=nocache status=none\")\n\n\tlapse = time.time() - tinit\n\twith open('./stats/rioWriteMany.csv', 'a', newline='') as file:\n\t\twriter = csv.writer(file)\n\t\twriter.writerow([lapse])\n\n\tprint(\"riofs: \" + str(lapse))\n\tif y != 9:\n\t\tos.system('rm -rf /storage/riofsS3nc/riofs/testfile-*')\n\t\ttime.sleep(3)\n\tos.system('fusermount -u /storage/riofsS3nc/')\n\ttime.sleep(1)\n","sub_path":"rio/rioWriteMany.py","file_name":"rioWriteMany.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"632946177","text":"#!/usr/bin/env python\n'''\n\t@author Juan Carlos Aguilera\n'''\n\nimport sys\nimport json\nimport requests\n'''\nserver = IP\nserver = http://...\n'''\nurl = 'http://betatestback.journeytheapp.com' ;\nteamCode = 'vant-TEC' ;\nPORT = 80 ;\nretryCount = 0 ;\nconnectionLost = False;\ncourseA = 'CourseA';\ncourseB = 'CourseB';\ncourseC = 'CourseC';\n\ndef send_http_followLeader(course):\n\t'''\n\t\tGET\n\t\t/followLeader/{course}/{teamCode}\n\t'''\n\t'''\n\t\texpected return value {\"code\":\"23\"}\n\t'''\n\taddr = '/followLeader/';\n\t\n\tserver = url + addr + course + '/' + teamCode ;\n\t\n\tr = requests.get(server);\n\n\tprint(server)\n\n\tif(r.status_code == 200):\n\t\tprint ('Status 200: OK');\n\t\tprint((r.text))\n\telif(r.status_code == 400):\n\t\tprint('Status 400: The request form is not OK, please check it');\n\telif(r.status_code == 404):\n\t\tprint('Status 404: Course or Team is wrong');\n\telif(r.status_code == 500):\n\t\tprint('Status 500: The gate assignment is broken');\n\telif(r.status_code == 503):\n\t\tprint('Status 503: Please try the request again');\n\t\tif(retryCount < 
100):\n\t\t\tsend_http_followLeader(course)\n\t\telse:\n\t\t\tretryCount = 0;\n\t\t\tconnectionLost = True;\n\ndef send_http_heartbeat(course,timestamp,challenge,latitude,longitude):\n\t'''\n\t\tPOST\n\t\t/heartbeat/\n\t\tcourse\n\t\tteamCode\n\t\ttimestamp = YYYYMMDDHHMMSS in UTC\n\t\tchallenge = \"speed\", \"docking\", \"path\", \"follow\" or \"return\"\n\t\tlatitude = hddd.dddddd\n\t\tlongitude = hddd.dddddd\n\t'''\n\t'''\n\t\texpected return value {\"success\":<status>}\n\t\tstatus = true // Run is still active\n\t\tstatus = false // Run has ended\n\t'''\n\taddr = '/heartbeat/' + course + '/' + teamCode;\n\n\tserver = url + addr ;\n\n\tpayload = {'timestamp':timestamp, 'challenge':challenge, 'latitude':round(latitude,6), 'longitude':round(longitude,6)} ;\n\n\tr = requests.post(server, data=payload) ;\n\n\tprint(server)\n\tprint(payload)\n\n\tif(r.status_code == 200):\n\t\tprint ('Status 200: OK');\n\t\tprint((r.text))\n\telif(r.status_code == 400):\n\t\tprint('Status 400: The request form is not OK, please check it');\n\telif(r.status_code == 404):\n\t\tprint('Status 404: Course or Team is wrong');\n\telif(r.status_code == 500):\n\t\tprint('Status 500: The gate assignment is broken');\n\telif(r.status_code == 503):\n\t\tprint('Status 503: Please try the request again');\n\t\tif(retryCount < 100):\n\t\t\tsend_http_heartbeat(course,timestamp,challenge,latitude,longitude)\n\n\ndef send_http_docking_2(course,filename):\n\t'''\n\t\tPOST\n\t\t/docking/image/\n\t\tcourse\n\t\tteamCode\n\t\tname = file\n\t\tfilename = 'test.jpg'\n\t'''\n\t'''\n\t\texpected return value {\"id\":<id>}\n\t'''\n\n\taddr = '/docking/image/' + course +'/' + teamCode ;\n\t\n\theaders = {'Content-type':'multipart/form-data'} ;\n\t\n\tserver = url + addr ;\n\t\n\tfiles = {'name' : open(filename,'rb') } \n\n\tpayload = {'filename': filename } ;\n\n\timgId = None ;\n\n\tr = requests.post(server, data = payload, files=files) ;\n\n\tif(r.status_code == 100):\n\t\tprint ('Status 100: Server is ready to accept multipart chunk');\n\telif(r.status_code == 200):\n\t\tprint ('Status 200: OK');\n\t\tprint((r.text))\n\t\timgId = json.loads(r.text)\n\telif(r.status_code == 202):\n\t\tprint('Status 202: Upload successfully completed');\n\t\tprint((r.text))\n\t\timgId = json.loads(r.text)\n\telif(r.status_code == 400):\n\t\tprint('Status 400: The request form is not OK, please check it');\n\telif(r.status_code == 404):\n\t\tprint('Status 404: Course or Team is wrong');\n\telif(r.status_code == 500):\n\t\tprint('Status 500: The gate assignment is broken');\n\telif(r.status_code == 503):\n\t\tprint('Status 503: Please try the request again');\n\t\tif(retryCount < 100):\n\t\t\treturn send_http_docking_2(course,filename)\n\n\treturn imgId\n\n","sub_path":"communication/auvsiServerCommunication.py","file_name":"auvsiServerCommunication.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"216400240","text":"import falcon\nimport json\nimport sys\nfrom twee import MentalTruth\n\nclass get_score(object):\n\n    def __init__(self):\n        self.mental_truth = MentalTruth()\n        print('hello')\n\n    def on_get(self, req, resp):\n        msg = {\n            'works?': 'YEAH POST WORKS !!'\n        }\n        resp.body = json.dumps(msg)\n        resp.status = falcon.HTTP_200\n        print(resp.status)\n\n    def on_post(self, req, resp):\n        # print('post')\n        # resp.body = json.dumps(\"yeah we can post\")\n\n        data = req.stream.read(req.content_length or 0)\n        json_data = json.loads(data)\n\n        twitter_handle = json_data['twitter_handle']\n\n        result = self.mental_truth.iterate_twitter(twitter_handle)\n        result = str(result)\n\n        resp.status = falcon.HTTP_201\n        msg = {\n            'sentiment_polarity': result\n        }\n\n        resp.body = json.dumps(msg)","sub_path":"get_score.py","file_name":"get_score.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"167642813","text":"'''\nImplement next permutation, which rearranges numbers into the lexicographically next greater permutation of numbers.\n\nIf such arrangement is not possible, it must rearrange it as the lowest possible order (ie, sorted in ascending order).\n\nThe replacement must be in-place and use only constant extra memory.\n\nHere are some examples. 
Inputs are in the left-hand column and its corresponding outputs are in the right-hand column.\n\n1,2,3 → 1,3,2\n3,2,1 → 1,2,3\n1,1,5 → 1,5,1\n'''\n\n\n\n\nclass Solution:\n def nextPermutation(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n n = len(nums)\n i, j = n - 2, n-1\n while i >= 0 and nums[i] >= nums[i+1]:\n i -= 1\n if i >= 0:\n while nums[j] <= nums[i]:\n j -= 1\n nums[i], nums[j] = nums[j], nums[i]\n nums[i+1:] = nums[n:-n+i:-1]\n return nums\n\n#\n# def permutations(arr, position, end):\n# if position == end:\n# print(arr)\n# else:\n# for index in range(position, end):\n# print(\"Before\", end=\":\")\n# print(arr)\n# arr[index], arr[position] = arr[position], arr[index]\n# permutations(arr, position+1, end)\n# print(\"after\", end=':')\n# print(arr)\n# arr[index], arr[position] = arr[position], arr[index]\n#\n# arr = [\"a\", \"b\", \"c\"]\n# permutations(arr, 0, len(arr))\n\nif __name__ == '__main__':\n nums = [1, 2, 3]\n print(Solution().nextPermutation(nums))\n","sub_path":"31_Next_Permutation.py","file_name":"31_Next_Permutation.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"619041478","text":"#!/usr/bin/python3\nfrom random import randint\nimport time\nimport os\n\n# Pirate Bartender\n__author__ = 'Les Tallman'\n\nquestions = {\n \"bitter\": \"Are ye a lubber who likes it bitter? \",\n \"fruity\": \"Are ye one for a fruity finish? \",\n # \"salty\": \"Do ye like it with a salty tang? \",\n # \"strong\": \"Do ye like yer drinks strong? \",\n # \"sour\": \" Do ye like yer drinks to make ye pucker? \",\n # \"sweet\": \"Would ye like a bit of sweetness with yer poison? 
\"\n}\n\ningredients = {\n #\"aromatic\": [\"red wine\", \"clove\", \"coffee\", \"bianco vermouth\"],\n \"bitter\": [{\"name\":\"angostura bitters\", \"qty\":5}], #, {\"name\":\"splash of tonic\", \"qty\":5},\n # {\"name\":\"orange bitters\", \"qty\":5},{\"name\":\"grapefruit\", \"qty\":5}],\n \"fruity\": [{\"name\":\"slice of orange\", \"qty\":5}] #, {\"name\":\"dash of cassis\", \"qty\":5}, {\"name\":\"maraschino cherry\", \"qty\":5}]\n # \"salty\": [\"olives on a stick\", \"salt-dusted rim\", \"rasher of bacon\"],\n # \"sour\": [\"slice of lemon\",\"slice of lime\", \"sour mix\"],\n # \"strong\": [\"glug of rum\", \"slug of whisky\", \"splash of gin\"],\n # \"sweet\": [\"sugar cube\", \"spoonful of honey\", \"splash of cola\"]\n}\n\n\nliquor = {\n \"whiskey\": [\"Scotch\", \"Irish\", \"Single Malt\", \"Blended\", \"Bourbon\", \"Rye\"],\n \"gin\": [\"Botanist\", \"Tanqueray\", \"Bombay\", \"Beefeater\", \"Gordon's\"],\n \"tequila\": [\"Blanco\", \"Joven\", \"Reposado\", \"Anejo\", \"Extra Anejo\"],\n \"rum\": [\"Bacardi\", \"McDowells\", \"Havana Club\", \"Brugal\", \"Contessa\"],\n \"vodka\": [\"Ciroc\", \"Dragon Blue\", \"Grey Goose\", \"Mariette\"],\n 'brandy': [\"Remy Martin\", \"Hennessy\", \"Courvisier\", \"Paul Masson\", \"E&J\"],\n \"sherry\": [\"Fino\", \"Manzanilla\", \"Amontillado\", \"Oloroso\", \"Palo Cortado\"]\n}\n\n\ndef find_user_preferences():\n \"\"\"Find user's drink preferences\"\"\"\n\n drink_preferences = {} # Create a dictionary to hold user's answers\n for ingred_type, question in questions.items(): # type is the key and question is the value assigned to that key.\n print(question) # Print question for user to answer.\n drink_preferences[ingred_type] = input().lower() in [\"y\", \"yes\"]\n print(\"\")\n return drink_preferences # return user's answer to main function.\n\n\ndef create_drink(drink_preferences):\n \"\"\"Create drink based on user's answers\"\"\"\n\n drink = [] # Create list based on the items the user liked\n for ingredient_type, liked in drink_preferences.items():\n if not liked:\n continue\n random_value = randint(0, len(ingredients[ingredient_type]) - 1)\n print(random_value)\n drink.append(ingredients[ingredient_type][random_value])\n track_ingredient_use(ingredients, ingredient_type, random_value)\n return drink, ingredient_type, random_value # Return ingredients randomly selected to run_application function.\n\n\ndef track_ingredient_use(ingredients, ingredient_type, random_value):\n \"\"\"Track the ingredients used for inventory control\"\"\"\n\n ingredients[ingredient_type][random_value][\"qty\"] = ingredients[ingredient_type][random_value][\"qty\"] - 1\n\n if ingredients[ingredient_type][random_value][\"qty\"] == 0:\n print(\"\\nGive me a few, I need to restock the bar!!\\n\")\n time.sleep(15) # Delay for 15 seconds\n random_ingredient_value = randint(0, 5 + 1) # Generate random between 1 & 5.\n ingredients[ingredient_type][random_value][\"qty\"] = \\\n ingredients[ingredient_type][random_value][\"qty\"] + random_ingredient_value\n\n\ndef another_drink(drink, ingredient_type, random_value):\n \"\"\" Ask the user if they would like another drink \"\"\"\n\n drink_question = input(\"\\nWould you like another drink maty? \").lower()\n if drink_question == \"y\" or drink_question == \"yes\":\n print(\"\\nGreat\")\n\n same_drink = input(\"\\nWould you like the same drink you just had? 
\").lower()\n if same_drink == \"y\" or same_drink == \"yes\":\n track_ingredient_use(ingredients, ingredient_type, random_value)\n serve_drink(drink)\n another_drink(drink, ingredient_type, random_value)\n elif same_drink == \"n\" or same_drink == \"no\":\n run_application() # call the main function and start the questions over.\n\n elif drink_question == \"n\" or drink_question == \"no\":\n print(\"Good night, Be careful sailing' home!\")\n else:\n print(\"Maybe ye had yone one t' many maty...\")\n print(\"Time to walk it off!\")\n\n\ndef serve_drink(drink):\n\n # os.system('clear')\n print(\"Here's your drank..\")\n print(\"Here be t' stuff in it:\")\n # print(drink) # Used for testing\n for ingredient in drink:\n print(\"A {}, {}\".format(ingredient[\"name\"], ingredient[\"qty\"]))\n\n\ndef run_application():\n\n drink_preferences = find_user_preferences()\n drink, ingredient_type, random_value = create_drink(drink_preferences)\n serve_drink(drink)\n another_drink(drink, ingredient_type, random_value)\n\n\ndef main():\n\n run_application()\n\nif __name__ == '__main__':\n main()","sub_path":"PirateBarTender/pirate_bar_tender.py","file_name":"pirate_bar_tender.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"58762833","text":"'''\nCreated on Jul 27, 2016\n\n@author: jvazquez\n'''\nimport logging\n\nfrom flask import Blueprint, g, jsonify, request\nfrom flask_jwt import jwt_required\nfrom flasgger import swag_from\n\nfrom user.api.model import User\n\nfrom utils.helpers.acl.decorators import access_to_resource\nfrom utils.helpers.model.validator import sanitize, sanitize_update\nfrom utils.helpers.response_creator.simple_creator import simplest\nfrom user.api.model import Email\nfrom utils.helpers.response_creator.http_statuses import OK, ERROR, CREATED,\\\n NOT_FOUND\nimport json\n\nadmin_module = Blueprint(\"admin_module\", __name__)\nlogger = logging.getLogger(__name__)\n\n\n@admin_module.route(\"/api/admin/user//\", methods=[\"PATCH\"])\n@jwt_required(401)\n@access_to_resource(request, \"admin.privilege\")\n@swag_from(\"admin_update_user.yml\")\ndef update_user(user_id):\n try:\n data = request.get_json()\n rejected_fields = sanitize(User, data)\n sanitize_update(data, [\"id\"], rejected_fields)\n user = g.db_session.query(User).get(user_id)\n if user:\n updated = user.update(**data)\n if updated:\n response = simplest(\"update\", OK, data, rejected_fields)\n else:\n response = simplest(\"update\", ERROR, data, rejected_fields)\n else:\n response = simplest(\"update\", NOT_FOUND, [], [])\n except Exception:\n logger.exception(\"Admin error while update\")\n response = simplest(\"update\", ERROR, data, rejected_fields)\n finally:\n return jsonify(response, OK)\n\n\n@admin_module.route(\"/api/admin/email/configuration//\",\n methods=[\"GET\"])\n@jwt_required(401)\n@access_to_resource(request, \"admin.privilege\")\n@swag_from(\"admin_get_email_conf.yml\")\ndef get_email_conf(email_id):\n email = g.db_session.query(Email).get(email_id)\n if email is None:\n status = NOT_FOUND\n return jsonify([]), status\n else:\n status = OK\n return jsonify(email.to_json()), status\n\n\n@admin_module.route(\"/api/admin/email/configuration/\", methods=['GET'])\n@jwt_required(401)\n@access_to_resource(request, \"admin.privilege\")\n@swag_from(\"admin_get_all_email_conf.yml\")\ndef get_all_email_conf():\n try:\n email_list = list(map(lambda email: email.to_json(),\n g.db_session.query(Email).all()))\n 
status = OK\n    except Exception:\n        logger.exception(\"We had an error retrieving the list of email configurations\")\n        status = ERROR\n        email_list = []\n    return jsonify(email_list), status\n\n\n@admin_module.route(\"/api/admin/email/configuration/\", methods=[\"POST\"])\n@jwt_required(401)\n@access_to_resource(request, \"admin.privilege\")\n@swag_from(\"admin_create_email_conf.yml\")\ndef create_email_conf():\n    data = request.get_json()\n    tenant_id = request.headers.get(\"Tenant\")\n    rejected_fields = sanitize(Email, data)\n    if len(rejected_fields[\"rejected\"]) > 0:\n        logger.exception(\"Admin error while creating email configuration; \"\n                         \"here are the rejected fields\")\n        response = simplest(\"create\", ERROR, data, rejected_fields)\n        status = ERROR\n    else:\n        try:\n            logger.debug(data)\n            has_tid = \"{}\" in data[\"body\"]\n            has_site = \"{{site}}\" in data[\"body\"]\n            if has_tid is False or has_site is False:\n                logger.debug(\"Has tid {}\\nHas site {}\".format(has_tid,\n                                                              has_site))\n                raise Exception(\"No site placeholder found\")\n            data[\"body\"] = data[\"body\"].replace(\"{}\", tenant_id)\n            mail = Email(**data)\n            mail.save(True)\n            status = CREATED\n            response = simplest(\"create\", CREATED, mail.id)\n        except Exception:\n            logger.exception(\"Error when creating email\")\n            status = ERROR\n            response = simplest(\"create\", ERROR, data, rejected_fields)\n    return jsonify(response), status\n\n\n@admin_module.route(\"/api/admin/email/configuration/<int:email_id>/\",\n                    methods=[\"PATCH\"])\n@jwt_required(401)\n@access_to_resource(request, \"admin.privilege\")\n@swag_from(\"admin_update_email_conf.yml\")\ndef update_email_conf(email_id):\n    try:\n        data = request.get_json()\n        rejected_fields = sanitize(Email, data)\n        sanitize_update(data, [\"id\"], rejected_fields)\n        email = g.db_session.query(Email).get(email_id)\n        if email:\n            updated = email.update(**data)\n            if updated:\n                response = simplest(\"update\", OK, data, rejected_fields)\n            else:\n                response = simplest(\"update\", ERROR, data, rejected_fields)\n        else:\n            response = simplest(\"update\", NOT_FOUND, [], [])\n    except Exception:\n        logger.exception(\"Admin error while updating email\")\n        response = simplest(\"update\", ERROR, data, rejected_fields)\n    finally:\n        return jsonify(response), OK\n\n\n@admin_module.route(\"/api/admin/email/configuration/<int:email_id>/\",\n                    methods=[\"DELETE\"])\n@jwt_required(401)\n@access_to_resource(request, \"admin.privilege\")\n@swag_from(\"admin_delete_email_conf.yml\")\ndef delete_email_conf(email_id):\n    email = g.db_session.query(Email).get(email_id)\n    if email is not None:\n        email.delete()\n        status = OK\n    else:\n        status = NOT_FOUND\n\n    response = simplest(\"delete\", status)\n    return jsonify(response), status\n","sub_path":"user/api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"119861259","text":"# coding=utf-8\n# @Time : 2019/7/9 13:13\n# @Author : Mandy\nimport ftplib\nimport os\nimport time\nimport zipfile\n\nimport conftest\n\nftp_host = '10.88.0.22'\nftp_username = 'test'\nftp_password = 'pass'\n\n\nclass MyFtp:\n\n    def __init__(self):\n        self.ftp = ftplib.FTP(ftp_host)\n        self.filename = 'SkyVPNDebugItunes.ipa'\n        self.local_screen_path = conftest.screenshots_dir\n        self.remote_path = r'/Ad_Screenshot/'\n        now_time = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))\n        # name of the zipped screenshots archive\n        self.zip_name = \"android_screenshot_\" + now_time + '.zip'\n        self.zip_file = self.local_screen_path + self.zip_name\n\n    def login(self, user, password):\n        
self.ftp.login(user, password)\n        # print(self.ftp.welcome)\n\n    def zip_file_folder(self, zip_file_dir, zip_name):\n        \"\"\"\n        Compress the given folder\n        :param zip_file_dir: path of the target folder\n        :param zip_name: save path of the archive + xxxx.zip\n        :return: None\n        \"\"\"\n        z = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)\n        for dirpath, dirnames, filenames in os.walk(zip_file_dir):\n            # This replace matters: without it, archive paths would start from the root directory\n            fpath = dirpath.replace(zip_file_dir, '')\n            # compress the current folder and every file it contains\n            fpath = fpath and fpath + os.sep or ''\n            for filename in filenames:\n                print('compressing', filename)\n                z.write(os.path.join(dirpath, filename), fpath + filename)\n        print('compressing finished')\n        z.close()\n\n    def zip_files(self, zip_file_dir):\n        \"\"\"\n        Compress all files under the given folder\n        :param zip_file_dir: path of the target folder\n        :return: None\n        \"\"\"\n        zip_name = self.zip_name\n        zip = zipfile.ZipFile(zip_name, \"w\", zipfile.ZIP_DEFLATED)\n        for path, dirnames, filenames in os.walk(zip_file_dir):\n            # strip the target root path so only files and folders below it are compressed\n            fpath = path.replace(zip_file_dir, '')\n            for filename in filenames:\n                print('[MyLog]--------Compressing', filename)\n                zip.write(os.path.join(path, filename), os.path.join(fpath, filename))\n        print('[MyLog]--------Compressing finished!')\n        zip.close()\n\n    def download_file(self, local_file, remote_file): \n        # download a single file\n        file_handler = open(local_file, 'wb')\n        print(file_handler)\n        # ftp.retrbinary(\"RETR %s\" % (remote_file), file_handler.write)\n        # receive the file from the server and write it to the local file\n        self.ftp.retrbinary('RETR ' + remote_file, file_handler.write)\n        file_handler.close()\n        return True\n\n    def download_file_tree(self, local_dir, remote_dir): \n        # download every file under a directory\n        print(\"[MyLog]--------remote_dir:\", remote_dir)\n        if not os.path.exists(local_dir):\n            os.makedirs(local_dir)\n        self.ftp.cwd(remote_dir)\n        remote_names = self.ftp.nlst()\n        print(\"[MyLog]--------remote_names\", remote_names)\n        for file in remote_names:\n            local = os.path.join(local_dir, file)\n            print(self.ftp.nlst(file))\n            if file.find(\".\") == -1:\n                if not os.path.exists(local):\n                    os.makedirs(local)\n                self.download_file_tree(local, file)\n            else:\n                self.download_file(local, file)\n        self.ftp.cwd(\"..\")\n        return\n\n    def upload_files(self, remote_path):\n        \"\"\"\n        Upload the zip archive to the given remote folder\n        :param remote_path: path of the remote folder\n        :return: None\n        \"\"\"\n        buf_size = 1024\n        # change to the working directory\n        self.ftp.cwd(remote_path)\n        # list the directory's files\n        # self.ftp.retrlines('LIST')\n        # open the file to upload\n        upload_file = self.zip_name\n        file_handler = open(upload_file, 'rb')\n        # remote address + filename of the upload target\n        filename = remote_path + upload_file\n        print('[MyLog]--------Uploading', upload_file)\n        # upload the file\n        self.ftp.storbinary('STOR %s' % os.path.basename(filename), file_handler, buf_size)\n        self.ftp.set_debuglevel(0)\n        print('[MyLog]--------Uploading finished! 
You can link the following address for details: ftp://' + ftp_host + self.remote_path)\n\n    def remove_zip(self, zipname):\n        os.remove(zipname)\n        print('[MyLog]--------Remove zip success!')\n\n    # recursively delete the files in a directory and its subdirectories\n    def remove_files(self, path):\n        for i in os.listdir(path):\n            path_file = os.path.join(path, i)  # build the file's full path\n            if os.path.isfile(path_file):  # check whether it is a file\n                os.remove(path_file)\n            else:\n                self.remove_files(path_file)\n        print('[MyLog]--------Remove files success!')\n\n\n    def make_dir(self):\n        os.makedirs(self.local_screen_path)\n\n    def close(self):\n        self.ftp.close()\n\n    def main(self):\n        self.login(ftp_username, ftp_password)\n        self.zip_files(self.local_screen_path)\n        # upload the archive\n        self.upload_files(self.remote_path)\n        self.remove_zip(self.zip_name)\n        self.remove_files(conftest.screenshots_list)\n        self.close()\n\n\n# if __name__ == \"__main__\":\n#     my_ftp = MyFtp()\n\n\n","sub_path":"common/my_ftp.py","file_name":"my_ftp.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"627574732","text":"import pymysql as pymysql\nimport requests\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.common.exceptions import NoSuchElementException\nfrom wordcloud import WordCloud, STOPWORDS\nimport datetime\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\nimport random\n\n\n\ndef click_url(url, header):\n    req = requests.get(url, header)\n    soup = BeautifulSoup(req.content, 'html.parser')\n    return soup\n\n\ndef pagination(page, search, city):\n    my_header = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',\n        \"referrer\": \"www.Google.com\"}\n    search = search.replace(\" \", \"%20\")\n    # url = 'https://www.indeed.com/jobs?q=software%20developer&l=McKinney%2C%20TX&start=' + str(page)\n    url = 'https://www.indeed.com/jobs?q=' + search + '&l=' + city + ',%20TX&start=' + str(page)\n    req = requests.get(url, my_header)\n    soup = BeautifulSoup(req.content, 'html.parser')\n    return soup\n\n\ndef is_state(txt):\n    states_abbrev = ('AK', 'AL', 'AR', 'AS', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'GU', 'HI', 'IA', 'ID',\n                     'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN', 'MO', 'MP', 'MS', 'MT', 'NC', 'ND',\n                     'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX',\n                     'UM', 'UT', 'VA', 'VI', 'VT', 'WA', 'WI', 'WV', 'WY')\n    for state in range(len(states_abbrev)):\n        if txt.upper() == states_abbrev[state].upper():\n            return True\n    return False\n\n\ndef split_location(loc):\n    location_split = loc.replace(\",\", \"\").replace(\"-\", \"\").replace(\"•\", \"\").split(\" \")\n    city = state = zip_code = \"\"\n    for element in location_split:\n        if element.isnumeric():\n            zip_code = element\n        elif is_state(element):\n            state = element\n        elif element.isalpha():\n            city = element\n    location = {\n        'city': city,\n        'state': state,\n        'zip_code': zip_code,\n    }\n    return location\n\n\ndef salary_format(sal_split):\n    for sal in range(len(sal_split)):\n        sal_split[sal] = ''.join(i for i in sal_split[sal] if i.isdigit())\n\n    salary_min_hourly = salary_max_hourly = 0\n    salary_hourly = 0\n    salary_min_year = 0\n    salary_max_year = 0\n    if len(sal_split) == 1:\n        salary_year = int(sal_split[0])\n        if salary_year < 1000:\n            salary_hourly = salary_year\n            salary_year = 0\n            salary_min_hourly = 0\n            salary_max_hourly = 0\n            salary_min_year = 0\n            salary_max_year = 0\n    else:\n        salary_min_year = 
int(sal_split[0])\n salary_max_year = int(sal_split[1])\n salary_year = 0\n salary_hourly = 0\n if salary_max_year < 1000:\n salary_min_hourly = salary_min_year\n salary_max_hourly = salary_max_year\n salary_min_year = 0\n salary_max_year = 0\n salary = {\n 'salary_year': salary_year,\n 'salary_hourly': salary_hourly,\n 'salary_min_year': salary_min_year,\n 'salary_max_year': salary_max_year,\n 'salary_min_hourly': salary_min_hourly,\n 'salary_max_hourly': salary_max_hourly\n }\n return salary\n\n\ndef check_languages(desc):\n languages_mentioned = [] # will contain languages mentioned in the description\n word_list = desc.split(\" \") # split description into separate strings\n all_languages = [\"JAVA\", \"C\", \"C++\", \"C#\", \"Python\", \".NET\", \"JavaScript\", \"PHP\", \"SQL\", \"OBJECTIVE-C\", \"ASSEMBLY\",\n \"MATLAB\", \"PERL\", \"PASCAL\", \"R\", \"RUBY\", \"VISUAL BASIC\", \"GO\", \"GROOVY\", \"SWIFT\", \"SAS\", \"LUA\",\n \"DART\",\n \"FORTRAN\", \"COBOL\", \"SCRATCH\", \"SCALA\", \"ABAP\", \"LISP\", \"ADA\", \"RUST\", \"KOTLIN\", \"HASKELL\", \"G\",\n \"JULIA\", \"TCL\", \"POSTSCRIPT\", \"ERLANG\", \"BASH\", \"HTML\", \"CSS\", \"ANGULAR\", \"REACT\", \"VUE\",\n \"NODE.JS\", \"NODE\", \"NODEJS\"]\n for word in word_list: # check for matching languages\n for language in all_languages:\n if word.upper() == language.upper():\n languages_mentioned.append(language.upper())\n languages_mentioned = list(dict.fromkeys(languages_mentioned)) # remove duplicates\n return languages_mentioned\n\n\ndef check_degree(desc):\n degrees_mentioned = [] # will contain degrees mentioned in the description\n word_list = desc.replace(\"'\", \"\").split(\" \") # split description into separate strings\n all_degrees = [\"CERTIFICATE\", \"CERTIFICATION\", \"ASSOCIATE\", \"ASSOCIATES\", \"A.S.\", \"BACHELOR\", \"BACHELORS\", \"B.S.\",\n \"MASTER\", \"MASTERS\", \"M.S.\", \"PHD\", \"PH.D\", \"DOCTORATE\", \"DOCTORATES\", \"DOCTORAL\"]\n for word in word_list: # check for matching degrees\n for degree in all_degrees:\n if word.upper() == degree.upper():\n degrees_mentioned.append(degree.upper())\n degrees_mentioned = list(dict.fromkeys(degrees_mentioned)) # remove duplicates\n for degree in range(len(degrees_mentioned)): # organize data for database\n if degrees_mentioned[degree].upper() == 'CERTIFICATION':\n degrees_mentioned[degree] = 'CERTIFICATE'\n elif degrees_mentioned[degree].upper() == 'ASSOCIATE' or degrees_mentioned[degree].upper() == 'A.S.':\n degrees_mentioned[degree] = 'ASSOCIATES'\n elif degrees_mentioned[degree].upper() == 'BACHELOR' or degrees_mentioned[degree].upper() == 'B.S.':\n degrees_mentioned[degree] = 'BACHELORS'\n elif degrees_mentioned[degree].upper() == 'MASTER' or degrees_mentioned[degree].upper() == 'M.S.':\n degrees_mentioned[degree] = 'MASTERS'\n elif degrees_mentioned[degree].upper() == 'PH.D' or degrees_mentioned[degree].upper() == 'DOCTORATE' or degrees_mentioned[degree].upper() == 'DOCTORATES' or degrees_mentioned[degree].upper() == 'DOCTORAL':\n degrees_mentioned[degree] = 'PHD'\n return degrees_mentioned\n\n\ndef check_keywords(desc):\n keywords_mentioned = []\n word_list = desc.replace(\"'\", \"\").split(\" \") # split description into separate strings\n interpersonal_skills = ['Assertiveness','Assertiveness','Bodylanguage','Bullying','Charisma','Clarification','Collaboration','Communication','Communication','Interpersonal','Communication, Barriers to Effective','Communication, Improving','Communication, 
Non-Verbal','Verbal','Effective','Confidentiality','Conflict','Managing','Conflict','Resolution','Mediation','Conversational','Criticism','Constructive','Criticism','Customer','Telephone','Emotional','Intelligence','Empathy','Employability','Feedback','Group','Behaviours','Cohesiveness','Life-Cycle','Groups', 'Teams','Harassment']\n    intrapersonal_skills = ['verbal communication','non-verbal communication','listening','negotiation','solving','decision-making','assertiveness','patience','empathy']\n    ide = ['Jupyter ','JupyterLab','Jupyter-Notebooks','RStudio','PyCharm','Notepad++','Spyder','Sublime Text','Vim','Emacs','MATLAB','Atom','Eclipse','NetBeans','IntelliJ','BlueJ','JDeveloper','DrJava','JCreator','jGRASP','Greenfoot','Xcode','Codenvy','RAD','Visual Studio','Visual Studio Code','CodeBlocks','CodeLite','CLion','Qt Creator','Nuclide','WebStorm','Sublime']\n\n    for word in word_list:  # check for matching keywords\n        for key in interpersonal_skills:\n            if word.upper() == key.upper():\n                keywords_mentioned.append(key.upper())\n        for key in intrapersonal_skills:\n            if word.upper() == key.upper():\n                keywords_mentioned.append(key.upper())\n        for key in ide:\n            if word.upper() == key.upper():\n                keywords_mentioned.append(key.upper())\n    keywords = list(dict.fromkeys(keywords_mentioned))  # remove duplicates\n\n    # print(keywords)\n    return keywords\n\n\n# //////////////////////////////////////////////////////////////////////////////////////// get_post() start\ndef get_post(page):\n    my_header = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'}\n    entries = []\n    post = page.find_all('div', class_='jobsearch-SerpJobCard')\n    for element in post:\n        url_post = element.find('a', attrs={'class': 'jobtitle turnstileLink'})['href']\n        url_post = \"https://www.indeed.com\" + url_post\n        title = element.find('a', attrs={'class': 'jobtitle'}).text.strip()\n        company = element.find('span', attrs={'class': 'company'}).text.strip()\n        location_dict = {'city': \"\", 'state': \"\", 'zip_code': \"\"}\n        try:\n            location = element.find(class_='location').text.strip()\n            location_dict = split_location(location)\n        except AttributeError:\n            location = \"\"\n\n        remote = False\n        try:\n            remote_check = element.find('span', attrs={'class': 'remote'}).text.strip()\n            remote = True\n        except AttributeError:\n            remote = False\n        if title.upper().find(\"REMOTE\") != -1:\n            remote = True\n\n        salary = {}\n        try:\n            salary_split = element.find('span', attrs={'class': 'salaryText'}).text.strip().replace(',', \"\").replace(\n                '$', \"\").strip().split(\"-\")\n            salary = salary_format(salary_split)\n        except AttributeError:\n            salary = {\n                'salary_year': 0,\n                'salary_hourly': 0,\n                'salary_min_year': 0,\n                'salary_max_year': 0,\n                'salary_min_hourly': 0,\n                'salary_max_hourly': 0\n            }\n\n        job_description = element.find('div', attrs={'class': 'summary'}).text.strip().replace(\"\\n\", \"\")\n\n        page = click_url(url_post, my_header)\n        job_description = get_description(page)\n\n        date_days = 0\n        date_text = element.find(class_='date').text.strip()\n        if date_text.upper() == \"TODAY\" or date_text.upper() == \"JUST POSTED\":\n            date = datetime.datetime.now()\n        else:\n            date_days = int(''.join(i for i in date_text if i.isdigit()))\n            date = datetime.datetime.now() - datetime.timedelta(days=date_days)  # subtract 'days ago' from current date\n            date = date.strftime(\"%x\")  # change to mm/dd/yy format\n\n        popular_words = most_common_word(job_description, 10)\n\n        entry = {\n            'url': url_post,\n            'title': title,\n            'company': company,\n            
'location': location_dict,\n            'remote': remote,\n            'salary': salary,\n            'job_description': job_description,\n            'most_common_words': popular_words,\n            'keywords': check_keywords(job_description),\n            'languages': check_languages(job_description),\n            'degrees': check_degree(job_description),\n            'date': str(date)\n        }\n        entries.append(entry)\n        print(entry)\n    return entries\n\n\n# //////////////////////////////////////////////////////////////////////////////////////// get_post() end\n\n\ndef get_description(page):\n    job_description = \"\"\n    try:\n        job_description = page.find('div', attrs={'id': 'jobDescriptionText'}).text.strip().replace(\"\\n\", \" \").replace('\\\\', \"\")\n    except AttributeError:\n        print(\"job_description not found\")\n    return job_description\n\n\ndef most_common_word(desc, num_results):\n    desc_list = desc.split(\" \")\n    counter = {}\n    for i in desc_list:\n        if i in counter:\n            counter[i] += 1\n        else:\n            counter[i] = 1\n    most_common = sorted(counter, key=counter.get, reverse=True)\n    results = most_common[:num_results]\n    return results\n\n\ndef connect_to_db():\n    conn = pymysql.connect(\n        host='127.0.0.1',\n        port=3306,\n        user='root',\n        passwd='###',\n        db='indeeddb'\n    )\n\n    if conn:\n        print(\"\\nConnected to database\")\n        return conn\n    else:\n        print(\"Failed to connect to database\")\n        return\n\n\ndef insert_to_db(data, conn):\n    if len(data) < 1:\n        print(\"INVALID DATA\")\n        return\n\n    my_cursor = conn.cursor()\n\n    for entry in data:\n        sql = \"INSERT IGNORE INTO post (url,title,company,city,state,zip_code,remote,job_description, date_posted) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n        val = (str(entry['url']), str(entry['title']), str(entry['company']), str(entry['location']['city']),\n               str(entry['location']['state']), str(entry['location']['zip_code']), entry['remote'],\n               str(entry['job_description']), str(entry['date']))\n        my_cursor.execute(sql, val)\n        for lang in entry['languages']:\n            sql = \"INSERT IGNORE INTO languages (url,language) VALUES (%s, %s)\"\n            val = (str(entry['url']), str(lang))\n            my_cursor.execute(sql, val)\n        for degree in entry['degrees']:\n            sql = \"INSERT IGNORE INTO degrees (url,degree) VALUES (%s, %s)\"\n            val = (str(entry['url']), str(degree))\n            my_cursor.execute(sql, val)\n        for word in range(len(entry['most_common_words'])):\n            sql = \"INSERT IGNORE INTO most_common_words (url,ranking,word) VALUES (%s, %s, %s)\"\n            val = (str(entry['url']), int(word), str(entry['most_common_words'][word]))\n            my_cursor.execute(sql, val)\n        for word in entry['keywords']:\n            sql = \"INSERT IGNORE INTO keywords (url,word) VALUES (%s, %s)\"\n            val = (str(entry['url']), str(word))\n            my_cursor.execute(sql, val)\n        sql = \"INSERT IGNORE INTO salary(url,salary_year,salary_hourly,salary_min_year,salary_max_year,salary_min_hourly,salary_max_hourly) VALUES (%s, %s, %s, %s, %s, %s, %s)\"\n        val = (str(entry['url']), str(entry['salary']['salary_year']), str(entry['salary']['salary_hourly']),\n               str(entry['salary']['salary_min_year']), str(entry['salary']['salary_max_year']),\n               str(entry['salary']['salary_min_hourly']), str(entry['salary']['salary_max_hourly']))\n        my_cursor.execute(sql, val)\n    try:\n        conn.commit()\n        print(\"Successfully inserted data into database\")\n    except pymysql.IntegrityError:\n        print(\"Failed to insert data into database\")\n    conn.close()\n\n\ndef select_from_db(sql, conn):\n    print(\"~ SELECTING FROM DATABASE\")\n    my_cursor = conn.cursor()\n\n    my_cursor.execute(sql)\n    rows = my_cursor.fetchall()\n    return rows\n\n\ndef bar_graph(data, x_title, 
legend_title):\n    if len(data) < 1:\n        print(\"Not enough data to make a bar graph\")\n        return\n    else:\n        print(\"********************************\")\n        print(\"Status: CREATING BAR GRAPH\")\n        print(\"********************************\")\n\n    names = []\n    vals = []\n    for i in range(len(data)):\n        names.append(str(data[i][0]))\n        vals.append(int(data[i][1]))\n    df = pd.DataFrame({x_title:names, legend_title:vals})\n    ax = df.plot.bar(x=x_title, y=legend_title, rot=0, color='green', figsize=(15,7))\n    plt.xticks(rotation=90)\n    plt.tight_layout()\n    plt.show()\n\n\ndef pie_chart(data, title):\n    if len(data) < 1:\n        print(\"Not enough data to make a pie chart\")\n        return\n    else:\n        print(\"********************************\")\n        print(\"Status: CREATING PIE CHART\")\n        print(\"********************************\")\n\n    names = []\n    vals = []\n    for i in range(len(data)):\n        names.append(str(data[i][0]))\n        vals.append(int(data[i][1]))\n\n    df = pd.DataFrame({title:vals}, index=names)\n    plot = df.plot.pie(y=title, figsize=(7,7))\n    plt.show()\n\n\ndef line_graph(data, title):\n    if len(data) < 1:\n        print(\"Not enough data to make a line graph\")\n        return\n    else:\n        print(\"********************************\")\n        print(\"Status: CREATING LINE GRAPH\")\n        print(\"********************************\")\n\n    names = []\n    vals = []\n    for i in range(len(data)):\n        names.append(str(data[i][0]))\n        vals.append(int(data[i][1]))\n    print(names)\n    df = pd.DataFrame({title:vals}, index=names)\n    lines = df.plot.line()\n    plt.tight_layout()\n    plt.show()\n\n\ndef word_cloud(data):\n    new_str = \"\"\n    for desc in data:\n        new_str += str(desc).replace(\"'\", \"\")\n\n    wordcloud = WordCloud(width=800, height=400, max_font_size=100, max_words=100, background_color=\"white\").generate(new_str)\n\n    plt.figure(figsize=(20,10))\n    plt.imshow(wordcloud, interpolation='bilinear')\n    plt.axis(\"off\")\n    plt.show()\n\n\ndef progress_bar(index, total, bar_len=50, title='Please wait'):\n    percent_done = (index + 1) / total * 100\n    percent_done = round(percent_done, 1)\n\n    done = round(percent_done / (100 / bar_len))\n    togo = bar_len - done\n\n    done_str = '█' * int(done)\n    togo_str = '░' * int(togo)\n\n    print(f'\\r{title}: [{done_str}{togo_str}] {percent_done}% done', end=\"\", flush=True)\n\n\nprint(\"=== Indeed Scraper ===\")\nprint(\"1. Start Scraping\")\nprint(\"2. Database\")\nprint(\"0. Exit\")\n\nprint(\"---------------------------------\")\nchoice = int(input(\"Enter an option: \"))\nprint(\"---------------------------------\")\nif choice == 1:\n    city = input(\"Enter a city: \")\n    search_field = input(\"Enter a search field for Indeed: \")\n    num_pages = int(input(\"How many pages would you like to scrape? \"))\n    starting_page = int(input(\"Which page would you like to start on? 
\"))\n if starting_page == 1:\n starting_page = 0\n percent_complete = 0\n print(\"***************************\")\n print(\"Status: STARTING\")\n print(\"~ \" + str(percent_complete) + \" of \" + str(num_pages) + \" pages complete\")\n print(\"***************************\")\n count = 0\n for i in range(starting_page*10, (num_pages*10)+(starting_page*10), 10):\n count += 1\n page = pagination(i, search_field, city)\n results = get_post(page)\n print(results)\n conn = connect_to_db()\n insert_to_db(results, conn)\n print(\"-------------------------------\")\n print(\"~ \" + str(count) + \" of \" + str(num_pages) + \" pages complete\")\n print(\"-------------------------------\")\n rand_time = random.randint(450, 700)\n print(\"*******************************************************\")\n minutes = int(rand_time/60)\n seconds = int((float(rand_time/60) - minutes) * 60)\n print(\"Status: SLEEPING FOR \" + str(minutes) + \" MINUTES and \" + str(seconds) + \" SECONDS\")\n print(\"*******************************************************\")\n for current_time in range(rand_time):\n progress_bar(current_time, rand_time)\n time.sleep(1)\n print(\"\\n\")\n print(\"***************************\")\n print(\"Status: FINISHED\")\n print(\"***************************\")\nelif choice == 2:\n print(\"********************************\")\n print(\"\\t\\t\\tDATABASE\")\n print(\"********************************\")\n print(\"1. Make a SQL query\")\n print(\"2. Gather Statistics\")\n\n print(\"---------------------------------\")\n choice2 = int(input(\"Enter an option: \"))\n print(\"---------------------------------\")\n\n print(\"********************************\")\n print(\"Status: CONNECTING TO DATABASE\")\n print(\"********************************\")\n conn = connect_to_db()\n if choice2 == 1:\n sql = input(\"Enter SQL command: \")\n select_from_db(sql, conn)\n elif choice2 == 2:\n print(\"********************************\")\n print(\"\\t\\t\\tStatistics\")\n print(\"********************************\")\n print(\"1. Make bar graphs\")\n print(\"2. Make pie charts\")\n print(\"3. Make line graphs\")\n print(\"4. 
Make word clouds\")\n\n print(\"---------------------------------\")\n choice3 = int(input(\"Enter an option: \"))\n print(\"---------------------------------\")\n\n if choice3 == 1:\n # bar graph of most common languages\n sql = \"SELECT city, COUNT(city) \" \\\n \"FROM post \" \\\n \"GROUP BY city \" \\\n \"ORDER BY COUNT(city) DESC\"\n result_city_count = select_from_db(sql, conn)\n bar_graph(result_city_count, 'Cities', 'Job listings from cities')\n\n # bar graph of most common languages\n sql = \"SELECT language, COUNT(language) \" \\\n \"FROM languages \" \\\n \"GROUP BY language \" \\\n \"ORDER BY COUNT(language) DESC\"\n result_language_count = select_from_db(sql, conn)\n bar_graph(result_language_count, 'Languages', 'Language')\n\n # bar graph of amount of remote jobs in cities\n sql = \"SELECT city, COUNT(remote) \" \\\n \"FROM post \" \\\n \"WHERE remote = 1 AND city != 'States' AND city != 'Texas'\" \\\n \"GROUP BY city \" \\\n \"ORDER BY COUNT(remote) DESC\"\n result_language_count = select_from_db(sql, conn)\n bar_graph(result_language_count, 'Cities', 'Remote jobs available')\n elif choice3 == 2:\n # pie chart of most popular degrees\n sql = \"SELECT degree, COUNT(degree) \" \\\n \"FROM degrees \" \\\n \"GROUP BY degree\"\n result_degree_count = select_from_db(sql, conn)\n pie_chart(result_degree_count, 'Degrees')\n elif choice3 == 3:\n # line graphs of different salary categories\n sql = \"SELECT language, AVG(salary_year) FROM salary, languages WHERE salary_year > 0 AND salary.url = languages.url GROUP BY language\"\n result_degree_count = select_from_db(sql, conn)\n line_graph(result_degree_count, 'Average salary per year by language')\n sql = \"SELECT language, AVG(salary_hourly) FROM salary, languages WHERE salary_hourly > 0 AND salary.url = languages.url GROUP BY language\"\n result_degree_count = select_from_db(sql, conn)\n line_graph(result_degree_count, 'Average salary per hour by language')\n sql = \"SELECT language, AVG(salary_min_year) FROM salary, languages WHERE salary_min_year > 0 AND salary.url = languages.url GROUP BY language\"\n result_degree_count = select_from_db(sql, conn)\n line_graph(result_degree_count, 'Average minimum salary per year by language')\n sql = \"SELECT language, AVG(salary_max_year) FROM salary, languages WHERE salary_max_year > 0 AND salary.url = languages.url GROUP BY language\"\n result_degree_count = select_from_db(sql, conn)\n line_graph(result_degree_count, 'Average maximum salary per year by language')\n sql = \"SELECT language, AVG(salary_max_hourly) FROM salary, languages WHERE salary_max_hourly > 0 AND salary.url = languages.url GROUP BY language\"\n result_degree_count = select_from_db(sql, conn)\n line_graph(result_degree_count, 'Average minimum salary per hour by language')\n sql = \"SELECT language, AVG(salary_max_hourly) FROM salary, languages WHERE salary_max_hourly > 0 AND salary.url = languages.url GROUP BY language\"\n result_degree_count = select_from_db(sql, conn)\n line_graph(result_degree_count, 'Average maximum salary per hour by language')\n elif choice3 == 4:\n # word cloud of most popular words in job descriptions\n sql = \"SELECT DISTINCT job_description \" \\\n \"FROM post \"\n result_job_descriptions = select_from_db(sql, conn)\n word_cloud(result_job_descriptions)\n\n # word cloud of most popular words in job descriptions\n sql = \"SELECT DISTINCT word \" \\\n \"FROM keywords \" \\\n \"GROUP BY word\"\n result_keywords = select_from_db(sql, conn)\n word_cloud(result_keywords)\n\nelse:\n exit()\n\n# 
posts[0].click()\n# print(driver.switch_to.window(driver.window_handles[1]))\n# print(driver.current_url)\n# page = click_url(driver.current_url, my_header)\n# inspect_page(page)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":24014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"269073700","text":"import MapReduce\nimport sys\n\n\"\"\"\nAsymmetric Friendships Example in the Simple Python MapReduce Framework\n\"\"\"\n\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\ndef mapper(record):\n    # record: [A, B] meaning that A likes B\n    key = record[0]\n    value = [\"likes\", record[1]]\n    mr.emit_intermediate(key, value)\n    key = record[1]\n    value = [\"is_liked\", record[0]]\n    mr.emit_intermediate(key, value)\n\ndef reducer(key, list_of_values):\n    # key: person\n    # value: list of [relation, other person] pairs\n    likes = set()\n    is_liked = set()\n\n    for v in list_of_values:\n        if v[0] == \"likes\":\n            likes.add(v[1])\n        elif v[0] == \"is_liked\":\n            is_liked.add(v[1])\n\n    # people who like this person without being liked back form asymmetric pairs\n    for asym in is_liked - likes:\n        mr.emit((asym, key))\n        mr.emit((key, asym))\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n    inputdata = open(sys.argv[1])\n    mr.execute(inputdata, mapper, reducer)\n","sub_path":"assignment3/asymmetric_friendships.py","file_name":"asymmetric_friendships.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"356609103","text":"# -*- coding: utf-8 -*-\n\n#----------------------------------------------------------------------\n\n#Run this program on PC2.\n#Function: read the sound pressure level from the microphone and send it to PC1\n\n#(applies when the number of Agents is between 1 and 3)\n#----------------------------------------------------------------------\n\n\nfrom pyaudio import PyAudio, paInt16 \nimport numpy as np\nfrom datetime import datetime \nimport wave\nimport socket\nimport time\n\ntime.sleep(22)\n\nNUM_SAMPLES = 2000 \nSAMPLING_RATE = 20000 \nLEVEL = 1500 # sound pressure levels of 1500 or below are not recorded or detected\nCOUNT_NUM = 20 # \nSAVE_LENGTH = 8 # \n\n\npa = PyAudio() \nstream = pa.open(format=paInt16, channels=1, rate=SAMPLING_RATE, input=True, \n                 frames_per_buffer=NUM_SAMPLES)\n\n\n\naddress = ('192.168.1.33', 12345)# IP address of PC1\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n\n\nwhile True: #\n    \n    string_audio_data = stream.read(NUM_SAMPLES) \n\n    audio_data = np.frombuffer(string_audio_data, dtype=np.short) \n\n    large_sample_count = np.sum( audio_data > LEVEL )\n    \n    data=np.max(audio_data)\n    data=str(data)\n    s.sendto(data.encode('utf8'), address)\n    print ('remote sent:',data)\n\n","sub_path":"client-1agent.py","file_name":"client-1agent.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"549145371","text":"# Copyright (c) 2021 Alastair Macleod\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the 
Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\nfrom __future__ import print_function\nimport math\n\nfrom maya import mel\nimport maya.cmds as m\n\nfrom peel_solve import roots, node_list\n\n\"\"\" Runs the maya peelsolver \"\"\"\n\n\ndef load_plugin():\n    \"\"\" Loads the PeelSolve plugin \"\"\"\n\n    if 'peelsolve' in ''.join(m.pluginInfo(q=True, ls=True)).lower():\n        return\n\n    ext = None\n    os = m.about(os=True)\n    if os == \"mac\":\n        ext = \"bundle\"\n    if os == \"win\":\n        ext = \"mll\"\n\n    if not ext:\n        raise RuntimeError(\"Could not determine OS\")\n\n    ver = m.about(v=True)\n    m.loadPlugin(\"peelsolve_\" + ver + \"_2540.\" + ext)\n\n\ndef go_to_pref_action(sel, trans, rot):\n    for j in sel:\n        if m.objExists(j + '.peelType') and m.getAttr(j + '.peelType') > 0:\n            continue\n\n        if rot:\n            for attr in ['X', 'Y', 'Z']:\n                if not m.objExists(j + '.jointType' + attr):\n                    continue\n                if not m.getAttr(j + '.jointType' + attr):\n                    continue\n                if m.getAttr(j + '.rotate' + attr, lock=True):\n                    continue\n                val = m.getAttr(j + '.preferredAngle' + attr)\n                try:\n                    m.setAttr(j + '.rotate' + attr, val)\n                except:\n                    pass\n\n        if trans:\n            for attr in ['X', 'Y', 'Z']:\n                if not m.objExists(j + '.preferredTrans' + attr):\n                    continue\n                val = m.getAttr(j + '.preferredTrans' + attr)\n                try:\n                    m.setAttr(j + '.translate' + attr, val)\n                except:\n                    pass\n\n\ndef solve_args(solve_type):\n    on = node_list.options_node()\n    values = {}\n\n    for val in ['start', 'end', 'increment', 'iterations', 'method', 'timeMode', 'debug',\n                'reverse', 'statistics', 'rootNodes', 'readDirect', 'scale', 'bothways',\n                'refine', 'quat', 'rootfirst', 'gradientSamples', 'threads']:\n        values[val] = m.getAttr(on + '.' 
+ val)\n\n if values['timeMode'] == 0:\n values['start'] = m.playbackOptions(q=True, min=True)\n values['end'] = m.playbackOptions(q=True, max=True)\n\n if values['scale'] < 0:\n values['scale'] = 1\n\n if solve_type == 'quick':\n values['refine'] = False\n values['method'] = 0\n values['iterations'] = 50\n\n if solve_type == 'refine':\n values['refine'] = True\n\n args = {'scl': values['scale'], 'i': values['iterations'], 'threads': values['threads']}\n\n if solve_type != 'single':\n args['st'] = values['start']\n args['end'] = values['end']\n args['inc'] = values['increment']\n\n if values['gradientSamples'] == 0: args['gs'] = 1\n if values['gradientSamples'] == 1: args['gs'] = 2\n if values['gradientSamples'] == 2: args['gs'] = 4\n\n for a, b in [('debug', 'debug'), ('statistics', 'stat'), ('reverse', 'r'), ('readDirect', 'rd'),\n ('bothways', 'bw'), ('refine', 'ref'), ('rootfirst', 'rf'), ('quat', 'quat')]:\n if values[a]: args[b] = True\n\n if values['method'] >= 0 and values['method'] <= 3:\n args['m'] = values['method']\n\n return args\n\n\ndef solve(solve_type=None):\n \"\"\" Run a solve using the settings defined on the pref node \"\"\"\n\n rn = roots.ls()\n\n if len(rn) == 0:\n m.error(\"No skeleton top node defined\")\n return None\n\n solve_types = [None, 'quick', 'refine', 'single']\n if solve_type not in solve_types:\n valid_values = 'None,' + ','.join(solve_types[1:])\n msg = \"Invalid solve type: %s, valid values: %s\" % (str(solve_type), valid_values)\n raise RuntimeError(msg)\n\n if m.objExists('PEELSNAPLOC_*'):\n ret = m.confirmDialog(t='Confirm', m='The markers may be locked, do you want to continue',\n b=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No')\n if ret == 'No':\n return None\n\n sels = m.ls(sl=True)\n\n args = solve_args(solve_type)\n\n transforms = m.peelSolve(s=rn, lt=True, ns=True)\n\n delete_keys = m.getAttr(\"peelSolveOptions.deleteKeys\")\n pre_solve_root = m.getAttr(\"peelSolveOptions.preSolveRoot\")\n pre_solve_pose = m.getAttr(\"peelSolveOptions.preSolvePose\")\n\n if solve_type not in ['single', 'refine']:\n at = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']\n if delete_keys == 2:\n m.delete(transforms, channels=True, unitlessAnimationCurves=False, hierarchy='none', at=at)\n elif delete_keys == 1:\n tr = (args['start'], args['end'])\n m.cutKey(transforms, clear=True, time=tr, option='keys', hierarchy='none', at=at)\n\n if pre_solve_root is True:\n m.peelSolve(s=rn, ro=True)\n\n if pre_solve_pose is True:\n go_to_pref_not_root()\n\n # args['e'] = True\n try:\n m.refresh(su=True)\n m.peelSolve(s=rn, e=True, **args)\n finally:\n m.refresh(su=False)\n\n if solve_type != 'single':\n chan = m.peelSolve(s=rn, ns=True, lc=True)\n m.filterCurve(chan, filter='euler')\n\n m.select(sels)\n\n\ndef run(iterations=500, inc=1, root_nodes=None, start=None, end=None):\n \"\"\"\n :param iterations: passed to peelsolve\n :param inc: frame increment\n :param root_nodes: solve roots. 
Uses the options node if none are provided.\n :param start: start frame for solve\n :param end: end frame for solve range\n Runs the solver with the specified arguments\n \"\"\"\n\n if not root_nodes:\n root_nodes = roots.ls()\n\n # Run the solve\n m.refresh(su=True)\n try:\n if start is None:\n start = m.playbackOptions(q=True, min=True)\n if end is None:\n end = m.playbackOptions(q=True, max=True)\n root_flag = ' '.join(['-s ' + i for i in root_nodes])\n m.peelSolve(s=root_nodes, st=start, end=end, inc=inc, i=iterations)\n finally:\n m.refresh(su=False)\n\n\ndef frame(iterations=500, root_nodes=None):\n \"\"\"\n :param iterations: passed to peelsolve\n :param root_nodes: solve roots. Uses the options node if none are provided.\n \"\"\"\n\n if not root_nodes:\n root_nodes = roots.ls()\n\n root_flag = ' '.join(['-s ' + i for i in root_nodes])\n\n cmd = \"peelSolve -e %s -scl 1 -i %d -threads 2 -gs 1 -quat -m 1;\" \\\n % (root_flag, iterations)\n\n print(cmd)\n mel.eval(cmd)\n\n # mel.eval(\"peelSolve2Run(4);\")\n\n\ndef go_to_pref_not_root():\n rn = roots.ls()\n jnts = m.peelSolve(lp=True, ns=True, s=rn)\n sel = m.ls(sl=True)\n m.select(jnts)\n for r in rn: m.select(r, tgl=True)\n joints = m.ls(sl=True)\n go_to_pref_action(joints, True, True)\n m.select(sel)\n\n\ndef find_char_top():\n root = roots.skel_root()\n\n while 1:\n up = m.listRelatives(root, p=True)\n if not up:\n break\n root = up\n\n return root\n\n\ndef set_mocap_range():\n keys = m.keyframe(node_list.all_markers(), q=True)\n start = math.floor(min(keys))\n end = math.ceil(max(keys))\n m.playbackOptions(min=start, max=end, ast=start, aet=end)\n\n\ndef range_selected():\n sel = m.ls(sl=True)\n if sel is None or len(sel) == 0:\n m.confirmDialog(m=\"Nothing Selected\")\n return\n\n keys = m.keyframe(sel, q=True)\n if keys is None or len(keys) == 0: return\n m.playbackOptions(min=math.floor(min(keys)), max=math.ceil(max(keys)))\n","sub_path":"python/peel_solve/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":8395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"618214772","text":"# FB tag frequency top 100\n# https://leetcode.com/problems/next-permutation/\n\n# Single Pass Approach\n# O(n) time\n# O(1) space\nclass Solution:\n def nextPermutation(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n pointer = 1\n found = False\n while pointer <= len(nums) and not found:\n if nums[-pointer:] == sorted(nums[-pointer:], reverse = True):\n pointer += 1\n else:\n found = True\n \n if not found:\n result = sorted(nums)\n for i in range(len(nums)):\n nums[i] = result[i]\n else:\n tmp_num = nums[-pointer]\n right_lst = sorted(nums[-pointer + 1:])\n for i, num in enumerate(right_lst):\n if num > tmp_num:\n right_lst[i] = tmp_num\n tmp_num = num\n break\n \n nums[-pointer] = tmp_num\n right_lst = sorted(right_lst)\n for i in range(1, pointer):\n nums[-i] = right_lst[-i]","sub_path":"leetcode/31_NextPermutation.py","file_name":"31_NextPermutation.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"307601667","text":"import logging\nimport os\nimport re\nimport sys\n\nfrom skimage.morphology import remove_small_holes\n\nfrom trainingdata import convert_wkt_to_polygon\n\nlogging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(name)s: %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S')\nlogger = 
logging.getLogger(__name__)\n\nfrom PIL import Image, ImageDraw\nimport rasterio\nimport numpy as np\nimport json\nfrom rasterio import mask\nfrom rasterio.features import shapes\nfrom shapely.geometry import shape\nimport filemanager\nimport platform\nimport basicconfig as config\n\n\ninput_dir, output_dir = filemanager.get_file_paths_based_on_os(platform.system(), filemanager.Product.grd)\nmask_dir = \"D:\\\\Texana\\\\Processing\\\\\" + config.GRD_MASK_DIR\nclassified_LC_dir = \"D:\\\\Texana\\\\Processing\\\\\" + config.LC_CLASSIFIED_DIR\n\n\ndef is_area_larger_than(image, i, j, iterations, thres):\n curr_area = 0\n for p_x in range(i, min(i + iterations, height)):\n for p_y in range(j, min(j + iterations, width)):\n if image[p_x][p_y] == config.BLACK:\n curr_area += 1\n logger.debug(\"area is {}\".format(curr_area))\n if curr_area > thres:\n return True\n return False\n\n\nmask_json = {}\nloop_dir = classified_LC_dir\nsorted_files = sorted(os.listdir(loop_dir))\nfor folder in sorted_files:\n logger.info(folder)\n img = rasterio.open(loop_dir + folder)\n file_name = re.sub(\"\\\\..*$\", \"\", folder)\n date = file_name.split('_')[-2][:8]\n LC_polygon = convert_wkt_to_polygon(config.TEXANA_WKT_REDUCED)\n [prdt_arr], prdt_xy = mask.mask(dataset=img, shapes=[LC_polygon], nodata=config.NO_DATA, all_touched=True, crop=True)\n msk = Image.fromarray(prdt_arr)\n msk.show()\n\n width, height = prdt_arr.shape[1], prdt_arr.shape[0]\n binary_img = prdt_arr\n logger.info(binary_img.shape)\n sample_spacing = 70\n area_threshold = 0.96 * sample_spacing * sample_spacing\n done = False\n\n # To reduce unnecessary checking, we starting search for the reservoir at an arbitrary point\n for x in range(0, height, sample_spacing // 2):\n for y in range(0, width, sample_spacing // 2):\n if binary_img[x][y] == config.BLACK:\n if is_area_larger_than(binary_img, x, y, sample_spacing, area_threshold) and is_area_larger_than(\n binary_img, x + sample_spacing // 2, y, sample_spacing, area_threshold):\n logger.info(\"Found the reservoir!\")\n ImageDraw.floodfill(msk, (y, x), config.RESERVOIR_COLOR)\n done = True\n break\n if done:\n break\n if not done:\n logger.critical(\"No reservoir was found.\")\n sys.exit(\"No reservoir was found.\")\n\n msk_arr = np.array(msk).astype(np.uint8)\n msk_arr[msk_arr == config.NO_DATA] = config.LAND\n msk_arr[msk_arr == config.WHITE] = config.LAND\n msk_arr = remove_small_holes(msk_arr, area_threshold=2048).astype(np.uint8)\n msk_arr[msk_arr == config.WATER] = config.RESERVOIR_COLOR\n\n mask_prdt_path = mask_dir + date + f'_{config.POLARIZATIONS}' + '.tif'\n with rasterio.open(\n mask_prdt_path,\n 'w',\n driver='GTiff',\n height=height,\n width=width,\n count=1,\n dtype=np.uint8,\n nodata=config.BLACK,\n crs=img.crs,\n transform=img.transform,\n ) as dst:\n dst.write(msk_arr, 1)\n\n with rasterio.open(mask_prdt_path, 'r') as src:\n image = src.read(1, masked=True) # first band\n results = (\n {'properties': {'raster_val': v}, 'geometry': s}\n for i, (s, v) in enumerate(shapes(image, mask=None, transform=src.transform)))\n geoms = list(results)\n shp = shape(geoms[0]['geometry'])\n mask_json[date + f'_{config.POLARIZATIONS}'] = str(shp)\n\nwith open(mask_dir + 'data.json', 'w', encoding='utf-8') as f:\n json.dump(mask_json, f, ensure_ascii=False, indent=4)\nlogger.info(\"Completed\")\n\nif platform.system() == \"Windows\":\n import winsound\n duration = 1000 # milliseconds\n freq = 1000 # Hz\n winsound.Beep(freq, duration)\n winsound.Beep(freq, 
duration)","sub_path":"datacleaning/permanent_water_mask_texana.py","file_name":"permanent_water_mask_texana.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"633627727","text":"from django.test import TestCase\nfrom api_2007scape_tools.helpers.images import save_image_field_from_url\nfrom api_2007scape_tools.items.models import Item\nfrom config.settings.base import WIKI_ENDPOINT\n\n\nclass ImagesHelperTest(TestCase):\n def test_save_image_file(self):\n item = Item.objects.create(id=0, is_members=True, is_tradeable=True)\n\n save_image_field_from_url(\n item,\n \"icon\",\n \"https://vignette.wikia.nocookie.net/2007scape/images/5/55/Necklace_of_anguish.png/revision/latest?cb=20180125173253\",\n \"Necklace_of_anguish.png\",\n )\n self.assertIsNotNone(item.icon)\n","sub_path":"api_2007scape_tools/helpers/tests/test_images.py","file_name":"test_images.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"287333905","text":"#!/usr/bin/env python3\n# -*- coding: utf-8; mode: python -*-\n\"\"\" A simple script to check property on prime numbers, as explained in this article https://www.quantamagazine.org/mathematicians-discover-prime-conspiracy-20160313/.\n\n- Author: Lilian Besson, (C) 2018.\n- Online: https://bitbucket.org/lbesson/bin/\n- Licence: MIT Licence (http://lbesson.mit-license.org).\n\"\"\"\nfrom sympy import sieve, nextprime\n\n\n\ndef ends_by_0(p):\n return (p % 10) == 9\n\n\ndef next_ends_by_9(p):\n np = nextprime(p)\n return (np % 10) == 9\n\n\ndef next_ends_by_1(p):\n np = nextprime(p)\n return (np % 10) == 1\n\n\n\ndef main(maxn):\n primes = sieve\n primes.extend(maxn)\n primes = primes._list\n nb_primes = len(primes)\n print(f\"We found {nb_primes} primes smaller or equal than {maxn}...\")\n\n filtered_primes = [ p for p in primes if ends_by_0(p) ]\n nb_filtered_primes = len(filtered_primes)\n print(f\"We found {nb_filtered_primes} primes that finishes by 9...\")\n rate = float(nb_filtered_primes) / float(nb_primes)\n print(f\"That's about {rate:.3%}...\")\n\n primes_satisfying_property = [ p for p in filtered_primes if next_ends_by_9(p) ]\n nb_primes_satisfying_property = len(primes_satisfying_property)\n print(f\"\\nWe found {nb_primes_satisfying_property} primes that has next primes finishing by 9...\")\n second_rate = float(nb_primes_satisfying_property) / float(nb_filtered_primes)\n print(f\"That's about {second_rate:.3%}...\")\n\n primes_satisfying_property = [ p for p in filtered_primes if next_ends_by_1(p) ]\n nb_primes_satisfying_property = len(primes_satisfying_property)\n print(f\"\\nWe found {nb_primes_satisfying_property} primes that has next primes finishing by 1...\")\n third_rate = float(nb_primes_satisfying_property) / float(nb_filtered_primes)\n print(f\"That's about {third_rate:.3%}...\")\n print(f\"\\n==> which is about {third_rate/second_rate:.3%} more!\")\n\n return 0\n\nif __name__ == '__main__':\n from sys import argv, exit\n maxn = int(argv[1]) if len(argv) > 1 else 1000\n exit(main(maxn))\n\n","sub_path":"MY_REPOS/my-gists/_CURRENT/8b2c5f97ac/check-property-on-prime-numbers.py","file_name":"check-property-on-prime-numbers.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"36975719","text":"import numpy as np\nfrom assocplots.qqplot import *\nfrom 
assocplots.manhattan import *\n\ndatf=np.genfromtxt('OCD_ADHD_female.META.CHR.POS', dtype=None, skip_header=1)\ndatm=np.genfromtxt('OCD_ADHD_male.META.CHR.POS', dtype=None, skip_header=1)\n\n##### QQ plot\nimport matplotlib as mpl\nmpl.rcParams['figure.dpi']=150\nmpl.rcParams['savefig.dpi']=150\nmpl.rcParams['figure.figsize']=6.375, 6.375\n\nplt.clf()\nqqplot([datf['f2'], datm['f2']], ['Female $\\lambda=1.104$', 'Male $\\lambda=1.150$'], color=['r','b'], fill_dens=[0.2,0.2], error_type='theoretical', distribution='beta', title='')\nplt.ylim([0,8])\n#plt.show()\nplt.savefig('OCD_ADHD_mal_fem_QQ.png', dpi=300)\n\nget_lambda(datf['f2'], definition = 'median')\n#1.1038305474906456\nget_lambda(datm['f2'], definition = 'median')\n#1.1502208100009044\n\n##### Manhattan Plot\n\nimport matplotlib as mpl\nmpl.rcParams['figure.dpi']=150\nmpl.rcParams['savefig.dpi']=150\nmpl.rcParams['figure.figsize']=[12.375, 6.375]\n\nchrs = [str(i) for i in range(1,23)]\nchrs_names = np.array([str(i) for i in range(1,23)])\nchrs_names[18::2] = ''\n\ncmap = plt.get_cmap('Greys')\ncolors = [cmap(i) for i in [1.0,0.6,1.0,0.6]]\n\nplt.clf()\nmanhattan( datf['f2'], datf['f21'], datf['f20'], 'OCD-ADHD Female Meta',\n p2=datm['f2'], pos2=datm['f21'], chr2=datm['f20'], label2='OCD-ADHD Male Meta',\n type='inverted',\n chrs_plot=[str(i) for i in range(1,23)],\n chrs_names=chrs_names,\n cut = 0,\n title='',\n xlabel='chromosome',\n ylabel='-log10(p-value)',\n lines= [7.3],\n top1 = 10,\n top2 = 10,\n colors = colors)\nplt.savefig('OCD_ADHD_mal_fem_Manhattan.png', dpi=300)\n\n","sub_path":"inverted_manhattan.py","file_name":"inverted_manhattan.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"472510781","text":"from zmodulo.plot.properties.color.color import Color\n\n\nclass LineColor:\n \"\"\" A Z-Tree plot line color \"\"\"\n\n def __init__(self, color=None):\n \"\"\"\n Initializes the LineColor object\n :param color: line color\n :type color: Color\n \"\"\"\n if color is None:\n self.color = Color()\n else:\n self.color = color\n\n self.template = '\\tlinecolor = {line_color};\\n'\n\n def to_str(self):\n \"\"\"\n Converts the LineColor instance to a z-tree plot line property declaration\n :return: Z-Tree linecolor property declaration\n :rtype: str\n \"\"\"\n return self.template.format(line_color=self.color.to_str())\n\n","sub_path":"zmodulo/plot/line/line_color.py","file_name":"line_color.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"651818037","text":"import json\nimport time\nfrom typing import NamedTuple, Dict\nfrom lnbits import bolt11\nfrom lnbits.core.services import pay_invoice\nfrom . 
import db\nfrom .helpers import get_callback_url, LnurlValidationError\n\n\nclass Bleskomat(NamedTuple):\n id: str\n wallet: str\n api_key_id: str\n api_key_secret: str\n api_key_encoding: str\n name: str\n fiat_currency: str\n exchange_rate_provider: str\n fee: str\n\n\nclass BleskomatLnurl(NamedTuple):\n id: str\n bleskomat: str\n wallet: str\n hash: str\n tag: str\n params: str\n api_key_id: str\n initial_uses: int\n remaining_uses: int\n created_time: int\n updated_time: int\n\n def has_uses_remaining(self) -> bool:\n # When initial uses is 0 then the LNURL has unlimited uses.\n return self.initial_uses == 0 or self.remaining_uses > 0\n\n def get_info_response_object(self, secret: str) -> Dict[str, str]:\n tag = self.tag\n params = json.loads(self.params)\n response = {\"tag\": tag}\n if tag == \"withdrawRequest\":\n for key in [\"minWithdrawable\", \"maxWithdrawable\", \"defaultDescription\"]:\n response[key] = params[key]\n response[\"callback\"] = get_callback_url()\n response[\"k1\"] = secret\n return response\n\n def validate_action(self, query: Dict[str, str]) -> None:\n tag = self.tag\n params = json.loads(self.params)\n # Perform tag-specific checks.\n if tag == \"withdrawRequest\":\n for field in [\"pr\"]:\n if not field in query:\n raise LnurlValidationError(f'Missing required parameter: \"{field}\"')\n # Check the bolt11 invoice(s) provided.\n pr = query[\"pr\"]\n if \",\" in pr:\n raise LnurlValidationError(\"Multiple payment requests not supported\")\n try:\n invoice = bolt11.decode(pr)\n except ValueError:\n raise LnurlValidationError(\n 'Invalid parameter (\"pr\"): Lightning payment request expected'\n )\n if invoice.amount_msat < params[\"minWithdrawable\"]:\n raise LnurlValidationError(\n 'Amount in invoice must be greater than or equal to \"minWithdrawable\"'\n )\n if invoice.amount_msat > params[\"maxWithdrawable\"]:\n raise LnurlValidationError(\n 'Amount in invoice must be less than or equal to \"maxWithdrawable\"'\n )\n else:\n raise LnurlValidationError(f'Unknown subprotocol: \"{tag}\"')\n\n async def execute_action(self, query: Dict[str, str]):\n self.validate_action(query)\n used = False\n async with db.connect() as conn:\n if self.initial_uses > 0:\n used = await self.use(conn)\n if not used:\n raise LnurlValidationError(\"Maximum number of uses already reached\")\n tag = self.tag\n if tag == \"withdrawRequest\":\n try:\n payment_hash = await pay_invoice(\n wallet_id=self.wallet,\n payment_request=query[\"pr\"],\n )\n except Exception:\n raise LnurlValidationError(\"Failed to pay invoice\")\n if not payment_hash:\n raise LnurlValidationError(\"Failed to pay invoice\")\n\n async def use(self, conn) -> bool:\n now = int(time.time())\n result = await conn.execute(\n \"\"\"\n UPDATE bleskomat.bleskomat_lnurls\n SET remaining_uses = remaining_uses - 1, updated_time = ?\n WHERE id = ?\n AND remaining_uses > 0\n \"\"\",\n (now, self.id),\n )\n return result.rowcount > 0\n","sub_path":"lnbits/extensions/bleskomat/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"22116217","text":"import nbformat as nbf\nimport sys\nimport parseDocstring as docparse\n\ndef load_notebook(name):\n\treturn nbf.read(name,nbf.current_nbformat)\n\ndef fix_notebook(nb):\n\tfor cell in nb.cells:\n\t\tfix_cell(cell)\n\tjoinSetups(nb)\n\treturn nb\n\nisSetup = lambda cell: 'specialcell_type' in cell['metadata'] and cell['metadata']['specialcell_type'] == 
'setup'\nisOK = lambda cell: 'specialcell_type' in cell['metadata'] and cell['metadata']['specialcell_type'] == 'OK'\n\ndef joinSetups(nb):\n\tfirstSetup = [c for c in nb.cells if isSetup(c)][0]\n\tfirstSetup['source'] += \"\\nfrom client.api.assignment import load_assignment\\nautograder = load_assignment('main.ok')\"\n\ndef save_notebook(nb,name):\n\tnbf.write(nb,open(name+\".ipynb\",\"w\"))\n\ndef fix_cell(cell):\n\tif cell['cell_type'] == 'markdown':\n\t\tfix_cell_markdown(cell)\n\telif cell['cell_type'] == 'code':\n\t\tfix_cell_code(cell)\n\tclear_outputs(cell)\n\ndef fix_cell_markdown(cell):\n\tmetadata = cell['metadata']\n\tif 'purpose' in metadata and metadata['purpose'] == 'solution':\n\t\tcell['source'] = 'Enter your solution here'\n\ndef fix_cell_code(cell):\n\tmetadata = cell['metadata']\n\tif 'purpose' in metadata and metadata['purpose'] == 'solution':\n\t\tlines = cell['source'].split('\\n')\n\t\ti = 0\n\t\tresult = []\n\t\twhile i < (len(lines)-1):\n\t\t\tif \"#solution\" in lines[i].lower():\n\t\t\t\tlines[i] = \"\"\n\t\t\t\ti +=1\n\t\t\t\tcontinue\n\t\t\tif '=' in lines[i]:\n\t\t\t\tlines[i] = lines[i].split('=')[0] + '= ... # Write your solution here'\n\t\t\tif 'def ' in lines[i]:\n\t\t\t\tlines[i+1] = '\\t... # Your code here'\n\t\t\t\ti += 2\n\t\t\t\twhile i < len(lines)-1 and '\\t' in lines[i]:\n\t\t\t\t\tdel lines[i]\n\t\t\tresult.append(lines[i])\n\t\t\ti += 1\n\n\t\tcell['source'] = \"\\n\".join(result)\n\ndef clear_outputs(cell):\n\tif 'outputs' in cell:\n\t\tcell['outputs'] = []\n\n\ndef generate_solution(nb):\n\tnb = nb.copy()\n\tnb.cells = [cell for cell in nb.cells if not isOK(cell)]\n\treturn nb\n\nif __name__ == \"__main__\":\n\tfileToParse = sys.argv[-1]\n\tprint(\"Attempting to read from : \", fileToParse+\".ipynb\")\n\tnb = load_notebook(fileToParse+\".ipynb\")\n\toutputLocation = \"build/%s\"%fileToParse\n\n\n\tprint(\"Generating Solution Notebook\")\n\tsolution_nb = generate_solution(nb)\n\tsave_notebook(solution_nb,\"%s/solution\"%outputLocation)\n\n\n\tprint(\"Parsing notebook\")\n\tfix_notebook(nb)\n\n\n\tprint(\"Generating OK Tests\")\n\tdocparse.generateDoctests(nb,outputLocation)\n\tsave_notebook(nb,\"%s/student\"%outputLocation)\n","sub_path":"generators/parseNotebook.py","file_name":"parseNotebook.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"368834826","text":"# -*- coding: utf-8 -*-\n'''\nThe sum of the squares of the first ten natural numbers is,\n1² + 2² + ... + 10² = 385\nThe square of the sum of the first ten natural numbers is,\n(1 + 2 + ... 
+ 10)² = 55² = 3025\nHence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 − 385 = 2640.\nFind the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.\n'''\ndef sum_power(number):\n    sum=0\n    for i in range (number+1):\n        sum=sum+(i**2)\n    return sum\n\ndef power_sum(number):\n    sum=0\n    for i in range (number+1):\n        sum=sum+i\n    return sum**2\n\nprint(power_sum(100)-sum_power(100))\n","sub_path":"problem6.py","file_name":"problem6.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"37182291","text":"from datetime import datetime\nimport logging\nimport uuid\n\nfrom aiohttp_session import get_session\nimport sqlalchemy as sa\n\nfrom app.models import sa_user_sessions\n\nlog = logging.getLogger(__name__)\n\n\nclass UserSession:\n    def __init__(self, request):\n        \"\"\"\n        :param aiohttp.web.Request request:\n        \"\"\"\n        self.request = request\n\n    async def user_id(self):\n        \"\"\" Returns the user ID of the session \"\"\"\n        session = await get_session(self.request)\n        session_id = session.get('session_id')\n\n        if session_id:\n            client_ip = self.client_ip()\n\n            async with self.request.app['pg_engine'].acquire() as conn:\n                result = await conn.execute(\n                    sa.select([sa_user_sessions.c.user_id]).where(sa_user_sessions.c.id == session_id)\n                    .where(sa_user_sessions.c.client_ip == client_ip))\n                return await result.scalar()\n\n    async def create(self, user_id):\n        \"\"\" Creates the session ID \"\"\"\n        session = await get_session(self.request)\n\n        session_id = str(uuid.uuid4())\n        client_ip = self.client_ip()[:32]\n        client_agent = self.request.headers.get('User-Agent', '')[:256]\n\n        async with self.request.app['pg_engine'].acquire() as conn:\n            await conn.execute(sa_user_sessions.insert().values(\n                id=session_id,\n                user_id=user_id,\n                client_ip=client_ip,\n                client_agent=client_agent,\n                created_on=datetime.utcnow(),\n            ))\n\n        session['session_id'] = session_id\n\n    async def delete(self):\n        \"\"\" Deletes the session ID \"\"\"\n        session = await get_session(self.request)\n\n        session_id = session.pop('session_id', None)\n        client_ip = self.client_ip()[:32]\n\n        if session_id:\n            try:\n                async with self.request.app['pg_engine'].acquire() as conn:\n                    await conn.execute(sa_user_sessions.delete().where(sa.and_(\n                        sa_user_sessions.c.id == session_id,\n                        sa_user_sessions.c.client_ip == client_ip,\n                    )))\n            except Exception as e:\n                log.error(e)\n\n    def client_ip(self):\n        \"\"\" Returns the user client IP \"\"\"\n        host = '0.0.0.0'\n\n        peername = self.request.transport.get_extra_info('peername')\n        if peername is not None:\n            host, _ = peername\n\n        return host\n","sub_path":"app/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"210497019","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, Http404\nfrom django.http import StreamingHttpResponse\nfrom django.http import FileResponse\n\ndef index(request):\n    return render(request, 'index.html')\n\ndef download1(request):\n    file_path = 'D:\cat.jpg'\n    try:\n        r = HttpResponse(open(file_path, 'rb'))\n        r['Content-Type'] = 'application/octet-stream'\n        r['Content-Disposition'] = 'attachment; filename=cat.jpg'\n        return r\n    except Exception:\n        raise Http404('Download error')\n\ndef download2(request):\n    file_path = 'D:\duck.jpg'\n    try:\n        r = 
StreamingHttpResponse(open(file_path, 'rb'))\n        r['Content-Type'] = 'application/octet-stream'\n        r['Content-Disposition'] = 'attachment; filename=duck.jpg'\n        return r\n    except Exception:\n        raise Http404('Download error')\n\ndef download3(request):\n    file_path = 'D:\dog.jpg'\n    try:\n        f = open(file_path, 'rb')\n        r = FileResponse(f, as_attachment=True, filename='dog.jpg')\n        return r\n    except Exception:\n        raise Http404('Download error')","sub_path":"Django Web应用开发实战/chapter4/4.1.4/MyDjango/index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"43480530","text":"import psycopg2, psycopg2.extras\nimport os\nimport glob\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom matplotlib import patches\nfrom matplotlib.pyplot import figure\nfrom datetime import timedelta, date\n\n\n\n\ndef date_range(start_date, end_date):\n    for n in range(int ((end_date - start_date).days)):\n        yield start_date + timedelta(n)\n\n# transform array to rectangle shape\ndef trans2rect(arr):\n    tarr = []\n    trend = arr[0]\n    width = 1\n    day = 0\n    for elm in arr[1:]:\n        if elm == trend:\n            width += 1\n        else:\n            tarr.append((trend, day, width))\n            trend = elm\n            day += width\n            width = 1\n    tarr.append((trend, day, width))\n    return tarr\n\n\n\n\n\nconn = psycopg2.connect(**eval(open('auth.txt').read()))\ncmd = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\ncmd.execute('select * from market_index where mid = 3 and dt=%(dt)s',dict(dt='2005-02-01'))\nrecs = cmd.fetchall()\n\ndf = pd.DataFrame(recs, columns = recs[0].keys())\n\ndf['co'] = df['close']-df['open']\n\n#change column order\ndf.loc[:,['dt', 'tm', 'open', 'close', 'high', 'low']]\n\n\n\npredict_horizon = 10\nlabel_threshold = 0.0007\n\nstart_date = date(2010, 6, 7)\nend_date = date(2010, 6, 9)\n\nfigure(num=None, figsize=(48, 10), dpi=80, facecolor='w', edgecolor='k')\n\n#run from start_date to end_date-1day\nfor single_date in date_range(start_date, end_date):\n    cmd.execute('select * from market_index where mid = 1 and dt=%(dt)s',dict(dt=single_date.strftime(\"%Y-%m-%d\")))\n    recs = cmd.fetchall()\n\n    if recs == []:\n        continue;\n\n    df = pd.DataFrame(recs, columns = recs[0].keys())\n\n    df.sort_values(by='dt')\n\n    #df = df[df.origin == True]\n\n    df = df.drop(columns = ['mid', 'tm', 'volume', 'origin'])\n\n    #percentage change of each row\n    #df['pct'] = df['close'].pct_change()\n    #df['pct'] = df['pct'].shift(-1)\n\n    df['horizon avg'] = 0.000000\n\n    #use previous 30mins to predict 10 min horizon(k=10)\n\n    #list slicing doesn't include last element; pd.Dataframe loc does include\n    for i in df.index:\n        df.loc[i,'horizon avg'] = df.loc[i+1:i+predict_horizon]['close'].sum()/float(predict_horizon)\n\n    df['pct'] = (df['horizon avg']-df['close'])/df['close']\n\n    df['target'] = 1\n\n    #label 0: pct greater than or equal to +label_threshold\n    #label 1: pct strictly between -label_threshold and +label_threshold\n    #label 2: pct smaller than or equal to -label_threshold\n    df.loc[df['pct'] >= label_threshold, 'target'] = 0\n    df.loc[df['pct'] <= (-1)*label_threshold, 'target'] = 2\n\n    label = df['target'].values.tolist()\n    label = label[:-predict_horizon]\n\n    ax = plt.subplot(111)\n    tans = trans2rect(label)\n\n    tans_stats = sorted(tans, key=lambda x: x[2])\n    for a in tans:\n        if a[0] == 0:\n            col = (1,.6,.6)\n        elif a[0] == 1:\n            col = 'w'\n        elif a[0] == 2:\n            col = (.6,1,.6) \n\n        ax.add_patch(patches.Rectangle((a[1],0), a[2],1, color=col))\n\n    close_price = df['close'].values.tolist()\n    close_price = 
[(float(i)-min(close_price))/(max(close_price)-min(close_price)) for i in close_price]\n    close_price = close_price[:-predict_horizon]\n    \n    plt.plot(close_price)\n    #plt.plot(ps)\n    plt.title('date={}, k={}, threshold={}, #labels={}, max_period={}'.format(single_date, predict_horizon, label_threshold, len(tans), tans_stats[-1][2]))\n    plt.savefig('date={}_k={}_threshold={}.png'.format(single_date, predict_horizon, label_threshold*10000))\n    plt.clf()\n\n    ","sub_path":"Jump_Trend_labeling/Trend/ohlc.py","file_name":"ohlc.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"262964249","text":"from qiskit import IBMQ\nfrom .runner import Runner\n\ndef create_app():\n    # Check if an account is currently loaded and if it isn't, save one\n    if not IBMQ.active_account():\n        IBMQ.save_account('MY_API_TOKEN')\n\n    runner = Runner()\n\n    return runner","sub_path":"application/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"588100364","text":"from pathlib import Path\nimport sys\n\nsys.path.append(str([p for p in Path(__file__).resolve().parents if (p / '.root.dir').exists()][0]))\n\nimport typing\nimport os\nimport tensorflow as tf\nimport inspect\nimport util.util as util\nimport numpy as np\nfrom dataclasses import dataclass, field\nfrom mpl_toolkits import mplot3d\nimport matplotlib.pyplot as plt\n\n\n@dataclass\nclass Hyperparameters:\n    # -------------------------------------\n    _max_weight: float = 1\n\n    @property\n    def max_weight(self):\n        return self._max_weight\n\n    # -------------------------------------\n    _k: float = 100\n\n    @property\n    def k(self):\n        return self._k\n\n    @k.setter\n    def k(self, k: float):\n        self._k = k\n        self._min_weight = self._max_weight / self._k\n\n    # -------------------------------------\n    _min_weight: float = _max_weight / _k\n\n    @property\n    def min_weight(self):\n        return self._min_weight\n\n    @min_weight.setter\n    def min_weight(self, min_weight: float):\n        self._min_weight = min_weight\n        self._k = self._max_weight / self._min_weight\n\n    # -------------------------------------\n    alpha_ma: float = 0.95\n\n    # @property\n    # def alpha_ma(self):\n    #     return self._alpha_ma\n\n    # -------------------------------------\n    beta_inhibitory: float = 0.1\n\n    # @property\n    # def beta_inhibitory(self):\n    #     return self._beta_inhibitory\n\n\n# ---------------------------------------------------------------------------------------------------------------\n\ndef create_dense_layer(x, input_dim, output_dim, layer_name, weight_init, act=tf.nn.relu):\n    with tf.variable_scope(layer_name):\n        with tf.variable_scope('weights'):\n            W = tf.Variable(weight_init, name='W')\n            # W = tf.get_variable('W', [input_dim, output_dim], initializer=weight_init)\n            util.variable_summaries(W)\n        with tf.variable_scope('biases'):\n            b = tf.Variable(tf.constant(0.0, shape=[output_dim]), name='b')\n            util.variable_summaries(b)\n        with tf.variable_scope('Wx_plus_b'):\n            z = tf.add(tf.matmul(x, W), b)\n            a = act(z, name='activation')\n        return a\n\n\n# ---------------------------------------------------------------------------------------------------------------\n\n# ---------------------------------------------------------------------------------------------------------------\n\ndef random_norm_weights_unit_length(input_dim: int, output_dim: int):\n    # 
https://stackoverflow.com/questions/33976911/generate-a-random-sample-of-points-distributed-on-the-surface-of-a-unit-sphere\n # https://codereview.stackexchange.com/questions/77927/generate-random-unit-vectors-around-circle?newreg=923d2f062c7147f6a9d53455d3ff8f1e\n mat = np.random.randn(input_dim, output_dim)\n # for d1 in range(output_dim):\n # length: float = 0\n # for d0 in range(input_dim):\n # length += mat[d0][d1]**2\n # print(f'Length of {d1}th column before normalising is: {length**0.5}')\n\n mat /= np.linalg.norm(mat, axis=0)\n # print(f'random_norm_weights_unit_length: mat:\\n{mat}')\n xdata = list()\n ydata = list()\n zdata = list()\n for d1 in range(output_dim):\n length: float = 0\n for d0 in range(input_dim):\n xdata.append(d0)\n ydata.append(d1)\n zdata.append(mat[d0][d1])\n length += mat[d0][d1] ** 2\n # print(f'Length of {d1}th column after normalising is: {length**0.5}')\n\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n ax.scatter3D(xdata, ydata, zdata, c=zdata, cmap='Greens');\n plt.show()\n\n return mat\n\n\ndef random_uniform_weights_inh_and_exc(input_dim: int,\n output_dim: int,\n beta_inhibitory: float,\n min_weight_1: float,\n min_weight_2: float):\n inhibitory = int(output_dim * beta_inhibitory)\n excitatory = int(output_dim - inhibitory)\n assert inhibitory + excitatory == output_dim\n print(f'{inspect.stack()[0][3]}: Creating {inhibitory} inhibitory neurons and '\n f'{excitatory} excitatory neurons with initial values in range [{min_weight_1}; {min_weight_2}]')\n with tf.variable_scope('weights'):\n excitatory_matrix = np.random.uniform(low=min_weight_1, high=min_weight_2, size=(input_dim, excitatory))\n excitatory_w = tf.Variable(initial_value=excitatory_matrix.astype(np.float32), dtype=tf.float32)\n inhibitory_matrix = np.random.uniform(low=min_weight_1, high=min_weight_2, size=(input_dim, inhibitory))\n inhibitory_w = tf.Variable(initial_value=inhibitory_matrix.astype(np.float32), dtype=tf.float32)\n w = tf.concat([excitatory_w, inhibitory_w], axis=1)\n util.variable_summaries(w)\n assert w.shape == (input_dim, output_dim)\n return excitatory_w, inhibitory_w, w\n\n\ndef const_weights_inh_and_exc(input_dim: int, output_dim: int, beta_inhibitory: float, min_weight: float):\n inhibitory = int(output_dim * beta_inhibitory)\n excitatory = int(output_dim - inhibitory)\n assert inhibitory + excitatory == output_dim\n print(f'{inspect.stack()[0][3]}: Creating {inhibitory} inhibitory neurons and '\n f'{excitatory} excitatory neurons with initial value +-{min_weight}')\n\n with tf.variable_scope('weights'):\n excitatory_w = tf.Variable(tf.constant(min_weight, shape=[input_dim, excitatory]), name='excitatory')\n inhibitory_w = tf.Variable(tf.constant(-min_weight, shape=[input_dim, inhibitory]), name='inhibitory')\n w = tf.concat([excitatory_w, inhibitory_w], axis=1)\n util.variable_summaries(w)\n assert w.shape == (input_dim, output_dim)\n return excitatory_w, inhibitory_w, w\n\n\n@dataclass\nclass Weights:\n excitatory_w: typing.Any = None\n inhibitory_w: typing.Any = None\n # assign_excitatory_op: typing.Any = None\n # assign_inhibitory_op: typing.Any = None\n assign_op: typing.Any = None\n w: typing.Any = None\n\n @staticmethod\n def pos_and_neg_const(input_dim: int, output_dim: int, beta_inhibitory: float, min_weight: float) -> 'Weights':\n r: Weights = Weights()\n r.excitatory_w, r.inhibitory_w, r.w = const_weights_inh_and_exc(input_dim=input_dim,\n output_dim=output_dim,\n beta_inhibitory=beta_inhibitory,\n min_weight=min_weight)\n return r\n\n 
@staticmethod\n def pos_and_neg_random_uniform_const(input_dim: int,\n output_dim: int,\n beta_inhibitory: float,\n min_weight_1: float,\n min_weight_2: float) -> 'Weights':\n r: Weights = Weights()\n r.excitatory_w, r.inhibitory_w, r.w = random_uniform_weights_inh_and_exc(input_dim=input_dim,\n output_dim=output_dim,\n beta_inhibitory=beta_inhibitory,\n min_weight_1=min_weight_1,\n min_weight_2=min_weight_2)\n return r\n\n def split(self, value_to_split):\n # https://stackoverflow.com/questions/47699569/tf-assign-on-tf-concat-tensor-drops-variable-character-of-tensors\n # https://www.tensorflow.org/api_docs/python/tf/split\n axis = 1\n excitatory, inhibitory = tf.split(value=value_to_split,\n num_or_size_splits=[self.excitatory_w.shape[axis].value,\n self.inhibitory_w.shape[axis].value],\n axis=axis)\n assert excitatory.shape[axis] == self.excitatory_w.shape[axis]\n assert inhibitory.shape[axis] == self.inhibitory_w.shape[axis]\n # assert excitatory.shape == self.excitatory_w.shape not necessarily equal\n # assert inhibitory.shape == self.inhibitory_w.shape\n return excitatory, inhibitory\n\n def assign(self, new_w):\n excitatory_w, inhibitory_w = self.split(new_w)\n self.assign_op = tf.group(tf.assign(ref=self.excitatory_w, value=excitatory_w),\n tf.assign(ref=self.inhibitory_w, value=inhibitory_w))\n return self.assign_op\n\n\n@dataclass\nclass Layer:\n def __init__(self, x, weights: Weights, b: float, hp: Hyperparameters) -> None:\n print(f'Layer::{inspect.stack()[0][3]}: x : {x}')\n print(f'Layer::{inspect.stack()[0][3]}: weights: {weights}')\n print(f'Layer::{inspect.stack()[0][3]}: b : {b}')\n self.x = x # tf.transpose(x)\n self.weights = weights\n self.b = b\n\n with tf.variable_scope('Learning'):\n self.alpha_ma_w = tf.scalar_mul(scalar=hp.alpha_ma, x=self.weights.w)\n self.one_minus_alpha_ma_x = tf.scalar_mul(scalar=1. 
- hp.alpha_ma, x=tf.transpose(self.x))\n self.new_w = self.alpha_ma_w + self.one_minus_alpha_ma_x\n self.learn_weights_op = self.weights.assign(self.new_w)\n\n with tf.variable_scope('Layer'):\n with tf.variable_scope('Wx_plus_b'):\n z = tf.add(tf.matmul(self.x, self.weights.w), self.b)\n activation = tf.nn.relu(z, name='Activation')\n excitatory, inhibitory = self.weights.split(activation)\n\n self.excitatory_y = excitatory\n self.inhibitory_y = tf.scalar_mul(scalar=-1., x = inhibitory)\n\n self.y = tf.concat([self.excitatory_y, self.inhibitory_y], axis=1)\n util.variable_summaries(self.y)\n\n\n weights: typing.Any = None\n b: typing.Any = None\n x: typing.Any = None\n\n excitatory_y: typing.Any = None\n inhibitory_y: typing.Any = None\n y: typing.Any = None\n\n alpha_ma_w: typing.Any = None\n one_minus_alpha_ma_x: typing.Any = None\n new_w: typing.Any = None\n learn_weights_op: typing.Any = None\n\n\ndef train(flags: util.Flags, hp: util.Hyperparameters):\n print(f'\\n\\n\\nStarting training with the following parameters: {hp.to_string()}')\n sess = tf.InteractiveSession()\n\n\ndef main():\n sess = tf.InteractiveSession()\n\n hp: Hyperparameters = Hyperparameters()\n hp.beta_inhibitory = 0.25\n\n flags: util.Flags = util.Flags(log_dir=\"/home/dimanne/devel/tensorboard_logs/\" + os.path.basename(__file__),\n data_dir=\"/home/dimanne/devel/data/\")\n util.clean_existing_tensorboard_dir(flags=flags)\n\n util_hp: util.Hyperparameters = util.Hyperparameters()\n util_hp.batch_size = 1\n util_hp.max_steps = 1\n\n\n # ----------------------------------------------------------------------\n # Prepare input data: Load, Create dataset and iterators\n\n neurons_in = 784\n neurons_out = 10\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n x_train = x_train.reshape(-1, neurons_in).astype(np.float32) / 255\n x_test = x_test.reshape(-1, neurons_in).astype(np.float32) / 255\n\n train_data: util.XYs = util.XYs()\n with tf.variable_scope('train_data'):\n train_data.set_x_and_y_src(x_train, y_train, slice=True)\n train_data.ys.dataset = train_data.ys.dataset.map(lambda z: tf.one_hot(z, neurons_out))\n ds = train_data.zip_datasets().repeat().shuffle(10000).batch(util_hp.batch_size).prefetch(100)\n train_data.set_dataset(sess, ds)\n\n test_data: util.XYs = util.XYs()\n with tf.variable_scope('test_data'):\n test_data.set_x_and_y_src(x_test, y_test, slice=False)\n test_data.ys.dataset = test_data.ys.dataset.map(lambda z: tf.one_hot(z, neurons_out))\n test_data.set_dataset(sess, test_data.zip_datasets()) # .batch(2000))\n\n all_data: util.AllData = util.AllData()\n all_data.set_data(train=train_data, test=test_data)\n\n\n # ----------------------------------------------------------------------\n # Build network\n\n layer: Layer = Layer(x=all_data.x,\n weights=Weights.pos_and_neg_random_uniform_const(input_dim=neurons_in,\n output_dim=4,\n beta_inhibitory=hp.beta_inhibitory,\n min_weight_1=hp.min_weight * 0.05,\n min_weight_2=hp.min_weight),\n b=0,\n hp=hp)\n\n # width = 784\n # mat1 = random_norm_weights_unit_length(input_dim=neurons_in, output_dim=3)\n # l1 = create_dense_layer(all_data.x, input_dim=784, output_dim=width, weight_init=mat1)\n # mat2 = random_norm_weights_unit_length(input_dim=10, output_dim=3)\n # l2 = create_dense_layer(l1, input_dim=width, output_dim=width, weight_init=mat2)\n # mat3 = random_norm_weights_unit_length(input_dim=10, output_dim=3)\n # l3 = create_dense_layer(l2, input_dim=width, output_dim=width, weight_init=mat3)\n # _ = 
plt.hist(np.transpose(vec))\n # plt.show()\n\n # ----------------------------------------------------------------------\n # Prepare for running training loop and run it\n merged_summary = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(flags.log_dir + '/train', sess.graph)\n # test_writer = tf.summary.FileWriter(flags.log_dir + '/test')\n tf.global_variables_initializer().run()\n\n for i in range(util_hp.max_steps):\n # do_test = True\n collect_full_trace = True\n collect_summary = True\n items_seen_by_network = i * util_hp.batch_size\n progress_str = f'{util_hp.to_string()}, {100. * i / util_hp.max_steps:.1f}% ({i}/{util_hp.max_steps})'\n\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) if collect_full_trace else None\n run_metadata = tf.RunMetadata() if collect_full_trace else None\n summary, x, w, alpha_ma_w, one_minus_alpha_ma_x, new_w = \\\n sess.run([merged_summary,\n layer.x,\n layer.weights.w,\n layer.alpha_ma_w,\n layer.one_minus_alpha_ma_x,\n layer.new_w],\n all_data.feed_dict_for_iterating_over_train_data(),\n options=run_options,\n run_metadata=run_metadata)\n\n print(f'{inspect.stack()[0][3]}: x:\\n{x[176:180]}\\n\\n')\n print(f'{inspect.stack()[0][3]}: w:\\n{w[176:180]}\\n\\n')\n print(f'{inspect.stack()[0][3]}: alpha_ma_w:\\n{alpha_ma_w[176:180]}\\n\\n')\n print(f'{inspect.stack()[0][3]}: one_minus_alpha_ma_x:\\n{one_minus_alpha_ma_x[176:180]}\\n\\n')\n print(f'{inspect.stack()[0][3]}: new_w:\\n{new_w[176:180]}\\n\\n')\n # main: x:\n # [[0. ]\n # [0.5254902]\n # [0.9882353]\n # [0.9882353]]\n #\n # main: w:\n # [[0.01 0.01 0.01 0.01]\n # [0.01 0.01 0.01 0.01]\n # [0.01 0.01 0.01 0.01]\n # [0.01 0.01 0.01 0.01]]\n #\n # main: alpha_ma_w:\n # [[0.0095 0.0095 0.0095 0.0095]\n # [0.0095 0.0095 0.0095 0.0095]\n # [0.0095 0.0095 0.0095 0.0095]\n # [0.0095 0.0095 0.0095 0.0095]]\n #\n # main: one_minus_alpha_ma_x:\n # [[0. 
]\n        #  [0.02627451]\n        #  [0.04941177]\n        #  [0.04941177]]\n        #\n        # main: new_w:\n        # [[0.0095     0.0095     0.0095     0.0095    ]\n        #  [0.03577451 0.03577451 0.03577451 0.03577451]\n        #  [0.05891177 0.05891177 0.05891177 0.05891177]\n        #  [0.05891177 0.05891177 0.05891177 0.05891177]]\n\n        if collect_summary:\n            train_writer.add_summary(summary, global_step=items_seen_by_network)\n            if collect_full_trace:\n                print(f'{progress_str} Adding run metadata')\n                train_writer.add_run_metadata(run_metadata, 'step%03d' % items_seen_by_network)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"ai/my/learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":16338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"438707026","text":"# This library file contains all implementations related to text-based exploration actions.\n# The exploration function receives an element \"input_element\" and outputs k other elements \"output_elements\".\n# Both \"input_element\" and \"output_elements\" consist of review IDs.\n# NOTE: In the current version of the code, only \"review exploration\" is implemented.\n\nfrom data.db_interface import DBInterface\nimport configuration \t# This library contains all configurable parameters.\nimport utilities\t\t# This library consists of helper functions.\nimport statistics\nimport time\nimport pandas as pd\nimport random\nimport intex_experiments\n\n\nclass exploration():\n\n    def __init__(self, db_interface: DBInterface):\n\n        # All exploration parameters inherited from the \"configuration\" library\n        self.config = configuration.exploration_configurations\n\n        # Lazy loading mechanism is in place, i.e., the data will be loaded only when needed.\n        self.data = None\n\n        # This is the number of exploration actions, which is equal to the number of relevance\n        # ... 
functions times the number of quality functions.\n self.nb_actions = self.config[\"nb_relevance_functions\"] * \\\n self.config[\"nb_quality_functions\"]\n\n self.db_interface = db_interface\n\n # This is the main exploration function, given all necessary parameters to execute exploration.\n # input_element: \t\t\tThe current item/review under investigation\n # relevance_function: \t\tThe type of relevance computation\n # k_prime:\t\t\t\t\tThe maximum number of relevant items to retrieve (for efficiency reasons)\n # k: \t\t\t\t\t\tThe number of elements in the output / The minimum number of relevant items to retrieve\n # timelimit:\t\t\t\tThe time limit for the optimization process in the quality function.\n # quality_function:\t\t\tThe type of quality computation\n # optimization direction:\tIt defines if the optimization is maximization or minimization.\n def parametrized_explore(self, input_element, relevance_function, k_prime, k, timelimit, quality_function, optim_loops, optim_meter, optimization_direction):\n\n # Step 1 of exploration: Obtain a list of candidates which are relevant to the input element\n elements_shortlist = self.db_interface.get_relevant_elements(\n input_element, relevance_function, k_prime)\n\n nb_items_received = len(\n elements_shortlist[elements_shortlist[\"type\"] == \"item\"]) / k_prime\n # print(nb_items_received)\n\n # Step 2 of exploration: Obtain a list of k optimized elements with respect to the quality function\n output_elements = self.compute_quality(\n elements_shortlist, k, timelimit, quality_function, optim_loops, optim_meter, optimization_direction)\n\n return output_elements\n\n # Functions outside the \"exploration\" often call the following simplified function of exploration, which\n # ... runs by default values set in the configuration library.\n def explore(self, input_element):\n\n # Call the \"parametrized_explore\" function by filling the parameters from the configuration library\n output_elements = self.parametrized_explore(input_element, self.config[\"relevance_function\"],\n self.config[\"k_prime\"], self.config[\"k\"], self.config[\"time_limit\"],\n self.config[\"quality_function\"], self.config[\"nb_optimization_loops\"],\n self.config[\"optimization_meter\"], self.config[\"optimization_direction\"])\n\n return output_elements\n\n # Another variant is \"exploration_by_functions\" where the exploration is executed by determining the\n # ... 
relevance and quality functions (and not other parameters).\n    # With \"options\", the functionality of operators can be limited, for experimental purposes.\n    def explore_by_functions(self, input_element, relevance_function, quality_function):\n\n        # apply exploration operator variants TEXT, TSG, ATTRIB, ALL\n        relevance_function = intex_experiments.apply_operator_variant(\n            relevance_function)\n\n        # Call the \"parametrized_explore\" function by filling the parameters from the configuration library\n        output_elements = self.parametrized_explore(input_element, relevance_function,\n                                                    self.config[\"k_prime\"], self.config[\"k\"], self.config[\"time_limit\"],\n                                                    quality_function, self.config[\"nb_optimization_loops\"],\n                                                    self.config[\"optimization_meter\"], self.config[\"optimization_direction\"])\n\n        return output_elements\n\n    def compute_quality(self, elements_shortlist, k, timelimit, quality_function, optim_loops, optim_meter, optimization_direction):\n\n        # We initialize output elements with top-k most relevant elements.\n        output_element_ids = elements_shortlist.iloc[0:k].id.to_list()\n\n        # If the quality function \"none\" is selected, then it suffices to return the\n        # ... top-k most relevant elements.\n        if quality_function == \"none\":\n            return elements_shortlist.iloc[0:k]\n\n        # In the quality improvement loops, the \"cursor\" variable shows which candidate element should be selected next.\n        cursor = k\n\n        # We accumulate time in \"time_spent\" and we stop if we reach the time limit.\n        time_spent = 0\n\n        # We count how many quality improvement loops are\n        # ... performed within the time limit.\n        loop_count = 0\n\n        if quality_function == \"diverse_numerical\":\n            elements_data = elements_shortlist.set_index('id').rating.to_dict()\n        else: # \"diverse_review\" or \"coverage_review\"\n            elements_data = elements_shortlist.set_index('id').text.to_dict()\n\n        candidate_ids = list(elements_data.keys())\n\n        # Quality score is a value between 0 and 1. Our aim is to maximize this value, in\n        # ... case optimization_direction = \"max\" (and to minimize, otherwise).\n        # The function \"compute_quality_score\" computes this score.\n        current_quality_score = self.compute_quality_score(\n            quality_function, output_element_ids, elements_data)\n\n        # The quality improvement loop begins here. It keeps running as long as the time limit is not exceeded.\n        while ((time_spent < timelimit and optim_meter == \"timelimit\") or (loop_count < optim_loops and optim_meter == \"nb_optimization_loops\")):\n\n            start_time = time.time()\n            if not candidate_ids[cursor] in output_element_ids:\n                # This inner loop checks for possible replacements in the \"output_elements\" to improve the quality score.\n                for i in range(k):\n                    # Replacement of the ith output element by the candidate at cursor for quality evaluation\n                    candidate_output_element_ids = output_element_ids.copy()\n                    candidate_output_element_ids[i] = candidate_ids[cursor]\n                    # We obtain the score of the list with the replacement, to compare with the current \"output_elements\".\n\n                    candidate_quality_score = self.compute_quality_score(\n                        quality_function, candidate_output_element_ids, elements_data)\n\n                    # Based on the optimization direction, the \"improved quality\" returns True if a better\n                    # ... 
quality score is obtained.\n if self.improved_quality(candidate_quality_score, current_quality_score, optimization_direction) == True:\n output_element_ids = candidate_output_element_ids\n current_quality_score = candidate_quality_score\n break\n\n # Cursor will now point to the next element in the shortlist.\n cursor += 1\n\n # If the cursor reaches the end of the shortlist, we simply break the quality improvement loop.\n if cursor == len(elements_shortlist):\n break\n\n end_time = time.time()\n time_spent += (end_time - start_time)\n loop_count += 1\n\n return elements_shortlist[elements_shortlist.id.isin(output_element_ids)]\n\n # The function returns a value between 0 and 1, depending on the semantics of the quality function.\n\n def compute_quality_score(self, quality_function, element_ids, elements_data):\n\n quality = 0\n data = list(map(lambda x: elements_data[x], element_ids))\n if quality_function == \"diverse_numerical\":\n quality = statistics.stdev(data)\n elif quality_function == \"diverse_review\":\n quality = utilities.collective_jaccard(data)\n elif quality_function == \"coverage_review\":\n quality = utilities.unique_word_count(data)\n\n return quality\n\n # Return True if a better quality score is obtained.\n\n def improved_quality(self, new_quality_score, old_quality_score, optimization_direction):\n\n improved = False\n\n if new_quality_score > old_quality_score and optimization_direction == \"max\":\n improved = True\n elif new_quality_score < old_quality_score and optimization_direction == \"min\":\n improved = True\n\n return improved\n","sub_path":"exploration_actions.py","file_name":"exploration_actions.py","file_ext":"py","file_size_in_byte":9342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"45953572","text":"import os\nimport shutil\nimport glob\nimport sys\nimport site\nimport fnmatch\n\nincludes = (\"salabim.py\", \"changelog.txt\", \"license.txt\", \"*.ttf\")\nmainfile = \"salabim.py\"\npackage = \"salabim\"\n\nPythonista = sys.platform == \"ios\"\n\n\ndef main():\n\n if Pythonista:\n documents = \"/Documents\"\n sp = os.getcwd().split(documents)\n if len(sp) != 2:\n print(\"unable to install\")\n exit()\n path = f\"{sp[0]}{documents}/site-packages/{package}\"\n\n else:\n path = f\"{site.getsitepackages()[-1]}{os.sep}{package}\"\n\n if not os.path.isdir(path):\n os.makedirs(path)\n\n files = glob.iglob(\"*.*\")\n\n ok = False\n for file in files:\n if any(fnmatch.fnmatch(file, include) for include in includes):\n if file == mainfile:\n ok = True\n shutil.copy(file, f\"{path}{os.sep}{file}\")\n if not ok:\n print(f\"couldn't find {mainfile} in current directory\")\n return\n\n with open(mainfile, \"r\") as f:\n lines = f.read().splitlines()\n\n realversion = \"?\"\n for line in lines:\n a = line.split(\"__version__ = \")\n if len(a) > 1:\n realversion = a[1].replace(\"'\", \"\").replace('\"', \"\")\n break\n\n with open(f\"{path}{os.sep}__init__.py\", \"w\") as initfile:\n initfile.write(f\"from .{package} import *\\n\")\n initfile.write(f\"from .{package} import __version__\\n\")\n print(f\"{package} {realversion} successfully installed\")\n\n\nmain()\n","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"507560399","text":"from flask import request\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.exc import InvalidRequestError, 
IntegrityError\nfrom mcd import app, Session\nfrom mcd.models import *\nfrom mcd.Obj2JSON import jsonobj\nfrom mcd.helpers import API\n\n\n@app.route('/orders/<id>', methods=['GET']) #for Testing:\ndef get_order(id):\n    session = Session()\n    es = session.query(Order).filter_by(id=id).first()\n    return jsonobj(es)\n    \n\n@app.route('/orders/', methods=['GET']) #for Testing:\ndef get_orders():\n    session = Session()\n    f = session.query(Order).all()\n    g = [ jsonobj(x) for x in f ]\n    return jsonobj(g)\n    \n\n@app.route('/orders/new', methods=['POST']) #for Testing:\ndef new_order():\n    user = request.form.get('user_id')\n    status = request.form.get('status') or \"Pending\"\n    urgent = request.form.get('urgent') or False\n    timeF = request.form.get('timeFrame') or 0\n    \n    #items?\n    \n    session = Session()\n    try:\n        user = session.query(User).filter_by(id=user).one()\n    except NoResultFound:\n        return \"NoResultFound\"\n    \n    \n    order = Order(user.id)\n    \n    try:\n        session.add(order)\n        session.commit()\n    except (IntegrityError,InvalidRequestError):\n        session.rollback()\n        order.list_id = -1\n        order.status = \"Error\"\n        return jsonobj(order), \"Error\"\n    else: \n        return jsonobj(order)\n\n","sub_path":"server/mcd/views/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"99847012","text":"import numpy as np\nimport torch\nimport torch.utils.data as tud\n\n\nclass Vocab(tud.Dataset):\n\n    def __init__(self, words_to_ids, word_to_subwords, subwords_to_ids):\n\n        loader = tud.DataLoader(self,\n                                batch_size=1000,\n                                collate_fn=self.collate_fn,\n                                num_workers=1)\n\n        self.words_to_ids = words_to_ids\n        self.index_to_word = dict(enumerate(words_to_ids))\n        self.word_to_subwords = word_to_subwords\n        self.subwords_to_ids = subwords_to_ids\n\n        self.loader = loader\n\n    def __getitem__(self, index): # Note: uses the first pronunciation\n\n        word = self.index_to_word[index]\n        seq = self.word_to_subwords[word][0]\n        seq_ids = np.array([self.subwords_to_ids[s] for s in seq])\n\n        return {\"seq_ids\": seq_ids, \"word_id\": self.words_to_ids[word]}\n\n    def __len__(self):\n        return len(self.words_to_ids)\n\n    def collate_fn(self, batch):\n\n        batch_size = len(batch)\n        max_seq_len = max([len(ex[\"seq_ids\"]) for ex in batch])\n        seqs = torch.zeros(batch_size, max_seq_len, dtype=torch.long)\n        seq_lens = torch.zeros(batch_size, dtype=torch.long)\n        word_ids = torch.zeros(batch_size, dtype=torch.long)\n\n        for i, ex in enumerate(batch):\n            seq = ex[\"seq_ids\"]\n            seqs[i, :len(seq)] = torch.from_numpy(seq)\n            seq_lens[i] = len(seq)\n            word_ids[i] = torch.tensor(ex[\"word_id\"])\n\n        return {\"view2\": seqs, \"view2_lens\": seq_lens, \"ids\": word_ids}\n","sub_path":"multiview-babel-feature/code/vocab.py","file_name":"vocab.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"119466964","text":"#formulate functions statistics using documents.txt\n\ndef main():\n    print(\"---loading txt file----\")\n    doc = open('documents.txt','r')\n    print(\"---running stat analysis---\")\n    tracker = {}\n\n    for line in doc:\n    \tseg = line.split()\n    \t#pop the caller func bc it's unneeded\n    \tseg.pop(0)\n    \tfor func in seg:\n\t    \t\tindex = seg.index(func) + 1\n\t    \t\tif func not in tracker:\n\t    \t\t\ttracker[func] = {}\n\t    \t\twhile index < len(seg):\n\t    \t\t\tif seg[index] not in tracker[func]:\n\t    \t\t\t\ttracker[func][seg[index]] = 0\n\t    
\t\t\ttracker[func][seg[index]] = tracker[func][seg[index]] + 1\n\t \t\t\tindex = index + 1\n \n doc.close()\n doc = open('results.txt', 'w')\n for key in tracker:\n \tfor key2 in tracker[key]:\n \t\tdoc.write(key + \" \" + key2 + \" \" + str(tracker[key][key2]))\n \t\tdoc.write(\"\\n\")\n print(\"---done---\")\n\n #view results here\n #print(tracker['malloc']['free'])\n \n\n \nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"form_stats.py","file_name":"form_stats.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"390145227","text":"from discord import Embed, utils\nfrom discord.ext import commands\n\n\n@commands.command()\nasync def poistaseuraus(context, lecture_type=None):\n if lecture_type is None:\n await context.send('Anna argumenttina luentotyyppi id esim. 283777')\n return\n\n channel_id = utils.get(context.guild.channels, name=context.channel.name).id\n bound_course = context.bot.db.get_course_by_channel_id(channel_id)\n if not bound_course:\n await context.send('Tähän kanavaan ei ole liitetty kurssia')\n return\n\n delete_this = context.bot.db.get_course_followed_lecture_by_type(bound_course['id'], lecture_type)\n\n if not delete_this:\n await context.send('Seurattua luentotyyppiä ei löytynyt')\n return\n context.bot.db.delete_followed_lecture(delete_this['id'])\n context.bot.logger.info(f'Stopped following course type {lecture_type} ({delete_this[\"title\"]})')\n\n e = Embed(\n title='Poistettiin luentotyypin seuranta',\n description=f'**{lecture_type}**: {delete_this[\"title\"]}'\n )\n await context.send(embed=e)\n\n\ndef setup(bot):\n bot.add_command(poistaseuraus)\n","sub_path":"koulu_ds_bot/src/events/poistaseuraus.py","file_name":"poistaseuraus.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"364345364","text":"# Django imports\nfrom django.db.models import Prefetch\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import TemplateView\n\n# App imports\nfrom imageboard.models import Board, Thread, Post\n\n\nclass CatalogPage(TemplateView):\n template_name = 'imageboard/catalog_page.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n board_hid = self.kwargs['board_hid']\n\n boards = Board.active_objects.all()\n\n # Get current board\n board = get_object_or_404(Board, hid=board_hid)\n\n threads = (\n Thread.threads\n .prefetch_related(\n Prefetch(\n 'op',\n queryset=Post.posts.filter_op()\n )\n )\n .filter(\n board=board\n )\n .order_by('-is_sticky', '-updated_at')\n [:board.max_threads_num]\n )\n\n context.update({\n 'board': board,\n 'boards': boards,\n 'threads': threads,\n })\n\n return context\n","sub_path":"src/imageboard/views/catalog_page.py","file_name":"catalog_page.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"308613041","text":"#!/usr/bin/env python3\n#-*-coding:utf-8-*\nimport rospy\nimport serial\nimport socket\nimport time\nimport threading\nfrom std_msgs.msg import Int16\nfrom std_msgs.msg import Bool\nfrom std_msgs.msg import Int16MultiArray\n#1\topen\n#2\tclose\n#3\topen error\n#4\tclose error\n#5 \topening\n#6\tclosing\nclass MOTOR():\n\tdef __init__(self):\n\t\trospy.init_node('motor_publisher')\t\t\n\t\tself.rate = rospy.get_param('~rate', 10.0)\n\t\tself.device_port = 
rospy.get_param('~device_port',\"/dev/ttyUSB0\")\n\t\tself.motor_pub = rospy.Publisher('/motor',Int16,queue_size=50)\n\t\tself.motor_status = rospy.get_param('/motor_status',2)\n\t\tself.motor_command = rospy.get_param('/motor_command',-1)\n\t\ttry:\t\n\t\t\tself.ser = serial.Serial(self.device_port, 115200, timeout=1)\n\t\t\tprint(\"open motor serial port success\")\n\t\texcept Exception as e:\n\t\t\tprint(\"____error___:\",e)\n\tdef callback(self,data):\n\t\tif data.data == True:\n\t\t\tself.motor_command = 1\n\t\telse:\n\t\t\tself.motor_command = 0\n\tdef handlemotor(self):\n\t\tmotor = Int16()\n\t\tif self.motor_command == 1:\n\t\t\tif self.motor_status != 1 and self.motor_status != 3 and self.motor_status != 5:\n\t\t\t\tret = self.ser.write(\"open\".encode())\n\t\t\t\tself.motor_status = 5\n\t\t\t\tprint(\"open motor success\")\n\t\t\t\tself.motor_command = -1\n\t\t\telse:\n\t\t\t\tself.motor_status = 3\n\t\t\t\tprint(\"open motor error\")\n\t\t\t\tself.motor_command = -1\n\t\telif self.motor_command == 0:\n\t\t\tif self.motor_status != 2 and self.motor_status != 4 and self.motor_status != 6:\n\t\t\t\tret = self.ser.write(\"close\".encode())\n\t\t\t\tself.motor_status = 6\n\t\t\t\tprint(\"close motor success\")\n\t\t\t\tself.motor_command = -1\n\t\t\telse:\n\t\t\t\tself.motor_status = 4\n\t\t\t\tprint(\"close motor error\")\n\t\t\t\tself.motor_command = -1\t\t\t\t\t\n\t\tmotor.data = self.motor_status\n\t\t\t\t\n\t\tst = self.ser.read(15).decode(\"utf-8\")\n\t\tif len(st) != 0:\n\t\t\tif st.startswith(\"open\"):\n\t\t\t\tself.motor_status = 1\n\t\t\telif st.startswith(\"close\"):\n\t\t\t\tself.motor_status = 2\n\t\tself.motor_pub.publish(motor)\n\n\tdef spin(self):\n\t\tr = rospy.Rate(self.rate)\n\t\trospy.Subscriber('/doorStatus',Bool,self.callback)\n\t\twhile not rospy.is_shutdown():\n\t\t\tself.handlemotor()\n\t\t\tr.sleep()\n\tdef __del__(self):\n\t\tself.ser.close()\n\t\tprint(\"motor is close\")\nif __name__ == '__main__':\n\tmotor = MOTOR()\n\trospy.loginfo(\"=== motor run\")\n\tmotor.spin()\n\trospy.loginfo(\"=== motor end\")\n","sub_path":"src/motor/src/motor.py","file_name":"motor.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"518538522","text":"#!/usr/bin/env python\n\nfrom keras.models import Sequential, model_from_json, model_from_yaml\nfrom keras.layers import Dense, Dropout, Activation, Merge, Flatten, \\\n Convolution2D, MaxPooling2D, ZeroPadding2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import PReLU\nfrom keras.optimizers import SGD\nfrom keras.callbacks import EarlyStopping\nfrom keras import backend as K\n# from keras.utils.visualize_util import plot as kplt\n\nimport theano as T\n\nfrom os import path\nimport traceback\nimport numpy as np\nfrom numpy import matlib\nfrom scipy import signal, fft, ifft\nfrom scipy.fftpack import dct, idct\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom pylab import figure, plot, subplot, show, imshow, colorbar, axis, title\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable, ImageGrid\nimport h5py as h5\n\n# Data Class\nclass DataSet:\n # data = {}\n # scan = {}\n\n def __init__(self, name):\n # TODO: better initialize\n self.name = name\n self.data = {} # storing the data\n self.probe_geom = ()\n self.angles = ()\n self.fs = 0\n self.fm = 0\n self.c0 = 0\n self.num_chn = 0\n self.csm = () # cross spectral matrix\n\n self.scan 
= {}\n        self.dist = ()\n\n        self.pht_pos = ()\n\n        self.scat_pos = ()\n\n\n    def __print_name(self, name):\n        print(name)\n\n    def import_data(self, file_path, file_name):\n        # TODO\n        # try:\n        assert path.exists(file_path+file_name), 'File not found.'\n        with h5.File(file_path + file_name, 'r') as hf:\n            print('This %s dataset contains: ' % file_name)\n            hf.visit(self.__print_name)\n            print\n\n        # except IOError, e:\n        #     print(IOError, ':', e)\n\n    def preprocess(self):\n        # Cross spectral matrix\n        mode = 1 # 0: without average, of shape (num_angles, nFFT, num_channels, num_channels)\n                 # 1: with average, of shape (num_angles, num_channels, num_channels)\n\n        (num_angles, num_channels, num_samples) = self.data['real'].shape\n        nFFT = 512 # 256\n        if mode == 0:\n            # (samples, channels, rows, cols)\n            self.csm = np.zeros((num_angles, nFFT, num_channels, num_channels), dtype=complex)\n            for k in np.arange(num_angles):\n                for i in np.arange(num_channels):\n                    s1 = self.data['real'][k,i,:] # ignore imaginary part\n                    for j in np.arange(i+1,num_channels):\n                        s2 = self.data['real'][k,j,:]\n                        _, self.csm[k,:,j,i] = signal.csd(s1, s2, fs=FREQ_S, nperseg=nFFT, \\\n                                nfft=nFFT, scaling='density')\n            # TODO\n            # Diagonal removal: use a better algorithm\n            # lambda filter map reduce\n\n        elif mode == 1:\n            self.csm = np.zeros((num_angles, num_channels, num_channels), dtype=float)\n            for k in np.arange(num_angles):\n                for i in np.arange(num_channels):\n                    s1 = self.data['real'][k,i,:]\n                    for j in np.arange(i+1,num_channels):\n                        s2 = self.data['real'][k,j,:]\n                        _, tmp = signal.csd(s1, s2, fs=FREQ_S, nperseg=nFFT, \\\n                                nfft=nFFT, scaling='density')\n                        self.csm[k,j,i] = np.abs(np.sum(tmp) / nFFT) # sum,average,abs\n                    self.csm[k,i,(i+1):num_channels] = self.csm[k,(i+1):num_channels,i]\n            print(self.csm.shape)\n            print(self.csm)\n\n            with h5.File('csm_h5', 'w') as hf:\n                hf['csm'] = self.csm\n\n            # csm_t = self.csm[1,:,:]\n            # img = csm_t.reshape(num_channels, num_channels)\n            # plt.figure()\n            # plt.imshow(img)\n            # plt.show()\n\n        # Lower triangle trim\n        # Normalize: /Gxx Gyy\n\n    def compute_dist(self):\n        # Distance matrix\n        num_x = len(self.scan['x_axis'])\n        num_z = len(self.scan['z_axis'])\n        self.dist = np.zeros((num_x, num_z), dtype=float)\n        for i in range(num_x):\n            for j in range(num_z):\n                self.dist[i,j] = np.sqrt(self.scan['x_axis'][i]**2 \\\n                        + self.scan['z_axis'][j]**2)\n\n    def write_data(self, filename, channel_id):\n        with h5.File(filename, 'w') as hf:\n            # DEBUG: complex value OR absolute value ??\n            (num_angles, num_channels, num_samples) = self.data['real'].shape\n            one_ch_data = np.sqrt(self.data['real'][channel_id, :, :]**2 \\\n                    + self.data['imag'][channel_id, :, :]**2)\n            hf['time_data'] = one_ch_data.T\n\n            # mul_ch_data = np.sqrt( \\\n            #         self.data['real'].reshape(num_angles*num_channels, num_samples)**2 \\\n            #         + self.data['imag'].reshape(num_angles*num_channels, num_samples)**2 \\\n            #         )\n            # hf['time_data'] = mul_ch_data.T\n\n\n    def show_image(self, prange):\n        num_slices = self.data['real'].shape[0]\n        plt.figure()\n        for i in np.arange(num_slices):\n            amp = np.sqrt(self.data['real'][i, :, :]**2 + self.data['imag'][i, :, :]**2)\n            plt.subplot(2, 2, i+1)\n            plt.imshow(amp, extent=prange)\n            plt.title(i+1)\n        plt.show()\n\n\ndef img_norm(img):\n    min = np.amin(img)\n    max = np.amax(img)\n    return (img-min) / (max-min)\n\ndef nice_show(fig, data, vmin=None, vmax=None, cmap=None):\n    '''\n    data is 3D (nCH, nCol, nRow)\n    '''\n    assert data.ndim==3, 'Data dimension must be 3!'\n    if cmap is None:\n        cmap = cm.jet\n    if vmin is None:\n        vmin = data.min()\n    if vmax is 
None:\n vmax = data.max()\n nCH,_,_= data.shape\n nr = int(np.ceil(np.sqrt(nCH)))\n assert nr<=10, 'Too many data channels (>10)!'\n grid = ImageGrid(fig, 111, \\\n nrows_ncols=(nr, nr),\\\n axes_pad=0.1,\\\n add_all=True,\\\n label_mode='L')\n for i in range(nCH):\n ax = grid[i]\n im = ax.imshow(data[i,:,:], vmin=vmin, vmax=vmax, \\\n interpolation='nearest', cmap=cmap)\n# div = make_axes_locatable(ax)\n# cax = div.append_axes('right', size='5%', pad=0.05) # colorbar axis to the right\n# plt.colorbar(im, cax=cax)\n\nclass ANN(object):\n\n \"\"\"Docstring for ANN. \"\"\"\n\n def __init__(self):\n self.in_real = ()\n self.in_imag = ()\n\n self.out_real = ()\n self.out_imag = ()\n\n def train_mlp(self, input, output):\n self.in_real = input.data['real']\n self.in_imag = input.data['imag']\n self.out_real = output.data['real']\n self.out_imag = output.data['imag']\n\n (i_dim_x, i_dim_y, i_dim_z) = self.in_real.shape\n in_dim = i_dim_x*i_dim_y*i_dim_z\n input_data = self.in_real.reshape(in_dim, 1)\n\n (o_dim_x, o_dim_y, o_dim_z) = self.out_real.shape\n out_dim = o_dim_x*o_dim_y*o_dim_z\n output_data = self.out_real.reshape(out_dim, 1)\n\n model = Sequential()\n model.add(Dense(200, input_dim=in_dim, init='uniform'))\n model.add(Activation('relu'))\n # model.add(Dropout(0.25))\n\n model.add(Dense(200))#, init='uniform'))\n model.add(Activation('relu'))\n # model.add(Dropout(0.25))\n\n model.add(Dense(out_dim))#, init='uniform'))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='sgd',\\\n metrics=['accuracy'])\n\n early_stop = EarlyStopping(monitor='val_loss', patience=2)\n hist = model.fit(input_data, output_data, nb_epoch=50, \\\n batch_size=64, validation_split=0.2, \\\n shuffle=True, callbacks=[early_stop])\n print(hist.history)\n #TODO: batch train\n model.train_on_batch()\n\n # Save model\n model_to_save_json = model.to_json()\n open('model_architecture.json', 'w').write(model_to_save_json)\n model_to_save_yaml = model.to_yaml()\n open('model_architecture.yaml', 'w').write(model_to_save_yaml)\n model.save_weights('weights.h5')\n\n def train_cnn(self, input, output):\n num_samples, num_channels, num_rows, num_cols = input.shape\n _, out_dim = output.shape\n\n # Configurations\n batch_size = 30 # note to adjust with the total number of samples\n num_epoch = 10\n # num_filter = 64\n num_row_kernel = 3\n num_col_kernel = 3\n num_pool = 2\n dim_order = 'th' # (samples, channels, rows, cols)\n\n model = Sequential()\n model.add(ZeroPadding2D((1,1),input_shape=(num_channels, num_rows, num_cols)))\n model.add(Convolution2D(64, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order))\n conv1 = Activation('relu')\n model.add(conv1)\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(64, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order))\n conv2 = Activation('relu')\n model.add(conv2)\n model.add(MaxPooling2D((num_pool, num_pool), strides=(2,2)))\n\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(128, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order))\n conv3 = Activation('relu')\n model.add(conv3)\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(128, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order))\n conv4 = Activation('relu')\n model.add(conv4)\n model.add(MaxPooling2D((num_pool, num_pool), strides=(2,2)))\n\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(256, num_row_kernel, num_col_kernel, 
\\\n border_mode='same', dim_ordering=dim_order))\n conv5 = Activation('relu')\n model.add(conv5)\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(256, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order))\n conv6 = Activation('relu')\n model.add(conv6)\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(256, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order))\n conv7 = Activation('relu')\n model.add(conv7)\n model.add(MaxPooling2D((num_pool, num_pool), strides=(2,2)))\n\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(512, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order))\n conv8 = Activation('relu')\n model.add(conv8)\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(512, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order))\n conv9 = Activation('relu')\n model.add(conv9)\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(512, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order))\n conv10 = Activation('relu')\n model.add(conv10)\n model.add(MaxPooling2D((num_pool, num_pool), strides=(2,2)))\n\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(512, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order))\n conv11 = Activation('relu')\n model.add(conv11)\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(512, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order))\n conv12 = Activation('relu')\n model.add(conv12)\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(512, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order))\n conv13 = Activation('relu')\n model.add(conv13)\n model.add(MaxPooling2D((num_pool, num_pool), strides=(2,2)))\n\n model.add(Flatten())\n model.add(Dense(4096))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(4096))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(out_dim))\n\n '''\n # Net structure\n model = Sequential()\n\n model.add(Convolution2D(num_filter, num_row_kernel, num_col_kernel, \\\n border_mode='same', dim_ordering=dim_order, \\\n input_shape=(num_channels, num_rows, num_cols)))\n # model.add(BatchNormalization(mode=0, axis=1))\n # model.add(Activation('relu'))\n model.add(PReLU())\n\n # Normalize\n model.add(BatchNormalization(mode=0, axis=1))\n\n model.add(Convolution2D(num_filter, num_row_kernel, num_col_kernel))\n # model.add(Activation('relu'))\n model.add(PReLU())\n model.add(MaxPooling2D(pool_size=(num_pool, num_pool)))\n# model.add(Dropout(0.25))\n\n model.add(Convolution2D(num_filter, num_row_kernel, num_col_kernel, \\\n border_mode='same'))\n # model.add(Activation('relu'))\n model.add(PReLU())\n model.add(Convolution2D(num_filter, num_row_kernel, num_col_kernel))\n # model.add(Activation('relu'))\n model.add(PReLU())\n model.add(MaxPooling2D(pool_size=(num_pool, num_pool)))\n# model.add(Dropout(0.25))\n\n model.add(Convolution2D(num_filter, num_row_kernel, num_col_kernel, \\\n border_mode='same'))\n # model.add(Activation('relu'))\n model.add(PReLU())\n model.add(Convolution2D(num_filter, num_row_kernel, num_col_kernel))\n # model.add(Activation('relu'))\n model.add(PReLU())\n model.add(MaxPooling2D(pool_size=(num_pool, num_pool)))\n# model.add(Dropout(0.25))\n\n model.add(Flatten())\n\n model.add(Dense(512))\n model.add(PReLU())\n model.add(Dropout(0.25))\n model.add(Dense(256))\n 
model.add(PReLU())\n model.add(Dropout(0.3))\n model.add(Dense(128))\n model.add(PReLU())\n model.add(Dropout(0.35))\n model.add(Dense(64))\n model.add(PReLU())\n model.add(Dropout(0.4))\n model.add(Dense(32))\n model.add(PReLU())\n model.add(Dropout(0.5))\n model.add(Dense(32))\n model.add(PReLU())\n model.add(Dropout(0.5))\n\n model.add(Dense(1)) # output 1 pixel\n '''\n\n # Compile\n # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n # model.compile( optimizer=sgd, \\\n # loss='categorical_crossentropy' )\n model.compile( optimizer='adam', \\\n loss='mean_squared_error' )\n\n # early_stop = EarlyStopping(monitor='val_loss', patience=2)\n hist = model.fit(input, output, \\\n batch_size=batch_size, nb_epoch=num_epoch, verbose=1, \\\n validation_split=0.1, shuffle=True)\n # callbacks=[early_stop])\n print(hist.history)\n\n model.get_config()\n # kplt(model, to_file='model.png', show_shapes=True)\n\n# V1\n I1 = input\n print(\"I1 shape: \", I1.shape)\n convout1 = T.function([model.layers[1].input], \\\n conv1.output, allow_input_downcast=True)\n # convout1 = K.function([model.layers[1].input], \\\n # [conv1.output])\n C1 = np.array(convout1(I1))\n print('layer 2: ', model.layers[2].get_config())\n print(\"C1 shape: \", C1.shape)\n W1 = model.layers[1].W.get_value(borrow=True)\n # W1 = model.layers[1].get_weights()[0] # 0 is W, 1 is b\n W1 = np.squeeze(W1)\n print(\"W1 shape: \", W1.shape)\n\n f = plt.figure()\n plt.title('I1')\n nice_show(f,I1[0])\n f = plt.figure()\n plt.title('C1')\n nice_show(f,C1[0])\n f = plt.figure()\n plt.title('W1')\n nice_show(f,W1)\n\n# V2\n convout2 = T.function([model.layers[1].input], \\\n conv2.output, allow_input_downcast=True)\n C2 = np.array(convout2(I1))\n print('layer 5: ', model.layers[5].get_config())\n print(\"C2 shape: \", C2.shape)\n W2 = model.layers[4].W.get_value(borrow=True)\n # W1 = model.layers[1].get_weights()[0] # 0 is W, 1 is b\n W2 = np.squeeze(W2)\n print(\"W2 shape: \", W2.shape)\n\n f = plt.figure()\n plt.title('C2')\n nice_show(f,C2[0])\n f = plt.figure()\n plt.title('W2')\n nice_show(f,W2[0])\n\n l1f = T.function([model.layers[0].input], \\\n model.layers[1].output, allow_input_downcast=True)\n l1o = np.array(l1f(I1))\n print('layer 1: ', model.layers[1].get_config())\n print(\"l1o shape: \", l1o.shape)\n\n l3f = T.function([model.layers[0].input], \\\n model.layers[3].output, allow_input_downcast=True)\n l3o = np.array(l3f(I1))\n print('layer 3: ', model.layers[3].get_config())\n print(\"l3o shape: \", l3o.shape)\n\n l6f = T.function([model.layers[0].input], \\\n model.layers[6].output, allow_input_downcast=True)\n l6o = np.array(l6f(I1))\n print('layer 6: ', model.layers[6].get_config())\n print(\"l6o shape: \", l6o.shape)\n\n\n plt.show()\n\n # TODO: move Prediction to a seperated func\n # Prediction\n predict = model.predict(input, batch_size=batch_size)\n # rmse = np.sqrt(((predict-output)**2).mean(axis=0))\n # print(\"rmse = \")\n # print(rmse)\n\n # model.train_on_batch(self.in_real, out_data_r)\n # model.train_on_batch(self.in_imag, out_data_i)\n\n # TODO: save model\n #model_to_save_json = model.to_json()\n #open('model_architecture.json', 'w').write(model_to_save_json)\n #model_to_save_yaml = model.to_yaml()\n #open('model_architecture.yaml', 'w').write(model_to_save_yaml)\n #model.save_weights('weights.h5')\n\n return predict\n\n def predict(self, X_test, Y_test):\n\n model = model_from_json(open('model_architecture.json').read())\n model = model_from_yaml(open('model_architecture.yaml').read())\n 
model.load_weights('weights.h5')\n        loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)\n\n        classes = model.predict_classes(X_test, batch_size=32)\n\n        proba = model.predict_proba(X_test, batch_size=32)\n\n    def get_interlayer_output(self, num_layer):\n        \"\"\"TODO: Docstring for get_interlayer_output.\n        :returns: TODO\n\n        \"\"\"\n        pass\n\n\n\ndef get_2D_dct(img):\n    return dct(dct(img.T, norm='ortho').T, norm='ortho')\n\ndef get_2D_idct(coeff):\n    return idct(idct(coeff.T, norm='ortho').T, norm='ortho')\n\ndef test_net():\n    num_samples = 86\n    num_channels = 1\n    num_rows = 66\n    num_cols = 66\n\n    prefix = './sim_data'\n    fs = 100e6\n    nFFT = 512 # 256\n    lenRF = 7000\n    input_data = np.zeros((num_samples, num_channels, num_rows, num_cols))\n    with h5.File('csm_large_h5', 'r') as hf:\n        input_data = np.array(hf['csm'])\n\n    label_data_path = './'\n    label_data_name = 'animals_n100_26-Jul-2016.mat'\n    label_size = 128\n    dct_size = 25\n    label_data = sio.loadmat(''.join([label_data_path,label_data_name]))['phantom_c']\n    output_data = np.zeros((num_samples, dct_size**2))\n    for i in range(num_samples):\n        img = label_data[i,0][:,:,0]\n        # plt.figure()\n        # plt.imshow(img)\n\n#       Get DCT Coeff\n        dct_coeff = get_2D_dct(img)\n        # plt.matshow(np.abs(dct_coeff), cmap=plt.cm.Paired)\n\n#       Compress Coeff\n        dct_coeff_cp = dct_coeff.copy()\n        dct_coeff_cp[dct_size:,:] = 0.0\n        dct_coeff_cp[:,dct_size:] = 0.0\n\n        # Alternative\n        # v = np.mean(dct_coeff_cp) + 1.0*np.std(dct_coeff_cp)\n        # ind = np.nonzero(dct_coeff_cpimg_m)\n        # ind_0 = np.nonzero(img_re0.0, dct_coeff_cp.reshape(-1,1)))\n        dct_clip = dct_coeff_cp[:dct_size,:dct_size].ravel()\n        output_data[i,:] = dct_clip\n\n    # Train\n    ann = ANN()\n    pred = ann.train_cnn(input_data, output_data)\n    pred = pred.reshape(num_samples, dct_size, dct_size)\n\n    images_pred = []\n    for i in range(num_samples):\n        dct_pr = pred[i,:]\n        dct_pr_cp = np.zeros((label_size,label_size))\n        dct_pr_cp[:dct_size,:dct_size] = dct_pr.copy()\n        img_pr = get_2D_idct(dct_pr_cp)\n        images_pred.append(img_pr)\n    with h5.File('images_pred.h5', 'w') as hf:\n        hf['images_pred'] = images_pred\n\n\n    # print('amp_pr is ')\n    # print(amp_pr)\n\n    # plt.figure()\n    # plt.imshow(amp_pr[0,:,:], extent=(0,0.1,0,0.1))\n    # plt.show()\n\ndef test_import():\n    num_samples = 86\n    num_channels = 1\n    num_rows = 66\n    num_cols = 66\n\n    prefix = './sim_data'\n    fs = 100e6\n    nFFT = 512 # 256\n    lenRF = 7000\n    input_data = np.zeros((num_samples, num_channels, num_rows, num_cols))\n    with h5.File('csm_large_h5', 'r') as hf:\n        input_data = np.array(hf['csm'])\n\n    label_data_path = './'\n    label_data_name = 'animals_n100_26-Jul-2016.mat'\n    label_size = 128*128\n    label_data = sio.loadmat(''.join([label_data_path,label_data_name]))['phantom_c']\n    output_data = np.zeros((num_samples, label_size))\n    for i in range(num_samples):\n        output_data[i,:] = label_data[i,0][:,:,0].reshape(-1,1)[:,0]\n\n    # print(np.array(label_data['phantom_c'][99, 0]).shape)\n    # plt.figure()\n    # plt.imshow(np.array(label_data['phantom_c'][99, 0])[:,:,0], extent=[0,0.1,0,0.1])\n    # plt.show()\n\ndef test_results():\n    label_data_path = './'\n    label_data_name = 'animals_n100_26-Jul-2016.mat'\n    label_data = sio.loadmat(''.join([label_data_path,label_data_name]))['phantom_c']\n    print(label_data.shape)\n\n    plt.figure()\n    for i in range(10):\n        img = label_data[i,0][:,:,0]\n        # print('img')\n        # print(img)\n        plt.subplot(4,3,i+1)\n        plt.imshow(img)\n\n    with h5.File('images_pred.h5', 'r') as hf:\n        images = np.array(hf['images_pred'])\n    plt.figure()\n    for i in range(10):\n        img_t = 
images[i,:,:]\n        # print('img_t')\n        # print(img_t)\n        # plt.figure()\n        # plt.imshow(img_t)\n\n        vm = np.mean(img_t)\n        img_t[np.nonzero(img_t<vm)] = 0\n        img_t[np.nonzero(img_t>vm)] = 1\n        # print(img_t)\n\n        plt.subplot(4,3,i+1)\n        plt.imshow(img_t)\n    plt.show()\n\n\n    '''\n    l = 0\n    m = 0\n    n = 0\n    for i in range(1,87):\n        m = 0\n        for j in range(63,129):\n            tmp = sio.loadmat(path.join(prefix, \\\n                    ''.join(['phant_',str(i),'_rf_ln',str(j),'.mat'])))\n            tstart = tmp['tstart']\n            rf1 = np.array(tmp['rf_data'][int(tstart*fs):])[0:lenRF,0]\n            n = m+1\n            for k in range(j+1,129):\n                tmp = sio.loadmat(path.join(prefix, \\\n                        ''.join(['phant_',str(i),'_rf_ln',str(k),'.mat'])))\n                tstart = tmp['tstart']\n                rf2 = np.array(tmp['rf_data'][int(tstart*fs):])[0:lenRF,0]\n\n                # Cross Spectrum\n                _, csd = signal.csd(rf1, rf2, fs=fs, nperseg=nFFT, \\\n                        nfft=nFFT, scaling='density')\n                input_data[l,0,m,n] = np.abs(np.sum(csd) / nFFT) # sum,average,abs\n                n += 1\n\n            input_data[l,0,(m+1):,m] = input_data[l,0,m,(m+1):]\n            m += 1\n        l += 1\n\n    var_min = np.amin(input_data)\n    var_max = np.amax(input_data)\n    input_data = 10*((input_data-var_min) / (var_max-var_min))\n    print(input_data)\n\n    with h5.File('csm_large_h5', 'w') as hf:\n        hf['csm'] = input_data\n    '''\n\n    '''\n    plt.figure()\n    plt.subplot(421)\n    img = input_data[10,0,:,:].reshape(num_rows,num_cols)\n    plt.imshow(img, extent=[0,0.01,0,0.01])\n\n    plt.subplot(422)\n    img = input_data[20,0,:,:].reshape(num_rows,num_cols)\n    plt.imshow(img, extent=[0,0.01,0,0.01])\n\n    plt.subplot(423)\n    img = input_data[30,0,:,:].reshape(num_rows,num_cols)\n    plt.imshow(img, extent=[0,0.01,0,0.01])\n\n    plt.subplot(424)\n    img = input_data[40,0,:,:].reshape(num_rows,num_cols)\n    plt.imshow(img, extent=[0,0.01,0,0.01])\n\n    plt.subplot(425)\n    img = input_data[50,0,:,:].reshape(num_rows,num_cols)\n    plt.imshow(img, extent=[0,0.01,0,0.01])\n\n\n    plt.subplot(426)\n    img = input_data[60,0,:,:].reshape(num_rows,num_cols)\n    plt.imshow(img, extent=[0,0.01,0,0.01])\n\n    plt.subplot(427)\n    img = input_data[70,0,:,:].reshape(num_rows,num_cols)\n    plt.imshow(img, extent=[0,0.01,0,0.01])\n\n    plt.subplot(428)\n    img = input_data[80,0,:,:].reshape(num_rows,num_cols)\n    plt.imshow(img, extent=[0,0.01,0,0.01])\n    plt.show()\n    '''\n\n\n\nif __name__ == '__main__':\n    # test_import();\n    test_net()\n    # test_results()\n","sub_path":"ANN_large.py","file_name":"ANN_large.py","file_ext":"py","file_size_in_byte":25375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"411900416","text":"#PYTHON HOMEWORK ASSIGNMENT 5\n'''\nAuthor : Dinesh\npublished Date : 26-01-2019\n'''\n\n#check whether given number is prime or not \ndef isPrime(num,i=2):\n    if num<=2:\n        return True if num==2 else False\n    if num%i==0:\n        return False\n    if i*i>num:\n        return True\n    return isPrime(num,i+1)\n\n\nfor num in range(1,101):\n    if isPrime(num)==True: \n        print('Prime')\n    else:\n        #the number is divisible by both 3 and 5\n        if num%3==0 and num%5==0: \n            print('FizzBuzz')\n        #the number is only divisible by 3\n        elif num%3==0:\n            print('Fizz')\n        #the number is only divisible by 5\n        elif num%5==0:\n            print('Buzz')\n        #otherwise print that number\n        else:\n            print(num)\n","sub_path":"assignment5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"461482865","text":"import os \nimport math\nimport sys\nimport re\nimport random\n\n\ndef isvowel(s):\n\tv = list(\"AEIOU\")\n\n\tif s in v:\n\t\treturn True\n\telse: return False\n\ndef 
minion_game(string):\n\tvowel_list = []\n\tconstant_list = []\n\tfor a in string:\n\t\tif (isvowel(a) == True):\n\t\t\tif (a not in vowel_list):\n\t\t\t\tvowel_list.append(a)\n\t\telse:\n\t\t\tif(a not in constant_list):\n\t\t\t\tconstant_list.append(a)\n\t#print('vowel_list = ', vowel_list )\n\t#print('constant_list = ', constant_list)\n\n\t#print('Length of vowel list', len(vowel_list))\n\t#print('Length of constant list', len(constant_list))\n\n\t## Vowels ... Kevin ...#\n\n\t## Now we get the vowels and constants separated\n\t## Counting words for kevin .. \n\t## Create occurrance list\n\t## vowel_list = ['A']\n\t## Create counter list for Each element in vowel list\n\n\toccurrance_counter_for_vowel_characters = []\n\tvowel_compinations = []\n\ti = 0\n\tj = 0\n\tk = 0\n\tcounter = 0\n\n\tvowel_compinations_counter = 0\n\n\tstring_in_list_form =list(string)\n\t#print('string_in_list_form', string_in_list_form)\n\tfor i in range(len(vowel_list)):\n\t\tfor j in range(len(string_in_list_form)):\n\t\t\tif(vowel_list[i] == string_in_list_form[j]):\n\t\t\t\t#counter +=1\n\t\t\t\t#vowel_compinations_counter +=1\n\t\t\t\t# from this position till the end \n\t\t\t\t#vowel_compinations.append(string_in_list_form[j])\n\t\t\t\tk = j\n\t\t\t\twhile( k <= len(string_in_list_form) - 1):\n\t\t\t\t\tvowel_compinations_counter +=1\n\t\t\t\t\tk+=1\n\n\t\t\t\t#while(k <= (len(string_in_list_form) -1)):\n\t\t\t\t\t#vowel_compinations.append(str(string_in_list_form[k]) + str(string_in_list_form[k+m]) )\n\t\t\t\t\t#m +=1\n\t\toccurrance_counter_for_vowel_characters.append(counter)\n\t\n\t#print(occurrance_counter_for_vowel_characters)\n\t#print(vowel_compinations)\n\t#print('vowel_compinations_counter', vowel_compinations_counter)\n\n\n\t## Constants .. stuart\n\tconstant_compination_counter = 0\n\tm = 0\n\tn = 0\n\tl = 0\n\tfor m in range(len(constant_list)):\n\t\tfor n in range(len(string_in_list_form)):\n\t\t\tif(constant_list[m] == string_in_list_form[n]):\n\t\t\t\tl = n\n\t\t\t\twhile( l <= len(string_in_list_form) - 1):\n\t\t\t\t\tconstant_compination_counter +=1\n\t\t\t\t\tl +=1\n\t#print('Constant_compinations_counter',constant_compination_counter)\n\n\tif (constant_compination_counter > vowel_compinations_counter):\n\t\tprint('Stuart',constant_compination_counter)\n\telif (constant_compination_counter < vowel_compinations_counter):\n\t\tprint('Kevin', vowel_compinations_counter )\n\telse:\n\t\tprint('Draw')\n\ntest_string = 'BANANA'\nminion_game(test_string)","sub_path":"1.Captilize.py","file_name":"1.Captilize.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"3120144","text":"import unittest\n\nfrom util.observable import Observer, Subject\n\n\nclass TestObserver(Observer):\n\n def on_update(self, arg):\n Observer.on_update(self, arg)\n\n\nclass ObservableTestCase(unittest.TestCase):\n\n @staticmethod\n def test_cases():\n return [\n ObservableTestCase('test_attached'),\n ObservableTestCase('test_detach'),\n ObservableTestCase('test_subject_destroy'),\n ObservableTestCase('test_observable_updated'),\n ObservableTestCase('test_observable_destroy')\n ]\n\n def test_attached(self):\n subject = Subject()\n observer = TestObserver()\n subject.attach(observer)\n\n self.assertTrue(len(subject._observers) == 1)\n self.assertEqual(observer._subject, subject)\n\n def test_detach(self):\n subject = Subject()\n observer = TestObserver()\n subject.attach(observer)\n subject.detach(observer)\n\n 
self.assertTrue(len(subject._observers) == 0)\n self.assertEqual(observer._subject, None)\n\n def test_subject_destroy(self):\n subject = Subject()\n observer = TestObserver()\n subject.attach(observer)\n subject.destroy()\n\n self.assertTrue(len(subject._observers) == 0)\n self.assertEqual(observer._subject, None)\n\n def test_observable_updated(self):\n subject = Subject()\n observer = TestObserver()\n subject.attach(observer)\n subject.state = 1\n\n self.assertEqual(1, observer._state)\n\n def test_observable_destroy(self):\n subject = Subject()\n observer = TestObserver()\n subject.attach(observer)\n observer.detach()\n\n self.assertTrue(len(subject._observers) == 0)\n self.assertEqual(observer._subject, None)\n","sub_path":"tests/test_util/observable.py","file_name":"observable.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"587770845","text":"from utils import *\nimport numpy as np\nimport numpy.linalg as nlg\nimport sparse_coding\n\nDictionary = np.load('experiment_dictionary_16.npz')\nD1, D2 = Dictionary['D1'], Dictionary['D2']\nx1, x2 = Dictionary['x1'], Dictionary['x2']\nimg1_mean, img2_mean = Dictionary['image_1_mean'], Dictionary['image_2_mean']\ny1, y2 = Dictionary['y1'], Dictionary['y2']\n\nprint(\" ||D1^T * D2|| =\", \"%1.4f\" % nlg.norm(np.transpose(D1)@D2),\n \" ||D1^T * D1|| =\", \"%1.4f\" % nlg.norm(np.transpose(D1)@D1),\n \" ||D2^T * D2|| =\", \"%1.4f\" % nlg.norm(np.transpose(D2)@D2))\n\nimage_1 = y1\nimage_2 = y2\n\n\"\"\" set i \"\"\"\ni = 0\n\n\"\"\" build A \"\"\"\nresidual_y1 = image_1 - Dx2image(D1, x1)\nresidual_y2 = image_2 - Dx2image(D2, x2)\n\nresidual_y1 = image2patches(residual_y1, patch_size=8)\nresidual_y2 = image2patches(residual_y2, patch_size=8)\n\nresidual_y1 = patches2flat(residual_y1)\nresidual_y2 = patches2flat(residual_y2)\n\n# a = residual_y1[i, :].reshape(1, 64)\n# b = residual_y2[i, :].reshape(1, 64)\nA = np.concatenate((residual_y1, residual_y2), axis=0)\n\n# \"\"\" test \"\"\"\n# D1x1 = D1@x1\n# Dx = D1x1[:, i].reshape(64, 1)\n# print(A@Dx)\n# print(nlg.norm(residual_y1@D1x1))\n\n\"\"\" build y mixture\"\"\"\ny_mixture = y1 + y2\ny_mixture = image2DLtrain(y_mixture, patch_size=8)\n\n\"\"\" build initial \"\"\"\n# x1_i = x1[:, i].reshape(32, 1)\n# x2_i = x2[:, i].reshape(32, 1)\n\n\"\"\" sparse coding test \"\"\"\n#sc = sparse_coding.SparseCoding2D1A(D1=D1, D2=D2, A=A,\n# lambda_1=0.01, lambda_2=0.01, lambda_3=1, lambda_4=1, lambda_5=0.01, lambda_6=0.01)\n\nsc = sparse_coding.SparseCoding2D(D1=D1, D2=D2)\nprint(\"sparse coding (soft)\")\nx1_sc, x2_sc = sc.get_x(y=y_mixture, x1=None, x2=None, num_iter=3000, proximal_use=\"soft\", soft_coef=0.01)\nprint(\"sparse coding (greedy)\")\nx1_sc, x2_sc = sc.get_x(y=y_mixture, x1=x1_sc, x2=x2_sc, num_iter=3000, proximal_use=\"greedy\", n_nonzero=3)\n\nprint('y1 psnr: ', psnr_wo_mean(Dx2image(D1, x1_sc), y1),\n 'y2 psnr: ', psnr_wo_mean(Dx2image(D2, x2_sc), y2),\n 'D1x1 psnr: ', psnr_wo_mean(Dx2image(D1, x1_sc), Dx2image(D1, x1)),\n 'D2x2 psnr: ', psnr_wo_mean(Dx2image(D2, x2_sc), Dx2image(D2, x2)))\n\nplot_Dx(D1, x1_sc)\nplot_Dx(D2, x2_sc)\n\nprint(\"done\")\n","sub_path":"Final _report/05_4105053128/training_test_19.py","file_name":"training_test_19.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"374963120","text":"import subprocess\nimport shutil\nimport signal\nimport string\nimport random\nimport 
re\nimport json\nimport time\nimport os\nfrom wpasupplicantconf import WpaSupplicantConf\n\nWPA_SUPPLICANT_CONF_PATH = '/etc/wpa_supplicant/wpa_supplicant.conf'\nWPA_SUPPLIANT_CONF_BACKUP_PATH = '/etc/wpa_supplicant/wpa_supplicant.bak'\n\nfrom flask import Flask, request, send_from_directory, render_template, redirect\napp = Flask(__name__, static_url_path='')\n\ncurrentdir = os.path.dirname(os.path.abspath(__file__))\nos.chdir(currentdir)\n\nssid_list = []\ndef getssid():\n    global ssid_list\n    if len(ssid_list) > 0:\n        return ssid_list\n    ssid_list = []\n    get_ssid_list = subprocess.check_output(('iw', 'dev', 'wlan0', 'scan', 'ap-force'))\n    ssids = get_ssid_list.splitlines()\n    for s in ssids:\n        s = s.strip().decode('utf-8')\n        if s.startswith(\"SSID\"):\n            a = s.split(\": \")\n            try:\n                ssid_list.append(a[1])\n            except:\n                pass\n    print(ssid_list)\n    ssid_list = sorted(list(set(ssid_list)))\n    return ssid_list\n\ndef id_generator(size=6, chars=string.ascii_lowercase + string.digits):\n    return ''.join(random.choice(chars) for _ in range(size))\n\n@app.route('/')\ndef main():\n    return render_template('index.html', ssids=getssid())\n\n# Captive portal when connected with iOS or Android\n@app.route('/generate_204')\ndef redirect204():\n    return redirect(\"http://192.168.4.1\", code=302)\n\n@app.route('/hotspot-detect.html')\ndef applecaptive():\n    return redirect(\"http://192.168.4.1\", code=302)\n\n# Not working for Windows, needs work!\n@app.route('/ncsi.txt')\ndef windowscaptive():\n    return redirect(\"http://192.168.4.1\", code=302)\n\ndef check_cred(ssid, password):\n    '''Validates ssid and password and returns True if valid and False if not valid'''\n    wpadir = currentdir + '/wpa/'\n    testconf = wpadir + 'test.conf'\n    wpalog = wpadir + 'wpa.log'\n    wpapid = wpadir + 'wpa.pid'\n\n    if not os.path.exists(wpadir):\n        os.mkdir(wpadir)\n\n    for _file in [testconf, wpalog, wpapid]:\n        if os.path.exists(_file):\n            os.remove(_file)\n\n    # Generate temp wpa.conf\n    result = subprocess.check_output(['wpa_passphrase', ssid, password])\n    with open(testconf, 'w') as f:\n        f.write(result.decode('utf-8'))\n\n    def stop_ap(stop):\n        if stop:\n            # Services need to be stopped to free up wlan0 interface\n            print(subprocess.check_output(['systemctl', \"stop\", \"hostapd\", \"dnsmasq\", \"dhcpcd\"]))\n        else:\n            print(subprocess.check_output(['systemctl', \"restart\", \"dnsmasq\", \"dhcpcd\"]))\n            time.sleep(15)\n            print(subprocess.check_output(['systemctl', \"restart\", \"hostapd\"]))\n\n    # Sentences to check for\n    fail = \"pre-shared key may be incorrect\"\n    success = \"CTRL-EVENT-CONNECTED\"\n\n    stop_ap(True)\n\n    result = subprocess.check_output(['wpa_supplicant',\n                                      \"-Dnl80211\",\n                                      \"-iwlan0\",\n                                      \"-c/\" + testconf,\n                                      \"-f\", wpalog,\n                                      \"-B\",\n                                      \"-P\", wpapid])\n\n    checkwpa = True\n    while checkwpa:\n        with open(wpalog, 'r') as f:\n            content = f.read()\n        if success in content:\n            valid_psk = True\n            checkwpa = False\n        elif fail in content:\n            valid_psk = False\n            checkwpa = False\n        else:\n            continue\n\n    # Kill wpa_supplicant to stop it from setting up dhcp, dns\n    with open(wpapid, 'r') as p:\n        pid = p.read()\n    pid = int(pid.strip())\n    os.kill(pid, signal.SIGTERM)\n\n    stop_ap(False) # Restart services\n    return valid_psk\n\n@app.route('/static/<path:path>')\ndef send_static(path):\n    return send_from_directory('static', path)\n\n@app.route('/signin', methods=['POST'])\ndef signin():\n    ssid = request.form['ssid']\n    password = request.form['password']\n\n    print(ssid, password)\n    valid_psk = check_cred(ssid, password)\n    if not valid_psk:\n        # User will not see 
this because they will be disconnected but we need to break here anyway\n return render_template('ap.html', message=\"Wrong password!\")\n\n restoreFromBackupAndUpdateNetwork(ssid, password)\n\n with open('status.json', 'w') as f:\n f.write(json.dumps({'status':'disconnected'}))\n print(\"Disabling access point\")\n fullPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'disable_ap.sh')\n try:\n print(subprocess.check_output(fullPath, shell=True, stderr=subprocess.STDOUT, cwd=os.path.dirname(os.path.realpath(__file__))))\n except subprocess.CalledProcessError as callEx:\n print(\"Error disabling access point: \" + callEx.output.decode(\"utf-8\"))\n return render_template('index.html', message=\"Please wait 2 minutes to connect.\")\n\ndef wificonnected():\n result = subprocess.check_output(['iwconfig', 'wlan0'])\n matches = re.findall(r'\\\"(.+?)\\\"', result.split(b'\\n')[0].decode('utf-8'))\n if len(matches) > 0:\n print(\"got connected to \" + matches[0])\n return True\n return False\n\ndef restoreFromBackupAndUpdateNetwork(ssid, pwd):\n if os.path.exists(WPA_SUPPLIANT_CONF_BACKUP_PATH):\n shutil.copy2(WPA_SUPPLIANT_CONF_BACKUP_PATH, WPA_SUPPLICANT_CONF_PATH)\n \n lines = []\n with open(WPA_SUPPLICANT_CONF_PATH, 'r') as supplicantFile:\n lines = supplicantFile.readlines()\n supplicantReader = WpaSupplicantConf(lines)\n if (ssid in supplicantReader.networks()):\n supplicantReader.remove_network(ssid)\n if pwd == \"\":\n supplicantReader.add_network(ssid, key_mgmt=\"NONE\")\n else:\n supplicantReader.add_network(ssid, psk=\"\\\"{}\\\"\".format(pwd))\n with open (WPA_SUPPLICANT_CONF_PATH, 'w') as supplicantFile:\n supplicantReader.write(supplicantFile)\n\ndef backupAndEmptySupplicantConf():\n shutil.copy2(WPA_SUPPLICANT_CONF_PATH, WPA_SUPPLIANT_CONF_BACKUP_PATH)\n lines = []\n with open(WPA_SUPPLICANT_CONF_PATH, 'r') as supplicantFile:\n lines = supplicantFile.readlines()\n supplicantReader = WpaSupplicantConf(lines)\n\n for network in list(supplicantReader.networks().keys()):\n supplicantReader.remove_network(network)\n with open (WPA_SUPPLICANT_CONF_PATH, 'w') as supplicantFile:\n supplicantReader.write(supplicantFile)\n\nif __name__ == \"__main__\":\n # get status\n s = {'status':'disconnected'}\n if not os.path.isfile('status.json'):\n with open('status.json', 'w') as f:\n f.write(json.dumps(s))\n else:\n s = json.load(open('status.json'))\n\n # check connection\n if wificonnected():\n s['status'] = 'connected'\n else:\n if s['status'] == 'connected': # Don't change if status in status.json is hostapd\n s['status'] = 'disconnected'\n\n with open('status.json', 'w') as f:\n f.write(json.dumps(s))\n if s['status'] == 'disconnected':\n s['status'] = 'hostapd'\n with open('status.json', 'w') as f:\n f.write(json.dumps(s))\n \n print(\"Enabling access point\")\n backupAndEmptySupplicantConf()\n fullPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'enable_ap.sh')\n try:\n print(subprocess.check_output(fullPath, shell=True, stderr=subprocess.STDOUT, cwd=os.path.dirname(os.path.realpath(__file__))))\n except subprocess.CalledProcessError as callEx:\n print(\"Error enabling access point: \" + callEx.output.decode(\"utf-8\"))\n\n elif s['status'] == 'connected':\n print(\"Connected to WiFi - no access point needed\")\n pass\n else:\n print(\"Running turnkey portal\")\n app.run(host=\"0.0.0.0\", port=80, 
threaded=True)\n","sub_path":"startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":7819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"15851688","text":"\"\"\"\n\nFastener Unit Tests\n\nname: fastener_tests.py\nby: Gumyr\ndate: August 24th 2021\n\ndesc: Unit tests for the fastener sub-package of cq_warehouse\n\nlicense:\n\n Copyright 2021 Gumyr\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\"\"\"\n# from os import setsid\nfrom typing import Set\nimport unittest\nfrom pydantic.main import BaseModel\nfrom tests import BaseTest\nimport cadquery as cq\nfrom cq_warehouse.fastener import (\n HexNut,\n SquareNut,\n SocketHeadCapScrew,\n ButtonHeadCapScrew,\n HexBolt,\n SetScrew,\n ExternalThread,\n InternalThread,\n decode_imperial_size,\n imperial_str_to_float,\n metric_str_to_float,\n is_safe,\n)\n\n# import cadquery as cq\n\nMM = 1\nIN = 25.4 * MM\nVERBOSE = False\nFULLTEST = False\n\n\nclass TestSupportFunctions(BaseTest):\n def test_decode_imperial_size(self):\n self.assertTupleAlmostEquals((1.524, 0.3175), decode_imperial_size(\"#0-80\"), 5)\n self.assertTupleAlmostEquals(\n (1.25 * IN, IN / 32), decode_imperial_size(\"1 1/4-32\"), 5\n )\n\n def test_is_safe(self):\n self.assertTrue(is_safe(\"1 1/8\"))\n self.assertFalse(is_safe(\"rm -rf *.*\"))\n\n def test_imperial_str_to_float(self):\n self.assertAlmostEqual(imperial_str_to_float(\"1 1/2\"), 1.5 * IN)\n with self.assertRaises(ValueError):\n imperial_str_to_float(\"rm -rf *.*\")\n\n def test_metric_str_to_float(self):\n self.assertEqual(metric_str_to_float(\" 1000 \"), 1000)\n with self.assertRaises(ValueError):\n metric_str_to_float(\"rm -rf *.*\")\n\n\nclass TestExternalThread(BaseTest):\n def test_exterior_thread(self):\n \"\"\" Simple validity check for an exterior thread \"\"\"\n\n thread = ExternalThread(\n major_diameter=0.1900 * IN, pitch=IN / 32, length=(1 / 4) * IN\n )\n self.assertTrue(thread.cq_object.isValid())\n self.assertIsNone(thread.external_thread_core_radius)\n with self.assertRaises(ValueError):\n ExternalThread(major_diameter=5, pitch=1, length=5, hand=\"righty\")\n\n\nclass TestInternalThread(BaseTest):\n def test_interior_thread(self):\n \"\"\" Simple validity check for an interior thread \"\"\"\n\n thread = InternalThread(\n major_diameter=0.1900 * IN, pitch=IN / 32, length=(1 / 4) * IN\n )\n self.assertTrue(thread.cq_object.isValid())\n with self.assertRaises(ValueError):\n InternalThread(major_diameter=5, pitch=1, length=5, hand=\"righty\")\n\n\nclass TestNutParent(BaseTest):\n def test_nut_parameters(self):\n with self.assertRaises(ValueError):\n HexNut(size=\"missing\")\n\n\nclass TestHexNut(BaseTest):\n \"\"\" Test HexNut class functionality \"\"\"\n\n def test_hexnut_interface_options(self):\n \"\"\" Validate both interface types are functional \"\"\"\n nut = HexNut(size=\"M4-0.7\", simple=True)\n self.assertTrue(nut.cq_object.isValid())\n nut = HexNut(\n width=7,\n thickness=3.2,\n thread_diameter=4,\n thread_pitch=0.7,\n 
hand=\"left\",\n simple=True,\n )\n self.assertTrue(nut.cq_object.isValid())\n\n def test_hexnut_validity(self):\n \"\"\" Simple validity check for all the stand sized hex head nuts \"\"\"\n\n if FULLTEST:\n test_set = HexNut.metric_sizes() + HexNut.imperial_sizes()\n else:\n test_set = HexNut.metric_sizes()[:1]\n for i, size in enumerate(test_set):\n if size in [\"M6-1\", \"1/4-20\", \"1/4-28\", \"5/16-18\", \"5/16-24\"]:\n continue\n if VERBOSE:\n print(f\"Testing HexNut size {size} - {i+1} of {len(test_set)}\")\n with self.subTest(size=size):\n self.assertTrue(HexNut(size=size).cq_object.isValid())\n\n\nclass TestSquareNut(BaseTest):\n \"\"\" Test SquareNut class functionality \"\"\"\n\n def test_squarenut_validity(self):\n \"\"\" Simple validity check for all the stand sized square head nuts \"\"\"\n\n if FULLTEST:\n test_set = SquareNut.metric_sizes() + SquareNut.imperial_sizes()\n else:\n test_set = SquareNut.imperial_sizes()[:1]\n for i, size in enumerate(test_set):\n if size in [\"M6-1\", \"1/4-20\", \"1/4-28\", \"5/16-18\", \"5/16-24\"]:\n continue\n if VERBOSE:\n print(f\"Testing SquareNut size {size} - {i+1} of {len(test_set)}\")\n with self.subTest(size=size):\n self.assertTrue(SquareNut(size=size).cq_object.isValid())\n\n\nclass TestScrewParent(BaseTest):\n def test_screw_parameters(self):\n with self.assertRaises(AttributeError):\n HexBolt(size=\"M3-0.5\")\n with self.assertRaises(ValueError):\n HexBolt(size=\"missing\", length=5)\n # thread_length too long\n with self.assertRaises(ValueError):\n HexBolt(\n length=5,\n head_width=5,\n head_height=3,\n thread_diameter=2,\n thread_pitch=0.5,\n thread_length=12,\n )\n\n def test_screw_measurements(self):\n self.assertGreater(len(HexBolt.metric_sizes()), 0)\n self.assertGreater(len(HexBolt.imperial_sizes()), 0)\n\n def test_stepped_bolt(self):\n head = HexBolt(size=\"M3-0.5\", length=15 * MM, simple=True).head\n shank = ExternalThread(\n major_diameter=3 * MM, pitch=0.5 * MM, length=5 * MM, simple=True\n ).make_shank(body_length=10 * MM, body_diameter=4 * MM)\n hex_bolt = head.union(shank, glue=True)\n # cq.exporters.export(hex_bolt.val(), \"hex_bolt.step\")\n self.assertTrue(hex_bolt.val().isValid())\n\n\nclass TestHexBolt(BaseTest):\n \"\"\" Test HexBolt class functionality \"\"\"\n\n def test_hexbolt_validity(self):\n \"\"\" Simple validity check for all the stand sized hex head bolts \"\"\"\n\n if FULLTEST:\n test_set = HexBolt.metric_sizes() + HexBolt.imperial_sizes()\n else:\n test_set = HexBolt.metric_sizes()[:1]\n for i, size in enumerate(test_set):\n if VERBOSE:\n print(f\"Testing HexBolt size {size} - {i+1} of {len(test_set)}\")\n with self.subTest(size=size):\n self.assertTrue(HexBolt(size=size, length=5 * MM).cq_object.isValid())\n\n\nclass TestSocketHeadCapScrew(BaseTest):\n \"\"\" Test SocketHeadCapScrew class functionality \"\"\"\n\n def test_socket_head_cap_screw_validity(self):\n \"\"\" Simple validity check for all the stand sized socket head cap screws \"\"\"\n\n if FULLTEST:\n test_set = (\n SocketHeadCapScrew.metric_sizes() + SocketHeadCapScrew.imperial_sizes()\n )\n else:\n test_set = SocketHeadCapScrew.metric_sizes()[:1]\n\n for i, size in enumerate(test_set):\n if VERBOSE:\n print(\n f\"Testing SocketHeadCapScrew size {size} - {i+1} of {len(test_set)}\"\n )\n with self.subTest(size=size):\n self.assertTrue(\n SocketHeadCapScrew(size=size, length=5 * MM).cq_object.isValid()\n )\n\n def test_socket_head_cap_screw_thread_length(self):\n \"\"\" Set the thread length parameter \"\"\"\n self.assertTrue(\n 
SocketHeadCapScrew(\n length=20,\n head_diameter=10,\n head_height=5,\n thread_diameter=5,\n thread_pitch=1,\n thread_length=10,\n socket_size=4,\n socket_depth=2,\n hand=\"left\",\n simple=True,\n ).cq_object.isValid()\n )\n\n\nclass TestButtonHeadCapScrew(BaseTest):\n \"\"\" Test ButtonHeadCapScrew class functionality \"\"\"\n\n def test_button_head_cap_screw_validity(self):\n \"\"\" Simple validity check for all the stand sized button head cap screws \"\"\"\n\n if FULLTEST:\n test_set = (\n ButtonHeadCapScrew.metric_sizes() + ButtonHeadCapScrew.imperial_sizes()\n )\n else:\n test_set = ButtonHeadCapScrew.metric_sizes()[:1]\n for i, size in enumerate(test_set):\n if VERBOSE:\n print(\n f\"Testing ButtonHeadCapScrew size {size} - {i+1} of {len(test_set)}\"\n )\n with self.subTest(size=size):\n self.assertTrue(\n ButtonHeadCapScrew(size=size, length=5 * MM).cq_object.isValid()\n )\n\n\nclass TestSetScrew(BaseTest):\n \"\"\" Test SetScrew class functionality \"\"\"\n\n def test_setscrew_validity(self):\n \"\"\" Simple validity check for all the stand sized setscrews \"\"\"\n\n self.assertIsNone(SetScrew(size=\"#4-40\", length=5).make_head())\n self.assertIsNone(SetScrew(size=\"#4-40\", length=5).head)\n self.assertIsNone(SetScrew(size=\"#4-40\", length=5).shank)\n\n if FULLTEST:\n test_set = SetScrew.metric_sizes()\n else:\n test_set = SetScrew.metric_sizes()[:1]\n for i, size in enumerate(test_set):\n if size in [\"M20-2.5\"]:\n continue\n if VERBOSE:\n print(f\"Testing SetScrew size {size} - {i+1} of {len(test_set)}\")\n min_length = SetScrew.metric_parameters[size][\"Socket_Depth\"] * 1.5\n with self.subTest(size=size):\n self.assertTrue(\n SetScrew(size=size, length=min_length).cq_object.isValid()\n )\n if FULLTEST:\n test_set = SetScrew.imperial_sizes()\n else:\n test_set = []\n for i, size in enumerate(test_set):\n if size in [\"#0-80\", \"3/8-16\", \"3/8-24\", \"7/16-14\", \"5/8-11\"]:\n continue\n if VERBOSE:\n print(f\"Testing SetScrew size {size} - {i+1} of {len(test_set)}\")\n min_length = SetScrew.imperial_parameters[size][\"Socket_Depth\"] * 1.5\n with self.subTest(size=size):\n self.assertTrue(\n SetScrew(size=size, length=min_length).cq_object.isValid()\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/fastener_tests.py","file_name":"fastener_tests.py","file_ext":"py","file_size_in_byte":10626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"464741052","text":"import time\nimport json\nimport requests\nimport urllib\n\nfrom DbHelperI import DbHelper\ndb = DbHelper()\n\n#Default variables keep it in CAPS\nTOKEN = r'YOUR TOKEN HERE'\nURL = r'https://api.telegram.org/bot{0}/'.format(TOKEN)\n#print(URL)\n\n#Get things from url, vinay dont mess with variables keep it understandable\n\ndef get_url(url):\n response = requests.get(url)\n return response.content.decode('utf8')\n\n#print(get_url(URL))\n\ndef get_json_from_url(url):\n content = json.loads(get_url(url))\n return content\n\ndef get_me():\n url = URL + 'getme'\n return get_json_from_url(url)\n\ndef get_updates(offset=None):\n url = URL + \"getUpdates?timeout=100\"\n if offset:\n url += \"&offset={0}\".format(offset)\n return get_json_from_url(url)\n\n#newly adding get_last_update_id, handle_updates, keyboard\n\ndef build_keyboard(items):\n keyboard = [[item] for item in items]\n reply_markup = {'keyboard': keyboard, 'one_time_keyboard': True}\n return json.dumps(reply_markup)\n\ndef hide_keyboard():\n keyboardHide = {'hide_keyboard' :True}\n 
return json.dumps(keyboardHide)\n\ndef get_last_update_id(updates):\n update_ids=[]\n for update in updates['result']:\n update_ids.append(int(update['update_id']))\n return max(update_ids)\n \ndef get_last_chat_id_and_text(updates):\n #updates is a dictionary with 'ok' and 'result' as keys :)\n total_updates = len(updates['result'])\n last_update = total_updates - 1\n text = updates['result'][last_update]['message']['text']\n chat_id = updates['result'][last_update]['message']['chat']['id']\n return (text,chat_id)\n\ndef send_message(text, chat_id, reply_markup = None):\n text = urllib.parse.quote_plus(text)\n url= URL + \"sendMessage?text={0}&chat_id={1}&parse_mode=Markdown\".format(text, chat_id)\n if reply_markup:\n url += '&reply_markup={0}'.format(reply_markup)\n get_url(url)\n #print(\"Done\")\n \nwelcome = '''\nWelcome to your personal To Do list.\nSend any text to me and I'll store it as an item.\nExtra commands, send:\n/start to show this again\n/show to show tasks\n/done to remove items\n/exit to terminate any process\nBot by @vinay26k\n'''\n\ndef not_empty(chat):\n if len(db.get_items(chat)):\n return True\n else:\n return False\n\ndef add(text,chat):\n db.add_item(text,chat)\n items = db.get_items(chat)\n message = \"\\n\".join(items)\n send_message(message, chat)\n \ndef handle_updates(updates):\n for update in updates[\"result\"]:\n text = update[\"message\"][\"text\"]\n chat = update[\"message\"][\"chat\"][\"id\"]\n items = db.get_items(chat)\n if text == \"/done\":\n #print(len(db.get_items(chat)))\n if len(db.get_items(chat)):\n keyboard = build_keyboard(items)\n send_message(\"Select an item to delete\", chat, keyboard)\n else:\n #print(len(db.get_items(chat)))\n keyboard = hide_keyboard()\n send_message(\"All tasks completed, add more tasks\",chat,keyboard)\n #send_message(\"All tasks completed, add more tasks\",chat,None)\n #items =['/exit']\n #keyboard = build_keyboard(items)\n #send_message(\"All tasks completed, add more tasks\",chat,keyboard)\n elif text == \"/start\":\n send_message(welcome,chat)\n elif text == \"/show\":\n if not_empty(chat):\n items = db.get_items(chat)\n message = \"\\n\".join(items)\n send_message(message, chat,None)\n else:\n send_message(\"All tasks completed, add more tasks\",chat,None)\n elif text=='/exit':\n keyboard = hide_keyboard()\n send_message(\"Process terminated\",chat,keyboard)\n elif text.startswith(\"/\"):\n send_message('Unknown command',chat,None)\n continue\n \n elif text in items:\n db.delete_item(text,chat)\n items = db.get_items(chat)\n n = len(items)\n if n:\n keyboard = build_keyboard(items)\n send_message(\"Select an item to delete\\n or /exit to terminate\", chat, keyboard)\n else:\n #items =['/exit']\n #keyboard = build_keyboard(items)\n #send_message(\"All tasks completed, add more tasks\",chat,keyboard)\n #print(len(db.get_items(chat)))\n keyboard = hide_keyboard()\n send_message(\"All tasks completed, add more tasks\",chat,keyboard)\n else:\n add(text,chat)\n '''\n db.add_item(text,chat)\n items = db.get_items(chat)\n message = \"\\n\".join(items)\n send_message(message, chat)'''\n\n\n#main polling loop\ndef main():\n db.setup()\n last_update_id = None\n while True:\n #print(\"Getting Updates\")\n updates = get_updates(last_update_id)\n if(len(updates['result']))>0:\n last_update_id = get_last_update_id(updates) + 1\n handle_updates(updates)\n time.sleep(0.5)\n\nif __name__ == '__main__':\n 
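# Start the long-polling loop; it keeps fetching and handling updates until interrupted.\n 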
main()\n\n\n","sub_path":"Projects/TelegramBot/TelegramBot.py","file_name":"TelegramBot.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"452589946","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('speak2learn', '0006_auto_20141228_2236'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='videoconnection',\n old_name='stundent_online',\n new_name='student_online',\n ),\n ]\n","sub_path":"speak2learn/migrations/0007_auto_20141228_2248.py","file_name":"0007_auto_20141228_2248.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"208806555","text":"import pytest\n\nfrom topic_05_data_structure.practice.list_comprehension_2_list_pow_even import list_pow_even\n\nparams = [\n ([], 'Must be int!'),\n (None, 'Must be int!'),\n ('1', 'Must be int!'),\n\n (-3, []),\n (0, []),\n (1, [0]),\n (2, [0]),\n (3, [0, 4]),\n (4, [0, 4]),\n (5, [0, 4, 16]),\n (6, [0, 4, 16]),\n (7, [0, 4, 16, 36]),\n]\n\nids = [\"n=%s => %s\" % (n, expected) for (n, expected) in params]\n\n\n@pytest.mark.parametrize(argnames=\"n, expected\",\n argvalues=params,\n ids=ids)\ndef test_list_pow_even(n, expected):\n assert list_pow_even(n) == expected\n\n\n\n","sub_path":"topic_05_data_structure/practice/tests/list_comprehension_2_list_pow_even_test.py","file_name":"list_comprehension_2_list_pow_even_test.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"29605923","text":"\"\"\"\nImplements averaging the gradients of several training runs in tensorflow before updating the weights.\nAbandoned as a failure.\nUpdating the weights this way in tf requires creating extra gradient variables to store the historical gradients; there are too many of them, and memory probably cannot hold them.\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport time\n\n\nclass Confg:\n def __init__(self):\n self.name = 'test'\n self.version = 'update_grads_after_times_in_tf'\n\n self.gpu_num = 1\n\n self.num_cls = 10\n\n self.save_path = f'./models/{self.name}_{self.version}'\n\n self.batch_size = 16\n\n\ncfg = Confg()\n\n\nclass Tensor:\n def __init__(self):\n with tf.device('/gpu:0'):\n # placeholder\n self.training = tf.placeholder(tf.bool, shape=[], name='training')\n # optimizer\n opt = tf.train.AdamOptimizer()\n # global step counter\n self.global_step = tf.get_variable('global_step', shape=[],\n initializer=tf.constant_initializer(0),\n trainable=False)\n self.add_global_step = tf.assign_add(self.global_step, 1)\n\n with tf.variable_scope('APP'): # this line is crucial!\n self.sub_ts = []\n for i in range(cfg.gpu_num):\n first = False if i == 0 else True\n with tf.device(f'/gpu:{i}'):\n print(f'GPU: {i}')\n self.sub_ts.append(SubTensor(opt, self.training, first))\n\n with tf.device('/gpu:0'):\n print('Merging grads...')\n self.grads1 = self.merge_grads(lambda ts: ts.grads1)\n # # [(grad, var), (grad, var), ...]\n\n # if a new round is starting, reset; otherwise the gradients are accumulated\n self.should_new_grads1 = tf.placeholder(tf.bool, [], 'should_new_grads1')\n self.update_grads1 = self.create_zero_grads_like(self.grads1)\n # list of accumulation ops\n self.assgin_add_grads1 = self.op_add_grads(self.update_grads1, self.grads1, self.should_new_grads1)\n # list of averaging ops\n assign_avg_grads1 = self.op_avg_grads(self.update_grads1, 4)\n\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS) + assign_avg_grads1):\n self.train1 = opt.apply_gradients(zip(self.update_grads1,\n [gv[1] for gv in self.grads1]))\n\n 
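# Average the per-tower losses into a single scalar for monitoring.\n 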
print('Reduce_meaning loss1...')\n self.loss1 = tf.reduce_mean([_ts.loss1 for _ts in self.sub_ts])\n\n def merge_grads(self, f):\n \"\"\"\n ts.grads [(grad, var), (grad, var), ...]\n :return: [(grad, var), (grad, var), ...]\n \"\"\"\n var_grad = {} # var: [grad1, grad2, ...]\n var_IndexedSlices = {} # var: [IndexedSlices1, IndexedSlices2, ...]\n for ts in self.sub_ts:\n for grad, var in f(ts):\n if grad is None:\n grad = tf.zeros_like(var, dtype=tf.float32)\n if isinstance(grad, tf.IndexedSlices):\n if var not in var_IndexedSlices:\n var_IndexedSlices[var] = []\n var_IndexedSlices[var].append(grad)\n else:\n if var not in var_grad:\n var_grad[var] = []\n var_grad[var].append(grad)\n\n # return the grad-var pairs used to apply the gradients\n # ordinary var grads are simply averaged\n grad_var = [(tf.reduce_mean(var_grad[var], axis=0), var) for var in var_grad]\n # grad_var = [(var_grad[var][0], var) for var in var_grad]\n # for slices, concatenate the values and indices gathered from the different GPUs into a new IndexedSlices\n for var in var_IndexedSlices:\n IndexedSlices = var_IndexedSlices[var] # [IndexedSlices1, IndexedSlices2, ...]\n indices = tf.concat([i.indices for i in IndexedSlices], axis=0)\n values = tf.concat([i.values for i in IndexedSlices], axis=0)\n new_IndexedSlices = tf.IndexedSlices(values, indices)\n grad_var.append((new_IndexedSlices, var))\n return grad_var\n\n def create_zero_grads_like(self, grad_vars):\n new_grad_vars = []\n for ind, gv in enumerate(grad_vars):\n grad, var = gv\n new_grad = tf.get_variable(name=f'{ind}', shape=tf.shape(gv), dtype=tf.float32,\n trainable=False, initializer=tf.initializers.zeros())\n new_grad_vars.append((new_grad, var))\n return new_grad_vars\n\n def op_add_grads(self, update_grads, grads, should_new_grads):\n ops = []\n # if should_new_grads, assign grad to update_grad\n # otherwise, assign update_grad + grad to update_grad\n for ind, gv in enumerate(grads):\n grad, var = gv\n op = tf.cond(should_new_grads,\n lambda: tf.assign(update_grads[ind], grad),\n lambda: tf.assign(update_grads[ind], update_grads[ind] + grad))\n ops.append(op)\n return ops\n\n def op_avg_grads(self, grads, num):\n return [tf.assign(grads[0][0], grads[0][0] / num) for i in range(len(grads))]\n\n\nclass SubTensor:\n def __init__(self, opt, training, first):\n self._training = training\n self._first = first\n\n self.x = tf.placeholder(tf.float32, shape=[None, 16, 16, 3], name='x')\n self.y = tf.placeholder(tf.int32, shape=[None, ], name='y')\n y = tf.one_hot(self.y, depth=cfg.num_cls, dtype=tf.float32)\n\n x = self.conv(self.x, name='conv', reuse=first)\n x = tf.layers.flatten(x)\n x = self.dense(x, name='dense', reuse=first)\n\n loss1 = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=x)\n self.loss1 = tf.reduce_mean(loss1)\n var1 = [v for v in tf.trainable_variables()]\n self.grads1 = opt.compute_gradients(self.loss1, var_list=var1)\n\n def conv(self, x, name, reuse):\n with tf.variable_scope(name, reuse=reuse):\n for ind in range(4):\n x = tf.layers.conv2d(x, 3, 3, 1, name=f'c3s1_{ind}', padding='same')\n x = tf.layers.batch_normalization(x, name=f'bn{ind}', training=self._training)\n x = tf.nn.relu(x)\n x = tf.layers.max_pooling2d(x, 2, 2, padding='same')\n return x\n\n def dense(self, x, name, reuse):\n with tf.variable_scope(name, reuse=reuse):\n x = tf.layers.dense(x, 1, name='dense1')\n x = tf.layers.dense(x, 1, name='dense2')\n x = tf.layers.dense(x, cfg.num_cls, name='dense3')\n return x\n\n\nclass App:\n def __init__(self):\n self.inputs = [np.random.normal(size=[cfg.batch_size, 16, 16, 3]),\n np.random.normal(size=[cfg.batch_size, 16, 16, 3]),\n np.random.normal(size=[cfg.batch_size, 16, 16, 
3]),\n np.random.normal(size=[cfg.batch_size, 16, 16, 3])]\n self.labels = [np.random.randint(0, cfg.num_cls + 1, size=[cfg.batch_size, ]),\n np.random.randint(0, cfg.num_cls + 1, size=[cfg.batch_size, ]),\n np.random.randint(0, cfg.num_cls + 1, size=[cfg.batch_size, ]),\n np.random.randint(0, cfg.num_cls + 1, size=[cfg.batch_size, ])]\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.ts = Tensor()\n conf = tf.ConfigProto()\n conf.allow_soft_placement = True\n self.session = tf.Session(config=conf)\n self.saver = tf.train.Saver()\n self.session.run(tf.global_variables_initializer())\n try:\n self.saver.restore(self.session, cfg.save_path)\n print(f'Restore model from {cfg.save_path} succeeded.')\n except:\n print(f'Restore model from {cfg.save_path} failed.')\n\n def train(self):\n epochs = 5\n run_list = [self.ts.global_step, self.ts.loss1, self.ts.grads1, self.ts.assgin_add_grads1]\n grads1, loss1 = None, None\n for epoch in range(epochs):\n feed_dict = {self.ts.training: True}\n # feed values several times, updating the accumulated gradients\n for i in range(4):\n if i == 0:\n feed_dict.update({self.ts.should_new_grads1: True})\n else:\n feed_dict.update({self.ts.should_new_grads1: False})\n for ind_gpu in range(cfg.gpu_num):\n sub_ts = self.ts.sub_ts[ind_gpu]\n feed_dict.update({sub_ts.x: self.inputs[i], sub_ts.y: self.labels[i]})\n step, _loss1, _grads1, _ = self.session.run(run_list, feed_dict)\n if i == 0:\n loss1 = _loss1\n else:\n loss1 += _loss1\n print('_loss1:\\n', _loss1)\n print('_grads1:\\n', _grads1)\n _update_grads1 = self.session.run(self.ts.update_grads1)\n print('_update_grads:\\n', _update_grads1)\n # apply the averaged gradients\n _, avg_grads1 = self.session.run([self.ts.train1, self.ts.avg_grads1], feed_dict)\n print('loss1:\\n', loss1)\n print('avg_grads1:\\n', avg_grads1)\n print(f'EPOCH {epoch} step={step} loss1={loss1 / 4}')\n self.session.run(self.ts.add_global_step)\n\n def close(self):\n self.session.close()\n\n\nif __name__ == '__main__':\n app = App()\n\n app.train()\n\n app.close()\n","sub_path":"try/try_update_grads_in_tf.py","file_name":"try_update_grads_in_tf.py","file_ext":"py","file_size_in_byte":9477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"564365924","text":"from django.shortcuts import render\nfrom .models import Cryptocurrency\n\nimport django\n\ndjango.setup()\n\n\n# Create your views here.\ndef display_cryptocoins(request):\n order_param = request.GET.get('order_param', 'rank')\n order_direction = request.GET.get('order_direction', 'asc')\n order_by = 'rank'\n\n coins = Cryptocurrency.objects.all()\n print(coins)\n\n search = request.GET.get('search')\n if search:\n coins = coins.filter(name__icontains=search)\n\n if order_param == 'price':\n order_by = 'price_usd'\n if order_direction == 'desc':\n order_by = '-' + order_by\n\n coins = coins.order_by(order_by)\n\n return render(request, 'cryptocoins.html', {\n 'order_param': order_param,\n 'order_direction': order_direction,\n 'coins': coins\n })\n","sub_path":"coinmarketcap/cryptocoins/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"518211416","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2015 Michael Gruber\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom unittest import TestCase, main\nfrom mock import patch\n\nfrom Phoenix import filter_configuration\nfrom hamcrest import assert_that, equal_to\n\n\nclass LoggingAndPassThroughTests(TestCase):\n\n def test_should_pass_through_if_configuration_is_empty(self):\n\n filter_configuration(configuration={})\n\n @patch('Phoenix.LOGGER')\n def test_should_warn_when_configuration_parameter_unknown(self, mock_logger):\n\n configuration = {'foobar': []}\n\n filter_configuration(configuration)\n\n mock_logger.warning.assert_called_with(u'Ignoring unknown configuration parameter \"%s\".', 'foobar')\n\n @patch('Phoenix.LOGGER')\n def test_should_not_warn_when_configuration_parameter_files_given(self, mock_logger):\n\n configuration = {'files': []}\n\n filter_configuration(configuration)\n\n assert not mock_logger.warning.called\n\n\n@patch('Phoenix.expandvars')\n@patch('Phoenix.expanduser')\nclass FilterConfigurationTests(TestCase):\n\n def test_should_expand_user_home_directory_on_given_file(self, mock_expanduser, mock_expandvars):\n\n configuration = {'files': ['~/.bashrc']}\n\n filter_configuration(configuration)\n\n mock_expanduser.assert_called_with('~/.bashrc')\n\n def test_should_return_file_with_expanded_home_directory(self, mock_expanduser, mock_expandvars):\n\n configuration = {'files': ['~/.bashrc']}\n\n mock_expanduser.side_effect = self.fake_expanduser\n mock_expandvars.side_effect = self.fake_expandvars\n\n actual_configuration = filter_configuration(configuration)\n\n assert_that(actual_configuration, equal_to({'files': ['/home/user/.bashrc']}))\n\n def test_should_return_expand_user_home_directory_on_second_file(self, mock_expanduser, mock_expandvars):\n\n configuration = {'files': ['~/.bashrc', '/etc/group']}\n\n filter_configuration(configuration)\n\n mock_expanduser.assert_called_with('/etc/group')\n\n def test_should_return_two_files_and_expand_user_home_directories(self, mock_expanduser, mock_expandvars):\n\n configuration = {'files': ['~/.bashrc', '/etc/group']}\n\n mock_expanduser.side_effect = self.fake_expanduser\n mock_expandvars.side_effect = self.fake_expandvars\n\n actual_configuration = filter_configuration(configuration)\n\n assert_that(actual_configuration, equal_to({'files': ['/home/user/.bashrc', '/etc/group']}))\n\n def test_should_return_three_files_and_expand_user_home_directories(self, mock_expanduser, mock_expandvars):\n\n configuration = {'files': ['/etc/group', '~/.bashrc', '~/.profile']}\n\n mock_expanduser.side_effect = self.fake_expanduser\n mock_expandvars.side_effect = self.fake_expandvars\n\n mock_expanduser.side_effect = self.fake_expanduser\n\n actual_configuration = filter_configuration(configuration)\n\n expected_config = {'files': ['/etc/group', '/home/user/.bashrc', '/home/user/.profile']}\n assert_that(actual_configuration, equal_to(expected_config))\n\n def test_should_return_files_with_expanded_variables_and_home_directories(self, mock_expanduser, mock_expandvars):\n\n configuration = {'files': ['${XAUTHORITY}', '~/.bashrc', '~/.profile']}\n\n mock_expanduser.side_effect = self.fake_expanduser\n mock_expandvars.side_effect = self.fake_expandvars\n\n actual_configuration = 
filter_configuration(configuration)\n\n expected_config = {'files': ['/home/user/.Xauthority', '/home/user/.bashrc', '/home/user/.profile']}\n assert_that(actual_configuration, equal_to(expected_config))\n\n def test_should_expand_variables_and_home_directories_in_all_parameters(self, mock_expanduser, mock_expandvars):\n\n configuration = {'files': ['~/.bashrc'], 'directories': ['${HOME}/Music']}\n\n mock_expanduser.side_effect = self.fake_expanduser\n mock_expandvars.side_effect = self.fake_expandvars\n\n actual_configuration = filter_configuration(configuration)\n\n expected_config = {'files': ['/home/user/.bashrc'], 'directories': ['/home/user/Music']}\n assert_that(actual_configuration, equal_to(expected_config))\n\n def test_should_norm_paths(self, mock_expanduser, mock_expandvars):\n\n configuration = {'files': ['~////.bashrc'], 'directories': ['${HOME}/////Music///////']}\n\n mock_expanduser.side_effect = self.fake_expanduser\n mock_expandvars.side_effect = self.fake_expandvars\n\n actual_configuration = filter_configuration(configuration)\n\n expected_config = {'files': ['/home/user/.bashrc'], 'directories': ['/home/user/Music']}\n assert_that(actual_configuration, equal_to(expected_config))\n\n def fake_expanduser(self, path):\n return path.replace('~', '/home/user')\n\n def fake_expandvars(self, path):\n return path.replace('${XAUTHORITY}', '/home/user/.Xauthority').replace('${HOME}', '/home/user')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/filter_configuration_tests.py","file_name":"filter_configuration_tests.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"499791954","text":"#!/usr/bin/python\n\n# This script creates an interactive plot of\n# sales forecasts by company\n#\n# 2019 - Jaime Lopez \n\nimport pandas as pd\nfrom bokeh.models import ColumnDataSource, DataRange1d, Select\nfrom bokeh.plotting import figure, save, output_file\nfrom bokeh.layouts import column, row\nfrom bokeh.io import curdoc\n# from datetime import datetime\n\ndef dataset(source, company):\n \"\"\"\n This function filters the dataset by company\n and returns a ColumnDataSource object\n \"\"\"\n df = source[source.company == company].copy()\n return ColumnDataSource(df)\n\ndef make_plot(source, title):\n \"\"\"\n Main function to plot data\n \"\"\"\n plot = figure(x_axis_type=\"datetime\", plot_width=800)\n plot.title.text = title\n plot.scatter(x='ds', y='sales', color='blue', source=source)\n plot.line(x='ds', y='sales', color='green', line_width=0.3, source=source)\n plot.yaxis.axis_label = \"Predicted Sales (USD)\"\n return plot\n\ndef update_plot(attr, old, new):\n plot.title.text = new\n company = company_select.value\n src = dataset(df, company)\n source.data.update(src.data)\n\n# Data preparation\ndf = pd.read_csv('salesvalues.csv')\n# df['ds'] = df.ds.apply(datetime.fromisoformat)\ndf['ds'] = pd.to_datetime(df['ds'])\ndf['company'] = df.company.apply(str)\n# Model preparation\ncompany = df.company.values[0]\ncompany_select = Select(value=company, title='company',\n options=list(df.company.sort_values().unique()))\nsource = dataset(df, company)\n# Plotting data\nplot = make_plot(source, company)\n# Setting interaction\ncompany_select.on_change('value', update_plot)\n# Display output\ncontrols = column(company_select)\ncurdoc().add_root(row(plot, controls))\ncurdoc().title = 
'Forecast'\n","sub_path":"vrs/bokehdemo/sales.py","file_name":"sales.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"166844262","text":"# -*- coding: utf-8 -*-\n\"\"\"\n process.tests.test_refine\n\"\"\"\nimport unittest\n\nfrom process.refine import Refine\n\n\nclass TestRefine(unittest.TestCase):\n\n def make_refine(self, bundle_unit):\n return Refine(bundle_unit)\n\n def test_refine_text(self):\n \"\"\"\"\"\"\n buf = \"\"\"\n <HTML1>\n <BODY2>\n <p id=3>he1ll4o wo6rl8d9</p>
    \n \n \n \"\"\"\n expected_buf = 'B1B1D2D3d4d6e8H9HhiLLlllMMOOoopprTTwYY'\n r = self.make_refine(7)\n # 실행\n buf = r.refine_text(buf)\n # 검증\n assert buf == expected_buf\n\n def test_obtain_result(self):\n \"\"\"\"\"\"\n buf = 'd0E1e2h3l3l5l7O7o8o8R9rWw'\n expected_buf = {'quotient': 'd0E1e2h3l3l5l7O7o8o8R', 'remainder': '9rWw'}\n r = self.make_refine(7)\n # 실행\n buf = r.obtain_result(buf)\n # 검증\n assert buf == expected_buf","sub_path":"process/tests/test_refine.py","file_name":"test_refine.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"158208154","text":"# -*- encoding:utf-8 -*-\n# Copyright (c) Alibaba, Inc. and its affiliates.\nimport logging\n\nimport tensorflow as tf\n\nfrom easy_rec.python.builders import loss_builder\nfrom easy_rec.python.model.rank_model import RankModel\nfrom easy_rec.python.protos import tower_pb2\n\nif tf.__version__ >= '2.0':\n tf = tf.compat.v1\n\n\nclass MultiTaskModel(RankModel):\n\n def __init__(self,\n model_config,\n feature_configs,\n features,\n labels=None,\n is_training=False):\n super(MultiTaskModel, self).__init__(model_config, feature_configs,\n features, labels, is_training)\n self._task_towers = []\n self._task_num = None\n self._label_name_dict = {}\n\n def _init_towers(self, task_tower_configs):\n \"\"\"Init task towers.\"\"\"\n self._task_towers = task_tower_configs\n self._task_num = len(task_tower_configs)\n for i, task_tower_config in enumerate(task_tower_configs):\n assert isinstance(task_tower_config, tower_pb2.TaskTower) or \\\n isinstance(task_tower_config, tower_pb2.BayesTaskTower), \\\n 'task_tower_config must be a instance of tower_pb2.TaskTower or tower_pb2.BayesTaskTower'\n tower_name = task_tower_config.tower_name\n\n # For label backward compatibility with list\n if self._labels is not None:\n if task_tower_config.HasField('label_name'):\n label_name = task_tower_config.label_name\n else:\n # If label name is not specified, task_tower and label will be matched by order\n label_name = list(self._labels.keys())[i]\n logging.info('Task Tower [%s] use label [%s]' %\n (tower_name, label_name))\n assert label_name in self._labels, 'label [%s] must exists in labels' % label_name\n self._label_name_dict[tower_name] = label_name\n\n def _add_to_prediction_dict(self, output):\n for task_tower_cfg in self._task_towers:\n tower_name = task_tower_cfg.tower_name\n self._prediction_dict.update(\n self._output_to_prediction_impl(\n output[tower_name],\n loss_type=task_tower_cfg.loss_type,\n num_class=task_tower_cfg.num_class,\n suffix='_%s' % tower_name))\n\n def build_metric_graph(self, eval_config):\n \"\"\"Build metric graph for multi task model.\"\"\"\n metric_dict = {}\n for task_tower_cfg in self._task_towers:\n tower_name = task_tower_cfg.tower_name\n for metric in task_tower_cfg.metrics_set:\n metric_dict.update(\n self._build_metric_impl(\n metric,\n loss_type=task_tower_cfg.loss_type,\n label_name=self._label_name_dict[tower_name],\n num_class=task_tower_cfg.num_class,\n suffix='_%s' % tower_name))\n return metric_dict\n\n def build_loss_graph(self):\n \"\"\"Build loss graph for multi task model.\"\"\"\n for task_tower_cfg in self._task_towers:\n tower_name = task_tower_cfg.tower_name\n loss_weight = task_tower_cfg.weight * self._sample_weight\n\n if hasattr(task_tower_cfg, 'task_space_indicator_label') and \\\n task_tower_cfg.HasField('task_space_indicator_label'):\n in_task_space = tf.to_float(\n 
self._labels[task_tower_cfg.task_space_indicator_label] > 0)\n loss_weight = loss_weight * (\n task_tower_cfg.in_task_space_weight * in_task_space +\n task_tower_cfg.out_task_space_weight * (1 - in_task_space))\n\n self._loss_dict.update(\n self._build_loss_impl(\n task_tower_cfg.loss_type,\n label_name=self._label_name_dict[tower_name],\n loss_weight=loss_weight,\n num_class=task_tower_cfg.num_class,\n suffix='_%s' % tower_name))\n\n kd_loss_dict = loss_builder.build_kd_loss(self.kd, self._prediction_dict,\n self._labels)\n self._loss_dict.update(kd_loss_dict)\n\n return self._loss_dict\n\n def get_outputs(self):\n outputs = []\n for task_tower_cfg in self._task_towers:\n tower_name = task_tower_cfg.tower_name\n outputs.extend(\n self._get_outputs_impl(\n task_tower_cfg.loss_type,\n task_tower_cfg.num_class,\n suffix='_%s' % tower_name))\n return outputs\n","sub_path":"easy_rec/python/model/multi_task_model.py","file_name":"multi_task_model.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"243024102","text":"# -*- coding: utf-8 -*-\n\"\"\"Clowder command line herd controller\n\n.. codeauthor:: Joe Decapo \n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\n\nfrom cement.ext.ext_argparse import ArgparseController, expose\n\nfrom clowder.clowder_controller import CLOWDER_CONTROLLER\nfrom clowder.clowder_repo import print_clowder_repo_status_fetch\nfrom clowder.util.clowder_utils import (\n filter_groups,\n filter_projects,\n options_help_message,\n run_group_command,\n run_project_command,\n validate_groups,\n validate_projects\n)\nfrom clowder.util.connectivity import network_connection_required\nfrom clowder.util.decorators import valid_clowder_yaml_required\nfrom clowder.util.parallel_commands import herd_parallel\n\n\nclass HerdController(ArgparseController):\n \"\"\"Clowder herd command controller\"\"\"\n\n class Meta:\n \"\"\"Clowder herd Meta configuration\"\"\"\n\n label = 'herd'\n stacked_on = 'base'\n stacked_type = 'embedded'\n description = 'Clone and update projects with latest changes'\n\n @expose(\n help='Clone and update projects with latest changes',\n arguments=[\n (['--parallel'], dict(action='store_true', help='run commands in parallel')),\n (['--protocol'], dict(choices=['https', 'ssh'], nargs=1, default=None, metavar='PROTOCOL',\n help='Protocol to clone new repos with')),\n (['--rebase', '-r'], dict(action='store_true', help='use rebase instead of pull')),\n (['--depth', '-d'], dict(default=None, type=int, nargs=1, metavar='DEPTH', help='depth to herd')),\n (['--branch', '-b'], dict(nargs=1, default=None, metavar='BRANCH', help='branch to herd if present')),\n (['--tag', '-t'], dict(nargs=1, default=None, metavar='TAG', help='tag to herd if present')),\n (['--groups', '-g'], dict(choices=CLOWDER_CONTROLLER.get_all_group_names(),\n default=CLOWDER_CONTROLLER.get_all_group_names(),\n nargs='+', metavar='GROUP',\n help=options_help_message(CLOWDER_CONTROLLER.get_all_group_names(),\n 'groups to herd'))),\n (['--projects', '-p'], dict(choices=CLOWDER_CONTROLLER.get_all_project_names(),\n nargs='+', metavar='PROJECT',\n help=options_help_message(CLOWDER_CONTROLLER.get_all_project_names(),\n 'projects to herd'))),\n (['--skip', '-s'], dict(choices=CLOWDER_CONTROLLER.get_all_project_names(),\n nargs='+', metavar='PROJECT', default=[],\n help=options_help_message(CLOWDER_CONTROLLER.get_all_project_names(),\n 'projects to skip')))\n ]\n )\n def herd(self):\n 
\"\"\"Clowder herd command entry point\"\"\"\n\n self._herd()\n\n @network_connection_required\n @valid_clowder_yaml_required\n @print_clowder_repo_status_fetch\n def _herd(self):\n \"\"\"Clowder herd command private implementation\"\"\"\n\n branch = None if self.app.pargs.branch is None else self.app.pargs.branch[0]\n tag = None if self.app.pargs.tag is None else self.app.pargs.tag[0]\n depth = None if self.app.pargs.depth is None else self.app.pargs.depth[0]\n protocol = None if self.app.pargs.protocol is None else self.app.pargs.protocol[0]\n\n kwargs = {'group_names': self.app.pargs.groups, 'project_names': self.app.pargs.projects,\n 'skip': self.app.pargs.skip, 'branch': branch, 'tag': tag,\n 'depth': depth, 'rebase': self.app.pargs.rebase, 'protocol': protocol}\n\n if self.app.pargs.parallel:\n herd_parallel(CLOWDER_CONTROLLER, **kwargs)\n if os.name == \"posix\":\n return\n\n herd(CLOWDER_CONTROLLER, **kwargs)\n\n\ndef herd(clowder, group_names, **kwargs):\n \"\"\"Clone projects or update latest from upstream\n\n .. py:function:: herd(clowder, group_names, branch=None, tag=None, depth=0, rebase=False, project_names=None, skip=[], protocol=None)\n\n :param ClowderController clowder: ClowderController instance\n :param list[str] group_names: Group names to herd\n\n Keyword Args:\n branch (str): Branch to attempt to herd\n tag (str): Tag to attempt to herd\n depth (int): Git clone depth. 0 indicates full clone, otherwise must be a positive integer\n protocol (str): Git protocol ('ssh' or 'https')\n rebase (bool): Whether to use rebase instead of pulling latest changes\n project_names (list[str]) project_names: Project names to herd\n skip (list[str]): Project names to skip\n \"\"\"\n\n project_names = kwargs.get('project_names', None)\n skip = kwargs.get('skip', [])\n branch = kwargs.get('branch', None)\n tag = kwargs.get('tag', None)\n depth = kwargs.get('depth', None)\n rebase = kwargs.get('rebase', False)\n protocol = kwargs.get('protocol', None)\n\n if project_names is None:\n groups = filter_groups(clowder.groups, group_names)\n validate_groups(groups)\n for group in groups:\n run_group_command(group, skip, 'herd', branch=branch, tag=tag,\n depth=depth, rebase=rebase, protocol=protocol)\n return\n\n projects = filter_projects(clowder.groups, project_names=project_names)\n validate_projects(projects)\n for project in projects:\n run_project_command(project, skip, 'herd', branch=branch, tag=tag,\n depth=depth, rebase=rebase, protocol=protocol)\n","sub_path":"src/clowder/cli/herd_controller.py","file_name":"herd_controller.py","file_ext":"py","file_size_in_byte":5793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"337463085","text":"import json\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport re\n\n\ndata_model = \"../../../data_models/environment/floodSensor/env_flood_climoPune_0.json\"\n\ndm_list = data_model.split(\"/\")\ndm_name = dm_list[-1]\npath_to_dm = dm_list[dm_list.index(\"data_models\"):-1]\npath_to_dm_folder = \"\".join(a+\"/\" for a in dm_list[:-1])\nfolder_path = \"\".join(a+\"/\" for a in path_to_dm)\nprint(folder_path)\n\n\ncore_context = \"https://raw.githubusercontent.com/iudx/iudx-ld/master/base_schemas/v0.0.0/core_context.json\"\ncommon_context = \"https://raw.githubusercontent.com/iudx/iudx-ld/master/base_schemas/v0.0.0/common_context.json\"\nmiscSchemaOrgDefs = \"https://raw.githubusercontent.com/iudx/iudx-ld/master/base_schemas/v0.0.0/miscSchemaOrgDefs.json\"\ngeometry_schema 
= \"https://raw.githubusercontent.com/iudx/iudx-ld/master/base_schemas/v0.0.0/geometry-schema.json\"\n\ndm_url = \"https://raw.githubusercontent.com/iudx/iudx-ld/master/\" + folder_path + dm_name + \"#/properties/\"\ndm_context_url = \"https://raw.githubusercontent.com/iudx/iudx-ld/master/\" + folder_path + folder_path.split(\"/\")[-1] + \"_context.json\" + \"#/properties/\"\n\ndm = {}\nwith open(data_model, \"r\") as f:\n dm = json.load(f)\n\nprops = dm[\"properties\"]\n\ndm[\"@context\"] = []\ndm[\"@context\"].append(core_context)\ndm[\"@context\"].append(common_context)\ndm[\"@context\"].append(miscSchemaOrgDefs)\ndm[\"@context\"].append(geometry_schema)\ndm[\"@context\"].append(dm_context_url)\n\ndm_context = {}\ndm_context[\"@context\"] = {}\n\n\n\nfor prop in props:\n if(re.search('time', prop, re.IGNORECASE)):\n dm_context[\"@context\"][prop] = {\"@id\":dm_context_url + prop, \"@type\": \"TimeProperty\"}\n else:\n dm_context[\"@context\"][prop] = {\"@id\":dm_context_url + prop, \"@type\": \"Property\"}\n if(\"describes\" in props[prop].keys()):\n dm_context[prop] = props[prop][\"describes\"]\n if(\"units\" in props[prop].keys()):\n props[prop][\"unitText\"] = props[prop][\"units\"]\n if(\"oneOf\" in props[prop].keys()):\n props[prop].pop(\"oneOf\")\n if(\"valueSchema\" in props[prop].keys()):\n props[prop].pop(\"valueSchema\")\n\n\nprint(json.dumps(dm, indent=4, sort_keys=True))\n","sub_path":"utils/migrate/ld_0_1/migrate_data_model.py","file_name":"migrate_data_model.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"583077640","text":"#!/usr/bin/env python3\n\nimport pdb\n\nimport os.path\nimport os\nimport string\nimport re\n\nimport threading\nimport subprocess\nimport shlex\nimport random\nimport logging\nimport tempfile\nimport shutil\nimport signal\nimport datetime\n\nimport pymongo\nimport bson\nimport tornado.ioloop\nimport tornado.web\nimport tornado.options\nimport tornado.httpserver\nfrom tornado.options import define, options\n\nimport hashlib\n\n\n#RFC 2822 Email Address\nEMAIL_REGEX = re.compile(r\"\"\"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|\"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])\"\"\")\n\n\ndefine(\"port\", default = 8888, type = int, help = \"port to listen\")\ndefine(\"db_host\", default = \"localhost:27017\", help = \"database host\")\ndefine(\"db_name\", default = \"oj\", help = \"database name\")\n\njudge_event = threading.Event()\n\nglobal_db = pymongo.MongoClient(host = options.db_host)[options.db_name]\nif not __debug__:\n global_db.write_concern = {'w': 0}\n\nexiting = False\n\ncompile_command = [\n \"gcc -w -O2 -o {0} -x c -\",\n \"g++ -w -O2 -o {0} -x c++ -\",\n ]\n\n#logging.basicConfig(filename='oj.log',level=logging.DEBUG)\n\n\nclass Application(tornado.web.Application):\n def __init__(self):\n handlers = [\n (r\"/\", MainHandler),\n (r\"/submit\", SubmitHandler),\n\n (r\"/status\", StatusHandler),\n (r\"/status/([^/]+)\", StatusSourceHandler),\n\n #(r\"/user/([^/]+)\", UserHandler),\n (r\"/userlist\", UserListHandler),\n\n (r\"/auth/login\", LoginHandler),\n 
(r\"/auth/logout\", LogoutHandler),\n\n (r\"/problem/list\", ProblemListHandler),\n #(r\"/problem/add\", ProblemAddHandler),\n #(r\"/problem/(\\d+)/edit\", ProblemEditHandler),\n (r\"/problem/(\\d+)\", ProblemHandler),\n #(r\"/problem/add_tp\", ProblemAddTestPointHandler),\n ]\n\n settings = dict(\n title = \"CDUT Online Judge\",\n template_path = os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path = os.path.join(os.path.dirname(__file__), \"static\"),\n languages = [\"C\", \"C++\", \"Java\", \"Pascal\"],\n login_url = \"/auth/login\",\n cookie_secret = \"CDUT_Online_Judge 0108\",\n xsrf_cookies = True,\n debug = __debug__,\n )\n\n tornado.web.Application.__init__(self, handlers, **settings)\n\n JudgeThread().start()\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n @property\n def db(self):\n return global_db\n\n def get_current_user(self):\n username = self.get_secure_cookie(\"username\")\n if not username:\n return None\n\n current_userinfo = self.db.users.find_one({\"_id\" : username})\n return current_userinfo\n\n\nclass MainHandler(BaseHandler):\n def get(self):\n self.render(\"index.html\")\n\n\nclass SubmitHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self):\n self.render(\"submit.html\")\n\n @tornado.web.authenticated\n def post(self):\n if not self.get_argument(\"pid\").isdigit()\\\n or not self.db.problems.find_one({\"_id\" : int(self.get_argument(\"pid\"))})\\\n or not self.get_argument(\"lang\").isdigit()\\\n or int(self.get_argument(\"lang\")) > len(self.settings[\"languages\"]):\n raise tornado.web.HTTPError(400)\n else:\n self.db.status.ensure_index(\"_id\", pymongo.DESCENDING)\n self.db.status.insert({\n \"username\" : self.current_user[\"_id\"],\n \"pid\" : int(self.get_argument(\"pid\")),\n \"language\" : int(self.get_argument(\"lang\")),\n \"result\" : 0,\n \"code\" : self.get_argument(\"src\"),\n }, w=1)\n\n self.set_cookie(\"def_lang\", self.get_argument(\"lang\"), expires_days=30)\n self.redirect(\"status?pid={}\".format(self.get_argument(\"pid\")))#TODO:add pid\n\n judge_event.set()\n logging.debug(\"JudgeThread notified.\")\n\n\nclass StatusHandler(BaseHandler):\n result_code = [\n \"Waiting\",\n \"Judging\",\n \"Accepted\",\n \"Compile Error\",\n \"Wrong Answer\",\n \"Time Limit Exceeded\",\n \"Memory Limit Exceeded\",\n ]\n\n def get(self):\n\n status_list = self.db.status.find({\n \"_id\" : {\n \"$lt\" : bson.objectid.ObjectId(self.get_argument(\"top\",\n \"ffffffffffffffffffffffff\")),\n \"$gt\" : bson.objectid.ObjectId(self.get_argument(\"bottom\",\n \"000000000000000000000000\"))\n },\n }).limit(20).sort(\"_id\", pymongo.DESCENDING)\n #TODO:add filter\n\n self.render(\"status.html\",\n status_list = list(status_list)\n )\n\n\nclass StatusSourceHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self, sid):\n status = self.db.status.find_one({\"_id\" : bson.objectid.ObjectId(sid)})\n\n if not status:\n raise tornado.web.HTTPError(404)\n elif self.current_user[\"_id\"] != status[\"username\"]:\n raise tornado.web.HTTPError(403)\n else:\n self.render(\"source.html\",\n code = status[\"code\"],\n compile_info = status.get(\"compile_info\", \"\"),\n )\n\n\nclass UserHandler(BaseHandler):\n def get(self):\n pass\n\n\nclass UserListHandler(BaseHandler):\n def get(self):\n user_list = self.db.users.find().sort(\"score\", pymongo.DESCENDING)\n self.render(\"user_list.html\",\n user_list = list(user_list)\n )\n\n\nclass LoginHandler(BaseHandler):\n page_errors = [\n \"Invalid name.\",\n \"Wrong password.\",\n \"Invalid 
student id.\",\n \"Invalid student id & name pair.\",\n ]\n\n def get(self):\n if self.current_user:\n self.redirect(self.get_argument(\"next\", \"/\"))\n return\n self.render(\"login.html\")\n\n def post(self):\n if self.current_user:\n self.redirect(self.get_argument(\"next\", \"/\"))\n return\n\n #TODO: change to atom operation\n if not self.get_argument(\"stu_id\").isdigit() \\\n or len(self.get_argument(\"stu_id\")) != 12:\n self.redirect(\"login?type=2\")\n elif len(self.get_argument(\"username\")) > 32 :\n self.redirect(\"login?type=0&stu_id={}\".format(\n self.get_argument(\"stu_id\"),\n ))\n elif self.get_argument(\"password\") != \"PDA_Contest\":\n self.redirect(\"login?type=1&stu_id={}\".format(\n self.get_argument(\"stu_id\"),\n ))\n elif self.db.users.find_one({\"_id\" : self.get_argument(\"stu_id\")}):\n if self.db.users.find_one({\"_id\" : self.get_argument(\"stu_id\")})[\"name\"] == \\\n self.get_argument(\"username\"):\n if \"remember\" in self.request.arguments:\n self.set_secure_cookie(\"username\", str(self.get_argument(\"stu_id\")))\n else:\n self.set_secure_cookie(\"username\", str(self.get_argument(\"stu_id\")), None)\n self.redirect(self.get_argument(\"next\", \"/\"))\n else:\n self.redirect(\"login?type=3\")\n else:\n self.db.users.ensure_index(\"score\", pymongo.DESCENDING)\n self.db.users.insert({\n \"_id\" : self.get_argument(\"stu_id\"),\n \"name\" : self.get_argument(\"username\"),\n \"ac_list\" : [],\n \"score\" : 0,\n })\n if \"remember\" in self.request.arguments:\n self.set_secure_cookie(\"username\", str(self.get_argument(\"stu_id\")))\n else:\n self.set_secure_cookie(\"username\", str(self.get_argument(\"stu_id\")), None)\n self.redirect(self.get_argument(\"next\", \"/\"))\n\n\nclass LogoutHandler(BaseHandler):\n def get(self):\n self.clear_all_cookies()\n self.redirect(self.get_argument(\"next\", \"/\"))\n\n\nclass ProblemListHandler(BaseHandler):\n def get(self):\n if not self.get_cookie(\"problem_per_page\"):\n self.set_cookie(\"problem_per_page\", \"30\", expires_days=30)\n problem_per_page = 30\n else:\n problem_per_page = int(self.get_cookie(\"problem_per_page\"))\n\n problem_list = self.db.problems.find({\n \"_id\" : {\"$gte\" : problem_per_page * self.get_argument(\"page\", 0) - 1}\n }).limit(problem_per_page)\n\n self.render(\"problem_list.html\",\n problem_list = list(problem_list),\n )\n\n\nclass ProblemAddHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self):\n self.render(\"problem_add.html\")\n\n @tornado.web.authenticated\n def post(self):\n self.db.problems.insert({\n \"_id\" : int(self.get_argument(\"pid\")),\n \"title\" : self.get_argument(\"title\"),\n \"content\" : self.get_argument(\"content\"),\n \"score\" : int(self.get_argument(\"score\")),\n \"submit_num\" : 0,\n \"accept_num\" : 0,\n \"tp_list\" : [],\n \"time\" : datetime.datetime.utcnow(),\n })\n self.redirect(\"list\")\n\n\nclass ProblemEditHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self):\n pass\n\n\nclass ProblemAddTestPointHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self):\n self.render(\"problem_add_tp.html\")\n\n @tornado.web.authenticated\n def post(self):\n print(self.request.arguments)\n print(type(self.request.arguments[\"input\"][0]))\n p = self.db.problems.find_one({\"_id\" : int(self.get_argument(\"pid\"))})\n p[\"tp_list\"].append({\n \"in\" : self.request.arguments[\"input\"][0],\n \"out\" : self.request.arguments[\"output\"][0],\n })\n self.db.problems.save(p)\n self.redirect(\"list\")\n\n\n\nclass 
ProblemHandler(BaseHandler):\n def get(self, pid):\n prob = self.db.problems.find_one({\"_id\" : int(pid)})\n\n if not prob:\n raise tornado.web.HTTPError(404)\n else:\n self.render(\"problem.html\", prob = prob)\n\n\nclass JudgeThread(threading.Thread):\n def run(self):\n temp_dir = tempfile.mkdtemp()\n\n #TODO:optimize logic\n while not exiting:\n judging_submit = global_db.status.find_and_modify(\n query = { \"result\" : 0 },\n update = { \"$set\" : { \"result\" : 1 } }\n )\n while judging_submit:\n logging.info(\"judging {}\".format(str(judging_submit[\"_id\"])))\n\n exe_path = temp_dir + '/' + str(judging_submit[\"_id\"])\n\n p = subprocess.Popen(shlex.split(\n compile_command[judging_submit[\"language\"]].\\\n format(str(judging_submit[\"_id\"]))),\n stdin = subprocess.PIPE,\n stdout = subprocess.PIPE,\n stderr = subprocess.STDOUT,\n cwd = temp_dir,\n )\n try:\n judging_submit[\"compile_info\"] = p.communicate(judging_submit[\"code\"])[0]\n except UnicodeEncodeError:\n p.terminate()\n judging_submit[\"result\"] = 3\n else:\n if p.returncode:\n judging_submit[\"result\"] = 3\n else:\n problem = global_db.problems.find_one({ \"_id\" : judging_submit[\"pid\"] })\n for tp in problem[\"tp_list\"]:\n p = subprocess.Popen(\n exe_path,\n stdin = subprocess.PIPE,\n stdout = subprocess.PIPE,\n cwd = temp_dir,\n )\n t = threading.Timer(2, terminate_timer, [p])\n t.start()\n\n #tp[\"out\"] = output\n if p.communicate(tp[\"in\"])[0] != tp[\"out\"]:\n t.cancel()\n if p.returncode == -15 :\n judging_submit[\"result\"] = 5\n else:\n logging.debug(p.returncode)\n judging_submit[\"result\"] = 4\n break\n t.cancel()\n else:\n #global_db.problems.save(problem)\n judging_submit[\"result\"] = 2\n u = global_db.users.find_one({ \"_id\" : judging_submit[\"username\"] })\n if not judging_submit[\"pid\"] in u[\"ac_list\"]:\n u[\"ac_list\"].append(judging_submit[\"pid\"])\n u[\"score\"] += problem.get(\"score\", 10)\n global_db.users.save(u)\n\n os.remove(exe_path)\n\n global_db.status.save(judging_submit)\n\n judging_submit = global_db.status.find_and_modify(\n query = { \"result\" : 0 },\n update = { \"$set\" : { \"result\" : 1 } }\n )\n\n judge_event.clear()\n if not exiting:\n logging.debug(\"JudgeThread waiting.\")\n judge_event.wait(60)\n\n os.rmdir(temp_dir)\n\n\ndef signal_handler(signum, frame):\n global exiting\n tornado.ioloop.IOLoop.instance().stop()\n exiting = True\n judge_event.set()\n\ndef kill_timer(p):\n logging.debug(\"kill!\")\n p.kill()\n\ndef terminate_timer(p):\n logging.debug(\"terminate!\")\n p.terminate()\n\n\nif __name__ == \"__main__\":\n tornado.options.parse_command_line()\n signal.signal(signal.SIGINT, signal_handler)\n http_server = tornado.httpserver.HTTPServer(Application())\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"web/oj.py","file_name":"oj.py","file_ext":"py","file_size_in_byte":14588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"428038810","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0044_loantransactionentry_transaction_type'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='loanaccount',\n name='general_ledger_code',\n field=models.CharField(default=b'14100000', max_length=15),\n ),\n migrations.AlterField(\n model_name='loantransactionentry',\n name='account',\n 
field=models.ForeignKey(related_query_name=b'account_transaction', related_name='account_transactions', to='core.LoanAccount', null=True),\n ),\n migrations.AlterField(\n model_name='loantransactionentry',\n name='gl_account_code',\n field=models.CharField(max_length=20, null=True, verbose_name=b'Loan Ledger Account', choices=[(b'14100000', b'Portfolio Control'), (b'11220000', b'Fund Source'), (b'14400000', b'Loans Recovered'), (b'41100000', b'Interest Income Account'), (b'41200000', b'Fee Income Account'), (b'41300000', b'Penalties Income Account'), (b'14300000', b'Allowance for Loan Loss'), (b'15100000', b'Income Receivable Account'), (b'51200000', b'Write-off Account')]),\n ),\n migrations.AlterField(\n model_name='loantransactionentry',\n name='item_type',\n field=models.IntegerField(choices=[(0, b'Credit'), (1, b'Debit')]),\n ),\n migrations.AlterField(\n model_name='loantransactionentry',\n name='status',\n field=models.IntegerField(default=0, choices=[(0, b'Pending Posting'), (1, b'Posted'), (2, b'On Hold')]),\n ),\n migrations.AlterField(\n model_name='loantransactionentry',\n name='transaction_id',\n field=models.UUIDField(),\n ),\n migrations.AlterField(\n model_name='loantransactionentry',\n name='transaction_type',\n field=models.IntegerField(null=True, choices=[(1, b'Loan Disbursal'), (2, b'Apply Interest on Account'), (3, b'Apply Fee on Account'), (4, b'Apply Penalty on Account'), (5, b'Principal Posting'), (6, b'Interest Posting'), (7, b'Fee Posting'), (8, b'Penalty Posting'), (9, b'Principal Write-Off'), (10, b'Interest Write-Off'), (11, b'Fee Write-Off'), (12, b'Penalty Write-Off')]),\n ),\n ]\n","sub_path":"ajabsacco/core/migrations/0045_auto_20150518_1454.py","file_name":"0045_auto_20150518_1454.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"638121633","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, Http404\nfrom .models import user_data, consumption_data\n\n# Create your views here.\n\ndef summary(request):\n try:\n all_users = user_data.objects.all().order_by('user_id')\n total_consumption = consumption_data.get_total_consumption();\n context = {\n 'user_data' : all_users,\n 'total_consumption' : total_consumption\n }\n except Exception as e:\n raise Http404(str(e))\n\n return render(request, 'consumption/summary.html', context)\n\ndef detail(request, user_id):\n user_detail = get_object_or_404(user_data, pk=user_id)\n user_consumption = consumption_data.get_user_consumption(user_id)\n aggregated_user_data = consumption_data.aggregated_user_data(user_id)\n context = {\n 'user_detail' : user_detail,\n 'user_consumption' : user_consumption,\n 'aggregated_user_data' : aggregated_user_data\n }\n \n return render(request, 'consumption/detail.html', context)\n","sub_path":"dashboard/consumption/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"172875544","text":"import os\nimport signal as stdlib_signal\nfrom contextlib import closing\n\nimport aiopg\nimport psycopg2\nimport pytest\nfrom psycopg2 import sql\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\n\nfrom procrastinate import aiopg_connector\nfrom procrastinate import app as app_module\nfrom procrastinate import jobs, schema, testing\n\n# Just ensuring the 
tests are not polluted by environment\nfor key in os.environ:\n if key.startswith(\"PROCRASTINATE_\"):\n os.environ.pop(key)\n\n\ndef _execute(cursor, query, *identifiers):\n cursor.execute(\n sql.SQL(query).format(\n *(sql.Identifier(identifier) for identifier in identifiers)\n )\n )\n\n\n@pytest.fixture(scope=\"session\")\ndef setup_db():\n\n with closing(psycopg2.connect(\"\", dbname=\"postgres\")) as connection:\n connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n with connection.cursor() as cursor:\n _execute(\n cursor, \"DROP DATABASE IF EXISTS {}\", \"procrastinate_test_template\"\n )\n _execute(cursor, \"CREATE DATABASE {}\", \"procrastinate_test_template\")\n\n connector = aiopg_connector.PostgresConnector(dbname=\"procrastinate_test_template\")\n schema_manager = schema.SchemaManager(connector=connector)\n schema_manager.apply_schema()\n # We need to close the psycopg2 underlying connection synchronously\n connector._connection._conn.close()\n\n with closing(\n psycopg2.connect(\"\", dbname=\"procrastinate_test_template\")\n ) as connection:\n connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n yield connection\n\n with closing(psycopg2.connect(\"\", dbname=\"postgres\")) as connection:\n connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n with connection.cursor() as cursor:\n _execute(\n cursor, \"DROP DATABASE IF EXISTS {}\", \"procrastinate_test_template\"\n )\n\n\n@pytest.fixture\ndef connection_params(setup_db):\n with setup_db.cursor() as cursor:\n _execute(cursor, \"DROP DATABASE IF EXISTS {}\", \"procrastinate_test\")\n _execute(\n cursor,\n \"CREATE DATABASE {} TEMPLATE {}\",\n \"procrastinate_test\",\n \"procrastinate_test_template\",\n )\n\n yield {\"dsn\": \"\", \"dbname\": \"procrastinate_test\"}\n\n with setup_db.cursor() as cursor:\n _execute(cursor, \"DROP DATABASE IF EXISTS {}\", \"procrastinate_test\")\n\n\n@pytest.fixture\nasync def connection(connection_params):\n async with aiopg.connect(**connection_params) as connection:\n yield connection\n\n\n@pytest.fixture\nasync def pg_connector(connection_params):\n connector = aiopg_connector.PostgresConnector(**connection_params)\n yield connector\n connection = await connector._get_connection()\n await connection.close()\n\n\n@pytest.fixture\ndef kill_own_pid():\n def f(signal=stdlib_signal.SIGTERM):\n os.kill(os.getpid(), signal)\n\n return f\n\n\n@pytest.fixture\ndef connector():\n return testing.InMemoryConnector()\n\n\n@pytest.fixture\ndef app(connector):\n return app_module.App(connector=connector)\n\n\n@pytest.fixture\ndef job_store(app):\n return app.job_store\n\n\n@pytest.fixture\ndef job_factory():\n defaults = {\n \"id\": 42,\n \"task_name\": \"bla\",\n \"task_kwargs\": {},\n \"lock\": None,\n \"queue\": \"queue\",\n }\n\n def factory(**kwargs):\n final_kwargs = defaults.copy()\n final_kwargs.update(kwargs)\n return jobs.Job(**final_kwargs)\n\n return factory\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"551532161","text":"import RPi.GPIO as GPIO\nimport time\n\n# Assigning pins to variables\nLED = 17\nBUTTON = 7\n# Use BCM mode for pin numbering\nGPIO.setmode(GPIO.BCM)\n\n# Configure the inputs and outputs\nGPIO.setup(LED, GPIO.OUT, initial=1)\nGPIO.setup(BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Pull down so the pin reads 0 by default (not pressed)\n\n# Turn on the LED by writing a 
one\nGPIO.output(LED,1)\n\n# Read the button state and print it on screen\n# If it is active we print a one.\n\n# We do it with an interrupt\n\ncount = 0\n\n# Function executed on the interrupt\ndef subiendo(event):\n global count\n count += 1\n print(\"Press {} - Rising! (Pin: {})\".format(count,event))\n GPIO.output(LED,1)\n \ndef bajando(event):\n global count\n count += 1\n print(\"Press {} - Falling! (Pin: {})\".format(count,event))\n GPIO.output(LED,1)\n \ndef cambio(event):\n global count\n count += 1\n print(\"Press {} - Changed! (Pin: {})\".format(count,event))\n if GPIO.input(BUTTON) == 1:\n print(\"Rising!\")\n else:\n print(\"Falling\")\n\n# Declare the interrupt: rising, falling, or both. Only one can be registered.\n# Pressed\n#GPIO.add_event_detect(BUTTON,GPIO.RISING, callback=subiendo)\n#Not pressed\n#GPIO.add_event_detect(BUTTON,GPIO.FALLING, callback=bajando)\n#Either\nGPIO.add_event_detect(BUTTON,GPIO.BOTH, callback=cambio)\n\n\n\n\n","sub_path":"Dia8/GPIO/kk_Led.py","file_name":"kk_Led.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"526749195","text":"from datetime import datetime\nimport os\nimport pytest\nfrom selenium.webdriver.chrome import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom jproperties import Properties\n\ndriver = None\n\npytest_plugins = [\n \"data.testdata.InvalidLoginData\"\n ]\n\n@pytest.fixture(scope=\"class\")\n\ndef setUp(request):\n global driver\n configs = Properties()\n with open(\"data/appdata.properties\", \"rb\") as config_file:\n configs.load(config_file)\n driver = webdriver.WebDriver(ChromeDriverManager().install())\n driver.get(configs.get(\"URL\")[0])\n driver.maximize_window()\n driver.implicitly_wait(configs.get(\"ImplicitWait\")[0])\n request.cls.driver = driver\n request.cls.configs = configs\n yield\n driver.quit()\n\n\n@pytest.mark.hookwrapper\ndef pytest_runtest_makereport(item):\n \"\"\"\n Extends the PyTest plugin to take and embed a screenshot in the html report whenever a test fails.\n :param item:\n \"\"\"\n pytest_html = item.config.pluginmanager.getplugin(\"html\")\n outcome = yield\n report = outcome.get_result()\n extra = getattr(report, \"extra\", [])\n\n if report.when == \"call\" or report.when == \"setup\":\n\n xfail = hasattr(report, \"wasxfail\")\n\n if (report.skipped and xfail) or (report.failed and not xfail):\n file_name = os.path.abspath(os.curdir)+\"\\\\Screenshots\\\\\"+datetime.now().strftime(\"%d-%m-%Y %H-%M-%S\")+ \".png\"\n _capture_screenshot(file_name)\n if file_name:\n html = (\n '
    \"screenshot\"
    ' % file_name\n )\n extra.append(pytest_html.extras.html(html))\n report.extra = extra\n\ndef _capture_screenshot(name):\n driver.get_screenshot_as_file(name)\n\n@pytest.hookimpl(tryfirst=True)\ndef pytest_configure(config):\n if not os.path.exists(os.path.abspath(os.curdir)+\"\\\\reports\"):\n os.makedirs(os.path.abspath(os.curdir)+\"\\\\reports\")\n config.option.htmlpath = (\n \"reports/\" + datetime.now().strftime(\"%d-%m-%Y %H-%M-%S\") + \".html\"\n )","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"90795004","text":"# http://codeforces.com/contest/492/problem/A\n\nn = int(input())\nmark = [0,1,3,6]\n\ndef cal(n,arr):\n if n= 0:\n n -= mark[i]\n i += 1\n cal(i,mark)\nprint(i-1)","sub_path":"CodeForces/problem/A/492A_VanyaAndCubes.py","file_name":"492A_VanyaAndCubes.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"428292421","text":"from valentina.app.models import Profile\n\nFB_GENDER = {'feminino': Profile.FEMALE, 'masculino': Profile.MALE}\n\n\ndef save_profile(backend, user, response, *args, **kwargs):\n if backend.name == 'facebook':\n profile, created = Profile.objects.get_or_create(user=user)\n profile.gender = FB_GENDER.get(response.get('gender'), Profile.OTHER)\n profile.timezone = response.get('timezone', '')\n profile.access_token = response.get('access_token', '')\n profile.save()\n","sub_path":"valentina/app/oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"96478043","text":"#!/usr/bin/python3\n\"\"\"\nLists all states from the database with specifications\n\"\"\"\n\nimport MySQLdb\nimport sys\n\nif __name__ == '__main__':\n\n db_help = MySQLdb.connect(host=\"localhost\",\n port=3306,\n user=sys.argv[1],\n passwd=sys.argv[2],\n db=sys.argv[3])\n cur_help = db_help.cursor()\n cur_help.execute(\"SELECT cities.name FROM cities JOIN states ON cities.state_id =states.id WHERE states.name = %s ORDER BY cities.id\", (sys.argv[4],))\n states = cur_help.fetchall()\n val_help = \"\"\n for navigate in range(len(states)):\n if navigate != len(states) - 1:\n val_help += states[navigate][0] + \", \"\n else:\n val_help += states[navigate][0]\n print(val_help)\n cur_help.close()\n db_help.close()\n","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"52821889","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport numpy as np\n\nclass ScoringNetwork(nn.Module):\n def __init__(self, inp_dim, hidden_layer_size, arch=1):\n '''\n Assigns a weight to each fact based on a simple feedforward\n architecture\n '''\n super(ScoringNetwork, self).__init__()\n self.arch = arch\n # if arch == 1:\n self.fc1 = nn.Linear(4 * inp_dim, hidden_layer_size)\n self.fc2 = nn.Linear(hidden_layer_size, 1)\n # else:\n # self.fc1 = nn.Linear(3 * inp_dim, hidden_layer_size)\n # self.fc2 = nn.Linear(hidden_layer_size, 1)\n\n def construct_feature_vector(self, x, M, Q):\n '''\n Given input facts, question and memory at previous timestep,\n this function computes the feature 
vector for feeding into the\n scoring network.\n Refer: https://arxiv.org/pdf/1603.01417.pdf (section 3.3)\n '''\n if self.arch == 1:\n M = M.expand_as(x)\n Q = Q.expand_as(x)\n new_feature = torch.cat([\n x * Q,\n x * M,\n torch.abs(x - Q),\n torch.abs(x - M)\n ], dim=2)\n return new_feature\n else:\n M = M.expand_as(x)\n Q = Q.expand_as(x)\n new_feature = torch.cat([x, M, Q], dim=2)\n return new_feature\n\n def forward(self, x, M, Q):\n x = self.construct_feature_vector(x, M, Q)\n x = F.tanh(self.fc1(x))\n x = self.fc2(x)\n x = x.view(x.size()[0], -1)\n G = F.sigmoid(x)\n return G\n\nclass MemoryUpdateNetwork(nn.Module):\n def __init__(self, memory_size, hidden_layer_size):\n super(MemoryUpdateNetwork, self).__init__()\n self.fc1 = nn.Linear(3 * memory_size, hidden_layer_size)\n\n def forward(self, new_M, prev_M, Q):\n x = torch.cat([new_M, prev_M, Q], dim=2)\n x = F.leaky_relu(self.fc1(x))\n return x\n\nclass MemoryUpdateCell(nn.Module):\n def __init__(self, inp_dim, hidden_size):\n super(MemoryUpdateCell, self).__init__()\n self.hidden_size = hidden_size\n self.attention_cell = nn.GRU(hidden_size, hidden_size, batch_first=True)\n\n def forward(self, C, G):\n '''\n Iterate through each fact `C_t` till last fact is reached.\n The memory update resulting after last fact is assigned to be the\n episode tensor.\n\n @sizes:\n C: [batch X num_sentences X hidden_size]\n G: [batch X num_sentences]\n prev_M: [batch X 1 X hidden_size]\n\n M: [batch X hidden_size]\n '''\n num_sentences = C.size()[1]\n h = Variable(torch.zeros(self.hidden_size))\n for sentence in range(num_sentences):\n c_t = C[:, sentence, :]\n g_t = G[:, sentence]\n if sentence == 0:\n h = h.unsqueeze(0).expand_as(c_t)\n h = h.unsqueeze(0)\n g_t = g_t.unsqueeze(1).expand_as(h)\n c_t = c_t.unsqueeze(1)\n h = g_t * self.attention_cell(c_t, h)[1] + (1 - g_t) * h\n return h.transpose(0, 1)\n\n\nclass EpisodicMemory(nn.Module):\n def __init__(self, hidden_size, scoring_net_hidden_size=120, arch=1):\n super(EpisodicMemory, self).__init__()\n self.memory_update = MemoryUpdateCell(hidden_size, hidden_size)\n self.scoring_net = ScoringNetwork(hidden_size, scoring_net_hidden_size, arch)\n self.arch = arch\n\n def forward(self, C, Q, prev_M):\n '''\n @args:\n C: Facts received from the input module\n Q: Question vector\n prev_M: Initial memory of the module before episode\n\n @output:\n A tensor representing the new memory after the episode\n\n @sizes:\n C: [batch X num_sentences X hidden_size]\n Q: [batch X 1 X hidden_size]\n prev_M: [batch X 1 X hidden_size]\n\n next_M: [batch X 1 X hidden_size]\n '''\n G = self.scoring_net(C, Q, prev_M)\n next_M = self.memory_update(C, G)\n return next_M\n","sub_path":"episodic_memory.py","file_name":"episodic_memory.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"145189578","text":"# -- coding: utf-8 --\n\"\"\"\nCreated on Wed Aug 26 15:33:32 2020\n\n@author: Adlla Katarine e Daniel Alves\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport six\nimport math\n\n\n\n#transformando meses para ficar mais legível \ndef meses(db):\n for i in range(len(db)):\n ver = db.loc[i ,'Data Medicao'].split('-')\n \n if ver[1] == '01':\n db.loc[i ,'Data Medicao'] = 'JAN'\n db.loc[i ,'Ano Medicao'] = ver[0] \n elif ver[1] == '02':\n db.loc[i ,'Data Medicao'] = 'FEV' \n db.loc[i ,'Ano Medicao'] = ver[0] \n elif ver[1] == '03':\n db.loc[i ,'Data Medicao'] = 'MAR' \n db.loc[i ,'Ano 
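# Minimal sketch of the attention-gate features that ScoringNetwork above
# builds for arch == 1 (the interaction vector from arXiv:1603.01417,
# section 3.3). Shapes are assumed: facts [batch, num_sents, hidden],
# question and memory [batch, 1, hidden]; all tensor names illustrative.
import torch

batch, num_sents, hidden = 2, 5, 8
facts = torch.randn(batch, num_sents, hidden)
q = torch.randn(batch, 1, hidden).expand_as(facts)
m = torch.randn(batch, 1, hidden).expand_as(facts)
z = torch.cat([facts * q, facts * m,
               torch.abs(facts - q), torch.abs(facts - m)], dim=2)
print(z.shape)  # torch.Size([2, 5, 32]): 4 * hidden features per fact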
Medicao'] = ver[0] \n elif ver[1] == '04':\n db.loc[i ,'Data Medicao'] = 'ABR'\n db.loc[i ,'Ano Medicao'] = ver[0] \n elif ver[1] == '05':\n db.loc[i ,'Data Medicao'] = 'MAI'\n db.loc[i ,'Ano Medicao'] = ver[0] \n elif ver[1] == '06':\n db.loc[i ,'Data Medicao'] = 'JUN'\n db.loc[i ,'Ano Medicao'] = ver[0] \n elif ver[1] == '07':\n db.loc[i ,'Data Medicao'] = 'JUL'\n db.loc[i ,'Ano Medicao'] = ver[0] \n elif ver[1] == '08':\n db.loc[i ,'Data Medicao'] = 'AGO'\n db.loc[i ,'Ano Medicao'] = ver[0] \n elif ver[1] == '09':\n db.loc[i ,'Data Medicao'] = 'SET'\n db.loc[i ,'Ano Medicao'] = ver[0] \n elif ver[1] == '10':\n db.loc[i ,'Data Medicao'] = 'OUT'\n db.loc[i ,'Ano Medicao'] = ver[0] \n elif ver[1] == '11':\n db.loc[i ,'Data Medicao'] = 'NOV'\n db.loc[i ,'Ano Medicao'] = ver[0] \n elif ver[1] == '12':\n db.loc[i ,'Data Medicao'] = 'DEZ'\n db.loc[i ,'Ano Medicao'] = ver[0] \n\n\n\n#criando gráficos\ndef plot_map(df_ano, atributo, ano):\n df_ano = df_ano.reset_index()\n for i in range(len(df_ano)): \n data = df_ano.loc[i, 'Data Medicao']\n data = data.split()\n df_ano.loc[i, 'Data Medicao'] = str(data[0])\n \n plt.bar(df_ano['Data Medicao'], df_ano[atributo], color='#37777D')\n \n \n plt.xticks(df_ano['Data Medicao'])\n plt.ylabel(atributo)\n plt.title(atributo + ' por mês do ano '+ ano)\n \n plt.savefig(atributo+ str(ano) +'.png')\n \n plt.close()\n\n\n\n#separando dados para criação da tabela\ndef plot_dados(df_ano, cidade):\n #removendo dados desnecessários\n df_ano = df_ano.drop(columns=['DIRECAO PREDOMINANTE DO VENTO; MENSAL(° (gr))', \n 'EVAPORACAO DO PICHE; MENSAL(mm)',\n 'EVAPOTRANSPIRACAO POTENCIAL; BH MENSAL(mm)', 'EVAPOTRANSPIRACAO REAL; BH MENSAL(mm)',\n 'PRESSAO ATMOSFERICA AO NIVEL DO MAR; MEDIA MENSAL(mB)', 'PRESSAO ATMOSFERICA; MEDIA MENSAL(mB)',\n 'VENTO; VELOCIDADE MAXIMA MENSAL(m/s)',\n 'VENTO; VELOCIDADE MEDIA MENSAL(m/s)', 'VISIBILIDADE; MEDIA MENSAL(codigo)',\n 'Unnamed: 18', 'Latitude', 'Longitude'])\n #transformando em um dicionário\n df = df_ano.to_dict() \n \n #transformando em um dataframe\n df = pd.DataFrame.from_dict(df)\n \n anos = df[\"Ano Medicao\"].str.contains('2020')\n \n for i in range(0, len(anos)):\n if anos[i]:\n df = df.drop(i)\n \n \n #criando csv das informações\n df.to_csv(\"dados_\"+str(cidade)+\".csv\", index=False)\n \n #chamando função para criação do arquivo\n plot_arquivo(df, cidade) \n \n \n \n#criando arquivo com os dados para tabela\ndef plot_arquivo(df, cidade):\n #abertura do arquivo\n arquivo = open(\"dados_\"+cidade+\".txt\", \"a\")\n frases = [] #lista para salvar as linhas\n colunas = df.columns.values #colunas da tabela\n \n #atributos\n for j in range(1, len(colunas)-1):\n st = '\\n' + ' Mês || ' + str(colunas[j]) + '\\n'\n frases.append(st)\n frases.append('------------------------------------------------------\\n')\n st = ' ' + df['Ano Medicao'][0] + ' |' + df['Ano Medicao'][12]+ ' |' + df['Ano Medicao'][24] + ' |' + df['Ano Medicao'][36] +' |'+ df['Ano Medicao'][48] + '\\n'\n frases.append(st) \n \n \n #meses\n for i in range(0, 12):\n st = str(df['Data Medicao'][i]) + ' || ' + str(round(df[colunas[j]][i], 3)) +'|'+ str(round(df[colunas[j]][i+12], 3))+ '|'+ str(round(df[colunas[j]][i+24], 3)) +'|'+ str(round(df[colunas[j]][i+36], 3)) +'|'+ str(round(df[colunas[j]][i+48], 3)) + '\\n'\n frases.append(st)\n\n \n arquivo.writelines(frases)\n\n\n\n#criando tabela para comparações de atributos\ndef plot_table(cidade, ocorrencias):\n cidade = cidade.reset_index(drop=True) #resetando o index da cidade que recebe a cada ano\n #colunas do 
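# meses() above spells out twelve elif branches; a lookup table expresses
# the same mapping in a few lines. A sketch under the same column names
# ('Data Medicao' holding 'YYYY-MM...' strings before conversion):
MESES = {'01': 'JAN', '02': 'FEV', '03': 'MAR', '04': 'ABR',
         '05': 'MAI', '06': 'JUN', '07': 'JUL', '08': 'AGO',
         '09': 'SET', '10': 'OUT', '11': 'NOV', '12': 'DEZ'}

def meses_compact(db):
    for i in range(len(db)):
        ano, mes = db.loc[i, 'Data Medicao'].split('-')[:2]
        db.loc[i, 'Data Medicao'] = MESES[mes]
        db.loc[i, 'Ano Medicao'] = ano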
dataframe\n columns = ('INSOLACAO TOTAL; MENSAL(h)', 'PRECIPITACAO TOTAL; MENSAL(mm)', \n 'TEMPERATURA MAXIMA MEDIA; MENSAL(°C)', 'TEMPERATURA MEDIA COMPENSADA; MENSAL(°C)',\n 'TEMPERATURA MINIMA MEDIA; MENSAL(°C)', 'UMIDADE RELATIVA DO AR; MEDIA MENSAL(%)', 'OCORRÊNCIAS')\n #linhas do dataframe\n rows = ['JAN', 'FEV', 'MAR', 'ABR', 'MAI', 'JUN', 'JUL', 'AGO', 'SET', 'OUT', 'NOV', 'DEZ', 'TOTAL']\n \n #criação do dataframe\n df = pd.DataFrame(index=rows, columns=columns) \n \n #atributos\n percorrer = ['INSOLACAO TOTAL; MENSAL(h)', 'PRECIPITACAO TOTAL; MENSAL(mm)', \n 'TEMPERATURA MAXIMA MEDIA; MENSAL(°C)', 'TEMPERATURA MEDIA COMPENSADA; MENSAL(°C)', \n 'TEMPERATURA MINIMA MEDIA; MENSAL(°C)', 'UMIDADE RELATIVA DO AR; MEDIA MENSAL(%)']\n \n #adicionando linha de total\n cidade.loc[len(cidade)+1, :] = np.nan\n \n #adicionando valores dos atributos\n for atributo in percorrer:\n total=0\n df[atributo] = cidade[atributo].values\n for i in range(0, len(cidade[atributo].values) - 1):\n total += cidade[atributo].values[i]\n \n df.loc['TOTAL', atributo] = total\n \n df['OCORRÊNCIAS'] = ocorrencias.values\n\n df.reset_index(level=0, inplace=True)\n\n #normalizando valores\n for atributo in percorrer:\n for i in range(0, len(df[atributo].values)):\n df.loc[i, atributo] = round(df.loc[i, atributo], 4)\n \n #renomeando colunas\n df.rename(columns={'INSOLACAO TOTAL; MENSAL(h)': 'INSOLACAO', 'PRECIPITACAO TOTAL; MENSAL(mm)': 'PRECIPITACAO',\n 'TEMPERATURA MAXIMA MEDIA; MENSAL(°C)': 'TEMPERATURA MAX', 'TEMPERATURA MEDIA COMPENSADA; MENSAL(°C)': 'TEMPERATURA MED',\n 'TEMPERATURA MINIMA MEDIA; MENSAL(°C)': 'TEMPERATURA MIN', 'UMIDADE RELATIVA DO AR; MEDIA MENSAL(%)': 'UMIDADE AR'}, inplace = True)\n\n render_mpl_table(df)\n\n\n\n#separar estações do ano na tabela da estação\ndef estacoes_estacoes(cidade, df, atributo, ano, anos):\n #separando os meses para estação \"VERÃO\"\n aux1 = cidade.loc[0, atributo]\n aux2 = cidade.loc[1, atributo]\n aux3 = cidade.loc[2, atributo]\n \n df.loc['VERAO', atributo] += round((aux1+aux2+aux3)/3, 4)\n if ano == 2019:\n df.loc['VERAO', atributo] = df.loc['VERAO', atributo]/len(anos)\n \n #separando os meses para estação \"OUTONO\"\n aux1 = cidade.loc[3, atributo]\n aux2 = cidade.loc[4, atributo]\n aux3 = cidade.loc[5, atributo]\n \n \n if(math.isnan(aux1)):\n df.loc['OUTONO', atributo] += round((aux2+aux3)/3, 4)\n #print('aqui')\n else:\n df.loc['OUTONO', atributo] += round((aux1+aux2+aux3)/3, 4)\n if ano == 2019:\n df.loc['OUTONO', atributo] = df.loc['OUTONO', atributo]/len(anos)\n\n #separando os meses para estação \"INVERNO\"\n aux1 = cidade.loc[6, atributo]\n aux2 = cidade.loc[7, atributo]\n aux3 = cidade.loc[8, atributo]\n \n if(math.isnan(aux2)):\n df.loc['INVERNO', atributo] += round((aux1+aux3)/3, 4)\n #print('aqui')\n else:\n df.loc['INVERNO', atributo] += round((aux1+aux2+aux3)/3, 4)\n if ano == 2019:\n df.loc['INVERNO', atributo] = df.loc['INVERNO', atributo]/len(anos)\n \n #separando os meses para estação \"PRIMAVERA\"\n aux1 = cidade.loc[9, atributo]\n aux2 = cidade.loc[10, atributo]\n aux3 = cidade.loc[11, atributo]\n \n if(math.isnan(aux2)):\n df.loc['PRIMAVERA', atributo] += round((aux1+aux3)/3, 4)\n #print('aqui')\n else:\n df.loc['PRIMAVERA', atributo] += round((aux1+aux2+aux3)/3, 4)\n\n if ano == 2019:\n df.loc['PRIMAVERA', atributo] = df.loc['PRIMAVERA', atributo]/len(anos)\n\n#separar estações do ano na tabela de ocorrências\ndef estacoes_ocorrencia(ocorrencias, df, ano, anos):\n #separando os meses para estação \"VERÃO\"\n aux1 = ocorrencias[0]\n aux2 
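# estacoes_estacoes() above averages three hard-coded row positions per
# season and patches NaN months case by case. A pandas-native sketch of
# the same grouping (month-to-season map taken from the row positions the
# function uses; the frame is assumed indexed JAN..DEZ):
import pandas as pd

SEASON = {'JAN': 'VERAO', 'FEV': 'VERAO', 'MAR': 'VERAO',
          'ABR': 'OUTONO', 'MAI': 'OUTONO', 'JUN': 'OUTONO',
          'JUL': 'INVERNO', 'AGO': 'INVERNO', 'SET': 'INVERNO',
          'OUT': 'PRIMAVERA', 'NOV': 'PRIMAVERA', 'DEZ': 'PRIMAVERA'}

def season_means(df, atributo):
    # .mean() skips NaN by default, replacing the manual math.isnan checks
    return df[atributo].groupby(df.index.map(SEASON)).mean()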
= ocorrencias[1]\n aux3 = ocorrencias[2]\n\n \n df.loc['VERAO', 'OCORRENCIAS'] += round((aux1+aux2+aux3), 4)\n '''if ano == 2019:\n df.loc['VERAO', 'OCORRENCIAS'] = df.loc['VERAO', 'OCORRENCIAS']/len(anos)'''\n \n #separando os meses para estação \"OUTONO\"\n aux1 = ocorrencias[3]\n aux2 = ocorrencias[4]\n aux3 = ocorrencias[5]\n\n df.loc['OUTONO', 'OCORRENCIAS'] += round((aux1+aux2+aux3), 4)\n #print(df.loc['OUTONO', 'OCORRENCIAS'])\n '''if ano == 2019:\n df.loc['OUTONO', 'OCORRENCIAS'] = df.loc['OUTONO', 'OCORRENCIAS']/len(anos)'''\n \n #separando os meses para estação \"INVERNO\"\n aux1 = ocorrencias[6]\n aux2 = ocorrencias[7]\n aux3 = ocorrencias[8]\n \n df.loc['INVERNO', 'OCORRENCIAS'] += round((aux1+aux2+aux3), 4)\n '''if ano == 2019:\n df.loc['INVERNO', 'OCORRENCIAS'] = df.loc['INVERNO', 'OCORRENCIAS']/len(anos)'''\n \n #separando os meses para estação \"PRIMAVERA\"\n aux1 = ocorrencias[9]\n aux2 = ocorrencias[10]\n aux3 = ocorrencias[11]\n \n df.loc['PRIMAVERA', 'OCORRENCIAS'] += round((aux1+aux2+aux3), 4)\n '''if ano == 2019:\n df.loc['PRIMAVERA', 'OCORRENCIAS'] = df.loc['PRIMAVERA', 'OCORRENCIAS']/len(anos)'''\n\n\n\n#tabela relacionando com as estações do ano\ndef plot_estacoes(cidade, ocorrencias):\n cidade = cidade.reset_index(drop=True) #resetando o index da cidade que recebe a cada ano\n ocorrencias = ocorrencias.reset_index(drop=True) #resetando o index das ocorrencias que recebe a cada ano\n \n columns = ['PRECIPITACAO TOTAL; MENSAL(mm)', 'OCORRENCIAS']\n rows = ['VERAO', 'OUTONO', 'INVERNO', 'PRIMAVERA']\n \n #criação do dataframe\n df = pd.DataFrame(index=rows, columns=columns) \n \n percorrer = ['PRECIPITACAO TOTAL; MENSAL(mm)']\n \n for atributo in percorrer:\n estacoes_estacoes(cidade, df, atributo)\n \n estacoes_ocorrencia(ocorrencias, df)\n \n df.reset_index(level=0, inplace=True)\n\n #renomeando colunas\n df.rename(columns={'PRECIPITACAO TOTAL; MENSAL(mm)': 'PRECIPITACAO'}, inplace = True)\n\n render_mpl_table(df)\n \n\n \n#transformando tabela em imagem. Retirado de \"https://www.semicolonworld.com/question/58193/how-to-save-the-pandas-dataframe-series-data-as-a-figure\"\ndef render_mpl_table(data, col_width=10, row_height=0.625, font_size=11,\n header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='w',\n bbox=[0, 0, 1, 1], header_columns=0,\n ax=None, **kwargs):\n if ax is None:\n size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])\n fig, ax = plt.subplots(figsize=size)\n ax.axis('off')\n\n mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs)\n\n mpl_table.auto_set_font_size(False)\n mpl_table.set_fontsize(font_size)\n\n for k, cell in six.iteritems(mpl_table._cells):\n cell.set_edgecolor(edge_color)\n if k[0] == 0 or k[1] < header_columns:\n cell.set_text_props(weight='bold', color='w')\n cell.set_facecolor(header_color)\n else:\n cell.set_facecolor(row_colors[k[0]%len(row_colors) ])\n return ax\n\n render_mpl_table(data, header_columns=0, col_width=2.0) \n\n\n\n''' Cria dataFrame de um atributo e suas ocorrencias por estação e adiciona os seus valores. 
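# render_mpl_table() above returns a matplotlib Axes (note the trailing
# self-call after `return ax` is unreachable and could be dropped). A
# usage sketch with an illustrative seasonal summary frame:
import matplotlib.pyplot as plt
import pandas as pd

df = pd.DataFrame({'PRECIPITACAO': [310.2, 120.5, 45.1, 98.7],
                   'OCORRENCIAS': [12, 4, 1, 6]},
                  index=['VERAO', 'OUTONO', 'INVERNO', 'PRIMAVERA'])
ax = render_mpl_table(df.reset_index())
ax.figure.savefig('tabela_estacoes.png', bbox_inches='tight')
plt.close(ax.figure)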
'''\ndef atributoEstacao(atributo, df_lencois, df_piata, df_itirucu, ocorrencias, anos):\n #splitAtributo = atributo.split(';')\n columns = [atributo, 'OCORRENCIAS']\n rows = ['VERAO', 'OUTONO', 'INVERNO', 'PRIMAVERA']\n \n #criação do dataframe\n dfLencois = pd.DataFrame(index=rows, columns=columns)\n dfLencois.fillna(0,inplace=True)\n dfPiata = pd.DataFrame(index=rows, columns=columns)\n dfPiata.fillna(0,inplace=True)\n dfItirucu = pd.DataFrame(index=rows, columns=columns)\n dfItirucu.fillna(0,inplace=True)\n \n \n for ano in anos:\n ocorrenciaEstacoes(df_lencois.query(\"`Ano Medicao` == \" + str(ano)), ocorrencias[' '+ str(ano)], atributo, dfLencois, ano, anos)\n if(atributo != 'INSOLACAO TOTAL; MENSAL(h)'):\n ocorrenciaEstacoes(df_piata.query(\"`Ano Medicao` == \" + str(ano)), ocorrencias[' '+ str(ano)], atributo, dfPiata, ano, anos)\n ocorrenciaEstacoes(df_itirucu.query(\"`Ano Medicao` == \" + str(ano)), ocorrencias[' '+ str(ano)], atributo, dfItirucu, ano, anos)\n dfLencois.reset_index(level=0, inplace=True)\n if(atributo != 'INSOLACAO TOTAL; MENSAL(h)'):\n dfPiata.reset_index(level=0, inplace=True)\n dfItirucu.reset_index(level=0, inplace=True)\n \n plotOcorrenciaEstacoes(dfLencois, dfPiata, dfItirucu, rows, atributo, 'Estacao')\n\n\n\n''' Add os valores das estações e ocorrências no dataFrame de estações atraves de duas outras funções. '''\ndef ocorrenciaEstacoes(cidade, ocorrencias, atributo, df, ano, anos):\n cidade = cidade.reset_index(drop=True) #resetando o index da cidade que recebe a cada ano\n ocorrencias = ocorrencias.reset_index(drop=True) #resetando o index das ocorrencias que recebe a cada ano\n \n estacoes_estacoes(cidade, df, atributo, ano, anos)\n estacoes_ocorrencia(ocorrencias, df, ano, anos)\n \n \n \n''' Cria dataFrame de um atributo e suas ocorrencias por mes e adiciona os seus valores. '''\ndef atributoAno(atributo, df_lencois, df_piata, df_itirucu, ocorrencias, anos):\n columns = [atributo, 'OCORRENCIAS']\n rows = ['JAN', 'FEV', 'MAR', 'ABR', 'MAI', 'JUN', 'JUL', 'AGO', 'SET', 'OUT', 'NOV', 'DEZ']\n dfLencois = pd.DataFrame(index=rows, columns=columns)\n dfLencois.fillna(0,inplace=True)\n dfPiata = pd.DataFrame(index=rows, columns=columns)\n dfPiata.fillna(0,inplace=True)\n dfItirucu = pd.DataFrame(index=rows, columns=columns)\n dfItirucu.fillna(0,inplace=True)\n \n ocorrenciaMeses(ocorrencias[str('Ocorrências')], atributo, df_lencois, dfLencois, rows, anos)\n if(atributo != 'INSOLACAO TOTAL; MENSAL(h)'):\n ocorrenciaMeses(ocorrencias[str('Ocorrências')], atributo, df_piata, dfPiata, rows, anos)\n ocorrenciaMeses(ocorrencias[str('Ocorrências')], atributo, df_itirucu, dfItirucu, rows, anos)\n \n dfLencois.reset_index(level=0, inplace=True)\n if(atributo != 'INSOLACAO TOTAL; MENSAL(h)'):\n dfPiata.reset_index(level=0, inplace=True)\n dfItirucu.reset_index(level=0, inplace=True)\n plotOcorrenciaEstacoes(dfLencois, dfPiata, dfItirucu, rows, atributo, 'Mes')\n\n\n\n''' Add os valores dos meses e ocorrências no dataFrame de mes. '''\ndef ocorrenciaMeses(ocorrencias, atributo, df, dfOcorrencias, rows, anos):\n for i in range(12):\n df_mes = df[df[df.columns[0]].str.contains(rows[i])]\n \n dfOcorrencias.loc[rows[i], atributo] = df_mes[atributo].mean()\n dfOcorrencias.loc[rows[i], 'OCORRENCIAS'] = ocorrencias[i]\n \n\n''' Cria o gráfico de Ocorrências x Mês/Estação. 
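# ocorrenciaMeses() above selects the rows whose first column contains a
# month tag and averages the attribute. The core pattern in isolation
# (column names as in the CSVs; values illustrative):
import pandas as pd

df = pd.DataFrame({'Data Medicao': ['JAN', 'FEV', 'JAN'],
                   'PRECIPITACAO TOTAL; MENSAL(mm)': [100.0, 80.0, 60.0]})
jan_mean = df.loc[df['Data Medicao'].str.contains('JAN'),
                  'PRECIPITACAO TOTAL; MENSAL(mm)'].mean()
print(jan_mean)  # 80.0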
'''\ndef plotOcorrenciaEstacoes(df, df2, df3, rows, atributo, aux):\n splitAtributo = atributo.split(';')\n splitAtributo = splitAtributo[0]\n auxAtributo = atributo.split('MENSAL')\n auxAtributo = auxAtributo[1]\n \n \n fig, ax1 = plt.subplots()\n # primeiro defino a sequência (numérica) do eixo x \n # (lembrando xticks não recebem strings)4\n auxL = []\n if len(rows) == 12:\n auxL = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n else:\n auxL = [0, 1, 2, 3]\n ax1.set_xticks(auxL)\n # Agora coloco os nomes dos estados como estiquetas\n ax1.set_xticklabels(df.index.tolist())\n # Duplico e vinculo o novo axe `ax2` ao orginal `ax1`\n ax2 = ax1.twinx()\n # Plotar\n \n ax1.bar(rows, df['OCORRENCIAS'], color='#37777D', label = 'Ocorrências')\n ax1.set_ylabel(\"OCORRENCIAS\")\n ax2.plot(rows, df[atributo], color='r', marker='^', linestyle='-', linewidth=2, label = splitAtributo.capitalize() + auxAtributo + ' - Lençóis')\n if(atributo != 'INSOLACAO TOTAL; MENSAL(h)'):\n ax2.plot(rows, df2[atributo], color='g', marker='s', linestyle='-', linewidth=2, label = splitAtributo.capitalize() + auxAtributo + ' - Piatã')\n ax2.plot(rows, df3[atributo], color='m', marker='o', linestyle='-', linewidth=2, label = splitAtributo.capitalize() + auxAtributo + ' - Itiruçu')\n #ax1.legend(loc=0)\n ax2.set_ylabel(str(splitAtributo + auxAtributo))\n \n lines, labels = ax1.get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n ax2.legend(lines + lines2, labels + labels2, loc=\"upper right\")\n #ax2.legend(loc=0)\n #plt.title('Ocorrências x ' + str(splitAtributo) + ' 2015 - 2019')\n #plt.legend(loc=\"upper left\")\n plt.savefig(splitAtributo + '_' + aux + '.png')\n\n # Plotando o Gráfico\n plt.figure();\n'''\n\nlns1 = ax.plot(time, Swdown, '-', label = 'Swdown')\nlns2 = ax.plot(time, Rn, '-', label = 'Rn')\nax2 = ax.twinx()\nlns3 = ax2.plot(time, temp, '-r', label = 'temp')\n\n# added these three lines\nlns = lns1+lns2+lns3\nlabs = [l.get_label() for l in lns]\nax.legend(lns, labs, loc=0)\n\nlns = ax1 + ax2\nlabs = [l.get_label() for l in lns]\nax.legend(lns, labs, loc=0)\n\n'''\n\n\ndef main():\n '''\n #dataframe das cidades\n #df_Estacao_Morro = pd.read_csv('.\\\\convencionais\\\\dados_83184_M_2015-01-01_2020-07-31.csv')\n df_Estacao_Lencois = pd.read_csv('.\\\\estacoes\\\\convencionais\\\\dados_83242_M_2015-01-01_2020-07-31.csv')\n \n #meses(df_Estacao_Morro)\n meses(df_Estacao_Lencois)\n \n \n #criação tabela e do csv\n #plot_dados(df_Estacao_Morro, 'Morro')\n plot_dados(df_Estacao_Lencois, 'Lencois')\n \n #armazenando nomes das colunas\n colunas = df_Estacao_Morro.columns.values\n \n criação de gráficos\n for i in range(1, 18):\n plot_map(df_Estacao_Morro[df_Estacao_Morro['Data Medicao'].str.contains(\"2015\")], colunas[i], '2015') \n plot_map(df_Estacao_Morro[df_Estacao_Morro['Data Medicao'].str.contains(\"2016\")], colunas[i], '2016')\n plot_map(df_Estacao_Morro[df_Estacao_Morro['Data Medicao'].str.contains(\"2017\")], colunas[i], '2017')\n plot_map(df_Estacao_Morro[df_Estacao_Morro['Data Medicao'].str.contains(\"2018\")], colunas[i], '2018')\n plot_map(df_Estacao_Morro[df_Estacao_Morro['Data Medicao'].str.contains(\"2019\")], colunas[i], '2019')\n plot_map(df_Estacao_Morro[df_Estacao_Morro['Data Medicao'].str.contains(\"2020\")], colunas[i], '2020')\n '''\n \n #dataframe das informações\n #df_morro = pd.read_csv('.\\\\estacoes\\\\dados_Morro.csv')\n df_lencois = pd.read_csv('.\\\\estacoes\\\\dados_Lencois.csv')\n df_lencois = df_lencois.rename(columns={'TEMPERATURA MEDIA COMPENSADA; 
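# plotOcorrenciaEstacoes() above overlays occurrence bars and attribute
# lines on twin y-axes, then merges both legends into one box. The core
# pattern, reduced to a runnable sketch with made-up values:
import matplotlib.pyplot as plt

meses = ['JAN', 'FEV', 'MAR']
fig, ax1 = plt.subplots()
ax1.bar(meses, [12, 4, 7], color='#37777D', label='Ocorrências')
ax2 = ax1.twinx()
ax2.plot(meses, [310.0, 220.5, 180.2], 'r-^', label='Precipitação (mm)')
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc='upper right')
fig.savefig('exemplo_twinx.png')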
MENSAL(°C)': 'TEMPERATURA MEDIA; MENSAL(°C)'})\n \n df_piata = pd.read_csv('.\\\\estacoes\\\\dados_Piata.csv')\n df_piata = df_piata.rename(columns={'Mes': 'Data Medicao'})\n df_piata = df_piata.rename(columns={'Ano': 'Ano Medicao'})\n df_piata = df_piata.rename(columns={'PRECIPITACAO TOTAL; DIARIO (AUT)(mm)': 'PRECIPITACAO TOTAL; MENSAL(mm)'})\n df_piata = df_piata.rename(columns={'TEMPERATURA MEDIA; DIARIA (AUT)(°C)': 'TEMPERATURA MEDIA; MENSAL(°C)'})\n df_piata = df_piata.rename(columns={'UMIDADE RELATIVA DO AR; MEDIA DIARIA (AUT)(%)': 'UMIDADE RELATIVA DO AR; MEDIA MENSAL(%)'})\n \n df_piata = df_piata[['Data Medicao','PRECIPITACAO TOTAL; MENSAL(mm)','TEMPERATURA MEDIA; MENSAL(°C)','UMIDADE RELATIVA DO AR; MEDIA MENSAL(%)','Ano Medicao']]\n\n df_itirucu = pd.read_csv('.\\\\estacoes\\\\dados_Itirucu.csv')\n df_itirucu = df_itirucu.rename(columns={'Mes': 'Data Medicao'})\n df_itirucu = df_itirucu.rename(columns={'Ano': 'Ano Medicao'})\n df_itirucu = df_itirucu.rename(columns={'PRECIPITACAO TOTAL; DIARIO (AUT)(mm)': 'PRECIPITACAO TOTAL; MENSAL(mm)'})\n df_itirucu = df_itirucu.rename(columns={'TEMPERATURA MEDIA; DIARIA (AUT)(°C)': 'TEMPERATURA MEDIA; MENSAL(°C)'})\n df_itirucu = df_itirucu.rename(columns={'UMIDADE RELATIVA DO AR; MEDIA DIARIA (AUT)(%)': 'UMIDADE RELATIVA DO AR; MEDIA MENSAL(%)'})\n \n df_itirucu = df_itirucu[['Data Medicao','PRECIPITACAO TOTAL; MENSAL(mm)','TEMPERATURA MEDIA; MENSAL(°C)','UMIDADE RELATIVA DO AR; MEDIA MENSAL(%)','Ano Medicao']]\n \n \n #dataframe das ocorrências\n ocorrencias = pd.read_csv('.\\\\Gráficos_Tabelas\\\\ocorrencias_por_mes_ano.csv')\n \n \n anos = [2015, 2016, 2017, 2018, 2019]\n \n \n '''\n #criação de tabelas por mês\n \n for ano in anos:\n plot_table(df_morro.query(\"`Ano Medicao` == \" + str(ano)), ocorrencias[' '+ str(ano)])\n plot_table(df_lencois.query(\"`Ano Medicao` == \" + str(ano)), ocorrencias[' '+ str(ano)])\n ''' \n\n #criação de tabelas por estações do ano \n '''for ano in anos:\n #plot_estacoes(df_morro.query(\"`Ano Medicao` == \" + str(ano)), ocorrencias[' '+ str(ano)])\n #plot_estacoes(df_lencois.query(\"`Ano Medicao` == \" + str(ano)), ocorrencias[' '+ str(ano)])\n exemplo(df_morro.query(\"`Ano Medicao` == \" + str(ano)), ocorrencias[' '+ str(ano)], ano)\n exemplo(df_lencois.query(\"`Ano Medicao` == \" + str(ano)), ocorrencias[' '+ str(ano)], ano)'''\n \n coluna = ['INSOLACAO TOTAL; MENSAL(h)', 'PRECIPITACAO TOTAL; MENSAL(mm)',\n 'TEMPERATURA MEDIA; MENSAL(°C)', 'UMIDADE RELATIVA DO AR; MEDIA MENSAL(%)']\n \n for atributo in coluna:\n atributoEstacao(atributo, df_lencois, df_piata, df_itirucu, ocorrencias, anos)\n atributoAno(atributo, df_lencois, df_piata, df_itirucu, ocorrencias, anos)\n\n\n\nif __name__ == '__main__': # chamada da funcao principal\n main()","sub_path":"estacoes/analise_Dados_Metereologicos_Convencionais.py","file_name":"analise_Dados_Metereologicos_Convencionais.py","file_ext":"py","file_size_in_byte":22413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"386020714","text":"#25.10.2018\r\ndef equal_items(given_list, given_string):\r\n a = \"\"\r\n for element in given_list:\r\n if type(element) == list:\r\n for secondary_element in element:\r\n a += \"{0}\".format(secondary_element)\r\n else:\r\n a += \"{0}\".format(element)\r\n\r\n if a == given_string:\r\n return True\r\n return False\r\n\r\nprint(bool(equal_items([\"a\", [\"b\", \"c\"]], 
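# equal_items() above (Problem_57) flattens exactly one level of nesting
# by string concatenation; a recursive generator handles arbitrary depth
# with the same comparison. A sketch:
def flatten(items):
    for element in items:
        if isinstance(element, list):
            yield from flatten(element)
        else:
            yield '{0}'.format(element)

def equal_items_deep(given_list, given_string):
    return ''.join(flatten(given_list)) == given_string

print(equal_items_deep(['a', ['b', ['c']]], 'abc'))  # True at any depth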
\"abc\")))\r\n","sub_path":"List/Problem_57.py","file_name":"Problem_57.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"122686338","text":"import george\nimport emcee\nimport numpy as np\n\nfrom copy import deepcopy\n\nfrom tuner.util import normalization\nfrom tuner.models.gaussian_process import GaussianProcess\nfrom tuner.models.gaussian_process_mcmc import GaussianProcessMCMC\n\n\ndef normalize(X, lower, upper):\n X_norm, _, _ = normalization.zero_one_normalization(X[:, :-1], lower, upper)\n X_norm = np.concatenate((X_norm, np.rint(X[:, None, -1])), axis=1)\n return X_norm\n\n\nclass MTBOGPMCMC(GaussianProcessMCMC):\n def __init__(self, kernel,\n prior=None, n_hypers=20,\n chain_length=2000, burnin_steps=2000,\n normalize_output=False,\n rng=None,\n lower=None,\n upper=None,\n noise=-8):\n\n self.hypers = None\n super(MTBOGPMCMC, self).__init__(kernel, prior,\n n_hypers, chain_length,\n burnin_steps,\n normalize_output=normalize_output,\n normalize_input=False,\n rng=rng, lower=lower,\n upper=upper, noise=noise)\n\n def train(self, X, y, do_optimize=True, **kwargs):\n self.X = normalize(X, self.lower, self.upper)\n\n if self.normalize_output:\n # Normalize output to have zero mean and unit standard deviation\n self.y, self.y_mean, self.y_std = normalization.zero_mean_unit_var_normalization(y)\n else:\n self.y = y\n\n # Use the mean of the data as mean for the GP\n mean = np.mean(self.y, axis=0)\n self.gp = george.GP(self.kernel, mean=mean)\n\n if do_optimize:\n # We have one walker for each hyperparameter configuration\n sampler = emcee.EnsembleSampler(self.n_hypers,\n len(self.kernel.pars) + 1,\n self.loglikelihood)\n\n # Do a burn-in in the first iteration\n if not self.burned:\n # Initialize the walkers by sampling from the prior\n if self.prior is None:\n self.p0 = np.random.rand(self.n_hypers, len(self.kernel.pars) + 1)\n else:\n self.p0 = self.prior.sample_from_prior(self.n_hypers)\n # Run MCMC sampling\n self.p0, _, _ = sampler.run_mcmc(self.p0,\n self.burnin_steps,\n rstate0=self.rng)\n\n self.burned = True\n\n # Start sampling\n pos, _, _ = sampler.run_mcmc(self.p0,\n self.chain_length,\n rstate0=self.rng)\n\n # Save the current position, it will be the start point in\n # the next iteration\n self.p0 = pos\n\n # Take the last samples from each walker\n self.hypers = sampler.chain[:, -1]\n\n else:\n if self.hypers is None:\n self.hypers = self.gp.kernel[:].tolist()\n self.hypers.append(self.noise)\n self.hypers = [self.hypers]\n\n self.models = []\n for sample in self.hypers:\n\n # Instantiate a GP for each hyperparameter configuration\n kernel = deepcopy(self.kernel)\n #kernel.pars = np.exp(sample[:-1])\n kernel.vector = sample[:-1]\n noise = np.exp(sample[-1])\n model = MTBOGP(kernel,\n normalize_output=self.normalize_output,\n noise=noise,\n lower=self.lower,\n upper=self.upper,\n rng=self.rng)\n model.train(X, y, do_optimize=False)\n self.models.append(model)\n\n self.is_trained = True\n\n\nclass MTBOGP(GaussianProcess):\n def __init__(self, kernel, prior=None,\n noise=1e-3, use_gradients=False,\n normalize_output=False,\n lower=None, upper=None, rng=None):\n super(MTBOGP, self).__init__(kernel=kernel,\n prior=prior,\n noise=noise,\n use_gradients=use_gradients,\n normalize_output=normalize_output,\n normalize_input=False,\n lower=lower,\n upper=upper,\n rng=rng)\n\n def train(self, X, y, do_optimize=True):\n self.original_X = X\n X_norm = normalize(X, self.lower, 
self.upper)\n return super(MTBOGP, self).train(X_norm, y, do_optimize)\n\n def predict(self, X_test, full_cov=False, **kwargs):\n X_norm = normalize(X_test, self.lower, self.upper)\n return super(MTBOGP, self).predict(X_norm, full_cov)\n\n def sample_functions(self, X_test, n_funcs=1):\n X_norm = normalize(X_test, self.lower, self.upper)\n return super(MTBOGP, self).sample_functions(X_norm, n_funcs)\n\n def get_incumbent(self):\n \"\"\"\n Returns the best observed point and its function value\n\n Returns\n ----------\n incumbent: ndarray (D,)\n current incumbent\n incumbent_value: ndarray (N,)\n the observed value of the incumbent\n \"\"\"\n\n projection = np.ones([self.original_X.shape[0], 1]) * 1\n\n X_projected = np.concatenate((self.original_X[:, :-1], projection), axis=1)\n X_norm = normalize(X_projected, self.lower, self.upper)\n\n m, _ = self.predict(X_norm)\n\n best = np.argmin(m)\n incumbent = X_projected[best]\n incumbent_value = m[best]\n\n return incumbent, incumbent_value\n","sub_path":"autotf/tuner/models/mtbo_gp.py","file_name":"mtbo_gp.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"132864660","text":"# Import needed libraries\nimport numpy as np\nimport gzip\nimport sys\n\n# Read imput line\nin_file = sys.argv[1] \nout_file = sys.argv[2]\nl = int(sys.argv[3])\n\n\n# Compute the mean expression of each gene and save it on an array\nc = 0\nmeans = []\nwith gzip.open(in_file) as infile:\n for line in infile:\n c += 1\n if c <= 3: continue\n line = line.strip().split()\n if len(line) < 40: continue\n line = tuple(float(i) for i in line[2:])\n means.append(np.mean(line))\n\n# Select the id of the tissues that are in the top 5k\nm = sorted(means, reverse=True)[l]\nmeans = np.array(means)\nselected_ind = np.where(means>m)\n\n# Save the results\nnp.save(out_file, selected_ind)","sub_path":"scripts/getTopGenesInd.py","file_name":"getTopGenesInd.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"520869877","text":"import random\n\nfrom django.core.management.base import BaseCommand\n\nfrom apps.core.models import Category, Product\n\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n parser.add_argument('count', type=int)\n\n def handle(self, *args, **options):\n Category.objects.get_or_create(title='category 1')\n Category.objects.get_or_create(title='category 2')\n Category.objects.get_or_create(title='category 3')\n\n category_ids = Category.objects.values_list('id', flat=True)\n\n product_list = []\n\n count = options.get('count')\n\n for i in range(1, count + 1):\n product_list.append(\n Product(\n title=f'product #{i}',\n category_id=random.choice(category_ids),\n price=round(random.uniform(100, 100000), 2),\n discount=random.randint(1, 99),\n description=f'description #{i}'\n )\n )\n\n Product.objects.bulk_create(product_list)\n\n self.stdout.write(\n self.style.SUCCESS(f'{count} products was created.')\n )\n","sub_path":"apps/core/management/commands/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"204309492","text":"#############################################################################\n#\n# Copyright (c) 2006-2007 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the 
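# get_incumbent() above projects every observed point onto the target task
# (task column forced to 1) before predicting and taking the argmin of the
# posterior mean. The projection step alone, with illustrative data:
import numpy as np

X = np.array([[0.2, 0.5, 0.0], [0.7, 0.1, 0.0]])  # observed on task 0
projection = np.ones([X.shape[0], 1]) * 1
X_projected = np.concatenate((X[:, :-1], projection), axis=1)
print(X_projected)  # same inputs, task column now 1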
provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Grokkers for the static resource directory.\"\"\"\n\nimport os\n\nfrom zope import interface, component\nfrom zope.security.checker import NamesChecker\nfrom zope.publisher.interfaces.browser import IDefaultBrowserLayer\n\nimport martian\nfrom martian.error import GrokError\n\nfrom grokcore.view import components\n\nallowed_resource_names = ('GET', 'HEAD', 'publishTraverse', 'browserDefault',\n 'request', '__call__')\nallowed_resourcedir_names = allowed_resource_names + ('__getitem__', 'get')\n\nclass StaticResourcesGrokker(martian.GlobalGrokker):\n\n def grok(self, name, module, module_info, config, **kw):\n # we're only interested in static resources if this module\n # happens to be a package\n if not module_info.isPackage():\n return False\n\n resource_path = module_info.getResourcePath('static')\n if os.path.isdir(resource_path):\n static_module = module_info.getSubModuleInfo('static')\n if static_module is not None:\n if static_module.isPackage():\n raise GrokError(\n \"The 'static' resource directory must not \"\n \"be a python package.\",\n module_info.getModule())\n else:\n raise GrokError(\n \"A package can not contain both a 'static' \"\n \"resource directory and a module named \"\n \"'static.py'\", module_info.getModule())\n\n # public checker by default\n checker = NamesChecker(allowed_resourcedir_names)\n\n resource_factory = components.DirectoryResourceFactory(\n resource_path, checker, module_info.dotted_name)\n adapts = (IDefaultBrowserLayer,)\n provides = interface.Interface\n name = module_info.dotted_name\n config.action(\n discriminator=('adapter', adapts, provides, name),\n callable=component.provideAdapter,\n args=(resource_factory, adapts, provides, name),\n )\n return True\n\n\n","sub_path":"grokcore.view/tags/1.1/src/grokcore/view/meta/static.py","file_name":"static.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"86888444","text":"from bots import *\n\ndef assistant(command):\n \"if statements for executing commands\"\n\n if 'what\\'s up' in command:\n talk_bot.talk('Don\\'t you have better job to do')\n\n elif 'joke' in command:\n \tjoke_bot.telljoke()\n\n elif 'open browser' in command:\n \tbrowser_bot.open(command,1)\n\n elif 'where am i' in command:\n location_bot.locate_me()\n\n elif 'go from' in command:\n location_bot.directions(command)\n\n else:\n \ttalk_bot.talk('Sorry, I am yet to learn to do those, here are some search results from the web')\n \tbrowser_bot.open(command,0)\n\ntalk_bot.talk('Hi, how may I help you')\n\n# loop to continue executing multiple commands\nwhile True:\n assistant(listener_bot.recognize())\n\n# test without microphone\n# command = \"go from chennai hyderabad\"\n# count = 0\n# while count < 1:\n# assistant(command)\n# count+=1\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"594551083","text":"import boto3\r\nimport json\r\nfrom datetime 
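# assistant() above dispatches on substrings via an if/elif chain; a list
# of (keyword, handler) pairs keeps the same first-match semantics in a
# data-driven form. Handlers assume the same bots module import as above.
def assistant_dispatch(command):
    handlers = [
        ("what's up", lambda c: talk_bot.talk('Don\'t you have better job to do')),
        ('joke', lambda c: joke_bot.telljoke()),
        ('open browser', lambda c: browser_bot.open(c, 1)),
        ('where am i', lambda c: location_bot.locate_me()),
        ('go from', lambda c: location_bot.directions(c)),
    ]
    for keyword, handler in handlers:
        if keyword in command:
            return handler(command)
    talk_bot.talk('Sorry, I am yet to learn to do those, '
                  'here are some search results from the web')
    browser_bot.open(command, 0)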
import datetime\r\nimport calendar\r\nimport random\r\nimport time\r\nimport uuid\r\nimport sys\r\nimport pytz\r\n\r\ntz_lima = pytz.timezone('America/Lima')\r\n\r\nname_stream = 'ComprasStream'\r\nlist_products = ['Laptop HP', 'Celular', 'Disco externo 1TB SSD','Bicicleta Giant','Tablet','Raspberry Pi 4B','Echo Dot 3gen','Reloj','USB 64GB 3.0','TV Samsung 4K 32','Play Station 4','Parlante','Libro AWS','Mac Book Pro 14','Funko Forest Gump']\r\n\r\nkinesis = boto3.client('kinesis', region_name = 'us-east-1')\r\n\r\ndef put_to_stream(kinesis):\r\n datetime_lima = datetime.now(tz_lima)\r\n record = {\r\n 'id_compra': str(uuid.uuid4()),\r\n 'fecha_reg': datetime_lima.strftime(\"%Y-%m-%d %H:%M:%S\"),\r\n 'producto': random.choice(list_products)\r\n }\r\n print(record)\r\n kinesis.put_record(\r\n StreamName = name_stream,\r\n Data = json.dumps(record),\r\n PartitionKey = 'a-partition'\r\n )\r\n\r\ni = 0\r\nwhile i < 20:\r\n i += 1\r\n put_to_stream(kinesis)\r\n time.sleep(.3)\r\n\r\n\r\n\r\n","sub_path":"WriteKinesisStreamsCompra.py","file_name":"WriteKinesisStreamsCompra.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"205360314","text":"import os\nimport cherrypy\nimport sqlite3\nimport API\nimport Serve\nimport Client\nimport thread_tasks\nimport socket\n\n# LISTEN_IP = \"192.168.1.6\" # IPv4 connection router to this device\nhostname = socket.gethostname()\nLISTEN_IP = socket.gethostbyname(hostname)\nLISTEN_PORT = 10204 # Port to be communicated with\n## ##\n# Note if you are in uni, you use IPv4 for LOCATION_ADDRESS #\n## ##\n# LOCATION_ADRESS = \"122.60.172.73:80\" # External IP for connection externallly\nLOCATION_ADRESS = LISTEN_IP # External IP for connection externallly\nWORLD_CONNECTION = '0' # 0 Uni computer / 1 Uni WiFi / 2 External Connection\n\nSESSION_DB = 'session.db' # Database for multiple user session\n\n# Cross Origin Scripting Function\n# This used to enable frameworks like Angular make backend calls to CherryPy\n\ndef cors():\n if cherrypy.request.method == 'OPTIONS':\n cherrypy.response.headers['Access-Control-Allow-Methods'] = 'POST'\n cherrypy.response.headers['Access-Control-Allow-Headers'] = 'content-type'\n cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'\n return True\n else:\n cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'\n\n# Main function to start CherryPy\ndef main():\n\t# CherryPy configuration\n conf = {\n \t# Route all / sessions to current directory folder\n '/': {\n 'tools.sessions.on': True,\n 'tools.staticdir.root': os.path.abspath(os.getcwd())\n },\n # Route all /static to /Bundled folder\n '/static': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': './Bundled',\n },\n }\n\n # CherryPy socket configuration\n cherrypy.config.update({'server.socket_host': LISTEN_IP,\n 'server.socket_port': LISTEN_PORT,\n 'engine.autoreload.on': True,\n })\n\n # CORS function per request\n cherrypy.tools.cors = cherrypy._cptools.HandlerTool(cors)\n \n\n print(\"========================================\")\n print(\" Teresito Magbag\")\n print(\" University of Auckland\")\n print(\" COMPSYS302 - CherryPy / Angular\")\n print(\"========================================\") \n \n cherrypy.tree.mount(Serve.Web_Page(), \"/\", conf) # Serves the webpage\n cherrypy.tree.mount(API.Interface(), \"/api\", conf) # End points for my peers\n cherrypy.tree.mount(Client.Interface(), \"/client\", conf) # Client communication\n\n\n 
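# A matching reader for the ComprasStream producer above, polling with the
# standard Kinesis iterator API. Stream name and region come from the
# script; the shard id assumes the default single-shard stream.
import json
import time
import boto3

kinesis = boto3.client('kinesis', region_name='us-east-1')
it = kinesis.get_shard_iterator(StreamName='ComprasStream',
                                ShardId='shardId-000000000000',
                                ShardIteratorType='TRIM_HORIZON')['ShardIterator']
while it:
    out = kinesis.get_records(ShardIterator=it, Limit=25)
    for rec in out['Records']:
        print(json.loads(rec['Data']))  # the compra dict put by the producer
    it = out['NextShardIterator']
    time.sleep(1)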
cherrypy.engine.subscribe('start', start_session) # Starting the threading and database ssessions\n cherrypy.engine.subscribe('stop', stop_session) # Dropping all database sessions\n\n cherrypy.engine.start() # Start CherryPy\n cherrypy.engine.block()\n \n# Start function before CherryPy continues\ndef start_session():\n\t# Background thread task to ping_check my peers every 2 minutes \n interval_ping = cherrypy.process.plugins.BackgroundTask(\n 120, thread_tasks.ping_checkServers, [LOCATION_ADRESS, WORLD_CONNECTION])\n\n # Background thread task to update user_list used for ping_check everyone 30 seconds\n interval_list = cherrypy.process.plugins.BackgroundTask(\n 30, thread_tasks.updateDBList)\n\n # Start Threads\n interval_ping.start()\n interval_list.start()\n\n # Create table for sessions\n createSESSION = \"\"\" CREATE TABLE IF NOT EXISTS \"USER_SESSION\" (\n\t\"USER\" TEXT NOT NULL UNIQUE,\n\t\"APIKEY\" TEXT NOT NULL UNIQUE,\n\t\"PRIVATE_DATA\" TEXT UNIQUE,\n\t\"PRIVATE_KEY\" TEXT UNIQUE,\n\t\"PUBLIC_KEY\" TEXT UNIQUE,\n\t\"TIME\" INTEGER NOT NULL,\n\t\"STATUS\" TEXT,\n \"EDKEY\"\tTEXT\n ); \"\"\"\n\n # Create table for user_list to ping\n createLIST = \"\"\" CREATE TABLE IF NOT EXISTS \"USER_LIST\" (\n\t\"USER\"\tTEXT NOT NULL UNIQUE,\n\t\"ADDRESS\"\tTEXT NOT NULL,\n\t\"LOCATION\"\tTEXT NOT NULL,\n\t\"PUBLIC_KEY\"\tTEXT NOT NULL UNIQUE,\n\t\"TIME\"\tINTEGER NOT NULL,\n\t\"STATUS\"\tTEXT NOT NULL\n );\"\"\"\n\n # Execute creation of table. Drop them first if server was abruptly killed\n with sqlite3.connect(SESSION_DB) as con:\n con.execute(\"DROP TABLE IF EXISTS USER_SESSION\")\n con.execute(\"DROP TABLE IF EXISTS USER_LIST\")\n con.execute(createSESSION)\n con.execute(createLIST)\n\n# Stop function before CherryPy ends\ndef stop_session():\n\t# Drop all table for the ssesions\n with sqlite3.connect(SESSION_DB) as con:\n con.execute(\"DROP TABLE IF EXISTS USER_SESSION\")\n con.execute(\"DROP TABLE IF EXISTS USER_LIST\")\n\nif __name__ == '__main__':\n main() # Function to call on running this script\n","sub_path":"Project/BackEnd/Python Server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"230835834","text":"import socket\r\n\r\n#make socket\r\ns = socket.socket()\r\n#definite port 3000\r\nport = 3000\r\ns.bind(('', port))\r\nprint ('\\nSocket binded to',port)\r\n\r\n#listening mode\r\ns.listen(5)\r\nprint ('\\nSocket is listening ...')\r\n#accept the request connection from client\r\nc, addr = s.accept()\r\nprint ('\\nGot connection from ', addr)\r\n\r\nwhile True:\r\n\t#receive data send by client\r\n\tdata = c.recv(1024)\r\n\t#decode, because client send it in bytes conversion\r\n\tdata = data.decode('utf-8')\r\n\tprint (data)\r\n\t#split the data\r\n\tdata = data.split(' ')\r\n\tfor i in range(3):\r\n\t\tprint ('data',i,':',data[i])\r\n\t#make the operand become an integer not bytes/string\r\n\tdata[0] = int(data[0])\r\n\tdata[2] = int(data[2])\r\n\r\n\t#condition to calculate\r\n\tif data[1]=='*':\r\n\t\tres = data[0]*data[2]\r\n\telif data[1]=='/':\r\n\t\tres = int(data[0]/data[2]) #in order to the result is not float data type\r\n\telif data[1]=='+':\r\n\t\tres = data[0]+data[2]\r\n\telif data[1]=='-':\r\n\t\tres = data[0]-data[2]\r\n\telse: #if client input the operator exclude 4 operator provided by server\r\n\t\tprint ('The operand is not defined in this socket.')\r\n\r\n\tprint ('result is',res,'\\n')\r\n\t#result send to the client in encode 
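# start_session() above schedules the ping and list-refresh threads with
# cherrypy's BackgroundTask plugin; the pattern in isolation (interval in
# seconds, then the callable and its argument list; names illustrative):
import cherrypy

def heartbeat(tag):
    print('still alive:', tag)

task = cherrypy.process.plugins.BackgroundTask(30, heartbeat, ['peers'])
task.start()   # re-runs heartbeat('peers') every 30 seconds
# task.cancel() stops the loop when the server shuts down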
bytes again\r\n\tres = bytes(str(res), 'utf-8')\r\n\tc.send(res)\r\n\r\n#server close\r\ns.close()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"490952707","text":"##############################################################################\n# Removal of the \"__license__\" line or content from \"__license__\", or removal\n# of \"__author__\" in this or any constituent # component or file constitutes a\n# violation of the licensing and copyright agreement.\n__author__ = \"Mike Rightmire\"\n__copyright__ = \"BioCom Software\"\n__license__ = \"Telemend\"\n__license_file__= \"Clause1.PERPETUAL_AND_UNLIMITED_LICENSING_TO_THE_CLIENT.py\"\n__version__ = \"0.9.6.0\"\n__maintainer__ = \"Mike Rightmire\"\n__email__ = \"Mike.Rightmire@BiocomSoftware.com\"\n__status__ = \"Development\"\n##############################################################################\n\n\nfrom BiocomCommon.loghandler import log\n\ndef _format_original_error(e):\n return \"\".join([str(e), \". \"])\n# return \"\".join([\"[Original error: \", str(e), \"]\"])\n\ndef _log_error(message, e):\n # Format message\n e = \"\".join([str(_format_original_error(e)), message])\n # Send to log\n log.error(e)\n return\n","sub_path":"BiocomCommon/errorhandler/formatters.py","file_name":"formatters.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"64219309","text":"# -*- coding: utf-8 -*-\n\"\"\"\nA mixin to use on your dougrain Document.\n\n:author: 2013, Pascal Hartig \n:license: BSD\n\"\"\"\n\nfrom __future__ import absolute_import, print_function, unicode_literals\nfrom dougrain.document import mutator\nfrom .form import Form\n\n\nFORMS_KEY = '_forms'\n\n\nclass FormsMixin(object):\n\n def form(self, href, **kwargs):\n \"\"\"Returns a new form relative to this resource.\"\"\"\n\n return Form(dict(href=href, **kwargs), self.base_uri)\n\n @mutator()\n def set_form(self, rel, target, **kwargs):\n \"\"\"Adds a form to the document.\n\n Calling code should use this method to add forms instead of\n modifying ``forms`` directly.\n\n This method adds a form to the given ``target`` to the document with\n the given ``rel``.\n\n If ``target`` is a string, a form is added with ``target`` as its\n ``href`` property and other properties from the keyword arguments.\n\n If ``target`` is a ``Form`` object, it is added to the document and the\n keyword arguments are ignored.\n\n Arguments:\n\n - ``rel``: a string specifying the link relationship type of the link.\n It should be a well-known link relation name from the IANA registry\n (http://www.iana.org/assignments/link-relations/link-relations.xml),\n a full URI, or a CURIE.\n - ``target``: the action of the form.\n\n \"\"\"\n\n # Currently only used to match forms itself\n if hasattr(target, 'as_form'):\n form = target.as_form()\n else:\n form = self.form(target, **kwargs)\n\n forms = self.o.setdefault(FORMS_KEY, {})\n\n new_form = form.as_object()\n # Replace the current form instead of appending it\n forms[rel] = new_form\n\n @mutator()\n def delete_form(self, rel=None):\n \"\"\"Removes a form resource from this document identified by its\n ``rel``.\n\n Arguments:\n - ``rel``: an optional string specifying form relationship type to be\n removed. 
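# A minimal client for the calculator server a little above: connect to
# port 3000, send "a op b" as one space-separated string, read the result
# back. The host is assumed local; the server must be running first.
import socket

s = socket.socket()
s.connect(('127.0.0.1', 3000))
s.send('7 * 6'.encode('utf-8'))
print(s.recv(1024).decode('utf-8'))  # 42
s.close()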
If omitted, all forms will be removed.\n \"\"\"\n\n if FORMS_KEY not in self.o:\n return\n\n if rel is None:\n for rel in self.o[FORMS_KEY]:\n self.delete_form(rel)\n\n return\n\n if rel not in self.o[FORMS_KEY]:\n return\n\n del self.o[FORMS_KEY][rel]\n","sub_path":"dougrain_forms/mixin.py","file_name":"mixin.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"150262951","text":"from django.contrib import admin\nfrom . import models\n\n# Register your models here.\n\nclass CommentAdmin(admin.ModelAdmin):\n list_display = [\n 'pk',\n 'comment_text',\n 'author',\n 'created',\n 'content_type',\n 'object_id',\n ]\n\nadmin.site.register(models.Comment, CommentAdmin) \n\n","sub_path":"src/comments/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"620920471","text":"##################################\n# fichier 05-grand-inventaire-obligatoire.py\n# nom de l'exercice : Grand inventaire\n# url : http://www.france-ioi.org/algo/task.php?idChapter=651&idTask=0&sTab=task&iOrder=7\n# type : obligatoire\n#\n# Chapitre : chapitre-2-decouverte-tableaux\n#\n# Compétence développée : \n#\n# auteur : \n##################################\n\n# chargement des modules\n\n\n# mettre votre code ici\n\nproduits = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\nnbOperations = int(input())\n\nfor loop in range(nbOperations):\n numeroIngredient = int(input())\n quantite = int(input())\n numeroIngredient = numeroIngredient - 1\n produits[numeroIngredient] = produits[numeroIngredient] + quantite\n \nidProduit = 0\nfor loop in range(10):\n print(produits[idProduit])\n idProduit = idProduit + 1\n","sub_path":"niveau-02/chapitre-2-decouverte-tableaux/05-grand-inventaire-obligatoire.py","file_name":"05-grand-inventaire-obligatoire.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"646670533","text":"# RPVR UDP SOCKET CLIENT\n\nimport socket\nimport os\nimport time\n\n# adjust port if necessary\nport = 6969\n\nnewSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nnewSocket.bind(('', port))\n\nwhile True:\n os.system(\"cls\")\n data, serverIP = newSocket.recvfrom(65)\n values = data.decode(\"utf-8\")\n values = values.split(\"$\")\n\n print(\" Gyro X: \"+values[0])\n print(\" Gyro Y: \"+values[1])\n print(\" Gyro Z: \"+values[2])\n print(\"Accel X: \"+values[3])\n print(\"Accel Y: \"+values[4])\n print(\"Accel Z: \"+values[5]+\"\\n\")\n\n time.sleep(0.05)","sub_path":"rpvrClient_UDP.py","file_name":"rpvrClient_UDP.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"225344434","text":"#!/usr/bin/env python3\n\nfrom tkinter import *\nimport tkinter as tk\n\nroot = Tk()\nroot.title(\"Tk Dropdown examples\")\n\n# Add a grid\nmainframe = Frame(root)\nmainframe.grid(column=0, row=0, sticky=\"nwes\")\nmainframe.columnconfigure(0, weight=1)\nmainframe.rowconfigure(0, weight=1)\nmainframe.pack(pady=100, padx=100)\n\n# Create a tkinter variable\ntkvar = StringVar(root)\n\n\n# Dictionary with options\nchoices = {'Pizza', 'Lasagne', \"Fries\", \"Potatoes\"}\ntkvar.set('Pizza') # set the default\n\npopupMenu = OptionMenu(mainframe, tkvar, *choices)\nLabel(mainframe, text=\"Choose a dish\").grid(row=1, 
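# set_form() above keeps one form per rel under the document's "_forms"
# key, replacing rather than appending on repeat calls. A plain-dict
# sketch of the resulting structure (no dougrain objects involved):
doc = {}
forms = doc.setdefault('_forms', {})
forms['create'] = {'href': '/widgets', 'method': 'POST'}
forms['create'] = {'href': '/widgets', 'method': 'PUT'}  # replaces POST form
print(doc)  # {'_forms': {'create': {'href': '/widgets', 'method': 'PUT'}}}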
column=1)\npopupMenu.grid(row=2, column=1)\n\n\n# on change dropdown value\ndef change_dropdown(*args):\n print(tkvar.get())\n\n\n# link function to change dropdown\ntkvar.trace('w', change_dropdown)\n\nroot.mainloop()","sub_path":"Sample/dropdown_menu.py","file_name":"dropdown_menu.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"615426017","text":"#import anaconda\nimport random\nimport nltk\n\n\nBOT_CONFIG = {\n 'intents': {'hello': {'examples': ['Привет', 'Здравствуйте', 'Добрый день',\"привет\",\"как дела?\"],\\\n 'responses': ['Привет, человек', 'Здравствуйте!', 'Шалом, юзер']},\\\n 'bye': {'examples': ['Пока', 'Досвидания', 'Увидимся','пока',\"покакашка\"],\\\n 'responses': ['Прощай, человек. Приходи ещё.', 'Увидимся']}},\\\n 'failure_phrases': ['Мне непонятно','Перефразируйте, пожалуйста','Не умею отвечать на такое',\\\n \"перефразируйте\",\"мне не понятно\", \"что-то я совсем непонятно\"]\\\n }\n\n\n\ndef get_failure_phrase():\n failure_phrase =BOT_CONFIG['failure_phrases']\n return random.choice(failure_phrase)\n\n\n\ndef get_intent(question):\n \n for intent, intent_value in BOT_CONFIG[\"intents\"].items():\n #print(intent, intent_value['examples'])\n for example in intent_value['examples']:\n #print (example)\n #if example != question:\n d = nltk.edit_distance(example.lower(), question.lower())\n #print(question)\n diff = d/ len(example)\n if diff <0.4:\n return intent\n\ndef get_generative_answer(question):\n return\n\ndef get_answer_by_intent(intent):\n phrases = BOT_CONFIG[\"intents\"][intent]['responses']\n #return print(phrases[0])\n return print(random.choice(phrases) ) \n\ndef bot(question):\n #nlu\n intent = get_intent(question)\n \n #получение ответа\n #заготовленные ответы\n if intent:\n return get_answer_by_intent(intent)\n print(get_answer_by_intent(intent))\n \n #генерация\n answer = get_generative_answer(question)\n if answer:\n return answer\n #Заглушка\n return get_failure_phrase()\n\n\n\n\n\nquestion = \"пивет\"\nbot(question)\nprint(\"161020\")\n\n","sub_path":"nlu1.py","file_name":"nlu1.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"68659419","text":"import numpy as np\nimport re\n\ninputfile = \"input\"\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\n\nneighbouring_coordinates = [np.array((1, 1, 1, 1), dtype=int) - (i % 3, (i // 3) % 3, (i // 9) % 3, i // 27) for i in\n range(3 * 3 * 3 * 3) if i != (27 * 3) // 2]\n\n\ndef append_if_exists(dictionary: dict, key, val):\n if key not in dictionary.keys():\n dictionary[key] = []\n\n dictionary[key].append(val)\n\n\ndef main():\n lines = []\n with open(inputfile) as infile:\n while True:\n line = infile.readline()\n if not line:\n break\n lines.append(line.strip())\n\n print(f\"Read {len(lines)} lines\")\n\n allergen_candidates = {}\n cannot_be_allergen = {}\n all_ingredients = set()\n\n for line in lines:\n parts = line.split(\" (contains \")\n ingredients = set(parts[0].split(\" \"))\n allergens = parts[1][:-1].split(\", \")\n all_ingredients.update(ingredients)\n\n for allergen in allergens:\n if allergen not in allergen_candidates.keys():\n allergen_candidates[allergen] = set()\n cannot_be_allergen[allergen] = set()\n\n allergen_candidates[allergen].update(ingredients)\n\n cannot_be_allergen[allergen].update(allergen_candidates[allergen].difference(ingredients))\n 
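# The day21 loop around this point narrows each allergen's candidates: the
# candidate set is the intersection of the ingredient sets of every line
# listing that allergen (the intersection_update call continues just
# below). A worked miniature with illustrative data:
lines = [({'mxmxvkd', 'kfcds', 'sqjhc'}, ['dairy', 'fish']),
         ({'trh', 'fvjkl', 'mxmxvkd'}, ['dairy'])]
candidates = {}
for ingredients, allergens in lines:
    for allergen in allergens:
        if allergen in candidates:
            candidates[allergen] &= ingredients
        else:
            candidates[allergen] = set(ingredients)
print(candidates['dairy'])  # {'mxmxvkd'}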
allergen_candidates[allergen].intersection_update(ingredients)\n\n not_allergen = all_ingredients.copy()\n for allergen, its_allergen_candidates in allergen_candidates.items():\n not_allergen.difference_update(its_allergen_candidates)\n\n wrong_approach = set()\n for allergen, not_allergen_candidate in cannot_be_allergen.items():\n print(f\"Coan be {allergen}: {allergen_candidates[allergen]}\")\n if len(wrong_approach) == 0:\n wrong_approach.update(cannot_be_allergen[allergen])\n else:\n wrong_approach.intersection_update(cannot_be_allergen[allergen])\n\n\n count = 0\n for line in lines:\n parts = line.split(\" (contains \")\n ingredients = set(parts[0].split(\" \"))\n for is_not_allergen in not_allergen:\n count += is_not_allergen in ingredients\n\n print(f\"Number of non-allergen items: {len(not_allergen)}, count appear: {count} ({not_allergen})\")\n print(f\"Other approach: {len(wrong_approach)}: {wrong_approach}\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/day21.py","file_name":"day21.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"511013240","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 2 11:22:56 2019\n\n@author: yolandatiao\n\"\"\"\n\n########## shRNA library analysis ##########\n# Author: Huitian (Yolanda) Diao\n# Apr. 2nd, 2019\n\n# Input files:\n# - csv format\n# - 3 columns: type, shRNA, count\n# type: target / control\n# shRNA: genename.1\n# count: integer\n\n########## Import ##########\nfrom __future__ import print_function\nimport csv\nimport glob\nimport os\nfrom astropy.io import ascii\nfrom astropy.table import Table, join, vstack\nfrom scipy.special import gammaln\nfrom scipy.special import psi\nfrom scipy.misc import factorial\nfrom scipy.optimize import fmin_l_bfgs_b as optim\nimport sys\nfrom sklearn.preprocessing import quantile_transform\nimport scipy.stats as st\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n########## Self defined functions ##########\ndef fit_nbinom(X, initial_params=None):\n # Copyright (C) 2014 Gokcen Eraslan\n # https://github.com/gokceneraslan/fit_nbinom/blob/master/fit_nbinom.py\n # X is a numpy array representing the data\n # initial params is a numpy array representing the initial values of\n # size and prob parameters\n infinitesimal = np.finfo(np.float).eps\n\n def log_likelihood(params, *args):\n r, p = params\n X = args[0]\n N = X.size\n\n #MLE estimate based on the formula on Wikipedia:\n # http://en.wikipedia.org/wiki/Negative_binomial_distribution#Maximum_likelihood_estimation\n result = np.sum(gammaln(X + r)) \\\n - np.sum(np.log(factorial(X))) \\\n - N*(gammaln(r)) \\\n + N*r*np.log(p) \\\n + np.sum(X*np.log(1-(p if p < 1 else 1-infinitesimal)))\n\n return -result\n\n def log_likelihood_deriv(params, *args):\n r, p = params\n X = args[0]\n N = X.size\n\n pderiv = (N*r)/p - np.sum(X)/(1-(p if p < 1 else 1-infinitesimal))\n rderiv = np.sum(psi(X + r)) \\\n - N*psi(r) \\\n + N*np.log(p)\n\n return np.array([-rderiv, -pderiv])\n\n if initial_params is None:\n #reasonable initial values (from fitdistr function in R)\n m = np.mean(X)\n v = np.var(X)\n size = (m**2)/(v-m) if v > m else 10\n\n #convert mu/size parameterization to prob/size\n p0 = size / ((size+m) if size+m != 0 else 1)\n r0 = size\n initial_params = np.array([r0, p0])\n\n bounds = [(infinitesimal, None), (infinitesimal, 1)]\n optimres = optim(log_likelihood,\n x0=initial_params,\n 
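# fit_nbinom() (its definition continues just below) returns a dict with
# 'size' and 'prob'; later in this script each value is turned into a
# percentile through the negative-binomial CDF. That scoring step alone,
# with made-up parameters:
import scipy.stats as st

params = {'size': 5.0, 'prob': 0.3}
counts = [0, 4, 12, 40]
percentiles = [st.nbinom.cdf(c, params['size'], params['prob']) * 100
               for c in counts]
print([round(p, 1) for p in percentiles])  # monotone in the raw counts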
#fprime=log_likelihood_deriv,\n args=(X,),\n approx_grad=1,\n bounds=bounds)\n\n params = optimres[0]\n return {'size': params[0], 'prob': params[1]}\n\ndef pctgTotal(inFile):\n #inFile = \"/Volumes/Yolanda/CRF_Screen/InVivo/1_0_Raw/3_flt/P1-7_Input_flt.csv\"\n #print(inFile)\n inFileName = inFile.split(\"/\")[-1].replace(\".csv\", \"\")\n outFileName = inFileName + \"_pctg.csv\"\n inTab = ascii.read(inFile)\n countList = inTab.columns[len(inTab.colnames)-1]\n countSum = sum(countList)\n pctgList = [float(x)/countSum*100 for x in countList]\n inTab['pctg'] = pctgList\n ascii.write(inTab, outFileName, format=\"csv\", overwrite=\"True\")\n\n\ndef ZScore(inFile):\n #inFile = \"/Volumes/Yolanda/CRF_Screen/InVivo/1_0_Raw/3_flt/P1-7_Input_flt.csv\"\n #print(inFile)\n inFileName = inFile.split(\"/\")[-1].replace(\".csv\", \"\")\n outFileName = inFileName + \"_ZScore.csv\"\n inTab = ascii.read(inFile)\n countList = inTab.columns[2]\n countSum = sum(countList)\n countAvg = countSum/len(countList)\n countStd = np.std(np.array(countList))\n ZList = [((float(x) - countAvg)/countStd) for x in countList]\n inTab['ZScore'] = ZList\n ascii.write(inTab, outFileName, format=\"csv\", overwrite=\"True\")\n\ndef fltOutlier(inFile):\n inFileName = inFile.split(\"/\")[-1].replace(\"_ZScore.csv\", \"\")\n outFileName = inFileName + \"_fltOutlier.csv\"\n with open(inFile, \"r\") as fin:\n with open(outFileName, \"w\") as fout:\n rfin = csv.reader(fin, delimiter=\",\")\n wfout = csv.writer(fout, delimiter=\",\")\n header = next(rfin)\n newheader = header[0:-1]\n wfout.writerow(newheader)\n for row in rfin:\n if ((float(row[3]) <= 2.5) and (float(row[3]) >= -2.5)):\n wfout.writerow(row[0:-1])\n \ndef nbPctg(inFile):\n #inFile = \"/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190401/P1-7_Input_flt_pctg.csv\"\n print(inFile)\n inFileName = inFile.split(\"/\")[-1].replace(\".csv\", \"\")\n outFileName = inFileName + \"_nbPctg.csv\"\n inTab = ascii.read(inFile)\n pctgList = inTab.columns[3]\n pctgList_million = [x*1000000 for x in pctgList]\n allNb = fit_nbinom(np.array(pctgList_million))\n nbPctgList = [st.nbinom.cdf(x, allNb['size'], allNb['prob'])*100 for x in pctgList_million] \n inTab['nbPctg'] = nbPctgList\n ascii.write(inTab, outFileName, format=\"csv\", overwrite=\"True\") \n\ndef nbPctgTotal(inFile, allNb):\n #inFile = \"/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190401/P1-7_Input_flt_pctg.csv\"\n print(inFile)\n inFileName = inFile.split(\"/\")[-1].replace(\".csv\", \"\")\n outFileName = inFileName + \"_nbPctgTotal.csv\"\n inTab = ascii.read(inFile)\n pctgList = inTab.columns[len(inTab.colnames)-1]\n pctgList_million = [x*1000000 for x in pctgList]\n nbPctgList = [st.nbinom.cdf(x, allNb['size'], allNb['prob'])*100 for x in pctgList_million] \n inTab['nbPctg'] = nbPctgList\n ascii.write(inTab, outFileName, format=\"csv\", overwrite=\"True\") \n \ndef nbRawCount(inFile):\n #inFile = \"/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190401/P1-7_Input_flt_pctg.csv\"\n print(inFile)\n inFileName = inFile.split(\"/\")[-1].replace(\".csv\", \"\")\n outFileName = inFileName + \"_nbRawCount.csv\"\n inTab = ascii.read(inFile)\n countList = inTab.columns[2]\n allNb = fit_nbinom(np.array(countList))\n nbRawList = [st.nbinom.cdf(x, allNb['size'], allNb['prob'])*100 for x in countList] \n inTab['nbRawCount'] = nbRawList\n ascii.write(inTab, outFileName, format=\"csv\", overwrite=\"True\") \n\n########## Main ##########\n#----- Directory\nwk_dir = 
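# ZScore() + fltOutlier() above implement a plain z-score gate that keeps
# rows with |z| <= 2.5. The same filter in a few numpy lines (threshold
# taken from fltOutlier; the counts are illustrative):
import numpy as np

counts = np.array([3, 5, 4, 6, 5, 4, 3, 5, 4, 200])
z = (counts - counts.mean()) / counts.std()
kept = counts[np.abs(z) <= 2.5]
print(kept)  # the 200 outlier is dropped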
\"/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/OldTrials/20190403_Exp56_nbPctgToAll\"\nos.chdir(wk_dir)\n\n#----- Calculate Z-Score, filter outliers\nfor file in glob.glob(\"/Volumes/Yolanda/CRF_Screen/InVivo/1_0_Raw/3_combined/Exp56/*.csv\"):\n ZScore(file)\n\nfor file in glob.glob(\"*ZScore.csv\"):\n fltOutlier(file)\n\n#----- Calculate percentages of counts in each groups\nfor file in glob.glob(\"*fltOutlier.csv\"):\n pctgTotal(file)\n\n#----- Calculate percentiles of count percentage in total distribution\npctgmillion_list = []\nfor file in glob.glob(\"*pctg.csv\"):\n tab = ascii.read(file)\n pctgmillion_list += [x*1000000 for x in list(tab[\"pctg\"])]\npctgmillion_list.sort()\nplt.plot(pctgmillion_list)\nplt.show\nall_nb = fit_nbinom(np.array(pctgmillion_list))\n\nfor file in glob.glob(\"*pctg.csv\"):\n nbPctgTotal(file, all_nb)\n \n \n###--- Gate comparisons\n#----- Q1vQ4\ndef Q4minusQ1(q4File, q1File):\n #q4File = \"/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190401/P1-7_Q4_flt_fltOutlier_pctg_nbPctgTotal.csv\"\n #q1File = \"/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190401/P1-7_Q1_flt_fltOutlier_pctg_nbPctgTotal.csv\"\n group = q4File.split(\"/\")[-1].split(\"_\")[0]\n outName = group + \"_Q4minusQ1.csv\"\n q4Tab = ascii.read(q4File)\n q1Tab = ascii.read(q1File)\n del q4Tab['count']\n del q4Tab['pctg']\n del q4Tab['type']\n del q1Tab['count']\n del q1Tab['pctg']\n del q1Tab['type']\n q4Tab[\"nbPctg\"].name = \"nbPctg_Q4\"\n q1Tab[\"nbPctg\"].name = \"nbPctg_Q1\"\n allTab = join(q4Tab, q1Tab, join_type=\"inner\", keys=\"shRNA\")\n q4minusq1 = [x-y for index, (x,y) in enumerate(zip(list(allTab[\"nbPctg_Q4\"]), list(allTab[\"nbPctg_Q1\"])))]\n allTab[\"q4minusq1_nbPctg\"] = q4minusq1\n ascii.write(allTab, outName, format=\"csv\", overwrite=True)\n \n\nP17Q4 = \"P1-7_Q4_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP17Q1 = \"P1-7_Q1_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nQ4minusQ1(P17Q4, P17Q1)\n\nP814Q4 = \"P8-14_Q4_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP814Q1 = \"P8-14_Q1_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nQ4minusQ1(P814Q4, P814Q1)\n\nP1521Q4 = \"P15-21_Q4_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP1521Q1 = \"P15-21_Q1_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nQ4minusQ1(P1521Q4, P1521Q1)\n\n\n\ndef Q3minusOther(q4File, q3File, q2File, q1File):\n #q4File = \"/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190401/P1-7_Q4_flt_fltOutlier_pctg_nbPctgTotal.csv\"\n #q1File = \"/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190401/P1-7_Q1_flt_fltOutlier_pctg_nbPctgTotal.csv\"\n group = q4File.split(\"/\")[-1].split(\"_\")[0]\n outName = group + \"_Q3minusOther.csv\"\n q4Tab = ascii.read(q4File)\n q3Tab = ascii.read(q3File)\n q2Tab = ascii.read(q2File)\n q1Tab = ascii.read(q1File)\n del q4Tab['count']\n del q3Tab['count']\n del q2Tab['count']\n del q1Tab['count']\n del q4Tab['pctg']\n del q3Tab['pctg']\n del q2Tab['pctg']\n del q1Tab['pctg']\n del q4Tab['type']\n del q3Tab['type']\n del q2Tab['type']\n del q1Tab['type']\n q4Tab[\"nbPctg\"].name = \"nbPctg_Q4\"\n q3Tab[\"nbPctg\"].name = \"nbPctg_Q3\"\n q2Tab[\"nbPctg\"].name = \"nbPctg_Q2\"\n q1Tab[\"nbPctg\"].name = \"nbPctg_Q1\"\n allTab = join(q4Tab, q3Tab, join_type=\"inner\", keys=\"shRNA\")\n allTab = join(allTab, q2Tab, join_type=\"inner\", keys=\"shRNA\")\n allTab = join(allTab, q1Tab, join_type=\"inner\", keys=\"shRNA\")\n avg124 = [(x+y+z)/3 for index, (x,y,z) in enumerate(zip(list(allTab[\"nbPctg_Q1\"]),list(allTab[\"nbPctg_Q2\"]),list(allTab[\"nbPctg_Q4\"])))]\n q3minusOther = [x-y for index, (x,y) in 
enumerate(zip(list(allTab[\"nbPctg_Q3\"]), avg124))]\n allTab[\"q3minusOther_nbPctg\"] = q3minusOther\n ascii.write(allTab, outName, format=\"csv\", overwrite=True)\n\nP17Q4 = \"P1-7_Q4_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP17Q3 = \"P1-7_Q3_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP17Q2 = \"P1-7_Q2_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP17Q1 = \"P1-7_Q1_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nQ3minusOther(P17Q4, P17Q3, P17Q2, P17Q1)\n\nP814Q4 = \"P8-14_Q4_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP814Q3 = \"P8-14_Q3_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP814Q2 = \"P8-14_Q2_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP814Q1 = \"P8-14_Q1_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nQ3minusOther(P814Q4, P814Q3, P814Q2, P814Q1)\n\nP1521Q4 = \"P15-21_Q4_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP1521Q3 = \"P15-21_Q3_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP1521Q2 = \"P15-21_Q2_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP1521Q1 = \"P15-21_Q1_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nQ3minusOther(P1521Q4, P1521Q3, P1521Q2, P1521Q1)\n\n\ndef InputVsRest(q4File, q3File, q2File, q1File,inputFile):\n #q4File = \"/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190401/P1-7_Q4_flt_fltOutlier_pctg_nbPctgTotal.csv\"\n #q1File = \"/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190401/P1-7_Q1_flt_fltOutlier_pctg_nbPctgTotal.csv\"\n group = q4File.split(\"/\")[-1].split(\"_\")[0]\n outName = group + \"_InputMinusAvg.csv\"\n q4Tab = ascii.read(q4File)\n q3Tab = ascii.read(q3File)\n q2Tab = ascii.read(q2File)\n q1Tab = ascii.read(q1File)\n inputTab = ascii.read(inputFile)\n del q4Tab['count']\n del q3Tab['count']\n del q2Tab['count']\n del q1Tab['count']\n del inputTab['count']\n del q4Tab['pctg']\n del q3Tab['pctg']\n del q2Tab['pctg']\n del q1Tab['pctg']\n del inputTab['pctg']\n del q4Tab['type']\n del q3Tab['type']\n del q2Tab['type']\n del q1Tab['type']\n del inputTab['type']\n q4Tab[\"nbPctg\"].name = \"nbPctg_Q4\"\n q3Tab[\"nbPctg\"].name = \"nbPctg_Q3\"\n q2Tab[\"nbPctg\"].name = \"nbPctg_Q2\"\n q1Tab[\"nbPctg\"].name = \"nbPctg_Q1\"\n inputTab[\"nbPctg\"].name = \"nbPctg_input\"\n allTab = join(q4Tab, q3Tab, join_type=\"inner\", keys=\"shRNA\")\n allTab = join(allTab, q2Tab, join_type=\"inner\", keys=\"shRNA\")\n allTab = join(allTab, q1Tab, join_type=\"inner\", keys=\"shRNA\")\n allTab = join(allTab, inputTab, join_type=\"inner\", keys=\"shRNA\")\n avgAll = [(a+b+c+d)/4 for index, (a,b,c,d) in enumerate(zip(list(allTab[\"nbPctg_Q1\"]),list(allTab[\"nbPctg_Q2\"]),list(allTab[\"nbPctg_Q3\"]),list(allTab[\"nbPctg_Q4\"])))]\n InminusAvg = [x-y for index, (x,y) in enumerate(zip(list(allTab[\"nbPctg_input\"]), avgAll))]\n allTab[\"inputMinusAvg_nbPctg\"] = InminusAvg\n ascii.write(allTab, outName, format=\"csv\", overwrite=True)\n\nP17Q4 = \"P1-7_Q4_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP17Q3 = \"P1-7_Q3_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP17Q2 = \"P1-7_Q2_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP17Q1 = \"P1-7_Q1_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP17input = \"P1-7_Input_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nInputVsRest(P17Q4, P17Q3, P17Q2, P17Q1, P17input)\n\nP814Q4 = \"P8-14_Q4_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP814Q3 = \"P8-14_Q3_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP814Q2 = \"P8-14_Q2_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP814Q1 = \"P8-14_Q1_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP814input = \"P8-14_Input_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nInputVsRest(P814Q4, P814Q3, P814Q2, P814Q1, P814input)\n\nP1521Q4 = \"P15-21_Q4_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP1521Q3 = 
\"P15-21_Q3_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP1521Q2 = \"P15-21_Q2_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP1521Q1 = \"P15-21_Q1_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nP1521input = \"P15-21_Input_flt_fltOutlier_pctg_nbPctgTotal.csv\" \nInputVsRest(P1521Q4, P1521Q3, P1521Q2, P1521Q1, P1521input)\n\n\n###--- Average effect by pool (calculate only the last column)\ndef avgByGene(inFile):\n #inFile = \"/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190401/GateComparisons/P1-7_InputMinusAvg.csv\"\n outFile = inFile.replace(\".csv\", \"_byGene.csv\")\n inTab = ascii.read(inFile)\n inTabCols = inTab.colnames\n for x in range(1, len(inTabCols)-1):\n del inTab[inTabCols[x]]\n inTabshRNA = list(inTab[\"shRNA\"])\n inTabGene = [x.split(\".\")[0] for x in inTabshRNA]\n inTab[\"geneName\"] = inTabGene\n inTab_byGeneName = inTab.group_by(\"geneName\")\n \n with open(outFile, \"w\") as fout:\n wfout = csv.writer(fout, delimiter=\",\")\n wfout.writerow([\"GeneName\", inTab.colnames[1]])\n for groupX in inTab_byGeneName.groups:\n nameX = groupX[\"geneName\"][0]\n groupXAvg = sum(list(groupX.columns[1]))/len(list(groupX.columns[1]))\n newRow = [nameX, groupXAvg]\n wfout.writerow(newRow)\n\nos.chdir(\"/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/OldTrials/20190403_Exp56_nbPctgToAll/GateComparisons\")\nfor file in glob.glob(\"*.csv\"):\n avgByGene(file)\n\n###--- Integrate data\ndef mergeTables(fileList):\n #fileList = [\"P15-21_Q4minusQ1_byGene.csv\"]\n outFileName = fileList[0].split(\"_\")[-2] + \"_\" + fileList[0].split(\"_\")[-1]\n with open(outFileName, \"w\") as fout:\n wfout = csv.writer(fout, delimiter=\",\")\n wfout.writerow([\"Pool\", \"Gene\", \"nbPctgShift\"])\n for file in fileList:\n with open(file, \"r\") as fin:\n pool = file.split(\"_\")[0]\n rfin = csv.reader(fin, delimiter=\",\")\n next(rfin)\n for row in rfin:\n wfout.writerow([pool] + row)\n \nmergeTables([\"P1-7_InputMinusAvg_byGene.csv\",\n \"P8-14_InputMinusAvg_byGene.csv\",\n \"P15-21_InputMinusAvg_byGene.csv\"])\n\nmergeTables([\"P1-7_Q3minusOther_byGene.csv\",\n \"P8-14_Q3minusOther_byGene.csv\",\n \"P15-21_Q3minusOther_byGene.csv\"])\n\nmergeTables([\"P1-7_Q4minusQ1_byGene.csv\",\n \"P8-14_Q4minusQ1_byGene.csv\",\n \"P15-21_Q4minusQ1_byGene.csv\"])\n\n\n\n\n","sub_path":"0.1_Codes_Invivo/old_codes/shRNAlibrary_analysis_0403_Exp56_nbPctgToTotal.py","file_name":"shRNAlibrary_analysis_0403_Exp56_nbPctgToTotal.py","file_ext":"py","file_size_in_byte":15597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"224473543","text":"from libqtile.manager import Key, Screen, Group\nfrom libqtile.command import lazy\nfrom libqtile import layout, bar, widget\n\n# Cleaner key aliases\nalt = \"mod1\"\nsuper = \"mod4\"\nshift = \"shift\"\ncontrol = \"control\"\n\ngroups = [\n Group(\"q\"),\n Group(\"w\"),\n Group(\"e\"),\n Group(\"r\"),\n Group(\"t\"),\n Group(\"y\"),\n Group(\"u\"),\n Group(\"i\"),\n Group(\"o\"),\n Group(\"p\"),\n]\n\nRUN = \"dmenu_run\"\nTERMINAL = \"gnome-terminal\"\nSUSPEND = \"dbus-send --system --print-reply --dest=org.freedesktop.UPower /org/freedesktop/UPower org.freedesktop.UPower.Suspend\"\n\nkeys = [\n Key([super], g.name, lazy.group[g.name].toscreen()) for g in groups\n] + [\n Key([super, shift], g.name, lazy.window.togroup(g.name)) for g in groups\n] + [\n Key([super], \"k\", lazy.layout.down()),\n Key([super], \"j\", lazy.layout.up()),\n Key([super, control], \"k\", lazy.layout.shuffle_down()),\n Key([super, control], \"j\", lazy.layout.shuffle_up()),\n 
Key([alt], \"Tab\", lazy.layout.next()),\n Key([super, shift], \"space\", lazy.layout.rotate()),\n Key([super, shift], \"Return\", lazy.layout.toggle_split()),\n Key([super], \"h\", lazy.to_screen(1)),\n Key([super], \"l\", lazy.to_screen(0)),\n Key([], \"F1\", lazy.spawn(TERMINAL)),\n Key([alt], \"F2\", lazy.spawn(RUN)),\n Key([super], \"Tab\", lazy.nextlayout()),\n Key([super, shift], \"c\", lazy.window.kill()),\n Key([super], \"Escape\", lazy.spawn(SUSPEND)),\n]\n\nlayouts = [\n layout.Max(),\n layout.Stack(stacks=2),\n layout.Tile(ratio=0.25),\n]\n\nscreens = [\n Screen(\n top=bar.Bar([\n widget.GroupBox(),\n widget.WindowName(),\n widget.Clock('%Y-%m-%d %H:%M'),\n widget.Systray(),\n widget.CPUGraph(width=200, graph_color='22FF44', fill_color='11AA11'),\n widget.MemoryGraph(width=200, graph_color='22FF44', fill_color='11AA11'),\n ], 24),\n ),\n]\n","sub_path":"user-configs/twm/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"349415018","text":"import json\nimport uuid\nimport operator\nimport collections\n\nimport psycopg2.errors\n\nimport estore.errors\n\ndef prepare_item(item):\n return collections.namedtuple('Consumer', ('id', 'name'))(*item)\n\nclass Event:\n def __init__(self, db):\n self.__db = db\n\n async def add_event(self, stream, name, version, body, headers=None):\n if not headers:\n headers = {}\n if not 'aggregate' in headers and '.' in name:\n headers['aggregate'], _ = name.split('.', 1)\n headers = json.dumps(headers)\n try:\n await self.__db.execute('CALL add_event(%s, %s, %s, %s, %s)', stream, name, version, body, headers)\n except psycopg2.errors.UniqueViolation:\n pass\n\n async def consume(self, consumer_id, callback):\n pass\n\n async def get_stream(self, stream_id, snapshot=True):\n query = \"\"\"\n SELECT\n e.id,\n e.seq,\n e.stream,\n e.created,\n e.version,\n e.name,\n e.body,\n e.headers\n FROM\n event AS e\n LEFT JOIN event AS x ON (x.name = 'Snapshot' AND x.stream = e.stream AND x.version>e.version)\n WHERE\n e.stream = %s AND x.id IS NULL\n ORDER BY\n e.version\"\"\"\n\n results = await self.__db.execute(query, stream_id)\n\n keys = list(map(operator.attrgetter('name'), results.description))\n async for item in results:\n yield dict(zip(keys, item))\n\nclass Consumer:\n def __init__(self, db):\n self.__db = db\n\n async def register(self, name):\n consumer_id = uuid.uuid4()\n try:\n await self.__db.execute(\n \"INSERT INTO consumer (id, name) VALUES (%s, %s)\", consumer_id, name)\n except psycopg2.errors.UniqueViolation:\n raise estore.errors.AlreadyExists(f\"User '{name}' already exists\")\n return consumer_id\n\n async def get_by_name(self, name):\n cur = await self.__db.execute(\"SELECT id, name FROM consumer WHERE name=%s\", name)\n if not cur.rowcount:\n raise estore.errors.DoesNotExist(f\"User '{name}' does not exist\")\n return prepare_item(await cur.fetchone())\n\n async def get_by_id(self, consumer_id):\n cur = await self.__db.execute(\"SELECT id, name FROM consumer WHERE id=%s\", consumer_id)\n if not cur.rowcount:\n raise estore.errors.DoesNotExist(f\"User by id '{consumer_id}' does not exist\")\n return prepare_item(await cur.fetchone())\n\n async def delete(self, consumer_id):\n cur = await self.__db.execute(\"DELETE FROM consumer WHERE id=%s\", consumer_id)\n if not cur.rowcount:\n raise estore.errors.DoesNotExist(f\"User by id '{consumer_id}' does not exist\")\n\n async def subscribe(self, consumer_id, 
pattern):\n subscription_id = uuid.uuid4()\n cur = await self.__db.execute(\n \"INSERT INTO subscription (id, name, routing_key) VALUES (%s, %s, %s)\",\n subscription_id, consumer_id, pattern)\n","sub_path":"estore/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"579998187","text":"N = int(input())\n\ndef is_include7(number):\n ans = False\n for n in range(13):\n # is the digit in the 10**n place (or, just below, the 8**n place) equal to 7?\n if (number // 10**n) % 10 == 7:\n ans = True\n break\n if (number // 8**n) % 8 == 7:\n ans = True\n break\n return ans\n\n\ncount = 0\nfor n in range(1, N+1):\n if not is_include7(n):\n count += 1\n\nprint(count)","sub_path":"ABC176-200/186/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"223094253","text":"#!/usr/bin/env python3\n\nimport pandas as pd\nimport glob\nimport os\n\narr_bus = ['17','40','68', '81', '138-1']\narr_time = ['AM','PM']\n\nfor num in arr_bus:\n input_path = 'csv/busStop/busStop_by'+num\n for time in arr_time:\n all_files = glob.glob(os.path.join(input_path, \\\n 'information_predict_arrive_busStop_by'+ num +'*'+ time +'.csv'))\n output_file = 'csv/Extract_Refine_by' + num +\"_\"+time+'.csv'\n all_data_frames = []\n\n for input_file in all_files:\n data_frame = pd.read_csv(input_file ,header = 1,index_col=1,encoding=\"euc-kr\")\n data_frame_drop = data_frame.dropna(axis=0)\n\n data_frame_drop['Current_Time'] = pd.to_datetime(data_frame_drop[\"Current_Time\"])\n all_data_frames.append(data_frame_drop)\n\n # Print the results.\n print(all_data_frames)\n\n data_frame_concat = pd.concat(all_data_frames, axis=0, ignore_index=True)\n data_frame_concat.to_csv(output_file, index = False)","sub_path":"Project/05_Extract_BusStop_Refine_Sum/05_Extract_Refine.py","file_name":"05_Extract_Refine.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"624874924","text":"#coding: utf-8\n\nfrom os import listdir\nfrom os.path import isfile, join\nimport sys\nfrom time import gmtime, strftime\n\nMANUAL_CLASSIFICATION = sys.argv[1]\nMODEL_CLASSIFICATION = sys.argv[2]\nFILE_NAME = sys.argv[3]\n\ndef is_in(dir, file, cat):\n dir_cat = dir + cat + \"/\"\n for f in listdir(dir_cat):\n if isfile(join(dir_cat, f)):\n if(f == file):\n return True\n return False\n\n\ndef verify_is(log_file, dir, cat, dir2, link):\n texto = \"\"\n dir_cat = dir + cat + \"/\"\n for f in listdir(dir_cat):\n if isfile(join(dir_cat, f)):\n if(is_in(dir2, f, cat) != True):\n categories = [\"pos\", \"neg\", \"neu\"]\n categories.remove(cat)\n for categ in categories:\n if (is_in(dir2, f, categ) == True):\n texto += \"- %s: %s should be \\\"%s\\\", but is in \\\"%s\\\" \\n\" % (link, f, categ, cat)\n else:\n texto += \"- %s: %s is correct (%s)\\n\" % (link, f, cat)\n log_file.write(texto)\n\n\n\n# dir_ver1 = '../raw-data/1_release_hadoop_classified_sentistrength1/'\n# dir_ver2 = '../raw-data/1_release_hadoop_classified_manual/'\n\ndef verify():\n log = open('%s' % (FILE_NAME), 'a')\n texto = \"# %s \\n\" % (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n log.write(texto)\n link = \"https://github.com/jordaos/Analyzing-Hadoop-feelings/tree/master/raw-data/%s\" % \\\n ((MANUAL_CLASSIFICATION.split(\"/\"))[-2])\n\n verify_is(log, MODEL_CLASSIFICATION, \"pos\", MANUAL_CLASSIFICATION, link) # Check the positives
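For the ABC186 digit check in the record above, an equivalent string-based test is often easier to verify by eye. A hedged alternative sketch, with an illustrative function name that is not part of the original solution:

```python
# A number "contains 7" iff its decimal or octal representation has the
# digit 7; format(n, "o") renders n in base 8.
def contains_seven(n: int) -> bool:
    return "7" in str(n) or "7" in format(n, "o")

assert contains_seven(7) and contains_seven(15)        # 15 is 17 in octal
assert not any(contains_seven(n) for n in (1, 8, 10))  # 10 is 12 in octal
```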
\n verify_is(log, MODEL_CLASSIFICATION, \"neg\", MANUAL_CLASSIFICATION, link) # Check the negatives\n verify_is(log, MODEL_CLASSIFICATION, \"neu\", MANUAL_CLASSIFICATION, link) # Check the neutrals\n\n log.close()\n\nverify()","sub_path":"src/verify-similarity.py","file_name":"verify-similarity.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"5014140","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom .models import Item\nimport re\n\nclass ItemForm(forms.ModelForm):\n \n class Meta:\n model = Item\n fields = ['item_code', 'name', 'size', 'start_at', 'end_at', 'category', 'family', 'genus', 'comment', 'state']\n\n def clean_size(self):\n \n size = self.cleaned_data['size']\n if(size==None):\n print(size)\n return size \n \n if(re.search(r'[0-9]', size) != None):\n raise forms.ValidationError(\"Please enter digits in half-width characters.\")\n elif(size.find('cm')!=-1 or size.find('㎝')!=-1):\n raise forms.ValidationError(\"Please enter 'cm' in half-width characters.\")\n return size\n","sub_path":"item/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"560772936","text":"import os\nimport sys\nsys.path.insert(1, '/home/pi/tensorflow/models/research/object_detection')\nimport csv\nfrom time import sleep\nimport system_stats\nimport cv2\nimport numpy as np\nimport tensorflow as tf\n\nimport sys\n\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\n\nimport image\n#------------------------------------------------------------------------------------------------------------------------------------\n\n# This is needed since the working directory is the object_detection folder.\nsys.path.append('..')\n\n\nOD_FOLDER = '/home/pi/tensorflow/models/research/object_detection/'\nGRAPH_FOLDER = '/home/pi/tensorflow/models/research/object_detection/aircraft_v1'\n\nCWD_PATH = os.getcwd()\n\nPATH_TO_CKPT = os.path.join(GRAPH_FOLDER,'aircraft_v1.pb')\n\nPATH_TO_LABELS = os.path.join(OD_FOLDER,'data','aircraft_v1.pbtxt')\nNUM_CLASSES = 1\n## Load the label map.\n# Label maps map indices to category names, so that when the convolution\n# network predicts `5`, we know that this corresponds to `airplane`.\n# Here we use internal utility functions, but anything that returns a\n# dictionary mapping integers to appropriate string labels would be fine\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n# Load the Tensorflow model into memory.\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n sess = tf.Session(graph=detection_graph)\n# Define input and output tensors (i.e. data) for the object detection classifier
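The `ItemForm.clean_size` validator above rejects full-width input outright. An alternative, shown here only as a hedged sketch and not something the project itself does, is to normalize the text with NFKC before validating, which folds full-width digits and the square-centimetre symbol into ASCII:

```python
import unicodedata

def normalize_size(size: str) -> str:
    # NFKC maps full-width digits (e.g. １０) and U+339D ㎝ to ASCII "10"/"cm".
    return unicodedata.normalize("NFKC", size)

assert normalize_size("１０㎝") == "10cm"
```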
\n# Input tensor is the image\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n# Output tensors are the detection boxes, scores, and classes\n# Each box represents a part of the image where a particular object was detected\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n# Each score represents the level of confidence for each of the objects.\n# The score is shown on the result image, together with the class label.\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n# Number of objects detected\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n#------------------------------------------------------------------------------------------------------------------------------------\n\n\n\n\nresults = dict()\n\nresolutions = {\"0.3\": (640,480), \"800x600\": (800,600), \"1024x768\": (1024,768), \"1\":(1280,960), \"2\":(1600,1200), \"3\": (2048,1536), \"4\": (2240,1680), \"5\": (2560,1920 ), \"6\":(3032,2008), \"7\":(3072,2304 ), \"8\": (3264,2448)}\n\nfor format, res in resolutions.items():\n\n video = cv2.VideoCapture(0)\n video.set(6, 1196444237) # MJPEG\n video.set(3,res[0])\n video.set(4,res[1])\n\n i = 0\n frame_rates = []\n\n while i < 100:\n e1 = cv2.getTickCount()\n fps = video.get(5)\n ret, frame = video.read()\n\n frame_expanded = np.expand_dims(frame, axis=0)\n\n # Perform the actual detection by running the model with the image as input\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: frame_expanded})\n\n # Draw the results of the detection (aka 'visualize the results')\n vis_util.visualize_boxes_and_labels_on_image_array(\n frame,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=2,\n min_score_thresh=0.60)\n\n\n\n cv2.imshow(\"frame\", frame)\n e2 = cv2.getTickCount()\n freq = cv2.getTickFrequency()\n t = (e2 - e1) / freq\n frame_rate_calc = 1 / t\n\n #print(format, res, fps, frame_rate_calc)\n frame_rates.append(frame_rate_calc)\n key = cv2.waitKey(1)\n if key == 27:\n break\n if i == 9:\n global cpu\n cpu = system_stats.cpu()\n\n\n i += 1\n\n\n\n\n video.release()\n cv2.destroyAllWindows()\n\n results.update({format: {\"format\": format, \"width\": res[0], \"height\": res[1], \"max_fps\": fps, \"fps\": [round(i, 2) for i in frame_rates], \"fps_mean\": round(np.mean(frame_rates), 2), \"cpu\": cpu}})\n\n sleep(5)\n\n\n\nwith open('fps_mean_test_tf.csv', 'w', newline='') as csvfile:\n fieldnames = ['format', 'width',\"height\", \"max_fps\", \"fps\", \"fps_mean\", \"cpu\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for key, result in results.items():\n writer.writerow(result)","sub_path":"ct_fps_test_tf.py","file_name":"ct_fps_test_tf.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"623017998","text":"import os\nimport Tkinter\n\nfrom memops.api.Implementation import MemopsRoot\n\nfrom ccpnmr.format.converters.CnsFormat import CnsFormat\n\nfrom ccpnmr.format.general.Util import (createExperiment,\n getRefExpFromOldExpType)\n\nfrom pdbe.nmrStar.IO.NmrStarExport import NmrStarExport\n\nif __name__ == '__main__':\n\n # Similar start to code 
as that found in ex6.py.\n project = MemopsRoot(name = 'sampleTest')\n guiRoot = Tkinter.Tk()\n\n cnsObj = CnsFormat(project, guiRoot)\n\n nmrProject = project.currentNmrProject = \\\n project.newNmrProject(name = project.name)\n strucGen = nmrProject.newStructureGeneration()\n\n cnsDir = '../data/cns'\n cnsFile = os.path.join(cnsDir, 'cns_1.pdb')\n\n cnsObj.readCoordinates([cnsFile],\n strucGen = strucGen,\n minimalPrompts = 1,\n linkAtoms = 0)\n\n # Find molecule made by readCoordinates and set isFinalised to True.\n molecule = project.findFirstMolecule()\n molecule.isFinalised = True\n\n # Create a classification store.\n classif = project.CurrentClassification = \\\n project.newClassification(namingSystem = 'local')\n\n # Make a new sampleCategory object - required for sample objects.\n sampCat = classif.newSampleCategory(name = 'myCategory')\n\n # Also need a sampleStore object.\n sampStore = project.currentSampleStore = \\\n project.newSampleStore(name = project.name)\n\n # And make the new sample from the sampleStore with the sampleCategory.\n keywds = {'name': 'mySample',\n 'sampleCategories': [sampCat],\n 'ph': 5.0}\n sample = sampStore.newSample(**keywds)\n\n # Reference store for making molecule components.\n refStore = project.currentRefSampleComponentStore = \\\n project.newRefSampleComponentStore(name = project.name)\n\n # New molComponent object connected to the protein molecule - subclass\n # of AbstractComponent. Needed for making a new sample component.\n keywds2 = {'name': molecule.name,\n 'molecule': molecule,\n 'molType': 'protein'}\n molComp = refStore.newMolComponent(**keywds2)\n\n # Concentration data for a sampleComponent for the protein molecule.\n keywds3 = {'concentration': 0.001,\n 'concentrationError': 0.0002,\n 'concentrationUnit': 'M'}\n sampComp = sample.newSampleComponent(refComponent = molComp, **keywds3)\n\n # Old way of labelling information for the protein molecule - see ex11b.py\n # for the new way to do this (which will be seen in the NmrStar file).\n\n keywds4 = {'elementName': 'C',\n 'incorporation': 0.99,\n 'isUniform': True,\n 'labelName': '13C',\n 'labelType': 'isotope',\n 'massNumber': 13}\n label13C = molComp.newLabel(**keywds4)\n\n keywds5 = {'elementName': 'N',\n 'incorporation': 0.98,\n 'isUniform': True,\n 'labelName': '15N',\n 'labelType': 'isotope',\n 'massNumber': 15}\n label15N = molComp.newLabel(**keywds5)\n\n # New substance object for NaCl - subclass of AbstractComponent.\n # Note, a substance doesn't need a molecule unlike a molComponent.\n saltComp = refStore.newMolComponent(name = 'NaCl',\n details = 'NaCl')\n\n # New sampleComponent object connected to the substance object.\n keywds6 = {'concentration': 0.1,\n 'concentrationUnit': 'M'}\n sampComp2 = sample.newSampleComponent(refComponent = saltComp, **keywds6)\n\n # sampleConditionSet object - parent of individual sampleCondition objects.\n sampCondSet = nmrProject.newSampleConditionSet(name = 'sample_conditions')\n\n # Create two sampleCondition objects.\n keywds7 = {'value': 5.0,\n 'condition': 'pH'}\n sampCondPh = sampCondSet.newSampleCondition(**keywds7)\n\n keywds8 = {'value': 298,\n 'condition': 'Temperature',\n 'unit': 'K'}\n sampCondTemp = sampCondSet.newSampleCondition(**keywds8)\n\n # Easy way to make a test NMR experiment for connecting to\n # sample information.\n refExpType = 'noesy_hsqc_HCNH.hhcn'\n refExp = getRefExpFromOldExpType(project, refExpType)\n nmrExp = createExperiment(project, '13c_15n_noesy_test', refExp)\n\n # Link the sample information to this 
experiment.\n nmrExp.setSample(sample)\n nmrExp.setSampleConditionSet(sampCondSet)\n\n # Find the molSystem to connect to the experiment.\n molSystem = project.findFirstMolSystem()\n nmrExp.addMolSystem(molSystem)\n\n # affiliationStore for NMR spectrometer manufacturer.\n affStore = project.currentAffiliationStore = \\\n project.newAffiliationStore(name = project.name)\n\n manufacturer = affStore.newOrganisation(name = 'Varian')\n\n # instrumentStore for the NMR machine.\n instrStore = project.currentInstrumentStore = \\\n project.newInstrumentStore(name = project.name)\n\n keywds9 = {'name': 'Varian_Inova_600',\n 'manufacturer': manufacturer,\n 'nominalFreq': '600',\n 'model': 'Inova',\n 'protonFreq': 600.1}\n spec = instrStore.newNmrSpectrometer(**keywds9)\n\n # Connect the NMR experiment to the spectrometer.\n nmrExp.setSpectrometer(spec)\n\n # Add some methods and software - made from a parent methodStore object.\n methStore = project.currentMethodStore = \\\n project.newMethodStore(name = project.name)\n\n keywds10 = {'name': 'assignment',\n 'task': 'NMR peak assignment'}\n method1 = methStore.newMethod(**keywds10)\n\n keywds11 = {'name': 'peak picking',\n 'task': 'NMR peak picking'}\n method2 = methStore.newMethod(**keywds11)\n\n keywds12 = {'name': 'Analysis',\n 'vendorName': 'ccpNmr',\n 'version': '1.0',\n 'methods': [method1, method2]}\n software = methStore.newSoftware(**keywds12)\n\n # Make a new BMRB Entry.\n entryStore = project.currentNmrEntryStore = \\\n project.newNmrEntryStore(name = project.name)\n entry = entryStore.newEntry(name = project.name)\n\n # Set the relevant links to the new BMRB Entry.\n entry.setMolSystem(molSystem)\n entry.addExperiment(nmrExp)\n\n # The plural of software is software - hence setSoftware requires\n # a set of software.\n entry.setSoftware([software]) # or entry.addSoftware(software)\n entry.addStructureGeneration(strucGen)\n\n curDir = os.path.abspath('../data')\n nmrStarDir = os.path.join(curDir, 'nmrStar')\n\n if not os.path.exists(nmrStarDir):\n os.mkdir(nmrStarDir)\n\n outNmrStarFile = os.path.join(nmrStarDir, 'nmrStar3.str')\n\n nmrStarExport = NmrStarExport(entry, nmrStarVersion = '3.1')\n nmrStarExport.createFile(outNmrStarFile)\n nmrStarExport.writeFile()\n","sub_path":"ccpnmr2.4/python/ccp/examples/help_doc/src/ex11.py","file_name":"ex11.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"420301638","text":"#-*- coding: utf-8 -*-\n\"\"\"\n@file\n@brief Magic commands to communicate with a sqlite3 database.\n\"\"\"\nimport sys, os, pandas\n\nfrom IPython.core.magic import Magics, magics_class, line_magic, cell_magic\nfrom IPython.core.magic import line_cell_magic\nfrom IPython.core.display import HTML \n\nfrom .sql_interface import InterfaceSQL\n\n@magics_class\nclass MagicSQL(Magics):\n \"\"\"\n Defines SQL commands to play with `sqlite3 `_\n \"\"\"\n \n def get_connection(self):\n \"\"\"\n returns the connection stored in the workspace\n \"\"\"\n if self.shell is None:\n raise Exception(\"No detected workspace.\")\n \n if \"DB\" not in self.shell.user_ns:\n raise KeyError(\"No opened sqlite3 database.\")\n \n return self.shell.user_ns[\"DB\"]\n \n @line_magic\n def SQL_connect(self, line):\n \"\"\"\n define ``SQL_connect``\n \"\"\"\n filename = line.strip()\n if len(filename) == 0:\n print(\"Usage:\")\n print(\" %SQL_connect \")\n else:\n obj = InterfaceSQL.create(filename)\n obj.connect()\n self.shell.user_ns[\"DB\"] = obj\n return obj
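A hypothetical notebook session using the magics defined in this class; the database file and table names below are invented for illustration and assume an InterfaceSQL backend plus an active IPython shell:

```python
# After calling register_sql_magics() (defined at the end of the module):
#
#   %SQL_connect people.db3       # opens the database and stores it as DB
#   %SQL_tables                   # lists the tables
#   %%SQL df_out                  # cell magic; df_out receives the result
#   SELECT name, COUNT(*) AS n FROM person GROUP BY name
#
# The optional cell-magic argument mirrors the `addv` branch in SQL() below,
# which copies the resulting frame into the user namespace.
```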
\n \n @line_magic\n def SQL_close(self, line):\n \"\"\"\n define ``SQL_close``\n \"\"\"\n db = self.get_connection()\n db.close()\n del self.shell.user_ns[\"DB\"]\n \n @line_magic\n def SQL_tables(self, line):\n \"\"\"\n define ``SQL_tables``\n \"\"\"\n db = self.get_connection()\n return db.get_table_list()\n \n @line_magic\n def SQL_schema(self, line):\n \"\"\"\n define ``SQL_schema``\n \"\"\"\n if len(line) == 0:\n print(\"Usage:\")\n print(\" %SQL_schema \")\n else:\n db = self.get_connection()\n return db.get_table_columns(line)\n \n @line_cell_magic\n def SQL(self, line, cell = None):\n \"\"\"\n defines command ``%%SQL``\n \"\"\"\n def usage():\n print(\"Usage:\")\n print(\" %SQL \")\n print(\"or\")\n print(\" %%SQL \")\n print(\" \")\n \n cont = True\n addv = None\n if cell is None:\n if len(line) == 0:\n usage()\n cont = False\n else:\n query = line.strip()\n \n if self.shell is not None and query in self.shell.user_ns:\n query = self.shell.user_ns[query]\n \n elif len(cell) == 0 :\n usage()\n cont = False\n else:\n query = cell\n addv = line.strip()\n if len(addv) == 0 : addv = None\n \n if cont:\n db = self.get_connection()\n df = db.execute(query)\n \n if addv is not None and self.shell is not None:\n self.shell.user_ns[addv] = df\n return df\n \n\ndef register_sql_magics():\n \"\"\"\n register magics function, can be called from a notebook\n \"\"\"\n ip = get_ipython()\n ip.register_magics(MagicSQL)\n ","sub_path":"src/pyensae/sql/magic_sql.py","file_name":"magic_sql.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"91540574","text":"#!/usr/bin/env python3\n\n# Famous Algorithm - Kadane's ALgorithm - AlgoExpert\nimport unittest\n\n\n# O(n) time | O(1) space\ndef kadanesAlgorithm(array):\n maxEndingHere = array[0]\n maxSoFar = array[0]\n for i in range(1, len(array)):\n num = array[i]\n maxEndingHere = max(num, maxEndingHere + num)\n maxSoFar = max(maxSoFar, maxEndingHere)\n return maxSoFar\n\n\nclass TestProgram(unittest.TestCase):\n def test_case_1(self):\n self.assertEqual(\n kadanesAlgorithm([3, 5, -9, 1, 3, -2, 3, 4, 7, 2, -9, 6, 3, 1, -5, 4]), 19\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"challenge_expert_probs/Python/md/18md_kadane_algorithm.py","file_name":"18md_kadane_algorithm.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"212362881","text":"try:\n with open(\"covid.txt\") as rona:\n covid = rona.read()\nexcept FileNotFoundError:\n covid = None\n\ncorona_file = [k for k in covid.split(\"\\n\") if len(k)>3] #list comprehension\nlockdown = {k.split(\":\")[0]:k.split(\":\")[1] for k in corona_file}\n# print(type(lockdown)) check if it is a dictionary\n\n#test with key & value\nfor key,val in lockdown.items():\n print (key, \"=>\", val)\n\nprint(lockdown)\n","sub_path":"tut_five.py","file_name":"tut_five.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"332633902","text":"import numpy as np\n\nfrom collections import deque\n\nimport gradient\nimport nn\n\nclass runner(object):\n def __init__(self, master, network, env_set, config):\n self.config = config\n\n self.grads = {}\n\n self.gamma = config.get(\"gamma\")\n\n self.update_reward_steps = config.get(\"update_reward_steps\")\n\n self.total_steps = 0\n self.total_actions = 0\n\n self.last_rewards = deque()\n 
self.last_rewards_size = 100\n\n self.max_reward = 0\n\n self.import_self_weight = config.get('import_self_weight')\n\n self.ra_range_begin = config.get(\"ra_range_begin\")\n self.ra_alpha_cap = config.get(\"ra_alpha_cap\")\n self.ra_alpha = config.get(\"ra_alpha\")\n\n self.batch = []\n self.batch_size = config.get(\"batch_size\")\n \n self.state_steps = config.get(\"state_steps\")\n self.input_shape = config.get(\"input_shape\")\n\n self.envs = env_set.envs\n self.master = master\n self.network = network\n\n def get_actions(self, states):\n input = [s.read() for s in states]\n\n def random_choice(p):\n ra_alpha = 0.1\n random_choice = np.random.choice([True, False], p=[ra_alpha, 1.-ra_alpha])\n\n if random_choice:\n return np.random.randint(0, len(p))\n\n return np.random.choice(len(p), p=p)\n\n action_probs, values = self.network.predict(input)\n #actions = [random_choice(p) for p in action_probs]\n actions = [np.random.choice(len(p), p=p) for p in action_probs]\n\n return actions, values\n\n def run_sample(self, batch):\n states_shape = (len(batch), self.input_shape[0], self.input_shape[1], self.input_shape[2] * self.state_steps)\n states = np.zeros(shape=states_shape)\n new_states = np.zeros(shape=states_shape)\n\n rashape = (len(batch), 1)\n reward = np.zeros(shape=rashape)\n action = np.zeros(shape=rashape)\n\n idx = 0\n for e in batch:\n s, a, r, sn, done = e\n\n states[idx] = s.read()\n new_states[idx] = sn.read()\n action[idx] = a\n reward[idx] = r\n idx += 1\n\n self.master.train(states, action, reward)\n self.network.import_params(self.master.export_params(), self.import_self_weight)\n\n def run_batch(self, h):\n if len(h) == 0:\n return\n\n self.run_sample(h)\n\n def update_reward(self, e, done):\n local_batch = e.last(self.batch_size)\n\n rev = 0.0\n if not done:\n s, a, r, sn, done = local_batch[-1]\n\n _, rev = self.get_actions([sn])\n\n h = []\n for elm in reversed(local_batch):\n s, a, r, sn, done = elm\n rev = r + self.gamma * rev\n\n h.append((s, a, rev, sn, done))\n\n self.batch += h\n if len(self.batch) >= self.batch_size:\n self.run_batch(self.batch)\n self.batch = []\n\n def run(self, coord, check_save):\n states = []\n running_envs = []\n while not coord.should_stop():\n if len(running_envs) == 0:\n running_envs = self.envs\n states = [e.reset() for e in running_envs]\n\n actions, values = self.get_actions(states)\n new_states = []\n new_running_envs = []\n for e, s, a, v in zip(running_envs, states, actions, values):\n sn, reward, done = e.step(s, a)\n\n if e.total_steps % self.update_reward_steps == 0 or done:\n self.update_reward(e, done)\n\n e.clear()\n\n if done:\n self.network.update_reward(e.creward)\n self.master.update_reward(e.creward)\n\n if len(self.last_rewards) >= self.last_rewards_size:\n self.last_rewards.popleft()\n\n self.last_rewards.append(e.creward)\n\n mean = np.mean(self.last_rewards)\n std = np.std(self.last_rewards)\n max_last = np.max(self.last_rewards)\n\n if e.creward > self.max_reward:\n self.max_reward = e.creward\n\n print(\"%s: %3d %2d/%d reward: %3d/%3d/%3d, total steps: %6d/%4d, mean reward over last %3d episodes: %.1f, std: %.1f\" % (\n e.eid, e.episodes, len(running_envs), len(self.envs),\n e.creward, max_last, self.max_reward, e.total_steps, e.total_steps_diff(),\n len(self.last_rewards), mean, std))\n\n e.clear_stats()\n else:\n new_states.append(sn)\n new_running_envs.append(e)\n\n check_save(e.total_steps)\n\n states = new_states\n running_envs = new_running_envs\n\n 
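The `update_reward` method above folds rewards backward with `rev = r + gamma * rev` before handing the batch to the trainer. A tiny standalone check of that recursion, with invented values purely for illustration:

```python
# Backward discounted-return recursion, as used in runner.update_reward.
gamma = 0.99
rewards = [0.0, 0.0, 1.0]            # terminal reward only
returns, rev = [], 0.0
for r in reversed(rewards):
    rev = r + gamma * rev
    returns.append(rev)
returns.reverse()

assert abs(returns[0] - gamma ** 2) < 1e-12  # 1.0 discounted twice
print(returns)                               # [0.9801, 0.99, 1.0]
```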
coord.request_stop()\n\n","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":4886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"358835335","text":" \r\ndef to_hex(n):\r\n t1=n%16\r\n t2=n//16\r\n if (t1>9):\r\n s2=chr(t1+87)\r\n else:\r\n s2=str(t1)\r\n s1=\"\"\r\n if(n>=16):\r\n if (t2>9):\r\n s1=chr(t2+87)\r\n else:\r\n s1=str(t2)\r\n s=s1+s2\r\n return s\r\n\r\ndef hex_color (red, green, blue):\r\n r=\"\"\r\n b=\"\"\r\n g=\"\"\r\n if (red<16):\r\n r=\"0\"\r\n if (green<16):\r\n g=\"0\"\r\n if (blue<16):\r\n b=\"0\"\r\n r=r+to_hex(red)\r\n g=g+to_hex(green)\r\n b=b+to_hex(blue)\r\n k=\"#\"+r+g+b\r\n return k","sub_path":"tohex.py","file_name":"tohex.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"568206493","text":"from tqdm import tqdm\nimport numpy as np\nimport os.path, pickle\nimport os,sys\nfrom os.path import normpath,join,dirname\nfrom utils_graph import conceptnet_graph, domain_aggregated_graph, subgraph_for_concept\nBase_DIR=normpath(join(os.path.dirname(os.path.abspath(__file__)), '../..'))\nsys.path.insert(0,Base_DIR)\nfrom data.data_process_utils.concept_util import getDomainDataURL, getAllConcepts\nimport argparse\n\nimport sys\ndefaultencoding = 'utf-8'\nif sys.getdefaultencoding() != defaultencoding:\n reload(sys)\n sys.setdefaultencoding(defaultencoding)\n# this did not actually switch the encoding; the change ended up being made on open() instead\n\n\nif __name__ == '__main__':\n # the goal is to obtain the various maps\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data_path\", default=\"data/domain_data/processed_data\",\n help=\"data_json_dir\")\n parser.add_argument(\"--domain\", default=\"books\",\n help=\"domain name\")\n parser.add_argument(\"--kg\", default=\"wordNet\",\n help=\"knowledge graph type name\")\n opt = parser.parse_args()\n\n print ('Extracting seed concepts from ' + opt.domain)\n\n domainList = [opt.domain]# , \"books\", \"dvd\",\"electronics\", \"kitchen\" controls what kind of nodes are extracted from the big graph\n\n urlList = getDomainDataURL(domainList,opt.data_path)\n all_seeds = getAllConcepts(urlList) # close to 20,000 of them, while the concept API yields at most 3600 per hour\n\n if opt.kg == \"conceptNet\":\n print ('Creating conceptNet graph.')# a standalone large graph, expected to come from ConceptNet + opinionconceptTriple:118651\n G, G_reverse, concept_map, relation_map = conceptnet_graph('conceptnet_english_ours.txt')# our own nodes should be added above\n elif opt.kg == \"wordNet\":\n print('Creating wordNet graph.') # a standalone large graph, expected to come from ConceptNet + opinionconceptTriple:118651\n G, G_reverse, concept_map, relation_map = conceptnet_graph('wordnet_aspect2opinion_english_ours.txt') # our own nodes should be added above\n else:\n print('no such a graph triples .txt.')\n exit()\n # concept_map is simply word2int\n print ('Num seed concepts:', len(all_seeds))\n print ('Populating domain aggregated sub-graph with seed concept sub-graphs.')\n triplets, unique_nodes_mapping = domain_aggregated_graph(all_seeds, G, G_reverse, concept_map, relation_map)# @jinhui is this for the whole dataset? It is only a tool for quickly finding neighbours\n # unique_nodes_mapping 80908: it actually filters out entries unrelated to all_seeds and shortens the index; the entries in triplets are the compressed {conceptMap:coutureindex}\n print ('Creating sub-graph for seed concepts.')\n concept_graphs = {}# why one graph per seed? To filter everything related out of the overall graph\n\n # for node in tqdm(all_seeds, desc='Instance', position=0):\n # concept_graphs[node] = subgraph_for_concept(node, G, G_reverse, concept_map, relation_map)# expressed in terms of concept_map and relation_map\n\n # Create mappings\n inv_concept_map = {v: k for k, v in concept_map.items()}\n inv_unique_nodes_mapping = {v: k for k, v in unique_nodes_mapping.items()}
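The `to_hex`/`hex_color` pair fixed above can be cross-checked against Python's own format specifiers. A hedged one-line equivalent (the function name is invented for this sketch):

```python
def hex_color_fmt(red: int, green: int, blue: int) -> str:
    # {:02x} zero-pads each channel to two lowercase hex digits.
    return "#{:02x}{:02x}{:02x}".format(red, green, blue)

assert hex_color_fmt(255, 16, 9) == "#ff1009"  # matches the fixed hex_color
```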
\n inv_word_index = {}\n for item in inv_unique_nodes_mapping:\n inv_word_index[item] = inv_concept_map[inv_unique_nodes_mapping[item]]# the index here belongs to the small graph\n word_index = {v: k for k, v in inv_word_index.items()}\n inv_relation_map = {v: k for k, v in relation_map.items()}\n print ('Saving files.')\n fileName = \"\"\n for domain in domainList:\n fileName = fileName + domain + \"_\"\n pass\n fileName = join(\"preprocess_data\", fileName) + opt.kg\n if not os.path.exists(fileName):\n os.makedirs(fileName)\n pickle.dump(all_seeds, open(fileName + '/all_seeds.pkl', 'wb'))\n pickle.dump(concept_map, open(fileName + '/concept_map.pkl', 'wb'))# index on the big graph\n pickle.dump(relation_map, open(fileName + '/relation_map.pkl', 'wb'))\n pickle.dump(unique_nodes_mapping, open(fileName + '/unique_nodes_mapping.pkl', 'wb'))# big-graph index to small-graph index\n pickle.dump(word_index, open(fileName + '/word_index.pkl', 'wb'))\n # pickle.dump(concept_graphs, open(fileName + '/concept_graphs.pkl', 'wb'))# standalone sub-graph for each concept, just with an extra layer of indexing\n\n np.ndarray.dump(triplets, open(fileName + '/triplets.np', 'wb')) # sub-graph indices of the filtered concepts\n print ('Completed.')\n\n\n","sub_path":"extention/Graph_Embedding/preprocess_graph.py","file_name":"preprocess_graph.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"443615776","text":"from mutagen.mp3 import MP3\nfrom mutagen.easyid3 import EasyID3\nfrom playlist import Playlist, load\nfrom song import Song\nimport os\nimport glob\nimport multiprocessing\nimport sys\n\n\nclass MusicCrawler:\n def __init__(self, pathname):\n self.pathname = pathname\n self.mp3s = self.load(pathname)\n\n def load(self, pathname):\n os.chdir(pathname)\n mp3s = []\n for filename in glob.glob(\"*.mp3\"):\n path = (pathname+\"/\"+filename).replace(\" \", \"\\ \")\n mp3s.append((MP3(filename, ID3=EasyID3), path))\n return mp3s\n\n def generate_playlist(self):\n playlist = Playlist(\"New Playlist\")\n for mp3 in self.mp3s:\n try:\n title = mp3[0].tags[\"title\"][0]\n except:\n title = \"Unknown Title\"\n try:\n artist = mp3[0].tags[\"artist\"][0]\n except:\n artist = \"Unknown Artist\"\n try:\n album = mp3[0].tags[\"album\"][0]\n except:\n album = \"Unknown Album\"\n bitrate = mp3[0].info.bitrate\n length = round(mp3[0].info.length)\n playlist.add_song(Song(title, artist, album, 0, length, bitrate, mp3[1]))\n return playlist\n\n\n","sub_path":"week2/Music Library/music_crawler.py","file_name":"music_crawler.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"8705673","text":"# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance\n# with the License. A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"LICENSE.txt\" file accompanying this file. This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n# OR CONDITIONS OF ANY KIND, express or implied. 
See the License for the specific language governing permissions and\n# limitations under the License.\nimport configparser\nimport pytest\nfrom assertpy import assert_that\n\nfrom pcluster.cluster_model import ClusterModel, infer_cluster_model\n\n\n@pytest.mark.parametrize(\n \"config_parser_cluster_dict, cfn_stack, expected_cluster_model\",\n [\n # queue_settings present in config\n ({\"queue_settings\": \"queue1, queue2\"}, None, ClusterModel.HIT),\n # no queue_settings in config and no cfn params\n ({}, None, ClusterModel.SIT),\n # no queue_settings in config and slurm scheduler in cfn params\n (\n {},\n {\n \"Parameters\": [{\"ParameterKey\": \"Scheduler\", \"ParameterValue\": \"slurm\"}],\n \"Tags\": [{\"Key\": \"Version\", \"Value\": \"2.10.0\"}],\n },\n ClusterModel.HIT,\n ),\n (\n {},\n {\n \"Parameters\": [{\"ParameterKey\": \"Scheduler\", \"ParameterValue\": \"slurm\"}],\n \"Tags\": [{\"Key\": \"Version\", \"Value\": \"2.9.0\"}],\n },\n ClusterModel.HIT,\n ),\n # slurm scheduler in cfn params but SIT version\n (\n {},\n {\n \"Parameters\": [{\"ParameterKey\": \"Scheduler\", \"ParameterValue\": \"slurm\"}],\n \"Tags\": [{\"Key\": \"Version\", \"Value\": \"2.8.91\"}],\n },\n ClusterModel.SIT,\n ),\n # no queue_settings in config and no slurm scheduler in cfn params\n (\n {},\n {\n \"Parameters\": [{\"ParameterKey\": \"Scheduler\", \"ParameterValue\": \"sge\"}],\n \"Tags\": [{\"Key\": \"Version\", \"Value\": \"2.8.91\"}],\n },\n ClusterModel.SIT,\n ),\n (\n {},\n {\n \"Parameters\": [{\"ParameterKey\": \"Scheduler\", \"ParameterValue\": \"torque\"}],\n \"Tags\": [{\"Key\": \"Version\", \"Value\": \"2.10.0\"}],\n },\n ClusterModel.SIT,\n ),\n ],\n)\ndef test_cluster_model(config_parser_cluster_dict, cfn_stack, expected_cluster_model):\n config_parser_dict = {\"cluster default\": config_parser_cluster_dict}\n\n config_parser = configparser.ConfigParser()\n config_parser.read_dict(config_parser_dict)\n\n cluster_model = infer_cluster_model(config_parser, \"default\", cfn_stack)\n assert_that(cluster_model).is_equal_to(expected_cluster_model)\n","sub_path":"cli/tests/pcluster/config/test_cluster_model.py","file_name":"test_cluster_model.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"76556635","text":"from sklearn.svm import SVC\nfrom sklearn.model_selection import KFold\nfrom sklearn.utils import resample\nimport numpy\n\ndef classify(data,labels):\n\tX = data\n\ty = labels\n\tX,y = resample(X,y,random_state=0)\n\n\t#classify\n\tprint('Using SVM, K Fold cross validation')\n\tclf = SVC(gamma=0.001)\n\tkf = KFold(n_splits=7,shuffle=True)\n\tscores=[]\n\tcount=0\n\tfor train,test in kf.split(X):\n\t\tcount+=1\n\t\ttrainX = X[train]\n\t\ttrainY = y[train]\n\t\ttestX = X[test]\n\t\ttestY = y[test]\n\t\tclf.fit(trainX,trainY)\n\t\tscore = clf.score(testX,testY)\n\t\tprint('fold ',count,' : ',score)\n\t\tscores.append(score)\n\tscores = numpy.array(scores)\n\treturn scores.mean(),clf","sub_path":"Classifier/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"430669968","text":"#!/usr/bin/python\n# -*- coding: utf_8 -*-\n\n# By Dennis Drescher (sparkycbr at gmail dot com)\n\n###############################################################################\n######################### Description/Documentation 
###########################\n###############################################################################\n\n# This class will handle project data processing operations.\n\n###############################################################################\n################################ Component Class ##############################\n###############################################################################\n# Firstly, import all the standard Python modules we need for\n# this process\n\nimport codecs, os, shutil, subprocess, zipfile, StringIO\nfrom configobj import ConfigObj\n\n# Load the local classes\nfrom rapuma.core.tools import Tools\nfrom rapuma.core.user_config import UserConfig\nfrom rapuma.core.proj_local import ProjLocal\nfrom rapuma.core.proj_log import ProjLog\n#from rapuma.core.paratext import Paratext\nfrom rapuma.project.proj_config import Config\n\n\nclass ProjProcess (object) :\n\n def __init__(self, pid, gid = None, projectConfig = None) :\n '''Initiate the whole class and create the object.'''\n\n self.pid = pid\n self.tools = Tools()\n self.user = UserConfig()\n self.userConfig = self.user.userConfig\n if projectConfig :\n self.projectConfig = projectConfig\n else :\n self.proj_config = Config(self.pid)\n self.proj_config.getProjectConfig()\n self.projectConfig = self.proj_config.projectConfig\n self.local = ProjLocal(pid, gid, self.projectConfig)\n self.log = ProjLog(pid)\n\n # Log messages for this module\n self.errorCodes = {\n '0000' : ['MSG', 'Placeholder message'],\n 'XPRT-000' : ['MSG', 'Messages for export issues (probably only in project.py)'],\n 'XPRT-005' : ['MSG', 'Unassigned error message ID.'],\n 'XPRT-010' : ['ERR', 'Export file name could not be formed with available configuration information.'],\n 'XPRT-020' : ['ERR', 'Unable to export: [<<1>>].'],\n 'XPRT-030' : ['MSG', 'Files exported to [<<1>>].'],\n 'XPRT-040' : ['MSG', 'Beginning export, please wait...'],\n 'XPRT-050' : ['MSG', 'Unassigned error message ID.'],\n\n '1210' : ['MSG', 'Processes completed successfully on: [<<1>>] by [<<2>>]'],\n '1220' : ['ERR', 'Processes for [<<1>>] failed. Script [<<2>>] returned this error: [<<3>>]'],\n '1240' : ['MSG', 'Component group preprocessing [<<1>>] for group [<<2>>].'],\n '1260' : ['ERR', 'Installed the default component preprocessing script. Editing will be required for it to work with your project.'],\n '1265' : ['LOG', 'Component preprocessing script is already installed.'],\n\n }\n\n\n###############################################################################\n############################### Export Functions ##############################\n###############################################################################\n####################### Error Code Block Series = 0200 ########################\n###############################################################################\n\n# FIXME: This needs to be rewritten\n\n #def export (self, cType, cName, path = None, script = None, bundle = False, force = False) :\n #'''Facilitate the exporting of project text. It is assumed that the\n #text is clean and ready to go and if any extraneous publishing info\n #has been injected into the text, it will be removed by an appropriate\n #post-process that can be applied by this function. 
No validation\n #will be initiated by this function.'''\n \n ## FIXME - Todo: add post processing script feature\n\n ## Probably need to create the component object now\n #self.createComponent(cName)\n\n ## Figure out target path\n #if path :\n #path = self.tools.resolvePath(path)\n #else :\n #parentFolder = os.path.dirname(self.local.projHome)\n #path = os.path.join(parentFolder, 'Export')\n\n ## Make target folder if needed\n #if not os.path.isdir(path) :\n #os.makedirs(path)\n\n ## Start a list for one or more files we will process\n #fList = []\n\n ## Will need the stylesheet for copy\n #projSty = self.projectConfig['Managers'][cType + '_Style']['mainStyleFile']\n #projSty = os.path.join(self.local.projStyleFolder, projSty)\n ## Process as list of components\n\n #self.log.writeToLog('XPRT-040')\n #for cid in self.components[cName].getSubcomponentList(cName) :\n #cidCName = self.components[cName].getRapumaCName(cid)\n #ptName = PT_Tools(self).formPTName(cName, cid)\n ## Test, no name = no success\n #if not ptName :\n #self.log.writeToLog('XPRT-010')\n #self.tools.dieNow()\n\n #target = os.path.join(path, ptName)\n #source = os.path.join(self.local.projComponentFolder, cidCName, cid + '.' + cType)\n ## If shutil.copy() spits anything back its bad news\n #if shutil.copy(source, target) :\n #self.log.writeToLog('XPRT-020', [self.tools.fName(target)])\n #else :\n #fList.append(target)\n\n ## Start the main process here\n #if bundle :\n #archFile = os.path.join(path, cName + '_' + self.tools.ymd() + '.zip')\n ## Hopefully, this is a one time operation but if force is not True,\n ## we will expand the file name so nothing is lost.\n #if not force :\n #if os.path.isfile(archFile) :\n #archFile = os.path.join(path, cName + '_' + self.tools.fullFileTimeStamp() + '.zip')\n\n #myzip = zipfile.ZipFile(archFile, 'w', zipfile.ZIP_DEFLATED)\n #for f in fList :\n ## Create a string object from the contents of the file\n #strObj = StringIO.StringIO()\n #for l in open(f, \"rb\") :\n #strObj.write(l)\n ## Write out string object to zip\n #myzip.writestr(self.tools.fName(f), strObj.getvalue())\n #strObj.close()\n ## Close out the zip and report\n #myzip.close()\n ## Clean out the folder\n #for f in fList :\n #os.remove(f)\n #self.log.writeToLog('XPRT-030', [self.tools.fName(archFile)])\n #else :\n #self.log.writeToLog('XPRT-030', [path])\n\n #return True\n\n\n###############################################################################\n########################## Text Processing Functions ##########################\n###############################################################################\n######################## Error Code Block Series = 1200 #######################\n###############################################################################\n\n def turnOnOffPreprocess (self, gid, onOff) :\n '''Turn on or off preprocessing on incoming component text.'''\n\n self.projectConfig['Groups'][gid]['usePreprocessScript'] = onOff\n self.tools.writeConfFile(self.projectConfig)\n self.log.writeToLog(self.errorCodes['1240'], [str(onOff), gid])\n\n\n def checkForPreprocessScript (self, gid) :\n '''Check to see if a preprocess script is installed. 
If not, install the\n default script and give a warning that the script is not complete.'''\n\n # First make sure the Scripts folder is there\n if not os.path.isdir(self.local.projScriptFolder) :\n os.makedirs(self.local.projScriptFolder)\n\n # Check and copy if needed\n if not os.path.isfile(self.local.groupPreprocessFile) :\n shutil.copy(self.local.rpmPreprocessFile, self.local.groupPreprocessFile)\n self.tools.makeExecutable(self.local.groupPreprocessFile)\n self.log.writeToLog(self.errorCodes['1260'])\n else :\n self.log.writeToLog(self.errorCodes['1265'])\n\n\n def runProcessScript (self, target, scriptFile) :\n '''Run a text processing script on a component. This assumes the \n component and the script are valid and the component lock is turned \n off. If not, you cannot expect any good to come of this.'''\n\n # subprocess will fail if permissions are not set on the\n # script we want to run. The correct permission should have\n # been set when we did the installation.\n err = subprocess.call([scriptFile, target])\n if err == 0 :\n self.log.writeToLog(self.errorCodes['1210'], [self.tools.fName(target), self.tools.fName(scriptFile)])\n else :\n self.log.writeToLog(self.errorCodes['1220'], [self.tools.fName(target), self.tools.fName(scriptFile), str(err)])\n return False\n\n return True\n\n\n def scriptInstall (self, source, target) :\n '''Install a script. A script can be a collection of items in\n a zip file or a single .py script file.'''\n\n scriptTargetFolder, fileName = os.path.split(target)\n if self.tools.isExecutable(source) :\n shutil.copy(source, target)\n self.tools.makeExecutable(target)\n elif self.tools.fName(source).split('.')[1].lower() == 'zip' :\n myZip = zipfile.ZipFile(source, 'r')\n for f in myZip.namelist() :\n data = myZip.read(f, source)\n # Pretty sure zip represents directory separator char as \"/\" regardless of OS\n myPath = os.path.join(scriptTargetFolder, f.split(\"/\")[-1])\n try :\n myFile = open(myPath, \"wb\")\n myFile.write(data)\n myFile.close()\n except :\n pass\n myZip.close()\n return True\n else :\n self.tools.dieNow('Script is an unrecognized type: ' + self.tools.fName(source) + ' Cannot continue with installation.')\n\n\n def installPostProcess (self, cType, script, force = None) :\n '''Install a post process script into the main components processing\n folder for a specified component type. This script will be run on \n every file of that type that is imported into the project. Some\n projects will have their own specially developed post process\n script. Use the \"script\" var to specify a process (which should be\n bundled in a system compatable way). If \"script\" is not specified\n we will copy in a default script that the user can modify. This is\n currently limited to Python scripts only which do in-place processes\n on the target files. 
The script needs to have the same name as the\n zip file it is bundled in, except the extension is .py instead of\n the bundle .zip extension.'''\n\n # Define some internal vars\n Ctype = cType.capitalize()\n oldScript = ''\n scriptName = os.path.split(script)[1]\n scriptSourceFolder = os.path.split(script)[0]\n scriptTarget = os.path.join(self.local.projScriptFolder, self.tools.fName(script).split('.')[0] + '.py')\n if scriptName in self.projectConfig['CompTypes'][Ctype]['postprocessScripts'] :\n oldScript = scriptName\n\n # First check for a pre-existing script record\n if not force :\n if oldScript :\n self.log.writeToLog('POST-080', [oldScript])\n return False\n\n # In case this is a new project we may need to install a component\n # type and make a process (components) folder\n if not self.components[cType] :\n self.tools.addComponentType(self.projectConfig, self.local, cType)\n\n # Make the target folder if needed\n if not os.path.isdir(self.local.projScriptFolder) :\n os.makedirs(self.local.projScriptFolder)\n\n # First check to see if there already is a script file, return if there is\n if os.path.isfile(scriptTarget) and not force :\n self.log.writeToLog('POST-082', [self.tools.fName(scriptTarget)])\n return False\n\n # No script found, we can proceed\n if not os.path.isfile(scriptTarget) :\n self.scriptInstall(script, scriptTarget)\n if not os.path.isfile(scriptTarget) :\n self.tools.dieNow('Failed to install script!: ' + self.tools.fName(scriptTarget))\n self.log.writeToLog('POST-110', [self.tools.fName(scriptTarget)])\n elif force :\n self.scriptInstall(script, scriptTarget)\n if not os.path.isfile(scriptTarget) :\n self.tools.dieNow('Failed to install script!: ' + self.tools.fName(scriptTarget))\n self.log.writeToLog('POST-115', [self.tools.fName(scriptTarget)])\n\n # Record the script with the cType post process scripts list\n scriptList = self.projectConfig['CompTypes'][Ctype]['postprocessScripts']\n if self.tools.fName(scriptTarget) not in scriptList :\n self.projectConfig['CompTypes'][Ctype]['postprocessScripts'] = self.tools.addToList(scriptList, self.tools.fName(scriptTarget))\n self.tools.writeConfFile(self.projectConfig)\n\n return True\n\n\n def removePostProcess (self, cType) :\n '''Remove (actually disconnect) a post process script from a\n component type. This will not actually remove the script. That\n would need to be done manually. 
Rather, this will remove the\n script name entry from the component type so the process cannot\n be accessed for this specific component type.'''\n\n Ctype = cType.capitalize()\n # Get old setting\n old = self.projectConfig['CompTypes'][Ctype]['postprocessScripts']\n # Reset the field to ''\n if old != '' :\n self.projectConfig['CompTypes'][Ctype]['postprocessScripts'] = ''\n self.tools.writeConfFile(self.projectConfig)\n self.log.writeToLog('POST-130', [old,Ctype])\n\n else :\n self.log.writeToLog('POST-135', [cType.capitalize()])\n\n return True\n\n\n\n\n","sub_path":"lib/rapuma/core/proj_process.py","file_name":"proj_process.py","file_ext":"py","file_size_in_byte":14474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"466796813","text":"from UI.ui import *\n\n\nclass Dialog(QtWidgets.QDialog):\n def __init__(self, parent=None):\n super().__init__()\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n\n # set the window always on top\n self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint |\n QtCore.Qt.X11BypassWindowManagerHint)\n\n # fix the window size and title\n self.setFixedSize(self.width(), self.height())\n self.setWindowTitle('Cst Plot Data Processor')\n\n # add system tray\n self.trayIcon = QtWidgets.QSystemTrayIcon(self)\n self.trayIcon.setIcon(QtGui.QIcon(r\".\\UI\\icon.png\"))\n self.trayIcon.activated.connect(self.icon_activated)\n\n # add open file or folder\n self.ui.buttonOpenFile.clicked.connect(self.open_file)\n\n # add behavior after tray icon is double clicked\n def icon_activated(self, reason):\n if reason == QtWidgets.QSystemTrayIcon.DoubleClick:\n self.showNormal()\n self.activateWindow()\n self.trayIcon.hide()\n\n def changeEvent(self, e):\n # hide tray icon when close the window\n if e.type() == QtCore.QEvent.Close:\n self.trayIcon.hide()\n # minimize as icon after click minimize button\n elif e.type() == QtCore.QEvent.WindowStateChange and self.isMinimized():\n self.trayIcon.show()\n self.hide()\n\n def open_file(self):\n dialog = QtWidgets.QFileDialog(self, \"Open File\",\n QtCore.QDir.homePath(),\n \"Text File (*.txt)\")\n dialog.setFileMode(QtWidgets.QFileDialog.ExistingFile)\n if dialog.exec():\n self.ui.lineFilePath.setText(dialog.selectedFiles()[0])\n","sub_path":"UI/dialog.py","file_name":"dialog.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"229686996","text":"import random\nfrom copy import deepcopy\n\nlower = 999\nupper = 10000\n\noct_list = []\nhep_list = []\nhex_list = []\npen_list = []\nsq_list = []\ntri_list = []\n\noct = 0\ni = 1\nwhile oct < 10000:\n oct = i * (3 * i - 2)\n if lower < oct < upper:\n oct_list.append(str(oct))\n i += 1\n\nhep = 0\ni = 1\nwhile hep < 10000:\n hep = int(i * (5 * i - 3) / 2)\n if lower < hep < upper:\n hep_list.append(str(hep))\n i += 1\n\nhex = 0\ni = 1\nwhile hex < 10000:\n hex = i * (2 * i - 1)\n if lower < hex < upper:\n hex_list.append(str(hex))\n i += 1\n\npen = 0\ni = 1\nwhile pen < 10000:\n pen = int(i * (3 * i - 1) / 2)\n if lower < pen < upper:\n pen_list.append(str(pen))\n i += 1\n\nsq = 0\ni = 1\nwhile sq < 10000:\n sq = i ** 2\n if lower < sq < upper:\n sq_list.append(str(sq))\n i += 1\n\ntri = 0\ni = 1\nwhile tri < 10000:\n tri = int(i * (i + 1) / 2)\n if lower < tri < upper:\n tri_list.append(str(tri))\n i += 1\n\nlst_of_lists = [hep_list, hex_list, pen_list, sq_list, tri_list]\n\nfor x in range(1, 60):\n first_item = random.choice(oct_list)\n 
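# work from a scratch copy on each attempt: candidate lists are removed as their numbers are chained and re-appended when the random search backtracks\n 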
remaining_list = deepcopy(lst_of_lists)\n for i in range(1, 500):\n second_list = random.choice(remaining_list)\n second_item = random.choice(second_list)\n if first_item[-2:] == second_item[:2]:\n remaining_list.remove(second_list)\n for j in range(1, 500):\n third_list = random.choice(remaining_list)\n third_item = random.choice(third_list)\n if second_item[-2:] == third_item[:2]:\n remaining_list.remove(third_list)\n for k in range(1, 500):\n fourth_list = random.choice(remaining_list)\n fourth_item = random.choice(fourth_list)\n if third_item[-2:] == fourth_item[:2]:\n remaining_list.remove(fourth_list)\n for l in range(1, 500):\n fifth_list = random.choice(remaining_list)\n fifth_item = random.choice(fifth_list)\n if fourth_item[-2:] == fifth_item[:2]:\n remaining_list.remove(fifth_list)\n for m in range(1, 250):\n sixth_list = random.choice(remaining_list)\n sixth_item = random.choice(sixth_list)\n if fifth_item[-2:] == sixth_item[:2] and sixth_item[-2:] == first_item[:2]:\n print(first_item, second_item, third_item, fourth_item, fifth_item,\n sixth_item)\n print(int(first_item) + int(second_item) + int(third_item) + int(\n fourth_item) + int(fifth_item) + int(sixth_item))\n break\n else:\n remaining_list.append(fifth_list)\n continue\n break\n else:\n remaining_list.append(fourth_list)\n continue\n break\n else:\n remaining_list.append(third_list)\n continue\n break\n else:\n remaining_list.append(second_list)\n continue\n break\n else:\n continue\n break\n","sub_path":"Problem061.py","file_name":"Problem061.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"222086871","text":"#---------------------------------------------------------------\n# SPACE INVADERS\n# Andru Onciul\n# 10/03/2018\n# Use the arrows to move and\n# Press Space to fire\n#---------------------------------------------------------------\nimport turtle as turtle\nimport math\nimport random\nimport winsound\nimport time\n\n\n#Variables\nplayerspeed = 15\ninvaderspeed = 8\ninvaderdownspeed = 40\nbulletspeed = 25\nmaxscore= 100\nmaxlives= 3\nscore= 0\n\n#Ask to begin or not\nwanttoplay=True\nhasntlost=True\ningame=False\n\n#Set up the screen\nwn = turtle.Screen()\nwn.bgcolor(\"blue\")\nwn.title(\"Space Invaders\")\nwn.bgpic(\"background1.gif\")\n#wn.tracer(3)\n\n#Register shapes for turtles\nturtle.register_shape(\"invader (1).gif\")\nturtle.register_shape(\"player.gif\")\nturtle.register_shape(\"heart_noback.gif\")\nturtle.register_shape(\"minivaisseau.gif\")\nturtle.register_shape(\"insecte1.gif\")\n\n#Draw border\nmypen= turtle.Turtle()\nmypen.penup()\nmypen.color(\"white\")\nmypen.speed(0)\nmypen.setposition(-300,-300)\nmypen.pendown()\nmypen.pensize(3)\nfor side in range(4):\n mypen.forward(600)\n mypen.left(90)\n\nmypen.hideturtle()\n\n#Draw title\nscore_pen = turtle.Turtle()\nscore_pen.speed(0)\nscore_pen.color(\"white\")\nscore_pen.penup()\nscore_pen.setposition(0,310)\nscorestr=\"SPACE INVADERS\"\nscore_pen.write(scorestr,False,align=\"center\",font=(\"Arial black\",14,\"bold\"))\nscore_pen.hideturtle()\n\n#Functions\nimport msvcrt as m\ndef wait():\n m.getch()\n \ndef move_left():\n if ingame==True:\n x = player.xcor()\n x-= playerspeed\n if x < -280:\n x=280\n player.setx(x)\n \ndef move_right():\n if ingame==True:\n x = player.xcor()\n x+= playerspeed\n if x > 280:\n x=280\n player.setx(x)\n \ndef closewn():\n global wn\n wn.bye()\n \ndef fire():\n global bulletstate\n for i in range(len(bullets)):\n if 
bulletstate[i]== \"ready\" and ingame==True:\n laser()\n bulletstate[i]= \"fire\"\n x= player.xcor()\n y= player.ycor() + 10\n bullets[i].setposition(x,y)\n bullets[i].showturtle()\n break\n\ndef inCollision(a,b):\n d= math.sqrt( math.pow(a.xcor()-b.xcor(),2) + math.pow(a.ycor()-b.ycor(),2))\n if d< 15:\n return True\n return False\n \ndef laser():\n winsound.PlaySound(\"laser.wav\", winsound.SND_ASYNC)\n \ndef explosion():\n winsound.PlaySound(\"explosion.wav\", winsound.SND_ASYNC)\n\ndef play():\n global wanttoplay\n global hasntlost\n wanttoplay=True\n hasntlost=True\n \ndef replay():\n global wanttoplay\n global hasntlost\n wanttoplay=True\n hasntlost=True\n \n#Set keyboard bindings\nturtle.listen()\nturtle.onkey(move_left,\"Left\")\nturtle.onkey(move_right,\"Right\")\nturtle.onkey(fire,\"space\")\nturtle.onkey(closewn,\"Escape\")\nturtle.onkey(replay,\"r\")\nturtle.onkey(play,\"p\")\n\n#Create score pen\nscore_pen = turtle.Turtle()\nscore_pen.speed(0)\nscore_pen.color(\"white\")\nscore_pen.penup()\n\n#Create player\nplayer = turtle.Turtle()\nplayer.color(\"blue\")\nplayer.shape(\"minivaisseau.gif\")\nplayer.penup()\nplayer.speed(0) # animation speed (0 is the fastest)\nplayer.setposition(0,-250)\nplayer.setheading(90)\nplayer.hideturtle()\n\n#Create player bullets\nbullets=[]\nbulletstate=[]\nfor i in range(5):\n bullet=turtle.Turtle()\n bullet.color(\"yellow\")\n bullet.shape(\"triangle\")\n bullet.penup()\n bullet.speed(0)\n bullet.setheading(90)\n bullet.shapesize(0.5,0.5)\n bullet.setposition(0,310) # off the map so it cannot destroy a ship before the player fires\n bullet.hideturtle()\n bullets.append(bullet)\n bulletstate.append(\"ready\")\n #Define bullet state\n #ready - ready to fire\n #fire - the bullet is firing\n\n\n#Create multiple enemies\nmaxinvaders= 6\ninvaders=[]\nfor count in range(maxinvaders): \n invaders.append(turtle.Turtle())\n invaders[count].hideturtle()\nfor invader in invaders:\n invader.color(\"red\")\n invader.shape(\"insecte1.gif\")\n invader.penup()\n invader.speed(0)\n x= random.randint(-200,200)\n y= random.randint(100,250)\n invader.setposition(x,y)\n \n#Create life (heart) turtles\nhearts=[]\nfor i in range(maxlives):\n heart=turtle.Turtle()\n heart.hideturtle()\n heart.speed(3)\n heart.shape(\"heart_noback.gif\")\n heart.penup()\n heart.setposition(200 + i*35,270)\n hearts.append(heart)\n\n\n#----------------------------------------------------------------------------------------------------\n#Now the main loop that will play the game\n#----------------------------------------------------------------------------------------------------\n\n\n\n#MAIN LOOP\nwhile True:\n #Write beginning text\n score_pen.clear()\n score_pen.setposition(0,0)\n score_pen.write(\"ARE YOU READY ?\",False,align=\"center\",font=(\"Arial black\",20,\"bold\"))\n score_pen.setposition(0,-30)\n score_pen.write(\"Press P to launch the game\",False,align=\"center\",font=(\"Arial\",15,\"normal\"))\n score_pen.setposition(0,-50)\n score_pen.write(\"Use the arrows to move and fire with Space\",False,align=\"center\",font=(\"Arial\",15,\"normal\"))\n score_pen.hideturtle()\n\n #Wait for player to press P\n wanttoplay=False\n while True:\n t=turtle.Turtle()\n y= t.ycor()+1 # keep something happening in the background so this is not an instant busy loop\n t.sety(y) # and the P key press has time to be caught\n if wanttoplay == True:\n break\n \n #Make all turtles visible for the beginning of the match\n player.showturtle()\n for invader in invaders:\n invader.showturtle()\n x= 
random.randint(-200,200)\n y= random.randint(100,250)\n invader.setposition(x,y)\n \n #Draw score\n score_pen.clear()\n score_pen.setposition(-290,270)\n scorestr=\"Score: %s\" %score\n score_pen.write(scorestr,False,align=\"left\",font=(\"Arial\",14,\"normal\"))\n score_pen.hideturtle()\n\n #Draw number of lives\n for heart in hearts:\n heart.showturtle()\n\n #The Game can now begin\n ingame = True\n print(\"begin game\")\n score=0\n lives=maxlives\n \n #Now let's make everything move\n while score < maxscore and hasntlost==True:\n for i in range(len(bullets)):\n for invader in invaders:\n #move invaders\n x= invader.xcor()\n x+= invaderspeed\n invader.setx(x)\n #move invaders down\n if invader.xcor() > 280:\n #Move all enemies down\n for inv in invaders:\n y= inv.ycor()\n y-= invaderdownspeed\n inv.sety(y)\n #Change all enemies' direction\n invaderspeed *= -1\n\n if invader.xcor() < -280:\n for inv in invaders: \n y= inv.ycor()\n y-= invaderdownspeed\n inv.sety(y)\n invaderspeed *= -1\n \n #Check collision bullet/invader\n if inCollision(bullets[i],invader):\n explosion()\n bullets[i].hideturtle()\n bulletstate[i]= \"ready\"\n bullets[i].setposition(0,-400)\n #reset the enemy\n x= random.randint(-200,200)\n y= random.randint(100,250)\n invader.setposition(x,y)\n #Change score\n score+=100\n score_pen.setposition(-290,270)\n scorestr=\"Score: %s\" %score\n score_pen.clear()\n score_pen.write(scorestr,False,align=\"left\",font=(\"Arial\",14,\"normal\"))\n score_pen.hideturtle()\n #Lose a life\n if inCollision(player,invader):\n explosion()\n lives -=1\n #Reset the enemy\n x= random.randint(-200,200)\n y= random.randint(100,250)\n invader.setposition(x,y)\n #Write remaining lives\n hearts[lives].hideturtle()\n if lives == 0:\n hasntlost=False\n break\n #Respawn enemies if they go too low\n if invader.ycor() < -290:\n x= random.randint(-200,200)\n y= random.randint(100,250)\n invader.setposition(x,y)\n \n #Move the bullet\n if bulletstate[i] == \"fire\":\n y= bullets[i].ycor()\n y+= bulletspeed\n bullets[i].sety(y)\n #Check bullet in borders\n if bullets[i].ycor() > 280:\n bullets[i].hideturtle()\n bulletstate[i]=\"ready\"\n \n ingame=False\n \n #Hide the turtles\n player.hideturtle()\n for invader in invaders:\n invader.hideturtle()\n bullet.hideturtle()\n \n #Draw the appropriate final message\n if score == maxscore:\n score_pen.setposition(0,0)\n score_pen.write(\"YOU WON\",False,align=\"center\",font=(\"Arial black\",20,\"bold\"))\n score_pen.setposition(0,-30)\n score_pen.write(\"Press Escape to exit the game or Press R to retry\",False,align=\"center\",font=(\"Arial\",14,\"normal\"))\n score_pen.hideturtle()\n elif lives == 0:\n score_pen.setposition(0,0)\n score_pen.write(\"GAME OVER\",False,align=\"center\",font=(\"Arial black\",20,\"bold\"))\n score_pen.setposition(0,-30)\n score_pen.write(\"Press Escape to exit the game or Press R to retry\",False,align=\"center\",font=(\"Arial\",14,\"normal\"))\n score_pen.hideturtle()\n \n #Wait for player answer\n wanttoplay= False\n while True:\n bullet.sety(310)\n y= bullet.ycor()+1 # keep something happening in the background so this is not an instant busy loop\n bullet.sety(y) # and the R/P key press has time to be caught\n if wanttoplay == True:\n break\n \n \nwn.mainloop()\n","sub_path":"Info JEU/Space Invaders/Space-invaders.py","file_name":"Space-invaders.py","file_ext":"py","file_size_in_byte":10114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"514441801","text":"import 
pyautogui\r\n\r\nprint('Press ctrl+c to quit')\r\ntry:\r\n while True:\r\n #Get and print mouse coordinates\r\n x,y = pyautogui.position()\r\n positionStr='X: ' + str(x).rjust(4) +' Y: '+str(y).rjust(4)\r\n print(positionStr,end='')\r\n print('\\b'*len(positionStr), end='', flush=True)\r\nexcept KeyboardInterrupt:\r\n print('\\n Done')\r\n","sub_path":"mousePosition.py","file_name":"mousePosition.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"1788470","text":"#!/usr/bin/python\n# -*- coding: iso-8859-15 -*-\nimport argparse\nimport csv\nimport datetime\nimport os\nimport os.path\nimport OpenSSL\nimport ssl, socket\n\n__author__ = 'Benjamin Raum'\n\nparser = argparse.ArgumentParser(description='Script to check SSL Certificates')\nparser.add_argument('-i','--input', help='Input file name',required=True)\nparser.add_argument('-o','--output',help='Output file name', required=True)\nargs = parser.parse_args()\ninfile = args.input\noutfile = args.output\n\ndef getcertinfo(url):\n global organisation\n url = url.strip()\n cert=ssl.get_server_certificate((url, 443))\n x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)\n exp_date = datetime.datetime.strptime(x509.get_notAfter(),'%Y%m%d%H%M%SZ')\n subject = dict(x509.get_subject().get_components())\n organisation = subject.get('O')\n return(url, str(exp_date), organisation)\n\ndef check_files():\n if os.path.isfile(infile) and os.access(infile, os.R_OK):\n pass\n else:\n print(\"Input-File is either missing or not readable\")\n if os.path.isfile(outfile):\n print(\"Output File is already there!\")\n\ncheck_files()\n\nf = open(outfile, 'w')\nwith open(infile) as file:\n readCSV = csv.reader(file, delimiter=',')\n for line in readCSV:\n print(line)\n url = line[0]\n if url.startswith(\"#\"):\n continue\n warn = line[1]\n crit = line[2]\n try:\n # csv.reader yields a list of fields, so look for a trailing\n # \"# comment\" in the last field instead of splitting the list itself\n comment = line[-1].split(\"#\")[1]\n comment = comment.strip()\n except IndexError:\n comment = \"\"\n url, expdate, orga = certinfos = getcertinfo(url)\n i = str(url + ',' + expdate + ',' + orga + ',' + warn + ',' + crit + ',' + comment + '\\n')\n f.write(i)\n\nf.close()\n\n\n'''\nsolution for local pem certificates:\n>>> from datetime import datetime\n>>> from OpenSSL import crypto as c\n>>> cert = c.load_certificate(c.FILETYPE_PEM, file('path/to/mongodb.Abnahme.pem').read())\n>>> datetime.strptime(cert.get_notAfter(),\"%Y%m%d%H%M%SZ\")\ndatetime.datetime(2019, 10, 14, 12, 46, 35)\n'''\n","sub_path":"certcheck.py","file_name":"certcheck.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"357538897","text":"from abc import ABCMeta, abstractmethod\n\nimport pandas as pd\n\n\nclass Portfolio(object):\n \"\"\"An abstract base class representing a portfolio of\n positions (including both instruments and cash), determined\n on the basis of a set of signals provided by a Strategy.\"\"\"\n\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def generate_positions(self):\n \"\"\"Provides the logic to determine how the portfolio\n positions are allocated on the basis of forecasting\n signals and available cash.\"\"\"\n raise NotImplementedError(\"Should implement generate_positions()!\")\n\n @abstractmethod\n def backtest_portfolio(self):\n \"\"\"Provides the logic to generate the trading orders\n and subsequent equity curve (i.e. 
growth of total equity),\n as a sum of holdings and cash, and the bar-period returns\n associated with this curve based on the 'positions' DataFrame.\n\n Produces a portfolio object that can be examined by\n other classes/functions.\"\"\"\n raise NotImplementedError(\"Should implement backtest_portfolio()!\")\n\n\nclass MarketOnOpenPortfolio(Portfolio):\n \"\"\"Inherits Portfolio to create a system that purchases 100 units of\n a particular symbol upon a long/short signal, assuming the market\n open price of a bar.\n\n In addition, there are zero transaction costs and cash can be immediately\n borrowed for shorting (no margin posting or interest requirements).\n\n Requires:\n symbol - A stock symbol which forms the basis of the portfolio.\n bars - A DataFrame of bars for a symbol set.\n signals - A pandas DataFrame of signals (1, 0, -1) for each symbol.\n initial_capital - The amount in cash at the start of the portfolio.\"\"\"\n\n def __init__(self, symbol, bars, signals, initial_capital=100000.0):\n self.symbol = symbol\n self.bars = bars\n self.signals = signals\n self.initial_capital = float(initial_capital)\n self.positions = self.generate_positions()\n\n def generate_positions(self):\n \"\"\"Creates a 'positions' DataFrame that simply longs or shorts\n 100 of the particular symbol based on the forecast signals of\n {1, 0, -1} from the signals DataFrame.\"\"\"\n positions = pd.DataFrame(index=self.signals.index).fillna(0.0)\n positions[self.symbol] = 100*self.signals['signal']\n return positions\n\n def backtest_portfolio(self):\n \"\"\"Constructs a portfolio from the positions DataFrame by\n assuming the ability to trade at the precise market open price\n of each bar (an unrealistic assumption!).\n\n Calculates the total of cash and the holdings (market price of\n each position per bar), in order to generate an equity curve\n ('total') and a set of bar-based returns ('returns').\n\n Returns the portfolio object to be used elsewhere.\"\"\"\n\n # Construct the portfolio DataFrame to use the same index\n # as 'positions' and with a set of 'trading orders' in the\n # 'pos_diff' object, assuming market open prices.\n portfolio = self.positions*self.bars['Open']\n pos_diff = self.positions.diff()\n\n # Create the 'holdings' and 'cash' series by running through\n # the trades and adding/subtracting the relevant quantity from\n # each column\n portfolio['holdings'] = (self.positions*self.bars['Open']).sum(axis=1)\n portfolio['cash'] = self.initial_capital - (pos_diff*self.bars['Open']).sum(axis=1).cumsum()\n\n # Finalise the total and bar-based returns based on the 'cash'\n # and 'holdings' figures for the portfolio\n portfolio['total'] = portfolio['cash'] + portfolio['holdings']\n portfolio['returns'] = portfolio['total'].pct_change()\n return portfolio\n\n\nclass MarketOnClosePortfolio(Portfolio):\n \"\"\"Encapsulates the notion of a portfolio of positions based\n on a set of signals as provided by a Strategy.\n\n Requires:\n symbol - A stock symbol which forms the basis of the portfolio.\n bars - A DataFrame of bars for a symbol set.\n signals - A pandas DataFrame of signals (1, 0, -1) for each symbol.\n initial_capital - The amount in cash at the start of the portfolio.\"\"\"\n\n def __init__(self, symbol, data, signals, initial_capital=100000.0):\n self.symbol = symbol\n self.data = data\n self.signals = signals\n self.initial_capital = float(initial_capital)\n self.positions = self.generate_positions()\n\n def generate_positions(self):\n return pd.Series(index=self.signals.index, 
data=100.0*self.signals['signal'])\n\n def backtest_portfolio(self):\n portfolio = pd.DataFrame(index=self.signals.index)\n portfolio['holdings'] = self.positions * self.data['close']\n portfolio['cash'] = self.initial_capital - (self.positions.diff() * self.data['close']).cumsum()\n portfolio['total'] = portfolio['cash'] + portfolio['holdings']\n portfolio['returns'] = portfolio['total'].pct_change()\n\n #pd.set_option('display.max_columns', 500)\n #pd.set_option('display.width', 1000)\n #pd.set_option('display.max_rows', 500)\n #print(returns)\n\n return portfolio\n\n","sub_path":"ats/portfolio.py","file_name":"portfolio.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"625476336","text":"# https://open.kattis.com/problems/cudoviste\n\ndef count_symbols(symbol, x, y, city_map) -> int:\n row_1 = sum([1 for index in [x, x+1] if city_map[y][index] == symbol])\n row_2 = sum([1 for index in [x, x+1] if city_map[y+1][index] == symbol])\n return row_1 + row_2\n\n\ndef cudoviste(city_map):\n squash_cars = [0] * 5\n for y in range(len(city_map) - 1):\n for x in range(len(city_map[0]) - 1):\n if count_symbols('#', x, y, city_map) >= 1:\n continue\n squash_cars[count_symbols('X', x, y, city_map)] += 1\n return squash_cars\n\n\ncity_map = ['#..#',\n '..X.',\n '..X.',\n '#XX#']\n\nmap_2 = ['..XX.',\n '.#XX.',\n '..#..',\n '.....',\n ]\n\ndef test_cudoviste():\n assert cudoviste(city_map) == [1,1,2,1,0]\n\ndef test_map2():\n assert cudoviste(map_2) == [2,1,1,0,1]\n\ndef test_counter():\n assert count_symbols('#', 0, 0, city_map) == 1\n assert count_symbols('#', 1, 0, city_map) == 0\n assert count_symbols('#', 2, 0, city_map) == 1\n assert count_symbols('#', 0, 0, city_map) == 1\n assert count_symbols('#', 0, 0, city_map) == 1\n\ndef test_c2():\n assert count_symbols('X', 2, 1, city_map) == 2\n\n\nif __name__ == '__main__':\n R, C = list(map(int, input().split()))\n the_map = []\n for _ in range(R):\n the_map.append(input())\n for num_smash in cudoviste(the_map):\n print(num_smash)","sub_path":"python/1_5/cudoviste.py","file_name":"cudoviste.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"17202160","text":"import bill\n\n\ndef check_input(prompt, range_, min_, max_):\n while True:\n user_input = input(prompt)\n try:\n user_input = int(user_input)\n except ValueError:\n print(\"Input type must be int\\n\")\n continue\n\n # if user_input not in range_:\n # print(\"Input must be in range [{0.start}..{0.stop}).\\n\".format(range_))\n # print(\"min e max\", min_, max_)\n if user_input < min_:\n print(\"Input must be greater than or equal to {0}.\\n\".format(min_))\n elif user_input > max_:\n print(\"Input must be less than or equal to {0}.\\n\".format(max_))\n else:\n return user_input\n\n\ndef select_num_of_bills():\n prompt = \"How many bills do you want to generate (min: 1, max: 5, exit: 0) \\n\"\n range_ = range(10)\n return check_input(prompt, range_, 0, 5)\n\n\ndef select_bill_type():\n suffix = \"\"\n for j in range(len(bill.Bill.type_names)):\n suffix += \"\\n\\t\" + str(j) + \": \" + bill.Bill.type_names[j] + \" \"\n prompt = \"Choose the type of bill:\" + suffix + \"\\n\"\n\n range_ = range(len(bill.Bill.type_names))\n return check_input(prompt, range_, 0, len(bill.Bill.type_names) - 1)\n\n\ndef select_amount_of_numbers():\n prompt = \"Choose the amount of numbers to generate (max 10 per bill) 
\\n\"\n range_ = range(10)\n return check_input(prompt, range_, 1, 10)\n\n\ndef select_bill_city():\n suffix = \"\"\n for j in range(len(bill.Bill.city_names)):\n suffix += \"\\n\\t\" + str(j) + \": \" + bill.Bill.city_names[j] + \" \"\n prompt = \"Choose the \\\"city\\\" (aka \\\"ruota\\\") of the bill: \" + suffix + \"\\n\"\n\n range_ = range(len(bill.Bill.city_names))\n return check_input(prompt, range_, 0, len(bill.Bill.city_names) - 1)\n\n\nif __name__ == '__main__':\n num_of_bills = select_num_of_bills()\n\n if num_of_bills == 0:\n print(\"Exit\")\n exit(0)\n else:\n bill_list = []\n for i in range(num_of_bills):\n print(\"-----\\nBill number\", i+1)\n bill_type = select_bill_type()\n amount_of_numbers = select_amount_of_numbers()\n city = select_bill_city()\n\n a_bill = bill.Bill(bill_type, amount_of_numbers, city)\n bill_list.append(a_bill)\n\n for bill in bill_list:\n print(bill)\n","sub_path":"lotto_game.py","file_name":"lotto_game.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"289350933","text":"import numpy as np\n\nfilename = 'n.txt'\ndata = []\n\ndef read_file(filename):\n\t\"\"\"reads a txt file. Format = 1 row - 1 int\n\treturns array with all ints\"\"\"\n\tdata = []\n\twith open(filename) as f:\n\t\tfor l in f.readlines():\n\t\t\tdata.append(int(l.rstrip()))\n\treturn data\n\ndef calculate_sum(data):\n\t\"\"\"arg: int-array\n\treturn sum of integer array\n\t\"\"\"\n\t#sum_ = 0\n\t#for i in data:\n\t#\tsum_ += i\n\tsum_ = np.sum(data)\n\treturn sum_\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"8688461","text":"from PyQt5.QtWidgets import QWidget\nfrom GUIs.TrackInfo.TrackInfo_QT import Ui_Form\n\n\nclass TrackInfo(QWidget, Ui_Form):\n\n def __init__(self, number, options, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setupUi(self)\n\n self.number = number\n self.options = options\n\n self.Track_Label.setText('Track ' + str(self.number))\n self.Instrument_Options.addItems(self.options)\n\n def current_option(self):\n return self.Instrument_Options.currentText()\n\n def delete(self):\n self.hiddenLayout.removeWidget(self.Track_Label)\n self.hiddenLayout.removeWidget(self.Instrument_Options)\n self.hiddenLayout.removeWidget(self.Full_Frame)\n\n self.Track_Label.deleteLater()\n self.Instrument_Options.deleteLater()\n self.Full_Frame.deleteLater()\n\n def isChecked(self): return self.Track_Label.isChecked()\n","sub_path":"TP2/TP2 - G1/Program/GUIs/TrackInfo/TrackInfo.py","file_name":"TrackInfo.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"633009024","text":"# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n#\n# ---------------------------------------------------------------------\n# Copyright (C) 2019-Present\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\nfrom .logger import logger\nfrom .command import (\n AddNodeCommand, QueryCommand, MoveNodeCommand, DeleteNodeCommand\n)\nfrom .tree import TreeManager\n\n\nclass HandlerManager:\n\n def __init__(self):\n self._root = None\n\n def __str__(self):\n return '{}'.format(self.__class__.__name__)\n\n @property\n def root(self):\n return self._root\n\n def 
execute(self, cmd):\n logger.debug(\"{}: Execute: {}\".format(self, str(cmd)))\n\n response = cmd.failure_response\n\n if isinstance(cmd, AddNodeCommand):\n response = self.insert_node(cmd)\n elif isinstance(cmd, QueryCommand):\n response = self.traverse(cmd)\n elif isinstance(cmd, MoveNodeCommand):\n response = self.move_node(cmd)\n elif isinstance(cmd, DeleteNodeCommand):\n response = self.delete_node(cmd)\n return response\n\n def insert_node(self, cmd):\n parent_id = cmd.parent\n if not parent_id:\n if self.root is None:\n self._root = TreeManager.insert(None, cmd.id, cmd.name)\n return cmd.success_response\n return cmd.failure_response\n\n new_node_id = cmd.id\n new_node_name = cmd.name\n if not new_node_id or not new_node_name:\n logger.debug(\"{}: Some required attributes are not specified in request.\".format(self))\n return cmd.failure_response\n\n parent_node = TreeManager.search(self.root, 'id', parent_id)\n if parent_node is None:\n logger.debug(\"{}: Node with ID={} not found\".format(self, parent_id))\n return cmd.failure_response\n\n duplicate_id_node = TreeManager.search(self.root, 'id', new_node_id)\n if duplicate_id_node is not None:\n logger.debug(\"{}: Found duplicate node with ID={} Node info: '{}'\".format(\n self, new_node_id, duplicate_id_node))\n return cmd.failure_response\n\n duplicate_name_node = TreeManager.search(parent_node, 'data', new_node_name)\n if duplicate_name_node is not None:\n logger.debug(\"{}: Found duplicate node with NAME={} Node info: '{}'\".format(\n self, new_node_name, duplicate_name_node))\n if duplicate_name_node.parent == parent_id:\n return cmd.failure_response\n return cmd.failure_response\n\n new_node = TreeManager.insert(parent_node, new_node_id, new_node_name)\n logger.debug(\"{}: New node was inserted successfully: {} \".format(self, new_node))\n\n return cmd.success_response\n\n def delete_node(self, cmd):\n node_id = cmd.id\n deleted_node = TreeManager.search(self.root, 'id', node_id)\n\n if deleted_node is None:\n logger.debug(\"{}: Node to delete not found\".format(self))\n return cmd.failure_response\n\n if deleted_node.has_children:\n logger.debug(\"{}: Node to delete has children\".format(self))\n return cmd.failure_response\n\n deleted_node = TreeManager.delete_node(deleted_node, 'id', node_id)\n logger.debug(\"{}: Node was deleted: {} \".format(self, deleted_node))\n return cmd.success_response\n\n def move_node(self, cmd):\n from_node_id = cmd.from_\n to_node_id = cmd.to_\n\n if not from_node_id or not to_node_id:\n logger.info(\"{}: Some ids are not specified. 
From: '{}', To: '{}'\".format(\n self, from_node_id, to_node_id))\n return cmd.failure_response\n\n if from_node_id == to_node_id:\n logger.info(\"{}: Ids are equal\".format(self))\n return cmd.failure_response\n\n if from_node_id == self._root.id:\n logger.info(\"{}: You mustn't move root node\".format(self))\n return cmd.failure_response\n\n to_node = TreeManager.search(self.root, 'id', to_node_id)\n if to_node is None:\n logger.info(\"{}: Destination node for moving not found\".format(self))\n return cmd.failure_response\n\n from_node = TreeManager.search(self.root, 'id', from_node_id)\n if from_node is None:\n logger.info(\"{}: Node for moving not found\".format(self))\n return cmd.failure_response\n\n can_move = not TreeManager.is_child(from_node, to_node)\n if not can_move:\n logger.info(\"{}: You mustn't create cycle\".format(self))\n return cmd.failure_response\n\n moved_node = TreeManager.move_node(from_node, to_node)\n if moved_node is None:\n logger.info(\"{}: Moved node after moving mustn't be empty\".format(self))\n return cmd.failure_response\n\n return cmd.success_response\n\n def traverse(self, cmd):\n def apply_filters(elements, root_node):\n if cmd.names:\n elements = filter(lambda x: x.data in cmd.names, elements)\n if cmd.from_nodes:\n elements = filter(lambda x: x.id in cmd.from_nodes, elements)\n if cmd.min_depth:\n elements = filter(lambda x: x.level_between(root_node) >= cmd.min_depth, elements)\n if cmd.max_depth:\n elements = filter(lambda x: x.level_between(root_node) <= cmd.max_depth, elements)\n return elements\n\n result = []\n root_nodes = [self.root]\n if cmd.from_parents:\n root_nodes = []\n for root_id in cmd.from_parents:\n root_node = TreeManager.search(self.root, 'id', root_id)\n if root_node is None:\n continue\n root_nodes += [root_node]\n\n for root_node in root_nodes:\n tree_data = TreeManager.traverse_inorder(root_node)\n tree_data = apply_filters(tree_data, root_node)\n result += tree_data\n\n result = map(lambda x: x.to_json, result)\n result = dict(nodes=result)\n return result\n","sub_path":"src/core/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"46317166","text":"\nimport pygame\nimport os, sys\nfrom pygame.locals import * # Further explanation can be found at: https://www.pygame.org/docs/ref/locals.html#module-pygame.locals\nfrom random import randint\npygame.init()\npygame.mixer.init()\n\n\n\n# Constants\nWINDOW_WIDTH = 1280\nWINDOW_HEIGHT = 720\nDIFFICULTY = \"easy\"\nlast_level = \"iesb\"\ngame_speed = 20\ni = 20\npoints = 0\nobstacles = []\n# Sound constants\njump_path = os.path.join(\"Sounds\", \"jump.wav\")\njump_sound = pygame.mixer.Sound(jump_path)\n\n# Clock ticking for FPS (60fps)\nclock = pygame.time.Clock()\n\n\n# Creating the window of the game\nWINDOW = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\npygame.display.set_caption(\"TechTime Racers\") # Window name\n\n\n# Techie \"Animations\"\ntechie_run = [pygame.image.load(os.path.join(\"Assets/Techie\", \"Techie Run Frame 1.PNG\")).convert_alpha(), \n pygame.image.load(os.path.join(\"Assets/Techie\", \"Techie Run Frame 2.PNG\")).convert_alpha()]\ntechie_jump = pygame.image.load(os.path.join(\"Assets/Techie\", \"Techie Jump.PNG\")).convert_alpha()\ntechie_duck = pygame.image.load(os.path.join(\"Assets/Techie\", \"Techie Duck.PNG\")).convert_alpha()\n\n\n# obstacles\narduino_png = [pygame.image.load(os.path.join(\"Assets/Obstacles\", 
\"Arduino 1.PNG\")).convert_alpha(),\n pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Arduino 2.PNG\")).convert_alpha()]\nsign_png = [pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Stop Sign.PNG\")).convert_alpha(),\n pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Yield Sign.PNG\")).convert_alpha()]\npn_table_png = [pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Picnic Table 1.PNG\")).convert_alpha(),\n pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Picnic Table 2.PNG\")).convert_alpha()]\nsquirrel_png = [pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Squirrel 1.PNG\")).convert_alpha(),\n pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Squirrel 2.PNG\")).convert_alpha()]\nbird_png = [pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Bird Fly 1.PNG\")).convert_alpha(),\n pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Bird Fly 2.PNG\")).convert_alpha()]\nbooks_png = [pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Book 1.PNG\")).convert_alpha(),\n pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Book 2.PNG\")).convert_alpha()]\nfootball_ground_png = [pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Football Ground.PNG\")).convert_alpha(),\n pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Football Ground.PNG\")).convert_alpha()]\nfootball_air_png = [pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Football Air.PNG\")).convert_alpha(), \n pygame.image.load(os.path.join(\"Assets/Obstacles\", \"Football Air.PNG\")).convert_alpha()]\n\n\n# Class for Techie, playable character\nclass Player:\n x_pos = 100\n y_pos = 390\n y_pos_duck = 470\n static_jump_vel = 8.5\n\n def __init__(self):\n self.run_png = techie_run\n self.jump_png = techie_jump\n self.duck_png = techie_duck\n\n self.isRun = True\n self.isJump = False\n self.isDuck = False\n\n self.step_index = 0\n self.dynamic_jump_vel = self.static_jump_vel\n self.image = self.run_png[0]\n self.player_rect = self.image.get_rect()\n self.player_rect.x = self.x_pos\n self.player_rect.y = self.y_pos\n\n # used to check the status of the player and what is happening\n def update(self, user_input):\n if self.isRun:\n self.run()\n if self.isJump:\n self.jump()\n if self.isDuck:\n self.duck()\n\n if self.step_index >= 10:\n self.step_index = 0\n \n if user_input[pygame.K_UP] and not self.isJump:\n self.isRun = False\n self.isJump = True\n pygame.mixer.Sound.play(jump_sound) # jump sound plays when techie jumps\n self.isDuck = False\n elif user_input[pygame.K_DOWN] and not self.isJump:\n self.isRun = False\n self.isJump = False\n self.isDuck = True\n elif not (self.isJump or user_input[pygame.K_DOWN]):\n self.isRun = True\n self.isJump = False\n self.isDuck = False\n\n # running logic\n def run(self):\n self.image = self.run_png[self.step_index // 5]\n self.player_rect = self.image.get_rect()\n self.player_rect.x = self.x_pos\n self.player_rect.y = self.y_pos\n self.step_index += 1\n \n # jumping logic\n def jump(self):\n self.image = self.jump_png\n if self.isJump:\n self.player_rect.y -= self.dynamic_jump_vel * 8\n self.dynamic_jump_vel -= 0.65\n if self.dynamic_jump_vel < - self.static_jump_vel:\n self.isJump = False\n self.dynamic_jump_vel = self.static_jump_vel\n \n # jumping logic\n def duck(self):\n self.image = self.duck_png\n self.player_rect = self.image.get_rect()\n self.player_rect.x = self.x_pos\n self.player_rect.y = self.y_pos_duck\n self.step_index += 1\n\n # drawing Techie on the screen\n def draw(self, WINDOW):\n WINDOW.blit(self.image, 
(self.player_rect.x, self.player_rect.y))\n\n\n# class for every obstacle to inherit from\nclass Obstacle:\n def __init__(self, image, type):\n self.image = image\n self.type = type\n self.rect = self.image[self.type].get_rect()\n self.rect.x = WINDOW_WIDTH\n\n def update(self):\n self.rect.x -= game_speed\n if self.rect.x < -self.rect.width:\n obstacles.pop()\n \n def draw(self, WINDOW):\n WINDOW.blit(self.image[self.type], self.rect)\n\n\nclass Arduino(Obstacle):\n def __init__(self, image):\n self.type = randint(0, 1)\n super().__init__(image, self.type)\n self.rect.y = 500\n\n\nclass Sign(Obstacle):\n def __init__(self, image):\n self.type = randint(0, 1)\n super().__init__(image, self.type)\n self.rect.y = 385\n\n\nclass Books(Obstacle):\n def __init__(self, image):\n self.type = randint(0, 1)\n super().__init__(image, self.type)\n self.rect.y = 450\n\n\nclass PicnicTable(Obstacle):\n def __init__(self, image):\n self.type = randint(0, 1)\n super().__init__(image, self.type)\n self.rect.y = 500\n\n\nclass Squirrel(Obstacle):\n def __init__(self, image):\n self.type = randint(0, 1)\n super().__init__(image, self.type)\n self.rect.y = 480\n\n\nclass FootballGround(Obstacle):\n def __init__(self, image):\n self.type = randint(0, 1)\n super().__init__(image, self.type)\n self.rect.y = 500\n\n\nclass FootballAir(Obstacle):\n def __init__(self, image):\n self.type = 0\n super().__init__(image, self.type)\n self.rect.y = 350\n self.index = 0\n \n def draw(self, WINDOW):\n if self.index >= 9:\n self.index = 0\n WINDOW.blit(self.image[self.index//5], self.rect)\n\n\nclass Bird(Obstacle):\n def __init__(self, image):\n self.type = 0\n super().__init__(image, self.type)\n self.rect.y = 300\n self.index = 0\n \n def draw(self, WINDOW):\n if self.index >= 9:\n self.index = 0\n WINDOW.blit(self.image[self.index//5], self.rect)\n\n\n# Backgrounds\nmain_menu_bg_img = pygame.image.load(os.path.join(\"Assets/Screens\", \"main menu.png\")).convert_alpha()\nmain_menu_bg = pygame.transform.scale(main_menu_bg_img, (1280, 720))\n\nplay_screen_bg_img = pygame.image.load(os.path.join(\"Assets/Screens\", \"play screen.png\")).convert_alpha()\nplay_screen_bg = pygame.transform.scale(play_screen_bg_img, (1280, 720))\n\nlevel_select_bg_img = pygame.image.load(os.path.join(\"Assets/Screens\", \"level select.png\")).convert_alpha()\nlevel_select_bg = pygame.transform.scale(level_select_bg_img, (1280, 720))\n\nsettinngs_bg_img = pygame.image.load(os.path.join(\"Assets/Screens\", \"settings screen.png\")).convert_alpha()\nsettings_bg = pygame.transform.scale(settinngs_bg_img, (1280, 720))\n\n# level dictionary\n# the list contains path for [background, foreground, obstacles]\nlvls_dict = {\n \"iesb\": [\"Outside_IESB.png\", \"Road.PNG\", arduino_png],\n \"bogard\": [\"Bogard.png\", \"Road.PNG\", sign_png],\n \"clock\": [\"Clock_Tower.png\", \"Brick.PNG\", pn_table_png],\n \"lotm\": [\"Lady_of_Mist.png\", \"Brick.PNG\", squirrel_png],\n \"wyly\": [\"Wyly.png\", \"Brick.PNG\", books_png],\n \"endless\": [\"The_Joe.png\", \"Grass.PNG\", football_ground_png]}\n\n# Sound\n\n# main menu\ndef main_menu():\n click = False\n while True:\n\n # creating the background image\n WINDOW.blit(main_menu_bg, (0, 0))\n\n # position of the mouse\n mx, my = pygame.mouse.get_pos()\n\n # used to find position of mouse, helps define rectangle parameters\n # print(mx, my)\n\n # invisible rectangles for the buttons\n play_button = pygame.Rect(538, 272, 268, 97)\n level_select_button = pygame.Rect(537, 408, 268, 97)\n quit_button = 
pygame.Rect(538, 545, 268, 97)\n settings_button = pygame.Rect(1214, 10, 51, 44)\n\n # button functions\n if play_button.collidepoint((mx, my)):\n if click:\n play_screen()\n if level_select_button.collidepoint((mx, my)):\n if click:\n level_select()\n if quit_button.collidepoint((mx, my)):\n if click:\n pygame.quit()\n sys.exit()\n if settings_button.collidepoint((mx, my)):\n if click:\n settings()\n\n # used to draw rectangles to see their position\n # pygame.draw.rect(WINDOW, (255, 0, 0), button_1)\n # pygame.draw.rect(WINDOW, (255, 0, 0), button_2)\n # pygame.draw.rect(WINDOW, (255, 0, 0), button_3)\n # pygame.draw.rect(WINDOW, (255, 0, 0), button_4)\n\n # check for clicks or button push\n click = False\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n settings()\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n pygame.display.update()\n clock.tick(60)\n\n\n# play screen\ndef play_screen():\n click = False\n run = True\n while run:\n\n\n # draw background\n WINDOW.blit(play_screen_bg, (0, 0))\n\n # get position of mouse\n mx, my = pygame.mouse.get_pos()\n # print(mx, my)\n\n # invisible buttons\n new_game_button = pygame.Rect(292, 354, 269, 98)\n continue_button = pygame.Rect(719, 354, 269, 98)\n back_button = pygame.Rect(9, 6, 33, 34)\n\n # button functions\n if new_game_button.collidepoint((mx, my)):\n if click:\n game(\"iesb\")\n if continue_button.collidepoint((mx, my)):\n if click:\n game(last_level)\n if back_button.collidepoint((mx, my)):\n if click:\n run = False\n\n # check for clicks or button push\n click = False\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n run = False\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n # used to draw rectangles to see their position\n # pygame.draw.rect(WINDOW, (255, 0, 0), new_game_button)\n # pygame.draw.rect(WINDOW, (255, 0, 0), continue_button)\n # pygame.draw.rect(WINDOW, (0, 255, 0), back_button)\n\n pygame.display.update()\n clock.tick(60)\n\n\n# settings screen\ndef settings():\n global i, game_speed\n global DIFFICULTY\n click = False\n run = True\n while run:\n\n # draw background\n WINDOW.blit(settings_bg, (0, 0))\n\n # get position of mouse (get_pos takes no arguments)\n mx, my = pygame.mouse.get_pos()\n # print(mx, my)\n\n # invisible buttons\n easy_button = pygame.Rect(520, 232, 270, 99)\n medium_button = pygame.Rect(520, 376, 270, 99)\n hard_button = pygame.Rect(520, 518, 270, 99)\n back_button = pygame.Rect(11, 8, 33, 34)\n menu_button = pygame.Rect(1186, 7, 85, 32)\n\n # button functions\n if easy_button.collidepoint((mx, my)):\n if click:\n DIFFICULTY = \"easy\"\n i = 20\n game_speed = 20\n run = False\n if medium_button.collidepoint((mx, my)):\n if click:\n DIFFICULTY = \"medium\"\n i = 30\n game_speed = 30\n run = False\n if hard_button.collidepoint((mx, my)):\n if click:\n DIFFICULTY = \"hard\"\n i = 40\n game_speed = 40\n run = False\n if back_button.collidepoint((mx, my)):\n if click:\n run = False\n if menu_button.collidepoint((mx, my)):\n if click:\n main_menu()\n run = False\n\n # check for clicks or button push\n click = False\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n run = False\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n # draw rects\n 
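# (debug aid: uncomment the draw calls below to outline the invisible buttons on screen)\n 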
# pygame.draw.rect(WINDOW, (255, 0, 0), easy_button)\n # pygame.draw.rect(WINDOW, (255, 0, 0), medium_button)\n # pygame.draw.rect(WINDOW, (255, 0, 0), hard_button)\n # pygame.draw.rect(WINDOW, (0, 255, 0), back_button)\n # pygame.draw.rect(WINDOW, (0, 255, 0), menu_button)\n\n pygame.display.update()\n clock.tick(60)\n\n\n# level select screen\ndef level_select():\n click = False\n run = True\n while run:\n\n # draw background\n WINDOW.blit(level_select_bg, (0, 0))\n\n # get position of mouse\n mx, my = pygame.mouse.get_pos()\n # print(mx, my)\n\n # invisible buttons\n lvl_iesb_button = pygame.Rect(211, 296, 270, 99)\n lvl_bogard_button = pygame.Rect(534, 296, 270, 99)\n lvl_clock_button = pygame.Rect(875, 296, 270, 99)\n lvl_lotm_button = pygame.Rect(211, 490, 270, 99)\n lvl_wyly_button = pygame.Rect(534, 490, 270, 99)\n lvl_endless_button = pygame.Rect(875, 490, 270, 99)\n back_button = pygame.Rect(4, 5, 33, 34)\n\n # button functions\n if lvl_iesb_button.collidepoint((mx, my)):\n if click:\n game(\"iesb\")\n if lvl_bogard_button.collidepoint((mx, my)):\n if click:\n game(\"bogard\")\n if lvl_clock_button.collidepoint((mx, my)):\n if click:\n game(\"clock\")\n if lvl_lotm_button.collidepoint((mx, my)):\n if click:\n game(\"lotm\")\n if lvl_wyly_button.collidepoint((mx, my)):\n if click:\n game(\"wyly\")\n if lvl_endless_button.collidepoint((mx, my)):\n if click:\n game(\"endless\")\n if back_button.collidepoint((mx, my)):\n if click:\n run = False\n\n # check for clicks or button push\n click = False\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n run = False\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n # draw rects\n # pygame.draw.rect(WINDOW, (255, 0, 0), lvl_iesb_button)\n # pygame.draw.rect(WINDOW, (255, 0, 0), lvl_bogard_button)\n # pygame.draw.rect(WINDOW, (255, 0, 0), lvl_clock_button)\n # pygame.draw.rect(WINDOW, (255, 0, 0), lvl_lotm_button)\n # pygame.draw.rect(WINDOW, (255, 0, 0), lvl_wyly_button)\n # pygame.draw.rect(WINDOW, (255, 0, 0), lvl_endless_button)\n # pygame.draw.rect(WINDOW, (255, 0, 0), back_button)\n\n pygame.display.update()\n clock.tick(60)\n\n \n# game event Loop\ndef game(level_key):\n global points, obstacles, i, game_speed\n \n # creating the player\n techie = Player()\n\n # i is used to move the screen\n i = game_speed\n\n # used to keep score\n points = 0\n font = pygame.font.Font(\"freesansbold.ttf\", 20)\n def score():\n global points\n points += 1\n point_display = font.render(f\"Points: {points}\", True, (0, 0, 0))\n pd_rect = point_display.get_rect()\n pd_rect.center = (1200, 30)\n WINDOW.blit(point_display, pd_rect)\n\n\n # background\n bg_img = pygame.image.load(os.path.join(\"Assets/Backgrounds\", lvls_dict[level_key][0])).convert_alpha()\n bg = pygame.transform.scale(bg_img, (1280, 720))\n\n # foreground\n fg_img = pygame.image.load(os.path.join(\"Assets/Foreground\", lvls_dict[level_key][1])).convert_alpha()\n fg = pygame.transform.scale(fg_img, (1280, 570))\n\n run = 1\n while run:\n \n # drawing the bg\n WINDOW.fill((0, 0, 0))\n WINDOW.blit(bg, (i, 0))\n WINDOW.blit(bg, (WINDOW_WIDTH + i, 0))\n\n #drawing the fg\n WINDOW.blit(fg, (0, 150))\n WINDOW.blit(fg, (i, 150))\n WINDOW.blit(fg, (WINDOW_WIDTH + i, 150))\n\n # moving the bg & fg\n if i <= -WINDOW_WIDTH:\n WINDOW.blit(bg, (WINDOW_WIDTH + i, 0))\n WINDOW.blit(fg, (WINDOW_WIDTH + i, 150))\n i = 0\n \n # speed at which bg moves\n i -= game_speed\n\n # fps\n 
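# Clock.tick(60) also waits just long enough each pass to cap the loop at about 60 frames per second\n 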
clock.tick(60)\n\n # checking for quit or pause game\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n obstacles.pop()\n settings()\n \n # increment score\n score()\n\n # grabbing input for Player class\n user_input = pygame.key.get_pressed()\n\n # drawing player\n techie.draw(WINDOW)\n techie.update(user_input)\n\n # creating obstacles according to level\n if len(obstacles) == 0:\n if level_key == \"iesb\":\n obstacles.append(Arduino(lvls_dict[level_key][2]))\n elif level_key == \"bogard\":\n obstacles.append(Sign(lvls_dict[level_key][2]))\n elif level_key == \"clock\":\n obstacles.append(PicnicTable(lvls_dict[level_key][2]))\n elif level_key == \"lotm\":\n if randint(0, 1) == 0:\n obstacles.append(Squirrel(lvls_dict[level_key][2]))\n else:\n obstacles.append(Bird(bird_png))\n elif level_key == \"wyly\":\n if randint(0, 1) == 0:\n obstacles.append(Books(lvls_dict[level_key][2]))\n else:\n obstacles.append(Bird(bird_png))\n elif level_key == \"endless\":\n if randint(0, 1) == 0:\n obstacles.append(FootballGround(lvls_dict[level_key][2]))\n else:\n obstacles.append(FootballAir(football_air_png))\n\n # drawing obstacle and hit detection\n for obstacle in obstacles:\n obstacle.draw(WINDOW)\n obstacle.update()\n\n '''# used to show hitboxes\n pygame.draw.rect(WINDOW, (255, 0, 0), techie.player_rect, 2)\n pygame.draw.rect(WINDOW, (255, 0, 0), obstacle.rect, 2)'''\n\n # checking for collision of hitboxes\n if techie.player_rect.colliderect(obstacle.rect):\n pygame.time.delay(2000)\n obstacles.pop()\n run = 0\n\n pygame.display.update()\n\nmain_menu()\n\n\"\"\"Idea for end level implementation. Maybe have an if condition with the points??? I'll try to figure out a better way later -John\"\"\"\n\"\"\"Need to add a start_event and end_event for the levels\"\"\"\n\"\"\"Need to figure out how to \\\"continue\\\" \"\"\"\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"570921278","text":"# import the necessary pieces from Flask\r\nfrom flask import Flask,render_template, request,jsonify,Response\r\nimport os\r\nPEOPLE_FOLDER = os.path.join('static', 'images')\r\n\r\n#Create the app object that will route our calls\r\napp = Flask(__name__)\r\napp.config['UPLOAD_FOLDER'] = PEOPLE_FOLDER\r\napp.config['NAME'] = \"\"\r\napp.config['COUNT'] = 0\r\n# Add a single endpoint that we can use for testing\r\n@app.route('/')\r\ndef start():\r\n return render_template('start.html')\r\n@app.route('/q1')\r\ndef q1():\r\n app.config['NAME'] = request.args.get(\"fname\")\r\n return render_template('q1.html')\r\n@app.route('/q2', methods = ['POST'])\r\ndef q2():\r\n option = request.form['option']\r\n option = int(option)\r\n app.config['COUNT'] = app.config['COUNT'] + option\r\n return render_template('q2.html')\r\n@app.route('/q3', methods = ['POST'])\r\ndef q3():\r\n option = request.form['option']\r\n option = int(option)\r\n app.config['COUNT'] = app.config['COUNT'] + option\r\n return render_template('q3.html')\r\n@app.route('/q4', methods = ['POST'])\r\ndef q4():\r\n option = request.form['option']\r\n option = int(option)\r\n app.config['COUNT'] = app.config['COUNT'] + option\r\n return render_template('q4.html')\r\n@app.route('/q5', methods = ['POST'])\r\ndef q5():\r\n option = request.form['option']\r\n option = int(option)\r\n app.config['COUNT'] = app.config['COUNT'] + 
option\r\n return render_template('q5.html')\r\n@app.route('/q6', methods = ['POST'])\r\ndef q6():\r\n option = request.form['option']\r\n option = int(option)\r\n app.config['COUNT'] = app.config['COUNT'] + option\r\n return render_template('q6.html')\r\n@app.route('/q7', methods = ['POST'])\r\ndef q7():\r\n option = request.form['option']\r\n option = int(option)\r\n app.config['COUNT'] = app.config['COUNT'] + option\r\n return render_template('q7.html')\r\n@app.route('/q8', methods = ['POST'])\r\ndef q8():\r\n option = request.form['option']\r\n option = int(option)\r\n app.config['COUNT'] = app.config['COUNT'] + option\r\n return render_template('q8.html')\r\n@app.route('/q9', methods = ['POST'])\r\ndef q9():\r\n option = request.form['option']\r\n option = int(option)\r\n app.config['COUNT'] = app.config['COUNT'] + option\r\n return render_template('q9.html')\r\n@app.route('/q10', methods = ['POST'])\r\ndef q10():\r\n option = request.form['option']\r\n option = int(option)\r\n app.config['COUNT'] = app.config['COUNT'] + option\r\n return render_template('q10.html')\r\n@app.route('/result', methods = ['POST'])\r\ndef result():\r\n option = request.form['option']\r\n option = int(option)\r\n app.config['COUNT'] = app.config['COUNT'] + option\r\n val = app.config['COUNT']\r\n character = \"\"\r\n if val >= 1 and val <= 8:\r\n character = \"ross\"\r\n elif val >=9 and val <= 16:\r\n character = \"rachel\"\r\n elif val >= 17 and val <= 24:\r\n character = \"monica\"\r\n elif val >= 25 and val <= 32:\r\n character = \"chandler\"\r\n elif val >= 33 and val <= 40:\r\n character = \"joey\"\r\n else:\r\n character = \"phoebe\"\r\n filename = character + \".jpg\"\r\n full_filename = os.path.join(app.config['UPLOAD_FOLDER'], filename)\r\n return render_template('result.html', user_image = full_filename, user_name = app.config['NAME'], char_name = character)\r\n#When run from command line, start the server\r\nif __name__ == '__main__':\r\n app.run(debug = True)","sub_path":"flask website/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"61722257","text":"from ecldoc.parseDoc import construct_type\nfrom .genTXT import CPL, _break\nimport lxml.html as H\n\n##########################################################\n\ndef convertToMarkdown(html_text) :\n '''\n Convert HTML String to Markdown Format\n '''\n root = H.fragment_fromstring(html_text, create_parent='div')\n text = parseHTMltoMK(root)\n return text\n\ndef parseHTMltoMK(element) :\n '''\n Convert Single HTML element to Markdown Text (recursive)\n '''\n text = ''\n if element.text :\n text += element.text\n for e in element.iterchildren() :\n text += parseHTMltoMK(e)\n if e.tail :\n text += e.tail\n\n if element.tag in ['p', 'pre', 'ul', 'ol', 'table'] :\n text = '\\n' + text + '\\n'\n\n if element.tag == 'br' :\n text = '\\n'\n\n if element.tag == 'code' :\n text = '```' + text + '```'\n\n if element.tag == 'li' and element.getparent().tag == 'ul':\n text = '+ ' + text + '\\n'\n\n if element.tag == 'li' and element.getparent().tag == 'ol':\n text = '# ' + text + '\\n'\n\n if element.tag == 'td' :\n text = text + ' | '\n\n if element.tag == 'tr' :\n text = '| ' + text + '\\n'\n\n if element.tag == 'hr' :\n text = '\\n************\\n'\n\n if element.tag == 'a' :\n text = text + ' <' + element.attrib['href'] + '>'\n\n return text\n\n##########################################################\n\ndef parseTag(text, 
heading) :\n heading = heading\n text_break = _break(text, CPL * 0.5)\n spaces = [heading] + ([' ' * len(heading)] * (len(text_break)-1))\n text_list = [(a + b) for a, b in zip(spaces, text_break)]\n return text_list\n\ndef render_param(tag_param) :\n lines = []\n for t in tag_param.tuples['tuples'] :\n lines.append(parseTag(construct_type(t[2]) + ' --- ' + t[1], 'Parameter : ' + t[0] + ' '))\n return lines\n\ndef render_field(tag_field) :\n lines = []\n for t in tag_field.tuples['tuples'] :\n lines.append(parseTag(construct_type(t[2]) + ' --- ' + t[1], 'Field : ' + t[0] + ' '))\n return lines\n\ndef render_return(tag_return) :\n lines = []\n if len(tag_return.docstrings) == 0 and tag_return.return_type is None : return lines\n lines.append(parseTag(construct_type(tag_return.return_type) + ' ' + tag_return.return_desc, 'Return : '))\n return lines\n\ndef render_see(tag_see) :\n lines = []\n for t in tag_see.tuples['tuples'] :\n lines.append(parseTag(t[0], 'See : '))\n return lines\n\ndef render_parent(tag_parent) :\n lines = []\n for t in tag_parent.tuples['tuples'] :\n lines.append(parseTag(t[0] + ' <' + t[1] + '>', 'Parent : '))\n return lines\n\ndef render_content(tag_content) :\n mktext = convertToMarkdown(tag_content.text).split('\\n')\n lines = []\n for t in mktext :\n lines += _break(t, CPL*0.75)\n return lines\n\ndef render_firstline(tag_firstline) :\n lines = []\n lines.append([tag_firstline.text])\n return lines\n\ndef render_generaltag(tag_generaltag) :\n lines = []\n for t in tag_generaltag.tuples['tuples'] :\n lines.append(parseTag(t[0], tag_generaltag.tuples['name'] + ' : '))\n return lines\n\ntag_renders = { 'param' : render_param,\n 'field' : render_field,\n 'return' : render_return,\n 'see' : render_see,\n 'parent' : render_parent,\n 'content' : render_content,\n 'firstline' : render_firstline,\n 'generaltag' : render_generaltag\n }\n","sub_path":"src/ecldoc/Formats/TXT/tagTXT.py","file_name":"tagTXT.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"542877412","text":"\n# conv_mnist_train.py -- Training Neural Network with LeNet-5 .\nimport os\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\n# Load constants and forward propagation function in conv_mnist_inference.py\nimport conv_mnist_inference\n\n# Network Arguments\nBATCH_SIZE = 100\nLEARNING_RATE_BASE = 0.01\nLEARNING_RATE_DECAY = 0.99\nREGULARIZATION_RATE = 0.0001\nTRAINING_STEPS = 30000\nMOVING_AVERAGE_DECAY = 0.99\n\n# Model saved path .\nMODEL_SAVE_PATH = './model/'\nMODEL_NAME = \"model.ckpt\"\n\ndef train(mnist):\n\t# Define inputs placeholder which is a 4-dimension matrix .\n\t#\n\t# [samples_in_batch, sample_length, sample_width, sample_depth]\n\tx = tf.placeholder(tf.float32, [BATCH_SIZE,\n\t\t\t\t\tconv_mnist_inference.IMAGE_SIZE,\n\t\t\t\t\tconv_mnist_inference.IMAGE_SIZE,\n\t\t\t\t\tconv_mnist_inference.NUM_CHANNELS],\n\t\t\t\t\tname='x-input')\n\ty_ = tf.placeholder(tf.float32, [BATCH_SIZE, conv_mnist_inference.OUTPUT_NODE], name='y-input')\n\n\t# Regularizer defination .\n\tregularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n\n\t# Forward propagation by calling conv_mnist_inference.inference()\n\ty = conv_mnist_inference.inference(x, True, regularizer)\n\t# Record global training steps .\n\tglobal_step = tf.Variable(0, trainable=False)\n\n\t# Define loss-function , learning-rate, moving-average and training-steps .\n\t#\n\t# Apply 
MovingAverage to all trainable variables to get a robust model . \n\tvariable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)\n\tvariables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n\tcross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))\n\tcross_entropy_mean = tf.reduce_mean(cross_entropy)\n\t# Add cross-entropy and regularizer saved in collection .\n\tloss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))\n\n\t# learning_rate = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ^ \n\t# (global_step / (mnist.train.num_examples/BATCH_SIZE))\n\tlearning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,\n\t\t\t\t\t\t\tglobal_step,\n\t\t\t\t\t\t\tmnist.train.num_examples/BATCH_SIZE,\n\t\t\t\t\t\t\tLEARNING_RATE_DECAY, staircase=True)\n\ttrain_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n\n\t# Update both variables and the moving-averages .\n\t# tensorflow.no_op() is a operation placeholder .\n\twith tf.control_dependencies([train_step, variables_averages_op]):\n\t\ttrain_op = tf.no_op(name='train')\n\n\t# Initialize persistence class .\n\tsaver = tf.train.Saver()\n\n\twith tf.Session() as sess:\n\t\ttf.global_variables_initializer().run()\n\n\t\t# Raw training process , no validating and testing .\n\t\tfor i in range(TRAINING_STEPS):\n\t\t\txs, ys = mnist.train.next_batch(BATCH_SIZE)\n\n\t\t\t# Reshape xs to be a 4-dimension matrix .\n\t\t\treshaped_xs = np.reshape(xs, (BATCH_SIZE,\n conv_mnist_inference.IMAGE_SIZE,\n conv_mnist_inference.IMAGE_SIZE,\n conv_mnist_inference.NUM_CHANNELS))\n\n\t\t\t_, loss_value, step = sess.run([train_op, loss, global_step],\n\t\t\t\t\t\t\tfeed_dict={x: reshaped_xs, y_: ys})\n\n\t\t\t# Save model every 1000 steps .\n\t\t\tif i % 1000 == 0:\n\t\t\t# Calculate current loss on training dataset .\n\t\t\t\tprint(\"After %d training steps, loss on training batch is %g .\" %(step, loss_value))\n\n\t\t\t\t# Save current model .\n\t\t\t\tsaver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)\ndef main(argv=None):\n\tmnist = input_data.read_data_sets(\"./data/\", one_hot=True)\n\ttrain(mnist)\n\nif __name__ == '__main__':\n\ttf.app.run()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"tensorflow-tutorial/cnn_in_practice/conv_mnist_train.py","file_name":"conv_mnist_train.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"532404001","text":"def test_to_excel_multiindex_cols(self, merge_cells, engine, ext, frame):\n arrays = np.arange((len(frame.index) * 2)).reshape(2, (- 1))\n new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])\n frame.index = new_index\n new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2), (50, 1), (50, 2)])\n frame.columns = new_cols_index\n header = [0, 1]\n if (not merge_cells):\n header = 0\n frame.to_excel(self.path, 'test1', merge_cells=merge_cells)\n reader = ExcelFile(self.path)\n df = pd.read_excel(reader, 'test1', header=header, index_col=[0, 1])\n if (not merge_cells):\n fm = frame.columns.format(sparsify=False, adjoin=False, names=False)\n frame.columns = ['.'.join(map(str, q)) for q in zip(*fm)]\n tm.assert_frame_equal(frame, df)","sub_path":"Data 
Set/bug-fixing-4/06a6b496a4608bdcc54c8e0ad85197437257d9dc--bug.py","file_name":"06a6b496a4608bdcc54c8e0ad85197437257d9dc--bug.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"508070504","text":"T = int(input())\nfor idx in range(T):\n s = list(input())\n # stack used to check the incoming characters\n stack = []\n # check each value to be pushed onto the stack, one by one\n for i in range(len(s)):\n # if the stack is not empty and its last element equals the value being pushed,\n # remove the top value of the stack.\n if stack and stack[-1] == s[i]:\n stack.pop()\n # otherwise, append s[i].\n else:\n stack.append(s[i])\n print(f\"#{idx+1} {len(stack)}\")\n \n","sub_path":"SW Samsung Academy/Intermediate/stack1_반복문자지우기.py","file_name":"stack1_반복문자지우기.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"496829353","text":"import Algorithmia\n\ndef sentiment(text):\n input = {\n \"document\": text\n }\n client = Algorithmia.client('simLxeLhdObYV+LWP0AMm2xEt7D1')\n algo = client.algo('nlp/SentimentAnalysis/1.0.4')\n return algo.pipe(input).result#returns list of dictionaries,\"document\",\"sentiment\"\n#of value string and decimal respectively\n","sub_path":"Sentiment.py","file_name":"Sentiment.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"648072844","text":"# Description\n# Find any position of a target number in a sorted array. Return -1 if target does not exist.\n\n# Have you met this question in a real interview? \n# Example\n# Given [1, 2, 2, 4, 5, 5].\n\n# For target = 2, return 1 or 2.\n\n# For target = 5, return 4 or 5.\n\n# For target = 6, return -1.\n\n# Challenge\n# O(logn) time\n\ndef binary_search(arr, target):\n\t# compare against None so that a target of 0 is still searched for\n\tif not arr or target is None:\n\t\treturn -1\n\n\tstart, end = 0, len(arr) - 1\n\n\twhile start <= end:\n\t\tmid = (start + end) // 2\n\t\tif arr[mid] == target:\n\t\t\treturn mid\n\t\telif arr[mid] < target:\n\t\t\tstart = mid + 1\n\t\telse:\n\t\t\tend = mid - 1\n\n\treturn -1\n\nprint(binary_search([1, 2, 2, 4, 5, 5], 6))","sub_path":"Google/BInarySearch.py","file_name":"BInarySearch.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"4497400","text":"# put your code here.\ndef build_dict(fname):\n\t\"\"\"Given a file, builds a dictionary of each word in the file.\"\"\"\n\t\n\twith open(fname) as file:\n\n\t\tword_count_dict = {}\n\n\t\tfor line in file:\n\t\t\tline = line.rstrip()\n\t\t\tline =line.split(' ')\n\t\t\tfor word in line:\n\t\t\t\tword = word.strip('\"!.,?_;():')\n\t\t\t\tword = word.lower()\n\t\t\t\tword_count_dict[word] = word_count_dict.get(word, 0) + 1\n\t\t#return word_count_dict\n\n\t\tfor each in word_count_dict:\n\t\t\tcount = word_count_dict[each]\n\t\t\tprint(each, count)\n\n\t\treturn\n\n\n\nimport sys\nfname = sys.argv[-1]\n\nbuild_dict(fname)\n\n# def build_better_dict(fname):\n# \t\"\"\"Given a file builds a dictionary using counter collections.\"\"\"\n\n# \twith open(fname) as file:\n# \t\tfor line in file:\n# \t\t\tline = line.rstrip()\n# \t\t\tline =line.split(' ')\n# \t\t\tfor word in line:\n# \t\t\t\tword = word.strip('\"!.,?_;():')\n# \t\t\t\tword = word.lower()\n# 
\t\t\t\t","sub_path":"wordcount.py","file_name":"wordcount.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"391001641","text":"import numpy as np\nimport cv2\nfrom collections import deque\nfrom keras.models import load_model\n\ncnn_model = load_model('cnn_model.h5')\n# multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades\n\n#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\n\ncap = cv2.VideoCapture(0)\n\nX = 0\nY = 0\nW = 350\nH = 350\nletters = { 1: 'a', 2: 'b', 3: 'c', 4: 'd', 5: 'e', 6: 'f', 7: 'g', 8: 'h', 9: 'i', 10: 'j',\n11: 'k', 12: 'l', 13: 'm', 14: 'n', 15: 'o', 16: 'p', 17: 'q', 18: 'r', 19: 's', 20: 't',\n21: 'u', 22: 'v', 23: 'w', 24: 'x', 25: 'y', 26: 'z'}\n\nwhile 1:\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n for (x,y,w,h) in faces:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),3)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = frame[y:y+h, x:x+w]\n eyes = eye_cascade.detectMultiScale(roi_gray)\n for (ex,ey,ew,eh) in eyes:\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n\n blur = cv2.GaussianBlur(gray, (35, 35), 0)\n _, thresh_out = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n roi = thresh_out[X:X+W, Y:Y+W]\n contours, hierarchy = cv2.findContours(roi, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n cv2.drawContours(frame, contours, -1, (0,255,0), 3)\n # if counters are present\n if len(contours) > 0:\n contour = max(contours, key = cv2.contourArea)\n\n if cv2.contourArea(contour) > 2800:\n x, y, w, h = cv2.boundingRect(contour)\n newImage = roi[y:y + h, x:x + w]\n #cv2.imshow(\"frame\", newImage)\n newImage = cv2.resize(newImage, (28, 28))\n #newImage = np.array(newImage)\n prediction1 = cnn_model.predict(newImage.reshape(1,28,28,1))[0]\n print(prediction1)\n prediction1 = np.argmax(prediction1)\n print(prediction1)\n #print(prediction1)\n \n\n cv2.rectangle(frame, (X, Y), (X + W, Y + H), (0, 255, 0), 2)\n cv2.putText(frame, \"CNN Prediction(430k): \" + str(letters[int(prediction1)+1]), (10, 600),\n cv2.FONT_HERSHEY_COMPLEX, 1.0, (0, 0, 255), 2)\n frame1 = cv2.resize(frame, (0,0), fx=0.8, fy=0.8)\n cv2.imshow(\"OCR-Face-Eyes\", frame1)\n\n #cv2.imshow('frame',frame)\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"FullWorkingCV.py","file_name":"FullWorkingCV.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"392359369","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/src/sentry/src/sentry/models/environment.py\n# Compiled at: 2019-08-16 17:27:45\nfrom __future__ import absolute_import, print_function\nfrom django.db import IntegrityError, models, transaction\nfrom django.utils import timezone\nfrom sentry.constants import ENVIRONMENT_NAME_PATTERN, ENVIRONMENT_NAME_MAX_LENGTH\nfrom sentry.db.models import BoundedPositiveIntegerField, FlexibleForeignKey, Model, 
sane_repr\nfrom sentry.utils.cache import cache\nfrom sentry.utils.hashlib import md5_text\nimport re\nOK_NAME_PATTERN = re.compile(ENVIRONMENT_NAME_PATTERN)\n\nclass EnvironmentProject(Model):\n __core__ = False\n project = FlexibleForeignKey('sentry.Project')\n environment = FlexibleForeignKey('sentry.Environment')\n is_hidden = models.NullBooleanField()\n\n class Meta:\n app_label = 'sentry'\n db_table = 'sentry_environmentproject'\n unique_together = (('project', 'environment'), )\n\n\nclass Environment(Model):\n __core__ = False\n organization_id = BoundedPositiveIntegerField()\n projects = models.ManyToManyField('sentry.Project', through=EnvironmentProject)\n project_id = BoundedPositiveIntegerField(null=True)\n name = models.CharField(max_length=64)\n date_added = models.DateTimeField(default=timezone.now)\n\n class Meta:\n app_label = 'sentry'\n db_table = 'sentry_environment'\n unique_together = (('organization_id', 'name'), )\n\n __repr__ = sane_repr('organization_id', 'name')\n\n @classmethod\n def is_valid_name(cls, value):\n \"\"\"Limit length and reject problematic bytes\n\n If you change the rules here also update the event ingestion schema\n in sentry.interfaces.schemas\n \"\"\"\n if len(value) > ENVIRONMENT_NAME_MAX_LENGTH:\n return False\n else:\n return OK_NAME_PATTERN.match(value) is not None\n\n @classmethod\n def get_cache_key(cls, organization_id, name):\n return 'env:2:%s:%s' % (organization_id, md5_text(name).hexdigest())\n\n @classmethod\n def get_name_or_default(cls, name):\n return name or ''\n\n @classmethod\n def get_for_organization_id(cls, organization_id, name):\n name = cls.get_name_or_default(name)\n cache_key = cls.get_cache_key(organization_id, name)\n env = cache.get(cache_key)\n if env is None:\n env = cls.objects.get(name=name, organization_id=organization_id)\n cache.set(cache_key, env, 3600)\n return env\n\n @classmethod\n def get_or_create(cls, project, name):\n name = cls.get_name_or_default(name)\n cache_key = cls.get_cache_key(project.organization_id, name)\n env = cache.get(cache_key)\n if env is None:\n env = cls.objects.get_or_create(name=name, organization_id=project.organization_id)[0]\n cache.set(cache_key, env, 3600)\n env.add_project(project)\n return env\n\n def add_project(self, project, is_hidden=None):\n cache_key = 'envproj:c:%s:%s' % (self.id, project.id)\n if cache.get(cache_key) is None:\n try:\n with transaction.atomic():\n EnvironmentProject.objects.create(project=project, environment=self, is_hidden=is_hidden)\n cache.set(cache_key, 1, 3600)\n except IntegrityError:\n cache.set(cache_key, 1, 3600)\n\n return\n\n @staticmethod\n def get_name_from_path_segment(segment):\n if segment != 'none':\n return segment\n return ''","sub_path":"pycfiles/sentry-10.0.0-py27-none-any/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"401159069","text":"from django.db import models\n\n# Create your models here.\n\nEVENTS = (\n\t\t('OTHERS', \"OTHERS\"),\n\t\t('ANDROID APP DEVELOPMENT', \"ANDROID APP DEVELOPMENT\"),\n\t\t('ACTOMANIA', \"ACTOMANIA\"),\n\t\t('CALLIGRAPHY', \"CALLIGRAPHY\"),\n\t\t('CODING', \"CODING\"),\n\t\t('CHESS+CARROM', \"CHESS+CARROM\"),\n\t\t('EXHIBITION', \"EXHIBITION\"),\n\t\t('ENTERTAINMENT QUIZ', \"ENTERTAINMENT QUIZ\"),\n\t\t('GULLY CRICKET', \"GULLY CRICKET\"),\n\t\t('HIT AND WIN', \"HIT AND WIN\"),\n\t\t('ISLAMIC QUIZ', \"ISLAMIC QUIZ\"),\n\t\t('GENERAL QUIZ', \"GENERAL 
QUIZ\"),\n\t\t('GROUP DANCE', \"GROUP DANCE\"),\n\t\t('LASER TAG', \"LASER TAG\"),\n\t\t('LAN GAMING', \"LAN GAMING\"),\n\t\t('MASTERCHEF', \"MASTERCHEF\"),\n\t\t('NUKAD NAKTAK', \"NUTUK NAKTAK\"),\n\t\t('ROBO RACE', \"ROBO RACE\"),\n\t\t('STREET DANCE', \"STREET DANCE\"),\n\t\t('SNAP IT', \"SNAP IT\"),\n\t\t('SOLO DANCING', \"SOLO DANCING\"),\n\t\t('SOLO SINGING', \"SOLO SINGING\"),\n\t\t('ROBO RACE', \"ROBO RACE\"),\n)\n\n\nclass Contact(models.Model):\n\tevent = models.CharField(max_length=120, choices=EVENTS)\n\tname = models.CharField(max_length=120)\n\tphone_number = models.CharField(max_length=10, unique=True)\n\temail = models.EmailField()\n\n\n\tdef __unicode__(self):\n\t\treturn str(self.id)\n","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"346213244","text":"import requests\n\nfontemoedas = requests.get(\"https://free.currconv.com/api/v7/currencies?apiKey=4f0ca69d0747e3d4c624\").json()\nmoedas = list(fontemoedas['results'].keys())\n\nprint(\"Bem vindo ao Câmbio de Moedas\")\nwhile True:\n print(\"Escolha uma opção:\")\n print(\"-> 1 - Listar todas as moedas disponíveis\")\n print(\"-> 2 - Converter valor para outra moeda\")\n print(\"-> 3 - Cotação da moeda\")\n print(\"-> 4 - Sair\")\n\n try:\n escolhausuario = int(input())\n except:\n print(\"Escolha inválida\")\n break\n\n if escolhausuario == 1:\n print(moedas)\n\n elif escolhausuario == 2:\n try:\n print(\"Insira a moeda inicial:\")\n moedaInicial = input().upper()\n if moedaInicial not in moedas:\n raise Exception\n\n print(\"insira o valor a ser convertido: \")\n valorInicial = int(input())\n\n\n print(\"Inserir a moeda final:\")\n moedaConvertida = input().upper()\n if moedaConvertida not in moedas:\n raise Exception\n\n Id = moedaInicial + \"_\" + moedaConvertida\n resp = requests.get(\"http://free.currconv.com/api/v7/convert?apiKey=4f0ca69d0747e3d4c624&q=\" + Id + \"&compact=ultra\").json()\n taxa = resp[Id]\n valorFinal = valorInicial * taxa\n print(\"1 \" + moedaInicial + \" = \" + str(taxa) + \" \" + moedaConvertida)\n print(str(valorInicial) + \" \" + moedaInicial + \" = \" + str(valorFinal) + \" \" + moedaConvertida)\n\n data = open(Id + \".csv\", \"w+\")\n novoCambio = str(valorInicial) + \";\" + moedaInicial + \";\" + str(valorFinal) + \";\" + moedaConvertida\n data.write(novoCambio)\n data.close()\n\n print(\"Deseja continuar? (S/N):\")\n if input().upper() == \"N\":\n break\n except:\n print(\"Valor inválido\")\n\n elif escolhausuario == 3:\n try:\n print(\"Insira a moeda inicial:\")\n moedaInicial = input().upper()\n if moedaInicial not in moedas:\n raise Exception\n\n print(\"Insira a moeda final:\")\n moedaConvertida = input().upper()\n if moedaConvertida not in moedas:\n raise Exception\n\n Id = moedaInicial + \"_\" + moedaConvertida\n resp = requests.get(\"http://free.currconv.com/api/v7/convert?apiKey=4f0ca69d0747e3d4c624&q=\" + Id + \"&compact=ultra\").json()\n valorconvertido = resp[Id]\n print(\"1 \" + moedaInicial + \" = \" + str(valorconvertido) + \" \" + moedaConvertida)\n\n data = open(Id + \".csv\", \"w+\")\n novoCambio = moedaInicial + \",\" + str(valorconvertido) + \",\" + moedaConvertida\n data.write(novoCambio)\n data.close()\n\n print(\"Deseja continuar? 
(S/N):\")\n if input().upper() == \"N\":\n break\n except:\n print(\"Valor inválido\")\n elif escolhausuario == 4:\n break\n else:\n print(\"Escolha inválida\")\n break\n\n\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"629789400","text":"import numpy as np\nfrom keras.models import Model, Sequential, load_model\nfrom keras.layers import Dense, Input, BatchNormalization\nfrom keras.optimizers import Adam\nimport matplotlib.pyplot as plt\nimport csv\nfrom keras import backend as K\nimport keras\nfrom keras.callbacks import EarlyStopping\n# from tensorflow.keras import backend as K\n# from tensorflow.python.keras import backend as K\n\noutput_size=5 #參數大小\ninput_size = 8192 #輸入Feature大小\nClassSampleNum = 150 #每個類別的樣本數\nTestSetNum = 50\nf_min = 150\nf_max = 70000\nResolution = 140000 / 16384\ndef getSample(path):\n\tlabel = []\n\tinput = []\n\twith open(path, 'r', encoding='utf-8') as data:\n\t\tread = csv.reader(data)\n\t\tfirst_skip=False\n\t\tfor line in read:\n\t\t\tif first_skip:\n\t\t\t\tfirst_skip=False\n\t\t\t\tcontinue\n\t\t\t#one_hot=np.zeros(output_size)\n\t\t\t#one_hot[int(line[0])]=1\n\t\t\t\n\t\t\t# label.append(int(line[0]))\n\t\t\traw = []\n\t\t\tfor i in line[0::]:\n\t\t\t\tnum=float(i)\n\t\t\t\tif num>0:\n\t\t\t\t\traw.append(num)\n\t\t\t\telse:\n\t\t\t\t\traw.append(0)\n\t\t\traw=np.array(raw)\n\t\t\traw=raw/np.average(raw)\n\t\t\tinput.append(raw)\n\treturn np.array(input),np.array(label)\n\nx,y = getSample(\"sample_5_0825.csv\")\nprint(x.shape)\nprint(y.shape) \n\n\ndef Reorganize(x):\n x = np.reshape(x, (len(x), 8192))\n x_mean_col = np.mean(x , axis = 0)\n print(x_mean_col)\n x_mean = np.mean(x_mean_col[int((150/Resolution))::])*0.4\n print(x_mean)\n f = -Resolution\n flag = 0\n freq = []\n Idx = []\n #Select region\n print(\"Select region\")\n for i in range(input_size):\n f += Resolution\n if f > f_min and x_mean_col[i] > x_mean:\n if flag == 0:\n flag = 1\n freq.append([f])\n Idx.append([i])\n if f > f_min and x_mean_col[i] < x_mean:\n if flag == 1:\n flag = 0\n freq[len(freq)-1].append(f)\n Idx[len(Idx)-1].append(i)\n \n #merge\n print(\"merge\")\n j=0\n freq = np.array(freq)\n Idx = np.array(Idx)\n for i in range(len(freq)):\n if i == 0:\n continue\n if freq[j+1][0] - freq[j][1] < 300:\n freq[j][1] = freq[j+1][0]\n Idx[j][1] = Idx[j+1][0]\n freq = np.delete(freq, j+1, 0)\n Idx = np.delete(Idx, j+1, 0)\n else:\n j += 1\n \n #Reorganize\n print(\"Reorganize\")\n new_x = []\n for i in range(output_size*ClassSampleNum):\n new_x.append([])\n for idx in Idx:\n new_x[i].extend(x[i][idx[0]:idx[1]])\n \n # plt.plot(new_x[0][0:2369])\n # plt.title(\"Feature Map\")\n # plt.xlabel(\"Feature point\")\n # plt.ylabel(\"Amplitude\")\n # plt.show()\n \n new_x = np.reshape(new_x,(len(new_x), len(new_x[0])))\n print(np.shape(new_x))\n print(\"Freq. 
region : \")\n print(freq)\n print(Idx)\n \n # x_label = np.reshape(freq,(len(freq)*2))\n # y_label = np.ones(len(x_label))\n # plt.plot(np.arange(8191)*(Resolution)+Resolution, x_mean_col)\n # plt.bar(x_label,10,100, color='r')\n # plt.plot([0,70000], [x_mean, x_mean])\n # plt.xlabel(\"Frequency\")\n # plt.ylabel(\"Amplitude\")\n # plt.legend([\"Signal\", \"Threshold\", \"Select_Region\"])\n # plt.show()\n return new_x, len(new_x[0])\n\n\ndef LayerCreate(layerNum, maxNode, Activation, room=None):\n node = maxNode\n nodeList = []\n if room == None:\n room = 2\n # encoder\n model.add(Dense(maxNode, activation=Activation, input_shape = (input_size,)))\n print(Activation, input_size)\n nodeList.append(maxNode)\n if layerNum == 0 or maxNode == 1:\n model.add(Dense(input_size))\n return 0\n\n for i in range(layerNum):\n node = int(node/room)\n nodeList.append(node)\n model.add(Dense(node))\n if node <=1:\n break\n # decoder\n for i in range(len(nodeList)):\n model.add(Dense(nodeList[len(nodeList)-i-1]))\n model.add(Dense(input_size))\n\n# x, input_size = Reorganize(x)\nprint(np.shape(x), np.shape(y))\nstd_x = np.std(x)\nmean_x = np.mean(x)\nx = (x-mean_x)/std_x\nprint(std_x, mean_x, x)\n\n# in order to plot in a 2D figure\nencoding_dim = 2\nstep = 0\nloss_set = []\nlayer_set = []\nzoom_set = []\nlr_set = []\nnode_set = []\nlayer_set = []\nactivation_set = []\nloss_func_set = []\nevaluate_history = []\n\n# GS parameter\nNodes = [10,9,8,7,6,5,4,3,2,1]\nLayers = [2,1,0]\nLRs = [0.001,0.01]\nActivations = ['relu','tanh','sigmoid']\nLoss_funcs = ['mae','mse']\n# GS parameter:layer zoom lr activation loss_func\nfor Node in Nodes:\n for Layer in Layers:\n for LR in LRs:\n for Activation in Activations:\n for Loss_func in Loss_funcs:\n step = step + 1\n for j in range(output_size):\n print(\"Node:\", Node, \"LR:\", LR ,\"Activation:\", Activation, \"Loss_func:\", Loss_func)\n K.clear_session()\n model = Sequential()\n # model.add(BatchNormalization(axis=-1, epsilon=0.001, center=True, input_shape = (input_size,))) \n LayerCreate(Layer, Node, Activation, 2)\n print(model.summary())\n adam = Adam(lr=LR)\n model.compile(optimizer='adam', loss=Loss_func)\n ES_Acc = EarlyStopping(monitor='val_loss',min_delta=0, mode='min', verbose=1, patience=100)\n history = model.fit(x[j*ClassSampleNum:((j+1)*ClassSampleNum)-TestSetNum-1,:], x[j*ClassSampleNum:((j+1)*ClassSampleNum)-TestSetNum-1,:], \n epochs=600, batch_size=600, shuffle=True, callbacks=([ES_Acc]), \n validation_data=(x[(j*ClassSampleNum)+(ClassSampleNum-TestSetNum):(j+1)*ClassSampleNum-1], x[(j*ClassSampleNum)+(ClassSampleNum-TestSetNum):(j+1)*ClassSampleNum-1]))\n model.save('./AE_Model/model_'+repr(j)+'/model_'+repr(j)+'.h5')\n \n loss_set.append(min(history.history['loss']))\n node_set.append(Node)\n layer_set.append(Layer)\n lr_set.append(LR)\n activation_set.append(Activation)\n loss_func_set.append(Loss_func)\n with open('GS_AE_0807_5p_Nor_STD_layer_clear.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['Set', 'loss', 'Layer','Node','LR','Activation','loss_Func'])\n for i in range(len(loss_set)):\n writer.writerow([i+1, loss_set[i], layer_set[i], node_set[i], lr_set[i], activation_set[i], loss_func_set[i]])\n \n pred_Data = []\n for i in range(output_size*ClassSampleNum):\n if (i%ClassSampleNum >= (ClassSampleNum-TestSetNum)):\n pred_Data.append([x[i]])\n pred_Data = np.array(pred_Data)\n print(np.shape(pred_Data))\n class_model = []\n for i in range(output_size):\n 
class_model.append(load_model('./AE_Model/model_'+repr(i)+'/model_'+repr(i)+'.h5'))\n # class_model.append(tf.keras.models.load_model(\"model_\"+repr(i)+\".h5\"))\n evaluate_result = []\n for i in range(output_size):\n cc = []\n for j in range(output_size*TestSetNum):\n cc.append(class_model[i].evaluate(pred_Data[j],pred_Data[j]))\n evaluate_result.append(cc)\n \n ##save the loss of evaluate to csv\n evaluate_history.extend(evaluate_result)\n with open('GS_AE_0807_evaluateResult.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['Set','Phone_1', 'Phone_2', 'Phone_3','Phone_4','Phone_4'])\n writer.writerows(evaluate_history)\n\n #畫圖&存圖\n for i in range(output_size):\n plt.plot(evaluate_result[i])\n plt.grid(color='gray', linestyle='-', linewidth=0.5)\n plt.xlabel(' I-7p Sam_N9 Sam-A51 ASUS Sony')\n plt.ylabel('loss')\n plt.legend(['Model_I-7p','Model_Sam_N9','Model_Sam-A51','Model_ASUS','Model_Sony','Model_L6','Model_L7','Model_L8','Model_L9','Model_L10','Model_L11','Model_L12','Model_L13','Model_L14'])\n new_ticks = np.linspace(0, output_size*TestSetNum, output_size+1)\n plt.xticks(new_ticks)\n plt.title(\"Layer:\" +repr(Layer)+ \"Node:\" +repr(Node)+ \"LR:\"+ repr(LR)+ \"Acti.:\"+ repr(Activation)+ \"Loss_func:\"+ repr(Loss_func))\n filename1 = '.\\GS_Result/Set%03d.png' % (step)\n plt.savefig(filename1)\n plt.close()\n ","sub_path":"main_AE_newGS_lock.py","file_name":"main_AE_newGS_lock.py","file_ext":"py","file_size_in_byte":9075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"630312045","text":"import datetime, uuid, jwt\nfrom server import app, bcrypt, db\nfrom server.models.User import User\nfrom flask import jsonify, request, make_response, current_app\nfrom textwrap import dedent\nfrom flask_cors import cross_origin\n\n# Just for development purpose i.e. 
to display all users in the database\n@app.route(\"/users/display\", methods=[\"GET\"])\n@cross_origin()\ndef display():\n users = User.query.all()\n output = []\n\n for user in users:\n user_data = {}\n user_data[\"id\"] = user.id\n user_data[\"username\"] = user.username\n user_data[\"email\"] = user.email\n output.append(user_data)\n\n return jsonify({\"users\": output})\n\n\n# Login Endpoint\n@app.route(\"/users/login\", methods=[\"GET\", \"POST\"])\n@cross_origin()\ndef login():\n body = request.json\n # return jsonify({\"user\": auth})\n\n if not body:\n return make_response(\n \"Could not verify\",\n 401,\n {\"WWW-Authenticate\": 'Basic realm=\"Login required!\"'},\n )\n\n user = User.query.filter_by(email=body[\"email\"]).first()\n\n if not user:\n return make_response(\n \"Could not verify\",\n 401,\n {\"WWW-Authenticate\": 'Basic realm=\"Login required!\"'},\n )\n\n if bcrypt.check_password_hash(user.password, body[\"password\"]):\n token = jwt.encode(\n {\n \"id\": user.id,\n \"exp\": datetime.datetime.utcnow() + datetime.timedelta(minutes=30),\n },\n app.config[\"SECRET_KEY\"],\n )\n return jsonify({\"token\": token, \"message\": \"Login Successful!\"})\n\n return make_response(\n \"Could not verify\", 401, {\"WWW-Authenticate\": 'Basic realm=\"Login required!\"'}\n )\n\n\n@app.route(\"/users/register\", methods=[\"POST\"])\n@cross_origin()\ndef signup():\n \"\"\"\n This route expects a JSON request body in the format:\n {\n 'username' : \"abc\",\n 'password': \"abc\",\n 'email': \"abc@abc\"\n }\n \"\"\"\n try:\n body = request.json\n if body:\n username = body[\"username\"]\n password = body[\"password\"]\n email = body[\"email\"]\n hashed_password = bcrypt.generate_password_hash(password)\n\n check_user = User.query.filter_by(email=body[\"email\"]).first()\n\n if not check_user:\n user = User(\n id=str(uuid.uuid4()),\n username=username,\n password=hashed_password,\n email=email,\n )\n db.session.add(user)\n db.session.commit()\n\n current_app.logger.info(\n f\" Adding new user: {user.username}, email: {user.email}\"\n )\n return make_response(\n \"Registration Successful, {}!\".format(user.username),\n 201,\n )\n else:\n return make_response(\"User already registered! Please sign in.\", 409)\n except:\n current_app.logger.error(\" Unable to parse POST request.\")\n raise\n\n return make_response(\"Wrong parameters. 
Try again\", 400)\n","sub_path":"server/routes/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"524707239","text":"from matplotlib import pyplot as plt\nimport cv2\nimport numpy as np\n\ndef labelDetection(scrImagePath = 'zigzag2.jpeg'):\n\n img = cv2.imread(scrImagePath)\n #dictionary to store the values of the rectangles drawn around zigzag\n detectedLabels = {}\n dictionaryCounter = 0\n #filtering the image by using gaussian filter then\n img_blurred = cv2.GaussianBlur(img , (5,5), 0)\n img_gray = cv2.cvtColor(img_blurred,cv2.COLOR_BGR2GRAY)\n # Apply Threshold\n ret,thresholded_image = cv2.threshold(img_gray,130,255,0,cv2.THRESH_BINARY_INV)\n contours, hierarchy = cv2.findContours(thresholded_image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n # print('Contour len'+str(len(contours)))\n #A loop over contours to check which of them has lines that lies within the zigzag range\n for c in contours:\n approx = cv2.approxPolyDP(c, 0.01 * cv2.arcLength(c,True),True)\n x = approx.ravel()[0]\n y = approx.ravel()[1]\n print(len(approx))\n if len(approx) >= 15 and len(approx)<40:\n cv2.drawContours(img,c,-1,(0,255,0),10)\n x,y,w,h = cv2.boundingRect(approx)\n detectedLabels[dictionaryCounter] = [x,y,x+w,y+h]\n dictionaryCounter +=1\n print('The Value :'+str(len(approx)))\n\n\n cv2.drawContours(img,contours,-1,(0,255,0),10)\n\n titles = ['Image','Image Blurred','After Gray Effetct','Image After Thresholding']\n\n images = [img,img_blurred,img_gray,thresholded_image]\n print(detectedLabels)\n for c in detectedLabels.values():\n img=cv2.rectangle(img,(c[0],c[1]),(c[2],c[3]),(255,0,0),5)\n for i in range(len(images)):\n plt.subplot(2, 3, i+1), plt.imshow(images[i], 'gray')\n plt.title(titles[i])\n plt.xticks([]),plt.yticks([])\n\n plt.show()\n return detectedLabels\nlabelDetection('zigzag2.jpeg')\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"LabelDetection.py","file_name":"LabelDetection.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"421542706","text":"def question5():\n numbers = list(map(int, input().split()))\n del numbers[-1]\n numbers.reverse()\n string = ''\n for num in numbers:\n string += str(num) + ' '\n string.strip(' ')\n return string\nif __name__ == '__main__':\n print(question5(),end = '')","sub_path":"Code/CodeRecords/2949/60901/264324.py","file_name":"264324.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"274691620","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport json, datetime\nfrom common.base import Config, MyPymysql, my_log, MyGuid\n\n\n# 写入数据表信息\ndef CreateDataTableInfor(data):\n # DataTableInfo = {\"DataTableID\": DataTableID, \"QuesID\": QuesID, \"DataServerIP\": ConfInfo[\"host\"],\n # \"DataServerPort\": ConfInfo[\"port\"], \"DatabaseName\": ConfInfo[\"db_name\"],\n # \"DataTableName\": table_subname, \"DataTableStatus\": DataTableStatus}\n sql = \"insert into `meta_data_table` SET DataTableID='{}', QuesID={}, DataServerIP='{}',DataServerPort={}, DatabaseName='{}', \" \\\n \"DataTableName='{}', DataTableStatus={};\".format(\n data[\"DataTableID\"], data[\"QuesID\"],\n data[\"DataServerIP\"], data[\"DataServerPort\"],\n data[\"DatabaseName\"], data[\"DataTableName\"], data[\"DataTableStatus\"])\n ret = MyPymysql('metadata')\n 
ret.idu_sql(sql)\r\n ret.close()\n\n\n# Create data table --- deprecated\ndef create_data_table(vartypes, width, valuetypes, formats, varnames, tablename, libname=\"data\"):\n sql = \"\"\"CREATE TABLE `{}` (\"\"\".format(tablename)\n for i in range(len(varnames)):\n if valuetypes[i] == \"FLOAT\":\n num = width[i].split(\".\")\n IntegeNum = str(int(num[0]) + 10)\n s = \"`{}` {}({},{}) DEFAULT NULL\".format(varnames[i], valuetypes[i], IntegeNum, num[1])\n elif valuetypes[i] == \"DATETIME\":\n s = \"`{}` {} DEFAULT NULL\".format(varnames[i], valuetypes[i])\n elif valuetypes[i] == \"DATE\":\n s = \"`{}` {} DEFAULT NULL\".format(varnames[i], valuetypes[i])\n elif valuetypes[i] == \"VARCHAR\":\n s = \"`{}` {}({}) DEFAULT NULL\".format(varnames[i], valuetypes[i], width[i])\n else:\n s = \"`{}` {}({}) DEFAULT NULL\".format(varnames[i], valuetypes[i], width[i])\n\n if i < len(varnames) - 1:\n sql = sql + s + \",\"\n elif i == len(varnames) - 1:\n sql = sql + s\n\n sql = sql + \") ENGINE=InnoDB DEFAULT CHARSET=UTF8\"\n\n ret = MyPymysql('data')\n ret.idu_sql(\"\"\"DROP TABLE IF EXISTS {}\"\"\".format(tablename))\n ret.idu_sql(sql)\n ret.close()\n\n\nclass CreateDataTable_two():\n \"\"\"Optimized version of table creation\"\"\"\n\n def __init__(self, libname=\"notdbMysql\"):\n self.libname = libname\n self.res = MyPymysql(self.libname)\n\n def MetaDataTableName_sql(self):\n \"\"\"\n 1. First look up the DatabaseName of the last record in the meta_data database and get its table count; if it is greater than 10000, create a new database\n\n \"\"\"\n selectMetaDataTableName_sql = \"\"\"select DatabaseName from db_metadata.meta_data_table Order by DatabaseName limit 1;\"\"\"\n MetaDataTableName = self.res.selectone_sql(selectMetaDataTableName_sql)\n MetaDataTableName = MetaDataTableName if MetaDataTableName else {\"DatabaseName\": \"db_data_20170524164100\"}\n MetaDataTableNameCount_sql = \"\"\"SELECT COUNT(1) as count FROM information_schema.tables WHERE TABLE_SCHEMA = '%s';\"\"\" % \\\n MetaDataTableName[\"DatabaseName\"]\n MetaDataTableNameCount = self.res.selectone_sql(MetaDataTableNameCount_sql)\n return MetaDataTableNameCount[\"count\"], MetaDataTableName[\"DatabaseName\"]\n\n def CreateNewDataBase_sql(self):\n \"\"\"\n Create a new database\n :return:\n \"\"\"\n # use the \"db_data_\" prefix so the name matches the default \"db_data_20170524164100\" format above\n dataBaseNewName = \"db_data_\" + str(datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n createNewDataBase_sql = \"\"\"CREATE DATABASE %s CHARACTER SET=UTF8;\"\"\" % dataBaseNewName\n self.res.insert_sql(createNewDataBase_sql)\n return dataBaseNewName\n\n def CreateNewDataTable_sql(self, vartypes, width, valuetypes, formats, varnames, tablename):\n \"\"\"\n Create a new data table.\n First check whether the database already has 10000 tables:\n if it does, create a new database;\n if not, keep using it.\n :return:\n \"\"\"\n # determine the database name; if the database holds more than 10000 tables, create a new one\n count, DataBaseName = self.MetaDataTableName_sql()\n DataBaseName = DataBaseName if count < 10000 else self.CreateNewDataBase_sql()\n\n sql = \"\"\"CREATE TABLE {}.{} (\"\"\".format(DataBaseName, tablename)\n for i in range(len(varnames)):\n if valuetypes[i] == \"FLOAT\":\n num = width[i].split(\".\")\n IntegeNum = str(int(num[0]) + 10)\n s = \"`{}` {}({},{}) DEFAULT NULL\".format(varnames[i], valuetypes[i], IntegeNum, num[1])\n elif valuetypes[i] == \"DATETIME\":\n s = \"`{}` {} DEFAULT NULL\".format(varnames[i], valuetypes[i])\n elif valuetypes[i] == \"DATE\":\n s = \"`{}` {} DEFAULT NULL\".format(varnames[i], valuetypes[i])\n elif valuetypes[i] == \"VARCHAR\":\n s = \"`{}` {}({}) DEFAULT NULL\".format(varnames[i], valuetypes[i], width[i])\n else:\n s = \"`{}` {}({}) DEFAULT NULL\".format(varnames[i], valuetypes[i], width[i])\n\n if i < len(varnames) - 1:\n sql = sql + s + \",\"\n elif i == len(varnames) - 1:\n sql = sql + s\n\n sql = 
sql + \") ENGINE=InnoDB DEFAULT CHARSET=UTF8\"\n self.res.insert_sql(sql)\n return DataBaseName\n\n def close(self):\n self.res.close()\n\n\nclass writer_data_table():\n def __init__(self, libname=\"notdbMysql\"):\n self.libname = libname\n self.res = MyPymysql('notdbMysql')\n\n def insert_sql(self, DataBaseName, tablename, data):\n data = tuple(data)\n sql = \"\"\"insert INTO {}.{} VALUES {};\"\"\".format(DataBaseName, tablename, data)\n sql = sql.replace(\"None\", \"Null\")\n self.res.idu_sql(sql)\n\n def close(self):\n self.res.close()\n\n\nclass writer_information_tables():\n def __init__(self, libname=\"metadata\"):\n self.libname = libname\n self.res = MyPymysql('metadata')\n\n def insert_sql(self, data):\n # sql = \"\"\"insert INTO `meta_variable` SET DataTableID=%s, OrderNum=%s, VarName=\"%s\", VarType=\"%s\", VarWidth=%s,\n # VarDecimals=\"%s\", OriginFormats=\"%s\", VarMeasure=%s, VarValues=\"%s\", VarMissing=\"%s\", VarTopic=\"%s\",\n # VarLabel=\"%s\", OriginQuestion=\"%s\", OtherLangLabel=\"%s\", DataFrom=%s, DeriveFrom=\"%s\", VarRole=%s, VarVersion=%s,\n # ReviseFrom=%s, ReviseTime=\"%s\", ReviseUserID=%s, VarNote=\"%s\", VarStatus=%s;\"\"\"\n # value = (\n # data[\"DataTableID\"],\n # data[\"OrderNum\"],\n # data[\"VarName\"],\n # data[\"VarType\"],\n # data[\"VarWidth\"],\n # data[\"VarDecimals\"],\n # data[\"OriginFormats\"],\n # data[\"VarMeasure\"],\n # data[\"VarValues\"],\n # data[\"VarMissing\"],\n # data[\"VarTopic\"],\n # data[\"VarLabel\"],\n # data[\"OriginQuestion\"],\n # data[\"OtherLangLabel\"],\n # data[\"DataFrom\"],\n # data[\"DeriveFrom\"],\n # data[\"VarRole\"],\n # data[\"VarVersion\"],\n # data[\"ReviseFrom\"],\n # data[\"ReviseTime\"],\n # data[\"ReviseUserID\"],\n # data[\"VarNote\"],\n # data[\"VarStatus\"])\n # # print(sql)\n # # self.res.idu_sql(sql)\n # self.res.insert_sql(sql, value=value)\n sql = \"\"\"insert INTO `meta_variable` SET DataTableID={}, OrderNum={}, VarName='{}', VarType='{}', \n VarWidth={}, VarDecimals='{}', OriginFormats=\"%s\", VarMeasure={}, VarValues=\"%s\", VarMissing='{}', \n VarTopic='{}', VarLabel=\"%s\", OriginQuestion='{}', OtherLangLabel='{}', DataFrom='{}', DeriveFrom='{}', \n VarRole='{}', VarVersion={}, ReviseFrom={}, ReviseTime='{}', ReviseUserID={}, VarNote='{}', VarStatus={};\"\"\".format(\n data[\"DataTableID\"],\n data[\"OrderNum\"],\n data[\"VarName\"],\n data[\"VarType\"],\n data[\"VarWidth\"],\n data[\"VarDecimals\"],\n data[\"VarMeasure\"],\n\n data[\"VarMissing\"],\n data[\"VarTopic\"],\n\n data[\"OriginQuestion\"],\n data[\"OtherLangLabel\"],\n data[\"DataFrom\"],\n data[\"DeriveFrom\"],\n data[\"VarRole\"],\n data[\"VarVersion\"],\n data[\"ReviseFrom\"],\n data[\"ReviseTime\"],\n data[\"ReviseUserID\"],\n data[\"VarNote\"],\n data[\"VarStatus\"])\n value = (data[\"OriginFormats\"], data[\"VarValues\"], data[\"VarLabel\"])\n # print(sql)\n # self.res.idu_sql(sql)\n self.res.insert_sql(sql, value=value)\n\n def close(self):\n self.res.close()\n\n\nclass writer_option_table():\n def __init__(self, libname=\"metadata\"):\n self.libname = libname\n self.res = MyPymysql(self.libname)\n\n def insert_sql(self, data):\n sql = \"\"\"insert INTO `meta_option` SET UserID='{}', ProjectID='{}', QuesID='{}', ColumnID='{}', OptionID='{}', OptionNM='{}', EffectFlag=1, DataTableID='{}';\"\"\".format(\n data[\"UserID\"],\n data[\"ProjectID\"],\n data[\"QuesID\"],\n data[\"ColumnID\"],\n data[\"OptionID\"],\n data[\"OptionNM\"],\n data[\"DataTableID\"]\n )\n self.res.idu_sql(sql)\n\n def close(self):\n 
self.res.close()\n","sub_path":"models/UploadSpssModel.py","file_name":"UploadSpssModel.py","file_ext":"py","file_size_in_byte":9176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"351722671","text":"#encoding: utf8 \n\nfrom select import select\nfrom socket import socket\nfrom socket import AF_INET,SOCK_STREAM\nfrom recv_packet import RecvPacket\nfrom opccode import handlePacket\n\nclass TcpClient:\n\n def __init__(self,host=\"localhost\",port=8888):\n self.connectSocket=socket(AF_INET,SOCK_STREAM)\n self.sendData=\"\"\n self.recvData=\"\"\n self.buffers=[]\n\n def connect(self,host=\"localhost\",port=8888):\n self.connectSocket.connect((host,port))\n self.connectSocket.setblocking(0)\n\n def recvPackets(self):\n reads,_,errors=select([self.connectSocket],[],[],0.0001)\n if self.connectSocket in reads:\n self.read()\n\n def sendPackets(self):\n _,writes,errors=select([],[self.connectSocket],[],0.0001)\n if self.connectSocket in writes:\n self.write()\n\n def handlePackets(self):\n for buffer in self.buffers:\n packet=RecvPacket(buffer)\n handlePacket(packet)\n self.buffers=[]\n\n def read(self):\n data=self.recvData+self.connectSocket.recv(1024)\n dataLength=len(data)\n\n lengthBeginIndex=0\n contentBeginIndex=2\n\n if dataLength>contentBeginIndex:\n contentLength=ord(data[lengthBeginIndex])*0x100+ord(data[lengthBeginIndex+1])\n packetLength=contentLength+contentBeginIndex\n while dataLength>=packetLength:\n content=data[contentBeginIndex:contentBeginIndex+contentLength]\n\n self.buffers.append(content)\n\n data=data[contentBeginIndex+contentLength:]\n dataLength=len(data)\n if dataLength>=contentBeginIndex:\n contentLength=ord(data[lengthBeginIndex])*0x100+ord(data[lengthBeginIndex+1])\n packetLength=contentLength+contentBeginIndex\n else:\n break\n self.recvData=data\n\n def write(self):\n data=self.sendData\n amount=self.connectSocket.send(data)\n self.sendData=data[amount:]\n\ntcpClient=TcpClient()\n\n","sub_path":"client/tcp_client.py","file_name":"tcp_client.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"244058259","text":"\"\"\"schema init\n\nRevision ID: 2d2a045a6c0\nRevises: None\nCreate Date: 2013-11-20 11:23:16.139647\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2d2a045a6c0'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.create_table('stage',\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('display_name', sa.String(150), unique=True,\n nullable=False),\n sa.Column('quiz_name', sa.String(150), nullable=False),\n sa.Column('prev_stage_id', sa.Integer,\n sa.ForeignKey('stage.id'))\n )\n\n op.create_table('record',\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('session_id', sa.String(64), nullable=False),\n sa.Column('stage_id', sa.Integer,\n sa.ForeignKey('stage.id')),\n sa.Column('key', sa.String(64), nullable=False,\n unique=True),\n sa.Column('next_record_id', sa.Integer,\n sa.ForeignKey('record.id'))\n )\n\n\ndef downgrade():\n op.drop_table('record')\n op.drop_table('stage')\n","sub_path":"migrate/versions/2d2a045a6c0_schema_init.py","file_name":"2d2a045a6c0_schema_init.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"542876059","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 31 16:27:33 
2018\r\n\r\n@author: 1\r\n\"\"\"\r\nfrom math import log\r\nimport operator\r\n\r\ndef createDataSet():\r\n dataSet = [[1, 1, 'yes'],\r\n [1, 1, 'yes'],\r\n [1, 0, 'no'],\r\n [0, 1, 'no'],\r\n [0, 1, 'no']]\r\n labels = ['no surfacing','flippers']\r\n #change to discrete values\r\n return dataSet, labels\r\n\r\ndef calcShannonEnt(dataSet):\r\n numEntries=len(dataSet)\r\n labelCounts={}\r\n for featVec in dataSet:\r\n currentLabel=featVec[-1]\r\n if currentLabel not in labelCounts.keys():labelCounts[currentLabel]=0\r\n labelCounts[currentLabel]+=1\r\n shannonEnt=0.0\r\n for key in labelCounts:\r\n prob=float(labelCounts[key])/numEntries\r\n shannonEnt-=prob*log(prob,2)\r\n return shannonEnt\r\n\r\ndef splitDataSet(dataSet,axis,value):\r\n retDataSet=[]\r\n for featVec in dataSet:\r\n if featVec[axis]==value:\r\n reducedFeatVec=featVec[:axis]\r\n reducedFeatVec.extend(featVec[axis+1:])\r\n retDataSet.append(reducedFeatVec)\r\n return retDataSet\r\n\r\ndef chooseBestFeatureToSplit(dataSet):\r\n numFeatures=len(dataSet[0])-1\r\n baseEntropy=calcShannonEnt(dataSet)\r\n bestInfoGain=0.0;bestFeature=-1\r\n for i in range(numFeatures):\r\n featList=[example[i] for example in dataSet]\r\n uniqueVals=set(featList)\r\n newEntropy=0.0\r\n for value in uniqueVals:\r\n subDataSet=splitDataSet(dataSet,i,value)\r\n prob=len(subDataSet)/float(len(dataSet))\r\n newEntropy+=prob*calcShannonEnt(subDataSet)\r\n infoGain=baseEntropy-newEntropy\r\n if (infoGain>bestInfoGain):\r\n bestInfoGain=infoGain\r\n bestFeature=i\r\n return bestFeature\r\n\r\ndef majorityCnt(classList):\r\n classCount={}\r\n # iterate over the class labels (not the empty dict) and count each vote\r\n for vote in classList:\r\n if vote not in classCount.keys():classCount[vote]=0\r\n classCount[vote]+=1\r\n sortedClassCount=sorted(classCount.items(),key=operator.itemgetter(1),reverse=True)\r\n return sortedClassCount[0][0]\r\n\r\ndef createTree(dataSet,labels):\r\n classList=[example[-1] for example in dataSet]\r\n if classList.count(classList[0])==len(classList):\r\n return classList[0]#if all samples belong to the same class, return that class\r\n if len(dataSet[0])==1:\r\n return majorityCnt(classList)#only one feature left, return the majority class\r\n bestFeat=chooseBestFeatureToSplit(dataSet)\r\n bestFeatLabel=labels[bestFeat]\r\n myTree={bestFeatLabel:{}}\r\n del(labels[bestFeat])\r\n featValues=[example[bestFeat] for example in dataSet]\r\n uniqueVals=set(featValues)\r\n for value in uniqueVals:\r\n subLabels=labels[:]\r\n myTree[bestFeatLabel][value]=createTree(splitDataSet(dataSet,bestFeat,value),subLabels)\r\n return myTree\r\n\r\ndef classify(inputTree,featLabels,testVec):\r\n firstStr=list(inputTree.keys())[0] # dict_keys is not indexable in Python 3\r\n secondDict=inputTree[firstStr]\r\n featIndex=featLabels.index(firstStr)\r\n key = testVec[featIndex]\r\n valueOfFeat=secondDict[key]\r\n if isinstance(valueOfFeat,dict):\r\n classLabel=classify(valueOfFeat,featLabels,testVec)\r\n else: classLabel=valueOfFeat\r\n return classLabel\r\n\r\ndef storeTree(inputTree,filename):\r\n import pickle\r\n fw=open(filename,'wb') # pickle requires binary mode\r\n pickle.dump(inputTree,fw)\r\n fw.close()\r\ndef grabTree(filename):\r\n import pickle\r\n fr=open(filename,'rb')\r\n return pickle.load(fr)\r\n\r\nif __name__=='__main__':\r\n data,labels=createDataSet()\r\n tree=createTree(data,labels)\r\n print(tree)\r\n #storeTree(tree,'tree.txt')\r\n myTree=grabTree('classifierStorage.txt')\r\n label=classify(myTree,labels,[1,1])\r\n 
print(label)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"self_decision_tree.py","file_name":"self_decision_tree.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"400206773","text":"# -*- coding: utf-8 -*-\n\nfrom os import listdir\nfrom os.path import isfile\nfrom subprocess import call\nfrom sys import argv\n\n\"\"\"Simple script to convert the notebooks to HTML compatible with\nReveal.js\"\"\"\n\nsize = [\"1024\",\"768\"]\n\nif \"-w\" not in argv and \"-h\" not in argv:\n print(\"Generando presentación de %sx%s, al no haber recibido las flags -w -h\" \n % (size[0],size[1]))\nelse:\n size[0] = argv[argv.index(\"-w\")+1]\n size[1] = argv[argv.index(\"-h\")+1]\n print(\"Generando presentación de %sx%s...\" % (size[0],size[1]))\n\nnotebooks = [nb for nb in listdir(\".\") if isfile(nb) and nb.endswith(\".ipynb\")]\nfor notebook in notebooks:\n # Jupyter actually ships with the converter\n call([\"ipython\", \"nbconvert\", notebook, \"--to\", \"slides\"])\n # But it has a flaw: it loads the require.js library,\n # which conflicts with the notebook's other libs.\n # Besides, this way we can specify the width and height of the\n # presentation.\n with open(notebook.replace(\".ipynb\",\".slides.html\"), \"r\") as entrada:\n lineas = entrada.readlines()\n salida = []\n for linea in range(len(lineas)):\n if \"require.js\" in lineas[linea]:\n pass\n else:\n if \"Reveal.initialize\" in lineas[linea]:\n salida.append(lineas[linea]+\"width:\" + size[0]\n + \",\\nheight:\" + size [1] \n + \",\\nmargin: 0.1,\\nminScale: 0.05,\\nmaxScale: 3.5,\\n\"\n )\n elif \"\" in lineas[linea]:\n salida.append('\\n'\n + lineas[linea]\n )\n elif '' in lineas[linea]:\n salida.append('\\n')\n else:\n salida.append(lineas[linea])\n\n # Specific to this Notebook\n for linea in range(len(salida)-2):\n if 'img src=\"images/cover.png\"' in salida[linea]:\n del salida[linea - 4]\n del salida[linea - 4]\n\n for linea in range(len(salida)-2):\n if 'img src=\"images/scikit_main.png\"' in salida[linea]:\n del salida[linea - 5]\n del salida[linea - 5]\n\n for linea in range(len(salida)-2):\n if 'img src=\"images/scikit_main.png\"' in salida[linea]:\n tmp = salida[linea - 1]\n del salida[linea-1]\n salida.insert(linea-1, tmp.replace(\"\\n\\n    \", \"\"))
    \", \"\"))\n\n for linea in range(len(salida)-2):\n if 'img src=\"images/ml_map.png\"' in salida[linea]:\n del salida[linea - 4]\n del salida[linea - 4]\n\n for linea in range(len(salida)-2):\n if 'img src=\"images/tree.png\"' in salida[linea]:\n del salida[linea - 4]\n del salida[linea - 4]\n\n for linea in range(len(salida)-2):\n if 'img src=\"images/fold_cv.png\"' in salida[linea]:\n del salida[linea - 4]\n del salida[linea - 4]\n\n for linea in range(len(salida)-2):\n if 'img src=\"images/pipeline_flow.png\"' in salida[linea]:\n del salida[linea - 5]\n del salida[linea - 5]\n\n for linea in range(len(salida)-2):\n if 'img src=\"images/questions.png\"' in salida[linea]:\n del salida[linea - 4]\n del salida[linea - 4]\n\n # Fin de lo específico\n if \"--make-index.html\" in argv:\n with open(\"index.html\",\"w\") as archivo:\n archivo.write(\"\".join(salida))\n else:\n with open(notebook.replace(\".ipynb\",\".slides.html\"),\"w\") as archivo:\n archivo.write(\"\".join(salida))\nprint(\"Conversión realizada.\")\n\n","sub_path":"conversor.py","file_name":"conversor.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"327041760","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/insights/parsers/net_namespace.py\n# Compiled at: 2019-05-16 13:41:33\n\"\"\"\nNetworkNamespace = ``/bin/ls /var/run/netns``\n=============================================\n\nThis specs provides list of network namespace created on the host machine.\n\nTypical output of this command is as below::\n\n temp_netns temp_netns_2 temp_netns_3\n\nThe ``/bin/ls /var/run/netns`` is prefered over ``/bin/ip netns list`` because it works on\nall RHEL versions, no matter ip package is installed or not.\n\nExamples:\n >>> type(netns_obj)\n \n >>> netns_obj.netns_list\n ['temp_netns', 'temp_netns_2', 'temp_netns_3']\n >>> len(netns_obj.netns_list)\n 3\n\"\"\"\nfrom insights import Parser, parser, get_active_lines\nfrom insights.parsers import SkipException\nfrom insights.specs import Specs\n\n@parser(Specs.namespace)\nclass NetworkNamespace(Parser):\n\n def parse_content(self, content):\n if not content:\n raise SkipException('Nothing to parse.')\n self._netns_list = []\n for line in get_active_lines(content):\n self._netns_list.extend(line.split())\n\n @property\n def netns_list(self):\n \"\"\"\n This method returns list of network namespace created\n in process memory.\n\n Returns:\n\n `list` of network namepaces if exists.\n \"\"\"\n return self._netns_list","sub_path":"pycfiles/insights_core-3.0.163-py2.7/net_namespace.py","file_name":"net_namespace.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"401737904","text":"from CASIAProcessor import DataPreprocessor\r\nfrom CASIAProcessor import CNNModel, CNNModelDev\r\nfrom CASIAProcessor import ResNetLFWProcessor\r\nfrom CASIAProcessor import LFWProcessor\r\n\r\nfrom keras import backend as K\r\nimport time\r\n\r\n__author__ = 'Deliang Yang, Mengying Sun'\r\n# FILE_PATH = 'F:/cse802_data/casia_mtcnn_cropped2/'\r\n\r\nCLS_NUM = 225 # number of classes running every large step\r\n\r\n\r\ndef run():\r\n start_time = time.time()\r\n current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))\r\n print('Task begins. 
Time stamp: ' + current_time)\r\n\r\n function_flag = 5\r\n\r\n if function_flag == 0:\r\n \r\n for i in range(0, 47):\r\n epoch_start_time = time.time()\r\n start_index = CLS_NUM * i\r\n end_index = CLS_NUM * i + CLS_NUM\r\n print('Training range:', start_index, end_index)\r\n dp0 = DataPreprocessor(start_index, end_index)\r\n dp0.run()\r\n cnn_mdl0 = CNNModel(dp0.X_train, dp0.Y_train, 'v3')\r\n cnn_mdl0.run()\r\n if i in [8, 16, 24, 32, 40]:\r\n K.clear_session()\r\n del dp0\r\n del cnn_mdl0\r\n\r\n end_time = time.time()\r\n print('20 epochs, session time: ' + '%.3f' % (end_time - epoch_start_time) + ' s')\r\n print('---')\r\n\r\n elif function_flag == 1:\r\n dp0 = DataPreprocessor(0, 3)\r\n dp0.run()\r\n cnn_mdl0 = CNNModel(dp0.X_train, dp0.Y_train, 'v3')\r\n cnn_mdl0.inter_output()\r\n\r\n elif function_flag == 2:\r\n # convert LFW database to feature matrix CSV file\r\n lfwp0 = LFWProcessor()\r\n lfwp0.run()\r\n\r\n elif function_flag == 3:\r\n # generating the development data set\r\n dp0 = DataPreprocessor(0, 100)\r\n dp0.get_file_list()\r\n dp0.load_pic_dev_pkl()\r\n\r\n elif function_flag == 4:\r\n cnn_mdl_dev0 = CNNModelDev()\r\n cnn_mdl_dev0.dev_fitting()\r\n\r\n elif function_flag == 5:\r\n cnn_mdl0 = CNNModel(None, None, 'v10')\r\n for j in range(5):\r\n epoch_start_time = time.time()\r\n\r\n cnn_mdl0.flow_from_dir_fitting(str(j))\r\n\r\n # K.clear_session()\r\n\r\n end_time = time.time()\r\n\r\n print('10 epochs done, ' + '%.3f' % (end_time - epoch_start_time) + ' s. ', 'Total time lapse:', '%.3f' % (end_time - start_time))\r\n print('---')\r\n\r\n elif function_flag == 7:\r\n resnet0 = ResNetLFWProcessor()\r\n resnet0.csv_generator()\r\n\r\n\r\n end_time = time.time()\r\n current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))\r\n\r\n print(current_time)\r\n print('Total execution time: ' + '%.3f' % (end_time - start_time) + ' s')\r\n\r\n\r\nif __name__ == '__main__':\r\n run()\r\n","sub_path":"src/hpcc/cnn_main.py","file_name":"cnn_main.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"324512443","text":"from quip.db.row import Row\n\nclass DeferredRow(Row):\n def __init__(self, table, id):\n row_data = dict((col, None) for col in table.columns)\n row_data[table.id_column] = id\n super(DeferredRow, self).__init__(table, row_data)\n self.loaded = False\n\n def __getitem__(self, key):\n if key in self._user_data:\n return self._user_data[key]\n\n if not self.loaded and key != self.table.id_column:\n row = self.table.get(self[self.table.id_column])\n self._db_data = row.export()\n self.loaded = True\n return self._db_data[key]\n","sub_path":"deferred_row.py","file_name":"deferred_row.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"10025058","text":"from database import Database\r\nimport secrets\r\nimport random\r\n\r\nclass Summary():\r\n category = None\r\n summary = None\r\n answer = None\r\n\r\n def __init__(self):\r\n self.set_category()\r\n self.set_summary()\r\n\r\n\r\n def set_category(self):\r\n # List of categories created from the method names in the database file.\r\n databaselist = [category for category in dir(Database) if not category.startswith(\"__\")]\r\n categories_menu = {}\r\n\r\n # Get the user's category choice\r\n while True:\r\n print(\"Choose category:\\n\")\r\n # enumerate through categories in the database, build the category menu and print category 
options\r\n            for i, category in enumerate(databaselist):\r\n                categories_menu[i+1] = category\r\n                print(f\"{i+1}: {category.upper()}\")\r\n            category_num = input(\"\\n> \")\r\n\r\n            if category_num.isdigit() and 0 < int(category_num) < max(categories_menu.keys())+1:\r\n                break\r\n            else:\r\n                print(\"\\nIncorrect input\\n\")\r\n\r\n        category_num = int(category_num)\r\n        self.category = categories_menu[category_num]\r\n\r\n    def set_summary(self): \r\n        # getattr essentially calls the method using the chosen category (they have the same name)\r\n        category = getattr(Database, self.get_category())\r\n        category_dict = category()\r\n\r\n        # get random summary from category... returns tuple.. (\"naruto\",\"initially set in konoha village what what\")\r\n        category_list = list(category_dict.items())\r\n        random.shuffle(category_list)\r\n        random_answer_summary_pair = secrets.choice(category_list)\r\n\r\n        self.answer = random_answer_summary_pair[0]\r\n        self.summary = random_answer_summary_pair[1]\r\n    \r\n    def get_category(self):\r\n        return self.category\r\n\r\n    def get_summary(self):\r\n        return self.summary\r\n\r\n    def get_answer(self):\r\n        return self.answer\r\n","sub_path":"summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"537422176","text":"# -*- encoding: utf-8 -*-\nfrom django.conf.urls import url, include\nfrom . import views\n\nurlpatterns = [\n    url(r'^empleado/nuevo/$', views.EmpleadoCreateView.as_view(), name='create'),\n    #url(r'^empleado/(?P<pk>[-\\w\\W\\d]+)/modificar/$', views.EmpleadoUpdateView.as_view(), name='update'),\n    url(r'^empleado/(?P<pk>[-\\w\\W\\d]+)/modificar/$', views.EmpleadoUpdate.as_view(), name='update'),\n    url(r'^empleado/(?P<pk>[-\\w\\W\\d]+)/$', views.EmpleadoDetailView.as_view(), name='detail'),\n    url(r'^empleados/$', views.EmpleadoControlListView.as_view(), name='control'),\n    url(r'^empleados/(?P<pk>[-\\w\\W\\d]+)/$', views.EmpleadoControlListView.as_view(), name='control'),\n]\n","sub_path":"empleados/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"299309557","text":"import glfw\r\nfrom OpenGL.GL import *\r\n\r\ndef render():\r\n    # Clear color buffer\r\n    glClear(GL_COLOR_BUFFER_BIT)\r\n    glLoadIdentity()\r\n    glBegin(GL_TRIANGLES)\r\n    glVertex2f(0.0, 0.0)\r\n    glVertex2f(-1.0, -1.0)\r\n    glVertex2f(1.0, -1.0)\r\n    glEnd()\r\n    pass\r\n\r\ndef main():\r\n    if not glfw.init():\r\n        return\r\n\r\n    window = glfw.create_window(640, 480, \"Hello World\", None, None)\r\n\r\n    if not window:\r\n        glfw.terminate()\r\n        return\r\n\r\n    glfw.make_context_current(window)\r\n\r\n    while not glfw.window_should_close(window):\r\n        glfw.poll_events()\r\n\r\n        render()\r\n        glfw.swap_buffers(window)\r\n\r\n    glfw.terminate()\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"first_opengl_program.py","file_name":"first_opengl_program.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"275840449","text":"# Copyright 2019 NEC Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the 
License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\n[Overview]\n  Operation platform integration processing\n\n[Arguments]\n  \n\n[Return value]\n  \n\"\"\"\n\nimport json\nimport traceback\nimport requests\nimport urllib3\nimport ssl\nimport pika\nimport multiprocessing\nfrom urllib3.exceptions import InsecureRequestWarning\n\nfrom django.conf import settings\nfrom django.urls import reverse\n\nfrom libs.commonlibs.oase_logger import OaseLogger\nfrom libs.backyardlibs.monitoring_adapter.oase_monitoring_adapter_common_libs import _produce\nfrom libs.backyardlibs.monitoring_adapter.oase_monitoring_adapter_common_libs import _rabbitMQ_conf\nfrom libs.webcommonlibs.events_request import EventsRequestCommon\n\nurllib3.disable_warnings(InsecureRequestWarning)\nssl._create_default_https_context = ssl._create_unverified_context\nlogger = OaseLogger.get_instance()\n\n# Load configuration settings\n_mq_settings = None\n\n# RabbitMQ connection\n_channel = None\n_connection = None\n_properties = None\n\nmq_lock = multiprocessing.Lock()\n\n\ndef send_request(request_data_dic):\n    \"\"\"\n    [Method overview]\n      Send the formatted request data to RabbitMQ\n    \"\"\"\n\n    logger.logic_log('LOSI00001', 'request_data_dic: %s' % len(request_data_dic))\n\n    result = True\n    msg = ''\n    trace_id_list = []\n    data_count = 0\n\n    try:\n\n        data_count = len(request_data_dic['request'])\n        # Check that request data exists\n        if data_count <= 0:\n            result = False\n            logger.system_log('LOSM38004')\n            raise\n\n        trace_id_list = EventsRequestCommon.generate_trace_id(req=data_count)\n        if len(trace_id_list) != data_count:\n            result = False\n            logger.system_log('LOSM38022')\n            raise\n\n        for i, data in enumerate(request_data_dic['request']):\n\n            data['traceid'] = trace_id_list[i]\n            data = json.dumps(data)\n\n            _rabbitMQ_conf()\n\n            # Send to RabbitMQ\n            mq_lock.acquire()\n            _produce(data)\n            mq_lock.release()\n\n    except Exception as e:\n        if result:\n            result = False\n            logger.system_log('LOSM38006', traceback.format_exc())\n\n    logger.logic_log('LOSI00002', 'result: %s' % (result))\n\n    return result\n","sub_path":"oase-root/libs/backyardlibs/monitoring_adapter/Datadog/Datadog_request.py","file_name":"Datadog_request.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"1023449","text":"import re\r\n\r\n# digit at the beginning of the string and a digit at the end of the string\r\npattern_one = r'^\\d+.+\\d$'\r\n\r\n# A string that contains only whitespace characters or word characters\r\npattern_two = r'[\\s\\w]+'\r\n\r\n# A string containing no whitespace characters\r\npattern_three = r'[\\S]+'\r\n","sub_path":"python_l2_handson/exercise_four.py","file_name":"exercise_four.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"209073436","text":"def Dots(string):\r\n    list_string = string.split('...')\r\n    list_elemt = []\r\n    for item in list_string:\r\n        num_ini = item[0]\r\n        try:\r\n            if isinstance(int(num_ini), int):\r\n                num_ini = int(num_ini)\r\n        except:\r\n            num_ini = 0\r\n\r\n        num_fin = item[-1]\r\n        try:\r\n            # mirror the num_ini branch: convert to int when possible\r\n            if isinstance(int(num_fin), int):\r\n                num_fin = int(num_fin)\r\n        except:\r\n            num_fin = 0\r\n        list_elemt.append([num_ini, num_fin])\r\n\r\n    for index, item in enumerate(list_elemt):\r\n        pass\r\n        # 
...\r\n\r\n\r\nDots('arrb6...4xxbl5...eee5')","sub_path":"desafio2.py","file_name":"desafio2.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"326854721","text":"from blocks.model import Model\nfrom blocks.serialization import load_parameter_values\n\nfrom rnn.visualize.visualize_gates import (\n visualize_gates_soft, visualize_gates_lstm)\nfrom rnn.visualize.visualize_states import visualize_states\nfrom rnn.visualize.visualize_gradients import visualize_gradients\n# from rnn.visualize.visualize_gradients import visualize_jacobian\nfrom rnn.visualize.visualize_presoft import visualize_presoft\nfrom rnn.visualize.visualize_matrices import visualize_matrices\nfrom rnn.visualize.visualize_singular_values import visualize_singular_values\nfrom rnn.visualize.visualize_gradients_flow_pie import visualize_gradients_flow_pie\nfrom rnn.visualize.visualize_generate import visualize_generate\n\n\ndef run_visualizations(cost, updates,\n train_stream, valid_stream,\n args,\n hidden_states=None, gate_values=None):\n\n # Load the parameters from a dumped model\n assert args.load_path is not None\n model = Model(cost)\n model.set_parameter_values(load_parameter_values(args.load_path))\n\n # Run a visualization\n if args.visualize == \"generate\":\n visualize_generate(cost,\n hidden_states, updates,\n train_stream, valid_stream,\n args)\n\n elif args.visualize == \"gates\" and (gate_values is not None):\n if args.rnn_type == \"lstm\":\n visualize_gates_lstm(gate_values, hidden_states, updates,\n train_stream, valid_stream,\n args)\n elif args.rnn_type == \"soft\":\n visualize_gates_soft(gate_values, hidden_states, updates,\n train_stream, valid_stream,\n args)\n else:\n assert False\n\n elif args.visualize == \"states\":\n visualize_states(hidden_states, updates,\n train_stream, valid_stream,\n args)\n\n elif args.visualize == \"gradients\":\n visualize_gradients(hidden_states, updates,\n train_stream, valid_stream,\n args)\n\n elif args.visualize == \"jacobian\":\n visualize_jacobian(hidden_states, updates,\n train_stream, valid_stream,\n args)\n\n elif args.visualize == \"presoft\":\n visualize_presoft(cost,\n hidden_states, updates,\n train_stream, valid_stream,\n args)\n\n elif args.visualize == \"matrices\":\n visualize_matrices(args)\n\n elif args.visualize == \"trained_singular_values\":\n visualize_singular_values(args)\n\n elif args.visualize == \"gradients_flow_pie\":\n visualize_gradients_flow_pie(hidden_states, updates,\n args)\n\n else:\n assert False\n","sub_path":"rnn/visualize/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"54663514","text":"import cv2\nfrom VideoDevice import VideoDevice\nfrom TargetDetector import TargetDetector\nfrom TargetProcessor import TargetProcessor\n\ncv2.namedWindow(\"Camera Feed\", cv2.WINDOW_AUTOSIZE)\nvideo = VideoDevice()\nwhile True:\n #get image from camera\n frame = video.feed()\n #initialize targetDetector and targetProcessor\n targetDetector = TargetDetector()\n targetProcessor = TargetProcessor()\n\n #threshold\n targetDetector.putImage(frame)\n threshed = targetDetector.threshold()\n\n #contour\n targetProcessor.putThreshed(threshed,frame)\n contour = targetProcessor.contour()\n\n cv2.drawContours(frame, contour, -1, (10,255,255), 5)\n #cv2.imshow(\"Camera Feed\", frame)\n targetProcessor.calculateData()\n key = 
cv2.waitKey(10)\n if key == 27:\n cv2.destroyWindow(\"Camera Feed\")\n break\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"368420460","text":"\"\"\"\n find the largest file of a given type in an arbitrary directory tree\n\"\"\"\n\nimport sys, os, pprint\n\ntrace = 0 # debug flag 0=off, 1=dirs, 2=+files\ndirname, extname = os.curdir, '.py'\nif len(sys.argv) > 1: dirname = sys.argv[1]\nif len(sys.argv) > 2: extname = sys.argv[2]\nif len(sys.argv) > 3: trace = int(sys.argv[3])\n\n\ndef tryprint(arg):\n try:\n print(arg)\n except UnicodeEncodeError:\n print(arg.encode())\n\n\nvisited = {}\nallsizes = []\nfor (path, dir, files) in os.walk(dirname):\n if trace: tryprint(path)\n path = os.path.normpath(path)\n fixcase = os.path.normcase(path)\n if fixcase in visited:\n if trace: tryprint('skipping ' + path)\n else:\n visited[fixcase] = True\n for filename in files:\n if filename.endswith(extname):\n if trace > 1: tryprint('+++' + filename)\n fullname = os.path.join(path, filename)\n try:\n bytesize = os.path.getsize(fullname)\n linesize = len(open(fullname, 'rb').readlines())\n except Exception:\n print('error', sys.exc_info()[0])\n else:\n allsizes.append((bytesize, linesize, fullname))\nfor (title, key) in [('byte', 0), ('lines', 1)]:\n print('Sort by {}'.format(title))\n allsizes.sort(key=lambda x: x[key])\n pprint.pprint(allsizes[:3])\n pprint.pprint(allsizes[-3:])\n","sub_path":"System/FileTools/bigext-tree.py","file_name":"bigext-tree.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"538089200","text":"def steps(line):\n for segment in line.split(\",\"):\n if segment[0] == 'U':\n delta = (0, -1)\n elif segment[0] == 'D':\n delta = (0, 1)\n elif segment[0] == 'L':\n delta = (-1, 0)\n elif segment[0] == 'R':\n delta = (1, 0)\n\n yield from [delta] * int(segment[1:])\n\n\ndef wire(line):\n x, y = 0, 0\n for dx, dy in steps(line):\n yield (x, y)\n x += dx\n y += dy\n yield (x, y)\n\n\ndef manhattan(pos):\n return abs(pos[0]) + abs(pos[1])\n\n\nwith open(\"input.txt\") as f:\n wires = [list(wire(line)) for line in f]\n\ncollisions = set(wires[0][1:]) & set(wires[1][1:])\n\nprint(\"Part one: %d\" % min(manhattan(pos) for pos in collisions))\nprint(\"Part two: %d\" % min(wires[0].index(pos) + wires[1].index(pos) for pos in collisions))\n","sub_path":"3/frerich_main.py","file_name":"frerich_main.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"406334439","text":"import classes\nimport config\nimport utils\n\n\ndef main():\n\n # Create object with players\n players = utils.get_players(config.N_PLAYERS)\n\n # Create board with properties\n board = utils.get_board(config.BOARD_FILENAME)\n\n # Continue playing as long as more than one player remains in game\n while len(players) > 1:\n\n # Take turns\n for turn in range(config.N_PLAYERS):\n\n # Define current player\n curr_player = players[turn]\n\n # Double roll counter\n n_double_roll = 0\n\n # Continue turn until player rolls no doubles or goes to jail\n while True:\n\n # Roll dice\n roll, rolled_double = utils.roll_dice()\n\n # Update double roll counter\n n_double_roll += int(rolled_double)\n\n # If player is in jail\n if players[turn].jail_turns > 0:\n\n # Select jail strategy\n 
curr_player.choose_jail_strategy(rolled_double)\n\n # If player is still in jail\n if curr_player.jail_turns > 0:\n break\n\n # If player rolled less than 3 doubles\n if n_double_roll < 3:\n\n # Move player\n curr_player.move(roll)\n\n # Define current board space\n curr_space = board[curr_player.position]\n\n for case in classes.Switch(type(curr_space).__name__):\n if case('Street'):\n curr_player.evaluate_buy(curr_space, players)\n\n # If no double rolled, end turn\n if not rolled_double:\n break\n\n # Otherwise, send player to jail and end turn\n elif n_double_roll == 3:\n\n curr_player.go_to_jail()\n break\n\n # Now here is where we start interacting with the board\n # type(board[4]).__name__\n","sub_path":"src/monopoly.py","file_name":"monopoly.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"220538709","text":"import numpy as np\nimport random\nimport math\n\nnp.random.seed(42)\n\nclass mlp:\n\n def __init__(self, inputs, targets, nhidden, eta, linear=False):\n\n self.eta = eta #learning rate\n\n self.linear = linear #activation on the output\n\n self.nhidden = nhidden #number of nodes in hidden layer\n self.n_in = inputs.shape[1] #Number of nodes in input layer\n self.n_out = targets.shape[1] #NUmber of nodes in output layer\n\n # First weight layer\n self.v = np.random.uniform(-0.7,0.7,((self.n_in, self.nhidden)))\n # Second weight layer\n self.w = np.random.uniform(-0.7,0.7,((self.nhidden, self.n_out)))\n #Hidden layer activation levels\n self.a = np.zeros(self.nhidden)\n\n #Hidden bias\n self.hidden_bias = np.zeros(self.nhidden) + 0.01\n\n #Output bias\n self.output_bias = np.zeros(self.n_out) + 0.01\n\n # Sigmoid activation function\n def sigmoid(self, h):\n\n y = 1 / (1 + np.exp(-h))\n\n return y\n\n # Derivative of the sigmoid function\n def delta_sigmoid(self, h):\n\n y = self.sigmoid(h) * (1 - self.sigmoid(h))\n\n return y\n\n # Error at the output\n def delta_output(self, out_pred, out_real):\n\n delta = (out_pred - out_real)\n\n if (not self.linear):\n delta = delta*self.delta_sigmoid(out_real)\n\n return delta\n\n # Error in the hidden layer\n def delta_hidden(self, output_error):\n\n delta = self.a*(1 - self.a) * (output_error @ self.w.T)\n return delta\n\n # See report for details\n def earlystopping(self, inputs, targets, valid, validtargets):\n\n n_epochs = 10\n prev_error = 1\n\n for i in range(n_epochs):\n\n self.train(inputs, targets)\n score = self.score(valid, validtargets)\n error = 1 - score\n\n error_change = (error - prev_error)\n\n if (i != 0 and error_change > 0):\n print('-- Earlystopping after %i epochs --' % (i+1))\n break\n else:\n prev_error = error\n\n #Training with random batches of size 100\n #Batches were not used for the regression problem\n def train(self, inputs, targets, iterations=10):\n\n n_train = inputs.shape[0]\n k = 100\n\n for i in range(iterations):\n\n #Shuffle data\n p = np.random.permutation(n_train)\n inputs, targets = inputs[p], targets[p]\n\n inp = inputs[0:k]\n tar = targets[0:k]\n\n n_batch = inp.shape[0]\n\n for j in range(n_batch):\n\n pred = self.forward(inp[j])\n true = tar[j]\n\n output_error = self.delta_output(pred, true)\n hidden_error = self.delta_hidden(output_error)\n\n output_bias_gradient = np.sum(output_error, axis=0)\n hidden_bias_gradient = np.sum(hidden_error, axis=0)\n\n updatew = np.ones(self.w.shape)\n updatew = (updatew * output_error).T * self.a * self.eta\n self.w -= updatew.T\n\n updatev = 
np.ones(self.v.shape)\n updatev = (updatev * hidden_error).T * inp[j,:] * self.eta\n self.v -= updatev.T\n\n self.output_bias -= self.eta * output_bias_gradient\n self.hidden_bias -= self.eta * hidden_bias_gradient\n\n #Feed the network forward\n def forward(self, inputs):\n\n # Calculate hidden layer\n self.a = self.sigmoid((self.v.T @ inputs) + self.hidden_bias)\n\n # Calculate output\n out = ((self.w.T @ self.a) + self.output_bias)\n\n if (not self.linear):\n out = self.sigmoid(out)\n\n return out\n\n #Optional confusion matrix\n def confusion(self, inputs, targets):\n\n n_data = inputs.shape[0]\n\n matrix = np.zeros((self.n_out, self.n_out))\n\n error = 0\n\n for i in range(n_data):\n\n pred = self.forward(inputs[i])\n true = targets[i]\n\n pred_class = np.argmax(pred)\n true_class = np.argmax(true)\n\n if (pred_class != true_class):\n error += 1\n\n matrix[pred_class][true_class] += 1\n\n prediction_rate = (n_data - error)/n_data\n\n print('Prediction rate: %f' %(prediction_rate))\n print('Confusion Matrix:')\n print('Rows - Predicted values\\nColumns - True values')\n\n print(matrix)\n\n return prediction_rate\n\n #Accuracy score\n def score(self, inputs, targets):\n\n n = inputs.shape[0]\n\n score = 0.0\n\n for i in range(n):\n\n pred = self.forward(inputs[i])\n true = targets[i]\n\n pred_class = np.argmax(pred)\n true_class = np.argmax(true)\n\n if (pred_class == true_class):\n score += 1\n\n return score/n\n","sub_path":"Project 2/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"189293812","text":"#! /usr/bin/env python\nfrom __future__ import print_function\nfrom analyze_assembly import _load_coords\nimport argparse\nfrom collections import defaultdict\nimport screed\n\ndef main():\n p = argparse.ArgumentParser()\n p.add_argument('--min-ident', type=float, default=99.0)\n p.add_argument('--min-length', type=int, default=500)\n p.add_argument('coords_file')\n p.add_argument('contigs_files', nargs='*')\n a = p.parse_args()\n\n min_ident = a.min_ident\n min_length = a.min_length\n\n include_only = set()\n if a.contigs_files:\n for filename in a.contigs_files:\n for record in screed.open(filename):\n include_only.add(record.name.split()[0])\n\n matches = defaultdict(int)\n\n for s1, e1, s2, e2, ident, name1, name2 in _load_coords(a.coords_file):\n length = (e1 - s1 + 1)\n if length >= min_length and ident >= min_ident:\n if not include_only or name1 in include_only:\n matches[name1] += length\n\n matches_list = list(matches.items())\n matches_list.sort(key=lambda x: -x[1])\n\n sum_total = 0.\n for k, v in matches_list:\n print('{},{}'.format(k, v))\n sum_total += v\n\n print('TOTAL,{}'.format(sum_total))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pipeline/summarize-coords.py","file_name":"summarize-coords.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"580777357","text":"import glob\nfrom os import system\nimport re\n\n\ndef sh(script):\n system(\"bash -c '%s'\" % script)\n\n\ndataNames = \"FASTA/data.names\"\nlistOfClusters = []\nlistOfClasses = []\ncluster_seqs_stats_path = \"RESULTS/*.cluster.all\"\ncluster_seqs_stats_files = glob.glob(cluster_seqs_stats_path)\n\nblackList = []\nnumberOfClusters = 0\nfor singleFile in sorted(cluster_seqs_stats_files):\n numberOfClusters += 1\n with open(singleFile, \"r\") as f:\n for line in f.readlines():\n 
uniqueId = line.split()[6]\n            clustNum = line.split()[1]\n            rnaClass, sep, tail = uniqueId.partition(\"_\")\n            listOfClasses.append(rnaClass)\n            listOfClusters.append(clustNum)\n            with open(dataNames, \"r\") as names:\n                for line in names.readlines():\n                    fullUniqeId = line.split()[3]\n                    rnaClass, sep, tail = fullUniqeId.partition(\"_\")\n                    short_unique = re.findall(\"_\".join([\"[^_]+\"] * 2), fullUniqeId)[0]\n                    if short_unique == uniqueId:\n                        blackList.append(uniqueId)\n\nnumberOfClusters += 1  # 1 cluster for all unassigned seqs\nwith open(dataNames, \"r\") as names:\n    for line in names.readlines():\n        fullUniqeId = line.split()[3]\n        rnaClass, sep, tail = fullUniqeId.partition(\"_\")\n        short_unique = re.findall(\"_\".join([\"[^_]+\"] * 2), fullUniqeId)[0]\n        if short_unique not in blackList:\n            listOfClasses.append(rnaClass)\n            listOfClusters.append(str(numberOfClusters))\n            numberOfClusters += 1  # separate cluster for all unassigned seqs\n\ntoWrite = \"\"\nfor i in range(len(listOfClusters)):\n    toWrite += listOfClasses[i] + \"\\t\" + listOfClusters[i] + '\\n'\nwith open(\"RESULTS/fullTab.tabular\", \"w\") as full:\n    full.write(toWrite)\n","sub_path":"tools/GraphClust/CollectResults/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"389495614","text":"#!/usr/bin/python\n#\n# Copyright 2014 Quip\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Backs up a Quip account to a local folder.\n\nThis is a sample app for the Quip API - https://quip.com/api/.\n\"\"\"\n\nimport argparse\nimport datetime\nimport logging\nimport os.path\nimport re\nimport shutil\nimport sys\nimport urllib3\nimport xml.etree.cElementTree\nimport xml.sax.saxutils\nimport json\n\nimport quip\n\n_BASE_DIRECTORY = os.path.dirname(os.path.abspath(__file__))\n_STATIC_DIRECTORY = os.path.abspath(os.path.join(_BASE_DIRECTORY, 'static'))\n_TEMPLATE_DIRECTORY = os.path.abspath(\n    os.path.join(_BASE_DIRECTORY, 'templates'))\n_OUTPUT_STATIC_DIRECTORY_NAME = '_static'\n_MAXIMUM_TITLE_LENGTH = 64\n\nclass HeadersDict(dict):\n    \"\"\"Half-assed substitution for the `httplib.HTTPMessage` (I think?) that is\n    the object returned by `urllib2.urlopen().info()`. The important part is\n    it's dict-like and treats keys as case-insensitive during reads.\n\n    `httplib.HTTPMessage` has no API documentation and says \"It is not directly\n    instantiated by the users.\" [1], so us users now got this. 
It's used for\n    the `Blob#headers` property and returned from `Blob#info()`.\n\n    [1]: https://docs.python.org/2/library/httplib.html#httplib.HTTPMessage\n    \"\"\"\n    \n    def __getitem__(self, key):\n        return super(HeadersDict, self).__getitem__(key.lower())\n\n    def get(self, key, *args, **kwds):\n        return super(HeadersDict, self).get(key.lower(), *args, **kwds)\n\nclass Blob(object):\n    \"\"\"\n    What `BaqupClient#get_blob()` returns instead of the \"file-like\" \n    whatever [1] that `quip.QuipClient#get_blob()` does.\n    \n    This change was made to facilitate caching downloaded blobs, because it\n    was painfully unclear how to instantiate the classes `urllib2` uses for\n    ourselves, even after reading the source.\n    \n    It makes some attempt to mimic the API of the \"file-like\" object from \n    `urllib2.urlopen`, and seems to be sufficient for this script.\n    \"\"\"\n    \n    def __init__(self, id, contents, headers, url, code):\n        self.id = id\n        self.contents = contents\n        self.headers = HeadersDict(headers)\n        self.url = url\n        self.code = code\n\n    def read(self):\n        return self.contents\n\n    def geturl(self):\n        return self.url\n\n    def info(self):\n        return self.headers\n    \n    def getcode(self):\n        return self.code \n\nclass BaqupClient(quip.QuipClient):\n    \"\"\"Subclass of `quip.QuipClient` that implements read caching for a poor- \n    person's resume capability: all client GET requests are cached in the\n    `BaqupClient#cache_dir` directory and read from there if present instead of\n    hitting the API, allowing the script to tear through the threads it's\n    already backed up pretty quickly on subsequent runs. This also avoids\n    incurring the associated rate limit costs.\n\n    Combined with the rate limiting work in `quip.py`, this allowed us to get\n    through backing up ~2.6K entities.\n    \n    To clear the cache, just delete the cache directory.\n    \"\"\"\n    \n    def __init__(self, cache_dir, *args, **kwds):\n        \"\"\"Construct a client.\n        \n        Parameters are the same as `quip.QuipClient`, with the addition of\n        `cache_dir`, which is the directory to write cache files to (pass\n        None to disable caching). It will be created if it doesn't exist.\n        \"\"\"\n        # cache_dir may be None, in which case caching is disabled.\n        self.cache_dir = _normalize_path(cache_dir) if cache_dir else None\n        super(BaqupClient, self).__init__(*args, **kwds)\n    \n    def get_blob(self, thread_id, blob_id):\n        \"\"\"Returns a `Blob` object with the contents of the given `blob_id`.\n\n        It seems that `thread_id` is required to form the API URL, but it\n        *appears* that blob IDs are unique (though the same blob may appear\n        in more than one thread), and this class takes advantage of this \n        assumption.\n        \"\"\"\n        if self._cache_has_blob(blob_id):\n            return self._cache_get_blob(blob_id)\n        \n        response = super(BaqupClient, self).get_blob(thread_id, blob_id)\n        \n        blob = Blob(\n            id = blob_id,\n            contents = response.read(),\n            headers = response.info().dict,\n            url = response.geturl(),\n            code = response.getcode(),\n        )\n        \n        self._cache_put_blob(blob)\n        \n        return blob\n    \n    def _fetch_file_with_caching(self, path, fmt, **args):\n        url = self._url(path, **args)\n        cache_key = re.sub(r'^https?\\:\\/\\/', '', url)\n\n        if self._cache_has(key=cache_key, format=fmt):\n            return self._cache_get(key=cache_key, format=fmt)\n        \n        contents = super(BaqupClient, self)._fetch_file(path, **args)\n\n        self._cache_put(key=cache_key, format=fmt, value=contents)\n        return contents\n\n    def _fetch_json(self, path, post_data=None, **args):\n        \"\"\"When `post_data` is `None`, we assume this is a read request and\n        proxy to `#_fetch_json_with_caching()`. 
Otherwise, call goes \n        straight through to `quip.QuipClient#_fetch_json()`.\n        \"\"\"\n        if post_data is None:\n            return self._fetch_json_with_caching(path, **args)\n        else:\n            return super(BaqupClient, self)._fetch_json(path, \n                post_data=post_data, **args)\n    \n    def _fetch_json_with_caching(self, path, **args):\n        \"\"\"Pretty much what it sounds like... if it's in the cache, it's read\n        from there. If it's not, the request is made and the results are put \n        into the cache.\n        \"\"\"\n        url = self._url(path, **args)\n        cache_key = re.sub(r'^https?\\:\\/\\/', '', url)\n        \n        if self._cache_has(key=cache_key, format='json'):\n            return self._cache_get(key=cache_key, format='json')\n        \n        value = super(BaqupClient, self)._fetch_json(path, post_data=None,\n            **args)\n        \n        self._cache_put(key=cache_key, format='json', value=value)\n        \n        return value\n    \n    def _cache_filepath(self, key, format):\n        \"\"\"Get the fully-formed file path a cache entry with `key` and \n        `format` will be saved at.\n        \n        Returns a string.\n        \"\"\"\n        return os.path.join(self.cache_dir,\n            \"{key}.{format}\".format(key=key, format=format))\n    \n    def _cache_has(self, key, format):\n        \"\"\"Is this key/format pair in the cache?\n        \n        Returns a boolean.\n        \"\"\"\n        if self.cache_dir is None:\n            # Caching is disabled, so nothing is ever in the cache.\n            return False\n        return os.path.isfile(self._cache_filepath(key=key, format=format))\n    \n    def _cache_get(self, key, format):\n        \"\"\"Get a value out of the cache.\n\n        This will raise if it's not there, so use `#_cache_has()` to check first\n        or handle the exception.\n\n        If `format` is 'json', returns the parse of the cache file contents.\n\n        If `format` is 'bin', the cache file is opened in binary mode and you\n        get back whatever Python 2 uses for read binary data. I think it's a\n        string from what I remember.\n\n        Otherwise, returns the result of a regular file read.\n        \"\"\"\n        filepath = self._cache_filepath(key=key, format=format)\n        \n        logging.debug(\"CACHE GET key=%s format=%s from %s\",\n            key, format, filepath)\n        \n        mode = 'r'\n        if format in ('bin', 'docx', 'xlsx', 'pdf'):\n            mode = 'rb'\n        \n        with open(filepath, mode=mode) as fp:\n            contents = fp.read()\n        \n        if format == 'json':\n            return json.loads(contents)\n        else:\n            return contents\n    \n    def _cache_put(self, key, format, value):\n        \"\"\"Put a value in the cache at a `key` and `format`.\n        \n        `key` is used as the relative path from `#cache_dir`, and `format`\n        is used as the file extension.\n        \n        If format is 'json', `value` will be JSON encoded for writing.\n        \n        If format is 'bin', the file will be written in binary mode.\n        \n        Doesn't return.\n        \"\"\"\n        if self.cache_dir is None:\n            # Caching is disabled; silently skip writing.\n            return\n        \n        filepath = self._cache_filepath(key=key, format=format)\n        \n        logging.debug(\"CACHE SET key=%s format=%s to %s\",\n            key, format, filepath)\n        \n        _ensure_path_exists(os.path.dirname(filepath))\n        \n        mode = 'w'\n        if format in ('bin', 'docx', 'xlsx', 'pdf'):\n            mode = 'wb'\n        \n        if format == 'json':\n            value = json.dumps(value, indent=2, sort_keys=True)\n        \n        with open(filepath, mode=mode) as fp:\n            fp.write(value)\n    \n    def _cache_blob_keys(self, blob_id):\n        \"\"\"Returns a pair of strings as a `tuple`. 
The first element is the\n        cache key for blob metadata; the second is for the contents.\n        \n        You may notice that only the blob's ID is used to form the keys;\n        the thread ID is omitted.\n        \n        Though blobs are retrieved through threads, blob IDs alone appear to \n        uniquely identify them: from what I've seen, if you have a blob ID `B`\n        and threads with IDs `X` and `Y` reference the blob via paths\n        `/blob/X/B` and `/blob/Y/B`, that means the same blob is referenced in\n        both `X` and `Y`.\n        \n        Using that assumption, we cache blobs by **only their blob ID**, \n        avoiding retrieving and storing the same blob multiple times.\n        \"\"\"\n        \n        base_key = (re.sub(r'^https?\\:\\/\\/', '', self.base_url) + \n            \"/1/blob/\" + blob_id)\n        return (base_key + \".metadata\", base_key + \".contents\")\n    \n    def _cache_has_blob(self, blob_id):\n        \"\"\"Helper method to see if the cache has a blob, by checking for \n        *both* its metadata and contents entries.\n        \n        Return as boolean.\n        \"\"\"\n        metadata_key, contents_key = self._cache_blob_keys(blob_id)\n        \n        return (self._cache_has(key=metadata_key, format='json')\n            and self._cache_has(key=contents_key, format='bin'))\n    \n    def _cache_get_blob(self, blob_id):\n        \"\"\"Helper method to get both a blob's metadata and contents entries and\n        assemble them into a `Blob` instance.\n        \n        Like `#_cache_get()`, the entries need to be there, or it'll raise.\n        \n        Returns a `Blob`.\n        \"\"\"\n        metadata_key, contents_key = self._cache_blob_keys(blob_id)\n\n        metadata = self._cache_get(key=metadata_key, format='json')\n        contents = self._cache_get(key=contents_key, format='bin')\n\n        return Blob(\n            id = blob_id,\n            contents = contents,\n            headers = metadata['headers'],\n            url = metadata['url'],\n            code = metadata['code'],\n        )\n    \n    def _cache_put_blob(self, blob):\n        \"\"\"Helper method to put a `Blob` into the cache, writing both its\n        metadata and contents entries.\n        \n        Doesn't return.\n        \"\"\"\n        metadata_key, contents_key = self._cache_blob_keys(blob.id)\n        \n        self._cache_put(\n            key = metadata_key,\n            format = 'json',\n            value = dict(\n                id = blob.id,\n                headers = blob.headers,\n                url = blob.url,\n                code = blob.code,\n            )\n        )\n        \n        self._cache_put(key=contents_key, format='bin', value=blob.contents)\n\ndef _backup_thread_as_docx(thread, client, output_directory, depth):\n    thread_id = thread[\"thread\"][\"id\"]\n    title = thread[\"thread\"][\"title\"]\n    logging.info(\"%sBacking up thread %s (%s)...\",\n                 \" \" * depth, title, thread_id)\n    sanitized_title = _sanitize_title(title)\n    if thread[\"thread\"][\"type\"] == \"document\":\n        contents = client._fetch_file_with_caching(\n            'threads/{tid}/export/docx'.format(tid=thread_id), 'docx', args=None)\n        document_file_name = sanitized_title + \".docx\"\n        document_output_path = os.path.join(\n            output_directory, document_file_name)\n        with open(document_output_path, 'wb') as document_file:\n            document_file.write(contents)\n\ndef _backup_thread_as_xlsx(thread, client, output_directory, depth):\n    thread_id = thread[\"thread\"][\"id\"]\n    title = thread[\"thread\"][\"title\"]\n    logging.info(\"%sBacking up thread %s (%s)...\",\n                 \" \" * depth, title, thread_id)\n    sanitized_title = _sanitize_title(title)\n    if thread[\"thread\"][\"type\"] == \"spreadsheet\":\n        contents = client._fetch_file_with_caching(\n            'threads/{tid}/export/xlsx'.format(tid=thread_id), 'xlsx', args=None)\n        document_file_name = sanitized_title + \".xlsx\"\n        document_output_path = os.path.join(\n            output_directory, document_file_name)\n        with open(document_output_path, 'wb') as document_file:\n            
document_file.write(contents)\n\ndef _backup_thread_as_pdf(thread, client, output_directory, depth):\n    thread_id = thread[\"thread\"][\"id\"]\n    title = thread[\"thread\"][\"title\"]\n    logging.info(\"%sBacking up thread %s (%s)...\",\n                 \" \" * depth, title, thread_id)\n    sanitized_title = _sanitize_title(title)\n    if thread[\"thread\"][\"type\"] == \"slides\":\n        contents = client._fetch_file_with_caching(\n            'threads/{tid}/export/pdf'.format(tid=thread_id), 'pdf', args=None)\n        document_file_name = sanitized_title + \".pdf\"\n        document_output_path = os.path.join(\n            output_directory, document_file_name)\n        with open(document_output_path, 'wb') as document_file:\n            document_file.write(contents)\n\ndef main():\n    logging.getLogger().setLevel(logging.DEBUG)\n\n    parser = argparse.ArgumentParser(description=\"Backup of a Quip account\")\n\n    parser.add_argument(\"--access_token\", required=True,\n        help=\"Access token for the user whose account should be backed up\")\n    parser.add_argument(\"--root_folder_id\", default=None,\n        help=\"If provided, only the documents in the given folder will be \"\n        \"backed up. Otherwise all folders and documents will be backed up.\")\n    parser.add_argument(\"--quip_api_base_url\", default=None,\n        help=\"Alternative base URL for the Quip API. If none is provided, \"\n        \"https://platform.quip.com will be used\")\n    parser.add_argument(\"--output_directory\", default=\"./\",\n        help=\"Directory where to place backup data.\")\n    parser.add_argument(\"--cache_directory\", default=None,\n        help=\"Directory where to cache downloaded thread data\")\n    parser.add_argument(\"--use_rate_limiting\", action='store_true',\n        help=\"Watch API rate limit and wait when it runs out\")\n\n    args = parser.parse_args()\n\n    cache_dir = args.cache_directory\n    \n    if cache_dir is not None:\n        cache_dir = _normalize_path(cache_dir)\n        _ensure_path_exists(cache_dir)\n    # A None cache_dir simply disables caching in BaqupClient, so a single\n    # construction covers both cases.\n    client = BaqupClient(\n        cache_dir=cache_dir,\n        access_token=args.access_token, base_url=args.quip_api_base_url,\n        request_timeout=120, use_rate_limiting=bool(args.use_rate_limiting))\n    \n    output_directory = os.path.join(\n        _normalize_path(args.output_directory), \"baqup\")\n    _ensure_path_exists(output_directory)\n    shutil.rmtree(output_directory, ignore_errors=True)\n    output_static_directory = os.path.join(\n        output_directory, _OUTPUT_STATIC_DIRECTORY_NAME)\n    shutil.copytree(_STATIC_DIRECTORY, output_static_directory)\n    _run_backup(client, output_directory, args.root_folder_id)\n\ndef _run_backup(client, output_directory, root_folder_id):\n    user = client.get_authenticated_user()\n    processed_folder_ids = set()\n    if root_folder_id:\n        _descend_into_folder(root_folder_id, processed_folder_ids,\n                             client, output_directory, 0)\n    else:\n        _descend_into_folder(user[\"private_folder_id\"], processed_folder_ids,\n                             client, output_directory, 0)\n        _descend_into_folder(user[\"starred_folder_id\"], processed_folder_ids,\n                             client, output_directory, 0)\n    logging.info(\"Looking for conversations\")\n    conversation_threads = _get_conversation_threads(client)\n    if conversation_threads:\n        conversations_directory = os.path.join(output_directory, \"Conversations\")\n        _ensure_path_exists(conversations_directory)\n        for thread in conversation_threads:\n            if thread[\"thread\"][\"type\"] == \"document\":\n                _backup_thread_as_docx(\n                    thread, client, conversations_directory, 1)\n            elif 
thread[\"thread\"][\"type\"] == \"slides\":\n _backup_thread_as_pdf(\n thread, client, conversations_directory, 1)\n elif thread[\"thread\"][\"type\"] == \"spreadsheet\":\n _backup_thread_as_xlsx(\n thread, client, conversations_directory, 1)\n\ndef _descend_into_folder(folder_id, processed_folder_ids, client,\n output_directory, depth):\n if folder_id in processed_folder_ids:\n return\n processed_folder_ids.add(folder_id)\n try:\n folder = client.get_folder(folder_id)\n except quip.QuipError as e:\n if e.code == 403:\n logging.warning(\"%sSkipped over restricted folder %s.\",\n \" \" * depth, folder_id)\n else:\n logging.warning(\"%sSkipped over folder %s due to unknown error %d.\",\n \" \" * depth, folder_id, e.code)\n return\n except urllib3.HTTPError as e:\n logging.warning(\"%sSkipped over folder %s due to HTTP error %d.\",\n \" \" * depth, folder_id, e.code)\n return\n title = folder[\"folder\"].get(\"title\", \"Folder %s\" % folder_id)\n logging.info(\"%sBacking up folder %s...\", \" \" * depth, title)\n folder_output_path = os.path.join(output_directory, _sanitize_title(title))\n _ensure_path_exists(folder_output_path)\n for child in folder[\"children\"]:\n if \"folder_id\" in child:\n _descend_into_folder(child[\"folder_id\"], processed_folder_ids,\n client, folder_output_path, depth + 1)\n elif \"thread_id\" in child:\n thread = client.get_thread(child[\"thread_id\"])\n if thread[\"thread\"][\"type\"] == \"document\":\n _backup_thread_as_docx(\n thread, client, folder_output_path, depth + 1)\n elif thread[\"thread\"][\"type\"] == \"slides\":\n _backup_thread_as_pdf(\n thread, client, folder_output_path, depth + 1)\n elif thread[\"thread\"][\"type\"] == \"spreadsheet\":\n _backup_thread_as_xlsx(\n thread, client, folder_output_path, depth + 1)\n\ndef _backup_thread(thread, client, output_directory, depth):\n thread_id = thread[\"thread\"][\"id\"]\n title = thread[\"thread\"][\"title\"]\n logging.info(\"%sBacking up thread %s (%s)...\",\n \" \" * depth, title, thread_id)\n sanitized_title = _sanitize_title(title)\n if \"html\" in thread:\n # Parse the document\n try:\n tree = client.parse_document_html(thread[\"html\"])\n except xml.etree.cElementTree.ParseError as e:\n logging.error(\n \"Error parsing thread %s (%s), skipping backup: %s\" % (\n title, thread_id, e))\n return\n\n # Download each image and replace with the new URL\n for img in tree.iter(\"img\"):\n src = img.get(\"src\")\n if not src.startswith(\"/blob\"):\n continue\n _, _, thread_id, blob_id = src.split(\"/\")\n blob_response = client.get_blob(thread_id, blob_id)\n content_disposition = blob_response.info().get(\n \"Content-Disposition\")\n if content_disposition:\n image_filename = content_disposition.split('\"')[-2]\n else:\n image_filename = \"image.png\"\n image_output_path = os.path.join(output_directory, image_filename)\n with open(image_output_path, \"w\") as image_file:\n image_file.write(blob_response.read())\n img.set(\"src\", image_filename)\n html = unicode(xml.etree.cElementTree.tostring(tree))\n # Strip the tags that were introduced in parse_document_html\n html = html[6:-7]\n\n document_file_name = sanitized_title + \".html\"\n document_output_path = os.path.join(\n output_directory, document_file_name)\n document_html = _DOCUMENT_TEMPLATE % {\n \"title\": _escape(title),\n \"stylesheet_path\": (\"../\" * depth) +\n _OUTPUT_STATIC_DIRECTORY_NAME + \"/main.css\",\n \"body\": html,\n }\n with open(document_output_path, \"w\") as document_file:\n document_file.write(document_html.encode(\"utf-8\"))\n 
messages = _get_thread_messages(thread_id, client)\n    if messages:\n        title_suffix = \"messages\" if \"html\" in thread else thread_id\n        message_file_name = \"%s (%s).html\" % (sanitized_title, title_suffix)\n        messages_output_path = os.path.join(output_directory, message_file_name)\n        messages_html = _MESSAGES_TEMPLATE % {\n            \"title\": _escape(title),\n            \"stylesheet_path\": (\"../\" * depth) +\n                _OUTPUT_STATIC_DIRECTORY_NAME + \"/main.css\",\n            \"body\": \"\".join([_MESSAGE_TEMPLATE % {\n                \"author_name\":\n                    _escape(_get_user(client, message[\"author_id\"])[\"name\"]),\n                \"timestamp\": _escape(_format_usec(message[\"created_usec\"])),\n                \"message_text\": _escape(message[\"text\"]),\n            } for message in messages])\n        }\n        with open(messages_output_path, \"w\") as messages_file:\n            messages_file.write(messages_html.encode(\"utf-8\"))\n\ndef _get_thread_messages(thread_id, client):\n    max_created_usec = None\n    messages = []\n    while True:\n        chunk = client.get_messages(\n            thread_id, max_created_usec=max_created_usec, count=100)\n        messages.extend(chunk)\n        if chunk:\n            max_created_usec = chunk[-1][\"created_usec\"] - 1\n        else:\n            break\n    messages.reverse()\n    return messages\n\ndef _get_conversation_threads(client):\n    max_updated_usec = None\n    threads = []\n    thread_ids = set()\n    while True:\n        # sorted() handles both the list that dict.values() returns on\n        # Python 2 and the view it returns on Python 3.\n        chunk = sorted(\n            client.get_recent_threads(\n                max_updated_usec=max_updated_usec, count=50).values(),\n            key=lambda t: t[\"thread\"][\"updated_usec\"], reverse=True)\n        threads.extend([t for t in chunk\n                        if \"html\" not in t and t[\"thread\"][\"id\"] not in thread_ids])\n        thread_ids.update([t[\"thread\"][\"id\"] for t in chunk])\n        if chunk:\n            chunk_max_updated_usec = chunk[-1][\"thread\"][\"updated_usec\"] - 1\n            if chunk_max_updated_usec == max_updated_usec:\n                logging.warning(\"New chunk had the same max_updated_usec (%d) \"\n                                \"as the last one, can't get any older threads\",\n                                max_updated_usec)\n                break\n            max_updated_usec = chunk_max_updated_usec\n        else:\n            break\n        logging.info(\"  Got %d threads, paged back to %s\",\n                     len(threads), _format_usec(max_updated_usec))\n    threads.reverse()\n    return threads\n\ndef _ensure_path_exists(directory_path):\n    if os.path.exists(directory_path):\n        return\n    os.makedirs(directory_path)\n\ndef _normalize_path(path):\n    return os.path.abspath(os.path.expanduser(path))\n\ndef _sanitize_title(title):\n    sanitized_title = re.sub(r\"\\s\", \" \", title)\n    sanitized_title = re.sub(r\"(?u)[^- \\w.]\", \"\", sanitized_title)\n    if len(sanitized_title) > _MAXIMUM_TITLE_LENGTH:\n        sanitized_title = sanitized_title[:_MAXIMUM_TITLE_LENGTH]\n    return sanitized_title\n\n_user_cache = {}\ndef _get_user(client, id):\n    if id not in _user_cache:\n        try:\n            _user_cache[id] = client.get_user(id)\n        except quip.QuipError:\n            _user_cache[id] = {\"id\": id, \"name\": \"Unknown user %s\" % id}\n    return _user_cache[id]\n\ndef _read_template(template_file_name):\n    template_path = os.path.join(_TEMPLATE_DIRECTORY, template_file_name)\n    with open(template_path, \"r\") as template_file:\n        return \"\".join(template_file.readlines())\n\ndef _escape(s):\n    return xml.sax.saxutils.escape(s, {'\"': \"&quot;\"})\n\ndef _format_usec(usec):\n    return datetime.datetime.utcfromtimestamp(usec / 1000000.0).isoformat()\n\n_DOCUMENT_TEMPLATE = _read_template(\"document.html\")\n_MESSAGE_TEMPLATE = 
_read_template(\"message.html\")\n_MESSAGES_TEMPLATE = _read_template(\"messages.html\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"samples/baqup/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":25366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"17159333","text":"from sqlalchemy.orm.exc import NoResultFound\n\nfrom .exceptions import ApiError\n\n# -----------------------------------------------------------------------------\n\n\nclass RelatedBase(object):\n def __init__(self, **kwargs):\n self._view_classes = kwargs\n\n def __call__(self, data, view):\n for field_name, view_class in self._view_classes.items():\n many = view.deserializer.fields[field_name].many\n self.resolve_nested(data, field_name, view_class, many=many)\n\n return data\n\n def resolve_nested(self, data, field_name, view_class, many=False):\n try:\n nested_data = data[field_name]\n except KeyError:\n # If this field were required, the deserializer already would have\n # raised an exception.\n return\n\n try:\n if many:\n if not nested_data:\n resolved = []\n else:\n view = view_class()\n resolved = [\n self.get_related_item(nested_datum, view)\n for nested_datum in nested_data\n ]\n else:\n resolved = self.get_related_item(nested_data, view_class())\n except ApiError as e:\n pointer = '/data/{}'.format(field_name)\n raise e.update({'source': {'pointer': pointer}})\n\n data[field_name] = resolved\n\n def get_related_item(self, related_data, related_view):\n related_id = self.get_related_id(related_data, related_view)\n\n try:\n related_item = related_view.get_item(related_id)\n except NoResultFound:\n raise ApiError(422, {'code': 'invalid_related.not_found'})\n\n return related_item\n\n def get_related_id(self, related_data, related_view):\n raise NotImplementedError()\n\n\nclass NestedRelated(RelatedBase):\n def get_related_id(self, related_data, related_view):\n try:\n related_id = related_data['id']\n except KeyError:\n raise ApiError(422, {'code': 'invalid_related.missing_id'})\n\n return related_id\n","sub_path":"flask_resty/related.py","file_name":"related.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"458545381","text":"import pickle \n\ndef pickleOpen(p):\n file=open(p,'rb')\n data = pickle.load(file)\n file.close()\n return data\n\n\n\n# Read in the list of all files before moving \nhomeDir='/Users/ChatNoir/Google Drive/Herbiarum_Notes/ImageSpreadSheets/'\n\n\ndupFiles = pickleOpen(homeDir+'no_duplicate_Aug12.pkl')\ndupDict = dict((k.upper(), v) for k, v in dupFiles.items())\n\n# Read in the list of files that were moved with good barcodes\ngoodFiles = pickleOpen(homeDir+'no_newPaths_Aug12.pkl')\ngoodDict = dict((k.upper(), v) for k, v in goodFiles.items())\n\nbadFiles = pickleOpen(homeDir+'no_badBarcode_Aug12.pkl')\nbadDict = dict((k.upper(), v) for k, v in badFiles.items())\n\nlen(goodDict)\nlen(goodFiles)\n\nlen(badDict)\nlen(badFiles)\n\nallDict = {**goodDict, **badDict}\n\nset(movedDict).difference(set(allDict))\nset(allDict).difference(set(movedDict))\n\nimageFiles = pickleOpen(homeDir+'no_imageFiles_Aug12_filename.pkl')\nimageDict = dict((k.upper(), v) for k, v in imageFiles.items())\n\nmovedFiles = pickleOpen(homeDir+'no_movedFiles_Aug13_filename.pkl')\nmovedDict = dict((k.upper(), v) for k, v in movedFiles.items())\nlen(movedDict)\nlen(imageDict)\n# All in A, that are not in B 
\nset(movedDict).difference(set(imageDict))\nset(imageDict).difference(set(movedDict))\n\n\nimageFiles = pickleOpen(homeDir+'no_imageFiles_Aug12_barcode.pkl')\nimageDict = dict((k.upper(), v) for k, v in imageFiles.items())\n\nmovedFiles = pickleOpen(homeDir+'no_movedFiles_Aug13_barcode.pkl')\nmovedDict = dict((k.upper(), v) for k, v in movedFiles.items())\n\nlen(movedDict)\nlen(imageDict)\n# All in A, that are not in B \nset(movedDict).difference(set(imageDict))\nset(imageDict).difference(set(movedDict))\n\n\n\n\n\nset(p29lsu).difference(set(p30lsu))\nset(p30lsu).difference(set(p29lsu))\n\n\n\nlsa = pickleOpen('/Users/ChatNoir/Projects/HerbariumRA/ggmountlsa303home/lsa303Jun06.pkl')\ncbf29 = pickleOpen('/Users/ChatNoir/Projects/HerbariumRA/gmount1cyberflorahome/oldPathDictionary29.pkl')\ncbf30 = pickleOpen('/Users/ChatNoir/Projects/HerbariumRA/gmount1cyberflorahome/oldPathDictionaryMay30.pkl')\ncbf28 = pickleOpen('/Users/ChatNoir/Projects/HerbariumRA/gmount1cyberflorahome/barcodeImageDict.pkl')\ncsv30 = pickleOpen('/Users/ChatNoir/Projects/HerbariumRA/gmount1cyberflorahome/portalDictionaryMay30.pkl')\n\nd1 = list(k.upper().split(\"-\")[0] for k, v in lsa.items())\nd2 = list(k.upper().split(\"-\")[0] for k, v in cbf29.items())\nd3 = list(k.upper().split(\"-\")[0] for k, v in cbf30.items())\nd4 = list(k.upper().split(\"-\")[0] for k, v in cbf28.items())\np30 = list(k.upper().split(\"-\")[0] for k, v in csv30.items())\n\nd2lsu,d2lsus = splitLSU(d2)\nd3lsu,d3lsus = splitLSU(d3)\nd4lsu,d4lsus = splitLSU(d4)\np30lsu,p30lsus = splitLSU(p30)\n\n\n# All in A, that are not in B \nset(d4lsu).difference(set(d1))\n\nlen(set(d1).difference(set(d4lsu)))\n\n\nset(d2lsu).difference(set(d1))\nset(d1).difference(set(d2lsu))\n# LSU00051504\n\nset(d2lsu).difference(set(d3lsu))\nset(d3lsu).difference(set(d2lsu))\n\n\n# LSU00176590\n\n\n# All in A, that are not in B \nlen(set(p30lsu).difference(set(d1)))\n\nlen(set(d1).difference(set(p30lsu)))\n","sub_path":"Old/RestructuringLSUCollections/ReorganizeImages/ComparePklsNO.py","file_name":"ComparePklsNO.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"134699101","text":"from zoundry.appframework.constants import IZAppServiceIDs\r\nfrom zoundry.appframework.global_services import getApplicationModel\r\nfrom zoundry.appframework.services.urlfetch.urlfetchsvc import IZURLFetchListener\r\nfrom zoundry.appframework.ui.util.uiutil import fireUIExecEvent\r\nfrom zoundry.base.util.zthread import IZRunnable\r\nfrom zoundry.blogapp.constants import IZBlogAppServiceIDs\r\nfrom zoundry.blogapp.ui.common.blogpostswidgets import ZBlogPostsListByImageQueryModel\r\nfrom zoundry.blogapp.ui.common.blogpostswidgets import ZWhereFoundBlogPostListView\r\nfrom zoundry.blogapp.ui.views.standard.ctxview.details.commondetails import IZDetailsPanelFactory\r\nfrom zoundry.blogapp.ui.views.standard.ctxview.details.commondetails import ZAbstractDetailsPanel\r\nfrom zoundry.blogapp.ui.views.standard.ctxview.imgdetails.imgdetailsimpl.infodetailswidgets import ZImagePreviewPanel\r\nfrom zoundry.blogapp.ui.views.standard.ctxview.imgdetails.imgdetailsimpl.infodetailswidgets import ZImageSummaryPanel\r\nimport wx\r\n\r\n# ----------------------------------------------------------------------------------------\r\n# The model used by the \"image info\" details panel - the details panel that shows info\r\n# about a selected image.\r\n# 
----------------------------------------------------------------------------------------\r\nclass ZInfoImageDetailsModel(ZBlogPostsListByImageQueryModel):\r\n\r\n def __init__(self):\r\n ZBlogPostsListByImageQueryModel.__init__(self)\r\n self.urlFetchService = getApplicationModel().getService(IZAppServiceIDs.URL_FETCH_SERVICE_ID)\r\n self.indexService = getApplicationModel().getService(IZBlogAppServiceIDs.DOCUMENT_INDEX_SERVICE_ID)\r\n # end __init__()\r\n\r\n def getUrlFetchService(self):\r\n return self.urlFetchService\r\n # end getUrlFetchService()\r\n\r\n# end ZInfoImageDetailsModel\r\n\r\n\r\n# ----------------------------------------------------------------------------------------\r\n# A helper class for executing code that updates the details panel UI. This class is\r\n# needed because we want to run the code on the UI thread.\r\n# ----------------------------------------------------------------------------------------\r\nclass ZInfoImageDetailsPanelUIUpdater(IZRunnable):\r\n\r\n MODE_CONNECT = 0\r\n MODE_CONNECT_ERROR = 1\r\n MODE_DOWNLOAD_START = 2\r\n MODE_DOWNLOAD = 3\r\n MODE_DOWNLOAD_COMPLETE = 4\r\n MODE_DOWNLOAD_ERROR = 5\r\n\r\n def __init__(self, panel, data, mode):\r\n self.panel = panel\r\n self.data = data\r\n self.mode = mode\r\n # end __init__()\r\n\r\n def run(self):\r\n if self.mode == ZInfoImageDetailsPanelUIUpdater.MODE_CONNECT:\r\n self.panel.updateFromConnectionRespInfo(self.data)\r\n elif self.mode == ZInfoImageDetailsPanelUIUpdater.MODE_CONNECT_ERROR:\r\n self.panel.updateFromConnectionError(self.data)\r\n elif self.mode == ZInfoImageDetailsPanelUIUpdater.MODE_DOWNLOAD_COMPLETE:\r\n self.panel.updateFromConnectionResp(self.data)\r\n elif self.mode == ZInfoImageDetailsPanelUIUpdater.MODE_DOWNLOAD_ERROR:\r\n self.panel.updateFromDownloadError(self.data)\r\n # end run()\r\n\r\n# end ZInfoImageDetailsPanelUIUpdater\r\n\r\n\r\n# ----------------------------------------------------------------------------------------\r\n# A concrete impl of a image details panel. 
This one shows 'general' information\r\n# about the image.\r\n# ----------------------------------------------------------------------------------------\r\nclass ZInfoImageDetailsPanel(ZAbstractDetailsPanel, IZURLFetchListener):\r\n\r\n def __init__(self, parent):\r\n self.model = ZInfoImageDetailsModel()\r\n self.fetcher = None\r\n ZAbstractDetailsPanel.__init__(self, parent)\r\n # end __init__()\r\n\r\n def _createWidgets(self):\r\n self.imagePreview = ZImagePreviewPanel(self)\r\n self.summary = ZImageSummaryPanel(self)\r\n self.blogPostListView = ZWhereFoundBlogPostListView(self, self.model)\r\n # end _createWidgets()\r\n\r\n def _bindWidgetEvents(self):\r\n pass\r\n # end _bindWidgetEvents()\r\n\r\n def _layoutWidgets(self):\r\n vBox = wx.BoxSizer(wx.VERTICAL)\r\n vBox.Add(self.summary, 0, wx.EXPAND | wx.BOTTOM, 5)\r\n vBox.Add(self.blogPostListView, 1, wx.EXPAND)\r\n\r\n hBox = wx.BoxSizer(wx.HORIZONTAL)\r\n hBox.Add(self.imagePreview, 0)\r\n hBox.AddSizer(vBox, 1, wx.EXPAND | wx.LEFT, 5)\r\n\r\n box = wx.BoxSizer(wx.VERTICAL)\r\n box.AddSizer(hBox, 1, wx.EXPAND | wx.ALL, 5)\r\n\r\n self.SetAutoLayout(True)\r\n self.SetSizer(box)\r\n # end _layoutWidgets()\r\n\r\n def destroy(self):\r\n # Cancel any Image fetching that might be going on\r\n if self.fetcher is not None and not self.fetcher.isDone():\r\n self.fetcher.cancel()\r\n # end destroy()\r\n\r\n def onSelectionChanged(self, data):\r\n (blog, imageIDO) = data #@UnusedVariable\r\n \r\n # Cancel any Image fetching that might be going on\r\n if self.fetcher is not None and not self.fetcher.isDone():\r\n self.fetcher.cancel()\r\n\r\n # Set the current imageIDO in the model\r\n self.model.setImageIDO(imageIDO)\r\n\r\n # Reset the UI for the widgets\r\n self.imagePreview.reset()\r\n self.summary.reset()\r\n self.Layout()\r\n\r\n # Refresh the list of blog posts\r\n self.blogPostListView.refresh()\r\n\r\n # Start fetching the image in the background - events will update the UI\r\n url = imageIDO.getUrl()\r\n self.fetcher = self.model.getUrlFetchService().fetch(url, self)\r\n # end onSelectionChanged()\r\n\r\n def updateFromConnectionError(self, error):\r\n self.imagePreview.updateFromError(error)\r\n self.summary.updateFromError(error)\r\n self.Layout()\r\n # end updateFromConnectionError()\r\n\r\n def updateFromConnectionRespInfo(self, connectionRespInfo):\r\n self.imagePreview.updateFromConnectionRespInfo(connectionRespInfo)\r\n self.summary.updateFromConnectionRespInfo(connectionRespInfo)\r\n self.Layout()\r\n # end updateFromConnectionRespInfo()\r\n\r\n def updateFromDownloadError(self, error):\r\n self.imagePreview.updateFromError(error)\r\n self.summary.updateFromError(error)\r\n self.Layout()\r\n # end updateFromDownloadError()\r\n\r\n def updateFromConnectionResp(self, connectionResp):\r\n self.imagePreview.updateFromConnectionResp(connectionResp)\r\n self.summary.updateFromConnectionResp(connectionResp)\r\n self.Layout()\r\n # end updateFromConnectionResp()\r\n\r\n def onCancel(self, fetcher):\r\n if self.fetcher == fetcher:\r\n self.fetcher = None\r\n # end onCancel()\r\n\r\n def onConnect(self, fetcher, connectionRespInfo): #@UnusedVariable\r\n updater = ZInfoImageDetailsPanelUIUpdater(self, connectionRespInfo, ZInfoImageDetailsPanelUIUpdater.MODE_CONNECT)\r\n fireUIExecEvent(updater, self)\r\n # end onConnect()\r\n\r\n def onConnectError(self, fetcher, error): #@UnusedVariable\r\n updater = ZInfoImageDetailsPanelUIUpdater(self, error, ZInfoImageDetailsPanelUIUpdater.MODE_CONNECT_ERROR)\r\n fireUIExecEvent(updater, self)\r\n # end onConnectError()\r\n\r\n def onContentDownloadStart(self, fetcher, contentLength): #@UnusedVariable\r\n updater = ZInfoImageDetailsPanelUIUpdater(self, contentLength, ZInfoImageDetailsPanelUIUpdater.MODE_DOWNLOAD_START)\r\n fireUIExecEvent(updater, self)\r\n # end onContentDownloadStart()\r\n\r\n def onContentDownload(self, fetcher, numBytes): #@UnusedVariable\r\n updater = ZInfoImageDetailsPanelUIUpdater(self, numBytes, ZInfoImageDetailsPanelUIUpdater.MODE_DOWNLOAD)\r\n fireUIExecEvent(updater, self)\r\n # end onContentDownload()\r\n\r\n def onContentDownloadComplete(self, fetcher, connectionResp): #@UnusedVariable\r\n updater = ZInfoImageDetailsPanelUIUpdater(self, connectionResp, ZInfoImageDetailsPanelUIUpdater.MODE_DOWNLOAD_COMPLETE)\r\n fireUIExecEvent(updater, self)\r\n # end onContentDownloadComplete()\r\n\r\n def onContentDownloadError(self, fetcher, error): #@UnusedVariable\r\n updater = ZInfoImageDetailsPanelUIUpdater(self, error, ZInfoImageDetailsPanelUIUpdater.MODE_DOWNLOAD_ERROR)\r\n fireUIExecEvent(updater, self)\r\n # end onContentDownloadError()\r\n\r\n# end ZInfoImageDetailsPanel\r\n\r\n\r\n# ----------------------------------------------------------------------------------------\r\n# An implementation of an image details panel factory that creates a panel for \"Image Info\"\r\n# information about the post.\r\n# ----------------------------------------------------------------------------------------\r\nclass ZInfoImageDetailsPanelFactory(IZDetailsPanelFactory):\r\n\r\n def createDetailsPanel(self, parent):\r\n return ZInfoImageDetailsPanel(parent)\r\n # end createDetailsPanel()\r\n\r\n# end ZInfoImageDetailsPanelFactory\r\n","sub_path":"src/python/zoundry/blogapp/ui/views/standard/ctxview/imgdetails/imgdetailsimpl/infodetails.py","file_name":"infodetails.py","file_ext":"py","file_size_in_byte":8802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"42819773","text":"import matplotlib.pyplot as plt\nimport pickle\nimport seaborn as sns\nimport sys\n\nsns.set()\n\ndropout_path = \"Dropout_baseline/\"\nsgd_path = \"SGD_baseline/\"\nbayesian_path = \"bayes1/\"\n\ndropout_error = [round(100-accu, 2) for accu in pickle.load(open(dropout_path+\"test_accu_lst.pkl\", \"rb\"))]\nsgd_error = [round(100-accu, 2) for accu in pickle.load(open(sgd_path+\"test_accu_lst.pkl\", \"rb\"))]\nbayesian_error = [round(100-accu, 2) for accu in pickle.load(open(bayesian_path+\"test_accu_lst.pkl\", \"rb\"))]\n\nplt.plot(dropout_error, label=\"Dropout\")\nplt.plot(sgd_error, label=\"Vanilla SGD\")\nplt.plot(bayesian_error, label=\"Bayes by Backprop\")\nplt.ylim(0.8, 2.3)\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Test error (%)\")\n\nplt.legend()\nplt.show()\n","sub_path":"results/mnist/plot_test_error.py","file_name":"plot_test_error.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"117551015","text":"from conans import ConanFile, tools, CMake\nfrom conans.errors import ConanInvalidConfiguration\nfrom conans.tools import Version\nfrom fnmatch import fnmatch\nimport os\nimport tarfile\n\n\nclass FruitConan(ConanFile):\n name = \"fruit\"\n description = \"C++ dependency injection framework\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/google/fruit\"\n license = \"Apache-2.0\"\n topics = (\"conan\", \"fruit\", \"injection\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"shared\": 
[True, False],\n \"use_boost\": [True, False],\n \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"use_boost\": True, \"fPIC\": True}\n generators = \"cmake\", \"cmake_find_package\"\n exports_sources = [\"CMakeLists.txt\", \"patches/*\"]\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def build_requirements(self):\n if self.options.use_boost:\n self.build_requires(\"boost/1.72.0\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n compiler = str(self.settings.compiler)\n compiler_version = Version(self.settings.compiler.version.value)\n\n minimal_version = {\n \"gcc\": \"5\",\n \"clang\": \"3.5\",\n \"apple-clang\": \"7.3\",\n \"Visual Studio\": \"14\"\n }\n\n if compiler in minimal_version and \\\n compiler_version < minimal_version[compiler]:\n raise ConanInvalidConfiguration(\"%s requires a compiler that supports\"\n \" at least C++11. %s %s is not\"\n \" supported.\" % (self.name, compiler, compiler_version))\n\n if self.settings.compiler.cppstd:\n tools.check_min_cppstd(self, \"11\")\n\n @property\n def _extracted_dir(self):\n return self.name + \"-\" + self.version\n\n def _get_source(self):\n filename = os.path.basename(self.conan_data[\"sources\"][self.version][\"url\"])\n tools.download(filename=filename, **self.conan_data[\"sources\"][self.version])\n\n with tarfile.TarFile.open(filename, 'r:*') as tarredgzippedFile:\n # NOTE: The archive file contains the file names build and BULD\n # in the extras/bazel_root/third_party/fruit directory.\n # Extraction fails on a case-insensitive file system due to file\n # name conflicts.\n # Exclude build as a workaround.\n exclude_pattern = \"%s/extras/bazel_root/third_party/fruit/build\" % (self._extracted_dir,)\n members = list(filter(lambda m: not fnmatch(m.name, exclude_pattern),\n tarredgzippedFile.getmembers()))\n tarredgzippedFile.extractall(\".\", members=members)\n\n def source(self):\n self._get_source()\n\n os.rename(self._extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n if not self._cmake:\n self._cmake = CMake(self)\n self._cmake.definitions[\"FRUIT_USES_BOOST\"] = self.options.use_boost\n self._cmake.definitions[\"FRUIT_ENABLE_COVERAGE\"] = False\n\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def _patch_files(self):\n if self.version in self.conan_data[\"patches\"]:\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n def build(self):\n self._patch_files()\n\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs = [\"m\"]\n","sub_path":"recipes/fruit/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"184163832","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# Copyright 2016 RIFT.io Inc\n\n\nfrom rift.mano.tosca_translator.common.utils import _\nfrom rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource\n\nfrom toscaparser.common.exception import ValidationError\n\n\n# Name used to dynamically load appropriate map class.\nTARGET_CLASS_NAME = 'ToscaNetworkPort'\nTOSCA_LINKS_TO = 'tosca.relationships.network.LinksTo'\nTOSCA_BINDS_TO = 'tosca.relationships.network.BindsTo'\n\n\nclass ToscaNetworkPort(ManoResource):\n '''Translate TOSCA node type tosca.nodes.network.Port.'''\n\n toscatype = 'tosca.nodes.network.Port'\n\n VALID_TYPES = ['VIRTIO', 'VPORT']\n\n def __init__(self, log, nodetemplate, metadata=None):\n super(ToscaNetworkPort, self).__init__(log,\n nodetemplate,\n type_='port',\n metadata=metadata)\n # Default order\n self.order = 0\n self.vnf = None\n self.cp_name = None\n pass\n\n def handle_properties(self):\n tosca_props = self.get_tosca_props()\n self.log.debug(_(\"Port {0} with tosca properties: {1}\").\n format(self.name, tosca_props))\n port_props = {}\n for key, value in tosca_props.items():\n port_props[key] = value\n\n if 'cp_type' not in port_props:\n port_props['cp_type'] = 'VPORT'\n else:\n if not port_props['cp_type'] in ToscaNetworkPort.VALID_TYPES:\n err_msg = _(\"Invalid port type, {0}, specified for {1}\"). \\\n format(port_props['cp_type'], self.name)\n self.log.warn(err_msg)\n raise ValidationError(message=err_msg)\n\n if 'vdu_intf_type' not in port_props:\n port_props['vdu_intf_type'] = 'VIRTIO'\n else:\n if not port_props['vdu_intf_type'] in ToscaNetworkPort.VALID_TYPES:\n err_msg = _(\"Invalid port type, {0}, specified for {1}\"). \\\n format(port_props['vdu_intf_type'], self.name)\n self.log.warn(err_msg)\n raise ValidationError(message=err_msg)\n\n self.cp_name = port_props['name']\n self.properties = port_props\n\n def handle_requirements(self, nodes):\n tosca_reqs = self.get_tosca_reqs()\n tosca_caps = self.get_tosca_caps()\n self.log.debug(\"VNF {0} requirements: {1}\".\n format(self.name, tosca_reqs))\n\n vnf = None # Need vnf ref to generate cp refs in vld\n vld = None\n '''\n if len(tosca_reqs) != 2:\n err_msg = _(\"Invalid configuration as incorrect number of \"\n \"requirements for CP {0} are specified\"). \\\n format(self)\n self.log.error(err_msg)\n raise ValidationError(message=err_msg)\n '''\n for req in tosca_reqs:\n if 'virtualBinding' in req:\n target = req['virtualBinding']['target']\n node = self.get_node_with_name(target, nodes)\n if node:\n vnf = node.vnf\n self.vnf = node._vnf\n if not vnf:\n err_msg = _(\"No vnfs linked to a VDU {0}\"). 
\\\n format(node)\n self.log.error(err_msg)\n raise ValidationError(message=err_msg)\n cp = {}\n cp['name'] = self.properties['name']\n cp['type'] = self.properties['cp_type']\n self.log.debug(_(\"Connection Point entry for VNF {0}:{1}\").\n format(vnf, cp))\n if 'connection-point' not in vnf.properties:\n vnf.properties['connection-point'] = []\n vnf.properties['connection-point'].append(cp)\n ext_intf = {}\n ext_intf['name'] = self.properties['vdu_intf_name']\n ext_intf['virtual-interface'] = \\\n {'type': self.properties['vdu_intf_type']}\n ext_intf['vnfd-connection-point-ref'] = \\\n self.properties['name']\n if 'external-interface' not in node.properties:\n node.properties['external-interface'] = []\n node.properties['external-interface'].append(ext_intf)\n else:\n err_msg = _(\"Connection point {0}, VDU {1} \"\n \"specified not found\"). \\\n format(self.name, target)\n self.log.error(err_msg)\n raise ValidationError(message=err_msg)\n elif 'virtualLink' in req:\n target = req['virtualLink']['target']\n node = self.get_node_with_name(target, nodes)\n if node:\n vld = node\n else:\n err_msg = _(\"CP {0}, VL {1} specified not found\"). \\\n format(self, target)\n self.log.error(err_msg)\n raise ValidationError(message=err_msg)\n\n if 'sfc' in tosca_caps and vnf:\n if 'sfc_type' in tosca_caps['sfc']:\n vnf.properties['service-function-chain'] = tosca_caps['sfc']['sfc_type'].upper()\n if 'sf_type' in tosca_caps['sfc']:\n vnf.properties['service-function-type'] = tosca_caps['sfc']['sf_type']\n\n if vnf:\n cp_ref = {}\n cp_ref['vnfd-connection-point-ref'] = self.properties['name']\n cp_ref['vnfd-id-ref'] = vnf.properties['id']\n cp_ref['member-vnf-index-ref'] = \\\n vnf._const_vnfd['member-vnf-index']\n else:\n err_msg = _(\"CP {0}, VNF {1} not found\"). \\\n format(self, vnf, vld)\n self.log.error(err_msg)\n raise ValidationError(message=err_msg)\n","sub_path":"osm/SO/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py","file_name":"tosca_network_port.py","file_ext":"py","file_size_in_byte":6605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"519235185","text":"# RMIT University Vietnam\n# Dinh Long Nguyen - s3804737\n# vote.py\n\nimport os.path\nimport os\nimport random\nimport copy\nimport vote_end\nfrom uuid import uuid4\n\n# import neccesary config\nfrom config import number_of_bits, number_of_candidates\n\n# defining functions\ndef get_public_key():\n \"\"\"\n get public key from file\n \"\"\"\n file = open(\"public.txt\", \"r\")\n data = file.readline().split(\" \")\n n = data[0]\n g = data[1]\n print(\"Get public key completed!\")\n return (n, g)\n\n\ndef get_voter():\n \"\"\"\n get voter list from file\n \"\"\"\n voter = []\n file_voter = open(\"voter.txt\", \"r\")\n for x in file_voter:\n data = x.strip().split(\" \")\n if len(data) == 4:\n voter.append(data)\n print(\"Get voter information completed!\")\n return voter\n\n\ndef get_voter_index(voter, name, id):\n \"\"\"\n get index of current voter based on name and id if available\n \"\"\"\n # return -1 if no voter info found\n ans = -1\n for i in range(0, len(voter)):\n # criteria to find voter\n if voter[i][0] == name and voter[i][1] == id:\n ans = i\n return ans\n\n\ndef has_voter_vote(voter, index):\n \"\"\"\n check to see if voter has voted\n \"\"\"\n return voter[index][2] != \"0\"\n\n\ndef submit_vote(vote, vote_id, n, g, voter_arr, voter_index):\n \"\"\"\n Submit Vote\n 1. generate message m based on the total number of bits\n 2. 
encrypt message m \n 3. output encrypted vote detail to file\n 4. Update voter detail\n 5. return updated voter list\n \"\"\"\n # Perform deepcopy to prevent mutable value change\n voter = copy.deepcopy(voter_arr)\n # Translate vote to appropriate binary value, then back to decimal value m for encryption\n m = int(\"1\".zfill(number_of_bits) + ''.join([\"\".zfill(number_of_bits) for i in range(0, vote - 1)]), 2)\n # perform Paillier encryption\n r = random.randrange(1, n+1)\n C = int(((g**m)*(r**n)) % (n*n))\n # save encryption to file\n file = open(\"encrypted_vote.txt\", \"a\")\n file.write(str(C) + \" \" + str(vote_id) + \"\\n\")\n file.close()\n # Update voter info\n voter[voter_index][2] = \"1\"\n voter[voter_index][3] = str(vote_id)\n # write updated detail to file\n file_voter = open(\"voter.txt\", \"w\")\n for i in voter:\n file_voter.write(' '.join(i) + \"\\n\")\n file_voter.close()\n # return updated voter\n return voter\n\ndef has_all_vote(voter):\n \"\"\"\n Check to see if all voters have voted to determine whether the vote can end\n \"\"\"\n for x in voter:\n if x[2] == \"0\":\n return 0\n return 1\n\ndef has_vote_end():\n \"\"\"\n Check to see if the vote has ended\n \"\"\"\n file = open('vote_status.txt', 'r')\n if file.readline() == \"1\":\n return True\n return False\n\n\ndef execute_main():\n # Check to see if the necessary files are available\n if not os.path.isfile('public.txt') or not os.path.isfile('voter.txt') or not os.path.isfile('vote_status.txt'):\n print(\"Some files are missing! Please re-run generation function in voting_authority.py file!\")\n return\n # Check to see if election has ended\n if has_vote_end():\n print(\"Vote has ended! Please re-run generation function in voting_authority.py file to reset the election!\")\n return\n # Initialize data\n n, g = get_public_key()\n voter = get_voter()\n run = True\n print(\"Simulate getting voter information (get voter name and id). In real use case, voter name and id (card_id) will be embedded in a smartcard. Voter will not be required to enter this information by hand.\")\n while run:\n # Check to see if vote has ended after each loop\n if has_vote_end():\n print(\"Vote has ended! Please re-run generation function in voting_authority.py file to reset the election!\")\n break\n # Enter voter info\n print(\"-----------------------------------------------------------\")\n name = input(\"Enter your name (ex: Voter1): \")\n id = input(\n \"Enter your card_id (card_id = the digit of your voter (ex: Voter1 -> card_id = 1 )): \")\n # get voter index in list\n current_voter_index = get_voter_index(voter, name, id)\n # if voter is not found\n if current_voter_index == -1:\n print(\"-----------------------------------------------------------\")\n print(\"The information you provided is not correct! Your voter and id might not exist or it might not match our database! Please try again!\")\n continue\n # check to see if voter has voted\n if has_voter_vote(voter, current_voter_index):\n print(\"-----------------------------------------------------------\")\n print(name, \"has voted! One voter can't submit 2 ballots!\")\n continue\n # enter vote option\n vote = int(input(\n \"Who do you want to vote for (1 to \" + str(number_of_candidates) + \"): \"))\n # Validate vote input\n while vote < 1 or vote > number_of_candidates:\n print(\"Incorrect vote! Please enter again!\")\n vote = int(input(\n \"Who do you want to vote for (1 to \" + str(number_of_candidates) + \"): \"))\n # Generate vote_id using uuid library\n vote_id = uuid4()\n # Submit vote\n voter = submit_vote(vote, vote_id, int(n), int(g),\n voter, current_voter_index)\n print(\"-----------------------------------------------------------\")\n print(\"Vote complete!\")\n print(\"-----------------------------------------------------------\")\n print(\"Your vote id is:\", str(vote_id))\n print(\"You can use your vote id to see that your vote is on the election bulletin board, assuring you that your vote has been submitted and will be counted in the election!\")\n print(\"-----------------------------------------------------------\")\n # Check to see if all voters have voted\n if has_all_vote(voter):\n # End vote process if all voters have voted\n vote_end.end()\n print(\"All voters have voted, vote has ended!\")\n run = False\n else: \n # Allow user to decide to continue voting or not\n inp2 = input(\n \"Continue voting as another person? Enter 1 for yes, other for no: \")\n if inp2 != \"1\":\n run = False\n else:\n # reset voter list data if user continues to vote\n voter = get_voter()\n\n# Main function\n# Run if the file is executed directly (not through import)\nif __name__ == \"__main__\":\n execute_main()","sub_path":"vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":6629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"235854389","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom capif_routing_info.models.base_model_ import Model\nimport re\nfrom capif_routing_info import util\n\nimport re # noqa: E501\n\nclass Ipv4AddressRange(Model):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, start=None, end=None): # noqa: E501\n \"\"\"Ipv4AddressRange - a model defined in OpenAPI\n\n :param start: The start of this Ipv4AddressRange. # noqa: E501\n :type start: str\n :param end: The end of this Ipv4AddressRange. # noqa: E501\n :type end: str\n \"\"\"\n self.openapi_types = {\n 'start': str,\n 'end': str\n }\n\n self.attribute_map = {\n 'start': 'start',\n 'end': 'end'\n }\n\n self._start = start\n self._end = end\n\n @classmethod\n def from_dict(cls, dikt) -> 'Ipv4AddressRange':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The Ipv4AddressRange of this Ipv4AddressRange. # noqa: E501\n :rtype: Ipv4AddressRange\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def start(self):\n \"\"\"Gets the start of this Ipv4AddressRange.\n\n String identifying a IPv4 address formatted in the \\\"dotted decimal\\\" notation as defined in RFC 1166. # noqa: E501\n\n :return: The start of this Ipv4AddressRange.\n :rtype: str\n \"\"\"\n return self._start\n\n @start.setter\n def start(self, start):\n \"\"\"Sets the start of this Ipv4AddressRange.\n\n String identifying a IPv4 address formatted in the \\\"dotted decimal\\\" notation as defined in RFC 1166. # noqa: E501\n\n :param start: The start of this Ipv4AddressRange.\n :type start: str\n \"\"\"\n if start is not None and not re.search(r'^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$', start): # noqa: E501\n raise ValueError(\"Invalid value for `start`, must be a follow pattern or equal to `/^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$/`\") # noqa: E501\n\n self._start = start\n\n @property\n def end(self):\n \"\"\"Gets the end of this Ipv4AddressRange.\n\n String identifying a IPv4 address formatted in the \\\"dotted decimal\\\" notation as defined in RFC 1166. # noqa: E501\n\n :return: The end of this Ipv4AddressRange.\n :rtype: str\n \"\"\"\n return self._end\n\n @end.setter\n def end(self, end):\n \"\"\"Sets the end of this Ipv4AddressRange.\n\n String identifying a IPv4 address formatted in the \\\"dotted decimal\\\" notation as defined in RFC 1166. # noqa: E501\n\n :param end: The end of this Ipv4AddressRange.\n :type end: str\n \"\"\"\n if end is not None and not re.search(r'^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$', end): # noqa: E501\n raise ValueError(\"Invalid value for `end`, must be a follow pattern or equal to `/^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$/`\") # noqa: E501\n\n self._end = end\n","sub_path":"services/TS29222_CAPIF_Routing_Info_API/capif_routing_info/models/ipv4_address_range.py","file_name":"ipv4_address_range.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"114160511","text":"\n\nfrom xai.brain.wordbase.verbs._misquote import _MISQUOTE\n\n#class header\nclass _MISQUOTING(_MISQUOTE, ):\n\tdef __init__(self,): \n\t\t_MISQUOTE.__init__(self)\n\t\tself.name = \"MISQUOTING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"misquote\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_misquoting.py","file_name":"_misquoting.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"102754692","text":"# Please implement a function that replaces each space in a string with \"%20\".\n# For example, when the string is We Are Happy. the string after replacement is We%20Are%20Happy.\n\ndef replaceSpace(string):\n result = []\n for i in string:\n if i == \" \":\n result.append(\"%20\")\n else:\n result.append(i)\n return ''.join(result)\n # list to string: \"\".join(list), where the string in the leading quotes is the separator placed between the individual characters\n\n\nstr = \"We Are Happy\"\nresult = replaceSpace(str)\nprint(result)","sub_path":"Python/niuke/替换空格.py","file_name":"替换空格.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"246179789","text":"import pandas as pd\nimport xlwings as xw\nimport datetime\n\n\nbook = xw.Book('count.xlsx')\nshit = book.sheets('Sheet1')\n\nnow = datetime.datetime.now()\ntime = now.strftime(\"%d %y\")\n\nmain_frame = pd.read_excel('count.xlsx',name='Sheet1')\n\ndate = list(main_frame['Time'])\nproject = list(main_frame['Project'])\nsection = list(main_frame['Section'])\ntype = list(main_frame['Type'])\ncount = list(main_frame['Count'])\ntotal = list(main_frame['Total'])\n\nproj_inp = []\nsec_inp = []\ntype_inp = []\ncount_inp = []\n\nlist1 = []\nlist2 = []\nlist3 = []\nlist4 = []\nsum2 = 0\ntotality = 0\nsummation = 0\n\nnumber_of_programs = 
int(input(\"Enter the number of programs\"))\nfor i in range(number_of_programs):\n proj_inp.append(str(input(\"Enter the project\")))\n sec_inp.append(str(input(\"Enter the section\")))\n type_inp.append(str(input(\"Enter the type\")))\n count_inp.append(int(input(\"Enter the count\")))\n\ndef printing():\n shit.range('A' + str(len(date) + 2)).value = time\n shit.range('B' + str(len(project) + 2)).options(transpose=True).value = proj_inp\n shit.range('C' + str(len(section) + 2)).options(transpose=True).value = sec_inp\n shit.range('D' + str(len(type) + 2)).options(transpose=True).value = type_inp\n shit.range('E' + str(len(count) + 2)).options(transpose=True).value = count_inp\n\ndef list_cut(list,i):\n fin_list = list[i:]\n return fin_list\n\ndef list_sum(list):\n y = 0\n for i in range(len(list)):\n y = y + list[i]\n return y\n\ndef list_remove(list,iter):\n list.remove(list[iter])\n\n\ndef del_check():\n for iter in range(len(list1)):\n proj_inp.remove(list1[iter])\n for iter in range(len(list2)):\n sec_inp.remove(list2[iter])\n for iter in range(len(list3)):\n type_inp.remove(list3[iter])\n for iter in range(len(list4)):\n count_inp.remove(list4[iter])\n\n\n\n\ndef same_check():\n #print(\"And we should be here\")\n for iter in range(len(proj_inp)):\n for iter1 in range(len(project1)):\n if project1[iter1] == proj_inp[iter]:\n if section1[iter1] == sec_inp[iter]:\n if type1[iter1] == type_inp[iter]:\n #print(\"Increment the count\")\n x = 0\n x = count1[iter1] + count_inp[iter]\n list1.append(proj_inp[iter])\n list2.append(sec_inp[iter])\n list3.append(type_inp[iter])\n list4.append(count_inp[iter])\n shit.range('E' + str(reference + iter1 + 2)).value = x\n else:\n continue\n else:\n continue\n else:\n continue\n\n\nnew_sum = 0\nexisting_sum = 0\nnew_sum = list_sum(count_inp)\nreference = 0\n\nif len(date):\n if time in date:\n for iter in range(len(date)):\n if date[iter] == time:\n project1 = list_cut(project,iter)\n section1 = list_cut(section,iter)\n type1 = list_cut(type,iter)\n count1 = list_cut(count,iter)\n existing_sum = list_sum(count1)\n reference = iter\n same_check()\n del_check()\n #print(proj_inp,sec_inp,type_inp,count_inp)\n for iter in range(len(proj_inp)):\n shit.range('B' + str(len(project1) + reference + 2)).value = proj_inp[iter]\n shit.range('C' + str(len(section1) + reference + 2)).value = sec_inp[iter]\n shit.range('D' + str(len(type1) + reference + 2)).value = type_inp[iter]\n shit.range('E' + str(len(count1) + reference + 2)).value = count_inp[iter]\n else:\n #print(\"But we are here\")\n printing()\n sum1 = list_sum(count_inp)\n print(sum1)\n shit.range('F' + str(len(type) + 2)).value = sum1\n\nelse:\n #print(\"But we are here\")\n printing()\n sum2 = list_sum(count_inp)\n print(sum2)\n shit.range('F' + str(len(type) + 2)).value = sum2\n\n\"\"\"\nelse:\n printing()\n sum2 = list_sum(count_inp)\n print(sum2)\n shit.range('F' + str(2)).value = sum2\n\"\"\"\n\n\"\"\"\nnumber_of_programs = int(input(\"Enter the number of programs\"))\nfor x in range(number_of_programs):\n different.append(input('Which program did you work on Today'))\n number.append(int(input(\"How many requirements did you work on?\")))\n type1.append(str(input(\"What type was it ? 
Press A for analyzed, Press D for drafted , Press R for rework\")))\n\nprint(number_of_programs)\nprint(different)\nprint(number)\nprint(type1)\n\n\n\nprint(type(time))\nprint(type(date))\nfor i in range(len(date)):\n print(date[i])\n print(type(date[i]))\n if date[i] == time:\n print(date[i])\n print(\"Here\")\n print(time)\n\n\nif len(date):\n for iter in range(len(date)):\n if date[iter] == time:\n break\n else:\n shit.range('C' + str(len(date) + 3)).value = time\nelse:\n shit.range('C' + str(len(date) + 3)).value = time\n\n#Program\nprogram = main_frame['Program']\nif len(program):\n for iter2 in range(len(program)):\n shit.range('D' + str(len(program) + 3)).options(transpose=True).value = different\nelse:\n shit.range('D' + str(len(program) + 3)).options(transpose=True).value = different\n\n#No.of requirements\nrequirements = main_frame['No.of requirements']\nif len(requirements):\n for iter3 in range(len(requirements)):\n shit.range('E' + str(len(requirements) + 3)).options(transpose=True).value = number\nelse:\n shit.range('E' + str(len(requirements) + 3)).options(transpose=True).value = number\n\n#Type_of_requirement\ntypes = main_frame['Type']\nif len(types):\n for iter4 in range(len(types)):\n for iter5 in range(len(type1)):\n if type1[iter5] =='A':\n shit.range('F' + str(len(types) + 3)).value = 'Analyzed'\n elif type1[iter5] =='D':\n shit.range('F' + str(len(types) + 3)).value = 'Drafted'\n elif type1[iter5] =='R':\n shit.range('F' + str(len(types) + 3)).value = \"Reworked\"\nelse:\n for iter4 in range(len(types)):\n for iter5 in range(len(type1)):\n if type1[iter5] == \"A\":\n shit.range('F' + str(len(types) + 3)).value = 'Analyzed'\n elif type1[iter5] == \"D\":\n shit.range('F' + str(len(types) + 3)).value = 'Drafted'\n elif type1[iter5] == \"R\":\n shit.range('F' + str(len(types) + 3)).value = \"Reworked\"\n\n#Total\ny = 0\nfor iter6 in range(len(number)):\n y = y + number[iter6]\nprint(y)\ntotal = main_frame['Total']\nif len(total):\n for iter7 in range(len(total)):\n shit.range('G' + str(len(total) + 3)).value = y\nelse:\n shit.range('G' + str(len(total) + 3)).value = y\n\nbook.save()\n\n\"\"\"\n\"\"\"\ndef same_check(list1,list2,list3,list4,number):\n for iter in range(len(proj_inp)):\n for iter1 in range(len(list1)):\n if proj_inp[iter] in list1:\n print(\"The project exists\")\n if proj_inp[iter] == list1[iter1]:\n if sec_inp[iter] == list2[iter1]:\n if type_inp[iter] == list3[iter1]:\n print(\"Increase the count\")\n x = 0\n x = list4[iter1] + count_inp[iter]\n print(x)\n shit.range('E' + str(number + iter1 + 2)).value = x\n else:\n print(\"The project does not exist\")\n shit.range('B' + str(number + iter + 2)).value = proj_inp[iter]\n shit.range('C' + str(number + iter + 2)).value = sec_inp[iter]\n shit.range('D' + str(number + iter + 2)).value = type_inp[iter]\n shit.range('E' + str(number + iter + 2)).value = count_inp[iter]\n\"\"\"\n\nfinal_sum = existing_sum + new_sum\nshit.range('F' + str(reference + 2)).value = final_sum\nbook.save()\nbook.close()\n","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":8042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"130338276","text":"from django.conf.urls import include, url\n\nfrom songs import views\nfrom songs import redirects\n\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name=\"songbook\"),\n url(r'^(?P[-\\w]+)/$', views.EntityView.as_view(),\n name=\"songbook_entity\"),\n # Obsolete url redirects.\n 
url(r'^(?P[-\\w]+)/(?P[-\\w]+)/', include([\n url(r'^$', redirects.SongRedirectView.as_view()),\n url(r'^drukuj/$', redirects.SongRedirectView.as_view()),\n ])),\n]\n","sub_path":"songs/urls_entity.py","file_name":"urls_entity.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"151220428","text":"\n'''\nThis python script is used to write log entries to a log file.\n'''\n\nimport re\nimport logging\nfrom logging.handlers import TimedRotatingFileHandler\nimport os\n\ndef setup_logger():\n log_format = \"%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s\"\n log_level = logging.INFO \n handler = TimedRotatingFileHandler(os.getcwd()+'\\log\\log.txt', when=\"midnight\", interval=1)\n handler.setLevel(log_level)\n formatter = logging.Formatter(log_format)\n handler.setFormatter(formatter)\n\n # add a suffix which you want\n handler.suffix = \"%Y%m%d\"\n\n # need to change the extMatch variable to match the suffix for it\n handler.extMatch = re.compile(r\"^\\d{8}$\")\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n return logger\n","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"538856414","text":"#2014103332 김민 기 \nimport numpy as np\nimport matplotlib.pyplot as plt\nhbar=1\nm=1\nN = 4097\na = 200.0\nb = 2.\nx = np.linspace(-a/2.,a/2.,N)\nh = x[1]-x[0] # spacing\nV=np.zeros(N) #potential\nV0=20\nfor i in range(N):\n if x[i]< -b/2. or x[i]> b/2.:\n V[i]= V0\n\nMdd = 1./(h*h)*(np.diag(np.ones(N-1),-1) -2* np.diag(np.ones(N),0) + np.diag(np.ones(N-1),1))\n# Banded matrix : laplacian( by jacobi method) therefore the hamiltonian is,\nH = -(hbar*hbar)/(2.0*m)*Mdd + np.diag(V) #eigenvalue eq. with banded mat.\nE,psiT = np.linalg.eigh(H) #for energy eigenvalue and eigenvector\npsi = np.transpose(psiT)\n# take the transpose of psiT so that the wavefunction vectors can be accessed as psi[n]\nplt.figure(figsize=(10,7))\nplt.xlim((-3*b,3*b))\nplt.plot(x,V,color=\"Gray\",label=\"V(x) \")\nfor i in range(2):\n if E[i]<V0:\n plt.plot(x,psi[i]/np.sqrt(h),label=\"$E_{}$={:>8.3f}\".format(i,E[i]))\n else:\n plt.plot(x,psi[i]/np.sqrt(h),label=\"$E_{}$={:>8.3f}\".format(i,E[i]))\nplt.legend()\nplt.show()\n","sub_path":"computational_physics/Finals/2014103332 김민 기기말고.py","file_name":"2014103332 김민 기기말고.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"409712234","text":"\"\"\"Integration test to check mce performance\"\"\"\nimport time\nimport sys\nimport os.path as op\n\nimport os\nos.environ['PYQTGRAPH_QT_LIB'] = 'PyQt5'\nfrom pyqtgraph import QtCore, QtGui\nimport pyqtgraph\n\npyqtgraph.setConfigOption('useOpenGL', True)\n\nfrom cognigraph.helpers.brainvision import read_fif_data\nfrom cognigraph.pipeline import Pipeline\nfrom cognigraph.nodes import sources, processors, outputs\nfrom cognigraph import TIME_AXIS\nfrom cognigraph.gui.window import GUIWindow\nimport logging\n\napp = QtGui.QApplication(sys.argv)\n\npipeline = Pipeline()\n\ncur_dir = '/home/dmalt/Code/python/cogni_submodules'\ntest_data_path = cur_dir + '/tests/data/'\nprint(test_data_path)\nsim_data_fname = 'raw_sim.fif'\n# sim_data_fname = 'Koleno.fif'\nfwd_fname = 'dmalt_custom_mr-fwd.fif'\n\nsurf_dir = '/home/dmalt/mne_data/MNE-sample-data/subjects/sample/surf'\n\nfwd_path = op.join(test_data_path, fwd_fname)\nsim_data_path = op.join(test_data_path, sim_data_fname)\n\nsource = sources.FileSource(file_path=sim_data_path)\nsource.MAX_SAMPLES_IN_CHUNK = 10000\nsource.loop_the_file = True\npipeline.source = source\n\n# Processors\npreprocessing = processors.Preprocessing(collect_for_x_seconds=30)\npipeline.add_processor(preprocessing)\n\nlinear_filter = processors.LinearFilter(lower_cutoff=8.0, upper_cutoff=12.0)\npipeline.add_processor(linear_filter)\n\nbeamformer = processors.Beamformer(forward_model_path=fwd_path,\n is_adaptive=True, output_type='activation',\n forgetting_factor_per_second=0.95)\npipeline.add_processor(beamformer)\n\n\nenvelope_extractor = processors.EnvelopeExtractor(0.995)\npipeline.add_processor(envelope_extractor)\n\n# Outputs\nglobal_mode = outputs.ThreeDeeBrain.LIMITS_MODES.GLOBAL\nthree_dee_brain = outputs.ThreeDeeBrain(\n limits_mode=global_mode, buffer_length=10, surfaces_dir=surf_dir)\npipeline.add_output(three_dee_brain)\n# pipeline.add_output(outputs.LSLStreamOutput())\n# pipeline.initialize_all_nodes()\nfile_output = outputs.FileOutput()\ntorch_output = outputs.TorchOutput()\n\nsignal_viewer = outputs.SignalViewer()\npipeline.add_output(signal_viewer, input_node=linear_filter)\npipeline.add_output(file_output, input_node=beamformer)\npipeline.add_output(torch_output, input_node=source)\n\nwindow = GUIWindow(pipeline=pipeline)\nwindow.init_ui()\nwindow.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)\nwindow.show()\n\n\nbase_controls = window._controls._base_controls\nsource_controls = base_controls.source_controls\nprocessors_controls = base_controls.processors_controls\noutputs_controls = base_controls.outputs_controls\n\nsource_controls.source_type_combo.setValue(\n source_controls.SOURCE_TYPE_PLACEHOLDER)\n\n\nlinear_filter_controls = processors_controls.children()[0]\n\nenvelope_controls = processors_controls.children()[2]\n# envelope_controls.disabled.setValue(True)\n\n\nthree_dee_brain_controls = outputs_controls.children()[0]\nthree_dee_brain_controls.limits_mode_combo.setValue('Global')\nthree_dee_brain_controls.threshold_slider.setValue(45)\n# three_dee_brain_controls.limits_mode_combo.setValue('Local')\n\nwindow.initialize()\n\n# start_s, stop_s = 80, 100\n# with source.not_triggering_reset():\n# source.data, _ = read_fif_data(sim_data_path, time_axis=TIME_AXIS, start_s=start_s, stop_s=stop_s)\n\ndef run():\n logging.debug('Start iteration')\n pipeline.update_all_nodes()\n logging.debug('End iteration')\n # pass\n # print(pipeline.source._samples_already_read / 500)\n\nclass AsyncUpdater(QtCore.QRunnable):\n _stop_flag = False\n \n def __init__(self):\n super(AsyncUpdater, self).__init__()\n self.setAutoDelete(False)\n\n def run(self):\n self._stop_flag = False\n \n while self._stop_flag == False:\n start = time.time()\n pipeline.update_all_nodes()\n end = time.time()\n \n # Force sleep to update at 10Hz\n if end - start < 0.1:\n time.sleep(0.1 - (end - start))\n \n def stop(self):\n self._stop_flag = True\n\npool = QtCore.QThreadPool.globalInstance()\nupdater = AsyncUpdater()\nis_paused = True\n\ndef toggle_updater():\n global pool\n global updater\n global is_paused\n \n if is_paused == True:\n is_paused = False\n pool.start(updater)\n else:\n is_paused = True\n updater.stop()\n pool.waitForDone()\n \nwindow.run_button.clicked.connect(toggle_updater)\n\n# Suppress numpy warnings, otherwise it looks a bit ugly in iPython :)\nimport numpy as np\nnp.warnings.filterwarnings('ignore')\n\n# Show window and exit on close\nwindow.show()\nupdater.stop()\npool.waitForDone()\nsys.exit(app.exec_())\n\n# timer = QtCore.QTimer()\n# timer.timeout.connect(run)\n# frequency = pipeline.frequency\n# output_frequency = 1000\n# # timer.setInterval(1000. / frequency * 10)\n# # timer.setInterval(1000. / output_frequency)\n# timer.setInterval(0)\n\n# source.loop_the_file = True\n# # source.MAX_SAMPLES_IN_CHUNK = int(frequency / output_frequency)\n# source.MAX_SAMPLES_IN_CHUNK = 10000\n# # source.MAX_SAMPLES_IN_CHUNK = 5\n# # envelope.disabled = True\n\n\n# if __name__ == '__main__':\n# import sys\n\n# timer.start()\n # while True:\n # pipeline.update_all_nodes()\n # timer.start()\n # timer.stop()\n\n # TODO: this runs when in iPython. It should not.\n # Start Qt event loop unless running in interactive mode or using pyside.\n # if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n # sys.exit(QtGui.QApplication.instance().exec_())\n","sub_path":"tests/systemtest_beam.py","file_name":"systemtest_beam.py","file_ext":"py","file_size_in_byte":5561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"500630038","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport threading\nfrom time import gmtime, strftime\n\n# pylint: disable=no-name-in-module,import-error\nfrom base import \\\n Application, \\\n Plugin, \\\n Settings, \\\n configuration, \\\n ConfigurationManager, \\\n ConfigurationNumber, \\\n ConfigurationString, \\\n implements, \\\n ISignalObserver, \\\n slot\nimport paho.mqtt.client as mqtt\nimport logging\nfrom board import Board\nfrom telldus import DeviceManager, Device, Thermostat\nimport netifaces\n\n#try:\n#\tfrom zwave.telldus import TelldusZWave\n#\tfrom zwave.base import CommandClass, ZWSendDataMsg\n#except ImportError:\n#\tTelldusZWave = None\n# pylint: enable=no-name-in-module,import-error\n\n__name__ = 'HASSMQTT' # pylint: disable=W0622\n\nScaleConverter = {\n Device.WATT: {\n 1: 'kVAh', #Device.SCALE_POWER_KVAH\n Device.SCALE_POWER_KWH: 'kWh',\n Device.SCALE_POWER_WATT: 'W',\n 4: 'V', #Device.SCALE_POWER_VOLT\n 5: 'A', #Device.SCALE_POWER_AMPERE\n 6: 'PF' #Device.SCALE_POWER_POWERFACTOR\n },\n Device.TEMPERATURE: {\n Device.SCALE_TEMPERATURE_CELCIUS: u'°C',\n Device.SCALE_TEMPERATURE_FAHRENHEIT: u'°F'\n },\n Device.HUMIDITY: {\n Device.SCALE_HUMIDITY_PERCENT: '%'\n },\n Device.RAINRATE: {\n Device.SCALE_RAINRATE_MMH: 'mm/h'\n },\n Device.RAINTOTAL: {\n Device.SCALE_RAINTOTAL_MM: 'mm'\n },\n Device.WINDDIRECTION: {\n 0: ''\n },\n Device.WINDAVERAGE: {\n Device.SCALE_WIND_VELOCITY_MS: 'm/s'\n },\n Device.WINDGUST: {\n Device.SCALE_WIND_VELOCITY_MS: 'm/s'\n },\n Device.LUMINANCE: {\n Device.SCALE_LUMINANCE_PERCENT: '%',\n Device.SCALE_LUMINANCE_LUX: 'lux'\n },\n Device.BAROMETRIC_PRESSURE: {\n Device.SCALE_BAROMETRIC_PRESSURE_KPA: 'kPa'\n }\n}\n\nClassConverter = {\n Device.TEMPERATURE: 'temperature',\n Device.HUMIDITY: 'humidity',\n Device.BAROMETRIC_PRESSURE: 'pressure',\n Device.LUMINANCE: 'illuminance'\n}\n\ndef getMacAddr(compact = True):\n addrs = netifaces.ifaddresses(Board.networkInterface())\n try:\n mac = addrs[netifaces.AF_LINK][0]['addr']\n except (IndexError, KeyError):\n return ''\n return mac.upper().replace(':', '') if compact else mac.upper()\n\ndef slugify(value):\n allowed_chars = set('_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')\n return filter(lambda x: x in allowed_chars, value.replace(' ', '_').replace('-', '_'))\n\n@configuration(\n username = ConfigurationString(\n defaultValue='',\n title='MQTT Username',\n description='Username'\n ),\n password=ConfigurationString(\n defaultValue='',\n title='MQTT Password',\n description='Password'\n ),\n hostname=ConfigurationString(\n defaultValue='',\n title='MQTT Hostname',\n description='Hostname'\n ),\n port=ConfigurationNumber(\n defaultValue=1883,\n title='MQTT Port',\n description='Port'\n ),\n discovery_topic=ConfigurationString(\n defaultValue='homeassistant',\n title='Autodiscovery topic',\n description='Home Assistant autodiscovery topic'\n ),\n device_name=ConfigurationString(\n defaultValue='znet',\n title='Device name',\n description='Name of this device'\n ),\n base_topic=ConfigurationString(\n defaultValue='telldus',\n title='Base 
topic',\n description='Base topic for this device'\n ),\n state_retain=ConfigurationNumber(\n defaultValue=1,\n title='Retain state changes, 1 = yes, 0 = no',\n description='Post state changes with retain'\n ),\n devices_configured=ConfigurationString(\n defaultValue='',\n hidden=True,\n title='Internal, do not change',\n description='Internal, do not change. Used to store what devices has been published.'\n )\n)\nclass Client(Plugin):\n implements(ISignalObserver)\n\n def __init__(self):\n self._ready = False\n self._running = True\n self._knownDevices = None\n Application().registerShutdown(self.onShutdown)\n self.client = mqtt.Client(userdata = self)\n self.client.on_disconnect = self.onDisconnect\n self.client.on_connect = self.onConnect\n self.client.on_message = self.onMessage\n if self.config('hostname'):\n Application().queue(self.connect)\n\n def getSlugifiedConfig(self, name):\n return slugify(self.config(name))\n\n def getBaseTopic(self):\n return self.getSlugifiedConfig('base_topic')\n\n def getDeviceName(self):\n return self.getSlugifiedConfig('device_name')\n\n def onShutdown(self):\n self._running = False \n self.disconnect()\n\n def updateConfig(self):\n self.debug('Updating config.devices_configured to : %s' % self._knownDevices)\n try:\n self.setConfig('devices_configured', self._knownDevices)\n except Exception as e:\n self.debug('updateConfig error %s' % str(e))\n\n def getKnownDevices(self):\n if not self._knownDevices:\n if self.config('devices_configured'):\n self._knownDevices = [tuple(x) for x in json.loads(self.config('devices_configured'))]\n else:\n self._knownDevices = []\n return self._knownDevices\n \n def setKnownDevices(self, devices):\n self._knownDevices = devices\n self.updateConfig()\n\n def isKnownDevice(self, type, devId, deviceId):\n devices = self.getKnownDevices()\n return (type, str(devId), str(deviceId)) in devices\n\n def addKnownDevice(self, type, devId, deviceId):\n devices = self.getKnownDevices()\n devices.append((type, str(devId), str(deviceId)))\n self.setKnownDevices(devices)\n \n def delKnownDevice(self, type, devId, deviceId):\n devices = self.getKnownDevices()\n devices.remove((type, devId, deviceId))\n self.setKnownDevices(devices)\n\n def configWasUpdated(self, key, value):\n self.debug('config updated %s %s' % (key, value))\n if key == 'state_retain' and value == '0':\n # clear retained messages\n try:\n for type, _, fullId in self.getKnownDevices():\n deviceTopic = self.getDeviceTopic(type, fullId)\n self.debug('clear retain for %s/state' % deviceTopic)\n self.client.publish('%s/state' % deviceTopic, None, retain = True)\n except Exception as e:\n self.debug('clear retain error %s' % str(e))\n if not key in ['devices_configured', 'state_retain']:\n self.disconnect()\n Application().queue(self.connect)\n\n def tearDown(self):\n try:\n for type, _, fullId in self.getKnownDevices():\n deviceTopic = self.getDeviceTopic(type, fullId)\n self.client.publish('%s/config' % deviceTopic, '', retain = True)\n self.client.publish('%s/state' % deviceTopic, '', retain = True)\n except Exception as e:\n self.debug('tearDown %s' % str(e))\n self._knownDevices = []\n self.updateConfig()\n self.disconnect()\n\n def disconnect(self):\n #self.client.disconnect()\n self.client.loop_stop()\n self._running = False\n self._ready = False\n\n def connect(self):\n username = self.config('username')\n password = self.config('password')\n base_topic = self.getBaseTopic()\n device_name = self.getDeviceName()\n hostname = self.config('hostname')\n port = 
self.config('port')\n\n if username != '':\n self.client.username_pw_set(username, password)\n self.client.will_set(\n '%s/%s/available' % (base_topic, device_name) if base_topic \\\n else '%s/available' % device_name, \n 'offline', \n 0, \n True\n )\n self.client.connect_async(hostname, port, keepalive=10)\n self.client.loop_start()\n\n def debug(self, msg):\n logging.info('HASS DBG: %s', msg)\n base_topic = self.getBaseTopic()\n device_name = self.getDeviceName()\n debugTopic = (\n '%s/%s/debug' % (base_topic, device_name) if base_topic \\\n else '%s/debug' % device_name\n )\n time = strftime('%H:%M:%S', gmtime())\n self.client.publish(debugTopic, '%s - %s' % (time, msg))\n\n def getDeviceType(self, device):\n capabilities = device.methods()\n devicetype = device.allParameters().get('devicetype')\n if devicetype == Device.TYPE_THERMOSTAT:\n return 'climate'\n elif devicetype == Device.TYPE_REMOTE_CONTROL:\n return 'remote'\n elif capabilities & Device.DIM:\n return 'light'\n elif capabilities & Device.TURNON:\n return 'switch'\n elif capabilities & Device.UP:\n return 'cover'\n elif capabilities & Device.BELL:\n return 'switch'\n else:\n return 'binary_sensor'\n\n def getDeviceTopic(self, type, id):\n discoverTopic = self.getSlugifiedConfig('discovery_topic')\n telldusName = self.getDeviceName() or 'telldus'\n if type in ['remote']:\n type = 'binary_sensor'\n return '%s/%s/%s/%s' % (discoverTopic, type, telldusName, id)\n\n def getSensorId(self, deviceId, valueType, scale):\n return slugify('%s_%s_%s' % (deviceId, valueType, scale))\n\n def getBatteryId(self, device):\n return slugify('%s_%s_battery' % (getMacAddr(), device.id()))\n\n def formatBattery(self, battery):\n return {\n Device.BATTERY_LOW: 1,\n Device.BATTERY_UNKNOWN: None,\n Device.BATTERY_OK: 100\n }.get(battery, int(battery))\n\n def formatScale(self, type, scale):\n return ScaleConverter.get(type, {}).get(scale, '')\n\n def getClimateModes(self, device):\n params = device.allParameters() if hasattr(device, 'allParameters') else device.parameters()\n modes = params.get('thermostat', {}).get('modes', ['auto'])\n return modes\n\n def getClimateMode(self, device):\n state, stateValue = device.state()\n thermoValues = device.stateValue(Device.THERMOSTAT)\n availModes = self.getClimateModes(device)\n return thermoValues.get('mode') or availModes[0]\n\n def getClimateSetPoint(self, device, mode = None):\n thermoValues = device.stateValue(Device.THERMOSTAT)\n setpoint = thermoValues.get('setpoint')\n if isinstance(setpoint, dict) and mode:\n setpoint = setpoint.get(mode)\n return setpoint\n\n def climateState(self, device):\n try:\n if self.getDeviceType(device) != \"climate\":\n return\n\n thermoValues = device.stateValue(Device.THERMOSTAT)\n sensorValues = device.sensorValues()\n tempValues = sensorValues[Device.TEMPERATURE]\n mode = self.getClimateMode(device)\n setpoint = self.getClimateSetPoint(device, mode)\n\n self.debug(\n 'climateState %s, sensorValues: %s, thermovalues: %s, setPoint: %s, mode: %s' % \\\n (device.id(), sensorValues, thermoValues, setpoint, mode)\n )\n\n payload = {\n 'setpoint': setpoint,\n 'mode': { Thermostat.MODE_FAN: 'fan_only' }.get(mode, mode),\n }\n\n if device.isSensor() and sensorValues[Device.TEMPERATURE]:\n value = tempValues[0] if isinstance(tempValues, list) else tempValues\n payload.update({\n 'temperature': value.get('value'),\n })\n\n stateTopic = '%s/state' % self.getDeviceTopic(\"climate\", device.id())\n self.client.publish(stateTopic, json.dumps(payload), retain = True)\n except 
Exception as e:\n self.debug('climateState exception %s' % str(e))\n\n def deviceState(self, device):\n try:\n state, stateValue = device.state()\n\n deviceType = self.getDeviceType(device)\n if not deviceType:\n return\n\n self.debug('deviceState %s, state: %s, value: %s' % (device.id(), state, stateValue))\n\n stateTopic = '%s/state' % self.getDeviceTopic(deviceType, device.id())\n payload = ''\n\n retain = True\n if deviceType in ['light']:\n if state == Device.DIM:\n payload = json.dumps({\n 'state': 'ON' if stateValue and int(stateValue) > 0 else 'OFF',\n 'brightness': int(stateValue) if stateValue else 0\n })\n else:\n payload = json.dumps({\n 'state': 'ON' if state == Device.TURNON else 'OFF',\n 'brightness': (int(stateValue) if stateValue else 255) if state == Device.TURNON else 0\n })\n elif deviceType in ['remote']:\n payload = 'ON' if state in [Device.TURNON] else 'OFF'\n retain = False\n elif deviceType in ['switch']:\n payload = 'ON' if state in [Device.TURNON, Device.BELL] else 'OFF' \n elif deviceType in ['binary_sensor']:\n payload = 'ON' if state in [Device.TURNON] else 'OFF' \n elif deviceType in ['cover']:\n payload = 'OPEN' if state == Device.UP else 'CLOSED' if state == Device.DOWN else 'STOP'\n\n use_retain = retain and (self.config('state_retain') == 1)\n self.client.publish(stateTopic, payload, retain = use_retain)\n if state == Device.BELL:\n self.client.publish(stateTopic, 'OFF', retain = use_retain)\n except Exception as e:\n self.debug('deviceState exception %s' % str(e))\n\n def sensorState(self, device, valueType, scale):\n try:\n sensorId = self.getSensorId(device.id(), valueType, scale)\n for sensor in device.sensorValues()[valueType]:\n if sensor['scale'] == scale:\n self.debug('sensorState %s' % sensor)\n payload = { \n 'value': sensor['value'],\n 'lastUpdated': sensor.get('lastUpdated')\n }\n self.client.publish(\n '%s/state' % self.getDeviceTopic('sensor', sensorId),\n json.dumps(payload),\n retain = True\n )\n except Exception as e:\n self.debug('sensorState exception %s' % str(e))\n\n def batteryState(self, device):\n try:\n self.client.publish(\n '%s/state' % self.getDeviceTopic('sensor', self.getBatteryId(device)),\n self.formatBattery(device.battery()),\n retain = True\n )\n except Exception as e:\n self.debug('batteryState exception %s' % str(e))\n\n def publish_discovery(self, device, type, deviceId, config):\n base_topic = self.getBaseTopic()\n device_name = self.getDeviceName()\n config.update({\n 'unique_id': '%s_%s' % (getMacAddr(), deviceId),\n 'availability_topic': (\n '%s/%s/available' % (base_topic, device_name) if base_topic \\\n else '%s/available' % device_name\n ),\n 'device': {\n 'identifiers': device.getOrCreateUUID(),\n 'manufacturer': device.protocol().title(),\n 'model': device.model().title(), # Model is always 'n/a' but is supposed to be updated.\n 'name': device.name(),\n 'via_device': getMacAddr(),\n }\n })\n self.client.publish(\n '%s/config' % self.getDeviceTopic(type, deviceId), \n json.dumps(config),\n retain = True\n )\n return (type, str(device.id()), str(deviceId))\n\n def remove_discovery(self, type, devId, fullId):\n deviceTopic = self.getDeviceTopic(type, fullId)\n self.debug('remove discovered device %s,%s,%s : %s' % (type, devId, fullId, deviceTopic))\n self.client.publish('%s/config' % deviceTopic, '', retain = True)\n self.client.publish('%s/state' % deviceTopic, '', retain = True)\n\n def discoverClimate(self, device):\n deviceTopic = self.getDeviceTopic('climate', device.id())\n try:\n sensorValues = 
device.sensorValues()\n thermoValues = device.stateValue(Device.THERMOSTAT)\n availModes = self.getClimateModes(device)\n\n climateConfig = {\n 'name': device.name(),\n 'temperature_command_topic': '%s/set/setpoint' % deviceTopic,\n 'json_attributes_topic': '%s/attr' % deviceTopic,\n 'json_attributes_template': '{{ json_value }}',\n }\n\n if device.isSensor() and sensorValues[Device.TEMPERATURE]:\n climateConfig.update({\n 'current_temperature_topic': '%s/state' % deviceTopic,\n 'current_temperature_template': '{{ value_json.temperature }}',\n # Only after https://github.com/home-assistant/home-assistant/pull/30602\n #'unit_of_measurement': ScaleConverter.get(Device.TEMPERATURE).get()\n })\n\n if availModes:\n climateConfig.update({\n 'modes': availModes,\n 'mode_command_topic': '%s/set/mode' % deviceTopic,\n 'mode_state_topic': '%s/state' % deviceTopic,\n 'mode_state_template': '{{ value_json.mode }}',\n })\n\n if thermoValues.get('setpoint', None) is not None:\n climateConfig.update({\n 'temperature_state_topic': '%s/state' % deviceTopic,\n 'temperature_state_template': '{{ value_json.setpoint }}',\n })\n\n self.client.publish(\n '%s/attr' % deviceTopic,\n json.dumps({ 'modes': availModes }),\n retain = True\n )\n\n return self.publish_discovery(device, 'climate', device.id(), climateConfig)\n except Exception as e:\n self.debug('discoverThermostat %s' % str(e))\n\n def discoverBattery(self, device):\n deviceTopic = self.getDeviceTopic('sensor', self.getBatteryId(device))\n try:\n sensorConfig = {\n 'name': '%s - Battery' % device.name(),\n 'unit_of_measurement': '%',\n 'device_class': 'battery',\n 'state_topic': '%s/state' % deviceTopic\n }\n return self.publish_discovery(device, 'sensor', self.getBatteryId(device), sensorConfig)\n except Exception as e:\n self.debug('discoverBattery %s' % str(e))\n\n def discoverSensor(self, device, valueType, scale):\n sensorId = self.getSensorId(device.id(), valueType, scale)\n deviceTopic = self.getDeviceTopic(\"sensor\", sensorId)\n try:\n sensorConfig = {\n 'name': '%s %s - %s' % (\n device.name(), \n Device.sensorTypeIntToStr(valueType), \n self.formatScale(valueType, scale)\n ),\n 'state_topic': '%s/state' % deviceTopic,\n 'value_template': '{{ value_json.value }}',\n 'json_attributes_topic': '%s/state' % deviceTopic,\n 'unit_of_measurement': self.formatScale(valueType, scale),\n }\n if ClassConverter.get(valueType, None):\n sensorConfig.update({\n 'device_class': ClassConverter.get(valueType, None)\n })\n\n sensorId = self.getSensorId(device.id(), valueType, scale)\n return self.publish_discovery(device, 'sensor', sensorId, sensorConfig)\n except Exception as e:\n self.debug('discoverSensor %s' % str(e))\n\n def discoverDevice(self, device):\n try:\n deviceType = self.getDeviceType(device)\n if not deviceType:\n return None\t\n\n deviceTopic = self.getDeviceTopic(deviceType, device.id())\n deviceConfig = { \n 'name': device.name(),\n 'state_topic': '%s/state' % deviceTopic\n }\n\n if deviceType in ['remote']:\n deviceConfig.update({\n 'expire_after': 1\n })\n if deviceType in ['switch', 'light', 'cover']:\n deviceConfig.update({\n 'command_topic': '%s/set' % deviceTopic\n })\n if deviceType == 'light':\n deviceConfig.update({\n 'schema': 'json',\n 'brightness': True\n })\n if deviceType == 'switch' and (device.methods() & Device.BELL):\n deviceConfig.update({\n 'payload_on': 'BELL'\n })\n\n self.debug('device is device: %s' % json.dumps({\n 'deviceType': deviceType,\n 'deviceTopic': deviceTopic,\n 'deviceConfig': deviceConfig\n }))\n\n return 
self.publish_discovery(device, deviceType, device.id(), deviceConfig)\n except Exception as e:\n self.debug('discoverDevice %s' % str(e))\n\n def discovery(self, device):\n result = []\n try:\n if device.battery() and device.battery() != Device.BATTERY_UNKNOWN:\n self.debug('device %s has battery' % device.id())\n result.append(self.discoverBattery(device))\n self.batteryState(device)\n\n if device.deviceType() == Device.TYPE_THERMOSTAT:\n self.debug('device %s is climate' % device.id())\n result.append(self.discoverClimate(device))\n self.climateState(device)\n else:\n if device.isSensor():\n self.debug('device %s has sensors' % device.id())\n for type, sensors in device.sensorValues().items():\n self.debug('sensortype %s has %s' % (type, sensors))\n for sensor in sensors:\n result.append(self.discoverSensor(device, type, sensor['scale']))\n self.sensorState(device, type, sensor['scale'])\n\n if device.isDevice():\n self.debug('device %s is a device' % device.id())\n item = self.discoverDevice(device)\n result.append(item)\n if item[0] != \"remote\":\n self.deviceState(device)\n except Exception as e:\n self.debug('discovery %s' % str(e))\n return [x for x in result if x]\n\n def publish_hub_device(self):\n base_topic = self.getBaseTopic()\n device_name = self.getDeviceName()\n deviceId = 'hub'\n config = {\n 'name': device_name,\n 'state_topic': (\n '%s/%s/available' % (base_topic, device_name) if base_topic \\\n else '%s/available' % device_name\n ),\n 'payload_on': 'online',\n 'payload_off': 'offline',\n 'device_class': 'connectivity',\n 'unique_id': '%s_%s' % (getMacAddr(), deviceId),\n 'availability_topic': (\n '%s/%s/available' % (base_topic, device_name) if base_topic \\\n else '%s/available' % device_name\n ),\n 'device': {\n 'identifiers': getMacAddr(),\n 'connections': [['mac', getMacAddr(False)]],\n 'manufacturer': 'Telldus Technologies',\n 'model': Board.product().replace('-', ' ').title().replace(' ', '_'),\n 'name': device_name,\n 'sw_version': Board.firmwareVersion()\n }\n }\n self.client.publish(\n '%s/config' % self.getDeviceTopic('binary_sensor', deviceId),\n json.dumps(config),\n retain = True\n )\n return (deviceId, deviceId, deviceId)\n\n\n def run_discovery(self):\n self.debug('discover devices')\n try:\n # publish devices\n publishedDevices = [self.publish_hub_device()]\n deviceManager = DeviceManager(self.context)\n devices = deviceManager.retrieveDevices()\n for device in devices:\n try:\n self.debug(json.dumps({\n 'deviceId': device.id(),\n 'type': self.getDeviceType(device),\n 'name': device.name(),\n 'isDevice': device.isDevice(),\n 'isSensor': device.isSensor(),\n 'methods': device.methods(),\n 'battery': device.battery(),\n 'parameters': device.allParameters() if hasattr(device, 'allParameters') else device.parameters(),\n 'typeStr': device.typeString(),\n 'sensors': device.sensorValues(),\n 'state': device.state()\n }))\n publishedDevices.extend(self.discovery(device))\n except Exception as e:\n self.debug('run_discovery device exception %s' % str(e))\n\n for type, devId, fullId in list(set(self.getKnownDevices()) - set(publishedDevices)):\n self.remove_discovery(type, devId, fullId)\n\n self.setKnownDevices(publishedDevices)\n except Exception as e:\n self.debug('run_discovery exception %s' % str(e))\n\n def onConnect(self, client, userdata, flags, result):\n base_topic = userdata.config('base_topic')\n device_name = userdata.config('device_name')\n client.publish(\n '%s/%s/available' % (base_topic, device_name) if base_topic \\\n else '%s/available' % 
device_name, \n 'online', \n 0, \n True\n )\n try:\n userdata.run_discovery()\n #subscribe to commands\n userdata.debug('subscribing')\n client.subscribe('%s/+/%s/+/set' % (userdata.config('discovery_topic'), device_name))\n client.subscribe('%s/+/%s/+/set/+' % (userdata.config('discovery_topic'), device_name))\n userdata._ready = True\n except Exception as e:\n userdata.debug('OnConnect error %s' % str(e))\n\n def onDisconnect(self, client, userdata, rc):\n self.debug(\"Mqtt disconnected\")\n userdata._ready = False\n\n @slot('deviceAdded')\n def onDeviceAdded(self, device):\n if not self._running:\n return\n try:\n self.debug('Device added %s' % device.id())\n devices = self.getKnownDevices()\n devices.extend(self.discovery(device))\n self.setKnownDevices(devices)\n except Exception as e:\n self.debug('onDeviceAdded error %s' % str(e))\n\n @slot('deviceRemoved')\n def onDeviceRemoved(self, deviceId):\n if not self._running:\n return\n try:\n self.debug('Device removed %s' % deviceId)\n devices = self.getKnownDevices()\n for type, devId, fullId in devices:\n if devId == str(deviceId):\n self.remove_discovery(type, devId, fullId)\n devices = [x for x in devices if x[1] != str(deviceId)]\n self.setKnownDevices(devices)\n except Exception as e:\n self.debug('onDeviceRemoved error %s' % str(e))\n\n @slot('deviceUpdated')\n def onDeviceUpdated(self, device):\n if not self._running:\n return\n try:\n self.debug('Device updated %s' % device.id())\n devices = self.getKnownDevices()\n for type, devId, fullId in devices:\n if devId == str(device.id()):\n self.remove_discovery(type, devId, fullId)\n devices = [x for x in devices if x[1] != str(device.id())]\n devices.extend(self.discovery(device))\n self.setKnownDevices(devices)\n except Exception as e:\n self.debug('onDeviceUpdated error %s' % str(e))\n\n @slot('rf433RawData')\n def onRawData(self, data,*__args, **__kwargs):\n if not self._running:\n return\n self.debug(json.dumps(data))\n\n @slot('sensorValueUpdated')\n def onSensorValueUpdated(self, device, valueType, value, scale):\n if not self._ready or not self._running:\n return\n self.debug(json.dumps({\n 'type': 'sensorValueUpdated',\n 'deviceId': device.id(),\n 'valueType': valueType,\n 'value': value,\n 'scale': scale,\n 'battery': device.battery()\n }))\n if self.getDeviceType(device) == 'climate':\n if not self.isKnownDevice('climate', device.id(), device.id()):\n self.debug('A wild climate device appeared! deviceId: %s' % device.id())\n type, devId, deviceId = self.discoverClimate(device)\n self.addKnownDevice(type, devId, deviceId)\n self.climateState(device)\n else:\n sensorId = self.getSensorId(device.id(), valueType, scale)\n if not self.isKnownDevice('sensor', device.id(), sensorId):\n self.debug('A wild sensor appeared! 
deviceId: %s, sensorId: %s' % (device.id(), sensorId))\n type, devId, deviceId = self.discoverSensor(device, valueType, scale)\n self.addKnownDevice(type, devId, deviceId)\n self.sensorState(device, valueType, scale)\n if device.battery() and device.battery() != Device.BATTERY_UNKNOWN:\n self.batteryState(device)\n\n @slot('deviceStateChanged')\n def onDeviceStateChanged(self, device, state, stateValue, origin=None):\n if not self._ready or not self._running:\n return\n deviceType = self.getDeviceType(device)\n self.debug(json.dumps({\n 'type': 'deviceStateChanged',\n 'deviceId': device.id(),\n 'state': state,\n 'stateValue': stateValue,\n 'origin': origin,\n 'devicetype': deviceType\n }))\n if not deviceType:\n return\n if self.getDeviceType(device) == 'climate':\n if not self.isKnownDevice('climate', device.id(), device.id()):\n self.debug('A wild climate device appeared! deviceId: %s' % device.id())\n type, devId, deviceId = self.discoverClimate(device)\n self.addKnownDevice(type, devId, deviceId)\n self.climateState(device)\n else:\n if not self.isKnownDevice(deviceType, device.id(), device.id()):\n self.debug('A wild device appeared! type: %s, deviceId: %s' % (deviceType, device.id()))\n type, devId, deviceId = self.discoverDevice(device)\n self.addKnownDevice(type, devId, deviceId)\n self.deviceState(device)\n if device.battery():\n self.batteryState(device)\n\n def onMessage(self, client, userdata, msg):\n try:\n topic = msg.topic\n payload = msg.payload\n\n if topic.split('/')[-1] == 'set':\n topicType = 'set'\n else:\n topicType = topic.split('/')[-1]\n deviceManager = DeviceManager(userdata.context)\n \n device_id = int(msg.topic.split('/')[3])\n device = deviceManager.device(device_id)\n deviceType = userdata.getDeviceType(device)\n if not deviceType:\n return\n\n userdata.debug(json.dumps({\n 'type': 'command',\n 'device_id': device_id,\n 'device_type': deviceType,\n 'command': payload\n }))\n\n def failed(reason, **__kwargs):\n self.debug('Device command failed: %s' % reason)\n\n if deviceType == 'climate':\n if topicType == 'mode':\n mode = { 'fan_only': Thermostat.MODE_FAN }.get(payload, payload)\n setpoint = self.getClimateSetPoint(device, mode)\n if setpoint is not None:\n value = { \n 'mode': mode, \n 'changeMode': True, \n 'temperature': self.getClimateSetPoint(device, mode) \n }\n self.debug('Command THERMOSTAT value: %s' % value)\n device.command(\n Device.THERMOSTAT, \n value = value,\n origin = 'mqtt_hass',\n failure = failed\n )\n else:\n self.debug('Can not set mode, setpoint none')\n if topicType == 'setpoint':\n setpoint = float(payload) if payload else None\n if setpoint is not None:\n value = { \n 'mode': self.getClimateMode(device), \n 'changeMode': False, \n 'temperature': setpoint\n }\n self.debug('Command THERMOSTAT value: %s' % value)\n device.command(\n Device.THERMOSTAT,\n value = value,\n origin = 'mqtt_hass',\n failure = failed\n )\n else:\n self.debug('Can not update setpoint, setpoint none (%s)' % payload)\n\n elif deviceType == 'light':\n payload = json.loads(payload)\n if 'brightness' in payload:\n if int(payload['brightness']) == 0:\n device.command(\n Device.TURNOFF, \n origin = 'mqtt_hass',\n failure = failed\n )\n else:\n device.command(\n Device.DIM, \n value = int(payload['brightness']), \n origin = 'mqtt_hass',\n failure = failed\n )\n else:\n device.command(\n Device.TURNON if payload['state'].upper() == 'ON' \\\n else Device.TURNOFF, \n value = 255, \n origin = 'mqtt_hass',\n failure = failed\n )\n\n elif deviceType == 'switch':\n 
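# added note: the raw MQTT payload is mapped onto the matching Telldus device command below\n                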
device.command(\n Device.TURNON if payload.upper() == 'ON' \\\n else Device.BELL if payload.upper() == 'BELL' \\\n else Device.TURNOFF, \n origin = 'mqtt_hass',\n failure = failed\n )\n\n elif deviceType == 'cover':\n device.command(\n Device.UP if payload.upper() == 'OPEN' \\\n else Device.DOWN if payload.upper() == 'CLOSE' else \\\n Device.STOP, \n origin = 'mqtt_hass',\n failure = failed\n )\n except Exception as e:\n userdata.debug('onMessage exception %s' % str(e))\n\n \n","sub_path":"hass_client/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":30786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"326833320","text":"#! /usr/bin/env python\n\nfrom matplotlib.backends import qt4_compat\nuse_pyside = qt4_compat.QT_API == qt4_compat.QT_API_PYSIDE\nif use_pyside:\n from PySide.QtGui import *\nelse:\n from PyQt4.QtGui import *\nfrom ui_mainwindow import Ui_MainWindow\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n\n\nif __name__ == '__main__':\n import sys\n\n app = QApplication(sys.argv)\n\n mainWindow = MainWindow()\n mainWindow.show()\n\n app.exec_()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"180876365","text":"def get_neighbours(individual, gene_letters):\n \"\"\"\n Creates a list of neighbours created by changing a single bit of the individual\n :param individual: a binary string\n :param gene_letters: the valid values for each individual's cell\n :return: a list of binary strings representing the individual's neighbours\n \"\"\"\n neighbours = []\n for i in range(len(individual)):\n for g in gene_letters:\n if g != individual[i]:\n neighbours.append(individual[:i] + g + individual[i + 1:])\n return neighbours\n\n\ndef find_best_neighbour(individual, gene_letters, fitness_function):\n \"\"\"\n Find the best neighbour of the individual according to the fitness function (y=f(x)) provided\n Result doesn't include the main individual, if individual == best_neighbour, the algorithm is searching in a plateau\n :param individual: a binary string\n :param gene_letters: the valid values for each individual's cell\n :param fitness_function: the fitness function represented as y=f(x)\n :return: the binary string of the fittest neighbour\n \"\"\"\n neighbours = get_neighbours(individual, gene_letters)\n best_neighbour = neighbours[0]\n for neighbour in neighbours[1:]:\n if fitness_function(neighbour) > fitness_function(best_neighbour):\n best_neighbour = neighbour\n return best_neighbour\n\n\ndef sideways_move(start, f, gene_letters, tabu_size=None, max_sideway=None):\n \"\"\"\n Perform a sideways search\n :param start: starting point of the search\n :param f: the function to perform search on\n :param gene_letters: the valid values for each individual's cell\n :param tabu_size: maximum size of the tabu list\n if none, no tabu list will be used\n :param max_sideway: maximum number of sideways moves allowed\n if none, infinite number of sideways moves are allowed\n :return: the first neighbour with a better f(x) than the starting point\n or None if search fails\n \"\"\"\n tabu_list = []\n result = start\n while f(result) == f(start) and result not in tabu_list and (max_sideway is None or max_sideway >= 0):\n result = find_best_neighbour(result, gene_letters, f)\n if f(result) 
== f(start):\n            # record the plateau's neighbourhood so the search does not revisit it\n            for neighbour in get_neighbours(result, gene_letters):\n                tabu_list.append(neighbour)\n                if tabu_size is not None and len(tabu_list) > tabu_size:  # prune only when a tabu size was given\n                    del tabu_list[0]\n        if max_sideway is not None:\n            max_sideway -= 1\n    return result if f(result) > f(start) else None\n\n\ndef hillclimb_search(start, f, gene_letters, max_improve=None, tabu_size=None, max_sideway=None):\n    \"\"\"\n    Perform a hill-climb search on the function f (y=f(x)) with the given starting point\n    :param start: the starting point of the search\n    :param f: the function to perform search on\n    :param gene_letters: the valid values for each individual's cell\n    :param max_improve: the maximum number of improving moves allowed\n        if none, search will continue until no better solution is found\n    :param tabu_size: maximum size of the tabu list\n        if none, no tabu list will be used\n    :param max_sideway: maximum number of sideways moves allowed\n        if none, infinite number of sideways moves are allowed\n    :return: result of the search\n    \"\"\"\n    result = start\n    better_solution_found = True\n    while better_solution_found and (max_improve is None or max_improve > 0):\n        best_neighbour = find_best_neighbour(result, gene_letters, f)\n        if f(best_neighbour) == f(result):\n            # plateau: search sideways from the current point rather than the original start\n            sideway_result = sideways_move(result, f, gene_letters, tabu_size, max_sideway)\n            if sideway_result is None:\n                better_solution_found = False\n            else:\n                result = sideway_result  # adopt the strictly better point found sideways\n        elif f(best_neighbour) > f(result):\n            result = best_neighbour\n        elif f(best_neighbour) < f(result):\n            better_solution_found = False\n        if max_improve is not None:\n            max_improve -= 1\n    return result\n","sub_path":"genetic_hillclimb_hybrid_algorithm/hillclimb.py","file_name":"hillclimb.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"4408242","text":"import os\r\nimport cv2\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom picamera.array import PiRGBArray\r\nfrom picamera import PiCamera\r\nimport tensorflow as tf\r\nimport argparse\r\nimport sys\r\nimport time\r\nimport csv\r\n\r\n######## BOILERPLATE CODE #######\r\n# Set up camera constants\r\n#IM_WIDTH = 1280\r\n#IM_HEIGHT = 720\r\nIM_WIDTH = 640    # Use smaller resolution for\r\nIM_HEIGHT = 480   # slightly faster framerate\r\n\r\n# Select camera type (if user enters --usbcam when calling this script,\r\n# a USB webcam will be used)\r\ncamera_type = 'picamera'\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--usbcam', help='Use a USB webcam instead of picamera',\r\n                    action='store_true')\r\nargs = parser.parse_args()\r\nif args.usbcam:\r\n    camera_type = 'usb'\r\n\r\n# This is needed since the working directory is the object_detection folder.\r\nsys.path.append('..')\r\n\r\n# Import utilities\r\nfrom object_detection.utils import label_map_util\r\nfrom object_detection.utils import visualization_utils as vis_util\r\n\r\n# Name of the directory containing the object detection module we're using\r\nMODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09'\r\n\r\n# Grab path to current working directory\r\nCWD_PATH = os.getcwd()\r\n\r\n# Path to frozen detection graph .pb file, which contains the model that is used\r\n# for object detection.\r\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\r\n\r\n# Path to label map file\r\nPATH_TO_LABELS = os.path.join(CWD_PATH,'data','mscoco_label_map.pbtxt')\r\n\r\n# Number of classes the object detector can identify\r\nNUM_CLASSES = 90\r\n\r\n## Load the label map.\r\nlabel_map = 
label_map_util.load_labelmap(PATH_TO_LABELS)\r\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\r\ncategory_index = label_map_util.create_category_index(categories)\r\n\r\n# Load the Tensorflow model into memory.\r\ndetection_graph = tf.Graph()\r\nwith detection_graph.as_default():\r\n    od_graph_def = tf.compat.v1.GraphDef()\r\n    with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:  # tf.io.gfile matches the tf.compat.v1 usage above; tf.gfile is gone in newer TF\r\n        serialized_graph = fid.read()\r\n        od_graph_def.ParseFromString(serialized_graph)\r\n        tf.import_graph_def(od_graph_def, name='')\r\n\r\n    sess = tf.compat.v1.Session(graph=detection_graph)\r\n\r\n\r\n# Define input and output tensors (i.e. data) for the object detection classifier\r\n\r\n# Input tensor is the image\r\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\r\n\r\n# Output tensors are the detection boxes, scores, and classes\r\n# Each box represents a part of the image where a particular object was detected\r\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\r\n\r\n# Each score represents level of confidence for each of the objects.\r\n# The score is shown on the result image, together with the class label.\r\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\r\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\r\n\r\n# Number of objects detected\r\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\r\n\r\n#Initialises the list for output\r\noutput = []\r\n\r\n# creating a function\r\ndef group_counting():\r\n\r\n    # Initialize frame rate calculation\r\n    frame_rate_calc = 1\r\n    freq = cv2.getTickFrequency()\r\n    font = cv2.FONT_HERSHEY_SIMPLEX\r\n\r\n    # Initialize Picamera and grab reference to the raw capture\r\n    camera = PiCamera()\r\n    camera.resolution = (IM_WIDTH,IM_HEIGHT)\r\n    camera.framerate = 10\r\n    rawCapture = PiRGBArray(camera, size=(IM_WIDTH,IM_HEIGHT))\r\n    rawCapture.truncate(0)\r\n\r\n    #Standard setup for the live object viewer\r\n    for frame1 in camera.capture_continuous(rawCapture, format=\"bgr\",use_video_port=True):\r\n\r\n        # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]\r\n        # i.e. a single-column array, where each item in the column has the pixel RGB value\r\n        frame = np.copy(frame1.array)\r\n        frame.setflags(write=1)\r\n        frame_expanded = np.expand_dims(frame, axis=0)\r\n\r\n        # Perform the actual detection by running the model with the image as input\r\n        (boxes, scores, classes, num) = sess.run(\r\n            [detection_boxes, detection_scores, detection_classes, num_detections],\r\n            feed_dict={image_tensor: frame_expanded})\r\n\r\n        ####### OBJECT SELECTION AND COUNTING CODE STARTS HERE #######\r\n        # pulling raw output from object detection. 
Creates a list of dicts \r\n # with details of each of the objects meeting the threshold in a given frame.\r\n Validobj = [category_index.get(value) for index, value in enumerate (classes[0]) if scores [0,index]>0.5]\r\n \r\n # Choose your object\r\n to_detect = 'person' \r\n \r\n # Creates a log if the chosen object has been detected.\r\n if Validobj:\r\n data = [i[\"name\"] for i in Validobj]\r\n # If in the given frame the number of a given object detected meets the condition then a log is made \r\n if data.count(to_detect)>0:\r\n # Writes a line with how many of the object was detected along with a timestamp\r\n Summary = [\"There is a group of \" + str(data.count(to_detect)) + \" people\" ,time.ctime()]\r\n print(Summary)\r\n \r\n evidence_stamp = [data.count(to_detect),to_detect,time.ctime()]\r\n output.append(evidence_stamp)\r\n \r\n # This writes the data gathered in the output to a logfile\r\n with open('output.csv','w',newline = '\\n') as file:\r\n writer = csv.writer(file)\r\n writer.writerows(output)\r\n time.sleep(5)\r\n\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n\r\n rawCapture.truncate(0)\r\n\r\n camera.close()\r\n\r\ncv2.destroyAllWindows()\r\n\r\ntry:\r\n while True:\r\n group_counting()\r\nexcept KeyboardInterrupt:\r\n sys.exit()","sub_path":"mobile_group_detection.py","file_name":"mobile_group_detection.py","file_ext":"py","file_size_in_byte":5963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"635838869","text":"#!/usr/bin/python\nfrom pymodbus.client.sync import ModbusSerialClient as ModbusClient\nfrom pymodbus.exceptions import ModbusIOException\nfrom pymodbus.mei_message import ReadDeviceInformationRequest\nfrom pymodbus.constants import DeviceInformation\nfrom datetime import datetime\n\n# make sure you are connected...\n# dmesg | grep ttyUSB\n\n\nunits = [1, 2, 3];\n\nfor UNIT in units:\n\n print(\"----[ UNIT:\", UNIT,\"]----------\");\n\n client = ModbusClient(\n method = \"rtu\",\n port = \"/dev/ttyUSB0\",\n baudrate = 115200,\n timeout = 1.5\n )\n\n\n if client.connect():\n print(\"Reading Real-Time Clock from device...\")\n result = client.read_holding_registers(0x9013, 3, unit=UNIT)\n if isinstance(result, Exception):\n print(\"got exception\")\n print(result)\n elif result.function_code >= 0x80:\n print(\"got bad function code\")\n print(result.function_code)\n print(result)\n else:\n print(\"... Date({}, {}, {}, {}, {}, {})\".format(\n 2000 + (result.registers[2] >> 8),\n result.registers[2] & 0xFF,\n result.registers[1] >> 8,\n result.registers[1] & 0xFF,\n result.registers[0] >> 8,\n result.registers[0] & 0xFF\n ))\n\n print(\"Today is ...\")\n # Local time\n now = datetime.now()\n print(\"... {}\".format(now))\n newData = [0,0,0]\n newData[2] = ((now.year - 2000) << 8) + now.month\n newData[1] = (now.day << 8) + now.hour\n newData[0] = (now.minute << 8) + now.second\n print(\"... Date({}, {}, {}, {}, {}, {})\".format(\n 2000 + (newData[2] >> 8),\n newData[2] & 0xFF,\n newData[1] >> 8,\n newData[1] & 0xFF,\n newData[0] >> 8,\n newData[0] & 0xFF\n ))\n\n print(\"Updating Device RTC...\")\n result = client.write_registers(0x9013, newData, unit=UNIT)\n if isinstance(result, Exception):\n print(\"got exception\")\n print(result)\n elif result.function_code >= 0x80:\n print(\"got bad function code\")\n print(result.function_code)\n print(result)\n else:\n print(\"... 
RTC clock update command successful\")\n\n print(\"Verifying...\")\n result = client.read_holding_registers(0x9013, 3, unit=UNIT)\n if isinstance(result, Exception):\n print(\"got exception\")\n print(result)\n elif result.function_code >= 0x80:\n print(\"got bad function code\")\n print(result.function_code)\n print(result)\n else:\n print(\"... Date({}, {}, {}, {}, {}, {})\".format(\n 2000 + (result.registers[2] >> 8),\n result.registers[2] & 0xFF,\n result.registers[1] >> 8,\n result.registers[1] & 0xFF,\n result.registers[0] >> 8,\n result.registers[0] & 0xFF\n ))\n\n\n client.close()\n else:\n print(\"unable to connect\")\nprint(\"---- [ done ] --------\")\n","sub_path":"charge-controller/modbus-sync-clock.py","file_name":"modbus-sync-clock.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"51503065","text":"# math3d_test.py\n\nimport sys\nimport math\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom PyQt5 import QtGui, QtCore, QtWidgets\nfrom math3d_triangle_mesh import TriangleMesh, Polyhedron\nfrom math3d_triangle import Triangle\nfrom math3d_vector import Vector\nfrom math3d_sphere import Sphere\nfrom math3d_transform import AffineTransform\n\nclass Window(QtGui.QOpenGLWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n \n self.tri_mesh_a = TriangleMesh.make_polyhedron(Polyhedron.HEXAHEDRON)\n radius = (Vector(math.sqrt(2.0), math.sqrt(2.0), 0.0) - Vector(0.0, 1.0, 0.0)).length()\n self.tri_mesh_b = Sphere(Vector(math.sqrt(2.0), math.sqrt(2.0), 0.0), radius).make_mesh(subdivision_level=2)\n self.tri_mesh_c = Sphere(Vector(-math.sqrt(2.0), math.sqrt(2.0), 0.0), radius).make_mesh(subdivision_level=2)\n\n #transform = AffineTransform(translation=Vector(-1.0, 0.0, 0.0))\n #self.tri_mesh_a = transform(self.tri_mesh_a)\n\n #transform = AffineTransform(translation=Vector(1.0, 0.0, -0.5))\n #self.tri_mesh_b = transform(self.tri_mesh_b)\n\n #self.tri_mesh_a = TriangleMesh()\n #self.tri_mesh_a.add_triangle(Triangle(Vector(0.0, 0.0, 0.0), Vector(5.0, 0.0, 0.0), Vector(0.0, 5.0, 0.0)))\n\n #self.tri_mesh_b = TriangleMesh()\n #self.tri_mesh_b.add_triangle(Triangle(Vector(1.0, 1.0, 3.0), Vector(4.0, 4.0, 3.0), Vector(1.0, 1.0, -2.0)))\n #self.tri_mesh_b.add_triangle(Triangle(Vector(1.0, 1.0, -2.0), Vector(4.0, 4.0, 3.0), Vector(5.0, 1.0, 0.0)))\n #self.tri_mesh_b.add_triangle(Triangle(Vector(1.0, 1.0, 3.0), Vector(1.0, 1.0, -2.0), Vector(-2.0, -6.0, -2.0)))\n \n self.back_mesh = None\n self.front_mesh = None\n \n self.orient = Vector(0.0, 0.0, 0.0)\n self.dragging_mouse = False\n self.drag_pos = None\n self.zoom = 5.0\n\n def initializeGL(self):\n glEnable(GL_DEPTH_TEST)\n glClearColor(0.0, 0.0, 0.0, 0.0)\n \n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n \n glShadeModel(GL_SMOOTH)\n \n glLightfv(GL_LIGHT0, GL_POSITION, [1.0, 1.0, 1.0, 0.0])\n glLightfv(GL_LIGHT0, GL_AMBIENT, [1.0, 1.0, 1.0, 1.0])\n glLightfv(GL_LIGHT0, GL_DIFFUSE, [1.0, 1.0, 1.0, 1.0])\n \n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n \n glEnable(GL_CULL_FACE)\n glCullFace(GL_BACK)\n\n def paintGL(self):\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n viewport = glGetIntegerv(GL_VIEWPORT)\n width = viewport[2]\n height = viewport[3]\n\n aspect_ratio = float(width) / float(height)\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(60.0, aspect_ratio, 0.1, 1000.0)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluLookAt(0.0, 0.0, self.zoom, 
0.0, 0.0, 0.0, 0.0, 1.0, 0.0)\n\n        glPushMatrix()\n        glRotatef(self.orient.x, 1.0, 0.0, 0.0)\n        glRotatef(self.orient.y, 0.0, 1.0, 0.0)\n        glRotatef(self.orient.z, 0.0, 0.0, 1.0)\n\n        glEnable(GL_LIGHTING)\n\n        glMaterialfv(GL_FRONT, GL_SPECULAR, [1.0, 1.0, 1.0, 1.0])\n        glMaterialfv(GL_FRONT, GL_SHININESS, [30.0])\n\n        if self.back_mesh is not None:\n            glMaterialfv(GL_FRONT, GL_AMBIENT, [0.0, 0.3, 0.3, 1.0])\n            glMaterialfv(GL_FRONT, GL_DIFFUSE, [0.0, 1.0, 1.0, 1.0])\n            self.back_mesh.render()\n\n        if self.front_mesh is not None:\n            glMaterialfv(GL_FRONT, GL_AMBIENT, [0.3, 0.3, 0.0, 1.0])\n            glMaterialfv(GL_FRONT, GL_DIFFUSE, [1.0, 1.0, 0.0, 1.0])\n            self.front_mesh.render()\n\n        if self.tri_mesh_a is not None:\n            glMaterialfv(GL_FRONT, GL_AMBIENT, [0.3, 0.0, 0.0, 1.0])\n            glMaterialfv(GL_FRONT, GL_DIFFUSE, [1.0, 0.0, 0.0, 1.0])\n            self.tri_mesh_a.render()\n\n        if self.tri_mesh_b is not None:\n            glMaterialfv(GL_FRONT, GL_AMBIENT, [0.0, 0.3, 0.0, 0.3])\n            glMaterialfv(GL_FRONT, GL_DIFFUSE, [0.0, 1.0, 0.0, 0.3])\n            self.tri_mesh_b.render()\n\n        if self.tri_mesh_c is not None:\n            glMaterialfv(GL_FRONT, GL_AMBIENT, [0.0, 0.0, 0.3, 0.3])\n            glMaterialfv(GL_FRONT, GL_DIFFUSE, [0.0, 0.0, 1.0, 0.3])\n            self.tri_mesh_c.render()\n\n        glPopMatrix()\n\n        glFlush()\n\n    def resizeGL(self, width, height):\n        glViewport(0, 0, width, height)\n\n    def mousePressEvent(self, event):\n        button = event.button()\n        if button == QtCore.Qt.LeftButton:\n            self.dragging_mouse = True\n            self.drag_pos = event.localPos()\n\n    def mouseMoveEvent(self, event):\n        if self.dragging_mouse:\n            pos = event.localPos()\n            delta = pos - self.drag_pos\n            self.drag_pos = pos\n            sensitivity_factor = 2.0\n            self.orient.x += sensitivity_factor * float(delta.y())\n            self.orient.y += sensitivity_factor * float(delta.x())\n            self.update()\n\n    def mouseReleaseEvent(self, event):\n        if self.dragging_mouse:\n            self.dragging_mouse = False\n            self.drag_pos = None\n\n    def wheelEvent(self, event):\n        delta = event.angleDelta()\n        delta = float(delta.y()) / 120.0\n        zoom_factor = 0.5\n        self.zoom += delta * zoom_factor\n        self.update()\n\n    def keyPressEvent(self, event):\n        if event.key() == QtCore.Qt.Key_F1:\n            if self.tri_mesh_a is not None and self.tri_mesh_b is not None:\n                self.back_mesh, self.front_mesh = self.tri_mesh_a.split_against_mesh(self.tri_mesh_b)\n                self.tri_mesh_a = None\n                self.tri_mesh_b = None\n                #self.back_mesh = None\n                self.update()\n\ndef exceptionHook(cls, exc, tb):\n    sys.__excepthook__(cls, exc, tb)\n\nif __name__ == '__main__':\n    sys.excepthook = exceptionHook\n\n    app = QtGui.QGuiApplication(sys.argv)\n\n    win = Window()\n    win.resize(640, 480)\n    win.show()\n\n    sys.exit(app.exec_())","sub_path":"math3d_test.py","file_name":"math3d_test.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"637292563","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\nimport sys\nimport imghdr\nimport time\nimport io\nimport re\nimport datetime\nimport random\nfrom urllib.request import Request\nfrom urllib.request import urlopen\nif(os.name=='nt'):\n    print(u'Windows system')\nelse:\n    print(u'linux')\n\nproxyipurl='http://www.xicidaili.com/'\nheader = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 UBrowser/6.1.2107.204 Safari/537.36'}\nISOTIMEFORMAT='%Y-%m-%d %X'\n\n# fetch the list of candidate proxy IPs\ndef get_ip_list(proxyipurl):\n    request = Request(proxyipurl, headers=header)\n    response = urlopen(request)\n    obj = BeautifulSoup(response, 
'lxml')\n    ip_text = obj.findAll('tr', {'class': 'odd'})\n    ip_list = []\n    for i in range(len(ip_text)):\n        ip_tag = ip_text[i].findAll('td')\n        ip_port = ip_tag[1].get_text() + ':' + ip_tag[2].get_text()\n        ip_list.append(ip_port)\n    # print(\"collected {} proxy IPs in total\".format(len(ip_list)))\n    # print(ip_list)\n    # check that each proxy IP actually works; iterate over a copy so removing entries is safe\n    for ip in list(ip_list):\n        try:\n            proxy_host = 'https://' + ip\n            proxy_temp = {'https': proxy_host}\n            # probe the proxy with a short request (assumes the index page is reachable)\n            requests.get(proxyipurl, proxies=proxy_temp, timeout=3)\n        except Exception:\n            ip_list.remove(ip)\n            continue\n    return ip_list\n# pick a random proxy address from the IP list\ndef get_random_ip(ip_list):\n    #ip_list = get_ip_list(bsObj)\n    random_ip = 'http://' + random.choice(ip_list)\n    proxy_ip = {'http': random_ip}  # the scheme key must not contain a colon\n    return proxy_ip\n\nfile=open(\"D:/down/xiaoshuo/jiqiao/jiqiao_2019-01-27_0.txt\")\nip_list=get_ip_list(proxyipurl)\n# get the total number of lines\npath='D:/down/xiaoshuo/jiqiao/'+datetime.datetime.now().strftime('%Y-%m-%d')+'/'\nif not(os.path.exists(path)):\n    os.makedirs(path)\nfor num,value in enumerate(file,1):\n    print('line '+str(num)+':')\n    line=value.strip('\\n')\n    print(line)\n    # get a random proxy server for this request\n    proxyip=get_random_ip(ip_list)\n    #print('proxyip:'+str(proxyip))\n\n    html=requests.get(line,headers = header, proxies=proxyip)\n    html.encoding='utf-8'\n    itemSoup=BeautifulSoup(html.text,'lxml')\n    title=itemSoup.title.string\n    title=re.sub(r'<+|>+|/+|‘+|’+|\\?+|\\|+|\"+|\\:+|\\:+|\\【+|\\】+|\\.+|\\~+|\\*+','',title)\n    newTitle=title.split('www')[-1]\n\n    print(str(newTitle.strip()))\n    textContent=itemSoup.select(\"body div[class='maomi-content'] main[id='main-container'] div[class='content']\")\n    print('text blocks: '+str(len(textContent)))\n    if len(textContent)==0:\n        #os.chdir(path)\n        f=open('linglei_'+datetime.datetime.now().strftime('%Y-%m-%d')+'_not_downloaded.txt','a+')\n        f.write('line '+str(num)+': '+line+','+newTitle+'\\n')\n        f.close()\n    else:\n        os.chdir(path)\n        f=open(newTitle.strip()+'.txt','a+',encoding='utf8')\n        for i in range(0,len(textContent)):\n            text=textContent[i].text\n            os.chdir(path)\n            f.write(text+'\\n')\n        f.close()\n    print(\"-----down over----------------\")\nfile.close()  # the original was missing the call parentheses\nprint(\"all over\")\n","sub_path":"xiaoshuo/jiqiao/downText.py","file_name":"downText.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"614964782","text":"def newFib(first,second,term):\n    termlist = [first,second]\n    for i in range (term - 2):\n        newNum = first + second\n        termlist.append(newNum)\n        first = second\n        second = newNum\n    return termlist\n\ndef main():\n    fir = int(input(\"Enter the first term of the series: \"))\n    sec = int(input(\"Enter the second term of the series: \"))\n    term = int(input(\"Enter the number of terms you want to see: \"))\n    nfib = newFib(fir,sec,term)\n    print(\"The first \",term,\" terms of the new series are: \")\n    idx = 0\n    for idx in range(term):\n        print(nfib[idx],\" \",end=\"\")\n\nif __name__ == '__main__':\n    main()\n","sub_path":"newFib.py","file_name":"newFib.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"322207417","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /Users/gpl/software/python/corrfitter/examples/etas-Ds.py\n# Compiled at: 2019-03-26 09:25:48\nfrom __future__ import print_function\nimport collections, gvar as gv, numpy as np, corrfitter as cf\nSHOWPLOTS = True\nSVDCUT = 8e-05\n\ndef main():\n
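    # added note: p0 = fit.pmean chains the fits below, so each larger-N fit is warm-started from the previous solution\n    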
data = make_data('etas-Ds.h5')\n fitter = cf.CorrFitter(models=make_models())\n p0 = None\n for N in [1, 2, 3, 4]:\n print(30 * '=', 'nterm =', N)\n prior = make_prior(N)\n fit = fitter.lsqfit(data=data, prior=prior, p0=p0, svdcut=SVDCUT)\n print(fit.format(pstyle=None if N < 4 else 'v'))\n p0 = fit.pmean\n\n print_results(fit, prior, data)\n if SHOWPLOTS:\n fit.show_plots()\n print('\\n==================== add svd, prior noise')\n noisy_fit = fitter.lsqfit(data=data, prior=prior, p0=fit.pmean, svdcut=SVDCUT, add_svdnoise=True, add_priornoise=True)\n print(noisy_fit.format(pstyle=None))\n p = key_parameters(fit.p)\n noisy_p = key_parameters(noisy_fit.p)\n print(' fit:', p)\n print('noisy fit:', noisy_p)\n print(' ', gv.fmt_chi2(gv.chi2(p - noisy_p)))\n for sim_pdata in fitter.simulated_pdata_iter(n=2, dataset=cf.read_dataset('etas-Ds.h5'), p_exact=fit.pmean):\n print('\\n==================== simulation')\n sim_fit = fitter.lsqfit(pdata=sim_pdata, prior=prior, p0=fit.pmean, svdcut=SVDCUT)\n print(sim_fit.format(pstyle=None))\n p = key_parameters(fit.pmean)\n sim_p = key_parameters(sim_fit.p)\n print('simulated - exact:', sim_p - p)\n print(' ', gv.fmt_chi2(gv.chi2(p - sim_p)))\n\n return\n\n\ndef key_parameters(p):\n \"\"\" collect key fit parameters in dictionary \"\"\"\n ans = gv.BufferDict()\n for k in ['etas:a', 'etas:dE', 'Ds:a', 'Ds:dE']:\n ans[k] = p[k][0]\n\n ans['Vnn'] = p['Vnn'][(0, 0)]\n return ans\n\n\ndef make_data(datafile):\n \"\"\" Read data from datafile and average it. \"\"\"\n dset = cf.read_dataset(datafile)\n return gv.dataset.avg_data(dset)\n\n\ndef make_models():\n \"\"\" Create models to fit data. \"\"\"\n tmin = 5\n tp = 64\n models = [\n cf.Corr2(datatag='etas', tp=tp, tmin=tmin, a='etas:a', b='etas:a', dE='etas:dE'),\n cf.Corr2(datatag='Ds', tp=tp, tmin=tmin, a=('Ds:a', 'Dso:a'), b=('Ds:a', 'Dso:a'), dE=('Ds:dE',\n 'Dso:dE')),\n cf.Corr3(datatag='3ptT15', T=15, tmin=tmin, a='etas:a', dEa='etas:dE', b=('Ds:a', 'Dso:a'), dEb=('Ds:dE',\n 'Dso:dE'), Vnn='Vnn', Vno='Vno'),\n cf.Corr3(datatag='3ptT16', T=16, tmin=tmin, a='etas:a', dEa='etas:dE', b=('Ds:a', 'Dso:a'), dEb=('Ds:dE',\n 'Dso:dE'), Vnn='Vnn', Vno='Vno')]\n return models\n\n\ndef make_prior(N):\n \"\"\" Create priors for fit parameters. \"\"\"\n prior = gv.BufferDict()\n metas = gv.gvar('0.4(2)')\n prior['log(etas:a)'] = gv.log(gv.gvar(N * ['0.3(3)']))\n prior['log(etas:dE)'] = gv.log(gv.gvar(N * ['0.5(5)']))\n prior['log(etas:dE)'][0] = gv.log(metas)\n mDs = gv.gvar('1.2(2)')\n prior['log(Ds:a)'] = gv.log(gv.gvar(N * ['0.3(3)']))\n prior['log(Ds:dE)'] = gv.log(gv.gvar(N * ['0.5(5)']))\n prior['log(Ds:dE)'][0] = gv.log(mDs)\n prior['log(Dso:a)'] = gv.log(gv.gvar(N * ['0.1(1)']))\n prior['log(Dso:dE)'] = gv.log(gv.gvar(N * ['0.5(5)']))\n prior['log(Dso:dE)'][0] = gv.log(mDs + gv.gvar('0.3(3)'))\n prior['Vnn'] = gv.gvar(N * [N * ['0(1)']])\n prior['Vno'] = gv.gvar(N * [N * ['0(1)']])\n return prior\n\n\ndef print_results(fit, prior, data):\n \"\"\" Report best-fit results. 
\"\"\"\n print('Fit results:')\n p = fit.p\n E_etas = np.cumsum(p['etas:dE'])\n a_etas = p['etas:a']\n print(' Eetas:', E_etas[:3])\n print(' aetas:', a_etas[:3])\n E_Ds = np.cumsum(p['Ds:dE'])\n a_Ds = p['Ds:a']\n print('\\n EDs:', E_Ds[:3])\n print(' aDs:', a_Ds[:3])\n E_Dso = np.cumsum(p['Dso:dE'])\n a_Dso = p['Dso:a']\n print('\\n EDso:', E_Dso[:3])\n print(' aDso:', a_Dso[:3])\n Vnn = p['Vnn']\n print('\\n etas->V->Ds =', Vnn[(0, 0)])\n outputs = collections.OrderedDict()\n outputs['metas'] = E_etas[0]\n outputs['mDs'] = E_Ds[0]\n outputs['Vnn'] = Vnn[(0, 0)]\n inputs = collections.OrderedDict()\n inputs['statistics'] = data\n inputs['svd'] = fit.svdcorrection\n inputs.update(prior)\n print('\\n' + gv.fmt_values(outputs))\n print(gv.fmt_errorbudget(outputs, inputs))\n print('\\n')\n\n\nimport sys\nif sys.argv[1:]:\n SHOWPLOTS = eval(sys.argv[1])\nif SHOWPLOTS:\n try:\n import matplotlib\n except ImportError:\n SHOWPLOTS = False\n\nif __name__ == '__main__':\n gv.ranseed(123456)\n main()","sub_path":"pycfiles/Corrfunc-2.3.3.tar/etas-Ds.py","file_name":"etas-Ds.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"555349505","text":"import math\n\ndef simple_generator_function():\n yield 1\n yield 2\n yield 3\n\ndef test1():\n for value in simple_generator_function():\n print(value)\n\ndef print_successive_primes(iterations, base=10):\n prime_generator = get_primes(base)\n print('before send None')\n print(prime_generator.send(None))\n print('after send None')\n for power in range(iterations):\n print('before send %s' % base ** power)\n print(prime_generator.send(base ** power))\n print('after send %s' % base ** power)\n\ndef get_primes(number):\n print('enter get_primes, number %s' % number)\n while True:\n print('in while, number %s' % number)\n if is_prime(number):\n number = yield number\n print('after yield, number %s' % number)\n number += 1\n \n\ndef is_prime(number):\n if number > 1:\n if number == 2:\n return True\n if number % 2 == 0:\n return False\n for current in range(3, int(math.sqrt(number) + 1), 2):\n if number % current == 0: \n return False\n return True\n return False\n\nif __name__ == '__main__':\n\tprint_successive_primes(10)\n","sub_path":"python/simple_generator.py","file_name":"simple_generator.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"450958361","text":"# Remove for production - tracks changes to modules files\nfrom gluon.custom_import import track_changes; track_changes(True)\nfrom gluon.tools import Auth\n\nfrom markdown2 import markdown\n\ndb = DAL(\"sqlite://storage.db\")\n\nauth = Auth(db)\nauth.settings.create_user_groups = False\nauth.settings.everybody_group_id = 2 # commentor\n\ndisabled_auth_actions = [\"profile\"]\nauth.settings.actions_disabled.extend(disabled_auth_actions)\n\nauth.settings.extra_fields[\"auth_user\"] = [Field(\"username\")]\nauth.define_tables(username=True)\n\ndb.auth_user._format = lambda row: \"%s\" % (row.username)\n\ndb.define_table('post',\n Field('title', 'string', length=256),\n Field('body', 'text'),\n Field('markdown', 'boolean', default=False, notnull=True),\n Field('user_id', db.auth_user, default=auth.user_id, notnull=True),\n Field('created', 'datetime', default=request.now, notnull=True),\n Field('updated', 'datetime', default=request.now, update=request.now, notnull=True),\n format=lambda row: 
row.title)\n\n\ndb.define_table('comment',\n Field('user_id', db.auth_user, default=auth.user_id, notnull=True),\n Field('post_id', db.post, notnull=True),\n Field('body', 'text'),\n Field('reply_to_comment_id', 'reference comment'),\n Field('created', 'datetime', default=request.now, notnull=True),\n Field('updated', 'datetime', default=request.now, update=request.now, notnull=True),\n format=lambda row: \"%s - %s\" % (row.user_id, row.body[:50]))\n\ndb.post.title.requires = IS_NOT_EMPTY()\ndb.post.user_id.readable = db.post.user_id.writable = False\ndb.post.created.readable = db.post.created.writable = False\ndb.post.updated.readable = db.post.updated.writable = False\n\ndb.comment.user_id.readable = db.comment.user_id.writable = False\ndb.comment.post_id.requires = IS_NOT_EMPTY()\ndb.comment.body.requires = IS_NOT_EMPTY()\ndb.comment.reply_to_comment_id.readable = db.comment.reply_to_comment_id.writable = False\ndb.comment.created.readable = db.comment.created.writable = False\ndb.comment.updated.readable = db.comment.updated.writable = False\n","sub_path":"models/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"120663352","text":"''' Typoglycemia is a relatively new word given to a purported recent discovery\nabout how people read written text. As wikipedia puts it:\n\n The legend, propagated by email and message boards, purportedly demonstrates\n that readers can understand the meaning of words in a sentence even when the\n interior letters of each word are scrambled. As long as all the necessary letters\n are present, and the first and last letters remain the same, readers appear to\n have little trouble reading the text.\n\nOr as Urban Dictionary puts it:\n\n Typoglycemia\n The mind's ability to decipher a mis-spelled word if the first and last\n letters of the word are correct.\n\n The word Typoglycemia describes Teh mdin's atbiliy to dpeihecr a msi-selpeld\n wrod if the fsirt and lsat lteetrs of the wrod are cerorct.\n\nInput Description\n\nAny string of words with/without punctuation.\nOutput Description\n\nA scrambled form of the same sentence but with the word's first and last\nletter's positions intact.'''\n\nfrom random import randrange\n\n\ndef sattoloCycle(items):\n ''' str -> str\n shuffle a string'''\n apos = ''\n\n items = list(items)\n i = len(items)\n while i > 1:\n i = i - 1\n j = randrange(i) # 0 <= j <= i-1\n items[j], items[i] = items[i], items[j]\n item_string = ''\n for item in items:\n item_string += item\n\n return item_string\n\n\nif __name__ == '__main__':\n\n\n\n with open('challenge240.txt') as f:\n while True:\n txt = f.readline()\n # detect end of text\n if len(txt) == 0:\n break\n output = ''\n out_string = ''\n comma_index = 0\n index = 0\n\n txt.split()\n for word in txt.split():\n new_word = word\n if len(word) > 3:\n pre_shuffle = word\n\n if \"'\" in word:\n # find index of any ' character\n index = word.find(\"'\")\n pre_shuffle = word.replace(\"'\", '')\n\n # note start and end characters\n start_char = word[0]\n if \".\" in word or \",\" in word:\n last_char = word[-2:]\n else:\n last_char =word[-1]\n\n\n # strip off first and last characters\n if \",\" in word or \".\" in word:\n to_shuffle = pre_shuffle[1:-2]\n else:\n to_shuffle = pre_shuffle[1:-1]\n\n # shuffle the resulting string\n new_word = sattoloCycle(to_shuffle)\n output = start_char + new_word + last_char\n\n # replace apostrophe if present\n if \"'\" in word:\n 
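# added note: splice the apostrophe back into the shuffled word at its original index\n                        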
output2 = output[:index] + word[index] + output[index:]\n                        output = output2\n\n\n                out_string = out_string + output + ' '\n\n\n            else:\n                out_string = out_string + word + ' '\n        print(out_string)","sub_path":"albums/3/challenge240_easy/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"225765894","text":"def bubble_sort(array):\n    \"O(n2)\"\n    lens = len(array) - 1\n    for i in range(lens):\n        for j in range(lens - i):\n            if array[j] > array[j + 1]:\n                array[j], array[j + 1] = array[j + 1], array[j]\n    return array\n\n\ndef select_sort(array):\n    \"O(n2)\"\n    lens = len(array)\n    for i in range(lens):\n        min_idx = i\n        for j in range(i + 1, lens):\n            if array[j] < array[min_idx]:\n                min_idx = j\n        if min_idx != i:\n            array[i], array[min_idx] = array[min_idx], array[i]\n    return array\n\n\ndef insert_sort(array):\n    \"O(n2)\"\n    lens = len(array)\n    for i in range(1, lens):\n        val = array[i]\n        pos = i\n        while pos > 0 and val < array[pos - 1]:\n            # shift larger values one slot to the right\n            array[pos] = array[pos - 1]\n            pos -= 1\n        array[pos] = val\n    return array\n\n\ndef quick_sort(array):\n    size = len(array)\n    if not array or size < 2:\n        return array\n    pivot_idx = 0\n    pivot = array[pivot_idx]\n    less_part = [array[i] for i in range(size) if array[i] <= pivot and pivot_idx != i]\n    great_part = [array[i] for i in range(size) if array[i] > pivot and pivot_idx != i]\n    return quick_sort(less_part) + [pivot] + quick_sort(great_part)\n\n\ndef test_sort():\n    import random\n\n    seq = list(range(10))\n    random.shuffle(seq)\n\n    assert bubble_sort(seq) == sorted(seq)\n    assert select_sort(seq) == sorted(seq)\n    assert insert_sort(seq) == sorted(seq)\n    assert quick_sort(seq) == sorted(seq)\n\n","sub_path":"ehco/sort/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"494970562","text":"import tweepy\nfrom tweepy import OAuthHandler\nimport time\nimport pandas as pd\nimport re\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nimport subprocess\nimport os\nfrom .settings import BACKEND_DIR\nimport functools\n\nRE_EMOJI = re.compile('[\\U00010000-\\U0010ffff]', flags=re.UNICODE)\npwd = BACKEND_DIR+\"/outs/\"\nfeature_filepath = pwd + \"feats.dic\"\ndf2 = pd.read_table(feature_filepath, encoding=\"ISO-8859-1\", header=None)\ndf2.columns = ['feature_id', 'feature']\nfeatures = {}\nfor index, row in df2.iterrows():\n    key = row[\"feature\"]\n    value = row[\"feature_id\"]\n    features[key] = value\n\n\ndef strip_emoji(text):\n    return RE_EMOJI.sub(r'', text)\n\ndef load_api():\n    consumer_key = \"QU9kxhHKkdO00XjNeMDnyVIDX\"\n    consumer_secret = \"LF96QFSvwWRA0b1i3bQyxvxOLe2juvnlKfUi0MStebRuqHMcko\"\n    access_key = \"65542518-7FR8JkLYkgLBY43CgflDmiq8z8UuMhnML1KFAbfqA\"\n    access_secret = \"OUu0qUm58Le7WAjsoRZ7E91P8joX7F2e5ynC5COlcsDkn\"\n\n    auth = OAuthHandler(consumer_key, consumer_secret)\n    auth.set_access_token(access_key, access_secret)\n    return tweepy.API(auth)\n\ndef preprocess(lines, s):\n    text1 = lines.lower() # do lower casing\n    result = re.sub(\"http\\S*\", \"\", text1) # remove the http links\n\n    result1 = re.sub(\"RT \\S*\", \"\", result)\n    reg2 = '[^\\w\\#\\@\\_]'\n    out = re.compile(reg2).split(result1)\n    result = [x for x in out if x not in s] # stopwords removal\n    return result\n\ndef stemming(lists):\n    ps = PorterStemmer()\n    temp = set() # create a set to avoid duplicates\n    for w in lists:\n
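        # added note: Porter-stem each token; the set silently drops words that stem to the same root\n        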
temp.add(ps.stem(w))\n result = list(filter(None, list(temp)))\n return result # return the list\n\n@functools.lru_cache()\ndef loadfeatures():\n pwd = BACKEND_DIR+\"/outs/\"\n feature_filepath = pwd + \"feats.dic\"\n df2 = pd.read_table(feature_filepath, encoding=\"ISO-8859-1\", header=None)\n df2.columns = ['feature_id', 'feature']\n features = {}\n for index, row in df2.iterrows():\n key = row[\"feature\"]\n value = row[\"feature_id\"]\n features[key] = value\n return features\n\ndef classify():\n stoplist = stopwords.words('english')\n\n s = set(stoplist)\n\n #wd = \"/Users/varunsharma/Downloads/TTDS DATA/\"\n wd = BACKEND_DIR+\"/outs/\"\n #train_filepath = wd+\"Tweets.cat.train\"\n #df = pd.read_table(train_filepath, encoding=\"ISO-8859-1\", header=None)\n\n test_filepath = wd + \"valid.test\"\n df1 = pd.read_table(test_filepath, encoding=\"ISO-8859-1\", header=None)\n\n #pwd = \"/Users/varunsharma/Downloads/TTDS DATA/\"\n \n # pwd = BACKEND_DIR+\"/outs/\"\n # feature_filepath = pwd + \"feats.dic\"\n # df2 = pd.read_table(feature_filepath, encoding=\"ISO-8859-1\", header=None)\n\n #df.columns = ['tweet_id', 'text', 'classification']\n df1.columns = ['tweet_id', 'text', 'classification']\n \n \n\n classdict = {\"Animals\": 1,\n \"Agriculture\": 2,\n \"Architecture\": 3,\n \"Art and Photography\": 4,\n \"Automobile\": 5,\n \"Business & Finance\": 6,\n \"Children\": 7,\n \"Comics & Humor\": 8,\n \"Computers and Electronics\": 9,\n \"Food and Beverages\": 10,\n \"Education\": 11,\n \"Ethnic\": 12,\n \"Fashion and Style\": 13,\n \"Health and Fitness\": 14,\n \"History\": 15,\n \"Literature\": 16,\n \"Medical\": 17,\n \"Music\": 18,\n \"Politics\": 19,\n \"Psychology\": 20,\n \"Religion\": 21,\n \"Science and Nature\": 22,\n \"Sports and Recreation\": 23,\n \"TV and Movie\": 24,\n \"Weather\": 25}\n\n # features=loadfeatures()\n #with open(\"/Users/varunsharma/Downloads/TTDS DATA/feats_valid.test\", mode=\"w\", encoding=\"utf-8\") as file: \n with open(wd+\"feats_valid.test\", mode=\"w\", encoding=\"utf-8\") as file:\n for index, row in df1.iterrows():\n wordList = []\n text = row[\"text\"]\n tokens = preprocess(str(text), s)\n words = stemming(tokens)\n\n temp = []\n for word in words:\n # Duplicate the words starting with hash\n if str.startswith(word, \"#\", 0) and len(word) > 1:\n copy = word[1:]\n wordList.append(copy)\n wordList.append(word)\n file.write(str(1) + \"\\t\")\n\n for word in wordList:\n if word in features:\n featureid = features.get(word) # get the feature id\n if featureid not in temp:\n temp.append(featureid)\n\n temp.sort()\n for t in temp:\n file.write(str(t) + \":1\" + \" \")\n file.write(\"#\" + str(123) + \"\\n\")\n file.close()\n\ndef main(geo_code):\n api = load_api()\n query = \"%20\"\n language = \"en\"\n # Calling the user_timeline function with our parameters\n tweetSet = set([0])\n old_id = \"\"\n\n results = api.search(q=query, lang=language, geocode=\"39.8,-95.583068847656,2500km\", count=\"1\")\n #print(len(results))\n count = 0\n temp = 0\n\n wd = BACKEND_DIR+'/outs/'\n\n for tweet in results:\n old_id = tweet.id_str\n with open(wd+\"valid.test\", mode=\"w\", encoding=\"utf-8\") as file:\n\n for x in range(15):\n results = api.search(q=query, lang=language, geocode=geo_code+\",200km\", since_id=old_id, count=\"100\")\n for tweet in results:\n count += 1\n if tweet.id_str not in tweetSet:\n if temp != 0:\n file.write(\"\\n\")\n file.write(tweet.id_str + \"\\t\" + strip_emoji(tweet.text).strip().replace(\"\\n\",\"\") + \"\\t\"+\"TEST\")\n temp += 1\n 
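# added note: remember this id so overlapping search pages are not written out twice\n                    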
tweetSet.add(int(tweet.id_str))\n if len(tweetSet) > 50:\n break\n time.sleep(1)\n #print(len(tweetSet))\n old_id = max(tweetSet)\n #print(len(tweetSet))\n classify()\n #subprocess.call(['/Users/varunsharma/Downloads/svm_multiclass/svm_multiclass_classify', '/Users/varunsharma/Downloads/TTDS DATA/feats_valid.test', '/Users/varunsharma/Downloads/svm_multiclass/model', '/Users/varunsharma/Downloads/svm_multiclass/pred3.out'])\n #subprocess.call([BACKEND_DIR+'/svm_multiclass/svm_multiclass_classify', BACKEND_DIR+'/outs/feats_valid.test', BACKEND_DIR+'/svm_multiclass/model', BACKEND_DIR+'/svm_multiclass/pred3.out'])\n subprocess.call([BACKEND_DIR+'/svm_multiclass_linux64/svm_multiclass_classify', BACKEND_DIR+'/outs/feats_valid.test', BACKEND_DIR+'/svm_multiclass_linux64/model', BACKEND_DIR+'/svm_multiclass_linux64/pred3.out'])\n result_dict = readTopics()\n return result_dict\n\ndef readTopics():\n #pwd = BACKEND_DIR+\"/svm_multiclass/\"\n pwd = BACKEND_DIR+\"/svm_multiclass_linux64/\"\n pred_filepath = pwd + \"pred3.out\"\n df_p = pd.read_table(pred_filepath, header=None, sep=\" \", usecols=[0])\n\n freq_ordered = df_p[0].value_counts()\n top5 = freq_ordered.head(5)\n total = top5.sum()\n top5df = top5.to_frame()\n\n top5_percent = ((top5df[0]/total)*100).to_frame()\n top5_dict = top5_percent.to_dict()[0]\n return top5_dict\ndef hello(strr):\n return str('Hello '+strr)\n","sub_path":"tweetsbycat/RetrieveTweets.py","file_name":"RetrieveTweets.py","file_ext":"py","file_size_in_byte":7308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"348704516","text":"#\n# @lc app=leetcode id=8 lang=python3\n#\n# [8] String to Integer (atoi)\n#\nclass Solution:\n def myAtoi(self, str: str) -> int:\n if len(str)==0:return 0\n data = str.strip()\n if not data:return 0\n temp = [\"-\",\"+\",\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]\n if data[0] not in temp:\n return 0\n sign = 1\n if data[0] == \"-\":\n data = data[1:]\n sign = -1\n elif data[0] == \"+\":\n data = data[1:]\n sign = 1\n res = 0\n i = 0\n while i < len(data) and data[i].isdigit():\n res = res * 10 + ord(data[i]) - ord(\"0\")\n i += 1\n return max(-2**31,min(sign*res,2**31-1))\n \n\n","sub_path":"8.string-to-integer-atoi.py","file_name":"8.string-to-integer-atoi.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"153103796","text":"import unittest\nfrom unittest.mock import patch\n\nfrom .carta import Carta\nfrom .mazo import Mazo\nfrom . 
import ESPADA, BASTO, ORO, COPA\nfrom .player import Player\nfrom .game import Game\n\n\nclass TestCartas (unittest.TestCase):\n # CREACIONES\n def test_si_se_crea_carta(self):\n carta1 = Carta(ESPADA, 1)\n self.assertIsInstance(carta1, Carta)\n # POSICIONES\n\n def test_obtener_posicion_cero(self):\n cartaMacho = Carta(ESPADA, 1)\n self.assertEqual(cartaMacho.get_position(), 0)\n\n def test_obtener_posicion_uno(self):\n cartaMacho = Carta(BASTO, 1)\n self.assertEqual(cartaMacho.get_position(), 1)\n\n def test_obtener_posicion_trece(self):\n cartaMacho = Carta(ESPADA, 4)\n self.assertEqual(cartaMacho.get_position(), 13)\n\n # COMPARACIONES\n def test_comparar_as_espadas_con_as_bastos(self):\n carta1 = Carta(ESPADA, 1)\n carta2 = Carta(BASTO, 1)\n result = carta1.compare_with(carta2)\n self.assertEqual(result, 'GREATER')\n\n def test_comparar_4_de_espadas_con_4_de_bastos(self):\n carta1 = Carta(ESPADA, 4)\n carta2 = Carta(BASTO, 4)\n result = carta1.compare_with(carta2)\n self.assertEqual(result, 'EQUAL')\n\n def test_comparar_7_de_espadas_con_7_de_bastos(self):\n carta1 = Carta(ESPADA, 7)\n carta2 = Carta(BASTO, 7)\n result = carta1.compare_with(carta2)\n self.assertEqual(result, 'GREATER')\n\n def test_comparar_5_de_espadas_con_2_de_bastos(self):\n carta1 = Carta(ESPADA, 5)\n carta2 = Carta(BASTO, 2)\n result = carta1.compare_with(carta2)\n self.assertEqual(result, 'LOWER')\n\n\nclass TestMazo(unittest.TestCase):\n def test_repartir_cartas_uno(self):\n mazo = Mazo()\n result = mazo.get_card()\n self.assertIsInstance(result, Carta)\n\n def test_repartir_dos_cartas_distintas(self):\n mazo = Mazo()\n result1 = mazo.get_card()\n result2 = mazo.get_card()\n self.assertNotEqual(result1, result2)\n\n def test_contar_cartas_del_mazo_repartiendo_dos(self):\n mazo = Mazo()\n mazo.get_card()\n mazo.get_card()\n self.assertEqual(len(mazo.mazo), 38)\n\n @unittest.mock.patch('random.randint')\n def test_verificar_cartas_sacadas_2(self, mock_rand_int):\n mock_rand_int.return_value = 0\n mazo = Mazo()\n mazo.get_card()\n result2 = mazo.get_card()\n cartaCorrecta = Carta('basto', 1)\n self.assertEqual(cartaCorrecta.suit, result2.suit)\n self.assertEqual(cartaCorrecta.number, result2.number)\n\n @unittest.mock.patch('random.randint')\n def test_verificar_cartas_sacadas_4(self, mock_rand_int):\n mock_rand_int.return_value = 0\n mazo = Mazo()\n result2 = mazo.get_card()\n result2 = mazo.get_card()\n result2 = mazo.get_card()\n result2 = mazo.get_card()\n cartaCorrecta = Carta('oro', 7)\n self.assertEqual(cartaCorrecta.suit, result2.suit)\n self.assertEqual(cartaCorrecta.number, result2.number)\n\n @unittest.mock.patch('random.randint')\n def test_verificar_cartas_sacadas_2_de_a_4(self, mock_rand_int):\n mock_rand_int.return_value = 4\n mazo = Mazo()\n result2 = mazo.get_card()\n result2 = mazo.get_card()\n cartaCorrecta = Carta('basto', 3)\n self.assertEqual(cartaCorrecta.number, result2.number)\n self.assertEqual(cartaCorrecta.suit, result2.suit)\n\n def test_states(self):\n player01 = Player('1')\n player02 = Player('2')\n deck = Mazo()\n game = Game([player01, player02], deck)\n game.deal()\n self.assertEqual(game.get_state(), [0, None, None, None, None])\n\n\nclass TestGame(unittest.TestCase):\n player01 = Player('1')\n player02 = Player('2')\n\n def test_deal_cards(self):\n # setup\n deck = Mazo()\n game = Game([self.player01, self.player02], deck)\n # test\n game.deal()\n # assert\n self.assertEqual(len(self.player01.hiddenCards), 3)\n self.assertEqual(len(self.player02.hiddenCards), 3)\n\n def 
test_reset_hand(self):\n deck = Mazo()\n game = Game([self.player01, self.player02], deck)\n game.deal()\n self.player01.play_card(1)\n self.player01.reset_hand()\n self.assertEqual(len(self.player01.hiddenCards), 0)\n self.assertEqual(len(self.player01.playedCards), 0)\n\n def test_change_hand(self):\n deck = Mazo()\n game = Game([self.player01, self.player02], deck)\n game.deal()\n self.player01.play_card(0)\n self.player02.play_card(0)\n game.deal()\n self.assertFalse(self.player01.is_hand)\n self.assertTrue(self.player02.is_hand)\n\n def test_play_first_card_p1(self):\n del self.player01.hiddenCards[:]\n del self.player01.playedCards[:]\n carta_0 = Carta(ESPADA, 3)\n carta_1 = Carta(BASTO, 7)\n carta_2 = Carta(ESPADA, 5)\n self.player01.hiddenCards = [carta_0, carta_1, carta_2]\n # test\n self.player01.play_card(1)\n # assert\n self.assertEqual(\n self.player01.hiddenCards,\n [carta_0, carta_2],\n )\n self.assertEqual(\n self.player01.playedCards,\n [carta_1]\n )\n\n def test_who_is_next_greater_p1(self):\n deck = Mazo()\n game = Game([self.player01, self.player02], deck)\n game.deal()\n self.player01.hiddenCards = []\n carta_0 = Carta(ESPADA, 3)\n carta_1 = Carta(BASTO, 7)\n carta_2 = Carta(ESPADA, 5)\n self.player01.hiddenCards = [carta_0, carta_1, carta_2]\n self.player01.play_card(1)\n carta_3 = Carta(ORO, 2)\n carta_4 = Carta(BASTO, 4)\n carta_5 = Carta(COPA, 5)\n self.player02.hiddenCards = []\n self.player02.hiddenCards = [carta_3, carta_4, carta_5]\n self.player02.play_card(1)\n result = game.who_is_next()\n self.assertEqual(result, \"PLAYER1\")\n\n def test_who_is_next_same_card_p2(self):\n deck = Mazo()\n self.player01 = Player('1')\n self.player02 = Player('2')\n game = Game([self.player01, self.player02], deck)\n\n game.deal()\n game.deal()\n self.player01.reset_hand()\n self.player02.reset_hand()\n\n carta_0 = Carta(ESPADA, 3)\n carta_1 = Carta(BASTO, 7)\n carta_2 = Carta(ESPADA, 5)\n self.player01.hiddenCards = [carta_0, carta_1, carta_2]\n self.player01.play_card(0)\n\n carta_3 = Carta(ORO, 3)\n carta_4 = Carta(BASTO, 4)\n carta_5 = Carta(COPA, 5)\n self.player02.hiddenCards = [carta_3, carta_4, carta_5]\n self.player02.play_card(0)\n\n result = game.who_is_next()\n self.assertEqual(result, \"PLAYER2\")\n\n\nclass test_cantos(unittest.TestCase):\n player01 = Player('1')\n player02 = Player('2')\n\n def test_envido_player_01(self):\n deck = Mazo()\n game = Game([self.player01, self.player02], deck)\n game.deal()\n resultado = game.cantos_envido(0, \"Envido\")\n self.assertEqual(resultado, [\"1\", \"Envido\"])\n\n def test_envido_player_02(self):\n deck = Mazo()\n game = Game([self.player01, self.player02], deck)\n game.deal()\n resultado = game.cantos_envido(1, \"Envido\")\n self.assertEqual(resultado, None)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"truco/test_cartas.py","file_name":"test_cartas.py","file_ext":"py","file_size_in_byte":7414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"538938737","text":"from user_test.settings import *\n\nDEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\nROOT_URLCONF = 'user_test.urls'\n\nSECRET_KEY = 
os.environ.get('SECRET_KEY')\n","sub_path":"src/user_test/settings_tests.py","file_name":"settings_tests.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"617931372","text":"class DataReturn:\n    f = []\n    b = []\n    Tb = []\n    header = {}\n\n    def __repr__(self):\n        s = ''\n        for i, b in enumerate(self.b):\n            bstr = 'b = {}:'.format(b)\n            s += bstr + '\\n f = '\n            f = ['{:6.1f}'.format(x) for x in self.f]\n            s += ' '.join(f) + ' GHz\\nTb = '\n            T = ['{:6.1f}'.format(x) for x in self.Tb[i]]\n            s += ' '.join(T) + ' K\\n'\n        return s\n","sub_path":"data_handling.py","file_name":"data_handling.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"161626124","text":"from .models import ORMContact\nfrom .entities import Contact\n\nclass ContactRepository:\n    def _decode_db_contact(self, db_contact):\n        return Contact(id=db_contact.id,phone_number=db_contact.phone_number, name=db_contact.name, email=db_contact.email, photo=db_contact.photo)\n    \n    def get_all_contact(self):\n        all_contacts = ORMContact.objects.values().order_by('-id')\n        contacts = []\n        for all_contact in all_contacts:\n            contacts.append(all_contact)\n        return contacts \n\n\n    def get_contact_by_id(self, id):\n        try:\n            db_contact = ORMContact.objects.get(id=id)\n            return self._decode_db_contact(db_contact)\n        except ORMContact.DoesNotExist:\n            return \"No data valid\"\n\n    def create_new_contact(self, contact):\n        db_contact = ORMContact.objects.create(phone_number=contact.phone_number, name=contact.name, email=contact.email, photo=contact.photo)\n        return self._decode_db_contact(db_contact)\n\n    def update_contact(self, contact):\n        orm_contact = ORMContact.objects.get(id=contact.id)\n        orm_contact.phone_number = contact.phone_number\n        orm_contact.name = contact.name\n        orm_contact.email = contact.email\n        orm_contact.photo = contact.photo\n        orm_contact.save()\n        return self._decode_db_contact(orm_contact)\n\n    def delete_contact(self, id):\n        orm_contact = ORMContact.objects.get(id=id)\n        orm_contact.delete()\n        return self._decode_db_contact(orm_contact)","sub_path":"kredivo/contact/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"543607521","text":"# from keras.applications import InceptionV3\nfrom keras.applications.vgg16 import VGG16\n\n# REFERENCES:\n\n# VGG16\n# https://arxiv.org/abs/1409.1556\n\nfrom keras.optimizers import Adam, SGD\n\n# Keras imports\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Flatten, Activation, GlobalAveragePooling2D\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\n# from keras.callbacks import EarlyStopping\n\n# import glob\nimport json\n\nfrom loader_bot_omega import LoaderBot # dynamic full image augmentation\n# from loader_bot import LoaderBot\n\nimport time\nfrom splitter import get_skfold_data\n\n# import pandas as pd\n# import numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\n\n\n# https://github.com/DeepLearningSandbox/DeepLearningSandbox/blob/master/transfer_learning/fine-tune.py\n\ndef setup_to_transfer_learn(model, base_model, optimizer):\n    \"\"\"Freeze all layers and compile the 
model\"\"\"\n\n for layer in base_model.layers:\n layer.trainable = False\n\n model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\n\n\ndef add_brian_layers(base_model, num_classes, dropout=0.5):\n \"\"\"Add last layer to the convnet\n Args:\n base_model: keras model excluding top\n nb_classes: # of classes\n Returns:\n new keras model with last layer\n \"\"\"\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dense(1024, activation='relu', kernel_initializer='he_normal')(x) #new FC layer, random init\n # x = Dense(1024, activation='relu')(x)\n x = Dropout(dropout)(x)\n\n x = Dense(512, activation='relu', kernel_initializer='he_normal')(x) #new FC layer, random init\n # x = Dense(512, activation='relu')(x) #new FC layer, random init\n x = Dropout(dropout)(x)\n\n x = Dense(256, activation='relu', kernel_initializer='he_normal')(x) #new FC layer, random init\n # x = Dense(256, activation='relu')(x) #new FC layer, random init\n x = Dropout(dropout)(x)\n\n predictions = Dense(num_classes, activation='softmax')(x) #new softmax layer\n\n model = Model(inputs=base_model.input, outputs=predictions)\n\n return model\n\n\ndef add_double_brian_layers(base_model, num_classes, dropout=0.5):\n \"\"\"Add last layer to the convnet\n Args:\n base_model: keras model excluding top\n nb_classes: # of classes\n Returns:\n new keras model with last layer\n \"\"\"\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dense(2048, activation='relu', kernel_initializer='he_normal')(x) #new FC layer, random init\n # x = Dense(1024, activation='relu')(x)\n x = Dropout(dropout)(x)\n\n x = Dense(1024, activation='relu', kernel_initializer='he_normal')(x) #new FC layer, random init\n # x = Dense(512, activation='relu')(x) #new FC layer, random init\n x = Dropout(dropout)(x)\n\n x = Dense(512, activation='relu', kernel_initializer='he_normal')(x) #new FC layer, random init\n # x = Dense(256, activation='relu')(x) #new FC layer, random init\n x = Dropout(dropout)(x)\n\n predictions = Dense(num_classes, activation='softmax')(x) #new softmax layer\n\n model = Model(inputs=base_model.input, outputs=predictions)\n\n return model\n\n\ndef setup_to_finetune(model, freeze, optimizer):\n \"\"\"Freeze the bottom NB_IV3_LAYERS and retrain the remaining top layers.\n note: NB_IV3_LAYERS corresponds to the top 2 inception blocks in the inceptionv3 arch\n Args:\n model: keras model\n \"\"\"\n for layer in model.layers[:freeze]:\n layer.trainable = False\n\n for layer in model.layers[freeze:]:\n layer.trainable = True\n\n # adam = Adam(lr=lr)\n # sgd = SGD(lr=lr, momentum=0.9)\n\n model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\n\n\ndef plot_hist(history, info_str, epochs=2, augmentation=1, sprint=False):\n '''\n Make a plot of the rate of error as well as the accuracy of the model\n during training. 
Also include a line at error 0.20 which was the original\n    minimum acceptable error (self imposed) to submit results to the test\n    set when doing 3-way split.\n    Even after performance regularly exceeded the minimum requirement the line\n    was unchanged so that all of the graphs would be relative to each other.\n    Also it was still useful to see how a model's error was performing relative\n    to this baseline.\n    Also, the 2 charts written as a png had the filename coded to include\n    hyperparameters that were used in the model when the chart was created.\n    This allowed a simple visual evaluation of a model's performance when\n    doing randomized hyperparameter search. If a model appeared to be high\n    performing then the values could be reused in order to attempt to\n    replicate the result.\n    '''\n    fig, axs = plt.subplots(1, 2, figsize=(16, 8))\n\n    fig.suptitle(\"\", fontsize=12, fontweight='normal')\n\n    # stuff for marking the major and minor ticks dynamically relative\n    # to the number of epochs used to train\n    major_ticks = int(epochs / 10.0)\n    minor_ticks = int(epochs / 20.0)\n\n    title_text = \"Homewares and Furniture Image Identification\\n Train Set and Dev Set\"\n    ACC = 0.817 # record accuracy\n    if sprint is True:\n        ACC = 0.740\n        title_text = \"SPRINT: Homewares and Furniture Image Identification\\n Train Set and Dev Set\"\n\n    if major_ticks < 2:\n        major_ticks = 2\n\n    if minor_ticks < 1:\n        minor_ticks = 1\n\n    majorLocator = MultipleLocator(major_ticks)\n    majorFormatter = FormatStrFormatter('%d')\n    minorLocator = MultipleLocator(minor_ticks)\n\n    # correct x axis\n    history['loss'] = [0.0] + history['loss']\n    history['val_loss'] = [0.0] + history['val_loss']\n    history['acc'] = [0.0] + history['acc']\n    history['val_acc'] = [0.0] + history['val_acc']\n\n    x_line = [ACC] * (epochs + 1) # this line is now for accuracy of test set\n\n    # stuff for the loss chart\n    axs[0].set_title(title_text)\n\n    if augmentation > 1:\n        axs[0].set_xlabel('Epochs\\nAugmentation of {:3d}'.format(augmentation))\n    else:\n        axs[0].set_xlabel('Epochs')\n\n    axs[0].set_xlim(1, epochs)\n    axs[0].set_ylabel('Loss')\n#    axs[0].set_ylim(0, 15)\n\n    axs[0].plot(history['loss'], color=\"blue\", linestyle=\"--\", alpha=0.8, lw=1.0)\n    axs[0].plot(history['val_loss'], color=\"blue\", alpha=0.8, lw=1.0)\n    axs[0].legend(['Training', 'Validation'])\n    axs[0].xaxis.set_major_locator(majorLocator)\n    axs[0].xaxis.set_major_formatter(majorFormatter)\n\n    # for the minor ticks, use no labels; default NullFormatter\n    axs[0].xaxis.set_minor_locator(minorLocator)\n\n    # stuff for the accuracy chart\n    axs[1].set_title(title_text)\n\n    if augmentation > 1:\n        axs[1].set_xlabel('Epochs\\nAugmentation of {:3d}'.format(augmentation))\n    else:\n        axs[1].set_xlabel('Epochs')\n\n    axs[1].set_xlim(1, epochs)\n    axs[1].set_ylabel('Accuracy')\n    axs[1].set_ylim(0.0, 1.0)\n    axs[1].plot(x_line, color=\"red\", alpha=0.3, lw=4.0)\n    axs[1].plot(history['acc'], color=\"blue\", linestyle=\"--\", alpha=0.5, lw=1.0)\n    axs[1].plot(history['val_acc'], color=\"blue\", alpha=0.8, lw=1.0)\n    axs[1].plot(x_line, color=\"red\", linestyle=\"--\", alpha=0.8, lw=1.0)\n    axs[1].legend(['Record Accuracy ({:1.2f})'.format(ACC), 'Training', 'Validation'], loc='lower right')\n    axs[1].xaxis.set_major_locator(majorLocator)\n    axs[1].xaxis.set_major_formatter(majorFormatter)\n\n    # for the minor ticks, use no labels; default NullFormatter\n    axs[1].xaxis.set_minor_locator(minorLocator)\n\n    plt.savefig(\"../imgs/\" + info_str, facecolor='w', edgecolor='w', transparent=False)\n    # plt.show()\n\n\ndef 
run():\n # data_link_dict = get_skfold_data(path=\"../data/imgs/*.jpg\")\n start_time = time.time()\n\n # decommisioned because inflight data augmentation solves a lot of these\n # problems\n\n # Use json to load the permanent dictionary that has been Created\n with open(\"../data/data_splits.json\") as infile:\n data_link_dict = json.load(infile)\n\n EPOCHS = 10\n AUGMENTATION = 1 # could do 3 epochs of 10 augmentation or 30 of 1 which\n # provides more data for plots to work with\n\n DO = 0.55 # drop out\n\n # for Adam inital LR of 0.0001 is a good starting point\n # for SGD initial LR of 0.001 is a good starting point\n LR = 0.00025\n DECAY = 0.5e-6\n OPTIMIZER = Adam(lr=LR, decay=DECAY)\n # OPTIMIZER = SGD(lr=LR, momentum=0.9, nesterov=True)\n\n # NB_IV3_LAYERS_TO_FREEZE = 172\n NB_IV3_LAYERS_TO_FREEZE = 18\n MODEL_ID = 'v2_3g'\n\n plot_file = \"model_{:}.png\".format(MODEL_ID)\n weights_file = \"weights/model_{:}_weights.h5\".format(MODEL_ID)\n history_file = \"histories/history_{:}.json\".format(MODEL_ID)\n\n # user parameters for LoaderBot v1.0\n # Parameters for Generators\n params = {'dim': (299, 299),\n 'batch_size': 64,\n 'n_classes': 128,\n 'n_channels': 3,\n 'shuffle': False}\n\n # These parameters are for LoaderBot v2.0\n # Parameters for Generators\n params = {'dim': (224, 224),\n 'batch_size': 64,\n 'n_classes': 128,\n 'n_channels': 3,\n 'augmentation': AUGMENTATION,\n 'shuffle': True}\n\n # Parameters for Generators\n test_params = {'dim': (224, 224),\n 'batch_size': 64,\n 'n_classes': 128,\n 'n_channels': 3,\n 'augmentation': 1,\n 'augment': False,\n 'shuffle': True}\n\n # Datasets\n X_train_img_paths = data_link_dict[\"X_test_2\"]\n y_train = data_link_dict[\"y_test_2\"]\n\n X_test_img_paths = data_link_dict[\"X_test_3\"]\n y_test = data_link_dict[\"y_test_3\"]\n\n # Generators\n training_generator = LoaderBot(X_train_img_paths, y_train, **params)\n validation_generator = LoaderBot(X_test_img_paths, y_test, **test_params)\n\n # setup model\n # base_model = InceptionV3(weights='imagenet', include_top=False) #include_top=False excludes final FC layer\n base_model = VGG16(include_top=False, weights='imagenet')\n\n # seems like in Keras not including the top will exclude the FC layers at the\n # top, not just the softmax categories\n # # try to pop some layers to get to the top 'maxpool' then rebuild from there\n # base_model.pop()\n # base_model.pop()\n # base_model.pop()\n #\n # base_model.summary()\n\n model = add_double_brian_layers(base_model, 128, DO)\n\n # mini-train 1, like normal\n # transfer learning\n setup_to_transfer_learn(model, base_model, OPTIMIZER)\n\n # model.summary()\n\n print(\"model layers:\", model.layers)\n print(\"len model layers:\", len(model.layers))\n\n # Run model\n history_t1 = model.fit_generator(generator=training_generator,\n validation_data=validation_generator,\n epochs=EPOCHS,\n use_multiprocessing=False)\n\n # mini-train 2\n OPTIMIZER = Adam(lr=LR / 2.0, decay=DECAY)\n # try to fine tune some of the InceptionV3 layers also\n setup_to_finetune(model, NB_IV3_LAYERS_TO_FREEZE - 2, OPTIMIZER)\n\n print(\"\\n\\n Starting epoch {:}\\n\\n\".format(EPOCHS + 1))\n\n # Run model\n history_t2 = model.fit_generator(generator=training_generator,\n validation_data=validation_generator,\n epochs=EPOCHS,\n use_multiprocessing=False)\n\n # mini-train 3\n OPTIMIZER = Adam(lr=LR / 4.0, decay=DECAY)\n # try to fine tune some of the InceptionV3 layers also\n setup_to_finetune(model, NB_IV3_LAYERS_TO_FREEZE - 4, OPTIMIZER)\n\n print(\"\\n\\n Starting epoch 
{:}\\n\\n\".format(EPOCHS * 2 + 1))\n\n    # Run model\n    history_t3 = model.fit_generator(generator=training_generator,\n                                     validation_data=validation_generator,\n                                     epochs=EPOCHS,\n                                     use_multiprocessing=False)\n\n    # mini-train 4\n    OPTIMIZER = Adam(lr=LR / 8.0, decay=DECAY)\n    # try to fine tune some of the InceptionV3 layers also\n    setup_to_finetune(model, NB_IV3_LAYERS_TO_FREEZE - 6, OPTIMIZER)\n\n    print(\"\\n\\n Starting epoch {:}\\n\\n\".format(EPOCHS * 3 + 1))\n\n    # Run model\n    history_t4 = model.fit_generator(generator=training_generator,\n                                     validation_data=validation_generator,\n                                     epochs=EPOCHS,\n                                     use_multiprocessing=False)\n\n    # save the weights in case we want to predict on them later\n    model.save(weights_file)\n\n    # history_t1.history is one dict that is extended in place, so a single\n    # binding plus one loop folds the later mini-train histories into it\n    history_tl = history_t1.history\n    for later in (history_t2, history_t3, history_t4):\n        history_tl[\"acc\"] += later.history[\"acc\"]\n        history_tl[\"val_acc\"] += later.history[\"val_acc\"]\n        history_tl[\"loss\"] += later.history[\"loss\"]\n        history_tl[\"val_loss\"] += later.history[\"val_loss\"]\n\n    plot_hist(history_tl, plot_file, epochs=len(history_tl[\"acc\"]), sprint=True)\n\n    # try to save the history so models can be more easily compared and Also\n    # to better log results if going back is needed\n    with open(history_file, \"w\") as outfile:\n        json.dump(history_tl, outfile)\n\n    print(\"\\n\\n\\n\\nCompleted in {:6.2f} hrs\".format(((time.time() - start_time)) / 3600)) # convert to hours\n\n\nif __name__ == \"__main__\":\n    run()\n","sub_path":"src/model_duck_2_3.py","file_name":"model_duck_2_3.py","file_ext":"py","file_size_in_byte":14020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"209263217","text":"from os import listdir, getcwd, chdir, makedirs\nfrom os.path import isfile, join, exists\nimport sys\nimport linecache # sys and linecache are both used by PrintException below\nimport time\nfrom datetime import datetime\nimport json\nimport requests\nimport internetarchive\nfrom internetarchive import get_session\n\n# determine the file type to be downloaded. Add more as needed\nfile_type = [\n\t'_djvu.txt',\n\t'.gif',\n\t'.pdf'\n\t]\n\n# Internet Archive API access tokens\nIA_access = {'s3': {'access': '', 'secret': ''}}\n\n\ndef search_IA():\n\t# set the working directory \n\twork_dir = getcwd()\n\tchdir(work_dir)\n\t#authenticate to Internet Archives\n\ts = get_session(config=IA_access)\n\ts.access_key\n\t#search IA/heresies_magazine collection\n\tsearch = internetarchive.search_items('collection:heresies_magazine')\n\tprint ('As of %s there are %s items in heresies_magazine collection of Internet Archives' % (datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), len(search)))\n\t# iterate over the results\n\tfor i, result in enumerate(search):\n\t    # set itemid as identifier (id in Internet Archive)\n\t    itemid = result['identifier']\n\t    print ('file: %s' %(itemid))\n\t    # get all file types for the itemid\n\t    for filetype in file_type:\n\t    \t# removing extra characters for naming. If you are downloading similar file type (e.g. 
.pdf and .pdf_meta.txt) this will make sure to store them in different folders \n\t \tft = filetype.replace(\"djvu\", \"\").replace(\".\", \"\").replace(\"_\", \"\")\n\t \tprint ('getting %s files' %(ft))\n\t \tfolder = 'files/%s' %(ft)\n\t \tif not exists(folder):\n\t \t\tmakedirs(folder)\n\t \tchdir(folder)\n\t \tfilename = itemid + filetype\n\t \tif isfile(filename):\n\t \t\tprint ('%s of type %s is in folder -- skipping download' %(itemid, filetype))\n\t \telse:\n\t \t\titem = internetarchive.get_item(itemid)\n\t \t\tfile = item.get_file(itemid + filetype)\n\t \t\ttry:\n\t \t\t\tfile.download()\n\t \t\texcept:\n\t \t\t\tPrintException()\n\t \tchdir(work_dir)\n\ndef PrintException():\n\texc_type, exc_obj, tb = sys.exc_info()\n\tf = tb.tb_frame\n\tlineno = tb.tb_lineno\n\tfilename = f.f_code.co_filename\n\tlinecache.checkcache(filename)\n\tline = linecache.getline(filename, lineno, f.f_globals)\n\tprint(\"EXCEPTION IN (%s, LINE %s '%s'): %s\" % (filename, lineno, line.strip(), exc_obj))\n\n\n\nif __name__ == \"__main__\":\n\tsearch_IA()\n","sub_path":"scripts/ia/Heresies.py","file_name":"Heresies.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"511928150","text":"__author__=\"KOLANICH\"\n__license__=\"Unlicense\"\n__copyright__=\"\"\"\nThis is free and unencumbered software released into the public domain.\nAnyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means.\nIn jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\nFor more information, please refer to <http://unlicense.org/>\n\"\"\"\n\n\nimport sys, os, re\nfrom pathlib import Path\nfrom io import IOBase, BytesIO\nfrom collections import defaultdict, OrderedDict\nfrom codecs import decode\nimport binascii\nfrom mifare_classic import MifareClassic, KaitaiStream\n\nsectorRx=re.compile(r\"\\+Sector:\\s(\\d+)\")\n\nMETA_SIZE=16\n\nclass Sector():\n\tdef __init__(self, contents, meta):\n\t\tself.contents=contents\n\t\tself.meta=meta\n\t\tself._trailer=None\n\t\tself._values=None\n\tdef __repr__(self):\n\t\treturn self.__class__.__name__+\"(\"+repr(self.contents)+\", \"+repr(self.meta)+\")\"\n\t\n\t@property\n\tdef trailer(self):\n\t\tif not self._trailer:\n\t\t\tself._trailer=MifareClassic.Trailer(KaitaiStream(BytesIO(decode(self.meta.replace(\"-\", \"0\"), \"hex\"))), _root=MifareClassic)\n\t\treturn self._trailer\n\t\n\t@property\n\tdef values(self):\n\t\tvs=MifareClassic.Sector.Values(KaitaiStream(BytesIO(self.contents)), _root=MifareClassic)\n\t\tif not self._values:\n\t\t\tself._values=vs.values\n\t\treturn self._values\n\nclass DumpParser():\n\tdef __init__(self, dump):\n\t\tself.dump=dump\n\t\tself.contents=bytearray()\n\t\tself.onNewSector(None)\n\tdef onNewSector(self, sectorNo):\n\t\tif self.contents:\n\t\t\tif not self.meta:\n\t\t\t\t(self.contents, self.meta)=(self.contents[:-META_SIZE], self.contents[-META_SIZE:])\n\t\t\tself.dump.sectors[self.sectorNo]=Sector(self.contents, self.meta)\n\t\tself.sectorNo=sectorNo\n\t\tself.contents=bytearray()\n\t\tself.meta=\"\"\n\n\tdef processLine(self, line):\n\t\tline=line.strip()\n\t\tm=sectorRx.search(line)\n\t\t#print(line, m)\n\t\tif m:\n\t\t\tself.onNewSector(int(m.group(1)))\n\t\telse:\n\t\t\ttry:\n\t\t\t\tself.contents+=decode(line, \"hex\")\n\t\t\texcept binascii.Error as e:\n\t\t\t\tself.meta=line\n\t\t\t\t\n\t\t\t\t\n\tdef finish(self):\n\t\tself.onNewSector(None)\n\t\n\tdef __enter__(self):\n\t\tpass\n\t\n\tdef __exit__(self, *args, **kwargs):\n\t\tself.finish()\n\nclass Dump():\n\tdef __init__(self, data):\n\t\tif isinstance(data, IOBase):\n\t\t\tself.read(data)\n\t\t\n\t\telif isinstance(data, str):\n\t\t\ttry:\n\t\t\t\tdata=Path(data)\n\t\t\texcept:\n\t\t\t\tself.read(data.splitlines())\n\t\t\t\treturn\n\t\t\tself.__class__.__init__(self, data)\n\t\telif isinstance(data, Path):\n\t\t\twith data.open(\"rt\", encoding=\"utf-8\") as f:\n\t\t\t\tself.__class__.__init__(self, f)\n\t\n\tdef read(self, lines):\n\t\tself.sectors={}\n\t\tp=DumpParser(self)\n\t\tfor line in lines:\n\t\t\tp.processLine(line)\n\t\tp.finish()\n\t\tdel(p)\n","sub_path":"MFCTool/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"505242309","text":"class Node(object):\n    '''List node'''\n    def __init__(self,value,next_node=None) -> None:\n        self.value = value\n        self.next = next_node\n\nclass SingleLink(object):\n    '''Singly linked list'''\n    def __init__(self,node=None) -> None:\n        '''Set the head node'''\n        self._head = node\n    def is_empty(self) -> bool:\n        '''Check whether the list is empty'''\n        return self._head==None\n    def length(self) -> int:\n        '''Return the number of nodes in the list'''\n        res = 0\n        cur = self._head\n        while cur != None:\n            res+=1\n            cur = cur.next\n        return res\n    def travel(self) -> None:\n        '''Traverse the list and print every value'''\n        cur = self._head\n        while cur != None:\n            print(cur.value, end=\" 
\")\n            cur = cur.next\n    def add(self,value) -> None:\n        '''Insert a node at the head of the list'''\n        cur = Node(value,self._head)\n        self._head = cur\n    def append(self,value) -> None:\n        '''Append a node at the tail of the list'''\n        cur = Node(value)\n        if self.is_empty():\n            self._head = cur\n        else:\n            end = self._head\n            while end.next != None:\n                end = end.next\n            end.next = cur\n    def insert(self,pos,value) -> None:\n        '''Insert a node at position pos'''\n        if pos <= 0:\n            self.add(value)\n        elif pos > self.length()-1:\n            self.append(value)\n        else:\n            newNode = Node(value)\n            cur = self._head\n            for i in range(1,pos):\n                cur = cur.next\n            newNode.next = cur.next\n            cur.next = newNode\n    def remove(self,value) -> bool:\n        '''Remove the first node whose value equals value'''\n        cur = self._head\n        if cur == None:\n            return False\n        if cur.value == value:\n            self._head = cur.next\n            return True\n        pre = cur\n        cur = cur.next\n        while cur != None and cur.value != value:\n            cur = cur.next\n            pre = pre.next\n        if cur == None:\n            return False\n        pre.next = cur.next\n        return True\n    def search(self,value) -> bool:\n        '''Return True if a node with the given value exists'''\n        cur = self._head\n        while cur != None:\n            if cur.value == value:\n                return True\n            cur = cur.next\n        return False\n","sub_path":"All Code/Link.py","file_name":"Link.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"179808524","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2019-7-13 10:30\r\n# @Author : cunyu\r\n# @Site : cunyu1943.github.io\r\n# @File : wordFreqSP.py\r\n# @Software: PyCharm\r\n\r\n\r\n\"\"\"\r\nWord frequency statistics for commentary posts\r\n\"\"\"\r\n\r\nimport xlwt\r\nimport os, codecs\r\nimport jieba\r\nfrom collections import Counter\r\n\r\n\r\n# Build the stopword list\r\ndef stopwordslist(filepath):\r\n\tstopwords = [line.strip() for line in open(filepath, 'r', encoding='utf-8').readlines()]\r\n\treturn stopwords\r\n\r\n\r\ndef getAllWords(filepath, txt):\r\n\tseg_list = jieba.cut(txt.strip())\r\n\tc = Counter()\r\n\tstopwords = stopwordslist(filepath)  # read the stopword file once instead of once per token\r\n\tfor x in seg_list:\r\n\t\tif x not in stopwords:\r\n\t\t\tif len(x) > 1 and x != '\\r\\n' and x != ' ' and x != '\\t':\r\n\t\t\t\tc[x] += 1\r\n\twordFreqList = []\r\n\tfor item in c.most_common():\r\n\t\twordFreqList.append(item)\r\n\r\n\ttotalWords = 0\r\n\tfor i in range(len(wordFreqList)):\r\n\t\ttotalWords += wordFreqList[i][-1]\r\n\treturn wordFreqList, totalWords\r\n\r\n\r\nif __name__ == '__main__':\r\n\twith open('Sina博客_时评.txt', 'r', encoding='utf-8') as readFile, open('时评词频统计.txt', 'a', encoding='utf-8') as writeFile:\r\n\t\ttxt = readFile.read()\r\n\t\twriteFile.write('词' + '\\t\\t\\t' + '词频' + '\\t\\t\\t' + '累加频率')\r\n\t\tresult = getAllWords('stopword.txt', txt)\r\n\t\ttmpFreq = 0.0\r\n\t\tfor i in range(200):\r\n\t\t\tprint(result[0][i][0], str(result[0][i][-1]),str(result[0][i][-1] / result[-1]))\r\n\t\t\ttmpFreq += result[0][i][-1] / result[-1]\r\n\t\t\twriteFile.write(result[0][i][0] + '\\t\\t' + str(result[0][i][-1]) + '\\t\\t' + str(tmpFreq))\r\n","sub_path":"SinaBlog-master/frequency/wordFreqSP.py","file_name":"wordFreqSP.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"581987849","text":"from unittest import TestCase\nfrom unittest.mock import MagicMock\n\nfrom inventory_management.inventoryClass import inventory\nfrom inventory_management.furnitureClass import furniture\nfrom inventory_management.electricAppliancesClass import electricAppliances\nfrom inventory_management.market_prices import get_latest_price\n\nclass ModuleTests(TestCase):\n\n    def test_module(self):\n        productCode0 = \"productCode0\"\n        description = 
\"description\"\n marketPrice = \"marketPrice\"\n rentalPrice = \"rentalPrice\"\n\n newItem0 = inventory(productCode0, description, marketPrice, rentalPrice)\n self.assertEqual(newItem0.returnAsDictionary()['productCode'], productCode0)\n\n productCode1 = \"productCode1\"\n material = \"material\"\n size = \"size\"\n\n newItem1 = furniture(productCode1, description, marketPrice, rentalPrice, material, size)\n self.assertEqual(newItem1.returnAsDictionary()['productCode'], productCode1)\n\n productCode2 = \"productCode2\"\n brand = \"brand\"\n voltage = \"voltage\"\n\n newItem2 = electricAppliances(productCode2, description, marketPrice, rentalPrice, brand, voltage)\n self.assertEqual(newItem2.returnAsDictionary()['productCode'], productCode2)","sub_path":"students/josephhayes/lesson01/assignment/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"381965172","text":"import Settings\n\n\nclass Row:\n\n def __init__(self, master, mw):\n self.mw = mw # MainWindow\n self.master = master\n self.view = []\n self.hard = False\n\n def __getitem__(self, item):\n return self.view[item]\n\n def __setitem__(self, key, value):\n self.view[key] = value\n\n def append(self, value):\n self.view.append(value)\n\n def read(self):\n command = \"\"\n try:\n category = Settings.categories.index(self[0].get())\n except ValueError:\n self.mw.error(\"void\")\n category = 0\n if category == 2:\n command = Settings.commands[2][0].format(self[1].get())\n elif category == 3:\n name = self[1].get()\n if name == Settings.names[3][0]:\n command = \"$o\" # flag open cycle\n command += Settings.commands[3][0].format(\"cycle\"+str(self.mw.rows.index(self)), str(self[3].cget(\"text\")[self[3].cget(\"text\").index('=')+1:]))\n else:\n command = \"$c\" # flag close cycle\n elif category == 4:\n command = self[1].get()\n elif not Settings.categories[category][0] is None and not category == 3 and not category == 2:\n name = Settings.names[category].index(self[1].get())\n try:\n atts = self[3].cget(\"text\")\n except IndexError:\n atts = \"\"\n if category == 0 and name == 4:\n command = Settings.commands[category][name].format(atts[atts.index(\"=\")+1:])\n else:\n command = Settings.commands[category][name].format(atts)\n return command\n\n def grid(self):\n row_at_page = (self.mw.rows.index(self)) % 20\n for i in range(len(self.view)):\n self.view[i].grid(row=row_at_page, column=2*i, sticky='w')\n\n def grid_remove(self):\n for i in range(len(self.view)):\n self.view[i].grid_remove()\n\n def grid_option(self):\n row_at_page = (self.mw.rows.index(self)) % 20\n for i in range(len(self.view)):\n self.view[i].grid(row=row_at_page, column=i + 1, sticky='w')\n\n def update(self): # grid_remove() + grid(), but more effective\n row_at_page = (self.mw.rows.index(self)) % 20\n for i in range(len(self.view)):\n self.view[i].grid_remove()\n self.view[i].grid(row=row_at_page, column=2*i, sticky='w')\n\n def to_main(self):\n if self[1].get() == \"\":\n return \"\"\n else:\n return self[0].cget(\"text\") + \"=\" + self[1].get() + \", \"\n","sub_path":"Row.py","file_name":"Row.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"424645806","text":"import tweepy\nimport time\nimport random\nfrom configBot import create_api\n\napi = create_api()\n\ndef live_tweet_event(hashtag = \"#nohashtag\",\n first = 
\"first tweet\",\n final = \"final tweet\",\n tweets = []):\n # initial_tweet = \"test\" + \" \" + hashtag\n first_tweet = first + \" \" + hashtag\n api.update_status(status = first_tweet)\n print(first_tweet)\n #get initial tweet id\n get_tweet = api.home_timeline(count = 1)\n first_tweet_id = get_tweet[0].id\n for tweet in tweets:\n random_interval = random.randint(6, 7)\n random_interval = random_interval * 60\n time.sleep(random_interval)\n tweet_text = \"@yourfriend_Tim \" + tweet + \" \" + hashtag\n api.update_status(status=tweet_text, in_reply_to_status_id=first_tweet_id, )\n print(tweet_text)\n time.sleep(600)\n tweet_text = \"@yourfriend_Tim \" + final + \" \" + hashtag\n api.update_status(status = tweet_text, in_reply_to_status_id=first_tweet_id, )\n\n# sample tweet list, to be passed as a parameter into live_tweet_event()\ndem_debate_tweets = [\n \"Here we go!\",\n \"I'm not really that invested in politics, but I thought this would be fun\",\n \"I'm sure they each have their good qualities, but dang..what a bunch of DORKS!\",\n \"Didn't expect that, but okay. I guess that's politics for ya\",\n \"OMG, who is that old person? Is that Moses?\",\n \"Typical politician's, not answering the question smh.\",\n \"Sure, I'd vote for any of them, although I might throw up in my mouth a little for some. \",\n \"Wow, some great points being made! Too bad that one probably won't win\",\n \"We are screwed, everyone!\",\n \"That one needs to drop out. Quit while you're ahead!\",\n \"Out of your mind!!!! Try another career, please!!!!\",\n \"How come of I've never heard of them before?\",\n \"Listen to these people flapping their gums!\",\n \"Are the debates normally this boring?\",\n \"I hope somebody flips over a podium or something. I want to see some action!\",\n \"Interesting twist...? Probably not\",\n \"Some really dummies running, but better than what we have now\",\n \"I love each and every one of the candidates. LOVE THEM! \",\n \"I'm not sure if I really meant that last tweet. Still deciding.\"\n \"Is there a football game on now? What's the score and who's playing\",\n \"Is Moses still talking? Isn't it past his bedtime?\",\n \"Whoa, look out! Stuff is going down!!!!\",\n \"Well that's it. I know who I'm voting for.\",\n \"Just kidding, I'm still not sure.\"\n\n]\n","sub_path":"live_tweet_event.py","file_name":"live_tweet_event.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"549512582","text":"# URL: https://practice.geeksforgeeks.org/problems/-rearrange-array-alternately-1587115620/1/\n# Given a sorted array of positive integers. Your task is to rearrange the array elements alternatively i.e first element should be max value, second should be min value, third should be second max, fourth should be second min and so on.\n\n# Example 1:\n\n# Input:\n# N = 6\n# arr[] = {1,2,3,4,5,6}\n# Output: 6 1 5 2 4 3\n# Explanation: Max element = 6, min = 1, \n# second max = 5, second min = 2, and \n# so on... Modified array is : 6 1 5 2 4 3.\n# Example 2:\n\n# Input:\n# N = 11\n# arr[]={10,20,30,40,50,60,70,80,90,100,110}\n# Output:110 10 100 20 90 30 80 40 70 50 60\n# Explanation: Max element = 110, min = 10, \n# second max = 100, second min = 20, and \n# so on... Modified array is : \n# 110 10 100 20 90 30 80 40 70 50 60.\n# Your Task:\n# The task is to complete the function rearrange() which rearranges elements as explained above. 
Printing of the modified array will be handled by driver code.\n\n# Expected Time Complexity: O(N).\n# Expected Auxiliary Space: O(1).\n\n# Constraints:\n# 1 <= N <= 107\n# 1 <= arr[i] <= 107\n\nclass Solution:\n ##Complete this function\n #Function to rearrange the array elements alternately.\n\n def rearrange(self,arr, n): \n maxval = arr[-1]+1\n mincount = 0\n maxcount = -1\n for i in range(n):\n if(i%2==0):\n arr[i] = arr[i]+(arr[maxcount]%maxval)*maxval\n maxcount-=1\n elif(i%2!=0):\n arr[i]=arr[i]+(arr[mincount]%maxval)*maxval\n mincount +=1\n for i,n in enumerate(arr):\n arr[i] = n//maxval \n \n ##Your code here\n\n#{ \n# Driver Code Starts\n#Initial Template for Python 3\n\nimport math\ndef main():\n T=int(input())\n while(T>0):\n \n n=int(input())\n \n arr=[int(x) for x in input().strip().split()]\n \n ob=Solution()\n ob.rearrange(arr,n)\n \n for i in arr:\n print(i,end=\" \")\n \n print()\n \n T-=1\n\n\nif __name__ == \"__main__\":\n main()\n# } Driver Code Ends","sub_path":"Geeksforgeeks/02 Array/Rearrange Array Alternately.py","file_name":"Rearrange Array Alternately.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"212855047","text":"# Copyright (c) 2015, The MITRE Corporation. All rights reserved.\n# See LICENSE.txt for complete terms.\n\n\nimport stix\nfrom stix.common import vocabs, VocabString, StructuredTextList\nimport stix.bindings.incident as incident_binding\n\n\nclass NonPublicDataCompromised(VocabString):\n _namespace = \"http://stix.mitre.org/Incident-1\"\n _binding = incident_binding\n _binding_class = incident_binding.NonPublicDataCompromisedType\n \n def __init__(self, value=None, data_encrypted=None):\n self.data_encrypted = data_encrypted\n super(NonPublicDataCompromised, self).__init__(value)\n \n @classmethod\n def from_obj(cls, obj, return_obj=None):\n if not obj:\n return None\n\n if not return_obj:\n return_obj = cls()\n \n super(NonPublicDataCompromised, cls).from_obj(obj, return_obj=return_obj)\n return_obj.data_encrypted = obj.data_encrypted\n return return_obj\n \n def to_obj(self, return_obj=None, ns_info=None):\n if not return_obj:\n return_obj = self._binding_class()\n \n super(NonPublicDataCompromised, self).to_obj(return_obj=return_obj, ns_info=ns_info)\n return_obj.data_encrypted = self.data_encrypted\n return return_obj\n \n @classmethod\n def from_dict(cls, d, return_obj=None):\n if not d:\n return None\n\n if not return_obj:\n return_obj = cls()\n \n super(NonPublicDataCompromised, cls).from_dict(d, return_obj=return_obj)\n return_obj.data_encrypted = d.get('data_encrypted')\n return return_obj\n\n def is_plain(self):\n return False\n\n def to_dict(self):\n d = super(NonPublicDataCompromised, self).to_dict()\n\n if self.data_encrypted:\n d['data_encrypted'] = self.data_encrypted\n\n return d\n\n\nclass PropertyAffected(stix.Entity):\n _namespace = \"http://stix.mitre.org/Incident-1\"\n _binding = incident_binding\n _binding_class = incident_binding.PropertyAffectedType\n \n def __init__(self):\n self.property_ = None\n self.description_of_effect = None\n self.type_of_availability_loss = None\n self.duration_of_availability_loss = None\n self.non_public_data_compromised = None\n \n @property\n def property_(self):\n return self._property\n \n @property_.setter\n def property_(self, value):\n self._set_vocab(vocabs.LossProperty, property=value)\n\n @property\n def description_of_effect(self):\n \"\"\"A :class:`.StructuredTextList` object, 
containing descriptions about\n the purpose or intent of this object.\n\n Iterating over this object will yield its contents sorted by their\n ``ordinality`` value.\n\n Default Value: Empty :class:`.StructuredTextList` object.\n\n Note:\n IF this is set to a value that is not an instance of\n :class:`.StructuredText`, an effort will ne made to convert it.\n If this is set to an iterable, any values contained that are not\n an instance of :class:`.StructuredText` will be be converted.\n\n Returns:\n An instance of\n :class:`.StructuredTextList`\n\n \"\"\"\n return next(iter(self.descriptions_of_effect), None)\n\n @description_of_effect.setter\n def description_of_effect(self, value):\n self.descriptions_of_effect = value\n\n @property\n def descriptions_of_effect(self):\n return self._description_of_effect\n\n @descriptions_of_effect.setter\n def descriptions_of_effect(self, value):\n self._description_of_effect = StructuredTextList(value)\n\n @property\n def type_of_availability_loss(self):\n return self._type_of_availability_loss\n \n @type_of_availability_loss.setter\n def type_of_availability_loss(self, value):\n self._set_vocab(vocabs.AvailabilityLossType, type_of_availability_loss=value)\n \n @property\n def duration_of_availability_loss(self):\n return self._duration_of_availability_loss\n \n @duration_of_availability_loss.setter\n def duration_of_availability_loss(self, value):\n self._set_vocab(vocabs.LossDuration, duration_of_availability_loss=value)\n \n @property\n def non_public_data_compromised(self):\n return self._non_public_data_compromised\n \n @non_public_data_compromised.setter\n def non_public_data_compromised(self, value):\n self._set_var(NonPublicDataCompromised, non_public_data_compromised=value)\n \n @classmethod\n def from_obj(cls, obj, return_obj=None):\n if not obj:\n return None\n if not return_obj:\n return_obj = cls()\n \n return_obj.property_ = VocabString.from_obj(obj.Property)\n return_obj.descriptions_of_effect = StructuredTextList.from_obj(obj.Description_Of_Effect)\n return_obj.type_of_availability_loss = VocabString.from_obj(obj.Type_Of_Availability_Loss)\n return_obj.duration_of_availability_loss = VocabString.from_obj(obj.Duration_Of_Availability_Loss)\n return_obj.non_public_data_compromised = NonPublicDataCompromised.from_obj(obj.Non_Public_Data_Compromised)\n return return_obj\n \n def to_obj(self, return_obj=None, ns_info=None):\n super(PropertyAffected, self).to_obj(return_obj=return_obj, ns_info=ns_info)\n\n if not return_obj:\n return_obj = self._binding_class()\n \n if self.property_:\n return_obj.Property = self.property_.to_obj(ns_info=ns_info)\n if self.descriptions_of_effect:\n return_obj.Description_Of_Effect = self.descriptions_of_effect.to_obj(ns_info=ns_info)\n if self.type_of_availability_loss:\n return_obj.Type_Of_Availability_Loss = self.type_of_availability_loss.to_obj(ns_info=ns_info)\n if self.duration_of_availability_loss:\n return_obj.Duration_Of_Availability_Loss = self.duration_of_availability_loss.to_obj(ns_info=ns_info)\n if self.non_public_data_compromised:\n return_obj.Non_Public_Data_Compromised = self.non_public_data_compromised.to_obj(ns_info=ns_info)\n \n return return_obj\n \n @classmethod\n def from_dict(cls, d, return_obj=None):\n if not d:\n return None\n if not return_obj:\n return_obj = cls()\n \n return_obj.property_ = VocabString.from_dict(d.get('property'))\n return_obj.descriptions_of_effect = StructuredTextList.from_dict(d.get('description_of_effect'))\n return_obj.type_of_availability_loss = 
VocabString.from_dict(d.get('type_of_availability_loss'))\n return_obj.duration_of_availability_loss = VocabString.from_dict(d.get('duration_of_availability_loss'))\n return_obj.non_public_data_compromised = NonPublicDataCompromised.from_dict(d.get('non_public_data_compromised'))\n \n return return_obj\n \n def to_dict(self):\n return super(PropertyAffected, self).to_dict()\n","sub_path":"stix/incident/property_affected.py","file_name":"property_affected.py","file_ext":"py","file_size_in_byte":6999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"335081217","text":"from ast import literal_eval\nimport csv\nfrom django.contrib.auth.admin import User\nfrom django.core.files import File\nfrom django.core.management.base import BaseCommand\nfrom django.core.mail import (EmailMessage,)\nfrom ccd.settings import(BASE_DIR)\nfrom masterdata.models import (Boundary, DynamicContent)\nfrom partner.models import (PartnerReportFile,)\n\nEMAIL_HOST_USER = 'admin@meal.mahiti.org'\n\n\nclass Command(BaseCommand):\n help = 'Runs crone to generate report using csv data.'\n\n def get_repr(self, value):\n if callable(value):\n return '%s' % value()\n return value\n\n def get_field(self, instance, field):\n field_path = field.split('.')\n attr = instance\n for elem in field_path:\n try:\n attr = getattr(attr, elem)\n except AttributeError:\n return None\n return attr\n\n def add_arguments(self, parser):\n parser.add_argument('data', type=str)\n parser.add_argument(\n 'location_ids',\n type=str\n )\n\n def handle(self, *args, **options):\n from_ = EMAIL_HOST_USER\n try:\n data = literal_eval(options['data'])\n location_ids = literal_eval(options['location_ids'])\n location = Boundary.objects.filter(\n id__in=location_ids)\n headers = data.get('display_headers', [])\n level_name = data.get('level_name', [])\n level_headers = data.get('level_headers', [])\n user_id = data.get('user_id', 0)\n location_name = data.get('location_name')\n location_type = data.get('location_type')\n user_email = User.objects.get(id=int(user_id))\n user_name = user_email.first_name + ' ' + user_email.last_name\n with open(BASE_DIR + '/static/' + 'location_report.csv', 'wb+') as f:\n write = csv.writer(f)\n write.writerow(headers)\n for loc in location:\n location_list = []\n for level, head in zip(level_name, level_headers):\n obj = self.get_repr(self.get_field(loc, level))\n location_list.append(obj)\n write.writerow(location_list)\n part_report = PartnerReportFile.objects.create(user=user_email,\n name='location_report')\n part_report.report.save(f.name, File(f))\n download = BASE_DIR + '/' + part_report.report.url\n email_file_name = 'Location-Report.csv'\n get_content = DynamicContent.objects.get(active=2, content_type=1)\n sub = get_content.subject.format(location_name)\n body = get_content.content.format(\n location_name, user_name, location_type)\n user_mail_list = [user_email.email]\n email = EmailMessage(sub, body, from_,\n user_mail_list,\n )\n attachment = open(download, 'rb+')\n email.attach(email_file_name, attachment.read(), 'application/csv')\n email.send()\n except Exception as e:\n sub = e.message\n body = 'Dear Team, \\n\\n Please find the Task Report attached along with this mail.'\n user_mail_list = ['pradam.abhilash@mahiti.com']\n email = EmailMessage(sub, body, from_,\n user_mail_list,\n )\n 
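# notify the maintainer that report generation failed; the exception message becomes the subject\n            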
email.send()\n","sub_path":"masterdata/management/commands/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"296873832","text":"# %%\nfrom pyAudioAnalysis import audioBasicIO\nfrom pyAudioAnalysis import ShortTermFeatures\nimport matplotlib.pyplot as plt\nimport numpy as np\n# %%\nFs = 524288\nx = np.load(\"ae1.npy\")\nF, f_names = ShortTermFeatures.feature_extraction(\n x, Fs, 0.010*Fs, 0.005*Fs )\nplt.subplot(211)\nplt.plot(F[2, :])\nplt.xlabel('Frame no')\nplt.ylabel(f_names[2])\nplt.subplot(212)\nplt.plot(F[3, :])\nplt.xlabel('Frame no')\nplt.ylabel(f_names[3])\nplt.show()\n\n# %%\nplt.plot(x)\n\n# %%\n","sub_path":"pyaudioanalysis_test.py","file_name":"pyaudioanalysis_test.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"164234771","text":"from sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom catalog_setup import Genre, Base, Album\r\n\r\nengine = create_engine('sqlite:///musiccatalog.db')\r\nBase.metadata.bind = engine\r\n\r\nDBSession = sessionmaker(bind=engine)\r\nsession = DBSession()\r\n\r\nemail = \"tipcatdp@gmail.com\"\r\n# Starter items to be added to databsse -- 8 albums in 6 genres\r\n\r\n# Indie Rock Albums\r\nindie = Genre(name=\"Indie Rock\", creator=email)\r\nsession.add(indie)\r\nsession.commit()\r\n\r\n\r\nalbum1 = Album(\r\n artist=\"Guided By Voices\",\r\n title=\"Bee Thousand (1994)\",\r\n description=\"Seminal genre-defining album from Ohio lo-fi stalwarts\",\r\n creator=email,\r\n price=\"$17.99\",\r\n genre=indie)\r\nsession.add(album1)\r\nsession.commit()\r\n\r\n\r\nalbum2 = Album(\r\n artist=\"Pavement\",\r\n title=\"Slanted and Enchanted (1991)\",\r\n description=\"Arguably THE indie rock album\",\r\n creator=email,\r\n price=\"$17.99\",\r\n genre=indie)\r\nsession.add(album2)\r\nsession.commit()\r\n\r\nalbum3 = Album(\r\n artist=\"Sebadoh\",\r\n title=\"Bubble and Scrape (1993)\",\r\n description=\"Slacker lo-fi anthems for the socially challenged\",\r\n creator=email,\r\n price=\"$15.99\",\r\n genre=indie)\r\nsession.add(album3)\r\nsession.commit()\r\n\r\nalbum4 = Album(\r\n artist=\"Sonic Youth\",\r\n title=\"Daydream Nation (1988 - double album)\",\r\n description=\"Classic double album from NYC noise pioneers\",\r\n creator=email,\r\n price=\"$31.99\",\r\n genre=indie)\r\nsession.add(album4)\r\nsession.commit()\r\n\r\nalbum5 = Album(\r\n artist=\"My Bloody Valentine\",\r\n title=\"Loveless (1991)\",\r\n description=\"Trailblazing shoegaze album, still massively influential\",\r\n creator=email,\r\n price=\"$27.99\",\r\n genre=indie)\r\nsession.add(album5)\r\nsession.commit()\r\n\r\nalbum6 = Album(\r\n artist=\"Arcade Fire\",\r\n title=\"Funeral (2004)\",\r\n description=\"Released to universal acclaim, vital indie rock for the 00s\",\r\n creator=email,\r\n price=\"$19.99\",\r\n genre=indie)\r\nsession.add(album6)\r\nsession.commit()\r\n\r\nalbum7 = Album(\r\n artist=\"Beach House\",\r\n title=\"Teen Dream (2010)\",\r\n description=\"Redefining dream pop for the 2010s\",\r\n creator=email,\r\n price=\"$18.99\",\r\n genre=indie)\r\nsession.add(album7)\r\nsession.commit()\r\n\r\nalbum8 = Album(\r\n artist=\"Snail Mail\",\r\n title=\"Lush (2018)\",\r\n description=\"The new indie rock standard bearer\",\r\n creator=email,\r\n price=\"$21.99\",\r\n 
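# genre references the Genre row created at the start of this section\r\n    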
genre=indie)\r\nsession.add(album8)\r\nsession.commit()\r\n\r\n\r\n# Classic Rock Albums\r\nclassic_rock = Genre(name=\"Classic Rock\", creator=email)\r\nsession.add(classic_rock)\r\nsession.commit()\r\n\r\n\r\nalbum1 = Album(\r\n artist=\"The Beatles\",\r\n title=\"The Beatles [White Album] (1968)\",\r\n description=\"The White Album. What more can one say?\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=classic_rock)\r\nsession.add(album1)\r\nsession.commit()\r\n\r\nalbum2 = Album(\r\n artist=\"The Rolling Stones\",\r\n title=\"Exile on Main Street (1972 - double album)\",\r\n description=\"The finest record from the Stones\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=classic_rock)\r\nsession.add(album2)\r\nsession.commit()\r\n\r\nalbum3 = Album(\r\n artist=\"Led Zeppelin\",\r\n title=\"IV (1971)\",\r\n description=\"\\\"Stairway to Heaven\\\" and the sound of 70s hard rock\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=classic_rock)\r\nsession.add(album3)\r\nsession.commit()\r\n\r\nalbum4 = Album(\r\n artist=\"The Kinks\",\r\n title=\"Lola Versus Powerman and the Moneygoround, Part One (1970)\",\r\n description=\"\\\"Lola\\\" and other gems from these British Invasion veterans\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=classic_rock)\r\nsession.add(album4)\r\nsession.commit()\r\n\r\nalbum5 = Album(\r\n artist=\"The Who\",\r\n title=\"Quadrophenia (1973)\",\r\n description=\"The last of The Who's rock \\'operas\\'\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=classic_rock)\r\nsession.add(album5)\r\nsession.commit()\r\n\r\nalbum6 = Album(\r\n artist=\"The Doors\",\r\n title=\"The Doors (1967)\",\r\n description=\"A defining statement of its time\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=classic_rock)\r\nsession.add(album6)\r\nsession.commit()\r\n\r\nalbum7 = Album(\r\n artist=\"Jimi Hendrix\",\r\n title=\"Are You Experienced? 
(1967)\",\r\n description=\"He reinvented the guitar and then some\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=classic_rock)\r\nsession.add(album7)\r\nsession.commit()\r\n\r\nalbum8 = Album(\r\n artist=\"Fleetwood Mac\",\r\n title=\"Rumours (1977)\",\r\n description=\"20 million copies sold, a soft rock classic of the 70s\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=classic_rock)\r\nsession.add(album8)\r\nsession.commit()\r\n\r\n# Punk\r\npunk = Genre(name=\"Punk\", creator=email)\r\nsession.add(punk)\r\nsession.commit()\r\n\r\nalbum1 = Album(\r\n artist=\"Ramones\",\r\n title=\"Ramones(1976)\",\r\n description=\"The first true punk album that changed rock forever\",\r\n creator=email,\r\n price=\"$18.99\",\r\n genre=punk)\r\nsession.add(album1)\r\nsession.commit()\r\n\r\nalbum2 = Album(\r\n artist=\"Sex Pistols\",\r\n title=\"Never Mind The Bollocks, Here's The Sex Pistols\",\r\n description=\"The band that made people fear punk rock\",\r\n creator=email,\r\n price=\"$18.99\",\r\n genre=punk)\r\nsession.add(album2)\r\nsession.commit()\r\n\r\nalbum3 = Album(\r\n artist=\"The Clash\",\r\n title=\"London Calling (1979)\",\r\n description=\"The punk band that performed radio friendly anthems\",\r\n creator=email,\r\n price=\"$22.99\",\r\n genre=punk)\r\nsession.add(album3)\r\nsession.commit()\r\n\r\nalbum4 = Album(\r\n artist=\"Buzzcocks\",\r\n title=\"Another Music in a Different Kitchen (1978)\",\r\n description=\"The poppiest of the punks\",\r\n creator=email,\r\n price=\"$17.99\",\r\n genre=punk)\r\nsession.add(album4)\r\nsession.commit()\r\n\r\nalbum5 = Album(\r\n artist=\"X\",\r\n title=\"Los Angeles (1980)\",\r\n description=\"Los Angeles punk rockers depict decadent glory\",\r\n creator=email,\r\n price=\"$15.99\",\r\n genre=punk)\r\nsession.add(album5)\r\nsession.commit()\r\n\r\nalbum6 = Album(\r\n artist=\"The Slits\",\r\n title=\"Cut (1979)\",\r\n description=\"A feminist punk classic\",\r\n creator=email,\r\n price=\"$13.99\",\r\n genre=punk)\r\nsession.add(album6)\r\nsession.commit()\r\n\r\nalbum7 = Album(\r\n artist=\"Wire\",\r\n title=\"Pink Flag (1977)\",\r\n description=\"Art-punk at its finest, pointed the way toward post-punk\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=punk)\r\nsession.add(album7)\r\nsession.commit()\r\n\r\nalbum8 = Album(\r\n artist=\"Crass\",\r\n title=\"Penis Envy (1981)\",\r\n description=\"Dangerous and uncompromising as punk should be\",\r\n creator=email,\r\n price=\"$31.99\",\r\n genre=punk)\r\nsession.add(album8)\r\nsession.commit()\r\n\r\n# Jazz\r\njazz = Genre(name=\"Jazz\", creator=email)\r\nsession.add(jazz)\r\nsession.commit()\r\n\r\nalbum1 = Album(\r\n artist=\"Louis Armstrong\",\r\n title=\"The Complete Hot Five and Hot Seven Recordings, vol. 
1 (1925)\",\r\n description=\"Early classics from the original jazz master\",\r\n creator=email,\r\n price=\"$27.99\",\r\n genre=jazz)\r\nsession.add(album1)\r\nsession.commit()\r\n\r\nalbum2 = Album(\r\n artist=\"Duke Ellington\",\r\n title=\"Ellington at Newport (1956)\",\r\n description=\"Legendary live performance that sparked the Duke's comeback\",\r\n creator=email,\r\n price=\"$24.99\",\r\n genre=jazz)\r\nsession.add(album2)\r\nsession.commit()\r\n\r\nalbum3 = Album(\r\n artist=\"Miles Davis\",\r\n title=\"Kind of Blue (1959)\",\r\n description=\"Arguably the greatest jazz album of all time\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=jazz)\r\nsession.add(album3)\r\nsession.commit()\r\n\r\nalbum4 = Album(\r\n artist=\"John Coltrane\",\r\n title=\"Blue Train (1958)\",\r\n description=\"The first masterpiece by Coltrane\",\r\n creator=email,\r\n price=\"$25.99\",\r\n genre=jazz)\r\nsession.add(album4)\r\nsession.commit()\r\n\r\nalbum5 = Album(\r\n artist=\"Ornette Coleman\",\r\n title=\"The Shape of Jazz to Come (1959)\",\r\n description=\"The defining document of avant-garde jazz\",\r\n creator=email,\r\n price=\"$19.99\",\r\n genre=jazz)\r\nsession.add(album5)\r\nsession.commit()\r\n\r\nalbum6 = Album(\r\n artist=\"Charles Mingus\",\r\n title=\"Mingus Ah Um (1959)\",\r\n description=\"Legendary bassist throws down\",\r\n creator=email,\r\n price=\"$24.99\",\r\n genre=jazz)\r\nsession.add(album6)\r\nsession.commit()\r\n\r\nalbum7 = Album(\r\n artist=\"John Coltrane\",\r\n title=\"A Love Supreme (1965)\",\r\n description=\"Among the most important records ever made in any genre\",\r\n creator=email,\r\n price=\"$29.99\",\r\n genre=jazz)\r\nsession.add(album7)\r\nsession.commit()\r\n\r\nalbum8 = Album(\r\n artist=\"Miles Davis\",\r\n title=\"Bitches Brew (1970)\",\r\n description=\"The defining word on jazz fusion\",\r\n creator=email,\r\n price=\"$41.99\",\r\n genre=jazz)\r\nsession.add(album8)\r\nsession.commit()\r\n\r\n\r\n# Hip Hop\r\nhip_hop = Genre(name=\"Hip Hop\", creator=email)\r\nsession.add(hip_hop)\r\nsession.commit()\r\n\r\nalbum1 = Album(\r\n artist=\"Public Enemy\",\r\n title=\"It Takes a Nation of Millions to Hold Us Back (1988)\",\r\n description=\"Incendiary classic from NYC crew\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=hip_hop)\r\nsession.add(album1)\r\nsession.commit()\r\n\r\nalbum2 = Album(\r\n artist=\"Beastie Boys\",\r\n title=\"Paul's Boutique\",\r\n description=\"A new sound that changed the game\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=hip_hop)\r\nsession.add(album2)\r\nsession.commit()\r\n\r\nalbum3 = Album(\r\n artist=\"N.W.A.\",\r\n title=\"Straight Outta Compton (1988)\",\r\n description=\"\\'Gangsta\\' rap's defining statement\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=hip_hop)\r\nsession.add(album3)\r\nsession.commit()\r\n\r\nalbum3 = Album(\r\n artist=\"Wu-Tang Clan\",\r\n title=\"Enter the Wu-Tang (36 Chambers) (1993)\",\r\n description=\"To many, the greatest rap album of all time\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=hip_hop)\r\nsession.add(album3)\r\nsession.commit()\r\n\r\nalbum4 = Album(\r\n artist=\"The Notorious BIG\",\r\n title=\"Ready to Die (1994)\",\r\n description=\"East coast classic of hardcore rap from Big-E\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=hip_hop)\r\nsession.add(album4)\r\nsession.commit()\r\n\r\nalbum5 = Album(\r\n artist=\"2Pac\",\r\n title=\"All Eyez on Me (1996)\",\r\n description=\"To the west coast what Big-E is to the east\",\r\n 
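# creator stamps each seed record with the admin email defined at the top of this script\r\n    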
creator=email,\r\n    price=\"$21.99\",\r\n    genre=hip_hop)\r\nsession.add(album5)\r\nsession.commit()\r\n\r\nalbum6 = Album(\r\n    artist=\"Dr Dre\",\r\n    title=\"The Chronic (1992)\",\r\n    description=\"G-Funk defined\",\r\n    creator=email,\r\n    price=\"$21.99\",\r\n    genre=hip_hop)\r\nsession.add(album6)\r\nsession.commit()\r\n\r\nalbum7 = Album(\r\n    artist=\"Outkast\",\r\n    title=\"Stankonia\",\r\n    description=\"A classic of southern hip-hop\",\r\n    creator=email,\r\n    price=\"$21.99\",\r\n    genre=hip_hop)\r\nsession.add(album7)\r\nsession.commit()\r\n\r\nalbum8 = Album(\r\n    artist=\"Kendrick Lamar\",\r\n    title=\"DAMN.\",\r\n    description=\"A contemporary hip-hop masterpiece\",\r\n    creator=email,\r\n    price=\"$21.99\",\r\n    genre=hip_hop)\r\nsession.add(album8)\r\nsession.commit()\r\n\r\n\r\n# R&B\r\nrandb = Genre(name=\"R&B\", creator=email)\r\nsession.add(randb)\r\nsession.commit()\r\n\r\nalbum1 = Album(\r\n    artist=\"TLC\",\r\n    title=\"CrazySexyCool (1994)\",\r\n    description=\"The name says it all\",\r\n    creator=email,\r\n    price=\"$21.99\",\r\n    genre=randb)\r\nsession.add(album1)\r\nsession.commit()\r\n\r\nalbum2 = Album(\r\n    artist=\"Boyz II Men\",\r\n    title=\"II (1994)\",\r\n    description=\"90s classic of vocal R&B\",\r\n    creator=email,\r\n    price=\"$21.99\",\r\n    genre=randb)\r\nsession.add(album2)\r\nsession.commit()\r\n\r\nalbum3 = Album(\r\n    artist=\"Mariah Carey\",\r\n    title=\"Music Box (1993)\",\r\n    description=\"Her defining statement\",\r\n    creator=email,\r\n    price=\"$21.99\",\r\n    genre=randb)\r\nsession.add(album3)\r\nsession.commit()\r\n\r\nalbum4 = Album(\r\n    artist=\"Usher\",\r\n    title=\"My Way (1997)\",\r\n    description=\"The R&B loverman par excellence\",\r\n    creator=email,\r\n    price=\"$21.99\",\r\n    genre=randb)\r\nsession.add(album4)\r\nsession.commit()\r\n\r\nalbum5 = Album(\r\n    artist=\"Mary J. Blige\",\r\n    title=\"What's the 411? 
(1992)\",\r\n description=\"The queen of hip-hop soul\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=randb)\r\nsession.add(album5)\r\nsession.commit()\r\n\r\nalbum6 = Album(\r\n artist=\"Destiny's Child\",\r\n title=\"The Writing's on the Wall (1999)\",\r\n description=\"The Supremes of the 90s\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=randb)\r\nsession.add(album6)\r\nsession.commit()\r\n\r\nalbum7 = Album(\r\n artist=\"Bell Biv Devoe\",\r\n title=\"Poison (1990)\",\r\n description=\"Not the hair metal band\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=randb)\r\nsession.add(album7)\r\nsession.commit()\r\n\r\nalbum8 = Album(\r\n artist=\"Blackstreet\",\r\n title=\"Another Level (1996)\",\r\n description=\"I like the way you work it...\",\r\n creator=email,\r\n price=\"$21.99\",\r\n genre=randb)\r\nsession.add(album8)\r\nsession.commit()\r\n\r\nprint(\"Added genres and albums!\")\r\n","sub_path":"music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":12902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"385094236","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport time\nfrom lxml import etree\nfrom geom import *\nfrom datetime import datetime\n\n\nclass Osmxml(object):\n def __init__(self, filename, sequentialOutputMode=False, \\\n noUploadFalse=True, osmVersion=False, timestamp=False,\\\n significantDigits=9, roundingDigits=7, addVisible=False):\n self.filename = filename\n self.sequentialOutputMode = sequentialOutputMode\n self.noUploadFalse = noUploadFalse\n self.significantDigits = significantDigits\n self.roundingDigits = roundingDigits\n self.addVisible = addVisible\n\n if sequentialOutputMode:\n self.fileNode = open(filename + '_nodes', 'w')\n self.fileWay = open(filename + '_ways', 'w+')\n self.fileRelation = open(filename + '_relations', 'w+')\n else:\n self.fileNode = open(filename, 'w')\n self.fileWay = self.fileRelation = self.fileNode\n\n self.attributes = {}\n if osmVersion:\n self.attributes.update({'version' : '1'})\n if timestamp:\n self.attributes.update({'timestamp' : datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')})\n if addVisible:\n self.attributes.update({'visible' : 'true'})\n self.isPython2 = sys.version_info < (3, 0)\n self.outputHeader()\n\n def outputHeader(self):\n f = self.fileNode\n if self.noUploadFalse:\n f.write('\\n\\n')\n else:\n f.write('\\n\\n')\n\n def outputNodes(self, nodes, featuresmap):\n f = self.fileNode\n for node in nodes:\n xmlattrs = {'id': str(node.id), 'lat': str(node.y * 10 ** -self.significantDigits),\n 'lon': str(node.x * 10 ** -self.significantDigits)}\n xmlattrs.update(self.attributes)\n\n xmlobject = etree.Element('node', xmlattrs)\n\n if node in featuresmap:\n for (key, value) in featuresmap[node].tags.items():\n tag = etree.Element('tag', {'k': key, 'v': value})\n xmlobject.append(tag)\n if self.isPython2:\n f.write(etree.tostring(xmlobject))\n else:\n f.write(etree.tostring(xmlobject, encoding='unicode'))\n f.write('\\n')\n\n def outputWays(self, ways, featuresmap):\n f = self.fileWay\n for way in ways:\n xmlattrs = {'id': str(way.id)}\n xmlattrs.update(self.attributes)\n\n xmlobject = etree.Element('way', xmlattrs)\n\n for node in way.points:\n nd = etree.Element('nd', {'ref': str(node.id)})\n xmlobject.append(nd)\n if way in featuresmap:\n for (key, value) in featuresmap[way].tags.items():\n tag = etree.Element('tag', {'k': key, 'v': value})\n xmlobject.append(tag)\n\n if self.isPython2:\n 
f.write(etree.tostring(xmlobject))\n            else:\n                f.write(etree.tostring(xmlobject, encoding='unicode'))\n            f.write('\\n')\n\n    def outputRelations(self, relations, featuresmap):\n        f = self.fileRelation\n        for relation in relations:\n            xmlattrs = {'id': str(relation.id)}\n            xmlattrs.update(self.attributes)\n\n            xmlobject = etree.Element('relation', xmlattrs)\n\n            for (member, role) in relation.members:\n                member = etree.Element('member', {'type': 'way', 'ref': str(member.id), 'role': role})\n                xmlobject.append(member)\n\n            tag = etree.Element('tag', {'k': 'type', 'v': 'multipolygon'})\n            xmlobject.append(tag)\n            if relation in featuresmap:\n                for (key, value) in featuresmap[relation].tags.items():\n                    tag = etree.Element('tag', {'k': key, 'v': value})\n                    xmlobject.append(tag)\n\n            if self.isPython2:\n                f.write(etree.tostring(xmlobject))\n            else:\n                f.write(etree.tostring(xmlobject, encoding='unicode'))\n            f.write('\\n')\n\n\n    def outputFooter(self):\n        f = self.fileRelation\n        f.write('</osm>')\n\n    def output(self, geometries, features):\n        # First, set up a few data structures for optimization purposes\n        nodes = [geom for geom in geometries if type(geom) == Point]\n        ways = [geom for geom in geometries if type(geom) == Way]\n        relations = [geom for geom in geometries if type(geom) == Relation]\n        featuresmap = {feature.geometry: feature for feature in features}\n        self.outputNodes(nodes, featuresmap)\n        self.outputWays(ways, featuresmap)\n        self.outputRelations(relations, featuresmap)\n\n    def finish(self):\n        node = self.fileNode\n        way = self.fileWay\n        relation = self.fileRelation\n        limit = 50000000\n\n        self.outputFooter()\n        # merge separate files\n        if self.sequentialOutputMode:\n            way.seek(0)\n            data = way.read(limit)\n            while data:\n                node.write(data)\n                data = way.read(limit)\n            way.close()\n            relation.seek(0)\n            data = relation.read(limit)\n            while data:\n                node.write(data)\n                data = relation.read(limit)\n            relation.close()\n            os.rename(self.filename + '_nodes', self.filename)\n            os.remove(self.filename + '_ways')\n            os.remove(self.filename + '_relations')\n        node.close()\n","sub_path":"osmxml.py","file_name":"osmxml.py","file_ext":"py","file_size_in_byte":5696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"139964691","text":"\n# coding: utf-8\n\n# doubts and questions:\n    # what is return for?\n    # selectors:\n\n\n#################\n# Display Table #\n#################\n\n### Modules\n\n### Constants\n\n### Functions & Procedures\ndef loadTable(csvPath):\n    with open(csvPath, 'r') as csvFile:\n        someColors = [nn.strip() for nn in csvFile.readlines()]\n    return someColors\n\n\n\n\ndef convert2rgb(colorHex):\n    rr = colorHex[:2]\n    gg = colorHex[2:4]\n    bb = colorHex[4:]\n    return int(rr, 16), int(gg, 16), int(bb, 16)\n\ndef convert2hex(colorHex):\n    return '#{}'.format(colorHex.upper())\n    \n\ndef convert2dbColor(colorHex):\n    rr = colorHex[:2]\n    gg = colorHex[2:4]\n    bb = colorHex[4:]\n    return int(rr, 16)/255, int(gg, 16)/255, int(bb, 16)/255\n\n# convert the dbColor value into percentages\ndef convert2ratio(colorHex):\n    dbColor=convert2dbColor(colorHex)\n    rr=dbColor[0]\n    gg=dbColor[1]\n    bb=dbColor[2]\n    return '{:.0%}'.format(rr), '{:.0%}'.format(gg),'{:.0%}'.format(bb)\n\n    \n\n### Variables\ncsvPath = 'webSafeColor.csv'\nmargin = 20\n### Instructions\nsomeColors = loadTable(csvPath)\n\n#print(convert2hex(someColors))\nii=0\nnewPage('A4')\n\nwhile someColors:\n    \n    aColor = someColors.pop()\n    print(convert2rgb(aColor))\n    dbColor = convert2dbColor(aColor)\n    fill(*dbColor)\n    \n    hexColor = convert2hex(aColor)\n
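The Osmxml.finish() method above stitches the sequential-mode output back together by copying the way and relation buffers into the node file in 50 MB reads before renaming the result. A minimal standalone sketch of that chunked-concatenation pattern, using hypothetical file names rather than the filename-plus-suffix paths the class derives:

import os

def concat_files(dest_path, part_paths, chunk_size=50000000):
    # append each part onto dest_path in fixed-size chunks, then delete it,
    # mirroring the merge loop in Osmxml.finish()
    with open(dest_path, 'a') as dest:
        for part_path in part_paths:
            with open(part_path) as part:
                data = part.read(chunk_size)
                while data:
                    dest.write(data)
                    data = part.read(chunk_size)
            os.remove(part_path)

# hypothetical usage echoing the '_nodes'/'_ways'/'_relations' suffixes:
# concat_files('out.osm_nodes', ['out.osm_ways', 'out.osm_relations'])
# os.rename('out.osm_nodes', 'out.osm')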
rgbColor = convert2rgb(aColor)\n ratioColor = convert2ratio(aColor)\n \n fill(*dbColor)\n rect(20,(height()-40)-ii,50,20)\n \n \n fill(0)\n \n text(str(rgbColor),(160,(height()-margin*2)-ii))\n text(str(ratioColor),(260,(height()-margin*2)-ii))\n text(hexColor,(90,(height()-margin*2)-ii))\n \n ii+=25 \n \n if ii>height()-margin*3:\n newPage('A4')\n ii=0\n \n \n \n\n\n\n\n","sub_path":"tabella/prova-tabella.py","file_name":"prova-tabella.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"291782404","text":"import pyram\nimport cPickle as pickle\nimport hadoopy\nimport zlib\nimport sklearn.svm\nimport sklearn.pls\nimport sklearn.metrics\nimport sklearn.cross_validation\nimport sklearn.grid_search\nimport sklearn.pipeline\nimport numpy as np\nimport snappy\n\n\ndef train(classifier_name, classifier_extra, label_values):\n import classipy\n label_values = list(label_values)\n hadoopy.counter('FeatureShape', str(len(label_values[0][1])))\n if classifier_name == 'svmlinear':\n return classipy.SVMLinear(options={'B': '1'}).train(label_values)\n elif classifier_name == 'svm':\n return classipy.SVM(options={'t': '2'}).train(label_values)\n elif classifier_name == 'svm_hik':\n return classipy.SVMScikit(kernel=classipy.kernels.histogram_intersection).train(label_values)\n elif classifier_name == 'svmlinear_autotune':\n\n def wrapped_optimizer(*args, **kw):\n for x in pyram.exponential_grid(*args, **kw):\n hadoopy.counter('X-Val', 'Rounds')\n yield x\n b = classipy.select_parameters(classipy.SVMLinear, label_values,\n {'c': (10**-2, 10**1, 10)},\n wrapped_optimizer,\n options={'B': '1'})[1]\n print(b)\n return classipy.SVMLinear(b).train(label_values)\n elif classifier_name == 'plslinearsvmxval':\n num_dims = label_values[0][1].size\n # Set the parameters by cross-validation\n #,'pls__n_components': [x for x in [1, 8, 16, 32, 64, 128, 256] if x <= num_dims]\n #('pls', sklearn.pls.PLSRegression(n_components=0)),\n tuned_parameters = [{'svm__C': [.001, .01, .1, 1, 10, 100]}]\n p = sklearn.pipeline.Pipeline([('svm', sklearn.svm.SVC(kernel=classipy.kernels.histogram_intersection, scale_C=True))]) # was cls\n #p = sklearn.grid_search.GridSearchCV(cls, tuned_parameters, score_func=sklearn.metrics.f1_score)\n num_neg = 0\n num_pos = 0\n import random\n random.shuffle(label_values)\n new_label_values = []\n for l, v in label_values:\n if l == 1:\n if num_pos < 100:\n new_label_values.append((l, v))\n num_pos += 1\n else:\n if num_neg < 100:\n new_label_values.append((l, v))\n num_neg += 1\n import sys\n sys.stderr.write('Num Neg[%d] Pos[%d]\\n' % (num_neg, num_pos))\n p.fit(*zip(*new_label_values)[::-1])\n return p # p.best_estimator_\n else:\n raise ValueError('Unknown classifier [%s]' % classifier_name)\n\n\ndef dumps(classifier_name, classifier_extra, classifier):\n return snappy.compress(pickle.dumps({'classifier_name': classifier_name,\n 'classifier_extra': classifier_extra,\n 'classifier': classifier}, -1))\n\n\ndef loads(classifier_ser):\n d = pickle.loads(snappy.decompress(classifier_ser))\n if d['classifier_name'] == 'plslinearsvmxval':\n def decision_function(x):\n for step_name, step in d['classifier'].steps[:-1]:\n x = step.transform(x)\n return d['classifier'].steps[-1][1].decision_function(x)\n d['classifier'].decision_function = decision_function\n return 
d['classifier']\n","sub_path":"picarus/_classifiers.py","file_name":"_classifiers.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"388728395","text":"# encoding: UTF-8\r\n\r\n# 从tdx下载期货数据.\r\n# 收盘后的数据基本正确, 但盘中实时拿数据时:\r\n# 1. 1Min的Bar可能不是最新的, 会缺几分钟.\r\n# 2. 当周期>1Min时, 最后一根Bar可能不是完整的, 强制修改后\r\n# - 5min修改后freq基本正确\r\n# - 1day在VNPY合成时不关心已经收到多少Bar, 所以影响也不大\r\n# - 但其它分钟周期因为不好精确到每个品种, 修改后的freq可能有错\r\n\r\nfrom datetime import datetime, timezone, timedelta, time\r\nimport sys\r\nimport requests\r\nimport execjs\r\nimport traceback\r\nfrom vnpy.trader.app.ctaStrategy.ctaBase import CtaBarData\r\nfrom pytdx.exhq import TdxExHq_API\r\nfrom pytdx.params import TDXParams\r\nfrom vnpy.trader.vtFunction import getJsonPath\r\nfrom vnpy.trader.vtGlobal import globalSetting\r\nfrom vnpy.trader.vtObject import VtErrorData\r\nimport json\r\nimport pandas as pd\r\n\r\nIP_LIST = [{'ip': '112.74.214.43', 'port': 7727},\r\n {'ip': '59.175.238.38', 'port': 7727},\r\n {'ip': '124.74.236.94', 'port': 7721},\r\n {'ip': '218.80.248.229', 'port': 7721},\r\n {'ip': '124.74.236.94', 'port': 7721},\r\n {'ip': '58.246.109.27', 'port': 7721}\r\n ]\r\n\r\n# 通达信 K 线种类\r\n# 0 - 5 分钟K 线\r\n# 1 - 15 分钟K 线\r\n# 2 - 30 分钟K 线\r\n# 3 - 1 小时K 线\r\n# 4 - 日K 线\r\n# 5 - 周K 线\r\n# 6 - 月K 线\r\n# 7 - 1 分钟\r\n# 8 - 1 分钟K 线\r\n# 9 - 日K 线\r\n# 10 - 季K 线\r\n# 11 - 年K 线\r\nPERIOD_MAPPING = {}\r\nPERIOD_MAPPING['1min'] = 8\r\nPERIOD_MAPPING['5min'] = 0\r\nPERIOD_MAPPING['15min'] = 1\r\nPERIOD_MAPPING['30min'] = 2\r\nPERIOD_MAPPING['1hour'] = 3\r\nPERIOD_MAPPING['1day'] = 4\r\nPERIOD_MAPPING['1week'] = 5\r\nPERIOD_MAPPING['1month'] = 6\r\n\r\n# 每个周期包含多少分钟 (估算值, 没考虑夜盘和10:15的影响)\r\nNUM_MINUTE_MAPPING = {}\r\nNUM_MINUTE_MAPPING['1min'] = 1\r\nNUM_MINUTE_MAPPING['5min'] = 5\r\nNUM_MINUTE_MAPPING['15min'] = 15\r\nNUM_MINUTE_MAPPING['30min'] = 30\r\nNUM_MINUTE_MAPPING['1hour'] = 60\r\nNUM_MINUTE_MAPPING['1day'] = 60*24\r\nNUM_MINUTE_MAPPING['1week'] = 60*24*7\r\nNUM_MINUTE_MAPPING['1month'] = 60*24*7*30\r\n\r\n# 常量\r\nQSIZE = 500\r\nALL_MARKET_BEGIN_HOUR = 8\r\nALL_MARKET_END_HOUR = 16\r\n\r\nclass TdxFutureData(object):\r\n\r\n api = None\r\n connection_status = False # 连接状态\r\n symbol_exchange_dict = {} # tdx合约与vn交易所的字典\r\n symbol_market_dict = {} # tdx合约与tdx市场的字典\r\n\r\n # ----------------------------------------------------------------------\r\n def __init__(self, strategy):\r\n \"\"\"\r\n 构造函数\r\n :param strategy: 上层策略,主要用与使用strategy.writeCtaLog()\r\n \"\"\"\r\n self.strategy = strategy\r\n\r\n self.connect()\r\n\r\n def connect(self):\r\n \"\"\"\r\n 连接API\r\n :return:\r\n \"\"\"\r\n\r\n # 创建api连接对象实例\r\n try:\r\n if self.api is None or self.connection_status == False:\r\n self.strategy.writeCtaLog(u'开始连接通达信行情服务器')\r\n TdxFutureData.api = TdxExHq_API(heartbeat=True, auto_retry=True, raise_exception=True)\r\n\r\n # 选取最佳服务器\r\n self.best_ip = self.select_best_ip()\r\n\r\n self.api.connect(self.best_ip['ip'], self.best_ip['port'])\r\n # 尝试获取市场合约统计\r\n c = self.api.get_instrument_count()\r\n if c < 10:\r\n err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip['ip'], self.best_ip['port'])\r\n self.strategy.writeCtaError(err_msg)\r\n else:\r\n self.strategy.writeCtaLog(u'创建tdx连接, IP: {}/{}'.format(self.best_ip['ip'], self.best_ip['port']))\r\n # print(u'创建tdx连接, IP: {}/{}'.format(self.best_ip['ip'], self.best_ip['port']))\r\n TdxFutureData.connection_status = True\r\n\r\n # 更新 symbol_exchange_dict , symbol_market_dict\r\n self.qryInstrument()\r\n except 
Exception as ex:\r\n self.strategy.writeCtaLog(u'连接服务器tdx异常:{},{}'.format(str(ex), traceback.format_exc()))\r\n return\r\n\r\n # ----------------------------------------------------------------------\r\n def ping(self, ip, port=7709):\r\n \"\"\"\r\n ping行情服务器\r\n :param ip:\r\n :param port:\r\n :param type_:\r\n :return:\r\n \"\"\"\r\n apix = TdxExHq_API()\r\n __time1 = datetime.now()\r\n try:\r\n with apix.connect(ip, port):\r\n if apix.get_instrument_count() > 10000:\r\n _timestamp = datetime.now() - __time1\r\n self.strategy.writeCtaLog('服务器{}:{},耗时:{}'.format(ip,port,_timestamp))\r\n return _timestamp\r\n else:\r\n self.strategy.writeCtaLog(u'该服务器IP {}无响应'.format(ip))\r\n return timedelta(9, 9, 0)\r\n except:\r\n self.strategy.writeCtaError(u'tdx ping服务器,异常的响应{}'.format(ip))\r\n return timedelta(9, 9, 0)\r\n\r\n # ----------------------------------------------------------------------\r\n def select_best_ip(self):\r\n \"\"\"\r\n 选择行情服务器\r\n :return:\r\n \"\"\"\r\n self.strategy.writeCtaLog(u'选择通达信行情服务器')\r\n\r\n data_future = [self.ping(x['ip'], x['port']) for x in IP_LIST]\r\n\r\n best_future_ip = IP_LIST[data_future.index(min(data_future))]\r\n\r\n self.strategy.writeCtaLog(u'选取 {}:{}'.format(best_future_ip['ip'], best_future_ip['port']))\r\n # print(u'选取 {}:{}'.format(best_future_ip['ip'], best_future_ip['port']))\r\n return best_future_ip\r\n\r\n # ----------------------------------------------------------------------\r\n def qryInstrument(self):\r\n \"\"\"\r\n 查询/更新合约信息\r\n :return:\r\n \"\"\"\r\n if not self.connection_status:\r\n return\r\n\r\n if self.api is None:\r\n self.strategy.writeCtaLog(u'取不到api连接,更新合约信息失败')\r\n # print(u'取不到api连接,更新合约信息失败')\r\n return\r\n\r\n # 取得所有的合约信息\r\n num = self.api.get_instrument_count()\r\n if not isinstance(num,int):\r\n return\r\n\r\n all_contacts = sum([self.api.get_instrument_info((int(num / 500) - i) * 500, 500) for i in range(int(num / 500) + 1)],[])\r\n #[{\"category\":category,\"market\": int,\"code\":sting,\"name\":string,\"desc\":string},{}]\r\n\r\n # 对所有合约处理,更新字典 指数合约-tdx市场,指数合约-交易所\r\n for tdx_contract in all_contacts:\r\n tdx_symbol = tdx_contract.get('code', None)\r\n if tdx_symbol is None:\r\n continue\r\n tdx_market_id = tdx_contract.get('market')\r\n if tdx_market_id == 47: # 中金所\r\n TdxFutureData.symbol_exchange_dict.update({tdx_symbol: 'CFFEX'})\r\n TdxFutureData.symbol_market_dict.update({tdx_symbol:tdx_market_id})\r\n elif tdx_market_id == 28: # 郑商所\r\n TdxFutureData.symbol_exchange_dict.update({tdx_symbol: 'CZCE'})\r\n TdxFutureData.symbol_market_dict.update({tdx_symbol:tdx_market_id})\r\n elif tdx_market_id == 29: # 大商所\r\n TdxFutureData.symbol_exchange_dict.update({tdx_symbol: 'DCE'})\r\n TdxFutureData.symbol_market_dict.update({tdx_symbol:tdx_market_id})\r\n elif tdx_market_id == 30: # 上期所+能源\r\n TdxFutureData.symbol_exchange_dict.update({tdx_symbol: 'SHFE'})\r\n TdxFutureData.symbol_market_dict.update({tdx_symbol:tdx_market_id})\r\n\r\n # ----------------------------------------------------------------------\r\n def get_bars(self, symbol, period, callback, bar_is_completed=False, bar_freq=1, start_dt=None):\r\n \"\"\"\r\n 返回k线数据\r\n symbol:合约\r\n period: 周期: 1min,3min,5min,15min,30min,1day,3day,1hour,2hour,4hour,6hour,12hour\r\n \"\"\"\r\n\r\n ret_bars = []\r\n tdx_symbol = symbol.upper().replace('_' , '')\r\n tdx_symbol = tdx_symbol.replace('99' , 'L9')\r\n if tdx_symbol not in self.symbol_exchange_dict.keys():\r\n self.strategy.writeCtaError(u'{} 合约{}/{}不在下载清单中: {}'.format(datetime.now(), symbol, tdx_symbol, 
self.symbol_exchange_dict.keys()))\r\n # print(u'{} 合约{}/{}不在下载清单中: {}'.format(datetime.now(), symbol, tdx_symbol, self.symbol_exchange_dict.keys()))\r\n return False,ret_bars\r\n if period not in PERIOD_MAPPING.keys():\r\n self.strategy.writeCtaError(u'{} 周期{}不在下载清单中: {}'.format(datetime.now(), period, list(PERIOD_MAPPING.keys())))\r\n # print(u'{} 周期{}不在下载清单中: {}'.format(datetime.now(), period, list(PERIOD_MAPPING.keys())))\r\n return False,ret_bars\r\n if self.api is None:\r\n return False,ret_bars\r\n\r\n tdx_period = PERIOD_MAPPING.get(period)\r\n\r\n if start_dt is None:\r\n self.strategy.writeCtaLog(u'没有设置开始时间,缺省为10天前')\r\n qry_start_date = datetime.now() - timedelta(days=10)\r\n else:\r\n qry_start_date = start_dt\r\n end_date = datetime.combine(datetime.now() + timedelta(days=1),time(ALL_MARKET_END_HOUR, 0))\r\n if qry_start_date > end_date:\r\n qry_start_date = end_date\r\n self.strategy.writeCtaLog('{}开始下载tdx:{} {}数据, {} to {}.'.format(datetime.now(), tdx_symbol, tdx_period, qry_start_date, end_date))\r\n # print('{}开始下载tdx:{} {}数据, {} to {}.'.format(datetime.now(), tdx_symbol, tdx_period, last_date, end_date))\r\n\r\n try:\r\n _start_date = end_date\r\n _bars = []\r\n _pos = 0\r\n while _start_date > qry_start_date:\r\n _res = self.api.get_instrument_bars(\r\n PERIOD_MAPPING[period],\r\n self.symbol_market_dict[tdx_symbol],\r\n tdx_symbol,\r\n _pos,\r\n QSIZE)\r\n if _res is not None:\r\n _bars = _res + _bars\r\n _pos += QSIZE\r\n if _res is not None and len(_res) > 0:\r\n _start_date = _res[0]['datetime']\r\n _start_date = datetime.strptime(_start_date, '%Y-%m-%d %H:%M')\r\n self.strategy.writeCtaLog(u'分段取数据开始:{}'.format(_start_date))\r\n else:\r\n break\r\n if len(_bars) == 0:\r\n self.strategy.writeCtaError('{} Handling {}, len1={}..., continue'.format(\r\n str(datetime.now()), tdx_symbol, len(_bars)))\r\n return False, ret_bars\r\n\r\n current_datetime = datetime.now()\r\n data = self.api.to_df(_bars)\r\n data = data.assign(datetime=pd.to_datetime(data['datetime']))\r\n data = data.assign(ticker=symbol)\r\n data['instrument_id'] = data['ticker']\r\n # if future['market'] == 28 or future['market'] == 47:\r\n # # 大写字母: 郑州商品 or 中金所期货\r\n # data['instrument_id'] = data['ticker']\r\n # else:\r\n # data['instrument_id'] = data['ticker'].apply(lambda x: x.lower())\r\n\r\n data['symbol'] = symbol\r\n data = data.drop(\r\n ['year', 'month', 'day', 'hour', 'minute', 'price', 'amount', 'ticker'],\r\n errors='ignore',\r\n axis=1)\r\n data = data.rename(\r\n index=str,\r\n columns={\r\n 'position': 'open_interest',\r\n 'trade': 'volume',\r\n })\r\n if len(data) == 0:\r\n print('{} Handling {}, len2={}..., continue'.format(\r\n str(datetime.now()), tdx_symbol, len(data)))\r\n return False, ret_bars\r\n\r\n data['total_turnover'] = data['volume']\r\n data[\"limit_down\"] = 0\r\n data[\"limit_up\"] = 999999\r\n data['trading_date'] = data['datetime']\r\n data['trading_date'] = data['trading_date'].apply(lambda x: (x.strftime('%Y-%m-%d')))\r\n monday_ts = data['datetime'].dt.weekday == 0 # 星期一\r\n night_ts1 = data['datetime'].dt.hour > ALL_MARKET_END_HOUR\r\n night_ts2 = data['datetime'].dt.hour < ALL_MARKET_BEGIN_HOUR\r\n data.loc[night_ts1, 'datetime'] -= timedelta(days=1) # 所有日期的夜盘(21:00~24:00), 减一天\r\n monday_ts1 = monday_ts & night_ts1 # 星期一的夜盘(21:00~24:00), 再减两天\r\n data.loc[monday_ts1, 'datetime'] -= timedelta(days=2)\r\n monday_ts2 = monday_ts & night_ts2 # 星期一的夜盘(00:00~04:00), 再减两天\r\n data.loc[monday_ts2, 'datetime'] -= timedelta(days=2)\r\n # data['datetime'] -= 
timedelta(minutes=1) # 直接给Strategy使用, RiceQuant格式, 不需要减1分钟\r\n data['dt_datetime'] = data['datetime']\r\n data['date'] = data['datetime'].apply(lambda x: (x.strftime('%Y-%m-%d')))\r\n data['time'] = data['datetime'].apply(lambda x: (x.strftime('%H:%M:%S')))\r\n data['datetime'] = data['datetime'].apply(lambda x: float(x.strftime('%Y%m%d%H%M%S')))\r\n data = data.set_index('dt_datetime', drop=False)\r\n # data = data[int(last_date.strftime('%Y%m%d%H%M%S')):int(end_date.strftime('%Y%m%d%H%M%S'))]\r\n # data = data[str(last_date):str(end_date)]\r\n\r\n for index, row in data.iterrows():\r\n add_bar = CtaBarData()\r\n try:\r\n add_bar.vtSymbol = row['symbol']\r\n add_bar.symbol = row['symbol']\r\n add_bar.datetime = index\r\n add_bar.date = row['date']\r\n add_bar.time = row['time']\r\n add_bar.tradingDay = row['trading_date']\r\n add_bar.open = float(row['open'])\r\n add_bar.high = float(row['high'])\r\n add_bar.low = float(row['low'])\r\n add_bar.close = float(row['close'])\r\n add_bar.volume = float(row['volume'])\r\n except Exception as ex:\r\n self.strategy.writeCtaError('error when convert bar:{},ex:{},t:{}'.format(row, str(ex), traceback.format_exc()))\r\n # print('error when convert bar:{},ex:{},t:{}'.format(row, str(ex), traceback.format_exc()))\r\n return False\r\n\r\n if start_dt is not None and index < start_dt:\r\n continue\r\n ret_bars.append(add_bar)\r\n\r\n if callback is not None:\r\n freq = bar_freq\r\n bar_is_completed = True\r\n if period != '1min' and index == data['dt_datetime'][-1]:\r\n # 最后一个bar,可能是不完整的,强制修改\r\n # - 5min修改后freq基本正确\r\n # - 1day在VNPY合成时不关心已经收到多少Bar, 所以影响也不大\r\n # - 但其它分钟周期因为不好精确到每个品种, 修改后的freq可能有错\r\n if index > current_datetime:\r\n bar_is_completed = False\r\n # 根据秒数算的话,要+1,例如13:31,freq=31,第31根bar\r\n freq = NUM_MINUTE_MAPPING[period] - int((index - current_datetime).total_seconds() / 60)\r\n callback(add_bar, bar_is_completed, freq)\r\n\r\n return True,ret_bars\r\n except Exception as ex:\r\n self.strategy.writeCtaError('exception in get:{},{},{}'.format(tdx_symbol,str(ex), traceback.format_exc()))\r\n # print('exception in get:{},{},{}'.format(tdx_symbol,str(ex), traceback.format_exc()))\r\n self.strategy.writeCtaLog(u'重置连接')\r\n TdxFutureData.api = None\r\n self.connect()\r\n return False,ret_bars\r\n\r\n\r\nif __name__ == \"__main__\":\r\n class T(object):\r\n\r\n def writeCtaError(self,content):\r\n print(content,file=sys.stderr)\r\n\r\n def writeCtaLog(self,content):\r\n print(content)\r\n\r\n def display_bar(self,bar, bar_is_completed=True, freq=1):\r\n print(u'{} {}'.format(bar.vtSymbol,bar.datetime))\r\n\r\n t1 = T()\r\n t2 = T()\r\n # 创建API对象\r\n api_01 = TdxFutureData(t1)\r\n\r\n # 获取历史分钟线\r\n api_01.get_bars('rb1905', period='5min', callback=t1.display_bar)\r\n # api.get_bars(symbol, period='5min', callback=display_bar)\r\n # api_01.get_bars('IF99', period='1day', callback=t1.display_bar)\r\n\r\n # 测试单实例\r\n # api_02 = TdxFutureData(t2)\r\n #api_02.get_bars('IF99', period='1min', callback=t1.display_bar)","sub_path":"vnpy/data/tdx/tdx_future_data.py","file_name":"tdx_future_data.py","file_ext":"py","file_size_in_byte":17112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"411903034","text":"#!/usr/bin/env python\n\"\"\"prtg_wan_1: module called query PRTG API for bandwidth statistics\n\nNOTES:\n* PRTG API is GET only and doesn't use sessions, so we pass the username and passhash on each connection.\n* We're using the table.json function to retrieve multiple sensor values at once. 
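When the tdx module above patches up the last, possibly incomplete bar, it derives `freq` from how many minutes of the bar's period remain, via NUM_MINUTE_MAPPING. A self-contained sketch of just that arithmetic (mapping values copied from the module; the timestamps are invented):

from datetime import datetime

NUM_MINUTE_MAPPING = {'1min': 1, '5min': 5, '15min': 15, '30min': 30, '1hour': 60}

def bar_freq(period, bar_end, now):
    # a bar stamped in the future is still forming: subtract the minutes
    # it has left to run from the full period length, as get_bars() does
    if bar_end <= now:
        return NUM_MINUTE_MAPPING[period], True
    remaining = int((bar_end - now).total_seconds() / 60)
    return NUM_MINUTE_MAPPING[period] - remaining, False

# a 5-minute bar stamped 13:35 queried at 13:32 has run 2 of its 5 minutes
print(bar_freq('5min', datetime(2019, 1, 7, 13, 35), datetime(2019, 1, 7, 13, 32)))  # (2, False)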
We filter the results by\nusing a tag we've assigned to the sensors in PRTG called \"statusboard_wan.\"\n\n* The JSON output from this module is still formatted in the old iPad Statusboard app format:\n\n{\"graph\":\n {\"title\": \"WAN Bandwidth (mbps)\", \"type\": \"line\", \"refreshEveryNSeconds\": 60,\n \"datasequences\": [\n {\"title\": \"RAL\",\n \"datapoints\": [{\"title\": \"17:11\", \"value\": 0.0}, {\"title\": \"17:17\", \"value\": 0.0}, ....\n {\"title\": \"TRI\",\n \"datapoints\": [{\"title\": \"17:11\", \"value\": 0.0}, {\"title\": \"17:12\", \"value\": 0.0}, ....\n\n\"\"\"\nimport time\nimport json\nimport logging.config\nimport requests\nfrom credentials import PRTG_USERNAME\nfrom credentials import PRTG_PASSHASH\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n\n__author__ = 'scott@flakshack.com (Scott Vintinner)'\n\n# =================================SETTINGS======================================\nMAX_DATAPOINTS = 120\nSAMPLE_INTERVAL = 15\nGRAPH_TITLE = \"WAN (mbps)\"\nPRTG_AUTH = \"&username=\" + PRTG_USERNAME + \"&passhash=\" + PRTG_PASSHASH\nPRTG_URL = (\"https://prtg/api/table.json?\" +\n \"content=sensors&columns=objid,lastvalue&filter_tags=@tag(statusboard_wan1)\")\n\n# #### PRTG SENSORS #####\n# The sensors are retrieved by a tag filter, but we'll use this list to help name them.\n# id: This is the PRTG sensor ID (displayed on the sensor web page or URL)\n# name: This is the name of the device as it will appear on the graph\nPRTG_SENSORS = (\n {\"objid\": 7472, \"name\": \"CLT\"},\n {\"objid\": 7766, \"name\": \"TRI\"},\n {\"objid\": 7297, \"name\": \"RH\"},\n {\"objid\": 7295, \"name\": \"RAL\"}\n)\n\n# ================================================================================\n\n\nclass MonitorJSON:\n \"\"\"This is a simple class passed to Monitor threads so we can access the current JSON data in that thread\"\"\"\n def __init__(self):\n self.json = output_message(\"Loading...\", \"\")\n\n\nclass PRTGSensor:\n \"\"\"We create a single object of this class and store all our data in it.\"\"\"\n all_sensors = [] # Static array containing all sensors\n\n def __init__(self, sensor_id, sensor_name):\n self.objid = sensor_id\n self.name = sensor_name\n self.datapoints = [] # Hold raw values from PRTG\n self.__class__.all_sensors.append(self) # Add self to static array\n\n\nclass PRTGPausedException(Exception):\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return repr(self.value)\n\n\nclass PRTGRequestException(Exception):\n pass\n\n\ndef output_message(message, detail):\n \"\"\"This function will output an error message formatted in JSON to display on the StatusBoard app\"\"\"\n statusbar_output = {\"graph\": {\"title\": GRAPH_TITLE, \"error\": {\"message\": message, \"detail\": detail}}}\n output = json.dumps(statusbar_output)\n return output\n\n\ndef generate_json(prtg_monitor):\n \"\"\"This function will connect to the PRTG server API and store the output in prtg_monitor.json\"\"\"\n\n logger = logging.getLogger(\"prtg_interface_1\")\n time_x_axis = time.strftime(\"%H:%M\") # The time that we'll add to the X axis of the chart\n\n # Create a list of PRTGSensors using the contants provided above (we'll store the data in this object)\n if len(PRTGSensor.all_sensors) == 0: # This is only done once\n for sensor in PRTG_SENSORS:\n PRTGSensor(sensor[\"objid\"], sensor[\"name\"])\n\n try:\n # ############ PRTG API CALL ###############\n logger.debug(\"Getting: \" + PRTG_URL)\n r = requests.get(PRTG_URL 
+ PRTG_AUTH, verify=False)\n if r.status_code != 200:\n raise PRTGRequestException(\"Request error status: \" + str(r.status_code) + \" \" + r.reason)\n api_data = r.json() # Convert returned byte stream to json\n\n # Loop through our list of sensors, match the object id with the item returned by the API call\n # and add a datapoint to our array.\n for sensor in PRTGSensor.all_sensors:\n for api_sensor in api_data[\"sensors\"]:\n if api_sensor['lastvalue'] == '-':\n raise PRTGPausedException(sensor.name)\n if sensor.objid == api_sensor[\"objid\"]:\n # The lastvalue_raw field is the bps divided by 8. It doesn't make any sense\n # why they store it this way instead of just storing the bits. Seriously, WTF?\n # The conversion from raw to mbps is lastvalue_raw*8/(1000*1000)\n # Note we're using SI (decimal) notation here because that is what PRTG uses.\n mbps = round(((api_sensor[\"lastvalue_raw\"])*8)/1000000, 2) # Convert from PRTG raw to mbps\n sensor.datapoints.append({\"title\": time_x_axis, \"value\": mbps})\n\n # If we already have the max number of datapoints, delete the oldest item.\n if len(sensor.datapoints) >= MAX_DATAPOINTS:\n del(sensor.datapoints[0])\n\n # #### Format the JSON data that is expected by the javascript front-end #####\n statusbar_datasequences = []\n for sensor in PRTGSensor.all_sensors:\n statusbar_datasequences.append({\"title\": sensor.name, \"datapoints\": sensor.datapoints})\n\n statusbar_graph = {\n \"title\": GRAPH_TITLE, \"type\": \"line\",\n \"refreshEveryNSeconds\": SAMPLE_INTERVAL,\n \"datasequences\": statusbar_datasequences\n }\n statusbar_type = {\"graph\": statusbar_graph}\n prtg_monitor.json = json.dumps(statusbar_type)\n\n except PRTGPausedException as sensor_name:\n\n logger.error(\"PRTG sensor \" + str(sensor_name) + \" is paused\")\n prtg_monitor.json = output_message(\"PRTG sensor \" + str(sensor_name) + \" is paused\",'')\n PRTGSensor.all_sensors = [] # Reset the saved data\n\n except Exception as error:\n logger.error(\"Error getting data from PRTG: \" + str(error))\n prtg_monitor.json = output_message(\"Error getting data from PRTG\", str(error))\n PRTGSensor.all_sensors = [] # Reset the saved data\n\n logger.debug(prtg_monitor.json)\n\n\n# ======================================================\n# __main__\n#\n# If you run this module by itself, it will instantiate\n# the MonitorJSON class and start an infinite loop\n# printing data.\n# ======================================================\n#\nif __name__ == '__main__':\n\n # When run by itself, we need to create the logger object (which is normally created in webserver.py)\n try:\n f = open(\"log_settings.json\", 'rt')\n log_config = json.load(f)\n f.close()\n logging.config.dictConfig(log_config)\n except FileNotFoundError as e:\n print(\"Log configuration file not found: \" + str(e))\n logging.basicConfig(level=logging.DEBUG) # fallback to basic settings\n except json.decoder.JSONDecodeError as e:\n print(\"Error parsing logger config file: \" + str(e))\n raise\n\n monitor = MonitorJSON()\n while True:\n main_logger = logging.getLogger(__name__)\n generate_json(monitor)\n # Wait X seconds for the next iteration\n main_logger.debug(\"Waiting for \" + str(SAMPLE_INTERVAL) + \" seconds\")\n time.sleep(SAMPLE_INTERVAL)\n","sub_path":"prtg_interface_1.py","file_name":"prtg_interface_1.py","file_ext":"py","file_size_in_byte":7519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"377905710","text":"# -*- coding: utf-8 -*-\nfrom __future__ 
import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport six\nfrom ..config import ERROR\n\n\n# Module API\n\ndef cast_boolean(format, value):\n if not isinstance(value, bool):\n if not isinstance(value, six.string_types):\n return ERROR\n value = value.strip().lower()\n if value in _TRUE_VALUES:\n value = True\n elif value in _FALSE_VALUES:\n value = False\n else:\n return ERROR\n return value\n\n\n# Internal\n\n_TRUE_VALUES = ['yes', 'y', 'true', 't', '1']\n_FALSE_VALUES = ['no', 'n', 'false', 'f', '0']\n","sub_path":"tableschema/types/boolean.py","file_name":"boolean.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"151738028","text":"import lxml.html as LH\nimport pandas as pd\nimport warnings\nwarnings.filterwarnings('ignore')\n\nshoes = pd.read_html('http://www.shoemetro.com/t-shoe-size-chart.aspx')\n#print(shoes)\n\nw_shoes = shoes[2]\nm_shoes = shoes[3]\n\nw_columns = {\n \"size_column1\": [ w_shoes[0] ], #US\n \"size_column2\": [ w_shoes[1] ], #Euro\n \"size_column3\": [ w_shoes[2] ], #UK\n \"size_column_in\": [ w_shoes[3] ], #Inches\n \"size_column_cm\": [ w_shoes[4] ] #CM\n}\n\nm_columns = {\n \"size_column1\": [ m_shoes[0] ], #US\n \"size_column2\": [ m_shoes[1] ], #Euro\n \"size_column3\": [ m_shoes[2] ], #UK\n \"size_column_in\": [ m_shoes[3] ], #Inches\n \"size_column_cm\": [ m_shoes[4] ] #CM\n}\n\nconts = {\n \"euro\": ['germany', 'italy', 'france', 'switzerland', 'poland', 'netherlands', 'the netherlands', 'ukraine', 'greece', 'austria', 'sweden', 'norway', 'malta', 'czech republic', 'belgium', 'iceland', 'finland', 'croatia', 'cyprus', 'romania', 'hungary', 'denmark', 'bulgaria', 'luxembourg', 'monaco', 'slovenia', 'serbia', 'vatican city', 'albania', 'lithuania', 'belarus', 'montenegro', 'estonia', 'moldova', 'slovakia', 'bosnia', 'herzegovina', 'kosovo', 'latvia', 'san marino', 'macedonia', 'liechtenstein', 'andorra', 'gibraltar', 'faroe islands', 'isle of man', 'jersey', 'svalbard', 'jan mayen', 'aland islands'],\n \"uk\": ['great britain', 'ireland', 'scotland', 'wales'],\n \"usa\": ['usa', 'us', 'u.s.', 'u.s.a.', 'america', 'north america']\n}\n\ngender = input(\"Are you a male or female? \")\ngender = gender.lower()\n\n#Male\nif gender == 'male' or gender =='m':\n home = input(\"What is your country of residence? \")\n home = home.lower()\n size = input(\"What size are you? \") \n#Female\nelif gender == 'female' or gender =='f':\n home = input(\"What is your country of residence? \")\n home = home.lower()\n size = input(\"What size are you? 
\") \nelse:\n print(\"Program has quit.\")\n\ndef get_continent_list(home, dict_of_conts):\n for key in dict_of_conts:\n if home in dict_of_conts[key]:\n return dict_of_conts[key]\n\nresult = get_continent_list(home, conts)\n#print(result)\n\n#WOMEMN\nw_shoes.columns = ['US', 'Euro', 'UK', 'Inches', 'CM']\n#MEN\nm_shoes.columns = ['US', 'Euro', 'UK', 'Inches', 'CM']\n\nprint(\"You are a size %s in the %s, which means your sizes will be: \" % (size, home.title()))\n\n#US\nif result == conts[\"usa\"]:\n chart = w_shoes[ w_shoes['US'] == size]\n print(chart) \n#Euro\nelif result == conts[\"euro\"]:\n chart = w_shoes[ w_shoes['Euro'] == size]\n print(chart)\n#UK\nelif result == conts[\"uk\"]:\n chart = w_shoes[ w_shoes['UK'] == size]\n print(chart)\nelse:\n print(\"Invalid\")\n\n#US\nif result == conts[\"usa\"]:\n chart = m_shoes[ m_shoes['US'] == size]\n print(chart) \n#Euro\nelif result == conts[\"euro\"]:\n chart = m_shoes[ m_shoes['Euro'] == size]\n print(chart)\n#UK\nelif result == conts[\"uk\"]:\n chart = m_shoes[ m_shoes['UK'] == size]\n print(chart)\nelse:\n print(\"Invalid\")\n\n\n","sub_path":"mater2.py","file_name":"mater2.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"541726135","text":"class Location:\n\n locations = []\n\n \"\"\"\n Class for locations(map)\n ========================\n Attributes:\n -----------\n - id: Integer\n - name: String\n - envType: String\n - people: Dictionnary {race: number}\n - nbPeople: Integer\n - size: Integer\n \"\"\"\n\n def __init__(self, name, envType, peoples, size):\n Location.locations += [self]\n self.id = len(Location.locations)\n self.name = name\n self.envType = envType\n self.peoples = peoples\n self.size = size\n\n def classifyPeoples(self):\n table_race = {}\n for people in self.peoples:\n if people.race in table_race:\n table_race[people.race] += 1\n else:\n table_race[people.race] = 1\n return table_race\n\n def raceRate(self):\n \"\"\"\n Rates of races in the location\n ------------------------------\n Compute the rate of all races in the location\n\n OUTPUT: Dictionnary\n \"\"\"\n table_rate = {}\n for race in self.people:\n table_rate[race.name] = self.peoples[race]/len(self.peoples)\n return table_rate\n\n\nclass World:\n\n \"\"\"\n Class for World\n ===============\n Attributes\n ----------\n - name: String\n - locations\n \"\"\"\n\n def __init__(self, name, location):\n self.name = name\n self.location = location\n\nloc1 = Location('Grand Rue', 'rue', [], 0)\nloc2 = Location('Rue de l\\'écorcheur écorché', 'rue', [], 0)\nloc3 = Location('Avenue du blob', 'rue', [], 0)\nloc4 = Location('Rue de l\\'empire des ombres', 'rue', [], 0)\nloc5 = Location('Place de la routine', 'rue', [], 0)\nloc6 = Location('Place de l\\'arbre au pendu', 'rue', [], 0)\nloc7 = Location('Impasse de la sortie', 'rue', [], 0)\nloc8 = Location('Rue Lorenzo von Matterhorn', 'rue', [], 0)\nloc = {loc1: [loc6, loc5],\n loc2: [loc6, loc4],\n loc3: [loc7, loc8, loc5],\n loc4: [loc5, loc2],\n loc5: [loc1, loc3, loc4],\n loc6: [loc1, loc2, loc8],\n loc7: [loc3, loc8],\n loc8: [loc7, loc3, loc6]}\nworld = World('le MONDE !', loc)\n","sub_path":"location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"573890484","text":"from flask_admin.contrib import sqla\nfrom secure_views import SecureModelView\n\n\nclass UserView(SecureModelView):\n\n 
can_create = True\n can_edit = True\n can_delete = True\n\n column_list = (\n 'id',\n 'username',\n 'email',\n 'password',\n 'user_group',\n 'active',\n 'birthday',\n 'gender',\n 'weight',\n 'height',\n 'last_login',\n )\n\n column_searchable_list = ['id', 'email']\n column_filters = column_list\n","sub_path":"code/classes/Kevin Pielacki/remote-server/views/user_view.py","file_name":"user_view.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"289035455","text":"#######################################################################\n# 核心业务流程相关\n#######################################################################\n\n\nimport tornado.escape\nfrom bson.objectid import ObjectId\nfrom bson.timestamp import Timestamp\n\nimport time\n\nfrom handler_api_basic import ApiBasicHandler\nimport datebase_config\nimport overall_config\nimport ws_operate\n\nCC_DEPARTMENT_NAME = overall_config.CC_DEPARTMENT_NAME\nCC_SERVICE_DEPARTMENT_NAME = overall_config.CC_SERVICE_DEPARTMENT_NAME\nCC_FOREMAN_DEPARTMENT_NAME = overall_config.CC_FOREMAN_DEPARTMENT_NAME\nSTATION_ROOT_DEPARTMENT_NAME = overall_config.STATION_ROOT_DEPARTMENT_NAME\n\ndb_work = datebase_config.work\ndb_user = datebase_config.user\ndb_department = datebase_config.department\n\n\n#########################################################################\n# document_schema = {\n# '_id': ObjectID, # key\n# 'work_id': '213124', # 单号,适于人类阅读\n# 'state': '审核中', # 状态说明,[固定值,...]\n# 'operator' : { # 当前责任人\n# 'operator_id': 'union_id', # 用户id\n# 'operator_name': 'operator_name', # 参考值,id无效时显示\n# },\n# 'department':{\n# 'department_id': 'department_id', # 当前所在部门id\n# 'department_name': 'department_name'# 参考值,id无效时显示\n# },\n# 'target': [{union_id,operator_name}], # 流转目标人{id,name}列表\n# 'message':'',\n# 'add_on':[],\n# 'title': 'title', # 工单标题\n# 'form': [ # 表单内容,列表,按版本排序\n# [{'key': '字段头', 'value': '字段内容'}]\n# ],\n# 'flow': [{ # 流程记录列表,记录每个动作\n# operator:{\n# 'operator_id': '',\n# 'operator_name': ''\n# },\n# 'department':{\n# 'department_id': '',\n# 'department_name': ''\n# },\n# 'action': '', # 执行动作,[固定值,...]\n# 'action_target': [],\n# 'time': Timestamp, # 执行时间\n# 'form_version': '', # 工单内容版本序号,对应form\n# 'message': '', # 留言内容\n# 'add_on': [] # 留言附件url列表\n# }]\n# }\n#########################################################################\n\nclass Work(object):\n def __init__(self):\n self._id = ''\n self.work_id = ''\n self.state = ''\n self.operator = {}\n self.department = {}\n self.target = []\n self.message = ''\n self.add_on = []\n self.title = ''\n self.form = []\n self.flow = []\n\n def load(self, work):\n self._id = work.get('_id')\n self.work_id = work.get('work_id')\n self.state = work.get('state')\n self.operator = work.get('operator')\n self.department = work.get('department')\n self.target = work.get('target')\n self.message = work.get('message')\n self.add_on = work.get('add_on')\n self.title = work.get('title')\n self.form = work.get('form')\n self.flow = work.get('flow')\n\n def output(self):\n data = {\n '_id': self._id,\n 'work_id': self.work_id,\n 'state': self.state,\n 'operator': self.operator,\n 'department': self.department,\n 'target': self.target,\n 'message': self.message,\n 'add_on': self.add_on,\n 'title': self.title,\n 'form': self.form,\n 'flow': self.flow\n }\n return data\n\n def output_summary(self):\n # 输出摘要:\n # 包含最新版工单内容、主要流程记录[创建,派发,答复,结束]\n flow_summary = []\n for index, step in enumerate(self.flow):\n if 
step.get('action') in ['创建', '答复', '结束']:\n flow_summary.append(step)\n elif step.get('action') == '派发':\n if index + 1 == len(self.flow):\n flow_summary.append(step)\n break\n elif self.flow[index + 1]['action'] == '接收':\n flow_summary.append(step)\n\n data = {\n '_id': self._id,\n 'work_id': self.work_id,\n 'state': self.state,\n 'operator': self.operator,\n 'department': self.department,\n 'title': self.title,\n 'form': self.form[-1],\n 'flow': flow_summary\n }\n return data\n\n def create(self, title, form, union_id, department_id):\n # 创建工单\n\n # union_id:用户id\n # department_id:部门id\n\n self.title = title\n self.form.append(form)\n self.work_id = int(time.time())\n self.state = '待审核'\n self.operator = {'operator_id': union_id,\n 'operator_name': get_user_name_by_union_id(union_id)}\n self.department = {'department_id': department_id,\n 'department_name': get_department_name_by_department_id(department_id)}\n self.target = get_call_center_foreman_list()\n self.flow.append({\n 'operator': self.operator,\n 'department': self.department,\n 'action': '创建', # 执行动作,[固定值,...]\n 'action_target': [],\n 'time': int(time.time()), # 执行时间\n 'form_version': 0, # 工单内容版本序号,对应form\n 'message': '', # 留言内容\n 'add_on': [] # 留言附件url列表\n })\n\n def send(self, target_list, msg, add_on_list):\n self.state = '处理中'\n self.target = target_list\n self.message = msg\n self.add_on = add_on_list\n self.flow.append({\n 'operator': self.operator,\n 'department': self.department,\n 'action': '派发', # 执行动作,[固定值,...]\n 'action_target': self.target,\n 'time': int(time.time()), # 执行时间\n 'form_version': len(self.form) - 1, # 工单内容版本序号,对应form\n 'message': self.message,\n 'add_on': self.add_on\n })\n\n def cancel_sending(self):\n self.state = '处理中'\n self.target = []\n self.flow[-1]['message'] = ''\n self.flow[-1]['add_on'] = []\n self.flow.append({\n 'operator': self.operator,\n 'department': self.department,\n 'action': '撤回', # 执行动作,[固定值,...]\n 'action_target': [],\n 'time': int(time.time()), # 执行时间\n 'form_version': len(self.form) - 1, # 工单内容版本序号,对应form\n 'message': '',\n 'add_on': []\n })\n\n def refuse(self, union_id, department_id, msg):\n self.state = '处理中'\n self.target = []\n self.flow[-1]['message'] = ''\n self.flow[-1]['add_on'] = []\n self.flow.append({\n 'operator': {'operator_id': union_id,\n 'operator_name': get_user_name_by_union_id(union_id)},\n 'department': {'department_id': department_id,\n 'department_name': get_department_name_by_department_id(department_id)},\n 'action': '拒绝',\n 'action_target': [],\n 'time': int(time.time()),\n 'form_version': len(self.form) - 1, # 工单内容版本序号,对应form\n 'message': msg,\n 'add_on': []\n })\n\n def receive(self, union_id, department_id):\n self.state = '处理中'\n self.operator = {'operator_id': union_id,\n 'operator_name': get_user_name_by_union_id(union_id)}\n self.department = {'department_id': department_id,\n 'department_name': get_department_name_by_department_id(department_id)}\n self.target = []\n self.message = ''\n self.add_on = []\n self.flow.append({\n 'operator': self.operator,\n 'department': self.department,\n 'action': '接收', # 执行动作,[固定值,...]\n 'action_target': [],\n 'time': int(time.time()), # 执行时间\n 'form_version': len(self.form) - 1, # 工单内容版本序号,对应form\n 'message': '',\n 'add_on': []\n })\n\n def answer(self, union_id, department_id, msg, add_on_list):\n self.state = '已答复'\n self.operator = {}\n self.target = get_call_center_foreman_list()\n self.flow.append({\n 'operator': {'operator_id': union_id,\n 'operator_name': get_user_name_by_union_id(union_id)},\n 
'department': {'department_id': department_id,\n 'department_name': get_department_name_by_department_id(department_id)},\n 'action': '答复', # 执行动作,[固定值,...]\n 'action_target': [],\n 'time': int(time.time()), # 执行时间\n 'form_version': len(self.form) - 1, # 工单内容版本序号,对应form\n 'message': msg,\n 'add_on': add_on_list\n })\n\n def update_form(self, msg):\n self.form.append(msg)\n self.flow.append({\n 'operator': self.operator,\n 'action': '修改', # 执行动作,[固定值,...]\n 'action_target': [],\n 'time': int(time.time()), # 执行时间\n 'form_version': len(self.form) - 1, # 工单内容版本序号,对应form\n 'message': '',\n 'add_on': []\n })\n\n def finish(self, union_id, department_id, ):\n self.state = '已结束'\n self.operator = {}\n self.target = []\n self.message = ''\n self.add_on = []\n self.flow.append({\n 'operator': {'operator_id': union_id,\n 'operator_name': get_user_name_by_union_id(union_id)},\n 'department': {'department_id': department_id,\n 'department_name': get_department_name_by_department_id(department_id)},\n 'action': '结束', # 执行动作,[固定值,...]\n 'action_target': [],\n 'time': int(time.time()), # 执行时间\n 'form_version': len(self.form) - 1, # 工单内容版本序号,对应form\n 'message': '',\n 'add_on': []\n })\n\n def stop(self):\n # 中断流转,返回中心重新处理\n pass\n\n\nclass CreateWorkOrderHandler(ApiBasicHandler):\n # 创建工单\n def post(self):\n data = tornado.escape.json_decode(self.request.body)\n title = data.get('title'),\n form = data.get('form'),\n\n work = Work()\n work.create(title=title,\n form=form,\n union_id=self.union_id,\n department_id=self.department_id)\n new_data = work.output()\n\n _id = db_work.insert(new_data)\n if _id:\n ws_operate.send_ws_update(target=work.target, api='work') # 推送通知\n self.return_result({})\n else:\n self.return_403()\n\n\nclass PersonalWorkOrderListHandler(ApiBasicHandler):\n # 个人负责工单列表\n def get(self):\n work_list = []\n cursor = db_work.find({'operator.operator_id': self.union_id},\n {'_id': 1, 'work_id': 1, 'state': 1, 'operator': 1, 'title': 1})\n for i in cursor:\n work_list.append(i)\n self.return_result({'workOrders': work_list})\n\n\nclass DepartmentWorkOrderListHandler(ApiBasicHandler):\n # 部门负责工单列表\n def get(self):\n pass\n\n\nclass WaitingWorkOrderListHandler(ApiBasicHandler):\n # 待处理工单列表\n def get(self):\n # db_work.find({'target.':})\n pass\n\n\nclass FinishedWorkOrderListHandler(ApiBasicHandler):\n def get(self):\n pass\n\n\nclass ActiveWorkOrderListHandler(ApiBasicHandler):\n # 全部活动工单列表\n def get(self):\n pass\n\n\nclass WorkDetailHandler(ApiBasicHandler):\n # 流转信息详情\n def get(self, work_id):\n work = db_work.find_one({'_id': work_id})\n self.return_result(work)\n\n\nclass WorkSummaryHandler(ApiBasicHandler):\n # 流转信息概要\n def get(self, work_id):\n work = Work()\n work.load(db_work.find_one({'_id': work_id}))\n summary = work.output_summary()\n self.return_result(summary)\n\n\nclass SendToPersonHandler(ApiBasicHandler):\n # 派发工单给个人\n def post(self):\n # {\n # '_id': '12312ABC4346FE', # 工单key\n # 'target': 'union_id', # 发送目标id\n # 'message':'asdas',\n # 'add_on':附件\n # }\n data = tornado.escape.json_decode(self.request.body)\n _id = data.get('_id')\n target_id = data.get('target')\n msg = data.get('message')\n\n work = Work()\n work.load(db_work.find_one({'_id': ObjectId(_id)}))\n work.send(target_list=[target_id], msg=msg, add_on_list=[])\n new_data = work.output()\n\n # 匹配key和执行人,成功则更���数据库,无匹配则返回none\n result = db_work.find_and_modify(query={'_id': ObjectId(_id),\n 'operator.operator_id': self.union_id},\n update=new_data)\n if result:\n ws_operate.send_ws_update(target=[target_id], 
api='work') # 推送通知\n self.return_result({})\n else:\n self.return_403()\n\n\nclass SendToDempartmentHandler(ApiBasicHandler):\n # 派发工单给部门\n def post(self):\n # {\n # '_id': '12312ABC4346FE', # 工单key\n # 'target': 'department_id', # 发送目标id\n # 'message':'asdas',\n # 'add_on':附件\n # }\n data = tornado.escape.json_decode(self.request.body)\n _id = data.get('_id')\n target_list = get_department_foreman_list(department_id=data.get('target'))\n msg = data.get('message')\n # add_on = data.get('add_on')\n\n work = Work()\n work.load(db_work.find_one({'_id': ObjectId(_id)}))\n work.send(target_list=target_list, msg=msg, add_on_list=[])\n new_data = work.output()\n\n # 匹配key和执行人,成功则更新数据库,无匹配则返回none\n result = db_work.find_and_modify(query={'_id': ObjectId(_id),\n 'operator.operator_id': self.union_id},\n update=new_data)\n\n if result:\n ws_operate.send_ws_update(target=target_list, api='work') # 推送通知\n self.return_result({})\n else:\n self.return_403()\n\n\nclass CancelSendingHandler(ApiBasicHandler):\n # 终止派发,取回\n def post(self):\n # {'id': '12312ABC4346FE', # 工单key}\n data = tornado.escape.json_decode(self.request.body)\n _id = data.get('_id')\n work = Work()\n work.load(db_work.find_one({'_id': ObjectId(_id)}))\n work.cancel_sending()\n new_data = work.output()\n\n # 匹配key和执行人,成功则更新数据库,无匹配则返回none\n result = db_work.find_and_modify(query={'_id': ObjectId(_id),\n 'operator.operator_id': self.union_id},\n update=new_data)\n if result:\n self.return_result({})\n else:\n self.return_403()\n\n\nclass ReceiveHandler(ApiBasicHandler):\n # 接收工单\n def post(self):\n # {'id': '12312ABC4346FE', # 工单key}\n data = tornado.escape.json_decode(self.request.body)\n _id = data.get('_id')\n work = Work()\n work.load(db_work.find_one({'_id': ObjectId(_id)}))\n work.receive(union_id=self.union_id, department_id=self.department_id)\n new_data = work.output()\n\n # 匹配key和执行人,成功则更新数据库,无匹配则返回none\n result = db_work.find_and_modify(query={'_id': ObjectId(_id),\n 'target.operator_id': self.union_id},\n update=new_data)\n if result:\n self.return_result({})\n else:\n self.return_403()\n\n\nclass RefuseHandler(ApiBasicHandler):\n # 拒绝接收\n def post(self):\n # {\n # 'id': '12312ABC4346FE', # 工单key\n # 'message':'refuse reason' # 拒收理由\n # }\n data = tornado.escape.json_decode(self.request.body)\n _id = data.get('_id')\n message = data.get('message')\n work = Work()\n work.load(db_work.find_one({'_id': ObjectId(_id)}))\n work.refuse(union_id=self.union_id, department_id=self.department_id, msg=message)\n new_data = work.output()\n\n # 匹配key和执行人,成功则更新数据库,无匹配则返回none\n result = db_work.find_and_modify(query={'_id': ObjectId(_id),\n 'target.operator_id': self.union_id},\n update=new_data)\n if result:\n self.return_result({})\n else:\n self.return_403()\n\n\nclass ReplyHandler(ApiBasicHandler):\n # 答复工单\n def post(self):\n # {\n # '_id': '12312ABC4346FE', # 工单key\n # 'message':'asdas',\n # 'add_on':附件\n # }\n data = tornado.escape.json_decode(self.request.body)\n _id = data.get('_id')\n target_list = get_call_center_foreman_list()\n msg = data.get('message')\n # add_on = data.get('add_on')\n\n work = Work()\n work.load(db_work.find_one({'_id': ObjectId(_id)}))\n work.answer(union_id=self.union_id, department_id=self.department_id, msg=msg, add_on_list=[])\n new_data = work.output()\n\n # 匹配key和执行人,成功则更新数据库,无匹配则返回none\n result = db_work.find_and_modify(query={'_id': ObjectId(_id),\n 'operator.operator_id': self.union_id},\n update=new_data)\n\n if result:\n ws_operate.send_ws_update(target=target_list, api='work') # 推送通知\n 
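Every state-changing handler above guards its write by matching both the work-order key and the current operator (or target) in a single find_and_modify call, so a stale client cannot clobber an order that has since been reassigned. PyMongo has since removed find_and_modify in favour of find_one_and_replace / find_one_and_update; a sketch of the same ownership guard with the newer call (collection and field names taken from the handlers):

from bson.objectid import ObjectId

def claim_and_update(db_work, _id, union_id, new_data):
    # atomically replace the work order only if the caller still owns it;
    # returns the previous document, or None when the guard does not match
    return db_work.find_one_and_replace(
        {'_id': ObjectId(_id), 'operator.operator_id': union_id},
        new_data)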
self.return_result({})\n else:\n self.return_403()\n\n\nclass FinishHandler(ApiBasicHandler):\n # 流转结束,归档\n def post(self):\n # {'id': '12312ABC4346FE', # 工单key}\n data = tornado.escape.json_decode(self.request.body)\n _id = data.get('_id')\n\n work = Work()\n work.load(db_work.find_one({'_id': ObjectId(_id)}))\n work.finish(union_id=self.union_id, department_id=self.department_id)\n new_data = work.output()\n\n # 匹配key和执行人,成功则更新数据库,无匹配则返回none\n result = db_work.find_and_modify(query={'_id': ObjectId(_id),\n 'target.operator_id': self.union_id},\n update=new_data)\n\n if result:\n self.return_result({})\n else:\n self.return_403()\n\n\n# 流转中相关处理方法\n\n\ndef get_user_name_by_union_id(union_id):\n cursor = db_user.find_one({'unionid': union_id}, {'name': 1})\n return cursor.get('name')\n\n\ndef get_department_name_by_department_id(department_id):\n cursor = db_department.find_one({'id': department_id}, {'name': 1})\n return cursor.get('name')\n\n\ndef get_department_foreman_list(department_id):\n # 获得部门值班员名单\n\n user_list = []\n # 包含所有下级部门的id列表\n root_and_children_department_list = get_department_children_id(department_id)\n # 遍历全部下级部门\n for child_id in root_and_children_department_list:\n # 获得部门员工列表\n cursor = db_user.find({'department': child_id}, {'unionid': 1, 'name': 1})\n # 部门内所有员工注入总表\n for i in cursor:\n user_list.append({'union_id': i.get('unionid'), 'name': i.get('name')})\n return user_list\n\n\ndef get_call_center_foreman_list():\n # 获得客服中心值班员名单 [{ union_id,name}...]\n department_id = get_call_center_foreman_department_id()\n return get_department_foreman_list(department_id)\n\n\ndef get_call_center_department_id():\n # 客服中心组织代码\n\n # {\n # \"id\": 3124145,\n # \"name\": \"客服中心\",\n # \"parentid\": 1,\n # \"createDeptGroup\": false,\n # \"autoAddUser\": false\n # }\n\n # 根据部门名称和根目录id找到部门信息,返回其部门id\n cc_department_info = db_department.find_one({'parentid': 1, 'name': CC_DEPARTMENT_NAME})\n department_id = cc_department_info.get('id')\n return department_id\n\n\ndef get_call_center_foreman_department_id():\n # 中心值班员组织代码\n parent_id = get_call_center_department_id()\n cc_foreman_department_info = db_department.find_one({'parentid': parent_id,\n 'name': CC_FOREMAN_DEPARTMENT_NAME})\n cc_foreman_department_id = cc_foreman_department_info.get('id')\n return cc_foreman_department_id\n\n\ndef get_call_center_service_department_id():\n # 中心话务员组织代码\n parent_id = get_call_center_department_id()\n cc_service_department_info = db_department.find_one({'parentid': parent_id,\n 'name': CC_SERVICE_DEPARTMENT_NAME})\n cc_service_department_id = cc_service_department_info.get('id')\n return cc_service_department_id\n\n\ndef get_station_root_department_id():\n # 站段根组织代码\n station_root_department_info = db_department.find_one({'parentid': 1,\n 'name': STATION_ROOT_DEPARTMENT_NAME})\n station_root_department_id = station_root_department_info.get('id')\n return station_root_department_id\n\n\ndef get_department_children_id(department_id):\n # 获取目标部门下所有子部门(含自己)列表\n children_id_list = []\n children = db_department.find({'parentid': department_id}, {'id': 1})\n for child in children:\n if child:\n child_id = child.get('id')\n children_id_list += get_department_children_id(child_id)\n return [department_id] + children_id_list\n\n\nif __name__ == '__main__':\n t = time.time()\n a = get_department_children_id(1)\n print(time.time() - t)\n print(a)\n 
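get_department_children_id() above recurses once per department and issues one query per node, which can hit Python's recursion limit on a deep tree. A sketch of an iterative breadth-first equivalent against the same collection shape (documents carrying 'id' and 'parentid'):

from collections import deque

def get_children_ids_iterative(db_department, root_id):
    # breadth-first walk over the parentid tree, root included
    ids = [root_id]
    queue = deque([root_id])
    while queue:
        parent = queue.popleft()
        for child in db_department.find({'parentid': parent}, {'id': 1}):
            child_id = child.get('id')
            if child_id is not None:
                ids.append(child_id)
                queue.append(child_id)
    return ids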
print(len(set(a)))\r\n","sub_path":"hex/back_end/api_server/handler_workflow.py","file_name":"handler_workflow.py","file_ext":"py","file_size_in_byte":22907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"547443290","text":"\"\"\" Write a program that asks for name from the user and then asks for a number and stores the two in a dictionary as key-value pair.\n\nThe program then asks if the user wants to enter more data (More data (y/n)? ) and depending on user choice, \neither asks for another name-number pair or exits and stores the dictionary key, values in a list of tuples \nand prints a sorted version of the list. \n\nNote: Ignore the case where the name is already in the dictionary. \"\"\"\ndef appendtolist(name_input, num_input,user_list):\n    stored_data = dict(Name=name_input,Number=num_input)\n    key = stored_data['Name']\n    value = stored_data['Number']\n    to_append = (key,value)\n    user_list.append(to_append)\n    return user_list\n\n\ndef main():\n    keepGoing = True\n    askKeepGoing = \"\"\n    user_list = []\n    \n    while keepGoing:\n        name_input = input(\"Name: \")\n        num_input = input(\"Number: \")\n        user_list = appendtolist(name_input,num_input,user_list)\n        \n        askKeepGoing = input(\"More data (y/n)? \")\n        if askKeepGoing.lower() == \"y\":\n            continue\n        elif askKeepGoing.lower() == \"n\": \n            user_list.sort()\n            print(user_list)\n            keepGoing = False\n\nmain()","sub_path":"class14_1.py","file_name":"class14_1.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"171124837","text":"from pyspark.sql import SparkSession\n\n# the source for this data pipeline is a kafka topic, defined below\nspark = SparkSession.builder.appName(\"fuel-level\").getOrCreate()\nspark.sparkContext.setLogLevel('WARN')\n\nkafkaRawStreamingDF = spark \\\n    .readStream \\\n    .format(\"kafka\") \\\n    .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n    .option(\"subscribe\",\"fuel-level\") \\\n    .option(\"startingOffsets\",\"earliest\")\\\n    .load()\n\n#this is necessary for Kafka Data Frame to be readable, into a single column value\nkafkaStreamingDF = kafkaRawStreamingDF.selectExpr(\"cast(key as string) key\", \"cast(value as string) value\")\n\n# this takes the stream and \"sinks\" it to the console as it is updated one at a time like this:\n# +--------------------+-----+\n# |                 Key|Value|\n# +--------------------+-----+\n# |1593939359          |13...|\n# +--------------------+-----+\nkafkaStreamingDF.writeStream.outputMode(\"append\").format(\"console\").start().awaitTermination()\n","sub_path":"sparkstream/kafkasource_consoleoutput.py","file_name":"kafkasource_consoleoutput.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"388728395","text":"import pyspark #alt+enter\nfrom pyspark import *\nfrom pyspark.conf import *\nfrom pyspark.sql import *\n\ndata_path = 'soc-LiveJournal1Adj.txt'\noutput_path = 'maximum_friend_number_Q2'\napp_name = 'Maximum Friend Number'\nmaster = 'local'\n\nsparkConfig = SparkConf().setAppName(app_name).setMaster(master)\nsc = SparkContext(conf=sparkConfig)\n\ndata = sc.textFile(data_path)\n\ndef mutual_friend_mapper(line):\n    item = line.split(\"\\t\")\n    user = item[0]\n    friends = item[1].split(\",\")\n    lst = []\n    for friend in friends:\n        if friend.isnumeric():\n            person1 = str(min(int(user), int(friend)))\n            person2 = str(max(int(user), int(friend)))\n            key = 
person1 + \",\" + person2\n value = item[1]\n lst += [(key, (key, value))]\n return lst\n\ndef mutual_friend_reducer(l1,l2):\n key1,key2= l1[0],l2[0]\n value1,value2 = l1[1].split(\",\"),l2[1].split(\",\")\n lst = []\n for e in value1:\n if e in value2:\n lst.append(e)\n return str(len(lst))\n\nquestion1_out = data.flatMap(mutual_friend_mapper).reduceByKey(mutual_friend_reducer).filter(lambda x: isinstance(x[1], str)).map(lambda x: x[0] + \"\\t\" + x[1])\nmaximum_number = question1_out.map(lambda x: int(x.split(\"\\t\")[1])).reduce(lambda x, y: max(x, y))\nquestion2_out = question1_out.filter(lambda x: int(x.split(\"\\t\")[1])== maximum_number)\nquestion2_out.saveAsTextFile(output_path)\n","sub_path":"homework2/maximumFriendNo.py","file_name":"maximumFriendNo.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"507021622","text":"list_of_numbers = [4,\t80,\t85,\t59,\t37,25,\t5,\t64,\t66,\t81,20,\t64,\t41,\t22,\t76,76,\t55,\t96,\t2,\t68]\n\n#Your code here:\ndef merge_two_list(arreglo):\n odd = []\n even = []\n comb = []\n for x in range(len(arreglo)):\n if arreglo[x]%2!=0:\n odd.append(arreglo[x])\n elif arreglo[x]%2==0:\n even.append(arreglo[x])\n comb.append(odd)\n comb.append(even)\n return comb\n\nprint(merge_two_list(list_of_numbers))","sub_path":"exercises/08.2-Divide_and_conquer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"485258580","text":"#!/usr/bin/env python\n\n\nimport scipy.io as scio\nimport pandas as pd \nimport xarray as xr\nimport datetime as dt\nimport utils\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom datetime import datetime\nfrom matplotlib import rc\nimport scipy.signal as signal\nimport plotly\nimport numpy as np\n\ndef convert_to_ts(seconds):\n dt_list = []\n for s in np.nditer(seconds):\n dt_list.append(datetime.fromtimestamp(s))\n return dt_list\n\ndef load_dataframe(path):\n keys = ['time','depth','temp']\n mat = scio.loadmat(path)\n return pd.DataFrame(dict((k, mat[k].flatten()) for k in keys))\n\ndef plot_psd_(ax,D,M,FS,label,si=slice(1,None)):\n freqs,psd = utils.psd(D,M=M,FS=FS)\n high,low = utils.psd_err(M) #Specific for welch with hanning\n ax.loglog(freqs[si],psd[si],label=label)\n ax.fill_between(freqs[si],low*psd[si],high*psd[si],alpha=0.2)\n return freqs,psd\n\n\ndef plot_coh(ax,D1,D2,M,FS,label,si=slice(1,None)):\n nperseg = int(len(D1)/M)\n nd = 2*M*0.95\n freqs,coh = signal.coherence(D1,D2,nperseg=nperseg,fs=FS)\n beta = 1 - (0.005)**(1/(nd-1))\n ax.semilogx(freqs[si],coh[si])\n ax.plot(freqs[si],beta*np.ones(len(freqs[si])),'r--',label='Sig. 
Threshold 99')\n ax.set_xlabel('Frequency (CPD)')\n ax.set_ylabel(r'$\\gamma^2$')\n ax.grid()\n ax.legend()\n return freqs[si],coh[si],beta\n\n\ndef plot_csd(ax,D1,D2,M,FS,label,si=slice(1,None)):\n nperseg = int(len(D1)/M)\n freqs, Pxy = signal.csd(D1,D2, nperseg=nperseg,fs=FS)\n ax.plot(freqs[si],Pxy.real[si],marker='s',label='real')\n ax.plot(freqs[si],Pxy.imag[si],marker='s',label='imag')\n ax.grid()\n ax.legend()\n return freqs[si],Pxy[si]\n\ndef plot_phase(ax,D1,D2,M,FS,label,si=slice(1,None)):\n nperseg = int(len(D1)/M)\n freqs,coh = signal.coherence(D1,D2,nperseg=nperseg,fs=FS)\n freqs, Pxy = signal.csd(D1,D2, nperseg=nperseg,fs=FS)\n phase = np.angle(Pxy[si],deg=False)\n err = np.sqrt(1-coh[si]/(2*M))/(np.sqrt(coh[si]))\n ax.errorbar(freqs[si],phase,yerr=err,marker='s',ls='none',label=label)\n ax.set_ylabel('Radians')\n ax.grid()\n ax.legend()\n return freqs[si],phase,err\n\n\n#Thermistor chain at 34.2545, -120.0974\nTS1 = load_dataframe('../matlab/TS_1.mat')\n\n#Thermistor chain at 34.2389, -120.1026\nTS2 = load_dataframe('../matlab/TS_2.mat')\n\n#Depth Average\nTS1_DA = TS1.groupby('time').mean()\nTS2_DA = TS2.groupby('time').mean()\n\n#Spectral Parameters\nFS = 60*24 #Sampled per minute meaning 24*60 samples per day\nM = 4 #Number of segments\n\n#Plot the Coh,CSD,Phase\nf,ax = plt.subplots(3,1)\nfreqs,coh,beta = plot_coh( ax[0],TS1_DA.temp,TS2_DA.temp,M,FS,'Sq. Coh')\nfreqs,csd = plot_csd( ax[2],TS1_DA.temp,TS2_DA.temp,M,FS,'CSD',slice(1,10))\nfreqs,phase,err = plot_phase(ax[1],TS1_DA.temp,TS2_DA.temp,M,FS,'Phase',slice(1,10))\n\n#Title\nf.suptitle('Square Coherence')\nf.set_size_inches(10,7)\nf.set_dpi(200)\n\n\nsig_ind = np.where(coh>beta)[0][1:10]\nf,(ax1,ax2,ax3,ax4) = plt.subplots(4,1,sharex=True)\nfreqs,coh,beta = plot_coh( ax1,TS1_DA.temp,TS2_DA.temp,M,FS,'Sq. 
Coh',sig_ind)\nfreqs,csd = plot_csd( ax2,TS1_DA.temp,TS2_DA.temp,M,FS,'CSD',sig_ind)\nfreqs,phase,err = plot_phase(ax3,TS1_DA.temp,TS2_DA.temp,M,FS,'Phase',sig_ind)\n\nwl = 4*np.pi/abs(phase)\nwl_err = abs(wl)*(err/abs(phase))\nax4.plot(freqs, wl,marker='s',ls='none')\n\n\nax4.set_xlabel('Frequency (CPD)')\nax4.set_ylabel(r'Wavelength $\\lambda$')\nax4.grid(which='both')\n\nf.suptitle('Coherent Frequency and Phase')\nf.set_size_inches(10,7)\nf.set_dpi(200)\nplt.show()\n#\n#\nprint(phase*180/np.pi)\nprint(freqs/24)\n\n\n","sub_path":"IW/notebooks/SBC/SBC_SPEC.py","file_name":"SBC_SPEC.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"23378735","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('account/', include('account.urls')),\n path('social-auth/',include('social_django.urls', namespace='social')),\n path('advert/',include('advert.urls', namespace='advert')),\n path('blog/',include('blog.urls', namespace='blog')),\n path('marketing/',include('marketing.urls', namespace='marketing')),\n path('',include('home.urls', namespace='home')),\n path('others/',include('others.urls', namespace='others')),\n path('owner/',include('owner.urls', namespace='owner')),\n path('search/',include('search.urls', namespace='search')),\n path('hitcount/', include(('hitcount.urls', 'hitcount'), namespace='hitcount')),\n path('tracking/', include('tracking.urls')),\n path('ratings/', include('star_ratings.urls', namespace='ratings')),\n path('ckeditor/', include('ckeditor_uploader.urls')),\n]\nurlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\n","sub_path":"rockflint/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"57073718","text":"import traceback\n\nfrom django.core.paginator import Paginator\nfrom django.http import Http404, QueryDict\nfrom django.shortcuts import get_object_or_404\nfrom django.utils import timezone\nfrom django.utils.text import slugify\n\nfrom board.models import *\nfrom modules.response import StatusDone, StatusError\nfrom board.views import function as fn\n\ndef user_series(request, username, url=None):\n if not url:\n if request.method == 'GET':\n series = Series.objects.filter(owner__username=username, hide=False).order_by('-created_date')\n page = request.GET.get('page', 1)\n paginator = Paginator(series, 10)\n fn.page_check(page, paginator)\n series = paginator.get_page(page)\n return StatusDone({\n 'series': list(map(lambda item: {\n 'url': item.url,\n 'name': item.name,\n 'image': item.thumbnail(),\n 'created_date': convert_to_localtime(item.created_date).strftime('%Y년 %m월 %d일'),\n 'owner': item.owner.username,\n }, series)),\n 'last_page': series.paginator.num_pages\n })\n \n if request.method == 'POST':\n body = QueryDict(request.body)\n series = Series(\n owner=request.user,\n name=body.get('title')\n )\n series.url = slugify(series.name, allow_unicode=True)\n if series.url == '':\n series.url = randstr(15)\n i = 1\n while True:\n try:\n series.save()\n break\n except:\n if i > 1000:\n traceback.print_exc()\n return StatusError('TO', '일시적으로 오류가 발생했습니다.')\n series.url = slugify(series.name+'-'+str(i), allow_unicode=True)\n i += 1\n return StatusDone({\n 'url': 
series.url\n })\n \n if url:\n user = User.objects.get(username=username)\n series = get_object_or_404(Series, owner=user, url=url)\n if request.method == 'GET':\n if request.GET.get('type', 1):\n posts = Post.objects.filter(series=series, hide=False)\n return StatusDone({\n 'name': series.name,\n 'url': series.url,\n 'image': series.thumbnail(),\n 'owner': series.owner.username,\n 'owner_image': series.owner.profile.get_thumbnail(),\n 'description': series.text_md,\n 'posts': list(map(lambda post: {\n 'url': post.url,\n 'title': post.title,\n 'read_time': post.read_time,\n 'description': post.description(),\n 'created_date': convert_to_localtime(post.created_date).strftime('%Y년 %m월 %d일')\n }, posts))\n })\n \n if not request.user == series.owner:\n return StatusError('DU')\n\n if request.method == 'PUT':\n put = QueryDict(request.body)\n series.name = put.get('title')\n series.text_md = put.get('description')\n series.url = slugify(series.name, allow_unicode=True)\n if series.url == '':\n series.url = randstr(15)\n i = 1\n while True:\n try:\n series.save()\n break\n except:\n if i > 1000:\n traceback.print_exc()\n return StatusError('TO', '일시적으로 오류가 발생했습니다.')\n series.url = slugify(series.name+'-'+str(i), allow_unicode=True)\n i += 1\n return StatusDone({\n 'url': series.url\n })\n\n if request.method == 'DELETE':\n series.delete()\n return StatusDone()\n \n raise Http404","sub_path":"backend/src/board/views/api/v1/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"288730466","text":"# -*- coding: utf-8 -*-\nfrom flask import request, current_app\nfrom wtforms import Form, IntegerField, StringField\nfrom wtforms.meta import DefaultMeta\n\nfrom app.libs.error_code import ParameterException\n\n\nclass BindNameMeta(DefaultMeta):\n def bind_field(self, form, unbound_field, options):\n if 'custom_name' in unbound_field.kwargs:\n options['name'] = unbound_field.kwargs.pop('custom_name')\n return unbound_field.bind(form=form, **options)\n\n\nclass BaseForm(Form):\n Meta = BindNameMeta\n\n def __init__(self):\n data = request.json\n\n super(BaseForm, self).__init__(data=data)\n\n def validate_for_api(self):\n valid = super(BaseForm, self).validate()\n if not valid:\n raise ParameterException(msg=self.errors.values())\n return self\n\n\nclass SearchOrderForm(BaseForm):\n order_col = StringField()\n order_type = IntegerField()\n\n\nclass PageForm(BaseForm):\n page = IntegerField()\n limit = IntegerField()\n\n @staticmethod\n def fetch_page_param(page_form):\n cur_page = 1\n per_page = current_app.config['DEFAULT_LISTNUM_PER_PAGE']\n\n if page_form.page and page_form.page.data:\n cur_page = page_form.page.data\n\n if page_form.limit and page_form.limit.data:\n per_page = page_form.limit.data\n\n return cur_page, per_page\n\n\nclass ColumnSortForm(BaseForm):\n column_name = StringField()\n column_order = StringField()\n\n @staticmethod\n def fetch_column_param(column_form):\n column_name = 'id'\n column_order = 'descending'\n\n if column_form.column_name.data:\n column_name = column_form.column_name.data\n\n if column_form.column_order.data:\n column_order = column_form.column_order.data\n\n return column_name, column_order\n","sub_path":"python_learn/frame_api/src/app/validators/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"569151606","text":"with open('input.txt') as f:\n input = [line.strip() for line in f]\n\ndef find_path(graph, start_vertex, end_vertex, path=None):\n if path == None:\n path = []\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return path\n if start_vertex not in graph:\n return None\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_path = find_path(graph, vertex, end_vertex, path)\n if extended_path: \n return extended_path\n return None\n\ngraph = {}\nfor orbit in input:\n satellite = orbit.split(')')\n if satellite[0] in graph.keys():\n graph[satellite[0]].append(satellite[1])\n else:\n graph[satellite[0]] = [satellite[1]]\n if satellite[1] in graph.keys():\n graph[satellite[1]].append(satellite[0])\n else:\n graph[satellite[1]] = [satellite[0]]\n\npaths = []\nfor satellite in graph.keys():\n path = find_path(graph, 'COM', satellite)\n if path not in paths:\n paths.append(path)\n\nprint(f\"Part 1: {sum((len(path)-1) for path in paths)}\")\n\ncom_to_san = set(find_path(graph, 'COM', 'SAN'))\ncom_to_you = set(find_path(graph, 'COM', 'YOU'))\ncommon = com_to_san.symmetric_difference(com_to_you)\n\nprint(f\"Part 2: {len(set(common))-2}\")\n\n\n \n","sub_path":"day06/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"346737898","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 20. Valid Parentheses\n#\n# Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.\n#\n# The brackets must close in the correct order, \"()\" and \"()[]{}\" are all valid but \"(]\" and \"([)]\" are not.\n\n\nclass Solution:\n def isValid(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n valid_dict = {')': '(', ']': '[', '}': '{'}\n left_paren = ['(', '[', '{']\n tmp_stack = []\n str_len = len(s)\n if str_len > 0 and str_len % 2 == 0:\n for i in range(str_len):\n if s[i] in left_paren:\n tmp_stack.append(s[i])\n else:\n if len(tmp_stack) == 0:\n return False\n elif valid_dict.get(s[i]) == tmp_stack.pop():\n continue\n else:\n return False\n\n if len(tmp_stack) == 0:\n return True\n else:\n return False\n else:\n return False\n\n\n\n\n\n\n\n\n\n\n","sub_path":"python/20_Valid_Parentheses.py","file_name":"20_Valid_Parentheses.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"507032505","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 26 13:47:22 2017\n\n@author: c1625914\n\"\"\"\n\nfrom __future__ import absolute_import, print_function, division\nimport matplotlib\n\nmatplotlib.rcParams['font.family'] = 'Latin Modern Roman'\n\nimport numpy as np\nfrom astropy.wcs import WCS\nfrom astropy.io import fits\nimport sys\nimport matplotlib.pyplot as plt\nsys.path.append('/home/daedalusdata/c1625914/UsefulCode/Functions/')\nimport tgfunctions as tg\n\nsteradian = 42545170296.1522 #arcsec^2\nm33_distance = 840 #kpc\nm33_ra = \t23.462051\nm33_dec = 30.660184\ninclination = 56\ninclination *= np.pi/180\n\nrad_conv = np.pi/(3600*180)\nmsun = 1.989e30\nmproton = 1.67e-27\n\nplt.close('all')\n\n######################################################\n###############FUV AND 24 MICRON######################\n######################################################\n \nmips_data,mips_header = 
fits.getdata('/home/daedalusdata/c1625914/M33/regrids/25_originals/MIPS24.fits',header=True) \nfuv_data,fuv_header = fits.getdata('/home/daedalusdata/c1625914/M33/regrids/25_originals/GALEXFUV.fits',header=True) \nirac_data,irac_header = fits.getdata('/home/daedalusdata/c1625914/M33/regrids/25_originals/IRAC3_6.fits',header=True) \nw_fuv = WCS('/home/daedalusdata/c1625914/M33/regrids/25_originals/GALEXFUV.fits')\nw_irac = WCS('/home/daedalusdata/c1625914/M33/regrids/25_originals/IRAC3_6.fits') \nw = WCS('/home/daedalusdata/c1625914/M33/regrids/25_originals/MIPS24.fits')\n\npixscale = tg.get_pixscale(fuv_header)\n\na = 30*60/pixscale\nb = 35*60/pixscale\n\nx_centre,y_centre = w_fuv.all_world2pix(m33_ra,m33_dec,1)\n\nworld_coords = np.zeros([fuv_data.shape[0],fuv_data.shape[1],2])\n\nworld_coords[:,:,0] = tg.ellipse(fuv_data,x_centre,y_centre,a,b)\nworld_coords[:,:,1] = tg.ellipse(fuv_data,x_centre,y_centre,a,b)\n\nmips_data_matched = np.zeros([fuv_data.shape[0],fuv_data.shape[1]])\nmips_data_matched[mips_data_matched == 0] = np.nan\n\nfor i in range(fuv_data.shape[0]):\n for j in range(fuv_data.shape[1]):\n if np.isnan(world_coords[i,j,0]) == False: \n world_coords[i,j,0], world_coords[i,j,1] = w_fuv.all_pix2world(j,i,1)\n \nfor i in range(fuv_data.shape[0]):\n for j in range(fuv_data.shape[1]):\n try:\n x,y = w.all_world2pix(world_coords[i,j,0],world_coords[i,j,1],1)\n x = int(round(x))\n y = int(round(y)) \n if x>=0 and y>=0:\n mips_data_matched[i,j] = mips_data[y,x]\n except:\n pass\n \nmips_data = mips_data_matched\n \nirac_data_matched = np.zeros([fuv_data.shape[0],fuv_data.shape[1]])\nirac_data_matched[irac_data_matched == 0] = np.nan\n \nfor i in range(fuv_data.shape[0]):\n for j in range(fuv_data.shape[1]):\n try:\n x,y = w_irac.all_world2pix(world_coords[i,j,0],world_coords[i,j,1],1)\n x = int(round(x))\n y = int(round(y)) \n if x>=0 and y>=0:\n irac_data_matched[i,j] = irac_data[y,x]\n except:\n pass\n \nirac_data = irac_data_matched\n\n#Errors!\n\nfuv_calib_error = 0.05\nmips_calib_error = 0.04\n\nfuv_background_error = tg.find_background_rms(fuv_data)\nmips_background_error = tg.find_background_rms(mips_data)\n\n#For FUV, worry about extinction error too\n\nfuv_extinction_error = 0.14\n\nfuv_data_add = np.nansum(fuv_data)\nmips_data_add = np.nansum(mips_data)\n\nfuv_data_error = np.sqrt( (fuv_data_add*fuv_calib_error)**2 + (fuv_data_add*fuv_extinction_error)**2 + fuv_background_error**2 )\nmips_data_error = np.sqrt( (mips_data_add*mips_calib_error)**2 + mips_background_error**2 )\n\nfuv_data_error_percent = fuv_data_error/fuv_data_add\nmips_data_error_percent = mips_data_error/mips_data_add\n\n#convert from Jy/px to Mjy/sr\n\nmips_data *= steradian/(10**6*pixscale**2) \nfuv_data *= steradian/(10**6*pixscale**2)\nirac_data *= steradian/(10**6*pixscale**2)\n\nmips_data -= 0.1*irac_data\nfuv_data -= 3e-3*irac_data\n\nfuv_data_add = np.nansum(fuv_data)\nmips_data_add = np.nansum(mips_data)\n \n#Leroy conversion factors\n\nfuv_data *= 8.1e-2\nmips_data *= 3.2e-3\n\n#Calculate surface SFR per pixel\n\nsigma_sfr = (fuv_data+mips_data)#*np.cos(inclination)\n\n#Also do it for just all the pixels at once\n\ntotal_sigma_sfr = (fuv_data_add*8.1e-2 + mips_data_add*3.2e-3)#*np.cos(inclination)\n\n#Error calculation!\n\nerror_plus = np.sqrt( fuv_data_error_percent**2 + mips_data_error_percent**2 + (1.2/3.2)**2 )\nerror_minus = np.sqrt( fuv_data_error_percent**2 + mips_data_error_percent**2 + (0.7/3.2)**2 )\n\n#Calculate area_per_pixel in kpc^2\n\narea_per_pix = 
(pixscale*rad_conv*m33_distance)**2\n\nsfr = sigma_sfr*area_per_pix\n\ntotal_sfr = np.nansum(sfr)\n\nerror_sfr_plus = total_sfr*error_plus\nerror_sfr_minus = total_sfr*error_minus\n\nfig = plt.figure(1)\nax = fig.add_axes([.1,.1,.8,.8])\n\nax.set_xticklabels([])\nax.set_yticklabels([])\nax.xaxis.set_ticks_position('none')\nax.yaxis.set_ticks_position('none')\n\nplt.imshow(sfr,vmin=np.nanpercentile(sfr,0.5), vmax=np.nanpercentile(sfr,99.5),origin='lower',cmap='hot',interpolation='none')\nplt.text(0.95,0.95,r'Total SFR: $'+str('{0:.2f}'.format(total_sfr))+'^{+'+str('{0:.2f}'.format(error_sfr_plus))+'}_{-'+str('{0:.2f}'.format(error_sfr_minus))+'}$ ($M_{\\odot}$/yr)',\n verticalalignment='top', horizontalalignment='right',\n transform = ax.transAxes)\ncbar = plt.colorbar(format='%.0e')\n\ncbar.set_label(r'SFR ($M_{\\odot}$/yr)')\n\nfits.writeto('/home/daedalusdata/c1625914/M33/regrids/output_fits_files/sfr_fuv_24_no_inclination.fits',sfr,fuv_header,clobber=True)","sub_path":"sfr_fuv_24.py","file_name":"sfr_fuv_24.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"518749725","text":"__author__ = 'kevinschoon@gmail.com'\n\nimport asyncio\nimport json\nimport unittest\nimport aiohttp\n\nfrom signul.bootstrap import Signul\nfrom signul.actors.base import PoisonActor, SignulActor\nfrom signul.actors.http import HttpServer\n\n\nclass TestPOST(SignulActor):\n def __init__(self):\n super().__init__()\n self.context.system.exec(self.make_post)\n\n @asyncio.coroutine\n def make_post(self):\n yield from asyncio.sleep(3)\n resp = yield from aiohttp.request(\n method='POST', url='http://127.0.0.1:9999/test', data=json.dumps({\"Signul\": \"TestRequest\"})\n )\n print(resp)\n\n\nclass TestHttpServer(unittest.TestCase):\n def setUp(self):\n self.signul = Signul(dict(actors=[{'class': PoisonActor, 'args': [5]}, {'class': TestPOST}]))\n\n def test_basic_http(self):\n self.signul.add_actor(HttpServer, bus=self.signul.bus, loop=self.signul.loop, routes=['/test'])\n self.signul.initialize()\n self.signul.system.run_until_stop(exit_after=False)\n","sub_path":"tests/test_http_actor.py","file_name":"test_http_actor.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"110942","text":"import cv2\nimport numpy as np\n\ncap=cv2.VideoCapture(0)\n\nwhile(True):\n _,frame=cap.read()\n\n hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n\n #define kernel for gap closing in mask\n kernel = np.ones((5, 5), np.uint8)\n\n\n #define range of blue in hsv\n lower_blue=np.array([160,80,1])\n upper_blue=np.array([255,255,160])\n\n # threshold of only blue colors (applied to the hsv image, matching the conversion above)\n mask=cv2.inRange(hsv,lower_blue,upper_blue)\n\n # bitwise add:adds the selected mask to selected images\n res=cv2.bitwise_and(frame,frame,mask=mask)\n\n #blur = cv2.bilateralFilter(res, 9, 75, 75) removes object texture\n #blur=cv2.GaussianBlur(res,(5,5),0) removes gaussian noise\n\n closing = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)\n\n cv2.imshow('frame',frame)\n cv2.imshow('mask',mask)\n cv2.imshow('closed', closing)\n cv2.imshow('result',res)\n\n\n if cv2.waitKey(1)==ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()","sub_path":"Core Operations/ImageProccessing/colorTracking.py","file_name":"colorTracking.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"528756988","text":"#!/usr/local/bin/python\nimport socket\nimport os\nimport sys\nimport select\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind(('0.0.0.0', 2222))\ns.listen(10)\n\nreadsocks, writesocks = [], []\nactive_socks = []\nactive_socks.append(s)\n\nwhile True:\n readsocks = active_socks.copy()\n res = select.select(readsocks, [], []) # readables, writeables, exceptions = select.select(readsocks, writesocks, [])\n print(type(res))\n print(type(res[0]))\n readables, writeables = res[0:2]\n for sockobj in readables:\n if sockobj == s:\n new_host = s.accept()\n active_socks.append(new_host[0])\n else:\n data = sockobj.recv(1024)\n if not data:\n sockobj.close()\n # remove from the persistent list, not the per-iteration copy,\n # so the closed socket is not passed to select() again\n active_socks.remove(sockobj)\n else:\n sockobj.send(data)\n sockobj.close()\n active_socks.remove(sockobj)\n","sub_path":"socket_server_multi_tcp_select.py","file_name":"socket_server_multi_tcp_select.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"138322083","text":"'''\nCode written in Oct 2021 by Yuhan Wang\ncheck TES yield by taking bias tickle (from sodetlib) and IV curves\ndisplay quality in biasability, 50% RN target V bias, Psat and Rn\n'''\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport pysmurf.client\nimport argparse\nimport numpy as np\nimport os\nimport time\nimport glob\nfrom sodetlib.det_config import DetConfig\nfrom scipy.interpolate import interp1d\nimport csv\nimport sodetlib.smurf_funcs.det_ops as det_op\nimport sodetlib.analysis.det_analysis as det_analysis\n\n\nstart_time=S.get_timestamp()\n\ntarget_BL = np.array([0,1,2,3,4,5,6,7,8,9,10,11])\n\n#this is more for keeping track of bath temp\nbath_temp = 100\nbias_high_command=20\nbias_low_command=0\nbias_step_command = 0.025\n\nsave_name = '{}_tes_yield.csv'.format(start_time)\nprint(f'Saving data to {os.path.join(S.output_dir, save_name)}')\ntes_yield_data = os.path.join(S.output_dir, save_name)\npath = os.path.join(S.output_dir, tes_yield_data) \n\nout_fn = path\n\nfieldnames = ['bath_temp', 'bias_line', 'band', 'data_path','notes']\nwith open(out_fn, 'w', newline = '') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n \nprint(f'Taking tickle on bias line all band')\n\ntickle_file = det_op.take_tickle(S, cfg, target_BL, tickle_freq=5, tickle_voltage=0.005,high_current=True)\ndet_analysis.analyze_tickle_data(S, tickle_file,normal_thresh=0.002) \n\nrow = {}\nrow['bath_temp'] = bath_temp\nrow['bias_line'] = str(target_BL)\nrow['band'] = 'all'\n \nrow['data_path'] = tickle_file\n \nwith open(out_fn, 'a', newline = '') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writerow(row)\n\nfor bias_gp in target_BL:\n row = {}\n row['bath_temp'] = bath_temp\n row['bias_line'] = bias_gp\n row['band'] = 'all'\n \n print(f'Taking IV on bias line {bias_gp}, all band')\n \n iv_data = S.run_iv(\n bias_groups = [bias_gp], wait_time=0.01, bias_high=bias_high_command,\n bias_low=bias_low_command, bias_step = bias_step_command,\n overbias_voltage=18, cool_wait=30, high_current_mode=False,\n make_plot=False, save_plot=True\n )\n dat_file = iv_data[0:-13]+'.npy' \n row['data_path'] = dat_file\n with open(out_fn, 'a', newline = '') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writerow(row)\n\n\ndef write_IV_into_dict(IV_csv):\n \n data_dict = np.genfromtxt(IV_csv, delimiter=\",\",unpack=True, 
dtype=None, names=True, encoding=None)\n psat_array = []\n \n data = []\n for ind in np.array([0,1,2,3,4,5,6,7,8,9,10,11])+1:\n file_path = str(data_dict['data_path'][ind])\n data.append(file_path)\n \n good_chans = 0\n psat_all = []\n all_data = dict()\n good = 0\n bad = 0\n\n for ind, bl in enumerate([0,1,2,3,4,5,6,7,8,9,10,11]):\n ch_psat = []\n if bl not in all_data.keys():\n all_data[bl] = dict()\n now = np.load(data[bl], allow_pickle=True).item()\n for sb in [0,1,2,3,4,5,6,7]:\n try:\n if len(now[sb].keys()) != 0:\n all_data[bl][sb] = dict()\n except:\n continue\n # print(now[sb].keys())\n for chan, d in now[sb].items():\n# print(chan)\n # print(d.keys())\n if (d['R'][-1] < 5e-3):\n continue\n elif len(np.where(d['R'] > 15e-3)[0]) > 0:\n continue\n# elif len(np.where(d['R'] < -2e-4)[0]) > 0:\n# continue\n\n # ind = np.where(d['p_tes']>15)[0]\n # if np.min(d['R'][ind]) < 7.5e-3:\n # continue\n # if np.max(d['R'][ind]) > 12e-3:\n # continue\n # if np.std(d['R'][-100:]) > 1e-4:\n # continue\n \n all_data[bl][sb][chan] = d\n good_chans += 1\n try:\n psat = np.float(get_psat(np.load(data[ind],allow_pickle=True).item(),sb,chan, level = 0.9, greedy = False))\n if psat > 0.5:\n ch_psat.append(psat)\n good = good + 1\n good_band.append(int(det_band))\n good_chan.append(int(det_chan))\n \n except:\n bad = bad + 1\n psat_all.append(ch_psat)\n psat_array.append(psat_all)\n return all_data,psat_array\n\n\nall_data_IV,Psat_array = write_IV_into_dict(out_fn)\n\ncommon_biases = set()\nnow_bias = set()\nfor bl in all_data_IV.keys():\n for sb in [0,1,2,3,4,5,6,7]:\n try:\n if len(all_data_IV[bl][sb].keys()) != 0:\n first_chan = next(iter(all_data_IV[bl][band]))\n now_bias = set(all_data_IV[bl][sb][first_chan]['v_bias'])\n if len(common_biases) == 0:\n common_biases = now_bias\n else:\n common_biases = common_biases.intersection(now_bias)\n except:\n continue\ncommon_biases = np.array(\n sorted([np.float(\"%0.3f\" % i) for i in common_biases])\n)\ncommon_biases = np.array(common_biases)\n\noperating_r = dict()\nfor bl in [0,1,2,3,4,5,6,7,8,9,10,11]:\n operating_r[bl] = dict()\n for band in [0,1,2,3,4,5,6,7]:\n try:\n if len(all_data_IV[bl][band].keys()) == 0:\n continue\n except:\n continue\n for v in common_biases:\n if v not in operating_r[bl].keys():\n operating_r[bl][v] = []\n first_chan = next(iter(all_data_IV[bl][band]))\n ind = np.where(\n (np.abs(all_data_IV[bl][band][first_chan]['v_bias'] - v)) < 3e-3\n )[0][0]\n for chan, d in all_data_IV[bl][band].items():\n operating_r[bl][v].append(d['R'][ind]/d['R_n']) \n\nbias_groups = target_BL\ntarget_vbias_list = []\nRN = []\nv_bias_all = []\nfor bl in bias_groups:\n percent_rn = 0.5\n target_v_bias = []\n\n for band in [0,1,2,3,4,5,6,7]:\n try:\n\n for ch,d in all_data_IV[bl][band].items():\n rn = d['R']/d['R_n']\n cross_idx = np.where(np.logical_and(rn - percent_rn >= 0, np.roll(rn - percent_rn, 1) < 0))[0]\n RN.append(d['R_n'])\n target_v_bias.append(d['v_bias'][cross_idx][0]) \n v_bias_all.append(d['v_bias'][cross_idx][0])\n except:\n continue\n\n# print(target_v_bias)\n med_target_v_bias = np.median(np.array(target_v_bias))\n target_vbias_list.append(round(med_target_v_bias,1))\ntarget_vbias_list = np.append(target_vbias_list,[0,0,0])\nprint(np.array(target_vbias_list))\n\ntotal_count = 0\nfig, axs = plt.subplots(6, 4,figsize=(25,30), gridspec_kw={'width_ratios': [2, 1,2,1]})\nfor bl in [0,1,2,3,4,5,6,7,8,9,10,11]:\n count_num = 0\n for band in [0,1,2,3,4,5,6,7]:\n try:\n for ch,d in all_data_IV[bl][band].items():\n 
axs[bl//2,bl%2*2].plot(d['v_bias'], d['R'], alpha=0.6)\n count_num = count_num + 1\n total_count = total_count + 1\n except:\n continue\n axs[bl//2,bl%2*2].set_xlabel('V_bias [V]')\n axs[bl//2,bl%2*2].set_ylabel('R [Ohm]')\n axs[bl//2,bl%2*2].grid()\n axs[bl//2,bl%2*2].axhspan(2.6e-3, 5.8e-3, facecolor='gray', alpha=0.2)\n axs[bl//2,bl%2*2].axvline(target_vbias_list[bl], linestyle='--', color='gray')\n axs[bl//2,bl%2*2].set_title('bl {}, yield {}'.format(bl,count_num))\n axs[bl//2,bl%2*2].set_ylim([-0.001,0.012])\n # print(bl)\n try:\n h = axs[bl//2,bl%2*2+1].hist(operating_r[bl][target_vbias_list[bl]], range=(0,1), bins=40)\n axs[bl//2,bl%2*2+1].axvline(np.median(operating_r[bl][target_vbias_list[bl]]),linestyle='--', color='gray')\n axs[bl//2,bl%2*2+1].set_xlabel(\"percentage Rn\")\n axs[bl//2,bl%2*2+1].set_ylabel(\"{} TESs total\".format(count_num))\n axs[bl//2,bl%2*2+1].set_title(\"optimal Vbias {}V for median {}Rn\".format(target_vbias_list[bl],np.round(np.median(operating_r[bl][target_vbias_list[bl]]),2)))\n except:\n continue\n\nsave_name = f'{start_time}_IV_yield.png'\nprint(f'Saving plot to {os.path.join(S.plot_dir, save_name)}')\nplt.savefig(os.path.join(S.plot_dir, save_name))\n\n\nfig, axs = plt.subplots(6, 4,figsize=(25,30), gridspec_kw={'width_ratios': [2, 2,2,2]})\nfor bl in [0,1,2,3,4,5,6,7,8,9,10,11]:\n count_num = 0\n Rn = []\n psat = []\n for band in [0,1,2,3,4,5,6,7]:\n try:\n for ch,d in all_data_IV[bl][band].items():\n Rn.append(d['R_n'])\n \n level = 0.9\n p = d['p_tes']\n rn = d['R']/d['R_n']\n cross_idx = np.where(np.logical_and(rn - level >= 0, np.roll(rn - level, 1) < 0))[0]\n try:\n assert len(cross_idx) == 1\n except AssertionError:\n continue\n \n cross_idx = cross_idx[:1]\n cross_idx = cross_idx[0]\n rn2p = interp1d(rn[cross_idx-1:cross_idx+1], p[cross_idx-1:cross_idx+1])\n psat.append(np.float(rn2p(level)))\n count_num = count_num + 1\n total_count = total_count + 1\n except:\n continue\n\n axs[bl//2,bl%2*2].set_xlabel('P_sat (pW)')\n axs[bl//2,bl%2*2].set_ylabel('count')\n axs[bl//2,bl%2*2].grid()\n axs[bl//2,bl%2*2].hist(psat, range=(0,15), bins=50,histtype= u'step',linewidth=2,color = 'r')\n axs[bl//2,bl%2*2].axvline(np.median(psat), linestyle='--', color='gray')\n axs[bl//2,bl%2*2].set_title('bl {}, yield {} median Psat {:.2f} pW'.format(bl,count_num,np.median(psat)))\n\n\n # print(bl)\n\n h = axs[bl//2,bl%2*2+1].hist(Rn, range=(0.005,0.01), bins=50,histtype= u'step',linewidth=2,color = 'k')\n axs[bl//2,bl%2*2+1].axvline(np.median(Rn),linestyle='--', color='gray')\n axs[bl//2,bl%2*2+1].set_xlabel(\"Rn (Ohm)\")\n axs[bl//2,bl%2*2+1].set_ylabel('count')\n axs[bl//2,bl%2*2+1].set_title('bl {}, midian Rn {:.4f} Ohm'.format(bl,np.median(Rn)))\n\n\nsave_name = f'{start_time}_IV_psat.png'\nprint(f'Saving plot to {os.path.join(S.plot_dir, save_name)}')\nplt.savefig(os.path.join(S.plot_dir, save_name))\n\nprint(f'Saving data to {out_fn}')\n","sub_path":"chw3k5/checklist/unversioned-daniel/tes_yield.py","file_name":"tes_yield.py","file_ext":"py","file_size_in_byte":10402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"518959444","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDo statistics on students' Merge Requests for given class_id\n# TODO: get MR detail, check build result and get list of correct exercises\n# Turn to a website, allow students login using GitLab ID\n# Or turn to public site, use Lambda generates beautiful dashboard?\n# Data can exported as JSON or CSV\n\"\"\"\nimport 
argparse\nimport datetime\nimport time\nfrom collections import namedtuple\nimport json\nimport os\nimport urllib.request\nimport urllib.parse\nimport re\n\n\ndef send_get(url, headers, data=None):\n if data:\n assert isinstance(data, dict), \"data must be a dict, got {}\".format(\n type(data)\n )\n url = \"{}?{}\".format(url, urllib.parse.urlencode(data))\n\n with urllib.request.urlopen(\n urllib.request.Request(url, headers=headers)\n ) as response:\n if response.status == 200:\n payload = response.read().decode(\"utf-8\")\n return json.loads(payload)\n else:\n raise Exception(\"Failed {}\".format(payload))\n\n\ndef main():\n argp = argparse.ArgumentParser(__doc__)\n argp.add_argument(\"class_id\", help=\"E.g pymihcm1804\", type=str)\n\n args = argp.parse_args()\n class_id = args.class_id\n token = open(os.path.expanduser(\"~/.config/gitlab\")).read().strip()\n create_weekly_mr_stats_issue(class_id, token)\n\n\ndef ex_count_by_students(list_mr, base_url, ex_count, headers):\n exes = set()\n for mr in list_mr:\n url = base_url.format(mr)\n print(url)\n try:\n res = send_get(url, headers=headers)\n time.sleep(1)\n changes = res[\"changes\"]\n except Exception as e:\n print(e)\n continue\n\n for change in changes:\n path = change[\"old_path\"]\n try:\n ex = re.search(r\"ex\\d{1,2}\", path).group()\n except AttributeError:\n ex = \"\"\n exes.add(ex)\n for ex in exes:\n try:\n ex_count[ex] += 1\n except KeyError:\n pass\n\n return ex_count\n\n\ndef create_weekly_mr_stats_issue(class_id, token, ping_class=False):\n Student = namedtuple(\"Student\", [\"gitlabid\", \"name\", \"openMR\", \"closeMR\"])\n\n gitlab_time_format = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n start_2018 = datetime.datetime(2018, 1, 1)\n start_2018_str = datetime.datetime.strftime(start_2018, gitlab_time_format)\n\n headers = {\"Private-Token\": token, \"content-type\": \"application/json\"}\n\n base_url = \"https://gitlab.com/api/v4/\"\n group_members = base_url + \"groups/{}/members\"\n\n resp_data = send_get(\n group_members.format(class_id), headers=headers, data={\"per_page\": 100}\n )\n\n cntr = 1\n students = []\n print(\"List of {} students\".format(class_id))\n for student in resp_data:\n if isinstance(student, str):\n print(student, resp_data)\n continue\n if student[\"access_level\"] < 50:\n s = Student(student[\"username\"], student[\"name\"], 0, 0)\n print(cntr, s)\n students.append(s)\n cntr += 1\n\n # resp = requests.get('https://gitlab.com/api/v4/projects/pyfml%2Fpyfml',\n # headers=headers)\n # resp_data = resp.json()\n # the URL get from above resp\n MR_URL = \"https://gitlab.com/api/v4/projects/1591562/merge_requests\"\n\n merge_requests = []\n page = 1\n while True:\n print(\"Processing MRs page {}\".format(page))\n resp = send_get(\n MR_URL,\n headers=headers,\n data={\n \"per_page\": 100,\n \"page\": page,\n \"created_after\": start_2018_str,\n },\n )\n if not resp:\n break\n merge_requests.extend(resp)\n page = page + 1\n print(len(merge_requests))\n\n counter = {\n s.gitlabid: {\"openMR\": 0, \"closeMR\": 0, \"totalMR\": 0} for s in students\n }\n\n gitlabids = [s.gitlabid for s in students]\n\n for mr in merge_requests:\n mr_author = mr[\"author\"][\"username\"]\n if mr_author not in gitlabids:\n continue\n\n if mr[\"state\"] == \"opened\":\n counter[mr_author][\"openMR\"] += 1\n else:\n counter[mr_author][\"closeMR\"] += 1\n print(\n mr_author, mr[\"created_at\"], \"!{}\".format(mr[\"iid\"]), mr[\"state\"]\n )\n\n username_to_names = {s.gitlabid: s.name for s in students}\n\n columns = \"Rank\", \"GitLabID\", 
\"openMRs\", \"closeMRs\", \"Name\"\n table_sep = [\"---\"] * len(columns)\n\n data = [columns, table_sep]\n for rank, (username, mr_count) in enumerate(\n sorted(counter.items(), key=lambda x: x[1][\"openMR\"], reverse=True),\n start=1,\n ):\n\n user_merge_requests = sorted(\n [\n \"!{}\".format(mr[\"iid\"])\n for mr in merge_requests\n if mr[\"author\"][\"username\"] == username\n ]\n )\n arow = [\n str(rank),\n username,\n str(mr_count[\"openMR\"]),\n str(mr_count[\"closeMR\"]),\n \" \".join([username_to_names[username]] + user_merge_requests),\n ]\n data.append(arow)\n\n # Counting exercises by finding changes on every mr of every student\n ls_ex = ['ex3', 'ex35', 'ex4', 'ex5', 'ex6', 'ex69', 'ex7', 'ex8']\n ex_count = {ex: 0 for ex in ls_ex}\n base_change_url = ('https://gitlab.com/api/v4/projects/'\n '1591562/merge_requests/{}/changes')\n for username in gitlabids:\n print(username)\n user_mr = [mr[\"iid\"] for mr in merge_requests\n if mr[\"author\"][\"username\"] == username]\n ex_count_by_students(user_mr, base_change_url, ex_count, headers)\n\n data.append([\"---\"])\n data.append([\"Num\", \"Exercises\", \"Count\"])\n data.append([\"---\", \"---\", \"---\"])\n for idx, (ex, ex_count) in enumerate(ex_count.items(), start=1):\n data.append([str(idx), ex, str(ex_count)])\n\n description = \"\\n\".join([\"|\".join(row) for row in data])\n if ping_class:\n description = description + \"\\n@{}\".format(class_id)\n\n label = class_id\n ISSUE_URL = \"https://gitlab.com/api/v4/projects/1591562/issues\"\n json_data = json.dumps(\n {\n \"title\": \"[{}]: bảng tổng soát ngày {}\".format(\n label, datetime.datetime.now().strftime(\"%Y-%m-%d\")\n ),\n \"description\": description,\n # 'labels': [\"hcm1804\"] - '{\"error\":\"labels is invalid\"}'\n }\n ).encode(\"utf-8\")\n with urllib.request.urlopen(\n urllib.request.Request(ISSUE_URL, headers=headers, data=json_data)\n ) as response:\n if response.status == 201:\n payload = response.read().decode(\"utf-8\")\n created_issue = json.loads(payload)\n print(\"Created {}\".format(created_issue[\"web_url\"]))\n else:\n print(\"Failed {}\".format(payload))\n\n\ndef lambda_handler(*args):\n # For run on AWS Lambda\n import base64\n import boto3\n\n client = boto3.client(\"kms\")\n encrypted_gitlab_token = \"AQICAHhTH9iFUXSe2SisnI/nV9WRq6R8Ez2U5kEUMGVej/D3OQGWmi9PgDfzdLTopgwpUhRjAAAAcjBwBgkqhkiG9w0BBwagYzBhAgEAMFwGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQMbTM5BHfFwFimHlqaAgEQgC9m27xNr+hEEDLqtsw/4R/l/Z0MBz1hX+owrPknaH8EkuyJl5xm6rKbG1WCvjM2fQ==\" # noqa\n gitlab_token = client.decrypt(\n CiphertextBlob=base64.b64decode(encrypted_gitlab_token)\n )[\"Plaintext\"].decode(\"ascii\")\n\n class_id = os.environ[\"class_id\"]\n token = gitlab_token\n create_weekly_mr_stats_issue(class_id, token, ping_class=True)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"students_dashboard.py","file_name":"students_dashboard.py","file_ext":"py","file_size_in_byte":7574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"605788861","text":"from threading import Thread\nfrom time import sleep,ctime\n\n\nclass MyThread(Thread):\n\n\n def __init__(self,target=None,args=(),kwargs={},name=\"Thread-1\"):\n super(MyThread,self).__init__()\n self.target=target\n self.args=args\n self.kwargs=kwargs\n self.name=name\n def run(self):\n self.target(*self.args,**self.kwargs)\n\n\ndef player(sec,song):\n for i in range(2):\n print(\"Play %s:%s\"%(song,ctime()))\n\nt=MyThread(target=player,args=(3,),kwargs={'song':\"凉凉\"},\\\n 
name=\"happy\")\nt.start()\nt.join()","sub_path":"多任务编程/线程学习/myThread.py","file_name":"myThread.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"221124071","text":"import cv2\n\ncv2.namedWindow('Live Video Feed', cv2.WINDOW_NORMAL)\ncv2.resizeWindow('Live Video Feed', 640,640)\nwindowName = \"Live Video Feed\"\ncv2.namedWindow(windowName)\ncap = cv2.VideoCapture(0)\n\nif cap.isOpened():\n ret, frame = cap.read()\nelse:\n ret = False\n\nwhile ret:\n\n ret, frame = cap.read()\n output = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n cv2.namedWindow('gray', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('gray', 640,640)\n cv2.imshow('gray', output)\n\n \n cv2.namedWindow('frame', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('frame', 640,640)\n cv2.imshow(windowName, frame)\n\n if cv2.waitKey(1) == 27: # esc press\n break\n\ncv2.destroyAllWindows()\ncap.release()\n","sub_path":"prog08_Live_Webcam.py","file_name":"prog08_Live_Webcam.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"650373589","text":"import os\nimport json\nfrom datetime import (datetime, timedelta)\nimport queue\nimport threading\n\nimport tornado.web\nimport tornado.websocket\nimport tornado.gen\nfrom tornado.queues import Queue\n\nimport log_puller\nimport log_entry.parser as parser\nimport log_entry.entry as entry\nfrom detection import detect\nfrom settings import site_settings\n\nLOGS = dict()\nLAST_SESSION_ID = 0\nalert_messages = Queue()\nunique_alert_messages = set()\nwebsocket_connections = set()\n\n\nclass AlertHandler(tornado.web.RequestHandler):\n    @tornado.gen.coroutine\n    def post(self):\n        global alert_messages, unique_alert_messages\n        print(self.request.body)\n        data = self.request.body.decode()\n        data_json = json.loads(data)\n        alert_info = data_json['alerts'][0]\n        alert_label = alert_info['labels']\n        alert_msg = {}\n        alert_msg['alertname'] = alert_label['alertname']\n        alert_msg['startsAt'] = alert_info['startsAt']\n        alert_name = alert_msg['alertname']\n        print(alert_name)\n        if alert_name in unique_alert_messages:\n            self.finish()\n            return\n        unique_alert_messages.add(alert_name)\n        yield alert_messages.put(alert_msg)\n        self.finish()\n\n\ndef load_logs(data_dir):\n    logs = dict(tidb=[], tikv=[], pd=[])\n    for filename in os.listdir(data_dir):\n        path = os.path.join(data_dir, filename)\n        if os.path.isdir(path):\n            # merge the subdirectory's logs and keep scanning, instead of\n            # returning early and skipping the remaining files\n            logs = merge_logs(logs, load_logs(path))\n            continue\n        f = open(path, 'r')\n        if \"tidb\" in filename:\n            logs[\"tidb\"].append(dict(\n                name=filename.split('-')[0],\n                logs=parser.parse_text(f.read(), entry.SOURCE_TIDB)\n            ))\n        elif \"tikv\" in filename:\n            logs[\"tikv\"].append(dict(\n                name=filename.split('-')[0],\n                logs=parser.parse_text(f.read(), entry.SOURCE_TIKV)\n            ))\n        elif \"pd\" in filename:\n            logs[\"pd\"].append(dict(\n                name=filename.split('-')[0],\n                logs=parser.parse_text(f.read(), entry.SOURCE_PD)\n            ))\n    return logs\n\n\ndef merge_logs(logs, l):\n    for k in logs.keys():\n        logs[k] += l[k]\n    return logs\n\n\nclass LogPullHandler(tornado.websocket.WebSocketHandler):\n    def check_origin(self, origin):\n        return True\n\n    def on_message(self, message):\n        global LOGS, LAST_SESSION_ID\n        args = json.loads(message)\n        ring_time = datetime.strptime(args['ring_time'][:19], '%Y-%m-%d %H:%M:%S')\n        LAST_SESSION_ID += 1\n        session_id = LAST_SESSION_ID\n        data_dir = site_settings['data_path']\n        log_puller.pull()\n        logs = load_logs(data_dir)\n        
LOGS[session_id] = logs\n delta = timedelta(hours=1)\n time_start = datetime.strftime(ring_time-delta, \"%Y/%m/%d %H:%M:%S\")\n time_end = datetime.strftime(ring_time+delta, \"%Y/%m/%d %H:%M:%S\")\n length = 0\n for cluster, cluster_logs in logs.items():\n for l in cluster_logs:\n l['logs'] = list(entry.filter_log_entries(l['logs'],\n level=entry.LOG_ERROR,\n datetime=[time_start, time_end]))\n length += len(l['logs'])\n logs['time_start'] = time_start\n logs['time_end'] = time_end\n print(length)\n\n self.write_message(json.dumps(\n dict(logs=logs,\n session_id=session_id\n ))\n )\n self.close()\n\n\nclass MetricsRingHandler(tornado.websocket.WebSocketHandler):\n closed = False\n\n def check_origin(self, origin):\n return True\n\n @tornado.gen.coroutine\n def on_message(self, msg):\n global alert_messages\n while True:\n msg = yield alert_messages.get()\n if msg is None and not self.closed:\n break\n t = datetime.strptime(msg['startsAt'][:-16], '%Y-%m-%dT%H:%M:%S')\n self.write_message(json.dumps([{\n \"text\": msg['alertname'],\n \"link\": \"/log?ring_time={}\".format(t.strftime(\"%Y/%m/%d %H:%M:%S\"))\n }]))\n\n def on_close(self):\n self.closed = True\n\n\nclass LogTipsHandler(tornado.web.RequestHandler):\n def get(self):\n global LOGS\n session_id = int(self.get_query_argument('session_id'))\n logs = LOGS[session_id]\n result = detect.analyze(datetime=[logs['time_start'], logs['time_end']])\n unique = set()\n ret = []\n for r in result:\n if r[\"rewrite\"] in unique:\n continue\n unique.add(r[\"rewrite\"])\n ret.append(r)\n self.finish(json.dumps(ret))\n\n\nclass LogFilterHandler(tornado.web.RequestHandler):\n def post(self):\n global LOGS\n session_id = int(self.get_query_argument('session_id'))\n args = self.request.body.decode('utf8')\n logs = LOGS[session_id]\n for cluster, cluster_logs in logs.items():\n for l in cluster_logs:\n l['logs'] = list(entry.filter_log_entries(l['logs'],\n **json.loads(args)))\n self.finish(json.dumps(logs))\n","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"598089176","text":"def parse_input(filepath: str) -> list:\n\n with open(filepath) as f:\n data = f.read().splitlines()\n\n return data\n\n\ndef decodeseatid(code: str) -> int:\n \n c = 8 \n frow = lambda x: '1' if x == 'B' else '0'\n fcol = lambda x: '1' if x == 'R' else '0'\n\n row = code[:7:]\n rowB = int(''.join([frow(x) for x in row]), 2)\n col = code[7::]\n colB = int(''.join([fcol(x) for x in col]), 2)\n return (rowB*c) + colB\n\n# What is the highest seat ID on a boarding pass in the list\ndef part1(data: list) -> int:\n\n highest = 0\n for i in data: \n highest = max(decodeseatid(i), highest)\n return highest\n\n# Find the missing seat ID not at the missing 'ends' of the plane\ndef part2(data: list, highest: int) -> int:\n\n res = [decodeseatid(x) for x in data]\n res.sort()\n sm = res[0] # the actual starting seat \n i = highest - 2\n i -= sm # adjust the indexing\n\n while i > 0:\n if res[i - 1] != res[i] - 1: \n return res[i] - 1\n i -= 1\n\nif __name__ == \"__main__\":\n d = parse_input('aoc_input.txt')\n mx = part1(d)\n print(part2(d, mx))","sub_path":"day_five/binaryboarding.py","file_name":"binaryboarding.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"301350604","text":"\nimport base64\nfrom pathlib import Path\n\n\ndef 
highlight_max_margin(data, color='yellow'):\n '''\n highlight the maximum in a currency string Series\n '''\n attr = 'background-color: {}'.format(color)\n is_max = data == data.max()\n return [attr if v else '' for v in is_max]\n\n\ndef int_to_currency(val):\n \"\"\"\n Takes an int and cast to currency style\n \"\"\"\n val = str(val)\n _start_pos = 0\n _end_pos = len(val)%3\n if _end_pos == 0:\n _end_pos = 3\n currency = [\"$\"]\n while _end_pos <= len(val): \n currency.append(val[_start_pos:_end_pos] + \",\")\n _start_pos = _end_pos\n _end_pos += 3\n \n return \"\".join(currency)[:-1]\n\ndef currency_to_int(val):\n \"\"\"\n Convert an currency string to int\n \"\"\"\n return int(val[1:].replace(\",\",\"\"))\n\n\n\ndef img_to_bytes(img_path):\n img_bytes = Path(img_path).read_bytes()\n encoded = base64.b64encode(img_bytes).decode()\n return encoded","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"10808870","text":"from __future__ import print_function\nimport numpy as np\nimport argparse\nimport cv2\n\n# Parse command line arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"Path to the image\")\nargs = vars(ap.parse_args())\n\n# Load and display the original image\nimage = cv2.imread(args[\"image\"])\ncv2.imshow(\"Original\", image)\ncv2.waitKey(0)\n\n# OpenCV addition: values are clipped to ensure they never fall outside\n# the range [0,255]\nprint(\"Max of 255: {}\".format(cv2.add(np.uint8([200]), np.uint8([100]))))\nprint(\"Min of 0: {}\".format(cv2.subtract(np.uint8([50]), np.uint8([100]))))\n\n# NumPy addition: values wrap around (modulo arithmetic)\nprint(\"Wrap around: {}\".format(np.uint8([200]) + np.uint8([100])))\nprint(\"Wrap around: {}\".format(np.uint8([50]) - np.uint8([100])))\n\n# Now perform arithmetic on actual images\n# Add 100 to every pixel on the image; the result will look more \"washed\n# out\" than the original\nM = np.ones(image.shape, dtype=\"uint8\") * 100\nadded = cv2.add(image, M)\ncv2.imshow(\"Added\", added)\ncv2.waitKey(0)\n\n# Subtract 50 from every pixel; the result looks darker than the original\nM = np.ones(image.shape, dtype=\"uint8\") * 50\nsubtracted = cv2.subtract(image, M)\ncv2.imshow(\"Subtracted\", subtracted)\ncv2.waitKey(0)\n\n\n","sub_path":"arithmetic.py","file_name":"arithmetic.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"288661220","text":"# -*- coding: utf-8 -*-\n\"\"\"ThreatConnect TI Indicator\"\"\"\nimport json\n\ntry:\n from urllib import quote_plus # Python 2\nexcept ImportError:\n from urllib.parse import quote_plus # Python\n\nfrom tcex.tcex_ti.mappings.tcex_ti_mappings import TIMappings\n\n# import local modules for dynamic reference\nmodule = __import__(__name__)\n\n# custom_class = custom_indicator_class_factory(\n# entry.get('name'),\n# entry.get('apiEntity'),\n# entry.get('apiBranch'),\n# Indicator,\n# class_data,\n# value_fields,\n# )\n\n\ndef custom_indicator_class_factory(\n indicator_type, entity_type, branch_type, base_class, value_fields\n):\n \"\"\"Internal method for dynamically building Custom Indicator Class.\"\"\"\n value_count = len(value_fields)\n\n def init_1(self, tcex, value1, owner=None, **kwargs): # pylint: disable=W0641\n \"\"\"Init method for Custom Indicator Types with one value\n :param self:\n :param tcex:\n 
:param owner:\n :param value1:\n :param kwargs:\n \"\"\"\n base_class.__init__(self, tcex, indicator_type, entity_type, branch_type, owner, **kwargs)\n res = {v: k for k, v in self._metadata_map().items()}\n value1 = value1 or kwargs.pop(res.get(value_fields[0]), value_fields[0])\n self._data[value_fields[0]] = value1\n self.unique_id = kwargs.get('unique_id', value1)\n if self.unique_id:\n self.unique_id = quote_plus(self.fully_decode_uri(self.unique_id))\n\n def _set_unique_id_1(self, json_request):\n \"\"\"\n\n :param self:\n :param json_request:\n \"\"\"\n self.unique_id = json_request.get(value_fields[0])\n if self.unique_id:\n self.unique_id = quote_plus(self.fully_decode_uri(self.unique_id))\n\n def _metadata_map_1(self):\n metadata_map = base_class._metadata_map(self)\n for value in value_fields:\n manipulated_value = value.lower().replace(' ', '_')\n if manipulated_value not in metadata_map.keys():\n metadata_map[manipulated_value] = value\n return metadata_map\n\n def can_create_1(self): # pylint: disable=W0641\n \"\"\"\n Determines if the required data that the API endpoint is expecting is present.\n :return: Boolean\n \"\"\"\n if self.data.get(value_fields[0]):\n return True\n return False\n\n def init_2(self, tcex, value1, value2, owner=None, **kwargs): # pylint: disable=W0641\n \"\"\"Init method for Custom Indicator Types with two values.\n :param self:\n :param tcex:\n :param value1:\n :param value2:\n :param owner:\n :param kwargs:\n \"\"\"\n base_class.__init__(self, tcex, indicator_type, entity_type, branch_type, owner, **kwargs)\n res = {v: k for k, v in self._metadata_map().items()}\n value1 = value1 or kwargs.pop(res.get(value_fields[0]), value_fields[0])\n value2 = value2 or kwargs.pop(res.get(value_fields[0]), value_fields[1])\n self._data[value_fields[0]] = value1\n self._data[value_fields[1]] = value2\n if value1:\n value1 = quote_plus(self.fully_decode_uri(value1))\n if value2:\n value2 = quote_plus(self.fully_decode_uri(value2))\n self.unique_id = kwargs.get('unique_id', self.build_summary(value1, value2))\n\n def _set_unique_id_2(self, json_request):\n \"\"\"\n\n :param self:\n :param json_request:\n \"\"\"\n value_0 = json_request.get(value_fields[0], '')\n value_1 = json_request.get(value_fields[1], '')\n self.unique_id = self.build_summary(\n quote_plus(self.fully_decode_uri(value_0)) or None,\n quote_plus(self.fully_decode_uri(value_1)) or None,\n )\n\n def can_create_2(self): # pylint: disable=W0641\n \"\"\"\n Determines if the required data that the API endpoint is expecting is present.\n :return: Boolean\n \"\"\"\n if self.data.get(value_fields[0]) and self.data.get(value_fields[1]):\n return True\n return False\n\n def init_3(self, tcex, value1, value2, value3, owner=None, **kwargs): # pylint: disable=W0641\n \"\"\"Init method for Custom Indicator Types with three values.\n :param self:\n :param tcex:\n :param value1:\n :param value2:\n :param value3:\n :param kwargs:\n \"\"\"\n base_class.__init__(self, tcex, indicator_type, entity_type, branch_type, owner, **kwargs)\n res = {v: k for k, v in self._metadata_map().items()}\n value1 = value1 or kwargs.pop(res.get(value_fields[0]), value_fields[0])\n value2 = value2 or kwargs.pop(res.get(value_fields[0]), value_fields[1])\n value3 = value3 or kwargs.pop(res.get(value_fields[0]), value_fields[2])\n self._data[value_fields[0]] = value1\n self._data[value_fields[1]] = value2\n self._data[value_fields[2]] = value3\n if value1:\n value1 = quote_plus(self.fully_decode_uri(value1))\n if value2:\n value2 = 
quote_plus(self.fully_decode_uri(value2))\n if value3:\n value3 = quote_plus(self.fully_decode_uri(value3))\n self.unique_id = kwargs.get('unique_id', self.build_summary(value1, value2, value3))\n\n def _set_unique_id_3(self, json_request):\n \"\"\"\n\n :param self:\n :param json_request:\n \"\"\"\n value_0 = json_request.get(value_fields[0], '')\n value_1 = json_request.get(value_fields[1], '')\n value_2 = json_request.get(value_fields[2], '')\n self.unique_id = self.build_summary(\n quote_plus(self.fully_decode_uri(value_0)) or None,\n quote_plus(self.fully_decode_uri(value_1)) or None,\n quote_plus(self.fully_decode_uri(value_2)) or None,\n )\n\n def can_create_3(self): # pylint: disable=W0641\n \"\"\"\n Determines if the required data that the API endpoint is expecting is present.\n :return: Boolean\n \"\"\"\n if (\n self.data.get(value_fields[0])\n and self.data.get(value_fields[1])\n and self.data.get(value_fields[2])\n ):\n return True\n return False\n\n class_name = indicator_type.replace(' ', '')\n init_method = locals()['init_{}'.format(value_count)]\n set_unique_id_method = locals()['_set_unique_id_{}'.format(value_count)]\n can_create_method = locals()['can_create_{}'.format(value_count)]\n _metadata_map = locals()['_metadata_map_1']\n new_class = type(\n str(class_name),\n (base_class,),\n {\n '__init__': init_method,\n '_set_unique_id': set_unique_id_method,\n 'can_create': can_create_method,\n '_metadata_map': _metadata_map,\n },\n )\n return new_class\n\n\nclass Indicator(TIMappings):\n \"\"\"Unique API calls for Indicator API Endpoints\"\"\"\n\n def __init__(self, tcex, sub_type, api_entity, api_branch, owner, **kwargs):\n super(Indicator, self).__init__(\n tcex, 'Indicator', 'indicators', sub_type, api_entity, api_branch, owner\n )\n\n for arg, value in kwargs.items():\n self.add_key_value(arg, value)\n\n @staticmethod\n def is_indicator():\n return True\n\n @property\n def owner(self):\n return self._owner\n\n def can_create(self):\n \"\"\"\n Overridden by other indicator classes.\n\n Returns:\n\n \"\"\"\n return True\n\n def _metadata_map(self): # pylint: disable=R0201\n return {\n 'date_added': 'dateAdded',\n 'dns_active': 'dnsActive',\n 'last_modified': 'lastModified',\n 'private_flag': 'privateFlag',\n 'whois_active': 'whoisActive',\n 'key_name': 'Key Name',\n 'value_type': 'Value Type',\n 'value_name': 'Value Name',\n 'block': 'Block',\n 'mutex': 'Mutex',\n 'as_number': 'AS Number',\n 'hostname': 'hostName',\n }\n\n def add_key_value(self, key, value):\n \"\"\"\n Converts the value and adds it as a data field.\n\n Args:\n key:\n value:\n \"\"\"\n key = self._metadata_map().get(key, key)\n if key in ['dateAdded', 'lastModified']:\n self._data[key] = self._utils.format_datetime(value, date_format='%Y-%m-%dT%H:%M:%SZ')\n elif key == 'confidence':\n self._data[key] = int(value)\n elif key == 'rating':\n self._data[key] = float(value)\n elif key == 'unique_id':\n self._unique_id = quote_plus(self.fully_decode_uri(value))\n else:\n self._data[key] = value\n\n def status(self, status=None, cal_status=None):\n \"\"\"\n Updates the Indicators status\n Args:\n status: Valid values to set to active are ['active', '2', '1' ] while\n ['inactive', '-2', '-1', 0] will set it to inactive\n cal_status: Valid values to set to locked are ['locked', 'lock', '1' ] while\n ['unlock', 'unlocked', '0'] will set it to inactive\n\n Returns:\n\n \"\"\"\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n if not status and not cal_status:\n return None\n request_data = 
{}\n if status:\n status = str(status)\n if status.lower() in ['active', '2', '1']:\n request_data['active'] = 2\n elif status.lower() in ['inactive', '-2', '-1', '0']:\n request_data['active'] = -2\n if cal_status:\n cal_status = str(cal_status)\n if cal_status.lower() in ['locked', 'lock', '1']:\n request_data['activeLocked'] = 1\n elif cal_status.lower() in ['unlock', 'unlocked', '0']:\n request_data['activeLocked'] = 0\n return self.tc_requests.update(\n self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner\n )\n\n def rating(self, value):\n \"\"\"\n Updates the Indicator's rating\n\n Args:\n value:\n \"\"\"\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n request_data = {'rating': value}\n return self.tc_requests.update(\n self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner\n )\n\n def confidence(self, value):\n \"\"\"\n Updates the Indicator's confidence\n\n Args:\n value:\n \"\"\"\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n request_data = {'confidence': value}\n return self.tc_requests.update(\n self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner\n )\n\n def owners(self):\n \"\"\"\n Gets the indicator's owners.\n\n :return:\n \"\"\"\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n return self.tc_requests.owners(\n self.api_type, self.api_branch, self.unique_id, owner=self.owner\n )\n\n def add_observers(self, count, date_observed):\n \"\"\"\n Adds an Indicator Observation\n\n Args:\n count:\n date_observed:\n\n \"\"\"\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n\n data = {\n 'count': count,\n 'dateObserved': self._utils.format_datetime(\n date_observed, date_format='%Y-%m-%dT%H:%M:%SZ'\n ),\n }\n\n return self.tc_requests.add_observations(\n self.api_type, self.api_branch, self.unique_id, data, owner=self.owner\n )\n\n def observation_count(self):\n \"\"\"\n Gets the indicator's observation count.\n\n Returns:\n\n \"\"\"\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n return self.tc_requests.observation_count(\n self.api_type, self.api_branch, self.unique_id, owner=self.owner\n )\n\n def add_false_positive(self):\n \"\"\"\n Adds an Indicator FalsePositive\n \"\"\"\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n\n return self.tc_requests.add_false_positive(\n self.api_type, self.api_branch, self.unique_id, owner=self.owner\n )\n\n def observations(self):\n \"\"\"\n Gets the indicator's observations.\n\n Returns:\n\n \"\"\"\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n return self.tc_requests.observations(\n self.api_type, self.api_branch, self.unique_id, owner=self.owner\n )\n\n def deleted(self, deleted_since, filters=None, params=None):\n \"\"\"\n Gets the indicators deleted since a given date.\n\n Args:\n params:\n filters:\n deleted_since: Date since they have been deleted\n\n \"\"\"\n\n return self.tc_requests.deleted(\n self.api_type,\n self.api_branch,\n deleted_since,\n owner=self.owner,\n filters=filters,\n params=params,\n )\n\n @staticmethod\n def build_summary(val1=None, val2=None, val3=None):\n \"\"\"\n Constructs the summary given val1, val2, val3\n\n Args:\n val1:\n val2:\n val3:\n\n Returns:\n\n \"\"\"\n summary = []\n if val1 is not None:\n summary.append(val1)\n if val2 is not None:\n summary.append(val2)\n if val3 is not None:\n summary.append(val3)\n if not summary:\n return None\n return ' : '.join(summary)\n\n def __str__(self):\n \"\"\"Return string representation of object\"\"\"\n return json.dumps(self._data, indent=4)\n","sub_path":"tcex/tcex_ti/mappings/indicator/tcex_ti_indicator.py","file_name":"tcex_ti_indicator.py","file_ext":"py","file_size_in_byte":13782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +
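A minimal standalone sketch of what build_summary() above produces for multi-value indicators (illustrative only, not a tcex API call; the callers quote_plus-encode each value before it is joined):

def build_summary(val1=None, val2=None, val3=None):
    # keep only the values that were supplied, in order
    parts = [v for v in (val1, val2, val3) if v is not None]
    return ' : '.join(parts) if parts else None

assert build_summary('1.2.3.4', '80') == '1.2.3.4 : 80'
assert build_summary() is None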
{"seq_id":"409222208","text":"import os\nimport random\nfrom tqdm import tqdm\nfrom gensim.corpora import WikiCorpus\n\nif __name__ == '__main__':\n \n if False:\n inp = \"/corpus/enwiki-latest-pages-articles.xml.bz2\"\n i = 0\n output_file = \"/corpus/wiki_englist_%07d.txt\" % i\n output = open(output_file, 'w', encoding=\"utf-8\")\n wiki = WikiCorpus(inp, lemmatize=False, dictionary={})\n for text in wiki.get_texts():\n output.write(\" \".join(text) + \"\\n\")\n i = i + 1\n if (i % 10000 == 0):\n output.close()\n\n output_file = \"/corpus/wiki_englist_%07d.txt\" % i\n output = open(output_file, 'w', encoding=\"utf-8\")\n print(\"Save \" + str(i) + \" articles\")\n output.close()\n \n\n output_dir = '/corpus/wiki/'\n wiki_train_raw = \"wiki.train.raw\"\n wiki_val_raw = \"wiki.valid.raw\"\n wiki_test_raw = \"wiki.test.raw\"\n \n with open(os.path.join(output_dir, wiki_train_raw), 'w') as fout_wiki_train:\n with open(os.path.join(output_dir, wiki_val_raw), 'w') as fout_wiki_val:\n with open(os.path.join(output_dir, wiki_test_raw), 'w') as fout_wiki_test:\n \n fileList = os.listdir('/corpus/')\n for file in tqdm(fileList):\n res = file.split('.')\n if len(res) == 2:\n name, ext = res\n else:\n continue\n if ext == 'txt' and name.startswith('wiki'):\n # print(file)\n with open(os.path.join('/corpus/', file)) as fin:\n lines = fin.readlines()\n # print(len(lines))\n for line in lines:\n i = random.randint(1, 1000)\n if i >= 1 and i <= 800:\n fout_wiki_train.write(line)\n # fout_wiki_train.write('\\n')\n elif i > 800 and i <= 900:\n fout_wiki_val.write(line)\n # fout_wiki_val.write('\\n')\n else:\n fout_wiki_test.write(line)\n # fout_wiki_test.write('\\n')\n\n\n","sub_path":"utils/process_wiki.py","file_name":"process_wiki.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +
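process_wiki.py above assigns each line a random integer from 1 to 1000, so the train/valid/test split is roughly 80/10/10 in expectation rather than exact. A self-contained sketch of that splitting rule (synthetic data, fixed seed; the counts are approximate by design):

import random

random.seed(0)
counts = {'train': 0, 'valid': 0, 'test': 0}
for _ in range(100000):
    r = random.randint(1, 1000)
    if r <= 800:
        counts['train'] += 1
    elif r <= 900:
        counts['valid'] += 1
    else:
        counts['test'] += 1
print(counts)  # roughly {'train': 80000, 'valid': 10000, 'test': 10000}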
{"seq_id":"11649106","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport pwd\nimport grp\nimport os\n\ndef get_username_from_uid(uid):\n username = None\n try:\n username = pwd.getpwuid(uid).pw_name\n return username\n except KeyError:\n return username\n\ndef get_groupname_from_gid(gid):\n groupname = None\n try:\n groupname = grp.getgrgid(gid).gr_name\n return groupname\n except KeyError:\n return groupname\n\ndef get_uid_from_username(username):\n uid = None\n try:\n uid = pwd.getpwnam(username).pw_uid\n return int(uid)\n except KeyError:\n return uid\n\ndef get_gid_from_groupname(groupname):\n gid = None\n try:\n gid = grp.getgrnam(groupname).gr_gid\n return int(gid)\n except KeyError:\n return gid\n\nif get_username_from_uid(503) != None:\n if get_username_from_uid(503) != 'teamcity':\n username_uid_503 = get_username_from_uid(503)\n for new_uid in range(600,610):\n if get_username_from_uid(new_uid) == None:\n os.system(\"sudo usermod -u {0} {1}\".format(new_uid, username_uid_503))\n break\n\nif get_groupname_from_gid(503) != None:\n if get_groupname_from_gid(503) != 'teamcity':\n groupname_gid_503 = get_groupname_from_gid(503)\n for new_gid in range(600,610):\n if get_groupname_from_gid(new_gid) == None:\n os.system(\"sudo groupmod -g {0} {1}\".format(new_gid, groupname_gid_503))\n break\n\n# add user teamcity\ntry:\n os.system('useradd -u 503 -g 503 teamcity')\nexcept KeyError:\n print('user teamcity exists')\n\nfor home_dir in os.listdir('/home'):\n print(os.path.join('/home', home_dir))\n print(get_uid_from_username(home_dir), get_gid_from_groupname(home_dir))\n if get_uid_from_username(home_dir) != None or get_gid_from_groupname(home_dir) != None:\n os.chown(os.path.join('/home', home_dir), get_uid_from_username(home_dir), get_gid_from_groupname(home_dir))\n for dirpath, dirnames, filenames in os.walk(os.path.join('/home', home_dir), followlinks=False):\n for dirname in dirnames:\n # print(dirname)\n # print(get_uid_from_username(home_dir), get_gid_from_groupname(home_dir))\n if get_uid_from_username(home_dir) != None or get_gid_from_groupname(home_dir) != None:\n os.chown(os.path.join(dirpath, dirname), get_uid_from_username(home_dir), get_gid_from_groupname(home_dir))\n for filename in filenames:\n # print(filename)\n # print(get_uid_from_username(home_dir), get_gid_from_groupname(home_dir))\n if get_uid_from_username(home_dir) != None or get_gid_from_groupname(home_dir) != None:\n os.chown(os.path.join(dirpath, filename), get_uid_from_username(home_dir), get_gid_from_groupname(home_dir))\n\n","sub_path":"fix_teamcity_uid_gid.py","file_name":"fix_teamcity_uid_gid.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"116233497","text":"from psycopg2 import connect, Error\nfrom logger import escribir_al_log\n\nclass ConexionPG:\n\n db = None\n cursor = None\n\n def __init__(self, **parametros):\n try:\n self.db = connect(\n host=parametros['direccion_servidor'],\n user=parametros['usuario'],\n password=parametros['contrasena'],\n database=parametros['base_datos']\n )\n self.cursor = self.db.cursor()\n except Error as e:\n escribir_al_log(e, \"An error occurred while connecting to the database\")\n \n def _ejecutar_sql(\n self, sentencia_sql, parametros=None,\n escribir_en_bd=True\n ):\n try:\n self.cursor.execute(sentencia_sql, parametros)\n if escribir_en_bd:\n self.db.commit()\n except Exception as e:\n escribir_al_log(e, f\"An error occurred while executing the SQL statement:\\n\\n{sentencia_sql}\\n\")\n if escribir_en_bd:\n self.db.rollback()\n\n def _leer_desde_sql(self):\n registros = []\n try:\n registros = self.cursor.fetchall()\n except Exception as e:\n escribir_al_log(e, 'An error occurred while reading from the database')\n return registros\n \n\n def insertar_productos(self, nombre, descripcion, precio, cantidad):\n self._ejecutar_sql(\n \"INSERT INTO productos (nombre, descripcion, precio, cantidad) VALUES (%s, %s, %s, %s)\",\n (nombre, descripcion, precio, cantidad)\n )\n\n def modificar_productos(self, nombre, descripcion, precio, cantidad, id_prod):\n self._ejecutar_sql(\n \"UPDATE productos SET nombre=%s, descripcion=%s, precio=%s, cantidad=%s WHERE id_prod=%s\",\n (nombre, descripcion, precio, cantidad, id_prod)\n )\n\n def eliminar_productos(self, id_prod):\n self._ejecutar_sql(\n \"DELETE FROM productos WHERE id_prod = %s\",\n (id_prod,)\n )\n\n def mostrar_factura(self):\n self._ejecutar_sql(\n \"SELECT * FROM factura\"\n )","sub_path":"conexion.py","file_name":"conexion.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +
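ConexionPG above routes every statement through _ejecutar_sql, which hands the values to psycopg2 as bound parameters (%s placeholders) instead of interpolating them into the SQL string, and commits or rolls back around each write. A hypothetical usage sketch (connection details invented):

db = ConexionPG(direccion_servidor='localhost', usuario='app',
                contrasena='secret', base_datos='tienda')
db.insertar_productos('Coffee', 'Ground, 500 g', 12.50, 10)
db.modificar_productos('Coffee', 'Ground, 1 kg', 21.00, 4, 1)
db.eliminar_productos(1)

{"seq_id":"551097944","text":"from wired_injector.pipeline import Pipeline\nfrom wired_injector.pipeline import Operator, Result\nfrom wired_injector.pipeline.operators import 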
Get\nfrom wired_injector.pipeline.results import (\n Error,\n Found,\n NotFound,\n)\n\nfrom .conftest import (\n DummyContainer,\n DummyLookupClass,\n DummyLookupProtocol,\n)\n\n\ndef test_get_setup() -> None:\n # Ensure it meets the protocol\n meets_protocol: Operator = Get(DummyLookupClass)\n assert meets_protocol\n\n # Do we store the right things?\n get = Get(DummyLookupClass)\n assert DummyLookupClass == get.lookup_key\n assert None is get.attr\n\n\ndef test_get_setup_attr() -> None:\n get = Get(DummyLookupClass, attr='title')\n assert 'title' == get.attr\n\n\ndef test_get_class(\n dummy_container: DummyContainer,\n dummy_pipeline: Pipeline,\n) -> None:\n # Set the lookup value to use a value that is a class, to simulate\n # injection.\n dummy_container.fake_lookups[DummyLookupClass] = DummyLookupClass\n\n get = Get(DummyLookupClass)\n result: Result = get(\n previous=None,\n pipeline=dummy_pipeline,\n )\n assert isinstance(result, Found)\n assert result.value == DummyLookupClass\n\n\ndef test_get_class_instance(\n dummy_container: DummyContainer,\n dummy_pipeline: Pipeline,\n) -> None:\n # Set the lookup value to use a value that is an instance\n dummy_container.fake_lookups[DummyLookupClass] = DummyLookupClass()\n\n get = Get(DummyLookupClass)\n result: Result = get(\n previous=None,\n pipeline=dummy_pipeline,\n )\n assert isinstance(result, Found)\n assert isinstance(result.value, DummyLookupClass)\n\n\ndef test_get_protocol(\n dummy_container: DummyContainer,\n dummy_pipeline: Pipeline,\n) -> None:\n # Lookup up a protocol, not a class\n\n # Set the lookup value\n dummy_container.fake_lookups[DummyLookupProtocol] = DummyLookupClass()\n\n get = Get(DummyLookupProtocol)\n result: Result = get(\n previous=None,\n pipeline=dummy_pipeline,\n )\n assert isinstance(result, Found)\n assert isinstance(result.value, DummyLookupClass)\n\n\ndef test_get_none(\n dummy_container: DummyContainer,\n dummy_pipeline: Pipeline,\n) -> None:\n # Lookup fails because nothing is in the container.\n\n get = Get(DummyLookupClass)\n result: Result = get(\n previous=None,\n pipeline=dummy_pipeline,\n )\n assert isinstance(result, NotFound)\n assert result.msg == \"No service 'DummyLookupClass' found in container\"\n assert result.value == Get\n\n\ndef test_get_attr(\n dummy_container: DummyContainer,\n dummy_pipeline: Pipeline,\n) -> None:\n # Set the lookup value\n dummy_container.fake_lookups[DummyLookupClass] = DummyLookupClass()\n\n get = Get(DummyLookupClass, attr='title')\n result: Result = get(\n previous=None,\n pipeline=dummy_pipeline,\n )\n assert isinstance(result, Found)\n assert 'Dummy Lookup Class' == result.value\n\n\ndef test_get_error_str(\n dummy_container: DummyContainer,\n dummy_pipeline: Pipeline,\n) -> None:\n # Try to do a container lookup on a string instead of a class\n\n get = Get('WRONG')\n result: Result = get(\n previous=None,\n pipeline=dummy_pipeline,\n )\n assert isinstance(result, Error)\n expected = \"Cannot use a string 'WRONG' as container lookup value\"\n assert result.msg == expected\n assert result.value == Get\n","sub_path":"tests/pipeline/test_operators_get.py","file_name":"test_operators_get.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"205299617","text":"import AlphBeta\nimport Eval\nMAX_D = AlphBeta.MAX_D\n\n\n\nclass Player():\n def __init__(self,pieces):\n if pieces[0].color == 'W':\n self.player = 0\n else:\n self.player = 1\n self.pieces = pieces\n\n def 
GetMove(self,BOARD,p_t):\n move = AlphBeta.alphabeta(BOARD,MAX_D,-10000,10000,self.player,self.player,Eval.NoAggEval,p_t)\n return (move[0],(move[1],move[2]))\n","sub_path":"NoAggPlayer.py","file_name":"NoAggPlayer.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"112573038","text":"import pygame as p3\nimport os\nimport time\nimport tkinter as tk\n\n#Variables Being Set\nrunning = 1\nwallpaper_num = 1\ntyping_num = 1\nnotepad_text = \"\"\nmini_terminal = 0\nlauncheropen = 0\nnotepadopen = 0\ndefragopen = 0\nlan_chatopen = 0\nmusicopen = 0\nsettingsopen = 0\nterminalopen = 0\n\nnotepad_maximised = 0\ndefrag_maximised = 0\nlan_chat_maximised = 0\nmusic_maximised = 0\nsettings_maximised = 0\nterminal_maximised = 0\ntempvar = 0\ntaskbar_slot_1 = 0\ntaskbar_slot_2 = 0\ntaskbar_slot_3 = 0\ntaskbar_slot_4 = 0\ntaskbar_slot_5 = 0\n\n#test\n\n#Initialising the Pygame\np3.init()\np3.display.set_caption(\"P3tray.OS\")\nscreen = p3.display.set_mode((800,480), p3.FULLSCREEN)\nscreen.fill((255,255,255))\nlogo = p3.image.load(\"images/logo.ico\").convert()\ngeneral_font = p3.font.Font(\"images/ascii/font.ttf\", 20)\nsystem_font = p3.font.Font(\"images/ascii/font.ttf\", 20)\nnotepad_font = p3.font.Font(\"images/ascii/font.ttf\", 20)\np3.display.set_icon(logo)\n\n#Music\np3_mixer = p3.mixer.music\np3_mixer.load(\"sounds/winxp.ogg\")\np3_mixer.play()\n\n\n\n#Image loading\n#desktop\nsafe_mode = p3.image.load(\"images/safe_mode.png\").convert()\nwallpaper = p3.image.load(\"images/wallpaper.png\").convert()\nwallpaper_2 = p3.image.load(\"images/wallpaper_2.png\").convert()\ntaskbar = p3.image.load(\"images/taskbar.png\").convert()\nbar = p3.image.load(\"images/bar.png\").convert()\nlauncher = p3.image.load(\"images/launchermenu.png\").convert()\nappback = p3.image.load(\"images/app/appback.png\").convert()\n#Buttons\ntribar_button = p3.image.load(\"images/buttons/tribar.png\").convert()\nsettings_button = p3.image.load(\"images/buttons/settings.png\").convert()\nbutton_bar = p3.image.load(\"images/buttons/button_bar.png\").convert()\nclose_button = p3.image.load(\"images/buttons/close_button.png\").convert()\nminimise_button = p3.image.load(\"images/buttons/minimise_button.png\").convert()\n#Application_buttons\n#File, Edit\nfile_button = p3.image.load(\"images/app/file_button.png\").convert()\nedit_button = p3.image.load(\"images/app/edit_button.png\").convert()\n#Random\nup_button = p3.image.load(\"images/buttons/up_button.png\").convert()\ndown_button = p3.image.load(\"images/buttons/down_button.png\").convert()\n#Easter Eggs\nbig_chungus = p3.image.load(\"images/easter_eggs/big_chungus.jpg\").convert()\n#Applications\n#Launcher and Taskbar\nlauncher_notepad = p3.image.load(\"images/notepad/icon.png\").convert()\ntaskbar_notepad = p3.image.load(\"images/notepad/icon.png\").convert()\nlauncher_defrag = p3.image.load(\"images/defrag/icon.png\").convert()\ntaskbar_defrag = p3.image.load(\"images/defrag/icon.png\").convert()\nlauncher_lan_chat = p3.image.load(\"images/lan_chat/icon.png\").convert()\ntaskbar_lan_chat = p3.image.load(\"images/lan_chat/icon.png\").convert()\nlauncher_music = p3.image.load(\"images/music/icon.png\").convert()\ntaskbar_music = p3.image.load(\"images/music/icon.png\").convert()\nlauncher_terminal = p3.image.load(\"images/terminal/icon.png\").convert()\ntaskbar_terminal = p3.image.load(\"images/terminal/icon.png\").convert()\n#Defrag\ndefrag_back = 
p3.image.load(\"images/defrag/defrag_back.png\").convert()\ndefrag_button = p3.image.load(\"images/defrag/defrag_button.png\").convert()\n#LAN Chat\nunavailable = p3.image.load(\"images/lan_chat/unavailable.png\").convert()\n#Music\nmegalovania_button = p3.image.load(\"images/music/megalovania_button.png\").convert()\nmegalovania_play_button = p3.image.load(\"images/music/megalovania_play_button.png\").convert()\nfloral_shoppe_button = p3.image.load(\"images/music/floral_shoppe_button.png\").convert()\nfloral_shoppe_play_button = p3.image.load(\"images/music/floral_shoppe_play_button.png\").convert()\n#Settings\nwallpaper_statement = p3.image.load(\"images/settings/wallpaper_statement.png\").convert()\n#Terminal Emulator\nterminal_back = p3.image.load(\"images/terminal/terminal_back.png\").convert()\n\n\nlauncherrect = bar.get_rect()\nlauncherrect.x = 200\nlauncherrect.y = 0\nlauncherrect.width = 400\nlauncherrectclose = bar.get_rect()\nlauncherrectclose.x = 1200\nlauncherrectclose.y = 0\nlauncherrectclose.width = 400\n\nlauncher_notepadrect = taskbar_notepad.get_rect()\nlauncher_notepadrect.x = 1200\nlauncher_notepadrect.y = 70\n\ntaskbar_notepadrect = taskbar_notepad.get_rect()\ntaskbar_notepadrect.x = 1010\ntaskbar_notepadrect.y = 40\n\nlauncher_defragrect = launcher_defrag.get_rect()\nlauncher_defragrect.x = 1260\nlauncher_defragrect.y = 70\n\ntaskbar_defragrect = taskbar_defrag.get_rect()\ntaskbar_defragrect.x = 1010\ntaskbar_defragrect.y = 100\n\nlauncher_lan_chatrect = launcher_lan_chat.get_rect()\nlauncher_lan_chatrect.x = 1320\nlauncher_lan_chatrect.y = 70\n\ntaskbar_lan_chatrect = taskbar_lan_chat.get_rect()\ntaskbar_lan_chatrect.x = 1010\ntaskbar_lan_chatrect.y = 160\n\nlauncher_musicrect = launcher_music.get_rect()\nlauncher_musicrect.x = 1380\nlauncher_musicrect.y = 70\n\ntaskbar_musicrect = taskbar_music.get_rect()\ntaskbar_musicrect.x = 1010\ntaskbar_musicrect.y = 220\n\nlauncher_terminalrect = launcher_terminal.get_rect()\nlauncher_terminalrect.x = 1440\nlauncher_terminalrect.y = 70\n\ntaskbar_terminalrect = taskbar_terminal.get_rect()\ntaskbar_terminalrect.x = 1010\ntaskbar_terminalrect.y = 280\n\ntribar_buttonrect = tribar_button.get_rect()\ntribar_buttonrect.x = 780\ntribar_buttonrect.y = 5\ntribar_button_closerect = tribar_button.get_rect()\ntribar_button_closerect.x = 1780\ntribar_button_closerect.y = 5\n\nsettings_buttonrect = settings_button.get_rect()\nsettings_buttonrect.x = 755\nsettings_buttonrect.y = 5\nsettings_button_closerect = settings_button.get_rect()\nsettings_button_closerect.x = 1755\nsettings_button_closerect.y = 5\n\nfile_buttonrect = file_button.get_rect()\nfile_buttonrect.x = 10\n\nclose_buttonrect = close_button.get_rect()\nclose_buttonrect.x = 1745\nclose_buttonrect.y = 35\nminimise_buttonrect = minimise_button.get_rect()\nminimise_buttonrect.x = 1775\nminimise_buttonrect.y = 35\n\nup_buttonrect = up_button.get_rect()\nup_buttonrect.x = 1180\nup_buttonrect.y = 60\ndown_buttonrect = down_button.get_rect()\ndown_buttonrect.x = 1180\ndown_buttonrect.y = 80\n\nmegalovania_buttonrect = megalovania_button.get_rect()\nmegalovania_buttonrect.x = 1100\nmegalovania_buttonrect.y = 100\n\nmegalovania_play_buttonrect = megalovania_play_button.get_rect()\nmegalovania_play_buttonrect.x = 1500\nmegalovania_play_buttonrect.y = 100\n\nfloral_shoppe_buttonrect = floral_shoppe_button.get_rect()\nfloral_shoppe_buttonrect.x = 1100\nfloral_shoppe_buttonrect.y = 200\n\nfloral_shoppe_play_buttonrect = floral_shoppe_play_button.get_rect()\nfloral_shoppe_play_buttonrect.x = 
1500\nfloral_shoppe_play_buttonrect.y = 200\n\n\n#Rectangle Drawing\n\n \ndef rectchange():\n launcher_notepadrect.x = 1300\n launcher_defragrect.x = 1360\n launcher_lan_chatrect.x = 1420\ndef fix_buttonbar():\n close_buttonrect.x = 745\n minimise_buttonrect.x = 775\n\n#Applications, notepad, paint...\ndef terminal():\n if terminal_maximised == 1:\n screen.blit(appback, (70,30))\n screen.blit(button_bar, (720, 30))\n screen.blit(close_button, (745, 35))\n screen.blit(minimise_button, (775, 35))\n screen.blit(terminal_back, (70, 30))\n\n fix_buttonbar()\n if taskbar_slot_5 == \"terminal\":\n screen.blit(taskbar_terminal, (10, 280))\n taskbar_terminalrect.x = 10\n \ndef settings():\n screen.blit(appback, (70,30))\n screen.blit(wallpaper_statement, (100, 60))\n screen.blit(down_button, (180,80))\n screen.blit(up_button, (180,60))\n p3.display.update()\n\ndef music():\n if music_maximised == 1:\n screen.blit(appback, (70,30))\n screen.blit(button_bar, (720, 30))\n screen.blit(close_button, (745, 35))\n screen.blit(minimise_button, (775, 35))\n screen.blit(megalovania_button, (100, 100))\n screen.blit(megalovania_play_button, (600, 100))\n screen.blit(floral_shoppe_button, (100, 200))\n screen.blit(floral_shoppe_play_button, (600, 200))\n\n megalovania_buttonrect.x = 100\n megalovania_play_buttonrect.x = 600\n floral_shoppe_buttonrect.x = 100\n floral_shoppe_play_buttonrect.x = 600\n fix_buttonbar()\n if taskbar_slot_4 == \"music\":\n screen.blit(taskbar_music, (10, 220))\n taskbar_musicrect.x = 10\n\ndef lan_chat():\n if lan_chat_maximised == 1:\n screen.blit(appback, (70,30))\n screen.blit(unavailable, (70, 30))\n screen.blit(button_bar, (720, 30))\n screen.blit(close_button, (745, 35))\n screen.blit(minimise_button, (775, 35))\n fix_buttonbar()\n if taskbar_slot_3 == \"lan_chat\":\n screen.blit(taskbar_lan_chat, (10,160))\n taskbar_lan_chatrect.x = 10\n \ndef defrag():\n if defrag_maximised == 1:\n screen.blit(appback, (70,30))\n screen.blit(defrag_back, (70, 30))\n screen.blit(defrag_button, (175, 475))\n screen.blit(button_bar, (720, 30))\n screen.blit(close_button, (745, 35))\n screen.blit(minimise_button, (775, 35))\n fix_buttonbar()\n if taskbar_slot_2 == \"defrag\":\n screen.blit(taskbar_defrag, (10,100))\n taskbar_defragrect.x = 10\ndef notepad():\n if notepad_maximised == 1:\n screen.blit(appback, (70,30))\n screen.blit(button_bar, (720, 30))\n screen.blit(close_button, (745, 35))\n screen.blit(minimise_button, (775, 35))\n fix_buttonbar()\n notepad_blit_text = notepad_font.render(notepad_text, True, [255,255,255], [195,195,195])\n screen.blit(notepad_blit_text, (100,70))\n if taskbar_slot_1 == \"notepad\":\n screen.blit(taskbar_notepad, (10,40))\n taskbar_notepadrect.x = 10\n \n#Desktop, Launcher and Taskbar\n \ndef update_screen():\n if wallpaper_num == 1:\n screen.blit(wallpaper, (0,0))\n if wallpaper_num == 2:\n screen.blit(wallpaper_2, (0,0))\n screen.blit(taskbar, (0,0))\n screen.blit(bar, (0,0))\n screen.blit(settings_button, (755,5))\n screen.blit(tribar_button, (780,5))\n screen.blit(file_button, (15,5))\n screen.blit(edit_button, (85,5))\n if settingsopen == 1:\n settings()\n if notepadopen == 1:\n notepad()\n if defragopen == 1:\n defrag()\n if lan_chatopen == 1:\n lan_chat()\n if musicopen == 1:\n music()\n if terminalopen == 1:\n terminal()\n if launcheropen == 1:\n open_launcher()\n if launcheropen == 0:\n rectchange()\n if notepad_text == \"big chungus\":\n screen.blit(big_chungus, (0,0))\n screen.blit(safe_mode, (720,440))\n 
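# final step of update_screen(): push the frame composed above (wallpaper,\n # taskbar, any open application windows, launcher) to the display\n 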
p3.display.update()\n\n\n#END OF UPDATE SCREEN\n\n \ndef open_launcher():\n screen.blit(launcher, (150, 30))\n screen.blit(launcher_notepad, (200, 70))\n screen.blit(launcher_defrag, (260, 70))\n screen.blit(launcher_lan_chat, (320, 70))\n screen.blit(launcher_music, (380, 70))\n screen.blit(launcher_terminal, (440, 70))\n launcher_notepadrect.x = 200\n launcher_defragrect.x = 260\n launcher_lan_chatrect.x = 320\n launcher_musicrect.x = 380\n launcher_terminalrect.x = 440\n\n\n\n\n\n#Running\n\n\n\n\n \n\nwhile running == 1:\n for event in p3.event.get():\n if event.type == p3.QUIT:\n running = False\n \n if event.type == p3.MOUSEBUTTONDOWN:\n x_click, y_click = event.pos\n\n if launcherrect.collidepoint(x_click, y_click):\n x_click, y_click = 1, 1\n launcheropen = 1\n launcherrect.x = 1000\n launcherrectclose.x = 200\n \n if launcherrectclose.collidepoint(x_click, y_click):\n x_click, y_click = 1, 1\n launcheropen = 0\n launcherrect.x = 200\n launcherrectclose.x = 1000\n\n if launcher_notepadrect.collidepoint(x_click, y_click):\n x_click, y_click = 1, 1\n notepadopen = 1\n notepad_maximised = 1\n taskbar_slot_1 = \"notepad\"\n launcheropen = 0\n if launcher_defragrect.collidepoint(x_click, y_click):\n x_click, y_click = 1, 1\n defragopen = 1\n defrag_maximised = 1\n taskbar_slot_2 = \"defrag\"\n launcheropen = 0\n if launcher_lan_chatrect.collidepoint(x_click, y_click):\n x_click, y_click = 1, 1\n lan_chatopen = 1\n lan_chat_maximised = 1\n taskbar_slot_3 = \"lan_chat\"\n launcheropen = 0\n if launcher_musicrect.collidepoint(x_click, y_click):\n x_click, y_click = 1, 1\n musicopen = 1\n music_maximised = 1\n taskbar_slot_4 = \"music\"\n launcheropen = 0\n if launcher_terminalrect.collidepoint(x_click, y_click):\n x_click, y_click = 1, 1\n terminalopen = 1\n terminal_maximised = 1\n taskbar_slot_5 = \"terminal\"\n launcheropen = 0\n if musicopen == 1:\n if music_maximised == 1:\n if megalovania_buttonrect.collidepoint(x_click, y_click):\n p3_mixer.load(\"sounds/megalovania.ogg\")\n p3_mixer.play()\n if megalovania_play_buttonrect.collidepoint(x_click, y_click):\n p3_mixer.stop()\n \n if floral_shoppe_buttonrect.collidepoint(x_click, y_click):\n p3_mixer.load(\"sounds/floral_shoppe.ogg\")\n p3_mixer.play()\n if floral_shoppe_play_buttonrect.collidepoint(x_click, y_click):\n p3_mixer.stop()\n \n \n\n if taskbar_notepadrect.collidepoint(x_click, y_click):\n notepad_maximised = 1\n if taskbar_defragrect.collidepoint(x_click, y_click):\n defrag_maximised = 1\n if taskbar_lan_chatrect.collidepoint(x_click, y_click):\n lan_chat_maximised = 1\n if taskbar_musicrect.collidepoint(x_click, y_click):\n music_maximised = 1\n\n if settings_buttonrect.collidepoint(x_click, y_click):\n x_click, y_click = 1, 1\n settingsopen = 1\n settings_buttonrect.x = 1995\n settings_button_closerect.x = 755\n down_buttonrect.x = 180\n up_buttonrect.x = 180\n x_click, y_click = 1, 1\n \n if settings_button_closerect.collidepoint(x_click, y_click):\n settingsopen = 0\n settings_button_closerect.x = 1995\n settings_buttonrect.x = 755\n down_buttonrect.x = 1180\n up_buttonrect.x = 1180\n \n if close_buttonrect.collidepoint(x_click, y_click):\n if notepad_maximised == 1:\n notepadopen = 0\n taskbar_notepadrect.x = 1010\n if defrag_maximised == 1:\n defragopen = 0\n taskbar_defragrect.x = 1010\n if lan_chat_maximised == 1:\n lan_chatopen = 0\n taskbar_lan_chatrect.x = 1010\n if music_maximised ==1:\n musicopen = 0\n taskbar_musicrect.x = 1010\n \n if minimise_buttonrect.collidepoint(x_click, y_click):\n if notepad_maximised 
== 1:\n notepad_maximised = 0\n if defrag_maximised == 1:\n defrag_maximised = 0\n if lan_chat_maximised == 1:\n lan_chat_maximised = 0\n if music_maximised == 1:\n music_maximised = 0\n \n\n \n #Settings\n\n \n if settingsopen == 1:\n if down_buttonrect.collidepoint(x_click, y_click):\n wallpaper_num = 2\n if up_buttonrect.collidepoint(x_click, y_click):\n wallpaper_num = 1\n \n if event.type == p3.KEYDOWN:\n keys = p3.key.get_pressed()\n if keys[p3.K_LALT]:\n if keys[p3.K_F4]:\n running = 0\n if keys[p3.K_RALT]:\n if keys[p3.K_F4]:\n running = 0\n #if keys[p3.K_CTRL]:\n #if keys[p3.K_r]:\n #mini_terminal = 1\n #if keys[p3.K_RCTRL]:\n #if keys[p3.K_r]:\n #mini_terminal = 1\n #if keys[p3.K_CTRL]:\n #if keys[p3.K_b]:\n #lx_background = 1\n #if keys[p3.K_RCTRL]:\n #if keys[p3.K_f]:\n #lx_background = 0\n \n \n if notepadopen == 1:\n if typing_num == 1:\n if keys[p3.K_a]:\n notepad_text = notepad_text + \"a\"\n if keys[p3.K_b]:\n notepad_text = notepad_text + \"b\"\n if keys[p3.K_c]:\n notepad_text = notepad_text + \"c\"\n if keys[p3.K_d]:\n notepad_text = notepad_text + \"d\"\n if keys[p3.K_e]:\n notepad_text = notepad_text + \"e\"\n if keys[p3.K_f]:\n notepad_text = notepad_text + \"f\"\n if keys[p3.K_g]:\n notepad_text = notepad_text + \"g\"\n if keys[p3.K_h]:\n notepad_text = notepad_text + \"h\"\n if keys[p3.K_i]:\n notepad_text = notepad_text + \"i\"\n if keys[p3.K_j]:\n notepad_text = notepad_text + \"j\"\n if keys[p3.K_k]:\n notepad_text = notepad_text + \"k\"\n if keys[p3.K_l]:\n notepad_text = notepad_text + \"l\"\n if keys[p3.K_m]:\n notepad_text = notepad_text + \"m\"\n if keys[p3.K_n]:\n notepad_text = notepad_text + \"n\"\n if keys[p3.K_o]:\n notepad_text = notepad_text + \"o\"\n if keys[p3.K_p]:\n notepad_text = notepad_text + \"p\"\n if keys[p3.K_q]:\n notepad_text = notepad_text + \"q\"\n if keys[p3.K_r]:\n notepad_text = notepad_text + \"r\"\n if keys[p3.K_s]:\n notepad_text = notepad_text + \"s\"\n if keys[p3.K_t]:\n notepad_text = notepad_text + \"t\"\n if keys[p3.K_u]:\n notepad_text = notepad_text + \"u\"\n if keys[p3.K_v]:\n notepad_text = notepad_text + \"v\"\n if keys[p3.K_w]:\n notepad_text = notepad_text + \"w\"\n if keys[p3.K_x]:\n notepad_text = notepad_text + \"x\"\n if keys[p3.K_y]:\n notepad_text = notepad_text + \"y\"\n if keys[p3.K_z]:\n notepad_text = notepad_text + \"z\"\n if keys[p3.K_SPACE]:\n notepad_text = notepad_text + \" \"\n if keys[p3.K_BACKSPACE]:\n notepad_text_length = len(notepad_text)\n notepad_text_length = notepad_text_length - 1\n notepad_text = notepad_text[:notepad_text_length]\n \n else:\n update_screen()\n \np3.quit()\n","sub_path":"800x480/P3.py","file_name":"P3.py","file_ext":"py","file_size_in_byte":19706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"381248507","text":"import gc\nimport os\nimport numpy as np\n\nimport matplotlib\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom spectrum_analysis.spectrum import *\nfrom spectrum_analysis.dictionaries import *\nfrom spectrum_analysis import data\n\nfrom PIL import Image\nfrom skimage import io\n\nimport traceback\n\nfrom svgpath2mpl import parse_path\nfrom mpl_toolkits import axes_grid1\n\n\n\"\"\"\nThis module contains the mapping class to work multiple x, y structured\ndata sets.\n\"\"\"\n\nclass scatter():\n def __init__(self, x, y, ax, msize=1, area=None, **kwargs):\n marker_size = 50\n\n # change markers if area is defined.\n # solution taken from here:\n # 
https://stackoverflow.com/questions/52303660/iterating-markers-in-plots/52303895#52303895\n # create array for linewidth corresponding to intensity\n if area is not None:\n area_sub = (area - min(area))\n area_norm = area_sub/max(area_sub)\n marker_linewidth = 2 + 13 * (1 - area_norm)\n else:\n # create frame like marker\n marker_linewidth = 5 * np.ones_like(y)\n\n # create all paths\n frames = []\n for marker_lw in marker_linewidth:\n marker_rest = marker_size - 2 * marker_lw\n markerstring = (f'm 0,0 v 0 {marker_size} h {marker_size} '\n f'v -{marker_size} '\n f'z m {marker_lw},{marker_lw} '\n f'h {marker_rest} v {marker_rest} '\n f'h -{marker_rest} z')\n frame = parse_path(markerstring)\n frames.append(frame)\n\n self.n = len(x)\n self.ax = ax\n self.ax.figure.canvas.draw()\n self.size_data=msize\n self.size = msize\n self.sc = self.mscatter(x, y, s=self.size, m=frames, **kwargs)\n self._resize()\n self.cid = ax.figure.canvas.mpl_connect('draw_event', self._resize)\n\n def _resize(self,event=None):\n ppd=72./self.ax.figure.dpi\n trans = self.ax.transData.transform\n s = ((trans((1, self.size_data)) - trans((0, 0))) * ppd)[1]\n if s != self.size:\n self.sc.set_sizes(s**2 * np.ones(self.n))\n self.size = s\n self._redraw_later()\n\n def _redraw_later(self):\n self.timer = self.ax.figure.canvas.new_timer(interval=10)\n self.timer.single_shot = True\n self.timer.add_callback(lambda : self.ax.figure.canvas.draw_idle())\n self.timer.start()\n\n def mscatter(self, x, y, ax=None, m=None, **kwargs):\n import matplotlib.markers as mmarkers\n if not ax: ax=plt.gca()\n scs = ax.scatter(x,y,**kwargs)\n if (m is not None):# and (len(m)==len(x)):\n paths = []\n for marker in m:\n if isinstance(marker, mmarkers.MarkerStyle):\n marker_obj = marker\n else:\n marker_obj = mmarkers.MarkerStyle(marker)\n path = marker_obj.get_path().transformed(\n marker_obj.get_transform())\n paths.append(path)\n scs.set_paths(paths)\n return scs\n\nclass mapping(spectrum):\n \"\"\"\n Class for working with x, y structured data sets.\n\n Attributes\n ----------\n folder : string\n name of the folder of the data to be analyzed\n\n listOfFiles : string\n List of files that are in the requested folder\n\n numberOfFiles : int\n Number of Files in the requested folder\n\n spectrum : int, default : 0\n Spectrum which is used as the reference spectrum for region selection.\n\n spectra : spectrum\n List containing all spectra of the mapping\n\n Parameters\n ----------\n foldername : string\n The folder of interest has to be in the current directory.\n The data will be prepared to analyze spectral data.\n\n datatype : string, default : 'txt'\n Type of the datafiles that should be used, like 'txt', 'csv',\n or 'dat'\n \"\"\"\n\n def __init__(self, foldername, plot=False, datatype='txt',\n peaknames={}):\n self.folder = foldername\n self.second_analysis = False\n self.answer = 'n'\n self.listOfFiles, self.numberOfFiles = data.GetFolderContent(\n self.folder,\n datatype)\n if os.path.exists(os.path.join(self.folder, 'results')) and not plot:\n self.second_analysis = True\n self.listOfFiles, self.numberOfFiles, self.indices = self.Get2ndLabels()\n\n self.spectrum = 0\n self.spectra = []\n for spec in self.listOfFiles:\n self.spectra.append(spectrum(spec.split('.')[-2]))\n\n self.pardir_peak = os.path.join(self.pardir, 'peakwise')\n self.pardir_peak_bg = os.path.join(self.pardir, 'peakwise_bg')\n self.peaknames = peaknames\n\n if not os.path.exists(self.pardir_peak):\n os.makedirs(self.pardir_peak)\n os.makedirs(self.pardir_peak_bg)\n\n 
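# The properties below mostly delegate to the currently selected spectrum\n # (self.spectra[self.spectrum]), so a mapping exposes the same directories\n # and naming helpers as a single spectrum object.\n\n 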
@property\n def label(self):\n return self.spectra[self.spectrum].label\n\n @label.setter\n def label(self, spectrum):\n self.spectrum = spectrum\n\n @property\n def tmpdir(self):\n return self.spectra[self.spectrum].tmpdir\n\n @property\n def resdir(self):\n return self.spectra[self.spectrum].resdir\n\n @property\n def rawdir(self):\n return self.spectra[self.spectrum].rawdir\n\n @property\n def basdir(self):\n return self.spectra[self.spectrum].basdir\n\n @property\n def fitdir(self):\n return self.spectra[self.spectrum].fitdir\n\n @property\n def pardir(self):\n return self.spectra[self.spectrum].pardir\n\n @property\n def pardir_spec(self):\n return self.spectra[self.spectrum].pardir_spec\n\n @property\n def pltdir(self):\n return self.spectra[self.spectrum].pltdir\n\n @property\n def dendir(self):\n return self.spectra[self.spectrum].dendir\n\n @property\n def tmploc(self):\n return self.spectra[self.spectrum].tmploc\n\n @property\n def pltname(self):\n return self.spectra[self.spectrum].pltname\n\n @property\n def rawname(self):\n return self.spectra[self.spectrum].rawname\n\n @property\n def missingvalue(self):\n return self.spectra[self.spectrum].missingvalue\n\n def SplitLabel(self, file):\n return file.split(os.sep)[-1].split('.')[-2]\n\n def Get2ndLabels(self):\n \"\"\"\n Function to get a list of indices for the second analysis.\n \"\"\"\n list_of_files = []\n list_of_indices = []\n\n self.answer = input('These spectra have been analyzed already.\\n'\n 'Do you want to fit all of them again? (y/n)\\n')\n\n if self.answer == 'y':\n list_of_files = self.listOfFiles\n number_of_files = self.numberOfFiles\n list_of_indices = np.arange(self.numberOfFiles)\n elif self.answer == 'n':\n for i, label in enumerate(self.listOfFiles):\n print(f'{self.SplitLabel(label)}')\n\n print('Enter the spectra that you want to analyze again.\\n'\n 'It is enough to enter the appendant four letter number.\\n'\n '(Finish the selection with x).')\n\n while True:\n label = input()\n if label == 'x':\n break\n if any(label in file for file in self.listOfFiles):\n index = [i for i, file in enumerate(self.listOfFiles) if label in file]\n list_of_files.append(self.listOfFiles[index[0]])\n list_of_indices.append(index[0])\n print('Added ' + self.SplitLabel(self.listOfFiles[index[0]]))\n else:\n print('This spectrum does not exist.')\n number_of_files = len(list_of_files)\n\n return list_of_files, number_of_files, list_of_indices\n\n def PlotAllRawSpectra(self, x, y):\n for i, spectrum in enumerate(y):\n self.label = i\n self.PlotRawSpectrum(x[i], y[i])\n\n def ReduceAllRegions(self, x, y):\n \"\"\"\n Function that calculates the reduced spectra, as selected before\n by the method :func:`SelectRegion() `.\n\n Parameters\n ----------\n x : numpy.ndarray\n x-values of the selected spectrum.\n\n y : numpy.ndarray\n y-values of the selected spectrum.\n\n Returns\n -------\n xreduced : numpy.ndarray\n Reduced x-values of the spectrum.\n\n yreduced : numpy.ndarray\n Reduced y-values of the spectrum.\n \"\"\"\n xregion = self.SelectRegion(x[self.spectrum], y[self.spectrum])\n xmin, xmax = self.ExtractRegion(x[self.spectrum], xregion)\n\n xreduced = np.array([])\n yreduced = np.array([])\n\n for i, spectrum in enumerate(y):\n xtemp, ytemp = self.ReduceRegion(x[i], y[i], xmin, xmax)\n xreduced = data.VStack(i, xreduced, xtemp)\n yreduced = data.VStack(i, yreduced, ytemp)\n\n return xreduced, yreduced\n\n def RemoveAllMuons(self, x, y, prnt=False, **kwargs):\n \"\"\"\n Removes muons from all spectra and approximates 
linearly\n in the muon region.\n\n Parameters\n ----------\n x : numpy.ndarray\n x-data of the selected spectrum.\n\n y : numpy.ndarray\n y-data that contains muons which should be removed.\n\n prnt : boolean\n Prints if muons were found in the spectrum of interest.\n\n **kwargs\n see method :func:`DetectMuonsWavelet() `\n\n Returns\n -------\n y : numpy.ndarray\n Muon-free y-data.\n \"\"\"\n\n for i, spectrum in enumerate(y):\n y[i] = self.RemoveMuons(x[i], y[i], prnt=prnt, **kwargs)\n\n return y\n\n def SelectAllBaselines(self, x, y, color='b', degree=1):\n \"\"\"\n Function that lets the user distinguish between the background\n and the signal. It runs the\n method :func:`PlotVerticalLines() `\n to select the regions that do not belong to the background and\n are therefore not used for background fit.\n\n Parameters\n ----------\n x : numpy.ndarray\n x-data of the selected spectrum.\n\n y : numpy.ndarray\n y-data that should be cleaned from background.\n\n label : string, default: ''\n Label for the spectrumborders file in case you want to have\n different borders for different files.\n\n color : string, default 'b'\n Color of the plotted spectrum.\n\n Returns\n -------\n xregion : numpy.array\n Array containing the min and max x-values which should be excluded\n from background calculations.\n \"\"\"\n xregion = self.SelectBaseline(x[self.spectrum],\n y[self.spectrum],\n color=color, degree=degree)\n return xregion\n\n def FitAllBaselines(self, x, y, xregion, show=False, degree=1):\n \"\"\"\n Fit of the baseline by using the\n `PolynomalModel()\n `_\n from lmfit.\n\n Parameters\n ----------\n x : numpy.ndarray\n x-values of spectrum which should be background-corrected.\n\n y : numpy.ndarray\n y-values of spectrum which should be background-corrected.\n\n show : boolean, default: False\n Decides whether the a window with the fitted baseline is opened\n or not.\n\n degree : int, default: 1\n Degree of the polynomial that describes the background.\n\n Returns\n -------\n baselines : numpy.ndarray\n Baseline of the input spectrum.\n \"\"\"\n baselines = np.array([])\n for i, spectrum in enumerate(y):\n self.label = i\n baseline = self.FitBaseline(x[i], y[i], xregion, show=show, degree=degree)\n baselines = data.VStack(i, baselines, baseline)\n\n return baselines\n\n def EvaluateAllBaselines(self, x, baselinefits):\n baselines = np.array([])\n for i, spectrum in enumerate(x):\n self.label = i\n baseline = self.EvaluateBaseline(x[i], baselinefits[i][0])\n baselines = data.VStack(i, baselines, baseline)\n\n return baselines\n\n def WaveletSmoothAll(self, y, wavelet='sym8', level=2):\n \"\"\"\n Smooth arrays by using wavelet transformation and soft threshold.\n\n Parameters\n ----------\n y : numpy.ndarray\n Array that should be denoised.\n\n wavelet : string, default : 'sym8'\n Wavelet for the transformation, see pywt documentation for\n different wavelets.\n\n level : int, default : 2\n Used to vary the coefficient-level. 1 is the highest level,\n 2 the second highest, etc. 
Depends on the wavelet used.\n\n Returns\n -------\n ydenoised : numpy.ndarray\n Denoised array of the input array.\n \"\"\"\n ydenoised = np.array([])\n for i, spectrum in enumerate(y):\n ytemp = self.WaveletSmooth(y[i], wavelet=wavelet, level=level)\n ydenoised = data.VStack(i, ydenoised, ytemp)\n\n return ydenoised\n\n def NormalizeAll(self, y, ymax=None):\n ynormed = np.array([])\n\n if type(ymax) == type(None):\n for i, spectrum in enumerate(y):\n ynormed_temp, ymax_temp = self.Normalize(y[i])\n ynormed = data.VStack(i, ynormed, ynormed_temp)\n ymax = data.VStack(i, ymax, ymax_temp)\n else:\n for i, spectrum in enumerate(y):\n ynormed_temp, ymax_temp = self.Normalize(y[i], ymax=ymax[i])\n ynormed = data.VStack(i, ynormed, ynormed_temp)\n\n return ynormed, ymax\n\n def SelectAllPeaks(self, x, y, peaks):\n \"\"\"\n Function that lets the user select the maxima of the peaks to fit\n according to their line shape (Voigt, Fano, Lorentzian, Gaussian).\n The positions (x- and y-value) are taken as initial values in the\n function :func:`~spectrum.FitSpectrum`.\n It saves the selected positions to\n '/temp/locpeak_' + peaktype + '_' + label + '.dat'.\n\n Usage: Select peaks with left mouse click, remove them with right\n mouse click.\n\n Parameters\n ----------\n peaks : list, default: ['breit_wigner', 'lorentzian']\n Possible line shapes of the peaks to fit are\n 'breit_wigner', 'lorentzian', 'gaussian', and 'voigt'.\n See lmfit documentation\n (https://lmfit.github.io/lmfit-py/builtin_models.html)\n for details.\n\n x : numpy.ndarray\n x-values of the mapping\n\n y : numpy.ndarray\n y-values of the mapping\n \"\"\"\n select = True\n if self.answer == 'y':\n answer = input('Do you want to select all peaks again? (y/n)\\n')\n if answer == 'y':\n select = True\n else:\n select = False\n\n if select:\n for i, spectrum in enumerate(y):\n self.label = i\n self.SelectPeaks(x[i], y[i], peaks)\n\n def FitAllSpectra(self, x, y, peaks):\n results = []\n\n for i, spectrum in enumerate(y):\n self.label = i\n temp = self.FitSpectrum(x[i], y[i], peaks=peaks)\n results.append(temp)\n\n return results\n\n def PlotAllFits(self, x, y, ymax, fitresults, show=False):\n for i, spectrum in enumerate(y):\n print(self.label + ' plotted')\n self.label = i\n self.PlotFit(x[i], y[i], ymax[i], fitresults[i], show=show)\n\n def Save2nd(self, file, value, error):\n # get values and update them\n values, stderrs = np.genfromtxt(file, unpack = True)\n\n values[self.indices[self.spectrum]] = value\n stderrs[self.indices[self.spectrum]] = error\n\n with open(file, 'w') as f:\n for i in range(len(values)):\n f.write('{:>13.5f}'.format(values[i])\n + '\\t' + '{:>11.5f}'.format(stderrs[i])\n + '\\n')\n\n def SavePeak(self, ymax, peak, params, prefix='', directory=''):\n if directory == '':\n directory = self.pardir_peak\n # iterate through all fit parameters\n for name in params.keys():\n # and find the current peak\n peakparameter = re.findall(peak, name)\n # improvement possible here, but works for now\n if peak == 'c' and 'center' in name:\n peakparameter = []\n\n if peakparameter:\n # create file for each parameter\n file = self.get_file(directory=directory, prefix=prefix, suffix='',\n datatype='dat', label=name)\n\n # get parameters for saving\n if prefix == '':\n peakparameter = name.replace(peak, '')\n else:\n peakparameter = name\n value = params[name].value\n error = params[name].stderr\n\n value, error = self.ScaleParameters(ymax, peakparameter,\n value, error)\n\n if self.second_analysis == True:\n self.Save2nd(file, 
value, error)\n else:\n with open(file, 'a') as f:\n f.write(f'{value:>13.5f}\\t{error:>11.5f}\\n')\n\n def GenerateUsedPeaks(self, fitresults):\n # find all peaks that were fitted and generate a list\n allpeaks = []\n for i, fit in enumerate(fitresults):\n if fitresults[i] != None:\n allpeaks.extend(re.findall('prefix=\\'(.*?)\\'',\n fitresults[i].model.name))\n\n usedpeaks = list(set(allpeaks))\n\n return usedpeaks\n\n def SaveUnusedPeaks(self, peaks, usedpeaks, fitresults):\n # find all prefixes used in the current model\n modelpeaks = re.findall('prefix=\\'(.*?)\\'', fitresults.model.name)\n unusedpeaks = list(set(usedpeaks)-set(modelpeaks))\n\n # save default value for each parameter of unused peaks\n for peak in unusedpeaks:\n # get the peaktype and number of the peak\n number = int(re.findall('\\d', peak)[0]) - 1\n peaktype = re.sub('_p.*_', '', peak)\n\n # create model with parameters as before\n model = self.ChoosePeakType(peaktype, number)\n model = StartingParameters(model, peaks)\n model.make_params()\n\n # go through all parameters and write missing values\n for parameter in model.param_names:\n peakfile = self.get_file(directory=self.pardir_peak,\n prefix='', suffix='',\n label=parameter, datatype='dat')\n\n # open file and write missing values\n if self.second_analysis == True:\n self.Save2nd(peakfile, self.missingvalue, self.missingvalue)\n else:\n with open(peakfile, 'a') as f:\n f.write('{:>13.5f}'.format(self.missingvalue)\n + '\\t' + '{:>11.5f}'.format(self.missingvalue)\n + '\\n')\n\n def UpdatePeaklist(self, usedpeaks):\n # get all peaks that are used\n list_of_files, number_of_files = data.GetFolderContent(\n folder=self.pardir_peak,\n filetype='dat',\n quiet=True)\n peaklist = []\n for i, file in enumerate(list_of_files):\n peakfile = list_of_files[i].split(os.sep)[-1]\n parameter = peakfile.split('_')[-1]\n peak = re.sub(parameter, '', peakfile)\n peaklist.append(peak)\n\n # make a set of all peaks used\n peaklist = list(set(peaklist))\n for peak in peaklist:\n usedpeaks.append(peak)\n\n usedpeaks = list(set(peaklist))\n\n return usedpeaks\n\n def SaveAllFitParams(self, ymax, fitresults, peaks):\n usedpeaks = self.GenerateUsedPeaks(fitresults)\n\n if self.second_analysis == True:\n usedpeaks = self.UpdatePeaklist(usedpeaks)\n\n for i, spectrum in enumerate(ymax):\n self.label = i\n self.SaveFuncParams(self.SaveSpec, ymax[i][0], fitresults[i], peaks)\n self.SaveFuncParams(self.SavePeak, ymax[i][0], fitresults[i], peaks)\n self.SaveUnusedPeaks(peaks, usedpeaks, fitresults[i])\n\n def SaveAllBackgrounds(self, bgfits, fits, ymax, peaks):\n prefix = 'background'\n for i, background in enumerate(bgfits):\n self.label = i\n # save background parameters\n self.SaveBackground(bgfits[i][0], fits[i], ymax[i][0])\n # for each parameter in polynomial Model\n for parameter in bgfits[i][0].params.keys():\n self.SavePeak(ymax[i][0], parameter, bgfits[i][0].params,\n prefix=prefix, directory=self.pardir_peak_bg)\n # for constant from constant Model\n self.SavePeak(ymax[i][0], 'c', fits[i].params,\n prefix=prefix, directory=self.pardir_peak_bg)\n\n def LabelZ(self, clb, label='Integrated Intensity\\n', nbins=5,\n linear=False, unit='arb. 
u.'):\n \"\"\"\n Function to label the z-axis of the Plot.\n Parameters\n ----------\n plt : matplotlib.figure.Figure\n Plot that should be labeled.\n ax : matplotlib.axes.Axes\n Axis of interest.\n label : string\n Label that should be used for the z-axis.\n \"\"\"\n tick_locator = matplotlib.ticker.MaxNLocator(nbins=nbins)\n if linear:\n tick_locator = matplotlib.ticker.LinearLocator(numticks=nbins)\n clb.locator = tick_locator\n clb.update_ticks()\n\n # get tickvalues and reduce the decimals\n tickvalues = clb.get_ticks()\n tickvalues, max_exp = self.ReduceDecimals(tickvalues)\n\n clb.set_label(label\n + '(10$^{{{:1.0f}}}$ '.format(max_exp)\n + unit + ')', fontsize='small')\n clb.ax.set_yticklabels('{:1.2f}'.format(x) for x in tickvalues)\n clb.ax.tick_params(labelsize='small')\n\n def CreatePlotValues(self, maptype, y, **kwargs):\n \"\"\"\n Create plot values accordingly to the type specified\n \"\"\"\n plot_value = np.empty(y.shape[0])\n savefile = ''\n\n if maptype == 'raw':\n for i, spectrum in enumerate(y):\n selectedvalues = spectrum[(kwargs['x'][0] > kwargs['xmin'])\n & (kwargs['x'][0] < kwargs['xmax'])]\n plot_value[i] = sum(selectedvalues)\n\n savefile = os.path.join(self.pltdir, 'map_raw')\n elif maptype == 'params' or (maptype in mapoperators):\n plot_value = np.copy(y)\n savefile = os.path.join(self.pltdir, f'map_{kwargs[\"name\"]}')\n elif maptype == 'errs':\n plot_value = np.copy(y)\n savefile = os.path.join(self.pltdir, f'err_{kwargs[\"name\"]}')\n\n return plot_value, savefile\n\n def CorrectPlotValues(self, plot_value):\n \"\"\"\n Check plot_values and replace them with mean value.\n \"\"\"\n # check if any value in plot_value is a missing value or 1\n missingindices = [i for i, x in enumerate(plot_value)\n if ((x == self.missingvalue) or (x == 1.0)\n or (x == 0.0))]\n existingindices = [i for i, x in enumerate(plot_value)\n if (x != self.missingvalue) and (x != 1.0)\n and (x != 0.0)]\n # calculate the mean of the existing values\n fitmean = 0\n for index in existingindices:\n fitmean += plot_value[index]\n fitmean = fitmean / len(existingindices)\n\n # set the missing values as mean\n for index in missingindices:\n plot_value[index] = fitmean\n\n return plot_value, fitmean\n\n def CreatePlotMatrices(self, maptype, y, mapdims, **kwargs):\n\n plot_value, savefile = self.CreatePlotValues(maptype, y, **kwargs)\n\n plot_value, fitmean = self.CorrectPlotValues(plot_value)\n\n # create matrix for plotting\n plot_matrix = np.reshape(plot_value, mapdims)\n plot_matrix = np.flipud(plot_matrix)\n\n # create matrix with missing values\n missing_matrix = np.full_like(plot_matrix, False, dtype=bool)\n missing_matrix = (plot_matrix == fitmean)\n\n return plot_matrix, missing_matrix, savefile, fitmean\n\n def CreatePatchMask(self, mapdims, fig, missing_matrix, size=1.0):\n xdim = mapdims[0]\n ydim = mapdims[1]\n # Create list for all the missing values as missing patches\n missingboxes = []\n facecolor = []\n\n # find all fields not containing signals and append to\n for iy in range(0,ydim):\n for ix in range(0,xdim):\n if missing_matrix[iy][ix]:\n # calculate position correction for the patches\n corr = size / 2.0\n linecorr = matplotlib.rcParams['axes.linewidth']/fig.dpi/4\n # create the missing patch and add to list\n rect = matplotlib.patches.Rectangle((ix - corr + linecorr*4,\n iy - corr - linecorr), size, size)\n missingboxes.append(rect)\n if size == 1.0:\n facecolor.append((0.05, 0.05, 0.05, 1))\n else:\n facecolor.append('white')\n\n # Create patch collection with specified 
colour/alpha\n pc = matplotlib.collections.PatchCollection(missingboxes,\n facecolor=facecolor)\n return pc\n\n def ConfigureTicks(self, mapdims, step, xticker, plot, grid, remove=2):\n xdim = mapdims[0]\n ydim = mapdims[1]\n # create x and y ticks accordingly to the parameters of the mapping\n x_ticks = np.arange(step, step * (xdim + 1), step=xticker*step)\n y_ticks = np.arange(step, step * (ydim + 1), step=step)\n if not grid:\n y_ticks = y_ticks[::-1]\n\n\n plot.xticks(np.arange(xdim, step=xticker), x_ticks, fontsize='small')\n plot.yticks(np.arange(ydim), y_ticks, fontsize='small')\n\n ax = plot.gca()\n plot.setp(ax.xaxis.get_ticklabels()[1::remove], visible=False)\n if grid:\n plot.setp(ax.yaxis.get_ticklabels()[1::remove], visible=False)\n else:\n if mapdims[1] % 2 == 0:\n plot.setp(ax.yaxis.get_ticklabels()[0::remove], visible=False)\n else:\n plot.setp(ax.yaxis.get_ticklabels()[1::remove], visible=False)\n\n def ConfigurePlot(self, clb, plot, peak, **kwargs):\n # set title, label of x, y and z axis\n #plt.title('Mapping of ' + self.folder + ' ' + peak, fontsize='small')\n plot.ylabel('y-Position ($\\mathrm{\\mu}$m)', fontsize='small')\n plot.xlabel('x-Position ($\\mathrm{\\mu}$m)', fontsize='small')\n self.LabelZ(clb, **kwargs)\n\n # have a tight layout\n plot.tight_layout()\n\n def PlotMapping(self, maptype, y, mapdims, step, area=None,\n xticker=1, colormap='Reds', alpha=1.0,\n numbered=False, vmin=None, vmax=None, grid=False,\n background='', msize=1, plot_missing=True, **kwargs):\n \"\"\"\n Method to plot different mappings.\n Parameters\n ----------\n xmin : int\n Lowest wavenumber that should be used for integrating a spectral\n region.\n xmax : int\n Highest wavenumber that should be used for integrating a spectral\n region.\n maptype : string\n Plot any of the parameters in fitparameter/peakwise/\n xticker : int\n colormap : string or colormap\n Defines the coloring of the mapping according to the `matplotlib\n colormaps `_\n \"\"\"\n plot_matrix, missing_matrix, savefile, fitmean = self.CreatePlotMatrices(maptype,\n y, mapdims[::-1], **kwargs)\n\n # create and configure figure for mapping\n matplotlib.rcParams['font.sans-serif'] = \"Liberation Sans\"\n fontsize_int = 14 + 3 * np.sqrt(mapdims[0] * mapdims[1])\n matplotlib.rcParams.update({'font.size': fontsize_int})\n\n def set_size(mapdims, ax=None):\n w = mapdims[0]\n h = mapdims[1]\n \"\"\" w, h: width, height in inches \"\"\"\n if not ax: ax=plt.gca()\n left = ax.figure.subplotpars.left\n right = ax.figure.subplotpars.right\n top = ax.figure.subplotpars.top\n bot = ax.figure.subplotpars.bottom\n figw = float(w)/(right-left)\n figh = float(h)/(top-bot)\n # correct width and hight for non quadratic sizes\n dims = [figw, figh]\n dims.sort(reverse=True)\n correction = dims[0]/dims[1]/10\n figw = figw + correction*2\n figh = figh + correction\n ax.figure.set_size_inches(figw, figh)\n\n fig, ax = plt.subplots(figsize=mapdims)\n ax.set_aspect('equal')\n set_size(mapdims)\n self.ConfigureTicks(mapdims, step, xticker, plt, grid)\n\n # plot mapping, create patch mask and plot it over map\n if grid:\n # create data for plotting\n x = []\n y = []\n x_missing = []\n y_missing = []\n plot_matrix = np.flipud(plot_matrix)\n plot_vector = list(plot_matrix.flatten())\n missing_vector = np.full_like(plot_vector, False, dtype=bool)\n missing_vector = (plot_matrix == fitmean)\n missing_vector = missing_vector.flatten()\n if area is not None:\n area_corr = list(area.flatten())\n else:\n area_corr = None\n\n cor = 1.5\n for i in range(1, 
mapdims[1]+1):\n for j in range(1, mapdims[0]+1):\n x.append(j-cor)\n y.append(i-cor)\n\n ax.set_xlim(min(x), max(x)+1)\n ax.set_ylim(min(y), max(y)+1)\n\n deletelist = []\n for i, missing in enumerate(missing_vector):\n if missing:\n deletelist.append(i)\n deleted = 0\n for i in deletelist:\n x_missing.append(x[i-deleted])\n y_missing.append(y[i-deleted])\n del(x[i-deleted])\n del(y[i-deleted])\n del(plot_vector[i-deleted])\n if area_corr is not None:\n del(area_corr[i-deleted])\n deleted += 1\n\n try:\n img = io.imread(background)\n pos = cor - 2\n plt.imshow(img, zorder=0, cmap='Greys_r',\n extent=[0+pos, mapdims[0]+pos,\n 0+pos, mapdims[1]+pos])\n del img\n except ValueError:\n #traceback.print_exc()\n print('No background given.')\n\n if plot_missing:\n missng_col = scatter(x_missing, y_missing, ax, msize=msize,\n color='black', linewidth=0.5, alpha=alpha)\n\n sclb = scatter(x, y, ax, c=plot_vector, msize=msize, area=area_corr,\n cmap=colormap, linewidth=0.5, alpha=alpha)\n im = sclb.sc\n del sclb\n else:\n im = plt.imshow(plot_matrix, cmap=colormap, vmin=vmin, vmax=vmax)\n\n pc = self.CreatePatchMask(mapdims, fig, missing_matrix)\n ax.add_collection(pc)\n\n def add_colorbar(im, aspect=20, pad_fraction=0.5, **kwargs):\n \"\"\"Add a vertical color bar to an image plot.\"\"\"\n divider = axes_grid1.make_axes_locatable(im.axes)\n #width = axes_grid1.axes_size.AxesY(im.axes, aspect=1./aspect)\n #pad = axes_grid1.axes_size.Fraction(pad_fraction, width)\n current_ax = plt.gca()\n cax = divider.append_axes(\"right\", size='5%', pad=0.05)\n plt.sca(current_ax)\n cbar = im.axes.figure.colorbar(im, cax=cax, **kwargs)\n return cbar\n\n clb = add_colorbar(im)\n\n # number the patches if numbered == True\n def NumberMap(mapdims, ax):\n product = mapdims[0] * mapdims[1]\n for i in range(0, mapdims[0]):\n for j in range(0, mapdims[1]):\n color = 'black'\n if missing_matrix[j][mapdims[0] - i-1]:\n color = 'white'\n ax.text(mapdims[0] - i-1, j,\n product - (j * mapdims[0] + i),\n ha='center', va='center',\n color=color, fontsize=fontsize_int*0.4)\n\n if numbered:\n NumberMap(mapdims, ax)\n\n # configure, save and show the plot\n plotname = re.sub(os.path.join(self.folder, 'results', 'plot', ''), '', savefile)\n try:\n # remove grid prefix\n if grid:\n gridname = kwargs['name'].split('_')[0]\n peakparameter = re.sub(gridname, '', kwargs['name'])[1:]\n else:\n peakparameter = kwargs['name']\n\n # remove scaled prefix\n if vmin is not None:\n scalename = peakparameter.split('_')[0]\n peakparameter = re.sub(scalename, '', peakparameter)[1:]\n\n # define peak shape and parameter\n peakshape = peakparameter\n peakparameter = peakparameter.split('_')[-1]\n peakshape = re.sub('_' + peakparameter, '', peakshape)\n except KeyError:\n peakshape = 'raw'\n peakparameter = peakshape\n\n zlabel = self.peaknames[peakshape][peakparameter]['name'] + '\\n'\n unit = self.peaknames[peakshape][peakparameter]['unit']\n\n if maptype == 'errs':\n zlabel = 'Relative error of\\n' + zlabel\n\n self.ConfigurePlot(clb, plt,\n peak = peakshape,\n label = zlabel,\n unit = unit)\n try:\n if 'pca' in kwargs['name']:\n clb.set_label('')\n clb.set_ticks([])\n clb.set_label('Clusters')\n except KeyError:\n pass\n\n if isinstance(colormap, str):\n colormap_name = colormap\n else:\n colormap_name = colormap.name\n plt.savefig(f'{savefile}_{colormap_name}.pdf', format='pdf')\n plt.savefig(f'{savefile}_{colormap_name}.png')\n plt.clf()\n plt.close(fig)\n del im, clb\n gc.collect()\n\n print(f'{plotname} {colormap_name} plotted')\n\n return 
plot_matrix, plotname\n\n    def PlotHistogram(self, plot_matrix, plotname, bins=5, rng=None):\n        \"\"\"\n        Function that plots a histogram of the plot data handed to it.\n        \"\"\"\n        matplotlib.rcParams.update({'font.size': 16})\n        # flatten matrix to vector\n        plot_values = plot_matrix.flatten()\n\n        # generate peak name\n        peakparameter = plotname.split('_')[-1]\n        maptype = plotname.split('_')[0]\n        peakshape = re.sub(maptype + '_', '', plotname)\n        peakshape = re.sub('_' + peakparameter, '', peakshape)\n        print(f'hist_{plotname}')\n\n        # generate labels from peaknames\n        label = self.peaknames[peakshape][peakparameter]['name']\n        unit = self.peaknames[peakshape][peakparameter]['unit']\n\n        fig, ax = plt.subplots()\n        plt.hist(plot_values, bins=bins, range=rng)\n\n        self.FormatxLabelAndTicks(plt, name=label, unit=unit)\n        self.FormatyLabelAndTicks(plt, name='counts', unit='cts')\n\n        plt.tight_layout()\n\n        plt.savefig(os.path.join(self.folder, 'results', 'plot',\n                                 f'hist_{plotname}.png'), dpi=300)\n        fig.clf()\n        plt.close()\n\n    def PlotAllColormaps(self, maptype, y, mapdims, step, **kwargs):\n        \"\"\"\n        Function that plots the given mapping once with every colormap in\n        cmaps, e.g. to compare different colorings.\n        \"\"\"\n        for category in cmaps:\n            print(category[0])\n            for colormap in category[1]:\n                self.PlotMapping(maptype=maptype, y=y, mapdims=mapdims, step=step,\n                                 colormap=colormap, **kwargs)\n\n    def CreatePeakList(self, peakFileList, filetype='dat'):\n        \"\"\"\n        Function that creates a list of peaks from a list of file paths\n        handed to it.\n        \"\"\"\n        peakList = []\n        for mapping in peakFileList:\n            mapping = mapping.split(os.sep)[-1]\n            # '[.]' matches a literal dot so only the file extension is removed\n            mapping = re.sub('[.]' + filetype, '', mapping)\n            peakList.append(mapping)\n        return peakList\n\n    def ReplaceMissingValues(self, corrected, parameterArray):\n        \"\"\"\n        Function that returns a corrected array, with missing indices\n        taken from parameterArray.\n        \"\"\"\n        missingvalue = self.missingvalue\n        missingindices = [i for i, x in enumerate(parameterArray) if\n                          (x == missingvalue)]\n        for index in missingindices:\n            corrected[index] = missingvalue\n        return corrected\n\n    def ModifyValues(self, first, second, operation='div'):\n        \"\"\"\n        Function that modifies two arrays with the selected operation.\n        It takes the missing values from both arrays and sets them as missing\n        values of the resulting array.\n        \"\"\"\n        if operation == 'div':\n            result = np.divide(first, second)\n        elif operation == 'mult':\n            result = np.multiply(first, second)\n        elif operation == 'add':\n            result = np.add(first, second)\n        elif operation == 'sub':\n            result = np.subtract(first, second)\n        else:\n            raise ValueError(f'Unknown operation: {operation}')\n        result = self.ReplaceMissingValues(result, first)\n        result = self.ReplaceMissingValues(result, second)\n        return result\n","sub_path":"lib/spectrum_analysis/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":38864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"61586208","text":"# Copyright (c) 2016, NECST Laboratory, Politecnico di Milano\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without \n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, \n#    this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright \n#    notice, this list of conditions and the following disclaimer in the \n#    documentation and/or other materials provided with the distribution.\n#\n# 3. 
Neither the name of the copyright holder nor the names of its \n#    contributors may be used to endorse or promote products derived from \n#    this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, \n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR \n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR \n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, \n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, \n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR \n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF \n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n__author__ = \"Marco Rabozzi, Luca Cerina, Giuseppe Natale\"\n__copyright__ = \"Copyright 2016, NECST Laboratory, Politecnico di Milano\"\n\nimport time\nimport struct\nfrom pynq import MMIO\nfrom pynq.iop import request_iop\nfrom pynq.iop import iop_const\nfrom pynq.iop import Pmod_IO\nfrom pynq.iop import Arduino_IO\nfrom pynq.iop import PMODA\nfrom pynq.iop import PMODB\nfrom pynq.iop import ARDUINO\nfrom pynq.iop import PMOD_GROVE_G3\nfrom pynq.iop import PMOD_GROVE_G4\nfrom pynq.iop import ARDUINO_GROVE_I2C\n\n\nPMOD_GROVE_DLIGHT_PROGRAM = \"pmod_grove_dlight.bin\"\nARDUINO_GROVE_DLIGHT_PROGRAM = \"arduino_grove_dlight.bin\"\nGROVE_DLIGHT_LOG_START = iop_const.MAILBOX_OFFSET+16\nGROVE_DLIGHT_LOG_END = GROVE_DLIGHT_LOG_START+(250*16)\n\nclass Grove_DLight(object):\n    \"\"\"This class controls the Grove I2C digital light sensor.\n    \n    Attributes\n    ----------\n    iop : _IOP\n        I/O processor instance used by Grove_DLight.\n    mmio : MMIO\n        Memory-mapped I/O instance to read and write instructions and data.\n    log_running : int\n        The state of the log (0: stopped, 1: started).\n    \n    \"\"\"\n    def __init__(self, if_id, gr_pin): \n        \"\"\"Return a new instance of a Grove_DLight object. 
\n \n Parameters\n ----------\n if_id : int\n IOP ID (1, 2, 3) corresponding to (PMODA, PMODB, ARDUINO).\n gr_pin: list\n A group of pins on stickit connector or arduino shield.\n \n \"\"\"\n if if_id in [PMODA, PMODB]:\n if not gr_pin in [PMOD_GROVE_G3,\n PMOD_GROVE_G4]:\n raise ValueError(\"DLight group number can only be G3 - G4.\")\n GROVE_DLIGHT_PROGRAM = PMOD_GROVE_DLIGHT_PROGRAM\n\n elif if_id in [ARDUINO]:\n if not gr_pin in [ARDUINO_GROVE_I2C]:\n raise ValueError(\"DLight group number can only be I2C.\")\n GROVE_DLIGHT_PROGRAM = ARDUINO_GROVE_DLIGHT_PROGRAM\n\n else:\n raise ValueError(\"No such IOP for grove device.\")\n\n self.iop = request_iop(if_id, GROVE_DLIGHT_PROGRAM)\n self.mmio = self.iop.mmio\n self.log_interval_ms = 1000\n self.log_running = 0\n self.iop.start()\n \n if if_id in [PMODA, PMODB]:\n self.mmio.write(iop_const.MAILBOX_OFFSET, gr_pin[0])\n self.mmio.write(iop_const.MAILBOX_OFFSET+4, gr_pin[1])\n\n self.mmio.write(iop_const.MAILBOX_OFFSET +\n iop_const.MAILBOX_PY2IOP_CMD_OFFSET, 1)\n while (self.mmio.read(iop_const.MAILBOX_OFFSET +\n iop_const.MAILBOX_PY2IOP_CMD_OFFSET) == 1):\n pass\n\n def read_raw_light(self):\n \"\"\"Read the visible and IR channel values.\n\n Read the values from the grove digital light peripheral.\n \n Returns\n -------\n tuple\n A tuple containing 2 integer values ch0 (visible) and ch1 (IR).\n \n \"\"\"\n self.mmio.write(iop_const.MAILBOX_OFFSET+\n iop_const.MAILBOX_PY2IOP_CMD_OFFSET, 3) \n while (self.mmio.read(iop_const.MAILBOX_OFFSET+\n iop_const.MAILBOX_PY2IOP_CMD_OFFSET) == 3):\n pass\n ch0 = self.mmio.read(iop_const.MAILBOX_OFFSET)\n ch1 = self.mmio.read(iop_const.MAILBOX_OFFSET + 0x4) \n return ch0,ch1\n\n def read_lux(self):\n \"\"\"Read the computed lux value of the sensor.\n \n Returns\n -------\n int\n The lux value from the sensor\n \n \"\"\"\n self.mmio.write(iop_const.MAILBOX_OFFSET +\n iop_const.MAILBOX_PY2IOP_CMD_OFFSET, 5) \n while (self.mmio.read(iop_const.MAILBOX_OFFSET +\n iop_const.MAILBOX_PY2IOP_CMD_OFFSET) == 5):\n pass\n lux = self.mmio.read(iop_const.MAILBOX_OFFSET+0x8) \n return lux\n","sub_path":"python/pynq/iop/grove_dlight.py","file_name":"grove_dlight.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"148379508","text":"import h5py\nimport sys\nimport numpy\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plot\n\nimport cnn_tf\nimport cnn_keras\nimport regression\n\nepochs = 10\nt = 0.8\ntest_frequency = 1\nmin_improvement = 0.0001\n\nwith h5py.File(sys.argv[1], 'r') as f:\n data = f['data'][()]\n targets = f['targets'][()]\n\ndata = data / 255.0\n\ntrain_data = data[:int(data.shape[0] * t)]\ntrain_targets = targets[:int(data.shape[0] * t)]\ntest_data = data[int(data.shape[0] * t):]\ntest_targets = targets[int(data.shape[0] * t):]\n\nbatch_size = 128\n\nmodel = cnn_tf.LeNet5(input_size=(28, 28, 1), classes=10)\n\n\nlosses = []\naccuracies = []\nprev_loss = numpy.inf\nprev_model = model\nloss = model.loss(test_data, test_targets)\naccuracy = model.accuracy(test_data, test_targets)\nlosses.append(loss)\naccuracies.append(accuracy)\nfor epoch in tqdm(range(epochs), desc='Epoch'):\n model.fit(train_data, train_targets, batch_size=batch_size)\n if epoch % test_frequency == 0:\n loss = model.loss(test_data, test_targets)\n accuracy = model.accuracy(test_data, test_targets)\n losses.append(loss)\n accuracies.append(accuracy)\n if (prev_loss / loss) - 1 < min_improvement:\n break\n prev_loss = 
loss\n\n#plot.plot(losses)\n#plot.savefig('losses.png')\n\nplot.plot(accuracies)\nplot.savefig('accuracies.png')\n","sub_path":"facial_recognizer.py","file_name":"facial_recognizer.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"295507171","text":"#!/usr/bin/env python\r\n\r\nimport uuid\r\nfrom decimal import Decimal as Dec\r\nfrom datetime import datetime\r\n\r\nimport console\r\nfrom account import Account, Bank\r\n\r\n\r\nclass Bancomat:\r\n    '''ATM class\r\n\r\n    bancomat_id - unique ID of the ATM\r\n    amount_money - amount of cash in the ATM\r\n    transaction - sequential transaction number of the ATM\r\n    history - history of operations performed at the ATM\r\n    address - address where the ATM is installed\r\n    '''\r\n\r\n    def __init__(self, bancomat_id=None,\r\n                 amount_money=Dec(1E+7), transaction=0, history=None):\r\n        # use None defaults: default arguments are evaluated only once,\r\n        # so all instances would otherwise share one id and one history dict\r\n        self.bancomat_id = bancomat_id if bancomat_id is not None else str(uuid.uuid4())\r\n        self.amount_money = amount_money\r\n        self.transaction = transaction\r\n        self.history = history if history is not None else {}\r\n        self.address = ''\r\n\r\n    def __repr__(self):\r\n        return 'Bancomat : {} \\n\\t{} -- {} BYN'.format(self.bancomat_id,\r\n                                                       self.address,\r\n                                                       self.amount_money)\r\n\r\n    def authentication(self):\r\n        '''Method that performs authentication'''\r\n\r\n        attempt = 3\r\n        input('Insert the card and press OK')\r\n        # _card = card  # a Card object instance would be read from the card\r\n        _card = self.bank.accounts[list(self.bank.accounts.keys())[0]].card\r\n        while attempt > 0:\r\n            passwd = input('Enter the PIN code: ')\r\n            if _card.passwd == passwd:\r\n                return _card.id\r\n            else:\r\n                attempt -= 1\r\n                print('{} {} left'.format(\r\n                    attempt, 'attempts' if attempt > 1 else 'attempt'))\r\n        return None\r\n\r\n    def sub_amount(self, account, other):\r\n        '''Method that:\r\n        - increments the transaction number\r\n        - records the transaction in the history\r\n        - decreases the amount of cash in the ATM\r\n\r\n        account - who withdraws the money\r\n        other - amount of money withdrawn'''\r\n\r\n        time = datetime.now()\r\n        self.transaction += 1\r\n        self.history[self.transaction] = \"{} -- {} -- Withdrawn {} BYN\".format(\r\n            account.user.name, time.strftime('%d-%m-%Y %H:%M:%S'), other)\r\n        self.amount_money -= Dec(other)\r\n\r\n    def start(self, bank, address, money=None):\r\n        '''Main operating loop of the ATM'''\r\n\r\n        self.address = address\r\n        self.bank = bank\r\n        if money:\r\n            self.amount_money = money\r\n        while True:\r\n            card_id = self.authentication()\r\n            if card_id:\r\n                while True:\r\n                    account = bank.accounts[card_id]\r\n                    choice = console.get_choice()\r\n                    if choice in 'Ww':\r\n                        i = console.get_integer('How much', minimum=5,\r\n                                                allow_zero=False, default=5)\r\n                        if account.amount >= i:\r\n                            if self.amount_money >= i:\r\n                                print('Take your money.')\r\n                                account.sub_amount(self.address, i)\r\n                                self.sub_amount(account, i)\r\n                            else:\r\n                                print(self)\r\n                        else:\r\n                            print(account)\r\n                    elif choice in 'Gg':\r\n                        print(account)\r\n                    elif choice in 'Ss':\r\n                        account.get_history()\r\n                    else:\r\n                        break\r\n\r\n\r\nif __name__ == '__main__':\r\n    a = Account('Mike Doe', amount=100)\r\n    print(a)\r\n    c = Bank()\r\n    c.add_account(a)\r\n    print(c)\r\n    b = Bancomat()\r\n    print(b)\r\n    b.start(c, 'Mogilev Jacubovskogo 66')\r\n","sub_path":"bancomat.py","file_name":"bancomat.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"265725217","text":"import bisect\nimport collections\nfrom typing import List\n\n\nclass Solution:\n    def countRectangles(self, rectangles: List[List[int]], points: List[List[int]]) -> List[int]:\n        n=len(points)\n        res=[0]*n\n        dic=collections.defaultdict(list)\n        for rect in rectangles:\n            dic[rect[1]].append(rect[0])\n\n        for v in dic.values(): v.sort()\n\n        for i, p in enumerate(points):\n            x, y=p[0], p[1]\n            for j in range(y, 101):\n                if j in dic:\n                    pos=bisect.bisect_left(dic[j], x)\n                    res[i]+=len(dic[j])-pos\n\n        return res\n","sub_path":"python/count-number-of-rectangles-containing-each-point.py","file_name":"count-number-of-rectangles-containing-each-point.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"249063393","text":"load(\"//bazel:felicia_cc.bzl\", \"fel_cxxopts\")\n\ndef fel_pybind_py_library(\n        name,\n        copts = [],\n        cc_srcs = [],\n        cc_deps = [],\n        py_srcs = [],\n        py_deps = []):\n    cc_src = name + \"_py.cc\"\n    if len(cc_srcs) == 0:\n        cc_srcs = [cc_src]\n\n    libname = \"%s.so\" % name\n\n    native.cc_binary(\n        name = 
libname,\n srcs = cc_srcs,\n copts = fel_cxxopts(is_external = True) + copts,\n linkshared = 1,\n linkstatic = 1,\n deps = [\"@pybind11\"] + cc_deps,\n )\n\n # On windows, it needs a .pyd file but it can't generate *.pyd above\n # at this moment\n generate_pyd(\n name = \"%s_pyd\" % name,\n file = \":%s\" % libname,\n out = \"%s.pyd\" % name,\n )\n\n native.py_library(\n name = name,\n data = select({\n \"//felicia:windows\": [\":%s.pyd\" % name],\n \"//conditions:default\": [\":%s\" % libname],\n }),\n srcs = py_srcs,\n deps = py_deps,\n imports = [\".\"],\n )\n\ndef _generate_pyd_impl(ctx):\n output = ctx.outputs.out\n target = ctx.attr.name[:-4]\n\n rest = []\n sopath = None\n for file in ctx.files.file:\n if \"%s.so\" % target == file.basename:\n sopath = file\n else:\n rest.append(file)\n\n if sopath == None:\n fail(\"Failed to generate pyd\")\n\n ctx.actions.run_shell(\n inputs = [sopath],\n outputs = [output],\n progress_message = \"Copy %s to %s\" % (sopath.short_path, output.short_path),\n command = \"cp %s %s\" % (sopath.path, output.path),\n )\n\ngenerate_pyd = rule(\n implementation = _generate_pyd_impl,\n attrs = {\n \"file\": attr.label(mandatory = True, allow_single_file = True),\n \"out\": attr.output(mandatory = True),\n },\n)\n","sub_path":"bazel/felicia_pybind.bzl","file_name":"felicia_pybind.bzl","file_ext":"bzl","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"434076014","text":"import copy\r\nimport logging\r\nimport os\r\nfrom collections import defaultdict\r\nfrom dataclasses import dataclass\r\nfrom typing import Iterable, List, Any, Dict, Union\r\n\r\nfrom aiohttp import ClientSession\r\n\r\nfrom azure_api import group_images\r\nfrom single_image_process import ImageFaceData\r\nfrom single_image_process import SingleImageProcess\r\n\r\nMAX_IDS_IN_GROUPING_REQUEST = 1000\r\n\r\n\r\nclass ImageNotExistException(Exception):\r\n def __init__(self, path):\r\n self.path = path\r\n\r\n def __str__(self):\r\n return f'{self.path} does not exist or it is not a file'\r\n\r\n\r\n@dataclass\r\nclass HandlerResult:\r\n data: Any\r\n path: str\r\n\r\n\r\n@dataclass\r\nclass GroupData:\r\n best_face: ImageFaceData\r\n count: int = 1\r\n\r\n\r\nclass ImageDetectResultsHandler:\r\n MAX_GROUPS_IN_GROUPING_REQUEST = 500\r\n\r\n def __init__(self, session):\r\n self._session = session\r\n self._groups = dict() # type: Dict[str, GroupData]\r\n self._faces_to_group = dict()\r\n self._best_faces_data = dict() # type: Dict[str, ImageFaceData]\r\n self._faces_count = defaultdict(int)\r\n self._log = logging.getLogger('ImageDetectResultsHandler')\r\n\r\n def add_result(self, result: Iterable[ImageFaceData]):\r\n for face in result:\r\n self._log.debug(f'image data {face}')\r\n self._faces_to_group[face.face_id] = face\r\n\r\n async def group_images(self):\r\n faces_to_group = copy.deepcopy(self._faces_to_group)\r\n groups = await group_images(self._session, [face_id for face_id in faces_to_group])\r\n self._handle_grouping_result(groups['groups'])\r\n for member in groups['messyGroup']:\r\n self._add_members_to_group(None, [member])\r\n\r\n def _handle_grouping_result(self, groups: List[List[str]]):\r\n for group_members in groups:\r\n matched_groups = [m for m in group_members if m in self._groups]\r\n if matched_groups:\r\n group_id = matched_groups[0]\r\n else:\r\n group_id = None\r\n self._add_members_to_group(group_id, group_members)\r\n\r\n def _add_members_to_group(self, group_id: Union[str, None], 
members_ids: List[str]):\r\n best_face = max((self._faces_to_group[m] for m in members_ids if m in self._faces_to_group), key=lambda x: x.ratio)\r\n if group_id:\r\n self._groups[group_id].count += len(members_ids) - 1\r\n if best_face.ratio > self._groups[group_id].best_face.ratio:\r\n # replace the group id\r\n self._groups[best_face.face_id] = GroupData(best_face, self._groups[group_id].count)\r\n self._groups.pop(group_id)\r\n else:\r\n self._groups[best_face.face_id] = GroupData(best_face, len(members_ids))\r\n\r\n async def get_most_common_best_smile(self) -> ImageFaceData:\r\n await self.group_images()\r\n most_common_group_data = max(self._groups.values(), key=lambda x: x.count)\r\n self._log.debug(f'most common best {most_common_group_data.best_face.face_id} count {most_common_group_data.count}')\r\n return most_common_group_data.best_face\r\n\r\n\r\nclass BestSmileHandler:\r\n def __init__(self, key: str, base_path: str):\r\n self._key = key\r\n self._base_path = base_path\r\n self._log = logging.getLogger('BestSmileHandler')\r\n\r\n def _validate_exists(self, paths: List[str]):\r\n for p in paths:\r\n if not os.path.isfile(os.path.join(self._base_path, p)):\r\n raise ImageNotExistException(p)\r\n\r\n async def handle_request(self, file_names: List[str]) -> HandlerResult:\r\n self._log.debug(f'processing new request: {file_names}')\r\n self._validate_exists(file_names)\r\n async with ClientSession(headers={'Ocp-Apim-Subscription-Key': self._key},\r\n raise_for_status=True) as session:\r\n results_handler = ImageDetectResultsHandler(session)\r\n for file_name in file_names:\r\n result = await SingleImageProcess(os.path.join(self._base_path, file_name), file_name, session).run()\r\n results_handler.add_result(result)\r\n most_common_best_smile = await results_handler.get_most_common_best_smile()\r\n return HandlerResult(data=most_common_best_smile.metadata,\r\n path=most_common_best_smile.image_name)\r\n","sub_path":"best_smile_handler.py","file_name":"best_smile_handler.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"265725217","text":"class Solution:\n def countRectangles(self, rectangles: List[List[int]], points: List[List[int]]) -> List[int]:\n n=len(points)\n res=[0]*n\n dic=collections.defaultdict(list)\n for rect in rectangles:\n dic[rect[1]].append(rect[0])\n\n for v in dic.values(): v.sort()\n\n for i, p in enumerate(points):\n x, y=p[0], p[1]\n for j in range(y, 101):\n if j in dic:\n pos=bisect.bisect_left(dic[j], x)\n res[i]+=len(dic[j])-pos\n\n return res\n","sub_path":"python/count-number-of-rectangles-containing-each-point.py","file_name":"count-number-of-rectangles-containing-each-point.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"297887845","text":"#Helper Functions for Machine Learning. 
Functions to load data from files and pre-process that data.\n\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n#Input: A 1-D array of integer class labels, the number of classes\n#Output: A 2-D array of shape: [len(class_labels), num_classes]\ndef one_hot_encoded(class_numbers, num_classes=None):\n    if num_classes is None:\n        # the number of classes is the largest label plus one\n        num_classes = np.max(class_numbers) + 1\n    return np.eye(num_classes, dtype=float)[class_numbers]\n\n#Input: An array of dimensions: [height, width, num_channels]\n#Output: Display the image\ndef plot_image(image):\n    plt.axis(\"off\")\n    plt.imshow(image)\n    plt.show()\n\n#Re-constructs the object hierarchy from a file\ndef _unpickle_data(file_name):\n    with open(file_name, mode='rb') as file:\n        data = pickle.load(file, encoding='bytes')\n    return data\n\n#Pickle up to 4 variables and store at the given file-path\ndef _pickle_data(file_path, var1, var2=None, var3=None, var4=None):\n    output = open(file_path, 'wb')\n    pickle.dump(var1, output)\n    pickle.dump(var2, output)\n    pickle.dump(var3, output)\n    pickle.dump(var4, output)\n    output.close()\n\n#Load a numpy array from a .npy file\ndef load_numpy_array(file_path):\n    output = np.load(file_path)\n    return output\n\n#Save a numpy array in a .npy file\ndef save_numpy_array(my_array, file_path):\n    np.save(file_path, my_array)\n    \n#Load a PNG image and return as a numpy array\ndef load_png_as_numpy(file_path):\n    img = Image.open(file_path)\n    arr = np.array(img)\n    return arr\n\n#Returns a batch of MNIST images and one-hot labels\ndef get_MNIST_data_batch(batch_size):\n    from tensorflow.examples.tutorials.mnist import input_data\n    # load integer labels (one_hot=False); they are one-hot encoded below\n    mnist = input_data.read_data_sets('MNIST_data', one_hot=False)\n    batch = mnist.train.next_batch(batch_size)\n    raw_data = batch[0]\n    data = np.reshape(raw_data, [-1, 28, 28, 1])\n    labels = one_hot_encoded(batch[1], 10)\n    return data, labels\n\n#load cifar10 class names\ndef load_cifar10_class_names(file_path):\n    raw = _unpickle_data(file_path)[b'label_names']\n    class_names = [x.decode('utf-8') for x in raw]\n    print(\"Cifar10 class names loaded.\")\n    return class_names\n\n#Input: Training data path without number\n#Output: cifar10 images and integer class labels\ndef load_cifar10_data(file_path):\n    images = np.zeros(shape=[50000, 32, 32, 3], dtype=float)\n    cls = np.zeros(shape=[50000], dtype=int)\n    begin = 0\n    for i in range(5):\n        data = _unpickle_data(file_path + str(i + 1))\n        raw_image_batch = data[b'data']\n        cls_batch = np.array(data[b'labels'])\n        raw_image_batch = np.array(raw_image_batch, dtype=float) / 255.0\n        raw_image_batch = raw_image_batch.reshape([-1, 3, 32, 32])\n        raw_image_batch = raw_image_batch.transpose([0, 2, 3, 1])\n        end = begin + len(raw_image_batch)\n        images[begin:end,:,:,:] = raw_image_batch\n        cls[begin:end] = cls_batch\n        begin = end\n        print(\"data_batch_\" + str(i+1) + \" Loaded\")\n    return images, cls\n\n#Load the fine class names \ndef load_cifar100_class_names(file_path):\n    raw = _unpickle_data(file_path)[b'fine_label_names']\n    class_names = [x.decode('utf-8') for x in raw]\n    return class_names\n\n#Input: The complete name of the filepath (there is only 1 training and 1 test file)\ndef load_cifar100_data(filename):\n    batchdata = _unpickle_data(filename)\n    labels = np.array(batchdata[b'fine_labels'])\n    images = batchdata[b'data']\n    images = np.array(images, dtype=float) / 255.0\n    images = images.reshape([-1, 3, 32, 32])\n    images = images.transpose([0, 2, 3, 1])\n    return images, labels\n\n##Data 
Feed########################################################################################################################\n\n#More general function for extracting batches of cifar or MNIST data with the option to return one-hot labels\n#Input: data-set, integer labels, [batch_size, height, width, num_channels], number of classes (only necessary for one-hot), one-hot-output\n#Output: the data batch, labels (optionally one-hot)\ndef extract_random_batch(data, labels, data_batch_shape, one_hot=False, num_classes=0):\n    batch_size = data_batch_shape[0]\n    height = data_batch_shape[1]\n    width = data_batch_shape[2]\n    num_channels = data_batch_shape[3]\n    \n    random_indexes = np.zeros(shape=[batch_size], dtype=int)\n    lbls = np.zeros(shape = [batch_size], dtype=int)\n    batch = np.zeros(shape= [batch_size, height, width, num_channels], dtype=float)\n    for n in range(batch_size):\n        # the upper bound of randint is exclusive, so len(data) keeps the last sample reachable\n        random_indexes[n] = np.random.randint(0, len(data))\n        batch[n,:,:,:] = data[random_indexes[n],:,:,:]\n        lbls[n] = labels[random_indexes[n]]\n    if one_hot:\n        lbls = one_hot_encoded(lbls, num_classes)\n    return batch, lbls\n    \n    \n###Data Pre-processing#############################################################################################################\n#Input: A single square image, the image size (unused), % chance of vertical flip, % chance of horizontal flip\n#Output: Randomly flipped image\ndef random_flip(image, img_size, percent, percent2):\n    # randint(0, 100) yields 0..99, so '<' gives an exact percent chance\n    vert = np.random.randint(0, 100)\n    horiz = np.random.randint(0, 100)\n    image_out = image\n    if vert < percent:\n        image_out = np.flipud(image)\n    if horiz < percent2:\n        # flip image_out, not image, so both flips can apply together\n        image_out = np.fliplr(image_out)\n    return image_out\n\n#Randomly adjust hue, contrast..\n\n\n","sub_path":"Dataset_functions.py","file_name":"Dataset_functions.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}